path: root/tools/testing/selftests/sync/sync.c
Diffstat (limited to 'tools/testing/selftests/sync/sync.c')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--drivers/accel/Makefile5
-rw-r--r--drivers/accel/drm_accel.c2
-rw-r--r--drivers/accel/habanalabs/Kconfig (renamed from drivers/misc/habanalabs/Kconfig)8
-rw-r--r--drivers/accel/habanalabs/Makefile (renamed from drivers/misc/habanalabs/Makefile)2
-rw-r--r--drivers/accel/habanalabs/common/Makefile (renamed from drivers/misc/habanalabs/common/Makefile)0
-rw-r--r--drivers/accel/habanalabs/common/asid.c (renamed from drivers/misc/habanalabs/common/asid.c)0
-rw-r--r--drivers/accel/habanalabs/common/command_buffer.c (renamed from drivers/misc/habanalabs/common/command_buffer.c)37
-rw-r--r--drivers/accel/habanalabs/common/command_submission.c (renamed from drivers/misc/habanalabs/common/command_submission.c)261
-rw-r--r--drivers/accel/habanalabs/common/context.c (renamed from drivers/misc/habanalabs/common/context.c)0
-rw-r--r--drivers/accel/habanalabs/common/debugfs.c (renamed from drivers/misc/habanalabs/common/debugfs.c)142
-rw-r--r--drivers/accel/habanalabs/common/decoder.c (renamed from drivers/misc/habanalabs/common/decoder.c)46
-rw-r--r--drivers/accel/habanalabs/common/device.c (renamed from drivers/misc/habanalabs/common/device.c)470
-rw-r--r--drivers/accel/habanalabs/common/firmware_if.c (renamed from drivers/misc/habanalabs/common/firmware_if.c)206
-rw-r--r--drivers/accel/habanalabs/common/habanalabs.h (renamed from drivers/misc/habanalabs/common/habanalabs.h)259
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_drv.c (renamed from drivers/misc/habanalabs/common/habanalabs_drv.c)14
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_ioctl.c (renamed from drivers/misc/habanalabs/common/habanalabs_ioctl.c)161
-rw-r--r--drivers/accel/habanalabs/common/hw_queue.c (renamed from drivers/misc/habanalabs/common/hw_queue.c)0
-rw-r--r--drivers/accel/habanalabs/common/hwmon.c (renamed from drivers/misc/habanalabs/common/hwmon.c)2
-rw-r--r--drivers/accel/habanalabs/common/irq.c (renamed from drivers/misc/habanalabs/common/irq.c)104
-rw-r--r--drivers/accel/habanalabs/common/memory.c (renamed from drivers/misc/habanalabs/common/memory.c)423
-rw-r--r--drivers/accel/habanalabs/common/memory_mgr.c (renamed from drivers/misc/habanalabs/common/memory_mgr.c)18
-rw-r--r--drivers/accel/habanalabs/common/mmu/Makefile (renamed from drivers/misc/habanalabs/common/mmu/Makefile)0
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu.c (renamed from drivers/misc/habanalabs/common/mmu/mmu.c)16
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v1.c (renamed from drivers/misc/habanalabs/common/mmu/mmu_v1.c)1
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c (renamed from drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c)0
-rw-r--r--drivers/accel/habanalabs/common/pci/Makefile (renamed from drivers/misc/habanalabs/common/pci/Makefile)0
-rw-r--r--drivers/accel/habanalabs/common/pci/pci.c (renamed from drivers/misc/habanalabs/common/pci/pci.c)12
-rw-r--r--drivers/accel/habanalabs/common/security.c (renamed from drivers/misc/habanalabs/common/security.c)180
-rw-r--r--drivers/accel/habanalabs/common/security.h163
-rw-r--r--drivers/accel/habanalabs/common/state_dump.c (renamed from drivers/misc/habanalabs/common/state_dump.c)2
-rw-r--r--drivers/accel/habanalabs/common/sysfs.c (renamed from drivers/misc/habanalabs/common/sysfs.c)6
-rw-r--r--drivers/accel/habanalabs/gaudi/Makefile (renamed from drivers/misc/habanalabs/gaudi/Makefile)0
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi.c (renamed from drivers/misc/habanalabs/gaudi/gaudi.c)182
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudiP.h (renamed from drivers/misc/habanalabs/gaudi/gaudiP.h)17
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi_coresight.c (renamed from drivers/misc/habanalabs/gaudi/gaudi_coresight.c)3
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi_security.c (renamed from drivers/misc/habanalabs/gaudi/gaudi_security.c)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/Makefile (renamed from drivers/misc/habanalabs/gaudi2/Makefile)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2.c (renamed from drivers/misc/habanalabs/gaudi2/gaudi2.c)2983
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2P.h (renamed from drivers/misc/habanalabs/gaudi2/gaudi2P.h)65
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c)12
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_masks.h (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_masks.h)3
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_security.c (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_security.c)38
-rw-r--r--drivers/accel/habanalabs/goya/Makefile (renamed from drivers/misc/habanalabs/goya/Makefile)0
-rw-r--r--drivers/accel/habanalabs/goya/goya.c (renamed from drivers/misc/habanalabs/goya/goya.c)38
-rw-r--r--drivers/accel/habanalabs/goya/goyaP.h (renamed from drivers/misc/habanalabs/goya/goyaP.h)2
-rw-r--r--drivers/accel/habanalabs/goya/goya_coresight.c (renamed from drivers/misc/habanalabs/goya/goya_coresight.c)2
-rw-r--r--drivers/accel/habanalabs/goya/goya_hwmgr.c (renamed from drivers/misc/habanalabs/goya/goya_hwmgr.c)0
-rw-r--r--drivers/accel/habanalabs/goya/goya_security.c (renamed from drivers/misc/habanalabs/goya/goya_security.c)0
-rw-r--r--drivers/accel/habanalabs/include/common/cpucp_if.h (renamed from drivers/misc/habanalabs/include/common/cpucp_if.h)105
-rw-r--r--drivers/accel/habanalabs/include/common/hl_boot_if.h (renamed from drivers/misc/habanalabs/include/common/hl_boot_if.h)128
-rw-r--r--drivers/accel/habanalabs/include/common/qman_if.h (renamed from drivers/misc/habanalabs/include/common/qman_if.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h)2
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_packets.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_packets.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h211
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h)15
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h)41
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h)6
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h)9
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h)6
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h1203
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h)10
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h)27
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2.h)2
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h2678
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h)26
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h)4
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h)16
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h157
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya.h (renamed from drivers/misc/habanalabs/include/goya/goya.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_async_events.h (renamed from drivers/misc/habanalabs/include/goya/goya_async_events.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_coresight.h (renamed from drivers/misc/habanalabs/include/goya/goya_coresight.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_fw_if.h (renamed from drivers/misc/habanalabs/include/goya/goya_fw_if.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_packets.h (renamed from drivers/misc/habanalabs/include/goya/goya_packets.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_reg_map.h (renamed from drivers/misc/habanalabs/include/goya/goya_reg_map.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h (renamed from drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h)0
-rw-r--r--drivers/accel/ivpu/Kconfig15
-rw-r--r--drivers/accel/ivpu/Makefile16
-rw-r--r--drivers/accel/ivpu/TODO11
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c669
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h195
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c434
-rw-r--r--drivers/accel/ivpu/ivpu_fw.h38
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c749
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h127
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h170
-rw-r--r--drivers/accel/ivpu/ivpu_hw_mtl.c1041
-rw-r--r--drivers/accel/ivpu/ivpu_hw_mtl_reg.h280
-rw-r--r--drivers/accel/ivpu/ivpu_hw_reg_io.h115
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c510
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.h93
-rw-r--r--drivers/accel/ivpu/ivpu_job.c618
-rw-r--r--drivers/accel/ivpu/ivpu_job.h67
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c180
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.h23
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c883
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.h50
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c398
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.h50
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c326
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h39
-rw-r--r--drivers/accel/ivpu/vpu_boot_api.h349
-rw-r--r--drivers/accel/ivpu/vpu_jsm_api.h1008
-rw-r--r--drivers/accel/qaic/Kconfig23
-rw-r--r--drivers/accel/qaic/Makefile12
-rw-r--r--drivers/accel/qaic/mhi_controller.c563
-rw-r--r--drivers/accel/qaic/mhi_controller.h16
-rw-r--r--drivers/accel/qaic/qaic.h282
-rw-r--r--drivers/accel/qaic/qaic_control.c1526
-rw-r--r--drivers/accel/qaic/qaic_data.c1902
-rw-r--r--drivers/accel/qaic/qaic_drv.c637
-rw-r--r--drivers/accessibility/braille/braille_console.c1
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/accessibility/speakup/spk_ttyio.c3
-rw-r--r--drivers/acpi/acpi_apd.c2
-rw-r--r--drivers/acpi/acpi_lpit.c17
-rw-r--r--drivers/acpi/acpi_lpss.c7
-rw-r--r--drivers/acpi/acpi_pnp.c14
-rw-r--r--drivers/acpi/acpi_processor.c42
-rw-r--r--drivers/acpi/acpi_video.c46
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acapps.h2
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconvert.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h5
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h4
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h4
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h24
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c3
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsdebug.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dspkginit.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c13
-rw-r--r--drivers/acpi/acpica/evevent.c13
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c6
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c6
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exserial.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/extrace.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c16
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c9
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c14
-rw-r--r--drivers/acpi/acpica/nsrepair2.c4
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c4
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psobject.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c11
-rw-r--r--drivers/acpi/acpica/rscalc.c51
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c17
-rw-r--r--drivers/acpi/acpica/rsinfo.c5
-rw-r--r--drivers/acpi/acpica/rslist.c12
-rw-r--r--drivers/acpi/acpica/rsmisc.c10
-rw-r--r--drivers/acpi/acpica/rsserial.c49
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c7
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcksum.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c6
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/utresdecode.c11
-rw-r--r--drivers/acpi/acpica/utresrc.c17
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/apei/einj.c18
-rw-r--r--drivers/acpi/arm64/agdi.c13
-rw-r--r--drivers/acpi/battery.c35
-rw-r--r--drivers/acpi/bus.c96
-rw-r--r--drivers/acpi/cppc_acpi.c189
-rw-r--r--drivers/acpi/device_pm.c19
-rw-r--r--drivers/acpi/device_sysfs.c10
-rw-r--r--drivers/acpi/ec.c18
-rw-r--r--drivers/acpi/glue.c14
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/ioapic.c1
-rw-r--r--drivers/acpi/nfit/core.c8
-rw-r--r--drivers/acpi/numa/hmat.c4
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/pfr_telemetry.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic_bytcrc.c1
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c26
-rw-r--r--drivers/acpi/power.c19
-rw-r--r--drivers/acpi/pptt.c98
-rw-r--r--drivers/acpi/prmt.c10
-rw-r--r--drivers/acpi/processor_driver.c12
-rw-r--r--drivers/acpi/processor_idle.c30
-rw-r--r--drivers/acpi/processor_pdc.c11
-rw-r--r--drivers/acpi/processor_perflib.c38
-rw-r--r--drivers/acpi/processor_thermal.c14
-rw-r--r--drivers/acpi/property.c80
-rw-r--r--drivers/acpi/resource.c78
-rw-r--r--drivers/acpi/sbs.c27
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/sleep.c14
-rw-r--r--drivers/acpi/spcr.c13
-rw-r--r--drivers/acpi/sysfs.c19
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/acpi/thermal.c70
-rw-r--r--drivers/acpi/video_detect.c128
-rw-r--r--drivers/acpi/viot.c5
-rw-r--r--drivers/acpi/x86/apple.c11
-rw-r--r--drivers/acpi/x86/s2idle.c91
-rw-r--r--drivers/acpi/x86/utils.c93
-rw-r--r--drivers/amba/bus.c4
-rw-r--r--drivers/amba/tegra-ahb.c1
-rw-r--r--drivers/android/binder.c68
-rw-r--r--drivers/android/binder_alloc.c2
-rw-r--r--drivers/android/binder_internal.h3
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/Kconfig35
-rw-r--r--drivers/ata/Makefile4
-rw-r--r--drivers/ata/acard-ahci.c10
-rw-r--r--drivers/ata/ahci.c34
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_brcm.c2
-rw-r--r--drivers/ata/ahci_ceva.c2
-rw-r--r--drivers/ata/ahci_da850.c2
-rw-r--r--drivers/ata/ahci_dm816.c2
-rw-r--r--drivers/ata/ahci_dwc.c2
-rw-r--r--drivers/ata/ahci_imx.c4
-rw-r--r--drivers/ata/ahci_mtk.c4
-rw-r--r--drivers/ata/ahci_mvebu.c2
-rw-r--r--drivers/ata/ahci_octeon.c6
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/ahci_qoriq.c2
-rw-r--r--drivers/ata/ahci_seattle.c2
-rw-r--r--drivers/ata/ahci_st.c2
-rw-r--r--drivers/ata/ahci_sunxi.c2
-rw-r--r--drivers/ata/ahci_tegra.c2
-rw-r--r--drivers/ata/ahci_xgene.c2
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c6
-rw-r--r--drivers/ata/libahci.c175
-rw-r--r--drivers/ata/libahci_platform.c4
-rw-r--r--drivers/ata/libata-core.c90
-rw-r--r--drivers/ata/libata-eh.c117
-rw-r--r--drivers/ata/libata-sata.c7
-rw-r--r--drivers/ata/libata-scsi.c66
-rw-r--r--drivers/ata/libata-sff.c18
-rw-r--r--drivers/ata/libata-trace.c2
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_arasan_cf.c2
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_buddha.c2
-rw-r--r--drivers/ata/pata_cmd640.c2
-rw-r--r--drivers/ata/pata_cmd64x.c2
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5530.c2
-rw-r--r--drivers/ata/pata_cs5535.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c2
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/ata/pata_falcon.c2
-rw-r--r--drivers/ata/pata_ftide010.c2
-rw-r--r--drivers/ata/pata_gayle.c2
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c2
-rw-r--r--drivers/ata/pata_hpt3x2n.c2
-rw-r--r--drivers/ata/pata_hpt3x3.c2
-rw-r--r--drivers/ata/pata_icside.c2
-rw-r--r--drivers/ata/pata_imx.c2
-rw-r--r--drivers/ata/pata_isapnp.c2
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c2
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c3
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_macio.c3
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/ata/pata_mpiix.c2
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c2
-rw-r--r--drivers/ata/pata_ns87410.c2
-rw-r--r--drivers/ata/pata_ns87415.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c18
-rw-r--r--drivers/ata/pata_of_platform.c2
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_opti.c2
-rw-r--r--drivers/ata/pata_optidma.c2
-rw-r--r--drivers/ata/pata_palmld.c137
-rw-r--r--drivers/ata/pata_parport/Kconfig141
-rw-r--r--drivers/ata/pata_parport/Makefile19
-rw-r--r--drivers/ata/pata_parport/aten.c (renamed from drivers/block/paride/aten.c)45
-rw-r--r--drivers/ata/pata_parport/bpck.c (renamed from drivers/block/paride/bpck.c)86
-rw-r--r--drivers/ata/pata_parport/bpck6.c460
-rw-r--r--drivers/ata/pata_parport/comm.c (renamed from drivers/block/paride/comm.c)52
-rw-r--r--drivers/ata/pata_parport/dstr.c (renamed from drivers/block/paride/dstr.c)45
-rw-r--r--drivers/ata/pata_parport/epat.c (renamed from drivers/block/paride/epat.c)48
-rw-r--r--drivers/ata/pata_parport/epia.c (renamed from drivers/block/paride/epia.c)55
-rw-r--r--drivers/ata/pata_parport/fit2.c (renamed from drivers/block/paride/fit2.c)37
-rw-r--r--drivers/ata/pata_parport/fit3.c (renamed from drivers/block/paride/fit3.c)39
-rw-r--r--drivers/ata/pata_parport/friq.c (renamed from drivers/block/paride/friq.c)56
-rw-r--r--drivers/ata/pata_parport/frpw.c (renamed from drivers/block/paride/frpw.c)71
-rw-r--r--drivers/ata/pata_parport/kbic.c (renamed from drivers/block/paride/kbic.c)66
-rw-r--r--drivers/ata/pata_parport/ktti.c (renamed from drivers/block/paride/ktti.c)38
-rw-r--r--drivers/ata/pata_parport/on20.c (renamed from drivers/block/paride/on20.c)45
-rw-r--r--drivers/ata/pata_parport/on26.c (renamed from drivers/block/paride/on26.c)52
-rw-r--r--drivers/ata/pata_parport/pata_parport.c762
-rw-r--r--drivers/ata/pata_parport/pata_parport.h96
-rw-r--r--drivers/ata/pata_pcmcia.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c2
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_platform.c4
-rw-r--r--drivers/ata/pata_pxa.c2
-rw-r--r--drivers/ata/pata_radisys.c2
-rw-r--r--drivers/ata/pata_rb532_cf.c2
-rw-r--r--drivers/ata/pata_rdc.c2
-rw-r--r--drivers/ata/pata_rz1000.c2
-rw-r--r--drivers/ata/pata_samsung_cf.c662
-rw-r--r--drivers/ata/pata_sc1200.c2
-rw-r--r--drivers/ata/pata_sch.c2
-rw-r--r--drivers/ata/pata_serverworks.c6
-rw-r--r--drivers/ata/pata_sil680.c2
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/pdc_adma.c2
-rw-r--r--drivers/ata/sata_dwc_460ex.c6
-rw-r--r--drivers/ata/sata_fsl.c7
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/ata/sata_inic162x.c16
-rw-r--r--drivers/ata/sata_mv.c4
-rw-r--r--drivers/ata/sata_nv.c8
-rw-r--r--drivers/ata/sata_promise.c4
-rw-r--r--drivers/ata/sata_qstor.c2
-rw-r--r--drivers/ata/sata_rcar.c2
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sil24.c9
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/ata/sata_sx4.c4
-rw-r--r--drivers/ata/sata_uli.c2
-rw-r--r--drivers/ata/sata_via.c2
-rw-r--r--drivers/ata/sata_vsc.c2
-rw-r--r--drivers/atm/idt77252.c11
-rw-r--r--drivers/auxdisplay/hd44780.c2
-rw-r--r--drivers/base/Kconfig12
-rw-r--r--drivers/base/arch_topology.c15
-rw-r--r--drivers/base/auxiliary.c2
-rw-r--r--drivers/base/base.h135
-rw-r--r--drivers/base/bus.c615
-rw-r--r--drivers/base/cacheinfo.c261
-rw-r--r--drivers/base/class.c273
-rw-r--r--drivers/base/component.c2
-rw-r--r--drivers/base/core.c850
-rw-r--r--drivers/base/cpu.c43
-rw-r--r--drivers/base/dd.c72
-rw-r--r--drivers/base/devcoredump.c5
-rw-r--r--drivers/base/devres.c11
-rw-r--r--drivers/base/devtmpfs.c37
-rw-r--r--drivers/base/driver.c29
-rw-r--r--drivers/base/firmware_loader/Kconfig13
-rw-r--r--drivers/base/firmware_loader/main.c65
-rw-r--r--drivers/base/firmware_loader/sysfs.c4
-rw-r--r--drivers/base/memory.c9
-rw-r--r--drivers/base/node.c3
-rw-r--r--drivers/base/physical_location.c5
-rw-r--r--drivers/base/physical_location.h2
-rw-r--r--drivers/base/platform-msi.c1
-rw-r--r--drivers/base/platform.c48
-rw-r--r--drivers/base/power/domain.c31
-rw-r--r--drivers/base/power/main.c12
-rw-r--r--drivers/base/power/runtime.c28
-rw-r--r--drivers/base/power/wakeup_stats.c2
-rw-r--r--drivers/base/property.c166
-rw-r--r--drivers/base/regmap/Kconfig13
-rw-r--r--drivers/base/regmap/Makefile5
-rw-r--r--drivers/base/regmap/internal.h24
-rw-r--r--drivers/base/regmap/regcache-lzo.c368
-rw-r--r--drivers/base/regmap/regcache-maple.c279
-rw-r--r--drivers/base/regmap/regcache.c56
-rw-r--r--drivers/base/regmap/regmap-debugfs.c8
-rw-r--r--drivers/base/regmap/regmap-irq.c55
-rw-r--r--drivers/base/regmap/regmap-kunit.c739
-rw-r--r--drivers/base/regmap/regmap-mdio.c41
-rw-r--r--drivers/base/regmap/regmap-ram.c85
-rw-r--r--drivers/base/regmap/regmap-sdw.c41
-rw-r--r--drivers/base/regmap/regmap.c47
-rw-r--r--drivers/base/soc.c19
-rw-r--r--drivers/base/swnode.c63
-rw-r--r--drivers/base/test/property-entry-test.c30
-rw-r--r--drivers/base/test/test_async_driver_probe.c2
-rw-r--r--drivers/base/transport_class.c17
-rw-r--r--drivers/bcma/driver_mips.c6
-rw-r--r--drivers/bcma/main.c17
-rw-r--r--drivers/block/Kconfig89
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/aoe/aoechr.c2
-rw-r--r--drivers/block/brd.c83
-rw-r--r--drivers/block/drbd/Makefile2
-rw-r--r--drivers/block/drbd/drbd_actlog.c13
-rw-r--r--drivers/block/drbd/drbd_bitmap.c13
-rw-r--r--drivers/block/drbd/drbd_buildtag.c22
-rw-r--r--drivers/block/drbd/drbd_debugfs.c2
-rw-r--r--drivers/block/drbd/drbd_int.h133
-rw-r--r--drivers/block/drbd/drbd_interval.c6
-rw-r--r--drivers/block/drbd/drbd_main.c92
-rw-r--r--drivers/block/drbd/drbd_nl.c25
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c108
-rw-r--r--drivers/block/drbd/drbd_req.c32
-rw-r--r--drivers/block/drbd/drbd_req.h11
-rw-r--r--drivers/block/drbd/drbd_state.c31
-rw-r--r--drivers/block/drbd/drbd_vli.h2
-rw-r--r--drivers/block/drbd/drbd_worker.c114
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c65
-rw-r--r--drivers/block/nbd.c26
-rw-r--r--drivers/block/null_blk/Kconfig2
-rw-r--r--drivers/block/null_blk/main.c170
-rw-r--r--drivers/block/null_blk/null_blk.h7
-rw-r--r--drivers/block/paride/Kconfig302
-rw-r--r--drivers/block/paride/Makefile29
-rw-r--r--drivers/block/paride/Transition-notes128
-rw-r--r--drivers/block/paride/bpck6.c267
-rw-r--r--drivers/block/paride/mkd31
-rw-r--r--drivers/block/paride/paride.c479
-rw-r--r--drivers/block/paride/paride.h172
-rw-r--r--drivers/block/paride/pcd.c1042
-rw-r--r--drivers/block/paride/pd.c1032
-rw-r--r--drivers/block/paride/pf.c1057
-rw-r--r--drivers/block/paride/pg.c734
-rw-r--r--drivers/block/paride/ppc6lnx.c726
-rw-r--r--drivers/block/paride/pseudo.h102
-rw-r--r--drivers/block/paride/pt.c1024
-rw-r--r--drivers/block/pktcdvd.c2934
-rw-r--r--drivers/block/ps3vram.c5
-rw-r--r--drivers/block/rbd.c61
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c2
-rw-r--r--drivers/block/rnbd/rnbd-proto.h2
-rw-r--r--drivers/block/rnbd/rnbd-srv-sysfs.c2
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/ublk_drv.c646
-rw-r--r--drivers/block/virtio_blk.c586
-rw-r--r--drivers/block/xen-blkback/blkback.c126
-rw-r--r--drivers/block/xen-blkback/common.h103
-rw-r--r--drivers/block/xen-blkback/xenbus.c4
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/block/zram/zram_drv.c467
-rw-r--r--drivers/block/zram/zram_drv.h1
-rw-r--r--drivers/bluetooth/Kconfig14
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btbcm.c49
-rw-r--r--drivers/bluetooth/btintel.c208
-rw-r--r--drivers/bluetooth/btintel.h18
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c2
-rw-r--r--drivers/bluetooth/btmtkuart.c6
-rw-r--r--drivers/bluetooth/btnxpuart.c1352
-rw-r--r--drivers/bluetooth/btqca.c14
-rw-r--r--drivers/bluetooth/btqca.h10
-rw-r--r--drivers/bluetooth/btqcomsmd.c17
-rw-r--r--drivers/bluetooth/btrtl.c502
-rw-r--r--drivers/bluetooth/btrtl.h58
-rw-r--r--drivers/bluetooth/btsdio.c1
-rw-r--r--drivers/bluetooth/btusb.c344
-rw-r--r--drivers/bluetooth/hci_bcm.c60
-rw-r--r--drivers/bluetooth/hci_h5.c6
-rw-r--r--drivers/bluetooth/hci_ldisc.c8
-rw-r--r--drivers/bluetooth/hci_ll.c2
-rw-r--r--drivers/bluetooth/hci_mrvl.c90
-rw-r--r--drivers/bluetooth/hci_qca.c85
-rw-r--r--drivers/bluetooth/hci_vhci.c101
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/arm-integrator-lm.c1
-rw-r--r--drivers/bus/brcmstb_gisb.c4
-rw-r--r--drivers/bus/bt1-apb.c1
-rw-r--r--drivers/bus/bt1-axi.c1
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c10
-rw-r--r--drivers/bus/imx-weim.c31
-rw-r--r--drivers/bus/intel-ixp4xx-eb.c1
-rw-r--r--drivers/bus/mhi/Makefile4
-rw-r--r--drivers/bus/mhi/ep/main.c91
-rw-r--r--drivers/bus/mhi/ep/sm.c42
-rw-r--r--drivers/bus/mhi/host/boot.c16
-rw-r--r--drivers/bus/mhi/host/init.c22
-rw-r--r--drivers/bus/mhi/host/main.c25
-rw-r--r--drivers/bus/mhi/host/pci_generic.c28
-rw-r--r--drivers/bus/mips_cdmm.c4
-rw-r--r--drivers/bus/mvebu-mbus.c58
-rw-r--r--drivers/bus/qcom-ebi2.c1
-rw-r--r--drivers/bus/qcom-ssc-block-bus.c1
-rw-r--r--drivers/bus/simple-pm-bus.c48
-rw-r--r--drivers/bus/sunxi-rsb.c15
-rw-r--r--drivers/bus/tegra-gmi.c4
-rw-r--r--drivers/bus/ti-sysc.c53
-rw-r--r--drivers/bus/uniphier-system-bus.c54
-rw-r--r--drivers/bus/vexpress-config.c2
-rw-r--r--drivers/cdx/Kconfig19
-rw-r--r--drivers/cdx/Makefile8
-rw-r--r--drivers/cdx/cdx.c535
-rw-r--r--drivers/cdx/cdx.h62
-rw-r--r--drivers/cdx/controller/Kconfig31
-rw-r--r--drivers/cdx/controller/Makefile9
-rw-r--r--drivers/cdx/controller/bitfield.h90
-rw-r--r--drivers/cdx/controller/cdx_controller.c230
-rw-r--r--drivers/cdx/controller/cdx_controller.h30
-rw-r--r--drivers/cdx/controller/cdx_rpmsg.c202
-rw-r--r--drivers/cdx/controller/mc_cdx_pcol.h590
-rw-r--r--drivers/cdx/controller/mcdi.c903
-rw-r--r--drivers/cdx/controller/mcdi.h248
-rw-r--r--drivers/cdx/controller/mcdi_functions.c139
-rw-r--r--drivers/cdx/controller/mcdi_functions.h61
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/agp.h6
-rw-r--r--drivers/char/applicom.c5
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/char/dsp56k.c2
-rw-r--r--drivers/char/hw_random/Kconfig10
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/jh7110-trng.c393
-rw-r--r--drivers/char/hw_random/meson-rng.c29
-rw-r--r--drivers/char/hw_random/xgene-rng.c46
-rw-r--r--drivers/char/ipmi/Kconfig3
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c2
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c2
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c16
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c127
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/pcmcia/Kconfig68
-rw-r--r--drivers/char/pcmcia/Makefile11
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c1910
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c684
-rw-r--r--drivers/char/pcmcia/cm4040_cs.h48
-rw-r--r--drivers/char/pcmcia/scr24x_cs.c359
-rw-r--r--drivers/char/pcmcia/synclink_cs.c4292
-rw-r--r--drivers/char/ppdev.c2
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/tpm/eventlog/acpi.c11
-rw-r--r--drivers/char/tpm/eventlog/common.c6
-rw-r--r--drivers/char/tpm/eventlog/efi.c13
-rw-r--r--drivers/char/tpm/eventlog/of.c35
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c9
-rw-r--r--drivers/char/tpm/st33zp24/spi.c4
-rw-r--r--drivers/char/tpm/tpm-chip.c115
-rw-r--r--drivers/char/tpm/tpm-interface.c10
-rw-r--r--drivers/char/tpm/tpm.h75
-rw-r--r--drivers/char/tpm/tpm2-cmd.c4
-rw-r--r--drivers/char/tpm/tpm_atmel.h2
-rw-r--r--drivers/char/tpm/tpm_crb.c100
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c6
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c6
-rw-r--r--drivers/char/tpm/tpm_tis.c51
-rw-r--r--drivers/char/tpm/tpm_tis_core.c299
-rw-r--r--drivers/char/tpm/tpm_tis_core.h5
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c5
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c3
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c4
-rw-r--r--drivers/char/tpm/tpm_tis_synquacer.c6
-rw-r--r--drivers/char/tpm/xen-tpmfront.c3
-rw-r--r--drivers/char/virtio_console.c7
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/char/xillybus/xillybus_class.c2
-rw-r--r--drivers/clk/Kconfig31
-rw-r--r--drivers/clk/Makefile7
-rw-r--r--drivers/clk/at91/Makefile16
-rw-r--r--drivers/clk/at91/at91rm9200.c2
-rw-r--r--drivers/clk/at91/at91sam9260.c2
-rw-r--r--drivers/clk/at91/at91sam9g45.c10
-rw-r--r--drivers/clk/at91/at91sam9n12.c12
-rw-r--r--drivers/clk/at91/at91sam9rl.c2
-rw-r--r--drivers/clk/at91/at91sam9x5.c17
-rw-r--r--drivers/clk/at91/clk-peripheral.c8
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c2
-rw-r--r--drivers/clk/at91/clk-system.c4
-rw-r--r--drivers/clk/at91/dt-compat.c25
-rw-r--r--drivers/clk/at91/pmc.h4
-rw-r--r--drivers/clk/at91/sam9x60.c20
-rw-r--r--drivers/clk/at91/sama5d2.c22
-rw-r--r--drivers/clk/at91/sama5d3.c20
-rw-r--r--drivers/clk/at91/sama5d4.c22
-rw-r--r--drivers/clk/at91/sama7g5.c4
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c5
-rw-r--r--drivers/clk/axs10x/pll_clock.c11
-rw-r--r--drivers/clk/bcm/Kconfig9
-rw-r--r--drivers/clk/bcm/Makefile1
-rw-r--r--drivers/clk/bcm/clk-bcm2711-dvp.c6
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1
-rw-r--r--drivers/clk/bcm/clk-bcm63268-timer.c216
-rw-r--r--drivers/clk/bcm/clk-bcm63xx-gate.c6
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c6
-rw-r--r--drivers/clk/clk-ast2600.c67
-rw-r--r--drivers/clk/clk-axi-clkgen.c12
-rw-r--r--drivers/clk/clk-axm5516.c9
-rw-r--r--drivers/clk/clk-bm1880.c1
-rw-r--r--drivers/clk/clk-cdce706.c11
-rw-r--r--drivers/clk/clk-conf.c12
-rw-r--r--drivers/clk/clk-fixed-factor.c6
-rw-r--r--drivers/clk/clk-fixed-mmio.c7
-rw-r--r--drivers/clk/clk-fixed-rate.c6
-rw-r--r--drivers/clk/clk-fractional-divider.c16
-rw-r--r--drivers/clk/clk-fsl-sai.c1
-rw-r--r--drivers/clk/clk-hsdk-pll.c11
-rw-r--r--drivers/clk/clk-k210.c2
-rw-r--r--drivers/clk/clk-lmk04832.c5
-rw-r--r--drivers/clk/clk-loongson1.c303
-rw-r--r--drivers/clk/clk-loongson2.c341
-rw-r--r--drivers/clk/clk-milbeaut.c4
-rw-r--r--drivers/clk/clk-palmas.c5
-rw-r--r--drivers/clk/clk-pwm.c6
-rw-r--r--drivers/clk/clk-renesas-pcie.c74
-rw-r--r--drivers/clk/clk-s2mps11.c6
-rw-r--r--drivers/clk/clk-scpi.c5
-rw-r--r--drivers/clk/clk-si514.c10
-rw-r--r--drivers/clk/clk-si521xx.c395
-rw-r--r--drivers/clk/clk-si5351.c10
-rw-r--r--drivers/clk/clk-si570.c14
-rw-r--r--drivers/clk/clk-sp7021.c713
-rw-r--r--drivers/clk/clk-stm32h7.c1
-rw-r--r--drivers/clk/clk-stm32mp1.c6
-rw-r--r--drivers/clk/clk-versaclock5.c28
-rw-r--r--drivers/clk/clk.c29
-rw-r--r--drivers/clk/davinci/Makefile4
-rw-r--r--drivers/clk/davinci/pll-dm355.c77
-rw-r--r--drivers/clk/davinci/pll-dm365.c146
-rw-r--r--drivers/clk/davinci/pll.c8
-rw-r--r--drivers/clk/davinci/pll.h5
-rw-r--r--drivers/clk/davinci/psc-dm355.c89
-rw-r--r--drivers/clk/davinci/psc-dm365.c111
-rw-r--r--drivers/clk/davinci/psc.c6
-rw-r--r--drivers/clk/davinci/psc.h7
-rw-r--r--drivers/clk/hisilicon/clk-hi3519.c5
-rw-r--r--drivers/clk/hisilicon/clk-hi3559a.c6
-rw-r--r--drivers/clk/hisilicon/crg-hi3516cv300.c5
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c5
-rw-r--r--drivers/clk/imx/Kconfig2
-rw-r--r--drivers/clk/imx/Makefile3
-rw-r--r--drivers/clk/imx/clk-composite-7ulp.c4
-rw-r--r--drivers/clk/imx/clk-composite-93.c8
-rw-r--r--drivers/clk/imx/clk-fracn-gppll.c91
-rw-r--r--drivers/clk/imx/clk-gpr-mux.c120
-rw-r--r--drivers/clk/imx/clk-imx25.c2
-rw-r--r--drivers/clk/imx/clk-imx27.c2
-rw-r--r--drivers/clk/imx/clk-imx35.c2
-rw-r--r--drivers/clk/imx/clk-imx5.c6
-rw-r--r--drivers/clk/imx/clk-imx6q.c15
-rw-r--r--drivers/clk/imx/clk-imx6sl.c2
-rw-r--r--drivers/clk/imx/clk-imx6sll.c2
-rw-r--r--drivers/clk/imx/clk-imx6sx.c2
-rw-r--r--drivers/clk/imx/clk-imx6ul.c35
-rw-r--r--drivers/clk/imx/clk-imx7d.c2
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c4
-rw-r--r--drivers/clk/imx/clk-imx8mm.c4
-rw-r--r--drivers/clk/imx/clk-imx8mn.c4
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c277
-rw-r--r--drivers/clk/imx/clk-imx8mp.c7
-rw-r--r--drivers/clk/imx/clk-imx8mq.c2
-rw-r--r--drivers/clk/imx/clk-imx8ulp.c36
-rw-r--r--drivers/clk/imx/clk-imx93.c21
-rw-r--r--drivers/clk/imx/clk-imxrt1050.c4
-rw-r--r--drivers/clk/imx/clk-pfd.c2
-rw-r--r--drivers/clk/imx/clk-pll14xx.c2
-rw-r--r--drivers/clk/imx/clk-pllv3.c2
-rw-r--r--drivers/clk/imx/clk.c31
-rw-r--r--drivers/clk/imx/clk.h35
-rw-r--r--drivers/clk/ingenic/jz4760-cgu.c18
-rw-r--r--drivers/clk/keystone/sci-clk.c6
-rw-r--r--drivers/clk/loongson1/Makefile4
-rw-r--r--drivers/clk/loongson1/clk-loongson1b.c118
-rw-r--r--drivers/clk/loongson1/clk-loongson1c.c95
-rw-r--r--drivers/clk/loongson1/clk.c41
-rw-r--r--drivers/clk/loongson1/clk.h15
-rw-r--r--drivers/clk/mediatek/Kconfig448
-rw-r--r--drivers/clk/mediatek/Makefile78
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c8
-rw-r--r--drivers/clk/mediatek/clk-cpumux.h2
-rw-r--r--drivers/clk/mediatek/clk-fhctl.c26
-rw-r--r--drivers/clk/mediatek/clk-fhctl.h9
-rw-r--r--drivers/clk/mediatek/clk-gate.c23
-rw-r--r--drivers/clk/mediatek/clk-gate.h7
-rw-r--r--drivers/clk/mediatek/clk-mt2701-aud.c76
-rw-r--r--drivers/clk/mediatek/clk-mt2701-bdp.c25
-rw-r--r--drivers/clk/mediatek/clk-mt2701-eth.c51
-rw-r--r--drivers/clk/mediatek/clk-mt2701-g3d.c71
-rw-r--r--drivers/clk/mediatek/clk-mt2701-hif.c53
-rw-r--r--drivers/clk/mediatek/clk-mt2701-img.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2701-mm.c56
-rw-r--r--drivers/clk/mediatek/clk-mt2701-vdec.c25
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c68
-rw-r--r--drivers/clk/mediatek/clk-mt2712-apmixedsys.c168
-rw-r--r--drivers/clk/mediatek/clk-mt2712-bdp.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712-img.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712-jpgdec.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mfg.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mm.c66
-rw-r--r--drivers/clk/mediatek/clk-mt2712-vdec.c25
-rw-r--r--drivers/clk/mediatek/clk-mt2712-venc.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c1053
-rw-r--r--drivers/clk/mediatek/clk-mt6765-audio.c25
-rw-r--r--drivers/clk/mediatek/clk-mt6765-cam.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6765-img.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mipi0a.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mm.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6765-vcodec.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6765.c95
-rw-r--r--drivers/clk/mediatek/clk-mt6779-aud.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779-cam.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779-img.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779-ipe.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mfg.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mm.c25
-rw-r--r--drivers/clk/mediatek/clk-mt6779-vdec.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779-venc.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6779.c60
-rw-r--r--drivers/clk/mediatek/clk-mt6795-apmixedsys.c64
-rw-r--r--drivers/clk/mediatek/clk-mt6795-infracfg.c7
-rw-r--r--drivers/clk/mediatek/clk-mt6795-mfg.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6795-mm.c55
-rw-r--r--drivers/clk/mediatek/clk-mt6795-pericfg.c7
-rw-r--r--drivers/clk/mediatek/clk-mt6795-topckgen.c85
-rw-r--r--drivers/clk/mediatek/clk-mt6795-vdecsys.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6795-vencsys.c1
-rw-r--r--drivers/clk/mediatek/clk-mt6797-img.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6797-mm.c56
-rw-r--r--drivers/clk/mediatek/clk-mt6797-vdec.c25
-rw-r--r--drivers/clk/mediatek/clk-mt6797-venc.c15
-rw-r--r--drivers/clk/mediatek/clk-mt6797.c51
-rw-r--r--drivers/clk/mediatek/clk-mt7622-apmixedsys.c152
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c99
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c107
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c110
-rw-r--r--drivers/clk/mediatek/clk-mt7622-infracfg.c128
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c369
-rw-r--r--drivers/clk/mediatek/clk-mt7629-eth.c29
-rw-r--r--drivers/clk/mediatek/clk-mt7629-hif.c110
-rw-r--r--drivers/clk/mediatek/clk-mt7629.c64
-rw-r--r--drivers/clk/mediatek/clk-mt7981-apmixed.c104
-rw-r--r--drivers/clk/mediatek/clk-mt7981-eth.c119
-rw-r--r--drivers/clk/mediatek/clk-mt7981-infracfg.c209
-rw-r--r--drivers/clk/mediatek/clk-mt7981-topckgen.c424
-rw-r--r--drivers/clk/mediatek/clk-mt7986-apmixed.c8
-rw-r--r--drivers/clk/mediatek/clk-mt7986-eth.c112
-rw-r--r--drivers/clk/mediatek/clk-mt7986-infracfg.c89
-rw-r--r--drivers/clk/mediatek/clk-mt7986-topckgen.c104
-rw-r--r--drivers/clk/mediatek/clk-mt8135-apmixedsys.c105
-rw-r--r--drivers/clk/mediatek/clk-mt8135.c266
-rw-r--r--drivers/clk/mediatek/clk-mt8167-apmixedsys.c145
-rw-r--r--drivers/clk/mediatek/clk-mt8167-aud.c46
-rw-r--r--drivers/clk/mediatek/clk-mt8167-img.c50
-rw-r--r--drivers/clk/mediatek/clk-mt8167-mfgcfg.c50
-rw-r--r--drivers/clk/mediatek/clk-mt8167-mm.c69
-rw-r--r--drivers/clk/mediatek/clk-mt8167-vdec.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8167.c380
-rw-r--r--drivers/clk/mediatek/clk-mt8173-apmixedsys.c217
-rw-r--r--drivers/clk/mediatek/clk-mt8173-img.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8173-infracfg.c156
-rw-r--r--drivers/clk/mediatek/clk-mt8173-mm.c68
-rw-r--r--drivers/clk/mediatek/clk-mt8173-pericfg.c123
-rw-r--r--drivers/clk/mediatek/clk-mt8173-topckgen.c654
-rw-r--r--drivers/clk/mediatek/clk-mt8173-vdecsys.c58
-rw-r--r--drivers/clk/mediatek/clk-mt8173-vencsys.c65
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c1125
-rw-r--r--drivers/clk/mediatek/clk-mt8183-apmixedsys.c195
-rw-r--r--drivers/clk/mediatek/clk-mt8183-audio.c32
-rw-r--r--drivers/clk/mediatek/clk-mt8183-cam.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-img.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu0.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu1.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_adl.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_conn.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mm.c29
-rw-r--r--drivers/clk/mediatek/clk-mt8183-vdec.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183-venc.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c811
-rw-r--r--drivers/clk/mediatek/clk-mt8186-apmixedsys.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8186-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-infra_ao.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-ipe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mcu.c68
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mdp.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mm.c58
-rw-r--r--drivers/clk/mediatek/clk-mt8186-topckgen.c116
-rw-r--r--drivers/clk/mediatek/clk-mt8186-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8186-wpe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c50
-rw-r--r--drivers/clk/mediatek/clk-mt8188-apmixedsys.c157
-rw-r--r--drivers/clk/mediatek/clk-mt8188-cam.c120
-rw-r--r--drivers/clk/mediatek/clk-mt8188-ccu.c50
-rw-r--r--drivers/clk/mediatek/clk-mt8188-img.c112
-rw-r--r--drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c82
-rw-r--r--drivers/clk/mediatek/clk-mt8188-infra_ao.c199
-rw-r--r--drivers/clk/mediatek/clk-mt8188-ipe.c52
-rw-r--r--drivers/clk/mediatek/clk-mt8188-mfg.c49
-rw-r--r--drivers/clk/mediatek/clk-mt8188-peri_ao.c59
-rw-r--r--drivers/clk/mediatek/clk-mt8188-topckgen.c1350
-rw-r--r--drivers/clk/mediatek/clk-mt8188-vdec.c92
-rw-r--r--drivers/clk/mediatek/clk-mt8188-vdo0.c107
-rw-r--r--drivers/clk/mediatek/clk-mt8188-vdo1.c154
-rw-r--r--drivers/clk/mediatek/clk-mt8188-venc.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8188-vpp0.c114
-rw-r--r--drivers/clk/mediatek/clk-mt8188-vpp1.c109
-rw-r--r--drivers/clk/mediatek/clk-mt8188-wpe.c105
-rw-r--r--drivers/clk/mediatek/clk-mt8192-apmixedsys.c215
-rw-r--r--drivers/clk/mediatek/clk-mt8192-aud.c35
-rw-r--r--drivers/clk/mediatek/clk-mt8192-cam.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-img.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-ipe.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mdp.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mfg.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mm.c33
-rw-r--r--drivers/clk/mediatek/clk-mt8192-msdc.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-scp_adsp.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-vdec.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192-venc.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c292
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apmixedsys.c76
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apusys_pll.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-ccu.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-ipe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-peri_ao.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-scp_adsp.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-topckgen.c13
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo0.c58
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo1.c58
-rw-r--r--drivers/clk/mediatek/clk-mt8195-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vpp0.c20
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vpp1.c20
-rw-r--r--drivers/clk/mediatek/clk-mt8195-wpe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365-apmixedsys.c166
-rw-r--r--drivers/clk/mediatek/clk-mt8365-apu.c3
-rw-r--r--drivers/clk/mediatek/clk-mt8365-cam.c3
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mfg.c3
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mm.c43
-rw-r--r--drivers/clk/mediatek/clk-mt8365-vdec.c3
-rw-r--r--drivers/clk/mediatek/clk-mt8365-venc.c3
-rw-r--r--drivers/clk/mediatek/clk-mt8365.c604
-rw-r--r--drivers/clk/mediatek/clk-mt8516-apmixedsys.c122
-rw-r--r--drivers/clk/mediatek/clk-mt8516-aud.c46
-rw-r--r--drivers/clk/mediatek/clk-mt8516.c238
-rw-r--r--drivers/clk/mediatek/clk-mtk.c214
-rw-r--r--drivers/clk/mediatek/clk-mtk.h42
-rw-r--r--drivers/clk/mediatek/clk-mux.c14
-rw-r--r--drivers/clk/mediatek/clk-mux.h3
-rw-r--r--drivers/clk/mediatek/clk-pllfh.c37
-rw-r--r--drivers/clk/mediatek/clk-pllfh.h1
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.c9
-rw-r--r--drivers/clk/meson/clk-dualdiv.c21
-rw-r--r--drivers/clk/meson/clk-mpll.c20
-rw-r--r--drivers/clk/meson/sclk-div.c11
-rw-r--r--drivers/clk/microchip/clk-mpfs-ccc.c11
-rw-r--r--drivers/clk/microchip/clk-mpfs.c4
-rw-r--r--drivers/clk/mmp/clk-audio.c6
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c6
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c6
-rw-r--r--drivers/clk/mvebu/armada-37xx-xtal.c6
-rw-r--r--drivers/clk/qcom/Kconfig110
-rw-r--r--drivers/clk/qcom/Makefile15
-rw-r--r--drivers/clk/qcom/apcs-msm8916.c6
-rw-r--r--drivers/clk/qcom/apcs-msm8996.c89
-rw-r--r--drivers/clk/qcom/apcs-sdx55.c6
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c116
-rw-r--r--drivers/clk/qcom/camcc-sc7280.c268
-rw-r--r--drivers/clk/qcom/camcc-sm6350.c1906
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c324
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c144
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h15
-rw-r--r--drivers/clk/qcom/clk-branch.c15
-rw-r--r--drivers/clk/qcom/clk-branch.h44
-rw-r--r--drivers/clk/qcom/clk-cbf-8996.c315
-rw-r--r--drivers/clk/qcom/clk-cpu-8996.c146
-rw-r--r--drivers/clk/qcom/clk-hfpll.c14
-rw-r--r--drivers/clk/qcom/clk-krait.c10
-rw-r--r--drivers/clk/qcom/clk-rpm.c11
-rw-r--r--drivers/clk/qcom/clk-rpmh.c56
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c1480
-rw-r--r--drivers/clk/qcom/clk-spmi-pmic-div.c10
-rw-r--r--drivers/clk/qcom/dispcc-qcm2290.c16
-rw-r--r--drivers/clk/qcom/dispcc-sc7180.c8
-rw-r--r--drivers/clk/qcom/dispcc-sm6115.c4
-rw-r--r--drivers/clk/qcom/dispcc-sm6125.c2
-rw-r--r--drivers/clk/qcom/dispcc-sm6375.c4
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c9
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c221
-rw-r--r--drivers/clk/qcom/dispcc-sm8550.c1807
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1024
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c1175
-rw-r--r--drivers/clk/qcom/gcc-ipq5332.c3824
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c4
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c4248
-rw-r--r--drivers/clk/qcom/gcc-msm8917.c3303
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c32
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c6
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c10
-rw-r--r--drivers/clk/qcom/gcc-msm8976.c30
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c16
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c3
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c842
-rw-r--r--drivers/clk/qcom/gcc-qdu1000.c2653
-rw-r--r--drivers/clk/qcom/gcc-sa8775p.c4785
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c19
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c10
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c18
-rw-r--r--drivers/clk/qcom/gcc-sdx55.c64
-rw-r--r--drivers/clk/qcom/gcc-sdx65.c109
-rw-r--r--drivers/clk/qcom/gcc-sm6115.c54
-rw-r--r--drivers/clk/qcom/gcc-sm6375.c260
-rw-r--r--drivers/clk/qcom/gcc-sm7150.c3048
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c17
-rw-r--r--drivers/clk/qcom/gcc-sm8250.c10
-rw-r--r--drivers/clk/qcom/gcc-sm8350.c62
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c236
-rw-r--r--drivers/clk/qcom/gcc-sm8550.c252
-rw-r--r--drivers/clk/qcom/gdsc.c11
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c8
-rw-r--r--drivers/clk/qcom/gpucc-sa8775p.c625
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c11
-rw-r--r--drivers/clk/qcom/gpucc-sdm845.c7
-rw-r--r--drivers/clk/qcom/gpucc-sm6115.c503
-rw-r--r--drivers/clk/qcom/gpucc-sm6125.c424
-rw-r--r--drivers/clk/qcom/gpucc-sm6375.c458
-rw-r--r--drivers/clk/qcom/krait-cc.c4
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c2
-rw-r--r--drivers/clk/qcom/lpasscc-sc7280.c24
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c20
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1189
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c25
-rw-r--r--drivers/clk/qcom/tcsrcc-sm8550.c192
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c9
-rw-r--r--drivers/clk/ralink/clk-mt7621.c10
-rw-r--r--drivers/clk/renesas/Kconfig2
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c126
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c18
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c61
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c764
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c26
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c73
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c17
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c156
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c6
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c44
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h14
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3588.c42
-rw-r--r--drivers/clk/rockchip/clk.c2
-rw-r--r--drivers/clk/samsung/Kconfig32
-rw-r--r--drivers/clk/samsung/Makefile4
-rw-r--r--drivers/clk/samsung/clk-exynos-arm64.c229
-rw-r--r--drivers/clk/samsung/clk-exynos-arm64.h3
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c6
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c6
-rw-r--r--drivers/clk/samsung/clk-exynos4.c6
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c3
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c157
-rw-r--r--drivers/clk/samsung/clk-exynos850.c141
-rw-r--r--drivers/clk/samsung/clk-pll.c193
-rw-r--r--drivers/clk/samsung/clk-pll.h22
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c440
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c446
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c254
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c438
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c4
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c6
-rw-r--r--drivers/clk/samsung/clk.c64
-rw-r--r--drivers/clk/samsung/clk.h10
-rw-r--r--drivers/clk/sifive/Kconfig6
-rw-r--r--drivers/clk/socfpga/clk-gate-a10.c26
-rw-r--r--drivers/clk/socfpga/clk-gate.c35
-rw-r--r--drivers/clk/socfpga/clk-periph-a10.c22
-rw-r--r--drivers/clk/socfpga/clk-periph.c26
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c30
-rw-r--r--drivers/clk/socfpga/clk-pll.c32
-rw-r--r--drivers/clk/sprd/Kconfig2
-rw-r--r--drivers/clk/sprd/common.c11
-rw-r--r--drivers/clk/starfive/Kconfig30
-rw-r--r--drivers/clk/starfive/Makefile6
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7100-audio.c74
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7100.c716
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7100.h112
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-aon.c154
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-sys.c497
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110.h11
-rw-r--r--drivers/clk/starfive/clk-starfive-jh71x0.c333
-rw-r--r--drivers/clk/starfive/clk-starfive-jh71x0.h123
-rw-r--r--drivers/clk/stm32/clk-stm32mp13.c6
-rw-r--r--drivers/clk/sunxi-ng/Kconfig71
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.c13
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c15
-rw-r--r--drivers/clk/sunxi-ng/ccu_mmc_timing.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c9
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c9
-rw-r--r--drivers/clk/tegra/clk-dfll.c5
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c17
-rw-r--r--drivers/clk/tegra/clk-tegra20.c28
-rw-r--r--drivers/clk/ti/adpll.c6
-rw-r--r--drivers/clk/ti/clkctrl.c6
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c12
-rw-r--r--drivers/clk/visconti/pll.h1
-rw-r--r--drivers/clk/x86/clk-fch.c7
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c5
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c234
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c8
-rw-r--r--drivers/clk/zynqmp/pll.c2
-rw-r--r--drivers/clocksource/Kconfig13
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/clocksource/em_sti.c8
-rw-r--r--drivers/clocksource/exynos_mct.c2
-rw-r--r--drivers/clocksource/hyperv_timer.c21
-rw-r--r--drivers/clocksource/ingenic-timer.c3
-rw-r--r--drivers/clocksource/sh_cmt.c8
-rw-r--r--drivers/clocksource/sh_mtu2.c8
-rw-r--r--drivers/clocksource/sh_tmu.c8
-rw-r--r--drivers/clocksource/timer-clint.c65
-rw-r--r--drivers/clocksource/timer-davinci.c30
-rw-r--r--drivers/clocksource/timer-imx-gpt.c19
-rw-r--r--drivers/clocksource/timer-mediatek-cpux.c140
-rw-r--r--drivers/clocksource/timer-mediatek.c114
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c12
-rw-r--r--drivers/clocksource/timer-riscv.c27
-rw-r--r--drivers/clocksource/timer-stm32-lp.c12
-rw-r--r--drivers/clocksource/timer-sun4i.c3
-rw-r--r--drivers/clocksource/timer-tegra186.c7
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c63
-rw-r--r--drivers/clocksource/timer-ti-dm.c16
-rw-r--r--drivers/comedi/Kconfig2
-rw-r--r--drivers/comedi/comedi_fops.c3
-rw-r--r--drivers/comedi/drivers/adv_pci1760.c2
-rw-r--r--drivers/comedi/drivers/comedi_test.c2
-rw-r--r--drivers/counter/104-quad-8.c31
-rw-r--r--drivers/counter/Kconfig102
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/rz-mtu3-cnt.c906
-rw-r--r--drivers/cpufreq/Kconfig12
-rw-r--r--drivers/cpufreq/Kconfig.arm83
-rw-r--r--drivers/cpufreq/Makefile8
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c40
-rw-r--r--drivers/cpufreq/amd-pstate.c792
-rw-r--r--drivers/cpufreq/apple-soc-cpufreq.c7
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c5
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c11
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c6
-rw-r--r--drivers/cpufreq/cpufreq.c30
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c4
-rw-r--r--drivers/cpufreq/freq_table.c8
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c34
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c2
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c222
-rw-r--r--drivers/cpufreq/maple-cpufreq.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c5
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c98
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c35
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c8
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c2
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c68
-rw-r--r--drivers/cpufreq/s3c2410-cpufreq.c155
-rw-r--r--drivers/cpufreq/s3c2412-cpufreq.c240
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c492
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c321
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq-debugfs.c163
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c648
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c206
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c6
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c3
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c2
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c159
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c4
-rw-r--r--drivers/cpuidle/Kconfig1
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/cpuidle-arm.c4
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c12
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c2
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c15
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c12
-rw-r--r--drivers/cpuidle/cpuidle-psci.c26
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c28
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c9
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c29
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c31
-rw-r--r--drivers/cpuidle/cpuidle.c74
-rw-r--r--drivers/cpuidle/cpuidle.h2
-rw-r--r--drivers/cpuidle/driver.c4
-rw-r--r--drivers/cpuidle/dt_idle_states.c3
-rw-r--r--drivers/cpuidle/governors/teo.c102
-rw-r--r--drivers/cpuidle/poll_state.c8
-rw-r--r--drivers/cpuidle/sysfs.c19
-rw-r--r--drivers/crypto/Kconfig31
-rw-r--r--drivers/crypto/Makefile5
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c6
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c13
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c4
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c11
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c12
-rw-r--r--drivers/crypto/aspeed/Kconfig11
-rw-r--r--drivers/crypto/aspeed/Makefile4
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c828
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c5
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h2
-rw-r--r--drivers/crypto/atmel-aes.c23
-rw-r--r--drivers/crypto/atmel-ecc.c7
-rw-r--r--drivers/crypto/atmel-i2c.c8
-rw-r--r--drivers/crypto/atmel-i2c.h6
-rw-r--r--drivers/crypto/atmel-sha.c44
-rw-r--r--drivers/crypto/atmel-sha204a.c5
-rw-r--r--drivers/crypto/atmel-tdes.c19
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c14
-rw-r--r--drivers/crypto/bcm/cipher.c102
-rw-r--r--drivers/crypto/bcm/cipher.h7
-rw-r--r--drivers/crypto/caam/blob_gen.c4
-rw-r--r--drivers/crypto/caam/caamalg.c51
-rw-r--r--drivers/crypto/caam/caamalg_qi.c42
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c56
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h10
-rw-r--r--drivers/crypto/caam/caamhash.c28
-rw-r--r--drivers/crypto/caam/caampkc.c37
-rw-r--r--drivers/crypto/caam/caamprng.c12
-rw-r--r--drivers/crypto/caam/caamrng.c17
-rw-r--r--drivers/crypto/caam/ctrl.c116
-rw-r--r--drivers/crypto/caam/debugfs.c12
-rw-r--r--drivers/crypto/caam/debugfs.h7
-rw-r--r--drivers/crypto/caam/desc_constr.h3
-rw-r--r--drivers/crypto/caam/dpseci-debugfs.c2
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/jr.c61
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/caam/qi.c12
-rw-r--r--drivers/crypto/caam/qi.h12
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c8
-rw-r--r--drivers/crypto/ccp/Makefile3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c12
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c21
-rw-r--r--drivers/crypto/ccp/platform-access.c215
-rw-r--r--drivers/crypto/ccp/platform-access.h35
-rw-r--r--drivers/crypto/ccp/psp-dev.c38
-rw-r--r--drivers/crypto/ccp/psp-dev.h11
-rw-r--r--drivers/crypto/ccp/sev-dev.c54
-rw-r--r--drivers/crypto/ccp/sev-dev.h2
-rw-r--r--drivers/crypto/ccp/sp-dev.h10
-rw-r--r--drivers/crypto/ccp/sp-pci.c57
-rw-r--r--drivers/crypto/ccp/tee-dev.c17
-rw-r--r--drivers/crypto/ccree/cc_cipher.c2
-rw-r--r--drivers/crypto/ccree/cc_driver.c4
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c6
-rw-r--r--drivers/crypto/hifn_795x.c28
-rw-r--r--drivers/crypto/hisilicon/Kconfig15
-rw-r--r--drivers/crypto/hisilicon/Makefile2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c1
-rw-r--r--drivers/crypto/hisilicon/qm.c228
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c6
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c10
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c1
-rw-r--r--drivers/crypto/hisilicon/sgl.c10
-rw-r--r--drivers/crypto/hisilicon/trng/Makefile3
-rw-r--r--drivers/crypto/hisilicon/trng/trng-stb.c176
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c1
-rw-r--r--drivers/crypto/img-hash.c19
-rw-r--r--drivers/crypto/inside-secure/safexcel.c54
-rw-r--r--drivers/crypto/inside-secure/safexcel.h6
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c21
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c54
-rw-r--r--drivers/crypto/intel/Kconfig5
-rw-r--r--drivers/crypto/intel/Makefile5
-rw-r--r--drivers/crypto/intel/ixp4xx/Kconfig14
-rw-r--r--drivers/crypto/intel/ixp4xx/Makefile2
-rw-r--r--drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c (renamed from drivers/crypto/ixp4xx_crypto.c)19
-rw-r--r--drivers/crypto/intel/keembay/Kconfig (renamed from drivers/crypto/keembay/Kconfig)0
-rw-r--r--drivers/crypto/intel/keembay/Makefile (renamed from drivers/crypto/keembay/Makefile)0
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-aes-core.c (renamed from drivers/crypto/keembay/keembay-ocs-aes-core.c)2
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-ecc.c (renamed from drivers/crypto/keembay/keembay-ocs-ecc.c)0
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c (renamed from drivers/crypto/keembay/keembay-ocs-hcu-core.c)0
-rw-r--r--drivers/crypto/intel/keembay/ocs-aes.c (renamed from drivers/crypto/keembay/ocs-aes.c)0
-rw-r--r--drivers/crypto/intel/keembay/ocs-aes.h (renamed from drivers/crypto/keembay/ocs-aes.h)0
-rw-r--r--drivers/crypto/intel/keembay/ocs-hcu.c (renamed from drivers/crypto/keembay/ocs-hcu.c)0
-rw-r--r--drivers/crypto/intel/keembay/ocs-hcu.h (renamed from drivers/crypto/keembay/ocs-hcu.h)0
-rw-r--r--drivers/crypto/intel/qat/Kconfig (renamed from drivers/crypto/qat/Kconfig)0
-rw-r--r--drivers/crypto/intel/qat/Makefile (renamed from drivers/crypto/qat/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/Makefile (renamed from drivers/crypto/qat/qat_4xxx/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c (renamed from drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c)62
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h (renamed from drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h)9
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c (renamed from drivers/crypto/qat/qat_4xxx/adf_drv.c)31
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/Makefile (renamed from drivers/crypto/qat/qat_c3xxx/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c (renamed from drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h (renamed from drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c (renamed from drivers/crypto/qat/qat_c3xxx/adf_drv.c)24
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/Makefile (renamed from drivers/crypto/qat/qat_c3xxxvf/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c (renamed from drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h (renamed from drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c (renamed from drivers/crypto/qat/qat_c3xxxvf/adf_drv.c)13
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/Makefile (renamed from drivers/crypto/qat/qat_c62x/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c (renamed from drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h (renamed from drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_drv.c (renamed from drivers/crypto/qat/qat_c62x/adf_drv.c)24
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/Makefile (renamed from drivers/crypto/qat/qat_c62xvf/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c (renamed from drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h (renamed from drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c (renamed from drivers/crypto/qat/qat_c62xvf/adf_drv.c)13
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile (renamed from drivers/crypto/qat/qat_common/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h (renamed from drivers/crypto/qat/qat_common/adf_accel_devices.h)5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_engine.c (renamed from drivers/crypto/qat/qat_common/adf_accel_engine.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_admin.c (renamed from drivers/crypto/qat/qat_common/adf_admin.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c (renamed from drivers/crypto/qat/qat_common/adf_aer.c)39
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg.c (renamed from drivers/crypto/qat/qat_common/adf_cfg.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg.h (renamed from drivers/crypto/qat/qat_common/adf_cfg.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_common.h (renamed from drivers/crypto/qat/qat_common/adf_cfg_common.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h (renamed from drivers/crypto/qat/qat_common/adf_cfg_strings.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_user.h (renamed from drivers/crypto/qat/qat_common/adf_cfg_user.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h (renamed from drivers/crypto/qat/qat_common/adf_common_drv.h)10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c (renamed from drivers/crypto/qat/qat_common/adf_ctl_drv.c)32
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c (renamed from drivers/crypto/qat/qat_common/adf_dev_mgr.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_config.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_config.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_config.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_config.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_dc.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_dc.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_hw_data.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c (renamed from drivers/crypto/qat/qat_common/adf_gen2_pfvf.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h (renamed from drivers/crypto/qat/qat_common/adf_gen2_pfvf.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_dc.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_dc.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_hw_data.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_pfvf.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_pfvf.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c (renamed from drivers/crypto/qat/qat_common/adf_gen4_pm.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h (renamed from drivers/crypto/qat/qat_common/adf_gen4_pm.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c (renamed from drivers/crypto/qat/qat_common/adf_hw_arbiter.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c (renamed from drivers/crypto/qat/qat_common/adf_init.c)96
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c (renamed from drivers/crypto/qat/qat_common/adf_isr.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_msg.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_utils.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_utils.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h (renamed from drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c (renamed from drivers/crypto/qat/qat_common/adf_sriov.c)10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c (renamed from drivers/crypto/qat/qat_common/adf_sysfs.c)23
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport.c (renamed from drivers/crypto/qat/qat_common/adf_transport.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport.h (renamed from drivers/crypto/qat/qat_common/adf_transport.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h (renamed from drivers/crypto/qat/qat_common/adf_transport_access_macros.h)2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_debug.c (renamed from drivers/crypto/qat/qat_common/adf_transport_debug.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_internal.h (renamed from drivers/crypto/qat/qat_common/adf_transport_internal.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_vf_isr.c (renamed from drivers/crypto/qat/qat_common/adf_vf_isr.c)3
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_comp.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_la.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h (renamed from drivers/crypto/qat/qat_common/icp_qat_fw_pke.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hal.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hal.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hw.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h (renamed from drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h (renamed from drivers/crypto/qat/qat_common/icp_qat_uclo.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c (renamed from drivers/crypto/qat/qat_common/qat_algs.c)6
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs_send.c (renamed from drivers/crypto/qat/qat_common/qat_algs_send.c)3
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs_send.h (renamed from drivers/crypto/qat/qat_common/qat_algs_send.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_asym_algs.c (renamed from drivers/crypto/qat/qat_common/qat_asym_algs.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c (renamed from drivers/crypto/qat/qat_common/qat_bl.c)115
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.h (renamed from drivers/crypto/qat/qat_common/qat_bl.h)4
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c (renamed from drivers/crypto/qat/qat_common/qat_comp_algs.c)169
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_req.h (renamed from drivers/crypto/qat/qat_common/qat_comp_req.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c (renamed from drivers/crypto/qat/qat_common/qat_compression.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.h (renamed from drivers/crypto/qat/qat_common/qat_compression.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_crypto.c (renamed from drivers/crypto/qat/qat_common/qat_crypto.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_crypto.h (renamed from drivers/crypto/qat/qat_common/qat_crypto.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_hal.c (renamed from drivers/crypto/qat/qat_common/qat_hal.c)1
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c (renamed from drivers/crypto/qat/qat_common/qat_uclo.c)1
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/Makefile (renamed from drivers/crypto/qat/qat_dh895xcc/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c (renamed from drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c)2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h (renamed from drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c (renamed from drivers/crypto/qat/qat_dh895xcc/adf_drv.c)24
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/Makefile (renamed from drivers/crypto/qat/qat_dh895xccvf/Makefile)0
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c (renamed from drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c)0
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h (renamed from drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h)0
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c (renamed from drivers/crypto/qat/qat_dh895xccvf/adf_drv.c)13
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c4
-rw-r--r--drivers/crypto/marvell/cesa/hash.c41
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/Makefile11
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c9
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.h2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c14
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c11
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c2
-rw-r--r--drivers/crypto/mxs-dcp.c29
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c13
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c6
-rw-r--r--drivers/crypto/qce/core.c27
-rw-r--r--drivers/crypto/qce/core.h1
-rw-r--r--drivers/crypto/s5p-sss.c8
-rw-r--r--drivers/crypto/sa2ul.c6
-rw-r--r--drivers/crypto/sahara.c8
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c37
-rw-r--r--drivers/crypto/stm32/stm32-hash.c559
-rw-r--r--drivers/crypto/talitos.c6
-rw-r--r--drivers/crypto/ux500/Kconfig22
-rw-r--r--drivers/crypto/ux500/Makefile7
-rw-r--r--drivers/crypto/ux500/hash/Makefile11
-rw-r--r--drivers/crypto/ux500/hash/hash_alg.h398
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c1966
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c3
-rw-r--r--drivers/cxl/Kconfig14
-rw-r--r--drivers/cxl/acpi.c6
-rw-r--r--drivers/cxl/core/Makefile3
-rw-r--r--drivers/cxl/core/core.h18
-rw-r--r--drivers/cxl/core/hdm.c220
-rw-r--r--drivers/cxl/core/mbox.c414
-rw-r--r--drivers/cxl/core/memdev.c234
-rw-r--r--drivers/cxl/core/pci.c462
-rw-r--r--drivers/cxl/core/pmem.c48
-rw-r--r--drivers/cxl/core/port.c172
-rw-r--r--drivers/cxl/core/region.c1047
-rw-r--r--drivers/cxl/core/trace.c99
-rw-r--r--drivers/cxl/core/trace.h709
-rw-r--r--drivers/cxl/cxl.h106
-rw-r--r--drivers/cxl/cxlmem.h297
-rw-r--r--drivers/cxl/cxlpci.h26
-rw-r--r--drivers/cxl/mem.c71
-rw-r--r--drivers/cxl/pci.c457
-rw-r--r--drivers/cxl/pmem.c25
-rw-r--r--drivers/cxl/port.c129
-rw-r--r--drivers/dax/Kconfig18
-rw-r--r--drivers/dax/Makefile2
-rw-r--r--drivers/dax/bus.c57
-rw-r--r--drivers/dax/bus.h12
-rw-r--r--drivers/dax/cxl.c53
-rw-r--r--drivers/dax/device.c5
-rw-r--r--drivers/dax/hmem/Makefile3
-rw-r--r--drivers/dax/hmem/device.c102
-rw-r--r--drivers/dax/hmem/hmem.c148
-rw-r--r--drivers/dax/kmem.c5
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/dca/dca-core.c4
-rw-r--r--drivers/dca/dca-sysfs.c2
-rw-r--r--drivers/devfreq/Kconfig1
-rw-r--r--drivers/devfreq/devfreq-event.c2
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c3
-rw-r--r--drivers/devfreq/exynos-bus.c4
-rw-r--r--drivers/dma-buf/dma-buf-sysfs-stats.c9
-rw-r--r--drivers/dma-buf/dma-buf-sysfs-stats.h4
-rw-r--r--drivers/dma-buf/dma-buf.c98
-rw-r--r--drivers/dma-buf/dma-fence-array.c11
-rw-r--r--drivers/dma-buf/dma-fence-chain.c12
-rw-r--r--drivers/dma-buf/dma-fence.c61
-rw-r--r--drivers/dma-buf/dma-heap.c2
-rw-r--r--drivers/dma-buf/dma-resv.c22
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c1
-rw-r--r--drivers/dma-buf/heaps/system_heap.c6
-rw-r--r--drivers/dma-buf/udmabuf.c29
-rw-r--r--drivers/dma/Kconfig29
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/apple-admac.c20
-rw-r--r--drivers/dma/at_xdmac.c297
-rw-r--r--drivers/dma/bcm2835-dma.c4
-rw-r--r--drivers/dma/bestcomm/sram.c19
-rw-r--r--drivers/dma/dma-axi-dmac.c4
-rw-r--r--drivers/dma/dmaengine.c33
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c52
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h1
-rw-r--r--drivers/dma/dw-edma/Kconfig5
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c223
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h10
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c56
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c154
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h1
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c374
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.h5
-rw-r--r--drivers/dma/dw/core.c11
-rw-r--r--drivers/dma/ep93xx_dma.c1
-rw-r--r--drivers/dma/fsl-edma.c8
-rw-r--r--drivers/dma/fsl-qdma.c10
-rw-r--r--drivers/dma/idma64.c8
-rw-r--r--drivers/dma/idxd/Makefile2
-rw-r--r--drivers/dma/idxd/cdev.c336
-rw-r--r--drivers/dma/idxd/compat.c4
-rw-r--r--drivers/dma/idxd/debugfs.c138
-rw-r--r--drivers/dma/idxd/device.c151
-rw-r--r--drivers/dma/idxd/dma.c6
-rw-r--r--drivers/dma/idxd/idxd.h69
-rw-r--r--drivers/dma/idxd/init.c100
-rw-r--r--drivers/dma/idxd/irq.c212
-rw-r--r--drivers/dma/idxd/registers.h126
-rw-r--r--drivers/dma/idxd/sysfs.c150
-rw-r--r--drivers/dma/img-mdc-dma.c4
-rw-r--r--drivers/dma/imx-dma.c5
-rw-r--r--drivers/dma/imx-sdma.c47
-rw-r--r--drivers/dma/ioat/init.c12
-rw-r--r--drivers/dma/ioat/registers.h7
-rw-r--r--drivers/dma/ipu/ipu_idmac.c1
-rw-r--r--drivers/dma/lgm/lgm-dma.c10
-rw-r--r--drivers/dma/mcf-edma.c5
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mmp_pdma.c4
-rw-r--r--drivers/dma/mmp_tdma.c11
-rw-r--r--drivers/dma/moxart-dma.c4
-rw-r--r--drivers/dma/mv_xor_v2.c43
-rw-r--r--drivers/dma/mxs-dma.c4
-rw-r--r--drivers/dma/nbpfaxi.c4
-rw-r--r--drivers/dma/of-dma.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c12
-rw-r--r--drivers/dma/ptdma/ptdma-dev.c7
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c2
-rw-r--r--drivers/dma/ptdma/ptdma.h2
-rw-r--r--drivers/dma/pxa_dma.c4
-rw-r--r--drivers/dma/qcom/bam_dma.c4
-rw-r--r--drivers/dma/qcom/gpi.c2
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c2
-rw-r--r--drivers/dma/s3c24xx-dma.c1428
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c7
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.h1
-rw-r--r--drivers/dma/sh/rz-dmac.c18
-rw-r--r--drivers/dma/sh/shdma-base.c1
-rw-r--r--drivers/dma/sh/usb-dmac.c4
-rw-r--r--drivers/dma/stm32-dmamux.c5
-rw-r--r--drivers/dma/stm32-mdma.c5
-rw-r--r--drivers/dma/sun4i-dma.c4
-rw-r--r--drivers/dma/sun6i-dma.c7
-rw-r--r--drivers/dma/tegra186-gpc-dma.c1
-rw-r--r--drivers/dma/tegra20-apb-dma.c5
-rw-r--r--drivers/dma/tegra210-adma.c6
-rw-r--r--drivers/dma/ti/Makefile4
-rw-r--r--drivers/dma/ti/cppi41.c10
-rw-r--r--drivers/dma/ti/edma.c8
-rw-r--r--drivers/dma/ti/k3-psil-am62a.c196
-rw-r--r--drivers/dma/ti/k3-psil-j784s4.c354
-rw-r--r--drivers/dma/ti/k3-psil-priv.h2
-rw-r--r--drivers/dma/ti/k3-psil.c2
-rw-r--r--drivers/dma/ti/k3-udma.c122
-rw-r--r--drivers/dma/ti/omap-dma.c4
-rw-r--r--drivers/dma/xilinx/Makefile1
-rw-r--r--drivers/dma/xilinx/xdma-regs.h166
-rw-r--r--drivers/dma/xilinx/xdma.c974
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c4
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c10
-rw-r--r--drivers/edac/Kconfig8
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/altera_edac.c10
-rw-r--r--drivers/edac/amd64_edac.c1217
-rw-r--r--drivers/edac/amd64_edac.h89
-rw-r--r--drivers/edac/amd8111_edac.c2
-rw-r--r--drivers/edac/amd8131_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/e7xxx_edac.c3
-rw-r--r--drivers/edac/edac_device.c30
-rw-r--r--drivers/edac/edac_device_sysfs.c16
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci_sysfs.c14
-rw-r--r--drivers/edac/highbank_mc_edac.c7
-rw-r--r--drivers/edac/i10nm_base.c460
-rw-r--r--drivers/edac/i5000_edac.c7
-rw-r--r--drivers/edac/i5100_edac.c5
-rw-r--r--drivers/edac/i82860_edac.c3
-rw-r--r--drivers/edac/layerscape_edac.c3
-rw-r--r--drivers/edac/mpc85xx_edac.c3
-rw-r--r--drivers/edac/qcom_edac.c76
-rw-r--r--drivers/edac/r82600_edac.c3
-rw-r--r--drivers/edac/skx_base.c4
-rw-r--r--drivers/edac/skx_common.c78
-rw-r--r--drivers/edac/skx_common.h61
-rw-r--r--drivers/edac/zynqmp_edac.c467
-rw-r--r--drivers/eisa/eisa-bus.c4
-rw-r--r--drivers/eisa/pci_eisa.c4
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c1
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firewire/core-cdev.c43
-rw-r--r--drivers/firewire/core-device.c8
-rw-r--r--drivers/firewire/core-transaction.c53
-rw-r--r--drivers/firewire/core.h9
-rw-r--r--drivers/firewire/init_ohci1394_dma.c4
-rw-r--r--drivers/firewire/net.c21
-rw-r--r--drivers/firewire/sbp2.c4
-rw-r--r--drivers/firmware/arm_ffa/bus.c4
-rw-r--r--drivers/firmware/arm_scmi/Kconfig32
-rw-r--r--drivers/firmware/arm_scmi/Makefile9
-rw-r--r--drivers/firmware/arm_scmi/bus.c397
-rw-r--r--drivers/firmware/arm_scmi/common.h100
-rw-r--r--drivers/firmware/arm_scmi/driver.c1231
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c126
-rw-r--r--drivers/firmware/arm_scmi/optee.c8
-rw-r--r--drivers/firmware/arm_scmi/protocols.h7
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c1443
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.h31
-rw-r--r--drivers/firmware/arm_scmi/shmem.c9
-rw-r--r--drivers/firmware/arm_scmi/smc.c6
-rw-r--r--drivers/firmware/arm_scmi/virtio.c11
-rw-r--r--drivers/firmware/arm_sdei.c37
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c1
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c48
-rw-r--r--drivers/firmware/dmi-sysfs.c18
-rw-r--r--drivers/firmware/edd.c2
-rw-r--r--drivers/firmware/efi/cper-arm.c1
-rw-r--r--drivers/firmware/efi/cper_cxl.c12
-rw-r--r--drivers/firmware/efi/earlycon.c57
-rw-r--r--drivers/firmware/efi/efi-init.c5
-rw-r--r--drivers/firmware/efi/efi.c85
-rw-r--r--drivers/firmware/efi/esrt.c15
-rw-r--r--drivers/firmware/efi/libstub/Makefile7
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot43
-rw-r--r--drivers/firmware/efi/libstub/arm64-entry.S67
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c31
-rw-r--r--drivers/firmware/efi/libstub/arm64.c90
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-entry.c11
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c67
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c5
-rw-r--r--drivers/firmware/efi/libstub/efistub.h69
-rw-r--r--drivers/firmware/efi/libstub/loongarch-stub.c24
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c1
-rw-r--r--drivers/firmware/efi/libstub/screen_info.c9
-rw-r--r--drivers/firmware/efi/libstub/smbios.c15
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S53
-rw-r--r--drivers/firmware/efi/libstub/zboot.c16
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds7
-rw-r--r--drivers/firmware/efi/memattr.c9
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c3
-rw-r--r--drivers/firmware/efi/sysfb_efi.c21
-rw-r--r--drivers/firmware/efi/vars.c40
-rw-r--r--drivers/firmware/google/Kconfig8
-rw-r--r--drivers/firmware/google/coreboot_table.c9
-rw-r--r--drivers/firmware/google/coreboot_table.h1
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c4
-rw-r--r--drivers/firmware/google/gsmi.c9
-rw-r--r--drivers/firmware/imx/imx-scu.c5
-rw-r--r--drivers/firmware/imx/scu-pd.c4
-rw-r--r--drivers/firmware/meson/meson_sm.c7
-rw-r--r--drivers/firmware/psci/psci.c48
-rw-r--r--drivers/firmware/qcom_scm-legacy.c2
-rw-r--r--drivers/firmware/qcom_scm-smc.c88
-rw-r--r--drivers/firmware/qcom_scm.c108
-rw-r--r--drivers/firmware/qcom_scm.h8
-rw-r--r--drivers/firmware/smccc/smccc.c26
-rw-r--r--drivers/firmware/smccc/soc_id.c28
-rw-r--r--drivers/firmware/stratix10-svc.c29
-rw-r--r--drivers/firmware/sysfb.c4
-rw-r--r--drivers/firmware/sysfb_simplefb.c47
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c12
-rw-r--r--drivers/firmware/tegra/bpmp.c6
-rw-r--r--drivers/firmware/turris-mox-rwtm.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c62
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/dfl-afu-region.c1
-rw-r--r--drivers/fpga/dfl-afu.h2
-rw-r--r--drivers/fpga/dfl-fme-perf.c2
-rw-r--r--drivers/fpga/dfl-fme-pr.c4
-rw-r--r--drivers/fpga/dfl-fme-pr.h2
-rw-r--r--drivers/fpga/dfl-pci.c20
-rw-r--r--drivers/fpga/dfl.c253
-rw-r--r--drivers/fpga/dfl.h43
-rw-r--r--drivers/fpga/fpga-bridge.c18
-rw-r--r--drivers/fpga/fpga-mgr.c2
-rw-r--r--drivers/fpga/fpga-region.c2
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c432
-rw-r--r--drivers/fpga/lattice-sysconfig-spi.c1
-rw-r--r--drivers/fpga/microchip-spi.c145
-rw-r--r--drivers/fpga/stratix10-soc.c4
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c2
-rw-r--r--drivers/fpga/zynqmp-fpga.c21
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/gnss/core.c2
-rw-r--r--drivers/gpio/Kconfig123
-rw-r--r--drivers/gpio/Makefile7
-rw-r--r--drivers/gpio/TODO15
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c394
-rw-r--r--drivers/gpio/gpio-104-idi-48.c337
-rw-r--r--drivers/gpio/gpio-adnp.c9
-rw-r--r--drivers/gpio/gpio-aggregator.c9
-rw-r--r--drivers/gpio/gpio-altera.c29
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c45
-rw-r--r--drivers/gpio/gpio-aspeed.c82
-rw-r--r--drivers/gpio/gpio-ath79.c8
-rw-r--r--drivers/gpio/gpio-cadence.c10
-rw-r--r--drivers/gpio/gpio-davinci.c10
-rw-r--r--drivers/gpio/gpio-eic-sprd.c23
-rw-r--r--drivers/gpio/gpio-elkhartlake.c90
-rw-r--r--drivers/gpio/gpio-ep93xx.c38
-rw-r--r--drivers/gpio/gpio-ftgpio010.c2
-rw-r--r--drivers/gpio/gpio-fxl6408.c158
-rw-r--r--drivers/gpio/gpio-ge.c1
-rw-r--r--drivers/gpio/gpio-gpio-mm.c154
-rw-r--r--drivers/gpio/gpio-hisi.c25
-rw-r--r--drivers/gpio/gpio-hlwd.c33
-rw-r--r--drivers/gpio/gpio-i8255.c320
-rw-r--r--drivers/gpio/gpio-i8255.h54
-rw-r--r--drivers/gpio/gpio-ich.c10
-rw-r--r--drivers/gpio/gpio-idt3243x.c11
-rw-r--r--drivers/gpio/gpio-imx-scu.c1
-rw-r--r--drivers/gpio/gpio-iop.c59
-rw-r--r--drivers/gpio/gpio-ljca.c454
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c238
-rw-r--r--drivers/gpio/gpio-loongson1.c71
-rw-r--r--drivers/gpio/gpio-max732x.c8
-rw-r--r--drivers/gpio/gpio-merrifield.c453
-rw-r--r--drivers/gpio/gpio-mlxbf2.c32
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c2
-rw-r--r--drivers/gpio/gpio-mpc5200.c2
-rw-r--r--drivers/gpio/gpio-msc313.c32
-rw-r--r--drivers/gpio/gpio-mvebu.c6
-rw-r--r--drivers/gpio/gpio-mxc.c16
-rw-r--r--drivers/gpio/gpio-mxs.c1
-rw-r--r--drivers/gpio/gpio-omap.c85
-rw-r--r--drivers/gpio/gpio-pca953x.c37
-rw-r--r--drivers/gpio/gpio-pca9570.c24
-rw-r--r--drivers/gpio/gpio-pcf857x.c118
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c12
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c21
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c29
-rw-r--r--drivers/gpio/gpio-pxa.c5
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c2
-rw-r--r--drivers/gpio/gpio-rcar.c2
-rw-r--r--drivers/gpio/gpio-rda.c23
-rw-r--r--drivers/gpio/gpio-reg.c12
-rw-r--r--drivers/gpio/gpio-regmap.c29
-rw-r--r--drivers/gpio/gpio-rockchip.c2
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c2
-rw-r--r--drivers/gpio/gpio-sifive.c3
-rw-r--r--drivers/gpio/gpio-sim.c13
-rw-r--r--drivers/gpio/gpio-siox.c75
-rw-r--r--drivers/gpio/gpio-sprd.c9
-rw-r--r--drivers/gpio/gpio-stmpe.c8
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-tangier.c536
-rw-r--r--drivers/gpio/gpio-tangier.h117
-rw-r--r--drivers/gpio/gpio-tb10x.c2
-rw-r--r--drivers/gpio/gpio-tegra186.c42
-rw-r--r--drivers/gpio/gpio-thunderx.c26
-rw-r--r--drivers/gpio/gpio-tqmx86.c28
-rw-r--r--drivers/gpio/gpio-ucb1400.c85
-rw-r--r--drivers/gpio/gpio-vf610.c43
-rw-r--r--drivers/gpio/gpio-visconti.c52
-rw-r--r--drivers/gpio/gpio-wcd934x.c1
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c32
-rw-r--r--drivers/gpio/gpio-xilinx.c34
-rw-r--r--drivers/gpio/gpio-xlp.c14
-rw-r--r--drivers/gpio/gpio-xra1403.c2
-rw-r--r--drivers/gpio/gpio-zevio.c9
-rw-r--r--drivers/gpio/gpiolib-acpi.c81
-rw-r--r--drivers/gpio/gpiolib-acpi.h6
-rw-r--r--drivers/gpio/gpiolib-cdev.c21
-rw-r--r--drivers/gpio/gpiolib-devres.c55
-rw-r--r--drivers/gpio/gpiolib-of.c149
-rw-r--r--drivers/gpio/gpiolib-of.h6
-rw-r--r--drivers/gpio/gpiolib-swnode.c5
-rw-r--r--drivers/gpio/gpiolib-sysfs.c39
-rw-r--r--drivers/gpio/gpiolib.c244
-rw-r--r--drivers/gpio/gpiolib.h10
-rw-r--r--drivers/gpu/drm/Kconfig76
-rw-r--r--drivers/gpu/drm/Makefile13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c163
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c327
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c351
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c278
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c324
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c276
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c254
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c196
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c)15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c329
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c225
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c430
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c471
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c170
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c477
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c369
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c224
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c1967
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c303
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c268
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c327
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h487
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm52
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c100
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c38
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c36
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c93
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c12
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig15
-rw-r--r--drivers/gpu/drm/amd/display/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1238
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h88
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c165
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h26
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c53
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c238
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c156
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c316
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c180
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c171
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile12
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c183
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c535
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4961
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c7553
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c480
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c133
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h807
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h348
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h133
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h584
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h225
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c159
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c417
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c404
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c237
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c305
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c352
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h133
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h267
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h319
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/Makefile37
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c1011
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c)25
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h)15
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c)23
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c)46
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c1427
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c2541
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c836
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c)434
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h92
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c2259
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h107
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h (renamed from drivers/gpu/drm/tdfx/tdfx_drv.h)44
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c492
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h102
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c391
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c208
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c1716
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h185
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c259
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c416
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c)317
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c955
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h)5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c240
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h19
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h188
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c12
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c21
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_msg_types.h (renamed from drivers/gpu/drm/amd/display/include/hdcp_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h82
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h33
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c140
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c78
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h36
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c55
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c46
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_offset.h411
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_sh_mask.h1807
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h157
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h54
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h54
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h50
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h7258
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h30535
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h219
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h663
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h3314
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h22315
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h456
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h674
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h10002
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h38900
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h11
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_offset.h263
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_sh_mask.h995
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h1109
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h3276
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h87
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h9
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h16
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c71
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c137
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h4
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c13
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c87
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c157
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h141
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h212
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h95
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c43
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c55
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c114
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c166
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c2069
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h32
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c93
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c32
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c9
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c1
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c7
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c5
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c1
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c10
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c40
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c19
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h84
-rw-r--r--drivers/gpu/drm/ast/ast_i2c.c8
-rw-r--r--drivers/gpu/drm/ast/ast_main.c34
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c4
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c111
-rw-r--r--drivers/gpu/drm/ast/ast_post.c94
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c13
-rw-r--r--drivers/gpu/drm/bridge/Kconfig25
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c25
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c6
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c7
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig21
-rw-r--r--drivers/gpu/drm/bridge/cadence/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c (renamed from drivers/gpu/drm/bridge/cdns-dsi.c)89
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h84
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c51
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h16
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c1
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c5
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c5
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c15
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c185
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c6
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c6
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c6
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c6
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c167
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c321
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c31
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c346
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c5
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c6
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c16
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c5
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c5
-rw-r--r--drivers/gpu/drm/bridge/panel.c83
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c85
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c1967
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c38
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c10
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c5
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c7
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c16
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c5
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c16
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c9
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c11
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c7
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_dev.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c71
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c6
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c46
-rw-r--r--drivers/gpu/drm/drm_atomic.c84
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c63
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c124
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c4
-rw-r--r--drivers/gpu/drm/drm_blend.c13
-rw-r--r--drivers/gpu/drm/drm_bridge.c294
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c27
-rw-r--r--drivers/gpu/drm/drm_buddy.c85
-rw-r--r--drivers/gpu/drm/drm_bufs.c12
-rw-r--r--drivers/gpu/drm/drm_client.c54
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c192
-rw-r--r--drivers/gpu/drm/drm_context.c36
-rw-r--r--drivers/gpu/drm/drm_debugfs.c114
-rw-r--r--drivers/gpu/drm/drm_displayid.c62
-rw-r--r--drivers/gpu/drm/drm_drv.c33
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c593
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c458
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c268
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c358
-rw-r--r--drivers/gpu/drm/drm_file.c20
-rw-r--r--drivers/gpu/drm/drm_format_helper.c496
-rw-r--r--drivers/gpu/drm/drm_fourcc.c4
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c11
-rw-r--r--drivers/gpu/drm/drm_gem.c66
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c31
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c7
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c147
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c23
-rw-r--r--drivers/gpu/drm/drm_internal.h8
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c25
-rw-r--r--drivers/gpu/drm/drm_lease.c66
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c158
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c62
-rw-r--r--drivers/gpu/drm/drm_mode_config.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c554
-rw-r--r--drivers/gpu/drm/drm_of.c51
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c58
-rw-r--r--drivers/gpu/drm/drm_plane.c5
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c10
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c165
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c2
-rw-r--r--drivers/gpu/drm/drm_suballoc.c457
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/drm_vblank.c59
-rw-r--r--drivers/gpu/drm/drm_vm.c8
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c76
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c66
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c18
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h86
-rw-r--r--drivers/gpu/drm/exynos/Kconfig3
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1797
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c173
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c12
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c4
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/backlight.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c344
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c344
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h19
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c11
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c10
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c18
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h1
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c223
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c14
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i810/Makefile8
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c1266
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c101
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h246
-rw-r--r--drivers/gpu/drm/i915/Kconfig40
-rw-r--r--drivers/gpu/drm/i915/Kconfig.unstable21
-rw-r--r--drivers/gpu/drm/i915/Makefile38
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c22
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c13
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c55
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c23
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c94
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.h1
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c (renamed from drivers/gpu/drm/i915/intel_pm.c)1465
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h21
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c85
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c121
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c392
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c664
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight_regs.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c381
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h40
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c259
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c1378
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c72
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c334
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h46
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1076
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h137
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h70
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c673
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h124
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c141
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reg_defs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h85
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c530
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c471
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c124
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_regs.h84
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c384
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c60
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c169
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c348
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_regs.h67
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c106
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c407
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_regs.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c158
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi_regs.h151
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c76
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c173
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c831
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c370
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds_regs.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_mg_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c73
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c376
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps_regs.h78
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c654
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h260
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c187
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c186
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c1446
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv_regs.h490
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c533
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c144
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h489
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c61
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c408
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm_types.h (renamed from drivers/gpu/drm/i915/intel_pm_types.h)8
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c66
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c19
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c353
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h9
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h160
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c186
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c75
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c30
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c118
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h305
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c27
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c27
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c59
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c71
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c65
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c20
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c42
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c35
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c10
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c15
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h2
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c244
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c234
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c293
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c150
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c45
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_print.h54
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h76
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c42
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h32
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c43
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c143
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c70
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c584
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c20
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c173
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c22
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c388
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c20
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c7
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c72
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c209
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c155
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h49
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c109
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h61
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c47
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c93
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c159
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_print.h51
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c61
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c252
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c55
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c141
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c430
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c46
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h15
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c23
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c14
-rw-r--r--drivers/gpu/drm/i915/i915_active.c28
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_config.c5
-rw-r--r--drivers/gpu/drm/i915/i915_config.h23
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c45
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c33
-rw-r--r--drivers/gpu/drm/i915/i915_deps.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c259
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h156
-rw-r--r--drivers/gpu/drm/i915/i915_file_private.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c40
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c88
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h5
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c95
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c120
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c583
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_params.c97
-rw-r--r--drivers/gpu/drm/i915/i915_params.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c69
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c621
-rw-r--r--drivers/gpu/drm/i915/i915_perf.h4
-rw-r--r--drivers/gpu/drm/i915/i915_perf_oa_regs.h78
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h75
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2263
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h31
-rw-r--r--drivers/gpu/drm/i915/i915_request.c2
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.c15
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c6
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c9
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c87
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h52
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c4
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h17
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h3
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c888
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.h14
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c35
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h5
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c82
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c35
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h34
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c18
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c123
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h13
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h23
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c193
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h11
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h15
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h7
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c36
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_huc.c13
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c18
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c10
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.h6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c20
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.h5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c132
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c362
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c142
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c28
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c (renamed from drivers/gpu/drm/i915/intel_dram.c)152
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h (renamed from drivers/gpu/drm/i915/intel_dram.h)0
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c171
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.h18
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c (renamed from drivers/gpu/drm/i915/intel_pch.c)0
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h (renamed from drivers/gpu/drm/i915/intel_pch.h)0
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c1
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c4
-rw-r--r--drivers/gpu/drm/imx/Kconfig42
-rw-r--r--drivers/gpu/drm/imx/Makefile11
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c23
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.h7
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c15
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Kconfig41
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Makefile11
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c (renamed from drivers/gpu/drm/imx/dw_hdmi-imx.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c (renamed from drivers/gpu/drm/imx/imx-drm-core.c)4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm.h (renamed from drivers/gpu/drm/imx/imx-drm.h)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c (renamed from drivers/gpu/drm/imx/imx-ldb.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c (renamed from drivers/gpu/drm/imx/imx-tve.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c (renamed from drivers/gpu/drm/imx/ipuv3-crtc.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c (renamed from drivers/gpu/drm/imx/ipuv3-plane.c)14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h (renamed from drivers/gpu/drm/imx/ipuv3-plane.h)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c (renamed from drivers/gpu/drm/imx/parallel-display.c)0
-rw-r--r--drivers/gpu/drm/imx/lcdc/Kconfig7
-rw-r--r--drivers/gpu/drm/imx/lcdc/Makefile1
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c546
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_crtc.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c1
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c6
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c12
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c15
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_interface.c1
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_mode.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c6
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/Makefile2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h35
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c95
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c547
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c39
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c21
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c32
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c91
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c135
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h78
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c480
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h30
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c370
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.h25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c24
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c17
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c23
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c4
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c5
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c2
-rw-r--r--drivers/gpu/drm/mga/Makefile11
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c1168
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c104
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h685
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c197
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c169
-rw-r--r--drivers/gpu/drm/mga/mga_state.c1099
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c167
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c22
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h54
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h44
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c77
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h809
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c81
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c194
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c63
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h66
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h52
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c39
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c184
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h115
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h210
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h210
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h237
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h217
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h244
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h156
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h129
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h226
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h158
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h222
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h234
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h239
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c350
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c111
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h47
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c34
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c1445
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c198
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c31
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c189
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h115
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c110
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c868
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c38
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h40
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h40
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c92
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c82
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c90
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h5
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c201
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c176
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h13
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c36
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c29
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c50
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h27
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h41
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c161
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c83
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c20
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h36
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h40
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c243
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h38
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c18
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h62
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c21
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h40
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c29
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c18
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c109
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h27
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c171
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c88
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h23
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c152
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h29
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c57
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c91
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h39
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c150
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c1
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c38
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h8
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c188
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h1
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c8
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c2
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig2
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c34
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0012.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/outp.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c613
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c54
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c163
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c4
-rw-r--r--drivers/gpu/drm/panel/Kconfig69
-rw-r--r--drivers/gpu/drm/panel/Makefile7
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c96
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c308
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c42
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c142
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c13
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c46
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c451
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c6
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c1
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c209
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c58
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c106
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c522
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c24
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c24
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c777
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c364
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c2
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c6
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c44
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c33
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c12
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c19
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c144
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c341
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c329
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c398
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c350
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c112
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c82
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c16
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c3
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c15
-rw-r--r--drivers/gpu/drm/r128/Makefile10
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c228
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.h31
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c944
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c116
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h544
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c199
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c118
-rw-r--r--drivers/gpu/drm/r128/r128_state.c1641
-rw-r--r--drivers/gpu/drm/radeon/Kconfig3
-rw-r--r--drivers/gpu/drm/radeon/Makefile3
-rw-r--r--drivers/gpu/drm/radeon/atombios.h10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/r300.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h60
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c402
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c422
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h22
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c316
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c11
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c73
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c82
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c42
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h34
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c52
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c242
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.h12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c497
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h6
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c5
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c42
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c86
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c18
-rw-r--r--drivers/gpu/drm/savage/Makefile9
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c1082
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c91
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h580
-rw-r--r--drivers/gpu/drm/savage/savage_state.c1169
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c13
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c46
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c51
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c10
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c1
-rw-r--r--drivers/gpu/drm/sis/Makefile10
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c143
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h80
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c363
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c33
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c5
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c1
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c1
-rw-r--r--drivers/gpu/drm/sti/Kconfig2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/stm/Kconfig2
-rw-r--r--drivers/gpu/drm/stm/drv.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c141
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c2
-rw-r--r--drivers/gpu/drm/tdfx/Makefile8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c90
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/dc.c22
-rw-r--r--drivers/gpu/drm/tegra/dc.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c12
-rw-r--r--drivers/gpu/drm/tegra/drm.c25
-rw-r--r--drivers/gpu/drm/tegra/drm.h27
-rw-r--r--drivers/gpu/drm/tegra/dsi.c51
-rw-r--r--drivers/gpu/drm/tegra/fb.c242
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c241
-rw-r--r--drivers/gpu/drm/tegra/firewall.c3
-rw-r--r--drivers/gpu/drm/tegra/gem.c6
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c14
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c14
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c14
-rw-r--r--drivers/gpu/drm/tegra/hub.c13
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c30
-rw-r--r--drivers/gpu/drm/tegra/output.c3
-rw-r--r--drivers/gpu/drm/tegra/plane.c16
-rw-r--r--drivers/gpu/drm/tegra/rgb.c7
-rw-r--r--drivers/gpu/drm/tegra/sor.c59
-rw-r--r--drivers/gpu/drm/tegra/submit.c19
-rw-r--r--drivers/gpu/drm/tegra/vic.c53
-rw-r--r--drivers/gpu/drm/tests/Makefile10
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c3
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c110
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c68
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c76
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c490
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c99
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.h11
-rw-r--r--drivers/gpu/drm/tests/drm_managed_test.c71
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c6
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c158
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c218
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c18
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h8
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c5
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c2
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c21
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c13
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c4
-rw-r--r--drivers/gpu/drm/tiny/bochs.c1
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c501
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c15
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c6
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c36
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c20
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c5
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c48
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c10
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c171
-rw-r--r--drivers/gpu/drm/tiny/st7586.c39
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c263
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c152
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c106
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c22
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c88
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c1
-rw-r--r--drivers/gpu/drm/vc4/Kconfig16
-rw-r--r--drivers/gpu/drm/vc4/Makefile7
-rw-r--r--drivers/gpu/drm/vc4/tests/.kunitconfig13
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c200
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.h63
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c41
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c138
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_plane.c47
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c1039
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c217
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c34
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h150
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c189
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c78
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c118
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h4
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c272
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c139
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c145
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h20
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c62
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c365
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h11
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c1
-rw-r--r--drivers/gpu/drm/via/Makefile8
-rw-r--r--drivers/gpu/drm/via/via_3d_reg.h1771
-rw-r--r--drivers/gpu/drm/via/via_dri1.c3630
-rw-r--r--drivers/gpu/drm/virtio/Kconfig11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c39
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_trace.h26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c27
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c15
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c46
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c41
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c451
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h203
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c55
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c65
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c56
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h269
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c281
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c97
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c297
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h48
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c102
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c35
-rw-r--r--[-rwxr-xr-x]drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h0
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c68
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c279
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c53
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c66
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c323
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c115
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c135
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_va.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c150
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h10
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c3
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c3
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c4
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/bus.c8
-rw-r--r--drivers/gpu/host1x/cdma.c16
-rw-r--r--drivers/gpu/host1x/cdma.h2
-rw-r--r--drivers/gpu/host1x/context.c32
-rw-r--r--drivers/gpu/host1x/debug.c7
-rw-r--r--drivers/gpu/host1x/dev.c9
-rw-r--r--drivers/gpu/host1x/dev.h10
-rw-r--r--drivers/gpu/host1x/fence.c118
-rw-r--r--drivers/gpu/host1x/fence.h19
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c60
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x07_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x08_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c74
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c3
-rw-r--r--drivers/gpu/host1x/intr.c334
-rw-r--r--drivers/gpu/host1x/intr.h83
-rw-r--r--drivers/gpu/host1x/job.c12
-rw-r--r--drivers/gpu/host1x/mipi.c4
-rw-r--r--drivers/gpu/host1x/syncpt.c104
-rw-r--r--drivers/gpu/host1x/syncpt.h3
-rw-r--r--drivers/gpu/ipu-v3/Kconfig2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c1
-rw-r--r--drivers/greybus/core.c14
-rw-r--r--drivers/hid/.kunitconfig1
-rw-r--r--drivers/hid/Kconfig41
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c16
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h3
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c13
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h1
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c4
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c2
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c13
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c10
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h8
-rw-r--r--drivers/hid/bpf/Kconfig16
-rw-r--r--drivers/hid/bpf/Makefile11
-rw-r--r--drivers/hid/bpf/entrypoints/Makefile93
-rw-r--r--drivers/hid/bpf/entrypoints/README4
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.bpf.c25
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.lskel.h248
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c548
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.h25
-rw-r--r--drivers/hid/bpf/hid_bpf_jmp_table.c565
-rw-r--r--drivers/hid/hid-apple.c20
-rw-r--r--drivers/hid/hid-asus.c38
-rw-r--r--drivers/hid/hid-betopff.c17
-rw-r--r--drivers/hid/hid-bigbenff.c78
-rw-r--r--drivers/hid/hid-core.c89
-rw-r--r--drivers/hid/hid-cp2112.c1
-rw-r--r--drivers/hid/hid-debug.c1
-rw-r--r--drivers/hid/hid-elecom.c16
-rw-r--r--drivers/hid/hid-evision.c53
-rw-r--r--drivers/hid/hid-hyperv.c6
-rw-r--r--drivers/hid/hid-ids.h28
-rw-r--r--drivers/hid/hid-input-test.c80
-rw-r--r--drivers/hid/hid-input.c70
-rw-r--r--drivers/hid/hid-kye.c924
-rw-r--r--drivers/hid/hid-letsketch.c2
-rw-r--r--drivers/hid/hid-lg-g15.c1
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c413
-rw-r--r--drivers/hid/hid-mcp2221.c9
-rw-r--r--drivers/hid/hid-multitouch.c39
-rw-r--r--drivers/hid/hid-nintendo.c95
-rw-r--r--drivers/hid/hid-playstation.c104
-rw-r--r--drivers/hid/hid-quirks.c20
-rw-r--r--drivers/hid/hid-roccat-arvo.c2
-rw-r--r--drivers/hid/hid-roccat-isku.c2
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-koneplus.c2
-rw-r--r--drivers/hid/hid-roccat-konepure.c2
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c2
-rw-r--r--drivers/hid/hid-roccat-pyra.c2
-rw-r--r--drivers/hid/hid-roccat-ryos.c2
-rw-r--r--drivers/hid/hid-roccat-savu.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c240
-rw-r--r--drivers/hid/hid-sensor-hub.c6
-rw-r--r--drivers/hid/hid-sony.c1021
-rw-r--r--drivers/hid/hid-steam.c385
-rw-r--r--drivers/hid/hid-steelseries.c1
-rw-r--r--drivers/hid/hid-topre.c2
-rw-r--r--drivers/hid/hid-uclogic-core-test.c105
-rw-r--r--drivers/hid/hid-uclogic-core.c63
-rw-r--r--drivers/hid/hid-uclogic-params-test.c16
-rw-r--r--drivers/hid/hid-uclogic-params.c126
-rw-r--r--drivers/hid/hid-uclogic-params.h40
-rw-r--r--drivers/hid/hid-uclogic-rdesc-test.c3
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c6
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h5
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/Kconfig35
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c26
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c24
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c42
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c98
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of.c38
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h3
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c6
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/dma-if.c10
-rw-r--r--drivers/hid/surface-hid/surface_hid.c8
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c2
-rw-r--r--drivers/hid/surface-hid/surface_kbd.c8
-rw-r--r--drivers/hid/uhid.c4
-rw-r--r--drivers/hid/usbhid/hid-core.c9
-rw-r--r--drivers/hid/wacom_sys.c10
-rw-r--r--drivers/hid/wacom_wac.c84
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/hsi/clients/cmt_speech.c2
-rw-r--r--drivers/hsi/hsi_core.c4
-rw-r--r--drivers/hte/hte-tegra194-test.c12
-rw-r--r--drivers/hte/hte-tegra194.c169
-rw-r--r--drivers/hte/hte.c4
-rw-r--r--drivers/hv/Kconfig30
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hv/connection.c117
-rw-r--r--drivers/hv/hv.c97
-rw-r--r--drivers/hv/hv_balloon.c6
-rw-r--r--drivers/hv/hv_common.c251
-rw-r--r--drivers/hv/hv_util.c4
-rw-r--r--drivers/hv/hyperv_vmbus.h6
-rw-r--r--drivers/hv/ring_buffer.c62
-rw-r--r--drivers/hv/vmbus_drv.c330
-rw-r--r--drivers/hwmon/Kconfig49
-rw-r--r--drivers/hwmon/Makefile6
-rw-r--r--drivers/hwmon/adm1177.c2
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7470.c2
-rw-r--r--drivers/hwmon/adt7475.c14
-rw-r--r--drivers/hwmon/adt7x10.c2
-rw-r--r--drivers/hwmon/aht10.c5
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c739
-rw-r--r--drivers/hwmon/as370-hwmon.c2
-rw-r--r--drivers/hwmon/asus-ec-sensors.c23
-rw-r--r--drivers/hwmon/axi-fan-control.c2
-rw-r--r--drivers/hwmon/bt1-pvt.c4
-rw-r--r--drivers/hwmon/coretemp.c140
-rw-r--r--drivers/hwmon/corsair-cpro.c2
-rw-r--r--drivers/hwmon/corsair-psu.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c2
-rw-r--r--drivers/hwmon/drivetemp.c17
-rw-r--r--drivers/hwmon/emc2305.c26
-rw-r--r--drivers/hwmon/ftsteutates.c555
-rw-r--r--drivers/hwmon/g762.c7
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/gxp-fan-ctrl.c253
-rw-r--r--drivers/hwmon/hih6130.c4
-rw-r--r--drivers/hwmon/hwmon.c16
-rw-r--r--drivers/hwmon/i5500_temp.c2
-rw-r--r--drivers/hwmon/ibmpex.c2
-rw-r--r--drivers/hwmon/ibmpowernv.c4
-rw-r--r--drivers/hwmon/iio_hwmon.c8
-rw-r--r--drivers/hwmon/ina238.c2
-rw-r--r--drivers/hwmon/ina2xx.c4
-rw-r--r--drivers/hwmon/ina3221.c4
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c237
-rw-r--r--drivers/hwmon/it87.c495
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/k10temp.c7
-rw-r--r--drivers/hwmon/k8temp.c2
-rw-r--r--drivers/hwmon/lan966x-hwmon.c2
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm83.c2
-rw-r--r--drivers/hwmon/lm95241.c2
-rw-r--r--drivers/hwmon/lm95245.c2
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c3
-rw-r--r--drivers/hwmon/ltc2945.c132
-rw-r--r--drivers/hwmon/ltc2947-core.c2
-rw-r--r--drivers/hwmon/ltc2992.c3
-rw-r--r--drivers/hwmon/ltc4245.c4
-rw-r--r--drivers/hwmon/ltq-cputemp.c2
-rw-r--r--drivers/hwmon/max127.c2
-rw-r--r--drivers/hwmon/max31730.c2
-rw-r--r--drivers/hwmon/max31760.c2
-rw-r--r--drivers/hwmon/max31790.c2
-rw-r--r--drivers/hwmon/max6620.c2
-rw-r--r--drivers/hwmon/max6621.c2
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/mc34vr500.c263
-rw-r--r--drivers/hwmon/mcp3021.c2
-rw-r--r--drivers/hwmon/mlxreg-fan.c8
-rw-r--r--drivers/hwmon/nct6775-core.c2
-rw-r--r--drivers/hwmon/nct6775-platform.c439
-rw-r--r--drivers/hwmon/nct7904.c2
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/nzxt-kraken2.c2
-rw-r--r--drivers/hwmon/nzxt-smart2.c14
-rw-r--r--drivers/hwmon/oxp-sensors.c54
-rw-r--r--drivers/hwmon/peci/cputemp.c12
-rw-r--r--drivers/hwmon/peci/dimmtemp.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig45
-rw-r--r--drivers/hwmon/pmbus/Makefile3
-rw-r--r--drivers/hwmon/pmbus/acbel-fsg032.c85
-rw-r--r--drivers/hwmon/pmbus/adm1266.c1
-rw-r--r--drivers/hwmon/pmbus/fsp-3y.c1
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c272
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c16
-rw-r--r--drivers/hwmon/pmbus/max16601.c14
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c156
-rw-r--r--drivers/hwmon/pmbus/pmbus.h9
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c395
-rw-r--r--drivers/hwmon/pmbus/tda38640.c74
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c75
-rw-r--r--drivers/hwmon/powr1220.c2
-rw-r--r--drivers/hwmon/pwm-fan.c10
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c2
-rw-r--r--drivers/hwmon/s3c-hwmon.c379
-rw-r--r--drivers/hwmon/sbrmi.c2
-rw-r--r--drivers/hwmon/sbtsi_temp.c2
-rw-r--r--drivers/hwmon/sch5627.c2
-rw-r--r--drivers/hwmon/scmi-hwmon.c4
-rw-r--r--drivers/hwmon/scpi-hwmon.c2
-rw-r--r--drivers/hwmon/sfctemp.c331
-rw-r--r--drivers/hwmon/sht15.c8
-rw-r--r--drivers/hwmon/sht21.c4
-rw-r--r--drivers/hwmon/sht4x.c2
-rw-r--r--drivers/hwmon/sl28cpld-hwmon.c2
-rw-r--r--drivers/hwmon/smpro-hwmon.c2
-rw-r--r--drivers/hwmon/sparx5-temp.c2
-rw-r--r--drivers/hwmon/sy7636a-hwmon.c2
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp103.c2
-rw-r--r--drivers/hwmon/tmp108.c2
-rw-r--r--drivers/hwmon/tmp464.c2
-rw-r--r--drivers/hwmon/tmp513.c4
-rw-r--r--drivers/hwmon/tps23861.c2
-rw-r--r--drivers/hwmon/vt1211.c6
-rw-r--r--drivers/hwmon/w83627ehf.c2
-rw-r--r--drivers/hwmon/w83773g.c2
-rw-r--r--drivers/hwmon/xgene-hwmon.c15
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c3
-rw-r--r--drivers/hwtracing/coresight/Kconfig35
-rw-r--r--drivers/hwtracing/coresight/Makefile5
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c87
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c23
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c32
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c115
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h23
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c45
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c211
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.h35
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c259
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h62
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c297
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h156
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c648
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h125
-rw-r--r--drivers/hwtracing/intel_th/core.c6
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h4
-rw-r--r--drivers/hwtracing/intel_th/msu.c2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c10
-rw-r--r--drivers/hwtracing/stm/Kconfig1
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c77
-rw-r--r--drivers/i2c/busses/Kconfig37
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c4
-rw-r--r--drivers/i2c/busses/i2c-au1550.c4
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c4
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c4
-rw-r--r--drivers/i2c/busses/i2c-cadence.c138
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c46
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-amdpsp.c205
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c22
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h6
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c33
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c21
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c4
-rw-r--r--drivers/i2c/busses/i2c-gpio.c47
-rw-r--r--drivers/i2c/busses/i2c-gxp.c609
-rw-r--r--drivers/i2c/busses/i2c-hisi.c13
-rw-r--r--drivers/i2c/busses/i2c-i801.c310
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c10
-rw-r--r--drivers/i2c/busses/i2c-imx.c12
-rw-r--r--drivers/i2c/busses/i2c-ls2x.c370
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c60
-rw-r--r--drivers/i2c/busses/i2c-mpc.c37
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c22
-rw-r--r--drivers/i2c/busses/i2c-mxs.c22
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c4
-rw-r--r--drivers/i2c/busses/i2c-ocores.c35
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/busses/i2c-owl.c2
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c44
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c72
-rw-r--r--drivers/i2c/busses/i2c-st.c9
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c40
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c3
-rw-r--r--drivers/i2c/busses/i2c-xiic.c593
-rw-r--r--drivers/i2c/i2c-core-acpi.c13
-rw-r--r--drivers/i2c/i2c-core-base.c120
-rw-r--r--drivers/i2c/i2c-core-of.c75
-rw-r--r--drivers/i2c/i2c-dev.c42
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c2
-rw-r--r--drivers/i2c/i2c-slave-testunit.c2
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c2
-rw-r--r--drivers/i3c/device.c14
-rw-r--r--drivers/i3c/master.c41
-rw-r--r--drivers/i3c/master/Kconfig14
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c189
-rw-r--r--drivers/i3c/master/dw-i3c-master.c440
-rw-r--r--drivers/i3c/master/dw-i3c-master.h84
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c6
-rw-r--r--drivers/i3c/master/svc-i3c-master.c11
-rw-r--r--drivers/idle/intel_idle.c68
-rw-r--r--drivers/iio/Kconfig3
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/accel/bma400.h4
-rw-r--r--drivers/iio/accel/bma400_core.c31
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c1
-rw-r--r--drivers/iio/accel/kionix-kx022a.c5
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/accel/mma9551_core.c10
-rw-r--r--drivers/iio/accel/msa311.c2
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_core.c2
-rw-r--r--drivers/iio/accel/st_accel_i2c.c10
-rw-r--r--drivers/iio/accel/st_accel_spi.c10
-rw-r--r--drivers/iio/adc/Kconfig50
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7291.c2
-rw-r--r--drivers/iio/adc/ad7292.c1
-rw-r--r--drivers/iio/adc/ad7606.c2
-rw-r--r--drivers/iio/adc/ad7791.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c14
-rw-r--r--drivers/iio/adc/axp20x_adc.c77
-rw-r--r--drivers/iio/adc/berlin2-adc.c4
-rw-r--r--drivers/iio/adc/ep93xx_adc.c8
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c11
-rw-r--r--drivers/iio/adc/imx93_adc.c484
-rw-r--r--drivers/iio/adc/ltc2497.c6
-rw-r--r--drivers/iio/adc/max11410.c27
-rw-r--r--drivers/iio/adc/meson_saradc.c21
-rw-r--r--drivers/iio/adc/palmas_gpadc.c615
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c18
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c2
-rw-r--r--drivers/iio/adc/stm32-adc.c6
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c99
-rw-r--r--drivers/iio/adc/stm32-dfsdm.h60
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c2
-rw-r--r--drivers/iio/adc/ti-adc128s052.c54
-rw-r--r--drivers/iio/adc/ti-ads1100.c445
-rw-r--r--drivers/iio/adc/ti-ads7924.c474
-rw-r--r--drivers/iio/adc/ti-ads7950.c1
-rw-r--r--drivers/iio/adc/ti-lmp92064.c332
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c32
-rw-r--r--drivers/iio/adc/xilinx-ams.c11
-rw-r--r--drivers/iio/addac/Kconfig2
-rw-r--r--drivers/iio/addac/ad74413r.c44
-rw-r--r--drivers/iio/addac/stx104.c462
-rw-r--r--drivers/iio/cdc/ad7746.c3
-rw-r--r--drivers/iio/chemical/scd30_core.c46
-rw-r--r--drivers/iio/chemical/sps30_i2c.c6
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c4
-rw-r--r--drivers/iio/dac/Kconfig22
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5592r-base.c5
-rw-r--r--drivers/iio/dac/ad5686.c7
-rw-r--r--drivers/iio/dac/ad5686.h1
-rw-r--r--drivers/iio/dac/ad5696-i2c.c2
-rw-r--r--drivers/iio/dac/ad5755.c1
-rw-r--r--drivers/iio/dac/cio-dac.c72
-rw-r--r--drivers/iio/dac/max5522.c207
-rw-r--r--drivers/iio/frequency/admv1013.c21
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c1
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/humidity/hts221_buffer.c2
-rw-r--r--drivers/iio/imu/Kconfig1
-rw-r--r--drivers/iio/imu/adis16400.c2
-rw-r--r--drivers/iio/imu/adis16475.c6
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_trace.c2
-rw-r--r--drivers/iio/imu/fxos8700_core.c111
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h20
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c59
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c99
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c21
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/industrialio-buffer.c21
-rw-r--r--drivers/iio/industrialio-core.c64
-rw-r--r--drivers/iio/industrialio-gts-helper.c1077
-rw-r--r--drivers/iio/industrialio-trigger.c17
-rw-r--r--drivers/iio/light/Kconfig14
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/acpi-als.c2
-rw-r--r--drivers/iio/light/cm32181.c21
-rw-r--r--drivers/iio/light/hid-sensor-als.c27
-rw-r--r--drivers/iio/light/hid-sensor-prox.c37
-rw-r--r--drivers/iio/light/max44009.c18
-rw-r--r--drivers/iio/light/rohm-bu27034.c1497
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/st_uvis25_core.c2
-rw-r--r--drivers/iio/light/tsl2563.c189
-rw-r--r--drivers/iio/light/tsl2772.c1
-rw-r--r--drivers/iio/light/vcnl4000.c452
-rw-r--r--drivers/iio/light/vcnl4035.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig14
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c5
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c5
-rw-r--r--drivers/iio/magnetometer/tmag5273.c743
-rw-r--r--drivers/iio/potentiostat/lmp91000.c2
-rw-r--r--drivers/iio/pressure/Kconfig6
-rw-r--r--drivers/iio/pressure/bmp280-core.c765
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c45
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c60
-rw-r--r--drivers/iio/pressure/bmp280-spi.c47
-rw-r--r--drivers/iio/pressure/bmp280.h273
-rw-r--r--drivers/iio/pressure/ms5611.h4
-rw-r--r--drivers/iio/pressure/ms5611_core.c49
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c6
-rw-r--r--drivers/iio/pressure/ms5611_spi.c6
-rw-r--r--drivers/iio/pressure/zpa2326.c2
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/sx9324.c96
-rw-r--r--drivers/iio/proximity/sx9360.c32
-rw-r--r--drivers/iio/proximity/sx9500.c4
-rw-r--r--drivers/iio/proximity/sx_common.c21
-rw-r--r--drivers/iio/proximity/sx_common.h6
-rw-r--r--drivers/iio/temperature/tmp117.c80
-rw-r--r--drivers/iio/trigger/iio-trig-loop.c2
-rw-r--r--drivers/infiniband/core/cm.c3
-rw-r--r--drivers/infiniband/core/cma.c366
-rw-r--r--drivers/infiniband/core/sa_query.c171
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/core/user_mad.c27
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/core/verbs.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c109
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c716
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c211
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c97
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h66
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c337
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h68
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_tlv.h162
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h7183
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h26
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h17
-rw-r--r--drivers/infiniband/hw/efa/efa_io_defs.h42
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c11
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c3
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.h10
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c42
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c9
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h12
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c41
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c21
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c77
-rw-r--r--drivers/infiniband/hw/hfi1/device.c4
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c2
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h5
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c104
-rw-r--r--drivers/infiniband/hw/hfi1/init.c14
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c6
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c84
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h22
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c25
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h31
-rw-r--r--drivers/infiniband/hw/hfi1/sdma_txreq.h1
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h7
-rw-r--r--drivers/infiniband/hw/hfi1/trace_mmu.h4
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c255
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h3
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c61
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c600
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c85
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c298
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h34
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c66
-rw-r--r--drivers/infiniband/hw/irdma/cm.c21
-rw-r--r--drivers/infiniband/hw/irdma/cm.h2
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c324
-rw-r--r--drivers/infiniband/hw/irdma/defs.h9
-rw-r--r--drivers/infiniband/hw/irdma/hw.c22
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c60
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c51
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h1
-rw-r--r--drivers/infiniband/hw/irdma/main.h3
-rw-r--r--drivers/infiniband/hw/irdma/pble.c16
-rw-r--r--drivers/infiniband/hw/irdma/pble.h2
-rw-r--r--drivers/infiniband/hw/irdma/protos.h8
-rw-r--r--drivers/infiniband/hw/irdma/type.h166
-rw-r--r--drivers/infiniband/hw/irdma/utils.c177
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c493
-rw-r--r--drivers/infiniband/hw/mana/main.c22
-rw-r--r--drivers/infiniband/hw/mana/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c8
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c129
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c45
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h3
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c28
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c177
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c33
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c18
-rw-r--r--drivers/infiniband/hw/mlx5/main.c110
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h54
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c500
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c67
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c232
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h4
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c7
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c11
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h3
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c28
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c11
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h82
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c161
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c39
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c617
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c68
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c263
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h108
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c104
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c328
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c268
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h23
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c1026
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h46
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c23
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c21
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c17
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c6
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c9
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h8
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c56
-rw-r--r--drivers/input/Kconfig10
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/input.c23
-rw-r--r--drivers/input/joystick/xpad.c32
-rw-r--r--drivers/input/keyboard/Kconfig21
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/applespi.c10
-rw-r--r--drivers/input/keyboard/cap11xx.c19
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c15
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c315
-rw-r--r--drivers/input/keyboard/gpio_keys.c3
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c2
-rw-r--r--drivers/input/keyboard/matrix_keypad.c6
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c17
-rw-r--r--drivers/input/keyboard/omap4-keypad.c9
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c195
-rw-r--r--drivers/input/keyboard/samsung-keypad.c15
-rw-r--r--drivers/input/keyboard/spear-keyboard.c4
-rw-r--r--drivers/input/keyboard/st-keyscan.c6
-rw-r--r--drivers/input/keyboard/tegra-kbc.c7
-rw-r--r--drivers/input/keyboard/tm2-touchkey.c2
-rw-r--r--drivers/input/misc/88pm860x_onkey.c9
-rw-r--r--drivers/input/misc/Kconfig11
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ad714x-i2c.c14
-rw-r--r--drivers/input/misc/ad714x-spi.c14
-rw-r--r--drivers/input/misc/ad714x.c12
-rw-r--r--drivers/input/misc/ad714x.h4
-rw-r--r--drivers/input/misc/adxl34x-i2c.c25
-rw-r--r--drivers/input/misc/adxl34x-spi.c25
-rw-r--r--drivers/input/misc/adxl34x.c16
-rw-r--r--drivers/input/misc/adxl34x.h4
-rw-r--r--drivers/input/misc/axp20x-pek.c12
-rw-r--r--drivers/input/misc/cma3000_d0x.c2
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c6
-rw-r--r--drivers/input/misc/da7280.c8
-rw-r--r--drivers/input/misc/drv260x.c8
-rw-r--r--drivers/input/misc/drv2665.c8
-rw-r--r--drivers/input/misc/drv2667.c8
-rw-r--r--drivers/input/misc/e3x0-button.c10
-rw-r--r--drivers/input/misc/gpio-vibra.c10
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c2
-rw-r--r--drivers/input/misc/iqs269a.c335
-rw-r--r--drivers/input/misc/iqs626a.c164
-rw-r--r--drivers/input/misc/kxtj9.c8
-rw-r--r--drivers/input/misc/max77693-haptic.c11
-rw-r--r--drivers/input/misc/max8925_onkey.c9
-rw-r--r--drivers/input/misc/max8997_haptic.c7
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c193
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c10
-rw-r--r--drivers/input/misc/pcf8574_keypad.c16
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c10
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c6
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c8
-rw-r--r--drivers/input/misc/pwm-beeper.c10
-rw-r--r--drivers/input/misc/pwm-vibra.c10
-rw-r--r--drivers/input/misc/regulator-haptic.c8
-rw-r--r--drivers/input/misc/rotary_encoder.c10
-rw-r--r--drivers/input/misc/stpmic1_onkey.c12
-rw-r--r--drivers/input/misc/twl4030-vibra.c10
-rw-r--r--drivers/input/misc/twl6040-vibra.c7
-rw-r--r--drivers/input/misc/wistron_btns.c6
-rw-r--r--drivers/input/misc/xen-kbdfront.c5
-rw-r--r--drivers/input/mouse/Kconfig6
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/alps.c16
-rw-r--r--drivers/input/mouse/cyapa.c14
-rw-r--r--drivers/input/mouse/elan_i2c_core.c8
-rw-r--r--drivers/input/mouse/focaltech.c8
-rw-r--r--drivers/input/mouse/navpoint.c9
-rw-r--r--drivers/input/mouse/pxa930_trkball.c250
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/synaptics_i2c.c10
-rw-r--r--drivers/input/rmi4/rmi_bus.c2
-rw-r--r--drivers/input/rmi4/rmi_i2c.c11
-rw-r--r--drivers/input/rmi4/rmi_smbus.c15
-rw-r--r--drivers/input/rmi4/rmi_spi.c13
-rw-r--r--drivers/input/serio/altera_ps2.c4
-rw-r--r--drivers/input/serio/ambakmi.c6
-rw-r--r--drivers/input/serio/apbps2.c4
-rw-r--r--drivers/input/serio/arc_ps2.c4
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h43
-rw-r--r--drivers/input/serio/olpc_apsp.c4
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c6
-rw-r--r--drivers/input/tests/.kunitconfig3
-rw-r--r--drivers/input/tests/Makefile3
-rw-r--r--drivers/input/tests/input_test.c150
-rw-r--r--drivers/input/touchscreen/Kconfig53
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/ad7877.c8
-rw-r--r--drivers/input/touchscreen/ads7846.c36
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c9
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c8
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c10
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c2
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c8
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c8
-rw-r--r--drivers/input/touchscreen/chipone_icn8318.c6
-rw-r--r--drivers/input/touchscreen/chipone_icn8505.c8
-rw-r--r--drivers/input/touchscreen/cy8ctma140.c9
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c9
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c9
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c2
-rw-r--r--drivers/input/touchscreen/cyttsp4_spi.c2
-rw-r--r--drivers/input/touchscreen/cyttsp5.c3
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c7
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c2
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c508
-rw-r--r--drivers/input/touchscreen/eeti_ts.c8
-rw-r--r--drivers/input/touchscreen/egalax_ts.c9
-rw-r--r--drivers/input/touchscreen/ektf2127.c10
-rw-r--r--drivers/input/touchscreen/elants_i2c.c10
-rw-r--r--drivers/input/touchscreen/exc3000.c10
-rw-r--r--drivers/input/touchscreen/goodix.c22
-rw-r--r--drivers/input/touchscreen/hideep.c41
-rw-r--r--drivers/input/touchscreen/ilitek_ts_i2c.c8
-rw-r--r--drivers/input/touchscreen/imagis.c8
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c10
-rw-r--r--drivers/input/touchscreen/ipaq-micro-ts.c11
-rw-r--r--drivers/input/touchscreen/iqs5xx.c8
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c10
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c9
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c27
-rw-r--r--drivers/input/touchscreen/migor_ts.c8
-rw-r--r--drivers/input/touchscreen/mms114.c8
-rw-r--r--drivers/input/touchscreen/msg2638.c8
-rw-r--r--drivers/input/touchscreen/novatek-nvt-ts.c301
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c10
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c3
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c12
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c464
-rw-r--r--drivers/input/touchscreen/s6sy761.c15
-rw-r--r--drivers/input/touchscreen/silead.c8
-rw-r--r--drivers/input/touchscreen/st1232.c10
-rw-r--r--drivers/input/touchscreen/stmfts.c14
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c4
-rw-r--r--drivers/input/touchscreen/surface3_spi.c12
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c8
-rw-r--r--drivers/input/touchscreen/tsc2004.c2
-rw-r--r--drivers/input/touchscreen/tsc2005.c2
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c17
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c7
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c458
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c8
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c8
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c10
-rw-r--r--drivers/input/touchscreen/zforce_ts.c8
-rw-r--r--drivers/input/touchscreen/zinitix.c10
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c220
-rw-r--r--drivers/interconnect/core.c138
-rw-r--r--drivers/interconnect/imx/imx.c20
-rw-r--r--drivers/interconnect/qcom/Kconfig38
-rw-r--r--drivers/interconnect/qcom/Makefile8
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c69
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h17
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c30
-rw-r--r--drivers/interconnect/qcom/msm8974.c20
-rw-r--r--drivers/interconnect/qcom/msm8996.c20
-rw-r--r--drivers/interconnect/qcom/osm-l3.c23
-rw-r--r--drivers/interconnect/qcom/qcm2290.c4
-rw-r--r--drivers/interconnect/qcom/qdu1000.c1067
-rw-r--r--drivers/interconnect/qcom/qdu1000.h95
-rw-r--r--drivers/interconnect/qcom/sa8775p.c2541
-rw-r--r--drivers/interconnect/qcom/sc7180.h6
-rw-r--r--drivers/interconnect/qcom/sc7280.h2
-rw-r--r--drivers/interconnect/qcom/sc8180x.c38
-rw-r--r--drivers/interconnect/qcom/sc8180x.h6
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c25
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h4
-rw-r--r--drivers/interconnect/qcom/sdm670.c440
-rw-r--r--drivers/interconnect/qcom/sdm670.h128
-rw-r--r--drivers/interconnect/qcom/sdm845.h2
-rw-r--r--drivers/interconnect/qcom/sdx55.h4
-rw-r--r--drivers/interconnect/qcom/sm8150.c21
-rw-r--r--drivers/interconnect/qcom/sm8150.h6
-rw-r--r--drivers/interconnect/qcom/sm8250.c21
-rw-r--r--drivers/interconnect/qcom/sm8250.h6
-rw-r--r--drivers/interconnect/qcom/sm8450.c98
-rw-r--r--drivers/interconnect/qcom/sm8550.c2223
-rw-r--r--drivers/interconnect/qcom/sm8550.h178
-rw-r--r--drivers/interconnect/samsung/exynos.c30
-rw-r--r--drivers/iommu/Kconfig23
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd/amd_iommu.h9
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h12
-rw-r--r--drivers/iommu/amd/init.c46
-rw-r--r--drivers/iommu/amd/io_pgtable.c4
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c25
-rw-r--r--drivers/iommu/amd/iommu.c111
-rw-r--r--drivers/iommu/apple-dart.c638
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c36
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c20
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c32
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c37
-rw-r--r--drivers/iommu/dma-iommu.c25
-rw-r--r--drivers/iommu/exynos-iommu.c251
-rw-r--r--drivers/iommu/fsl_pamu.c9
-rw-r--r--drivers/iommu/fsl_pamu_domain.c6
-rw-r--r--drivers/iommu/intel/Kconfig12
-rw-r--r--drivers/iommu/intel/Makefile1
-rw-r--r--drivers/iommu/intel/cap_audit.c2
-rw-r--r--drivers/iommu/intel/dmar.c49
-rw-r--r--drivers/iommu/intel/iommu.c383
-rw-r--r--drivers/iommu/intel/iommu.h152
-rw-r--r--drivers/iommu/intel/irq_remapping.c11
-rw-r--r--drivers/iommu/intel/pasid.c63
-rw-r--r--drivers/iommu/intel/pasid.h7
-rw-r--r--drivers/iommu/intel/perfmon.c897
-rw-r--r--drivers/iommu/intel/perfmon.h64
-rw-r--r--drivers/iommu/intel/svm.c93
-rw-r--r--drivers/iommu/ioasid.c422
-rw-r--r--drivers/iommu/iommu-sva.c68
-rw-r--r--drivers/iommu/iommu-sva.h4
-rw-r--r--drivers/iommu/iommu-traces.c1
-rw-r--r--drivers/iommu/iommu.c568
-rw-r--r--drivers/iommu/iommufd/Kconfig2
-rw-r--r--drivers/iommu/iommufd/device.c211
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c70
-rw-r--r--drivers/iommu/iommufd/ioas.c14
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h41
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h2
-rw-r--r--drivers/iommu/iommufd/main.c3
-rw-r--r--drivers/iommu/iommufd/pages.c22
-rw-r--r--drivers/iommu/iommufd/selftest.c219
-rw-r--r--drivers/iommu/iommufd/vfio_compat.c109
-rw-r--r--drivers/iommu/iova.c4
-rw-r--r--drivers/iommu/ipmmu-vmsa.c51
-rw-r--r--drivers/iommu/msm_iommu.c11
-rw-r--r--drivers/iommu/mtk_iommu.c167
-rw-r--r--drivers/iommu/mtk_iommu_v1.c13
-rw-r--r--drivers/iommu/of_iommu.c96
-rw-r--r--drivers/iommu/omap-iommu.c13
-rw-r--r--drivers/iommu/rockchip-iommu.c62
-rw-r--r--drivers/iommu/s390-iommu.c24
-rw-r--r--drivers/iommu/sprd-iommu.c76
-rw-r--r--drivers/iommu/sun50i-iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c6
-rw-r--r--drivers/iommu/tegra-smmu.c9
-rw-r--r--drivers/ipack/devices/ipoctal.c6
-rw-r--r--drivers/ipack/ipack.c4
-rw-r--r--drivers/irqchip/Kconfig11
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-al-fic.c1
-rw-r--r--drivers/irqchip/irq-alpine-msi.c9
-rw-r--r--drivers/irqchip/irq-apple-aic.c214
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c3
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c5
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c6
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c3
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c6
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c2
-rw-r--r--drivers/irqchip/irq-davinci-aintc.c163
-rw-r--r--drivers/irqchip/irq-gic-v2m.c7
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c56
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c5
-rw-r--r--drivers/irqchip/irq-gic-v3.c138
-rw-r--r--drivers/irqchip/irq-gic-v4.c9
-rw-r--r--drivers/irqchip/irq-gic.c66
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c1
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c37
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c13
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c9
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c6
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c1
-rw-r--r--drivers/irqchip/irq-mbigen.c14
-rw-r--r--drivers/irqchip/irq-mchp-eic.c1
-rw-r--r--drivers/irqchip/irq-mips-gic.c26
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c13
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c1
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c1
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c1
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c1
-rw-r--r--drivers/irqchip/irq-riscv-intc.c71
-rw-r--r--drivers/irqchip/irq-sifive-plic.c93
-rw-r--r--drivers/irqchip/irq-sl28cpld.c1
-rw-r--r--drivers/irqchip/irq-st.c15
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c1
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c2
-rw-r--r--drivers/irqchip/irqchip.c8
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c31
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c1
-rw-r--r--drivers/isdn/mISDN/core.c7
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c15
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c2
-rw-r--r--drivers/leds/Kconfig35
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/flash/Kconfig28
-rw-r--r--drivers/leds/flash/Makefile2
-rw-r--r--drivers/leds/flash/leds-mt6360.c38
-rw-r--r--drivers/leds/flash/leds-mt6370-flash.c573
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c773
-rw-r--r--drivers/leds/led-class.c141
-rw-r--r--drivers/leds/leds-an30259a.c21
-rw-r--r--drivers/leds/leds-asic3.c177
-rw-r--r--drivers/leds/leds-bcm6328.c49
-rw-r--r--drivers/leds/leds-bcm6358.c32
-rw-r--r--drivers/leds/leds-bd2606mvv.c160
-rw-r--r--drivers/leds/leds-bd2802.c5
-rw-r--r--drivers/leds/leds-blinkm.c5
-rw-r--r--drivers/leds/leds-is31fl319x.c7
-rw-r--r--drivers/leds/leds-is31fl32xx.c5
-rw-r--r--drivers/leds/leds-lm3530.c5
-rw-r--r--drivers/leds/leds-lm3532.c5
-rw-r--r--drivers/leds/leds-lm355x.c6
-rw-r--r--drivers/leds/leds-lm3642.c5
-rw-r--r--drivers/leds/leds-lm3692x.c6
-rw-r--r--drivers/leds/leds-lm3697.c5
-rw-r--r--drivers/leds/leds-lp3944.c5
-rw-r--r--drivers/leds/leds-lp3952.c5
-rw-r--r--drivers/leds/leds-lp5521.c6
-rw-r--r--drivers/leds/leds-lp5523.c6
-rw-r--r--drivers/leds/leds-lp5562.c5
-rw-r--r--drivers/leds/leds-lp8501.c6
-rw-r--r--drivers/leds/leds-lp8860.c15
-rw-r--r--drivers/leds/leds-mt6323.c30
-rw-r--r--drivers/leds/leds-pca9532.c9
-rw-r--r--drivers/leds/leds-pca955x.c26
-rw-r--r--drivers/leds/leds-pca963x.c6
-rw-r--r--drivers/leds/leds-pm8058.c29
-rw-r--r--drivers/leds/leds-pwm.c4
-rw-r--r--drivers/leds/leds-s3c24xx.c83
-rw-r--r--drivers/leds/leds-syscon.c49
-rw-r--r--drivers/leds/leds-tca6507.c13
-rw-r--r--drivers/leds/leds-tlc591xx.c7
-rw-r--r--drivers/leds/leds-turris-omnia.c5
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/rgb/Kconfig13
-rw-r--r--drivers/leds/rgb/Makefile1
-rw-r--r--drivers/leds/rgb/leds-mt6370-rgb.c1011
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c4
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c160
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c2
-rw-r--r--drivers/leds/trigger/Kconfig1
-rw-r--r--drivers/leds/trigger/ledtrig-disk.c4
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/adb.c4
-rw-r--r--drivers/macintosh/macio_asic.c7
-rw-r--r--drivers/macintosh/rack-meter.c2
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c1
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c4
-rw-r--r--drivers/mailbox/Kconfig4
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c2
-rw-r--r--drivers/mailbox/hi6220-mailbox.c5
-rw-r--r--drivers/mailbox/mailbox-mpfs.c55
-rw-r--r--drivers/mailbox/mailbox-test.c8
-rw-r--r--drivers/mailbox/mailbox.c96
-rw-r--r--drivers/mailbox/omap-mailbox.c25
-rw-r--r--drivers/mailbox/pcc.c84
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c12
-rw-r--r--drivers/mailbox/rockchip-mailbox.c3
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c19
-rw-r--r--drivers/mcb/mcb-core.c4
-rw-r--r--drivers/mcb/mcb-lpc.c35
-rw-r--r--drivers/mcb/mcb-parse.c15
-rw-r--r--drivers/mcb/mcb-pci.c27
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/bcache/bcache_ondisk.h11
-rw-r--r--drivers/md/bcache/journal.c3
-rw-r--r--drivers/md/bcache/super.c1
-rw-r--r--drivers/md/dm-audit.c2
-rw-r--r--drivers/md/dm-bio-prison-v1.c113
-rw-r--r--drivers/md/dm-bio-prison-v1.h16
-rw-r--r--drivers/md/dm-bio-prison-v2.c15
-rw-r--r--drivers/md/dm-bio-prison-v2.h11
-rw-r--r--drivers/md/dm-bio-record.h1
-rw-r--r--drivers/md/dm-bufio.c2032
-rw-r--r--drivers/md/dm-builtin.c3
-rw-r--r--drivers/md/dm-cache-background-tracker.c17
-rw-r--r--drivers/md/dm-cache-background-tracker.h47
-rw-r--r--drivers/md/dm-cache-block-types.h1
-rw-r--r--drivers/md/dm-cache-metadata.c76
-rw-r--r--drivers/md/dm-cache-metadata.h5
-rw-r--r--drivers/md/dm-cache-policy-internal.h14
-rw-r--r--drivers/md/dm-cache-policy-smq.c166
-rw-r--r--drivers/md/dm-cache-policy.c3
-rw-r--r--drivers/md/dm-cache-policy.h7
-rw-r--r--drivers/md/dm-cache-target.c142
-rw-r--r--drivers/md/dm-clone-target.c4
-rw-r--r--drivers/md/dm-core.h9
-rw-r--r--drivers/md/dm-crypt.c161
-rw-r--r--drivers/md/dm-delay.c33
-rw-r--r--drivers/md/dm-dust.c21
-rw-r--r--drivers/md/dm-ebs-target.c24
-rw-r--r--drivers/md/dm-era-target.c144
-rw-r--r--drivers/md/dm-exception-store.c7
-rw-r--r--drivers/md/dm-exception-store.h57
-rw-r--r--drivers/md/dm-flakey.c127
-rw-r--r--drivers/md/dm-ima.c5
-rw-r--r--drivers/md/dm-ima.h7
-rw-r--r--drivers/md/dm-init.c5
-rw-r--r--drivers/md/dm-integrity.c560
-rw-r--r--drivers/md/dm-io-rewind.c8
-rw-r--r--drivers/md/dm-io-tracker.h1
-rw-r--r--drivers/md/dm-io.c92
-rw-r--r--drivers/md/dm-ioctl.c180
-rw-r--r--drivers/md/dm-kcopyd.c65
-rw-r--r--drivers/md/dm-linear.c11
-rw-r--r--drivers/md/dm-log-userspace-base.c15
-rw-r--r--drivers/md/dm-log-userspace-transfer.c8
-rw-r--r--drivers/md/dm-log-userspace-transfer.h1
-rw-r--r--drivers/md/dm-log-writes.c44
-rw-r--r--drivers/md/dm-log.c87
-rw-r--r--drivers/md/dm-mpath.c130
-rw-r--r--drivers/md/dm-mpath.h3
-rw-r--r--drivers/md/dm-path-selector.c4
-rw-r--r--drivers/md/dm-path-selector.h28
-rw-r--r--drivers/md/dm-ps-historical-service-time.c2
-rw-r--r--drivers/md/dm-ps-io-affinity.c6
-rw-r--r--drivers/md/dm-ps-queue-length.c15
-rw-r--r--drivers/md/dm-ps-round-robin.c22
-rw-r--r--drivers/md/dm-ps-service-time.c26
-rw-r--r--drivers/md/dm-raid.c57
-rw-r--r--drivers/md/dm-raid1.c106
-rw-r--r--drivers/md/dm-region-hash.c29
-rw-r--r--drivers/md/dm-rq.c27
-rw-r--r--drivers/md/dm-rq.h3
-rw-r--r--drivers/md/dm-snap-persistent.c50
-rw-r--r--drivers/md/dm-snap-transient.c18
-rw-r--r--drivers/md/dm-snap.c103
-rw-r--r--drivers/md/dm-stats.c110
-rw-r--r--drivers/md/dm-stats.h8
-rw-r--r--drivers/md/dm-stripe.c57
-rw-r--r--drivers/md/dm-switch.c67
-rw-r--r--drivers/md/dm-sysfs.c12
-rw-r--r--drivers/md/dm-table.c83
-rw-r--r--drivers/md/dm-target.c26
-rw-r--r--drivers/md/dm-thin-metadata.c68
-rw-r--r--drivers/md/dm-thin-metadata.h1
-rw-r--r--drivers/md/dm-thin.c215
-rw-r--r--drivers/md/dm-uevent.c6
-rw-r--r--drivers/md/dm-uevent.h6
-rw-r--r--drivers/md/dm-unstripe.c15
-rw-r--r--drivers/md/dm-verity-fec.c34
-rw-r--r--drivers/md/dm-verity-fec.h18
-rw-r--r--drivers/md/dm-verity-target.c125
-rw-r--r--drivers/md/dm-verity-verify-sig.c2
-rw-r--r--drivers/md/dm-verity-verify-sig.h2
-rw-r--r--drivers/md/dm-verity.h8
-rw-r--r--drivers/md/dm-writecache.c193
-rw-r--r--drivers/md/dm-zero.c32
-rw-r--r--drivers/md/dm-zone.c2
-rw-r--r--drivers/md/dm-zoned-metadata.c28
-rw-r--r--drivers/md/dm-zoned-target.c17
-rw-r--r--drivers/md/dm.c180
-rw-r--r--drivers/md/dm.h38
-rw-r--r--drivers/md/md-bitmap.c143
-rw-r--r--drivers/md/md-linear.c14
-rw-r--r--drivers/md/md.c130
-rw-r--r--drivers/md/md.h19
-rw-r--r--drivers/md/persistent-data/dm-array.c82
-rw-r--r--drivers/md/persistent-data/dm-array.h3
-rw-r--r--drivers/md/persistent-data/dm-bitset.c14
-rw-r--r--drivers/md/persistent-data/dm-bitset.h1
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c32
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h7
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c52
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c21
-rw-r--r--drivers/md/persistent-data/dm-btree.c130
-rw-r--r--drivers/md/persistent-data/dm-btree.h15
-rw-r--r--drivers/md/persistent-data/dm-persistent-data-internal.h7
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c52
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h11
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c13
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c24
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map.h1
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c18
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h3
-rw-r--r--drivers/md/raid0.c14
-rw-r--r--drivers/md/raid10.c102
-rw-r--r--drivers/md/raid5.c50
-rw-r--r--drivers/media/cec/core/cec-adap.c7
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c5
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c22
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c6
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c6
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c5
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.c6
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c6
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c6
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.c6
-rw-r--r--drivers/media/common/Kconfig4
-rw-r--r--drivers/media/common/Makefile3
-rw-r--r--drivers/media/common/btcx-risc.h29
-rw-r--r--drivers/media/common/saa7146/Kconfig (renamed from drivers/staging/media/deprecated/saa7146/common/Kconfig)2
-rw-r--r--drivers/media/common/saa7146/Makefile (renamed from drivers/staging/media/deprecated/saa7146/common/Makefile)0
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c (renamed from drivers/staging/media/deprecated/saa7146/common/saa7146_core.c)42
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c (renamed from drivers/staging/media/deprecated/saa7146/common/saa7146_fops.c)375
-rw-r--r--drivers/media/common/saa7146/saa7146_hlp.c (renamed from drivers/staging/media/deprecated/saa7146/common/saa7146_hlp.c)357
-rw-r--r--drivers/media/common/saa7146/saa7146_i2c.c (renamed from drivers/staging/media/deprecated/saa7146/common/saa7146_i2c.c)2
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c (renamed from drivers/staging/media/deprecated/saa7146/common/saa7146_vbi.c)289
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c725
-rw-r--r--drivers/media/common/uvc.c183
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c30
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c5
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c2
-rw-r--r--drivers/media/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd_dvbt.c14
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd_dvbt2.c14
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c11
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c5
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c6
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c9
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c6
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c6
-rw-r--r--drivers/media/dvb-frontends/tc90522.c6
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c6
-rw-r--r--drivers/media/i2c/Kconfig108
-rw-r--r--drivers/media/i2c/Makefile11
-rw-r--r--drivers/media/i2c/ad9389b.c1215
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c21
-rw-r--r--drivers/media/i2c/adv7604.c11
-rw-r--r--drivers/media/i2c/ak7375.c38
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c157
-rw-r--r--drivers/media/i2c/ccs/ccs.h14
-rw-r--r--drivers/media/i2c/cs53l32a.c6
-rw-r--r--drivers/media/i2c/hi556.c150
-rw-r--r--drivers/media/i2c/hi846.c11
-rw-r--r--drivers/media/i2c/imx219.c311
-rw-r--r--drivers/media/i2c/imx258.c33
-rw-r--r--drivers/media/i2c/imx290.c1491
-rw-r--r--drivers/media/i2c/imx296.c1163
-rw-r--r--drivers/media/i2c/imx334.c322
-rw-r--r--drivers/media/i2c/imx415.c1300
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c5
-rw-r--r--drivers/media/i2c/m5mols/Kconfig8
-rw-r--r--drivers/media/i2c/m5mols/Makefile4
-rw-r--r--drivers/media/i2c/m5mols/m5mols.h349
-rw-r--r--drivers/media/i2c/m5mols/m5mols_capture.c158
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c625
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c1051
-rw-r--r--drivers/media/i2c/m5mols/m5mols_reg.h359
-rw-r--r--drivers/media/i2c/max9286.c464
-rw-r--r--drivers/media/i2c/msp3400-driver.c5
-rw-r--r--drivers/media/i2c/mt9m032.c891
-rw-r--r--drivers/media/i2c/mt9p031.c6
-rw-r--r--drivers/media/i2c/mt9t001.c992
-rw-r--r--drivers/media/i2c/mt9v032.c6
-rw-r--r--drivers/media/i2c/noon010pc30.c821
-rw-r--r--drivers/media/i2c/ov13b10.c75
-rw-r--r--drivers/media/i2c/ov2685.c85
-rw-r--r--drivers/media/i2c/ov2740.c4
-rw-r--r--drivers/media/i2c/ov5640.c86
-rw-r--r--drivers/media/i2c/ov5647.c56
-rw-r--r--drivers/media/i2c/ov5670.c400
-rw-r--r--drivers/media/i2c/ov5675.c198
-rw-r--r--drivers/media/i2c/ov5695.c5
-rw-r--r--drivers/media/i2c/ov7670.c19
-rw-r--r--drivers/media/i2c/ov772x.c3
-rw-r--r--drivers/media/i2c/ov8856.c40
-rw-r--r--drivers/media/i2c/ov8858.c2008
-rw-r--r--drivers/media/i2c/ov9282.c9
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c22
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c1
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h1
-rw-r--r--drivers/media/i2c/s5k6aa.c1652
-rw-r--r--drivers/media/i2c/saa7115.c6
-rw-r--r--drivers/media/i2c/saa7127.c6
-rw-r--r--drivers/media/i2c/sr030pc30.c762
-rw-r--r--drivers/media/i2c/st-vgxy61.c27
-rw-r--r--drivers/media/i2c/tc358746.c13
-rw-r--r--drivers/media/i2c/tda1997x.c6
-rw-r--r--drivers/media/i2c/tvaudio.c5
-rw-r--r--drivers/media/i2c/tvp514x.c6
-rw-r--r--drivers/media/i2c/video-i2c.c6
-rw-r--r--drivers/media/i2c/vs6624.c854
-rw-r--r--drivers/media/i2c/vs6624_regs.h325
-rw-r--r--drivers/media/mc/mc-device.c3
-rw-r--r--drivers/media/mc/mc-entity.c86
-rw-r--r--drivers/media/pci/Kconfig2
-rw-r--r--drivers/media/pci/Makefile4
-rw-r--r--drivers/media/pci/bt8xx/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c153
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.h9
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c15
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c436
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c131
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h28
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c21
-rw-r--r--drivers/media/pci/cx18/Kconfig2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c4
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h24
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c85
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h3
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c391
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c27
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c278
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c13
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c1
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c50
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h8
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c34
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c412
-rw-r--r--drivers/media/pci/saa7134/saa7134.h13
-rw-r--r--drivers/media/pci/saa7146/Kconfig (renamed from drivers/staging/media/deprecated/saa7146/saa7146/Kconfig)15
-rw-r--r--drivers/media/pci/saa7146/Makefile (renamed from drivers/staging/media/deprecated/saa7146/saa7146/Makefile)0
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c (renamed from drivers/staging/media/deprecated/saa7146/saa7146/hexium_gemini.c)25
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c (renamed from drivers/staging/media/deprecated/saa7146/saa7146/hexium_orion.c)26
-rw-r--r--drivers/media/pci/saa7146/mxb.c (renamed from drivers/staging/media/deprecated/saa7146/saa7146/mxb.c)55
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c10
-rw-r--r--drivers/media/pci/ttpci/Kconfig (renamed from drivers/staging/media/deprecated/saa7146/ttpci/Kconfig)17
-rw-r--r--drivers/media/pci/ttpci/Makefile (renamed from drivers/staging/media/deprecated/saa7146/ttpci/Makefile)0
-rw-r--r--drivers/media/pci/ttpci/budget-av.c (renamed from drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c)7
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c (renamed from drivers/staging/media/deprecated/saa7146/ttpci/budget-ci.c)0
-rw-r--r--drivers/media/pci/ttpci/budget-core.c (renamed from drivers/staging/media/deprecated/saa7146/ttpci/budget-core.c)0
-rw-r--r--drivers/media/pci/ttpci/budget.c (renamed from drivers/staging/media/deprecated/saa7146/ttpci/budget.c)0
-rw-r--r--drivers/media/pci/ttpci/budget.h (renamed from drivers/staging/media/deprecated/saa7146/ttpci/budget.h)2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c16
-rw-r--r--drivers/media/pci/zoran/zoran_device.h2
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c6
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c6
-rw-r--r--drivers/media/platform/amphion/vdec.c53
-rw-r--r--drivers/media/platform/amphion/venc.c18
-rw-r--r--drivers/media/platform/amphion/vpu_codec.h3
-rw-r--r--drivers/media/platform/amphion/vpu_color.c6
-rw-r--r--drivers/media/platform/amphion/vpu_core.c6
-rw-r--r--drivers/media/platform/amphion/vpu_drv.c6
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c45
-rw-r--r--drivers/media/platform/amphion/vpu_malone.h1
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c6
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c10
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c6
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c6
-rw-r--r--drivers/media/platform/chips-media/coda-common.c5
-rw-r--r--drivers/media/platform/chips-media/imx-vdoa.c6
-rw-r--r--drivers/media/platform/intel/pxa_camera.c10
-rw-r--r--drivers/media/platform/m2m-deinterlace.c6
-rw-r--r--drivers/media/platform/marvell/mcam-core.c4
-rw-r--r--drivers/media/platform/marvell/mmp-driver.c18
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c141
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h28
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c43
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c38
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_core.c5
-rw-r--r--drivers/media/platform/mediatek/mdp3/Kconfig7
-rw-r--r--drivers/media/platform/mediatek/mdp3/Makefile2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c453
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_sm_mt8183.h144
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h189
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h20
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c148
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c537
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h24
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c56
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h18
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c36
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c293
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h214
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-type.h53
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c193
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h29
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c31
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c16
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c12
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c24
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c95
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h12
-rw-r--r--drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c4
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c6
-rw-r--r--drivers/media/platform/microchip/microchip-csi2dc.c6
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c114
-rw-r--r--drivers/media/platform/microchip/microchip-sama5d2-isc.c6
-rw-r--r--drivers/media/platform/microchip/microchip-sama7g5-isc.c6
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/vde.c6
-rw-r--r--drivers/media/platform/nxp/Kconfig2
-rw-r--r--drivers/media/platform/nxp/Makefile1
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c12
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c19
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h5
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c379
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h4
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c258
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c365
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c270
-rw-r--r--drivers/media/platform/nxp/imx8-isi/Kconfig22
-rw-r--r--drivers/media/platform/nxp/imx8-isi/Makefile8
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c539
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h394
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c529
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-debug.c109
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c651
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c858
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c867
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-regs.h418
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c1512
-rw-r--r--drivers/media/platform/nxp/mx2_emmaprp.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c54
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c44
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h11
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c3
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c61
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen1.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c26
-rw-r--r--drivers/media/platform/qcom/camss/camss.c8
-rw-r--r--drivers/media/platform/qcom/venus/core.c6
-rw-r--r--drivers/media/platform/qcom/venus/core.h10
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c8
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h18
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c4
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c29
-rw-r--r--drivers/media/platform/qcom/venus/venc.c115
-rw-r--r--drivers/media/platform/renesas/rcar-fcp.c6
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c11
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c42
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-csi2.c21
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c21
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c8
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c21
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c6
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c10
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c6
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c8
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c2
-rw-r--r--drivers/media/platform/renesas/sh_vou.c5
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c26
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c21
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c11
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hgo.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lif.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c18
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.h2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h28
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c64
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c11
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c6
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c67
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c6
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c14
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.c5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c18
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.h3
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c10
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c14
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c18
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c6
-rw-r--r--drivers/media/platform/samsung/s3c-camif/Kconfig8
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c5
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-core.c11
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c5
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c9
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c8
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c6
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c6
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c6
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c11
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.h2
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c7
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c7
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c4
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c6
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c6
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c6
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c6
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c6
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c41
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c37
-rw-r--r--drivers/media/platform/ti/cal/cal.c10
-rw-r--r--drivers/media/platform/ti/davinci/Kconfig16
-rw-r--r--drivers/media/platform/ti/davinci/Makefile3
-rw-r--r--drivers/media/platform/ti/davinci/vpbe.c840
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_display.c1510
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_osd.c1582
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_osd_regs.h352
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_venc.c676
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_venc_regs.h165
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c8
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c5
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c6
-rw-r--r--drivers/media/platform/ti/davinci/vpss.c529
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c5
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c15
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c5
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c40
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c58
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c109
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.h3
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c2
-rw-r--r--drivers/media/platform/via/via-camera.c13
-rw-r--r--drivers/media/platform/video-mux.c6
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c6
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c33
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c6
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c6
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c8
-rw-r--r--drivers/media/radio/radio-shark.c10
-rw-r--r--drivers/media/radio/radio-shark2.c10
-rw-r--r--drivers/media/radio/radio-si476x.c6
-rw-r--r--drivers/media/radio/radio-timb.c5
-rw-r--r--drivers/media/radio/radio-wl1273.c6
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c6
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/rc/Kconfig4
-rw-r--r--drivers/media/rc/ene_ir.c3
-rw-r--r--drivers/media/rc/gpio-ir-recv.c18
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c5
-rw-r--r--drivers/media/rc/ir-hix5hd2.c5
-rw-r--r--drivers/media/rc/ir-rx51.c6
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-mxiii.c57
-rw-r--r--drivers/media/rc/keymaps/rc-dreambox.c151
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/media/rc/meson-ir-tx.c6
-rw-r--r--drivers/media/rc/meson-ir.c6
-rw-r--r--drivers/media/rc/mtk-cir.c6
-rw-r--r--drivers/media/rc/pwm-ir-tx.c2
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/rc/st_rc.c5
-rw-r--r--drivers/media/rc/sunxi-cir.c6
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c6
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c6
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.c2
-rw-r--r--drivers/media/test-drivers/vim2m.c6
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.c8
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c6
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c6
-rw-r--r--drivers/media/test-drivers/visl/visl-video.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c54
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h19
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c131
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c272
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c74
-rw-r--r--drivers/media/tuners/it913x.c6
-rw-r--r--drivers/media/tuners/mxl5005s.c12
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/usb/au0828/au0828-core.c11
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c14
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c2
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c6
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-main.c18
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c59
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.h16
-rw-r--r--drivers/media/usb/siano/smsusb.c1
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/uvc/Kconfig1
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c342
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c185
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_status.c125
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c111
-rw-r--r--drivers/media/usb/uvc/uvc_video.c58
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h39
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c85
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-h264.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c105
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c15
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev-priv.h14
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c1213
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c2
-rw-r--r--drivers/memory/Kconfig2
-rw-r--r--drivers/memory/atmel-ebi.c2
-rw-r--r--drivers/memory/atmel-sdramc.c6
-rw-r--r--drivers/memory/bt1-l2-ctl.c1
-rw-r--r--drivers/memory/da8xx-ddrctl.c1
-rw-r--r--drivers/memory/fsl_ifc.c1
-rw-r--r--drivers/memory/mtk-smi.c6
-rw-r--r--drivers/memory/mvebu-devbus.c4
-rw-r--r--drivers/memory/omap-gpmc.c3
-rw-r--r--drivers/memory/renesas-rpc-if.c155
-rw-r--r--drivers/memory/tegra/mc.c17
-rw-r--r--drivers/memory/tegra/tegra124-emc.c12
-rw-r--r--drivers/memory/tegra/tegra186-emc.c1
-rw-r--r--drivers/memory/tegra/tegra186.c36
-rw-r--r--drivers/memory/tegra/tegra20-emc.c12
-rw-r--r--drivers/memory/tegra/tegra210-emc-cc-r21021.c2
-rw-r--r--drivers/memory/tegra/tegra210-emc-table.c2
-rw-r--r--drivers/memory/tegra/tegra30-emc.c12
-rw-r--r--drivers/memory/ti-emif-pm.c7
-rw-r--r--drivers/memstick/core/Kconfig2
-rw-r--r--drivers/memstick/core/memstick.c11
-rw-r--r--drivers/memstick/host/r592.c2
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptbase.h3
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/fusion/mptscsih.c1
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/mfd/88pm860x-core.c4
-rw-r--r--drivers/mfd/Kconfig118
-rw-r--r--drivers/mfd/Makefile13
-rw-r--r--drivers/mfd/altera-sysmgr.c1
-rw-r--r--drivers/mfd/arizona-core.c2
-rw-r--r--drivers/mfd/arizona-i2c.c1
-rw-r--r--drivers/mfd/arizona-spi.c1
-rw-r--r--drivers/mfd/asic3.c1071
-rw-r--r--drivers/mfd/atc260x-i2c.c2
-rw-r--r--drivers/mfd/atmel-flexcom.c4
-rw-r--r--drivers/mfd/atmel-smc.c2
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x.c135
-rw-r--r--drivers/mfd/bcm2835-pm.c3
-rw-r--r--drivers/mfd/cros_ec_dev.c6
-rw-r--r--drivers/mfd/da903x.c1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/da9052-i2c.c1
-rw-r--r--drivers/mfd/da9052-spi.c1
-rw-r--r--drivers/mfd/da9055-core.c1
-rw-r--r--drivers/mfd/da9055-i2c.c1
-rw-r--r--drivers/mfd/da9062-core.c176
-rw-r--r--drivers/mfd/dln2.c1
-rw-r--r--drivers/mfd/ezx-pcap.c1
-rw-r--r--drivers/mfd/hi6421-pmic-core.c4
-rw-r--r--drivers/mfd/htc-pasic3.c210
-rw-r--r--drivers/mfd/intel-lpss-pci.c15
-rw-r--r--drivers/mfd/intel-m10-bmc-core.c122
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c455
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c168
-rw-r--r--drivers/mfd/intel-m10-bmc.c238
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c22
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c1
-rw-r--r--drivers/mfd/ipaq-micro.c4
-rw-r--r--drivers/mfd/kempld-core.c7
-rw-r--r--drivers/mfd/khadas-mcu.c2
-rw-r--r--drivers/mfd/lm3533-core.c2
-rw-r--r--drivers/mfd/lp8788.c1
-rw-r--r--drivers/mfd/max8925-core.c6
-rw-r--r--drivers/mfd/mfd-core.c26
-rw-r--r--drivers/mfd/ntxec.c1
-rw-r--r--drivers/mfd/ocelot-core.c81
-rw-r--r--drivers/mfd/ocelot-spi.c3
-rw-r--r--drivers/mfd/omap-usb-host.c1
-rw-r--r--drivers/mfd/omap-usb-tll.c6
-rw-r--r--drivers/mfd/pcf50633-adc.c7
-rw-r--r--drivers/mfd/qcom-pm8008.c132
-rw-r--r--drivers/mfd/qcom-pm8xxx.c3
-rw-r--r--drivers/mfd/qcom_rpm.c4
-rw-r--r--drivers/mfd/rk808.c1
-rw-r--r--drivers/mfd/rsmu.h2
-rw-r--r--drivers/mfd/rsmu_i2c.c165
-rw-r--r--drivers/mfd/rsmu_spi.c48
-rw-r--r--drivers/mfd/rz-mtu3.c391
-rw-r--r--drivers/mfd/rz-mtu3.h147
-rw-r--r--drivers/mfd/sec-core.c46
-rw-r--r--drivers/mfd/sec-irq.c89
-rw-r--r--drivers/mfd/si476x-cmd.c14
-rw-r--r--drivers/mfd/simple-mfd-i2c.c15
-rw-r--r--drivers/mfd/ssbi.c4
-rw-r--r--drivers/mfd/stmpe-i2c.c1
-rw-r--r--drivers/mfd/stmpe-spi.c1
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/sun4i-gpadc.c4
-rw-r--r--drivers/mfd/syscon.c27
-rw-r--r--drivers/mfd/t7l66xb.c427
-rw-r--r--drivers/mfd/tc3589x.c1
-rw-r--r--drivers/mfd/tc6387xb.c228
-rw-r--r--drivers/mfd/tc6393xb.c907
-rw-r--r--drivers/mfd/tmio_core.c70
-rw-r--r--drivers/mfd/tps6586x.c1
-rw-r--r--drivers/mfd/tqmx86.c52
-rw-r--r--drivers/mfd/twl-core.c74
-rw-r--r--drivers/mfd/twl4030-audio.c1
-rw-r--r--drivers/mfd/twl4030-power.c6
-rw-r--r--drivers/mfd/twl6040.c1
-rw-r--r--drivers/mfd/ucb1400_core.c158
-rw-r--r--drivers/mfd/wm8994-core.c19
-rw-r--r--drivers/mfd/wm97xx-core.c4
-rw-r--r--drivers/misc/Kconfig21
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c6
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cardreader/alcor_pci.c167
-rw-r--r--drivers/misc/cxl/context.c2
-rw-r--r--drivers/misc/cxl/file.c2
-rw-r--r--drivers/misc/eeprom/at25.c8
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c10
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/fastrpc.c100
-rw-r--r--drivers/misc/genwqe/card_base.c4
-rw-r--r--drivers/misc/genwqe/card_utils.c8
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h213
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h2670
-rw-r--r--drivers/misc/hpilo.c8
-rw-r--r--drivers/misc/isl29003.c10
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c66
-rw-r--r--drivers/misc/lkdtm/heap.c1
-rw-r--r--drivers/misc/lkdtm/stackleak.c6
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c10
-rw-r--r--drivers/misc/mei/bus-fixup.c28
-rw-r--r--drivers/misc/mei/bus.c19
-rw-r--r--drivers/misc/mei/client.c4
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c111
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h354
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw.h2
-rw-r--r--drivers/misc/mei/main.c3
-rw-r--r--drivers/misc/mei/mei_dev.h5
-rw-r--r--drivers/misc/mei/pci-me.c22
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c6
-rw-r--r--drivers/misc/ocxl/context.c4
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/misc/ocxl/sysfs.c2
-rw-r--r--drivers/misc/open-dice.c18
-rw-r--r--drivers/misc/pci_endpoint_test.c4
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/sgi-gru/grufile.c4
-rw-r--r--drivers/misc/sgi-gru/grukservices.c8
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c24
-rw-r--r--drivers/misc/smpro-errmon.c82
-rw-r--r--drivers/misc/sram.c28
-rw-r--r--drivers/misc/sram.h1
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/tifm_core.c4
-rw-r--r--drivers/misc/uacce/uacce.c54
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c49
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c10
-rw-r--r--drivers/misc/xilinx_tmr_inject.c171
-rw-r--r--drivers/misc/xilinx_tmr_manager.c220
-rw-r--r--drivers/mmc/core/Kconfig3
-rw-r--r--drivers/mmc/core/block.c27
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/core.c5
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/host.c26
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/mmc_test.c6
-rw-r--r--drivers/mmc/core/pwrseq_simple.c4
-rw-r--r--drivers/mmc/core/regulator.c44
-rw-r--r--drivers/mmc/core/sdio_bus.c21
-rw-r--r--drivers/mmc/core/sdio_cis.c12
-rw-r--r--drivers/mmc/core/sdio_io.c2
-rw-r--r--drivers/mmc/core/sdio_uart.c23
-rw-r--r--drivers/mmc/core/slot-gpio.c17
-rw-r--r--drivers/mmc/host/Kconfig80
-rw-r--r--drivers/mmc/host/Makefile4
-rw-r--r--drivers/mmc/host/atmel-mci.c3
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c3
-rw-r--r--drivers/mmc/host/dw_mmc-starfive.c186
-rw-r--r--drivers/mmc/host/jz4740_mmc.c51
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c139
-rw-r--r--drivers/mmc/host/mmc_spi.c8
-rw-r--r--drivers/mmc/host/mmci.c22
-rw-r--r--drivers/mmc/host/moxart-mmc.c9
-rw-r--r--drivers/mmc/host/omap.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c8
-rw-r--r--drivers/mmc/host/owl-mmc.c3
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c21
-rw-r--r--drivers/mmc/host/s3cmci.c1777
-rw-r--r--drivers/mmc/host/s3cmci.h75
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c8
-rw-r--r--drivers/mmc/host/sdhci-cadence.c175
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c113
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c92
-rw-r--r--drivers/mmc/host/sdhci-iproc.c14
-rw-r--r--drivers/mmc/host/sdhci-msm.c2
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c275
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c3
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c28
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c24
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c8
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c30
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c4
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c156
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c4
-rw-r--r--drivers/mmc/host/sdhci-s3c.c4
-rw-r--r--drivers/mmc/host/sdhci-sprd.c6
-rw-r--r--drivers/mmc/host/sdhci.c3
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c151
-rw-r--r--drivers/mmc/host/sdricoh_cs.c8
-rw-r--r--drivers/mmc/host/sunxi-mmc.c8
-rw-r--r--drivers/mmc/host/tmio_mmc.c227
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c2
-rw-r--r--drivers/mmc/host/uniphier-sd.c83
-rw-r--r--drivers/mmc/host/usdhi6rol0.c3
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c6
-rw-r--r--drivers/most/Kconfig2
-rw-r--r--drivers/most/most_cdev.c7
-rw-r--r--drivers/most/most_snd.c10
-rw-r--r--drivers/most/most_usb.c6
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c11
-rw-r--r--drivers/mtd/devices/spear_smi.c4
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c18
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c7
-rw-r--r--drivers/mtd/maps/pismo.c5
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtdblock.c12
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdcore.c40
-rw-r--r--drivers/mtd/mtdpart.c10
-rw-r--r--drivers/mtd/nand/ecc-mtk.c28
-rw-r--r--drivers/mtd/nand/ecc-mxic.c7
-rw-r--r--drivers/mtd/nand/onenand/Kconfig2
-rw-r--r--drivers/mtd/nand/onenand/generic.c6
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c6
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c6
-rw-r--r--drivers/mtd/nand/raw/Kconfig11
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c6
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c5
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c6
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c6
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c6
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c14
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c6
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c6
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c8
-rw-r--r--drivers/mtd/nand/raw/gpio.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c5
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c6
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c6
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c6
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c13
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c22
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c6
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c6
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c16
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c5
-rw-r--r--drivers/mtd/nand/raw/nand_base.c149
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c13
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c3
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c5
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c3
-rw-r--r--drivers/mtd/nand/raw/nandsim.c17
-rw-r--r--drivers/mtd/nand/raw/ndfc.c6
-rw-r--r--drivers/mtd/nand/raw/omap2.c5
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c5
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c10
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c6
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c69
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c6
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c11
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c68
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c6
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c6
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c9
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c126
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c6
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c533
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c9
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c6
-rw-r--r--drivers/mtd/nand/spi/Makefile3
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c153
-rw-r--r--drivers/mtd/nand/spi/core.c2
-rw-r--r--drivers/mtd/nand/spi/esmt.c135
-rw-r--r--drivers/mtd/nand/spi/macronix.c3
-rw-r--r--drivers/mtd/parsers/Kconfig2
-rw-r--r--drivers/mtd/parsers/bcm63xxpart.c1
-rw-r--r--drivers/mtd/parsers/ofpart_core.c19
-rw-r--r--drivers/mtd/parsers/scpart.c2
-rw-r--r--drivers/mtd/parsers/tplink_safeloader.c4
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c4
-rw-r--r--drivers/mtd/spi-nor/core.c537
-rw-r--r--drivers/mtd/spi-nor/core.h74
-rw-r--r--drivers/mtd/spi-nor/debugfs.c15
-rw-r--r--drivers/mtd/spi-nor/issi.c2
-rw-r--r--drivers/mtd/spi-nor/macronix.c13
-rw-r--r--drivers/mtd/spi-nor/micron-st.c36
-rw-r--r--drivers/mtd/spi-nor/otp.c8
-rw-r--r--drivers/mtd/spi-nor/sfdp.c185
-rw-r--r--drivers/mtd/spi-nor/sfdp.h36
-rw-r--r--drivers/mtd/spi-nor/spansion.c487
-rw-r--r--drivers/mtd/spi-nor/sst.c2
-rw-r--r--drivers/mtd/spi-nor/swp.c6
-rw-r--r--drivers/mtd/spi-nor/winbond.c24
-rw-r--r--drivers/mtd/spi-nor/xilinx.c1
-rw-r--r--drivers/mtd/ubi/block.c112
-rw-r--r--drivers/mtd/ubi/build.c44
-rw-r--r--drivers/mtd/ubi/debug.c19
-rw-r--r--drivers/mtd/ubi/eba.c21
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c12
-rw-r--r--drivers/mtd/ubi/fastmap.c2
-rw-r--r--drivers/mtd/ubi/kapi.c1
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/vmt.c18
-rw-r--r--drivers/mtd/ubi/wl.c31
-rw-r--r--drivers/mux/core.c1
-rw-r--r--drivers/net/Kconfig16
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/bonding/bond_3ad.c1
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c106
-rw-r--r--drivers/net/bonding/bond_netlink.c7
-rw-r--r--drivers/net/bonding/bond_options.c10
-rw-r--r--drivers/net/bonding/bond_sysfs.c18
-rw-r--r--drivers/net/can/Kconfig12
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/bxcan.c1098
-rw-r--r--drivers/net/can/c_can/c_can_pci.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c12
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_pci.c8
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c4
-rw-r--r--drivers/net/can/dev/bittiming.c120
-rw-r--r--drivers/net/can/dev/calc_bittiming.c34
-rw-r--r--drivers/net/can/dev/dev.c21
-rw-r--r--drivers/net/can/dev/netlink.c49
-rw-r--r--drivers/net/can/kvaser_pciefd.c1
-rw-r--r--drivers/net/can/m_can/m_can.c37
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c274
-rw-r--r--drivers/net/can/sja1000/ems_pci.c154
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c18
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h26
-rw-r--r--drivers/net/can/usb/esd_usb.c241
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c102
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c44
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c122
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c68
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c30
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h1
-rw-r--r--drivers/net/dsa/Kconfig31
-rw-r--r--drivers/net/dsa/Makefile2
-rw-r--r--drivers/net/dsa/b53/b53_common.c78
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c5
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c45
-rw-r--r--drivers/net/dsa/b53/b53_priv.h17
-rw-r--r--drivers/net/dsa/b53/b53_regs.h1
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c45
-rw-r--r--drivers/net/dsa/lan9303-core.c169
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c2
-rw-r--r--drivers/net/dsa/lantiq_gswip.c2
-rw-r--r--drivers/net/dsa/microchip/Kconfig10
-rw-r--r--drivers/net/dsa/microchip/Makefile5
-rw-r--r--drivers/net/dsa/microchip/ksz8.h8
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c192
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c9
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c29
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c4
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h33
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c484
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h79
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c1201
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h86
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp_reg.h142
-rw-r--r--drivers/net/dsa/microchip/lan937x.h1
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c9
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h3
-rw-r--r--drivers/net/dsa/mt7530-mdio.c271
-rw-r--r--drivers/net/dsa/mt7530-mmio.c101
-rw-r--r--drivers/net/dsa/mt7530.c969
-rw-r--r--drivers/net/dsa/mt7530.h104
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig4
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c590
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c106
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h19
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.c83
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.h19
-rw-r--r--drivers/net/dsa/ocelot/Kconfig32
-rw-r--r--drivers/net/dsa/ocelot/Makefile13
-rw-r--r--drivers/net/dsa/ocelot/felix.c83
-rw-r--r--drivers/net/dsa/ocelot/felix.h9
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c101
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c164
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c7
-rw-r--r--drivers/net/dsa/qca/Kconfig8
-rw-r--r--drivers/net/dsa/qca/Makefile3
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c277
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c49
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c277
-rw-r--r--drivers/net/dsa/qca/qca8k.h84
-rw-r--r--drivers/net/dsa/qca/qca8k_leds.h16
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c5
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c40
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c137
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c24
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c3
-rw-r--r--drivers/net/ethernet/Kconfig12
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/adi/adin1110.c5
-rw-r--r--drivers/net/ethernet/alteon/acenic.c3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c29
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c87
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c350
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h32
-rw-r--r--drivers/net/ethernet/amd/Kconfig14
-rw-r--r--drivers/net/ethernet/amd/Makefile1
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/Makefile13
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c290
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c264
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c597
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h312
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c170
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c351
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c183
-rw-r--r--drivers/net/ethernet/amd/pds_core/fw.c194
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c480
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h49
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c52
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c415
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c28
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c4
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c14
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c22
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c52
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c152
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h92
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h90
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c477
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c11
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/cadence/macb.h37
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c264
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c87
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c17
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c34
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9051.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c27
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/engleder/Makefile2
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h32
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c1310
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c21
-rw-r--r--drivers/net/ethernet/engleder/tsnep_xdp.c85
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c6
-rw-r--r--drivers/net/ethernet/fealnx.c1953
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c22
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig15
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c762
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h44
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c339
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h144
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ierb.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c119
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c113
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c29
-rw-r--r--drivers/net/ethernet/freescale/fec.h5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c227
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c149
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.c7
-rw-r--r--drivers/net/ethernet/fungible/funeth/Kconfig2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve.h112
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c96
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c741
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c147
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c310
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c269
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c71
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c192
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c33
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/Kconfig18
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c59
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c9
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c1038
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c72
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c56
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c94
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c519
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c252
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h643
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c430
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c231
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c179
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c86
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c93
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c1897
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c129
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c103
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c1916
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h328
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c415
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1019
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1142
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c216
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c466
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c265
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h75
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c198
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c270
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c73
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c104
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c208
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c224
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c37
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c13
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c29
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c19
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c141
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c68
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile9
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h179
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c580
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h79
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c642
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c1229
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h767
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h23
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2285
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h39
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c442
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c108
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c251
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c30
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c30
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c37
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c86
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c72
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c276
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h88
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c387
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h196
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c12
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c184
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h18
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.c110
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c63
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c309
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c72
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c143
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c48
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig2
-rw-r--r--drivers/net/ethernet/mediatek/Makefile2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c14
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c797
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h155
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c171
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h28
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c51
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h20
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c185
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c146
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h9
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c375
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c347
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c408
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c699
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c857
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c528
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c759
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c927
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c261
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c287
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c1126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h181
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c213
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c456
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c755
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c371
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c241
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c269
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/thermal.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/thermal.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c166
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c381
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c16
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c304
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c168
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c43
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_goto.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c103
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h81
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c13
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c30
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h36
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c397
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c16
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c1402
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c219
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c241
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c151
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h125
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h2511
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c53
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pool.c81
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c102
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h41
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c332
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c59
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c335
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h74
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c1461
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c2531
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c293
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c1626
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h126
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c4
-rw-r--r--drivers/net/ethernet/microchip/vcap/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h688
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c1262
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h13
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h24
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c77
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c19
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c127
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h15
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c1910
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.c412
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.h32
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c48
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c22
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c459
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c52
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile1
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c223
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h17
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c31
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c50
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mm.c300
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c50
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c377
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c220
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c177
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c4
-rw-r--r--drivers/net/ethernet/netronome/Kconfig2
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile4
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c93
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c284
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h32
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c55
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c229
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/dcb.c571
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.h46
-rw-r--r--drivers/net/ethernet/ni/nixge.c143
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c76
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h25
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c119
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c233
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h42
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c33
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c109
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c1
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c18
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c191
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c54
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c325
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c37
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c589
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h63
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c49
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c105
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig1
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c38
-rw-r--r--drivers/net/ethernet/sfc/ef100.c3
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c30
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c114
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h7
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c57
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h10
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c731
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.h47
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/mae.c457
-rw-r--r--drivers/net/ethernet/sfc/mae.h51
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c72
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h13
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h8
-rw-r--r--drivers/net/ethernet/sfc/ptp.c274
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/tc.c642
-rw-r--r--drivers/net/ethernet/sfc/tc.h41
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c11
-rw-r--r--drivers/net/ethernet/socionext/netsec.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c182
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c171
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c65
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c116
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c201
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c105
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c71
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h184
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c174
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c337
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c20
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1130
-rw-r--r--drivers/net/ethernet/sun/sunhme.h6
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c6
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c194
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c135
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h4
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c190
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h5
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c50
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c41
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h5
-rw-r--r--drivers/net/ethernet/via/via-velocity.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.h2
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c1216
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h43
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2007
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h32
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h414
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h79
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c70
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c590
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c286
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h12
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h97
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h43
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c116
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c578
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h34
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c9
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c5
-rw-r--r--drivers/net/fddi/skfp/rmt.c6
-rw-r--r--drivers/net/geneve.c11
-rw-r--r--drivers/net/hamradio/Kconfig2
-rw-r--r--drivers/net/hamradio/baycom_epp.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c77
-rw-r--r--drivers/net/hyperv/netvsc_drv.c8
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c92
-rw-r--r--drivers/net/ieee802154/ca8210.c13
-rw-r--r--drivers/net/ieee802154/cc2520.c136
-rw-r--r--drivers/net/ieee802154/mcr20a.c2
-rw-r--r--drivers/net/ipa/Makefile17
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c2
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c481
-rw-r--r--drivers/net/ipa/gsi.c486
-rw-r--r--drivers/net/ipa/gsi.h11
-rw-r--r--drivers/net/ipa/gsi_reg.c161
-rw-r--r--drivers/net/ipa/gsi_reg.h509
-rw-r--r--drivers/net/ipa/gsi_trans.c2
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_cmd.c38
-rw-r--r--drivers/net/ipa/ipa_data.h3
-rw-r--r--drivers/net/ipa/ipa_endpoint.c585
-rw-r--r--drivers/net/ipa/ipa_endpoint.h4
-rw-r--r--drivers/net/ipa/ipa_interrupt.c155
-rw-r--r--drivers/net/ipa/ipa_interrupt.h64
-rw-r--r--drivers/net/ipa/ipa_main.c130
-rw-r--r--drivers/net/ipa/ipa_mem.c28
-rw-r--r--drivers/net/ipa/ipa_mem.h8
-rw-r--r--drivers/net/ipa/ipa_power.c36
-rw-r--r--drivers/net/ipa/ipa_power.h12
-rw-r--r--drivers/net/ipa/ipa_reg.c102
-rw-r--r--drivers/net/ipa/ipa_reg.h190
-rw-r--r--drivers/net/ipa/ipa_resource.c16
-rw-r--r--drivers/net/ipa/ipa_sysfs.c2
-rw-r--r--drivers/net/ipa/ipa_table.c68
-rw-r--r--drivers/net/ipa/ipa_uc.c27
-rw-r--r--drivers/net/ipa/ipa_uc.h8
-rw-r--r--drivers/net/ipa/ipa_version.h6
-rw-r--r--drivers/net/ipa/reg.h134
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c291
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c303
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c308
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c313
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c311
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c312
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v5.0.c317
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c283
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c269
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c255
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c287
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.0.c564
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c8
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c1
-rw-r--r--drivers/net/ipvlan/ipvtap.c1
-rw-r--r--drivers/net/macsec.c147
-rw-r--r--drivers/net/macvlan.c98
-rw-r--r--drivers/net/macvtap.c1
-rw-r--r--drivers/net/mdio/Kconfig14
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/acpi_mdio.c10
-rw-r--r--drivers/net/mdio/fwnode_mdio.c8
-rw-r--r--drivers/net/mdio/mdio-aspeed.c48
-rw-r--r--drivers/net/mdio/mdio-bitbang.c77
-rw-r--r--drivers/net/mdio/mdio-cavium.c111
-rw-r--r--drivers/net/mdio/mdio-cavium.h9
-rw-r--r--drivers/net/mdio/mdio-i2c.c38
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c154
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c8
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c15
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c54
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c61
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c164
-rw-r--r--drivers/net/mdio/mdio-mvusb.c17
-rw-r--r--drivers/net/mdio/mdio-octeon.c6
-rw-r--r--drivers/net/mdio/mdio-thunder.c7
-rw-r--r--drivers/net/mdio/of_mdio.c16
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/bpf.c4
-rw-r--r--drivers/net/netdevsim/bus.c4
-rw-r--r--drivers/net/netdevsim/dev.c50
-rw-r--r--drivers/net/netdevsim/health.c20
-rw-r--r--drivers/net/netdevsim/ipsec.c14
-rw-r--r--drivers/net/netdevsim/netdev.c1
-rw-r--r--drivers/net/pcs/Kconfig7
-rw-r--r--drivers/net/pcs/Makefile1
-rw-r--r--drivers/net/pcs/pcs-lynx.c24
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c305
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c6
-rw-r--r--drivers/net/pcs/pcs-xpcs.c29
-rw-r--r--drivers/net/phy/Kconfig27
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/aquantia_hwmon.c2
-rw-r--r--drivers/net/phy/at803x.c3
-rw-r--r--drivers/net/phy/bcm-phy-lib.h5
-rw-r--r--drivers/net/phy/bcm54140.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c24
-rw-r--r--drivers/net/phy/dp83822.c6
-rw-r--r--drivers/net/phy/dp83867.c62
-rw-r--r--drivers/net/phy/dp83869.c6
-rw-r--r--drivers/net/phy/marvell-88x2222.c4
-rw-r--r--drivers/net/phy/marvell.c85
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mdio-open-alliance.h46
-rw-r--r--drivers/net/phy/mdio_bus.c471
-rw-r--r--drivers/net/phy/mdio_devres.c11
-rw-r--r--drivers/net/phy/meson-gxl.c85
-rw-r--r--drivers/net/phy/micrel.c1430
-rw-r--r--drivers/net/phy/microchip.c32
-rw-r--r--drivers/net/phy/microchip_t1.c70
-rw-r--r--drivers/net/phy/microchip_t1s.c138
-rw-r--r--drivers/net/phy/motorcomm.c559
-rw-r--r--drivers/net/phy/mscc/mscc_main.c24
-rw-r--r--drivers/net/phy/mxl-gpy.c42
-rw-r--r--drivers/net/phy/ncn26000.c171
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c16
-rw-r--r--drivers/net/phy/nxp-cbtx.c227
-rw-r--r--drivers/net/phy/nxp-tja11xx.c2
-rw-r--r--drivers/net/phy/phy-c45.c544
-rw-r--r--drivers/net/phy/phy-core.c5
-rw-r--r--drivers/net/phy/phy.c473
-rw-r--r--drivers/net/phy/phy_device.c199
-rw-r--r--drivers/net/phy/phylink.c84
-rw-r--r--drivers/net/phy/sfp-bus.c14
-rw-r--r--drivers/net/phy/sfp.c135
-rw-r--r--drivers/net/phy/smsc.c187
-rw-r--r--drivers/net/phy/spi_ks8995.c2
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/rionet.c3
-rw-r--r--drivers/net/tap.c21
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/thunderbolt/Kconfig12
-rw-r--r--drivers/net/thunderbolt/Makefile6
-rw-r--r--drivers/net/thunderbolt/main.c (renamed from drivers/net/thunderbolt.c)73
-rw-r--r--drivers/net/thunderbolt/trace.c10
-rw-r--r--drivers/net/thunderbolt/trace.h141
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/asix_devices.c32
-rw-r--r--drivers/net/usb/cdc_ether.c120
-rw-r--r--drivers/net/usb/cdc_mbim.c5
-rw-r--r--drivers/net/usb/kalmia.c8
-rw-r--r--drivers/net/usb/lan78xx.c45
-rw-r--r--drivers/net/usb/plusb.c10
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c265
-rw-r--r--drivers/net/usb/rndis_host.c3
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/usbnet.c29
-rw-r--r--drivers/net/veth.c213
-rw-r--r--drivers/net/virtio_net.c597
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c64
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/vxlan/Makefile2
-rw-r--r--drivers/net/vxlan/vxlan_core.c128
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c1462
-rw-r--r--drivers/net/vxlan/vxlan_private.h84
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c17
-rw-r--r--drivers/net/wan/slic_ds26522.c2
-rw-r--r--drivers/net/wireguard/queueing.h2
-rw-r--r--drivers/net/wireless/Kconfig75
-rw-r--r--drivers/net/wireless/Makefile11
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c67
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c65
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c87
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h73
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c164
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c33
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h20
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c400
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c393
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c59
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c658
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h372
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig34
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile27
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c964
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h184
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c939
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h823
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c357
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c102
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c1577
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h1816
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2597
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h106
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c4242
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h145
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c1215
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2222
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1142
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h2961
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c850
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h704
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.c145
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h194
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h144
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c789
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.h316
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c1041
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h312
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c7097
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c616
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h46
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c1401
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h141
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c342
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c3089
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h569
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c732
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h95
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h1441
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h152
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c6606
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h4803
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c10
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c74
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h82
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h148
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c32
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h4
-rw-r--r--drivers/net/wireless/ath/key.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c51
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c45
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c372
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c31
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c125
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c49
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h157
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c96
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h10
-rw-r--r--drivers/net/wireless/cisco/Kconfig2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c31
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c16
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c14
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c174
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h418
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c278
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c494
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c309
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c1101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c1167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h565
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c326
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c772
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c742
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c436
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c10
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/legacy/Kconfig55
-rw-r--r--drivers/net/wireless/legacy/Makefile6
-rw-r--r--drivers/net/wireless/legacy/ray_cs.c (renamed from drivers/net/wireless/ray_cs.c)0
-rw-r--r--drivers/net/wireless/legacy/ray_cs.h (renamed from drivers/net/wireless/ray_cs.h)0
-rw-r--r--drivers/net/wireless/legacy/rayctl.h (renamed from drivers/net/wireless/rayctl.h)0
-rw-r--r--drivers/net/wireless/legacy/rndis_wlan.c (renamed from drivers/net/wireless/rndis_wlan.c)27
-rw-r--r--drivers/net/wireless/legacy/wl3501.h (renamed from drivers/net/wireless/wl3501.h)0
-rw-r--r--drivers/net/wireless/legacy/wl3501_cs.c (renamed from drivers/net/wireless/wl3501_cs.c)2
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c76
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h21
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c28
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c263
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c86
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c217
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c308
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c115
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c165
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c141
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c268
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c162
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c482
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c634
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h86
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c429
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h98
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h67
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h476
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c1899
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c39
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c20
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c105
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c1887
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c37
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c29
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c828
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h90
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c52
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig36
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile12
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c20
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c76
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c163
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c58
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c35
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c1394
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.h178
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c91
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c35
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c2809
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c566
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h722
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c56
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c896
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h510
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c282
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c95
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c73
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c202
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c12
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h40
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c534
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c14824
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c62
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c148
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c148
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.h16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c37
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h2
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c10
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c2
-rw-r--r--drivers/net/wireless/ti/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c3
-rw-r--r--drivers/net/wireless/virtual/Kconfig20
-rw-r--r--drivers/net/wireless/virtual/Makefile3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c (renamed from drivers/net/wireless/mac80211_hwsim.c)936
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h (renamed from drivers/net/wireless/mac80211_hwsim.h)58
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c (renamed from drivers/net/wireless/virt_wifi.c)0
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c7
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c2
-rw-r--r--drivers/net/wwan/rpmsg_wwan_ctrl.c3
-rw-r--r--drivers/net/wwan/t7xx/Makefile2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c29
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c16
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c36
-rw-r--r--drivers/net/wwan/wwan_core.c63
-rw-r--r--drivers/net/wwan/wwan_hwsim.c4
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/netback.c40
-rw-r--r--drivers/net/xen-netback/xenbus.c5
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/main.c6
-rw-r--r--drivers/nfc/nfcmrvl/nfcmrvl.h30
-rw-r--r--drivers/nfc/nfcmrvl/uart.c11
-rw-r--r--drivers/nfc/nfcsim.c5
-rw-r--r--drivers/nfc/pn533/usb.c45
-rw-r--r--drivers/nfc/st-nci/ndlc.c6
-rw-r--r--drivers/nfc/st-nci/se.c6
-rw-r--r--drivers/nfc/st21nfca/se.c6
-rw-r--r--drivers/nfc/trf7970a.c2
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c6
-rw-r--r--drivers/nubus/bus.c6
-rw-r--r--drivers/nvdimm/Kconfig19
-rw-r--r--drivers/nvdimm/btt.c16
-rw-r--r--drivers/nvdimm/bus.c25
-rw-r--r--drivers/nvdimm/dax_devs.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/nd-core.h11
-rw-r--r--drivers/nvdimm/nd.h6
-rw-r--r--drivers/nvdimm/pfn_devs.c42
-rw-r--r--drivers/nvdimm/pmem.c24
-rw-r--r--drivers/nvdimm/region_devs.c4
-rw-r--r--drivers/nvdimm/virtio_pmem.c11
-rw-r--r--drivers/nvme/host/apple.c34
-rw-r--r--drivers/nvme/host/auth.c46
-rw-r--r--drivers/nvme/host/constants.c16
-rw-r--r--drivers/nvme/host/core.c211
-rw-r--r--drivers/nvme/host/fabrics.c21
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c19
-rw-r--r--drivers/nvme/host/ioctl.c163
-rw-r--r--drivers/nvme/host/multipath.c10
-rw-r--r--drivers/nvme/host/nvme.h18
-rw-r--r--drivers/nvme/host/pci.c203
-rw-r--r--drivers/nvme/host/rdma.c19
-rw-r--r--drivers/nvme/host/tcp.c98
-rw-r--r--drivers/nvme/host/trace.h15
-rw-r--r--drivers/nvme/target/admin-cmd.c120
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvme/target/fc.c4
-rw-r--r--drivers/nvme/target/fcloop.c50
-rw-r--r--drivers/nvme/target/io-cmd-file.c10
-rw-r--r--drivers/nvme/target/nvmet.h12
-rw-r--r--drivers/nvme/target/passthru.c16
-rw-r--r--drivers/nvme/target/tcp.c44
-rw-r--r--drivers/nvme/target/zns.c23
-rw-r--r--drivers/nvmem/Kconfig17
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/bcm-ocotp.c4
-rw-r--r--drivers/nvmem/brcm_nvram.c3
-rw-r--r--drivers/nvmem/core.c367
-rw-r--r--drivers/nvmem/imx-ocotp.c34
-rw-r--r--drivers/nvmem/layouts/Kconfig23
-rw-r--r--drivers/nvmem/layouts/Makefile7
-rw-r--r--drivers/nvmem/layouts/onie-tlv.c244
-rw-r--r--drivers/nvmem/layouts/sl28vpd.c153
-rw-r--r--drivers/nvmem/mtk-efuse.c53
-rw-r--r--drivers/nvmem/nintendo-otp.c4
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c14
-rw-r--r--drivers/nvmem/rave-sp-eeprom.c2
-rw-r--r--drivers/nvmem/stm32-bsec-optee-ta.c298
-rw-r--r--drivers/nvmem/stm32-bsec-optee-ta.h80
-rw-r--r--drivers/nvmem/stm32-romem.c86
-rw-r--r--drivers/nvmem/sunxi_sid.c23
-rw-r--r--drivers/nvmem/u-boot-env.c26
-rw-r--r--drivers/nvmem/vf610-ocotp.c3
-rw-r--r--drivers/of/Kconfig18
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/address.c411
-rw-r--r--drivers/of/base.c208
-rw-r--r--drivers/of/cpu.c210
-rw-r--r--drivers/of/device.c81
-rw-r--r--drivers/of/dynamic.c32
-rw-r--r--drivers/of/fdt.c82
-rw-r--r--drivers/of/irq.c12
-rw-r--r--drivers/of/kobj.c2
-rw-r--r--drivers/of/module.c74
-rw-r--r--drivers/of/of_private.h1
-rw-r--r--drivers/of/of_reserved_mem.c13
-rw-r--r--drivers/of/overlay.c2
-rw-r--r--drivers/of/platform.c24
-rw-r--r--drivers/of/property.c94
-rw-r--r--drivers/of/unittest-data/testcases_common.dtsi1
-rw-r--r--drivers/of/unittest-data/tests-address.dtsi9
-rw-r--r--drivers/of/unittest-data/tests-lifecycle.dtsi8
-rw-r--r--drivers/of/unittest.c321
-rw-r--r--drivers/opp/Kconfig1
-rw-r--r--drivers/opp/core.c78
-rw-r--r--drivers/opp/debugfs.c2
-rw-r--r--drivers/opp/of.c9
-rw-r--r--drivers/opp/opp.h4
-rw-r--r--drivers/parisc/Kconfig1
-rw-r--r--drivers/parisc/pdc_stable.c9
-rw-r--r--drivers/parisc/power.c16
-rw-r--r--drivers/parport/Kconfig11
-rw-r--r--drivers/parport/Makefile1
-rw-r--r--drivers/parport/parport_ax88796.c418
-rw-r--r--drivers/parport/parport_pc.c145
-rw-r--r--drivers/pci/bus.c28
-rw-r--r--drivers/pci/controller/Kconfig422
-rw-r--r--drivers/pci/controller/cadence/Kconfig10
-rw-r--r--drivers/pci/controller/dwc/Kconfig432
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c207
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-bt1.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c205
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h21
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c1261
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c9
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig19
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c288
-rw-r--r--drivers/pci/controller/pci-ixp4xx.c10
-rw-r--r--drivers/pci/controller/pci-loongson.c71
-rw-r--r--drivers/pci/controller/pci-tegra.c10
-rw-r--r--drivers/pci/controller/pci-versatile.c1
-rw-r--r--drivers/pci/controller/pcie-hisi-error.c1
-rw-r--r--drivers/pci/controller/pcie-mediatek.c2
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c1
-rw-r--r--drivers/pci/controller/pcie-mt7621.c6
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c4
-rw-r--r--drivers/pci/controller/pcie-rcar.h2
-rw-r--r--drivers/pci/controller/vmd.c97
-rw-r--r--drivers/pci/doe.c342
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c38
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c1
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c1
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c35
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c1
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c1
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c15
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c1
-rw-r--r--drivers/pci/hotplug/shpchp_sysfs.c8
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/msi/api.c4
-rw-r--r--drivers/pci/msi/msi.c9
-rw-r--r--drivers/pci/of.c32
-rw-r--r--drivers/pci/p2pdma.c11
-rw-r--r--drivers/pci/pci-acpi.c45
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c89
-rw-r--r--drivers/pci/pci.h71
-rw-r--r--drivers/pci/pcie/aer.c51
-rw-r--r--drivers/pci/pcie/aspm.c165
-rw-r--r--drivers/pci/pcie/dpc.c3
-rw-r--r--drivers/pci/pcie/edr.c12
-rw-r--r--drivers/pci/pcie/portdrv.c16
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/pci/quirks.c44
-rw-r--r--drivers/pci/remove.c11
-rw-r--r--drivers/pci/setup-bus.c265
-rw-r--r--drivers/pci/setup-res.c4
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/switch/switchtec.c15
-rw-r--r--drivers/pci/vgaarb.c17
-rw-r--r--drivers/pci/xen-pcifront.c8
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/pcmcia/Makefile5
-rw-r--r--drivers/pcmcia/cs.c2
-rw-r--r--drivers/pcmcia/ds.c10
-rw-r--r--drivers/pcmcia/pxa2xx_base.c8
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c122
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/sa1100_generic.c5
-rw-r--r--drivers/pcmcia/sa1100_h3600.c2
-rw-r--r--drivers/pcmcia/sa1100_simpad.c115
-rw-r--r--drivers/pcmcia/sa1111_badge4.c158
-rw-r--r--drivers/pcmcia/sa1111_generic.c8
-rw-r--r--drivers/pcmcia/sa1111_lubbock.c155
-rw-r--r--drivers/peci/sysfs.c2
-rw-r--r--drivers/perf/Kconfig10
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c3
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c8
-rw-r--r--drivers/perf/amlogic/meson_g12_ddr_pmu.c34
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c15
-rw-r--r--drivers/perf/arm-cmn.c69
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c6
-rw-r--r--drivers/perf/arm_dmc620_pmu.c3
-rw-r--r--drivers/perf/arm_pmu.c19
-rw-r--r--drivers/perf/arm_pmuv3.c1419
-rw-r--r--drivers/perf/arm_spe_pmu.c160
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c19
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c9
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c13
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c9
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c2
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c10
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c22
-rw-r--r--drivers/perf/qcom_l3_pmu.c3
-rw-r--r--drivers/perf/riscv_pmu_sbi.c81
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c67
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c4
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb2.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c6
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c32
-rw-r--r--drivers/phy/cadence/cdns-dphy.c6
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c250
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c6
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c2
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c6
-rw-r--r--drivers/phy/intel/Kconfig10
-rw-r--r--drivers/phy/intel/Makefile1
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c6
-rw-r--r--drivers/phy/intel/phy-intel-thunderbay-emmc.c509
-rw-r--r--drivers/phy/marvell/phy-pxa-28nm-hsic.c2
-rw-r--r--drivers/phy/marvell/phy-pxa-28nm-usb2.c2
-rw-r--r--drivers/phy/mediatek/Makefile1
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c491
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h113
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c15
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h3
-rw-r--r--drivers/phy/mediatek/phy-mtk-io.h4
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c5
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c6
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c6
-rw-r--r--drivers/phy/mscc/phy-ocelot-serdes.c9
-rw-r--r--drivers/phy/phy-can-transceiver.c9
-rw-r--r--drivers/phy/phy-core.c53
-rw-r--r--drivers/phy/phy-lgm-usb.c6
-rw-r--r--drivers/phy/qualcomm/Kconfig68
-rw-r--r--drivers/phy/qualcomm/Makefile14
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c257
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-pcie2.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c735
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c851
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h23
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v2.h25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h31
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h31
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h18
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h82
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v6.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h30
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h24
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h77
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h45
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c786
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c83
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h23
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c441
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c13
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c6
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c6
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c6
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c73
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c6
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c11
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c6
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c184
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c15
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c13
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c7
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c7
-rw-r--r--drivers/phy/st/phy-miphy28lp.c42
-rw-r--r--drivers/phy/st/phy-spear1310-miphy.c2
-rw-r--r--drivers/phy/st/phy-spear1340-miphy.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c9
-rw-r--r--drivers/phy/sunplus/phy-sunplus-usb2.c3
-rw-r--r--drivers/phy/tegra/Makefile1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c84
-rw-r--r--drivers/phy/tegra/xusb.c31
-rw-r--r--drivers/phy/tegra/xusb.h24
-rw-r--r--drivers/phy/ti/Kconfig4
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c6
-rw-r--r--drivers/phy/ti/phy-da8xx-usb.c6
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c6
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c85
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c14
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c6
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c6
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c5
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile4
-rw-r--r--drivers/pinctrl/actions/pinctrl-s500.c1
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c1
-rw-r--r--drivers/pinctrl/actions/pinctrl-s900.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c13
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c29
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c38
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c23
-rw-r--r--drivers/pinctrl/core.c15
-rw-r--r--drivers/pinctrl/freescale/Kconfig2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c80
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h24
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.h6
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c31
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c31
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c24
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c21
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c35
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c90
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h55
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c12
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c23
-rw-r--r--drivers/pinctrl/intel/pinctrl-moorefield.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c37
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c30
-rw-r--r--drivers/pinctrl/mediatek/Kconfig101
-rw-r--r--drivers/pinctrl/mediatek/Makefile62
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c3
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7620.c137
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7621.c (renamed from drivers/pinctrl/ralink/pinctrl-mt7621.c)55
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt76x8.c283
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c1048
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8188.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8195.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8365.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtmips.c (renamed from drivers/pinctrl/ralink/pinctrl-ralink.c)90
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtmips.h (renamed from drivers/pinctrl/ralink/pinctrl-ralink.h)16
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c5
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-rt2880.c61
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-rt305x.c140
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-rt3883.c (renamed from drivers/pinctrl/ralink/pinctrl-rt3883.c)51
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c34
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8500.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8505.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c34
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.h4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c32
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h5
-rw-r--r--drivers/pinctrl/nuvoton/Kconfig1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c35
-rw-r--r--drivers/pinctrl/nxp/Kconfig15
-rw-r--r--drivers/pinctrl/nxp/Makefile4
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32.h57
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c973
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32g2.c770
-rw-r--r--drivers/pinctrl/pinctrl-amd.c93
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c45
-rw-r--r--drivers/pinctrl/pinctrl-at91.c231
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c6
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c10
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c22
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c81
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.h1
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c5
-rw-r--r--drivers/pinctrl/pinctrl-mlxbf3.c320
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c2
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c36
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c35
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c32
-rw-r--r--drivers/pinctrl/pinctrl-single.c6
-rw-r--r--drivers/pinctrl/pinctrl-st.c16
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c38
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c72
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c1301
-rw-r--r--drivers/pinctrl/pinctrl-xway.c252
-rw-r--r--drivers/pinctrl/pinmux.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig71
-rw-r--r--drivers/pinctrl/qcom/Makefile7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c861
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c826
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c47
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c50
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c1274
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c1537
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c1280
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c248
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c1790
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c40
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c26
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c37
-rw-r--r--drivers/pinctrl/ralink/Kconfig35
-rw-r--r--drivers/pinctrl/ralink/Makefile8
-rw-r--r--drivers/pinctrl/ralink/pinctrl-mt7620.c391
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c60
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt305x.c137
-rw-r--r--drivers/pinctrl/renesas/Kconfig5
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/core.c51
-rw-r--r--drivers/pinctrl/renesas/pfc-emev2.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7740.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77470.c46
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7779.c446
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7790.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7791.c6
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7792.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7794.c50
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c5703
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c12
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c12
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c12
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c38
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c49
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77990.c41
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c46
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c16
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c10
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c1207
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7203.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7264.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7269.c6
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7720.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7722.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7723.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7724.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7734.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7757.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7785.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7786.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-shx3.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c3
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c26
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c3
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c53
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h14
-rw-r--r--drivers/pinctrl/samsung/Kconfig5
-rw-r--r--drivers/pinctrl/samsung/Makefile1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c653
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c12
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c8
-rw-r--r--drivers/pinctrl/starfive/Kconfig33
-rw-r--r--drivers/pinctrl/starfive/Makefile4
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c177
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c449
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c982
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h70
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c5
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c20
-rw-r--r--drivers/platform/chrome/Kconfig12
-rw-r--r--drivers/platform/chrome/Makefile4
-rw-r--r--drivers/platform/chrome/cros_ec.c17
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c2
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c67
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c14
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c12
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c13
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c40
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c123
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h85
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c362
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c17
-rw-r--r--drivers/platform/chrome/cros_typec_vdm.c148
-rw-r--r--drivers/platform/chrome/cros_typec_vdm.h13
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c3
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c1
-rw-r--r--drivers/platform/mellanox/Kconfig9
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c87
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h6
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c11
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c28
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c1
-rw-r--r--drivers/platform/surface/aggregator/bus.c14
-rw-r--r--drivers/platform/surface/aggregator/controller.c48
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h4
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c15
-rw-r--r--drivers/platform/surface/aggregator/trace.h73
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c6
-rw-r--r--drivers/platform/surface/surface_aggregator_hub.c8
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c4
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c192
-rw-r--r--drivers/platform/surface/surface_dtx.c20
-rw-r--r--drivers/platform/surface/surface_hotplug.c13
-rw-r--r--drivers/platform/surface/surface_platform_profile.c2
-rw-r--r--drivers/platform/x86/Kconfig46
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/acer-wmi.c5
-rw-r--r--drivers/platform/x86/acerhdf.c98
-rw-r--r--drivers/platform/x86/adv_swbutton.c6
-rw-r--r--drivers/platform/x86/amd/Kconfig3
-rw-r--r--drivers/platform/x86/amd/hsmp.c6
-rw-r--r--drivers/platform/x86/amd/pmc.c294
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c9
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c14
-rw-r--r--drivers/platform/x86/amd/pmf/core.c59
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h3
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c28
-rw-r--r--drivers/platform/x86/amilo-rfkill.c5
-rw-r--r--drivers/platform/x86/apple-gmux.c479
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c18
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c24
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c6
-rw-r--r--drivers/platform/x86/classmate-laptop.c2
-rw-r--r--drivers/platform/x86/compal-laptop.c8
-rw-r--r--drivers/platform/x86/dell/Kconfig8
-rw-r--r--drivers/platform/x86/dell/dcdbas.c6
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c42
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h2
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c10
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c3
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c538
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c41
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c2
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c4
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c9
-rw-r--r--drivers/platform/x86/hp/hp_accel.c5
-rw-r--r--drivers/platform/x86/hp/tc1100-wmi.c6
-rw-r--r--drivers/platform/x86/huawei-wmi.c6
-rw-r--r--drivers/platform/x86/ibm_rtl.c18
-rw-r--r--drivers/platform/x86/ideapad-laptop.c170
-rw-r--r--drivers/platform/x86/ideapad-laptop.h152
-rw-r--r--drivers/platform/x86/intel/Kconfig23
-rw-r--r--drivers/platform/x86/intel/Makefile6
-rw-r--r--drivers/platform/x86/intel/bxtwc_tmu.c5
-rw-r--r--drivers/platform/x86/intel/bytcrc_pwrsrc.c181
-rw-r--r--drivers/platform/x86/intel/chtdc_ti_pwrbtn.c5
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c6
-rw-r--r--drivers/platform/x86/intel/hid.c10
-rw-r--r--drivers/platform/x86/intel/ifs/core.c81
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h68
-rw-r--r--drivers/platform/x86/intel/ifs/load.c9
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c94
-rw-r--r--drivers/platform/x86/intel/ifs/sysfs.c23
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c5
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c20
-rw-r--r--drivers/platform/x86/intel/int3472/Kconfig1
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile2
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c37
-rw-r--r--drivers/platform/x86/intel/int3472/common.h18
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c110
-rw-r--r--drivers/platform/x86/intel/int3472/led.c75
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c5
-rw-r--r--drivers/platform/x86/intel/mrfld_pwrbtn.c5
-rw-r--r--drivers/platform/x86/intel/oaktrail.c6
-rw-r--r--drivers/platform/x86/intel/pmc/core.c36
-rw-r--r--drivers/platform/x86/intel/pmc/core.h4
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c31
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c6
-rw-r--r--drivers/platform/x86/intel/pmt/class.c7
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c1
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c3
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c6
-rw-r--r--drivers/platform/x86/intel/sdsi.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/Kconfig4
-rw-r--r--drivers/platform/x86/intel/speed_select_if/Makefile2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c52
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.h9
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi.c72
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c1440
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h18
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c5
-rw-r--r--drivers/platform/x86/intel/tpmi.c406
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c18
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c7
-rw-r--r--drivers/platform/x86/intel/vbtn.c10
-rw-r--r--drivers/platform/x86/intel/vsec.c107
-rw-r--r--drivers/platform/x86/intel/vsec.h15
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c1
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c1
-rw-r--r--drivers/platform/x86/lenovo-ymc.c187
-rw-r--r--drivers/platform/x86/mlx-platform.c1248
-rw-r--r--drivers/platform/x86/msi-ec.c897
-rw-r--r--drivers/platform/x86/msi-ec.h122
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c6
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c1
-rw-r--r--drivers/platform/x86/peaq-wmi.c128
-rw-r--r--drivers/platform/x86/samsung-q10.c6
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c9
-rw-r--r--drivers/platform/x86/simatic-ipc.c3
-rw-r--r--drivers/platform/x86/sony-laptop.c23
-rw-r--r--drivers/platform/x86/think-lmi.c131
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c82
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c75
-rw-r--r--drivers/platform/x86/uv_sysfs.c6
-rw-r--r--drivers/platform/x86/wmi.c21
-rw-r--r--drivers/platform/x86/x86-android-tablets.c1803
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig21
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile9
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c325
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c391
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c165
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c679
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c522
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.c100
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.h32
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h108
-rw-r--r--drivers/platform/x86/xo1-rfkill.c5
-rw-r--r--drivers/pnp/quirks.c29
-rw-r--r--drivers/power/reset/Kconfig7
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/as3722-poweroff.c1
-rw-r--r--drivers/power/reset/gpio-poweroff.c1
-rw-r--r--drivers/power/reset/gpio-restart.c1
-rw-r--r--drivers/power/reset/keystone-reset.c1
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c1
-rw-r--r--drivers/power/reset/mt6323-poweroff.c1
-rw-r--r--drivers/power/reset/odroid-go-ultra-poweroff.c177
-rw-r--r--drivers/power/reset/qcom-pon.c2
-rw-r--r--drivers/power/reset/regulator-poweroff.c1
-rw-r--r--drivers/power/reset/restart-poweroff.c1
-rw-r--r--drivers/power/reset/syscon-reboot.c6
-rw-r--r--drivers/power/reset/tps65086-restart.c1
-rw-r--r--drivers/power/supply/Kconfig72
-rw-r--r--drivers/power/supply/Makefile7
-rw-r--r--drivers/power/supply/ab8500_fg.c22
-rw-r--r--drivers/power/supply/axp288_charger.c15
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c2
-rw-r--r--drivers/power/supply/bq2415x_charger.c42
-rw-r--r--drivers/power/supply/bq24190_charger.c3
-rw-r--r--drivers/power/supply/bq24257_charger.c10
-rw-r--r--drivers/power/supply/bq256xx_charger.c44
-rw-r--r--drivers/power/supply/bq25890_charger.c183
-rw-r--r--drivers/power/supply/bq27xxx_battery.c8
-rw-r--r--drivers/power/supply/charger-manager.c8
-rw-r--r--drivers/power/supply/collie_battery.c4
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c2
-rw-r--r--drivers/power/supply/da9150-charger.c10
-rw-r--r--drivers/power/supply/ds2760_battery.c8
-rw-r--r--drivers/power/supply/ds2780_battery.c8
-rw-r--r--drivers/power/supply/ds2781_battery.c8
-rw-r--r--drivers/power/supply/generic-adc-battery.c245
-rw-r--r--drivers/power/supply/lp8727_charger.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c7
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c14
-rw-r--r--drivers/power/supply/max14577_charger.c2
-rw-r--r--drivers/power/supply/max1721x_battery.c8
-rw-r--r--drivers/power/supply/max77650-charger.c8
-rw-r--r--drivers/power/supply/max77693_charger.c6
-rw-r--r--drivers/power/supply/mp2629_charger.c2
-rw-r--r--drivers/power/supply/olpc_battery.c2
-rw-r--r--drivers/power/supply/pcf50633-charger.c6
-rw-r--r--drivers/power/supply/pda_power.c520
-rw-r--r--drivers/power/supply/power_supply_core.c283
-rw-r--r--drivers/power/supply/power_supply_leds.c1
-rw-r--r--drivers/power/supply/power_supply_sysfs.c33
-rw-r--r--drivers/power/supply/qcom_battmgr.c1410
-rw-r--r--drivers/power/supply/rk817_charger.c46
-rw-r--r--drivers/power/supply/rt9455_charger.c2
-rw-r--r--drivers/power/supply/rt9467-charger.c1282
-rw-r--r--drivers/power/supply/rt9471.c930
-rw-r--r--drivers/power/supply/s3c_adc_battery.c453
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/surface_charger.c2
-rw-r--r--drivers/power/supply/test_power.c3
-rw-r--r--drivers/power/supply/tosa_battery.c512
-rw-r--r--drivers/power/supply/twl4030_charger.c8
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c1
-rw-r--r--drivers/power/supply/z2_battery.c318
-rw-r--r--drivers/powercap/idle_inject.c65
-rw-r--r--drivers/powercap/intel_rapl_common.c13
-rw-r--r--drivers/powercap/intel_rapl_msr.c2
-rw-r--r--drivers/powercap/powercap_sys.c15
-rw-r--r--drivers/pps/clients/pps-ldisc.c6
-rw-r--r--drivers/pps/pps.c2
-rw-r--r--drivers/ps3/ps3av.c9
-rw-r--r--drivers/ptp/Kconfig14
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/ptp_clock.c2
-rw-r--r--drivers/ptp/ptp_dfl_tod.c332
-rw-r--r--drivers/ptp/ptp_ines.c2
-rw-r--r--drivers/ptp/ptp_kvm_arm.c4
-rw-r--r--drivers/ptp/ptp_kvm_common.c1
-rw-r--r--drivers/ptp/ptp_kvm_x86.c59
-rw-r--r--drivers/ptp/ptp_ocp.c2
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/ptp/ptp_qoriq.c52
-rw-r--r--drivers/ptp/ptp_vclock.c44
-rw-r--r--drivers/pwm/Kconfig12
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c83
-rw-r--r--drivers/pwm/pwm-ab8500.c112
-rw-r--r--drivers/pwm/pwm-apple.c159
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c6
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c6
-rw-r--r--drivers/pwm/pwm-atmel.c6
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c6
-rw-r--r--drivers/pwm/pwm-bcm2835.c6
-rw-r--r--drivers/pwm/pwm-berlin.c6
-rw-r--r--drivers/pwm/pwm-brcmstb.c6
-rw-r--r--drivers/pwm/pwm-clk.c6
-rw-r--r--drivers/pwm/pwm-cros-ec.c7
-rw-r--r--drivers/pwm/pwm-dwc.c38
-rw-r--r--drivers/pwm/pwm-hibvt.c7
-rw-r--r--drivers/pwm/pwm-img.c6
-rw-r--r--drivers/pwm/pwm-imx-tpm.c6
-rw-r--r--drivers/pwm/pwm-iqs620a.c5
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c6
-rw-r--r--drivers/pwm/pwm-lpss-platform.c5
-rw-r--r--drivers/pwm/pwm-meson.c14
-rw-r--r--drivers/pwm/pwm-mtk-disp.c40
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c6
-rw-r--r--drivers/pwm/pwm-rcar.c8
-rw-r--r--drivers/pwm/pwm-rockchip.c6
-rw-r--r--drivers/pwm/pwm-samsung.c6
-rw-r--r--drivers/pwm/pwm-sifive.c14
-rw-r--r--drivers/pwm/pwm-spear.c6
-rw-r--r--drivers/pwm/pwm-sprd.c7
-rw-r--r--drivers/pwm/pwm-sti.c6
-rw-r--r--drivers/pwm/pwm-stm32-lp.c4
-rw-r--r--drivers/pwm/pwm-stm32.c10
-rw-r--r--drivers/pwm/pwm-sun4i.c6
-rw-r--r--drivers/pwm/pwm-tegra.c6
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c6
-rw-r--r--drivers/pwm/pwm-vt8500.c6
-rw-r--r--drivers/pwm/pwm-xilinx.c5
-rw-r--r--drivers/pwm/sysfs.c1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c9
-rw-r--r--drivers/rapidio/devices/tsi721.c3
-rw-r--r--drivers/rapidio/rio-driver.c5
-rw-r--r--drivers/rapidio/rio-sysfs.c2
-rw-r--r--drivers/rapidio/rio_cm.c10
-rw-r--r--drivers/regulator/88pg86x.c1
-rw-r--r--drivers/regulator/88pm800-regulator.c1
-rw-r--r--drivers/regulator/88pm8607.c1
-rw-r--r--drivers/regulator/Kconfig31
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/aat2870-regulator.c1
-rw-r--r--drivers/regulator/ab8500-ext.c1
-rw-r--r--drivers/regulator/ab8500.c1
-rw-r--r--drivers/regulator/act8865-regulator.c1
-rw-r--r--drivers/regulator/act8945a-regulator.c7
-rw-r--r--drivers/regulator/ad5398.c1
-rw-r--r--drivers/regulator/anatop-regulator.c1
-rw-r--r--drivers/regulator/arizona-ldo1.c2
-rw-r--r--drivers/regulator/arizona-micsupp.c2
-rw-r--r--drivers/regulator/as3711-regulator.c1
-rw-r--r--drivers/regulator/as3722-regulator.c1
-rw-r--r--drivers/regulator/atc260x-regulator.c1
-rw-r--r--drivers/regulator/axp20x-regulator.c1
-rw-r--r--drivers/regulator/bcm590xx-regulator.c1
-rw-r--r--drivers/regulator/bd71815-regulator.c9
-rw-r--r--drivers/regulator/bd71828-regulator.c3
-rw-r--r--drivers/regulator/bd718x7-regulator.c1
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c1
-rw-r--r--drivers/regulator/bd9576-regulator.c1
-rw-r--r--drivers/regulator/core.c97
-rw-r--r--drivers/regulator/cpcap-regulator.c1
-rw-r--r--drivers/regulator/cros-ec-regulator.c1
-rw-r--r--drivers/regulator/da903x-regulator.c1
-rw-r--r--drivers/regulator/da9052-regulator.c1
-rw-r--r--drivers/regulator/da9055-regulator.c1
-rw-r--r--drivers/regulator/da9062-regulator.c1
-rw-r--r--drivers/regulator/da9063-regulator.c148
-rw-r--r--drivers/regulator/da9121-regulator.c1
-rw-r--r--drivers/regulator/da9210-regulator.c1
-rw-r--r--drivers/regulator/da9211-regulator.c12
-rw-r--r--drivers/regulator/db8500-prcmu.c1
-rw-r--r--drivers/regulator/dummy.c1
-rw-r--r--drivers/regulator/fan53555.c204
-rw-r--r--drivers/regulator/fan53880.c1
-rw-r--r--drivers/regulator/fixed-helper.c2
-rw-r--r--drivers/regulator/fixed.c5
-rw-r--r--drivers/regulator/gpio-regulator.c3
-rw-r--r--drivers/regulator/hi6421-regulator.c1
-rw-r--r--drivers/regulator/hi6421v530-regulator.c1
-rw-r--r--drivers/regulator/hi6421v600-regulator.c1
-rw-r--r--drivers/regulator/hi655x-regulator.c1
-rw-r--r--drivers/regulator/isl6271a-regulator.c1
-rw-r--r--drivers/regulator/isl9305.c1
-rw-r--r--drivers/regulator/lm363x-regulator.c1
-rw-r--r--drivers/regulator/lochnagar-regulator.c1
-rw-r--r--drivers/regulator/lp3971.c1
-rw-r--r--drivers/regulator/lp3972.c1
-rw-r--r--drivers/regulator/lp872x.c6
-rw-r--r--drivers/regulator/lp873x-regulator.c1
-rw-r--r--drivers/regulator/lp8755.c1
-rw-r--r--drivers/regulator/lp87565-regulator.c1
-rw-r--r--drivers/regulator/lp8788-buck.c1
-rw-r--r--drivers/regulator/lp8788-ldo.c2
-rw-r--r--drivers/regulator/ltc3589.c1
-rw-r--r--drivers/regulator/ltc3676.c1
-rw-r--r--drivers/regulator/max14577-regulator.c1
-rw-r--r--drivers/regulator/max1586.c1
-rw-r--r--drivers/regulator/max20086-regulator.c3
-rw-r--r--drivers/regulator/max20411-regulator.c164
-rw-r--r--drivers/regulator/max597x-regulator.c55
-rw-r--r--drivers/regulator/max77620-regulator.c1
-rw-r--r--drivers/regulator/max77650-regulator.c1
-rw-r--r--drivers/regulator/max77686-regulator.c1
-rw-r--r--drivers/regulator/max77693-regulator.c1
-rw-r--r--drivers/regulator/max77802-regulator.c35
-rw-r--r--drivers/regulator/max77826-regulator.c1
-rw-r--r--drivers/regulator/max8649.c1
-rw-r--r--drivers/regulator/max8660.c1
-rw-r--r--drivers/regulator/max8893.c1
-rw-r--r--drivers/regulator/max8907-regulator.c1
-rw-r--r--drivers/regulator/max8925-regulator.c1
-rw-r--r--drivers/regulator/max8952.c1
-rw-r--r--drivers/regulator/max8973-regulator.c3
-rw-r--r--drivers/regulator/max8997-regulator.c12
-rw-r--r--drivers/regulator/max8998.c4
-rw-r--r--drivers/regulator/mc13783-regulator.c1
-rw-r--r--drivers/regulator/mc13892-regulator.c1
-rw-r--r--drivers/regulator/mcp16502.c2
-rw-r--r--drivers/regulator/mp5416.c1
-rw-r--r--drivers/regulator/mp8859.c3
-rw-r--r--drivers/regulator/mp886x.c1
-rw-r--r--drivers/regulator/mpq7920.c1
-rw-r--r--drivers/regulator/mt6311-regulator.c1
-rw-r--r--drivers/regulator/mt6315-regulator.c1
-rw-r--r--drivers/regulator/mt6323-regulator.c1
-rw-r--r--drivers/regulator/mt6331-regulator.c1
-rw-r--r--drivers/regulator/mt6332-regulator.c1
-rw-r--r--drivers/regulator/mt6357-regulator.c1
-rw-r--r--drivers/regulator/mt6358-regulator.c1
-rw-r--r--drivers/regulator/mt6359-regulator.c1
-rw-r--r--drivers/regulator/mt6360-regulator.c1
-rw-r--r--drivers/regulator/mt6370-regulator.c1
-rw-r--r--drivers/regulator/mt6380-regulator.c1
-rw-r--r--drivers/regulator/mt6397-regulator.c3
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c1
-rw-r--r--drivers/regulator/palmas-regulator.c1
-rw-r--r--drivers/regulator/pbias-regulator.c1
-rw-r--r--drivers/regulator/pca9450-regulator.c1
-rw-r--r--drivers/regulator/pcap-regulator.c1
-rw-r--r--drivers/regulator/pcf50633-regulator.c1
-rw-r--r--drivers/regulator/pf8x00-regulator.c1
-rw-r--r--drivers/regulator/pfuze100-regulator.c1
-rw-r--r--drivers/regulator/pv88060-regulator.c1
-rw-r--r--drivers/regulator/pv88080-regulator.c1
-rw-r--r--drivers/regulator/pv88090-regulator.c1
-rw-r--r--drivers/regulator/pwm-regulator.c3
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c1
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c58
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c1
-rw-r--r--drivers/regulator/qcom_smd-regulator.c6
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c1
-rw-r--r--drivers/regulator/qcom_usb_vbus-regulator.c1
-rw-r--r--drivers/regulator/rc5t583-regulator.c1
-rw-r--r--drivers/regulator/rk808-regulator.c3
-rw-r--r--drivers/regulator/rn5t618-regulator.c1
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c1
-rw-r--r--drivers/regulator/rt4801-regulator.c1
-rw-r--r--drivers/regulator/rt4803.c216
-rw-r--r--drivers/regulator/rt4831-regulator.c1
-rw-r--r--drivers/regulator/rt5033-regulator.c1
-rw-r--r--drivers/regulator/rt5120-regulator.c1
-rw-r--r--drivers/regulator/rt5190a-regulator.c1
-rw-r--r--drivers/regulator/rt5739.c291
-rw-r--r--drivers/regulator/rt5759-regulator.c1
-rw-r--r--drivers/regulator/rt6160-regulator.c1
-rw-r--r--drivers/regulator/rt6190-regulator.c1
-rw-r--r--drivers/regulator/rt6245-regulator.c1
-rw-r--r--drivers/regulator/rtmv20-regulator.c1
-rw-r--r--drivers/regulator/rtq2134-regulator.c1
-rw-r--r--drivers/regulator/rtq6752-regulator.c1
-rw-r--r--drivers/regulator/s2mpa01.c1
-rw-r--r--drivers/regulator/s2mps11.c1
-rw-r--r--drivers/regulator/s5m8767.c24
-rw-r--r--drivers/regulator/sc2731-regulator.c1
-rw-r--r--drivers/regulator/scmi-regulator.c16
-rw-r--r--drivers/regulator/sky81452-regulator.c1
-rw-r--r--drivers/regulator/slg51000-regulator.c1
-rw-r--r--drivers/regulator/sm5703-regulator.c3
-rw-r--r--drivers/regulator/stm32-booster.c1
-rw-r--r--drivers/regulator/stm32-pwr.c9
-rw-r--r--drivers/regulator/stm32-vrefbuf.c1
-rw-r--r--drivers/regulator/stpmic1_regulator.c3
-rw-r--r--drivers/regulator/stw481x-vmmc.c1
-rw-r--r--drivers/regulator/sy7636a-regulator.c1
-rw-r--r--drivers/regulator/sy8106a-regulator.c1
-rw-r--r--drivers/regulator/sy8824x.c1
-rw-r--r--drivers/regulator/sy8827n.c1
-rw-r--r--drivers/regulator/ti-abb-regulator.c1
-rw-r--r--drivers/regulator/tps51632-regulator.c1
-rw-r--r--drivers/regulator/tps6105x-regulator.c1
-rw-r--r--drivers/regulator/tps62360-regulator.c16
-rw-r--r--drivers/regulator/tps6286x-regulator.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c1
-rw-r--r--drivers/regulator/tps6507x-regulator.c1
-rw-r--r--drivers/regulator/tps65086-regulator.c1
-rw-r--r--drivers/regulator/tps65090-regulator.c1
-rw-r--r--drivers/regulator/tps65132-regulator.c1
-rw-r--r--drivers/regulator/tps65217-regulator.c1
-rw-r--r--drivers/regulator/tps65218-regulator.c1
-rw-r--r--drivers/regulator/tps65219-regulator.c25
-rw-r--r--drivers/regulator/tps6524x-regulator.c1
-rw-r--r--drivers/regulator/tps6586x-regulator.c1
-rw-r--r--drivers/regulator/tps65910-regulator.c1
-rw-r--r--drivers/regulator/tps65912-regulator.c1
-rw-r--r--drivers/regulator/tps68470-regulator.c1
-rw-r--r--drivers/regulator/twl-regulator.c1
-rw-r--r--drivers/regulator/twl6030-regulator.c3
-rw-r--r--drivers/regulator/uniphier-regulator.c1
-rw-r--r--drivers/regulator/userspace-consumer.c1
-rw-r--r--drivers/regulator/vctrl-regulator.c1
-rw-r--r--drivers/regulator/vexpress-regulator.c1
-rw-r--r--drivers/regulator/virtual.c1
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c1
-rw-r--r--drivers/regulator/wm831x-dcdc.c4
-rw-r--r--drivers/regulator/wm831x-isink.c1
-rw-r--r--drivers/regulator/wm831x-ldo.c3
-rw-r--r--drivers/regulator/wm8350-regulator.c1
-rw-r--r--drivers/regulator/wm8400-regulator.c1
-rw-r--r--drivers/regulator/wm8994-regulator.c1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c12
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c249
-rw-r--r--drivers/remoteproc/imx_rproc.c7
-rw-r--r--drivers/remoteproc/mtk_scp.c13
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c34
-rw-r--r--drivers/remoteproc/pru_rproc.c235
-rw-r--r--drivers/remoteproc/qcom_common.c19
-rw-r--r--drivers/remoteproc/qcom_common.h8
-rw-r--r--drivers/remoteproc/qcom_q6v5.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c135
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c279
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c352
-rw-r--r--drivers/remoteproc/qcom_sysmon.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss.c24
-rw-r--r--drivers/remoteproc/qcom_wcnss.h2
-rw-r--r--drivers/remoteproc/rcar_rproc.c9
-rw-r--r--drivers/remoteproc/remoteproc_core.c6
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c4
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c4
-rw-r--r--drivers/remoteproc/st_remoteproc.c7
-rw-r--r--drivers/remoteproc/stm32_rproc.c14
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c12
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c127
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c324
-rw-r--r--drivers/reset/Kconfig10
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-lantiq.c1
-rw-r--r--drivers/reset/reset-microchip-sparx5.c1
-rw-r--r--drivers/reset/reset-mpfs.c1
-rw-r--r--drivers/reset/reset-starfive-jh7100.c173
-rw-r--r--drivers/reset/reset-uniphier-glue.c4
-rw-r--r--drivers/reset/starfive/Kconfig20
-rw-r--r--drivers/reset/starfive/Makefile5
-rw-r--r--drivers/reset/starfive/reset-starfive-jh7100.c74
-rw-r--r--drivers/reset/starfive/reset-starfive-jh7110.c73
-rw-r--r--drivers/reset/starfive/reset-starfive-jh71x0.c131
-rw-r--r--drivers/reset/starfive/reset-starfive-jh71x0.h14
-rw-r--r--drivers/rpmsg/qcom_glink_native.c287
-rw-r--r--drivers/rpmsg/qcom_glink_native.h8
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c100
-rw-r--r--drivers/rpmsg/qcom_glink_smem.c102
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c24
-rw-r--r--drivers/rpmsg/rpmsg_char.c8
-rw-r--r--drivers/rpmsg/rpmsg_core.c6
-rw-r--r--drivers/rpmsg/rpmsg_ctrl.c2
-rw-r--r--drivers/rtc/Kconfig33
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-88pm80x.c5
-rw-r--r--drivers/rtc/rtc-88pm860x.c6
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c7
-rw-r--r--drivers/rtc/rtc-ab8500.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c77
-rw-r--r--drivers/rtc/rtc-ac100.c6
-rw-r--r--drivers/rtc/rtc-armada38x.c7
-rw-r--r--drivers/rtc/rtc-asm9260.c5
-rw-r--r--drivers/rtc/rtc-at91sam9.c6
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c158
-rw-r--r--drivers/rtc/rtc-cadence.c6
-rw-r--r--drivers/rtc/rtc-cmos.c5
-rw-r--r--drivers/rtc/rtc-cros-ec.c6
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/rtc/rtc-ds1390.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c6
-rw-r--r--drivers/rtc/rtc-efi.c50
-rw-r--r--drivers/rtc/rtc-ftrtc010.c6
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c6
-rw-r--r--drivers/rtc/rtc-hym8563.c7
-rw-r--r--drivers/rtc/rtc-isl12022.c93
-rw-r--r--drivers/rtc/rtc-jz4740.c95
-rw-r--r--drivers/rtc/rtc-lpc24xx.c6
-rw-r--r--drivers/rtc/rtc-m41t80.c7
-rw-r--r--drivers/rtc/rtc-max77686.c6
-rw-r--r--drivers/rtc/rtc-max8907.c1
-rw-r--r--drivers/rtc/rtc-mc13xxx.c6
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c4
-rw-r--r--drivers/rtc/rtc-moxart.c89
-rw-r--r--drivers/rtc/rtc-mpc5121.c6
-rw-r--r--drivers/rtc/rtc-mpfs.c6
-rw-r--r--drivers/rtc/rtc-mt7622.c6
-rw-r--r--drivers/rtc/rtc-mxc_v2.c5
-rw-r--r--drivers/rtc/rtc-nxp-bbnsm.c226
-rw-r--r--drivers/rtc/rtc-omap.c7
-rw-r--r--drivers/rtc/rtc-palmas.c5
-rw-r--r--drivers/rtc/rtc-pcf2123.c7
-rw-r--r--drivers/rtc/rtc-pcf50633.c6
-rw-r--r--drivers/rtc/rtc-pcf85063.c7
-rw-r--r--drivers/rtc/rtc-pcf8523.c24
-rw-r--r--drivers/rtc/rtc-pcf85363.c44
-rw-r--r--drivers/rtc/rtc-pcf8563.c7
-rw-r--r--drivers/rtc/rtc-pic32.c6
-rw-r--r--drivers/rtc/rtc-pm8xxx.c538
-rw-r--r--drivers/rtc/rtc-rc5t583.c5
-rw-r--r--drivers/rtc/rtc-rtd119x.c6
-rw-r--r--drivers/rtc/rtc-rv3028.c7
-rw-r--r--drivers/rtc/rtc-rv3029c2.c7
-rw-r--r--drivers/rtc/rtc-rv3032.c14
-rw-r--r--drivers/rtc/rtc-rv8803.c52
-rw-r--r--drivers/rtc/rtc-rx6110.c1
-rw-r--r--drivers/rtc/rtc-rx8010.c8
-rw-r--r--drivers/rtc/rtc-rzn1.c6
-rw-r--r--drivers/rtc/rtc-s3c.c6
-rw-r--r--drivers/rtc/rtc-s5m.c82
-rw-r--r--drivers/rtc/rtc-sa1100.c6
-rw-r--r--drivers/rtc/rtc-spear.c6
-rw-r--r--drivers/rtc/rtc-stm32.c6
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c8
-rw-r--r--drivers/rtc/rtc-sun6i.c18
-rw-r--r--drivers/rtc/rtc-sunplus.c13
-rw-r--r--drivers/rtc/rtc-tegra.c6
-rw-r--r--drivers/rtc/rtc-ti-k3.c3
-rw-r--r--drivers/rtc/rtc-tps6586x.c5
-rw-r--r--drivers/rtc/rtc-twl.c6
-rw-r--r--drivers/rtc/rtc-v3020.c369
-rw-r--r--drivers/rtc/rtc-vt8500.c6
-rw-r--r--drivers/rtc/rtc-wm8350.c6
-rw-r--r--drivers/rtc/rtc-xgene.c5
-rw-r--r--drivers/rtc/rtc-zynqmp.c6
-rw-r--r--drivers/s390/block/dasd.c80
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_devmap.c126
-rw-r--r--drivers/s390/block/dasd_eckd.c105
-rw-r--r--drivers/s390/block/dasd_eer.c3
-rw-r--r--drivers/s390/block/dasd_fba.c14
-rw-r--r--drivers/s390/block/dasd_int.h32
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/Kconfig11
-rw-r--r--drivers/s390/char/Makefile4
-rw-r--r--drivers/s390/char/con3215.c4
-rw-r--r--drivers/s390/char/con3270.c2331
-rw-r--r--drivers/s390/char/diag_ftp.c4
-rw-r--r--drivers/s390/char/fs3270.c124
-rw-r--r--drivers/s390/char/hmcdrv_dev.c2
-rw-r--r--drivers/s390/char/raw3270.c378
-rw-r--r--drivers/s390/char/raw3270.h227
-rw-r--r--drivers/s390/char/sclp.h2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/char/sclp_early_core.c8
-rw-r--r--drivers/s390/char/sclp_ftp.c6
-rw-r--r--drivers/s390/char/tape_class.c2
-rw-r--r--drivers/s390/char/tty3270.c1963
-rw-r--r--drivers/s390/char/tty3270.h15
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/chsc.c2
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/css.c25
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c17
-rw-r--r--drivers/s390/cio/scm.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c365
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h3
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c300
-rw-r--r--drivers/s390/crypto/ap_bus.h70
-rw-r--r--drivers/s390/crypto/ap_card.c23
-rw-r--r--drivers/s390/crypto/ap_queue.c412
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c9
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c136
-rw-r--r--drivers/s390/crypto/zcrypt_api.c76
-rw-r--r--drivers/s390/crypto/zcrypt_card.c6
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h37
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c74
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c66
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c141
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c139
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c4
-rw-r--r--drivers/s390/net/ctcm_fsms.c32
-rw-r--r--drivers/s390/net/ctcm_main.c16
-rw-r--r--drivers/s390/net/ctcm_mpc.c15
-rw-r--r--drivers/s390/net/ism.h19
-rw-r--r--drivers/s390/net/ism_drv.c382
-rw-r--r--drivers/s390/net/qeth_core_main.c14
-rw-r--r--drivers/s390/net/qeth_core_sys.c78
-rw-r--r--drivers/s390/net/qeth_ethtool.c6
-rw-r--r--drivers/s390/net/qeth_l2_main.c53
-rw-r--r--drivers/s390/net/qeth_l2_sys.c28
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c83
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c46
-rw-r--r--drivers/s390/scsi/zfcp_def.h6
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c22
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h2
-rw-r--r--drivers/s390/scsi/zfcp_reqlist.h26
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/s390/virtio/virtio_ccw.c68
-rw-r--r--drivers/sbus/char/display7seg.c5
-rw-r--r--drivers/sbus/char/oradax.c6
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-sas.c15
-rw-r--r--drivers/scsi/3w-sas.h4
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/BusLogic.c4
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/a2091.c2
-rw-r--r--drivers/scsi/a3000.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c5
-rw-r--r--drivers/scsi/aacraid/linit.c5
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1542.c5
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c3
-rw-r--r--drivers/scsi/am53c974.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c24
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/arm/arxescsi.c2
-rw-r--r--drivers/scsi/arm/cumana_1.c2
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/eesox.c2
-rw-r--r--drivers/scsi/arm/oak.c2
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/atp870u.c4
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c29
-rw-r--r--drivers/scsi/be2iscsi/be_main.h1
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bfa/bfad_drv.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c4
-rw-r--r--drivers/scsi/ch.c32
-rw-r--r--drivers/scsi/csiostor/csio_init.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c2
-rw-r--r--drivers/scsi/cxlflash/superpipe.c36
-rw-r--r--drivers/scsi/cxlflash/vlun.c34
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c37
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c13
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c22
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c12
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c20
-rw-r--r--drivers/scsi/elx/efct/efct_xport.c2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/esp_scsi.c2
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c8
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c6
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/fnic/fnic_trace.c17
-rw-r--r--drivers/scsi/g_NCR5380.c4
-rw-r--r--drivers/scsi/gvp11.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c158
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c194
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/hpsa.c11
-rw-r--r--drivers/scsi/hptiop.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c30
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c858
-rw-r--r--drivers/scsi/ipr.h64
-rw-r--r--drivers/scsi/ips.c11
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c30
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libiscsi.c40
-rw-r--r--drivers/scsi/libsas/sas_ata.c102
-rw-r--r--drivers/scsi/libsas/sas_discover.c35
-rw-r--r--drivers/scsi/libsas/sas_expander.c125
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c175
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c87
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c58
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c132
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c180
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h24
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c16
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/mac_esp.c2
-rw-r--r--drivers/scsi/megaraid.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h5
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/mpi3mr/Makefile2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h112
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h23
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h6
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h4
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h31
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c37
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_debug.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c176
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c115
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c24
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c26
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c14
-rw-r--r--drivers/scsi/mvme147.c2
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvumi.c6
-rw-r--r--drivers/scsi/mvumi.h6
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c2
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c46
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c7
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/ps3rom.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c4
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h1
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c3
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h52
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c407
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c100
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h110
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c107
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c304
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c109
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c27
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/qlogicpti.c13
-rw-r--r--drivers/scsi/scsi.c28
-rw-r--r--drivers/scsi/scsi_debug.c1007
-rw-r--r--drivers/scsi/scsi_devinfo.c4
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_ioctl.c7
-rw-r--r--drivers/scsi/scsi_lib.c79
-rw-r--r--drivers/scsi/scsi_scan.c36
-rw-r--r--drivers/scsi/scsi_sysctl.c16
-rw-r--r--drivers/scsi/scsi_sysfs.c12
-rw-r--r--drivers/scsi/scsi_transport_fc.c13
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c50
-rw-r--r--drivers/scsi/scsi_transport_spi.c31
-rw-r--r--drivers/scsi/sd.c156
-rw-r--r--drivers/scsi/sd_dif.c10
-rw-r--r--drivers/scsi/sd_zbc.c16
-rw-r--r--drivers/scsi/ses.c94
-rw-r--r--drivers/scsi/sg.c12
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c5
-rw-r--r--drivers/scsi/snic/snic_debugfs.c4
-rw-r--r--drivers/scsi/snic/snic_main.c2
-rw-r--r--drivers/scsi/snic/snic_scsi.c7
-rw-r--r--drivers/scsi/sr.c18
-rw-r--r--drivers/scsi/sr_ioctl.c17
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/storvsc_drv.c23
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/sun_esp.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/virtio_scsi.c20
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/scsi/xen-scsifront.c6
-rw-r--r--drivers/scsi/zorro_esp.c2
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/sh/intc/userimask.c10
-rw-r--r--drivers/sh/maple/maple.c7
-rw-r--r--drivers/slimbus/core.c4
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile4
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c17
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c8
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c5
-rw-r--r--drivers/soc/apple/apple-pmgr-pwrstate.c12
-rw-r--r--drivers/soc/apple/rtkit-crashlog.c93
-rw-r--r--drivers/soc/apple/rtkit.c52
-rw-r--r--drivers/soc/atmel/soc.c9
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/bcm/bcm2835-power.c7
-rw-r--r--drivers/soc/bcm/brcmstb/Kconfig4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c4
-rw-r--r--drivers/soc/bcm/brcmstb/pm/Makefile1
-rw-r--r--drivers/soc/bcm/brcmstb/pm/aon_defs.h105
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c874
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s2-arm.S69
-rw-r--r--drivers/soc/bcm/raspberrypi-power.c1
-rw-r--r--drivers/soc/canaan/Kconfig5
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c8
-rw-r--r--drivers/soc/fsl/qe/Kconfig23
-rw-r--r--drivers/soc/fsl/qe/Makefile2
-rw-r--r--drivers/soc/fsl/qe/gpio.c2
-rw-r--r--drivers/soc/fsl/qe/qmc.c1537
-rw-r--r--drivers/soc/fsl/qe/tsa.c846
-rw-r--r--drivers/soc/fsl/qe/tsa.h42
-rw-r--r--drivers/soc/fujitsu/a64fx-diag.c1
-rw-r--r--drivers/soc/imx/Kconfig13
-rw-r--r--drivers/soc/imx/Makefile6
-rw-r--r--drivers/soc/imx/gpcv2.c2
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c38
-rw-r--r--drivers/soc/imx/imx8mp-blk-ctrl.c120
-rw-r--r--drivers/soc/imx/imx93-pd.c1
-rw-r--r--drivers/soc/imx/imx93-src.c1
-rw-r--r--drivers/soc/imx/soc-imx8m.c5
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c6
-rw-r--r--drivers/soc/mediatek/Kconfig8
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mt8173-mmsys.h95
-rw-r--r--drivers/soc/mediatek/mt8186-pm-domains.h4
-rw-r--r--drivers/soc/mediatek/mt8188-mmsys.h149
-rw-r--r--drivers/soc/mediatek/mt8188-pm-domains.h623
-rw-r--r--drivers/soc/mediatek/mt8195-mmsys.h159
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c11
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c309
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h4
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c301
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c13
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h5
-rw-r--r--drivers/soc/mediatek/mtk-regulator-coupler.c159
-rw-r--r--drivers/soc/mediatek/mtk-svs.c281
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c56
-rw-r--r--drivers/soc/nuvoton/Kconfig11
-rw-r--r--drivers/soc/nuvoton/Makefile2
-rw-r--r--drivers/soc/nuvoton/wpcm450-soc.c109
-rw-r--r--drivers/soc/qcom/Kconfig33
-rw-r--r--drivers/soc/qcom/Makefile4
-rw-r--r--drivers/soc/qcom/apr.c7
-rw-r--r--drivers/soc/qcom/cpr.c6
-rw-r--r--drivers/soc/qcom/icc-bwmon.c231
-rw-r--r--drivers/soc/qcom/ice.c366
-rw-r--r--drivers/soc/qcom/llcc-qcom.c110
-rw-r--r--drivers/soc/qcom/mdt_loader.c2
-rw-r--r--drivers/soc/qcom/ocmem.c2
-rw-r--r--drivers/soc/qcom/pmic_glink.c379
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c478
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c2
-rw-r--r--drivers/soc/qcom/qcom_aoss.c2
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c2
-rw-r--r--drivers/soc/qcom/qcom_stats.c10
-rw-r--r--drivers/soc/qcom/qmi_interface.c3
-rw-r--r--drivers/soc/qcom/ramp_controller.c343
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c38
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c2
-rw-r--r--drivers/soc/qcom/rpmhpd.c34
-rw-r--r--drivers/soc/qcom/rpmpd.c851
-rw-r--r--drivers/soc/qcom/smd-rpm.c3
-rw-r--r--drivers/soc/qcom/smem.c4
-rw-r--r--drivers/soc/qcom/smsm.c11
-rw-r--r--drivers/soc/qcom/socinfo.c128
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/pwc-rzv2m.c141
-rw-r--r--drivers/soc/renesas/r8a7795-sysc.c10
-rw-r--r--drivers/soc/renesas/r8a779g0-sysc.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c2
-rw-r--r--drivers/soc/renesas/renesas-soc.c19
-rw-r--r--drivers/soc/renesas/rmobile-sysc.c2
-rw-r--r--drivers/soc/samsung/Kconfig26
-rw-r--r--drivers/soc/samsung/Makefile1
-rw-r--r--drivers/soc/samsung/s3c-pm-debug.c79
-rw-r--r--drivers/soc/sifive/Kconfig2
-rw-r--r--drivers/soc/starfive/Kconfig12
-rw-r--r--drivers/soc/starfive/Makefile3
-rw-r--r--drivers/soc/starfive/jh71xx_pmu.c383
-rw-r--r--drivers/soc/sunxi/Kconfig9
-rw-r--r--drivers/soc/sunxi/Makefile1
-rw-r--r--drivers/soc/sunxi/sun20i-ppu.c207
-rw-r--r--drivers/soc/sunxi/sunxi_mbus.c2
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c4
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c1
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c6
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c8
-rw-r--r--drivers/soc/tegra/flowctrl.c4
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c4
-rw-r--r--drivers/soc/tegra/pmc.c26
-rw-r--r--drivers/soc/tegra/powergate-bpmp.c2
-rw-r--r--drivers/soc/ti/k3-ringacc.c7
-rw-r--r--drivers/soc/ti/k3-socinfo.c1
-rw-r--r--drivers/soc/ti/knav_dma.c4
-rw-r--r--drivers/soc/ti/knav_qmss_acc.c2
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c4
-rw-r--r--drivers/soc/ti/omap_prm.c2
-rw-r--r--drivers/soc/ti/pm33xx.c5
-rw-r--r--drivers/soc/ti/smartreflex.c34
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c6
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c4
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c2
-rw-r--r--drivers/soundwire/Kconfig10
-rw-r--r--drivers/soundwire/Makefile7
-rw-r--r--drivers/soundwire/amd_manager.c1208
-rw-r--r--drivers/soundwire/amd_manager.h258
-rw-r--r--drivers/soundwire/bus.c164
-rw-r--r--drivers/soundwire/bus.h23
-rw-r--r--drivers/soundwire/bus_type.c13
-rw-r--r--drivers/soundwire/cadence_master.c215
-rw-r--r--drivers/soundwire/cadence_master.h27
-rw-r--r--drivers/soundwire/debugfs.c13
-rw-r--r--drivers/soundwire/dmi-quirks.c25
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c15
-rw-r--r--drivers/soundwire/intel.c363
-rw-r--r--drivers/soundwire/intel.h67
-rw-r--r--drivers/soundwire/intel_auxdevice.c7
-rw-r--r--drivers/soundwire/intel_bus_common.c259
-rw-r--r--drivers/soundwire/qcom.c20
-rw-r--r--drivers/soundwire/stream.c62
-rw-r--r--drivers/spi/Kconfig79
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/atmel-quadspi.c44
-rw-r--r--drivers/spi/spi-altera-core.c32
-rw-r--r--drivers/spi/spi-altera-dfl.c36
-rw-r--r--drivers/spi/spi-altera-platform.c36
-rw-r--r--drivers/spi/spi-amd.c4
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c456
-rw-r--r--drivers/spi/spi-ar934x.c18
-rw-r--r--drivers/spi/spi-armada-3700.c108
-rw-r--r--drivers/spi/spi-aspeed-smc.c18
-rw-r--r--drivers/spi/spi-at91-usart.c48
-rw-r--r--drivers/spi/spi-ath79.c50
-rw-r--r--drivers/spi/spi-atmel.c286
-rw-r--r--drivers/spi/spi-au1550.c9
-rw-r--r--drivers/spi/spi-axi-spi-engine.c8
-rw-r--r--drivers/spi/spi-bcm-qspi.c12
-rw-r--r--drivers/spi/spi-bcm2835.c36
-rw-r--r--drivers/spi/spi-bcm2835aux.c10
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c517
-rw-r--r--drivers/spi/spi-bcm63xx.c24
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c652
-rw-r--r--drivers/spi/spi-brcmstb-qspi.c6
-rw-r--r--drivers/spi/spi-cadence-quadspi.c148
-rw-r--r--drivers/spi/spi-cadence-xspi.c9
-rw-r--r--drivers/spi/spi-cadence.c334
-rw-r--r--drivers/spi/spi-cavium-octeon.c6
-rw-r--r--drivers/spi/spi-cavium.c8
-rw-r--r--drivers/spi/spi-coldfire-qspi.c14
-rw-r--r--drivers/spi/spi-davinci.c23
-rw-r--r--drivers/spi/spi-dln2.c12
-rw-r--r--drivers/spi/spi-dw-bt1.c6
-rw-r--r--drivers/spi/spi-dw-core.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c68
-rw-r--r--drivers/spi/spi-ep93xx.c6
-rw-r--r--drivers/spi/spi-falcon.c2
-rw-r--r--drivers/spi/spi-fsi.c2
-rw-r--r--drivers/spi/spi-fsl-cpm.c23
-rw-r--r--drivers/spi/spi-fsl-dspi.c24
-rw-r--r--drivers/spi/spi-fsl-espi.c12
-rw-r--r--drivers/spi/spi-fsl-lpspi.c7
-rw-r--r--drivers/spi/spi-fsl-qspi.c12
-rw-r--r--drivers/spi/spi-fsl-spi.c92
-rw-r--r--drivers/spi/spi-geni-qcom.c224
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-gxp.c4
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c6
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c2
-rw-r--r--drivers/spi/spi-img-spfi.c20
-rw-r--r--drivers/spi/spi-imx.c75
-rw-r--r--drivers/spi/spi-ingenic.c4
-rw-r--r--drivers/spi/spi-intel-pci.c14
-rw-r--r--drivers/spi/spi-intel.c12
-rw-r--r--drivers/spi/spi-iproc-qspi.c6
-rw-r--r--drivers/spi/spi-jcore.c4
-rw-r--r--drivers/spi/spi-lantiq-ssc.c12
-rw-r--r--drivers/spi/spi-loopback-test.c16
-rw-r--r--drivers/spi/spi-mem.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c8
-rw-r--r--drivers/spi/spi-meson-spifc.c6
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c6
-rw-r--r--drivers/spi/spi-microchip-core.c12
-rw-r--r--drivers/spi/spi-mpc512x-psc.c142
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c145
-rw-r--r--drivers/spi/spi-mpc52xx.c8
-rw-r--r--drivers/spi/spi-mt65xx.c18
-rw-r--r--drivers/spi/spi-mt7621.c2
-rw-r--r--drivers/spi/spi-mtk-nor.c6
-rw-r--r--drivers/spi/spi-mtk-snfi.c46
-rw-r--r--drivers/spi/spi-mux.c8
-rw-r--r--drivers/spi/spi-mxic.c16
-rw-r--r--drivers/spi/spi-mxs.c8
-rw-r--r--drivers/spi/spi-npcm-fiu.c25
-rw-r--r--drivers/spi/spi-npcm-pspi.c6
-rw-r--r--drivers/spi/spi-nxp-fspi.c74
-rw-r--r--drivers/spi/spi-oc-tiny.c5
-rw-r--r--drivers/spi/spi-omap-100k.c490
-rw-r--r--drivers/spi/spi-omap-uwire.c29
-rw-r--r--drivers/spi/spi-omap2-mcspi.c35
-rw-r--r--drivers/spi/spi-orion.c13
-rw-r--r--drivers/spi/spi-pci1xxxx.c22
-rw-r--r--drivers/spi/spi-pic32-sqi.c8
-rw-r--r--drivers/spi/spi-pic32.c13
-rw-r--r--drivers/spi/spi-pl022.c5
-rw-r--r--drivers/spi/spi-ppc4xx.c5
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/spi/spi-qcom-qspi.c13
-rw-r--r--drivers/spi/spi-qup.c31
-rw-r--r--drivers/spi/spi-rb4xx.c8
-rw-r--r--drivers/spi/spi-rockchip-sfc.c14
-rw-r--r--drivers/spi/spi-rockchip.c36
-rw-r--r--drivers/spi/spi-rpc-if.c20
-rw-r--r--drivers/spi/spi-rspi.c24
-rw-r--r--drivers/spi/spi-s3c24xx-regs.h41
-rw-r--r--drivers/spi/spi-s3c24xx.c596
-rw-r--r--drivers/spi/spi-s3c64xx.c8
-rw-r--r--drivers/spi/spi-sc18is602.c6
-rw-r--r--drivers/spi/spi-sh-hspi.c6
-rw-r--r--drivers/spi/spi-sh-msiof.c13
-rw-r--r--drivers/spi/spi-sh-sci.c7
-rw-r--r--drivers/spi/spi-sh.c6
-rw-r--r--drivers/spi/spi-sifive.c12
-rw-r--r--drivers/spi/spi-slave-mt27xx.c6
-rw-r--r--drivers/spi/spi-sn-f-ospi.c12
-rw-r--r--drivers/spi/spi-sprd-adi.c8
-rw-r--r--drivers/spi/spi-sprd.c23
-rw-r--r--drivers/spi/spi-st-ssc4.c8
-rw-r--r--drivers/spi/spi-stm32-qspi.c18
-rw-r--r--drivers/spi/spi-stm32.c15
-rw-r--r--drivers/spi/spi-sun4i.c8
-rw-r--r--drivers/spi/spi-sun6i.c7
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c5
-rw-r--r--drivers/spi/spi-synquacer.c19
-rw-r--r--drivers/spi/spi-tegra114.c37
-rw-r--r--drivers/spi/spi-tegra20-sflash.c8
-rw-r--r--drivers/spi/spi-tegra20-slink.c11
-rw-r--r--drivers/spi/spi-tegra210-quad.c46
-rw-r--r--drivers/spi/spi-ti-qspi.c16
-rw-r--r--drivers/spi/spi-topcliff-pch.c10
-rw-r--r--drivers/spi/spi-uniphier.c6
-rw-r--r--drivers/spi/spi-wpcm-fiu.c12
-rw-r--r--drivers/spi/spi-xcomm.c2
-rw-r--r--drivers/spi/spi-xilinx.c24
-rw-r--r--drivers/spi/spi-xlp.c4
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c6
-rw-r--r--drivers/spi/spi-zynq-qspi.c8
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c8
-rw-r--r--drivers/spi/spi.c205
-rw-r--r--drivers/spi/spidev.c77
-rw-r--r--drivers/spmi/hisi-spmi-controller.c5
-rw-r--r--drivers/spmi/spmi-mtk-pmif.c7
-rw-r--r--drivers/spmi/spmi-pmic-arb.c9
-rw-r--r--drivers/spmi/spmi.c10
-rw-r--r--drivers/ssb/main.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c34
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c13
-rw-r--r--drivers/staging/fbtft/fbtft-core.c2
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c7
-rw-r--r--drivers/staging/fieldbus/dev_core.c1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c4
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c6
-rw-r--r--drivers/staging/greybus/arche-platform.c6
-rw-r--r--drivers/staging/greybus/audio_codec.c6
-rw-r--r--drivers/staging/greybus/audio_manager_module.c47
-rw-r--r--drivers/staging/greybus/audio_topology.c5
-rw-r--r--drivers/staging/greybus/authentication.c2
-rw-r--r--drivers/staging/greybus/fw-management.c2
-rw-r--r--drivers/staging/greybus/gbphy.c14
-rw-r--r--drivers/staging/greybus/gpio.c13
-rw-r--r--drivers/staging/greybus/greybus_authentication.h1
-rw-r--r--drivers/staging/greybus/hid.c2
-rw-r--r--drivers/staging/greybus/loopback.c1
-rw-r--r--drivers/staging/greybus/pwm.c6
-rw-r--r--drivers/staging/greybus/raw.c2
-rw-r--r--drivers/staging/greybus/spilib.c2
-rw-r--r--drivers/staging/greybus/tools/.gitignore2
-rw-r--r--drivers/staging/greybus/tools/Android.mk10
-rw-r--r--drivers/staging/greybus/tools/Makefile33
-rw-r--r--drivers/staging/greybus/tools/README.loopback198
-rwxr-xr-xdrivers/staging/greybus/tools/lbtest169
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c979
-rw-r--r--drivers/staging/greybus/uart.c4
-rw-r--r--drivers/staging/greybus/usb.c2
-rw-r--r--drivers/staging/greybus/vibrator.c1
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/meter/Kconfig37
-rw-r--r--drivers/staging/iio/meter/Makefile8
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c153
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c160
-rw-r--r--drivers/staging/iio/meter/ade7854.c556
-rw-r--r--drivers/staging/iio/meter/ade7854.h173
-rw-r--r--drivers/staging/iio/meter/meter.h398
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c3
-rw-r--r--drivers/staging/ks7010/ks_hostif.c5
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c3
-rw-r--r--drivers/staging/media/Kconfig10
-rw-r--r--drivers/staging/media/Makefile9
-rw-r--r--drivers/staging/media/atomisp/Kconfig2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c1248
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c176
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c206
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c1263
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c195
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h426
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h31
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h15
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h835
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h36
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c195
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h61
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h78
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h2
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h22
-rw-r--r--drivers/staging/media/atomisp/notes.txt6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c1168
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c420
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c208
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c409
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c286
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c360
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c240
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c20
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c7
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c38
-rw-r--r--drivers/staging/media/av7110/Kconfig (renamed from drivers/staging/media/deprecated/saa7146/av7110/Kconfig)20
-rw-r--r--drivers/staging/media/av7110/Makefile (renamed from drivers/staging/media/deprecated/saa7146/av7110/Makefile)3
-rw-r--r--drivers/staging/media/av7110/TODO3
-rw-r--r--drivers/staging/media/av7110/audio-bilingual-channel-select.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-bilingual-channel-select.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-channel-select.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-channel-select.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-clear-buffer.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-clear-buffer.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-continue.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-continue.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-fclose.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-fclose.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-fopen.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-fopen.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-fwrite.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-fwrite.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-get-capabilities.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-get-capabilities.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-get-status.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-get-status.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-pause.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-pause.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-play.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-play.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-select-source.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-select-source.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-av-sync.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-set-av-sync.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-bypass-mode.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-set-bypass-mode.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-id.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-set-id.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-mixer.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-set-mixer.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-mute.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-set-mute.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-streamtype.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-set-streamtype.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-stop.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio-stop.rst)0
-rw-r--r--drivers/staging/media/av7110/audio.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio.rst)0
-rw-r--r--drivers/staging/media/av7110/audio_data_types.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio_data_types.rst)0
-rw-r--r--drivers/staging/media/av7110/audio_function_calls.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/audio_function_calls.rst)0
-rw-r--r--drivers/staging/media/av7110/av7110.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110.c)6
-rw-r--r--drivers/staging/media/av7110/av7110.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110.h)2
-rw-r--r--drivers/staging/media/av7110/av7110_av.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c)4
-rw-r--r--drivers/staging/media/av7110/av7110_av.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_av.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_ca.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_ca.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_hw.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.c)3
-rw-r--r--drivers/staging/media/av7110/av7110_hw.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_ipack.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_ipack.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_ir.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_ir.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_v4l.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/av7110_v4l.c)148
-rw-r--r--drivers/staging/media/av7110/budget-patch.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/budget-patch.c)0
-rw-r--r--drivers/staging/media/av7110/dvb_filter.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.c)0
-rw-r--r--drivers/staging/media/av7110/dvb_filter.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.h)0
-rw-r--r--drivers/staging/media/av7110/sp8870.c (renamed from drivers/staging/media/deprecated/saa7146/av7110/sp8870.c)0
-rw-r--r--drivers/staging/media/av7110/sp8870.h (renamed from drivers/staging/media/deprecated/saa7146/av7110/sp8870.h)0
-rw-r--r--drivers/staging/media/av7110/video-clear-buffer.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-clear-buffer.rst)0
-rw-r--r--drivers/staging/media/av7110/video-command.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-command.rst)0
-rw-r--r--drivers/staging/media/av7110/video-continue.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-continue.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fast-forward.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-fast-forward.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fclose.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-fclose.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fopen.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-fopen.rst)0
-rw-r--r--drivers/staging/media/av7110/video-freeze.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-freeze.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fwrite.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-fwrite.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-capabilities.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-get-capabilities.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-event.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-get-event.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-frame-count.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-get-frame-count.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-pts.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-get-pts.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-size.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-get-size.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-status.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-get-status.rst)0
-rw-r--r--drivers/staging/media/av7110/video-play.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-play.rst)0
-rw-r--r--drivers/staging/media/av7110/video-select-source.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-select-source.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-blank.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-set-blank.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-display-format.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-set-display-format.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-format.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-set-format.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-streamtype.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-set-streamtype.rst)0
-rw-r--r--drivers/staging/media/av7110/video-slowmotion.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-slowmotion.rst)0
-rw-r--r--drivers/staging/media/av7110/video-stillpicture.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-stillpicture.rst)0
-rw-r--r--drivers/staging/media/av7110/video-stop.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-stop.rst)0
-rw-r--r--drivers/staging/media/av7110/video-try-command.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video-try-command.rst)0
-rw-r--r--drivers/staging/media/av7110/video.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video.rst)0
-rw-r--r--drivers/staging/media/av7110/video_function_calls.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video_function_calls.rst)0
-rw-r--r--drivers/staging/media/av7110/video_types.rst (renamed from drivers/staging/media/deprecated/saa7146/av7110/video_types.rst)0
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-base.c9
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c10
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c10
-rw-r--r--drivers/staging/media/deprecated/cpia2/Kconfig13
-rw-r--r--drivers/staging/media/deprecated/cpia2/Makefile4
-rw-r--r--drivers/staging/media/deprecated/cpia2/TODO6
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2.h475
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_core.c2434
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_registers.h463
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_usb.c966
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_v4l.c1226
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/Kconfig15
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/Makefile2
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/TODO7
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/fsl-viu.c1599
-rw-r--r--drivers/staging/media/deprecated/meye/Kconfig19
-rw-r--r--drivers/staging/media/deprecated/meye/Makefile2
-rw-r--r--drivers/staging/media/deprecated/meye/TODO6
-rw-r--r--drivers/staging/media/deprecated/meye/meye.c1814
-rw-r--r--drivers/staging/media/deprecated/meye/meye.h311
-rw-r--r--drivers/staging/media/deprecated/saa7146/Kconfig5
-rw-r--r--drivers/staging/media/deprecated/saa7146/Makefile2
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/TODO9
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146.h472
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_video.c1286
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h266
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/TODO7
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/TODO7
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/Kconfig18
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/Makefile5
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/TODO12
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-sensor.c587
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-webcam.c1434
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-webcam.h123
-rw-r--r--drivers/staging/media/deprecated/tm6000/Kconfig37
-rw-r--r--drivers/staging/media/deprecated/tm6000/Makefile14
-rw-r--r--drivers/staging/media/deprecated/tm6000/TODO7
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-alsa.c440
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-cards.c1397
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-core.c916
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-dvb.c454
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-i2c.c317
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-input.c503
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-regs.h588
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-stds.c623
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-usb-isoc.h38
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-video.c1703
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000.h396
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/Kconfig58
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/Makefile4
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/TODO7
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h80
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c934
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h308
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h297
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c879
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h171
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h140
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.c1127
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.h518
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif_regs.h256
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c1902
-rw-r--r--drivers/staging/media/deprecated/zr364xx/Kconfig18
-rw-r--r--drivers/staging/media/deprecated/zr364xx/Makefile3
-rw-r--r--drivers/staging/media/deprecated/zr364xx/TODO7
-rw-r--r--drivers/staging/media/deprecated/zr364xx/zr364xx.c1635
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c40
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c13
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c14
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c6
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c13
-rw-r--r--drivers/staging/media/imx/imx-media-of.c5
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c76
-rw-r--r--drivers/staging/media/imx/imx-media.h19
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c6
-rw-r--r--drivers/staging/media/imx/imx8mq-mipi-csi2.c158
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c3
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c6
-rw-r--r--drivers/staging/media/omap4iss/iss.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c82
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c7
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c7
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c6
-rw-r--r--drivers/staging/media/tegra-video/csi.c8
-rw-r--r--drivers/staging/media/tegra-video/vi.c18
-rw-r--r--drivers/staging/most/dim2/dim2.c15
-rw-r--r--drivers/staging/most/dim2/hal.c5
-rw-r--r--drivers/staging/most/i2c/i2c.c5
-rw-r--r--drivers/staging/most/video/video.c3
-rw-r--r--drivers/staging/nvec/nvec.c6
-rw-r--r--drivers/staging/nvec/nvec_kbd.c6
-rw-r--r--drivers/staging/nvec/nvec_paz00.c5
-rw-r--r--drivers/staging/nvec/nvec_power.c6
-rw-r--r--drivers/staging/nvec/nvec_ps2.c6
-rw-r--r--drivers/staging/octeon/ethernet.c5
-rw-r--r--drivers/staging/octeon/octeon-stubs.h4
-rw-r--r--drivers/staging/pi433/TODO3
-rw-r--r--drivers/staging/pi433/pi433_if.c13
-rw-r--r--drivers/staging/qlge/qlge_dbg.c35
-rw-r--r--drivers/staging/r8188eu/Kconfig16
-rw-r--r--drivers/staging/r8188eu/Makefile48
-rw-r--r--drivers/staging/r8188eu/TODO16
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c1181
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c658
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c1568
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c74
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c337
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c1150
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c479
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c160
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c255
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c2072
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c7834
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c1918
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c448
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c2018
-rw-r--r--drivers/staging/r8188eu/core/rtw_rf.c29
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c1374
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c498
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c1551
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c2333
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c654
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c733
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c212
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c269
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c900
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c149
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c139
-rw-r--r--drivers/staging/r8188eu/hal/hal_intf.c50
-rw-r--r--drivers/staging/r8188eu/hal/odm.c821
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c349
-rw-r--r--drivers/staging/r8188eu/hal/odm_RTL8188E.c264
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c694
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c146
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c922
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c704
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c406
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c161
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c641
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c1076
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c502
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h97
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyReg.h1072
-rw-r--r--drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h49
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_BB.h27
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h12
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_RF.h13
-rw-r--r--drivers/staging/r8188eu/include/HalPhyRf_8188e.h36
-rw-r--r--drivers/staging/r8188eu/include/HalPwrSeqCmd.h18
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h42
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h228
-rw-r--r--drivers/staging/r8188eu/include/hal_com.h146
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h44
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h817
-rw-r--r--drivers/staging/r8188eu/include/odm.h416
-rw-r--r--drivers/staging/r8188eu/include/odm_HWConfig.h69
-rw-r--r--drivers/staging/r8188eu/include/odm_RTL8188E.h35
-rw-r--r--drivers/staging/r8188eu/include/odm_RegDefine11N.h47
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h62
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h153
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_cmd.h90
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_dm.h28
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h181
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h40
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_rf.h18
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h1163
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_xmit.h144
-rw-r--r--drivers/staging/r8188eu/include/rtw_ap.h34
-rw-r--r--drivers/staging/r8188eu/include/rtw_br_ext.h43
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h926
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h15
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_event.h97
-rw-r--r--drivers/staging/r8188eu/include/rtw_fw.h17
-rw-r--r--drivers/staging/r8188eu/include/rtw_ht.h28
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h288
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl.h13
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl_set.h25
-rw-r--r--drivers/staging/r8188eu/include/rtw_iol.h55
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h57
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h574
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h753
-rw-r--r--drivers/staging/r8188eu/include/rtw_p2p.h118
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h114
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h347
-rw-r--r--drivers/staging/r8188eu/include/rtw_rf.h80
-rw-r--r--drivers/staging/r8188eu/include/rtw_security.h231
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h373
-rw-r--r--drivers/staging/r8188eu/include/sta_info.h313
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h59
-rw-r--r--drivers/staging/r8188eu/include/usb_ops_linux.h29
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h21
-rw-r--r--drivers/staging/r8188eu/include/wifi.h773
-rw-r--r--drivers/staging/r8188eu/include/wlan_bssdef.h272
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c3777
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c810
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c227
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c472
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c198
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c32
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c423
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h226
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h33
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c799
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h32
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h39
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c290
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h275
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c607
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h45
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/table.c (renamed from drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c)22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/table.h27
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib.h44
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c32
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c80
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c47
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c12
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c42
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c38
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c244
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c87
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c97
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c17
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c108
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c15
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h6
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h9
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h49
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h20
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h9
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c32
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c33
-rw-r--r--drivers/staging/rts5208/ms.c2
-rw-r--r--drivers/staging/rts5208/rtsx.c2
-rw-r--r--drivers/staging/rts5208/xd.c7
-rw-r--r--drivers/staging/sm750fb/sm750.c16
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c16
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c6
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h65
-rw-r--r--drivers/staging/vc04_services/interface/TODO5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c144
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c226
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h38
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c36
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h11
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/Makefile5
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c15
-rw-r--r--drivers/staging/vme_user/Kconfig3
-rw-r--r--drivers/staging/vme_user/vme.h26
-rw-r--r--drivers/staging/vme_user/vme_bridge.h36
-rw-r--r--drivers/staging/vme_user/vme_fake.c5
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c13
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h534
-rw-r--r--drivers/staging/vme_user/vme_user.c2
-rw-r--r--drivers/staging/vt6655/baseband.c44
-rw-r--r--drivers/staging/vt6655/baseband.h2
-rw-r--r--drivers/staging/vt6656/card.c21
-rw-r--r--drivers/staging/vt6656/card.h1
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h171
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c8
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile1
-rw-r--r--drivers/target/iscsi/iscsi_target.c51
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c12
-rw-r--r--drivers/target/loopback/tcm_loop.c50
-rw-r--r--drivers/target/sbp/sbp_target.c31
-rw-r--r--drivers/target/target_core_alua.c4
-rw-r--r--drivers/target/target_core_configfs.c94
-rw-r--r--drivers/target/target_core_device.c44
-rw-r--r--drivers/target/target_core_fabric_configfs.c47
-rw-r--r--drivers/target/target_core_file.c18
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c8
-rw-r--r--drivers/target/target_core_pscsi.c12
-rw-r--r--drivers/target/target_core_spc.c7
-rw-r--r--drivers/target/target_core_stat.c6
-rw-r--r--drivers/target/target_core_tmr.c30
-rw-r--r--drivers/target/target_core_tpg.c73
-rw-r--r--drivers/target/target_core_transport.c199
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/target/target_core_xcopy.c23
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c5
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c15
-rw-r--r--drivers/target/tcm_remote/Kconfig8
-rw-r--r--drivers/target/tcm_remote/Makefile2
-rw-r--r--drivers/target/tcm_remote/tcm_remote.c268
-rw-r--r--drivers/target/tcm_remote/tcm_remote.h20
-rw-r--r--drivers/tee/amdtee/call.c2
-rw-r--r--drivers/tee/amdtee/core.c29
-rw-r--r--drivers/tee/amdtee/shm_pool.c2
-rw-r--r--drivers/tee/optee/Kconfig17
-rw-r--r--drivers/tee/optee/call.c2
-rw-r--r--drivers/tee/optee/optee_msg.h12
-rw-r--r--drivers/tee/optee/optee_private.h24
-rw-r--r--drivers/tee/optee/optee_smc.h24
-rw-r--r--drivers/tee/optee/smc_abi.c259
-rw-r--r--drivers/tee/tee_core.c4
-rw-r--r--drivers/tee/tee_shm.c37
-rw-r--r--drivers/thermal/Kconfig18
-rw-r--r--drivers/thermal/Makefile10
-rw-r--r--drivers/thermal/amlogic_thermal.c12
-rw-r--r--drivers/thermal/armada_thermal.c46
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c3
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c18
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c12
-rw-r--r--drivers/thermal/broadcom/ns-thermal.c2
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c2
-rw-r--r--drivers/thermal/cpufreq_cooling.c4
-rw-r--r--drivers/thermal/cpuidle_cooling.c6
-rw-r--r--drivers/thermal/da9062-thermal.c65
-rw-r--r--drivers/thermal/db8500_thermal.c9
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/dove_thermal.c14
-rw-r--r--drivers/thermal/gov_bang_bang.c37
-rw-r--r--drivers/thermal/gov_fair_share.c20
-rw-r--r--drivers/thermal/gov_power_allocator.c53
-rw-r--r--drivers/thermal/gov_step_wise.c52
-rw-r--r--drivers/thermal/hisi_thermal.c27
-rw-r--r--drivers/thermal/imx8mm_thermal.c7
-rw-r--r--drivers/thermal/imx_sc_thermal.c18
-rw-r--r--drivers/thermal/imx_thermal.c138
-rw-r--r--drivers/thermal/intel/Kconfig23
-rw-r--r--drivers/thermal/intel/Makefile2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c60
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c340
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h16
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c132
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c65
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c96
-rw-r--r--drivers/thermal/intel/intel_hfi.c3
-rw-r--r--drivers/thermal/intel/intel_menlow.c521
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c411
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c556
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c73
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c48
-rw-r--r--drivers/thermal/intel/intel_tcc.c139
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c37
-rw-r--r--drivers/thermal/intel/therm_throt.c73
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c182
-rw-r--r--drivers/thermal/k3_bandgap.c4
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c2
-rw-r--r--drivers/thermal/kirkwood_thermal.c11
-rw-r--r--drivers/thermal/max77620_thermal.c6
-rw-r--r--drivers/thermal/mediatek/Kconfig37
-rw-r--r--drivers/thermal/mediatek/Makefile2
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c (renamed from drivers/thermal/mtk_thermal.c)337
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c1280
-rw-r--r--drivers/thermal/qcom/lmh.c2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c9
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c51
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c655
-rw-r--r--drivers/thermal/qcom/tsens-v1.c340
-rw-r--r--drivers/thermal/qcom/tsens.c225
-rw-r--r--drivers/thermal/qcom/tsens.h46
-rw-r--r--drivers/thermal/qoriq_thermal.c5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c86
-rw-r--r--drivers/thermal/rcar_thermal.c61
-rw-r--r--drivers/thermal/rockchip_thermal.c340
-rw-r--r--drivers/thermal/rzg2l_thermal.c3
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c69
-rw-r--r--drivers/thermal/spear_thermal.c14
-rw-r--r--drivers/thermal/sprd_thermal.c2
-rw-r--r--drivers/thermal/st/Kconfig4
-rw-r--r--drivers/thermal/st/Makefile1
-rw-r--r--drivers/thermal/st/st_thermal.c52
-rw-r--r--drivers/thermal/st/st_thermal_syscfg.c174
-rw-r--r--drivers/thermal/st/stm_thermal.c6
-rw-r--r--drivers/thermal/sun8i_thermal.c8
-rw-r--r--drivers/thermal/tegra/soctherm.c41
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c15
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c49
-rw-r--r--drivers/thermal/thermal-generic-adc.c7
-rw-r--r--drivers/thermal/thermal_acpi.c117
-rw-r--r--drivers/thermal/thermal_core.c412
-rw-r--r--drivers/thermal/thermal_core.h30
-rw-r--r--drivers/thermal/thermal_helpers.c76
-rw-r--r--drivers/thermal/thermal_hwmon.c5
-rw-r--r--drivers/thermal/thermal_hwmon.h4
-rw-r--r--drivers/thermal/thermal_mmio.c6
-rw-r--r--drivers/thermal/thermal_netlink.c24
-rw-r--r--drivers/thermal/thermal_netlink.h3
-rw-r--r--drivers/thermal/thermal_of.c124
-rw-r--r--drivers/thermal/thermal_sysfs.c207
-rw-r--r--drivers/thermal/thermal_trace.h205
-rw-r--r--drivers/thermal/thermal_trace_ipa.h94
-rw-r--r--drivers/thermal/thermal_trip.c182
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c20
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal.h15
-rw-r--r--drivers/thermal/uniphier_thermal.c33
-rw-r--r--drivers/thunderbolt/acpi.c15
-rw-r--r--drivers/thunderbolt/ctl.c54
-rw-r--r--drivers/thunderbolt/ctl.h2
-rw-r--r--drivers/thunderbolt/debugfs.c17
-rw-r--r--drivers/thunderbolt/eeprom.c204
-rw-r--r--drivers/thunderbolt/nhi.c52
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/quirks.c44
-rw-r--r--drivers/thunderbolt/retimer.c43
-rw-r--r--drivers/thunderbolt/sb_regs.h1
-rw-r--r--drivers/thunderbolt/switch.c54
-rw-r--r--drivers/thunderbolt/tb.c528
-rw-r--r--drivers/thunderbolt/tb.h56
-rw-r--r--drivers/thunderbolt/tb_msgs.h11
-rw-r--r--drivers/thunderbolt/tb_regs.h36
-rw-r--r--drivers/thunderbolt/tunnel.c508
-rw-r--r--drivers/thunderbolt/tunnel.h18
-rw-r--r--drivers/thunderbolt/usb4.c677
-rw-r--r--drivers/thunderbolt/xdomain.c47
-rw-r--r--drivers/tty/Kconfig11
-rw-r--r--drivers/tty/amiserial.c18
-rw-r--r--drivers/tty/hvc/hvc_console.c4
-rw-r--r--drivers/tty/hvc/hvc_console.h2
-rw-r--r--drivers/tty/hvc/hvc_iucv.c6
-rw-r--r--drivers/tty/hvc/hvc_xen.c69
-rw-r--r--drivers/tty/hvc/hvcs.c91
-rw-r--r--drivers/tty/moxa.c82
-rw-r--r--drivers/tty/mxser.c17
-rw-r--r--drivers/tty/n_gsm.c391
-rw-r--r--drivers/tty/n_tty.c43
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serdev/core.c21
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c16
-rw-r--r--drivers/tty/serial/8250/8250.h12
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c18
-rw-r--r--drivers/tty/serial/8250/8250_core.c1
-rw-r--r--drivers/tty/serial/8250/8250_dfl.c167
-rw-r--r--drivers/tty/serial/8250/8250_dma.c21
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_em.c117
-rw-r--r--drivers/tty/serial/8250/8250_exar.c14
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c25
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c494
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c40
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h15
-rw-r--r--drivers/tty/serial/8250/8250_port.c79
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c1
-rw-r--r--drivers/tty/serial/8250/Kconfig29
-rw-r--r--drivers/tty/serial/8250/Makefile3
-rw-r--r--drivers/tty/serial/Kconfig35
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c8
-rw-r--r--drivers/tty/serial/arc_uart.c7
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c38
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c5
-rw-r--r--drivers/tty/serial/earlycon-semihost.c (renamed from drivers/tty/serial/earlycon-arm-semihost.c)25
-rw-r--r--drivers/tty/serial/earlycon.c9
-rw-r--r--drivers/tty/serial/fsl_lpuart.c158
-rw-r--r--drivers/tty/serial/imx.c356
-rw-r--r--drivers/tty/serial/kgdboc.c20
-rw-r--r--drivers/tty/serial/liteuart.c241
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c20
-rw-r--r--drivers/tty/serial/meson_uart.c8
-rw-r--r--drivers/tty/serial/msm_serial.c1
-rw-r--r--drivers/tty/serial/mxs-auart.c4
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c662
-rw-r--r--drivers/tty/serial/samsung_tty.c199
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c57
-rw-r--r--drivers/tty/serial/sccnxp.c12
-rw-r--r--drivers/tty/serial/serial-tegra.c7
-rw-r--r--drivers/tty/serial/serial_core.c207
-rw-r--r--drivers/tty/serial/sh-sci.c125
-rw-r--r--drivers/tty/serial/sh-sci.h3
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c45
-rw-r--r--drivers/tty/serial/stm32-usart.h1
-rw-r--r--drivers/tty/serial/sunhv.c8
-rw-r--r--drivers/tty/serial/sunzilog.c4
-rw-r--r--drivers/tty/serial/ucc_uart.c9
-rw-r--r--drivers/tty/synclink_gt.c45
-rw-r--r--drivers/tty/tty.h2
-rw-r--r--drivers/tty/tty_io.c56
-rw-r--r--drivers/tty/tty_ioctl.c62
-rw-r--r--drivers/tty/tty_ldisc.c3
-rw-r--r--drivers/tty/tty_port.c22
-rw-r--r--drivers/tty/vt/vc_screen.c16
-rw-r--r--drivers/tty/vt/vt.c522
-rw-r--r--drivers/ufs/core/Makefile2
-rw-r--r--drivers/ufs/core/ufs-mcq.c431
-rw-r--r--drivers/ufs/core/ufs_bsg.c144
-rw-r--r--drivers/ufs/core/ufshcd-priv.h110
-rw-r--r--drivers/ufs/core/ufshcd.c1091
-rw-r--r--drivers/ufs/core/ufshpb.c4
-rw-r--r--drivers/ufs/host/Kconfig21
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c12
-rw-r--r--drivers/ufs/host/ufs-hisi.c2
-rw-r--r--drivers/ufs/host/ufs-mediatek.c2
-rw-r--r--drivers/ufs/host/ufs-qcom-ice.c2
-rw-r--r--drivers/ufs/host/ufs-qcom.c550
-rw-r--r--drivers/ufs/host/ufs-qcom.h97
-rw-r--r--drivers/ufs/host/ufs-sprd.c458
-rw-r--r--drivers/ufs/host/ufs-sprd.h85
-rw-r--r--drivers/ufs/host/ufshcd-pci.c1
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_hv_generic.c5
-rw-r--r--drivers/usb/Kconfig29
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-debug.h8
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c12
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h28
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c22
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h4
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c27
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c110
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h12
-rw-r--r--drivers/usb/chipidea/Makefile2
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c16
-rw-r--r--drivers/usb/chipidea/core.c19
-rw-r--r--drivers/usb/chipidea/debug.c57
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c6
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/class/cdc-wdm.c3
-rw-r--r--drivers/usb/common/ulpi.c24
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/driver.c8
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/message.c48
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c55
-rw-r--r--drivers/usb/core/usb-acpi.c61
-rw-r--r--drivers/usb/core/usb.c86
-rw-r--r--drivers/usb/core/usb.h3
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/drd.c3
-rw-r--r--drivers/usb/dwc2/gadget.c6
-rw-r--r--drivers/usb/dwc2/hcd_queue.c2
-rw-r--r--drivers/usb/dwc2/params.c3
-rw-r--r--drivers/usb/dwc2/platform.c54
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c443
-rw-r--r--drivers/usb/dwc3/core.h25
-rw-r--r--drivers/usb/dwc3/debug.h5
-rw-r--r--drivers/usb/dwc3/debugfs.c24
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c52
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c132
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c2
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c2
-rw-r--r--drivers/usb/dwc3/ep0.c19
-rw-r--r--drivers/usb/dwc3/gadget.c302
-rw-r--r--drivers/usb/dwc3/host.c7
-rw-r--r--drivers/usb/dwc3/trace.h6
-rw-r--r--drivers/usb/early/xhci-dbc.c8
-rw-r--r--drivers/usb/fotg210/Kconfig2
-rw-r--r--drivers/usb/fotg210/fotg210-core.c88
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c69
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.h1
-rw-r--r--drivers/usb/fotg210/fotg210-udc.c164
-rw-r--r--drivers/usb/fotg210/fotg210-udc.h4
-rw-r--r--drivers/usb/fotg210/fotg210.h27
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/composite.c228
-rw-r--r--drivers/usb/gadget/configfs.c515
-rw-r--r--drivers/usb/gadget/function/f_ecm.c22
-rw-r--r--drivers/usb/gadget/function/f_fs.c118
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_ncm.c4
-rw-r--r--drivers/usb/gadget/function/f_printer.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c35
-rw-r--r--drivers/usb/gadget/function/f_uac2.c1
-rw-r--r--drivers/usb/gadget/function/f_uvc.c150
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/gadget/function/u_ether.c105
-rw-r--r--drivers/usb/gadget/function/u_ether.h4
-rw-r--r--drivers/usb/gadget/function/u_fs.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c23
-rw-r--r--drivers/usb/gadget/function/u_uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc.h4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c1227
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h52
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c16
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c9
-rw-r--r--drivers/usb/gadget/legacy/hid.c7
-rw-r--r--drivers/usb/gadget/legacy/inode.c30
-rw-r--r--drivers/usb/gadget/legacy/webcam.c3
-rw-r--r--drivers/usb/gadget/udc/Kconfig48
-rw-r--r--drivers/usb/gadget/udc/Makefile4
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c1
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c14
-rw-r--r--drivers/usb/gadget/udc/core.c184
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c1
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c11
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c1
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c3
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c1
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c3
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c1
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c7
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c1
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c25
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c64
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c162
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c3395
-rw-r--r--drivers/usb/gadget/udc/rzv2m_usb3drd.c139
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c1319
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c1980
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h99
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc_regs.h146
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c2
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c57
-rw-r--r--drivers/usb/gadget/udc/trace.h5
-rw-r--r--drivers/usb/host/Kconfig81
-rw-r--r--drivers/usb/host/Makefile9
-rw-r--r--drivers/usb/host/ehci-exynos.c23
-rw-r--r--drivers/usb/host/ehci-fsl.c4
-rw-r--r--drivers/usb/host/ehci-ppc-of.c6
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c14
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/max3421-hcd.c17
-rw-r--r--drivers/usb/host/ohci-exynos.c23
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/ohci-omap.c14
-rw-r--r--drivers/usb/host/ohci-pxa27x.c9
-rw-r--r--drivers/usb/host/ohci-sa1111.c5
-rw-r--r--drivers/usb/host/ohci-tmio.c364
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/pci-quirks.c4
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/u132-hcd.c3219
-rw-r--r--drivers/usb/host/uhci-hcd.c6
-rw-r--r--drivers/usb/host/xen-hcd.c4
-rw-r--r--drivers/usb/host/xhci-dbgcap.c191
-rw-r--r--drivers/usb/host/xhci-dbgcap.h4
-rw-r--r--drivers/usb/host/xhci-debugfs.c3
-rw-r--r--drivers/usb/host/xhci-hub.c257
-rw-r--r--drivers/usb/host/xhci-mem.c419
-rw-r--r--drivers/usb/host/xhci-mtk.c1
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-mvebu.c2
-rw-r--r--drivers/usb/host/xhci-pci.c261
-rw-r--r--drivers/usb/host/xhci-plat.c157
-rw-r--r--drivers/usb/host/xhci-plat.h7
-rw-r--r--drivers/usb/host/xhci-rcar.c134
-rw-r--r--drivers/usb/host/xhci-rcar.h55
-rw-r--r--drivers/usb/host/xhci-ring.c94
-rw-r--r--drivers/usb/host/xhci-rzv2m.c38
-rw-r--r--drivers/usb/host/xhci-rzv2m.h16
-rw-r--r--drivers/usb/host/xhci-tegra.c415
-rw-r--r--drivers/usb/host/xhci-trace.c1
-rw-r--r--drivers/usb/host/xhci-trace.h20
-rw-r--r--drivers/usb/host/xhci.c280
-rw-r--r--drivers/usb/host/xhci.h46
-rw-r--r--drivers/usb/image/microtek.c2
-rw-r--r--drivers/usb/misc/Kconfig51
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/ftdi-elan.c2780
-rw-r--r--drivers/usb/misc/iowarrior.c2
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c23
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h12
-rw-r--r--drivers/usb/misc/sisusbvga/sisusbvga.c14
-rw-r--r--drivers/usb/misc/usb251xb.c43
-rw-r--r--drivers/usb/misc/usb3503.c64
-rw-r--r--drivers/usb/mon/mon_bin.c5
-rw-r--r--drivers/usb/mtu3/mtu3.h2
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c1
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c5
-rw-r--r--drivers/usb/mtu3/mtu3_host.c2
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h1
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c2
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c51
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/da8xx.c10
-rw-r--r--drivers/usb/musb/jz4740.c6
-rw-r--r--drivers/usb/musb/mediatek.c9
-rw-r--r--drivers/usb/musb/mpfs.c6
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_dsps.c6
-rw-r--r--drivers/usb/musb/omap2430.c12
-rw-r--r--drivers/usb/musb/sunxi.c105
-rw-r--r--drivers/usb/musb/tusb6010.c6
-rw-r--r--drivers/usb/musb/ux500.c6
-rw-r--r--drivers/usb/phy/Kconfig17
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c6
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c6
-rw-r--r--drivers/usb/phy/phy-generic.c6
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c6
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c1639
-rw-r--r--drivers/usb/phy/phy-keystone.c6
-rw-r--r--drivers/usb/phy/phy-mv-usb.c6
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c8
-rw-r--r--drivers/usb/phy/phy-tahvo.c6
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c8
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c6
-rw-r--r--drivers/usb/phy/phy.c6
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/roles/class.c5
-rw-r--r--drivers/usb/serial/bus.c2
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/option.c37
-rw-r--r--drivers/usb/serial/quatech2.c8
-rw-r--r--drivers/usb/serial/usb-serial.c6
-rw-r--r--drivers/usb/storage/ene_ub6250.c2
-rw-r--r--drivers/usb/storage/uas-detect.h13
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/storage/usb.c2
-rw-r--r--drivers/usb/storage/usb.h2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c48
-rw-r--r--drivers/usb/typec/bus.c39
-rw-r--r--drivers/usb/typec/bus.h4
-rw-r--r--drivers/usb/typec/class.c18
-rw-r--r--drivers/usb/typec/hd3ss3220.c31
-rw-r--r--drivers/usb/typec/mux.c1
-rw-r--r--drivers/usb/typec/mux/Kconfig6
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/gpio-sbu-mux.c172
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c13
-rw-r--r--drivers/usb/typec/pd.c10
-rw-r--r--drivers/usb/typec/retimer.c1
-rw-r--r--drivers/usb/typec/retimer.h2
-rw-r--r--drivers/usb/typec/tcpm/Makefile1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c4
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c387
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c19
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h89
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c (renamed from drivers/usb/typec/tcpm/tcpci_maxim.c)53
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c115
-rw-r--r--drivers/usb/typec/tipd/core.c87
-rw-r--r--drivers/usb/typec/ucsi/Kconfig10
-rw-r--r--drivers/usb/typec/ucsi/Makefile1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c230
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h9
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c46
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c22
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c345
-rw-r--r--drivers/vdpa/Kconfig30
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c32
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h10
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c164
-rw-r--r--drivers/vdpa/mlx5/Makefile2
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h6
-rw-r--r--drivers/vdpa/mlx5/core/mr.c47
-rw-r--r--drivers/vdpa/mlx5/core/resources.c3
-rw-r--r--drivers/vdpa/mlx5/net/debug.c152
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c598
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.h94
-rw-r--r--drivers/vdpa/solidrun/Makefile7
-rw-r--r--drivers/vdpa/solidrun/snet_ctrl.c330
-rw-r--r--drivers/vdpa/solidrun/snet_hwmon.c188
-rw-r--r--drivers/vdpa/solidrun/snet_main.c1117
-rw-r--r--drivers/vdpa/solidrun/snet_vdpa.h208
-rw-r--r--drivers/vdpa/vdpa.c121
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c391
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h21
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c96
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c265
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h1
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c419
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c4
-rw-r--r--drivers/vfio/Kconfig2
-rw-r--r--drivers/vfio/container.c14
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c2
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c4
-rw-r--r--drivers/vfio/group.c55
-rw-r--r--drivers/vfio/iommufd.c52
-rw-r--r--drivers/vfio/mdev/Kconfig8
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c4
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c79
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h28
-rw-r--r--drivers/vfio/pci/mlx5/main.c257
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c13
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c9
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c12
-rw-r--r--drivers/vfio/platform/vfio_platform_irq.c8
-rw-r--r--drivers/vfio/vfio.h33
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c96
-rw-r--r--drivers/vfio/vfio_iommu_type1.c304
-rw-r--r--drivers/vfio/vfio_main.c84
-rw-r--r--drivers/vfio/virqfd.c2
-rw-r--r--drivers/vhost/Kconfig5
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/vhost/scsi.c201
-rw-r--r--drivers/vhost/test.c3
-rw-r--r--drivers/vhost/vdpa.c142
-rw-r--r--drivers/vhost/vhost.c144
-rw-r--r--drivers/vhost/vhost.h14
-rw-r--r--drivers/vhost/vringh.c197
-rw-r--r--drivers/vhost/vsock.c226
-rw-r--r--drivers/video/Kconfig3
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/aperture.c8
-rw-r--r--drivers/video/backlight/Kconfig23
-rw-r--r--drivers/video/backlight/Makefile3
-rw-r--r--drivers/video/backlight/aat2870_bl.c13
-rw-r--r--drivers/video/backlight/adp5520_bl.c6
-rw-r--r--drivers/video/backlight/apple_bl.c31
-rw-r--r--drivers/video/backlight/arcxcnn_bl.c7
-rw-r--r--drivers/video/backlight/as3711_bl.c24
-rw-r--r--drivers/video/backlight/backlight.c4
-rw-r--r--drivers/video/backlight/cr_bllcd.c6
-rw-r--r--drivers/video/backlight/da9052_bl.c6
-rw-r--r--drivers/video/backlight/hp680_bl.c6
-rw-r--r--drivers/video/backlight/hx8357.c2
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c7
-rw-r--r--drivers/video/backlight/ktd253-backlight.c9
-rw-r--r--drivers/video/backlight/ktz8866.c208
-rw-r--r--drivers/video/backlight/lcd.c2
-rw-r--r--drivers/video/backlight/led_bl.c6
-rw-r--r--drivers/video/backlight/lm3533_bl.c6
-rw-r--r--drivers/video/backlight/locomolcd.c10
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/lp8788_bl.c6
-rw-r--r--drivers/video/backlight/mt6370-backlight.c6
-rw-r--r--drivers/video/backlight/pwm_bl.c74
-rw-r--r--drivers/video/backlight/qcom-wled.c7
-rw-r--r--drivers/video/backlight/rt4831-backlight.c6
-rw-r--r--drivers/video/backlight/sky81452-backlight.c8
-rw-r--r--drivers/video/backlight/tosa_bl.c172
-rw-r--r--drivers/video/backlight/tosa_bl.h8
-rw-r--r--drivers/video/backlight/tosa_lcd.c284
-rw-r--r--drivers/video/cmdline.c133
-rw-r--r--drivers/video/console/newport_con.c9
-rw-r--r--drivers/video/console/sticon.c9
-rw-r--r--drivers/video/console/vgacon.c8
-rw-r--r--drivers/video/fbdev/68328fb.c15
-rw-r--r--drivers/video/fbdev/Kconfig81
-rw-r--r--drivers/video/fbdev/Makefile3
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/arcfb.c15
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c24
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c11
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c6
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c10
-rw-r--r--drivers/video/fbdev/au1200fb.c3
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/cg14.c8
-rw-r--r--drivers/video/fbdev/cg3.c8
-rw-r--r--drivers/video/fbdev/cg6.c6
-rw-r--r--drivers/video/fbdev/chipsfb.c14
-rw-r--r--drivers/video/fbdev/clps711x-fb.c19
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c6
-rw-r--r--drivers/video/fbdev/controlfb.c34
-rw-r--r--drivers/video/fbdev/core/Makefile3
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c94
-rw-r--r--drivers/video/fbdev/core/fb_defio.c31
-rw-r--r--drivers/video/fbdev/core/fbcon.c101
-rw-r--r--drivers/video/fbdev/core/fbmem.c41
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c1
-rw-r--r--drivers/video/fbdev/core/modedb.c13
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/efifb.c41
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c6
-rw-r--r--drivers/video/fbdev/ffb.c6
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c6
-rw-r--r--drivers/video/fbdev/g364fb.c6
-rw-r--r--drivers/video/fbdev/gbefb.c6
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c3
-rw-r--r--drivers/video/fbdev/goldfishfb.c5
-rw-r--r--drivers/video/fbdev/grvga.c6
-rw-r--r--drivers/video/fbdev/hecubafb.c5
-rw-r--r--drivers/video/fbdev/hgafb.c42
-rw-r--r--drivers/video/fbdev/hitfb.c6
-rw-r--r--drivers/video/fbdev/hpfb.c8
-rw-r--r--drivers/video/fbdev/hyperv_fb.c26
-rw-r--r--drivers/video/fbdev/imsttfb.c15
-rw-r--r--drivers/video/fbdev/imxfb.c6
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c3
-rw-r--r--drivers/video/fbdev/leo.c6
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c4
-rw-r--r--drivers/video/fbdev/maxinefb.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c5
-rw-r--r--drivers/video/fbdev/metronomefb.c5
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c12
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c8
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c83
-rw-r--r--drivers/video/fbdev/ocfb.c6
-rw-r--r--drivers/video/fbdev/offb.c45
-rw-r--r--drivers/video/fbdev/omap/Kconfig9
-rw-r--r--drivers/video/fbdev/omap/Makefile7
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c82
-rw-r--r--drivers/video/fbdev/omap/lcd_htcherald.c59
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1510.c69
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1610.c99
-rw-r--r--drivers/video/fbdev/omap/lcd_osk.c86
-rw-r--r--drivers/video/fbdev/omap/lcd_palmtt.c65
-rw-r--r--drivers/video/fbdev/omap/lcd_palmz71.c59
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c41
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c33
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c3
-rw-r--r--drivers/video/fbdev/p9100.c10
-rw-r--r--drivers/video/fbdev/platinumfb.c36
-rw-r--r--drivers/video/fbdev/ps3fb.c1
-rw-r--r--drivers/video/fbdev/pxa168fb.c8
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c9
-rw-r--r--drivers/video/fbdev/pxafb.c8
-rw-r--r--drivers/video/fbdev/riva/fbdev.c8
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c5
-rw-r--r--drivers/video/fbdev/s3c-fb.c6
-rw-r--r--drivers/video/fbdev/s3c2410fb-regs-lcd.h143
-rw-r--r--drivers/video/fbdev/s3c2410fb.c1142
-rw-r--r--drivers/video/fbdev/s3c2410fb.h48
-rw-r--r--drivers/video/fbdev/sa1100fb.c33
-rw-r--r--drivers/video/fbdev/sh7760fb.c6
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c5
-rw-r--r--drivers/video/fbdev/simplefb.c25
-rw-r--r--drivers/video/fbdev/sm501fb.c10
-rw-r--r--drivers/video/fbdev/stifb.c184
-rw-r--r--drivers/video/fbdev/tcx.c9
-rw-r--r--drivers/video/fbdev/tgafb.c3
-rw-r--r--drivers/video/fbdev/tmiofb.c1040
-rw-r--r--drivers/video/fbdev/uvesafb.c6
-rw-r--r--drivers/video/fbdev/valkyriefb.c14
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c2
-rw-r--r--drivers/video/fbdev/vesafb.c43
-rw-r--r--drivers/video/fbdev/vfb.c16
-rw-r--r--drivers/video/fbdev/vga16fb.c21
-rw-r--r--drivers/video/fbdev/via/via-gpio.c5
-rw-r--r--drivers/video/fbdev/via/via_i2c.c5
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c6
-rw-r--r--drivers/video/fbdev/w100fb.c1644
-rw-r--r--drivers/video/fbdev/w100fb.h924
-rw-r--r--drivers/video/fbdev/wm8505fb.c11
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c6
-rw-r--r--drivers/video/fbdev/xen-fbfront.c6
-rw-r--r--drivers/video/fbdev/xilinxfb.c12
-rw-r--r--drivers/video/logo/pnmtologo.c674
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c195
-rw-r--r--drivers/virt/fsl_hypervisor.c2
-rw-r--r--drivers/virtio/virtio.c16
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_mem.c12
-rw-r--r--drivers/virtio/virtio_mmio.c19
-rw-r--r--drivers/virtio/virtio_pci_modern.c24
-rw-r--r--drivers/virtio/virtio_ring.c224
-rw-r--r--drivers/virtio/virtio_vdpa.c129
-rw-r--r--drivers/w1/masters/Kconfig9
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/ds1wm.c675
-rw-r--r--drivers/w1/masters/ds2482.c18
-rw-r--r--drivers/w1/masters/ds2490.c13
-rw-r--r--drivers/w1/masters/matrox_w1.c16
-rw-r--r--drivers/w1/masters/omap_hdq.c14
-rw-r--r--drivers/w1/masters/w1-gpio.c6
-rw-r--r--drivers/w1/slaves/w1_ds2406.c35
-rw-r--r--drivers/w1/slaves/w1_ds2408.c12
-rw-r--r--drivers/w1/slaves/w1_ds2413.c8
-rw-r--r--drivers/w1/slaves/w1_ds2433.c23
-rw-r--r--drivers/w1/slaves/w1_ds2780.c1
-rw-r--r--drivers/w1/slaves/w1_ds2781.c1
-rw-r--r--drivers/w1/slaves/w1_ds2805.c2
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c21
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c6
-rw-r--r--drivers/w1/w1.c16
-rw-r--r--drivers/w1/w1_int.c5
-rw-r--r--drivers/watchdog/Kconfig24
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/acquirewdt.c6
-rw-r--r--drivers/watchdog/advantechwdt.c6
-rw-r--r--drivers/watchdog/apple_wdt.c18
-rw-r--r--drivers/watchdog/ar7_wdt.c5
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c15
-rw-r--r--drivers/watchdog/aspeed_wdt.c3
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c6
-rw-r--r--drivers/watchdog/at91sam9_wdt.c7
-rw-r--r--drivers/watchdog/ath79_wdt.c5
-rw-r--r--drivers/watchdog/bcm2835_wdt.c6
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c12
-rw-r--r--drivers/watchdog/bcm7038_wdt.c15
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c6
-rw-r--r--drivers/watchdog/cadence_wdt.c17
-rw-r--r--drivers/watchdog/cpwd.c6
-rw-r--r--drivers/watchdog/da9062_wdt.c15
-rw-r--r--drivers/watchdog/da9063_wdt.c15
-rw-r--r--drivers/watchdog/davinci_wdt.c18
-rw-r--r--drivers/watchdog/diag288_wdt.c161
-rw-r--r--drivers/watchdog/dw_wdt.c56
-rw-r--r--drivers/watchdog/gef_wdt.c6
-rw-r--r--drivers/watchdog/geodewdt.c5
-rw-r--r--drivers/watchdog/iTCO_wdt.c4
-rw-r--r--drivers/watchdog/ib700wdt.c5
-rw-r--r--drivers/watchdog/ie6xx_wdt.c6
-rw-r--r--drivers/watchdog/imgpdc_wdt.c31
-rw-r--r--drivers/watchdog/imx2_wdt.c55
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c15
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c18
-rw-r--r--drivers/watchdog/loongson1_wdt.c36
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c36
-rw-r--r--drivers/watchdog/menz69_wdt.c18
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c16
-rw-r--r--drivers/watchdog/mt7621_wdt.c122
-rw-r--r--drivers/watchdog/mtk_wdt.c7
-rw-r--r--drivers/watchdog/mtx-1_wdt.c5
-rw-r--r--drivers/watchdog/nic7018_wdt.c6
-rw-r--r--drivers/watchdog/nv_tco.c6
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c16
-rw-r--r--drivers/watchdog/omap_wdt.c6
-rw-r--r--drivers/watchdog/orion_wdt.c5
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/pic32-dmt.c15
-rw-r--r--drivers/watchdog/pic32-wdt.c17
-rw-r--r--drivers/watchdog/pnx4008_wdt.c15
-rw-r--r--drivers/watchdog/qcom-wdt.c16
-rw-r--r--drivers/watchdog/rc32434_wdt.c5
-rw-r--r--drivers/watchdog/rdc321x_wdt.c6
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c17
-rw-r--r--drivers/watchdog/renesas_wdt.c6
-rw-r--r--drivers/watchdog/riowd.c6
-rw-r--r--drivers/watchdog/rn5t618_wdt.c12
-rw-r--r--drivers/watchdog/rt2880_wdt.c89
-rw-r--r--drivers/watchdog/rtd119x_wdt.c16
-rw-r--r--drivers/watchdog/rti_wdt.c6
-rw-r--r--drivers/watchdog/rzg2l_wdt.c45
-rw-r--r--drivers/watchdog/rzn1_wdt.c18
-rw-r--r--drivers/watchdog/s3c2410_wdt.c210
-rw-r--r--drivers/watchdog/sa1100_wdt.c6
-rw-r--r--drivers/watchdog/sbsa_gwdt.c5
-rw-r--r--drivers/watchdog/sch311x_wdt.c5
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/sp5100_tco.c4
-rw-r--r--drivers/watchdog/st_lpc_wdt.c6
-rw-r--r--drivers/watchdog/starfive-wdt.c606
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c5
-rw-r--r--drivers/watchdog/visconti_wdt.c17
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/watchdog_dev.c27
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c3
-rw-r--r--drivers/watchdog/wdat_wdt.c6
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/watchdog/wm8350_wdt.c9
-rw-r--r--drivers/watchdog/ziirave_wdt.c5
-rw-r--r--drivers/xen/balloon.c20
-rw-r--r--drivers/xen/efi.c61
-rw-r--r--drivers/xen/events/events_base.c9
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/gntdev.c4
-rw-r--r--drivers/xen/grant-dma-iommu.c11
-rw-r--r--drivers/xen/pcpu.c20
-rw-r--r--drivers/xen/platform-pci.c5
-rw-r--r--drivers/xen/privcmd-buf.c2
-rw-r--r--drivers/xen/privcmd.c4
-rw-r--r--drivers/xen/pvcalls-back.c13
-rw-r--r--drivers/xen/pvcalls-front.c53
-rw-r--r--drivers/xen/sys-hypervisor.c71
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c6
-rw-r--r--drivers/xen/xen-pciback/xenbus.c4
-rw-r--r--drivers/xen/xen-scsiback.c61
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c4
-rw-r--r--drivers/xen/xenfs/xensyms.c10
-rw-r--r--drivers/zorro/zorro-driver.c4
10265 files changed, 698992 insertions, 392958 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 968bd0a6fd78..514ae6b24cb2 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -241,4 +241,6 @@ source "drivers/peci/Kconfig"
source "drivers/hte/Kconfig"
+source "drivers/cdx/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index bdf1c66141c9..7241d80a7b29 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -6,6 +6,11 @@
# Rewritten to use lists instead of if-statements.
#
+# Some driver Makefiles miss $(srctree)/ for include directive.
+ifdef building_out_of_srctree
+MAKEFLAGS += --include-dir=$(srctree)
+endif
+
obj-y += irqchip/
obj-y += bus/
@@ -71,7 +76,7 @@ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
obj-$(CONFIG_PARPORT) += parport/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
-obj-$(CONFIG_DAX) += dax/
+obj-y += dax/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += cxl/
@@ -98,7 +103,6 @@ obj-$(CONFIG_DIO) += dio/
obj-$(CONFIG_SBUS) += sbus/
obj-$(CONFIG_ZORRO) += zorro/
obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
-obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
@@ -137,7 +141,7 @@ obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-y += clocksource/
obj-$(CONFIG_DCA) += dca/
-obj-$(CONFIG_HID) += hid/
+obj-$(CONFIG_HID_SUPPORT) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
@@ -189,3 +193,5 @@ obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_PECI) += peci/
obj-$(CONFIG_HTE) += hte/
+obj-$(CONFIG_DRM_ACCEL) += accel/
+obj-$(CONFIG_CDX_BUS) += cdx/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index c9ce849b2984..64065fb8922b 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -6,9 +6,10 @@
# as, but not limited to, Machine-Learning and Deep-Learning acceleration
# devices
#
+if DRM
+
menuconfig DRM_ACCEL
bool "Compute Acceleration Framework"
- depends on DRM
help
Framework for device drivers of compute acceleration devices, such
as, but not limited to, Machine-Learning and Deep-Learning
@@ -22,3 +23,9 @@ menuconfig DRM_ACCEL
major number than GPUs, and will be exposed to user-space using
different device files, called accel/accel* (in /dev, sysfs
and debugfs).
+
+source "drivers/accel/habanalabs/Kconfig"
+source "drivers/accel/ivpu/Kconfig"
+source "drivers/accel/qaic/Kconfig"
+
+endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
new file mode 100644
index 000000000000..ab3df932937f
--- /dev/null
+++ b/drivers/accel/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
+obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
+obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 1b69824286fd..4a9baf02439e 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -34,7 +34,7 @@ static char *accel_devnode(const struct device *dev, umode_t *mode)
static int accel_sysfs_init(void)
{
- accel_class = class_create(THIS_MODULE, "accel");
+ accel_class = class_create("accel");
if (IS_ERR(accel_class))
return PTR_ERR(accel_class);
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index bd01d0d940c0..be85336107f9 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -3,8 +3,10 @@
# HabanaLabs AI accelerators driver
#
-config HABANA_AI
- tristate "HabanaAI accelerators (habanalabs)"
+config DRM_ACCEL_HABANALABS
+ tristate "HabanaLabs AI accelerators"
+ depends on DRM_ACCEL
+ depends on X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
@@ -19,7 +21,7 @@ config HABANA_AI
the user to submit workloads to the devices.
The user-space interface is described in
- include/uapi/misc/habanalabs.h
+ include/uapi/drm/habanalabs_accel.h
If unsure, say N.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/accel/habanalabs/Makefile
index a48a9e0969ed..98510cdd5066 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/accel/habanalabs/Makefile
@@ -3,7 +3,7 @@
# Makefile for HabanaLabs AI accelerators driver
#
-obj-$(CONFIG_HABANA_AI) := habanalabs.o
+obj-$(CONFIG_DRM_ACCEL_HABANALABS) := habanalabs.o
include $(src)/common/Makefile
habanalabs-y += $(HL_COMMON_FILES)
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..e6abffea9f87 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/accel/habanalabs/common/asid.c
index c9c2619cc43d..c9c2619cc43d 100644
--- a/drivers/misc/habanalabs/common/asid.c
+++ b/drivers/accel/habanalabs/common/asid.c
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/accel/habanalabs/common/command_buffer.c
index 2b332991ac6a..6e09f48750a0 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/accel/habanalabs/common/command_buffer.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/mm.h>
@@ -45,20 +45,29 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
}
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
if (rc) {
dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
- goto err_va_umap;
+ goto err_va_pool_free;
}
+
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
+ if (rc)
+ goto err_mmu_unmap;
+
mutex_unlock(&hdev->mmu_lock);
cb->is_mmu_mapped = true;
- return rc;
-err_va_umap:
+ return 0;
+
+err_mmu_unmap:
+ hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
+err_va_pool_free:
mutex_unlock(&hdev->mmu_lock);
gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
+
return rc;
}
@@ -88,6 +97,7 @@ static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
if (cb->is_pool) {
+ atomic_set(&cb->is_handle_destroyed, 0);
spin_lock(&hdev->cb_pool_lock);
list_add(&cb->pool_list, &hdev->cb_pool);
spin_unlock(&hdev->cb_pool_lock);
@@ -298,8 +308,25 @@ int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
+ struct hl_cb *cb;
int rc;
+ cb = hl_cb_get(mmg, cb_handle);
+ if (!cb) {
+ dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
+ cb_handle);
+ return -EINVAL;
+ }
+
+ /* Make sure that CB handle isn't destroyed more than once */
+ rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
+ hl_cb_put(cb);
+ if (rc) {
+ dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
+ cb_handle);
+ return -EINVAL;
+ }
+
rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
if (rc < 0)
return rc; /* Invalid handle */
@@ -350,7 +377,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
int rc;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute CB IOCTL\n",
hdev->status[status]);
return -EBUSY;
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index ea0e5101c10e..af9d2e22c6e7 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/uaccess.h>
@@ -13,10 +13,11 @@
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
- HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+ HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
+ HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
-#define MAX_TS_ITER_NUM 10
+#define MAX_TS_ITER_NUM 100
/**
* enum hl_cs_wait_status - cs wait status
@@ -397,8 +398,16 @@ static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
* flow by calling 'hl_hw_queue_update_ci'.
*/
if (cs_needs_completion(cs) &&
- (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW))
+ (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
+
+ /* In CS based completions, the timestamp is already available,
+ * so no need to extract it from job
+ */
+ if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
+ cs->completion_timestamp = job->timestamp;
+
cs_put(cs);
+ }
hl_cs_job_put(job);
}
@@ -648,7 +657,7 @@ static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
/*
* we get refcount upon reservation of signals or signal/wait cs for the
* hw_sob object, and need to put it when the first staged cs
- * (which cotains the encaps signals) or cs signal/wait is completed.
+ * (which contains the encaps signals) or cs signal/wait is completed.
*/
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
@@ -775,7 +784,7 @@ out:
}
if (cs->timestamp) {
- cs->fence->timestamp = ktime_get();
+ cs->fence->timestamp = cs->completion_timestamp;
hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
cs->fence->timestamp, cs->fence->error);
}
@@ -1073,9 +1082,8 @@ static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend, *temp;
- unsigned long flags;
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
if (pend->ts_reg_info.buf) {
list_del(&pend->wait_list_node);
@@ -1086,7 +1094,7 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
complete_all(&pend->fence.completion);
}
}
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
@@ -1117,6 +1125,27 @@ void hl_release_pending_user_interrupts(struct hl_device *hdev)
wake_pending_user_interrupt_threads(interrupt);
}
+static void force_complete_cs(struct hl_device *hdev)
+{
+ struct hl_cs *cs;
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
+ cs->fence->error = -EIO;
+ complete_all(&cs->fence->completion);
+ }
+
+ spin_unlock(&hdev->cs_mirror_lock);
+}
+
+void hl_abort_waitings_for_completion(struct hl_device *hdev)
+{
+ force_complete_cs(hdev);
+ force_complete_multi_cs(hdev);
+ hl_release_pending_user_interrupts(hdev);
+}
+
static void job_wq_completion(struct work_struct *work)
{
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
@@ -1138,6 +1167,22 @@ static void cs_completion(struct work_struct *work)
hl_complete_job(hdev, job);
}
+u32 hl_get_active_cs_num(struct hl_device *hdev)
+{
+ u32 active_cs_num = 0;
+ struct hl_cs *cs;
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
+ if (!cs->completed)
+ active_cs_num++;
+
+ spin_unlock(&hdev->cs_mirror_lock);
+
+ return active_cs_num;
+}
+
static int validate_queue_index(struct hl_device *hdev,
struct hl_cs_chunk *chunk,
enum hl_queue_type *queue_type,
@@ -1274,6 +1319,10 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_UNRESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
return CS_TYPE_ENGINE_CORE;
+ else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
+ return CS_TYPE_ENGINES;
+ else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
+ return CS_TYPE_FLUSH_PCI_HBW_WRITES;
else
return CS_TYPE_DEFAULT;
}
@@ -1286,6 +1335,13 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
enum hl_device_status status;
enum hl_cs_type cs_type;
bool is_sync_stream;
+ int i;
+
+ for (i = 0 ; i < sizeof(args->in.pad) ; i++)
+ if (args->in.pad[i]) {
+ dev_dbg(hdev->dev, "Padding bytes must be 0\n");
+ return -EINVAL;
+ }
if (!hl_device_operational(hdev, &status)) {
return -EBUSY;
@@ -2390,10 +2446,13 @@ out:
static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
u32 num_engine_cores, u32 core_command)
{
- int rc;
struct hl_device *hdev = hpriv->hdev;
void __user *engine_cores_arr;
u32 *cores;
+ int rc;
+
+ if (!hdev->asic_prop.supports_engine_modes)
+ return -EPERM;
if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
@@ -2422,6 +2481,63 @@ static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
return rc;
}
+static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr,
+ u32 num_engines, enum hl_engine_command command)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 *engines, max_num_of_engines;
+ void __user *engines_arr;
+ int rc;
+
+ if (!hdev->asic_prop.supports_engine_modes)
+ return -EPERM;
+
+ if (command >= HL_ENGINE_COMMAND_MAX) {
+ dev_err(hdev->dev, "Engine command is invalid\n");
+ return -EINVAL;
+ }
+
+ max_num_of_engines = hdev->asic_prop.max_num_of_engines;
+ if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT)
+ max_num_of_engines = hdev->asic_prop.num_engine_cores;
+
+ if (!num_engines || num_engines > max_num_of_engines) {
+ dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines);
+ return -EINVAL;
+ }
+
+ engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr;
+ engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) {
+ dev_err(hdev->dev, "Failed to copy engine-ids array from user\n");
+ kfree(engines);
+ return -EFAULT;
+ }
+
+ rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command);
+ kfree(engines);
+
+ return rc;
+}
+
+static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (!prop->hbw_flush_reg) {
+ dev_dbg(hdev->dev, "HBW flush is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ RREG32(prop->hbw_flush_reg);
+
+ return 0;
+}
+
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
@@ -2478,6 +2594,13 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
args->in.num_engine_cores, args->in.core_command);
break;
+ case CS_TYPE_ENGINES:
+ rc = cs_ioctl_engines(hpriv, args->in.engines,
+ args->in.num_engines, args->in.engine_command);
+ break;
+ case CS_TYPE_FLUSH_PCI_HBW_WRITES:
+ rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
@@ -2569,7 +2692,9 @@ report_results:
*status = CS_WAIT_STATUS_BUSY;
}
- if (error == -ETIMEDOUT || error == -EIO)
+ if (completion_rc == -ERESTARTSYS)
+ rc = completion_rc;
+ else if (error == -ETIMEDOUT || error == -EIO)
rc = error;
return rc;
@@ -2699,7 +2824,8 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
break;
default:
dev_err(hdev->dev, "Invalid fence status\n");
- return -EINVAL;
+ rc = -EINVAL;
+ break;
}
}
@@ -2828,6 +2954,9 @@ static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
if (completion_rc > 0)
mcs_data->timestamp = mcs_compl->timestamp;
+ if (completion_rc == -ERESTARTSYS)
+ return completion_rc;
+
mcs_data->wait_status = completion_rc;
return 0;
@@ -2870,7 +2999,13 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
u32 size_to_copy;
u64 *cs_seq_arr;
u8 seq_arr_len;
- int rc;
+ int rc, i;
+
+ for (i = 0 ; i < sizeof(args->in.pad) ; i++)
+ if (args->in.pad[i]) {
+ dev_dbg(hdev->dev, "Padding bytes must be 0\n");
+ return -EINVAL;
+ }
if (!hdev->supports_wait_for_multi_cs) {
dev_err(hdev->dev, "Wait for multi CS is not supported\n");
@@ -2973,15 +3108,15 @@ put_ctx:
free_seq_arr:
kfree(cs_seq_arr);
- if (rc)
- return rc;
-
- if (mcs_data.wait_status == -ERESTARTSYS) {
+ if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for Multi-CS\n");
- return -EINTR;
+ rc = -EINTR;
}
+ if (rc)
+ return rc;
+
/* update output args */
memset(args, 0, sizeof(*args));
@@ -3074,8 +3209,9 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
struct hl_user_pending_interrupt *cb_last =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
(ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
- unsigned long flags, iter_counter = 0;
+ unsigned long iter_counter = 0;
u64 current_cq_counter;
+ ktime_t timestamp;
/* Validate ts_offset not exceeding last max */
if (requested_offset_record >= cb_last) {
@@ -3084,8 +3220,10 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
return -EINVAL;
}
+ timestamp = ktime_get();
+
start_over:
- spin_lock_irqsave(wait_list_lock, flags);
+ spin_lock(wait_list_lock);
/* Unregister only if we didn't reach the target value
* since in this case there will be no handling in irq context
@@ -3096,7 +3234,7 @@ start_over:
current_cq_counter = *requested_offset_record->cq_kernel_addr;
if (current_cq_counter < requested_offset_record->cq_target_value) {
list_del(&requested_offset_record->wait_list_node);
- spin_unlock_irqrestore(wait_list_lock, flags);
+ spin_unlock(wait_list_lock);
hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
@@ -3107,30 +3245,30 @@ start_over:
dev_dbg(buf->mmg->dev,
"ts node in middle of irq handling\n");
- /* irq handling in the middle give it time to finish */
- spin_unlock_irqrestore(wait_list_lock, flags);
- usleep_range(1, 10);
+	/* irq thread handling is in the middle, give it time to finish */
+ spin_unlock(wait_list_lock);
+ usleep_range(100, 1000);
if (++iter_counter == MAX_TS_ITER_NUM) {
dev_err(buf->mmg->dev,
- "handling registration interrupt took too long!!\n");
- return -EINVAL;
+ "Timestamp offset processing reached timeout of %lld ms\n",
+ ktime_ms_delta(ktime_get(), timestamp));
+ return -EAGAIN;
}
goto start_over;
}
} else {
- spin_unlock_irqrestore(wait_list_lock, flags);
- }
+ /* Fill up the new registration node info */
+ requested_offset_record->ts_reg_info.buf = buf;
+ requested_offset_record->ts_reg_info.cq_cb = cq_cb;
+ requested_offset_record->ts_reg_info.timestamp_kernel_addr =
+ (u64 *) ts_buff->user_buff_address + ts_offset;
+ requested_offset_record->cq_kernel_addr =
+ (u64 *) cq_cb->kernel_address + cq_offset;
+ requested_offset_record->cq_target_value = target_value;
- /* Fill up the new registration node info */
- requested_offset_record->ts_reg_info.in_use = 1;
- requested_offset_record->ts_reg_info.buf = buf;
- requested_offset_record->ts_reg_info.cq_cb = cq_cb;
- requested_offset_record->ts_reg_info.timestamp_kernel_addr =
- (u64 *) ts_buff->user_buff_address + ts_offset;
- requested_offset_record->cq_kernel_addr =
- (u64 *) cq_cb->kernel_address + cq_offset;
- requested_offset_record->cq_target_value = target_value;
+ spin_unlock(wait_list_lock);
+ }
*pend = requested_offset_record;
@@ -3149,7 +3287,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
struct hl_user_pending_interrupt *pend;
struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
- unsigned long timeout, flags;
+ unsigned long timeout;
long completion_rc;
int rc = 0;
@@ -3179,7 +3317,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto put_cq_cb;
}
- /* Find first available record */
+ /* get ts buffer record */
rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
cq_counters_offset, target_value,
&interrupt->wait_list_lock, &pend);
@@ -3196,7 +3334,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
pend->cq_target_value = target_value;
}
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@@ -3204,7 +3342,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (*pend->cq_kernel_addr >= target_value) {
if (register_ts_record)
pend->ts_reg_info.in_use = 0;
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_COMPLETED;
@@ -3216,7 +3354,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto set_timestamp;
}
} else if (!timeout_us) {
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_BUSY;
pend->fence.timestamp = ktime_get();
goto set_timestamp;
@@ -3227,9 +3365,21 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* Note that we cannot have sorted list by target value,
* in order to shorten the list pass loop, since
* same list could have nodes for different cq counter handle.
+	 * Note:
+	 * Mark the ts buff offset as in use here, inside the spinlock protection area,
+	 * to avoid getting into the re-use section in ts_buff_get_kernel_ts_record
+	 * before adding the node to the list. This scenario might happen when
+	 * multiple threads are racing on the same offset: one thread could
+	 * set the ts buff in ts_buff_get_kernel_ts_record, then the other thread
+	 * takes over and gets to ts_buff_get_kernel_ts_record, and then we will try
+	 * to re-use the same ts buff offset and will try to delete a non-existing
+	 * node from the list.
*/
+ if (register_ts_record)
+ pend->ts_reg_info.in_use = 1;
+
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
if (register_ts_record) {
rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
@@ -3273,9 +3423,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* for ts record, the node will be deleted in the irq handler after
* we reach the target value.
*/
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
set_timestamp:
*timestamp = ktime_to_ns(pend->fence.timestamp);
@@ -3303,7 +3453,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
- unsigned long timeout, flags;
+ unsigned long timeout;
u64 completion_value;
long completion_rc;
int rc = 0;
@@ -3323,9 +3473,9 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@@ -3356,14 +3506,14 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
/* reinit_completion must be called before we check for user
* completion value, otherwise, if interrupt is received after
* the comparison and before the next wait_for_completion,
* we will reach timeout and fail
*/
reinit_completion(&pend->fence.completion);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
@@ -3400,9 +3550,9 @@ wait_again:
}
remove_pending_user_interrupt:
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
*timestamp = ktime_to_ns(pend->fence.timestamp);
@@ -3489,14 +3639,15 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
+ struct hl_device *hdev = hpriv->hdev;
union hl_wait_cs_args *args = data;
u32 flags = args->in.flags;
int rc;
- /* If the device is not operational, no point in waiting for any command submission or
- * user interrupt
+ /* If the device is not operational, or if an error has happened and user should release the
+ * device, there is no point in waiting for any command submission or user interrupt.
*/
- if (!hl_device_operational(hpriv->hdev, NULL))
+ if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
return -EBUSY;
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index 9c8b1b37b510..9c8b1b37b510 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 945c0e6758ca..22dd17c077c0 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -258,7 +258,7 @@ static int vm_show(struct seq_file *s, void *data)
if (!dev_entry->hdev->mmu_enable)
return 0;
- spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
once = false;
@@ -329,7 +329,7 @@ static int vm_show(struct seq_file *s, void *data)
}
- spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
ctx = hl_get_compute_ctx(dev_entry->hdev);
if (ctx) {
@@ -1583,209 +1583,216 @@ static const struct file_operations hl_debugfs_fops = {
.release = single_release,
};
-static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry)
+static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
debugfs_create_u8("i2c_bus",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_bus);
debugfs_create_u8("i2c_addr",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_addr);
debugfs_create_u8("i2c_reg",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_reg);
debugfs_create_u8("i2c_len",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_len);
debugfs_create_file("i2c_data",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_i2c_data_fops);
debugfs_create_file("led0",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_led0_fops);
debugfs_create_file("led1",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_led1_fops);
debugfs_create_file("led2",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_led2_fops);
}
-void hl_debugfs_add_device(struct hl_device *hdev)
+static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
+ struct dentry *root)
{
- struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
int count = ARRAY_SIZE(hl_debugfs_list);
struct hl_debugfs_entry *entry;
int i;
- dev_entry->hdev = hdev;
- dev_entry->entry_arr = kmalloc_array(count,
- sizeof(struct hl_debugfs_entry),
- GFP_KERNEL);
- if (!dev_entry->entry_arr)
- return;
-
- dev_entry->data_dma_blob_desc.size = 0;
- dev_entry->data_dma_blob_desc.data = NULL;
- dev_entry->mon_dump_blob_desc.size = 0;
- dev_entry->mon_dump_blob_desc.data = NULL;
-
- INIT_LIST_HEAD(&dev_entry->file_list);
- INIT_LIST_HEAD(&dev_entry->cb_list);
- INIT_LIST_HEAD(&dev_entry->cs_list);
- INIT_LIST_HEAD(&dev_entry->cs_job_list);
- INIT_LIST_HEAD(&dev_entry->userptr_list);
- INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
- mutex_init(&dev_entry->file_mutex);
- init_rwsem(&dev_entry->state_dump_sem);
- spin_lock_init(&dev_entry->cb_spinlock);
- spin_lock_init(&dev_entry->cs_spinlock);
- spin_lock_init(&dev_entry->cs_job_spinlock);
- spin_lock_init(&dev_entry->userptr_spinlock);
- spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
-
- dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
- hl_debug_root);
-
debugfs_create_x64("memory_scrub_val",
0644,
- dev_entry->root,
+ root,
&hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_mem_scrub_fops);
debugfs_create_x64("addr",
0644,
- dev_entry->root,
+ root,
&dev_entry->addr);
debugfs_create_file("data32",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_data32b_fops);
debugfs_create_file("data64",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_data64b_fops);
debugfs_create_file("set_power_state",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_clk_gate_fops);
debugfs_create_file("stop_on_err",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_razwi_check_fops);
debugfs_create_file("dma_size",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_dma_size_fops);
debugfs_create_blob("data_dma",
0400,
- dev_entry->root,
+ root,
&dev_entry->data_dma_blob_desc);
debugfs_create_file("monitor_dump_trig",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_monitor_dump_fops);
debugfs_create_blob("monitor_dump",
0400,
- dev_entry->root,
+ root,
&dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
- dev_entry->root,
+ root,
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
- dev_entry->root,
+ root,
dev_entry,
&hl_state_dump_fops);
debugfs_create_file("timeout_locked",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_timeout_locked_fops);
debugfs_create_u32("device_release_watchdog_timeout",
0644,
- dev_entry->root,
+ root,
&hdev->device_release_watchdog_timeout_sec);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
- dev_entry->root,
+ root,
entry,
&hl_debugfs_fops);
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
+}
+
+void hl_debugfs_add_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+ int count = ARRAY_SIZE(hl_debugfs_list);
+
+ dev_entry->hdev = hdev;
+ dev_entry->entry_arr = kmalloc_array(count,
+ sizeof(struct hl_debugfs_entry),
+ GFP_KERNEL);
+ if (!dev_entry->entry_arr)
+ return;
+
+ dev_entry->data_dma_blob_desc.size = 0;
+ dev_entry->data_dma_blob_desc.data = NULL;
+ dev_entry->mon_dump_blob_desc.size = 0;
+ dev_entry->mon_dump_blob_desc.data = NULL;
+
+ INIT_LIST_HEAD(&dev_entry->file_list);
+ INIT_LIST_HEAD(&dev_entry->cb_list);
+ INIT_LIST_HEAD(&dev_entry->cs_list);
+ INIT_LIST_HEAD(&dev_entry->cs_job_list);
+ INIT_LIST_HEAD(&dev_entry->userptr_list);
+ INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
+ mutex_init(&dev_entry->file_mutex);
+ init_rwsem(&dev_entry->state_dump_sem);
+ spin_lock_init(&dev_entry->cb_spinlock);
+ spin_lock_init(&dev_entry->cs_spinlock);
+ spin_lock_init(&dev_entry->cs_job_spinlock);
+ spin_lock_init(&dev_entry->userptr_spinlock);
+ mutex_init(&dev_entry->ctx_mem_hash_mutex);
+
+ dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
+ hl_debug_root);
+ add_files_to_device(hdev, dev_entry, dev_entry->root);
if (!hdev->asic_prop.fw_security_enabled)
- add_secured_nodes(dev_entry);
+ add_secured_nodes(dev_entry, dev_entry->root);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
@@ -1795,6 +1802,7 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
debugfs_remove_recursive(entry->root);
+ mutex_destroy(&entry->ctx_mem_hash_mutex);
mutex_destroy(&entry->file_mutex);
vfree(entry->data_dma_blob_desc.data);
@@ -1901,18 +1909,18 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
- spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
- spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
- spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_del(&ctx->debugfs_list);
- spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
/**
diff --git a/drivers/misc/habanalabs/common/decoder.c b/drivers/accel/habanalabs/common/decoder.c
index 2aab14d74b53..c03a6da45d00 100644
--- a/drivers/misc/habanalabs/common/decoder.c
+++ b/drivers/accel/habanalabs/common/decoder.c
@@ -43,36 +43,44 @@ static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
intr_source[2], intr_source[3], intr_source[4], intr_source[5]);
}
-static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_id)
+static void dec_abnrm_intr_work(struct work_struct *work)
{
+ struct hl_dec *dec = container_of(work, struct hl_dec, abnrm_intr_work);
+ struct hl_device *hdev = dec->hdev;
+ u32 irq_status, event_mask = 0;
bool reset_required = false;
- u32 irq_status;
- irq_status = RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
+ irq_status = RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);
- dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, core_id);
+ dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, dec->core_id);
dec_print_abnrm_intr_source(hdev, irq_status);
- if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
- reset_required = true;
-
/* Clear the interrupt */
- WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
+ WREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
/* Flush the interrupt clear */
- RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
-
- if (reset_required)
- hl_device_reset(hdev, HL_DRV_RESET_HARD);
-}
+ RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);
-static void dec_completion_abnrm(struct work_struct *work)
-{
- struct hl_dec *dec = container_of(work, struct hl_dec, completion_abnrm_work);
- struct hl_device *hdev = dec->hdev;
+ if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK) {
+ reset_required = true;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ }
- dec_error_intr_work(hdev, dec->base_addr, dec->core_id);
+ if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK)
+ event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
+
+ if (irq_status & (VCMD_IRQ_STATUS_ENDCMD_MASK |
+ VCMD_IRQ_STATUS_BUSERR_MASK |
+ VCMD_IRQ_STATUS_ABORT_MASK))
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+
+ if (reset_required) {
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
+ hl_device_cond_reset(hdev, 0, event_mask);
+ } else if (event_mask) {
+ hl_notifier_event_send_all(hdev, event_mask);
+ }
}
void hl_dec_fini(struct hl_device *hdev)
@@ -98,7 +106,7 @@ int hl_dec_init(struct hl_device *hdev)
dec = hdev->dec + j;
dec->hdev = hdev;
- INIT_WORK(&dec->completion_abnrm_work, dec_completion_abnrm);
+ INIT_WORK(&dec->abnrm_intr_work, dec_abnrm_intr_work);
dec->core_id = j;
dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, j);
if (!dec->base_addr) {
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 87ab329e65d4..fabfc501ef54 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -7,7 +7,7 @@
#define pr_fmt(fmt) "habanalabs: " fmt
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/pci.h>
@@ -22,7 +22,6 @@
enum dma_alloc_type {
DMA_ALLOC_COHERENT,
- DMA_ALLOC_CPU_ACCESSIBLE,
DMA_ALLOC_POOL,
};
@@ -121,9 +120,6 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
case DMA_ALLOC_COHERENT:
ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
break;
- case DMA_ALLOC_CPU_ACCESSIBLE:
- ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
- break;
case DMA_ALLOC_POOL:
ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
break;
@@ -147,9 +143,6 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
case DMA_ALLOC_COHERENT:
hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
break;
- case DMA_ALLOC_CPU_ACCESSIBLE:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
- break;
case DMA_ALLOC_POOL:
hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
break;
@@ -170,18 +163,6 @@ void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void
hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}
-void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
- dma_addr_t *dma_handle, const char *caller)
-{
- return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
-}
-
-void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
- const char *caller)
-{
- hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
-}
-
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller)
{
@@ -194,6 +175,16 @@ void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_
hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+{
+ return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+}
+
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+{
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
+}
+
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -389,18 +380,17 @@ bool hl_ctrl_device_operational(struct hl_device *hdev,
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
- u32 pad_width[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {};
-
- BUILD_BUG_ON(HL_BUSY_ENGINES_MASK_EXT_SIZE != 4);
-
- pad_width[3] = idle_mask[3] ? 16 : 0;
- pad_width[2] = idle_mask[2] || pad_width[3] ? 16 : 0;
- pad_width[1] = idle_mask[1] || pad_width[2] ? 16 : 0;
- pad_width[0] = idle_mask[0] || pad_width[1] ? 16 : 0;
-
- dev_err(hdev->dev, "%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n",
- message, pad_width[3], idle_mask[3], pad_width[2], idle_mask[2],
- pad_width[1], idle_mask[1], pad_width[0], idle_mask[0]);
+ if (idle_mask[3])
+ dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
+ message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
+ else if (idle_mask[2])
+ dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
+ message, idle_mask[2], idle_mask[1], idle_mask[0]);
+ else if (idle_mask[1])
+ dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
+ message, idle_mask[1], idle_mask[0]);
+ else
+ dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
}
static void hpriv_release(struct kref *ref)
@@ -423,13 +413,18 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
+ /* There should be no memory buffers at this point and handles IDR can be destroyed */
+ hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
+
/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
* reset that waits for device release.
*/
reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
- /* Unless device is reset in any case, check idle status and reset if device is not idle */
- if (!reset_device && hdev->pdev && !hdev->pldm)
+ /* Check the device idle status and reset if not idle.
+ * Skip it if already in reset, or if device is going to be reset in any case.
+ */
+ if (!hdev->reset_info.in_reset && !reset_device && hdev->pdev && !hdev->pldm)
device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
if (!device_is_idle) {
@@ -490,6 +485,36 @@ int hl_hpriv_put(struct hl_fpriv *hpriv)
return kref_put(&hpriv->refcount, hpriv_release);
}
+static void print_device_in_use_info(struct hl_device *hdev, const char *message)
+{
+ u32 active_cs_num, dmabuf_export_cnt;
+ bool unknown_reason = true;
+ char buf[128];
+ size_t size;
+ int offset;
+
+ size = sizeof(buf);
+ offset = 0;
+
+ active_cs_num = hl_get_active_cs_num(hdev);
+ if (active_cs_num) {
+ unknown_reason = false;
+ offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
+ }
+
+ dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
+ if (dmabuf_export_cnt) {
+ unknown_reason = false;
+ offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
+ dmabuf_export_cnt);
+ }
+
+ if (unknown_reason)
+ scnprintf(buf + offset, size - offset, " [unknown reason]");
+
+ dev_notice(hdev->dev, "%s%s\n", message, buf);
+}
+
/*
* hl_device_release - release function for habanalabs device
*
@@ -511,23 +536,21 @@ static int hl_device_release(struct inode *inode, struct file *filp)
return 0;
}
- /* Each pending user interrupt holds the user's context, hence we
- * must release them all before calling hl_ctx_mgr_fini().
- */
- hl_release_pending_user_interrupts(hpriv->hdev);
-
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
+
+	/* Memory buffers might still be in use at this point and thus the handles IDR destruction
+ * is postponed to hpriv_release().
+ */
hl_mem_mgr_fini(&hpriv->mem_mgr);
hdev->compute_ctx_in_release = 1;
if (!hl_hpriv_put(hpriv)) {
- dev_notice(hdev->dev, "User process closed FD but device still in use\n");
+ print_device_in_use_info(hdev, "User process closed FD but device still in use");
hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
- hdev->last_open_session_duration_jif =
- jiffies - hdev->last_successful_open_jif;
+ hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
return 0;
}
@@ -620,7 +643,7 @@ static void device_release_func(struct device *dev)
* device_init_cdev - Initialize cdev and device for habanalabs device
*
* @hdev: pointer to habanalabs device structure
- * @hclass: pointer to the class object of the device
+ * @class: pointer to the class object of the device
* @minor: minor number of the specific device
* @fpos: file operations to install for this device
* @name: name of the device as it will appear in the filesystem
@@ -629,7 +652,7 @@ static void device_release_func(struct device *dev)
*
* Initialize a cdev and a Linux device for habanalabs's device.
*/
-static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
+static int device_init_cdev(struct hl_device *hdev, struct class *class,
int minor, const struct file_operations *fops,
char *name, struct cdev *cdev,
struct device **dev)
@@ -643,7 +666,7 @@ static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
device_initialize(*dev);
(*dev)->devt = MKDEV(hdev->major, minor);
- (*dev)->class = hclass;
+ (*dev)->class = class;
(*dev)->release = device_release_func;
dev_set_drvdata(*dev, hdev);
dev_set_name(*dev, "%s", name);
@@ -736,14 +759,14 @@ static void device_hard_reset_pending(struct work_struct *work)
static void device_release_watchdog_func(struct work_struct *work)
{
- struct hl_device_reset_work *device_release_watchdog_work =
- container_of(work, struct hl_device_reset_work, reset_work.work);
- struct hl_device *hdev = device_release_watchdog_work->hdev;
+ struct hl_device_reset_work *watchdog_work =
+ container_of(work, struct hl_device_reset_work, reset_work.work);
+ struct hl_device *hdev = watchdog_work->hdev;
u32 flags;
- dev_dbg(hdev->dev, "Device wasn't released in time. Initiate device reset.\n");
+ dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
- flags = device_release_watchdog_work->flags | HL_DRV_RESET_FROM_WD_THR;
+ flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
hl_device_reset(hdev, flags);
}
@@ -808,7 +831,7 @@ static int device_early_init(struct hl_device *hdev)
}
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
- snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i);
+ snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
@@ -817,14 +840,16 @@ static int device_early_init(struct hl_device *hdev)
}
}
- hdev->eq_wq = create_singlethread_workqueue("hl-events");
+ snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
+ hdev->eq_wq = create_singlethread_workqueue(workq_name);
if (hdev->eq_wq == NULL) {
dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
}
- hdev->cs_cmplt_wq = alloc_workqueue("hl-cs-completions", WQ_UNBOUND, 0);
+ snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
+ hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->cs_cmplt_wq) {
dev_err(hdev->dev,
"Failed to allocate CS completions workqueue\n");
@@ -832,7 +857,8 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
- hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
+ snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
+ hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->ts_free_obj_wq) {
dev_err(hdev->dev,
"Failed to allocate Timestamp registration free workqueue\n");
@@ -840,15 +866,15 @@ static int device_early_init(struct hl_device *hdev)
goto free_cs_cmplt_wq;
}
- hdev->prefetch_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
+ snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
+ hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->prefetch_wq) {
dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
rc = -ENOMEM;
goto free_ts_free_wq;
}
- hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
- GFP_KERNEL);
+ hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
if (!hdev->hl_chip_info) {
rc = -ENOMEM;
goto free_prefetch_wq;
@@ -860,7 +886,8 @@ static int device_early_init(struct hl_device *hdev)
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
- hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
+ snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
+ hdev->reset_wq = create_singlethread_workqueue(workq_name);
if (!hdev->reset_wq) {
rc = -ENOMEM;
dev_err(hdev->dev, "Failed to create device reset WQ\n");
@@ -890,6 +917,7 @@ static int device_early_init(struct hl_device *hdev)
free_cb_mgr:
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+ hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
free_prefetch_wq:
@@ -933,6 +961,7 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->clk_throttling.lock);
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+ hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
@@ -956,6 +985,8 @@ static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
work_heartbeat.work);
+ struct hl_info_fw_err_info info = {0};
+ u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
if (!hl_device_operational(hdev, NULL))
goto reschedule;
@@ -966,7 +997,10 @@ static void hl_device_heartbeat(struct work_struct *work)
if (hl_device_operational(hdev, NULL))
dev_err(hdev->dev, "Device heartbeat failed!\n");
- hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
+ info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
+ info.event_mask = &event_mask;
+ hl_handle_fw_err(hdev, &info);
+ hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
return;
@@ -1237,7 +1271,6 @@ int hl_device_resume(struct hl_device *hdev)
return 0;
disable_device:
- pci_clear_master(hdev->pdev);
pci_disable_device(hdev->pdev);
return rc;
@@ -1347,6 +1380,34 @@ static void device_disable_open_processes(struct hl_device *hdev, bool control_d
mutex_unlock(fd_lock);
}
+static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
+{
+	/* If reset is due to heartbeat, device CPU is not responsive,
+	 * in which case there is no point in sending a PCI disable message to it.
+	 */
+ if ((flags & HL_DRV_RESET_HARD) &&
+ !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
+		/* Disable PCI access from device F/W so it won't send
+		 * us additional interrupts. We disable MSI/MSI-X in
+		 * the halt_engines function and we can't have the F/W
+		 * sending us interrupts after that. We need to disable
+		 * the access here because if the device is marked
+		 * disabled, the message won't be sent. Also, in case
+		 * of heartbeat, the device CPU is marked as disabled
+		 * so this message won't be sent
+		 */
+ if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
+ dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
+ return;
+ }
+
+ /* verify that last EQs are handled before disabled is set */
+ if (hdev->cpu_queues_enable)
+ synchronize_irq(pci_irq_vector(hdev->pdev,
+ hdev->asic_prop.eq_interrupt_id));
+ }
+}
+
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{
u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
@@ -1385,28 +1446,6 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
} else {
hdev->reset_info.reset_trigger_repeated = 1;
}
-
- /* If reset is due to heartbeat, device CPU is no responsive in
- * which case no point sending PCI disable message to it.
- *
- * If F/W is performing the reset, no need to send it a message to disable
- * PCI access
- */
- if ((flags & HL_DRV_RESET_HARD) &&
- !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
- /* Disable PCI access from device F/W so he won't send
- * us additional interrupts. We disable MSI/MSI-X at
- * the halt_engines function and we can't have the F/W
- * sending us interrupts after that. We need to disable
- * the access here because if the device is marked
- * disable, the message won't be send. Also, in case
- * of heartbeat, the device CPU is marked as disable
- * so this message won't be sent
- */
- if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
- dev_warn(hdev->dev,
- "Failed to disable PCI access by F/W\n");
- }
}
/*
@@ -1427,12 +1466,11 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
*/
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
- bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
- reset_upon_device_release = false, schedule_hard_reset = false, delay_reset,
- from_dev_release, from_watchdog_thread;
+ bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
+ schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
struct hl_ctx *ctx;
- int i, rc;
+ int i, rc, hw_fini_rc;
if (!hdev->init_done) {
dev_err(hdev->dev, "Can't reset before initialization is done\n");
@@ -1445,32 +1483,34 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
+ reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+
+ if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
+ dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
+ return 0;
+ }
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
- hard_instead_soft = true;
+ dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
hard_reset = true;
}
- if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
+ if (reset_upon_device_release) {
if (hard_reset) {
dev_crit(hdev->dev,
"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
return -EINVAL;
}
- reset_upon_device_release = true;
-
goto do_reset;
}
if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
- hard_instead_soft = true;
+ dev_dbg(hdev->dev,
+ "asic doesn't allow inference soft reset - do hard-reset instead\n");
hard_reset = true;
}
- if (hard_instead_soft)
- dev_dbg(hdev->dev, "Doing hard-reset instead of compute reset\n");
-
do_reset:
/* Re-entry of reset thread */
if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
@@ -1478,14 +1518,14 @@ do_reset:
/*
* Prevent concurrency in this function - only one reset should be
- * done at any given time. Only need to perform this if we didn't
- * get from the dedicated hard reset thread
+ * done at any given time. We need to perform this only if we didn't
+ * get here from a dedicated hard reset thread.
*/
if (!from_hard_reset_thread) {
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
- /* We only allow scheduling of a hard reset during compute reset */
+ /* We allow scheduling of a hard reset only during a compute reset */
if (hard_reset && hdev->reset_info.in_compute_reset)
hdev->reset_info.hard_reset_schedule_flags = flags;
spin_unlock(&hdev->reset_info.lock);
@@ -1503,15 +1543,18 @@ do_reset:
/* Cancel the device release watchdog work if required.
* In case of reset-upon-device-release while the release watchdog work is
- * scheduled, do hard-reset instead of compute-reset.
+ * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
*/
if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
+ struct hl_device_reset_work *watchdog_work =
+ &hdev->device_release_watchdog_work;
+
hdev->reset_info.watchdog_active = 0;
if (!from_watchdog_thread)
- cancel_delayed_work_sync(
- &hdev->device_release_watchdog_work.reset_work);
+ cancel_delayed_work_sync(&watchdog_work->reset_work);
- if (from_dev_release) {
+ if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
+ hdev->reset_info.in_compute_reset = 0;
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
@@ -1521,7 +1564,9 @@ do_reset:
if (delay_reset)
usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
+escalate_reset_flow:
handle_reset_trigger(hdev, flags);
+ send_disable_pci_access(hdev, flags);
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -1536,7 +1581,6 @@ do_reset:
dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
}
-again:
if ((hard_reset) && (!from_hard_reset_thread)) {
hdev->reset_info.hard_reset_pending = true;
@@ -1566,7 +1610,8 @@ kill_processes:
if (rc == -EBUSY) {
if (hdev->device_fini_pending) {
dev_crit(hdev->dev,
- "Failed to kill all open processes, stopping hard reset\n");
+ "%s Failed to kill all open processes, stopping hard reset\n",
+ dev_name(&(hdev)->pdev->dev));
goto out_err;
}
@@ -1576,7 +1621,8 @@ kill_processes:
if (rc) {
dev_crit(hdev->dev,
- "Failed to kill all open processes, stopping hard reset\n");
+ "%s Failed to kill all open processes, stopping hard reset\n",
+ dev_name(&(hdev)->pdev->dev));
goto out_err;
}
@@ -1587,7 +1633,7 @@ kill_processes:
}
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
+ hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
@@ -1614,6 +1660,10 @@ kill_processes:
hl_ctx_put(ctx);
}
+ if (hw_fini_rc) {
+ rc = hw_fini_rc;
+ goto out_err;
+ }
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
@@ -1627,14 +1677,16 @@ kill_processes:
* ensure driver puts the driver in a unusable state
*/
dev_crit(hdev->dev,
- "Consecutive FW fatal errors received, stopping hard reset\n");
+ "%s Consecutive FW fatal errors received, stopping hard reset\n",
+ dev_name(&(hdev)->pdev->dev));
rc = -EIO;
goto out_err;
}
if (hdev->kernel_ctx) {
dev_crit(hdev->dev,
- "kernel ctx was alive during hard reset, something is terribly wrong\n");
+ "%s kernel ctx was alive during hard reset, something is terribly wrong\n",
+ dev_name(&(hdev)->pdev->dev));
rc = -EBUSY;
goto out_err;
}
@@ -1732,7 +1784,7 @@ kill_processes:
rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc) {
dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
- return rc;
+ goto out_err;
}
spin_lock(&hdev->reset_info.lock);
@@ -1752,9 +1804,13 @@ kill_processes:
hdev->reset_info.needs_reset = false;
if (hard_reset)
- dev_info(hdev->dev, "Successfully finished resetting the device\n");
+ dev_info(hdev->dev,
+ "Successfully finished resetting the %s device\n",
+ dev_name(&(hdev)->pdev->dev));
else
- dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
+ dev_dbg(hdev->dev,
+ "Successfully finished resetting the %s device\n",
+ dev_name(&(hdev)->pdev->dev));
if (hard_reset) {
hdev->reset_info.hard_reset_cnt++;
@@ -1773,10 +1829,8 @@ kill_processes:
dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
flags = hdev->reset_info.hard_reset_schedule_flags;
hdev->reset_info.hard_reset_schedule_flags = 0;
- hdev->disabled = true;
hard_reset = true;
- handle_reset_trigger(hdev, flags);
- goto again;
+ goto escalate_reset_flow;
}
}
@@ -1789,22 +1843,23 @@ out_err:
hdev->reset_info.in_compute_reset = 0;
if (hard_reset) {
- dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
+ dev_err(hdev->dev,
+ "%s Failed to reset! Device is NOT usable\n",
+ dev_name(&(hdev)->pdev->dev));
hdev->reset_info.hard_reset_cnt++;
- } else if (reset_upon_device_release) {
- spin_unlock(&hdev->reset_info.lock);
- dev_err(hdev->dev, "Failed to reset device after user release\n");
- flags |= HL_DRV_RESET_HARD;
- flags &= ~HL_DRV_RESET_DEV_RELEASE;
- hard_reset = true;
- goto again;
} else {
+ if (reset_upon_device_release) {
+ dev_err(hdev->dev, "Failed to reset device after user release\n");
+ flags &= ~HL_DRV_RESET_DEV_RELEASE;
+ } else {
+ dev_err(hdev->dev, "Failed to do compute reset\n");
+ hdev->reset_info.compute_reset_cnt++;
+ }
+
spin_unlock(&hdev->reset_info.lock);
- dev_err(hdev->dev, "Failed to do compute reset\n");
- hdev->reset_info.compute_reset_cnt++;
flags |= HL_DRV_RESET_HARD;
hard_reset = true;
- goto again;
+ goto escalate_reset_flow;
}
hdev->reset_info.in_reset = 0;
@@ -1827,10 +1882,6 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
{
struct hl_ctx *ctx = NULL;
- /* Device release watchdog is only for hard reset */
- if (!(flags & HL_DRV_RESET_HARD) && hdev->asic_prop.allow_inference_soft_reset)
- goto device_reset;
-
/* F/W reset cannot be postponed */
if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
goto device_reset;
@@ -1858,7 +1909,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
goto out;
hdev->device_release_watchdog_work.flags = flags;
- dev_dbg(hdev->dev, "Device is going to be reset in %u sec unless being released\n",
+ dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
@@ -1870,6 +1921,8 @@ out:
hl_ctx_put(ctx);
+ hl_abort_waitings_for_completion(hdev);
+
return 0;
device_reset:
@@ -1924,37 +1977,27 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
-/*
- * hl_device_init - main initialization function for habanalabs device
- *
- * @hdev: pointer to habanalabs device structure
- *
- * Allocate an id for the device, do early initialization and then call the
- * ASIC specific initialization functions. Finally, create the cdev and the
- * Linux device to expose it to the user
- */
-int hl_device_init(struct hl_device *hdev, struct class *hclass)
+static int create_cdev(struct hl_device *hdev)
{
- int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
char *name;
- bool add_cdev_sysfs_on_err = false;
+ int rc;
hdev->cdev_idx = hdev->id / 2;
name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
- goto out_disabled;
+ goto out_err;
}
/* Initialize cdev and device structures */
- rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
+ rc = device_init_cdev(hdev, hdev->hclass, hdev->id, &hl_ops, name,
&hdev->cdev, &hdev->dev);
kfree(name);
if (rc)
- goto out_disabled;
+ goto out_err;
name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
@@ -1963,7 +2006,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
/* Initialize cdev and device structures for control device */
- rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
+ rc = device_init_cdev(hdev, hdev->hclass, hdev->id_control, &hl_ctrl_ops,
name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
kfree(name);
@@ -1971,10 +2014,36 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto free_dev;
+ return 0;
+
+free_dev:
+ put_device(hdev->dev);
+out_err:
+ return rc;
+}
+
+/*
+ * hl_device_init - main initialization function for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Allocate an id for the device, do early initialization and then call the
+ * ASIC specific initialization functions. Finally, create the cdev and the
+ * Linux device to expose it to the user
+ */
+int hl_device_init(struct hl_device *hdev)
+{
+ int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
+ bool add_cdev_sysfs_on_err = false;
+
+ rc = create_cdev(hdev);
+ if (rc)
+ goto out_disabled;
+
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
- goto free_dev_ctrl;
+ goto free_dev;
user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
hdev->asic_prop.user_interrupt_count;
@@ -2186,7 +2255,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
dev_notice(hdev->dev,
- "Successfully added device to habanalabs driver\n");
+ "Successfully added device %s to habanalabs driver\n",
+ dev_name(&(hdev)->pdev->dev));
hdev->init_done = true;
@@ -2225,9 +2295,8 @@ free_usr_intr_mem:
kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
-free_dev_ctrl:
- put_device(hdev->dev_ctrl);
free_dev:
+ put_device(hdev->dev_ctrl);
put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
@@ -2235,11 +2304,11 @@ out_disabled:
device_cdev_sysfs_add(hdev);
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
- "Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->cdev_idx);
+ "Failed to initialize hl%d. Device %s is NOT usable !\n",
+ hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
else
- pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->cdev_idx);
+ pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n",
+ hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
return rc;
}
@@ -2295,7 +2364,8 @@ void hl_device_fini(struct hl_device *hdev)
if (ktime_compare(ktime_get(), timeout) > 0) {
dev_crit(hdev->dev,
- "Failed to remove device because reset function did not finish\n");
+ "%s Failed to remove device because reset function did not finish\n",
+ dev_name(&(hdev)->pdev->dev));
return;
}
}
@@ -2347,7 +2417,9 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
@@ -2363,7 +2435,7 @@ void hl_device_fini(struct hl_device *hdev)
hl_mmu_fini(hdev);
- vfree(hdev->captured_err_info.pgf_info.user_mappings);
+ vfree(hdev->captured_err_info.page_fault_info.user_mappings);
hl_eq_fini(hdev, &hdev->event_queue);
@@ -2402,7 +2474,12 @@ void hl_device_fini(struct hl_device *hdev)
*/
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
- return readl(hdev->rmmio + reg);
+ u32 val = readl(hdev->rmmio + reg);
+
+ if (unlikely(trace_habanalabs_rreg32_enabled()))
+ trace_habanalabs_rreg32(hdev->dev, reg, val);
+
+ return val;
}
/*
@@ -2417,12 +2494,17 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
*/
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
+ if (unlikely(trace_habanalabs_wreg32_enabled()))
+ trace_habanalabs_wreg32(hdev->dev, reg, val);
+
writel(val, hdev->rmmio + reg);
}
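
Editorial note: the two accessors above follow the usual tracepoint-guard idiom - the generated trace_*_enabled() helper is a cheap static-key test, so the MMIO fast path costs nothing while tracing is off. A minimal sketch of the same idiom with a hypothetical tracepoint name (not the driver's own):

	static inline u32 traced_readl(struct device *dev, void __iomem *base, u32 reg)
	{
		u32 val = readl(base + reg);

		/* trace_my_rreg32*() assumed to come from a TRACE_EVENT() definition */
		if (unlikely(trace_my_rreg32_enabled()))
			trace_my_rreg32(dev, reg, val);

		return val;
	}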
void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags)
{
+ struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
+
if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
dev_err(hdev->dev,
"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
@@ -2431,15 +2513,17 @@ void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_
}
/* In case it's the first razwi since the device was opened, capture its parameters */
- if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info_recorded, 0, 1))
+ if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
return;
- hdev->captured_err_info.razwi.timestamp = ktime_to_ns(ktime_get());
- hdev->captured_err_info.razwi.addr = addr;
- hdev->captured_err_info.razwi.num_of_possible_engines = num_of_engines;
- memcpy(&hdev->captured_err_info.razwi.engine_id[0], &engine_id[0],
+ razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
+ razwi_info->razwi.addr = addr;
+ razwi_info->razwi.num_of_possible_engines = num_of_engines;
+ memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
num_of_engines * sizeof(u16));
- hdev->captured_err_info.razwi.flags = flags;
+ razwi_info->razwi.flags = flags;
+
+ razwi_info->razwi_info_available = true;
}
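
Editorial note: hl_capture_razwi(), hl_capture_page_fault() and the new HW/FW error helpers further down all rely on the same capture-only-the-first-event idiom - atomic_cmpxchg() flips a 0-to-1 flag exactly once, so only the root-cause event is recorded and later ones are silently ignored. A stripped-down sketch with hypothetical names:

	struct my_err_info {
		atomic_t detected;
		bool info_available;
		u64 timestamp;
	};

	static void my_capture_err(struct my_err_info *info)
	{
		/* a non-zero old value means another path already captured the root cause */
		if (atomic_cmpxchg(&info->detected, 0, 1))
			return;

		info->timestamp = ktime_to_ns(ktime_get());
		info->info_available = true;
	}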
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
@@ -2453,7 +2537,7 @@ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_o
static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
{
- struct page_fault_info *pgf_info = &hdev->captured_err_info.pgf_info;
+ struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
@@ -2515,14 +2599,18 @@ finish:
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
{
+ struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
+
/* Capture only the first page fault */
- if (atomic_cmpxchg(&hdev->captured_err_info.pgf_info_recorded, 0, 1))
+ if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
return;
- hdev->captured_err_info.pgf_info.pgf.timestamp = ktime_to_ns(ktime_get());
- hdev->captured_err_info.pgf_info.pgf.addr = addr;
- hdev->captured_err_info.pgf_info.pgf.engine_id = eng_id;
+ pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
+ pgf_info->page_fault.addr = addr;
+ pgf_info->page_fault.engine_id = eng_id;
hl_capture_user_mappings(hdev, is_pmmu);
+
+ pgf_info->page_fault_info_available = true;
}
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
@@ -2533,3 +2621,49 @@ void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
}
+
+static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
+{
+ struct hw_err_info *info = &hdev->captured_err_info.hw_err;
+
+ /* Capture only the first HW err */
+ if (atomic_cmpxchg(&info->event_detected, 0, 1))
+ return;
+
+ info->event.timestamp = ktime_to_ns(ktime_get());
+ info->event.event_id = event_id;
+
+ info->event_info_available = true;
+}
+
+void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_capture_hw_err(hdev, event_id);
+
+ if (event_mask)
+ *event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
+}
+
+static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
+{
+ struct fw_err_info *info = &hdev->captured_err_info.fw_err;
+
+ /* Capture only the first FW error */
+ if (atomic_cmpxchg(&info->event_detected, 0, 1))
+ return;
+
+ info->event.timestamp = ktime_to_ns(ktime_get());
+ info->event.err_type = fw_info->err_type;
+ if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
+ info->event.event_id = fw_info->event_id;
+
+ info->event_info_available = true;
+}
+
+void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
+{
+ hl_capture_fw_err(hdev, info);
+
+ if (info->event_mask)
+ *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
+}
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index 228b92278e48..59f61ec66445 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -14,8 +14,32 @@
#include <linux/ctype.h>
#include <linux/vmalloc.h>
+#include <trace/events/habanalabs.h>
+
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
+static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
+ [COMMS_NOOP] = __stringify(COMMS_NOOP),
+ [COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
+ [COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
+ [COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
+ [COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
+ [COMMS_EXEC] = __stringify(COMMS_EXEC),
+ [COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
+ [COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
+ [COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
+ [COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
+};
+
+static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
+ [COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
+ [COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
+ [COMMS_STS_OK] = __stringify(COMMS_STS_OK),
+ [COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
+ [COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
+ [COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
+};
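
Editorial note: these lookup tables pair __stringify() with designated initializers so the comms trace events print the symbolic command/status name instead of a raw number. The pattern in isolation, with illustrative names:

	enum my_comms_cmd { MY_CMD_NOOP = 0, MY_CMD_EXEC, MY_CMD_INVLD_LAST };

	static const char * const my_cmd_str[MY_CMD_INVLD_LAST] = {
		[MY_CMD_NOOP] = __stringify(MY_CMD_NOOP),	/* expands to "MY_CMD_NOOP" */
		[MY_CMD_EXEC] = __stringify(MY_CMD_EXEC),
	};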
+
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -47,7 +71,7 @@ free_fw_ver:
return NULL;
}
-static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
+static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
{
char major[8], minor[8], *first_dot, *second_dot;
int rc;
@@ -62,7 +86,7 @@ static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
if (rc) {
dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
- goto out;
+ return rc;
}
/* skip the first dot */
@@ -78,9 +102,6 @@ static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
if (rc)
dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
-
-out:
- kfree(preboot_ver);
return rc;
}
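
Editorial note: the renamed hl_get_preboot_major_minor() no longer frees the string it parses; that is now the caller's job (see the hunk further down). The "major.minor" extraction it performs could be sketched with kernel sscanf() as follows - a hypothetical helper, not the patch itself:

	static int parse_major_minor(struct device *dev, const char *preboot_ver,
				     u32 *major, u32 *minor)
	{
		/* expects a version string that starts with "<major>.<minor>" */
		if (sscanf(preboot_ver, "%u.%u", major, minor) != 2) {
			dev_err(dev, "malformed preboot version '%s'\n", preboot_ver);
			return -EINVAL;
		}

		return 0;
	}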
@@ -311,7 +332,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
tmp);
else
- dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
+ dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
hdev->device_cpu_disabled = true;
goto out;
}
@@ -1239,7 +1260,7 @@ void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
COMMS_RST_DEV, 0, false,
hdev->fw_loader.cpu_timeout);
if (rc)
- dev_warn(hdev->dev, "Failed sending COMMS_RST_DEV\n");
+ dev_err(hdev->dev, "Failed sending COMMS_RST_DEV\n");
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
}
@@ -1257,10 +1278,10 @@ void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
/* Stop device CPU to make sure nothing bad happens */
if (hdev->asic_prop.dynamic_fw_load) {
rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
- COMMS_GOTO_WFE, 0, true,
+ COMMS_GOTO_WFE, 0, false,
hdev->fw_loader.cpu_timeout);
if (rc)
- dev_warn(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
+ dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);
@@ -1322,13 +1343,12 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
break;
default:
dev_err(hdev->dev,
- "Device boot progress - Invalid status code %d\n",
- status);
+ "Device boot progress - Invalid or unexpected status code %d\n", status);
break;
}
}
-static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
+int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
u32 status;
@@ -1353,8 +1373,8 @@ static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
pre_fw_load->wait_for_preboot_timeout);
if (rc) {
- dev_err(hdev->dev, "CPU boot ready status timeout\n");
detect_cpu_boot_status(hdev, status);
+ dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
/* If we read all FF, then something is totally wrong, no point
* of reading specific errors
@@ -1634,6 +1654,7 @@ static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
+ trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
}
@@ -1691,6 +1712,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+ trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
+
/* Wait for expected status */
rc = hl_poll_timeout(
hdev,
@@ -1706,6 +1729,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
return -EIO;
}
+ trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
+
/*
* skip storing FW response for NOOP to preserve the actual desired
* FW status
@@ -1778,6 +1803,8 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
{
int rc;
+ trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
+
/* first send clear command to clean former commands */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
if (rc)
@@ -1884,7 +1911,7 @@ static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
- * @fw_desc: the descriptor form FW
+ * @fw_desc: the descriptor from FW
*
* @return 0 on success, otherwise non-zero error code
*/
@@ -1901,11 +1928,11 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
int rc;
if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
- dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
+ dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
fw_desc->header.magic);
if (fw_desc->header.version != HL_COMMS_DESC_VER)
- dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
+ dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
fw_desc->header.version);
/*
@@ -1976,6 +2003,43 @@ static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
return rc;
}
+/*
+ * hl_fw_dynamic_read_descriptor_msg - read and show the ascii msg that was sent by the fw
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_desc: the descriptor from FW
+ */
+static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
+ struct lkd_fw_comms_desc *fw_desc)
+{
+ int i;
+ char *msg;
+
+ for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
+ if (!fw_desc->ascii_msg[i].valid)
+ return;
+
+ /* force NULL termination */
+ msg = fw_desc->ascii_msg[i].msg;
+ msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
+
+ switch (fw_desc->ascii_msg[i].msg_lvl) {
+ case LKD_FW_ASCII_MSG_ERR:
+ dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ case LKD_FW_ASCII_MSG_WRN:
+ dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ case LKD_FW_ASCII_MSG_INF:
+ dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ default:
+ dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ }
+ }
+}
+
/**
* hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
*
@@ -1988,9 +2052,10 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
struct lkd_fw_comms_desc *fw_desc;
- void __iomem *src, *temp_fw_desc;
struct pci_mem_region *region;
struct fw_response *response;
+ void *temp_fw_desc;
+ void __iomem *src;
u16 fw_data_size;
enum pci_region region_id;
int rc;
@@ -2039,6 +2104,10 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
(struct lkd_fw_comms_desc *) temp_fw_desc);
+
+ if (!rc)
+ hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
+
vfree(temp_fw_desc);
return rc;
@@ -2109,8 +2178,8 @@ static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
- /* This function takes care of freeing preboot_ver */
- rc = extract_fw_sub_versions(hdev, preboot_ver);
+ rc = hl_get_preboot_major_minor(hdev, preboot_ver);
+ kfree(preboot_ver);
if (rc)
return rc;
}
@@ -2354,7 +2423,7 @@ static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
hdev->fw_poll_interval_usec,
dyn_loader->wait_for_bl_timeout);
if (rc) {
- dev_err(hdev->dev, "failed to wait for boot\n");
+ dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
return rc;
}
@@ -2381,7 +2450,7 @@ static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
hdev->fw_poll_interval_usec,
fw_loader->cpu_timeout);
if (rc) {
- dev_err(hdev->dev, "failed to wait for Linux\n");
+ dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
return rc;
}
@@ -2459,51 +2528,54 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
{
- struct lkd_msg_comms msg;
+ struct lkd_msg_comms *msg;
int rc;
- memset(&msg, 0, sizeof(msg));
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
/* create message to be sent */
- msg.header.type = msg_type;
- msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
- msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
+ msg->header.type = msg_type;
+ msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
+ msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
switch (msg_type) {
case HL_COMMS_RESET_CAUSE_TYPE:
- msg.reset_cause = *(__u8 *) data;
+ msg->reset_cause = *(__u8 *) data;
break;
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
msg_type);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
sizeof(struct lkd_msg_comms));
if (rc)
- return rc;
+ goto out;
/* copy message to space allocated by FW */
- rc = hl_fw_dynamic_copy_msg(hdev, &msg, fw_loader);
+ rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
if (rc)
- return rc;
+ goto out;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
0, true,
fw_loader->cpu_timeout);
if (rc)
- return rc;
+ goto out;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
0, true,
fw_loader->cpu_timeout);
- if (rc)
- return rc;
- return 0;
+out:
+ kfree(msg);
+ return rc;
}
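
Editorial note: the rework above moves the lkd_msg_comms buffer off the stack and funnels every error path through a single out label so the allocation is always freed. The shape of that pattern, with hypothetical helpers:

	static int send_big_msg(struct hl_device *hdev)
	{
		struct lkd_msg_comms *msg;
		int rc;

		msg = kzalloc(sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		rc = fill_msg(msg);		/* hypothetical helper */
		if (rc)
			goto out;

		rc = transmit_msg(hdev, msg);	/* hypothetical helper */
	out:
		kfree(msg);
		return rc;
	}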
/**
@@ -2560,13 +2632,43 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
}
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
+ struct lkd_fw_binning_info *binning_info;
+
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
if (rc)
goto protocol_err;
/* read preboot version */
- return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
+ rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
+
+ if (rc)
+ return rc;
+
+ /* read binning info from preboot */
+ if (hdev->support_preboot_binning) {
+ binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
+ hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
+ hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
+ hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
+ hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
+ hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
+
+ rc = hdev->asic_funcs->set_dram_properties(hdev);
+ if (rc)
+ return rc;
+
+ rc = hdev->asic_funcs->set_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ dev_dbg(hdev->dev,
+ "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
+ hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
+ hdev->decoder_binning, hdev->rotator_binning);
+ }
+
+ return 0;
}
/* load boot fit to FW */
@@ -2687,7 +2789,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
if (rc) {
dev_dbg(hdev->dev,
- "No boot fit request received, resuming boot\n");
+ "No boot fit request received (status = %d), resuming boot\n", status);
} else {
rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
if (rc)
@@ -2710,7 +2812,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
if (rc) {
dev_err(hdev->dev,
- "Timeout waiting for boot fit load ack\n");
+ "Timeout waiting for boot fit load ack (status = %d)\n", status);
goto out;
}
@@ -2788,7 +2890,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
if (rc) {
dev_err(hdev->dev,
- "Failed to get ACK on skipping BMC, %d\n",
+ "Failed to get ACK on skipping BMC (status = %d)\n",
status);
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
rc = -EIO;
@@ -2815,7 +2917,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
"Device reports FIT image is corrupted\n");
else
dev_err(hdev->dev,
- "Failed to load firmware to device, %d\n",
+ "Failed to load firmware to device (status = %d)\n",
status);
rc = -EIO;
@@ -3043,3 +3145,27 @@ int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_in
sizeof(struct cpucp_sec_attest_info), nonce,
HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
}
+
+int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
+ dma_addr_t buff, u32 *size)
+{
+ struct cpucp_packet pkt = {};
+ u64 result;
+ int rc = 0;
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(buff);
+ pkt.data_max_size = cpu_to_le32(*size);
+ pkt.pkt_subidx = cpu_to_le32(sub_opcode);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc)
+ dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
+ else
+ dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
+
+ *size = (u32)result;
+
+ return rc;
+}
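
Editorial note: a hedged usage sketch of the new passthrough helper - the caller supplies a CPU-accessible DMA buffer and an in/out size, and on success *size is overwritten with the amount of data the firmware returned. The function and buffer names below are illustrative and error handling is trimmed:

	static int example_passthrough(struct hl_device *hdev,
				       enum hl_passthrough_type sub_opcode)
	{
		dma_addr_t dma_addr;
		u32 size = SZ_4K;
		void *buf;
		int rc;

		buf = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_addr);
		if (!buf)
			return -ENOMEM;

		rc = hl_fw_send_generic_request(hdev, sub_opcode, dma_addr, &size);
		if (!rc)
			dev_dbg(hdev->dev, "firmware returned %u bytes\n", size);

		hl_cpu_accessible_dma_pool_free(hdev, SZ_4K, buf);
		return rc;
	}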
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index e2527d976ee0..eaae69a9f817 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -11,7 +11,7 @@
#include "../include/common/cpucp_if.h"
#include "../include/common/qman_if.h"
#include "../include/hw_ip/mmu/mmu_general.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include <linux/cdev.h>
#include <linux/iopoll.h>
@@ -29,6 +29,8 @@
#include <linux/coresight.h>
#include <linux/dma-buf.h>
+#include "security.h"
+
#define HL_NAME "habanalabs"
struct hl_device;
@@ -153,18 +155,12 @@ enum hl_mmu_enablement {
#define hl_asic_dma_alloc_coherent(hdev, size, dma_handle, flags) \
hl_asic_dma_alloc_coherent_caller(hdev, size, dma_handle, flags, __func__)
-#define hl_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle) \
- hl_cpu_accessible_dma_pool_alloc_caller(hdev, size, dma_handle, __func__)
-
#define hl_asic_dma_pool_zalloc(hdev, size, mem_flags, dma_handle) \
hl_asic_dma_pool_zalloc_caller(hdev, size, mem_flags, dma_handle, __func__)
#define hl_asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle) \
hl_asic_dma_free_coherent_caller(hdev, size, cpu_addr, dma_handle, __func__)
-#define hl_cpu_accessible_dma_pool_free(hdev, size, vaddr) \
- hl_cpu_accessible_dma_pool_free_caller(hdev, size, vaddr, __func__)
-
#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
@@ -375,7 +371,9 @@ enum hl_cs_type {
CS_TYPE_COLLECTIVE_WAIT,
CS_RESERVE_SIGNALS,
CS_UNRESERVE_SIGNALS,
- CS_TYPE_ENGINE_CORE
+ CS_TYPE_ENGINE_CORE,
+ CS_TYPE_ENGINES,
+ CS_TYPE_FLUSH_PCI_HBW_WRITES,
};
/*
@@ -545,6 +543,8 @@ struct hl_hints_range {
/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
+ * @special_blocks: points to an array containing special blocks info.
+ * @skip_special_blocks_cfg: special blocks skip configs.
* @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
@@ -587,6 +587,8 @@ struct hl_hints_range {
* @host_base_address: host physical start address for host DMA from device
* @host_end_address: host physical end address for host DMA from device
* @max_freq_value: current max clk frequency.
+ * @engine_core_interrupt_reg_addr: interrupt register address for engine core to use
+ * in order to raise events toward FW.
* @clk_pll_index: clock PLL index that specify which PLL determines the clock
* we display to the user
* @mmu_pgt_size: MMU page tables total size.
@@ -607,8 +609,8 @@ struct hl_hints_range {
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @decoder_enabled_mask: which decoders are enabled.
- * @decoder_binning_mask: which decoders are binned, 0 means usable and 1
- * means binned (at most one binned decoder per dcore).
+ * @decoder_binning_mask: which decoders are binned, 0 means usable and 1 means binned.
+ * @rotator_enabled_mask: which rotators are enabled.
* @edma_enabled_mask: which EDMAs are enabled.
* @edma_binning_mask: which EDMAs are binned, 0 means usable and 1 means
* binned (at most one binned DMA).
@@ -643,7 +645,12 @@ struct hl_hints_range {
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
 * it shall be equal to dram_page_size.
- * @num_engine_cores: number of engine cpu cores
+ * @num_engine_cores: number of engine cpu cores.
+ * @max_num_of_engines: maximum number of all engines in the ASIC.
+ * @num_of_special_blocks: special_blocks array size.
+ * @glbl_err_cause_num: global err cause number.
+ * @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
+ * not supported.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -654,6 +661,8 @@ struct hl_hints_range {
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
* @user_dec_intr_count: number of decoder interrupts exposed to user.
+ * @tpc_interrupt_id: interrupt id for TPC to use in order to raise events towards the host.
+ * @eq_interrupt_id: interrupt id for EQ, used to synchronize EQ interrupts during hard-reset.
* @cache_line_size: device cache line size.
* @server_type: Server type that the ASIC is currently installed in.
* The value is according to enum hl_server_type in uapi file.
@@ -689,9 +698,12 @@ struct hl_hints_range {
* @supports_user_set_page_size: true if user can set the allocation page size.
* @dma_mask: the dma mask to be set for this device
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
+ * @supports_engine_modes: true if changing engines/engine_cores modes is supported.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
+ struct hl_special_block_info *special_blocks;
+ struct hl_skip_blocks_cfg skip_special_blocks_cfg;
struct cpucp_info cpucp_info;
char uboot_ver[VERSION_MAX_LEN];
char preboot_ver[VERSION_MAX_LEN];
@@ -728,6 +740,7 @@ struct asic_fixed_properties {
u64 host_base_address;
u64 host_end_address;
u64 max_freq_value;
+ u64 engine_core_interrupt_reg_addr;
u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
@@ -748,6 +761,7 @@ struct asic_fixed_properties {
u32 cb_pool_cb_size;
u32 decoder_enabled_mask;
u32 decoder_binning_mask;
+ u32 rotator_enabled_mask;
u32 edma_enabled_mask;
u32 edma_binning_mask;
u32 max_pending_cs;
@@ -764,6 +778,10 @@ struct asic_fixed_properties {
u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
u32 num_engine_cores;
+ u32 max_num_of_engines;
+ u32 num_of_special_blocks;
+ u32 glbl_err_cause_num;
+ u32 hbw_flush_reg;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -774,6 +792,8 @@ struct asic_fixed_properties {
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
u16 user_dec_intr_count;
+ u16 tpc_interrupt_id;
+ u16 eq_interrupt_id;
u16 cache_line_size;
u16 server_type;
u8 completion_queues_count;
@@ -797,6 +817,7 @@ struct asic_fixed_properties {
u8 supports_user_set_page_size;
u8 dma_mask;
u8 supports_advanced_cpucp_rc;
+ u8 supports_engine_modes;
};
/**
@@ -935,6 +956,7 @@ struct hl_mmap_mem_buf {
* @size: holds the CB's size.
* @roundup_size: holds the cb size after roundup to page size.
* @cs_cnt: holds number of CS that this CB participates in.
+ * @is_handle_destroyed: atomic boolean indicating whether or not the CB handle was destroyed.
* @is_pool: true if CB was acquired from the pool, false otherwise.
* @is_internal: internally allocated
* @is_mmu_mapped: true if the CB is mapped to the device's MMU.
@@ -951,6 +973,7 @@ struct hl_cb {
u32 size;
u32 roundup_size;
atomic_t cs_cnt;
+ atomic_t is_handle_destroyed;
u8 is_pool;
u8 is_internal;
u8 is_mmu_mapped;
@@ -1077,20 +1100,29 @@ struct hl_cq {
atomic_t free_slots_cnt;
};
+enum hl_user_interrupt_type {
+ HL_USR_INTERRUPT_CQ = 0,
+ HL_USR_INTERRUPT_DECODER,
+ HL_USR_INTERRUPT_TPC,
+ HL_USR_INTERRUPT_UNEXPECTED
+};
+
/**
* struct hl_user_interrupt - holds user interrupt information
* @hdev: pointer to the device structure
+ * @type: user interrupt type
* @wait_list_head: head to the list of user threads pending on this interrupt
* @wait_list_lock: protects wait_list_head
+ * @timestamp: last timestamp taken upon interrupt
* @interrupt_id: msix interrupt id
- * @is_decoder: whether this entry represents a decoder interrupt
*/
struct hl_user_interrupt {
- struct hl_device *hdev;
- struct list_head wait_list_head;
- spinlock_t wait_list_lock;
- u32 interrupt_id;
- bool is_decoder;
+ struct hl_device *hdev;
+ enum hl_user_interrupt_type type;
+ struct list_head wait_list_head;
+ spinlock_t wait_list_lock;
+ ktime_t timestamp;
+ u32 interrupt_id;
};
/**
@@ -1179,15 +1211,15 @@ struct hl_eq {
/**
* struct hl_dec - describes a decoder sw instance.
* @hdev: pointer to the device structure.
- * @completion_abnrm_work: workqueue object to run when decoder generates an error interrupt
+ * @abnrm_intr_work: workqueue work item to run when decoder generates an error interrupt.
* @core_id: ID of the decoder.
* @base_addr: base address of the decoder.
*/
struct hl_dec {
- struct hl_device *hdev;
- struct work_struct completion_abnrm_work;
- u32 core_id;
- u32 base_addr;
+ struct hl_device *hdev;
+ struct work_struct abnrm_intr_work;
+ u32 core_id;
+ u32 base_addr;
};
/**
@@ -1540,8 +1572,11 @@ struct engines_data {
* @check_if_razwi_happened: check if there was a razwi due to RR violation.
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
- * @set_engine_cores: set a config command to enigne cores
+ * @set_engine_cores: set a config command to engine cores
+ * @set_engines: set a config command to user engines
* @send_device_activity: indication to FW about device availability
+ * @set_dram_properties: set DRAM related properties.
+ * @set_binning_masks: set binning/enable masks for all relevant components.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1551,7 +1586,7 @@ struct hl_asic_funcs {
int (*sw_init)(struct hl_device *hdev);
int (*sw_fini)(struct hl_device *hdev);
int (*hw_init)(struct hl_device *hdev);
- void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
+ int (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
@@ -1678,7 +1713,11 @@ struct hl_asic_funcs {
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids,
u32 num_cores, u32 core_command);
+ int (*set_engines)(struct hl_device *hdev, u32 *engine_ids,
+ u32 num_engines, u32 engine_command);
int (*send_device_activity)(struct hl_device *hdev, bool open);
+ int (*set_dram_properties)(struct hl_device *hdev);
+ int (*set_binning_masks)(struct hl_device *hdev);
};
@@ -1739,8 +1778,9 @@ struct hl_cs_counters_atomic {
* struct hl_dmabuf_priv - a dma-buf private object.
* @dmabuf: pointer to dma-buf object.
* @ctx: pointer to the dma-buf owner's context.
- * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for
- * memory allocation handle.
+ * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported
+ * where virtual memory is supported.
+ * @memhash_hnode: pointer to the memhash node. This object holds the export count.
* @device_address: physical address of the device's memory. Relevant only
* if phys_pg_pack is NULL (dma-buf was exported from address).
* The total size can be taken from the dmabuf object.
@@ -1749,6 +1789,7 @@ struct hl_dmabuf_priv {
struct dma_buf *dmabuf;
struct hl_ctx *ctx;
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_vm_hash_node *memhash_hnode;
uint64_t device_address;
};
@@ -1797,7 +1838,7 @@ struct hl_cs_outcome_store {
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
- * this hits 0l. It is incremented on CS and CS_WAIT.
+ * this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
* @outcome_store: storage data structure used to remember outcomes of completed
* command submissions for a long time after CS id wraparound.
@@ -1923,6 +1964,7 @@ struct hl_userptr {
* @type: CS_TYPE_*.
* @jobs_cnt: counter of submitted jobs on all queues.
* @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
+ * @completion_timestamp: timestamp of the last completed cs job.
* @sob_addr_offset: sob offset from the configuration base address.
* @initial_sob_count: count of completed signals in SOB before current submission of signal or
* cs with encaps signals.
@@ -1955,6 +1997,7 @@ struct hl_cs {
struct list_head staged_cs_node;
struct list_head debugfs_list;
struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ ktime_t completion_timestamp;
u64 sequence;
u64 staged_sequence;
u64 timeout_jiffies;
@@ -1990,6 +2033,7 @@ struct hl_cs {
* @debugfs_list: node in debugfs list of command submission jobs.
* @refcount: reference counter for usage of the CS job.
* @queue_type: the type of the H/W queue this job is submitted to.
+ * @timestamp: timestamp upon job completion
* @id: the id of this job inside a CS.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
@@ -2016,6 +2060,7 @@ struct hl_cs_job {
struct list_head debugfs_list;
struct kref refcount;
enum hl_queue_type queue_type;
+ ktime_t timestamp;
u32 id;
u32 hw_queue_id;
u32 user_cb_size;
@@ -2076,12 +2121,16 @@ struct hl_cs_parser {
* hl_userptr).
* @node: node to hang on the hash table in context object.
* @vaddr: key virtual address.
+ * @handle: memory handle for device memory allocation.
* @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr).
+ * @export_cnt: number of exports from within the VA block.
*/
struct hl_vm_hash_node {
struct hlist_node node;
u64 vaddr;
+ u64 handle;
void *ptr;
+ int export_cnt;
};
/**
@@ -2109,10 +2158,10 @@ struct hl_vm_hw_block_list_node {
* @pages: the physical page array.
* @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list.
+ * @exported_size: buffer exported size.
* @node: used to attach to deletion list that is used when all the allocations are cleared
* at the teardown of the context.
* @mapping_cnt: number of shared mappings.
- * @exporting_cnt: number of dma-buf exporting.
* @asid: the context related to this list.
* @page_size: size of each page in the pack.
* @flags: HL_MEM_* flags related to this list.
@@ -2126,9 +2175,9 @@ struct hl_vm_phys_pg_pack {
u64 *pages;
u64 npages;
u64 total_size;
+ u64 exported_size;
struct list_head node;
atomic_t mapping_cnt;
- u32 exporting_cnt;
u32 asid;
u32 page_size;
u32 flags;
@@ -2283,7 +2332,7 @@ struct hl_debugfs_entry {
* @userptr_list: list of available userptrs (virtual memory chunk descriptor).
* @userptr_spinlock: protects userptr_list.
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
- * @ctx_mem_hash_spinlock: protects cb_list.
+ * @ctx_mem_hash_mutex: protects list of available contexts with MMU mappings.
* @data_dma_blob_desc: data DMA descriptor of blob.
* @mon_dump_blob_desc: monitor dump descriptor of blob.
* @state_dump: data of the system states in case of a bad cs.
@@ -2314,7 +2363,7 @@ struct hl_dbg_device_entry {
struct list_head userptr_list;
spinlock_t userptr_spinlock;
struct list_head ctx_mem_hash_list;
- spinlock_t ctx_mem_hash_spinlock;
+ struct mutex ctx_mem_hash_mutex;
struct debugfs_blob_wrapper data_dma_blob_desc;
struct debugfs_blob_wrapper mon_dump_blob_desc;
char *state_dump[HL_STATE_DUMP_HIST_LEN];
@@ -2675,11 +2724,11 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
p->size = sz; \
})
-#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, decoder) \
+#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, intr_type) \
({ \
usr_intr.hdev = hdev; \
usr_intr.interrupt_id = intr_id; \
- usr_intr.is_decoder = decoder; \
+ usr_intr.type = intr_type; \
INIT_LIST_HEAD(&usr_intr.wait_list_head); \
spin_lock_init(&usr_intr.wait_list_lock); \
})
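
Editorial note: with the macro change, callers now pass one of the hl_user_interrupt_type values instead of a bare is_decoder flag. A hypothetical initialization of the new per-device TPC interrupt (the interrupt id is illustrative, not taken from the patch):

	u32 tpc_intr_id = 16;	/* illustrative MSI-X interrupt id */

	HL_USR_INTR_STRUCT_INIT(hdev->tpc_interrupt, hdev, tpc_intr_id, HL_USR_INTERRUPT_TPC);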
@@ -2939,8 +2988,8 @@ struct cs_timeout_info {
* @cq_addr: the address of the current handled command buffer
* @cq_size: the size of the current handled command buffer
* @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array.
- * should be equal to 1 incase of undefined opcode
- * in Upper-CP (specific stream) and equal to 4 incase
+ * should be equal to 1 in case of undefined opcode
+ * in Upper-CP (specific stream) and equal to 4 in case
* of undefined opcode in Lower-CP.
* @engine_id: engine-id that the error occurred on
* @stream_id: the stream id the error occurred on. In case the stream equals to
@@ -2961,37 +3010,91 @@ struct undefined_opcode_info {
};
/**
- * struct page_fault_info - info about page fault
- * @pgf_info: page fault information.
+ * struct page_fault_info - page fault information.
+ * @page_fault: holds information collected during a page fault.
* @user_mappings: buffer containing user mappings.
* @num_of_user_mappings: number of user mappings.
+ * @page_fault_detected: if set as 1, then a page-fault was discovered for the
+ * first time after the driver has finished booting-up.
+ * Since we're looking for the page-fault's root cause,
+ * we don't care about the others that might follow it -
+ * so once changed to 1, it will remain that way.
+ * @page_fault_info_available: indicates that a page fault info is now available.
*/
struct page_fault_info {
- struct hl_page_fault_info pgf;
+ struct hl_page_fault_info page_fault;
struct hl_user_mapping *user_mappings;
u64 num_of_user_mappings;
+ atomic_t page_fault_detected;
+ bool page_fault_info_available;
+};
+
+/**
+ * struct razwi_info - RAZWI information.
+ * @razwi: holds information collected during a RAZWI
+ * @razwi_detected: if set as 1, then a RAZWI was discovered for the
+ * first time after the driver has finished booting-up.
+ * Since we're looking for the RAZWI's root cause,
+ * we don't care of the others that might follow it-
+ * so once changed to 1, it will remain that way.
+ * @razwi_info_available: indicates that a RAZWI info is now available.
+ */
+struct razwi_info {
+ struct hl_info_razwi_event razwi;
+ atomic_t razwi_detected;
+ bool razwi_info_available;
+};
+
+/**
+ * struct hw_err_info - HW error information.
+ * @event: holds information on the event.
+ * @event_detected: if set as 1, then a HW event was discovered for the
+ * first time after the driver has finished booting-up.
+ * Currently we assume that only fatal events (that require hard-reset) are
+ * reported, so we don't care about the others that might follow it;
+ * once changed to 1, it will remain that way.
+ * TODO: support multiple events.
+ * @event_info_available: indicates that a HW event info is now available.
+ */
+struct hw_err_info {
+ struct hl_info_hw_err_event event;
+ atomic_t event_detected;
+ bool event_info_available;
+};
+
+/**
+ * struct fw_err_info - FW error information.
+ * @event: holds information on the event.
+ * @event_detected: if set as 1, then a FW event was discovered for the
+ * first time after the driver has finished booting-up.
+ * Currently we assume that only fatal events (that require hard-reset) are
+ * reported, so we don't care about the others that might follow it;
+ * once changed to 1, it will remain that way.
+ * TODO: support multiple events.
+ * @event_info_available: indicates that a FW event info is now available.
+ */
+struct fw_err_info {
+ struct hl_info_fw_err_event event;
+ atomic_t event_detected;
+ bool event_info_available;
};
/**
* struct hl_error_info - holds information collected during an error.
* @cs_timeout: CS timeout error information.
- * @razwi: razwi information.
- * @razwi_info_recorded: if set writing to razwi information is enabled.
- * otherwise - disabled, so the first (root cause) razwi will not be
- * overwritten.
- * @undef_opcode: undefined opcode information
- * @pgf_info: page fault information.
- * @pgf_info_recorded: if set writing to page fault information is enabled.
- * otherwise - disabled, so the first (root cause) page fault will not be
- * overwritten.
+ * @razwi_info: RAZWI information.
+ * @undef_opcode: undefined opcode information.
+ * @page_fault_info: page fault information.
+ * @hw_err: (fatal) hardware error information.
+ * @fw_err: firmware error information.
*/
struct hl_error_info {
struct cs_timeout_info cs_timeout;
- struct hl_info_razwi_event razwi;
- atomic_t razwi_info_recorded;
+ struct razwi_info razwi_info;
struct undefined_opcode_info undef_opcode;
- struct page_fault_info pgf_info;
- atomic_t pgf_info_recorded;
+ struct page_fault_info page_fault_info;
+ struct hw_err_info hw_err;
+ struct fw_err_info fw_err;
};
/**
@@ -3039,6 +3142,7 @@ struct hl_reset_info {
* (required only for PCI address match mode)
* @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
+ * @hclass: pointer to the habanalabs class.
* @cdev: related char device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
* @dev: related kernel basic device structure.
@@ -3053,6 +3157,8 @@ struct hl_reset_info {
* @user_interrupt: array of hl_user_interrupt. upon the corresponding user
* interrupt, driver will monitor the list of fences
* registered to this interrupt.
+ * @tpc_interrupt: single TPC interrupt for all TPCs.
+ * @unexpected_error_interrupt: single interrupt for unexpected user error indication.
* @common_user_cq_interrupt: common user CQ interrupt for all user CQ interrupts.
* upon any user CQ interrupt, driver will monitor the
* list of fences registered to this common structure.
@@ -3148,6 +3254,7 @@ struct hl_reset_info {
* drams are binned-out
* @tpc_binning: contains mask of tpc engines that is received from the f/w which indicates which
* tpc engines are binned-out
+ * @dmabuf_export_cnt: number of dma-buf exports.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
@@ -3157,6 +3264,8 @@ struct hl_reset_info {
* @edma_binning: contains mask of edma engines that is received from the f/w which
* indicates which edma engines are binned-out
* @device_release_watchdog_timeout_sec: device release watchdog timeout value in seconds.
+ * @rotator_binning: contains mask of rotator engines that is received from the f/w
+ * which indicates which rotator engines are binned-out (Gaudi3 and above).
* @id: device minor.
* @id_control: minor of the control device.
* @cdev_idx: char device index. Used for setting its name.
@@ -3200,6 +3309,8 @@ struct hl_reset_info {
* @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
* @reset_upon_device_release: reset the device when the user closes the file descriptor of the
* device.
+ * @supports_ctx_switch: true if a ctx switch is required upon first submission.
+ * @support_preboot_binning: true if we support reading binning info from preboot.
* @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
* @fw_components: Controls which f/w components to load to the device. There are multiple f/w
* stages and sometimes we want to stop at a certain stage. Used only for testing.
@@ -3213,13 +3324,13 @@ struct hl_reset_info {
* Used only for testing.
* @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
* that the f/w is always alive. Used only for testing.
- * @supports_ctx_switch: true if a ctx switch is required upon first submission.
*/
struct hl_device {
struct pci_dev *pdev;
u64 pcie_bar_phys[HL_PCI_NUM_BARS];
void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
+ struct class *hclass;
struct cdev cdev;
struct cdev cdev_ctrl;
struct device *dev;
@@ -3232,6 +3343,8 @@ struct hl_device {
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
struct hl_user_interrupt *user_interrupt;
+ struct hl_user_interrupt tpc_interrupt;
+ struct hl_user_interrupt unexpected_error_interrupt;
struct hl_user_interrupt common_user_cq_interrupt;
struct hl_user_interrupt common_decoder_interrupt;
struct hl_cs **shadow_cs_queue;
@@ -3315,13 +3428,14 @@ struct hl_device {
u64 fw_comms_poll_interval_usec;
u64 dram_binning;
u64 tpc_binning;
-
+ atomic_t dmabuf_export_cnt;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
u32 decoder_binning;
u32 edma_binning;
u32 device_release_watchdog_timeout_sec;
+ u32 rotator_binning;
u16 id;
u16 id_control;
u16 cdev_idx;
@@ -3355,8 +3469,9 @@ struct hl_device {
u8 supports_mmu_prefetch;
u8 reset_upon_device_release;
u8 supports_ctx_switch;
+ u8 support_preboot_binning;
- /* Parameters for bring-up */
+ /* Parameters for bring-up to be upstreamed */
u64 nic_ports_mask;
u64 fw_components;
u8 mmu_enable;
@@ -3394,6 +3509,20 @@ struct hl_cs_encaps_sig_handle {
u32 count;
};
+/**
+ * struct hl_info_fw_err_info - firmware error information structure
+ * @err_type: The type of error detected (or reported).
+ * @event_mask: Pointer to the event mask to be modified with the detected error flag
+ * (can be NULL)
+ * @event_id: The id of the event that reported the error
+ * (applicable when err_type is HL_INFO_FW_REPORTED_ERR).
+ */
+struct hl_info_fw_err_info {
+ enum hl_info_fw_err_type err_type;
+ u64 *event_mask;
+ u16 event_id;
+};
+
/*
* IOCTLs
*/
@@ -3418,6 +3547,10 @@ struct hl_ioctl_desc {
hl_ioctl_t *func;
};
+static inline bool hl_is_fw_ver_below_1_9(struct hl_device *hdev)
+{
+ return (hdev->fw_major_version < 42);
+}
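
Editorial note: the helper encodes the mapping between the firmware's internal major number and the released version line - the comparison implies that internal major 42 corresponds to the 1.9 firmware series, so anything below it is pre-1.9. A hypothetical caller:

	if (hl_is_fw_ver_below_1_9(hdev))
		dev_dbg(hdev->dev, "pre-1.9 firmware detected, using legacy flow\n");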
/*
* Kernel module functions that can be accessed by entire module
@@ -3481,14 +3614,12 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, const char *caller);
void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, const char *caller);
-void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
- dma_addr_t *dma_handle, const char *caller);
-void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
- const char *caller);
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller);
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
@@ -3535,7 +3666,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg);
-irqreturn_t hl_irq_handler_default(int irq, void *arg);
+irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
int hl_asid_init(struct hl_device *hdev);
@@ -3556,7 +3687,7 @@ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
-int hl_device_init(struct hl_device *hdev, struct class *hclass);
+int hl_device_init(struct hl_device *hdev);
void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
@@ -3606,6 +3737,7 @@ bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
void hl_multi_cs_completion_init(struct hl_device *hdev);
+u32 hl_get_active_cs_num(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -3729,6 +3861,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
int hl_fw_init_cpu(struct hl_device *hdev);
+int hl_fw_wait_preboot_ready(struct hl_device *hdev);
int hl_fw_read_preboot_status(struct hl_device *hdev);
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
@@ -3772,6 +3905,8 @@ int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
void hl_fw_set_pll_profile(struct hl_device *hdev);
void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp);
void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp);
+int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
+ dma_addr_t buff, u32 *size);
void hw_sob_get(struct hl_hw_sob *hw_sob);
void hw_sob_put(struct hl_hw_sob *hw_sob);
@@ -3786,6 +3921,7 @@ void hl_dec_fini(struct hl_device *hdev);
void hl_dec_ctx_fini(struct hl_ctx *ctx);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
+void hl_abort_waitings_for_completion(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
@@ -3801,6 +3937,7 @@ const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
+void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args);
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
@@ -3819,6 +3956,8 @@ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_o
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu);
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
u64 *event_mask);
+void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask);
+void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 7815c60df54e..d9df64e75f33 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -12,7 +12,6 @@
#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
@@ -221,9 +220,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
+ memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info));
atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
- atomic_set(&hdev->captured_err_info.razwi_info_recorded, 0);
- atomic_set(&hdev->captured_err_info.pgf_info_recorded, 0);
hdev->captured_err_info.undef_opcode.write_enable = true;
hdev->open_counter++;
@@ -235,6 +233,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
hl_mem_mgr_fini(&hpriv->mem_mgr);
+ hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
mutex_destroy(&hpriv->ctx_lock);
@@ -322,6 +321,7 @@ static void copy_kernel_module_params_to_device(struct hl_device *hdev)
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->major = hl_major;
+ hdev->hclass = hl_class;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
@@ -548,9 +548,7 @@ static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdev);
- pci_enable_pcie_error_reporting(pdev);
-
- rc = hl_device_init(hdev, hl_class);
+ rc = hl_device_init(hdev);
if (rc) {
dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
rc = -ENODEV;
@@ -560,7 +558,6 @@ static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
disable_device:
- pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
@@ -583,7 +580,6 @@ static void hl_pci_remove(struct pci_dev *pdev)
return;
hl_device_fini(hdev);
- pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
}
@@ -700,7 +696,7 @@ static int __init hl_init(void)
hl_major = MAJOR(dev);
- hl_class = class_create(THIS_MODULE, HL_NAME);
+ hl_class = class_create(HL_NAME);
if (IS_ERR(hl_class)) {
pr_err("failed to allocate class\n");
rc = PTR_ERR(hl_class);
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index b6abfa7761a7..203ee857810c 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -7,7 +7,7 @@
#define pr_fmt(fmt) "habanalabs: " fmt
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/fs.h>
@@ -102,11 +102,15 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
+ hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
hw_ip.security_enabled = prop->fw_security_enabled;
hw_ip.revision_id = hdev->pdev->revision;
+ hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
+ hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
+ hw_ip.reserved_dram_size = dram_kmd_size;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@@ -607,16 +611,20 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
- struct hl_info_razwi_event *info = &hdev->captured_err_info.razwi;
- void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct razwi_info *razwi_info;
if ((!max_size) || (!out))
return -EINVAL;
- return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_info_razwi_event)))
- ? -EFAULT : 0;
+ razwi_info = &hdev->captured_err_info.razwi_info;
+ if (!razwi_info->razwi_info_available)
+ return 0;
+
+ return copy_to_user(out, &razwi_info->razwi,
+ min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}
static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
@@ -786,16 +794,20 @@ static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
- struct hl_page_fault_info *info = &hdev->captured_err_info.pgf_info.pgf;
- void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct page_fault_info *pgf_info;
if ((!max_size) || (!out))
return -EINVAL;
- return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_page_fault_info)))
- ? -EFAULT : 0;
+ pgf_info = &hdev->captured_err_info.page_fault_info;
+ if (!pgf_info->page_fault_info_available)
+ return 0;
+
+ return copy_to_user(out, &pgf_info->page_fault,
+ min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
@@ -806,18 +818,112 @@ static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
struct page_fault_info *pgf_info;
u64 actual_size;
- pgf_info = &hdev->captured_err_info.pgf_info;
- args->array_size = pgf_info->num_of_user_mappings;
-
if (!out)
return -EINVAL;
+ pgf_info = &hdev->captured_err_info.page_fault_info;
+ if (!pgf_info->page_fault_info_available)
+ return 0;
+
+ args->array_size = pgf_info->num_of_user_mappings;
+
actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
if (user_buf_size < actual_size)
return -ENOMEM;
- return copy_to_user(out, pgf_info->user_mappings, min_t(size_t, user_buf_size, actual_size))
- ? -EFAULT : 0;
+ return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
+}
+
+static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_device *hdev = hpriv->hdev;
+ u32 user_buf_size = args->return_size;
+ struct hw_err_info *info;
+ int rc;
+
+ if ((!user_buf_size) || (!user_buf))
+ return -EINVAL;
+
+ if (user_buf_size < sizeof(struct hl_info_hw_err_event))
+ return -ENOMEM;
+
+ info = &hdev->captured_err_info.hw_err;
+ if (!info->event_info_available)
+ return -ENOENT;
+
+ rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
+ return rc ? -EFAULT : 0;
+}
+
+static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_device *hdev = hpriv->hdev;
+ u32 user_buf_size = args->return_size;
+ struct fw_err_info *info;
+ int rc;
+
+ if ((!user_buf_size) || (!user_buf))
+ return -EINVAL;
+
+ if (user_buf_size < sizeof(struct hl_info_fw_err_event))
+ return -ENOMEM;
+
+ info = &hdev->captured_err_info.fw_err;
+ if (!info->event_info_available)
+ return -ENOENT;
+
+ rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
+ return rc ? -EFAULT : 0;
+}
+
+static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
+{
+ void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
+ u32 size = info_args->return_size;
+ dma_addr_t dma_handle;
+ bool need_input_buff;
+ void *fw_buff;
+ int rc = 0;
+
+ switch (info_args->fw_sub_opcode) {
+ case HL_PASSTHROUGH_VERSIONS:
+ need_input_buff = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (size > SZ_1M) {
+ dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
+ return -EINVAL;
+ }
+
+ fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
+ if (!fw_buff)
+ return -ENOMEM;
+
+ if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
+ dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
+ rc = -EFAULT;
+ goto free_buff;
+ }
+
+ rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
+ if (rc)
+ goto free_buff;
+
+ if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
+ dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
+ rc = -EFAULT;
+ }
+
+free_buff:
+ hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);
+
+ return rc;
}
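The new FW passthrough above stages the exchange in the CPU-accessible DMA pool; a simplified sketch of that alloc/send/copy/free pairing (error handling trimmed, helper names taken from the hunk, the rest illustrative):

static int example_fw_roundtrip(struct hl_device *hdev, u32 sub_opcode,
				void __user *buff, u32 size)
{
	u32 orig_size = size;
	dma_addr_t dma_handle;
	void *fw_buff;
	int rc;

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	/* FW may shrink 'size' to the actual response length. */
	rc = hl_fw_send_generic_request(hdev, sub_opcode, dma_handle, &size);
	if (!rc && copy_to_user(buff, fw_buff, min(size, orig_size)))
		rc = -EFAULT;

	/* Free with the originally allocated size, as in the hunk above. */
	hl_cpu_accessible_dma_pool_free(hdev, orig_size, fw_buff);

	return rc;
}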
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
@@ -826,9 +932,13 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
enum hl_device_status status;
struct hl_info_args *args = data;
struct hl_device *hdev = hpriv->hdev;
-
int rc;
+ if (args->pad) {
+ dev_dbg(hdev->dev, "Padding bytes must be 0\n");
+ return -EINVAL;
+ }
+
/*
* Information is returned for the following opcodes even if the device
* is disabled or in reset.
@@ -888,22 +998,26 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);
+ case HL_INFO_HW_ERR_EVENT:
+ return hw_err_info(hpriv, args);
+
+ case HL_INFO_FW_ERR_EVENT:
+ return fw_err_info(hpriv, args);
+
+ case HL_INFO_DRAM_USAGE:
+ return dram_usage_info(hpriv, args);
default:
break;
}
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(dev,
+ dev_dbg_ratelimited(dev,
"Device is %s. Can't execute INFO IOCTL\n",
hdev->status[status]);
return -EBUSY;
}
switch (args->op) {
- case HL_INFO_DRAM_USAGE:
- rc = dram_usage_info(hpriv, args);
- break;
-
case HL_INFO_HW_IDLE:
rc = hw_idle(hdev, args);
break;
@@ -947,6 +1061,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_ENGINE_STATUS:
return engine_status_info(hpriv, args);
+ case HL_INFO_FW_GENERIC_REQ:
+ return send_fw_generic_request(hdev, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
@@ -975,7 +1092,7 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
int rc = 0;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute DEBUG IOCTL\n",
hdev->status[status]);
return -EBUSY;
@@ -1072,8 +1189,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = -EFAULT;
goto out_err;
}
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
}
retcode = func(hpriv, kdata);
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/accel/habanalabs/common/hw_queue.c
index d0087c0ec48c..d0087c0ec48c 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/accel/habanalabs/common/hw_queue.c
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 55eb0203817f..8598056216e7 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
@@ -914,7 +914,7 @@ void hl_hwmon_fini(struct hl_device *hdev)
void hl_hwmon_release_resources(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
+ const struct hwmon_channel_info * const *channel_info_arr;
int i = 0;
if (!hdev->hl_chip_info->info)
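The extra const above makes the array slots themselves read-only, not just the hwmon_channel_info entries; a tiny sketch of what the qualifiers still allow (illustrative only):

#include <linux/hwmon.h>

static void example_walk(const struct hwmon_channel_info * const *channel_info_arr)
{
	/* Allowed: advance the local iterator over the NULL-terminated array. */
	while (*channel_info_arr)
		channel_info_arr++;

	/*
	 * Not allowed (fails to compile): writing through the array or its
	 * elements, e.g. channel_info_arr[0] = NULL;
	 */
}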
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c
index 94d537fd4fde..c67895b1cdeb 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/accel/habanalabs/common/irq.c
@@ -72,15 +72,17 @@ static void irq_handle_eqe(struct work_struct *work)
* @hdev: pointer to device structure
* @cs_seq: command submission sequence
* @cq: completion queue
+ * @timestamp: interrupt timestamp
*
*/
-static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
+static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp)
{
struct hl_hw_queue *queue;
struct hl_cs_job *job;
queue = &hdev->kernel_queues[cq->hw_queue_id];
job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
+ job->timestamp = timestamp;
queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
atomic_inc(&queue->ci);
@@ -91,9 +93,10 @@ static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
*
* @hdev: pointer to device structure
* @cs_seq: command submission sequence
+ * @timestamp: interrupt timestamp
*
*/
-static void cs_finish(struct hl_device *hdev, u16 cs_seq)
+static void cs_finish(struct hl_device *hdev, u16 cs_seq, ktime_t timestamp)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_hw_queue *queue;
@@ -113,6 +116,7 @@ static void cs_finish(struct hl_device *hdev, u16 cs_seq)
atomic_inc(&queue->ci);
}
+ cs->completion_timestamp = timestamp;
queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
}
@@ -130,6 +134,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
bool shadow_index_valid, entry_ready;
u16 shadow_index;
struct hl_cq_entry *cq_entry, *cq_base;
+ ktime_t timestamp = ktime_get();
if (hdev->disabled) {
dev_dbg(hdev->dev,
@@ -171,9 +176,9 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
if (shadow_index_valid && !hdev->disabled) {
if (hdev->asic_prop.completion_mode ==
HL_COMPLETION_MODE_CS)
- cs_finish(hdev, shadow_index);
+ cs_finish(hdev, shadow_index, timestamp);
else
- job_finish(hdev, shadow_index, cq);
+ job_finish(hdev, shadow_index, cq, timestamp);
}
/* Clear CQ entry ready bit */
@@ -228,7 +233,7 @@ static void hl_ts_free_objects(struct work_struct *work)
* list to a dedicated workqueue to do the actual put.
*/
static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend,
- struct list_head **free_list)
+ struct list_head **free_list, ktime_t now)
{
struct timestamp_reg_free_node *free_node;
u64 timestamp;
@@ -246,7 +251,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
if (!free_node)
return -ENOMEM;
- timestamp = ktime_get_ns();
+ timestamp = ktime_to_ns(now);
*pend->ts_reg_info.timestamp_kernel_addr = timestamp;
@@ -275,7 +280,6 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
struct list_head *ts_reg_free_list_head = NULL;
struct timestamp_reg_work_obj *job;
bool reg_node_handle_fail = false;
- ktime_t now = ktime_get();
int rc;
/* For registration nodes:
@@ -298,13 +302,13 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
- &ts_reg_free_list_head);
+ &ts_reg_free_list_head, intr->timestamp);
if (rc)
reg_node_handle_fail = true;
}
} else {
/* Handle wait target value node */
- pend->fence.timestamp = now;
+ pend->fence.timestamp = intr->timestamp;
complete_all(&pend->fence.completion);
}
}
@@ -321,6 +325,26 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
}
}
+static void handle_tpc_interrupt(struct hl_device *hdev)
+{
+ u64 event_mask;
+ u32 flags;
+
+ event_mask = HL_NOTIFIER_EVENT_TPC_ASSERT |
+ HL_NOTIFIER_EVENT_USER_ENGINE_ERR |
+ HL_NOTIFIER_EVENT_DEVICE_RESET;
+
+ flags = HL_DRV_RESET_DELAY;
+
+ dev_err_ratelimited(hdev->dev, "Received TPC assert\n");
+ hl_device_cond_reset(hdev, flags, event_mask);
+}
+
+static void handle_unexpected_user_interrupt(struct hl_device *hdev)
+{
+ dev_err_ratelimited(hdev->dev, "Received unexpected user error interrupt\n");
+}
+
/**
* hl_irq_handler_user_interrupt - irq handler for user interrupts
*
@@ -331,33 +355,47 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
- struct hl_device *hdev = user_int->hdev;
- if (user_int->is_decoder)
- handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
- else
- handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
-
- /* Handle user cq or decoder interrupts registered on this specific irq */
- handle_user_interrupt(hdev, user_int);
+ user_int->timestamp = ktime_get();
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
/**
- * hl_irq_handler_default - default irq handler
+ * hl_irq_user_interrupt_thread_handler - irq thread handler for user interrupts.
+ * This function is invoked by threaded irq mechanism
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
-irqreturn_t hl_irq_handler_default(int irq, void *arg)
+irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
{
- struct hl_user_interrupt *user_interrupt = arg;
- struct hl_device *hdev = user_interrupt->hdev;
- u32 interrupt_id = user_interrupt->interrupt_id;
+ struct hl_user_interrupt *user_int = arg;
+ struct hl_device *hdev = user_int->hdev;
- dev_err(hdev->dev, "got invalid user interrupt %u", interrupt_id);
+ switch (user_int->type) {
+ case HL_USR_INTERRUPT_CQ:
+ handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
+
+ /* Handle user cq interrupt registered on this specific irq */
+ handle_user_interrupt(hdev, user_int);
+ break;
+ case HL_USR_INTERRUPT_DECODER:
+ handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
+
+ /* Handle decoder interrupt registered on this specific irq */
+ handle_user_interrupt(hdev, user_int);
+ break;
+ case HL_USR_INTERRUPT_TPC:
+ handle_tpc_interrupt(hdev);
+ break;
+ case HL_USR_INTERRUPT_UNEXPECTED:
+ handle_unexpected_user_interrupt(hdev);
+ break;
+ default:
+ break;
+ }
return IRQ_HANDLED;
}
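The hard handler above now only captures a timestamp and returns IRQ_WAKE_THREAD; a hedged sketch of how such a pair is typically registered with request_threaded_irq() (the registration site is outside this hunk, and the example_* names are illustrative):

#include <linux/interrupt.h>
#include <linux/ktime.h>

/* Hard-irq part: do the minimum, then wake the threaded handler. */
static irqreturn_t example_hardirq(int irq, void *arg)
{
	struct hl_user_interrupt *user_int = arg;

	user_int->timestamp = ktime_get();

	return IRQ_WAKE_THREAD;
}

static int example_request(int irq, struct hl_user_interrupt *user_int)
{
	/* thread_fn runs in process context after IRQ_WAKE_THREAD. */
	return request_threaded_irq(irq, example_hardirq,
				    hl_irq_user_interrupt_thread_handler,
				    0, "hl-user-interrupt", user_int);
}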
@@ -377,8 +415,8 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
struct hl_eq_entry *eq_base;
struct hl_eqe_work *handle_eqe_work;
bool entry_ready;
- u32 cur_eqe;
- u16 cur_eqe_index;
+ u32 cur_eqe, ctl;
+ u16 cur_eqe_index, event_type;
eq_base = eq->kernel_address;
@@ -391,11 +429,10 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
if ((hdev->event_queue.check_eqe_index) &&
- (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
- != cur_eqe_index)) {
+ (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
dev_dbg(hdev->dev,
- "EQE 0x%x in queue is ready but index does not match %d!=%d",
- eq_base[eq->ci].hdr.ctl,
+ "EQE %#x in queue is ready but index does not match %d!=%d",
+ cur_eqe,
((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
cur_eqe_index);
break;
@@ -412,7 +449,10 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
dma_rmb();
if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
- dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
+ ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT);
+ dev_warn(hdev->dev,
+ "Device disabled but received an EQ event (%u)\n", event_type);
goto skip_irq;
}
@@ -449,7 +489,7 @@ irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
{
struct hl_dec *dec = arg;
- schedule_work(&dec->completion_abnrm_work);
+ schedule_work(&dec->abnrm_intr_work);
return IRQ_HANDLED;
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 5e9ae7600d75..a7b6a273ce21 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"
@@ -19,7 +19,9 @@ MODULE_IMPORT_NS(DMA_BUF);
#define HL_MMU_DEBUG 0
/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
-#define DRAM_POOL_PAGE_SIZE SZ_8M
+#define DRAM_POOL_PAGE_SIZE SZ_8M
+
+#define MEM_HANDLE_INVALID ULONG_MAX
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
struct hl_mem_in *args, u64 *handle);
@@ -233,10 +235,8 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
}
rc = hl_pin_host_memory(hdev, addr, size, userptr);
- if (rc) {
- dev_err(hdev->dev, "Failed to pin host memory\n");
+ if (rc)
goto pin_err;
- }
userptr->dma_mapped = true;
userptr->dir = DMA_BIDIRECTIONAL;
@@ -371,12 +371,6 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
return -EINVAL;
}
- if (phys_pg_pack->exporting_cnt) {
- spin_unlock(&vm->idr_lock);
- dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
- return -EINVAL;
- }
-
/* must remove from idr before the freeing of the physical pages as the refcount of the pool
* is also the trigger of the idr destroy
*/
@@ -611,6 +605,7 @@ static u64 get_va_block(struct hl_device *hdev,
bool is_align_pow_2 = is_power_of_2(va_range->page_size);
bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
bool force_hint = flags & HL_MEM_FORCE_HINT;
+ int rc;
if (is_align_pow_2)
align_mask = ~((u64)va_block_align - 1);
@@ -728,9 +723,13 @@ static u64 get_va_block(struct hl_device *hdev,
kfree(new_va_block);
}
- if (add_prev)
- add_va_block_locked(hdev, &va_range->list, prev_start,
- prev_end);
+ if (add_prev) {
+ rc = add_va_block_locked(hdev, &va_range->list, prev_start, prev_end);
+ if (rc) {
+ reserved_valid_start = 0;
+ goto out;
+ }
+ }
print_va_list_locked(hdev, &va_range->list);
out:
@@ -1101,10 +1100,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
rc = dma_map_host_va(hdev, addr, size, &userptr);
- if (rc) {
- dev_err(hdev->dev, "failed to get userptr from va\n");
+ if (rc)
return rc;
- }
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack, false);
@@ -1240,6 +1237,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
hnode->ptr = vm_type;
hnode->vaddr = ret_vaddr;
+ hnode->handle = is_userptr ? MEM_HANDLE_INVALID : handle;
mutex_lock(&ctx->mem_hash_lock);
hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
@@ -1273,6 +1271,18 @@ init_page_pack_err:
return rc;
}
+/* Should be called while the context's mem_hash_lock is taken */
+static struct hl_vm_hash_node *get_vm_hash_node_locked(struct hl_ctx *ctx, u64 vaddr)
+{
+ struct hl_vm_hash_node *hnode;
+
+ hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
+ if (vaddr == hnode->vaddr)
+ return hnode;
+
+ return NULL;
+}
+
/**
* unmap_device_va() - unmap the given device virtual address.
* @ctx: pointer to the context structure.
@@ -1288,10 +1298,10 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
{
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
u64 vaddr = args->unmap.device_virt_addr;
- struct hl_vm_hash_node *hnode = NULL;
struct asic_fixed_properties *prop;
struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
+ struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
enum vm_type *vm_type;
bool is_userptr;
@@ -1301,15 +1311,16 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
- hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
- if (vaddr == hnode->vaddr)
- break;
-
+ hnode = get_vm_hash_node_locked(ctx, vaddr);
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
- dev_err(hdev->dev,
- "unmap failed, no mem hnode for vaddr 0x%llx\n",
- vaddr);
+ dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
+ return -EINVAL;
+ }
+
+ if (hnode->export_cnt) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
return -EINVAL;
}
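The check above relies on export_cnt being read and updated only under mem_hash_lock; a minimal sketch of that guarded-unmap pattern, assuming the hash helpers shown in this hunk (the rest is illustrative and heavily trimmed):

/* Refuse to unmap while a dma-buf export still references this node. */
static int example_try_unmap(struct hl_ctx *ctx, u64 vaddr)
{
	struct hl_vm_hash_node *hnode;

	mutex_lock(&ctx->mem_hash_lock);
	hnode = get_vm_hash_node_locked(ctx, vaddr);
	if (!hnode || hnode->export_cnt) {
		mutex_unlock(&ctx->mem_hash_lock);
		return -EINVAL;
	}
	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	/* ...actual MMU unmap and VA-range release would follow here... */

	return 0;
}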
@@ -1545,10 +1556,10 @@ static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
}
static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
- u64 page_size, struct device *dev,
- enum dma_data_direction dir)
+ u64 page_size, u64 exported_size,
+ struct device *dev, enum dma_data_direction dir)
{
- u64 chunk_size, bar_address, dma_max_seg_size;
+ u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
struct asic_fixed_properties *prop;
int rc, i, j, nents, cur_page;
struct scatterlist *sg;
@@ -1574,16 +1585,23 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
if (!sgt)
return ERR_PTR(-ENOMEM);
+ /* remove export size restrictions in case not explicitly defined */
+ cur_size_to_export = exported_size ? exported_size : (npages * page_size);
+
/* If the size of each page is larger than the dma max segment size,
* then we can't combine pages and the number of entries in the SGL
* will just be the
* <number of pages> * <chunks of max segment size in each page>
*/
- if (page_size > dma_max_seg_size)
- nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
- else
+ if (page_size > dma_max_seg_size) {
+ /* we should limit number of pages according to the exported size */
+ cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
+ nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
+ } else {
+ cur_npages = npages;
+
/* Get number of non-contiguous chunks */
- for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
+ for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
if (pages[i - 1] + page_size != pages[i] ||
chunk_size + page_size > dma_max_seg_size) {
nents++;
@@ -1593,6 +1611,7 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
chunk_size += page_size;
}
+ }
rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
if (rc)
@@ -1615,7 +1634,8 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
else
cur_device_address += dma_max_seg_size;
- chunk_size = min(size_left, dma_max_seg_size);
+ /* make sure not to export over exported size */
+ chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
bar_address = hdev->dram_pci_bar_start + cur_device_address;
@@ -1623,6 +1643,8 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
if (rc)
goto error_unmap;
+ cur_size_to_export -= chunk_size;
+
if (size_left > dma_max_seg_size) {
size_left -= dma_max_seg_size;
} else {
@@ -1634,7 +1656,7 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
/* Merge pages and put them into the scatterlist */
for_each_sgtable_dma_sg(sgt, sg, i) {
chunk_size = page_size;
- for (j = cur_page + 1 ; j < npages ; j++) {
+ for (j = cur_page + 1 ; j < cur_npages ; j++) {
if (pages[j - 1] + page_size != pages[j] ||
chunk_size + page_size > dma_max_seg_size)
break;
@@ -1645,10 +1667,13 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
bar_address = hdev->dram_pci_bar_start +
(pages[cur_page] - prop->dram_base_address);
+ /* make sure not to export over exported size */
+ chunk_size = min(chunk_size, cur_size_to_export);
rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
if (rc)
goto error_unmap;
+ cur_size_to_export -= chunk_size;
cur_page = j;
}
}
@@ -1719,6 +1744,7 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
phys_pg_pack->pages,
phys_pg_pack->npages,
phys_pg_pack->page_size,
+ phys_pg_pack->exported_size,
attachment->dev,
dir);
else
@@ -1726,6 +1752,7 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
&hl_dmabuf->device_address,
1,
hl_dmabuf->dmabuf->size,
+ 0,
attachment->dev,
dir);
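With an explicit exported size, the SG-table sizing above is driven by the exported range rather than the full allocation; a hedged worked example of that arithmetic for the large-page case (values chosen for illustration):

#include <linux/kernel.h>

/*
 * Example: page_size = 8 MiB, dma_max_seg_size = 4 MiB, exported_size = 20 MiB:
 *   cur_npages = DIV_ROUND_UP(20 MiB, 8 MiB) = 3
 *   nents      = 3 * DIV_ROUND_UP(8 MiB, 4 MiB) = 6 SG entries
 */
static u64 example_nents_large_pages(u64 exported_size, u64 page_size,
				     u64 dma_max_seg_size)
{
	u64 cur_npages = DIV_ROUND_UP_SECTOR_T(exported_size, page_size);

	return cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
}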
@@ -1760,20 +1787,62 @@ static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
kfree(sgt);
}
-static void hl_release_dmabuf(struct dma_buf *dmabuf)
+static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
{
- struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
- struct hl_ctx *ctx = hl_dmabuf->ctx;
struct hl_device *hdev = ctx->hdev;
- struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_hash_node *hnode;
- if (hl_dmabuf->phys_pg_pack) {
- spin_lock(&vm->idr_lock);
- hl_dmabuf->phys_pg_pack->exporting_cnt--;
- spin_unlock(&vm->idr_lock);
+ /* get the memory handle */
+ mutex_lock(&ctx->mem_hash_lock);
+ hnode = get_vm_hash_node_locked(ctx, addr);
+ if (!hnode) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (upper_32_bits(hnode->handle)) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
+ hnode->handle, addr);
+ return ERR_PTR(-EINVAL);
}
- hl_ctx_put(hl_dmabuf->ctx);
+ /*
+ * node found, increase export count so this memory cannot be unmapped
+ * and the hash node cannot be deleted.
+ */
+ hnode->export_cnt++;
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ return hnode;
+}
+
+static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
+{
+ mutex_lock(&ctx->mem_hash_lock);
+ hnode->export_cnt--;
+ mutex_unlock(&ctx->mem_hash_lock);
+}
+
+static void hl_release_dmabuf(struct dma_buf *dmabuf)
+{
+ struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
+ struct hl_ctx *ctx;
+
+ if (!hl_dmabuf)
+ return;
+
+ ctx = hl_dmabuf->ctx;
+
+ if (hl_dmabuf->memhash_hnode)
+ memhash_node_export_put(ctx, hl_dmabuf->memhash_hnode);
+
+ atomic_dec(&ctx->hdev->dmabuf_export_cnt);
+ hl_ctx_put(ctx);
+
+ /* Paired with get_file() in export_dmabuf() */
+ fput(ctx->hpriv->filp);
kfree(hl_dmabuf);
}

@@ -1785,7 +1854,7 @@ static const struct dma_buf_ops habanalabs_dmabuf_ops = {
.release = hl_release_dmabuf,
};
-static int export_dmabuf_common(struct hl_ctx *ctx,
+static int export_dmabuf(struct hl_ctx *ctx,
struct hl_dmabuf_priv *hl_dmabuf,
u64 total_size, int flags, int *dmabuf_fd)
{
@@ -1806,49 +1875,33 @@ static int export_dmabuf_common(struct hl_ctx *ctx,
fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
rc = fd;
goto err_dma_buf_put;
}
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
+ atomic_inc(&ctx->hdev->dmabuf_export_cnt);
+
+ /* Get compute device file to enforce release order, such that all exported dma-buf objects
+ * are released first and only then the compute device file.
+ * Paired with fput() in hl_release_dmabuf().
+ */
+ get_file(ctx->hpriv->filp);
*dmabuf_fd = fd;
return 0;
err_dma_buf_put:
+ hl_dmabuf->dmabuf->priv = NULL;
dma_buf_put(hl_dmabuf->dmabuf);
return rc;
}
-/**
- * export_dmabuf_from_addr() - export a dma-buf object for the given memory
- * address and size.
- * @ctx: pointer to the context structure.
- * @device_addr: device memory physical address.
- * @size: size of device memory.
- * @flags: DMA-BUF file/FD flags.
- * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
- *
- * Create and export a dma-buf object for an existing memory allocation inside
- * the device memory, and return a FD which is associated with the dma-buf
- * object.
- *
- * Return: 0 on success, non-zero for failure.
- */
-static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
- u64 size, int flags, int *dmabuf_fd)
+static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
{
- struct hl_dmabuf_priv *hl_dmabuf;
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop;
- u64 bar_address;
- int rc;
-
- prop = &hdev->asic_prop;
-
if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
dev_dbg(hdev->dev,
"exported device memory address 0x%llx should be aligned to 0x%lx\n",
@@ -1863,49 +1916,109 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
return -EINVAL;
}
+ return 0;
+}
+
+static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 bar_address;
+ int rc;
+
+ rc = validate_export_params_common(hdev, device_addr, size);
+ if (rc)
+ return rc;
+
if (device_addr < prop->dram_user_base_address ||
- device_addr + size > prop->dram_end_address ||
- device_addr + size < device_addr) {
+ (device_addr + size) > prop->dram_end_address ||
+ (device_addr + size) < device_addr) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
device_addr, size);
return -EINVAL;
}
- bar_address = hdev->dram_pci_bar_start +
- (device_addr - prop->dram_base_address);
+ bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address);
- if (bar_address + size >
- hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
- bar_address + size < bar_address) {
+ if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
+ (bar_address + size) < bar_address) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
device_addr, size);
return -EINVAL;
}
- hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
- if (!hl_dmabuf)
- return -ENOMEM;
+ return 0;
+}
- hl_dmabuf->device_address = device_addr;
+static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 bar_address;
+ int i, rc;
- rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
+ rc = validate_export_params_common(hdev, device_addr, size);
if (rc)
- goto err_free_dmabuf_wrapper;
+ return rc;
+
+ if ((offset + size) > phys_pg_pack->total_size) {
+ dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
+ offset, size, phys_pg_pack->total_size);
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+
+ bar_address = hdev->dram_pci_bar_start +
+ (phys_pg_pack->pages[i] - prop->dram_base_address);
+
+ if ((bar_address + phys_pg_pack->page_size) >
+ (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
+ (bar_address + phys_pg_pack->page_size) < bar_address) {
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+
+ return -EINVAL;
+ }
+ }
return 0;
+}
-err_free_dmabuf_wrapper:
- kfree(hl_dmabuf);
- return rc;
+static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
+ struct hl_vm_hash_node *hnode)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_vm *vm = &hdev->vm;
+
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) hnode->handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_unlock(&vm->idr_lock);
+
+ if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
+ dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return phys_pg_pack;
}
/**
- * export_dmabuf_from_handle() - export a dma-buf object for the given memory
- * handle.
+ * export_dmabuf_from_addr() - export a dma-buf object for the given memory
+ * address and size.
* @ctx: pointer to the context structure.
- * @handle: device memory allocation handle.
+ * @addr: device address.
+ * @size: size of device memory to export.
+ * @offset: the offset into the buffer from which to start exporting
* @flags: DMA-BUF file/FD flags.
* @dmabuf_fd: pointer to result FD that represents the dma-buf object.
*
@@ -1915,87 +2028,69 @@ err_free_dmabuf_wrapper:
*
* Return: 0 on success, non-zero for failure.
*/
-static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
- int *dmabuf_fd)
+static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 offset,
+ int flags, int *dmabuf_fd)
{
- struct hl_vm_phys_pg_pack *phys_pg_pack;
- struct hl_dmabuf_priv *hl_dmabuf;
- struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ struct hl_vm_hash_node *hnode = NULL;
struct asic_fixed_properties *prop;
- struct hl_vm *vm = &hdev->vm;
- u64 bar_address;
- int rc, i;
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev;
+ u64 export_addr;
+ int rc;
+ hdev = ctx->hdev;
prop = &hdev->asic_prop;
- if (upper_32_bits(handle)) {
- dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
+ /* offset must be 0 in devices without virtual memory support */
+ if (!prop->dram_supports_virtual_memory && offset) {
+ dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n");
return -EINVAL;
}
- spin_lock(&vm->idr_lock);
-
- phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
- if (!phys_pg_pack) {
- spin_unlock(&vm->idr_lock);
- dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
- return -EINVAL;
- }
-
- /* increment now to avoid freeing device memory while exporting */
- phys_pg_pack->exporting_cnt++;
-
- spin_unlock(&vm->idr_lock);
-
- if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
- dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
- rc = -EINVAL;
- goto err_dec_exporting_cnt;
- }
+ export_addr = addr + offset;
- for (i = 0 ; i < phys_pg_pack->npages ; i++) {
-
- bar_address = hdev->dram_pci_bar_start +
- (phys_pg_pack->pages[i] -
- prop->dram_base_address);
-
- if (bar_address + phys_pg_pack->page_size >
- hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
- bar_address + phys_pg_pack->page_size < bar_address) {
-
- dev_dbg(hdev->dev,
- "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
- phys_pg_pack->pages[i],
- phys_pg_pack->page_size);
+ hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
+ if (!hl_dmabuf)
+ return -ENOMEM;
- rc = -EINVAL;
- goto err_dec_exporting_cnt;
+ if (prop->dram_supports_virtual_memory) {
+ hnode = memhash_node_export_get(ctx, addr);
+ if (IS_ERR(hnode)) {
+ rc = PTR_ERR(hnode);
+ goto err_free_dmabuf_wrapper;
}
- }
+ phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode);
+ if (IS_ERR(phys_pg_pack)) {
+ rc = PTR_ERR(phys_pg_pack);
+ goto dec_memhash_export_cnt;
+ }
+ rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
+ if (rc)
+ goto dec_memhash_export_cnt;
- hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
- if (!hl_dmabuf) {
- rc = -ENOMEM;
- goto err_dec_exporting_cnt;
+ phys_pg_pack->exported_size = size;
+ hl_dmabuf->phys_pg_pack = phys_pg_pack;
+ hl_dmabuf->memhash_hnode = hnode;
+ } else {
+ rc = validate_export_params_no_mmu(hdev, export_addr, size);
+ if (rc)
+ goto err_free_dmabuf_wrapper;
}
- hl_dmabuf->phys_pg_pack = phys_pg_pack;
+ hl_dmabuf->device_address = export_addr;
- rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
- flags, dmabuf_fd);
+ rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd);
if (rc)
- goto err_free_dmabuf_wrapper;
+ goto dec_memhash_export_cnt;
return 0;
+dec_memhash_export_cnt:
+ if (prop->dram_supports_virtual_memory)
+ memhash_node_export_put(ctx, hnode);
err_free_dmabuf_wrapper:
kfree(hl_dmabuf);
-
-err_dec_exporting_cnt:
- spin_lock(&vm->idr_lock);
- phys_pg_pack->exporting_cnt--;
- spin_unlock(&vm->idr_lock);
-
return rc;
}
@@ -2082,19 +2177,20 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v
{
struct hl_ts_buff *ts_buff = buf->private;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
struct hl_ts_buff *ts_buff = NULL;
- u32 size, num_elements;
+ u32 num_elements;
+ size_t size;
void *p;
num_elements = *(u32 *)args;
- ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
+ ts_buff = kzalloc(sizeof(*ts_buff), gfp);
if (!ts_buff)
return -ENOMEM;
@@ -2139,11 +2235,11 @@ static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
* allocate_timestamps_buffers() - allocate timestamps buffers
* This function will allocate ts buffer that will later on be mapped to the user
* in order to be able to read the timestamp.
- * in additon it'll allocate an extra buffer for registration management.
+ * in addition it'll allocate an extra buffer for registration management.
* since we cannot fail during registration for out-of-memory situation, so
* we'll prepare a pool which will be used as user interrupt nodes and instead
* of dynamically allocating nodes while registration we'll pick the node from
- * this pool. in addtion it'll add node to the mapping hash which will be used
+ * this pool. in addition it'll add node to the mapping hash which will be used
* to map user ts buffer to the internal kernel ts buffer.
* @hpriv: pointer to the private data of the fd
* @args: ioctl input
@@ -2180,7 +2276,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
int rc, dmabuf_fd = -EBADF;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute MEMORY IOCTL\n",
hdev->status[status]);
return -EBUSY;
@@ -2269,17 +2365,12 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_EXPORT_DMABUF_FD:
- if (hdev->asic_prop.dram_supports_virtual_memory)
- rc = export_dmabuf_from_handle(ctx,
- args->in.export_dmabuf_fd.handle,
- args->in.flags,
- &dmabuf_fd);
- else
- rc = export_dmabuf_from_addr(ctx,
- args->in.export_dmabuf_fd.handle,
- args->in.export_dmabuf_fd.mem_size,
- args->in.flags,
- &dmabuf_fd);
+ rc = export_dmabuf_from_addr(ctx,
+ args->in.export_dmabuf_fd.addr,
+ args->in.export_dmabuf_fd.mem_size,
+ args->in.export_dmabuf_fd.offset,
+ args->in.flags,
+ &dmabuf_fd);
memset(args, 0, sizeof(*args));
args->out.fd = dmabuf_fd;
break;
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 1936d653699e..c4d84df355b0 100644
--- a/drivers/misc/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -25,8 +25,7 @@ struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
if (!buf) {
spin_unlock(&mmg->lock);
- dev_warn(mmg->dev,
- "Buff get failed, no match to handle %#llx\n", handle);
+ dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
return NULL;
}
kref_get(&buf->refcount);
@@ -276,7 +275,7 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
dev_err(mmg->dev,
- "%s, Memory mmap failed, already mmaped to user\n",
+ "%s, Memory mmap failed, already mapped to user\n",
buf->behavior->topic);
rc = -EINVAL;
goto put_mem;
@@ -342,8 +341,19 @@ void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
"%s: Buff handle %u for CTX is still alive\n",
topic, id);
}
+}
- /* TODO: can it happen that some buffer is still in use at this point? */
+/**
+ * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
+ * @mmg: parent unified memory manager
+ *
+ * Destroy the memory manager IDR.
+ * Shall be called when IDR is empty and no memory buffers are in use.
+ */
+void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
+{
+ if (!idr_is_empty(&mmg->handles))
+ dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");
idr_destroy(&mmg->handles);
}
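hl_mem_mgr_fini() now only drops live buffers, and the IDR itself is destroyed separately by hl_mem_mgr_idr_destroy() (the device-open error path earlier in this patch calls them back to back); a short sketch of the intended teardown order:

/* Teardown sketch: release tracked buffers first, destroy the handle IDR last. */
static void example_mem_mgr_teardown(struct hl_mem_mgr *mmg)
{
	hl_mem_mgr_fini(mmg);

	/*
	 * Only once no handle lookups are possible anymore;
	 * hl_mem_mgr_idr_destroy() complains if entries remain.
	 */
	hl_mem_mgr_idr_destroy(mmg);
}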
diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/accel/habanalabs/common/mmu/Makefile
index 1806c524e04a..1806c524e04a 100644
--- a/drivers/misc/habanalabs/common/mmu/Makefile
+++ b/drivers/accel/habanalabs/common/mmu/Makefile
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index 2c1005f74cf4..f379e5b461a6 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -540,8 +540,8 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
u32 page_off;
/*
- * Bit arithmetics cannot be used for non power of two page
- * sizes. In addition, since bit arithmetics is not used,
+ * Bit arithmetic cannot be used for non power of two page
+ * sizes. In addition, since bit arithmetic is not used,
* we cannot ignore dram base. All that shall be considered.
*/
@@ -679,7 +679,9 @@ int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
if (rc)
- dev_err_ratelimited(hdev->dev, "MMU cache invalidation failed\n");
+ dev_err_ratelimited(hdev->dev,
+ "%s cache invalidation failed, rc=%d\n",
+ flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);
return rc;
}
@@ -692,7 +694,9 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
asid, va, size);
if (rc)
- dev_err_ratelimited(hdev->dev, "MMU cache range invalidation failed\n");
+ dev_err_ratelimited(hdev->dev,
+ "%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
+ flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);
return rc;
}
@@ -757,7 +761,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
* @mmu_prop: MMU properties.
* @hop_idx: HOP index.
* @hop_addr: HOP address.
- * @virt_addr: virtual address fro the translation.
+ * @virt_addr: virtual address for the translation.
*
* @return the matching PTE value on success, otherwise U64_MAX.
*/
@@ -781,7 +785,7 @@ static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
struct gen_pool_chunk *chunk,
void *data)
{
- struct hl_device *hdev = (struct hl_device *)data;
+ struct hl_device *hdev = data;
hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
(void *)chunk->start_addr, chunk->phys_addr);
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
index 8a40de4a4761..d925dc4dd097 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
@@ -345,7 +345,6 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
}
hop2_pte_addr = hop2_addr;
- hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
clear_pte(ctx, hop2_pte_addr);
put_pte(ctx, hop2_addr);
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
index afe7ef964f82..afe7ef964f82 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
diff --git a/drivers/misc/habanalabs/common/pci/Makefile b/drivers/accel/habanalabs/common/pci/Makefile
index dc922a686683..dc922a686683 100644
--- a/drivers/misc/habanalabs/common/pci/Makefile
+++ b/drivers/accel/habanalabs/common/pci/Makefile
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/accel/habanalabs/common/pci/pci.c
index 5fe3da5fba30..191e0e3cf3a5 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/accel/habanalabs/common/pci/pci.c
@@ -10,6 +10,8 @@
#include <linux/pci.h>
+#include <trace/events/habanalabs.h>
+
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 100)
#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
@@ -120,6 +122,9 @@ int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+ if (unlikely(trace_habanalabs_elbi_read_enabled()))
+ trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val);
+
return 0;
}
@@ -179,8 +184,11 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
usleep_range(300, 500);
}
- if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
+ if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
+ if (unlikely(trace_habanalabs_elbi_write_enabled()))
+ trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val);
return 0;
+ }
if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
@@ -412,7 +420,6 @@ int hl_pci_init(struct hl_device *hdev)
unmap_pci_bars:
hl_pci_bars_unmap(hdev);
disable_device:
- pci_clear_master(pdev);
pci_disable_device(pdev);
return rc;
@@ -428,6 +435,5 @@ void hl_pci_fini(struct hl_device *hdev)
{
hl_pci_bars_unmap(hdev);
- pci_clear_master(hdev->pdev);
pci_disable_device(hdev->pdev);
}
diff --git a/drivers/misc/habanalabs/common/security.c b/drivers/accel/habanalabs/common/security.c
index 6196c0487c8b..297e6e44fd0c 100644
--- a/drivers/misc/habanalabs/common/security.c
+++ b/drivers/accel/habanalabs/common/security.c
@@ -7,6 +7,19 @@
#include "habanalabs.h"
+static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
+ "Error due to un-priv read",
+ "Error due to un-secure read",
+ "Error due to read from unmapped reg",
+ "Error due to un-priv write",
+ "Error due to un-secure write",
+ "Error due to write to unmapped reg",
+ "External I/F write sec violation",
+ "External I/F write to un-mapped reg",
+ "Read to write only",
+ "Write to read only"
+};
+
/**
* hl_get_pb_block - return the relevant block within the block array
*
@@ -489,7 +502,7 @@ free_glbl_sec:
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
- const struct range *regs_range_array, u32 regs_range_array_size)
+ const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
int i;
struct hl_block_glbl_sec *glbl_sec;
@@ -501,8 +514,8 @@ int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
- hl_unsecure_registers_range(hdev, regs_range_array,
- regs_range_array_size, 0, pb_blocks, glbl_sec,
+ hl_unsecure_registers_range(hdev, user_regs_range_array,
+ user_regs_range_array_size, 0, pb_blocks, glbl_sec,
blocks_array_size);
/* Fill all blocks with the same configuration */
@@ -598,3 +611,164 @@ void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
blocks_array_size);
}
+
+static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
+ struct hl_special_block_info *block_info,
+ u32 major, u32 minor, u32 sub_minor)
+{
+ u32 fw_block_base_address = block_info->base_addr +
+ major * block_info->major_offset +
+ minor * block_info->minor_offset +
+ sub_minor * block_info->sub_minor_offset;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /* Calculation above returns an address for FW use, and therefore should
+ * be cast for driver use.
+ */
+ return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
+}
+
+static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
+ int block_type)
+{
+ int i;
+
+ /* Check if block type is listed in the exclusion list of block types */
+ for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
+ if (block_type == skip_blocks_cfg->block_types[i])
+ return true;
+
+ return false;
+}
+
+static bool hl_check_block_range_exclusion(struct hl_device *hdev,
+ struct hl_skip_blocks_cfg *skip_blocks_cfg,
+ struct hl_special_block_info *block_info,
+ u32 major, u32 minor, u32 sub_minor)
+{
+ u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
+ int i, j;
+
+ block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
+ major, minor, sub_minor);
+
+ for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
+ blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
+ skip_blocks_cfg->block_ranges[i].start) /
+ HL_BLOCK_SIZE + 1;
+ for (j = 0 ; j < blocks_in_range ; j++) {
+ block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
+ j * HL_BLOCK_SIZE;
+ if (block_base_addr == block_base_addr_in_range)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int hl_read_glbl_errors(struct hl_device *hdev,
+ u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
+{
+ struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
+ struct hl_special_block_info *current_block = &special_blocks[blk_idx];
+ u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
+ base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
+ int i;
+
+ block_base = base + major * current_block->major_offset +
+ minor * current_block->minor_offset +
+ sub_minor * current_block->sub_minor_offset;
+
+ glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
+ cause_val = RREG32(glbl_err_cause);
+ if (!cause_val)
+ return 0;
+
+ glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
+ addr_val = RREG32(glbl_err_addr);
+
+ for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
+ if (cause_val & BIT(i))
+ dev_err_ratelimited(hdev->dev,
+ "%s, addr %#llx\n",
+ hl_glbl_error_cause[i],
+ hdev->asic_prop.cfg_base_address + block_base +
+ FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
+ }
+
+ WREG32(glbl_err_cause, cause_val);
+
+ return 0;
+}
+
+void hl_check_for_glbl_errors(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_special_blocks_cfg special_blocks_cfg;
+ struct iterate_special_ctx glbl_err_iter;
+ int rc;
+
+ memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
+ special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;
+
+ glbl_err_iter.fn = &hl_read_glbl_errors;
+ glbl_err_iter.data = &special_blocks_cfg;
+
+ rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
+ if (rc)
+ dev_err_ratelimited(hdev->dev,
+ "Could not iterate special blocks, glbl error check failed\n");
+}
+
+int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
+{
+ struct hl_special_blocks_cfg *special_blocks_cfg =
+ (struct hl_special_blocks_cfg *)ctx->data;
+ struct hl_skip_blocks_cfg *skip_blocks_cfg =
+ special_blocks_cfg->skip_blocks_cfg;
+ u32 major, minor, sub_minor, blk_idx, num_blocks;
+ struct hl_special_block_info *block_info_arr;
+ int rc;
+
+ block_info_arr = hdev->asic_prop.special_blocks;
+ if (!block_info_arr)
+ return -EINVAL;
+
+ num_blocks = hdev->asic_prop.num_of_special_blocks;
+
+ for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
+ if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
+ continue;
+
+ for (major = 0 ; major < block_info_arr->major ; major++) {
+ minor = 0;
+ do {
+ sub_minor = 0;
+ do {
+ if ((hl_check_block_range_exclusion(hdev,
+ skip_blocks_cfg, block_info_arr,
+ major, minor, sub_minor)) ||
+ (skip_blocks_cfg->skip_block_hook &&
+ skip_blocks_cfg->skip_block_hook(hdev,
+ special_blocks_cfg,
+ blk_idx, major, minor, sub_minor))) {
+ sub_minor++;
+ continue;
+ }
+
+ rc = ctx->fn(hdev, blk_idx, major, minor,
+ sub_minor, ctx->data);
+ if (rc)
+ return rc;
+
+ sub_minor++;
+ } while (sub_minor < block_info_arr->sub_minor);
+
+ minor++;
+ } while (minor < block_info_arr->minor);
+ }
+ }
+
+ return 0;
+}
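A hedged sketch of driving the special-block iterator above with a custom callback; it reuses the skip configuration from the ASIC properties as hl_check_for_glbl_errors() does, and the example_* names are illustrative:

static int example_log_block(struct hl_device *hdev, u32 block_id, u32 major,
			     u32 minor, u32 sub_minor, void *data)
{
	dev_dbg(hdev->dev, "special block %u: %u.%u.%u\n",
		block_id, major, minor, sub_minor);

	return 0;
}

static void example_dump_special_blocks(struct hl_device *hdev)
{
	struct hl_special_blocks_cfg cfg = {};
	struct iterate_special_ctx it = {};

	/* The iterator reads the skip lists through ctx->data. */
	cfg.skip_blocks_cfg = &hdev->asic_prop.skip_special_blocks_cfg;

	it.fn = example_log_block;
	it.data = &cfg;

	if (hl_iterate_special_blocks(hdev, &it))
		dev_dbg(hdev->dev, "special block iteration failed\n");
}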
diff --git a/drivers/accel/habanalabs/common/security.h b/drivers/accel/habanalabs/common/security.h
new file mode 100644
index 000000000000..d7a3b3e82ea4
--- /dev/null
+++ b/drivers/accel/habanalabs/common/security.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef SECURITY_H_
+#define SECURITY_H_
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+struct hl_device;
+
+/* special blocks */
+#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE 10
+#define HL_GLBL_ERR_ADDRESS_MASK GENMASK(11, 0)
+/* GLBL_ERR_ADDR register offset from the start of the block */
+#define HL_GLBL_ERR_ADDR_OFFSET 0xF44
+/* GLBL_ERR_CAUSE register offset from the start of the block */
+#define HL_GLBL_ERR_CAUSE_OFFSET 0xF48
+
+/*
+ * struct hl_special_block_info - stores address details of a particular type of
+ * IP block which has a SPECIAL part.
+ *
+ * @block_type: block type as described in every ASIC's block_types enum.
+ * @base_addr: base address of the first block of particular type,
+ * e.g., address of NIC0_UMR0_0 of 'NIC_UMR' block.
+ * @major: number of major blocks of particular type.
+ * @minor: number of minor blocks of particular type.
+ * @sub_minor: number of sub minor blocks of particular type.
+ * @major_offset: address gap between 2 consecutive major blocks of particular type,
+ * e.g., offset between NIC0_UMR0_0 and NIC1_UMR0_0 is 0x80000.
+ * @minor_offset: address gap between 2 consecutive minor blocks of particular type,
+ * e.g., offset between NIC0_UMR0_0 and NIC0_UMR1_0 is 0x20000.
+ * @sub_minor_offset: address gap between 2 consecutive sub_minor blocks of particular
+ * type, e.g., offset between NIC0_UMR0_0 and NIC0_UMR0_1 is 0x1000.
+ *
+ * e.g., in Gaudi2, NIC_UMR blocks can be interpreted as:
+ * NIC<major>_UMR<minor>_<sub_minor> where major=12, minor=2, sub_minor=15.
+ * In other words, for each of 12 major numbers (i.e. 0 to 11) there are
+ * 2 blocks with different minor numbers (i.e. 0 to 1). Again, for each minor
+ * number there are 15 blocks with different sub_minor numbers (i.e. 0 to 14).
+ * So different blocks are NIC0_UMR0_0, NIC0_UMR0_1, ..., NIC0_UMR1_0, ....,
+ * NIC11_UMR1_14.
+ *
+ * Struct's formatted data is located in the SOL-based auto-generated protbits headers.
+ */
+struct hl_special_block_info {
+ int block_type;
+ u32 base_addr;
+ u32 major;
+ u32 minor;
+ u32 sub_minor;
+ u32 major_offset;
+ u32 minor_offset;
+ u32 sub_minor_offset;
+};
+
+/*
+ * struct hl_automated_pb_cfg - represents configurations of a particular type
+ * of IP block which has protection bits.
+ *
+ * @addr: address details as described in hl_automation_pb_addr struct.
+ * @prot_map: each bit corresponds to one among 32 protection configuration regs
+ * (e.g., SPECIAL_GLBL_PRIV). '1' means 0xffffffff and '0' means 0x0
+ * to be written into the corresponding protection configuration reg.
+ * This bit is meaningful if same bit in data_map is 0, otherwise ignored.
+ * @data_map: each bit corresponds to one among 32 protection configuration regs
+ * (e.g., SPECIAL_GLBL_PRIV). '1' means corresponding protection
+ * configuration reg is to be written with a value in array pointed
+ * by 'data', otherwise the value is decided by 'prot_map'.
+ * @data: pointer to data array which stores the config value(s) to be written
+ * to corresponding protection configuration reg(s).
+ * @data_size: size of the data array.
+ *
+ * Each bit of 'data_map' and 'prot_map' fields corresponds to one among 32
+ * protection configuration registers e.g., SPECIAL GLBL PRIV regs (starting at
+ * offset 0xE80). '1' in 'data_map' means protection configuration to be done
+ * using configuration in data array. '0' in 'data_map' means protection
+ * configuration to be done as per the value of corresponding bit in 'prot_map'.
+ * '1' in 'prot_map' means the register to be programmed with 0xFFFFFFFF
+ * (all non-protected). '0' in 'prot_map' means the register to be programmed
+ * with 0x0 (all protected).
+ *
+ * e.g., prot_map = 0x00000001, data_map = 0xC0000000 , data = {0xff, 0x12}
+ * SPECIAL_GLBL_PRIV[0] = 0xFFFFFFFF
+ * SPECIAL_GLBL_PRIV[1..29] = 0x0
+ * SPECIAL_GLBL_PRIV[30] = 0xFF
+ * SPECIAL_GLBL_PRIV[31] = 0x12
+ */
+struct hl_automated_pb_cfg {
+ struct hl_special_block_info addr;
+ u32 prot_map;
+ u32 data_map;
+ const u32 *data;
+ u8 data_size;
+};
+
+/* struct hl_special_blocks_cfg - holds special blocks cfg data.
+ *
+ * @priv_automated_pb_cfg: points to the main privileged PB array.
+ * @sec_automated_pb_cfg: points to the main secured PB array.
+ * @skip_blocks_cfg: holds arrays of block types & block ranges to be excluded.
+ * @priv_cfg_size: size of the main privileged PB array.
+ * @sec_cfg_size: size of the main secured PB array.
+ * @prot_lvl_priv: indication whether this is a privileged or secured PB configuration.
+ */
+struct hl_special_blocks_cfg {
+ struct hl_automated_pb_cfg *priv_automated_pb_cfg;
+ struct hl_automated_pb_cfg *sec_automated_pb_cfg;
+ struct hl_skip_blocks_cfg *skip_blocks_cfg;
+ u32 priv_cfg_size;
+ u32 sec_cfg_size;
+ u8 prot_lvl_priv;
+};
+
+/* Automated security */
+
+/* struct hl_skip_blocks_cfg - holds arrays of block types & block ranges to be
+ * excluded from special blocks configurations.
+ *
+ * @block_types: an array of block types NOT to be configured.
+ * @block_types_len: number of entries in the block_types array.
+ * @block_ranges: an array of block ranges not to be configured.
+ * @block_ranges_len: number of entries in the block_ranges array.
+ * @skip_block_hook: hook that will be called before initializing special blocks.
+ */
+struct hl_skip_blocks_cfg {
+ int *block_types;
+ size_t block_types_len;
+ struct range *block_ranges;
+ size_t block_ranges_len;
+ bool (*skip_block_hook)(struct hl_device *hdev,
+ struct hl_special_blocks_cfg *special_blocks_cfg,
+ u32 blk_idx, u32 major, u32 minor, u32 sub_minor);
+};
+
+/**
+ * struct iterate_special_ctx - HW module special block iterator
+ * @fn: function to apply to each HW module special block instance
+ * @data: optional internal data to the function iterator
+ */
+struct iterate_special_ctx {
+ /*
+ * callback for the HW module special block iterator
+ * @hdev: pointer to the habanalabs device structure
+ * @block_id: block (ASIC specific definition can be dcore/hdcore)
+ * @major: major block index within block_id
+ * @minor: minor block index within the major block
+ * @sub_minor: sub_minor block index within the minor block
+ * @data: function specific data
+ */
+ int (*fn)(struct hl_device *hdev, u32 block_id, u32 major, u32 minor,
+ u32 sub_minor, void *data);
+ void *data;
+};
+
+int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx);
+void hl_check_for_glbl_errors(struct hl_device *hdev);
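/*
 * Usage sketch only (assumption, not part of the driver): a caller fills an
 * iterate_special_ctx with a callback and passes it to
 * hl_iterate_special_blocks(), which applies the callback to every special
 * block instance that is not skipped.
 */
static int example_count_blocks_cb(struct hl_device *hdev, u32 block_id,
					u32 major, u32 minor, u32 sub_minor, void *data)
{
	(*(u32 *)data)++;	/* count every visited block instance */

	return 0;
}

static int example_count_special_blocks(struct hl_device *hdev, u32 *count)
{
	struct iterate_special_ctx ctx = {
		.fn = example_count_blocks_cb,
		.data = count,
	};

	*count = 0;

	return hl_iterate_special_blocks(hdev, &ctx);
}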
+
+#endif /* SECURITY_H_ */
diff --git a/drivers/misc/habanalabs/common/state_dump.c b/drivers/accel/habanalabs/common/state_dump.c
index 74726907c95e..3a9931f24259 100644
--- a/drivers/misc/habanalabs/common/state_dump.c
+++ b/drivers/accel/habanalabs/common/state_dump.c
@@ -6,7 +6,7 @@
*/
#include <linux/vmalloc.h>
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
/**
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 735d8bed0066..01f89f029355 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -497,10 +497,14 @@ int hl_sysfs_init(struct hl_device *hdev)
if (rc) {
dev_err(hdev->dev,
"Failed to add groups to device, error %d\n", rc);
- return rc;
+ goto remove_groups;
}
return 0;
+
+remove_groups:
+ device_remove_groups(hdev->dev, hl_dev_attr_groups);
+ return rc;
}
void hl_sysfs_fini(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/accel/habanalabs/gaudi/Makefile
index 10577c33a816..10577c33a816 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/accel/habanalabs/gaudi/Makefile
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index 9f5e208701ba..a29aa8f7b6f3 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -656,6 +656,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI_EVENT_SIZE;
+ prop->max_num_of_engines = GAUDI_ENGINE_ID_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
set_default_power_values(hdev);
@@ -679,6 +680,10 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = USHRT_MAX;
+ prop->tpc_interrupt_id = USHRT_MAX;
+
+ /* single msi */
+ prop->eq_interrupt_id = 0;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@@ -701,6 +706,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->dma_mask = 48;
+ prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL;
+
return 0;
}
@@ -865,13 +872,18 @@ pci_init:
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
+ /* we are already in the failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
+ goto pci_fini;
+ }
}
return 0;
@@ -2008,38 +2020,6 @@ static int gaudi_enable_msi_single(struct hl_device *hdev)
return rc;
}
-static int gaudi_enable_msi_multi(struct hl_device *hdev)
-{
- int cq_cnt = hdev->asic_prop.completion_queues_count;
- int rc, i, irq_cnt_init, irq;
-
- for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
- irq = gaudi_pci_irq_vector(hdev, i, false);
- rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i],
- &hdev->completion_queue[i]);
- if (rc) {
- dev_err(hdev->dev, "Failed to request IRQ %d", irq);
- goto free_irqs;
- }
- }
-
- irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true);
- rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt],
- &hdev->event_queue);
- if (rc) {
- dev_err(hdev->dev, "Failed to request IRQ %d", irq);
- goto free_irqs;
- }
-
- return 0;
-
-free_irqs:
- for (i = 0 ; i < irq_cnt_init ; i++)
- free_irq(gaudi_pci_irq_vector(hdev, i, false),
- &hdev->completion_queue[i]);
- return rc;
-}
-
static int gaudi_enable_msi(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -2054,14 +2034,7 @@ static int gaudi_enable_msi(struct hl_device *hdev)
return rc;
}
- if (rc < NUMBER_OF_INTERRUPTS) {
- gaudi->multi_msi_mode = false;
- rc = gaudi_enable_msi_single(hdev);
- } else {
- gaudi->multi_msi_mode = true;
- rc = gaudi_enable_msi_multi(hdev);
- }
-
+ rc = gaudi_enable_msi_single(hdev);
if (rc)
goto free_pci_irq_vectors;
@@ -2077,47 +2050,23 @@ free_pci_irq_vectors:
static void gaudi_sync_irqs(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- int i, cq_cnt = hdev->asic_prop.completion_queues_count;
if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
return;
/* Wait for all pending IRQs to be finished */
- if (gaudi->multi_msi_mode) {
- for (i = 0 ; i < cq_cnt ; i++)
- synchronize_irq(gaudi_pci_irq_vector(hdev, i, false));
-
- synchronize_irq(gaudi_pci_irq_vector(hdev,
- GAUDI_EVENT_QUEUE_MSI_IDX,
- true));
- } else {
- synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
- }
+ synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
}
static void gaudi_disable_msi(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count;
if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
return;
gaudi_sync_irqs(hdev);
-
- if (gaudi->multi_msi_mode) {
- irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX,
- true);
- free_irq(irq, &hdev->event_queue);
-
- for (i = 0 ; i < cq_cnt ; i++) {
- irq = gaudi_pci_irq_vector(hdev, i, false);
- free_irq(irq, &hdev->completion_queue[i]);
- }
- } else {
- free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
- }
-
+ free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
pci_free_irq_vectors(hdev->pdev);
gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
@@ -3716,7 +3665,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
if (rc) {
dev_err(hdev->dev,
"failed to set hop0 addr for asid %d\n", i);
- goto err;
+ return rc;
}
}
@@ -3727,7 +3676,9 @@ static int gaudi_mmu_init(struct hl_device *hdev)
/* mem cache invalidation */
WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
- hl_mmu_invalidate_cache(hdev, true, 0);
+ rc = hl_mmu_invalidate_cache(hdev, true, 0);
+ if (rc)
+ return rc;
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
@@ -3743,9 +3694,6 @@ static int gaudi_mmu_init(struct hl_device *hdev)
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
-
-err:
- return rc;
}
static int gaudi_load_firmware_to_device(struct hl_device *hdev)
@@ -3913,11 +3861,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
WREG32(mmCPU_IF_PF_PQ_PI, 0);
- if (gaudi->multi_msi_mode)
- WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
- else
- WREG32(mmCPU_IF_QUEUE_INIT,
- PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
+ WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
irq_handler_offset = prop->gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
@@ -4066,7 +4010,7 @@ disable_queues:
return rc;
}
-static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+static int gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -4076,7 +4020,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
if (!hard_reset) {
dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
- return;
+ return 0;
}
if (hdev->pldm) {
@@ -4197,10 +4141,10 @@ skip_reset:
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
- if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
- dev_err(hdev->dev,
- "Timeout while waiting for device to reset 0x%x\n",
- status);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
+ dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
+ return -ETIMEDOUT;
+ }
if (gaudi) {
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM |
@@ -4213,6 +4157,7 @@ skip_reset:
hdev->device_cpu_is_halted = false;
}
+ return 0;
}
static int gaudi_suspend(struct hl_device *hdev)
@@ -4236,8 +4181,8 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
@@ -5593,7 +5538,6 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_add
u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
u32 msi_vec, bool eb)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt;
struct packet_nop *cq_padding;
u64 msi_addr;
@@ -5623,12 +5567,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_add
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
-
- if (gaudi->multi_msi_mode)
- msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
- else
- msi_addr = mmPCIE_CORE_MSI_REQ;
-
+ msi_addr = hdev->pdev ? mmPCIE_CORE_MSI_REQ : mmPCIE_MSI_INTR_0 + msi_vec * 4;
cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}
@@ -6432,12 +6371,6 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
- dev_err_ratelimited(hdev->dev,
- "Can't send driver job on QMAN0 because the device is not idle\n");
- return -EBUSY;
- }
-
fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
@@ -7301,7 +7234,7 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
- bool razwi, u64 *event_mask)
+ bool check_razwi, u64 *event_mask)
{
bool is_read = false, is_write = false;
u16 engine_id[2], num_of_razwi_eng = 0;
@@ -7320,7 +7253,7 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
- if (razwi) {
+ if (check_razwi) {
gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read,
&is_write);
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask);
@@ -7337,8 +7270,9 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
num_of_razwi_eng = 1;
}
- hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng, razwi_flags,
- event_mask);
+ if (razwi_flags)
+ hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng,
+ razwi_flags, event_mask);
}
}
@@ -7584,7 +7518,7 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
}
-static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
ktime_t zero_time = ktime_set(0, 0);
@@ -7612,6 +7546,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
@@ -7619,6 +7554,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
case GAUDI_EVENT_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -7635,6 +7571,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_info_fw_err_info fw_err_info;
u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u32 fw_fatal_err_flag = 0, flags = 0;
@@ -7887,8 +7824,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break;
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
- event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
- gaudi_print_clk_change_info(hdev, event_type);
+ gaudi_print_clk_change_info(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -7914,7 +7850,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ fw_err_info.err_type = HL_INFO_FW_REPORTED_ERR;
+ fw_err_info.event_id = event_type;
+ fw_err_info.event_mask = &event_mask;
+ hl_handle_fw_err(hdev, &fw_err_info);
goto reset_device;
default:
@@ -7945,6 +7884,10 @@ reset_device:
}
if (reset_required) {
+ /* escalate general hw errors to critical/fatal error */
+ if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
+ hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+
hl_device_cond_reset(hdev, flags, event_mask);
} else {
hl_fw_unmask_irq(hdev, event_type);
@@ -8406,19 +8349,26 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
}
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
-
- hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&hdev->mmu_lock);
-
if (rc)
goto unreserve_internal_cb_pool;
+ rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
+ if (rc)
+ goto unmap_internal_cb_pool;
+
+ mutex_unlock(&hdev->mmu_lock);
+
return 0;
+unmap_internal_cb_pool:
+ hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
+ HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
+ mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
@@ -9133,6 +9083,16 @@ static u32 *gaudi_get_stream_master_qid_arr(void)
return gaudi_stream_master;
}
+static int gaudi_set_dram_properties(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static int gaudi_set_binning_masks(struct hl_device *hdev)
+{
+ return 0;
+}
+
static void gaudi_check_if_razwi_happened(struct hl_device *hdev)
{
}
@@ -9259,6 +9219,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi_set_hbm_bar_base,
.send_device_activity = gaudi_send_device_activity,
+ .set_dram_properties = gaudi_set_dram_properties,
+ .set_binning_masks = gaudi_set_binning_masks,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/accel/habanalabs/gaudi/gaudiP.h
index 4fbcf3f0afe5..b8fa724be5a1 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/accel/habanalabs/gaudi/gaudiP.h
@@ -8,7 +8,7 @@
#ifndef GAUDIP_H_
#define GAUDIP_H_
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/gaudi/gaudi_packets.h"
@@ -28,20 +28,8 @@
#define NUMBER_OF_COLLECTIVE_QUEUES 12
#define NUMBER_OF_SOBS_IN_GRP 11
-/*
- * Number of MSI interrupts IDS:
- * Each completion queue has 1 ID
- * The event queue has 1 ID
- */
-#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
- NUMBER_OF_CPU_HW_QUEUES)
-
#define GAUDI_STREAM_MASTER_ARR_SIZE 8
-#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
-#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
-#endif
-
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
@@ -324,8 +312,6 @@ struct gaudi_internal_qman_info {
* signal we can use this engine in later code paths.
* Each bit is cleared upon reset of its corresponding H/W
* engine.
- * @multi_msi_mode: whether we are working in multi MSI single MSI mode.
- * Multi MSI is possible only with IOMMU enabled.
* @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
* 8-bit value so use u8.
*/
@@ -345,7 +331,6 @@ struct gaudi_device {
u32 events_stat[GAUDI_EVENT_SIZE];
u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
u32 hw_cap_initialized;
- u8 multi_msi_mode;
u8 mmu_cache_inv_pi;
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/accel/habanalabs/gaudi/gaudi_coresight.c
index 08108f5fed67..3455b14554c6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi_coresight.c
@@ -11,7 +11,8 @@
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_reg_map.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
+
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
#define SPMU_EVENT_TYPES_OFFSET 0x400
#define SPMU_MAX_COUNTERS 6
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/accel/habanalabs/gaudi/gaudi_security.c
index 81a3c79a8bc6..81a3c79a8bc6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi_security.c
diff --git a/drivers/misc/habanalabs/gaudi2/Makefile b/drivers/accel/habanalabs/gaudi2/Makefile
index 1e047883ba74..1e047883ba74 100644
--- a/drivers/misc/habanalabs/gaudi2/Makefile
+++ b/drivers/accel/habanalabs/gaudi2/Makefile
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index e793fb2bdcbe..b778cf764a68 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -7,6 +7,7 @@
#include "gaudi2P.h"
#include "gaudi2_masks.h"
+#include "../include/gaudi2/gaudi2_special_blocks.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v2_0.h"
#include "../include/gaudi2/gaudi2_packets.h"
@@ -22,7 +23,8 @@
#define GAUDI2_DMA_POOL_BLK_SIZE SZ_256 /* 256 bytes */
#define GAUDI2_RESET_TIMEOUT_MSEC 2000 /* 2000ms */
-#define GAUDI2_RESET_POLL_TIMEOUT_USEC 50000 /* 50ms */
+
+#define GAUDI2_RESET_POLL_TIMEOUT_USEC 500000 /* 500ms */
#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC 3000000 /* 3s */
@@ -53,6 +55,7 @@
#define GAUDI2_HIF_HMMU_FULL_MASK 0xFFFF
#define GAUDI2_DECODER_FULL_MASK 0x3FF
+#define GAUDI2_NA_EVENT_CAUSE 0xFF
#define GAUDI2_NUM_OF_QM_ERR_CAUSE 18
#define GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE 25
#define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3
@@ -84,10 +87,11 @@
#define KDMA_TIMEOUT_USEC USEC_PER_SEC
-#define IS_DMA_IDLE(dma_core_idle_ind_mask) \
- (!((dma_core_idle_ind_mask) & \
- ((DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_MASK) | \
- (DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_MASK))))
+#define IS_DMA_IDLE(dma_core_sts0) \
+ (!((dma_core_sts0) & (DCORE0_EDMA0_CORE_STS0_BUSY_MASK)))
+
+#define IS_DMA_HALTED(dma_core_sts1) \
+ ((dma_core_sts1) & (DCORE0_EDMA0_CORE_STS1_IS_HALT_MASK))
#define IS_MME_IDLE(mme_arch_sts) (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
@@ -130,6 +134,282 @@
#define ENGINE_ID_DCORE_OFFSET (GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
+/* RAZWI initiator coordinates */
+#define RAZWI_GET_AXUSER_XY(x) \
+ ((x & 0xF8001FF0) >> 4)
+
+#define RAZWI_GET_AXUSER_LOW_XY(x) \
+ ((x & 0x00001FF0) >> 4)
+
+#define RAZWI_INITIATOR_AXUER_L_X_SHIFT 0
+#define RAZWI_INITIATOR_AXUER_L_X_MASK 0x1F
+#define RAZWI_INITIATOR_AXUER_L_Y_SHIFT 5
+#define RAZWI_INITIATOR_AXUER_L_Y_MASK 0xF
+
+#define RAZWI_INITIATOR_AXUER_H_X_SHIFT 23
+#define RAZWI_INITIATOR_AXUER_H_X_MASK 0x1F
+
+#define RAZWI_INITIATOR_ID_X_Y_LOW(x, y) \
+ ((((y) & RAZWI_INITIATOR_AXUER_L_Y_MASK) << RAZWI_INITIATOR_AXUER_L_Y_SHIFT) | \
+ (((x) & RAZWI_INITIATOR_AXUER_L_X_MASK) << RAZWI_INITIATOR_AXUER_L_X_SHIFT))
+
+#define RAZWI_INITIATOR_ID_X_HIGH(x) \
+ (((x) & RAZWI_INITIATOR_AXUER_H_X_MASK) << RAZWI_INITIATOR_AXUER_H_X_SHIFT)
+
+#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
+ (RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
+
+#define PSOC_RAZWI_ENG_STR_SIZE 128
+#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
+
+struct gaudi2_razwi_info {
+ u32 axuser_xy;
+ u32 rtr_ctrl;
+ u16 eng_id;
+ char *eng_name;
+};
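/*
 * Illustrative sketch only (not the driver's RAZWI handling): resolve an
 * initiator name by comparing the packed X/Y coordinates extracted from a
 * captured AXUSER value against one of the tables below. The raw_axuser
 * parameter and the table/table_len arguments are assumptions made for this
 * example.
 */
static const char *example_razwi_initiator_name(u32 raw_axuser,
						const struct gaudi2_razwi_info *table,
						size_t table_len)
{
	u32 axuser_xy = RAZWI_GET_AXUSER_XY(raw_axuser);
	size_t i;

	for (i = 0 ; i < table_len ; i++)
		if (table[i].axuser_xy == axuser_xy)
			return table[i].eng_name;

	return "unknown initiator";
}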
+
+static struct gaudi2_razwi_info common_razwi_info[] = {
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_0, "DEC0"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_1, "DEC1"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 18), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_0, "DEC2"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_1, "DEC3"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_0, "DEC4"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_1, "DEC5"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 18), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_0, "DEC6"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_1, "DEC7"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 6), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC8"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 7), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC9"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 4, 2), mmDCORE0_RTR1_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_0, "TPC0"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 4, 4), mmDCORE0_RTR1_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_1, "TPC1"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 4, 2), mmDCORE0_RTR2_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_2, "TPC2"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 4, 4), mmDCORE0_RTR2_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_3, "TPC3"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 4, 2), mmDCORE0_RTR3_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_4, "TPC4"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 4, 4), mmDCORE0_RTR3_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_5, "TPC5"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 4, 14), mmDCORE1_RTR6_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_0, "TPC6"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 4, 16), mmDCORE1_RTR6_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_1, "TPC7"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 4, 14), mmDCORE1_RTR5_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_2, "TPC8"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 4, 16), mmDCORE1_RTR5_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_3, "TPC9"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 4, 14), mmDCORE1_RTR4_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_4, "TPC10"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 4, 16), mmDCORE1_RTR4_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_5, "TPC11"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 11, 2), mmDCORE2_RTR3_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_0, "TPC12"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 11, 4), mmDCORE2_RTR3_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_1, "TPC13"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 11, 2), mmDCORE2_RTR2_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_2, "TPC14"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 11, 4), mmDCORE2_RTR2_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_3, "TPC15"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 11, 2), mmDCORE2_RTR1_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_4, "TPC16"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 11, 4), mmDCORE2_RTR1_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_5, "TPC17"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 11, 14), mmDCORE3_RTR4_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_0, "TPC18"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 11, 16), mmDCORE3_RTR4_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_1, "TPC19"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 11, 14), mmDCORE3_RTR5_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_2, "TPC20"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 11, 16), mmDCORE3_RTR5_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_3, "TPC21"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 11, 14), mmDCORE3_RTR6_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_4, "TPC22"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 11, 16), mmDCORE3_RTR6_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC23"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC24"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 8), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC0_0, "NIC0"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 10), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC0_1, "NIC1"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 12), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC1_0, "NIC2"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC1_1, "NIC3"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 15), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC2_0, "NIC4"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC2_1, "NIC5"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC3_0, "NIC6"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 6), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC3_1, "NIC7"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 8), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC4_0, "NIC8"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 12), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC4_1, "NIC9"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC5_0, "NIC10"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC5_1, "NIC11"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PDMA_0, "PDMA0"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 3), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PDMA_1, "PDMA1"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "PMMU"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 5), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "PCIE"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 16), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_ARC_FARM, "ARC_FARM"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 17), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_KDMA, "KDMA"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_0, "EDMA0"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_1, "EDMA1"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_0, "EDMA2"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_1, "EDMA3"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_0, "EDMA4"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_1, "EDMA5"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_0, "EDMA6"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_1, "EDMA7"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU0"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU1"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU2"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU3"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU4"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU5"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU6"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU7"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU8"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU9"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU10"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU11"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU12"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU13"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU14"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU15"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_ROT_0, "ROT0"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_ROT_1, "ROT1"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PSOC, "CPU"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 11), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PSOC, "PSOC"}
+};
+
+static struct gaudi2_razwi_info mme_razwi_info[] = {
+ /* MME X high coordinate is N/A, hence using only low coordinates */
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE4"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE4"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE4"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE4"}
+};
+
enum hl_pmmu_fatal_cause {
LATENCY_RD_OUT_FIFO_OVERRUN,
LATENCY_WR_OUT_FIFO_OVERRUN,
@@ -675,14 +955,13 @@ static const char * const gaudi2_kdma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CO
struct gaudi2_sm_sei_cause_data {
const char *cause_name;
const char *log_name;
- u32 log_mask;
};
static const struct gaudi2_sm_sei_cause_data
gaudi2_sm_sei_cause[GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE] = {
- {"calculated SO value overflow/underflow", "SOB group ID", 0x7FF},
- {"payload address of monitor is not aligned to 4B", "monitor addr", 0xFFFF},
- {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id", 0xFFFF},
+ {"calculated SO value overflow/underflow", "SOB ID"},
+ {"payload address of monitor is not aligned to 4B", "monitor addr"},
+ {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id"},
};
static const char * const
@@ -1436,6 +1715,34 @@ static const u32 gaudi2_tpc_cfg_blocks_bases[TPC_ID_SIZE] = {
[TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_CFG_BASE,
};
+static const u32 gaudi2_tpc_eml_cfg_blocks_bases[TPC_ID_SIZE] = {
+ [TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_EML_CFG_BASE,
+};
+
const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE] = {
[ROTATOR_ID_0] = mmROT0_BASE,
[ROTATOR_ID_1] = mmROT1_BASE
@@ -1474,6 +1781,56 @@ static const u32 gaudi2_rot_id_to_queue_id[ROTATOR_ID_SIZE] = {
[ROTATOR_ID_1] = GAUDI2_QUEUE_ID_ROT_1_0,
};
+static const u32 gaudi2_tpc_engine_id_to_tpc_id[] = {
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_0] = TPC_ID_DCORE0_TPC0,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_1] = TPC_ID_DCORE0_TPC1,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_2] = TPC_ID_DCORE0_TPC2,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_3] = TPC_ID_DCORE0_TPC3,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_4] = TPC_ID_DCORE0_TPC4,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_5] = TPC_ID_DCORE0_TPC5,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_0] = TPC_ID_DCORE1_TPC0,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_1] = TPC_ID_DCORE1_TPC1,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_2] = TPC_ID_DCORE1_TPC2,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_3] = TPC_ID_DCORE1_TPC3,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_4] = TPC_ID_DCORE1_TPC4,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_5] = TPC_ID_DCORE1_TPC5,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_0] = TPC_ID_DCORE2_TPC0,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_1] = TPC_ID_DCORE2_TPC1,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_2] = TPC_ID_DCORE2_TPC2,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_3] = TPC_ID_DCORE2_TPC3,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_4] = TPC_ID_DCORE2_TPC4,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_5] = TPC_ID_DCORE2_TPC5,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_0] = TPC_ID_DCORE3_TPC0,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_1] = TPC_ID_DCORE3_TPC1,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_2] = TPC_ID_DCORE3_TPC2,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_3] = TPC_ID_DCORE3_TPC3,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_4] = TPC_ID_DCORE3_TPC4,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_5] = TPC_ID_DCORE3_TPC5,
+ /* the PCI TPC is placed last (mapped like HW) */
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_6] = TPC_ID_DCORE0_TPC6,
+};
+
+static const u32 gaudi2_mme_engine_id_to_mme_id[] = {
+ [GAUDI2_DCORE0_ENGINE_ID_MME] = MME_ID_DCORE0,
+ [GAUDI2_DCORE1_ENGINE_ID_MME] = MME_ID_DCORE1,
+ [GAUDI2_DCORE2_ENGINE_ID_MME] = MME_ID_DCORE2,
+ [GAUDI2_DCORE3_ENGINE_ID_MME] = MME_ID_DCORE3,
+};
+
+static const u32 gaudi2_edma_engine_id_to_edma_id[] = {
+ [GAUDI2_ENGINE_ID_PDMA_0] = DMA_CORE_ID_PDMA0,
+ [GAUDI2_ENGINE_ID_PDMA_1] = DMA_CORE_ID_PDMA1,
+ [GAUDI2_DCORE0_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA0,
+ [GAUDI2_DCORE0_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA1,
+ [GAUDI2_DCORE1_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA2,
+ [GAUDI2_DCORE1_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA3,
+ [GAUDI2_DCORE2_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA4,
+ [GAUDI2_DCORE2_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA5,
+ [GAUDI2_DCORE3_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA6,
+ [GAUDI2_DCORE3_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA7,
+ [GAUDI2_ENGINE_ID_KDMA] = DMA_CORE_ID_KDMA,
+};
+
const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0,
@@ -1498,41 +1855,6 @@ static const char gaudi2_vdec_irq_name[GAUDI2_VDEC_MSIX_ENTRIES][GAUDI2_MAX_STRI
"gaudi2 vdec s_1", "gaudi2 vdec s_1 abnormal"
};
-static const u32 rtr_coordinates_to_rtr_id[NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES] = {
- RTR_ID_X_Y(2, 4),
- RTR_ID_X_Y(3, 4),
- RTR_ID_X_Y(4, 4),
- RTR_ID_X_Y(5, 4),
- RTR_ID_X_Y(6, 4),
- RTR_ID_X_Y(7, 4),
- RTR_ID_X_Y(8, 4),
- RTR_ID_X_Y(9, 4),
- RTR_ID_X_Y(10, 4),
- RTR_ID_X_Y(11, 4),
- RTR_ID_X_Y(12, 4),
- RTR_ID_X_Y(13, 4),
- RTR_ID_X_Y(14, 4),
- RTR_ID_X_Y(15, 4),
- RTR_ID_X_Y(16, 4),
- RTR_ID_X_Y(17, 4),
- RTR_ID_X_Y(2, 11),
- RTR_ID_X_Y(3, 11),
- RTR_ID_X_Y(4, 11),
- RTR_ID_X_Y(5, 11),
- RTR_ID_X_Y(6, 11),
- RTR_ID_X_Y(7, 11),
- RTR_ID_X_Y(8, 11),
- RTR_ID_X_Y(9, 11),
- RTR_ID_X_Y(0, 0),/* 24 no id */
- RTR_ID_X_Y(0, 0),/* 25 no id */
- RTR_ID_X_Y(0, 0),/* 26 no id */
- RTR_ID_X_Y(0, 0),/* 27 no id */
- RTR_ID_X_Y(14, 11),
- RTR_ID_X_Y(15, 11),
- RTR_ID_X_Y(16, 11),
- RTR_ID_X_Y(17, 11)
-};
-
enum rtr_id {
DCORE0_RTR0,
DCORE0_RTR1,
@@ -1568,7 +1890,7 @@ enum rtr_id {
DCORE3_RTR7,
};
-static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
+static const u32 gaudi2_tpc_initiator_hbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3,
DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4,
DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1,
@@ -1576,33 +1898,61 @@ static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORE
DCORE0_RTR0
};
-static const u32 gaudi2_dec_initiator_rtr_id[NUMBER_OF_DEC] = {
+static const u32 gaudi2_tpc_initiator_lbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
+ DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2,
+ DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5,
+ DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, DCORE2_RTR0, DCORE2_RTR0,
+ DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6, DCORE3_RTR7, DCORE3_RTR7,
+ DCORE0_RTR0
+};
+
+static const u32 gaudi2_dec_initiator_hbw_rtr_id[NUMBER_OF_DEC] = {
DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0,
DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0
};
-static const u32 gaudi2_nic_initiator_rtr_id[NIC_NUMBER_OF_MACROS] = {
+static const u32 gaudi2_dec_initiator_lbw_rtr_id[NUMBER_OF_DEC] = {
+ DCORE0_RTR1, DCORE0_RTR1, DCORE1_RTR6, DCORE1_RTR6, DCORE2_RTR1, DCORE2_RTR1,
+ DCORE3_RTR6, DCORE3_RTR6, DCORE0_RTR0, DCORE0_RTR0
+};
+
+static const u32 gaudi2_nic_initiator_hbw_rtr_id[NIC_NUMBER_OF_MACROS] = {
DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
-struct sft_info {
- u8 interface_id;
- u8 dcore_id;
+static const u32 gaudi2_nic_initiator_lbw_rtr_id[NIC_NUMBER_OF_MACROS] = {
+ DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
+ DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
-static const struct sft_info gaudi2_edma_initiator_sft_id[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
- {0, 0}, {1, 0}, {0, 1}, {1, 1}, {1, 2}, {1, 3}, {0, 2}, {0, 3},
+static const u32 gaudi2_edma_initiator_hbw_sft[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
+ mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE
};
-static const u32 gaudi2_pdma_initiator_rtr_id[NUM_OF_PDMA] = {
+static const u32 gaudi2_pdma_initiator_hbw_rtr_id[NUM_OF_PDMA] = {
DCORE0_RTR0, DCORE0_RTR0
};
-static const u32 gaudi2_rot_initiator_rtr_id[NUM_OF_ROT] = {
+static const u32 gaudi2_pdma_initiator_lbw_rtr_id[NUM_OF_PDMA] = {
+ DCORE0_RTR2, DCORE0_RTR2
+};
+
+static const u32 gaudi2_rot_initiator_hbw_rtr_id[NUM_OF_ROT] = {
DCORE2_RTR0, DCORE3_RTR7
};
+static const u32 gaudi2_rot_initiator_lbw_rtr_id[NUM_OF_ROT] = {
+ DCORE2_RTR2, DCORE3_RTR5
+};
+
struct mme_initiators_rtr_id {
u32 wap0;
u32 wap1;
@@ -1655,6 +2005,30 @@ struct hbm_mc_error_causes {
char cause[50];
};
+static struct hl_special_block_info gaudi2_special_blocks[] = GAUDI2_SPECIAL_BLOCKS;
+
+/* The special blocks iterator is currently used to configure security protection bits
+ * and to read global errors. Most HW blocks are addressable; those that aren't (N/A)
+ * must be skipped. The following configurations are shared between PB configuration
+ * and global error reading, since both currently use the same settings.
+ * Once that changes, separate configurations must be used for each.
+ */
+static int gaudi2_iterator_skip_block_types[] = {
+ GAUDI2_BLOCK_TYPE_PLL,
+ GAUDI2_BLOCK_TYPE_EU_BIST,
+ GAUDI2_BLOCK_TYPE_HBM,
+ GAUDI2_BLOCK_TYPE_XFT
+};
+
+static struct range gaudi2_iterator_skip_block_ranges[] = {
+ /* Skip all PSOC blocks except for PSOC_GLOBAL_CONF */
+ {mmPSOC_I2C_M0_BASE, mmPSOC_EFUSE_BASE},
+ {mmPSOC_BTL_BASE, mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE},
+ /* Skip all CPU blocks except for CPU_IF */
+ {mmCPU_CA53_CFG_BASE, mmCPU_CA53_CFG_BASE},
+ {mmCPU_TIMESTAMP_BASE, mmCPU_MSTR_IF_RR_SHRD_HBW_BASE}
+};
+
static struct hbm_mc_error_causes hbm_mc_spi[GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE] = {
{HBM_MC_SPI_TEMP_PIN_CHG_MASK, "temperature pins changed"},
{HBM_MC_SPI_THR_ENG_MASK, "temperature-based throttling engaged"},
@@ -1731,7 +2105,14 @@ static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id);
static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val);
static int gaudi2_send_job_to_kdma(struct hl_device *hdev, u64 src_addr, u64 dst_addr, u32 size,
bool is_memset);
+static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
+static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
+static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr);
+static u64 gaudi2_mmu_descramble_addr(struct hl_device *hdev, u64 scrambled_addr);
static void gaudi2_init_scrambler_hbm(struct hl_device *hdev)
{
@@ -1935,6 +2316,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->hints_range_reservation = true;
+ prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1;
+
if (hdev->pldm)
prop->mmu_pgt_size = 0x800000; /* 8MB */
else
@@ -1958,7 +2341,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
prop->dmmu.last_mask = LAST_MASK;
prop->dmmu.host_resident = 1;
- /* TODO: will be duplicated until implementing per-MMU props */
prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
@@ -1974,7 +2356,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.host_resident = 1;
prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
prop->pmmu.last_mask = LAST_MASK;
- /* TODO: will be duplicated until implementing per-MMU props */
prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
@@ -2031,11 +2412,14 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
}
+ prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE;
prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
+ prop->supports_engine_modes = true;
+
prop->dc_power_default = DC_POWER_DEFAULT;
prop->cb_pool_cb_cnt = GAUDI2_CB_POOL_CB_CNT;
@@ -2054,6 +2438,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = GAUDI2_IRQ_NUM_USER_FIRST;
+ prop->tpc_interrupt_id = GAUDI2_IRQ_NUM_TPC_ASSERT;
+ prop->eq_interrupt_id = GAUDI2_IRQ_NUM_EVENT_QUEUE;
prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER;
@@ -2070,6 +2456,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->dma_mask = 64;
+ prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
+
return 0;
}
@@ -2434,6 +2822,25 @@ static int gaudi2_set_cluster_binning_masks(struct hl_device *hdev)
return 0;
}
+static int gaudi2_set_binning_masks(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = gaudi2_set_cluster_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_tpc_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_dec_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int gaudi2_cpucp_info_get(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
@@ -2481,23 +2888,19 @@ static int gaudi2_cpucp_info_get(struct hl_device *hdev)
hdev->tpc_binning = le64_to_cpu(prop->cpucp_info.tpc_binning_mask);
hdev->decoder_binning = lower_32_bits(le64_to_cpu(prop->cpucp_info.decoder_binning_mask));
+ dev_dbg(hdev->dev, "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x\n",
+ hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
+ hdev->decoder_binning);
+
/*
* at this point the DRAM parameters need to be updated according to data obtained
* from the FW
*/
- rc = gaudi2_set_dram_properties(hdev);
- if (rc)
- return rc;
-
- rc = gaudi2_set_cluster_binning_masks(hdev);
- if (rc)
- return rc;
-
- rc = gaudi2_set_tpc_binning_masks(hdev);
+ rc = hdev->asic_funcs->set_dram_properties(hdev);
if (rc)
return rc;
- rc = gaudi2_set_dec_binning_masks(hdev);
+ rc = hdev->asic_funcs->set_binning_masks(hdev);
if (rc)
return rc;
@@ -2578,13 +2981,18 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
+ /* we are already in the failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi2_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
+ goto pci_fini;
+ }
}
return 0;
@@ -2626,6 +3034,7 @@ static bool gaudi2_is_arc_tpc_owned(u64 arc_id)
static void gaudi2_init_arcs(struct hl_device *hdev)
{
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 arc_id;
u32 i;
@@ -2655,6 +3064,10 @@ static void gaudi2_init_arcs(struct hl_device *hdev)
gaudi2_set_arc_id_cap(hdev, arc_id);
}
+
+ /* Fetch ARC scratchpad address */
+ hdev->asic_prop.engine_core_interrupt_reg_addr =
+ CFG_BASE + le32_to_cpu(dyn_regs->eng_arc_irq_ctrl);
}
static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id)
@@ -2706,16 +3119,21 @@ static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id)
return 0;
}
-static void gaudi2_scrub_arcs_dccm(struct hl_device *hdev)
+static int gaudi2_scrub_arcs_dccm(struct hl_device *hdev)
{
u16 arc_id;
+ int rc;
for (arc_id = CPU_ID_SCHED_ARC0 ; arc_id < CPU_ID_MAX ; arc_id++) {
if (!gaudi2_is_arc_enabled(hdev, arc_id))
continue;
- gaudi2_scrub_arc_dccm(hdev, arc_id);
+ rc = gaudi2_scrub_arc_dccm(hdev, arc_id);
+ if (rc)
+ return rc;
}
+
+ return 0;
}
static int gaudi2_late_init(struct hl_device *hdev)
@@ -2739,7 +3157,13 @@ static int gaudi2_late_init(struct hl_device *hdev)
}
gaudi2_init_arcs(hdev);
- gaudi2_scrub_arcs_dccm(hdev);
+
+ rc = gaudi2_scrub_arcs_dccm(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to scrub arcs DCCM\n");
+ goto disable_pci_access;
+ }
+
gaudi2_init_security(hdev);
return 0;
@@ -2923,13 +3347,20 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i, j, k;
+ /* Initialize TPC interrupt */
+ HL_USR_INTR_STRUCT_INIT(hdev->tpc_interrupt, hdev, 0, HL_USR_INTERRUPT_TPC);
+
+ /* Initialize unexpected error interrupt */
+ HL_USR_INTR_STRUCT_INIT(hdev->unexpected_error_interrupt, hdev, 0,
+ HL_USR_INTERRUPT_UNEXPECTED);
+
/* Initialize common user CQ interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev,
- HL_COMMON_USER_CQ_INTERRUPT_ID, false);
+ HL_COMMON_USER_CQ_INTERRUPT_ID, HL_USR_INTERRUPT_CQ);
/* Initialize common decoder interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_decoder_interrupt, hdev,
- HL_COMMON_DEC_INTERRUPT_ID, true);
+ HL_COMMON_DEC_INTERRUPT_ID, HL_USR_INTERRUPT_DECODER);
/* User interrupts structure holds both decoder and user interrupts from various engines.
* We first initialize the decoder interrupts and then we add the user interrupts.
@@ -2942,10 +3373,11 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
*/
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM;
i += 2, j++)
- HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, true);
+ HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i,
+ HL_USR_INTERRUPT_DECODER);
for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++)
- HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, false);
+ HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, HL_USR_INTERRUPT_CQ);
}
static inline int gaudi2_get_non_zero_random_int(void)
@@ -2955,6 +3387,141 @@ static inline int gaudi2_get_non_zero_random_int(void)
return rand ? rand : 1;
}
+static void gaudi2_special_blocks_free(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_skip_blocks_cfg *skip_special_blocks_cfg =
+ &prop->skip_special_blocks_cfg;
+
+ kfree(prop->special_blocks);
+ kfree(skip_special_blocks_cfg->block_types);
+ kfree(skip_special_blocks_cfg->block_ranges);
+}
+
+static void gaudi2_special_blocks_iterator_free(struct hl_device *hdev)
+{
+ gaudi2_special_blocks_free(hdev);
+}
+
+static bool gaudi2_special_block_skip(struct hl_device *hdev,
+ struct hl_special_blocks_cfg *special_blocks_cfg,
+ u32 blk_idx, u32 major, u32 minor, u32 sub_minor)
+{
+ return false;
+}
+
+static int gaudi2_special_blocks_config(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i, rc;
+
+ /* Configure Special blocks */
+ prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE;
+ prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
+ prop->special_blocks = kmalloc_array(prop->num_of_special_blocks,
+ sizeof(*prop->special_blocks), GFP_KERNEL);
+ if (!prop->special_blocks)
+ return -ENOMEM;
+
+ for (i = 0 ; i < prop->num_of_special_blocks ; i++)
+ memcpy(&prop->special_blocks[i], &gaudi2_special_blocks[i],
+ sizeof(*prop->special_blocks));
+
+ /* Configure when to skip Special blocks */
+ memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg));
+ prop->skip_special_blocks_cfg.skip_block_hook = gaudi2_special_block_skip;
+
+ if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) {
+ prop->skip_special_blocks_cfg.block_types =
+ kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_types),
+ sizeof(gaudi2_iterator_skip_block_types[0]), GFP_KERNEL);
+ if (!prop->skip_special_blocks_cfg.block_types) {
+ rc = -ENOMEM;
+ goto free_special_blocks;
+ }
+
+ memcpy(prop->skip_special_blocks_cfg.block_types, gaudi2_iterator_skip_block_types,
+ sizeof(gaudi2_iterator_skip_block_types));
+
+ prop->skip_special_blocks_cfg.block_types_len =
+ ARRAY_SIZE(gaudi2_iterator_skip_block_types);
+ }
+
+ if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) {
+ prop->skip_special_blocks_cfg.block_ranges =
+ kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_ranges),
+ sizeof(gaudi2_iterator_skip_block_ranges[0]), GFP_KERNEL);
+ if (!prop->skip_special_blocks_cfg.block_ranges) {
+ rc = -ENOMEM;
+ goto free_skip_special_blocks_types;
+ }
+
+ for (i = 0 ; i < ARRAY_SIZE(gaudi2_iterator_skip_block_ranges) ; i++)
+ memcpy(&prop->skip_special_blocks_cfg.block_ranges[i],
+ &gaudi2_iterator_skip_block_ranges[i],
+ sizeof(struct range));
+
+ prop->skip_special_blocks_cfg.block_ranges_len =
+ ARRAY_SIZE(gaudi2_iterator_skip_block_ranges);
+ }
+
+ return 0;
+
+free_skip_special_blocks_types:
+ kfree(prop->skip_special_blocks_cfg.block_types);
+free_special_blocks:
+ kfree(prop->special_blocks);
+
+ return rc;
+}
+
+static int gaudi2_special_blocks_iterator_config(struct hl_device *hdev)
+{
+ return gaudi2_special_blocks_config(hdev);
+}
+
+static void gaudi2_test_queues_msgs_free(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_queues_test_info *msg_info = gaudi2->queues_test_info;
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) {
+ /* bail-out if this is an allocation failure point */
+ if (!msg_info[i].kern_addr)
+ break;
+
+ hl_asic_dma_pool_free(hdev, msg_info[i].kern_addr, msg_info[i].dma_addr);
+ msg_info[i].kern_addr = NULL;
+ }
+}
+
+static int gaudi2_test_queues_msgs_alloc(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_queues_test_info *msg_info = gaudi2->queues_test_info;
+ int i, rc;
+
+ /* allocate a message-short buf for each Q we intend to test */
+ for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) {
+ msg_info[i].kern_addr =
+ (void *)hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_short),
+ GFP_KERNEL, &msg_info[i].dma_addr);
+ if (!msg_info[i].kern_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate dma memory for H/W queue %d testing\n", i);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
+ return 0;
+
+err_exit:
+ gaudi2_test_queues_msgs_free(hdev);
+ return rc;
+}
+
static int gaudi2_sw_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -3050,8 +3617,21 @@ static int gaudi2_sw_init(struct hl_device *hdev)
hdev->asic_funcs->set_pci_memory_regions(hdev);
+ rc = gaudi2_special_blocks_iterator_config(hdev);
+ if (rc)
+ goto free_scratchpad_mem;
+
+ rc = gaudi2_test_queues_msgs_alloc(hdev);
+ if (rc)
+ goto special_blocks_free;
+
return 0;
+special_blocks_free:
+ gaudi2_special_blocks_iterator_free(hdev);
+free_scratchpad_mem:
+ hl_asic_dma_pool_free(hdev, gaudi2->scratchpad_kernel_address,
+ gaudi2->scratchpad_bus_address);
free_virt_msix_db_mem:
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
free_cpu_accessible_dma_pool:
@@ -3071,6 +3651,10 @@ static int gaudi2_sw_fini(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ gaudi2_test_queues_msgs_free(hdev);
+
+ gaudi2_special_blocks_iterator_free(hdev);
+
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
@@ -3477,6 +4061,10 @@ static const char *gaudi2_irq_name(u16 irq_number)
return "gaudi2 completion";
case GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ... GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM:
return gaudi2_vdec_irq_name[irq_number - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM];
+ case GAUDI2_IRQ_NUM_TPC_ASSERT:
+ return "gaudi2 tpc assert";
+ case GAUDI2_IRQ_NUM_UNEXPECTED_ERROR:
+ return "gaudi2 unexpected error";
case GAUDI2_IRQ_NUM_USER_FIRST ... GAUDI2_IRQ_NUM_USER_LAST:
return "gaudi2 user completion";
default:
@@ -3508,7 +4096,6 @@ static void gaudi2_dec_disable_msix(struct hl_device *hdev, u32 max_irq_num)
static int gaudi2_dec_enable_msix(struct hl_device *hdev)
{
int rc, i, irq_init_cnt, irq, relative_idx;
- irq_handler_t irq_handler;
struct hl_dec *dec;
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0;
@@ -3518,20 +4105,24 @@ static int gaudi2_dec_enable_msix(struct hl_device *hdev)
irq = pci_irq_vector(hdev->pdev, i);
relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM;
- irq_handler = (relative_idx % 2) ?
- hl_irq_handler_dec_abnrm :
- hl_irq_handler_user_interrupt;
-
- dec = hdev->dec + relative_idx / 2;
-
/* We pass different structures depending on the irq handler. For the abnormal
* interrupt we pass hl_dec and for the regular interrupt we pass the relevant
* user_interrupt entry
+ *
+ * TODO: change the dec abnrm to threaded irq
*/
- rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i),
- ((relative_idx % 2) ?
- (void *) dec :
- (void *) &hdev->user_interrupt[dec->core_id]));
+
+ dec = hdev->dec + relative_idx / 2;
+ if (relative_idx % 2) {
+ rc = request_irq(irq, hl_irq_handler_dec_abnrm, 0,
+ gaudi2_irq_name(i), (void *) dec);
+ } else {
+ rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
+ hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
+ gaudi2_irq_name(i),
+ (void *) &hdev->user_interrupt[dec->core_id]);
+ }
+
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_dec_irqs;
@@ -3550,7 +4141,6 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc, irq, i, j, user_irq_init_cnt;
- irq_handler_t irq_handler;
struct hl_cq *cq;
if (gaudi2->hw_cap_initialized & HW_CAP_MSIX)
@@ -3586,14 +4176,33 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
goto free_event_irq;
}
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
+ rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
+ hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
+ gaudi2_irq_name(GAUDI2_IRQ_NUM_TPC_ASSERT), &hdev->tpc_interrupt);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_dec_irq;
+ }
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
+ rc = request_irq(irq, hl_irq_handler_user_interrupt, 0,
+ gaudi2_irq_name(GAUDI2_IRQ_NUM_UNEXPECTED_ERROR),
+ &hdev->unexpected_error_interrupt);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_tpc_irq;
+ }
+
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
user_irq_init_cnt < prop->user_interrupt_count;
i++, j++, user_irq_init_cnt++) {
irq = pci_irq_vector(hdev->pdev, i);
- irq_handler = hl_irq_handler_user_interrupt;
+ rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
+ hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
+ gaudi2_irq_name(i), &hdev->user_interrupt[j]);
- rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i), &hdev->user_interrupt[j]);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_user_irq;
@@ -3611,9 +4220,13 @@ free_user_irq:
irq = pci_irq_vector(hdev->pdev, i);
free_irq(irq, &hdev->user_interrupt[j]);
}
-
- gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
-
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
+ free_irq(irq, &hdev->unexpected_error_interrupt);
+free_tpc_irq:
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
+ free_irq(irq, &hdev->tpc_interrupt);
+free_dec_irq:
+ gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_DEC_LAST + 1);
free_event_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
free_irq(irq, cq);
@@ -3645,6 +4258,9 @@ static void gaudi2_sync_irqs(struct hl_device *hdev)
synchronize_irq(irq);
}
+ synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT));
+ synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR));
+
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count;
i++, j++) {
irq = pci_irq_vector(hdev->pdev, i);
@@ -3671,6 +4287,12 @@ static void gaudi2_disable_msix(struct hl_device *hdev)
gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
+ free_irq(irq, &hdev->tpc_interrupt);
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
+ free_irq(irq, &hdev->unexpected_error_interrupt);
+
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0;
k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) {
@@ -3868,7 +4490,6 @@ static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
{
int i, rc;
-
for (i = 0 ; i < num_cores ; i++) {
if (gaudi2_is_arc_enabled(hdev, core_ids[i]))
gaudi2_set_arc_running_mode(hdev, core_ids[i], core_command);
@@ -3890,6 +4511,139 @@ static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
return 0;
}
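+/* Stall or resume a single TPC engine; a no-op if the TPC is not initialized */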
+static int gaudi2_set_tpc_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, reg_addr, reg_val, tpc_id;
+
+ if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
+ return 0;
+
+ tpc_id = gaudi2_tpc_engine_id_to_tpc_id[engine_id];
+ if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + tpc_id)))
+ return 0;
+
+ reg_base = gaudi2_tpc_cfg_blocks_bases[tpc_id];
+ reg_addr = reg_base + TPC_CFG_STALL_OFFSET;
+ reg_val = FIELD_PREP(DCORE0_TPC0_CFG_TPC_STALL_V_MASK,
+ !!(engine_command == HL_ENGINE_STALL));
+ WREG32(reg_addr, reg_val);
+
+ if (engine_command == HL_ENGINE_RESUME) {
+ reg_base = gaudi2_tpc_eml_cfg_blocks_bases[tpc_id];
+ reg_addr = reg_base + TPC_EML_CFG_DBG_CNT_OFFSET;
+ RMWREG32(reg_addr, 0x1, DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK);
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_mme_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, reg_addr, reg_val, mme_id;
+
+ mme_id = gaudi2_mme_engine_id_to_mme_id[engine_id];
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + mme_id)))
+ return 0;
+
+ reg_base = gaudi2_mme_ctrl_lo_blocks_bases[mme_id];
+ reg_addr = reg_base + MME_CTRL_LO_QM_STALL_OFFSET;
+ reg_val = FIELD_PREP(DCORE0_MME_CTRL_LO_QM_STALL_V_MASK,
+ !!(engine_command == HL_ENGINE_STALL));
+ WREG32(reg_addr, reg_val);
+
+ return 0;
+}
+
+static int gaudi2_set_edma_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, reg_addr, reg_val, edma_id;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
+ return 0;
+
+ edma_id = gaudi2_edma_engine_id_to_edma_id[engine_id];
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + edma_id)))
+ return 0;
+
+ reg_base = gaudi2_dma_core_blocks_bases[edma_id];
+ reg_addr = reg_base + EDMA_CORE_CFG_STALL_OFFSET;
+ reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK,
+ !!(engine_command == HL_ENGINE_STALL));
+ WREG32(reg_addr, reg_val);
+
+ if (engine_command == HL_ENGINE_STALL) {
+ reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, 0x1) |
+ FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_FLUSH_MASK, 0x1);
+ WREG32(reg_addr, reg_val);
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_engine_modes(struct hl_device *hdev,
+ u32 *engine_ids, u32 num_engines, u32 engine_command)
+{
+ int i, rc;
+
+ for (i = 0 ; i < num_engines ; ++i) {
+ switch (engine_ids[i]) {
+ case GAUDI2_DCORE0_ENGINE_ID_TPC_0 ... GAUDI2_DCORE0_ENGINE_ID_TPC_5:
+ case GAUDI2_DCORE1_ENGINE_ID_TPC_0 ... GAUDI2_DCORE1_ENGINE_ID_TPC_5:
+ case GAUDI2_DCORE2_ENGINE_ID_TPC_0 ... GAUDI2_DCORE2_ENGINE_ID_TPC_5:
+ case GAUDI2_DCORE3_ENGINE_ID_TPC_0 ... GAUDI2_DCORE3_ENGINE_ID_TPC_5:
+ rc = gaudi2_set_tpc_engine_mode(hdev, engine_ids[i], engine_command);
+ if (rc)
+ return rc;
+
+ break;
+ case GAUDI2_DCORE0_ENGINE_ID_MME:
+ case GAUDI2_DCORE1_ENGINE_ID_MME:
+ case GAUDI2_DCORE2_ENGINE_ID_MME:
+ case GAUDI2_DCORE3_ENGINE_ID_MME:
+ rc = gaudi2_set_mme_engine_mode(hdev, engine_ids[i], engine_command);
+ if (rc)
+ return rc;
+
+ break;
+ case GAUDI2_DCORE0_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE0_ENGINE_ID_EDMA_1:
+ case GAUDI2_DCORE1_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE1_ENGINE_ID_EDMA_1:
+ case GAUDI2_DCORE2_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE2_ENGINE_ID_EDMA_1:
+ case GAUDI2_DCORE3_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE3_ENGINE_ID_EDMA_1:
+ rc = gaudi2_set_edma_engine_mode(hdev, engine_ids[i], engine_command);
+ if (rc)
+ return rc;
+
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid engine ID %u\n", engine_ids[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_engines(struct hl_device *hdev, u32 *engine_ids,
+ u32 num_engines, u32 engine_command)
+{
+ switch (engine_command) {
+ case HL_ENGINE_CORE_HALT:
+ case HL_ENGINE_CORE_RUN:
+ return gaudi2_set_engine_cores(hdev, engine_ids, num_engines, engine_command);
+
+ case HL_ENGINE_STALL:
+ case HL_ENGINE_RESUME:
+ return gaudi2_set_engine_modes(hdev, engine_ids, num_engines, engine_command);
+
+ default:
+ dev_err(hdev->dev, "failed to execute command id %u\n", engine_command);
+ return -EINVAL;
+ }
+}
+
static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -5340,11 +6094,10 @@ static void gaudi2_send_hard_reset_cmd(struct hl_device *hdev)
* gaudi2_execute_hard_reset - execute hard reset by driver/FW
*
* @hdev: pointer to the habanalabs device structure
- * @reset_sleep_ms: sleep time in msec after reset
*
 * This function executes hard reset based on whether the driver or the FW should do the reset
*/
-static void gaudi2_execute_hard_reset(struct hl_device *hdev, u32 reset_sleep_ms)
+static void gaudi2_execute_hard_reset(struct hl_device *hdev)
{
if (hdev->asic_prop.hard_reset_done_by_fw) {
gaudi2_send_hard_reset_cmd(hdev);
@@ -5362,17 +6115,37 @@ static void gaudi2_execute_hard_reset(struct hl_device *hdev, u32 reset_sleep_ms
WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
}
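+/* Poll the CPU_RST_STATUS_TO_HOST register until the FW reports that soft reset is done */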
+static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
+{
+ int i, rc = 0;
+ u32 reg_val;
+
+ for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
+ rc = hl_poll_timeout(
+ hdev,
+ mmCPU_RST_STATUS_TO_HOST,
+ reg_val,
+ reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
+ 1000,
+ poll_timeout_us);
+
+ if (rc)
+ dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
+ reg_val);
+ return rc;
+}
+
/**
* gaudi2_execute_soft_reset - execute soft reset by driver/FW
*
* @hdev: pointer to the habanalabs device structure
- * @reset_sleep_ms: sleep time in msec after reset
* @driver_performs_reset: true if driver should perform reset instead of f/w.
+ * @poll_timeout_us: time to wait for response from f/w.
*
 * This function executes soft reset based on whether the driver or the FW should do the reset
*/
-static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms,
- bool driver_performs_reset)
+static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset,
+ u32 poll_timeout_us)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -5385,7 +6158,8 @@ static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
- return;
+
+ return gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
}
/* Block access to engines, QMANs and SM during reset, these
@@ -5400,17 +6174,14 @@ static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms
mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE + HL_BLOCK_SIZE);
WREG32(mmPSOC_RESET_CONF_SOFT_RST, 1);
+ return 0;
}
-static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 reset_sleep_ms,
- u32 poll_timeout_us)
+static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 poll_timeout_us)
{
int i, rc = 0;
u32 reg_val;
- /* without this sleep reset will not work */
- msleep(reset_sleep_ms);
-
/* We poll the BTM done indication multiple times after reset due to
* a HW errata 'GAUDI2_0300'
*/
@@ -5427,30 +6198,12 @@ static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 reset_sleep_m
dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", reg_val);
}
-static void gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
-{
- int i, rc = 0;
- u32 reg_val;
-
- for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
- rc = hl_poll_timeout(
- hdev,
- mmCPU_RST_STATUS_TO_HOST,
- reg_val,
- reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
- 1000,
- poll_timeout_us);
-
- if (rc)
- dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
- reg_val);
-}
-
-static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+static int gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 poll_timeout_us, reset_sleep_ms;
bool driver_performs_reset = false;
+ int rc;
if (hdev->pldm) {
reset_sleep_ms = hard_reset ? GAUDI2_PLDM_HRESET_TIMEOUT_MSEC :
@@ -5468,7 +6221,7 @@ static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_rese
if (hard_reset) {
driver_performs_reset = !hdev->asic_prop.hard_reset_done_by_fw;
- gaudi2_execute_hard_reset(hdev, reset_sleep_ms);
+ gaudi2_execute_hard_reset(hdev);
} else {
/*
* As we have to support also work with preboot only (which does not supports
@@ -5478,17 +6231,44 @@ static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_rese
*/
driver_performs_reset = (hdev->fw_components == FW_TYPE_PREBOOT_CPU &&
!hdev->asic_prop.fw_security_enabled);
- gaudi2_execute_soft_reset(hdev, reset_sleep_ms, driver_performs_reset);
+ rc = gaudi2_execute_soft_reset(hdev, driver_performs_reset, poll_timeout_us);
+ if (rc)
+ return rc;
}
skip_reset:
- if (driver_performs_reset || hard_reset)
- gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
- else
- gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
+ if (driver_performs_reset || hard_reset) {
+ /*
+ * Instead of waiting for BTM indication we should wait for preboot ready:
+ * Consider the below scenario:
+ * 1. FW update is being triggered
+ * - setting the dirty bit
+ * 2. hard reset will be triggered due to the dirty bit
+ * 3. FW initiates the reset:
+ * - dirty bit cleared
+ * - BTM indication cleared
+ * - preboot ready indication cleared
+ * 4. during hard reset:
+ * - BTM indication will be set
+ * - BIST test performed and another reset triggered
+ * 5. only after this reset the preboot will set the preboot ready
+ *
+	 * When polling on the BTM indication alone, we can lose sync with the FW while
+	 * trying to communicate with a FW that is still in reset.
+	 * To overcome this, we always wait for the preboot ready indication.
+ */
+
+ /* without this sleep reset will not work */
+ msleep(reset_sleep_ms);
+
+ if (hdev->fw_components & FW_TYPE_PREBOOT_CPU)
+ hl_fw_wait_preboot_ready(hdev);
+ else
+ gaudi2_poll_btm_indication(hdev, poll_timeout_us);
+ }
if (!gaudi2)
- return;
+ return 0;
gaudi2->dec_hw_cap_initialized &= ~(HW_CAP_DEC_MASK);
gaudi2->tpc_hw_cap_initialized &= ~(HW_CAP_TPC_MASK);
@@ -5515,6 +6295,7 @@ skip_reset:
HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_MME_MASK |
HW_CAP_ROT_MASK);
}
+ return 0;
}
static int gaudi2_suspend(struct hl_device *hdev)
@@ -5538,8 +6319,8 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
@@ -6066,28 +6847,29 @@ static void gaudi2_qman_set_test_mode(struct hl_device *hdev, u32 hw_queue_id, b
}
}
-static int gaudi2_test_queue(struct hl_device *hdev, u32 hw_queue_id)
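+/* Each tested H/W queue gets its own SOB, starting from the first user-available SOB */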
+static inline u32 gaudi2_test_queue_hw_queue_id_to_sob_id(struct hl_device *hdev, u32 hw_queue_id)
{
- u32 sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4;
+ return hdev->asic_prop.first_available_user_sob[0] +
+ hw_queue_id - GAUDI2_QUEUE_ID_PDMA_0_0;
+}
+
+static void gaudi2_test_queue_clear(struct hl_device *hdev, u32 hw_queue_id)
+{
+ u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4;
u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
- u32 timeout_usec, tmp, sob_base = 1, sob_val = 0x5a5a;
- struct packet_msg_short *msg_short_pkt;
- dma_addr_t pkt_dma_addr;
- size_t pkt_size;
- int rc;
- if (hdev->pldm)
- timeout_usec = GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC;
- else
- timeout_usec = GAUDI2_TEST_QUEUE_WAIT_USEC;
+ /* Reset the SOB value */
+ WREG32(sob_addr, 0);
+}
- pkt_size = sizeof(*msg_short_pkt);
- msg_short_pkt = hl_asic_dma_pool_zalloc(hdev, pkt_size, GFP_KERNEL, &pkt_dma_addr);
- if (!msg_short_pkt) {
- dev_err(hdev->dev, "Failed to allocate packet for H/W queue %d testing\n",
- hw_queue_id);
- return -ENOMEM;
- }
+static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue_id, u32 sob_val,
+ struct gaudi2_queues_test_info *msg_info)
+{
+ u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4;
+ u32 tmp, sob_base = 1;
+ struct packet_msg_short *msg_short_pkt = msg_info->kern_addr;
+ size_t pkt_size = sizeof(struct packet_msg_short);
+ int rc;
tmp = (PACKET_MSG_SHORT << GAUDI2_PKT_CTL_OPCODE_SHIFT) |
(1 << GAUDI2_PKT_CTL_EB_SHIFT) |
@@ -6098,15 +6880,25 @@ static int gaudi2_test_queue(struct hl_device *hdev, u32 hw_queue_id)
msg_short_pkt->value = cpu_to_le32(sob_val);
msg_short_pkt->ctl = cpu_to_le32(tmp);
- /* Reset the SOB value */
- WREG32(sob_addr, 0);
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, pkt_dma_addr);
- if (rc) {
- dev_err(hdev->dev, "Failed to send msg_short packet to H/W queue %d\n",
- hw_queue_id);
- goto free_pkt;
- }
+ return rc;
+}
+
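+/* Wait for the SOB to reach the expected value, i.e. the QMAN has consumed the msg-short packet */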
+static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queue_id, u32 sob_val)
+{
+ u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4;
+ u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
+ u32 timeout_usec, tmp;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC;
+ else
+ timeout_usec = GAUDI2_TEST_QUEUE_WAIT_USEC;
rc = hl_poll_timeout(
hdev,
@@ -6122,11 +6914,6 @@ static int gaudi2_test_queue(struct hl_device *hdev, u32 hw_queue_id)
rc = -EIO;
}
- /* Reset the SOB value */
- WREG32(sob_addr, 0);
-
-free_pkt:
- hl_asic_dma_pool_free(hdev, (void *) msg_short_pkt, pkt_dma_addr);
return rc;
}
@@ -6146,42 +6933,60 @@ static int gaudi2_test_cpu_queue(struct hl_device *hdev)
static int gaudi2_test_queues(struct hl_device *hdev)
{
- int i, rc, ret_val = 0;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_queues_test_info *msg_info;
+ u32 sob_val = 0x5a5a;
+ int i, rc;
+ /* send test message on all enabled Qs */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
if (!gaudi2_is_queue_enabled(hdev, i))
continue;
+ msg_info = &gaudi2->queues_test_info[i - GAUDI2_QUEUE_ID_PDMA_0_0];
gaudi2_qman_set_test_mode(hdev, i, true);
- rc = gaudi2_test_queue(hdev, i);
- gaudi2_qman_set_test_mode(hdev, i, false);
-
- if (rc) {
- ret_val = -EINVAL;
+ gaudi2_test_queue_clear(hdev, i);
+ rc = gaudi2_test_queue_send_msg_short(hdev, i, sob_val, msg_info);
+ if (rc)
goto done;
- }
}
rc = gaudi2_test_cpu_queue(hdev);
- if (rc) {
- ret_val = -EINVAL;
+ if (rc)
goto done;
+
+ /* verify that all messages were processed */
+ for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
+ if (!gaudi2_is_queue_enabled(hdev, i))
+ continue;
+
+ rc = gaudi2_test_queue_wait_completion(hdev, i, sob_val);
+ if (rc)
+ /* chip is not usable, no need for cleanups, just bail-out with error */
+ goto done;
+
+ gaudi2_test_queue_clear(hdev, i);
+ gaudi2_qman_set_test_mode(hdev, i, false);
}
done:
- return ret_val;
+ return rc;
}
static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
size_t irq_arr_size;
+ int rc;
- /* TODO: missing gaudi2_nic_resume.
- * Until implemented nic_hw_cap_initialized will remain zeroed
- */
gaudi2_init_arcs(hdev);
- gaudi2_scrub_arcs_dccm(hdev);
+
+ rc = gaudi2_scrub_arcs_dccm(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to scrub arcs DCCM\n");
+ return rc;
+ }
+
gaudi2_init_security(hdev);
/* Unmask all IRQs since some could have been received during the soft reset */
@@ -6189,74 +6994,21 @@ static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
return hl_fw_unmask_irq_arr(hdev, gaudi2->hw_events, irq_arr_size);
}
-static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
- struct iterate_module_ctx *ctx)
+static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
- struct gaudi2_tpc_idle_data *idle_data = ctx->data;
- u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
- bool is_eng_idle;
- int engine_idx;
-
- if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1)))
- engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
- else
- engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 +
- dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst;
-
- tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset);
- qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset);
- qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset);
- qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset);
-
- is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
- IS_TPC_IDLE(tpc_cfg_sts);
- *(idle_data->is_idle) &= is_eng_idle;
-
- if (idle_data->mask && !is_eng_idle)
- set_bit(engine_idx, idle_data->mask);
-
- if (idle_data->e)
- hl_engine_data_sprintf(idle_data->e,
- idle_data->tpc_fmt, dcore, inst,
- is_eng_idle ? "Y" : "N",
- qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
-}
-
-static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
- struct engines_data *e)
-{
- u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_idle_ind_mask,
- mme_arch_sts, dec_swreg15, dec_enabled_bit;
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-12x%s\n";
unsigned long *mask = (unsigned long *) mask_arr;
- const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#x\n";
- const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
- const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
- const char *pdma_fmt = "%-6d%-9s%#-14x%#x\n";
- const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
- const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
+ const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#-15x%#x\n";
bool is_idle = true, is_eng_idle;
- u64 offset;
-
- struct gaudi2_tpc_idle_data tpc_idle_data = {
- .tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
- .e = e,
- .mask = mask,
- .is_idle = &is_idle,
- };
- struct iterate_module_ctx tpc_iter = {
- .fn = &gaudi2_is_tpc_engine_idle,
- .data = &tpc_idle_data,
- };
-
int engine_idx, i, j;
+ u64 offset;
- /* EDMA, Two engines per Dcore */
if (e)
hl_engine_data_sprintf(e,
- "\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
- "---- ---- ------- ------------ ----------------------\n");
+ "\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
+ "---- ---- ------- ------------ ------------- -------------\n");
for (i = 0; i < NUM_OF_DCORES; i++) {
for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) {
@@ -6269,45 +7021,56 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j;
offset = i * DCORE_OFFSET + j * DCORE_EDMA_OFFSET;
- dma_core_idle_ind_mask =
- RREG32(mmDCORE0_EDMA0_CORE_IDLE_IND_MASK + offset);
+ dma_core_sts0 = RREG32(mmDCORE0_EDMA0_CORE_STS0 + offset);
+ dma_core_sts1 = RREG32(mmDCORE0_EDMA0_CORE_STS1 + offset);
qm_glbl_sts0 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmDCORE0_EDMA0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
- IS_DMA_IDLE(dma_core_idle_ind_mask);
+ IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
- hl_engine_data_sprintf(e, edma_fmt, i, j,
- is_eng_idle ? "Y" : "N",
- qm_glbl_sts0,
- dma_core_idle_ind_mask);
+ hl_engine_data_sprintf(e, edma_fmt, i, j, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, dma_core_sts0, dma_core_sts1);
}
}
- /* PDMA, Two engines in Full chip */
+ return is_idle;
+}
+
+static bool gaudi2_get_pdma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *pdma_fmt = "%-6d%-9s%#-14x%#-15x%#x\n";
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset;
+
if (e)
hl_engine_data_sprintf(e,
- "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
- "---- ------- ------------ ----------------------\n");
+ "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
+ "---- ------- ------------ ------------- -------------\n");
for (i = 0 ; i < NUM_OF_PDMA ; i++) {
engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i;
offset = i * PDMA_OFFSET;
- dma_core_idle_ind_mask = RREG32(mmPDMA0_CORE_IDLE_IND_MASK + offset);
+ dma_core_sts0 = RREG32(mmPDMA0_CORE_STS0 + offset);
+ dma_core_sts1 = RREG32(mmPDMA0_CORE_STS1 + offset);
qm_glbl_sts0 = RREG32(mmPDMA0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmPDMA0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmPDMA0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
- IS_DMA_IDLE(dma_core_idle_ind_mask);
+ IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
@@ -6315,9 +7078,22 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
if (e)
hl_engine_data_sprintf(e, pdma_fmt, i, is_eng_idle ? "Y" : "N",
- qm_glbl_sts0, dma_core_idle_ind_mask);
+ qm_glbl_sts0, dma_core_sts0, dma_core_sts1);
}
+ return is_idle;
+}
+
+static bool gaudi2_get_nic_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset = 0;
+
/* NIC, twelve macros in Full chip */
if (e && hdev->nic_ports_mask)
hl_engine_data_sprintf(e,
@@ -6351,6 +7127,19 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
qm_glbl_sts0, qm_cgm_sts);
}
+ return is_idle;
+}
+
+static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, mme_arch_sts;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset;
+
if (e)
hl_engine_data_sprintf(e,
"\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
@@ -6381,16 +7170,82 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
set_bit(engine_idx, mask);
}
- /*
- * TPC
- */
+ return is_idle;
+}
+
+static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ struct iterate_module_ctx *ctx)
+{
+ struct gaudi2_tpc_idle_data *idle_data = ctx->data;
+ u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_eng_idle;
+ int engine_idx;
+
+ if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1)))
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
+ else
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 +
+ dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst;
+
+ tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset);
+ qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
+ IS_TPC_IDLE(tpc_cfg_sts);
+ *(idle_data->is_idle) &= is_eng_idle;
+
+ if (idle_data->mask && !is_eng_idle)
+ set_bit(engine_idx, idle_data->mask);
+
+ if (idle_data->e)
+ hl_engine_data_sprintf(idle_data->e,
+ idle_data->tpc_fmt, dcore, inst,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
+}
+
+static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ bool is_idle = true;
+
+ struct gaudi2_tpc_idle_data tpc_idle_data = {
+ .tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
+ .e = e,
+ .mask = mask,
+ .is_idle = &is_idle,
+ };
+ struct iterate_module_ctx tpc_iter = {
+ .fn = &gaudi2_is_tpc_engine_idle,
+ .data = &tpc_idle_data,
+ };
+
if (e && prop->tpc_enabled_mask)
hl_engine_data_sprintf(e,
- "\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_IDLE_IND_MASK\n"
- "---- --- -------- ------------ ---------- ----------------------\n");
+ "\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS STATUS\n"
+ "---- --- ------- ------------ ---------- ------\n");
gaudi2_iterate_tpcs(hdev, &tpc_iter);
+ return tpc_idle_data.is_idle;
+}
+
+static bool gaudi2_get_decoder_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
+ const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
+ bool is_idle = true, is_eng_idle;
+ u32 dec_swreg15, dec_enabled_bit;
+ int engine_idx, i, j;
+ u64 offset;
+
/* Decoders, two each Dcore and two shared PCIe decoders */
if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
hl_engine_data_sprintf(e,
@@ -6445,10 +7300,23 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
is_eng_idle ? "Y" : "N", dec_swreg15);
}
+ return is_idle;
+}
+
+static bool gaudi2_get_rotator_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-14x%#x\n";
+ unsigned long *mask = (unsigned long *) mask_arr;
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset;
+
if (e)
hl_engine_data_sprintf(e,
- "\nCORE ROT is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
- "---- ---- ------- ------------ ---------- -------------\n");
+ "\nCORE ROT is_idle QM_GLBL_STS0 QM_GLBL_STS1 QM_CGM_STS\n"
+ "---- --- ------- ------------ ------------ ----------\n");
for (i = 0 ; i < NUM_OF_ROT ; i++) {
engine_idx = GAUDI2_ENGINE_ID_ROT_0 + i;
@@ -6467,12 +7335,28 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
if (e)
hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
- qm_glbl_sts0, qm_cgm_sts, "-");
+ qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
}
return is_idle;
}
+static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ bool is_idle = true;
+
+ is_idle &= gaudi2_get_edma_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_pdma_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_nic_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_mme_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_tpc_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_decoder_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_rotator_idle_status(hdev, mask_arr, mask_len, e);
+
+ return is_idle;
+}
+
static void gaudi2_hw_queues_lock(struct hl_device *hdev)
__acquires(&gaudi2->hw_queues_lock)
{
@@ -6803,38 +7687,37 @@ static inline bool is_info_event(u32 event)
switch (event) {
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S ... GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
+
+ /* return in case of NIC status event - these events are received periodically and not as
+	 * an indication of an error.
+ */
+ case GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 ... GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1:
return true;
default:
return false;
}
}
-static void gaudi2_print_irq_info(struct hl_device *hdev, u16 event_type)
+static void gaudi2_print_event(struct hl_device *hdev, u16 event_type,
+ bool ratelimited, const char *fmt, ...)
{
- char desc[64] = "";
- bool event_valid = false;
+ struct va_format vaf;
+ va_list args;
- /* return in case of NIC status event - these events are received periodically and not as
- * an indication to an error, thus not printed.
- */
- if (event_type >= GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 &&
- event_type <= GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1)
- return;
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
- if (gaudi2_irq_map_table[event_type].valid) {
- snprintf(desc, sizeof(desc), gaudi2_irq_map_table[event_type].name);
- event_valid = true;
- }
-
- if (!event_valid)
- snprintf(desc, sizeof(desc), "N/A");
-
- if (is_info_event(event_type))
- dev_info_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
- event_type, desc);
+ if (ratelimited)
+ dev_err_ratelimited(hdev->dev, "%s: %pV\n",
+ gaudi2_irq_map_table[event_type].valid ?
+ gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
else
- dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
- event_type, desc);
+ dev_err(hdev->dev, "%s: %pV\n",
+ gaudi2_irq_map_table[event_type].valid ?
+ gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
+
+ va_end(args);
}
static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
@@ -6847,8 +7730,8 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
memory_wrapper_idx = ecc_data->memory_wrapper_idx;
- dev_err(hdev->dev,
- "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u. critical %u.\n",
+ gaudi2_print_event(hdev, event_type, !ecc_data->is_critical,
+		"ECC error detected. address: %#llx. Syndrome: %#llx. block id %u. critical %u.",
ecc_address, ecc_syndrom, memory_wrapper_idx, ecc_data->is_critical);
return !!ecc_data->is_critical;
@@ -6987,10 +7870,10 @@ static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base, u32 str
gaudi2_print_last_pqes_on_err(hdev, qid_base, i, qman_base, false);
}
-static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *qm_name,
- u64 qman_base, u32 qid_base)
+static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type,
+ u64 qman_base, u32 qid_base)
{
- u32 i, j, glbl_sts_val, arb_err_val, num_error_causes;
+ u32 i, j, glbl_sts_val, arb_err_val, num_error_causes, error_count = 0;
u64 glbl_sts_addr, arb_err_addr;
char reg_desc[32];
@@ -7013,12 +7896,14 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q
}
for (j = 0 ; j < num_error_causes ; j++)
- if (glbl_sts_val & BIT(j))
- dev_err_ratelimited(hdev->dev, "%s %s. err cause: %s\n",
- qm_name, reg_desc,
- i == QMAN_STREAMS ?
- gaudi2_qman_lower_cp_error_cause[j] :
- gaudi2_qman_error_cause[j]);
+ if (glbl_sts_val & BIT(j)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "%s. err cause: %s", reg_desc,
+ i == QMAN_STREAMS ?
+ gaudi2_qman_lower_cp_error_cause[j] :
+ gaudi2_qman_error_cause[j]);
+ error_count++;
+ }
print_qman_data_on_err(hdev, qid_base, i, qman_base);
}
@@ -7026,18 +7911,23 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q
arb_err_val = RREG32(arb_err_addr);
if (!arb_err_val)
- return;
+ goto out;
for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
- if (arb_err_val & BIT(j))
- dev_err_ratelimited(hdev->dev, "%s ARB_ERR. err cause: %s\n",
- qm_name, gaudi2_qman_arb_error_cause[j]);
+ if (arb_err_val & BIT(j)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "ARB_ERR. err cause: %s",
+ gaudi2_qman_arb_error_cause[j]);
+ error_count++;
+ }
}
+
+out:
+ return error_count;
}
static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
u64 rtr_mstr_if_base_addr, bool is_write, char *name,
- bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info,
enum gaudi2_engine_id id, u64 *event_mask)
{
u32 razwi_hi, razwi_lo, razwi_xy;
@@ -7045,26 +7935,14 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
u8 rd_wr_flag;
if (is_write) {
- if (read_razwi_regs) {
- razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI);
- razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY);
- } else {
- razwi_hi = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_hi_reg);
- razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg);
- razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg);
- }
+ razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI);
+ razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY);
rd_wr_flag = HL_RAZWI_WRITE;
} else {
- if (read_razwi_regs) {
- razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
- razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY);
- } else {
- razwi_hi = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_hi_reg);
- razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg);
- razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg);
- }
+ razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
+ razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY);
rd_wr_flag = HL_RAZWI_READ;
}
@@ -7078,38 +7956,26 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev,
u64 rtr_mstr_if_base_addr, bool is_write, char *name,
- bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info,
enum gaudi2_engine_id id, u64 *event_mask)
{
- u32 razwi_addr, razwi_xy;
+ u64 razwi_addr = CFG_BASE;
+ u32 razwi_xy;
u16 eng_id = id;
u8 rd_wr_flag;
if (is_write) {
- if (read_razwi_regs) {
- razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY);
- } else {
- razwi_addr = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_reg);
- razwi_xy = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_id_reg);
- }
-
+ razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY);
rd_wr_flag = HL_RAZWI_WRITE;
} else {
- if (read_razwi_regs) {
- razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY);
- } else {
- razwi_addr = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_reg);
- razwi_xy = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_id_reg);
- }
-
+ razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY);
rd_wr_flag = HL_RAZWI_READ;
}
hl_handle_razwi(hdev, razwi_addr, &eng_id, 1, rd_wr_flag | HL_RAZWI_LBW, event_mask);
dev_err_ratelimited(hdev->dev,
- "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x\n",
+ "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%llX Initiator coordinates 0x%x\n",
name, is_write ? "WR" : "RD", rtr_mstr_if_base_addr, razwi_addr,
razwi_xy);
}
@@ -7164,183 +8030,146 @@ static enum gaudi2_engine_id gaudi2_razwi_calc_engine_id(struct hl_device *hdev,
*/
static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
enum razwi_event_sources module, u8 module_idx,
- u8 module_sub_idx, struct hl_eq_razwi_info *razwi_info,
- u64 *event_mask)
+ u8 module_sub_idx, u64 *event_mask)
{
- bool via_sft = false, read_razwi_regs = false;
- u32 rtr_id, dcore_id, dcore_rtr_id, sft_id, eng_id;
- u64 rtr_mstr_if_base_addr;
+ bool via_sft = false;
+ u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id;
+ u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr;
u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0;
u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0;
char initiator_name[64];
- if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX) || !razwi_info)
- read_razwi_regs = true;
-
switch (module) {
case RAZWI_TPC:
- rtr_id = gaudi2_tpc_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
+
+ if (hl_is_fw_ver_below_1_9(hdev) &&
+ !hdev->asic_prop.fw_security_enabled &&
+ ((module_idx == 0) || (module_idx == 1)))
+ lbw_rtr_id = DCORE0_RTR0;
+ else
+ lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "TPC_%u", module_idx);
break;
case RAZWI_MME:
sprintf(initiator_name, "MME_%u", module_idx);
switch (module_sub_idx) {
case MME_WAP0:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0;
break;
case MME_WAP1:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1;
break;
case MME_WRITE:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write;
break;
case MME_READ:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read;
break;
case MME_SBTE0:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0;
break;
case MME_SBTE1:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1;
break;
case MME_SBTE2:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2;
break;
case MME_SBTE3:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3;
break;
case MME_SBTE4:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4;
break;
default:
return;
}
+ lbw_rtr_id = hbw_rtr_id;
break;
case RAZWI_EDMA:
- sft_id = gaudi2_edma_initiator_sft_id[module_idx].interface_id;
- dcore_id = gaudi2_edma_initiator_sft_id[module_idx].dcore_id;
+ hbw_rtr_mstr_if_base_addr = gaudi2_edma_initiator_hbw_sft[module_idx];
+ dcore_id = module_idx / NUM_OF_EDMA_PER_DCORE;
+		/* SFT has a separate MSTR_IF for LBW; only there can we
+		 * read the LBW razwi related registers
+ */
+ lbw_rtr_mstr_if_base_addr = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE +
+ dcore_id * SFT_DCORE_OFFSET;
via_sft = true;
sprintf(initiator_name, "EDMA_%u", module_idx);
break;
case RAZWI_PDMA:
- rtr_id = gaudi2_pdma_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_pdma_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_pdma_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "PDMA_%u", module_idx);
break;
case RAZWI_NIC:
- rtr_id = gaudi2_nic_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_nic_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_nic_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "NIC_%u", module_idx);
break;
case RAZWI_DEC:
- rtr_id = gaudi2_dec_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "DEC_%u", module_idx);
break;
case RAZWI_ROT:
- rtr_id = gaudi2_rot_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_rot_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "ROT_%u", module_idx);
break;
default:
return;
}
- if (!read_razwi_regs) {
- if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_HBW) {
- hbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AW;
- hbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AR;
- } else if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_LBW) {
- lbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AW;
- lbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AR;
- }
- rtr_mstr_if_base_addr = 0;
-
- goto dump_info;
- }
-
/* Find router mstr_if register base */
- if (via_sft) {
- rtr_mstr_if_base_addr = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE +
- dcore_id * SFT_DCORE_OFFSET +
- sft_id * SFT_IF_OFFSET +
- RTR_MSTR_IF_OFFSET;
- } else {
- dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE;
- dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE;
- rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE +
+ if (!via_sft) {
+ dcore_id = hbw_rtr_id / NUM_OF_RTR_PER_DCORE;
+ dcore_rtr_id = hbw_rtr_id % NUM_OF_RTR_PER_DCORE;
+ hbw_rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE +
dcore_id * DCORE_OFFSET +
dcore_rtr_id * DCORE_RTR_OFFSET +
RTR_MSTR_IF_OFFSET;
+ lbw_rtr_mstr_if_base_addr = hbw_rtr_mstr_if_base_addr +
+ (((s32)lbw_rtr_id - hbw_rtr_id) * DCORE_RTR_OFFSET);
}
/* Find out event cause by reading "RAZWI_HAPPENED" registers */
- hbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED);
-
- hbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED);
-
- if (via_sft) {
- /* SFT has separate MSTR_IF for LBW, only there we can
- * read the LBW razwi related registers
- */
- u64 base;
-
- base = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE + dcore_id * SFT_DCORE_OFFSET +
- RTR_LBW_MSTR_IF_OFFSET;
-
- lbw_shrd_aw = RREG32(base + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
-
- lbw_shrd_ar = RREG32(base + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
- } else {
- lbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
-
- lbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
- }
-
-dump_info:
- /* check if there is no RR razwi indication at all */
- if (!hbw_shrd_aw && !hbw_shrd_ar && !lbw_shrd_aw && !lbw_shrd_ar)
- return;
+ hbw_shrd_aw = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED);
+ hbw_shrd_ar = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED);
+ lbw_shrd_aw = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
+ lbw_shrd_ar = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
eng_id = gaudi2_razwi_calc_engine_id(hdev, module, module_idx);
if (hbw_shrd_aw) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, true,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw);
+ WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw);
}
if (hbw_shrd_ar) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, false,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar);
+ WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar);
}
if (lbw_shrd_aw) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, true,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw);
+ WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw);
}
if (lbw_shrd_ar) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, false,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar);
+ WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar);
}
}
@@ -7352,424 +8181,205 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
/* check all TPCs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) {
if (prop->tpc_enabled_mask & BIT(mod_idx))
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL);
}
/* check all MMEs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx,
- sub_mod, NULL, NULL);
+ sub_mod, NULL);
/* check all EDMAs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
if (prop->edma_enabled_mask & BIT(mod_idx))
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL);
/* check all PDMAs */
for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++)
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL);
/* check all NICs */
for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++)
if (hdev->nic_ports_mask & BIT(mod_idx))
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0,
- NULL, NULL);
+ NULL);
/* check all DECs */
for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++)
if (prop->decoder_enabled_mask & BIT(mod_idx))
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL);
/* check all ROTs */
for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++)
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL, NULL);
-}
-
-static const char *gaudi2_get_initiators_name(u32 rtr_id)
-{
- switch (rtr_id) {
- case DCORE0_RTR0:
- return "DEC0/1/8/9, TPC24, PDMA0/1, PMMU, PCIE_IF, EDMA0/2, HMMU0/2/4/6, CPU";
- case DCORE0_RTR1:
- return "TPC0/1";
- case DCORE0_RTR2:
- return "TPC2/3";
- case DCORE0_RTR3:
- return "TPC4/5";
- case DCORE0_RTR4:
- return "MME0_SBTE0/1";
- case DCORE0_RTR5:
- return "MME0_WAP0/SBTE2";
- case DCORE0_RTR6:
- return "MME0_CTRL_WR/SBTE3";
- case DCORE0_RTR7:
- return "MME0_WAP1/CTRL_RD/SBTE4";
- case DCORE1_RTR0:
- return "MME1_WAP1/CTRL_RD/SBTE4";
- case DCORE1_RTR1:
- return "MME1_CTRL_WR/SBTE3";
- case DCORE1_RTR2:
- return "MME1_WAP0/SBTE2";
- case DCORE1_RTR3:
- return "MME1_SBTE0/1";
- case DCORE1_RTR4:
- return "TPC10/11";
- case DCORE1_RTR5:
- return "TPC8/9";
- case DCORE1_RTR6:
- return "TPC6/7";
- case DCORE1_RTR7:
- return "DEC2/3, NIC0/1/2/3/4, ARC_FARM, KDMA, EDMA1/3, HMMU1/3/5/7";
- case DCORE2_RTR0:
- return "DEC4/5, NIC5/6/7/8, EDMA4/6, HMMU8/10/12/14, ROT0";
- case DCORE2_RTR1:
- return "TPC16/17";
- case DCORE2_RTR2:
- return "TPC14/15";
- case DCORE2_RTR3:
- return "TPC12/13";
- case DCORE2_RTR4:
- return "MME2_SBTE0/1";
- case DCORE2_RTR5:
- return "MME2_WAP0/SBTE2";
- case DCORE2_RTR6:
- return "MME2_CTRL_WR/SBTE3";
- case DCORE2_RTR7:
- return "MME2_WAP1/CTRL_RD/SBTE4";
- case DCORE3_RTR0:
- return "MME3_WAP1/CTRL_RD/SBTE4";
- case DCORE3_RTR1:
- return "MME3_CTRL_WR/SBTE3";
- case DCORE3_RTR2:
- return "MME3_WAP0/SBTE2";
- case DCORE3_RTR3:
- return "MME3_SBTE0/1";
- case DCORE3_RTR4:
- return "TPC18/19";
- case DCORE3_RTR5:
- return "TPC20/21";
- case DCORE3_RTR6:
- return "TPC22/23";
- case DCORE3_RTR7:
- return "DEC6/7, NIC9/10/11, EDMA5/7, HMMU9/11/13/15, ROT1, PSOC";
- default:
- return "N/A";
- }
-}
-
-static u16 gaudi2_get_razwi_initiators(u32 rtr_id, u16 *engines)
-{
- switch (rtr_id) {
- case DCORE0_RTR0:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_PCIE_ENGINE_ID_DEC_0;
- engines[3] = GAUDI2_PCIE_ENGINE_ID_DEC_1;
- engines[4] = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
- engines[5] = GAUDI2_ENGINE_ID_PDMA_0;
- engines[6] = GAUDI2_ENGINE_ID_PDMA_1;
- engines[7] = GAUDI2_ENGINE_ID_PCIE;
- engines[8] = GAUDI2_DCORE0_ENGINE_ID_EDMA_0;
- engines[9] = GAUDI2_DCORE1_ENGINE_ID_EDMA_0;
- engines[10] = GAUDI2_ENGINE_ID_PSOC;
- return 11;
-
- case DCORE0_RTR1:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_1;
- return 2;
-
- case DCORE0_RTR2:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_3;
- return 2;
-
- case DCORE0_RTR3:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_5;
- return 2;
-
- case DCORE0_RTR4:
- case DCORE0_RTR5:
- case DCORE0_RTR6:
- case DCORE0_RTR7:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_MME;
- return 1;
-
- case DCORE1_RTR0:
- case DCORE1_RTR1:
- case DCORE1_RTR2:
- case DCORE1_RTR3:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_MME;
- return 1;
-
- case DCORE1_RTR4:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_5;
- return 2;
-
- case DCORE1_RTR5:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_3;
- return 2;
-
- case DCORE1_RTR6:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_1;
- return 2;
-
- case DCORE1_RTR7:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_ENGINE_ID_NIC0_0;
- engines[3] = GAUDI2_ENGINE_ID_NIC1_0;
- engines[4] = GAUDI2_ENGINE_ID_NIC2_0;
- engines[5] = GAUDI2_ENGINE_ID_NIC3_0;
- engines[6] = GAUDI2_ENGINE_ID_NIC4_0;
- engines[7] = GAUDI2_ENGINE_ID_ARC_FARM;
- engines[8] = GAUDI2_ENGINE_ID_KDMA;
- engines[9] = GAUDI2_DCORE0_ENGINE_ID_EDMA_1;
- engines[10] = GAUDI2_DCORE1_ENGINE_ID_EDMA_1;
- return 11;
-
- case DCORE2_RTR0:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_ENGINE_ID_NIC5_0;
- engines[3] = GAUDI2_ENGINE_ID_NIC6_0;
- engines[4] = GAUDI2_ENGINE_ID_NIC7_0;
- engines[5] = GAUDI2_ENGINE_ID_NIC8_0;
- engines[6] = GAUDI2_DCORE2_ENGINE_ID_EDMA_0;
- engines[7] = GAUDI2_DCORE3_ENGINE_ID_EDMA_0;
- engines[8] = GAUDI2_ENGINE_ID_ROT_0;
- return 9;
-
- case DCORE2_RTR1:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_5;
- return 2;
-
- case DCORE2_RTR2:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_3;
- return 2;
-
- case DCORE2_RTR3:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_1;
- return 2;
-
- case DCORE2_RTR4:
- case DCORE2_RTR5:
- case DCORE2_RTR6:
- case DCORE2_RTR7:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_MME;
- return 1;
- case DCORE3_RTR0:
- case DCORE3_RTR1:
- case DCORE3_RTR2:
- case DCORE3_RTR3:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_MME;
- return 1;
- case DCORE3_RTR4:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_1;
- return 2;
- case DCORE3_RTR5:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_3;
- return 2;
- case DCORE3_RTR6:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_5;
- return 2;
- case DCORE3_RTR7:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_ENGINE_ID_NIC9_0;
- engines[3] = GAUDI2_ENGINE_ID_NIC10_0;
- engines[4] = GAUDI2_ENGINE_ID_NIC11_0;
- engines[5] = GAUDI2_DCORE2_ENGINE_ID_EDMA_1;
- engines[6] = GAUDI2_DCORE3_ENGINE_ID_EDMA_1;
- engines[7] = GAUDI2_ENGINE_ID_ROT_1;
- engines[8] = GAUDI2_ENGINE_ID_ROT_0;
- return 9;
- default:
- return 0;
- }
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
}
-static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev, u32 rtr_id,
- u64 rtr_ctrl_base_addr, bool is_write,
- u64 *event_mask)
+static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u32 array_size,
+ u32 axuser_xy, u32 *base, u16 *eng_id,
+ char *eng_name)
{
- u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng;
- u32 razwi_hi, razwi_lo;
- u8 rd_wr_flag;
- num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]);
-
- if (is_write) {
- razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_HI);
- razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_LO);
- rd_wr_flag = HL_RAZWI_WRITE;
+ int i, num_of_eng = 0;
+ u16 str_size = 0;
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1);
- } else {
- razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_HI);
- razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_LO);
- rd_wr_flag = HL_RAZWI_READ;
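+ /* Scan the RAZWI info table for entries whose AXUSER XY coordinate matches,
+ * collect their engine ids and router control bases, and build a combined
+ * engine-name string for the error message.
+ */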
+ for (i = 0 ; i < array_size ; i++) {
+ if (axuser_xy != razwi_info[i].axuser_xy)
+ continue;
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1);
+ eng_id[num_of_eng] = razwi_info[i].eng_id;
+ base[num_of_eng] = razwi_info[i].rtr_ctrl;
+ if (!num_of_eng)
+ str_size += snprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
+ razwi_info[i].eng_name);
+ else
+ str_size += snprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
+ razwi_info[i].eng_name);
+ num_of_eng++;
}
- hl_handle_razwi(hdev, (u64)razwi_hi << 32 | razwi_lo, &engines[0], num_of_eng,
- rd_wr_flag | HL_RAZWI_HBW, event_mask);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped HBW %s error, rtr id %u, address %#llx\n",
- is_write ? "WR" : "RD", rtr_id, (u64)razwi_hi << 32 | razwi_lo);
-
- dev_err_ratelimited(hdev->dev,
- "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
+ return num_of_eng;
}
-static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u32 rtr_id,
- u64 rtr_ctrl_base_addr, bool is_write,
- u64 *event_mask)
+static bool gaudi2_handle_psoc_razwi_happened(struct hl_device *hdev, u32 razwi_reg,
+ u64 *event_mask)
{
- u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng;
- u32 razwi_addr;
- u8 rd_wr_flag;
+ u32 axuser_xy = RAZWI_GET_AXUSER_XY(razwi_reg), addr_hi = 0, addr_lo = 0;
+ u32 base[PSOC_RAZWI_MAX_ENG_PER_RTR];
+ u16 num_of_eng, eng_id[PSOC_RAZWI_MAX_ENG_PER_RTR];
+ char eng_name_str[PSOC_RAZWI_ENG_STR_SIZE];
+ bool razwi_happened = false;
+ int i;
- num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]);
+ num_of_eng = gaudi2_psoc_razwi_get_engines(common_razwi_info, ARRAY_SIZE(common_razwi_info),
+ axuser_xy, base, eng_id, eng_name_str);
- if (is_write) {
- razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
- rd_wr_flag = HL_RAZWI_WRITE;
+ /* If no match for XY coordinates, try to find it in MME razwi table */
+ if (!num_of_eng) {
+ axuser_xy = RAZWI_GET_AXUSER_LOW_XY(razwi_reg);
+ num_of_eng = gaudi2_psoc_razwi_get_engines(mme_razwi_info,
+ ARRAY_SIZE(mme_razwi_info),
+ axuser_xy, base, eng_id,
+ eng_name_str);
+ }
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1);
- } else {
- razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
- rd_wr_flag = HL_RAZWI_READ;
+ for (i = 0 ; i < num_of_eng ; i++) {
+ if (RREG32(base[i] + DEC_RAZWI_HBW_AW_SET)) {
+ addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_HI);
+ addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_LO);
+ dev_err(hdev->dev,
+ "PSOC HBW AW RAZWI: %s, address (aligned to 128 byte): 0x%llX\n",
+ eng_name_str, ((u64)addr_hi << 32) + addr_lo);
+ hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_HBW | HL_RAZWI_WRITE, event_mask);
+ razwi_happened = true;
+ }
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1);
- }
+ if (RREG32(base[i] + DEC_RAZWI_HBW_AR_SET)) {
+ addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_HI);
+ addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_LO);
+ dev_err(hdev->dev,
+ "PSOC HBW AR RAZWI: %s, address (aligned to 128 byte): 0x%llX\n",
+ eng_name_str, ((u64)addr_hi << 32) + addr_lo);
+ hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_HBW | HL_RAZWI_READ, event_mask);
+ razwi_happened = true;
+ }
- hl_handle_razwi(hdev, razwi_addr, &engines[0], num_of_eng, rd_wr_flag | HL_RAZWI_LBW,
- event_mask);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW %s error, rtr id %u, address %#x\n",
- is_write ? "WR" : "RD", rtr_id, razwi_addr);
+ if (RREG32(base[i] + DEC_RAZWI_LBW_AW_SET)) {
+ addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AW_ADDR);
+ dev_err(hdev->dev,
+ "PSOC LBW AW RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
+ eng_name_str, addr_lo);
+ hl_handle_razwi(hdev, addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_LBW | HL_RAZWI_WRITE, event_mask);
+ razwi_happened = true;
+ }
- dev_err_ratelimited(hdev->dev,
- "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
+ if (RREG32(base[i] + DEC_RAZWI_LBW_AR_SET)) {
+ addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AR_ADDR);
+ dev_err(hdev->dev,
+ "PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
+ eng_name_str, addr_lo);
+ hl_handle_razwi(hdev, addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_LBW | HL_RAZWI_READ, event_mask);
+ razwi_happened = true;
+ }
+ /* In the common case the loop breaks after the first iteration, since there is
+ * only one engine id or several engines sharing the same router. The exception is
+ * a PSOC RAZWI from EDMA, where the AXUSER id can match two routers (the two
+ * interfaces of the SFT router). In that case the first router may not hold the
+ * info and we must iterate over the other router as well.
+ */
+ if (razwi_happened)
+ break;
+ }
+
+ return razwi_happened;
}
/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
-static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask)
+static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask)
{
- u32 hbw_aw_set, hbw_ar_set, lbw_aw_set, lbw_ar_set, rtr_id, dcore_id, dcore_rtr_id, xy,
- razwi_mask_info, razwi_intr = 0;
- int rtr_map_arr_len = NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES;
- u64 rtr_ctrl_base_addr;
+ u32 razwi_mask_info, razwi_intr = 0, error_count = 0;
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) {
razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT);
if (!razwi_intr)
- return;
+ return 0;
}
razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
- xy = FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info);
dev_err_ratelimited(hdev->dev,
"PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK, razwi_mask_info),
- xy,
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK, razwi_mask_info));
- if (xy == 0) {
- dev_err_ratelimited(hdev->dev,
- "PSOC RAZWI interrupt: received event from 0 rtr coordinates\n");
- goto clear;
- }
-
- /* Find router id by router coordinates */
- for (rtr_id = 0 ; rtr_id < rtr_map_arr_len ; rtr_id++)
- if (rtr_coordinates_to_rtr_id[rtr_id] == xy)
- break;
-
- if (rtr_id == rtr_map_arr_len) {
+ if (gaudi2_handle_psoc_razwi_happened(hdev, razwi_mask_info, event_mask))
+ error_count++;
+ else
dev_err_ratelimited(hdev->dev,
- "PSOC RAZWI interrupt: invalid rtr coordinates (0x%x)\n", xy);
- goto clear;
- }
-
- /* Find router mstr_if register base */
- dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE;
- dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE;
- rtr_ctrl_base_addr = mmDCORE0_RTR0_CTRL_BASE + dcore_id * DCORE_OFFSET +
- dcore_rtr_id * DCORE_RTR_OFFSET;
-
- hbw_aw_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET);
- hbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET);
- lbw_aw_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET);
- lbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET);
-
- if (hbw_aw_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, true, event_mask);
+ "PSOC RAZWI interrupt: invalid razwi info (0x%x)\n",
+ razwi_mask_info);
- if (hbw_ar_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, false, event_mask);
-
- if (lbw_aw_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, true, event_mask);
-
- if (lbw_ar_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, false, event_mask);
-
-clear:
/* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX))
WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr);
+
+ return error_count;
}
-static void _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base)
+static int _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base, u16 event_type)
{
- u32 i, sts_val, sts_clr_val = 0;
+ u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(qman_base + QM_SEI_STATUS_OFFSET);
for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "QM SEI. err cause: %s\n",
- gaudi2_qm_sei_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_qm_sei_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
WREG32(qman_base + QM_SEI_STATUS_OFFSET, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
+ bool extended_err_check, u64 *event_mask)
{
enum razwi_event_sources module;
+ u32 error_count = 0;
u64 qman_base;
u8 index;
@@ -7808,221 +8418,243 @@ static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
module = RAZWI_ROT;
break;
default:
- return;
+ return 0;
}
- _gaudi2_handle_qm_sei_err(hdev, qman_base);
+ error_count = _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type);
/* There is a single event per NIC macro, so should check its both QMAN blocks */
if (event_type >= GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE &&
event_type <= GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE)
- _gaudi2_handle_qm_sei_err(hdev, qman_base + NIC_QM_OFFSET);
+ error_count += _gaudi2_handle_qm_sei_err(hdev,
+ qman_base + NIC_QM_OFFSET, event_type);
- /* check if RAZWI happened */
- if (razwi_info)
- gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, razwi_info, event_mask);
+ if (extended_err_check) {
+ /* check if RAZWI happened */
+ gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
+ }
+
+ return error_count;
}
-static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type)
+static int gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
- u32 qid_base;
+ u32 qid_base, error_count = 0;
u64 qman_base;
- char desc[32];
- u8 index;
+ u8 index = 0;
switch (event_type) {
case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_TPC5_QM:
index = event_type - GAUDI2_EVENT_TPC0_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE0_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC6_QM ... GAUDI2_EVENT_TPC11_QM:
index = event_type - GAUDI2_EVENT_TPC6_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE1_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC12_QM ... GAUDI2_EVENT_TPC17_QM:
index = event_type - GAUDI2_EVENT_TPC12_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE2_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC18_QM ... GAUDI2_EVENT_TPC23_QM:
index = event_type - GAUDI2_EVENT_TPC18_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE3_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC24_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0;
qman_base = mmDCORE0_TPC6_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC6_QM");
break;
case GAUDI2_EVENT_MME0_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0;
qman_base = mmDCORE0_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_MME_QM");
break;
case GAUDI2_EVENT_MME1_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0;
qman_base = mmDCORE1_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_MME_QM");
break;
case GAUDI2_EVENT_MME2_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0;
qman_base = mmDCORE2_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_MME_QM");
break;
case GAUDI2_EVENT_MME3_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0;
qman_base = mmDCORE3_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_MME_QM");
break;
case GAUDI2_EVENT_HDMA0_QM:
+ index = 0;
qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0;
qman_base = mmDCORE0_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA1_QM:
+ index = 1;
qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0;
qman_base = mmDCORE0_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA1_QM");
break;
case GAUDI2_EVENT_HDMA2_QM:
+ index = 2;
qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0;
qman_base = mmDCORE1_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA3_QM:
+ index = 3;
qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0;
qman_base = mmDCORE1_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA1_QM");
break;
case GAUDI2_EVENT_HDMA4_QM:
+ index = 4;
qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0;
qman_base = mmDCORE2_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA5_QM:
+ index = 5;
qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0;
qman_base = mmDCORE2_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA1_QM");
break;
case GAUDI2_EVENT_HDMA6_QM:
+ index = 6;
qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0;
qman_base = mmDCORE3_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA7_QM:
+ index = 7;
qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0;
qman_base = mmDCORE3_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA1_QM");
break;
case GAUDI2_EVENT_PDMA0_QM:
qid_base = GAUDI2_QUEUE_ID_PDMA_0_0;
qman_base = mmPDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "PDMA0_QM");
break;
case GAUDI2_EVENT_PDMA1_QM:
qid_base = GAUDI2_QUEUE_ID_PDMA_1_0;
qman_base = mmPDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "PDMA1_QM");
break;
case GAUDI2_EVENT_ROTATOR0_ROT0_QM:
qid_base = GAUDI2_QUEUE_ID_ROT_0_0;
qman_base = mmROT0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "ROTATOR0_QM");
break;
case GAUDI2_EVENT_ROTATOR1_ROT1_QM:
qid_base = GAUDI2_QUEUE_ID_ROT_1_0;
qman_base = mmROT1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "ROTATOR1_QM");
break;
default:
- return;
+ return 0;
}
- gaudi2_handle_qman_err_generic(hdev, desc, qman_base, qid_base);
+ error_count = gaudi2_handle_qman_err_generic(hdev, event_type, qman_base, qid_base);
/* Handle EDMA QM SEI here because there is no AXI error response event for EDMA */
- if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM)
- _gaudi2_handle_qm_sei_err(hdev, qman_base);
+ if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM) {
+ error_count += _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, index, 0, event_mask);
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev)
+static int gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev, u16 event_type)
{
- u32 i, sts_val, sts_clr_val = 0;
+ u32 i, sts_val, sts_clr_val, error_count = 0, arc_farm;
- sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS);
+ for (arc_farm = 0 ; arc_farm < NUM_OF_ARC_FARMS_ARC ; arc_farm++) {
+ sts_clr_val = 0;
+ sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS +
+ (arc_farm * ARC_FARM_OFFSET));
- for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) {
- if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "ARC SEI. err cause: %s\n",
- gaudi2_arc_sei_error_cause[i]);
- sts_clr_val |= BIT(i);
+ for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "ARC FARM ARC %u err cause: %s",
+ arc_farm, gaudi2_arc_sei_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ error_count++;
+ }
}
+ WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR + (arc_farm * ARC_FARM_OFFSET),
+ sts_clr_val);
}
- WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR, sts_clr_val);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_cpu_sei_err(struct hl_device *hdev)
+static int gaudi2_handle_cpu_sei_err(struct hl_device *hdev, u16 event_type)
{
- u32 i, sts_val, sts_clr_val = 0;
+ u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(mmCPU_IF_CPU_SEI_INTR_STS);
for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "CPU SEI. err cause: %s\n",
- gaudi2_cpu_sei_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_cpu_sei_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
+ hl_check_for_glbl_errors(hdev);
+
WREG32(mmCPU_IF_CPU_SEI_INTR_CLR, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index,
+static int gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, u16 event_type,
struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause,
u64 *event_mask)
{
u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "ROT%u. err cause: %s\n",
- rot_index, guadi2_rot_error_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_rot_error_cause[i]);
+ error_count++;
+ }
/* check if RAZWI happened */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0,
- &razwi_with_intr_cause->razwi_info, event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, char *interrupt_name,
+static int gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, u16 event_type,
struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause,
u64 *event_mask)
{
u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "TPC%d_%s interrupt cause: %s\n",
- tpc_index, interrupt_name, gaudi2_tpc_interrupts_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "interrupt cause: %s", gaudi2_tpc_interrupts_cause[i]);
+ error_count++;
+ }
/* check if RAZWI happened */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0,
- &razwi_with_intr_cause->razwi_info, event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const char *interrupt_name,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, u16 event_type,
+ u64 *event_mask)
{
- u32 sts_addr, sts_val, sts_clr_val = 0;
+ u32 sts_addr, sts_val, sts_clr_val = 0, error_count = 0;
int i;
if (dec_index < NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES)
@@ -8039,24 +8671,27 @@ static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const ch
for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "DEC%u_%s err cause: %s\n",
- dec_index, interrupt_name, gaudi2_dec_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_dec_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
/* check if RAZWI happened */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info,
- event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
/* Write 1 clear errors */
WREG32(sts_addr, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const char *interrupt_name,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, u16 event_type,
+ u64 *event_mask)
{
- u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0;
+ u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0;
int i;
sts_addr = mmDCORE0_MME_CTRL_LO_INTR_CAUSE + DCORE_OFFSET * mme_index;
@@ -8066,35 +8701,45 @@ static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const ch
for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "MME%u_%s err cause: %s\n",
- mme_index, interrupt_name, guadi2_mme_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_mme_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
/* check if RAZWI happened */
for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++)
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, razwi_info,
- event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, event_mask);
+
+ hl_check_for_glbl_errors(hdev);
WREG32(sts_clr_addr, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u8 mme_index, u8 sbte_index,
+static int gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data)
{
- int i;
+ int i, error_count = 0;
for (i = 0 ; i < GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "MME%uSBTE%u_AXI_ERR_RSP err cause: %s\n",
- mme_index, sbte_index, guadi2_mme_sbte_error_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_mme_sbte_error_cause[i]);
+ error_count++;
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, u16 event_type,
+ u64 *event_mask)
{
- u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0;
+ u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0;
int i;
sts_addr = mmDCORE0_MME_ACC_INTR_CAUSE + DCORE_OFFSET * mme_index;
@@ -8104,24 +8749,27 @@ static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index,
for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev,
- "MME%u_WAP_SOURCE_RESULT_INVALID err cause: %s\n",
- mme_index, guadi2_mme_wap_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_mme_wap_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
/* check if RAZWI happened on WAP0/1 */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, razwi_info,
- event_mask);
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, razwi_info,
- event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, event_mask);
+ hl_check_for_glbl_errors(hdev);
WREG32(sts_clr_addr, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
/* If an AXI read or write error is received, an error is reported and
@@ -8130,19 +8778,53 @@ static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause
* the actual error caused by a LBW KDMA transaction.
*/
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "kdma core err cause: %s\n",
- gaudi2_kdma_core_interrupts_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_kdma_core_interrupts_cause[i]);
+ error_count++;
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, int sts_addr)
{
+ u32 error_count = 0, sts_val = RREG32(sts_addr);
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "dma core err cause: %s\n",
- gaudi2_dma_core_interrupts_cause[i]);
+ if (sts_val & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_dma_core_interrupts_cause[i]);
+ error_count++;
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
+}
+
+static int gaudi2_handle_pdma_core_event(struct hl_device *hdev, u16 event_type, int pdma_idx)
+{
+ u32 sts_addr;
+
+ sts_addr = mmPDMA0_CORE_ERR_CAUSE + pdma_idx * PDMA_OFFSET;
+ return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr);
+}
+
+static int gaudi2_handle_edma_core_event(struct hl_device *hdev, u16 event_type, int edma_idx)
+{
+ static const int edma_event_index_map[] = {2, 3, 0, 1, 6, 7, 4, 5};
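+ /* The HDMA core event order does not follow the physical EDMA layout; translate
+ * the event index to the (dcore, engine) index used for the register offset.
+ */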
+ u32 sts_addr, index;
+
+ index = edma_event_index_map[edma_idx];
+
+ sts_addr = mmDCORE0_EDMA0_CORE_ERR_CAUSE +
+ DCORE_OFFSET * (index / NUM_OF_EDMA_PER_DCORE) +
+ DCORE_EDMA_OFFSET * (index % NUM_OF_EDMA_PER_DCORE);
+ return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr);
}
static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask)
@@ -8151,80 +8833,92 @@ static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev,
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
}
-static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data,
- u64 *event_mask)
+static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data, u64 *event_mask)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
if (!(intr_cause_data & BIT_ULL(i)))
continue;
- dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
- gaudi2_pcie_addr_dec_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
+ error_count++;
switch (intr_cause_data & BIT_ULL(i)) {
case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
+ hl_check_for_glbl_errors(hdev);
break;
case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
break;
}
}
+
+ return error_count;
}
-static void gaudi2_handle_pif_fatal(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_pif_fatal(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) {
- if (intr_cause_data & BIT_ULL(i))
- dev_err_ratelimited(hdev->dev, "PMMU PIF err cause: %s\n",
- gaudi2_pmmu_fatal_interrupts_cause[i]);
+ if (intr_cause_data & BIT_ULL(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_pmmu_fatal_interrupts_cause[i]);
+ error_count++;
+ }
}
+
+ return error_count;
}
-static void gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data)
+static int gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data)
{
- u32 dcore_id, hif_id;
+ u32 error_count = 0;
int i;
- dcore_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) / 4;
- hif_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) % 4;
-
for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) {
- if (intr_cause_data & BIT_ULL(i))
- dev_err_ratelimited(hdev->dev, "DCORE%u_HIF%u: %s\n", dcore_id, hif_id,
- gaudi2_hif_fatal_interrupts_cause[i]);
+ if (intr_cause_data & BIT_ULL(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_hif_fatal_interrupts_cause[i]);
+ error_count++;
+ }
}
+
+ return error_count;
}
static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu,
@@ -8243,11 +8937,14 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool
addr <<= 32;
addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA));
+ if (!is_pmmu)
+ addr = gaudi2_mmu_descramble_addr(hdev, addr);
+
dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n",
is_pmmu ? "PMMU" : "HMMU", addr);
hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask);
- WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0);
+ WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0);
}
static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu)
@@ -8265,23 +8962,26 @@ static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, boo
addr <<= 32;
addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA));
+ if (!is_pmmu)
+ addr = gaudi2_mmu_descramble_addr(hdev, addr);
+
dev_err_ratelimited(hdev->dev, "%s access error on va 0x%llx\n",
is_pmmu ? "PMMU" : "HMMU", addr);
- WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE), 0);
+ WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0);
}
-static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char *mmu_name,
+static int gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, u16 event_type,
u64 mmu_base, bool is_pmmu, u64 *event_mask)
{
- u32 spi_sei_cause, interrupt_clr = 0x0;
+ u32 spi_sei_cause, interrupt_clr = 0x0, error_count = 0;
int i;
spi_sei_cause = RREG32(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET);
for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) {
if (spi_sei_cause & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "%s SPI_SEI ERR. err cause: %s\n",
- mmu_name, gaudi2_mmu_spi_sei[i].cause);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_mmu_spi_sei[i].cause);
if (i == 0)
gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, event_mask);
@@ -8290,6 +8990,8 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char
if (gaudi2_mmu_spi_sei[i].clear_bit >= 0)
interrupt_clr |= BIT(gaudi2_mmu_spi_sei[i].clear_bit);
+
+ error_count++;
}
}
@@ -8298,12 +9000,14 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char
/* Clear interrupt */
WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr);
+
+ return error_count;
}
-static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
+static int gaudi2_handle_sm_err(struct hl_device *hdev, u16 event_type, u8 sm_index)
{
- u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log;
- u32 cq_intr_addr, cq_intr_val, cq_intr_queue_index;
+ u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log,
+ cq_intr_addr, cq_intr_val, cq_intr_queue_index, error_count = 0;
int i;
sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index;
@@ -8323,11 +9027,12 @@ static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
if (!(sei_cause_cause & BIT(i)))
continue;
- dev_err_ratelimited(hdev->dev, "SM%u SEI ERR. err cause: %s. %s: 0x%X\n",
- sm_index,
- gaudi2_sm_sei_cause[i].cause_name,
- gaudi2_sm_sei_cause[i].log_name,
- sei_cause_log & gaudi2_sm_sei_cause[i].log_mask);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s. %s: 0x%X",
+ gaudi2_sm_sei_cause[i].cause_name,
+ gaudi2_sm_sei_cause[i].log_name,
+ sei_cause_log);
+ error_count++;
break;
}
@@ -8343,71 +9048,138 @@ static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
dev_err_ratelimited(hdev->dev, "SM%u err. err cause: CQ_INTR. queue index: %u\n",
sm_index, cq_intr_queue_index);
+ error_count++;
/* Clear CQ_INTR */
WREG32(cq_intr_addr, 0);
}
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
+static u64 get_hmmu_base(u16 event_type)
+{
+ u8 dcore, index_in_dcore;
+
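+ /* HMMU event numbering does not follow the DCORE layout linearly, so map each
+ * event to its DCORE and HMMU instance within that DCORE before computing the
+ * register base.
+ */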
+ switch (event_type) {
+ case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU0_SPI_BASE ... GAUDI2_EVENT_HMMU0_SECURITY_ERROR:
+ dcore = 0;
+ index_in_dcore = 0;
+ break;
+ case GAUDI2_EVENT_HMMU_1_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU1_SPI_BASE ... GAUDI2_EVENT_HMMU1_SECURITY_ERROR:
+ dcore = 1;
+ index_in_dcore = 0;
+ break;
+ case GAUDI2_EVENT_HMMU_2_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU2_SPI_BASE ... GAUDI2_EVENT_HMMU2_SECURITY_ERROR:
+ dcore = 0;
+ index_in_dcore = 1;
+ break;
+ case GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU3_SPI_BASE ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR:
+ dcore = 1;
+ index_in_dcore = 1;
+ break;
+ case GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU4_SPI_BASE ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR:
+ dcore = 3;
+ index_in_dcore = 2;
+ break;
+ case GAUDI2_EVENT_HMMU_5_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU5_SPI_BASE ... GAUDI2_EVENT_HMMU5_SECURITY_ERROR:
+ dcore = 2;
+ index_in_dcore = 2;
+ break;
+ case GAUDI2_EVENT_HMMU_6_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU6_SPI_BASE ... GAUDI2_EVENT_HMMU6_SECURITY_ERROR:
+ dcore = 3;
+ index_in_dcore = 3;
+ break;
+ case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU7_SPI_BASE ... GAUDI2_EVENT_HMMU7_SECURITY_ERROR:
+ dcore = 2;
+ index_in_dcore = 3;
+ break;
+ case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU8_SPI_BASE ... GAUDI2_EVENT_HMMU8_SECURITY_ERROR:
+ dcore = 0;
+ index_in_dcore = 2;
+ break;
+ case GAUDI2_EVENT_HMMU_9_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU9_SPI_BASE ... GAUDI2_EVENT_HMMU9_SECURITY_ERROR:
+ dcore = 1;
+ index_in_dcore = 2;
+ break;
+ case GAUDI2_EVENT_HMMU_10_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU10_SPI_BASE ... GAUDI2_EVENT_HMMU10_SECURITY_ERROR:
+ dcore = 0;
+ index_in_dcore = 3;
+ break;
+ case GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU11_SPI_BASE ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR:
+ dcore = 1;
+ index_in_dcore = 3;
+ break;
+ case GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU12_SPI_BASE ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
+ dcore = 3;
+ index_in_dcore = 0;
+ break;
+ case GAUDI2_EVENT_HMMU_13_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU13_SPI_BASE ... GAUDI2_EVENT_HMMU13_SECURITY_ERROR:
+ dcore = 2;
+ index_in_dcore = 0;
+ break;
+ case GAUDI2_EVENT_HMMU_14_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU14_SPI_BASE ... GAUDI2_EVENT_HMMU14_SECURITY_ERROR:
+ dcore = 3;
+ index_in_dcore = 1;
+ break;
+ case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU15_SPI_BASE ... GAUDI2_EVENT_HMMU15_SECURITY_ERROR:
+ dcore = 2;
+ index_in_dcore = 1;
+ break;
+ default:
+ return ULONG_MAX;
+ }
+
+ return mmDCORE0_HMMU0_MMU_BASE + dcore * DCORE_OFFSET + index_in_dcore * DCORE_HMMU_OFFSET;
+}
+
+static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
bool is_pmmu = false;
- char desc[32];
+ u32 error_count = 0;
u64 mmu_base;
- u8 index;
switch (event_type) {
- case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR:
- index = (event_type - GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM) / 3;
- mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP:
- index = (event_type - GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP);
- mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR:
- index = (event_type - GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM) / 3;
- mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP:
- index = (event_type - GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP);
- mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR:
- index = (event_type - GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM) / 3;
- mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP:
- index = (event_type - GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP);
- mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
- index = (event_type - GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM) / 3;
- mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
- break;
- case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
- index = (event_type - GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP);
- mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
+ case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
+ case GAUDI2_EVENT_HMMU0_SPI_BASE ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
+ mmu_base = get_hmmu_base(event_type);
break;
+
case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
is_pmmu = true;
mmu_base = mmPMMU_HBW_MMU_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "PMMU");
break;
default:
- return;
+ return 0;
}
- gaudi2_handle_mmu_spi_sei_generic(hdev, desc, mmu_base, is_pmmu, event_mask);
+ if (mmu_base == ULONG_MAX)
+ return 0;
+
+ error_count = gaudi2_handle_mmu_spi_sei_generic(hdev, event_type, mmu_base,
+ is_pmmu, event_mask);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
@@ -8527,22 +9299,17 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
cause_idx = sei_data->hdr.sei_cause;
if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) {
- dev_err_ratelimited(hdev->dev, "Invalid HBM SEI event cause (%d) provided by FW\n",
- cause_idx);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s",
+ "Invalid HBM SEI event cause (%d) provided by FW", cause_idx);
return true;
}
- if (sei_data->hdr.is_critical)
- dev_err(hdev->dev,
- "System Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
- hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
- hbm_mc_sei_cause[cause_idx]);
-
- else
- dev_err_ratelimited(hdev->dev,
- "System Non-Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
- hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
- hbm_mc_sei_cause[cause_idx]);
+ gaudi2_print_event(hdev, event_type, !sei_data->hdr.is_critical,
+ "System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s",
+ sei_data->hdr.is_critical ? "Critical" : "Non-critical",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ hbm_mc_sei_cause[cause_idx]);
/* Print error-specific info */
switch (cause_idx) {
@@ -8586,24 +9353,33 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
return require_hard_reset;
}
-static void gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
- dev_err(hdev->dev,
- "HBM catastrophic temperature error (CATTRIP) cause %#llx\n",
- intr_cause_data);
+ if (intr_cause_data) {
+ gaudi2_print_event(hdev, event_type, true,
+ "temperature error cause: %#llx", intr_cause_data);
+ return 1;
+ }
+
+ return 0;
}
-static void gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data)
{
- u32 i;
+ u32 i, error_count = 0;
for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++)
- if (intr_cause_data & hbm_mc_spi[i].mask)
+ if (intr_cause_data & hbm_mc_spi[i].mask) {
dev_dbg(hdev->dev, "HBM spi event: notification cause(%s)\n",
hbm_mc_spi[i].cause);
+ error_count++;
+ }
+
+ return error_count;
}
-static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
ktime_t zero_time = ktime_set(0, 0);
@@ -8615,13 +9391,13 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
- dev_info_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
+ dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
break;
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
- dev_info_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
+ dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
break;
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
@@ -8629,12 +9405,14 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
break;
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");
break;
@@ -8646,43 +9424,49 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
mutex_unlock(&hdev->clk_throttling.lock);
}
-static void gaudi2_print_out_of_sync_info(struct hl_device *hdev,
+static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, u16 event_type,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
- dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
- le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
+ gaudi2_print_event(hdev, event_type, false,
+ "FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
+ le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci),
+ q->pi, atomic_read(&q->ci));
}
-static void gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev)
+static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
{
- u32 p2p_intr, msix_gw_intr;
+ u32 p2p_intr, msix_gw_intr, error_count = 0;
p2p_intr = RREG32(mmPCIE_WRAP_P2P_INTR);
msix_gw_intr = RREG32(mmPCIE_WRAP_MSIX_GW_INTR);
if (p2p_intr) {
- dev_err_ratelimited(hdev->dev,
- "pcie p2p transaction terminated due to security, req_id(0x%x)\n",
+ gaudi2_print_event(hdev, event_type, true,
+ "pcie p2p transaction terminated due to security, req_id(0x%x)",
RREG32(mmPCIE_WRAP_P2P_REQ_ID));
WREG32(mmPCIE_WRAP_P2P_INTR, 0x1);
+ error_count++;
}
if (msix_gw_intr) {
- dev_err_ratelimited(hdev->dev,
- "pcie msi-x gen denied due to vector num check failure, vec(0x%X)\n",
+ gaudi2_print_event(hdev, event_type, true,
+ "pcie msi-x gen denied due to vector num check failure, vec(0x%X)",
RREG32(mmPCIE_WRAP_MSIX_GW_VEC));
WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1);
+ error_count++;
}
+
+ return error_count;
}
-static void gaudi2_handle_pcie_drain(struct hl_device *hdev,
+static int gaudi2_handle_pcie_drain(struct hl_device *hdev,
struct hl_eq_pcie_drain_ind_data *drain_data)
{
- u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause;
+ u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause, error_count = 0;
cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data);
lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw);
@@ -8690,39 +9474,52 @@ static void gaudi2_handle_pcie_drain(struct hl_device *hdev,
hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw);
hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw);
- if (cause & BIT_ULL(0))
+ if (cause & BIT_ULL(0)) {
dev_err_ratelimited(hdev->dev,
"PCIE AXI drain LBW completed, read_err %u, write_err %u\n",
!!lbw_rd, !!lbw_wr);
+ error_count++;
+ }
- if (cause & BIT_ULL(1))
+ if (cause & BIT_ULL(1)) {
dev_err_ratelimited(hdev->dev,
"PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n",
hbw_rd, hbw_wr);
+ error_count++;
+ }
+
+ return error_count;
}
-static void gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) {
- if (intr_cause_data & BIT_ULL(i))
+ if (intr_cause_data & BIT_ULL(i)) {
dev_err_ratelimited(hdev->dev, "PSOC %s completed\n",
gaudi2_psoc_axi_drain_interrupts_cause[i]);
+ error_count++;
+ }
}
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev,
+static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, u16 event_type,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
- dev_warn(hdev->dev,
- "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
+ gaudi2_print_event(hdev, event_type, false,
+ "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
-static void hl_arc_event_handle(struct hl_device *hdev,
+static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
struct hl_eq_engine_arc_intr_data *data)
{
struct hl_engine_arc_dccm_queue_full_irq *q;
@@ -8737,12 +9534,13 @@ static void hl_arc_event_handle(struct hl_device *hdev,
case ENGINE_ARC_DCCM_QUEUE_FULL_IRQ:
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
- dev_err_ratelimited(hdev->dev,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u\n",
+ gaudi2_print_event(hdev, event_type, true,
+ "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
engine_id, intr_type, q->queue_index);
- break;
+ return 1;
default:
- dev_err_ratelimited(hdev->dev, "Unknown ARC event type\n");
+ gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
+ return 0;
}
}
@@ -8750,8 +9548,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
bool reset_required = false, is_critical = false;
- u32 ctl, reset_flags = HL_DRV_RESET_HARD;
- int index, sbte_index;
+ u32 index, ctl, reset_flags = 0, error_count = 0;
u64 event_mask = 0;
u16 event_type;
@@ -8767,8 +9564,6 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2->events_stat[event_type]++;
gaudi2->events_stat_aggregate[event_type]++;
- gaudi2_print_irq_info(hdev, event_type);
-
switch (event_type) {
case GAUDI2_EVENT_PCIE_CORE_SERR ... GAUDI2_EVENT_ARC0_ECC_DERR:
fallthrough;
@@ -8777,6 +9572,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
is_critical = eq_entry->ecc_data.is_critical;
+ error_count++;
break;
case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM:
@@ -8784,48 +9580,47 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_ROTATOR0_ROT0_QM ... GAUDI2_EVENT_ROTATOR1_ROT1_QM:
fallthrough;
case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1:
- gaudi2_handle_qman_err(hdev, event_type);
+ error_count = gaudi2_handle_qman_err(hdev, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0:
- reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
- gaudi2_handle_arc_farm_sei_err(hdev);
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ error_count = gaudi2_handle_arc_farm_sei_err(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ERR_RSP:
- gaudi2_handle_cpu_sei_err(hdev);
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ error_count = gaudi2_handle_cpu_sei_err(hdev, event_type);
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
break;
case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
- reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
- gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info, &event_mask);
+ error_count = gaudi2_handle_qm_sei_err(hdev, event_type, true, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
- gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause, &event_mask);
- gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask);
+ error_count = gaudi2_handle_rot_err(hdev, index, event_type,
+ &eq_entry->razwi_with_intr_cause, &event_mask);
+ error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP;
- gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP",
+ error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
- gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask);
+ error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE:
index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE;
- gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info,
- &event_mask);
+ error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8856,8 +9651,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_TPC24_KERNEL_ERR:
index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) /
(GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR);
- gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause,
- &event_mask);
+ error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
+ &eq_entry->razwi_with_intr_cause, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8873,7 +9668,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_DEC9_SPI:
index = (event_type - GAUDI2_EVENT_DEC0_SPI) /
(GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI);
- gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info, &event_mask);
+ error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8884,9 +9679,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) /
(GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE -
GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE);
- gaudi2_handle_mme_err(hdev, index,
- "CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info, &event_mask);
- gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask);
+ error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
+ error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8897,8 +9691,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) /
(GAUDI2_EVENT_MME1_QMAN_SW_ERROR -
GAUDI2_EVENT_MME0_QMAN_SW_ERROR);
- gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info,
- &event_mask);
+ error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8909,25 +9702,31 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) /
(GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID -
GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID);
- gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info, &event_mask);
+ error_count = gaudi2_handle_mme_wap_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_KDMA0_CORE:
- gaudi2_handle_kdma_core_event(hdev,
+ error_count = gaudi2_handle_kdma_core_event(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
- case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE:
- gaudi2_handle_dma_core_event(hdev,
- le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_HDMA5_CORE:
+ index = event_type - GAUDI2_EVENT_HDMA2_CORE;
+ error_count = gaudi2_handle_edma_core_event(hdev, event_type, index);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ break;
+
+ case GAUDI2_EVENT_PDMA0_CORE ... GAUDI2_EVENT_PDMA1_CORE:
+ index = event_type - GAUDI2_EVENT_PDMA0_CORE;
+ error_count = gaudi2_handle_pdma_core_event(hdev, event_type, index);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR:
- gaudi2_print_pcie_addr_dec_info(hdev,
+ error_count = gaudi2_print_pcie_addr_dec_info(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data), &event_mask);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
@@ -8937,27 +9736,27 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
- gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask);
+ error_count = gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL:
- gaudi2_handle_hif_fatal(hdev, event_type,
+ error_count = gaudi2_handle_hif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PMMU_FATAL_0:
- gaudi2_handle_pif_fatal(hdev,
+ error_count = gaudi2_handle_pif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT:
- gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask);
+ error_count = gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8967,33 +9766,41 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
}
+ error_count++;
break;
case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5:
- gaudi2_handle_hbm_cattrip(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ error_count = gaudi2_handle_hbm_cattrip(hdev, event_type,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI:
- gaudi2_handle_hbm_mc_spi(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ error_count = gaudi2_handle_hbm_mc_spi(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE:
- gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
+ error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
- gaudi2_handle_psoc_drain(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ error_count = gaudi2_handle_psoc_drain(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ECC:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_L2_RAM_ECC:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
@@ -9001,31 +9808,32 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP:
- index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) /
- (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP -
- GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
- sbte_index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) %
- (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP -
- GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
- gaudi2_handle_mme_sbte_err(hdev, index, sbte_index,
+ error_count = gaudi2_handle_mme_sbte_err(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_AXI_ERR_RSP:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_PRSTN_FALL:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_APB_TIMEOUT:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FATAL_ERR:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_TPC0_BMON_SPMU:
@@ -9078,6 +9886,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_DEC8_BMON_SPMU:
case GAUDI2_EVENT_DEC9_BMON_SPMU:
case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -9085,71 +9894,99 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
- gaudi2_print_clk_change_info(hdev, event_type);
- event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ gaudi2_print_clk_change_info(hdev, event_type, &event_mask);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
break;
case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC:
- gaudi2_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ gaudi2_print_out_of_sync_info(hdev, event_type, &eq_entry->pkt_sync_err);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FLR_REQUESTED:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ error_count = GAUDI2_NA_EVENT_CAUSE;
/* Do nothing- FW will handle it */
break;
case GAUDI2_EVENT_PCIE_P2P_MSIX:
- gaudi2_handle_pcie_p2p_msix(hdev);
+ error_count = gaudi2_handle_pcie_p2p_msix(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE;
- gaudi2_handle_sm_err(hdev, index);
+ error_count = gaudi2_handle_sm_err(hdev, event_type, index);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT:
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
- gaudi2_print_cpu_pkt_failure_info(hdev, &eq_entry->pkt_sync_err);
+ gaudi2_print_cpu_pkt_failure_info(hdev, event_type, &eq_entry->pkt_sync_err);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_ARC_DCCM_FULL:
- hl_arc_event_handle(hdev, &eq_entry->arc_data);
+ error_count = hl_arc_event_handle(hdev, event_type, &eq_entry->arc_data);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED:
+ case GAUDI2_EVENT_CPU_DEV_RESET_REQ:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ error_count = GAUDI2_NA_EVENT_CAUSE;
is_critical = true;
break;
default:
- if (gaudi2_irq_map_table[event_type].valid)
+ if (gaudi2_irq_map_table[event_type].valid) {
dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n",
event_type);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
+ }
}
- if ((gaudi2_irq_map_table[event_type].reset || reset_required) &&
- (hdev->hard_reset_on_fw_events ||
- (hdev->asic_prop.fw_security_enabled && is_critical)))
- goto reset_device;
+ /* Make sure to dump an error in case no error cause was printed so far.
+ * Note that although we have counted the errors, we use this number as
+ * a boolean.
+ */
+ if (error_count == GAUDI2_NA_EVENT_CAUSE && !is_info_event(event_type))
+ gaudi2_print_event(hdev, event_type, true, "%d", event_type);
+ else if (error_count == 0)
+ gaudi2_print_event(hdev, event_type, true,
+ "No error cause for H/W event %u", event_type);
+
+ if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) ||
+ reset_required) {
+ if (reset_required ||
+ (gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD))
+ reset_flags |= HL_DRV_RESET_HARD;
+
+ if (hdev->hard_reset_on_fw_events ||
+ (hdev->asic_prop.fw_security_enabled && is_critical))
+ goto reset_device;
+ }
/* Send unmask irq only for interrupts not classified as MSG */
if (!gaudi2_irq_map_table[event_type].msg)
@@ -9167,6 +10004,10 @@ reset_device:
} else {
reset_flags |= HL_DRV_RESET_DELAY;
}
+ /* escalate general hw errors to critical/fatal error */
+ if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
+ hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
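
The hunks above convert the per-event handlers in gaudi2_handle_eqe() to return the number of error causes they reported, with GAUDI2_NA_EVENT_CAUSE marking events that carry nothing to decode, and a generic message printed when a handler reports nothing. A minimal standalone sketch of that dispatch pattern follows; all names and values are illustrative, not driver code.

/*
 * Standalone sketch of the error_count pattern introduced above: each handler
 * returns how many error causes it reported, and the dispatcher prints a
 * generic message when nothing was reported. Illustrative names only.
 */
#include <stdio.h>

#define NA_EVENT_CAUSE 0xFF	/* sentinel: event carries no countable causes */

static unsigned int handle_qm_sei_err(unsigned int event_type)
{
	/* pretend we decoded two error-cause bits for this event */
	printf("event %u: qm sei cause A\n", event_type);
	printf("event %u: qm sei cause B\n", event_type);
	return 2;
}

static void handle_event(unsigned int event_type)
{
	unsigned int error_count = 0;

	switch (event_type) {
	case 1:
		error_count = handle_qm_sei_err(event_type);
		break;
	case 2:
		error_count = NA_EVENT_CAUSE;	/* informational, nothing to decode */
		break;
	default:
		break;
	}

	/* error_count is used as a boolean here, as the patch comment notes */
	if (error_count == 0)
		printf("No error cause for H/W event %u\n", event_type);
}

int main(void)
{
	handle_event(1);
	handle_event(2);
	handle_event(3);
	return 0;
}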
@@ -9573,16 +10414,23 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
/* Create mapping on asic side */
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
- hl_mmu_invalidate_cache_range(hdev, false,
+ if (rc) {
+ dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
+ goto unreserve_va;
+ }
+
+ rc = hl_mmu_invalidate_cache_range(hdev, false,
MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&hdev->mmu_lock);
if (rc) {
- dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
+ hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
goto unreserve_va;
}
+ mutex_unlock(&hdev->mmu_lock);
+
/* Enable MMU on KDMA */
gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
@@ -9611,11 +10459,16 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
mutex_lock(&hdev->mmu_lock);
- hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
- hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
+
+ rc = hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
+ if (rc)
+ goto unreserve_va;
+
+ rc = hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&hdev->mmu_lock);
+
unreserve_va:
+ mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
free_data_buffer:
hl_asic_dma_free_coherent(hdev, SZ_2M, host_mem_virtual_addr, host_mem_dma_addr);
@@ -9668,17 +10521,24 @@ static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *c
}
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
- hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&hdev->mmu_lock);
-
if (rc)
goto unreserve_internal_cb_pool;
+ rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
+ if (rc)
+ goto unmap_internal_cb_pool;
+
+ mutex_unlock(&hdev->mmu_lock);
+
return 0;
+unmap_internal_cb_pool:
+ hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
+ mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
gen_pool_destroy(hdev->internal_cb_pool);
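
The two hunks above fix the ordering of MMU map and cache-invalidate under hdev->mmu_lock: the map result is checked first, the invalidate runs only on success, a failed invalidate unmaps before bailing out, and the lock is released on every path. A minimal userspace sketch of that error-handling order follows; the helper names are assumptions, not the driver API.

/*
 * Userspace sketch of the map -> invalidate -> (unmap on failure) order,
 * with the lock released on every path. Illustrative helpers only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

static int map_range(void)        { return 0; }   /* 0 on success */
static int invalidate_cache(void) { return -1; }  /* force the failure path */
static void unmap_range(void)     { puts("unmap after failed invalidate"); }

static int setup_mapping(void)
{
	int rc;

	pthread_mutex_lock(&mmu_lock);

	rc = map_range();
	if (rc)
		goto unlock;		/* nothing to undo yet */

	rc = invalidate_cache();
	if (rc)
		unmap_range();		/* undo the mapping before bailing out */

unlock:
	pthread_mutex_unlock(&mmu_lock);
	return rc;
}

int main(void)
{
	printf("setup_mapping() = %d\n", setup_mapping());
	return 0;
}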
@@ -10116,8 +10976,8 @@ static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
block_size, vma->vm_page_prot);
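
This hunk (and the matching goya_mmap() change later in the series) replaces the direct vma->vm_flags OR-assignment with vm_flags_set(). A minimal sketch of the idiom, assuming a kernel that provides vm_flags_set(); the handler name is illustrative, not the driver's actual code.

/* Sketch of the idiom only: on kernels that provide vm_flags_set(),
 * VMA flags are updated through the helper instead of writing
 * vma->vm_flags directly.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
			  VM_DONTCOPY | VM_NORESERVE);
	return 0;
}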
@@ -10465,7 +11325,10 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi2_set_hbm_bar_base,
.set_engine_cores = gaudi2_set_engine_cores,
+ .set_engines = gaudi2_set_engines,
.send_device_activity = gaudi2_send_device_activity,
+ .set_dram_properties = gaudi2_set_dram_properties,
+ .set_binning_masks = gaudi2_set_binning_masks,
};
void gaudi2_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index b4383c199bbb..1cebe707772e 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -8,7 +8,7 @@
#ifndef GAUDI2P_H_
#define GAUDI2P_H_
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/gaudi2/gaudi2.h"
@@ -240,6 +240,10 @@
#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
+#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+
+#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE 8
+
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
@@ -385,6 +389,8 @@ enum gaudi2_edma_id {
* We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
*/
#define GAUDI2_NUM_USER_INTERRUPTS 255
+#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
+#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)
enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
@@ -408,12 +414,15 @@ enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_DEC_LAST = GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
GAUDI2_IRQ_NUM_COMPLETION,
GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
+ GAUDI2_IRQ_NUM_TPC_ASSERT,
GAUDI2_IRQ_NUM_RESERVED_FIRST,
- GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_USER_INTERRUPTS - 1),
- GAUDI2_IRQ_NUM_USER_FIRST,
+ GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
+ GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
+ GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};
@@ -446,6 +455,17 @@ struct dup_block_ctx {
};
/**
+ * struct gaudi2_queues_test_info - Holds the addresses of the messages used for testing the
+ * device queues.
+ * @dma_addr: the address used by the HW for accessing the message.
+ * @kern_addr: The address used by the driver for accessing the message.
+ */
+struct gaudi2_queues_test_info {
+ dma_addr_t dma_addr;
+ void *kern_addr;
+};
+
+/**
* struct gaudi2_device - ASIC specific manage structure.
* @cpucp_info_get: get information on device from CPU-CP
* @mapped_blocks: array that holds the base address and size of all blocks
@@ -503,6 +523,7 @@ struct dup_block_ctx {
* @flush_db_fifo: flag to force flush DB FIFO after a write.
* @hbm_cfg: HBM subsystem settings
* @hw_queues_lock_mutex: used by simulator instead of hw_queues_lock.
+ * @queues_test_info: information used by the driver when testing the HW queues.
*/
struct gaudi2_device {
int (*cpucp_info_get)(struct hl_device *hdev);
@@ -530,6 +551,44 @@ struct gaudi2_device {
u32 events_stat[GAUDI2_EVENT_SIZE];
u32 events_stat_aggregate[GAUDI2_EVENT_SIZE];
u32 num_of_valid_hw_events;
+
+ /* Queue testing */
+ struct gaudi2_queues_test_info queues_test_info[GAUDI2_NUM_TESTED_QS];
+};
+
+/*
+ * Types of the Gaudi2 IP blocks, used by the special blocks iterator.
+ * Required for scenarios where only particular block types can be
+ * addressed (e.g., special PLDM images).
+ */
+enum gaudi2_block_types {
+ GAUDI2_BLOCK_TYPE_PLL,
+ GAUDI2_BLOCK_TYPE_RTR,
+ GAUDI2_BLOCK_TYPE_CPU,
+ GAUDI2_BLOCK_TYPE_HIF,
+ GAUDI2_BLOCK_TYPE_HBM,
+ GAUDI2_BLOCK_TYPE_NIC,
+ GAUDI2_BLOCK_TYPE_PCIE,
+ GAUDI2_BLOCK_TYPE_PCIE_PMA,
+ GAUDI2_BLOCK_TYPE_PDMA,
+ GAUDI2_BLOCK_TYPE_EDMA,
+ GAUDI2_BLOCK_TYPE_PMMU,
+ GAUDI2_BLOCK_TYPE_PSOC,
+ GAUDI2_BLOCK_TYPE_ROT,
+ GAUDI2_BLOCK_TYPE_ARC_FARM,
+ GAUDI2_BLOCK_TYPE_DEC,
+ GAUDI2_BLOCK_TYPE_MME,
+ GAUDI2_BLOCK_TYPE_EU_BIST,
+ GAUDI2_BLOCK_TYPE_SYNC_MNGR,
+ GAUDI2_BLOCK_TYPE_STLB,
+ GAUDI2_BLOCK_TYPE_TPC,
+ GAUDI2_BLOCK_TYPE_HMMU,
+ GAUDI2_BLOCK_TYPE_SRAM,
+ GAUDI2_BLOCK_TYPE_XBAR,
+ GAUDI2_BLOCK_TYPE_KDMA,
+ GAUDI2_BLOCK_TYPE_XDMA,
+ GAUDI2_BLOCK_TYPE_XFT,
+ GAUDI2_BLOCK_TYPE_MAX
};
extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE];
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 56c6ab692482..25b5368f37dd 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
#include "gaudi2_coresight_regs.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#define GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 2000)
#define SPMU_MAX_COUNTERS 6
@@ -2376,10 +2376,10 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, upper_32_bits(input->start_addr2));
WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, lower_32_bits(input->end_addr2));
WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, upper_32_bits(input->end_addr2));
- WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr2));
- WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr2));
- WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr2));
- WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr2));
+ WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr3));
+ WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr3));
+ WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr3));
+ WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr3));
WREG32(base_reg + mmBMON_IDL_OFFSET, 0x0);
WREG32(base_reg + mmBMON_IDH_OFFSET, 0x0);
@@ -2657,7 +2657,7 @@ int gaudi2_coresight_init(struct hl_device *hdev)
/*
* Mask out all the disabled binned offsets.
* so when user request to configure a binned or masked out component,
- * driver will ignore programing it ( happens when offset value is set to 0x0 )
+ * driver will ignore programming it (happens when the offset value is set to 0x0)
* this is being set in gaudi2_coresight_set_disabled_components
*/
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h
index df8729286e06..df8729286e06 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
index e9ac87828221..e6664c4a2cf5 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
@@ -79,7 +79,6 @@
DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_RDY_MASK)
#define TPC_IDLE_MASK (DCORE0_TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
- DCORE0_TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_SB_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_QM_IDLE_MASK | \
@@ -87,6 +86,8 @@
#define DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40
+
/* CGM_IDLE_MASK is valid for all engines CGM idle check */
#define CGM_IDLE_MASK DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
index 768c2f3dc900..694735f9e6e6 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
@@ -1561,6 +1561,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI,
+ mmDCORE0_TPC0_CFG_KERNEL_KERNEL_CONFIG,
mmDCORE0_TPC0_CFG_KERNEL_SRF_0,
mmDCORE0_TPC0_CFG_KERNEL_SRF_1,
mmDCORE0_TPC0_CFG_KERNEL_SRF_2,
@@ -1594,6 +1595,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
+ mmDCORE0_TPC0_CFG_TPC_ID,
mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_1,
@@ -1666,6 +1668,10 @@ static const u32 gaudi2_pb_dcr0_sm_glbl[] = {
mmDCORE0_SYNC_MNGR_GLBL_BASE,
};
+static const u32 gaudi2_pb_dcr1_sm_glbl[] = {
+ mmDCORE1_SYNC_MNGR_GLBL_BASE,
+};
+
static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
{mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
{mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
@@ -1678,14 +1684,14 @@ static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
};
static const struct range gaudi2_pb_dcr_x_sm_glbl_unsecured_regs[] = {
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63},
- {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
- {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
- {mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
};
static const u32 gaudi2_pb_arc_sched[] = {
@@ -3358,14 +3364,6 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
/* Sync Manager GLBL */
- /* Unsecure all CQ registers */
- rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES, DCORE_OFFSET,
- HL_PB_SINGLE_INSTANCE, HL_PB_NA,
- gaudi2_pb_dcr0_sm_glbl,
- ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl),
- gaudi2_pb_dcr_x_sm_glbl_unsecured_regs,
- ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs));
-
/* Secure Dcore0 CQ0 registers */
rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
@@ -3374,6 +3372,14 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
gaudi2_pb_dcr0_sm_glbl_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl_unsecured_regs));
+ /* Unsecure all other CQ registers */
+ rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES - 1, DCORE_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr1_sm_glbl,
+ ARRAY_SIZE(gaudi2_pb_dcr1_sm_glbl),
+ gaudi2_pb_dcr_x_sm_glbl_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs));
+
/* PSOC.
* Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are
* protected by privileged RR.
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/accel/habanalabs/goya/Makefile
index b3f3b7b96683..b3f3b7b96683 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/accel/habanalabs/goya/Makefile
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c
index 0f083fcf81a6..fb0ac9df841a 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/accel/habanalabs/goya/goya.c
@@ -472,6 +472,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
prop->first_available_user_interrupt = USHRT_MAX;
+ prop->tpc_interrupt_id = USHRT_MAX;
+ prop->eq_interrupt_id = GOYA_EVENT_QUEUE_MSIX_IDX;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@@ -668,13 +670,18 @@ pci_init:
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
+ /* we are already on the failure path, so don't check whether hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
+ goto pci_fini;
+ }
}
if (!hdev->pldm) {
@@ -2782,7 +2789,7 @@ disable_queues:
return rc;
}
-static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct goya_device *goya = hdev->asic_specific;
u32 reset_timeout_ms, cpu_timeout_ms, status;
@@ -2828,17 +2835,17 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
- if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
- dev_err(hdev->dev,
- "Timeout while waiting for device to reset 0x%x\n",
- status);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
+ dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
+ return -ETIMEDOUT;
+ }
if (!hard_reset && goya) {
goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
HW_CAP_GOLDEN | HW_CAP_TPC);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_SOFT_RESET);
- return;
+ return 0;
}
/* Chicken bit to re-initiate boot sequencer flow */
@@ -2857,6 +2864,7 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
memset(goya->events_stat, 0, sizeof(goya->events_stat));
}
+ return 0;
}
int goya_suspend(struct hl_device *hdev)
@@ -2880,8 +2888,8 @@ static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
@@ -5420,6 +5428,16 @@ static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
return -EOPNOTSUPP;
}
+static int goya_set_dram_properties(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static int goya_set_binning_masks(struct hl_device *hdev)
+{
+ return 0;
+}
+
static int goya_send_device_activity(struct hl_device *hdev, bool open)
{
return 0;
@@ -5518,6 +5536,8 @@ static const struct hl_asic_funcs goya_funcs = {
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = goya_set_ddr_bar_base,
.send_device_activity = goya_send_device_activity,
+ .set_dram_properties = goya_set_dram_properties,
+ .set_binning_masks = goya_set_binning_masks,
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/accel/habanalabs/goya/goyaP.h
index d6ec43d6f6b0..5df3d30b91fd 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/accel/habanalabs/goya/goyaP.h
@@ -8,7 +8,7 @@
#ifndef GOYAP_H_
#define GOYAP_H_
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/goya/goya_packets.h"
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/accel/habanalabs/goya/goya_coresight.c
index 2c5133cfae65..e7ac3046cfaa 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/accel/habanalabs/goya/goya_coresight.c
@@ -10,7 +10,7 @@
#include "../include/goya/asic_reg/goya_regs.h"
#include "../include/goya/asic_reg/goya_masks.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/accel/habanalabs/goya/goya_hwmgr.c
index b595721751c1..b595721751c1 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/accel/habanalabs/goya/goya_hwmgr.c
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/accel/habanalabs/goya/goya_security.c
index 14c3bae3ccdc..14c3bae3ccdc 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/accel/habanalabs/goya/goya_security.c
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/accel/habanalabs/include/common/cpucp_if.h
index baa5aa43b6f4..8bbe685458c4 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/accel/habanalabs/include/common/cpucp_if.h
@@ -344,9 +344,20 @@ struct hl_eq_engine_arc_intr_data {
__le64 pad[5];
};
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
+ __le64 data_placeholder;
struct hl_eq_ecc_data ecc_data;
struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Gaudi1 HBM */
struct hl_eq_sm_sei_data sm_sei_data;
@@ -358,6 +369,7 @@ struct hl_eq_entry {
struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
__le64 data[7];
};
};
@@ -643,9 +655,16 @@ enum pq_init_status {
* data corruption in case of mismatched driver/FW versions.
* Relevant only to Gaudi.
*
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
* CPUCP_PACKET_ACTIVE_STATUS_SET -
* LKD sends FW indication whether device is free or in use, this indication is reported
* also to the BMC.
+ *
+ * CPUCP_PACKET_REGISTER_INTERRUPTS -
+ * Packet to register interrupts indicating LKD is ready to receive events from FW.
*/
enum cpucp_packet_id {
@@ -704,9 +723,14 @@ enum cpucp_packet_id {
CPUCP_PACKET_RESERVED5, /* not used */
CPUCP_PACKET_RESERVED6, /* not used */
CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
CPUCP_PACKET_RESERVED8, /* not used */
- CPUCP_PACKET_RESERVED9, /* not used */
CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_REGISTER_INTERRUPTS, /* internal */
CPUCP_PACKET_ID_MAX /* must be last */
};
@@ -727,6 +751,11 @@ enum cpucp_packet_id {
#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
@@ -805,8 +834,13 @@ struct cpucp_packet {
__le32 nonce;
};
- /* For NIC requests */
- __le32 port_index;
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+ };
};
struct cpucp_unmask_irq_arr_packet {
@@ -881,7 +915,9 @@ enum cpucp_in_attributes {
cpucp_in_max,
cpucp_in_lowest = 6,
cpucp_in_highest = 7,
- cpucp_in_reset_history
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
};
enum cpucp_curr_attributes {
@@ -976,6 +1012,11 @@ enum pll_index {
IC_PLL = 16,
MC_PLL = 17,
EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
PLL_MAX
};
@@ -1092,6 +1133,7 @@ struct cpucp_security_info {
* (0 = functional 1 = binned)
* @interposer_version: Interposer version programmed in eFuse
* @substrate_version: Substrate version programmed in eFuse
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
* @fw_os_version: Firmware OS Version
*/
struct cpucp_info {
@@ -1119,7 +1161,7 @@ struct cpucp_info {
__u8 substrate_version;
__u8 reserved2;
struct cpucp_security_info sec_info;
- __le32 reserved3;
+ __le32 fw_hbm_region_size;
__u8 pll_map[PLL_MAP_LEN];
__le64 mme_binning_mask;
__u8 fw_os_version[VERSION_MAX_LEN];
@@ -1135,8 +1177,9 @@ enum cpucp_serdes_type {
HLS1_SERDES_TYPE,
HLS1H_SERDES_TYPE,
HLS2_SERDES_TYPE,
- UNKNOWN_SERDES_TYPE,
- MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
};
struct cpucp_nic_info {
@@ -1161,6 +1204,21 @@ struct page_discard_info {
};
/*
+ * struct frac_val - fractional value represented by "integer.frac".
+ * @integer: the integer part of the value;
+ * @frac: the fractional part of the value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
+
+/*
* struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
* @integer: the integer part of the SER value;
* @exp: the exponent part of the SER value.
@@ -1183,8 +1241,12 @@ struct ser_val {
* @pcs_link: has PCS link.
* @phy_ready: is PHY ready.
* @auto_neg: is Autoneg enabled.
- * @timeout_retransmission_cnt: timeout retransmission events
- * @high_ber_cnt: high ber events
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @throughput: measured throughput.
+ * @latency: measured latency.
*/
struct cpucp_nic_status {
__le32 port;
@@ -1200,6 +1262,10 @@ struct cpucp_nic_status {
__u8 auto_neg;
__le32 timeout_retransmission_cnt;
__le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
};
enum cpucp_hbm_row_replace_cause {
@@ -1292,6 +1358,7 @@ struct cpucp_dev_info_signed {
__u8 certificate[SEC_CERTIFICATE_BUF_SZ];
};
+#define DCORE_MON_REGS_SZ 512
/*
* struct dcore_monitor_regs_data - DCORE monitor regs data.
* the structure follows sync manager block layout. relevant only to Gaudi.
@@ -1302,11 +1369,11 @@ struct cpucp_dev_info_signed {
* @mon_status: array of monitor status.
*/
struct dcore_monitor_regs_data {
- __le32 mon_pay_addrl[512];
- __le32 mon_pay_addrh[512];
- __le32 mon_pay_data[512];
- __le32 mon_arm[512];
- __le32 mon_status[512];
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
};
/* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */
@@ -1317,4 +1384,14 @@ struct cpucp_monitor_dump {
struct dcore_monitor_regs_data sync_mngr_e_n;
};
+/*
+ * The type of the generic request (and other input arguments) is fetched from the user by
+ * reading the "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+};
+
#endif /* CPUCP_IF_H */
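The cpucp_if.h hunks above add new shift/mask pairs (for example CPUCP_PKT_RES_EEPROM_OUT0/1) for fields packed into a 64-bit result word. A standalone sketch of how such pairs are typically consumed follows; the values and helper below are illustrative, not driver code.

/*
 * Standalone sketch of extracting packed fields with shift/mask pairs like
 * the new CPUCP_PKT_RES_EEPROM_OUT* definitions. Illustrative names only.
 */
#include <stdint.h>
#include <stdio.h>

#define PKT_RES_EEPROM_OUT0_SHIFT 0
#define PKT_RES_EEPROM_OUT0_MASK  0x000000000000FFFFull
#define PKT_RES_EEPROM_OUT1_SHIFT 16
#define PKT_RES_EEPROM_OUT1_MASK  0x0000000000FF0000ull

static uint64_t field_get(uint64_t result, uint64_t mask, unsigned int shift)
{
	return (result & mask) >> shift;
}

int main(void)
{
	uint64_t result = 0x00000000006E1234ull;	/* example packed value */

	printf("out0 = 0x%llx\n",
	       (unsigned long long)field_get(result, PKT_RES_EEPROM_OUT0_MASK,
					     PKT_RES_EEPROM_OUT0_SHIFT));
	printf("out1 = 0x%llx\n",
	       (unsigned long long)field_get(result, PKT_RES_EEPROM_OUT1_MASK,
					     PKT_RES_EEPROM_OUT1_SHIFT));
	return 0;
}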
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/accel/habanalabs/include/common/hl_boot_if.h
index e0ea51cc7475..c58d76a2705c 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/accel/habanalabs/include/common/hl_boot_if.h
@@ -41,6 +41,19 @@ enum cpu_boot_err {
};
/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and a dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
+
+/*
* CPU error bits in BOOT_ERROR registers
*
* CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
@@ -439,7 +452,7 @@ struct cpu_dyn_regs {
/* TODO: remove the desc magic after the code is updated to use message */
/* HCDM - Habana Communications Descriptor Magic */
#define HL_COMMS_DESC_MAGIC 0x4843444D
-#define HL_COMMS_DESC_VER 1
+#define HL_COMMS_DESC_VER 3
/* HCMv - Habana Communications Message + header version */
#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
@@ -450,8 +463,10 @@ struct cpu_dyn_regs {
((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
-#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V1
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
(((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
@@ -474,22 +489,31 @@ enum comms_msg_type {
/*
* Binning information shared between LKD and FW
- * @tpc_mask - TPC binning information
+ * @tpc_mask_l - TPC binning information lower 64 bit
* @dec_mask - Decoder binning information
- * @hbm_mask - HBM binning information
+ * @dram_mask - DRAM binning information
* @edma_mask - EDMA binning information
* @mme_mask_l - MME binning information lower 32
* @mme_mask_h - MME binning information upper 32
- * @reserved - reserved field for 64 bit alignment
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
*/
struct lkd_fw_binning_info {
- __le64 tpc_mask;
+ __le64 tpc_mask_l;
__le32 dec_mask;
- __le32 hbm_mask;
+ __le32 dram_mask;
__le32 edma_mask;
__le32 mme_mask_l;
__le32 mme_mask_h;
- __le32 reserved;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
};
/* TODO: remove this struct after the code is updated to use message */
@@ -512,6 +536,23 @@ struct comms_msg_header {
__u8 reserved[4]; /* pad to 64 bit */
};
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
/* this is the main FW descriptor - consider ABI when changing */
struct lkd_fw_comms_desc {
struct comms_desc_header header;
@@ -521,6 +562,8 @@ struct lkd_fw_comms_desc {
/* can be used for 1 more version w/o ABI change */
char reserved0[VERSION_MAX_LEN];
__le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
};
enum comms_reset_cause {
@@ -545,6 +588,8 @@ struct lkd_fw_comms_msg {
char reserved0[VERSION_MAX_LEN];
/* address for next FW component load */
__le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
};
struct {
__u8 reset_cause;
@@ -552,7 +597,7 @@ struct lkd_fw_comms_msg {
struct {
__u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
};
- struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_binning_info binning_conf;
};
};
@@ -699,4 +744,69 @@ struct comms_status {
};
};
+/**
+ * HL_MODULES_MAX_NUM is determined by the size of modules_mask in struct
+ * hl_component_versions
+ */
+enum hl_modules {
+ HL_MODULES_BOOT_INFO = 0,
+ HL_MODULES_EEPROM,
+ HL_MODULES_FDT,
+ HL_MODULES_I2C,
+ HL_MODULES_LZ4,
+ HL_MODULES_MBEDTLS,
+ HL_MODULES_MAX_NUM = 16
+};
+
+/**
+ * HL_COMPONENTS_MAX_NUM is determined by the size of components_mask in
+ * struct cpucp_versions
+ */
+enum hl_components {
+ HL_COMPONENTS_PID = 0,
+ HL_COMPONENTS_MGMT,
+ HL_COMPONENTS_PREBOOT,
+ HL_COMPONENTS_PPBOOT,
+ HL_COMPONENTS_ARMCP,
+ HL_COMPONENTS_CPLD,
+ HL_COMPONENTS_UBOOT,
+ HL_COMPONENTS_FUSE,
+ HL_COMPONENTS_MAX_NUM = 16
+};
+
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of the whole struct (including the dynamic size of modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_mask: i'th bit (from LSB) is a flag - on if module i in enum
+ * hl_modules is used.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. Elaborated explanation in
+ * struct cpucp_versions.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
+ __le16 modules_mask;
+ __u8 modules_counter;
+ __u8 reserved[1];
+ struct hl_module_data modules[];
+};
+
+/* Max size of the FW versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
#endif /* HL_BOOT_IF_H */
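The hl_boot_if.h hunk above introduces CPU_BOOT_ERR_FATAL_MASK, a mask over the fatal boot-error bits that the boot error status word can be tested against. A standalone sketch of that usage follows; the bit positions and names below are illustrative, not the driver's definitions.

/*
 * Standalone sketch of testing a boot-error status word against a fatal-error
 * mask, in the spirit of CPU_BOOT_ERR_FATAL_MASK. Illustrative bits only.
 */
#include <stdbool.h>
#include <stdio.h>

enum cpu_boot_err_example {
	ERR_DRAM_INIT_FAIL = 0,
	ERR_PLL_FAIL = 1,
	ERR_EEPROM_FAIL = 2,
};

#define ERR_FATAL_MASK \
	((1u << ERR_DRAM_INIT_FAIL) | \
	 (1u << ERR_PLL_FAIL) | \
	 (1u << ERR_EEPROM_FAIL))

static bool boot_error_is_fatal(unsigned int err_status)
{
	return (err_status & ERR_FATAL_MASK) != 0;
}

int main(void)
{
	printf("0x%x fatal? %d\n", 1u << ERR_PLL_FAIL,
	       boot_error_is_fatal(1u << ERR_PLL_FAIL));
	printf("0x%x fatal? %d\n", 1u << 10, boot_error_is_fatal(1u << 10));
	return 0;
}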
diff --git a/drivers/misc/habanalabs/include/common/qman_if.h b/drivers/accel/habanalabs/include/common/qman_if.h
index 7ed7739575ee..7ed7739575ee 100644
--- a/drivers/misc/habanalabs/include/common/qman_if.h
+++ b/drivers/accel/habanalabs/include/common/qman_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
index cf80e31317ad..cf80e31317ad 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
index d079a37acab8..d079a37acab8 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
index 1fdd5d5fc6d2..1fdd5d5fc6d2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
index 48376aabc3ba..48376aabc3ba 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
index 8e56a93d88a1..8e56a93d88a1 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
index 4d8d8f26c5d4..4d8d8f26c5d4 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
index c3ef300849be..c3ef300849be 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
index a42862cd5ae0..a42862cd5ae0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
index 8c4d4e016852..8c4d4e016852 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
index fb145f416fe6..fb145f416fe6 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
index a4b461ca3d94..a4b461ca3d94 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
index 192d11404b1c..192d11404b1c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
index f0cbda0d1e4d..f0cbda0d1e4d 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
index 6e07c6fb6fc9..6e07c6fb6fc9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
index 0faea21756c5..0faea21756c5 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
index 4962c13e2e2e..4962c13e2e2e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
index af87adb94c94..af87adb94c94 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
index 8dd705d20195..8dd705d20195 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
index d6c631f63e3e..d6c631f63e3e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
index 8c1c72df4469..8c1c72df4469 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
index b2b593fcec30..b2b593fcec30 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
index 8a10c6a76156..8a10c6a76156 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
index cd61289a1e8a..cd61289a1e8a 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
index 3f32370a14c7..3f32370a14c7 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
index 78c18da7154b..78c18da7154b 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
index 4ccaf8712948..4ccaf8712948 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
index 9236f4183084..9236f4183084 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
index da60893a5fab..da60893a5fab 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
index 56ffc920d58a..56ffc920d58a 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
index cbc642918deb..cbc642918deb 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
index 2382bc41bea6..2382bc41bea6 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
index c7596aac7a5c..c7596aac7a5c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 1a6576666794..23ee8691db46 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -320,4 +320,6 @@
#define mmPSOC_TPC_PLL_NR 0xC73100
#define mmIF_W_PLL_NR 0x488100
+#define mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL 0xC01208
+
#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
index 083d073a0128..083d073a0128 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
index e6dd30ce0ca7..e6dd30ce0ca7 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
index 4f078b328b00..4f078b328b00 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
index 6c07f7d45490..6c07f7d45490 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
index a1f2eb8b91bd..a1f2eb8b91bd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
index c1ea6a422010..c1ea6a422010 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
index 36f6edc72e3d..36f6edc72e3d 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
index 61465b599850..61465b599850 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
index bd37b6452133..bd37b6452133 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
index 7c97f4041b8e..7c97f4041b8e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
index fe96c575b5c6..fe96c575b5c6 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
index 0d1caf057ad0..0d1caf057ad0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
index 1b115ee6d6f0..1b115ee6d6f0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
index a89116a4586f..a89116a4586f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
index b7f091ddc89c..b7f091ddc89c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
index 4712cc62b009..4712cc62b009 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
index 7fa040f65004..7fa040f65004 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
index 99d5319672dd..99d5319672dd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
index 34b21b21da52..34b21b21da52 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
index 2efa2a54deb4..2efa2a54deb4 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
index a6047d4e2560..a6047d4e2560 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
index 9de8442f9bc2..9de8442f9bc2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
index 34fd47685edd..34fd47685edd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
index 543a98f81767..543a98f81767 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
index 95486b7ddf1d..95486b7ddf1d 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
index b79c59887b21..b79c59887b21 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
index 3a6a34ba2958..3a6a34ba2958 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
index 2585c70f59ef..2585c70f59ef 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
index b7c33e025db5..b7c33e025db5 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
index 6703e678ee9f..6703e678ee9f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
index 1b5cfcc1d85f..1b5cfcc1d85f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
index 9ce24597d4b0..9ce24597d4b0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
index ddf824392cf7..ddf824392cf7 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
index c6d517dbbd54..c6d517dbbd54 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
index 330e5b42d679..330e5b42d679 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
index d749f1968e5e..d749f1968e5e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
index ad48773c4bbd..ad48773c4bbd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
index 6c27850ca3f5..6c27850ca3f5 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
index a9ea89aa6405..a9ea89aa6405 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
index a37772c531d9..a37772c531d9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h
index 07d2a9000102..07d2a9000102 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
index 8f67c11c8de9..8f67c11c8de9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
index b82a906265a8..b82a906265a8 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
index 8e71532c6f36..8e71532c6f36 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
index f9e310ab6df2..f9e310ab6df2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
index 6736c476d979..6736c476d979 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
index af10ef7a87d9..af10ef7a87d9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
index 3e77c37952bc..3e77c37952bc 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
index 2919e2fa58f8..2919e2fa58f8 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
index 6d42469659f1..6d42469659f1 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
index 5f2a0fd86c9e..5f2a0fd86c9e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
index 7a9447f39a74..7a9447f39a74 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
index 80e63402f6e0..80e63402f6e0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
index f428f891935a..f428f891935a 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
index cd3a810ff4c4..cd3a810ff4c4 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
index eb251e72813f..eb251e72813f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
index e35ef7fd8b1c..e35ef7fd8b1c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
index 1887b10e58e2..1887b10e58e2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
index 5c36c972c027..5c36c972c027 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/accel/habanalabs/include/gaudi/gaudi.h
index ffae107b1693..ffae107b1693 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h
index c07ed4ed304c..c07ed4ed304c 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
index 479b6b038254..479b6b038254 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h b/drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h
index c45cc7f4d4d7..c45cc7f4d4d7 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h
index 2dba02757d37..2dba02757d37 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/accel/habanalabs/include/gaudi/gaudi_masks.h
index 880c57b26c63..880c57b26c63 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/accel/habanalabs/include/gaudi/gaudi_packets.h
index 66fc083a7c6a..66fc083a7c6a 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_packets.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h
index 92f25c2ae083..92f25c2ae083 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h
diff --git a/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h b/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
new file mode 100644
index 000000000000..22a6ab9a7f47
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 HabanaLabs Ltd.
+ * All Rights Reserved.
+ */
+
+#ifndef __GAUDI2_ARC_COMMON_PACKETS_H__
+#define __GAUDI2_ARC_COMMON_PACKETS_H__
+
+enum {
+ CPU_ID_SCHED_ARC0 = 0, /* FARM_ARC0 */
+ CPU_ID_SCHED_ARC1 = 1, /* FARM_ARC1 */
+ CPU_ID_SCHED_ARC2 = 2, /* FARM_ARC2 */
+ CPU_ID_SCHED_ARC3 = 3, /* FARM_ARC3 */
+ /* Dcore1 MME Engine ARC instance used as scheduler */
+ CPU_ID_SCHED_ARC4 = 4, /* DCORE1_MME0 */
+ /* Dcore3 MME Engine ARC instance used as scheduler */
+ CPU_ID_SCHED_ARC5 = 5, /* DCORE3_MME0 */
+
+ CPU_ID_TPC_QMAN_ARC0 = 6, /* DCORE0_TPC0 */
+ CPU_ID_TPC_QMAN_ARC1 = 7, /* DCORE0_TPC1 */
+ CPU_ID_TPC_QMAN_ARC2 = 8, /* DCORE0_TPC2 */
+ CPU_ID_TPC_QMAN_ARC3 = 9, /* DCORE0_TPC3 */
+ CPU_ID_TPC_QMAN_ARC4 = 10, /* DCORE0_TPC4 */
+ CPU_ID_TPC_QMAN_ARC5 = 11, /* DCORE0_TPC5 */
+ CPU_ID_TPC_QMAN_ARC6 = 12, /* DCORE1_TPC0 */
+ CPU_ID_TPC_QMAN_ARC7 = 13, /* DCORE1_TPC1 */
+ CPU_ID_TPC_QMAN_ARC8 = 14, /* DCORE1_TPC2 */
+ CPU_ID_TPC_QMAN_ARC9 = 15, /* DCORE1_TPC3 */
+ CPU_ID_TPC_QMAN_ARC10 = 16, /* DCORE1_TPC4 */
+ CPU_ID_TPC_QMAN_ARC11 = 17, /* DCORE1_TPC5 */
+ CPU_ID_TPC_QMAN_ARC12 = 18, /* DCORE2_TPC0 */
+ CPU_ID_TPC_QMAN_ARC13 = 19, /* DCORE2_TPC1 */
+ CPU_ID_TPC_QMAN_ARC14 = 20, /* DCORE2_TPC2 */
+ CPU_ID_TPC_QMAN_ARC15 = 21, /* DCORE2_TPC3 */
+ CPU_ID_TPC_QMAN_ARC16 = 22, /* DCORE2_TPC4 */
+ CPU_ID_TPC_QMAN_ARC17 = 23, /* DCORE2_TPC5 */
+ CPU_ID_TPC_QMAN_ARC18 = 24, /* DCORE3_TPC0 */
+ CPU_ID_TPC_QMAN_ARC19 = 25, /* DCORE3_TPC1 */
+ CPU_ID_TPC_QMAN_ARC20 = 26, /* DCORE3_TPC2 */
+ CPU_ID_TPC_QMAN_ARC21 = 27, /* DCORE3_TPC3 */
+ CPU_ID_TPC_QMAN_ARC22 = 28, /* DCORE3_TPC4 */
+ CPU_ID_TPC_QMAN_ARC23 = 29, /* DCORE3_TPC5 */
+ CPU_ID_TPC_QMAN_ARC24 = 30, /* DCORE0_TPC6 - Never present */
+
+ CPU_ID_MME_QMAN_ARC0 = 31, /* DCORE0_MME0 */
+ CPU_ID_MME_QMAN_ARC1 = 32, /* DCORE2_MME0 */
+
+ CPU_ID_EDMA_QMAN_ARC0 = 33, /* DCORE0_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC1 = 34, /* DCORE0_EDMA1 */
+ CPU_ID_EDMA_QMAN_ARC2 = 35, /* DCORE1_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC3 = 36, /* DCORE1_EDMA1 */
+ CPU_ID_EDMA_QMAN_ARC4 = 37, /* DCORE2_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC5 = 38, /* DCORE2_EDMA1 */
+ CPU_ID_EDMA_QMAN_ARC6 = 39, /* DCORE3_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC7 = 40, /* DCORE3_EDMA1 */
+
+ CPU_ID_PDMA_QMAN_ARC0 = 41, /* DCORE0_PDMA0 */
+ CPU_ID_PDMA_QMAN_ARC1 = 42, /* DCORE0_PDMA1 */
+
+ CPU_ID_ROT_QMAN_ARC0 = 43, /* ROT0 */
+ CPU_ID_ROT_QMAN_ARC1 = 44, /* ROT1 */
+
+ CPU_ID_NIC_QMAN_ARC0 = 45, /* NIC0_0 */
+ CPU_ID_NIC_QMAN_ARC1 = 46, /* NIC0_1 */
+ CPU_ID_NIC_QMAN_ARC2 = 47, /* NIC1_0 */
+ CPU_ID_NIC_QMAN_ARC3 = 48, /* NIC1_1 */
+ CPU_ID_NIC_QMAN_ARC4 = 49, /* NIC2_0 */
+ CPU_ID_NIC_QMAN_ARC5 = 50, /* NIC2_1 */
+ CPU_ID_NIC_QMAN_ARC6 = 51, /* NIC3_0 */
+ CPU_ID_NIC_QMAN_ARC7 = 52, /* NIC3_1 */
+ CPU_ID_NIC_QMAN_ARC8 = 53, /* NIC4_0 */
+ CPU_ID_NIC_QMAN_ARC9 = 54, /* NIC4_1 */
+ CPU_ID_NIC_QMAN_ARC10 = 55, /* NIC5_0 */
+ CPU_ID_NIC_QMAN_ARC11 = 56, /* NIC5_1 */
+ CPU_ID_NIC_QMAN_ARC12 = 57, /* NIC6_0 */
+ CPU_ID_NIC_QMAN_ARC13 = 58, /* NIC6_1 */
+ CPU_ID_NIC_QMAN_ARC14 = 59, /* NIC7_0 */
+ CPU_ID_NIC_QMAN_ARC15 = 60, /* NIC7_1 */
+ CPU_ID_NIC_QMAN_ARC16 = 61, /* NIC8_0 */
+ CPU_ID_NIC_QMAN_ARC17 = 62, /* NIC8_1 */
+ CPU_ID_NIC_QMAN_ARC18 = 63, /* NIC9_0 */
+ CPU_ID_NIC_QMAN_ARC19 = 64, /* NIC9_1 */
+ CPU_ID_NIC_QMAN_ARC20 = 65, /* NIC10_0 */
+ CPU_ID_NIC_QMAN_ARC21 = 66, /* NIC10_1 */
+ CPU_ID_NIC_QMAN_ARC22 = 67, /* NIC11_0 */
+ CPU_ID_NIC_QMAN_ARC23 = 68, /* NIC11_1 */
+
+ CPU_ID_MAX = 69,
+ CPU_ID_SCHED_MAX = 6,
+
+ CPU_ID_ALL = 0xFE,
+ CPU_ID_INVALID = 0xFF,
+};
+
+enum arc_regions_t {
+ ARC_REGION0_UNSED = 0,
+ /*
+ * Extension registers
+ * None
+ */
+ ARC_REGION1_SRAM = 1,
+ /*
+ * Extension registers
+ * AUX_SRAM_LSB_ADDR
+ * AUX_SRAM_MSB_ADDR
+ * ARC Address: 0x1000_0000
+ */
+ ARC_REGION2_CFG = 2,
+ /*
+ * Extension registers
+ * AUX_CFG_LSB_ADDR
+ * AUX_CFG_MSB_ADDR
+ * ARC Address: 0x2000_0000
+ */
+ ARC_REGION3_GENERAL = 3,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_0
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_0
+ * ARC Address: 0x3000_0000
+ */
+ ARC_REGION4_HBM0_FW = 4,
+ /*
+ * Extension registers
+ * AUX_HBM0_LSB_ADDR
+ * AUX_HBM0_MSB_ADDR
+ * AUX_HBM0_OFFSET
+ * ARC Address: 0x4000_0000
+ */
+ ARC_REGION5_HBM1_GC_DATA = 5,
+ /*
+ * Extension registers
+ * AUX_HBM1_LSB_ADDR
+ * AUX_HBM1_MSB_ADDR
+ * AUX_HBM1_OFFSET
+ * ARC Address: 0x5000_0000
+ */
+ ARC_REGION6_HBM2_GC_DATA = 6,
+ /*
+ * Extension registers
+ * AUX_HBM2_LSB_ADDR
+ * AUX_HBM2_MSB_ADDR
+ * AUX_HBM2_OFFSET
+ * ARC Address: 0x6000_0000
+ */
+ ARC_REGION7_HBM3_GC_DATA = 7,
+ /*
+ * Extension registers
+ * AUX_HBM3_LSB_ADDR
+ * AUX_HBM3_MSB_ADDR
+ * AUX_HBM3_OFFSET
+ * ARC Address: 0x7000_0000
+ */
+ ARC_REGION8_DCCM = 8,
+ /*
+ * Extension registers
+ * None
+ * ARC Address: 0x8000_0000
+ */
+ ARC_REGION9_PCIE = 9,
+ /*
+ * Extension registers
+ * AUX_PCIE_LSB_ADDR
+ * AUX_PCIE_MSB_ADDR
+ * ARC Address: 0x9000_0000
+ */
+ ARC_REGION10_GENERAL = 10,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_1
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_1
+ * ARC Address: 0xA000_0000
+ */
+ ARC_REGION11_GENERAL = 11,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_2
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_2
+ * ARC Address: 0xB000_0000
+ */
+ ARC_REGION12_GENERAL = 12,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_3
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_3
+ * ARC Address: 0xC000_0000
+ */
+ ARC_REGION13_GENERAL = 13,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_4
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_4
+ * ARC Address: 0xD000_0000
+ */
+ ARC_REGION14_GENERAL = 14,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_5
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_5
+ * ARC Address: 0xE000_0000
+ */
+ ARC_REGION15_LBU = 15
+ /*
+ * Extension registers
+ * None
+ * ARC Address: 0xF000_0000
+ */
+};
+
+#endif /* __GAUDI2_ARC_COMMON_PACKETS_H__ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
index 1974df13b5f9..1974df13b5f9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
index fc2c52af6509..fc2c52af6509 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
index 5345b5faa3a2..5345b5faa3a2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
index bde077eed285..bde077eed285 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
index 491af75c12c3..491af75c12c3 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
index 12d6a124a2e9..12d6a124a2e9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
index 23f9d2df52a7..23f9d2df52a7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
index bee4de0b28d6..bee4de0b28d6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
index b9f09e8199e6..b9f09e8199e6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
index d6dd2c066fa9..d6dd2c066fa9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
index 5903dbacec80..5903dbacec80 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
index e312cf810c0e..e312cf810c0e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
index 9b3eceec9d5d..9b3eceec9d5d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
index 296ab832013f..296ab832013f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
index e26f0d77c9dc..e26f0d77c9dc 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
index 8de48939243b..8de48939243b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
index f73e76c8f5bd..f73e76c8f5bd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
index d600f6bf70d8..d600f6bf70d8 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
index 84f068e4c602..84f068e4c602 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
index 0fc45300df81..0fc45300df81 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
index 88d2a133f129..88d2a133f129 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
index 0b0a76a5b2a0..0b0a76a5b2a0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
index 102e2a65811c..102e2a65811c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
index 32d475b9ed11..32d475b9ed11 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
index b608a634562f..b608a634562f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
index c3a462f2a9ac..c3a462f2a9ac 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
index df51eac10dd7..2965b6a3b423 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
@@ -150,8 +150,7 @@
#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_SHIFT 16
#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_MASK 0xF0000
#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_SHIFT 20
-#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK \
-0x100000
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK 0x100000

/* DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG */
#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MASK_SHIFT 0
@@ -235,23 +234,19 @@

/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK 0xFFFFFFFF

/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK 0xFFFFFFFF

/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK 0xFFFFFFFF

/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK 0xFFFFFFFF

/* DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD */
#define DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD_R_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
index 08ccd695ec89..08ccd695ec89 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
index 192eba5f07bb..a311778b21e7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
@@ -92,8 +92,7 @@
#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_SHIFT 20
#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK 0x100000
#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_SHIFT 21
-#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK \
-0x7E00000
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK 0x7E00000

/* DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32 */
#define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_SHIFT 0
@@ -228,12 +227,8 @@
#define DCORE0_HMMU0_STLB_MEM_READ_ARPROT_R_MASK 0x7

/* DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION */
-#define \
-DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT \
-0
-#define \
-DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
-0x1
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK 0x1
#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
@@ -261,53 +256,43 @@ DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK 0x1FF

/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
index 864a259f68e2..864a259f68e2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
index 07bed3ec740e..07bed3ec740e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
index c9043979fd69..c9043979fd69 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
index 7d74aea4576f..7d74aea4576f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
index f6f519eb5f6f..f6f519eb5f6f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
index 0e0c056ade9b..0e0c056ade9b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
index 34c6134a2f93..34c6134a2f93 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
index 55065032f87c..55065032f87c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
index 6022b387eacf..6022b387eacf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
index f9c9b01f0d1a..f9c9b01f0d1a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
index d96119b8c435..d96119b8c435 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
index c80d6817efe1..c80d6817efe1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
index 753b31dc1760..753b31dc1760 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
index f68d043edcd9..f68d043edcd9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
index a6dce326bd74..a6dce326bd74 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
index 5ace0f43cc78..5ace0f43cc78 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
index b375393dfdc0..b375393dfdc0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
index 7c22b9383f3c..fb53feb0a1a6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
@@ -20,8 +20,7 @@
*****************************************
*/

-#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 \
-0x40CB280
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 0x40CB280

#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_LOW 0x40CB284
@@ -29,8 +28,7 @@

#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_OUTER_LOOP 0x40CB28C

-#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 \
-0x40CB290
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 0x40CB290

#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SB_REPEAT 0x40CB294
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
index d17c165faf8b..d17c165faf8b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
index 7b77884e0024..7b77884e0024 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
index a2a2ba454d6d..a2a2ba454d6d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
index 7ad7b197cf87..7ad7b197cf87 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
index f699661d76aa..da0c94075e64 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
@@ -78,8 +78,7 @@
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_SHIFT 15
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_MASK 0x8000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_SHIFT 16
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK \
-0x10000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK 0x10000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_SHIFT 17
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_MASK 0x20000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_SLV_ADR_SHIFT 18
@@ -87,11 +86,9 @@
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_SHIFT 19
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_MASK 0x80000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_SHIFT 20
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK \
-0x100000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK 0x100000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_SHIFT 21
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK \
-0x200000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK 0x200000

/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 */
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0_V_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
index a51617a6f1fb..a51617a6f1fb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
index 1b91c9c13132..1b91c9c13132 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
index f702fe6e9365..f702fe6e9365 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
index 917f8ab88373..917f8ab88373 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
index c7ebaf73c51e..c7ebaf73c51e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
index 61654e37335b..61654e37335b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
index 32089b8250ed..32089b8250ed 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
index e168c1cc2a7d..e168c1cc2a7d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
index 543aba18ef68..543aba18ef68 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
index c45583fcc2cf..c45583fcc2cf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
index 077ae5232790..077ae5232790 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
index 211fa2c2c35b..211fa2c2c35b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
index 374a01d2b8d5..374a01d2b8d5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
index 22f4d6c805c5..22f4d6c805c5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
index 3a7290b3a5c9..3a7290b3a5c9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
index 5b52b88fee0f..5b52b88fee0f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
index d9b3f5cd392b..d9b3f5cd392b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
index 1bba940d3031..1bba940d3031 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
index f21540501cdd..f21540501cdd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
index c3c4991e6660..c3c4991e6660 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
index 76b273a41255..76b273a41255 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
index 0bddc734329f..0bddc734329f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
index 3a5b27df0ab4..3a5b27df0ab4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
index 8f082a1c9b1b..8f082a1c9b1b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
index 2d4a22680a23..2d4a22680a23 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
index cdab39debd2c..cdab39debd2c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
index 4ef1c1edc5f7..4ef1c1edc5f7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
index cdecbd0f9d84..cdecbd0f9d84 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
index 4cd9e26a150f..4cd9e26a150f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
index 8da278a3f3fe..8da278a3f3fe 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
index 2e4ff06e4858..2e4ff06e4858 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
index 4d48f0c6880b..4d48f0c6880b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
index 76ab8a1a7f31..76ab8a1a7f31 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
index f07da4a24f06..f07da4a24f06 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
index aee9cbc78c3d..aee9cbc78c3d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
index dee670b666ee..dee670b666ee 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
index 580ae57476bd..580ae57476bd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
index 91686c563fe5..91686c563fe5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
index e007dabc5382..e007dabc5382 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
index 149b85f5f045..149b85f5f045 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
index d4aad1875ad6..d4aad1875ad6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
index cca8683cbca1..cca8683cbca1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
index e68667cc795a..e68667cc795a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
index f7ffdcbd1a76..f7ffdcbd1a76 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
index 4c1bb5306cba..4c1bb5306cba 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
index e413905ffe25..e413905ffe25 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
index bce75ac6e279..bce75ac6e279 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
index 68dd98459c86..1c02f3dfdb6e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
@@ -106,8 +106,7 @@
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_SHIFT 2
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_MASK 0x4
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_SHIFT 3
-#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK \
-0x8
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK 0x8
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 4
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x10
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_SHIFT 5
@@ -117,8 +116,7 @@
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
-#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
-0x100
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK 0x100
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
index d2844307a6bf..d2844307a6bf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
index 89b522b12998..89b522b12998 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
index 622613dc76fb..622613dc76fb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
diff --git a/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h
new file mode 100644
index 000000000000..3d3802755814
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h
@@ -0,0 +1,1203 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_
+#define ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE1_SYNC_MNGR_GLBL
+ * (Prototype: SOB_GLBL)
+ *****************************************
+ */
+
+#define mmDCORE1_SYNC_MNGR_GLBL_SM_SEI_MASK 0x431E000
+
+#define mmDCORE1_SYNC_MNGR_GLBL_SM_SEI_CAUSE 0x431E004
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_CPMR_L 0x431E008
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_CPMR_H 0x431E00C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_MASK_L 0x431E020
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_MASK_H 0x431E024
+
+#define mmDCORE1_SYNC_MNGR_GLBL_ASID_SEC 0x431E030
+
+#define mmDCORE1_SYNC_MNGR_GLBL_ASID_PRIV_ONLY 0x431E034
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DELAY 0x431E038
+
+#define mmDCORE1_SYNC_MNGR_GLBL_PI_SIZE 0x431E03C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_SOB_ONLY 0x431E040
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INTR 0x431E044
+
+#define mmDCORE1_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV 0x431E048
+
+#define mmDCORE1_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE 0x431E04C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 0x431E050
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1 0x431E054
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_2 0x431E058
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_3 0x431E05C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_4 0x431E060
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_5 0x431E064
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_6 0x431E068
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_7 0x431E06C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_8 0x431E070
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_9 0x431E074
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_10 0x431E078
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_11 0x431E07C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_12 0x431E080
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_13 0x431E084
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_14 0x431E088
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_15 0x431E08C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_16 0x431E090
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_17 0x431E094
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_18 0x431E098
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_19 0x431E09C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_20 0x431E0A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_21 0x431E0A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_22 0x431E0A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_23 0x431E0AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_24 0x431E0B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_25 0x431E0B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_26 0x431E0B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_27 0x431E0BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_28 0x431E0C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_29 0x431E0C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_30 0x431E0C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_31 0x431E0CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_32 0x431E0D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_33 0x431E0D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_34 0x431E0D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_35 0x431E0DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_36 0x431E0E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_37 0x431E0E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_38 0x431E0E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_39 0x431E0EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_40 0x431E0F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_41 0x431E0F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_42 0x431E0F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_43 0x431E0FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_44 0x431E100
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_45 0x431E104
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_46 0x431E108
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_47 0x431E10C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_48 0x431E110
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_49 0x431E114
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_50 0x431E118
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_51 0x431E11C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_52 0x431E120
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_53 0x431E124
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_54 0x431E128
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_55 0x431E12C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_56 0x431E130
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_57 0x431E134
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_58 0x431E138
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_59 0x431E13C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_60 0x431E140
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_61 0x431E144
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_62 0x431E148
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63 0x431E14C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 0x431E150
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1 0x431E154
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_2 0x431E158
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_3 0x431E15C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_4 0x431E160
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_5 0x431E164
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_6 0x431E168
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_7 0x431E16C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_8 0x431E170
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_9 0x431E174
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_10 0x431E178
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_11 0x431E17C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_12 0x431E180
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_13 0x431E184
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_14 0x431E188
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_15 0x431E18C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_16 0x431E190
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_17 0x431E194
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_18 0x431E198
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_19 0x431E19C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_20 0x431E1A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_21 0x431E1A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_22 0x431E1A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_23 0x431E1AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_24 0x431E1B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_25 0x431E1B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_26 0x431E1B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_27 0x431E1BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_28 0x431E1C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_29 0x431E1C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_30 0x431E1C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_31 0x431E1CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_32 0x431E1D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_33 0x431E1D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_34 0x431E1D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_35 0x431E1DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_36 0x431E1E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_37 0x431E1E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_38 0x431E1E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_39 0x431E1EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_40 0x431E1F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_41 0x431E1F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_42 0x431E1F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_43 0x431E1FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_44 0x431E200
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_45 0x431E204
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_46 0x431E208
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_47 0x431E20C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_48 0x431E210
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_49 0x431E214
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_50 0x431E218
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_51 0x431E21C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_52 0x431E220
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_53 0x431E224
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_54 0x431E228
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_55 0x431E22C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_56 0x431E230
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_57 0x431E234
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_58 0x431E238
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_59 0x431E23C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_60 0x431E240
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_61 0x431E244
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_62 0x431E248
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63 0x431E24C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 0x431E250
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_1 0x431E254
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_2 0x431E258
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_3 0x431E25C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_4 0x431E260
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_5 0x431E264
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_6 0x431E268
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_7 0x431E26C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_8 0x431E270
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_9 0x431E274
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_10 0x431E278
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_11 0x431E27C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_12 0x431E280
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_13 0x431E284
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_14 0x431E288
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_15 0x431E28C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_16 0x431E290
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_17 0x431E294
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_18 0x431E298
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_19 0x431E29C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_20 0x431E2A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_21 0x431E2A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_22 0x431E2A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_23 0x431E2AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_24 0x431E2B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_25 0x431E2B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_26 0x431E2B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_27 0x431E2BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_28 0x431E2C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_29 0x431E2C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_30 0x431E2C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_31 0x431E2CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_32 0x431E2D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_33 0x431E2D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_34 0x431E2D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_35 0x431E2DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_36 0x431E2E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_37 0x431E2E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_38 0x431E2E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_39 0x431E2EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_40 0x431E2F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_41 0x431E2F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_42 0x431E2F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_43 0x431E2FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_44 0x431E300
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_45 0x431E304
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_46 0x431E308
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_47 0x431E30C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_48 0x431E310
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_49 0x431E314
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_50 0x431E318
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_51 0x431E31C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_52 0x431E320
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_53 0x431E324
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_54 0x431E328
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_55 0x431E32C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_56 0x431E330
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_57 0x431E334
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_58 0x431E338
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_59 0x431E33C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_60 0x431E340
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_61 0x431E344
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_62 0x431E348
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63 0x431E34C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0 0x431E350
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_1 0x431E354
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_2 0x431E358
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_3 0x431E35C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_4 0x431E360
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_5 0x431E364
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_6 0x431E368
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_7 0x431E36C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_8 0x431E370
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_9 0x431E374
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_10 0x431E378
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_11 0x431E37C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_12 0x431E380
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_13 0x431E384
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_14 0x431E388
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_15 0x431E38C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_16 0x431E390
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_17 0x431E394
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_18 0x431E398
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_19 0x431E39C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_20 0x431E3A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_21 0x431E3A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_22 0x431E3A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_23 0x431E3AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_24 0x431E3B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_25 0x431E3B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_26 0x431E3B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_27 0x431E3BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_28 0x431E3C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_29 0x431E3C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_30 0x431E3C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_31 0x431E3CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_32 0x431E3D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_33 0x431E3D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_34 0x431E3D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_35 0x431E3DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_36 0x431E3E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_37 0x431E3E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_38 0x431E3E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_39 0x431E3EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_40 0x431E3F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_41 0x431E3F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_42 0x431E3F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_43 0x431E3FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_44 0x431E400
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_45 0x431E404
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_46 0x431E408
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_47 0x431E40C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_48 0x431E410
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_49 0x431E414
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_50 0x431E418
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_51 0x431E41C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_52 0x431E420
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_53 0x431E424
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_54 0x431E428
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_55 0x431E42C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_56 0x431E430
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_57 0x431E434
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_58 0x431E438
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_59 0x431E43C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_60 0x431E440
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_61 0x431E444
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_62 0x431E448
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63 0x431E44C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_0 0x431E450
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_1 0x431E454
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_2 0x431E458
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_3 0x431E45C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_4 0x431E460
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_5 0x431E464
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_6 0x431E468
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_7 0x431E46C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_8 0x431E470
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_9 0x431E474
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_10 0x431E478
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_11 0x431E47C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_12 0x431E480
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_13 0x431E484
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_14 0x431E488
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_15 0x431E48C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_16 0x431E490
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_17 0x431E494
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_18 0x431E498
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_19 0x431E49C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_20 0x431E4A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_21 0x431E4A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_22 0x431E4A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_23 0x431E4AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_24 0x431E4B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_25 0x431E4B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_26 0x431E4B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_27 0x431E4BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_28 0x431E4C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_29 0x431E4C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_30 0x431E4C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_31 0x431E4CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_32 0x431E4D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_33 0x431E4D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_34 0x431E4D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_35 0x431E4DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_36 0x431E4E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_37 0x431E4E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_38 0x431E4E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_39 0x431E4EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_40 0x431E4F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_41 0x431E4F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_42 0x431E4F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_43 0x431E4FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_44 0x431E500
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_45 0x431E504
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_46 0x431E508
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_47 0x431E50C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_48 0x431E510
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_49 0x431E514
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_50 0x431E518
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_51 0x431E51C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_52 0x431E520
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_53 0x431E524
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_54 0x431E528
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_55 0x431E52C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_56 0x431E530
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_57 0x431E534
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_58 0x431E538
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_59 0x431E53C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_60 0x431E540
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_61 0x431E544
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_62 0x431E548
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_63 0x431E54C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0 0x431E550
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_1 0x431E554
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_2 0x431E558
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_3 0x431E55C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_4 0x431E560
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_5 0x431E564
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_6 0x431E568
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_7 0x431E56C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_8 0x431E570
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_9 0x431E574
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_10 0x431E578
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_11 0x431E57C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_12 0x431E580
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_13 0x431E584
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_14 0x431E588
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_15 0x431E58C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_16 0x431E590
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_17 0x431E594
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_18 0x431E598
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_19 0x431E59C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_20 0x431E5A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_21 0x431E5A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_22 0x431E5A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_23 0x431E5AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_24 0x431E5B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_25 0x431E5B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_26 0x431E5B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_27 0x431E5BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_28 0x431E5C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_29 0x431E5C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_30 0x431E5C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_31 0x431E5CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_32 0x431E5D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_33 0x431E5D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_34 0x431E5D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_35 0x431E5DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_36 0x431E5E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_37 0x431E5E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_38 0x431E5E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_39 0x431E5EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_40 0x431E5F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_41 0x431E5F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_42 0x431E5F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_43 0x431E5FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_44 0x431E600
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_45 0x431E604
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_46 0x431E608
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_47 0x431E60C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_48 0x431E610
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_49 0x431E614
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_50 0x431E618
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_51 0x431E61C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_52 0x431E620
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_53 0x431E624
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_54 0x431E628
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_55 0x431E62C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_56 0x431E630
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_57 0x431E634
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_58 0x431E638
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_59 0x431E63C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_60 0x431E640
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_61 0x431E644
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_62 0x431E648
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63 0x431E64C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0 0x431E650
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_1 0x431E654
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_2 0x431E658
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_3 0x431E65C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_4 0x431E660
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_5 0x431E664
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_6 0x431E668
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_7 0x431E66C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_8 0x431E670
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_9 0x431E674
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_10 0x431E678
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_11 0x431E67C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_12 0x431E680
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_13 0x431E684
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_14 0x431E688
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_15 0x431E68C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_16 0x431E690
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_17 0x431E694
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_18 0x431E698
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_19 0x431E69C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_20 0x431E6A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_21 0x431E6A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_22 0x431E6A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_23 0x431E6AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_24 0x431E6B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_25 0x431E6B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_26 0x431E6B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_27 0x431E6BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_28 0x431E6C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_29 0x431E6C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_30 0x431E6C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_31 0x431E6CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_32 0x431E6D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_33 0x431E6D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_34 0x431E6D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_35 0x431E6DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_36 0x431E6E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_37 0x431E6E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_38 0x431E6E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_39 0x431E6EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_40 0x431E6F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_41 0x431E6F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_42 0x431E6F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_43 0x431E6FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_44 0x431E700
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_45 0x431E704
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_46 0x431E708
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_47 0x431E70C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_48 0x431E710
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_49 0x431E714
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_50 0x431E718
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_51 0x431E71C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_52 0x431E720
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_53 0x431E724
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_54 0x431E728
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_55 0x431E72C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_56 0x431E730
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_57 0x431E734
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_58 0x431E738
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_59 0x431E73C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_60 0x431E740
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_61 0x431E744
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_62 0x431E748
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63 0x431E74C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0 0x431E750
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_1 0x431E754
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_2 0x431E758
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_3 0x431E75C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_4 0x431E760
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_5 0x431E764
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_6 0x431E768
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_7 0x431E76C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_8 0x431E770
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_9 0x431E774
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_10 0x431E778
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_11 0x431E77C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_12 0x431E780
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_13 0x431E784
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_14 0x431E788
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_15 0x431E78C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_16 0x431E790
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_17 0x431E794
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_18 0x431E798
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_19 0x431E79C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_20 0x431E7A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_21 0x431E7A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_22 0x431E7A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_23 0x431E7AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_24 0x431E7B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_25 0x431E7B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_26 0x431E7B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_27 0x431E7BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_28 0x431E7C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_29 0x431E7C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_30 0x431E7C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_31 0x431E7CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_32 0x431E7D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_33 0x431E7D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_34 0x431E7D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_35 0x431E7DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_36 0x431E7E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_37 0x431E7E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_38 0x431E7E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_39 0x431E7EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_40 0x431E7F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_41 0x431E7F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_42 0x431E7F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_43 0x431E7FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_44 0x431E800
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_45 0x431E804
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_46 0x431E808
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_47 0x431E80C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_48 0x431E810
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_49 0x431E814
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_50 0x431E818
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_51 0x431E81C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_52 0x431E820
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_53 0x431E824
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_54 0x431E828
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_55 0x431E82C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_56 0x431E830
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_57 0x431E834
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_58 0x431E838
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_59 0x431E83C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_60 0x431E840
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_61 0x431E844
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_62 0x431E848
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63 0x431E84C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0 0x431E850
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_1 0x431E854
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_2 0x431E858
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_3 0x431E85C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_4 0x431E860
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_5 0x431E864
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_6 0x431E868
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_7 0x431E86C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_8 0x431E870
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_9 0x431E874
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_10 0x431E878
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_11 0x431E87C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_12 0x431E880
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_13 0x431E884
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_14 0x431E888
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_15 0x431E88C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_16 0x431E890
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_17 0x431E894
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_18 0x431E898
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_19 0x431E89C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_20 0x431E8A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_21 0x431E8A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_22 0x431E8A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_23 0x431E8AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_24 0x431E8B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_25 0x431E8B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_26 0x431E8B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_27 0x431E8BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_28 0x431E8C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_29 0x431E8C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_30 0x431E8C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_31 0x431E8CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_32 0x431E8D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_33 0x431E8D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_34 0x431E8D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_35 0x431E8DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_36 0x431E8E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_37 0x431E8E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_38 0x431E8E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_39 0x431E8EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_40 0x431E8F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_41 0x431E8F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_42 0x431E8F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_43 0x431E8FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_44 0x431E900
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_45 0x431E904
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_46 0x431E908
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_47 0x431E90C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_48 0x431E910
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_49 0x431E914
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_50 0x431E918
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_51 0x431E91C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_52 0x431E920
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_53 0x431E924
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_54 0x431E928
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_55 0x431E92C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_56 0x431E930
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_57 0x431E934
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_58 0x431E938
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_59 0x431E93C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_60 0x431E940
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_61 0x431E944
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_62 0x431E948
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63 0x431E94C
+
+#endif /* ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_ */
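Note on the new header above: its per-index registers (CQ_BASE_ADDR_L/H_n, CQ_SIZE_LOG2_n, CQ_PI_n, CQ_SEC_n, LBW_ADDR_L/H_n, LBW_DATA_n, CQ_INC_MODE_n) sit at a fixed 4-byte stride per index, as the addresses show. A minimal C sketch of deriving the n-th register address from the index-0 define follows; the helper and stride macro names are illustrative, not taken from the driver.

#include <linux/types.h>

/* each per-CQ register is one 32-bit word, so consecutive indices differ by 4 */
#define DCORE1_SM_GLBL_CQ_REG_STRIDE	4

static inline u32 dcore1_sm_glbl_cq_base_addr_l(unsigned int cq_idx)
{
	/* e.g. cq_idx = 5: 0x431E050 + 5 * 4 = 0x431E064 (CQ_BASE_ADDR_L_5) */
	return mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 +
	       cq_idx * DCORE1_SM_GLBL_CQ_REG_STRIDE;
}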
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
index b06469f5a279..b06469f5a279 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
index 3caee4515ad6..3caee4515ad6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index 6aa1b1412462..6c58af614236 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2020-2022 HabanaLabs, Ltd.
+ * Copyright 2020-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -31,6 +31,7 @@
#include "dcore0_sync_mngr_objs_regs.h"
#include "dcore0_sync_mngr_glbl_regs.h"
#include "dcore0_sync_mngr_mstr_if_axuser_regs.h"
+#include "dcore1_sync_mngr_glbl_regs.h"
#include "pdma0_qm_arc_aux_regs.h"
#include "pdma0_core_ctx_regs.h"
#include "pdma0_core_regs.h"
@@ -163,6 +164,8 @@
#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x4800040
+#define mmDCORE0_TPC0_EML_CFG_DBG_CNT 0x40000
+
#define SM_OBJS_PROT_BITS_OFFS 0x14000
#define DCORE_OFFSET (mmDCORE1_TPC0_QM_BASE - mmDCORE0_TPC0_QM_BASE)
@@ -184,7 +187,10 @@
#define TPC_CFG_STALL_ON_ERR_OFFSET (mmDCORE0_TPC0_CFG_STALL_ON_ERR - mmDCORE0_TPC0_CFG_BASE)
#define TPC_CFG_TPC_INTR_MASK_OFFSET (mmDCORE0_TPC0_CFG_TPC_INTR_MASK - mmDCORE0_TPC0_CFG_BASE)
#define TPC_CFG_MSS_CONFIG_OFFSET (mmDCORE0_TPC0_CFG_MSS_CONFIG - mmDCORE0_TPC0_CFG_BASE)
+#define TPC_EML_CFG_DBG_CNT_OFFSET (mmDCORE0_TPC0_EML_CFG_DBG_CNT - mmDCORE0_TPC0_EML_CFG_BASE)
+#define EDMA_CORE_CFG_STALL_OFFSET (mmDCORE0_EDMA0_CORE_CFG_1 - mmDCORE0_EDMA0_CORE_BASE)
+#define MME_CTRL_LO_QM_STALL_OFFSET (mmDCORE0_MME_CTRL_LO_QM_STALL - mmDCORE0_MME_CTRL_LO_BASE)
#define MME_ACC_INTR_MASK_OFFSET (mmDCORE0_MME_ACC_INTR_MASK - mmDCORE0_MME_ACC_BASE)
#define MME_ACC_WR_AXI_AGG_COUT0_OFFSET (mmDCORE0_MME_ACC_WR_AXI_AGG_COUT0 - mmDCORE0_MME_ACC_BASE)
#define MME_ACC_WR_AXI_AGG_COUT1_OFFSET (mmDCORE0_MME_ACC_WR_AXI_AGG_COUT1 - mmDCORE0_MME_ACC_BASE)
@@ -537,6 +543,8 @@
#define HBM_MC_SPI_IEEE1500_COMP_MASK BIT(3)
#define HBM_MC_SPI_IEEE1500_PAUSED_MASK BIT(4)
+#define ARC_FARM_OFFSET (mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE)
+
#include "nic0_qpc0_regs.h"
#include "nic0_qm0_regs.h"
#include "nic0_qm_arc_aux0_regs.h"
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
index d49906a68511..d49906a68511 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
index acb19c1cd4bd..acb19c1cd4bd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
index 5f380a44dd21..5f380a44dd21 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
index eaee29da4244..eaee29da4244 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
index 2153319a50a0..2153319a50a0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
index de8eac74c2fb..de8eac74c2fb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
index 44182fc18234..44182fc18234 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
index cc5842ec6ceb..2ee79d8e62d0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
@@ -48,8 +48,7 @@
#define mmPCIE_DBI_PCI_CAP_PTR_REG 0x4C02034
-#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG \
-0x4C0203C
+#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG 0x4C0203C
#define mmPCIE_DBI_CAP_ID_NXT_PTR_REG 0x4C02040
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
index 2b5af010c7a5..2b5af010c7a5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
index dc7d3f6a4b50..dc7d3f6a4b50 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
index 242c6525bd71..242c6525bd71 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
index 98d035463561..98d035463561 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
index 33ef37619417..33ef37619417 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
index c4587d5d6406..c4587d5d6406 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
index 35349ad375d0..35349ad375d0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
index d29837883216..7a96aebf08b3 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
@@ -116,8 +116,7 @@
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
-#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
-0x100
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK 0x100
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
index c7badd212f2b..c7badd212f2b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
index 491b0cd935af..491b0cd935af 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
index a09422f2f281..a09422f2f281 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
index 46558e7a7f63..46558e7a7f63 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
index bacbe4c6fc3c..bacbe4c6fc3c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
index 02b57f07cfaf..02b57f07cfaf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
index 909cda03c246..909cda03c246 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
index 84079b5077e2..84079b5077e2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
index 15d257e3830e..15d257e3830e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
index 9b1cb609d134..9b1cb609d134 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
index d2e0756ec5f2..d2e0756ec5f2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
index 8bf0516b83f7..8bf0516b83f7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
index 96c0ce176e73..96c0ce176e73 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
index b79cae8f5571..b79cae8f5571 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
index 77d803c938d4..77d803c938d4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
index ccc6dfd22dd7..ccc6dfd22dd7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
index 5fd72d050fff..5fd72d050fff 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
index 0276506ea523..b4f32632cd36 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
@@ -228,8 +228,7 @@
/* PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION */
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
-#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
-0x1
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK 0x1
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
index 87c66c08e24a..87c66c08e24a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
index dd12793734b4..dd12793734b4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
index 42e67c1059c4..42e67c1059c4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
index 980a3e0054c5..980a3e0054c5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
index 9be3d656da3a..85a81e2cb546 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
@@ -1306,11 +1306,9 @@
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_SHIFT 12
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_MASK 0x3F000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_SHIFT 18
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK \
-0xFC0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK 0xFC0000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_SHIFT 24
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK \
-0x3F000000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK 0x3F000000
/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1 */
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC1_SHIFT 0
@@ -1322,24 +1320,17 @@
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_SHIFT 13
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_MASK 0x2000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_SHIFT 14
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK \
-0x4000
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT \
-16
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK \
-0xFF0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK 0x4000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK 0xFF0000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_SHIFT 24
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_MASK 0x7000000
/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2 */
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT \
-0
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK \
-0xFFFF
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT \
-16
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK \
-0xFFFF0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK 0xFFFF
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK 0xFFFF0000
/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3 */
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP0_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
index 48980fa8e37b..48980fa8e37b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
index e0cf35226e7f..e0cf35226e7f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
index 6a89624f01d1..6a89624f01d1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
index 699becc28887..699becc28887 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
index 79320320ebcb..79320320ebcb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
index f2e739ede3d9..f2e739ede3d9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
index e83daa33d737..e83daa33d737 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
index 8e040a2ef1c1..8e040a2ef1c1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
index 077ae2347a3d..077ae2347a3d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
index de3c85510af2..de3c85510af2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
index 7d85dc5559da..7d85dc5559da 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
index e8aebd7f5f85..e8aebd7f5f85 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
index 3d39d1a94851..3d39d1a94851 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
index 5b4f9e108798..0231d6c55b4a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
@@ -63,6 +63,8 @@
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull
+#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 256
+
#define GAUDI2_MSIX_ENTRIES 512
#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
index 305b576222e6..f661068d0c5f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2018-2021 HabanaLabs, Ltd.
+ * Copyright 2018-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -958,6 +958,7 @@ enum gaudi2_async_event_id {
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320,
+ GAUDI2_EVENT_CPU_DEV_RESET_REQ = 1321,
GAUDI2_EVENT_SIZE,
};
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
new file mode 100644
index 000000000000..ad01fc4e9940
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
@@ -0,0 +1,2678 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+enum event_reset_type {
+ EVENT_RESET_TYPE_NONE,
+ EVENT_RESET_TYPE_COMPUTE,
+ EVENT_RESET_TYPE_HARD,
+};
+
+#ifndef __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_
+#define __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_
+
+struct gaudi2_async_events_ids_map {
+ int fc_id;
+ int cpu_id;
+ int valid;
+ int msg;
+ int reset;
+ char name[64];
+};
+
+static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
+ { .fc_id = 0, .cpu_id = 0, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1, .cpu_id = 1, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 2, .cpu_id = 2, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 3, .cpu_id = 3, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 4, .cpu_id = 4, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 5, .cpu_id = 5, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 6, .cpu_id = 6, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 7, .cpu_id = 7, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 8, .cpu_id = 8, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 9, .cpu_id = 9, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 10, .cpu_id = 10, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 11, .cpu_id = 11, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 12, .cpu_id = 12, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 13, .cpu_id = 13, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 14, .cpu_id = 14, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 15, .cpu_id = 15, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 16, .cpu_id = 16, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 17, .cpu_id = 17, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 18, .cpu_id = 18, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 19, .cpu_id = 19, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 20, .cpu_id = 20, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 21, .cpu_id = 21, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 22, .cpu_id = 22, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 23, .cpu_id = 23, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 24, .cpu_id = 24, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 25, .cpu_id = 25, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 26, .cpu_id = 26, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 27, .cpu_id = 27, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 28, .cpu_id = 28, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 29, .cpu_id = 29, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 30, .cpu_id = 30, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 31, .cpu_id = 31, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 32, .cpu_id = 32, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_CORE_SERR" },
+ { .fc_id = 33, .cpu_id = 33, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_CORE_DERR" },
+ { .fc_id = 34, .cpu_id = 34, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_IF_SERR" },
+ { .fc_id = 35, .cpu_id = 35, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_IF_DERR" },
+ { .fc_id = 36, .cpu_id = 36, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_PHY_SERR" },
+ { .fc_id = 37, .cpu_id = 37, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_PHY_DERR" },
+ { .fc_id = 38, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC0_ECC_SERR" },
+ { .fc_id = 39, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC1_ECC_SERR" },
+ { .fc_id = 40, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC2_ECC_SERR" },
+ { .fc_id = 41, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC3_ECC_SERR" },
+ { .fc_id = 42, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC4_ECC_SERR" },
+ { .fc_id = 43, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC5_ECC_SERR" },
+ { .fc_id = 44, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC6_ECC_SERR" },
+ { .fc_id = 45, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC7_ECC_SERR" },
+ { .fc_id = 46, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC8_ECC_SERR" },
+ { .fc_id = 47, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC9_ECC_SERR" },
+ { .fc_id = 48, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC10_ECC_SERR" },
+ { .fc_id = 49, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC11_ECC_SERR" },
+ { .fc_id = 50, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC12_ECC_SERR" },
+ { .fc_id = 51, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC13_ECC_SERR" },
+ { .fc_id = 52, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC14_ECC_SERR" },
+ { .fc_id = 53, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC15_ECC_SERR" },
+ { .fc_id = 54, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC16_ECC_SERR" },
+ { .fc_id = 55, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC17_ECC_SERR" },
+ { .fc_id = 56, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC18_ECC_SERR" },
+ { .fc_id = 57, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC19_ECC_SERR" },
+ { .fc_id = 58, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC20_ECC_SERR" },
+ { .fc_id = 59, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC21_ECC_SERR" },
+ { .fc_id = 60, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC22_ECC_SERR" },
+ { .fc_id = 61, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC23_ECC_SERR" },
+ { .fc_id = 62, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC24_ECC_SERR" },
+ { .fc_id = 63, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC0_ECC_DERR" },
+ { .fc_id = 64, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC1_ECC_DERR" },
+ { .fc_id = 65, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC2_ECC_DERR" },
+ { .fc_id = 66, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC3_ECC_DERR" },
+ { .fc_id = 67, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC4_ECC_DERR" },
+ { .fc_id = 68, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC5_ECC_DERR" },
+ { .fc_id = 69, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC6_ECC_DERR" },
+ { .fc_id = 70, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC7_ECC_DERR" },
+ { .fc_id = 71, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC8_ECC_DERR" },
+ { .fc_id = 72, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC9_ECC_DERR" },
+ { .fc_id = 73, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC10_ECC_DERR" },
+ { .fc_id = 74, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC11_ECC_DERR" },
+ { .fc_id = 75, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC12_ECC_DERR" },
+ { .fc_id = 76, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC13_ECC_DERR" },
+ { .fc_id = 77, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC14_ECC_DERR" },
+ { .fc_id = 78, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC15_ECC_DERR" },
+ { .fc_id = 79, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC16_ECC_DERR" },
+ { .fc_id = 80, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC17_ECC_DERR" },
+ { .fc_id = 81, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC18_ECC_DERR" },
+ { .fc_id = 82, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC19_ECC_DERR" },
+ { .fc_id = 83, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC20_ECC_DERR" },
+ { .fc_id = 84, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC21_ECC_DERR" },
+ { .fc_id = 85, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC22_ECC_DERR" },
+ { .fc_id = 86, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC23_ECC_DERR" },
+ { .fc_id = 87, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC24_ECC_DERR" },
+ { .fc_id = 88, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE0_ECC_SERR" },
+ { .fc_id = 89, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE1_ECC_SERR" },
+ { .fc_id = 90, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE2_ECC_SERR" },
+ { .fc_id = 91, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE3_ECC_SERR" },
+ { .fc_id = 92, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE4_ECC_SERR" },
+ { .fc_id = 93, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_CTRL_ECC_SERR" },
+ { .fc_id = 94, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_WAP_ECC_SERR" },
+ { .fc_id = 95, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE0_ECC_SERR" },
+ { .fc_id = 96, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE1_ECC_SERR" },
+ { .fc_id = 97, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE2_ECC_SERR" },
+ { .fc_id = 98, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE3_ECC_SERR" },
+ { .fc_id = 99, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE4_ECC_SERR" },
+ { .fc_id = 100, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_CTRL_ECC_SERR" },
+ { .fc_id = 101, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_WAP_ECC_SERR" },
+ { .fc_id = 102, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE0_ECC_SERR" },
+ { .fc_id = 103, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE1_ECC_SERR" },
+ { .fc_id = 104, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE2_ECC_SERR" },
+ { .fc_id = 105, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE3_ECC_SERR" },
+ { .fc_id = 106, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE4_ECC_SERR" },
+ { .fc_id = 107, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_CTRL_ECC_SERR" },
+ { .fc_id = 108, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_WAP_ECC_SERR" },
+ { .fc_id = 109, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE0_ECC_SERR" },
+ { .fc_id = 110, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE1_ECC_SERR" },
+ { .fc_id = 111, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE2_ECC_SERR" },
+ { .fc_id = 112, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE3_ECC_SERR" },
+ { .fc_id = 113, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE4_ECC_SERR" },
+ { .fc_id = 114, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_CTRL_ECC_SERR" },
+ { .fc_id = 115, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_WAP_ECC_SERR" },
+ { .fc_id = 116, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE0_ECC_DERR" },
+ { .fc_id = 117, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE1_ECC_DERR" },
+ { .fc_id = 118, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE2_ECC_DERR" },
+ { .fc_id = 119, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE3_ECC_DERR" },
+ { .fc_id = 120, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE4_ECC_DERR" },
+ { .fc_id = 121, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_CTRL_ECC_DERR" },
+ { .fc_id = 122, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_WAP_ECC_DERR" },
+ { .fc_id = 123, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE0_ECC_DERR" },
+ { .fc_id = 124, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE1_ECC_DERR" },
+ { .fc_id = 125, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE2_ECC_DERR" },
+ { .fc_id = 126, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE3_ECC_DERR" },
+ { .fc_id = 127, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE4_ECC_DERR" },
+ { .fc_id = 128, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_CTRL_ECC_DERR" },
+ { .fc_id = 129, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_WAP_ECC_DERR" },
+ { .fc_id = 130, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE0_ECC_DERR" },
+ { .fc_id = 131, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE1_ECC_DERR" },
+ { .fc_id = 132, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE2_ECC_DERR" },
+ { .fc_id = 133, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE3_ECC_DERR" },
+ { .fc_id = 134, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE4_ECC_DERR" },
+ { .fc_id = 135, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_CTRL_ECC_DERR" },
+ { .fc_id = 136, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_WAP_ECC_DERR" },
+ { .fc_id = 137, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE0_ECC_DERR" },
+ { .fc_id = 138, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE1_ECC_DERR" },
+ { .fc_id = 139, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE2_ECC_DERR" },
+ { .fc_id = 140, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE3_ECC_DERR" },
+ { .fc_id = 141, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE4_ECC_DERR" },
+ { .fc_id = 142, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_CTRL_ECC_DERR" },
+ { .fc_id = 143, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_WAP_ECC_DERR" },
+ { .fc_id = 144, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA2_ECC_SERR" },
+ { .fc_id = 145, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA3_ECC_SERR" },
+ { .fc_id = 146, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA0_ECC_SERR" },
+ { .fc_id = 147, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA1_ECC_SERR" },
+ { .fc_id = 148, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA6_ECC_SERR" },
+ { .fc_id = 149, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA7_ECC_SERR" },
+ { .fc_id = 150, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HDMA4_ECC_SERR" },
+ { .fc_id = 151, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HDMA5_ECC_SERR" },
+ { .fc_id = 152, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA2_ECC_DERR" },
+ { .fc_id = 153, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA3_ECC_DERR" },
+ { .fc_id = 154, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA0_ECC_DERR" },
+ { .fc_id = 155, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA1_ECC_DERR" },
+ { .fc_id = 156, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA6_ECC_DERR" },
+ { .fc_id = 157, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA7_ECC_DERR" },
+ { .fc_id = 158, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA4_ECC_DERR" },
+ { .fc_id = 159, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA5_ECC_DERR" },
+ { .fc_id = 160, .cpu_id = 50, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "KDMA0_ECC_SERR" },
+ { .fc_id = 161, .cpu_id = 51, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA0_ECC_SERR" },
+ { .fc_id = 162, .cpu_id = 51, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA1_ECC_SERR" },
+ { .fc_id = 163, .cpu_id = 52, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "KDMA0_ECC_DERR" },
+ { .fc_id = 164, .cpu_id = 53, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PDMA0_ECC_DERR" },
+ { .fc_id = 165, .cpu_id = 53, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PDMA1_ECC_DERR" },
+ { .fc_id = 166, .cpu_id = 54, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "CPU_IF_ECC_SERR" },
+ { .fc_id = 167, .cpu_id = 55, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "CPU_IF_ECC_DERR" },
+ { .fc_id = 168, .cpu_id = 56, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_MEM_SERR" },
+ { .fc_id = 169, .cpu_id = 57, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PSOC_MEM_DERR" },
+ { .fc_id = 170, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM0_ECC_SERR" },
+ { .fc_id = 171, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM1_ECC_SERR" },
+ { .fc_id = 172, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM2_ECC_SERR" },
+ { .fc_id = 173, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM3_ECC_SERR" },
+ { .fc_id = 174, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM4_ECC_SERR" },
+ { .fc_id = 175, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM5_ECC_SERR" },
+ { .fc_id = 176, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM6_ECC_SERR" },
+ { .fc_id = 177, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM7_ECC_SERR" },
+ { .fc_id = 178, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM8_ECC_SERR" },
+ { .fc_id = 179, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM9_ECC_SERR" },
+ { .fc_id = 180, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM10_ECC_SERR" },
+ { .fc_id = 181, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM11_ECC_SERR" },
+ { .fc_id = 182, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM12_ECC_SERR" },
+ { .fc_id = 183, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM13_ECC_SERR" },
+ { .fc_id = 184, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM14_ECC_SERR" },
+ { .fc_id = 185, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM15_ECC_SERR" },
+ { .fc_id = 186, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM16_ECC_SERR" },
+ { .fc_id = 187, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM17_ECC_SERR" },
+ { .fc_id = 188, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM18_ECC_SERR" },
+ { .fc_id = 189, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM19_ECC_SERR" },
+ { .fc_id = 190, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM20_ECC_SERR" },
+ { .fc_id = 191, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM21_ECC_SERR" },
+ { .fc_id = 192, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM22_ECC_SERR" },
+ { .fc_id = 193, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM23_ECC_SERR" },
+ { .fc_id = 194, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM24_ECC_SERR" },
+ { .fc_id = 195, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM25_ECC_SERR" },
+ { .fc_id = 196, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM26_ECC_SERR" },
+ { .fc_id = 197, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM27_ECC_SERR" },
+ { .fc_id = 198, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM28_ECC_SERR" },
+ { .fc_id = 199, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM29_ECC_SERR" },
+ { .fc_id = 200, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM30_ECC_SERR" },
+ { .fc_id = 201, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM31_ECC_SERR" },
+ { .fc_id = 202, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM0_ECC_DERR" },
+ { .fc_id = 203, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM1_ECC_DERR" },
+ { .fc_id = 204, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM2_ECC_DERR" },
+ { .fc_id = 205, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM3_ECC_DERR" },
+ { .fc_id = 206, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM4_ECC_DERR" },
+ { .fc_id = 207, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM5_ECC_DERR" },
+ { .fc_id = 208, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM6_ECC_DERR" },
+ { .fc_id = 209, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM7_ECC_DERR" },
+ { .fc_id = 210, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM8_ECC_DERR" },
+ { .fc_id = 211, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM9_ECC_DERR" },
+ { .fc_id = 212, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM10_ECC_DERR" },
+ { .fc_id = 213, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM11_ECC_DERR" },
+ { .fc_id = 214, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM12_ECC_DERR" },
+ { .fc_id = 215, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM13_ECC_DERR" },
+ { .fc_id = 216, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM14_ECC_DERR" },
+ { .fc_id = 217, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM15_ECC_DERR" },
+ { .fc_id = 218, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM16_ECC_DERR" },
+ { .fc_id = 219, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM17_ECC_DERR" },
+ { .fc_id = 220, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM18_ECC_DERR" },
+ { .fc_id = 221, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM19_ECC_DERR" },
+ { .fc_id = 222, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM20_ECC_DERR" },
+ { .fc_id = 223, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM21_ECC_DERR" },
+ { .fc_id = 224, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM22_ECC_DERR" },
+ { .fc_id = 225, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM23_ECC_DERR" },
+ { .fc_id = 226, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM24_ECC_DERR" },
+ { .fc_id = 227, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM25_ECC_DERR" },
+ { .fc_id = 228, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM26_ECC_DERR" },
+ { .fc_id = 229, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM27_ECC_DERR" },
+ { .fc_id = 230, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM28_ECC_DERR" },
+ { .fc_id = 231, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM29_ECC_DERR" },
+ { .fc_id = 232, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM30_ECC_DERR" },
+ { .fc_id = 233, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM31_ECC_DERR" },
+ { .fc_id = 234, .cpu_id = 60, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "GIC500" },
+ { .fc_id = 235, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_0_MC0_ECC_SERR" },
+ { .fc_id = 236, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_1_MC0_ECC_SERR" },
+ { .fc_id = 237, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_2_MC0_ECC_SERR" },
+ { .fc_id = 238, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_3_MC0_ECC_SERR" },
+ { .fc_id = 239, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_4_MC0_ECC_SERR" },
+ { .fc_id = 240, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_5_MC0_ECC_SERR" },
+ { .fc_id = 241, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_0_MC1_ECC_SERR" },
+ { .fc_id = 242, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_1_MC1_ECC_SERR" },
+ { .fc_id = 243, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_2_MC1_ECC_SERR" },
+ { .fc_id = 244, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_3_MC1_ECC_SERR" },
+ { .fc_id = 245, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_4_MC1_ECC_SERR" },
+ { .fc_id = 246, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_5_MC1_ECC_SERR" },
+ { .fc_id = 247, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_0_MC0_ECC_DERR" },
+ { .fc_id = 248, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_1_MC0_ECC_DERR" },
+ { .fc_id = 249, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_2_MC0_ECC_DERR" },
+ { .fc_id = 250, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_3_MC0_ECC_DERR" },
+ { .fc_id = 251, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_4_MC0_ECC_DERR" },
+ { .fc_id = 252, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_5_MC0_ECC_DERR" },
+ { .fc_id = 253, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_0_MC1_ECC_DERR" },
+ { .fc_id = 254, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_1_MC1_ECC_DERR" },
+ { .fc_id = 255, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_2_MC1_ECC_DERR" },
+ { .fc_id = 256, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_3_MC1_ECC_DERR" },
+ { .fc_id = 257, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_4_MC1_ECC_DERR" },
+ { .fc_id = 258, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_5_MC1_ECC_DERR" },
+ { .fc_id = 259, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_0_ECC_SERR" },
+ { .fc_id = 260, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_1_ECC_SERR" },
+ { .fc_id = 261, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_2_ECC_SERR" },
+ { .fc_id = 262, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_3_ECC_SERR" },
+ { .fc_id = 263, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_8_ECC_SERR" },
+ { .fc_id = 264, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_9_ECC_SERR" },
+ { .fc_id = 265, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_10_ECC_SERR" },
+ { .fc_id = 266, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_11_ECC_SERR" },
+ { .fc_id = 267, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_7_ECC_SERR" },
+ { .fc_id = 268, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_6_ECC_SERR" },
+ { .fc_id = 269, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_5_ECC_SERR" },
+ { .fc_id = 270, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_4_ECC_SERR" },
+ { .fc_id = 271, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_15_ECC_SERR" },
+ { .fc_id = 272, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_14_ECC_SERR" },
+ { .fc_id = 273, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_13_ECC_SERR" },
+ { .fc_id = 274, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_12_ECC_SERR" },
+ { .fc_id = 275, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_0_ECC_DERR" },
+ { .fc_id = 276, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_1_ECC_DERR" },
+ { .fc_id = 277, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_2_ECC_DERR" },
+ { .fc_id = 278, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_3_ECC_DERR" },
+ { .fc_id = 279, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_8_ECC_DERR" },
+ { .fc_id = 280, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_9_ECC_DERR" },
+ { .fc_id = 281, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_10_ECC_DERR" },
+ { .fc_id = 282, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_11_ECC_DERR" },
+ { .fc_id = 283, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_7_ECC_DERR" },
+ { .fc_id = 284, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_6_ECC_DERR" },
+ { .fc_id = 285, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_5_ECC_DERR" },
+ { .fc_id = 286, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_4_ECC_DERR" },
+ { .fc_id = 287, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_15_ECC_DERR" },
+ { .fc_id = 288, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_14_ECC_DERR" },
+ { .fc_id = 289, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_13_ECC_DERR" },
+ { .fc_id = 290, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_12_ECC_DERR" },
+ { .fc_id = 291, .cpu_id = 65, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU_ECC_SERR" },
+ { .fc_id = 292, .cpu_id = 66, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU_ECC_DERR" },
+ { .fc_id = 293, .cpu_id = 67, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 294, .cpu_id = 68, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 295, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC0_VCD_ECC_SERR" },
+ { .fc_id = 296, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC1_VCD_ECC_SERR" },
+ { .fc_id = 297, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC2_VCD_ECC_SERR" },
+ { .fc_id = 298, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC3_VCD_ECC_SERR" },
+ { .fc_id = 299, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC4_VCD_ECC_SERR" },
+ { .fc_id = 300, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC5_VCD_ECC_SERR" },
+ { .fc_id = 301, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC6_VCD_ECC_SERR" },
+ { .fc_id = 302, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC7_VCD_ECC_SERR" },
+ { .fc_id = 303, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC8_VCD_ECC_SERR" },
+ { .fc_id = 304, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC9_VCD_ECC_SERR" },
+ { .fc_id = 305, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC0_L2C_ECC_SERR" },
+ { .fc_id = 306, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC1_L2C_ECC_SERR" },
+ { .fc_id = 307, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC2_L2C_ECC_SERR" },
+ { .fc_id = 308, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC3_L2C_ECC_SERR" },
+ { .fc_id = 309, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC4_L2C_ECC_SERR" },
+ { .fc_id = 310, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC5_L2C_ECC_SERR" },
+ { .fc_id = 311, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC6_L2C_ECC_SERR" },
+ { .fc_id = 312, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC7_L2C_ECC_SERR" },
+ { .fc_id = 313, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC8_L2C_ECC_SERR" },
+ { .fc_id = 314, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC9_L2C_ECC_SERR" },
+ { .fc_id = 315, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC0_VCD_ECC_DERR" },
+ { .fc_id = 316, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC1_VCD_ECC_DERR" },
+ { .fc_id = 317, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC2_VCD_ECC_DERR" },
+ { .fc_id = 318, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC3_VCD_ECC_DERR" },
+ { .fc_id = 319, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC4_VCD_ECC_DERR" },
+ { .fc_id = 320, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC5_VCD_ECC_DERR" },
+ { .fc_id = 321, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC6_VCD_ECC_DERR" },
+ { .fc_id = 322, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC7_VCD_ECC_DERR" },
+ { .fc_id = 323, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC8_VCD_ECC_DERR" },
+ { .fc_id = 324, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC9_VCD_ECC_DERR" },
+ { .fc_id = 325, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC0_L2C_ECC_DERR" },
+ { .fc_id = 326, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC1_L2C_ECC_DERR" },
+ { .fc_id = 327, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC2_L2C_ECC_DERR" },
+ { .fc_id = 328, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC3_L2C_ECC_DERR" },
+ { .fc_id = 329, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC4_L2C_ECC_DERR" },
+ { .fc_id = 330, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC5_L2C_ECC_DERR" },
+ { .fc_id = 331, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC6_L2C_ECC_DERR" },
+ { .fc_id = 332, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC7_L2C_ECC_DERR" },
+ { .fc_id = 333, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC8_L2C_ECC_DERR" },
+ { .fc_id = 334, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC9_L2C_ECC_DERR" },
+ { .fc_id = 335, .cpu_id = 71, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 336, .cpu_id = 72, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 337, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF0_ECC_SERR" },
+ { .fc_id = 338, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF1_ECC_SERR" },
+ { .fc_id = 339, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF2_ECC_SERR" },
+ { .fc_id = 340, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF3_ECC_SERR" },
+ { .fc_id = 341, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF8_ECC_SERR" },
+ { .fc_id = 342, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF9_ECC_SERR" },
+ { .fc_id = 343, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF10_ECC_SERR" },
+ { .fc_id = 344, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF11_ECC_SERR" },
+ { .fc_id = 345, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF7_ECC_SERR" },
+ { .fc_id = 346, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF6_ECC_SERR" },
+ { .fc_id = 347, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF5_ECC_SERR" },
+ { .fc_id = 348, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF4_ECC_SERR" },
+ { .fc_id = 349, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF15_ECC_SERR" },
+ { .fc_id = 350, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF14_ECC_SERR" },
+ { .fc_id = 351, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF13_ECC_SERR" },
+ { .fc_id = 352, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF12_ECC_SERR" },
+ { .fc_id = 353, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF0_ECC_DERR" },
+ { .fc_id = 354, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF1_ECC_DERR" },
+ { .fc_id = 355, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF2_ECC_DERR" },
+ { .fc_id = 356, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF3_ECC_DERR" },
+ { .fc_id = 357, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF8_ECC_DERR" },
+ { .fc_id = 358, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF9_ECC_DERR" },
+ { .fc_id = 359, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF10_ECC_DERR" },
+ { .fc_id = 360, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF11_ECC_DERR" },
+ { .fc_id = 361, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF7_ECC_DERR" },
+ { .fc_id = 362, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF6_ECC_DERR" },
+ { .fc_id = 363, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF5_ECC_DERR" },
+ { .fc_id = 364, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF4_ECC_DERR" },
+ { .fc_id = 365, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF15_ECC_DERR" },
+ { .fc_id = 366, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF14_ECC_DERR" },
+ { .fc_id = 367, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF13_ECC_DERR" },
+ { .fc_id = 368, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF12_ECC_DERR" },
+ { .fc_id = 369, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC0_ECC_SERR" },
+ { .fc_id = 370, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC1_ECC_SERR" },
+ { .fc_id = 371, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC2_ECC_SERR" },
+ { .fc_id = 372, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC3_ECC_SERR" },
+ { .fc_id = 373, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC4_ECC_SERR" },
+ { .fc_id = 374, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC5_ECC_SERR" },
+ { .fc_id = 375, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC6_ECC_SERR" },
+ { .fc_id = 376, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC7_ECC_SERR" },
+ { .fc_id = 377, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC8_ECC_SERR" },
+ { .fc_id = 378, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC9_ECC_SERR" },
+ { .fc_id = 379, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC10_ECC_SERR" },
+ { .fc_id = 380, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC11_ECC_SERR" },
+ { .fc_id = 381, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_ECC_DERR" },
+ { .fc_id = 382, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_ECC_DERR" },
+ { .fc_id = 383, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_ECC_DERR" },
+ { .fc_id = 384, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_ECC_DERR" },
+ { .fc_id = 385, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_ECC_DERR" },
+ { .fc_id = 386, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_ECC_DERR" },
+ { .fc_id = 387, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_ECC_DERR" },
+ { .fc_id = 388, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_ECC_DERR" },
+ { .fc_id = 389, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_ECC_DERR" },
+ { .fc_id = 390, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_ECC_DERR" },
+ { .fc_id = 391, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_ECC_DERR" },
+ { .fc_id = 392, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_ECC_DERR" },
+ { .fc_id = 393, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM0_ECC_DERR" },
+ { .fc_id = 394, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM1_ECC_DERR" },
+ { .fc_id = 395, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM2_ECC_DERR" },
+ { .fc_id = 396, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM3_ECC_DERR" },
+ { .fc_id = 397, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM0_ECC_SERR" },
+ { .fc_id = 398, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM1_ECC_SERR" },
+ { .fc_id = 399, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM2_ECC_SERR" },
+ { .fc_id = 400, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM3_ECC_SERR" },
+ { .fc_id = 401, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR0_ECC_SERR" },
+ { .fc_id = 402, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR1_ECC_SERR" },
+ { .fc_id = 403, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR2_ECC_SERR" },
+ { .fc_id = 404, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR3_ECC_SERR" },
+ { .fc_id = 405, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR0_ECC_DERR" },
+ { .fc_id = 406, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR1_ECC_DERR" },
+ { .fc_id = 407, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR2_ECC_DERR" },
+ { .fc_id = 408, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR3_ECC_DERR" },
+ { .fc_id = 409, .cpu_id = 81, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ARC0_ECC_SERR" },
+ { .fc_id = 410, .cpu_id = 82, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "ARC0_ECC_DERR" },
+ { .fc_id = 411, .cpu_id = 83, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 412, .cpu_id = 84, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_ADDR_DEC_ERR" },
+ { .fc_id = 413, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC0_AXI_ERR_RSP" },
+ { .fc_id = 414, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC1_AXI_ERR_RSP" },
+ { .fc_id = 415, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC2_AXI_ERR_RSP" },
+ { .fc_id = 416, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC3_AXI_ERR_RSP" },
+ { .fc_id = 417, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC4_AXI_ERR_RSP" },
+ { .fc_id = 418, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC5_AXI_ERR_RSP" },
+ { .fc_id = 419, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC6_AXI_ERR_RSP" },
+ { .fc_id = 420, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC7_AXI_ERR_RSP" },
+ { .fc_id = 421, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC8_AXI_ERR_RSP" },
+ { .fc_id = 422, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC9_AXI_ERR_RSP" },
+ { .fc_id = 423, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC10_AXI_ERR_RSP" },
+ { .fc_id = 424, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC11_AXI_ERR_RSP" },
+ { .fc_id = 425, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC12_AXI_ERR_RSP" },
+ { .fc_id = 426, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC13_AXI_ERR_RSP" },
+ { .fc_id = 427, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC14_AXI_ERR_RSP" },
+ { .fc_id = 428, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC15_AXI_ERR_RSP" },
+ { .fc_id = 429, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC16_AXI_ERR_RSP" },
+ { .fc_id = 430, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC17_AXI_ERR_RSP" },
+ { .fc_id = 431, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC18_AXI_ERR_RSP" },
+ { .fc_id = 432, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC19_AXI_ERR_RSP" },
+ { .fc_id = 433, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC20_AXI_ERR_RSP" },
+ { .fc_id = 434, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC21_AXI_ERR_RSP" },
+ { .fc_id = 435, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC22_AXI_ERR_RSP" },
+ { .fc_id = 436, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC23_AXI_ERR_RSP" },
+ { .fc_id = 437, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC24_AXI_ERR_RSP" },
+ { .fc_id = 438, .cpu_id = 86, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "AXI_ECC" },
+ { .fc_id = 439, .cpu_id = 87, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "L2_RAM_ECC" },
+ { .fc_id = 440, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 441, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 442, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 443, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 444, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 445, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 446, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_QMAN_SW_ERROR" },
+ { .fc_id = 447, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 448, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 449, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 450, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 451, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 452, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 453, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_QMAN_SW_ERROR" },
+ { .fc_id = 454, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 455, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 456, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 457, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 458, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 459, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 460, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_QMAN_SW_ERROR" },
+ { .fc_id = 461, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 462, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 463, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 464, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 465, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 466, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 467, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_QMAN_SW_ERROR" },
+ { .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_MME_PLL_LOCK_ERR" },
+ { .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_CPU_PLL_LOCK_ERR" },
+ { .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_NIC_PLL_LOCK_ERR" },
+ { .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
+ { .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
+ { .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_NIC_PLL_LOCK_ERR" },
+ { .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU_MME_PLL_LOCK_ERR" },
+ { .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_PCI_PLL_LOCK_ERR" },
+ { .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
+ { .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
+ { .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_VID_PLL_LOCK_ERR" },
+ { .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU_VID_PLL_LOCK_ERR" },
+ { .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 502, .cpu_id = 93, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "CPU_AXI_ERR_RSP" },
+ { .fc_id = 503, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_0_AXI_ERR_RSP" },
+ { .fc_id = 504, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_1_AXI_ERR_RSP" },
+ { .fc_id = 505, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_2_AXI_ERR_RSP" },
+ { .fc_id = 506, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_3_AXI_ERR_RSP" },
+ { .fc_id = 507, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_8_AXI_ERR_RSP" },
+ { .fc_id = 508, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_9_AXI_ERR_RSP" },
+ { .fc_id = 509, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_10_AXI_ERR_RSP" },
+ { .fc_id = 510, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_11_AXI_ERR_RSP" },
+ { .fc_id = 511, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_7_AXI_ERR_RSP" },
+ { .fc_id = 512, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_6_AXI_ERR_RSP" },
+ { .fc_id = 513, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_5_AXI_ERR_RSP" },
+ { .fc_id = 514, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_4_AXI_ERR_RSP" },
+ { .fc_id = 515, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_15_AXI_ERR_RSP" },
+ { .fc_id = 516, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_14_AXI_ERR_RSP" },
+ { .fc_id = 517, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_13_AXI_ERR_RSP" },
+ { .fc_id = 518, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_12_AXI_ERR_RSP" },
+ { .fc_id = 519, .cpu_id = 95, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU_FATAL" },
+ { .fc_id = 520, .cpu_id = 96, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU_AXI_ERR_RSP" },
+ { .fc_id = 521, .cpu_id = 97, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM0_ALARM_A" },
+ { .fc_id = 522, .cpu_id = 98, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM0_ALARM_B" },
+ { .fc_id = 523, .cpu_id = 99, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM1_ALARM_A" },
+ { .fc_id = 524, .cpu_id = 100, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM1_ALARM_B" },
+ { .fc_id = 525, .cpu_id = 101, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM2_ALARM_A" },
+ { .fc_id = 526, .cpu_id = 102, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM2_ALARM_B" },
+ { .fc_id = 527, .cpu_id = 103, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM3_ALARM_A" },
+ { .fc_id = 528, .cpu_id = 104, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM3_ALARM_B" },
+ { .fc_id = 529, .cpu_id = 105, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PSOC_AXI_ERR_RSP" },
+ { .fc_id = 530, .cpu_id = 106, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_PRSTN_FALL" },
+ { .fc_id = 531, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 532, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 533, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 534, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 535, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 536, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 537, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 538, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 539, .cpu_id = 108, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "KDMA_CH0_AXI_ERR_RSP" },
+ { .fc_id = 540, .cpu_id = 109, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA_CH0_AXI_ERR_RSP" },
+ { .fc_id = 541, .cpu_id = 109, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA_CH1_AXI_ERR_RSP" },
+ { .fc_id = 542, .cpu_id = 110, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_0" },
+ { .fc_id = 543, .cpu_id = 111, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_1" },
+ { .fc_id = 544, .cpu_id = 112, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_2" },
+ { .fc_id = 545, .cpu_id = 113, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_3" },
+ { .fc_id = 546, .cpu_id = 114, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_4" },
+ { .fc_id = 547, .cpu_id = 115, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_5" },
+ { .fc_id = 548, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM0_MC0_SEI_SEVERE" },
+ { .fc_id = 549, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 550, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM0_MC1_SEI_SEVERE" },
+ { .fc_id = 551, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 552, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM1_MC0_SEI_SEVERE" },
+ { .fc_id = 553, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 554, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM1_MC1_SEI_SEVERE" },
+ { .fc_id = 555, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 556, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM2_MC0_SEI_SEVERE" },
+ { .fc_id = 557, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 558, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM2_MC1_SEI_SEVERE" },
+ { .fc_id = 559, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 560, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM3_MC0_SEI_SEVERE" },
+ { .fc_id = 561, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 562, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM3_MC1_SEI_SEVERE" },
+ { .fc_id = 563, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 564, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM4_MC0_SEI_SEVERE" },
+ { .fc_id = 565, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 566, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM4_MC1_SEI_SEVERE" },
+ { .fc_id = 567, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 568, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM5_MC0_SEI_SEVERE" },
+ { .fc_id = 569, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 570, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM5_MC1_SEI_SEVERE" },
+ { .fc_id = 571, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 572, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC0_AXI_ERR_RSPONSE" },
+ { .fc_id = 573, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC1_AXI_ERR_RSPONSE" },
+ { .fc_id = 574, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC2_AXI_ERR_RSPONSE" },
+ { .fc_id = 575, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC3_AXI_ERR_RSPONSE" },
+ { .fc_id = 576, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC4_AXI_ERR_RSPONSE" },
+ { .fc_id = 577, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC5_AXI_ERR_RSPONSE" },
+ { .fc_id = 578, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC6_AXI_ERR_RSPONSE" },
+ { .fc_id = 579, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC7_AXI_ERR_RSPONSE" },
+ { .fc_id = 580, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC8_AXI_ERR_RSPONSE" },
+ { .fc_id = 581, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC9_AXI_ERR_RSPONSE" },
+ { .fc_id = 582, .cpu_id = 118, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 583, .cpu_id = 119, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 584, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF0_FATAL" },
+ { .fc_id = 585, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF1_FATAL" },
+ { .fc_id = 586, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF2_FATAL" },
+ { .fc_id = 587, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF3_FATAL" },
+ { .fc_id = 588, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF8_FATAL" },
+ { .fc_id = 589, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF9_FATAL" },
+ { .fc_id = 590, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF10_FATAL" },
+ { .fc_id = 591, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF11_FATAL" },
+ { .fc_id = 592, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF7_FATAL" },
+ { .fc_id = 593, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF6_FATAL" },
+ { .fc_id = 594, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF5_FATAL" },
+ { .fc_id = 595, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF4_FATAL" },
+ { .fc_id = 596, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF15_FATAL" },
+ { .fc_id = 597, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF14_FATAL" },
+ { .fc_id = 598, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF13_FATAL" },
+ { .fc_id = 599, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF12_FATAL" },
+ { .fc_id = 600, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 601, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 602, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_AXI_ERROR_RESPONSE" },
+ { .fc_id = 603, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_AXI_ERROR_RESPONSE" },
+ { .fc_id = 604, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_AXI_ERROR_RESPONSE" },
+ { .fc_id = 605, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_AXI_ERROR_RESPONSE" },
+ { .fc_id = 606, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_AXI_ERROR_RESPONSE" },
+ { .fc_id = 607, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_AXI_ERROR_RESPONSE" },
+ { .fc_id = 608, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_AXI_ERROR_RESPONSE" },
+ { .fc_id = 609, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_AXI_ERROR_RESPONSE" },
+ { .fc_id = 610, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_AXI_ERROR_RESPONSE" },
+ { .fc_id = 611, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_AXI_ERROR_RESPONSE" },
+ { .fc_id = 612, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 613, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 614, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM2_AXI_ERROR_RESPONSE" },
+ { .fc_id = 615, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM3_AXI_ERROR_RESPONSE" },
+ { .fc_id = 616, .cpu_id = 123, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ARC_AXI_ERROR_RESPONSE" },
+ { .fc_id = 617, .cpu_id = 124, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 618, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 619, .cpu_id = 125, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_FLR_REQUESTED" },
+ { .fc_id = 620, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 621, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 622, .cpu_id = 125, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_APB_TIMEOUT" },
+ { .fc_id = 623, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 624, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 625, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 626, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 627, .cpu_id = 125, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_FATAL_ERR" },
+ { .fc_id = 628, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 629, .cpu_id = 126, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 630, .cpu_id = 127, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 631, .cpu_id = 128, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_P2P_MSIX" },
+ { .fc_id = 632, .cpu_id = 129, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_DRAIN_COMPLETE" },
+ { .fc_id = 633, .cpu_id = 130, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC0_BMON_SPMU" },
+ { .fc_id = 634, .cpu_id = 131, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC0_KERNEL_ERR" },
+ { .fc_id = 635, .cpu_id = 132, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC1_BMON_SPMU" },
+ { .fc_id = 636, .cpu_id = 133, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC1_KERNEL_ERR" },
+ { .fc_id = 637, .cpu_id = 134, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC2_BMON_SPMU" },
+ { .fc_id = 638, .cpu_id = 135, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC2_KERNEL_ERR" },
+ { .fc_id = 639, .cpu_id = 136, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC3_BMON_SPMU" },
+ { .fc_id = 640, .cpu_id = 137, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC3_KERNEL_ERR" },
+ { .fc_id = 641, .cpu_id = 138, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC4_BMON_SPMU" },
+ { .fc_id = 642, .cpu_id = 139, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC4_KERNEL_ERR" },
+ { .fc_id = 643, .cpu_id = 140, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC5_BMON_SPMU" },
+ { .fc_id = 644, .cpu_id = 141, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC5_KERNEL_ERR" },
+ { .fc_id = 645, .cpu_id = 150, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC6_BMON_SPMU" },
+ { .fc_id = 646, .cpu_id = 151, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC6_KERNEL_ERR" },
+ { .fc_id = 647, .cpu_id = 152, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC7_BMON_SPMU" },
+ { .fc_id = 648, .cpu_id = 153, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC7_KERNEL_ERR" },
+ { .fc_id = 649, .cpu_id = 146, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC8_BMON_SPMU" },
+ { .fc_id = 650, .cpu_id = 147, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC8_KERNEL_ERR" },
+ { .fc_id = 651, .cpu_id = 148, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC9_BMON_SPMU" },
+ { .fc_id = 652, .cpu_id = 149, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC9_KERNEL_ERR" },
+ { .fc_id = 653, .cpu_id = 142, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC10_BMON_SPMU" },
+ { .fc_id = 654, .cpu_id = 143, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC10_KERNEL_ERR" },
+ { .fc_id = 655, .cpu_id = 144, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC11_BMON_SPMU" },
+ { .fc_id = 656, .cpu_id = 145, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC11_KERNEL_ERR" },
+ { .fc_id = 657, .cpu_id = 162, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC12_BMON_SPMU" },
+ { .fc_id = 658, .cpu_id = 163, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC12_KERNEL_ERR" },
+ { .fc_id = 659, .cpu_id = 164, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC13_BMON_SPMU" },
+ { .fc_id = 660, .cpu_id = 165, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC13_KERNEL_ERR" },
+ { .fc_id = 661, .cpu_id = 158, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC14_BMON_SPMU" },
+ { .fc_id = 662, .cpu_id = 159, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC14_KERNEL_ERR" },
+ { .fc_id = 663, .cpu_id = 160, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC15_BMON_SPMU" },
+ { .fc_id = 664, .cpu_id = 161, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC15_KERNEL_ERR" },
+ { .fc_id = 665, .cpu_id = 154, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC16_BMON_SPMU" },
+ { .fc_id = 666, .cpu_id = 155, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC16_KERNEL_ERR" },
+ { .fc_id = 667, .cpu_id = 156, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC17_BMON_SPMU" },
+ { .fc_id = 668, .cpu_id = 157, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC17_KERNEL_ERR" },
+ { .fc_id = 669, .cpu_id = 166, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC18_BMON_SPMU" },
+ { .fc_id = 670, .cpu_id = 167, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC18_KERNEL_ERR" },
+ { .fc_id = 671, .cpu_id = 168, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC19_BMON_SPMU" },
+ { .fc_id = 672, .cpu_id = 169, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC19_KERNEL_ERR" },
+ { .fc_id = 673, .cpu_id = 170, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC20_BMON_SPMU" },
+ { .fc_id = 674, .cpu_id = 171, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC20_KERNEL_ERR" },
+ { .fc_id = 675, .cpu_id = 172, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC21_BMON_SPMU" },
+ { .fc_id = 676, .cpu_id = 173, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC21_KERNEL_ERR" },
+ { .fc_id = 677, .cpu_id = 174, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC22_BMON_SPMU" },
+ { .fc_id = 678, .cpu_id = 175, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC22_KERNEL_ERR" },
+ { .fc_id = 679, .cpu_id = 176, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC23_BMON_SPMU" },
+ { .fc_id = 680, .cpu_id = 177, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC23_KERNEL_ERR" },
+ { .fc_id = 681, .cpu_id = 178, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC24_BMON_SPMU" },
+ { .fc_id = 682, .cpu_id = 179, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC24_KERNEL_ERR" },
+ { .fc_id = 683, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 684, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 685, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 686, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 687, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 688, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_CTRL_BMON_SPMU" },
+ { .fc_id = 689, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE_BMON_SPMU" },
+ { .fc_id = 690, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_WAP_BMON_SPMU" },
+ { .fc_id = 691, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 692, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 693, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 694, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 695, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 696, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 697, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_CTRL_BMON_SPMU" },
+ { .fc_id = 698, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE_BMON_SPMU" },
+ { .fc_id = 699, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_WAP_BMON_SPMU" },
+ { .fc_id = 700, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 701, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 702, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 703, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 704, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 705, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 706, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_CTRL_BMON_SPMU" },
+ { .fc_id = 707, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE_BMON_SPMU" },
+ { .fc_id = 708, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_WAP_BMON_SPMU" },
+ { .fc_id = 709, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 710, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 711, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 712, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 713, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 714, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 715, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_CTRL_BMON_SPMU" },
+ { .fc_id = 716, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE_BMON_SPMU" },
+ { .fc_id = 717, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_WAP_BMON_SPMU" },
+ { .fc_id = 718, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 719, .cpu_id = 184, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 720, .cpu_id = 184, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU0_PAGE_FAULT_OR_WR_PERM" },
+ { .fc_id = 721, .cpu_id = 184, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU0_SECURITY_ERROR" },
+ { .fc_id = 722, .cpu_id = 185, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 723, .cpu_id = 185, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU1_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 724, .cpu_id = 185, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU1_SECURITY_ERROR" },
+ { .fc_id = 725, .cpu_id = 186, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 726, .cpu_id = 186, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU2_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 727, .cpu_id = 186, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU2_SECURITY_ERROR" },
+ { .fc_id = 728, .cpu_id = 187, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 729, .cpu_id = 187, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU3_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 730, .cpu_id = 187, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU3_SECURITY_ERROR" },
+ { .fc_id = 731, .cpu_id = 188, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 732, .cpu_id = 188, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU8_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 733, .cpu_id = 188, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU8_SECURITY_ERROR" },
+ { .fc_id = 734, .cpu_id = 189, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 735, .cpu_id = 189, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU9_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 736, .cpu_id = 189, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU9_SECURITY_ERROR" },
+ { .fc_id = 737, .cpu_id = 190, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 738, .cpu_id = 190, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU10_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 739, .cpu_id = 190, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU10_SECURITY_ERROR" },
+ { .fc_id = 740, .cpu_id = 191, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 741, .cpu_id = 191, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU11_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 742, .cpu_id = 191, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU11_SECURITY_ERROR" },
+ { .fc_id = 743, .cpu_id = 192, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 744, .cpu_id = 192, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU7_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 745, .cpu_id = 192, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU7_SECURITY_ERROR" },
+ { .fc_id = 746, .cpu_id = 193, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 747, .cpu_id = 193, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU6_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 748, .cpu_id = 193, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU6_SECURITY_ERROR" },
+ { .fc_id = 749, .cpu_id = 194, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 750, .cpu_id = 194, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU5_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 751, .cpu_id = 194, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU5_SECURITY_ERROR" },
+ { .fc_id = 752, .cpu_id = 195, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 753, .cpu_id = 195, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU4_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 754, .cpu_id = 195, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU4_SECURITY_ERROR" },
+ { .fc_id = 755, .cpu_id = 196, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 756, .cpu_id = 196, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU15_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 757, .cpu_id = 196, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU15_SECURITY_ERROR" },
+ { .fc_id = 758, .cpu_id = 197, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 759, .cpu_id = 197, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU14_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 760, .cpu_id = 197, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU14_SECURITY_ERROR" },
+ { .fc_id = 761, .cpu_id = 198, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 762, .cpu_id = 198, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU13_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 763, .cpu_id = 198, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU13_SECURITY_ERROR" },
+ { .fc_id = 764, .cpu_id = 199, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 765, .cpu_id = 199, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU12_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 766, .cpu_id = 199, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU12_SECURITY_ERROR" },
+ { .fc_id = 767, .cpu_id = 200, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 768, .cpu_id = 201, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU0_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 769, .cpu_id = 202, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU0_SECURITY_ERROR" },
+ { .fc_id = 770, .cpu_id = 203, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA2_BM_SPMU" },
+ { .fc_id = 771, .cpu_id = 204, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 772, .cpu_id = 205, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA3_BM_SPMU" },
+ { .fc_id = 773, .cpu_id = 206, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 774, .cpu_id = 207, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA0_BM_SPMU" },
+ { .fc_id = 775, .cpu_id = 208, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 776, .cpu_id = 209, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA1_BM_SPMU" },
+ { .fc_id = 777, .cpu_id = 210, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 778, .cpu_id = 211, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA6_BM_SPMU" },
+ { .fc_id = 779, .cpu_id = 212, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 780, .cpu_id = 213, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA7_BM_SPMU" },
+ { .fc_id = 781, .cpu_id = 214, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 782, .cpu_id = 215, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA4_BM_SPMU" },
+ { .fc_id = 783, .cpu_id = 216, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 784, .cpu_id = 217, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA5_BM_SPMU" },
+ { .fc_id = 785, .cpu_id = 218, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 786, .cpu_id = 219, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "KDMA_BM_SPMU" },
+ { .fc_id = 787, .cpu_id = 220, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 788, .cpu_id = 221, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA0_BM_SPMU" },
+ { .fc_id = 789, .cpu_id = 222, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA1_BM_SPMU" },
+ { .fc_id = 790, .cpu_id = 223, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC0_SPI" },
+ { .fc_id = 791, .cpu_id = 224, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC1_SPI" },
+ { .fc_id = 792, .cpu_id = 225, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC0_SPI" },
+ { .fc_id = 793, .cpu_id = 226, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC1_SPI" },
+ { .fc_id = 794, .cpu_id = 227, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC0_SPI" },
+ { .fc_id = 795, .cpu_id = 228, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC1_SPI" },
+ { .fc_id = 796, .cpu_id = 229, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC0_SPI" },
+ { .fc_id = 797, .cpu_id = 230, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC1_SPI" },
+ { .fc_id = 798, .cpu_id = 231, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC0_SPI" },
+ { .fc_id = 799, .cpu_id = 232, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC1_SPI" },
+ { .fc_id = 800, .cpu_id = 233, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC0_SPI" },
+ { .fc_id = 801, .cpu_id = 234, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC1_SPI" },
+ { .fc_id = 802, .cpu_id = 235, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 803, .cpu_id = 236, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 804, .cpu_id = 237, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 805, .cpu_id = 238, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 806, .cpu_id = 239, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 807, .cpu_id = 240, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 808, .cpu_id = 241, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 809, .cpu_id = 242, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 810, .cpu_id = 243, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 811, .cpu_id = 244, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 812, .cpu_id = 245, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 813, .cpu_id = 246, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 814, .cpu_id = 247, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 815, .cpu_id = 248, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 816, .cpu_id = 249, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 817, .cpu_id = 250, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 818, .cpu_id = 251, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 819, .cpu_id = 252, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 820, .cpu_id = 253, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 821, .cpu_id = 254, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 822, .cpu_id = 255, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 823, .cpu_id = 256, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 824, .cpu_id = 257, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 825, .cpu_id = 258, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 826, .cpu_id = 259, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 827, .cpu_id = 260, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 828, .cpu_id = 261, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 829, .cpu_id = 262, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 830, .cpu_id = 263, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 831, .cpu_id = 264, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 832, .cpu_id = 265, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 833, .cpu_id = 266, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 834, .cpu_id = 267, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 835, .cpu_id = 268, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 836, .cpu_id = 269, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 837, .cpu_id = 270, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 838, .cpu_id = 271, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 839, .cpu_id = 272, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 840, .cpu_id = 273, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 841, .cpu_id = 274, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 842, .cpu_id = 275, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 843, .cpu_id = 276, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 844, .cpu_id = 277, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 845, .cpu_id = 278, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 846, .cpu_id = 279, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 847, .cpu_id = 280, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 848, .cpu_id = 281, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 849, .cpu_id = 282, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 850, .cpu_id = 283, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 851, .cpu_id = 284, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 852, .cpu_id = 285, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 853, .cpu_id = 286, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 854, .cpu_id = 287, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "" },
+ { .fc_id = 855, .cpu_id = 288, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "" },
+ { .fc_id = 856, .cpu_id = 289, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 857, .cpu_id = 290, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 858, .cpu_id = 291, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 859, .cpu_id = 292, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 860, .cpu_id = 293, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 861, .cpu_id = 294, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 862, .cpu_id = 295, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 863, .cpu_id = 296, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 864, .cpu_id = 297, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 865, .cpu_id = 298, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 866, .cpu_id = 299, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 867, .cpu_id = 300, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 868, .cpu_id = 301, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 869, .cpu_id = 302, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 870, .cpu_id = 303, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 871, .cpu_id = 304, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "RPM_ERROR_OR_DRAIN" },
+ { .fc_id = 872, .cpu_id = 305, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 873, .cpu_id = 306, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 874, .cpu_id = 307, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 875, .cpu_id = 308, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "RAZWI_OR_PID_MIN_MAX_INTERRUPT" },
+ { .fc_id = 876, .cpu_id = 309, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 877, .cpu_id = 310, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 878, .cpu_id = 311, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 879, .cpu_id = 312, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "" },
+ { .fc_id = 880, .cpu_id = 313, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 881, .cpu_id = 314, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 882, .cpu_id = 315, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 883, .cpu_id = 316, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 884, .cpu_id = 317, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 885, .cpu_id = 318, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 886, .cpu_id = 319, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 887, .cpu_id = 320, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 888, .cpu_id = 321, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 889, .cpu_id = 322, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 890, .cpu_id = 323, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 891, .cpu_id = 324, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 892, .cpu_id = 325, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 893, .cpu_id = 326, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 894, .cpu_id = 327, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 895, .cpu_id = 328, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 896, .cpu_id = 329, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC0_SPI" },
+ { .fc_id = 897, .cpu_id = 329, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC0_BMON_SPMU" },
+ { .fc_id = 898, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC1_SPI" },
+	{ .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+		.name = "DEC1_BMON_SPMU" },
+ { .fc_id = 900, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC2_SPI" },
+ { .fc_id = 901, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC2_BMON_SPMU" },
+ { .fc_id = 902, .cpu_id = 332, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC3_SPI" },
+ { .fc_id = 903, .cpu_id = 332, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC3_BMON_SPMU" },
+ { .fc_id = 904, .cpu_id = 333, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC4_SPI" },
+ { .fc_id = 905, .cpu_id = 333, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC4_BMON_SPMU" },
+ { .fc_id = 906, .cpu_id = 334, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC5_SPI" },
+ { .fc_id = 907, .cpu_id = 334, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC5_BMON_SPMU" },
+ { .fc_id = 908, .cpu_id = 335, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC6_SPI" },
+ { .fc_id = 909, .cpu_id = 335, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC6_BMON_SPMU" },
+ { .fc_id = 910, .cpu_id = 336, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC7_SPI" },
+ { .fc_id = 911, .cpu_id = 336, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC7_BMON_SPMU" },
+ { .fc_id = 912, .cpu_id = 337, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC8_SPI" },
+ { .fc_id = 913, .cpu_id = 337, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC8_BMON_SPMU" },
+ { .fc_id = 914, .cpu_id = 338, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC9_SPI" },
+ { .fc_id = 915, .cpu_id = 338, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC9_BMON_SPMU" },
+ { .fc_id = 916, .cpu_id = 339, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 917, .cpu_id = 340, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 918, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 919, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 920, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 921, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 922, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 923, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 924, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 925, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 926, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 927, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 928, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 929, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 930, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 931, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 932, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 933, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 934, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 935, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 936, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 937, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 938, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 939, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 940, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 941, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 942, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 943, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 944, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 945, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 946, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 947, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 948, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 949, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 950, .cpu_id = 342, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 951, .cpu_id = 343, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC0_BMON_SPMU" },
+ { .fc_id = 952, .cpu_id = 343, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC0_SW_ERROR" },
+ { .fc_id = 953, .cpu_id = 343, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 954, .cpu_id = 343, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 955, .cpu_id = 344, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC1_BMON_SPMU" },
+ { .fc_id = 956, .cpu_id = 344, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC1_SW_ERROR" },
+ { .fc_id = 957, .cpu_id = 344, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 958, .cpu_id = 344, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 959, .cpu_id = 345, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC2_BMON_SPMU" },
+ { .fc_id = 960, .cpu_id = 345, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC2_SW_ERROR" },
+ { .fc_id = 961, .cpu_id = 345, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 962, .cpu_id = 345, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 963, .cpu_id = 346, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC3_BMON_SPMU" },
+ { .fc_id = 964, .cpu_id = 346, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC3_SW_ERROR" },
+ { .fc_id = 965, .cpu_id = 346, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 966, .cpu_id = 346, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 967, .cpu_id = 347, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC4_BMON_SPMU" },
+ { .fc_id = 968, .cpu_id = 347, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC4_SW_ERROR" },
+ { .fc_id = 969, .cpu_id = 347, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 970, .cpu_id = 347, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 971, .cpu_id = 348, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC5_BMON_SPMU" },
+ { .fc_id = 972, .cpu_id = 348, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC5_SW_ERROR" },
+ { .fc_id = 973, .cpu_id = 348, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 974, .cpu_id = 348, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 975, .cpu_id = 349, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC6_BMON_SPMU" },
+ { .fc_id = 976, .cpu_id = 349, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC6_SW_ERROR" },
+ { .fc_id = 977, .cpu_id = 349, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 978, .cpu_id = 349, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 979, .cpu_id = 350, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC7_BMON_SPMU" },
+ { .fc_id = 980, .cpu_id = 350, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC7_SW_ERROR" },
+ { .fc_id = 981, .cpu_id = 350, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 982, .cpu_id = 350, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 983, .cpu_id = 351, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC8_BMON_SPMU" },
+ { .fc_id = 984, .cpu_id = 351, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC8_SW_ERROR" },
+ { .fc_id = 985, .cpu_id = 351, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 986, .cpu_id = 351, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 987, .cpu_id = 352, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC9_BMON_SPMU" },
+ { .fc_id = 988, .cpu_id = 352, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC9_SW_ERROR" },
+ { .fc_id = 989, .cpu_id = 352, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 990, .cpu_id = 352, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 991, .cpu_id = 353, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC10_BMON_SPMU" },
+ { .fc_id = 992, .cpu_id = 353, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC10_SW_ERROR" },
+ { .fc_id = 993, .cpu_id = 353, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 994, .cpu_id = 353, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 995, .cpu_id = 354, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC11_BMON_SPMU" },
+ { .fc_id = 996, .cpu_id = 354, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC11_SW_ERROR" },
+ { .fc_id = 997, .cpu_id = 354, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 998, .cpu_id = 354, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 999, .cpu_id = 355, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1000, .cpu_id = 356, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1001, .cpu_id = 357, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1002, .cpu_id = 358, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1003, .cpu_id = 359, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1004, .cpu_id = 360, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1005, .cpu_id = 361, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1006, .cpu_id = 362, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1007, .cpu_id = 363, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1008, .cpu_id = 368, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1009, .cpu_id = 369, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1010, .cpu_id = 366, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1011, .cpu_id = 367, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1012, .cpu_id = 364, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1013, .cpu_id = 365, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1014, .cpu_id = 374, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1015, .cpu_id = 375, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1016, .cpu_id = 372, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1017, .cpu_id = 373, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1018, .cpu_id = 370, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1019, .cpu_id = 371, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1020, .cpu_id = 376, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1021, .cpu_id = 377, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1022, .cpu_id = 378, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1023, .cpu_id = 379, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1024, .cpu_id = 380, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1025, .cpu_id = 381, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1026, .cpu_id = 382, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1027, .cpu_id = 383, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1028, .cpu_id = 384, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1029, .cpu_id = 385, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1030, .cpu_id = 386, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1031, .cpu_id = 387, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1032, .cpu_id = 388, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1033, .cpu_id = 389, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1034, .cpu_id = 390, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1035, .cpu_id = 391, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1036, .cpu_id = 392, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1037, .cpu_id = 393, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1038, .cpu_id = 394, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1039, .cpu_id = 395, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1040, .cpu_id = 396, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1041, .cpu_id = 397, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1042, .cpu_id = 398, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1043, .cpu_id = 399, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1044, .cpu_id = 400, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1045, .cpu_id = 401, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1046, .cpu_id = 402, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1047, .cpu_id = 403, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1048, .cpu_id = 404, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1049, .cpu_id = 405, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1050, .cpu_id = 406, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1051, .cpu_id = 407, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1052, .cpu_id = 408, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1053, .cpu_id = 409, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1054, .cpu_id = 410, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1055, .cpu_id = 411, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1056, .cpu_id = 412, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1057, .cpu_id = 413, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1058, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1059, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1060, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1061, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1062, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1063, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1064, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1065, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1066, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1067, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1068, .cpu_id = 415, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1069, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1070, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1071, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1072, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1073, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1074, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1075, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1076, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1077, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1078, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1079, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1080, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1081, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1082, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1083, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1084, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1085, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1086, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1087, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1088, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1089, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1090, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1091, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1092, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1093, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1094, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1095, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1096, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1097, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1098, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1099, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1100, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1101, .cpu_id = 418, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1102, .cpu_id = 419, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1103, .cpu_id = 420, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1104, .cpu_id = 421, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1105, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1106, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1107, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1108, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1109, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1110, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1111, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1112, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1113, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1114, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1115, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1116, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1117, .cpu_id = 423, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1118, .cpu_id = 424, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR0_SERR" },
+ { .fc_id = 1119, .cpu_id = 425, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR1_SERR" },
+ { .fc_id = 1120, .cpu_id = 426, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "ROTATOR0_DERR" },
+ { .fc_id = 1121, .cpu_id = 427, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "ROTATOR1_DERR" },
+ { .fc_id = 1122, .cpu_id = 428, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROTATOR0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 1123, .cpu_id = 429, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROTATOR1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 1124, .cpu_id = 430, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1125, .cpu_id = 431, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1126, .cpu_id = 432, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR0_BMON_SPMU" },
+ { .fc_id = 1127, .cpu_id = 433, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1128, .cpu_id = 434, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR1_BMON_SPMU" },
+ { .fc_id = 1129, .cpu_id = 435, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1130, .cpu_id = 436, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM0_BMON_SPMU" },
+ { .fc_id = 1131, .cpu_id = 437, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM1_BMON_SPMU" },
+ { .fc_id = 1132, .cpu_id = 438, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM2_BMON_SPMU" },
+ { .fc_id = 1133, .cpu_id = 439, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM3_BMON_SPMU" },
+ { .fc_id = 1134, .cpu_id = 440, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1135, .cpu_id = 441, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1136, .cpu_id = 442, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1137, .cpu_id = 443, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1138, .cpu_id = 444, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1139, .cpu_id = 445, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1140, .cpu_id = 446, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1141, .cpu_id = 447, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1142, .cpu_id = 448, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1143, .cpu_id = 449, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1144, .cpu_id = 450, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1145, .cpu_id = 451, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1146, .cpu_id = 452, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1147, .cpu_id = 453, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1148, .cpu_id = 454, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1149, .cpu_id = 455, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1150, .cpu_id = 456, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1151, .cpu_id = 457, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1152, .cpu_id = 458, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1153, .cpu_id = 459, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1154, .cpu_id = 460, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1155, .cpu_id = 461, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1156, .cpu_id = 462, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1157, .cpu_id = 463, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1158, .cpu_id = 464, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1159, .cpu_id = 465, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1160, .cpu_id = 466, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1161, .cpu_id = 467, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1162, .cpu_id = 468, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1163, .cpu_id = 469, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1164, .cpu_id = 470, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1165, .cpu_id = 471, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1166, .cpu_id = 472, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1167, .cpu_id = 473, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1168, .cpu_id = 474, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1169, .cpu_id = 475, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1170, .cpu_id = 476, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1171, .cpu_id = 477, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1172, .cpu_id = 478, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1173, .cpu_id = 479, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1174, .cpu_id = 480, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1175, .cpu_id = 481, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1176, .cpu_id = 482, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1177, .cpu_id = 483, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1178, .cpu_id = 484, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1179, .cpu_id = 485, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1180, .cpu_id = 486, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1181, .cpu_id = 487, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1182, .cpu_id = 488, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1183, .cpu_id = 489, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1184, .cpu_id = 490, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1185, .cpu_id = 491, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1186, .cpu_id = 492, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1187, .cpu_id = 493, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1188, .cpu_id = 494, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1189, .cpu_id = 495, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1190, .cpu_id = 496, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1191, .cpu_id = 497, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1192, .cpu_id = 498, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1193, .cpu_id = 499, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1194, .cpu_id = 500, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1195, .cpu_id = 501, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1196, .cpu_id = 502, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1197, .cpu_id = 503, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1198, .cpu_id = 504, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1199, .cpu_id = 505, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1200, .cpu_id = 506, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1201, .cpu_id = 507, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1202, .cpu_id = 508, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1203, .cpu_id = 509, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1204, .cpu_id = 510, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1205, .cpu_id = 511, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1206, .cpu_id = 512, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC0_QM" },
+ { .fc_id = 1207, .cpu_id = 513, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC1_QM" },
+ { .fc_id = 1208, .cpu_id = 514, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC2_QM" },
+ { .fc_id = 1209, .cpu_id = 515, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC3_QM" },
+ { .fc_id = 1210, .cpu_id = 516, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC4_QM" },
+ { .fc_id = 1211, .cpu_id = 517, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC5_QM" },
+ { .fc_id = 1212, .cpu_id = 518, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC6_QM" },
+ { .fc_id = 1213, .cpu_id = 519, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC7_QM" },
+ { .fc_id = 1214, .cpu_id = 520, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC8_QM" },
+ { .fc_id = 1215, .cpu_id = 521, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC9_QM" },
+ { .fc_id = 1216, .cpu_id = 522, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC10_QM" },
+ { .fc_id = 1217, .cpu_id = 523, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC11_QM" },
+ { .fc_id = 1218, .cpu_id = 524, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC12_QM" },
+ { .fc_id = 1219, .cpu_id = 525, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC13_QM" },
+ { .fc_id = 1220, .cpu_id = 526, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC14_QM" },
+ { .fc_id = 1221, .cpu_id = 527, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC15_QM" },
+ { .fc_id = 1222, .cpu_id = 528, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC16_QM" },
+ { .fc_id = 1223, .cpu_id = 529, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC17_QM" },
+ { .fc_id = 1224, .cpu_id = 530, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC18_QM" },
+ { .fc_id = 1225, .cpu_id = 531, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC19_QM" },
+ { .fc_id = 1226, .cpu_id = 532, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC20_QM" },
+ { .fc_id = 1227, .cpu_id = 533, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC21_QM" },
+ { .fc_id = 1228, .cpu_id = 534, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC22_QM" },
+ { .fc_id = 1229, .cpu_id = 535, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC23_QM" },
+ { .fc_id = 1230, .cpu_id = 536, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC24_QM" },
+ { .fc_id = 1231, .cpu_id = 537, .valid = 0, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1232, .cpu_id = 538, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_QM" },
+ { .fc_id = 1233, .cpu_id = 539, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_QM" },
+ { .fc_id = 1234, .cpu_id = 540, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_QM" },
+ { .fc_id = 1235, .cpu_id = 541, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_QM" },
+ { .fc_id = 1236, .cpu_id = 542, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA2_QM" },
+ { .fc_id = 1237, .cpu_id = 543, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA3_QM" },
+ { .fc_id = 1238, .cpu_id = 544, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA0_QM" },
+ { .fc_id = 1239, .cpu_id = 545, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA1_QM" },
+ { .fc_id = 1240, .cpu_id = 546, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA6_QM" },
+ { .fc_id = 1241, .cpu_id = 547, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA7_QM" },
+ { .fc_id = 1242, .cpu_id = 548, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA4_QM" },
+ { .fc_id = 1243, .cpu_id = 549, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA5_QM" },
+ { .fc_id = 1244, .cpu_id = 550, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA0_QM" },
+ { .fc_id = 1245, .cpu_id = 551, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA1_QM" },
+ { .fc_id = 1246, .cpu_id = 552, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PI_UPDATE" },
+ { .fc_id = 1247, .cpu_id = 553, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HALT_MACHINE" },
+ { .fc_id = 1248, .cpu_id = 554, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "INTS_REGISTER" },
+ { .fc_id = 1249, .cpu_id = 555, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROT0_QM" },
+ { .fc_id = 1250, .cpu_id = 556, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROT1_QM" },
+ { .fc_id = 1251, .cpu_id = 557, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SOFT_RESET" },
+ { .fc_id = 1252, .cpu_id = 558, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "CPLD_SHUTDOWN_CAUSE" },
+ { .fc_id = 1253, .cpu_id = 559, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_POWER_ENV_S" },
+ { .fc_id = 1254, .cpu_id = 560, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_POWER_ENV_E" },
+ { .fc_id = 1255, .cpu_id = 561, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_THERMAL_ENV_S" },
+ { .fc_id = 1256, .cpu_id = 562, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_THERMAL_ENV_E" },
+ { .fc_id = 1257, .cpu_id = 563, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "CPLD_SHUTDOWN_EVENT" },
+ { .fc_id = 1258, .cpu_id = 564, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PKT_QUEUE_OUT_SYNC" },
+ { .fc_id = 1259, .cpu_id = 565, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA2_CORE" },
+ { .fc_id = 1260, .cpu_id = 566, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA3_CORE" },
+ { .fc_id = 1261, .cpu_id = 567, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA0_CORE" },
+ { .fc_id = 1262, .cpu_id = 568, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA1_CORE" },
+ { .fc_id = 1263, .cpu_id = 569, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA6_CORE" },
+ { .fc_id = 1264, .cpu_id = 570, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA7_CORE" },
+ { .fc_id = 1265, .cpu_id = 571, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA4_CORE" },
+ { .fc_id = 1266, .cpu_id = 572, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA5_CORE" },
+ { .fc_id = 1267, .cpu_id = 573, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA0_CORE" },
+ { .fc_id = 1268, .cpu_id = 574, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA1_CORE" },
+ { .fc_id = 1269, .cpu_id = 575, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "KDMA0_CORE" },
+ { .fc_id = 1270, .cpu_id = 576, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_QM0" },
+ { .fc_id = 1271, .cpu_id = 577, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_QM1" },
+ { .fc_id = 1272, .cpu_id = 578, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_QM0" },
+ { .fc_id = 1273, .cpu_id = 579, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_QM1" },
+ { .fc_id = 1274, .cpu_id = 580, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_QM0" },
+ { .fc_id = 1275, .cpu_id = 581, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_QM1" },
+ { .fc_id = 1276, .cpu_id = 582, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_QM0" },
+ { .fc_id = 1277, .cpu_id = 583, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_QM1" },
+ { .fc_id = 1278, .cpu_id = 584, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_QM0" },
+ { .fc_id = 1279, .cpu_id = 585, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_QM1" },
+ { .fc_id = 1280, .cpu_id = 586, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_QM0" },
+ { .fc_id = 1281, .cpu_id = 587, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_QM1" },
+ { .fc_id = 1282, .cpu_id = 588, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_QM0" },
+ { .fc_id = 1283, .cpu_id = 589, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_QM1" },
+ { .fc_id = 1284, .cpu_id = 590, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_QM0" },
+ { .fc_id = 1285, .cpu_id = 591, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_QM1" },
+ { .fc_id = 1286, .cpu_id = 592, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_QM0" },
+ { .fc_id = 1287, .cpu_id = 593, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_QM1" },
+ { .fc_id = 1288, .cpu_id = 594, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_QM0" },
+ { .fc_id = 1289, .cpu_id = 595, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_QM1" },
+ { .fc_id = 1290, .cpu_id = 596, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_QM0" },
+ { .fc_id = 1291, .cpu_id = 597, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_QM1" },
+ { .fc_id = 1292, .cpu_id = 598, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_QM0" },
+ { .fc_id = 1293, .cpu_id = 599, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_QM1" },
+ { .fc_id = 1294, .cpu_id = 600, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "CPU_PKT_SANITY_FAILED" },
+ { .fc_id = 1295, .cpu_id = 601, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC0_ENG0" },
+ { .fc_id = 1296, .cpu_id = 602, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC0_ENG1" },
+ { .fc_id = 1297, .cpu_id = 603, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC1_ENG0" },
+ { .fc_id = 1298, .cpu_id = 604, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC1_ENG1" },
+ { .fc_id = 1299, .cpu_id = 605, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC2_ENG0" },
+ { .fc_id = 1300, .cpu_id = 606, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC2_ENG1" },
+ { .fc_id = 1301, .cpu_id = 607, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC3_ENG0" },
+ { .fc_id = 1302, .cpu_id = 608, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC3_ENG1" },
+ { .fc_id = 1303, .cpu_id = 609, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC4_ENG0" },
+ { .fc_id = 1304, .cpu_id = 610, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC4_ENG1" },
+ { .fc_id = 1305, .cpu_id = 611, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC5_ENG0" },
+ { .fc_id = 1306, .cpu_id = 612, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC5_ENG1" },
+ { .fc_id = 1307, .cpu_id = 613, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC6_ENG0" },
+ { .fc_id = 1308, .cpu_id = 614, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC6_ENG1" },
+ { .fc_id = 1309, .cpu_id = 615, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC7_ENG0" },
+ { .fc_id = 1310, .cpu_id = 616, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC7_ENG1" },
+ { .fc_id = 1311, .cpu_id = 617, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC8_ENG0" },
+ { .fc_id = 1312, .cpu_id = 618, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC8_ENG1" },
+ { .fc_id = 1313, .cpu_id = 619, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC9_ENG0" },
+ { .fc_id = 1314, .cpu_id = 620, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC9_ENG1" },
+ { .fc_id = 1315, .cpu_id = 621, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC10_ENG0" },
+ { .fc_id = 1316, .cpu_id = 622, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC10_ENG1" },
+ { .fc_id = 1317, .cpu_id = 623, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC11_ENG0" },
+ { .fc_id = 1318, .cpu_id = 624, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC11_ENG1" },
+ { .fc_id = 1319, .cpu_id = 625, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ARC_DCCM_FULL" },
+ { .fc_id = 1320, .cpu_id = 626, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "FP32_NOT_SUPPORTED" },
+ { .fc_id = 1321, .cpu_id = 627, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEV_RESET_REQ" },
+};
+
+#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h
index 14f09d7758c7..14f09d7758c7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
index e4a7d5725096..8522f24deac0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
@@ -20,22 +20,25 @@
#define GAUDI2_NUM_MME 4
+#define NUM_OF_GPIOS_PER_PORT 16
+#define GAUDI2_WD_GPIO (62 % NUM_OF_GPIOS_PER_PORT)
+
#define GAUDI2_ARCPID_TX_MB_SIZE 0x1000
#define GAUDI2_ARCPID_RX_MB_SIZE 0x400
#define GAUDI2_ARM_TX_MB_SIZE 0x400
#define GAUDI2_ARM_RX_MB_SIZE 0x1800
#define GAUDI2_DCCM_BASE_ADDR 0x27020000
-#define GAUDI2_ARCPID_TX_MB_ADDR GAUDI2_DCCM_BASE_ADDR
-
-#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + \
- GAUDI2_ARCPID_TX_MB_SIZE)
#define GAUDI2_ARM_TX_MB_ADDR GAUDI2_MAILBOX_BASE_ADDR
#define GAUDI2_ARM_RX_MB_ADDR (GAUDI2_ARM_TX_MB_ADDR + \
GAUDI2_ARM_TX_MB_SIZE)
+#define GAUDI2_ARCPID_TX_MB_ADDR (GAUDI2_ARM_RX_MB_ADDR + GAUDI2_ARM_RX_MB_SIZE)
+
+#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + GAUDI2_ARCPID_TX_MB_SIZE)
+
#define GAUDI2_ARM_TX_MB_OFFSET (GAUDI2_ARM_TX_MB_ADDR - \
GAUDI2_SP_SRAM_BASE_ADDR)
@@ -58,7 +61,12 @@ struct gaudi2_cold_rst_data {
u32 spsram_init_done : 1;
u32 fake_security_enable : 1;
u32 fake_sig_validation_en : 1;
- u32 reserved : 26;
+ u32 bist_skip_enable : 1;
+ u32 bist_need_iatu_config : 1;
+ u32 fake_bis_compliant : 1;
+ u32 wd_rst_cause_arm : 1;
+ u32 wd_rst_cause_arcpid : 1;
+ u32 reserved : 21;
};
__le32 data;
};
@@ -77,10 +85,10 @@ enum gaudi2_rst_src {
};
struct gaudi2_redundancy_ctx {
- int redundant_hbm;
- int redundant_edma;
- int redundant_tpc;
- int redundant_vdec;
+ __le32 redundant_hbm;
+ __le32 redundant_edma;
+ __le32 redundant_tpc;
+ __le32 redundant_vdec;
__le64 hbm_mask;
__le64 edma_mask;
__le64 tpc_mask;
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
index 8bf90fc18bf5..a812f8503f90 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
@@ -59,7 +59,7 @@ struct gaudi2_packet {
/* The rest of the packet data follows. Use the corresponding
 * packet_XXX struct to dereference the data, based on packet type
*/
- u8 contents[0];
+ u8 contents[];
};
struct packet_nop {
@@ -80,7 +80,7 @@ struct packet_wreg32 {
struct packet_wreg_bulk {
__le32 size64;
__le32 ctl;
- __le64 values[0]; /* data starts here */
+ __le64 values[]; /* data starts here */
};
struct packet_msg_long {
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
index ae7feb388f63..f3eaeb6d9b7e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
@@ -24,14 +24,14 @@
#define mmGIC_HOST_HALT_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
#define mmGIC_HOST_INTS_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_11
#define mmGIC_HOST_SOFT_RST_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_12
-#define mmEEPROM_COPY_LOCATION_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_13
#define mmCPU_RST_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_14
-#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
-#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
/*
- * TODO: mmGIC_RAZWI_STATUS_REG is temporary
- * macro and to be removed after GAUDI2 PO
+ * Single scratchpad register used by all ARCs to notify FW of a dccm queue full event.
+ * A new event overwrites any unhandled previous event. In other words, if multiple
+ * events arrive before the previous ones are handled, only the last one is considered.
*/
+#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
+#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
#define mmGIC_RAZWI_STATUS_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_19
#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
@@ -40,11 +40,10 @@
#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmPPBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
#define mmRST_SRC mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0
-#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
#define mmCOLD_RST_DATA mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2
#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
#define mmPID_CMD_REQ_REG mmPSOC_PID_PID_CMD_0
@@ -55,5 +54,8 @@
#define mmPID_CMD_TELEMETRY_REG_0_HI mmPSOC_PID_PID_CMD_5
#define mmPID_CMD_TELEMETRY_REG_1 mmPSOC_PID_PID_CMD_6
#define mmPID_CMD_TELEMETRY_REG_1_HI mmPSOC_PID_PID_CMD_7
+#define mmWD_GPIO_OUTSET_REG mmPSOC_GPIO3_OUTENSET
+#define mmWD_GPIO_DATAOUT_REG mmPSOC_GPIO3_DATAOUT
+#define mmSTM_PROFILER_SPE_REG mmPSOC_STM_STMSPER
#endif /* GAUDI2_REG_MAP_H_ */
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h
new file mode 100644
index 000000000000..a55668f92dd1
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+/*
+ * This file was generated automatically.
+ * DON'T EDIT THIS FILE.
+ */
+
+#ifndef GAUDI2_SPECIAL_BLOCKS_H
+#define GAUDI2_SPECIAL_BLOCKS_H
+
+#define GAUDI2_SPECIAL_BLOCKS { \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc008000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc00a000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc00b000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc00c000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc080000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc081000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc083000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc084000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0c8000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0c9000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0ca000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0cb000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0cc000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EU_BIST, 0xfc0cd000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0ce000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0cf000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0d0000, 4, 5, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0d1000, 4, 5, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0f8000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0f9000, 4, 2, 0, 0x200000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SYNC_MNGR, 0xfc11e000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SYNC_MNGR, 0xfc11f000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HIF, 0xfc120000, 4, 4, 0, 0x200000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc140000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc141000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc142000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc143000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc144000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc145000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SRAM, 0xfc180000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc181000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SRAM, 0xfc182000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1c8000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1ca000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1cb000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1cc000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfc1e3000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfc1e4000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfc1e5000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc01000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc07000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc10000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc14000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc15000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc16000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4a000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4b000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4e000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4f000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc53000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc54000, 2, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc58000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc59000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc5a000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc5b000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc60000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc61000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc62000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc63000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc64000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcc6c000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcc6d000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcc6e000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc74000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc76000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc79000, 2, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc7b000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc7f000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc88000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8a000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8b000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8c000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_CPU, 0xfccc0000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_CPU, 0xfccc1000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_CPU, 0xfccc3000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd00000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd01000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd02000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd03000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd05000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_XBAR, 0xfcd40000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd41000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd42000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd43000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd44000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_XBAR, 0xfcd48000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd55000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd64000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd65000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd74000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce08000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce0a000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce0b000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce0c000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce40000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce41000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce42000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce43000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce48000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce49000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce4a000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce4b000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce4c000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce81000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce82000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce88000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce89000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8b000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8c000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8f000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfcf03000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfcf04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfcf05000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_XFT, 0xfcf40000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf41000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf42000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf43000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf53000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf73000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HBM, 0xfd000000, 6, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HBM, 0xfd001000, 6, 2, 8, 0x80000, 0x20000, 0x1000 }, \
+ { GAUDI2_BLOCK_TYPE_HBM, 0xfd009000, 6, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd400000, 12, 2, 15, 0x80000, 0x20000, 0x1000 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd418000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd41a000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd41f000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd448000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd449000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd44a000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd44c000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd450000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd452000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd454000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd455000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd460000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd468000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd469000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+}
+
+#endif
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
index 4e0dbbbbde20..4e0dbbbbde20 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
index f3faf1aad91a..f3faf1aad91a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h
index cf657918962a..cf657918962a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
index 8c8f9726d4b9..8c8f9726d4b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
index 028143408401..028143408401 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
index 0b246fe6ad04..0b246fe6ad04 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
index 5449031722f2..5449031722f2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
index a4768521d18a..a4768521d18a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
index 619d01897ff8..619d01897ff8 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
index 038617e163f1..038617e163f1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h
index f43b564af1be..f43b564af1be 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h
index c3bfc1b8e3fd..c3bfc1b8e3fd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
index bc977488c072..bc977488c072 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
index c4abc7ff1fc6..c4abc7ff1fc6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
index b17f72c31ab6..b17f72c31ab6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
index bf360b301154..bf360b301154 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
index 51d432d05ac4..51d432d05ac4 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
index 18fc0c2b6cc2..18fc0c2b6cc2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
index 6cf7204bf5cc..6cf7204bf5cc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
index 36fef2682875..36fef2682875 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h
index 85b15010cd7a..85b15010cd7a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h
index 9ff3cb245580..9ff3cb245580 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h
index ce65c9da5c60..ce65c9da5c60 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h
index 4ae7fed8b18c..4ae7fed8b18c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h
index 6d35d852798b..6d35d852798b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
index 6c23f8b96e7e..6c23f8b96e7e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
index 122e9d529939..122e9d529939 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
index 00ce2252bbfb..00ce2252bbfb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
index 8e3eb7fd2070..8e3eb7fd2070 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
index 79b67bbc8567..79b67bbc8567 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
index 0ac3c37ce47f..0ac3c37ce47f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
index 50c49cce72a6..50c49cce72a6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
index fe7d95bdcef9..fe7d95bdcef9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
index 5f8b85d2b4b1..5f8b85d2b4b1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h
index 1882c413cbe0..1882c413cbe0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h
index e464e381555c..e464e381555c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h
index 538708beffc9..538708beffc9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h
index 0396cbfd5c89..0396cbfd5c89 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h
index c3e69062b135..c3e69062b135 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h
index 7ec81f12031e..7ec81f12031e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
index ceb59f2e28b3..ceb59f2e28b3 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
index dd067f301ac2..dd067f301ac2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
index 35b1d8ac6f63..35b1d8ac6f63 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
index d1e55aace4a0..d1e55aace4a0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
index 9271ea95ebe9..9271ea95ebe9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
index b7c33e025db5..b7c33e025db5 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
index 324266653c9a..324266653c9a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
index 8141f422e712..8141f422e712 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
index 4789ebb9c337..4789ebb9c337 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
index 27a296ea6c3d..27a296ea6c3d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
index 66aee7fa6b1e..66aee7fa6b1e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
index 9ce24597d4b0..9ce24597d4b0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
index 2ea1770b078f..2ea1770b078f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
index 37e0713efa73..37e0713efa73 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
index d2572279a2b9..d2572279a2b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
index 68c5b402c506..68c5b402c506 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
index a42f1ba06d28..a42f1ba06d28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h
index 94f2ed4a36bd..94f2ed4a36bd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h
index 35013f65acd2..35013f65acd2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
index 89c9507a512f..89c9507a512f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
index 7d71c4b73a5e..7d71c4b73a5e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
index 9395f2458771..9395f2458771 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
index bc51df573bf0..bc51df573bf0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
index 553c6b6bd5ec..553c6b6bd5ec 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
index 8495479c3659..8495479c3659 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
index 43fafcf01041..43fafcf01041 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
index ce3346dd2042..ce3346dd2042 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
index 2e4b45947944..2e4b45947944 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
index 4fa09eb88878..4fa09eb88878 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
index 928eef1808ae..928eef1808ae 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
index 30ae0f307328..30ae0f307328 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
index b95de4f95ba9..b95de4f95ba9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
index 0f91e307879e..0f91e307879e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
index 73421227f35b..73421227f35b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
index 27b66bf2da9f..27b66bf2da9f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
index 31e5b2f53905..31e5b2f53905 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
index 4eddeaa15d94..4eddeaa15d94 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
index ce573a1a8361..ce573a1a8361 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
index 11d81fca0a0f..11d81fca0a0f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
index e41595a19e69..e41595a19e69 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
index 34a438b1efe5..34a438b1efe5 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
index d44caf0fc1bb..d44caf0fc1bb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
index f13a6532961f..f13a6532961f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
index db081fc17cfc..db081fc17cfc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
index 8c5372303b28..8c5372303b28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
index 5139fde71011..5139fde71011 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
index 1e7cd6e1e888..1e7cd6e1e888 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
index ac0d3820cd6b..ac0d3820cd6b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
index 57f83bc3b17d..57f83bc3b17d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
index 94e0191c06c1..94e0191c06c1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
index 7a1a0e87b225..7a1a0e87b225 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
index 80fa0fe0f60f..80fa0fe0f60f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
index d6cae8b8af66..d6cae8b8af66 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
index 234147adb779..234147adb779 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
index 4c160632fe7d..4c160632fe7d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
index 0c13d4d167aa..0c13d4d167aa 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
index cbe11425bfb0..cbe11425bfb0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
index e25e19660a9d..e25e19660a9d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/accel/habanalabs/include/goya/goya.h
index 1b4ca435021d..1b4ca435021d 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/accel/habanalabs/include/goya/goya.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/accel/habanalabs/include/goya/goya_async_events.h
index 09081401cb1d..09081401cb1d 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/accel/habanalabs/include/goya/goya_async_events.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/accel/habanalabs/include/goya/goya_coresight.h
index 6e933c0ca5cd..6e933c0ca5cd 100644
--- a/drivers/misc/habanalabs/include/goya/goya_coresight.h
+++ b/drivers/accel/habanalabs/include/goya/goya_coresight.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/accel/habanalabs/include/goya/goya_fw_if.h
index bc05f86c73ac..bc05f86c73ac 100644
--- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h
+++ b/drivers/accel/habanalabs/include/goya/goya_fw_if.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/accel/habanalabs/include/goya/goya_packets.h
index 896799204fb0..896799204fb0 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/accel/habanalabs/include/goya/goya_packets.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/accel/habanalabs/include/goya/goya_reg_map.h
index f3ab282cafa4..f3ab282cafa4 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/accel/habanalabs/include/goya/goya_reg_map.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
index d408feecd483..d408feecd483 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
index 86511002e367..86511002e367 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
index 9c727a5d47b4..9c727a5d47b4 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
index cd7bf25d2da9..cd7bf25d2da9 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
index f5d497dc9bdc..f5d497dc9bdc 100644
--- a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
new file mode 100644
index 000000000000..9bdf168bf1d0
--- /dev/null
+++ b/drivers/accel/ivpu/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_IVPU
+ tristate "Intel VPU for Meteor Lake and newer"
+ depends on DRM_ACCEL
+ depends on X86_64 && !UML
+ depends on PCI && PCI_MSI
+ select FW_LOADER
+ select SHMEM
+ help
+ Choose this option if you have a system that has a 14th generation Intel CPU
+ or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
+ inference accelerator for Computer Vision and Deep Learning applications.
+
+ If "M" is selected, the module will be called intel_vpu.
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
new file mode 100644
index 000000000000..80f1fb3548ae
--- /dev/null
+++ b/drivers/accel/ivpu/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023 Intel Corporation
+
+intel_vpu-y := \
+ ivpu_drv.o \
+ ivpu_fw.o \
+ ivpu_gem.o \
+ ivpu_hw_mtl.o \
+ ivpu_ipc.o \
+ ivpu_job.o \
+ ivpu_jsm_msg.o \
+ ivpu_mmu.o \
+ ivpu_mmu_context.o \
+ ivpu_pm.o
+
+obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
\ No newline at end of file
diff --git a/drivers/accel/ivpu/TODO b/drivers/accel/ivpu/TODO
new file mode 100644
index 000000000000..9077217ae10f
--- /dev/null
+++ b/drivers/accel/ivpu/TODO
@@ -0,0 +1,11 @@
+- Move to threaded_irqs to mitigate potential infinite loop in ivpu_ipc_irq_handler()
+- Implement support for BLOB IDs
+- Add debugfs support to improve debugging and testing
+- Add tracing events for performance debugging
+- Implement HW based scheduling support
+- Use syncobjs for submit/sync
+- Refactor IPC protocol to improve message latency
+- Implement BO cache and MADVISE IOCTL
+- Add support for user allocated buffers using prime import and dma-buf heaps
+- Refactor struct ivpu_bo to use struct drm_gem_shmem_object
+- Add driver/device documentation
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
new file mode 100644
index 000000000000..8396db2b5203
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm_accel.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+#include "ivpu_pm.h"
+
+#ifndef DRIVER_VERSION_STR
+#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
+ __stringify(DRM_IVPU_DRIVER_MINOR)
+#endif
+
+static const struct drm_driver driver;
+
+static struct lock_class_key submitted_jobs_xa_lock_class_key;
+
+int ivpu_dbg_mask;
+module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
+MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
+
+int ivpu_test_mode;
+module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
+MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");
+
+u8 ivpu_pll_min_ratio;
+module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
+MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
+
+u8 ivpu_pll_max_ratio = U8_MAX;
+module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
+MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
+
+struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ kref_get(&file_priv->ref);
+
+ ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
+ file_priv->ctx.id, kref_read(&file_priv->ref));
+
+ return file_priv;
+}
+
+struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+{
+ struct ivpu_file_priv *file_priv;
+
+ xa_lock_irq(&vdev->context_xa);
+ file_priv = xa_load(&vdev->context_xa, id);
+ /* file_priv may still be in context_xa during file_priv_release() */
+ if (file_priv && !kref_get_unless_zero(&file_priv->ref))
+ file_priv = NULL;
+ xa_unlock_irq(&vdev->context_xa);
+
+ if (file_priv)
+ ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
+ file_priv->ctx.id, kref_read(&file_priv->ref));
+
+ return file_priv;
+}
+
+static void file_priv_release(struct kref *ref)
+{
+ struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+
+ ivpu_cmdq_release_all(file_priv);
+ ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
+ ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+}
+
+void ivpu_file_priv_put(struct ivpu_file_priv **link)
+{
+ struct ivpu_file_priv *file_priv = *link;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ drm_WARN_ON(&vdev->drm, !file_priv);
+
+ ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
+ file_priv->ctx.id, kref_read(&file_priv->ref));
+
+ *link = NULL;
+ kref_put(&file_priv->ref, file_priv_release);
+}
+
+static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
+ struct drm_ivpu_param *args = data;
+ int ret = 0;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_IVPU_PARAM_DEVICE_ID:
+ args->value = pdev->device;
+ break;
+ case DRM_IVPU_PARAM_DEVICE_REVISION:
+ args->value = pdev->revision;
+ break;
+ case DRM_IVPU_PARAM_PLATFORM_TYPE:
+ args->value = vdev->platform;
+ break;
+ case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+ args->value = ivpu_hw_reg_pll_freq_get(vdev);
+ break;
+ case DRM_IVPU_PARAM_NUM_CONTEXTS:
+ args->value = ivpu_get_context_count(vdev);
+ break;
+ case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
+ args->value = vdev->hw->ranges.user_low.start;
+ break;
+ case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
+ args->value = file_priv->priority;
+ break;
+ case DRM_IVPU_PARAM_CONTEXT_ID:
+ args->value = file_priv->ctx.id;
+ break;
+ case DRM_IVPU_PARAM_FW_API_VERSION:
+ if (args->index < VPU_FW_API_VER_NUM) {
+ struct vpu_firmware_header *fw_hdr;
+
+ fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
+ args->value = fw_hdr->api_version[args->index];
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
+ ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
+ break;
+ case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
+ args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
+ break;
+ case DRM_IVPU_PARAM_TILE_CONFIG:
+ args->value = vdev->hw->tile_fuse;
+ break;
+ case DRM_IVPU_PARAM_SKU:
+ args->value = vdev->hw->sku;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_param *args = data;
+ int ret = 0;
+
+ switch (args->param) {
+ case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
+ if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
+ file_priv->priority = args->value;
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ivpu_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct ivpu_file_priv *file_priv;
+ u32 ctx_id;
+ void *old;
+ int ret;
+
+ ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+ return ret;
+ }
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv) {
+ ret = -ENOMEM;
+ goto err_xa_erase;
+ }
+
+ file_priv->vdev = vdev;
+ file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+ kref_init(&file_priv->ref);
+ mutex_init(&file_priv->lock);
+
+ ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
+ if (ret)
+ goto err_mutex_destroy;
+
+ old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ ret = xa_err(old);
+ ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
+ goto err_ctx_fini;
+ }
+
+ ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
+ ctx_id, current->comm, task_pid_nr(current));
+
+ file->driver_priv = file_priv;
+ return 0;
+
+err_ctx_fini:
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+err_mutex_destroy:
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+err_xa_erase:
+ xa_erase_irq(&vdev->context_xa, ctx_id);
+ return ret;
+}
+
+static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+
+ ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
+ file_priv->ctx.id, current->comm, task_pid_nr(current));
+
+ ivpu_file_priv_put(&file_priv);
+}
+
+static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
+};
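+
+/*
+ * Note: user space reaches these entry points through the accel character
+ * device (typically /dev/accel/accelN), using the DRM_IOCTL_IVPU_* wrappers
+ * declared in uapi/drm/ivpu_accel.h.
+ */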
+
+static int ivpu_wait_for_ready(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_consumer cons;
+ struct ivpu_ipc_hdr ipc_hdr;
+ unsigned long timeout;
+ int ret;
+
+ if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
+ return 0;
+
+ ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
+
+ timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
+ while (1) {
+ ret = ivpu_ipc_irq_handler(vdev);
+ if (ret)
+ break;
+ ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
+ if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
+ break;
+
+ cond_resched();
+ }
+
+ ivpu_ipc_consumer_del(vdev, &cons);
+
+ if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
+ ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
+ ipc_hdr.data_addr);
+ return -EIO;
+ }
+
+ if (!ret)
+ ivpu_info(vdev, "VPU ready message received successfully\n");
+ else
+ ivpu_hw_diagnose_failure(vdev);
+
+ return ret;
+}
+
+/**
+ * ivpu_boot() - Start VPU firmware
+ * @vdev: VPU device
+ *
+ * This function is paired with ivpu_shutdown() but it doesn't power up the
+ * VPU because power up has to be called very early in ivpu_probe().
+ */
+int ivpu_boot(struct ivpu_device *vdev)
+{
+ int ret;
+
+ /* Update boot params located at first 4KB of FW memory */
+ ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
+
+ ret = ivpu_hw_boot_fw(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_wait_for_ready(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_hw_irq_clear(vdev);
+ enable_irq(vdev->irq);
+ ivpu_hw_irq_enable(vdev);
+ ivpu_ipc_enable(vdev);
+ return 0;
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
+ ivpu_ipc_disable(vdev);
+ ivpu_mmu_disable(vdev);
+
+ ret = ivpu_hw_power_down(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
+
+ return ret;
+}
+
+static const struct file_operations ivpu_fops = {
+ .owner = THIS_MODULE,
+ DRM_ACCEL_FOPS,
+};
+
+static const struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
+
+ .open = ivpu_open,
+ .postclose = ivpu_postclose,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = ivpu_gem_prime_import,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+
+ .ioctls = ivpu_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
+ .fops = &ivpu_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRM_IVPU_DRIVER_MAJOR,
+ .minor = DRM_IVPU_DRIVER_MINOR,
+};
+
+static int ivpu_irq_init(struct ivpu_device *vdev)
+{
+ struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
+ return ret;
+ }
+
+ vdev->irq = pci_irq_vector(pdev, 0);
+
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_pci_init(struct ivpu_device *vdev)
+{
+ struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
+ struct resource *bar0 = &pdev->resource[0];
+ struct resource *bar4 = &pdev->resource[4];
+ int ret;
+
+ ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
+ vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
+ if (IS_ERR(vdev->regv)) {
+ ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
+ return PTR_ERR(vdev->regv);
+ }
+
+ ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
+ vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
+ if (IS_ERR(vdev->regb)) {
+ ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
+ return PTR_ERR(vdev->regb);
+ }
+
+ ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38));
+ if (ret) {
+ ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+ dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
+
+ /* Clear any pending errors */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
+
+ /* VPU MTL does not require PCI spec 10 ms D3hot delay */
+ if (ivpu_is_mtl(vdev))
+ pdev->d3hot_delay = 0;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int ivpu_dev_init(struct ivpu_device *vdev)
+{
+ int ret;
+
+ vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
+ if (!vdev->hw)
+ return -ENOMEM;
+
+ vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
+ if (!vdev->mmu)
+ return -ENOMEM;
+
+ vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
+ if (!vdev->fw)
+ return -ENOMEM;
+
+ vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
+ if (!vdev->ipc)
+ return -ENOMEM;
+
+ vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
+ if (!vdev->pm)
+ return -ENOMEM;
+
+ vdev->hw->ops = &ivpu_hw_mtl_ops;
+ vdev->platform = IVPU_PLATFORM_INVALID;
+ vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
+ vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
+ atomic64_set(&vdev->unique_id_counter, 0);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+
+ ret = ivpu_pci_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ ret = ivpu_irq_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ /* Init basic HW info based on buttress registers which are accessible before power up */
+ ret = ivpu_hw_info_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ /* Power up early so the rest of init code can access VPU registers */
+ ret = ivpu_hw_power_up(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ ret = ivpu_mmu_global_context_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ivpu_mmu_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret);
+ goto err_mmu_gctx_fini;
+ }
+
+ ret = ivpu_fw_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret);
+ goto err_mmu_gctx_fini;
+ }
+
+ ret = ivpu_ipc_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret);
+ goto err_fw_fini;
+ }
+
+ ret = ivpu_pm_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize PM: %d\n", ret);
+ goto err_ipc_fini;
+ }
+
+ ret = ivpu_job_done_thread_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret);
+ goto err_ipc_fini;
+ }
+
+ ret = ivpu_fw_load(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to load firmware: %d\n", ret);
+ goto err_job_done_thread_fini;
+ }
+
+ ret = ivpu_boot(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to boot: %d\n", ret);
+ goto err_job_done_thread_fini;
+ }
+
+ ivpu_pm_enable(vdev);
+
+ return 0;
+
+err_job_done_thread_fini:
+ ivpu_job_done_thread_fini(vdev);
+err_ipc_fini:
+ ivpu_ipc_fini(vdev);
+err_fw_fini:
+ ivpu_fw_fini(vdev);
+err_mmu_gctx_fini:
+ ivpu_mmu_global_context_fini(vdev);
+err_power_down:
+ ivpu_hw_power_down(vdev);
+ if (IVPU_WA(d3hot_after_power_off))
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_xa_destroy:
+ xa_destroy(&vdev->submitted_jobs_xa);
+ xa_destroy(&vdev->context_xa);
+ return ret;
+}
+
+static void ivpu_dev_fini(struct ivpu_device *vdev)
+{
+ ivpu_pm_disable(vdev);
+ ivpu_shutdown(vdev);
+ if (IVPU_WA(d3hot_after_power_off))
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+ ivpu_job_done_thread_fini(vdev);
+ ivpu_pm_cancel_recovery(vdev);
+
+ ivpu_ipc_fini(vdev);
+ ivpu_fw_fini(vdev);
+ ivpu_mmu_global_context_fini(vdev);
+
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+ xa_destroy(&vdev->submitted_jobs_xa);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
+ xa_destroy(&vdev->context_xa);
+}
+
+static struct pci_device_id ivpu_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
+
+static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ivpu_device *vdev;
+ int ret;
+
+ vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ pci_set_drvdata(pdev, vdev);
+
+ ret = ivpu_dev_init(vdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize VPU device: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_dev_register(&vdev->drm, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
+ ivpu_dev_fini(vdev);
+ }
+
+ return ret;
+}
+
+static void ivpu_remove(struct pci_dev *pdev)
+{
+ struct ivpu_device *vdev = pci_get_drvdata(pdev);
+
+ drm_dev_unplug(&vdev->drm);
+ ivpu_dev_fini(vdev);
+}
+
+static const struct dev_pm_ops ivpu_drv_pci_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
+ SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
+};
+
+static const struct pci_error_handlers ivpu_drv_pci_err = {
+ .reset_prepare = ivpu_pm_reset_prepare_cb,
+ .reset_done = ivpu_pm_reset_done_cb,
+};
+
+static struct pci_driver ivpu_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ivpu_pci_ids,
+ .probe = ivpu_probe,
+ .remove = ivpu_remove,
+ .driver = {
+ .pm = &ivpu_drv_pci_pm,
+ },
+ .err_handler = &ivpu_drv_pci_err,
+};
+
+module_pci_driver(ivpu_pci_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(DRIVER_VERSION_STR);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
new file mode 100644
index 000000000000..d3013fbd13b3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_DRV_H__
+#define __IVPU_DRV_H__
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
+
+#include <linux/pci.h>
+#include <linux/xarray.h>
+#include <uapi/drm/ivpu_accel.h>
+
+#include "ivpu_mmu_context.h"
+
+#define DRIVER_NAME "intel_vpu"
+#define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)"
+#define DRIVER_DATE "20230117"
+
+#define PCI_DEVICE_ID_MTL 0x7d1d
+
+#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
+/* SSID 1 is used by the VPU to represent invalid context */
+#define IVPU_USER_CONTEXT_MIN_SSID 2
+#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63)
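+/* SSIDs 2..65, i.e. up to 64 concurrent user contexts */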
+
+#define IVPU_NUM_ENGINES 2
+
+#define IVPU_PLATFORM_SILICON 0
+#define IVPU_PLATFORM_SIMICS 2
+#define IVPU_PLATFORM_FPGA 3
+#define IVPU_PLATFORM_INVALID 8
+
+#define IVPU_DBG_REG BIT(0)
+#define IVPU_DBG_IRQ BIT(1)
+#define IVPU_DBG_MMU BIT(2)
+#define IVPU_DBG_FILE BIT(3)
+#define IVPU_DBG_MISC BIT(4)
+#define IVPU_DBG_FW_BOOT BIT(5)
+#define IVPU_DBG_PM BIT(6)
+#define IVPU_DBG_IPC BIT(7)
+#define IVPU_DBG_BO BIT(8)
+#define IVPU_DBG_JOB BIT(9)
+#define IVPU_DBG_JSM BIT(10)
+#define IVPU_DBG_KREF BIT(11)
+#define IVPU_DBG_RPM BIT(12)
+
+#define ivpu_err(vdev, fmt, ...) \
+ drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_err_ratelimited(vdev, fmt, ...) \
+ drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_warn(vdev, fmt, ...) \
+ drm_warn(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_warn_ratelimited(vdev, fmt, ...) \
+ drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_info(vdev, fmt, ...) drm_info(&(vdev)->drm, fmt, ##__VA_ARGS__)
+
+#define ivpu_dbg(vdev, type, fmt, args...) do { \
+ if (unlikely(IVPU_DBG_##type & ivpu_dbg_mask)) \
+ dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args); \
+} while (0)
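+
+/* Example: dbg_mask=0x300 (IVPU_DBG_BO | IVPU_DBG_JOB) enables BO and job debug messages */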
+
+#define IVPU_WA(wa_name) (vdev->wa.wa_name)
+
+struct ivpu_wa_table {
+ bool punit_disabled;
+ bool clear_runtime_mem;
+ bool d3hot_after_power_off;
+};
+
+struct ivpu_hw_info;
+struct ivpu_mmu_info;
+struct ivpu_fw_info;
+struct ivpu_ipc_info;
+struct ivpu_pm_info;
+
+struct ivpu_device {
+ struct drm_device drm;
+ void __iomem *regb;
+ void __iomem *regv;
+ u32 platform;
+ u32 irq;
+
+ struct ivpu_wa_table wa;
+ struct ivpu_hw_info *hw;
+ struct ivpu_mmu_info *mmu;
+ struct ivpu_fw_info *fw;
+ struct ivpu_ipc_info *ipc;
+ struct ivpu_pm_info *pm;
+
+ struct ivpu_mmu_context gctx;
+ struct xarray context_xa;
+ struct xa_limit context_xa_limit;
+
+ struct xarray submitted_jobs_xa;
+ struct task_struct *job_done_thread;
+
+ atomic64_t unique_id_counter;
+
+ struct {
+ int boot;
+ int jsm;
+ int tdr;
+ int reschedule_suspend;
+ } timeout;
+};
+
+/*
+ * file_priv has its own refcount (ref) that allows user space to close the fd
+ * without blocking even if VPU is still processing some jobs.
+ */
+struct ivpu_file_priv {
+ struct kref ref;
+ struct ivpu_device *vdev;
+ struct mutex lock; /* Protects cmdq */
+ struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
+ struct ivpu_mmu_context ctx;
+ u32 priority;
+ bool has_mmu_faults;
+};
+
+extern int ivpu_dbg_mask;
+extern u8 ivpu_pll_min_ratio;
+extern u8 ivpu_pll_max_ratio;
+
+#define IVPU_TEST_MODE_DISABLED 0
+#define IVPU_TEST_MODE_FW_TEST 1
+#define IVPU_TEST_MODE_NULL_HW 2
+extern int ivpu_test_mode;
+
+struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
+struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
+void ivpu_file_priv_put(struct ivpu_file_priv **link);
+
+int ivpu_boot(struct ivpu_device *vdev);
+int ivpu_shutdown(struct ivpu_device *vdev);
+
+static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
+{
+ return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
+}
+
+static inline u8 ivpu_revision(struct ivpu_device *vdev)
+{
+ return to_pci_dev(vdev->drm.dev)->revision;
+}
+
+static inline u16 ivpu_device_id(struct ivpu_device *vdev)
+{
+ return to_pci_dev(vdev->drm.dev)->device;
+}
+
+static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
+{
+ return container_of(dev, struct ivpu_device, drm);
+}
+
+static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
+{
+ struct xa_limit ctx_limit = vdev->context_xa_limit;
+
+ return (ctx_limit.max - ctx_limit.min + 1);
+}
+
+static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
+{
+ WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
+ return vdev->platform;
+}
+
+static inline bool ivpu_is_silicon(struct ivpu_device *vdev)
+{
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_SILICON;
+}
+
+static inline bool ivpu_is_simics(struct ivpu_device *vdev)
+{
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_SIMICS;
+}
+
+static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
+{
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+}
+
+#endif /* __IVPU_DRV_H__ */
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
new file mode 100644
index 000000000000..f58951a0d81b
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_pm.h"
+
+#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
+#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
+#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
+#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
+#define FW_RUNTIME_MAX_SIZE SZ_512M
+#define FW_SHAVE_NN_MAX_SIZE SZ_2M
+#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
+#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
+#define FW_VERSION_HEADER_SIZE SZ_4K
+#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
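+/*
+ * Firmware file layout as parsed below: a vpu_firmware_header, a 4K version
+ * string block, then the image itself at FW_FILE_IMAGE_OFFSET, which
+ * ivpu_fw_load() copies to image_load_address within the runtime region.
+ */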
+
+#define WATCHDOG_MSS_REDIRECT 32
+#define WATCHDOG_NCE_REDIRECT 33
+
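+/* Address to L2 cache config: the >> 31 encodes the address at 2 GB granularity */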
+#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
+
+#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
+ ivpu_fw_check_api(vdev, fw_hdr, #name, \
+ VPU_##name##_API_VER_INDEX, \
+ VPU_##name##_API_VER_MAJOR, \
+ VPU_##name##_API_VER_MINOR, min_major)
+
+static char *ivpu_firmware;
+module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
+MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+
+static int ivpu_fw_request(struct ivpu_device *vdev)
+{
+ static const char * const fw_names[] = {
+ "mtl_vpu.bin",
+ "intel/vpu/mtl_vpu_v0.0.bin"
+ };
+ int ret = -ENOENT;
+ int i;
+
+ if (ivpu_firmware)
+ return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+
+ for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
+ ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
+ if (!ret)
+ return 0;
+ }
+
+ ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
+ return ret;
+}
+
+static int
+ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
+ const char *str, int index, u16 expected_major, u16 expected_minor,
+ u16 min_major)
+{
+ u16 major = (u16)(fw_hdr->api_version[index] >> 16);
+ u16 minor = (u16)(fw_hdr->api_version[index]);
+
+ if (major < min_major) {
+ ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
+ str, major, minor, min_major);
+ return -EINVAL;
+ }
+ if (major != expected_major) {
+ ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
+ str, major, minor, expected_major, expected_minor);
+ }
+ ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
+ str, major, minor, expected_major, expected_minor);
+
+ return 0;
+}
+
+static int ivpu_fw_parse(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
+ u64 runtime_addr, image_load_addr, runtime_size, image_size;
+
+ if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
+ ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
+ return -EINVAL;
+ }
+
+ if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
+ ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
+ return -EINVAL;
+ }
+
+ runtime_addr = fw_hdr->boot_params_load_address;
+ runtime_size = fw_hdr->runtime_size;
+ image_load_addr = fw_hdr->image_load_address;
+ image_size = fw_hdr->image_size;
+
+ if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
+ ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
+ return -EINVAL;
+ }
+
+ if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
+ ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
+ return -EINVAL;
+ }
+
+ if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
+ ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
+ return -EINVAL;
+ }
+
+ if (image_load_addr < runtime_addr ||
+ image_load_addr + image_size > runtime_addr + runtime_size) {
+ ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
+ image_load_addr, image_size);
+ return -EINVAL;
+ }
+
+ if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
+ ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ return -EINVAL;
+ }
+
+ if (fw_hdr->entry_point < image_load_addr ||
+ fw_hdr->entry_point >= image_load_addr + image_size) {
+ ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
+ return -EINVAL;
+ }
+ ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
+ fw_hdr->header_version, fw_hdr->image_format);
+ ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
+
+ if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
+ return -EINVAL;
+ if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
+ return -EINVAL;
+
+ fw->runtime_addr = runtime_addr;
+ fw->runtime_size = runtime_size;
+ fw->image_load_offset = image_load_addr - runtime_addr;
+ fw->image_size = image_size;
+ fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);
+
+ fw->cold_boot_entry_point = fw_hdr->entry_point;
+ fw->entry_point = fw->cold_boot_entry_point;
+
+ ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
+ fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
+ ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
+ fw->runtime_addr, image_load_addr, fw->entry_point);
+
+ return 0;
+}
+
+static void ivpu_fw_release(struct ivpu_device *vdev)
+{
+ release_firmware(vdev->fw->file);
+}
+
+static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
+ u64 size = FW_SHARED_MEM_SIZE;
+
+ if (start + size > FW_GLOBAL_MEM_END) {
+ ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
+ return -EINVAL;
+ }
+
+ ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+ return 0;
+}
+
+static int ivpu_fw_mem_init(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ int ret;
+
+ ret = ivpu_fw_update_global_range(vdev);
+ if (ret)
+ return ret;
+
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ if (!fw->mem) {
+ ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
+ return -ENOMEM;
+ }
+
+ if (fw->shave_nn_size) {
+ fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+ fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
+ if (!fw->mem_shave_nn) {
+ ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
+ ivpu_bo_free_internal(fw->mem);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+
+ if (fw->mem_shave_nn) {
+ ivpu_bo_free_internal(fw->mem_shave_nn);
+ fw->mem_shave_nn = NULL;
+ }
+
+ ivpu_bo_free_internal(fw->mem);
+ fw->mem = NULL;
+}
+
+int ivpu_fw_init(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_fw_request(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_fw_parse(vdev);
+ if (ret)
+ goto err_fw_release;
+
+ ret = ivpu_fw_mem_init(vdev);
+ if (ret)
+ goto err_fw_release;
+
+ return 0;
+
+err_fw_release:
+ ivpu_fw_release(vdev);
+ return ret;
+}
+
+void ivpu_fw_fini(struct ivpu_device *vdev)
+{
+ ivpu_fw_mem_fini(vdev);
+ ivpu_fw_release(vdev);
+}
+
+int ivpu_fw_load(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u64 image_end_offset = fw->image_load_offset + fw->image_size;
+
+ memset(fw->mem->kvaddr, 0, fw->image_load_offset);
+ memcpy(fw->mem->kvaddr + fw->image_load_offset,
+ fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);
+
+ if (IVPU_WA(clear_runtime_mem)) {
+ u8 *start = fw->mem->kvaddr + image_end_offset;
+ u64 size = fw->mem->base.size - image_end_offset;
+
+ memset(start, 0, size);
+ }
+
+ wmb(); /* Flush WC buffers after writing fw->mem */
+
+ return 0;
+}
+
+static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
+{
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
+ boot_params->magic);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
+ boot_params->vpu_id);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
+ boot_params->vpu_count);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
+ boot_params->frequency);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
+ boot_params->perf_clk_frequency);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
+ boot_params->ipc_header_area_start);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
+ boot_params->ipc_header_area_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
+ boot_params->shared_region_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
+ boot_params->shared_region_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
+ boot_params->ipc_payload_area_start);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
+ boot_params->ipc_payload_area_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
+ boot_params->global_aliased_pio_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
+ boot_params->global_aliased_pio_size);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
+ boot_params->autoconfig);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
+ boot_params->global_memory_allocator_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
+ boot_params->global_memory_allocator_size);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
+ boot_params->shave_nn_fw_base);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
+ boot_params->watchdog_irq_mss);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
+ boot_params->watchdog_irq_nce);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
+ boot_params->host_to_vpu_irq);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
+ boot_params->job_done_irq);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
+ boot_params->host_version_id);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
+ boot_params->si_stepping);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
+ boot_params->device_id);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
+ boot_params->feature_exclusion);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
+ boot_params->sku);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
+ boot_params->min_freq_pll_ratio);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
+ boot_params->pn_freq_pll_ratio);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
+ boot_params->max_freq_pll_ratio);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
+ boot_params->default_trace_level);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
+ boot_params->tracing_buff_message_format_mask);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
+ boot_params->trace_destination_mask);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
+ boot_params->trace_hw_component_mask);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
+ boot_params->boot_type);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
+ boot_params->punit_telemetry_sram_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
+ boot_params->punit_telemetry_sram_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
+ boot_params->vpu_telemetry_enable);
+}
+
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
+{
+ struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;
+
+ /* In case of warm boot we only have to reset the entrypoint addr */
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ boot_params->save_restore_ret_address = 0;
+ vdev->pm->is_warmboot = true;
+ return;
+ }
+
+ vdev->pm->is_warmboot = false;
+
+ boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
+ boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
+ boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
+
+ /*
+ * Uncached region of VPU address space, covers IPC buffers, job queues
+ * and log buffers, programmable to L2$ Uncached by VPU MTRR
+ */
+ boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
+ boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
+ vdev->hw->ranges.global_low.start;
+
+ boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
+ boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
+
+ boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
+ boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
+
+ boot_params->global_aliased_pio_base =
+ vdev->hw->ranges.global_aliased_pio.start;
+ boot_params->global_aliased_pio_size =
+ ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+
+ /* Allow configuration for L2C_PAGE_TABLE with boot param value */
+ boot_params->autoconfig = 1;
+
+ /* Enable L2 cache for first 2GB of high memory */
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
+ ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+
+ if (vdev->fw->mem_shave_nn)
+ boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
+
+ boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
+ boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
+ boot_params->si_stepping = ivpu_revision(vdev);
+ boot_params->device_id = ivpu_device_id(vdev);
+ boot_params->feature_exclusion = vdev->hw->tile_fuse;
+ boot_params->sku = vdev->hw->sku;
+
+ boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
+ boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
+ boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;
+
+ boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
+ boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
+ boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+
+ wmb(); /* Flush WC buffers after writing bootparams */
+
+ ivpu_fw_boot_params_print(vdev, boot_params);
+}
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
new file mode 100644
index 000000000000..8d275c802d1c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_FW_H__
+#define __IVPU_FW_H__
+
+struct ivpu_device;
+struct ivpu_bo;
+struct vpu_boot_params;
+
+struct ivpu_fw_info {
+ const struct firmware *file;
+ struct ivpu_bo *mem;
+ struct ivpu_bo *mem_shave_nn;
+ struct ivpu_bo *mem_log_crit;
+ struct ivpu_bo *mem_log_verb;
+ u64 runtime_addr;
+ u32 runtime_size;
+ u64 image_load_offset;
+ u32 image_size;
+ u32 shave_nn_size;
+ u64 entry_point; /* Cold or warm boot entry point for next boot */
+ u64 cold_boot_entry_point;
+};
+
+int ivpu_fw_init(struct ivpu_device *vdev);
+void ivpu_fw_fini(struct ivpu_device *vdev);
+int ivpu_fw_load(struct ivpu_device *vdev);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+
+static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
+{
+ return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
+}
+
+#endif /* __IVPU_FW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
new file mode 100644
index 000000000000..52b339aefadc
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/set_memory.h>
+#include <linux/xarray.h>
+
+#include <drm/drm_cache.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+static const struct drm_gem_object_funcs ivpu_gem_funcs;
+
+static struct lock_class_key prime_bo_lock_class_key;
+
+static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
+{
+ /* Pages are managed by the underlying dma-buf */
+ return 0;
+}
+
+static void prime_free_pages_locked(struct ivpu_bo *bo)
+{
+ /* Pages are managed by the underlying dma-buf */
+}
+
+static int prime_map_pages_locked(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
+
+ sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
+ return PTR_ERR(sgt);
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void prime_unmap_pages_locked(struct ivpu_bo *bo)
+{
+ dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
+ bo->sgt = NULL;
+}
+
+static const struct ivpu_bo_ops prime_ops = {
+ .type = IVPU_BO_TYPE_PRIME,
+ .name = "prime",
+ .alloc_pages = prime_alloc_pages_locked,
+ .free_pages = prime_free_pages_locked,
+ .map_pages = prime_map_pages_locked,
+ .unmap_pages = prime_unmap_pages_locked,
+};
+
+static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
+{
+ int npages = bo->base.size >> PAGE_SHIFT;
+ struct page **pages;
+
+ pages = drm_gem_get_pages(&bo->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ if (bo->flags & DRM_IVPU_BO_WC)
+ set_pages_array_wc(pages, npages);
+ else if (bo->flags & DRM_IVPU_BO_UNCACHED)
+ set_pages_array_uc(pages, npages);
+
+ bo->pages = pages;
+ return 0;
+}
+
+static void shmem_free_pages_locked(struct ivpu_bo *bo)
+{
+ if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+ set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
+
+ drm_gem_put_pages(&bo->base, bo->pages, true, false);
+ bo->pages = NULL;
+}
+
+static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
+{
+ int npages = bo->base.size >> PAGE_SHIFT;
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
+ if (IS_ERR(sgt)) {
+ ivpu_err(vdev, "Failed to allocate sgtable\n");
+ return PTR_ERR(sgt);
+ }
+
+ ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+ goto err_free_sgt;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+
+err_free_sgt:
+ kfree(sgt);
+ return ret;
+}
+
+static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+ dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
+ bo->sgt = NULL;
+}
+
+static const struct ivpu_bo_ops shmem_ops = {
+ .type = IVPU_BO_TYPE_SHMEM,
+ .name = "shmem",
+ .alloc_pages = shmem_alloc_pages_locked,
+ .free_pages = shmem_free_pages_locked,
+ .map_pages = ivpu_bo_map_pages_locked,
+ .unmap_pages = ivpu_bo_unmap_pages_locked,
+};
+
+static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
+{
+ unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
+ struct page **pages;
+ int ret;
+
+ pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!pages[i]) {
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+ cond_resched();
+ }
+
+ bo->pages = pages;
+ return 0;
+
+err_free_pages:
+ while (i--)
+ put_page(pages[i]);
+ kvfree(pages);
+ return ret;
+}
+
+static void internal_free_pages_locked(struct ivpu_bo *bo)
+{
+ unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++)
+ put_page(bo->pages[i]);
+
+ kvfree(bo->pages);
+ bo->pages = NULL;
+}
+
+static const struct ivpu_bo_ops internal_ops = {
+ .type = IVPU_BO_TYPE_INTERNAL,
+ .name = "internal",
+ .alloc_pages = internal_alloc_pages_locked,
+ .free_pages = internal_free_pages_locked,
+ .map_pages = ivpu_bo_map_pages_locked,
+ .unmap_pages = ivpu_bo_unmap_pages_locked,
+};
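+
+/*
+ * Backing storage summary: shmem BOs use drm_gem_get_pages(), internal BOs
+ * use directly allocated pages, and prime BOs rely on the imported dma-buf
+ * attachment for both pages and DMA mapping.
+ */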
+
+static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ int ret;
+
+ lockdep_assert_held(&bo->lock);
+ drm_WARN_ON(&vdev->drm, bo->sgt);
+
+ ret = bo->ops->alloc_pages(bo);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
+ return ret;
+ }
+
+ ret = bo->ops->map_pages(bo);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
+ goto err_free_pages;
+ }
+ return ret;
+
+err_free_pages:
+ bo->ops->free_pages(bo);
+ return ret;
+}
+
+static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
+{
+ mutex_lock(&bo->lock);
+
+ WARN_ON(!bo->sgt);
+ bo->ops->unmap_pages(bo);
+ WARN_ON(bo->sgt);
+ bo->ops->free_pages(bo);
+ WARN_ON(bo->pages);
+
+ mutex_unlock(&bo->lock);
+}
+
+/*
+ * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ *
+ * This function pins physical memory pages, then maps the physical pages
+ * to IOMMU address space and finally updates the VPU MMU page tables
+ * to allow the VPU to translate VPU address to IOMMU address.
+ */
+int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ int ret = 0;
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->vpu_addr) {
+ ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
+ bo->ctx->id, bo->handle);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!bo->sgt) {
+ ret = ivpu_bo_alloc_and_map_pages_locked(bo);
+ if (ret)
+ goto unlock;
+ }
+
+ if (!bo->mmu_mapped) {
+ ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
+ ivpu_bo_is_snooped(bo));
+ if (ret) {
+ ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
+ goto unlock;
+ }
+ bo->mmu_mapped = true;
+ }
+
+unlock:
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+static int
+ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ int ret;
+
+ if (!range) {
+ if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
+ range = &vdev->hw->ranges.user_high;
+ else
+ range = &vdev->hw->ranges.user_low;
+ }
+
+ mutex_lock(&ctx->lock);
+ ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
+ if (!ret) {
+ bo->ctx = ctx;
+ bo->vpu_addr = bo->mm_node.start;
+ list_add_tail(&bo->ctx_node, &ctx->bo_list);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct ivpu_mmu_context *ctx = bo->ctx;
+
+ ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
+ ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
+
+ mutex_lock(&bo->lock);
+
+ if (bo->mmu_mapped) {
+ drm_WARN_ON(&vdev->drm, !bo->sgt);
+ ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
+ bo->mmu_mapped = false;
+ }
+
+ mutex_lock(&ctx->lock);
+ list_del(&bo->ctx_node);
+ bo->vpu_addr = 0;
+ bo->ctx = NULL;
+ ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
+ mutex_unlock(&ctx->lock);
+
+ mutex_unlock(&bo->lock);
+}
+
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
+{
+ struct ivpu_bo *bo, *tmp;
+
+ list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
+ ivpu_bo_free_vpu_addr(bo);
+}
+
+static struct ivpu_bo *
+ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
+ u64 size, u32 flags, const struct ivpu_bo_ops *ops,
+ const struct ivpu_addr_range *range, u64 user_ptr)
+{
+ struct ivpu_bo *bo;
+ int ret = 0;
+
+ if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
+ return ERR_PTR(-EINVAL);
+
+ switch (flags & DRM_IVPU_BO_CACHE_MASK) {
+ case DRM_IVPU_BO_CACHED:
+ case DRM_IVPU_BO_UNCACHED:
+ case DRM_IVPU_BO_WC:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&bo->lock);
+ bo->base.funcs = &ivpu_gem_funcs;
+ bo->flags = flags;
+ bo->ops = ops;
+ bo->user_ptr = user_ptr;
+
+ if (ops->type == IVPU_BO_TYPE_SHMEM)
+ ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
+ else
+ drm_gem_private_object_init(&vdev->drm, &bo->base, size);
+
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize drm object\n");
+ goto err_free;
+ }
+
+ if (flags & DRM_IVPU_BO_MAPPABLE) {
+ ret = drm_gem_create_mmap_offset(&bo->base);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate mmap offset\n");
+ goto err_release;
+ }
+ }
+
+ if (mmu_context) {
+ ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
+ if (ret) {
+ ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
+ goto err_release;
+ }
+ }
+
+ return bo;
+
+err_release:
+ drm_gem_object_release(&bo->base);
+err_free:
+ kfree(bo);
+ return ERR_PTR(ret);
+}
+
+static void ivpu_bo_free(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+ if (bo->ctx)
+ ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
+ bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
+ else
+ ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
+ (bool)bo->sgt, bo->mmu_mapped);
+
+ drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+
+ vunmap(bo->kvaddr);
+
+ if (bo->ctx)
+ ivpu_bo_free_vpu_addr(bo);
+
+ if (bo->sgt)
+ ivpu_bo_unmap_and_free_pages(bo);
+
+ if (bo->base.import_attach)
+ drm_prime_gem_destroy(&bo->base, bo->sgt);
+
+ drm_gem_object_release(&bo->base);
+
+ mutex_destroy(&bo->lock);
+ kfree(bo);
+}
+
+static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+ ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
+ bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);
+
+ if (obj->import_attach) {
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ drm_gem_object_put(obj);
+ vma->vm_private_data = NULL;
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
+ }
+
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
+ vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
+static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ loff_t npages = obj->size >> PAGE_SHIFT;
+ int ret = 0;
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->sgt)
+ ret = ivpu_bo_alloc_and_map_pages_locked(bo);
+
+ mutex_unlock(&bo->lock);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
+}
+
+static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ loff_t npages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset;
+ struct page *page;
+ vm_fault_t ret;
+ int err;
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->sgt) {
+ err = ivpu_bo_alloc_and_map_pages_locked(bo);
+ if (err) {
+ ret = vmf_error(err);
+ goto unlock;
+ }
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ if (page_offset >= npages) {
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ page = bo->pages[page_offset];
+ ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
+ }
+
+unlock:
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+static const struct vm_operations_struct ivpu_vm_ops = {
+ .fault = ivpu_vm_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs ivpu_gem_funcs = {
+ .free = ivpu_bo_free,
+ .mmap = ivpu_bo_mmap,
+ .vm_ops = &ivpu_vm_ops,
+ .get_sg_table = ivpu_bo_get_sg_table,
+};
+
+int
+ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_bo_create *args = data;
+ u64 size = PAGE_ALIGN(args->size);
+ struct ivpu_bo *bo;
+ int ret;
+
+ if (args->flags & ~DRM_IVPU_BO_FLAGS)
+ return -EINVAL;
+
+ if (size == 0)
+ return -EINVAL;
+
+ bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ return PTR_ERR(bo);
+ }
+
+ ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
+ if (!ret) {
+ args->vpu_addr = bo->vpu_addr;
+ args->handle = bo->handle;
+ }
+
+ drm_gem_object_put(&bo->base);
+
+ ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
+ file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
+
+ return ret;
+}
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
+{
+ const struct ivpu_addr_range *range;
+ struct ivpu_addr_range fixed_range;
+ struct ivpu_bo *bo;
+ pgprot_t prot;
+ int ret;
+
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+
+ if (vpu_addr) {
+ fixed_range.start = vpu_addr;
+ fixed_range.end = vpu_addr + size;
+ range = &fixed_range;
+ } else {
+ range = &vdev->hw->ranges.global_low;
+ }
+
+ bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ bo, vpu_addr, size, flags);
+ return NULL;
+ }
+
+ ret = ivpu_bo_pin(bo);
+ if (ret)
+ goto err_put;
+
+ if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+ drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
+
+ prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
+ bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
+ if (!bo->kvaddr) {
+ ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
+ goto err_put;
+ }
+
+ ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
+ bo->vpu_addr, bo->base.size, flags);
+
+ return bo;
+
+err_put:
+ drm_gem_object_put(&bo->base);
+ return NULL;
+}
+
+void ivpu_bo_free_internal(struct ivpu_bo *bo)
+{
+ drm_gem_object_put(&bo->base);
+}
+
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct dma_buf_attachment *attach;
+ struct ivpu_bo *bo;
+
+ attach = dma_buf_attach(buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(buf);
+
+ bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
+ goto err_detach;
+ }
+
+ lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
+
+ bo->base.import_attach = attach;
+
+ return &bo->base;
+
+err_detach:
+ dma_buf_detach(buf, attach);
+ dma_buf_put(buf);
+ return ERR_CAST(bo);
+}
+
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct drm_ivpu_bo_info *args = data;
+ struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->ctx) {
+ ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
+ goto unlock;
+ }
+ }
+
+ args->flags = bo->flags;
+ args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
+ args->vpu_addr = bo->vpu_addr;
+ args->size = obj->size;
+unlock:
+ mutex_unlock(&bo->lock);
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_bo_wait *args = data;
+ struct drm_gem_object *obj;
+ unsigned long timeout;
+ long ret;
+
+ timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -EINVAL;
+
+ ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ args->job_status = to_ivpu_bo(obj)->job_status;
+ }
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+{
+ unsigned long dma_refcount = 0;
+
+ if (bo->base.dma_buf && bo->base.dma_buf->file)
+ dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
+
+ drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
+ bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
+ kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
+}
+
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+ struct ivpu_bo *bo;
+
+ drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
+ "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
+
+ mutex_lock(&vdev->gctx.lock);
+ list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
+ ivpu_bo_print_info(bo, p);
+ mutex_unlock(&vdev->gctx.lock);
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
+ if (!file_priv)
+ continue;
+
+ mutex_lock(&file_priv->ctx.lock);
+ list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
+ ivpu_bo_print_info(bo, p);
+ mutex_unlock(&file_priv->ctx.lock);
+
+ ivpu_file_priv_put(&file_priv);
+ }
+}
+
+void ivpu_bo_list_print(struct drm_device *dev)
+{
+ struct drm_printer p = drm_info_printer(dev->dev);
+
+ ivpu_bo_list(dev, &p);
+}
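
Illustration only (not part of the patch): a minimal userspace sketch of exercising the BO create ioctl handled above. It assumes the uapi names from drm/ivpu_accel.h (struct drm_ivpu_bo_create, DRM_IOCTL_IVPU_BO_CREATE and the DRM_IVPU_BO_MAPPABLE flag used elsewhere in this patch) plus an already opened accel device node:

    /* Hypothetical userspace helper: allocate a shmem BO and print its VPU address. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    #include <drm/ivpu_accel.h>

    static int example_bo_create(int fd, unsigned long long size)
    {
            struct drm_ivpu_bo_create args;

            memset(&args, 0, sizeof(args));
            args.size = size;
            args.flags = DRM_IVPU_BO_MAPPABLE;

            if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args))
                    return -1;      /* errno describes the failure */

            printf("handle %u vpu_addr 0x%llx\n", args.handle,
                   (unsigned long long)args.vpu_addr);
            return 0;
    }
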
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
new file mode 100644
index 000000000000..6b0ceda5f253
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+#ifndef __IVPU_GEM_H__
+#define __IVPU_GEM_H__
+
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+
+struct dma_buf;
+struct ivpu_bo_ops;
+struct ivpu_file_priv;
+
+struct ivpu_bo {
+ struct drm_gem_object base;
+ const struct ivpu_bo_ops *ops;
+
+ struct ivpu_mmu_context *ctx;
+ struct list_head ctx_node;
+ struct drm_mm_node mm_node;
+
+ struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
+ struct sg_table *sgt;
+ struct page **pages;
+ bool mmu_mapped;
+
+ void *kvaddr;
+ u64 vpu_addr;
+ u32 handle;
+ u32 flags;
+ uintptr_t user_ptr;
+ u32 job_status;
+};
+
+enum ivpu_bo_type {
+ IVPU_BO_TYPE_SHMEM = 1,
+ IVPU_BO_TYPE_INTERNAL,
+ IVPU_BO_TYPE_PRIME,
+};
+
+struct ivpu_bo_ops {
+ enum ivpu_bo_type type;
+ const char *name;
+ int (*alloc_pages)(struct ivpu_bo *bo);
+ void (*free_pages)(struct ivpu_bo *bo);
+ int (*map_pages)(struct ivpu_bo *bo);
+ void (*unmap_pages)(struct ivpu_bo *bo);
+};
+
+int ivpu_bo_pin(struct ivpu_bo *bo);
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
+void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
+
+int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct ivpu_bo, base);
+}
+
+static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
+{
+ if (offset > bo->base.size || !bo->pages)
+ return NULL;
+
+ return bo->pages[offset / PAGE_SIZE];
+}
+
+static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_CACHE_MASK;
+}
+
+static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+{
+ return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+}
+
+static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
+{
+ if (bo->flags & DRM_IVPU_BO_WC)
+ return pgprot_writecombine(prot);
+
+ if (bo->flags & DRM_IVPU_BO_UNCACHED)
+ return pgprot_noncached(prot);
+
+ return prot;
+}
+
+static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+{
+ return to_ivpu_device(bo->base.dev);
+}
+
+static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
+{
+ if (vpu_addr < bo->vpu_addr)
+ return NULL;
+
+ if (vpu_addr >= (bo->vpu_addr + bo->base.size))
+ return NULL;
+
+ return bo->kvaddr + (vpu_addr - bo->vpu_addr);
+}
+
+static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
+{
+ if (cpu_addr < bo->kvaddr)
+ return 0;
+
+ if (cpu_addr >= (bo->kvaddr + bo->base.size))
+ return 0;
+
+ return bo->vpu_addr + (cpu_addr - bo->kvaddr);
+}
+
+#endif /* __IVPU_GEM_H__ */
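
Illustration only (not part of the patch): the ivpu_to_cpu_addr()/cpu_to_vpu_addr() helpers above are plain offset arithmetic between a BO's VPU address and its kernel mapping. The self-contained sketch below replays the same computation with made-up values:

    /* Same offset arithmetic as ivpu_to_cpu_addr()/cpu_to_vpu_addr(), illustrative values only. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t kvaddr[4096];                   /* stands in for bo->kvaddr */
            uint64_t vpu_base = 0x80000000ull;      /* stands in for bo->vpu_addr */
            uint64_t vpu_addr = vpu_base + 0x200;

            /* ivpu_to_cpu_addr(): cpu = kvaddr + (vpu_addr - bo->vpu_addr) */
            uint8_t *cpu = kvaddr + (vpu_addr - vpu_base);

            /* cpu_to_vpu_addr(): vpu = bo->vpu_addr + (cpu - kvaddr) */
            assert(vpu_base + (uint64_t)(cpu - kvaddr) == vpu_addr);
            return 0;
    }
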
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
new file mode 100644
index 000000000000..50a9304ab09c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_H__
+#define __IVPU_HW_H__
+
+#include "ivpu_drv.h"
+
+struct ivpu_hw_ops {
+ int (*info_init)(struct ivpu_device *vdev);
+ int (*power_up)(struct ivpu_device *vdev);
+ int (*boot_fw)(struct ivpu_device *vdev);
+ int (*power_down)(struct ivpu_device *vdev);
+ bool (*is_idle)(struct ivpu_device *vdev);
+ void (*wdt_disable)(struct ivpu_device *vdev);
+ void (*diagnose_failure)(struct ivpu_device *vdev);
+ u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
+ void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id);
+ u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev);
+ u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev);
+ void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr);
+ void (*irq_clear)(struct ivpu_device *vdev);
+ void (*irq_enable)(struct ivpu_device *vdev);
+ void (*irq_disable)(struct ivpu_device *vdev);
+ irqreturn_t (*irq_handler)(int irq, void *ptr);
+};
+
+struct ivpu_addr_range {
+ resource_size_t start;
+ resource_size_t end;
+};
+
+struct ivpu_hw_info {
+ const struct ivpu_hw_ops *ops;
+ struct {
+ struct ivpu_addr_range global_low;
+ struct ivpu_addr_range global_high;
+ struct ivpu_addr_range user_low;
+ struct ivpu_addr_range user_high;
+ struct ivpu_addr_range global_aliased_pio;
+ } ranges;
+ struct {
+ u8 min_ratio;
+ u8 max_ratio;
+		/*
+		 * PLL ratio for the efficiency frequency. The VPU has the
+		 * optimum performance-to-power ratio at this frequency.
+		 */
+ u8 pn_ratio;
+ u32 profiling_freq;
+ } pll;
+ u32 tile_fuse;
+ u32 sku;
+ u16 config;
+};
+
+extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
+
+static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->info_init(vdev);
+};
+
+static inline int ivpu_hw_power_up(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW power up\n");
+
+ return vdev->hw->ops->power_up(vdev);
+};
+
+static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->boot_fw(vdev);
+};
+
+static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->is_idle(vdev);
+};
+
+static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW power down\n");
+
+ return vdev->hw->ops->power_down(vdev);
+};
+
+static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->wdt_disable(vdev);
+};
+
+/* Register indirect accesses */
+static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_pll_freq_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_telemetry_offset_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_telemetry_size_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_telemetry_enable_get(vdev);
+};
+
+static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ vdev->hw->ops->reg_db_set(vdev, db_id);
+};
+
+static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_ipc_rx_addr_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_ipc_rx_count_get(vdev);
+};
+
+static inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr);
+};
+
+static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->irq_clear(vdev);
+};
+
+static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->irq_enable(vdev);
+};
+
+static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->irq_disable(vdev);
+};
+
+static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size)
+{
+ range->start = start;
+ range->end = start + size;
+}
+
+static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
+{
+ return range->end - range->start;
+}
+
+static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->diagnose_failure(vdev);
+}
+
+#endif /* __IVPU_HW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
new file mode 100644
index 000000000000..382ec127be8e
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#define TILE_FUSE_ENABLE_BOTH 0x0
+#define TILE_SKU_BOTH_MTL 0x3630
+
+/* Work point configuration values */
+#define CONFIG_1_TILE 0x01
+#define CONFIG_2_TILE 0x02
+#define PLL_RATIO_5_3 0x01
+#define PLL_RATIO_4_3 0x02
+#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
+#define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
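+/*
+ * Example encoding: WP_CONFIG() packs the tile count into the upper byte and
+ * the PLL ratio select into the lower byte, so WP_CONFIG_2_TILE_4_3_RATIO is
+ * (0x02 << 8) | 0x02 = 0x0202.
+ */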
+
+#define PLL_REF_CLK_FREQ (50 * 1000000)
+#define PLL_SIMULATION_FREQ (10 * 1000000)
+#define PLL_DEFAULT_EPP_VALUE 0x80
+
+#define TIM_SAFE_ENABLE 0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
+
+#define TIMEOUT_US (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
+
+#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
+
+#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+ (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+
+#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
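+/*
+ * In the buttress interrupt mask registers a set bit masks the source, so the
+ * enable mask unmasks only the interrupts handled by this driver while the
+ * disable mask masks all of them.
+ */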
+
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static char *ivpu_platform_to_str(u32 platform)
+{
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ return "IVPU_PLATFORM_SILICON";
+ case IVPU_PLATFORM_SIMICS:
+ return "IVPU_PLATFORM_SIMICS";
+ case IVPU_PLATFORM_FPGA:
+ return "IVPU_PLATFORM_FPGA";
+ default:
+ return "Invalid platform";
+ }
+}
+
+static void ivpu_hw_read_platform(struct ivpu_device *vdev)
+{
+ u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
+ u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
+
+ if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
+ vdev->platform = platform;
+ else
+ vdev->platform = IVPU_PLATFORM_SILICON;
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
+ ivpu_platform_to_str(vdev->platform), vdev->platform);
+}
+
+static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+{
+ vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.clear_runtime_mem = false;
+ vdev->wa.d3hot_after_power_off = true;
+}
+
+static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+{
+ if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
+ vdev->timeout.boot = 100000;
+ vdev->timeout.jsm = 50000;
+ vdev->timeout.tdr = 2000000;
+ vdev->timeout.reschedule_suspend = 1000;
+ } else {
+ vdev->timeout.boot = 1000;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 2000;
+ vdev->timeout.reschedule_suspend = 10;
+ }
+}
+
+static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
+/* Send a KMD-initiated workpoint change */
+static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
+ u16 target_ratio, u16 config)
+{
+ int ret;
+ u32 val;
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
+ val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
+{
+ u32 exp_val = enable ? 0x1 : 0x0;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+}
+
+static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
+{
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+}
+
+static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
+ u32 fmin_fuse, fmax_fuse;
+
+ fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
+ fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+ fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
+ fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+
+ hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
+ hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
+ hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u16 target_ratio;
+ u16 config;
+ int ret;
+
+ if (IVPU_WA(punit_disabled)) {
+ ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
+ ivpu_platform_to_str(vdev->platform));
+ return 0;
+ }
+
+ if (enable) {
+ target_ratio = hw->pll.pn_ratio;
+ config = hw->config;
+ } else {
+ target_ratio = 0;
+ config = 0;
+ }
+
+ ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
+ config, target_ratio);
+
+ ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
+ if (ret) {
+ ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_pll_wait_for_lock(vdev, enable);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL lock\n");
+ return ret;
+ }
+
+ if (enable) {
+ ret = ivpu_pll_wait_for_status_ready(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);
+
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
+
+ if (enable) {
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ }
+
+ REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
+
+ if (enable) {
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ }
+
+ REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
+}
+
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
+{
+ ivpu_boot_host_ss_rst_clr_assert(vdev);
+
+ return ivpu_boot_noc_qreqn_check(vdev, 0x0);
+}
+
+static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
+{
+ REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
+
+ ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+ if (enable) {
+ val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ } else {
+ val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ }
+ REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
+
+ ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+	/* The FPGA model (UPF) is not power aware, so skip the Power Island polling */
+ if (ivpu_is_fpga(vdev))
+ return 0;
+
+ return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+ exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
+}
+
+static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_boot_pwr_island_trickle_drive(vdev, true);
+ ivpu_boot_pwr_island_drive(vdev, true);
+
+ ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for power island status\n");
+ return ret;
+ }
+
+	ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_boot_host_ss_clk_drive(vdev, true);
+ ivpu_boot_pwr_island_isolation_drive(vdev, false);
+ ivpu_boot_host_ss_rst_drive(vdev, true);
+ ivpu_boot_dpu_active_drive(vdev, true);
+
+ return ret;
+}
+
+static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+
+ REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
+
+ if (ivpu_is_fpga(vdev)) {
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+ } else {
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
+ }
+
+ REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
+ val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
+
+ val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
+ REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+ REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+ REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = vdev->fw->entry_point >> 9;
+ REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+ REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+
+ ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+ vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
+}
+
+static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
+ if (enable)
+ val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ else
+ val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
+ hw->sku = TILE_SKU_BOTH_MTL;
+ hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
+
+ ivpu_pll_init_frequency_ratios(vdev);
+
+ ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
+ ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
+ hw->ranges.global_aliased_pio = hw->ranges.user_low;
+
+ return 0;
+}
+
+static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_boot_d0i3_drive(vdev, true);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+ udelay(5); /* VPU requires 5 us to complete the transition */
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_boot_d0i3_drive(vdev, false);
+ if (ret)
+ ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_hw_read_platform(vdev);
+ ivpu_hw_wa_init(vdev);
+ ivpu_hw_timeouts_init(vdev);
+
+ ret = ivpu_hw_mtl_reset(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
+
+ ret = ivpu_hw_mtl_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+ ret = ivpu_pll_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_configure(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+ return ret;
+ }
+
+	/*
+	 * The control circuitry for the vpu_idle indication logic powers up
+	 * active. To avoid an unnecessary low power mode signal from the LRT
+	 * during bring up, KMD disables the circuitry prior to bringing up
+	 * the Main Power island.
+	 */
+ ivpu_boot_vpu_idle_gen_disable(vdev);
+
+ ret = ivpu_boot_pwr_domain_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_axi_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
+{
+ ivpu_boot_no_snoop_enable(vdev);
+ ivpu_boot_tbu_mmu_enable(vdev);
+ ivpu_boot_soc_cpu_boot(vdev);
+
+ return 0;
+}
+
+static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return true;
+
+ val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
+ return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (ivpu_hw_mtl_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset the VPU\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_hw_mtl_d0i3_enable(vdev))
+ ivpu_warn(vdev, "Failed to enable D0I3\n");
+
+ return ret;
+}
+
+static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ /* Enable writing and set non-zero WDT value */
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+ /* Enable writing and disable watchdog timer */
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
+
+ /* Now clear the timeout interrupt */
+ val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
+ val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
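+/*
+ * Worked example: with the 50 MHz reference clock and a PLL ratio of 80 the
+ * PLL runs at 4000 MHz; a 4/3 workpoint then yields a 4000 * 2 / 4 = 2000 MHz
+ * CPU clock, any other workpoint yields 4000 * 2 / 5 = 1600 MHz.
+ */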
+static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
+{
+ u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
+ u32 cpu_clock;
+
+ if ((config & 0xff) == PLL_RATIO_4_3)
+ cpu_clock = pll_clock * 2 / 4;
+ else
+ cpu_clock = pll_clock * 2 / 5;
+
+ return cpu_clock;
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+ u32 pll_curr_ratio;
+
+ pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
+ pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+
+ if (!ivpu_is_silicon(vdev))
+ return PLL_SIMULATION_FREQ;
+
+ return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
+}
+
+static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
+ u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
+
+ REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
+
+ return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
+
+static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
+{
+ REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+}
+
+static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
+{
+ REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+ REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+ REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+}
+
+static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
+{
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+ REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+}
+
+static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+ ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
+
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+ ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
+
+ ivpu_hw_wdt_disable(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+ ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
+
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+/* Handler for IRQs from VPU core (irqV) */
+static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+
+ REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ ivpu_mmu_irq_evtq_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ ivpu_ipc_irq_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ ivpu_mmu_irq_gerr_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
+
+ return status;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+ bool schedule_recovery = false;
+
+ if (status == 0)
+ return 0;
+
+ /* Disable global interrupt before handling local buttress interrupts */
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+ REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+ u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+
+ ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+ ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ /*
+ * Clear local interrupt status by writing 0 to all bits.
+ * This must be done after interrupts are cleared at the source.
+	 * Writing 1 triggers an interrupt, so we can't perform a read-modify-write.
+ */
+ REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+
+ /* Re-enable global interrupt */
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
+ if (schedule_recovery)
+ ivpu_pm_schedule_recovery(vdev);
+
+ return status;
+}
+
+static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+{
+ struct ivpu_device *vdev = ptr;
+ u32 ret_irqv, ret_irqb;
+
+ ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
+ ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+
+ return IRQ_RETVAL(ret_irqb | ret_irqv);
+}
+
+static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
+{
+ u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+ if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
+ ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+ ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+ ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+ ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+ u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+
+ ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+ ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ }
+}
+
+const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
+ .info_init = ivpu_hw_mtl_info_init,
+ .power_up = ivpu_hw_mtl_power_up,
+ .is_idle = ivpu_hw_mtl_is_idle,
+ .power_down = ivpu_hw_mtl_power_down,
+ .boot_fw = ivpu_hw_mtl_boot_fw,
+ .wdt_disable = ivpu_hw_mtl_wdt_disable,
+ .diagnose_failure = ivpu_hw_mtl_diagnose_failure,
+ .reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
+ .reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
+ .reg_db_set = ivpu_hw_mtl_reg_db_set,
+ .reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
+ .reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
+ .reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
+ .irq_clear = ivpu_hw_mtl_irq_clear,
+ .irq_enable = ivpu_hw_mtl_irq_enable,
+ .irq_disable = ivpu_hw_mtl_irq_disable,
+ .irq_handler = ivpu_hw_mtl_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
new file mode 100644
index 000000000000..d83ccfd9a871
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_MTL_REG_H__
+#define __IVPU_HW_MTL_REG_H__
+
+#include <linux/bits.h>
+
+#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
+
+#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
+#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
+#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
+#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
+#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
+
+#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
+#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
+#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
+#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+
+#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
+#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
+
+#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
+#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
+
+#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
+#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
+#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
+#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
+#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
+
+#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
+#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
+#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
+
+#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
+#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
+#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
+#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+
+#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
+
+#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u
+#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
+#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+
+#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u
+#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
+#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
+#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
+
+#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u
+#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
+
+#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u
+#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u
+#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu
+#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
+#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
+#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
+#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
+
+#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
+
+#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u
+#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
+
+#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u
+#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u
+#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u
+
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
+
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
+#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u
+#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
+
+#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
+#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
+
+#define MTL_VPU_HOST_MMU_IDR0 0x00200000u
+#define MTL_VPU_HOST_MMU_IDR1 0x00200004u
+#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu
+#define MTL_VPU_HOST_MMU_IDR5 0x00200014u
+#define MTL_VPU_HOST_MMU_CR0 0x00200020u
+#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u
+#define MTL_VPU_HOST_MMU_CR1 0x00200028u
+#define MTL_VPU_HOST_MMU_CR2 0x0020002cu
+#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u
+#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u
+
+#define MTL_VPU_HOST_MMU_GERROR 0x00200060u
+#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
+
+#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u
+
+#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u
+#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
+#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u
+#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u
+#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu
+#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u
+#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u
+#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu
+#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
+#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
+
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
+
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
+
+#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
+#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
+#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
+#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
+
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
+#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
+
+#endif /* __IVPU_HW_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h
new file mode 100644
index 000000000000..43c2c0c2d050
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_REG_IO_H__
+#define __IVPU_HW_REG_IO_H__
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "ivpu_drv.h"
+
+#define REG_POLL_SLEEP_US 50
+#define REG_IO_ERROR 0xffffffff
+
+#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__)
+#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg))
+#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__)
+#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__)
+#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), #reg, __func__)
+
+#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__)
+#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg))
+#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__)
+#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__)
+#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__)
+
+#define REGV_WR32I(reg, stride, index, val) \
+ ivpu_hw_reg_wr32_index(vdev, vdev->regv, (reg), (stride), (index), (val), #reg, __func__)
+
+#define REG_FLD(REG, FLD) \
+ (REG##_##FLD##_MASK)
+#define REG_FLD_NUM(REG, FLD, num) \
+ FIELD_PREP(REG##_##FLD##_MASK, num)
+#define REG_GET_FLD(REG, FLD, val) \
+ FIELD_GET(REG##_##FLD##_MASK, val)
+#define REG_CLR_FLD(REG, FLD, val) \
+ ((val) & ~(REG##_##FLD##_MASK))
+#define REG_SET_FLD(REG, FLD, val) \
+ ((val) | (REG##_##FLD##_MASK))
+#define REG_SET_FLD_NUM(REG, FLD, num, val) \
+ (((val) & ~(REG##_##FLD##_MASK)) | FIELD_PREP(REG##_##FLD##_MASK, num))
+#define REG_TEST_FLD(REG, FLD, val) \
+ ((REG##_##FLD##_MASK) == ((val) & (REG##_##FLD##_MASK)))
+#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
+ ((num) == FIELD_GET(REG##_##FLD##_MASK, val))
+
+#define REGB_POLL(reg, var, cond, timeout_us) \
+ read_poll_timeout(REGB_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg)
+
+#define REGV_POLL(reg, var, cond, timeout_us) \
+ read_poll_timeout(REGV_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg)
+
+#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
+({ \
+ u32 var; \
+ REGB_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \
+})
+
+#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
+({ \
+ u32 var; \
+ REGV_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \
+})
+
+static inline u32
+ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
+ const char *name, const char *func)
+{
+ u32 val = readl(base + reg);
+
+ ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%08x\n", func, name, reg, val);
+ return val;
+}
+
+static inline u64
+ivpu_hw_reg_rd64(struct ivpu_device *vdev, void __iomem *base, u32 reg,
+ const char *name, const char *func)
+{
+ u64 val = readq(base + reg);
+
+ ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%016llx\n", func, name, reg, val);
+ return val;
+}
+
+static inline void
+ivpu_hw_reg_wr32(struct ivpu_device *vdev, void __iomem *base, u32 reg, u32 val,
+ const char *name, const char *func)
+{
+ ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%08x\n", func, name, reg, val);
+ writel(val, base + reg);
+}
+
+static inline void
+ivpu_hw_reg_wr64(struct ivpu_device *vdev, void __iomem *base, u32 reg, u64 val,
+ const char *name, const char *func)
+{
+ ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%016llx\n", func, name, reg, val);
+ writeq(val, base + reg);
+}
+
+static inline void
+ivpu_hw_reg_wr32_index(struct ivpu_device *vdev, void __iomem *base, u32 reg,
+ u32 stride, u32 index, u32 val, const char *name,
+ const char *func)
+{
+ reg += index * stride;
+
+ ivpu_dbg(vdev, REG, "%s WR: %s_%d (0x%08x) <= 0x%08x\n", func, name, index, reg, val);
+ writel(val, base + reg);
+}
+
+#endif /* __IVPU_HW_REG_IO_H__ */
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
new file mode 100644
index 000000000000..3adcfa80fc0e
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_ipc.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+
+#define IPC_MAX_RX_MSG 128
+#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD)
+
+struct ivpu_ipc_tx_buf {
+ struct ivpu_ipc_hdr ipc;
+ struct vpu_jsm_msg jsm;
+};
+
+struct ivpu_ipc_rx_msg {
+ struct list_head link;
+ struct ivpu_ipc_hdr *ipc_hdr;
+ struct vpu_jsm_msg *jsm_msg;
+};
+
+static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
+ struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
+{
+ ivpu_dbg(vdev, IPC,
+ "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
+ c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
+ ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
+}
+
+static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
+ struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
+{
+ u32 *payload = (u32 *)&jsm_msg->payload;
+
+ ivpu_dbg(vdev, JSM,
+ "%s: vpu:0x%08x (type:0x%x, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
+ c, vpu_addr, jsm_msg->type, jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
+ payload[0], payload[1], payload[2], payload[3], payload[4]);
+}
+
+static void
+ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+ struct vpu_jsm_msg *jsm_msg)
+{
+ ipc_hdr->status = IVPU_IPC_HDR_FREE;
+ if (jsm_msg)
+ jsm_msg->status = VPU_JSM_MSG_FREE;
+ wmb(); /* Flush WC buffers for message statuses */
+}
+
+static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ ivpu_bo_free_internal(ipc->mem_rx);
+ ivpu_bo_free_internal(ipc->mem_tx);
+}
+
+static int
+ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct vpu_jsm_msg *req)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_tx_buf *tx_buf;
+ u32 tx_buf_vpu_addr;
+ u32 jsm_vpu_addr;
+
+ tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
+ if (!tx_buf_vpu_addr) {
+		ivpu_err(vdev, "Failed to reserve IPC buffer, size %zu\n",
+			 sizeof(*tx_buf));
+ return -ENOMEM;
+ }
+
+ tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
+ if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
+ gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
+ return -EIO;
+ }
+
+ jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);
+
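+	/* The slot should have been released (marked FREE) by the firmware; warn if it is still in use */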
+ if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
+ ivpu_warn(vdev, "IPC message vpu:0x%x not released by firmware\n",
+ tx_buf_vpu_addr);
+
+ if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
+ ivpu_warn(vdev, "JSM message vpu:0x%x not released by firmware\n",
+ jsm_vpu_addr);
+
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->ipc.data_addr = jsm_vpu_addr;
+ /* TODO: Set data_size to actual JSM message size, not union of all messages */
+ tx_buf->ipc.data_size = sizeof(*req);
+ tx_buf->ipc.channel = cons->channel;
+ tx_buf->ipc.src_node = 0;
+ tx_buf->ipc.dst_node = 1;
+ tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
+ tx_buf->jsm.type = req->type;
+ tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
+ tx_buf->jsm.payload = req->payload;
+
+ req->request_id = atomic_inc_return(&ipc->request_id);
+ tx_buf->jsm.request_id = req->request_id;
+ cons->request_id = req->request_id;
+ wmb(); /* Flush WC buffers for IPC, JSM msgs */
+
+ cons->tx_vpu_addr = tx_buf_vpu_addr;
+
+ ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
+ ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);
+
+ return 0;
+}
+
+static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ if (vpu_addr)
+ gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
+}
+
+static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
+}
+
+void
+ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ INIT_LIST_HEAD(&cons->link);
+ cons->channel = channel;
+ cons->tx_vpu_addr = 0;
+ cons->request_id = 0;
+ spin_lock_init(&cons->rx_msg_lock);
+ INIT_LIST_HEAD(&cons->rx_msg_list);
+ init_waitqueue_head(&cons->rx_msg_wq);
+
+ spin_lock_irq(&ipc->cons_list_lock);
+ list_add_tail(&cons->link, &ipc->cons_list);
+ spin_unlock_irq(&ipc->cons_list_lock);
+}
+
+void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg, *r;
+
+ spin_lock_irq(&ipc->cons_list_lock);
+ list_del(&cons->link);
+ spin_unlock_irq(&ipc->cons_list_lock);
+
+ spin_lock_irq(&cons->rx_msg_lock);
+ list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
+ list_del(&rx_msg->link);
+ ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ atomic_dec(&ipc->rx_msg_count);
+ kfree(rx_msg);
+ }
+ spin_unlock_irq(&cons->rx_msg_lock);
+
+ ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
+}
+
+static int
+ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ipc->lock);
+ if (ret)
+ return ret;
+
+ if (!ipc->on) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = ivpu_ipc_tx_prepare(vdev, cons, req);
+ if (ret)
+ goto unlock;
+
+ ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
+
+unlock:
+ mutex_unlock(&ipc->lock);
+ return ret;
+}
+
+int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_buf,
+ struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg;
+ int wait_ret, ret = 0;
+
+ wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
+ (IS_KTHREAD() && kthread_should_stop()) ||
+ !list_empty(&cons->rx_msg_list),
+ msecs_to_jiffies(timeout_ms));
+
+ if (IS_KTHREAD() && kthread_should_stop())
+ return -EINTR;
+
+ if (wait_ret == 0)
+ return -ETIMEDOUT;
+
+ if (wait_ret < 0)
+ return -ERESTARTSYS;
+
+ spin_lock_irq(&cons->rx_msg_lock);
+ rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
+ if (!rx_msg) {
+ spin_unlock_irq(&cons->rx_msg_lock);
+ return -EAGAIN;
+ }
+ list_del(&rx_msg->link);
+ spin_unlock_irq(&cons->rx_msg_lock);
+
+ if (ipc_buf)
+ memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
+ if (rx_msg->jsm_msg) {
+ u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));
+
+ if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
+ ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
+ ret = -EBADMSG;
+ }
+
+ if (ipc_payload)
+ memcpy(ipc_payload, rx_msg->jsm_msg, size);
+ }
+
+ ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ atomic_dec(&ipc->rx_msg_count);
+ kfree(rx_msg);
+
+ return ret;
+}
+
+static int
+ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel,
+ unsigned long timeout_ms)
+{
+ struct ivpu_ipc_consumer cons;
+ int ret;
+
+ ivpu_ipc_consumer_add(vdev, &cons, channel);
+
+ ret = ivpu_ipc_send(vdev, &cons, req);
+ if (ret) {
+ ivpu_warn(vdev, "IPC send failed: %d\n", ret);
+ goto consumer_del;
+ }
+
+ ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
+ if (ret) {
+ ivpu_warn(vdev, "IPC receive failed: type 0x%x, ret %d\n", req->type, ret);
+ goto consumer_del;
+ }
+
+ if (resp->type != expected_resp_type) {
+ ivpu_warn(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
+ ret = -EBADE;
+ }
+
+consumer_del:
+ ivpu_ipc_consumer_del(vdev, &cons);
+ return ret;
+}
+
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel,
+ unsigned long timeout_ms)
+{
+ struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
+ struct vpu_jsm_msg hb_resp;
+ int ret, hb_ret;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp,
+ channel, timeout_ms);
+ if (ret != -ETIMEDOUT)
+ goto rpm_put;
+
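+	/*
+	 * The request timed out. Query the engine heartbeat to check whether
+	 * the firmware is still responsive; if the heartbeat times out as
+	 * well, diagnose the failure and schedule device recovery.
+	 */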
+ hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
+ &hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.jsm);
+ if (hb_ret == -ETIMEDOUT) {
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+ }
+
+rpm_put:
+ ivpu_rpm_put(vdev);
+ return ret;
+}
+
+static bool
+ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
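+	/*
+	 * A message matches this consumer when the channels are equal and,
+	 * for messages carrying a JSM payload, the request_id equals the one
+	 * recorded at TX time. Boot notifications have no JSM payload, so a
+	 * channel match alone is enough.
+	 */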
+ if (cons->channel != ipc_hdr->channel)
+ return false;
+
+ if (!jsm_msg || jsm_msg->request_id == cons->request_id)
+ return true;
+
+ return false;
+}
+
+static void
+ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg;
+ unsigned long flags;
+
+ lockdep_assert_held(&ipc->cons_list_lock);
+
+ rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
+ if (!rx_msg) {
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ return;
+ }
+
+ atomic_inc(&ipc->rx_msg_count);
+
+ rx_msg->ipc_hdr = ipc_hdr;
+ rx_msg->jsm_msg = jsm_msg;
+
+ spin_lock_irqsave(&cons->rx_msg_lock, flags);
+ list_add_tail(&rx_msg->link, &cons->rx_msg_list);
+ spin_unlock_irqrestore(&cons->rx_msg_lock, flags);
+
+ wake_up(&cons->rx_msg_wq);
+}
+
+int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_consumer *cons;
+ struct ivpu_ipc_hdr *ipc_hdr;
+ struct vpu_jsm_msg *jsm_msg;
+ unsigned long flags;
+ bool dispatched;
+ u32 vpu_addr;
+
+ /*
+	 * The driver must drain all messages from the IPC FIFO to clear the IPC
+	 * interrupt. Unless the FIFO is emptied, no further IPC interrupts will
+	 * be generated.
+ */
+ while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
+ vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
+ if (vpu_addr == REG_IO_ERROR) {
+ ivpu_err(vdev, "Failed to read IPC rx addr register\n");
+ return -EIO;
+ }
+
+ ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
+ if (!ipc_hdr) {
+ ivpu_warn(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
+ continue;
+ }
+ ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);
+
+ jsm_msg = NULL;
+ if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
+ jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
+ if (!jsm_msg) {
+ ivpu_warn(vdev, "JSM msg 0x%x out of range\n", ipc_hdr->data_addr);
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
+ continue;
+ }
+ ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
+ }
+
+ if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
+ ivpu_warn(vdev, "IPC RX msg dropped, msg count %d\n", IPC_MAX_RX_MSG);
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ continue;
+ }
+
+ dispatched = false;
+ spin_lock_irqsave(&ipc->cons_list_lock, flags);
+ list_for_each_entry(cons, &ipc->cons_list, link) {
+ if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
+ ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
+ dispatched = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+
+ if (!dispatched) {
+ ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ }
+ }
+
+ return 0;
+}
+
+int ivpu_ipc_init(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ int ret = -ENOMEM;
+
+ ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ if (!ipc->mem_tx)
+ return ret;
+
+ ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ if (!ipc->mem_rx)
+ goto err_free_tx;
+
+ ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
+ -1, "TX_IPC_JSM");
+ if (IS_ERR(ipc->mm_tx)) {
+ ret = PTR_ERR(ipc->mm_tx);
+ ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
+ goto err_free_rx;
+ }
+
+ ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1);
+ if (ret) {
+ ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
+ goto err_free_rx;
+ }
+
+ INIT_LIST_HEAD(&ipc->cons_list);
+ spin_lock_init(&ipc->cons_list_lock);
+ drmm_mutex_init(&vdev->drm, &ipc->lock);
+
+ ivpu_ipc_reset(vdev);
+ return 0;
+
+err_free_rx:
+ ivpu_bo_free_internal(ipc->mem_rx);
+err_free_tx:
+ ivpu_bo_free_internal(ipc->mem_tx);
+ return ret;
+}
+
+void ivpu_ipc_fini(struct ivpu_device *vdev)
+{
+ ivpu_ipc_mem_fini(vdev);
+}
+
+void ivpu_ipc_enable(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ mutex_lock(&ipc->lock);
+ ipc->on = true;
+ mutex_unlock(&ipc->lock);
+}
+
+void ivpu_ipc_disable(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_consumer *cons, *c;
+ unsigned long flags;
+
+ mutex_lock(&ipc->lock);
+ ipc->on = false;
+ mutex_unlock(&ipc->lock);
+
+ spin_lock_irqsave(&ipc->cons_list_lock, flags);
+ list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
+ wake_up(&cons->rx_msg_wq);
+ spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+}
+
+void ivpu_ipc_reset(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ mutex_lock(&ipc->lock);
+
+ memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size);
+ memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size);
+ wmb(); /* Flush WC buffers for TX and RX rings */
+
+ mutex_unlock(&ipc->lock);
+}
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
new file mode 100644
index 000000000000..68f5b6668e00
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_IPC_H__
+#define __IVPU_IPC_H__
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "vpu_jsm_api.h"
+
+struct ivpu_bo;
+
+/* VPU FW boot notification */
+#define IVPU_IPC_CHAN_BOOT_MSG 0x3ff
+#define IVPU_IPC_BOOT_MSG_DATA_ADDR 0x424f4f54
+
+/* The alignment to be used for IPC Buffers and IPC Data. */
+#define IVPU_IPC_ALIGNMENT 64
+
+#define IVPU_IPC_HDR_FREE 0
+#define IVPU_IPC_HDR_ALLOCATED 1
+
+/**
+ * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
+ * with the VPU device firmware.
+ * @data_addr: The VPU address of the payload (JSM message)
+ * @data_size: The size of the payload.
+ * @channel: The channel used.
+ * @src_node: The Node ID of the sender.
+ * @dst_node: The Node ID of the intended receiver.
+ * @status: IPC buffer usage status
+ */
+struct ivpu_ipc_hdr {
+ u32 data_addr;
+ u32 data_size;
+ u16 channel;
+ u8 src_node;
+ u8 dst_node;
+ u8 status;
+} __packed __aligned(IVPU_IPC_ALIGNMENT);
+
+struct ivpu_ipc_consumer {
+ struct list_head link;
+ u32 channel;
+ u32 tx_vpu_addr;
+ u32 request_id;
+
+ spinlock_t rx_msg_lock; /* Protects rx_msg_list */
+ struct list_head rx_msg_list;
+ wait_queue_head_t rx_msg_wq;
+};
+
+struct ivpu_ipc_info {
+ struct gen_pool *mm_tx;
+ struct ivpu_bo *mem_tx;
+ struct ivpu_bo *mem_rx;
+
+ atomic_t rx_msg_count;
+
+ spinlock_t cons_list_lock; /* Protects cons_list */
+ struct list_head cons_list;
+
+ atomic_t request_id;
+	struct mutex lock; /* Protects the 'on' state and serializes the TX path */
+ bool on;
+};
+
+int ivpu_ipc_init(struct ivpu_device *vdev);
+void ivpu_ipc_fini(struct ivpu_device *vdev);
+
+void ivpu_ipc_enable(struct ivpu_device *vdev);
+void ivpu_ipc_disable(struct ivpu_device *vdev);
+void ivpu_ipc_reset(struct ivpu_device *vdev);
+
+int ivpu_ipc_irq_handler(struct ivpu_device *vdev);
+
+void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ u32 channel);
+void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
+
+int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload,
+ unsigned long timeout_ms);
+
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel,
+ unsigned long timeout_ms);
+
+#endif /* __IVPU_IPC_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
new file mode 100644
index 000000000000..3c6f1e16cf2f
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include <linux/bitfield.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <uapi/drm/ivpu_accel.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+
+#define CMD_BUF_IDX 0
+#define JOB_ID_JOB_MASK GENMASK(7, 0)
+#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+#define JOB_MAX_BUFFER_COUNT 65535
+
+static unsigned int ivpu_tdr_timeout_ms;
+module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+
+static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
+{
+ ivpu_hw_reg_db_set(vdev, cmdq->db_id);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct vpu_job_queue_header *jobq_header;
+ struct ivpu_cmdq *cmdq;
+
+ cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
+ if (!cmdq)
+ return NULL;
+
+ cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
+ if (!cmdq->mem)
+ goto cmdq_free;
+
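+	/* Doorbell IDs are unique per (context, engine) pair: context id offset by engine * context count */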
+ cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
+ cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
+ sizeof(struct vpu_job_queue_entry));
+
+ cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
+ jobq_header = &cmdq->jobq->header;
+ jobq_header->engine_idx = engine;
+ jobq_header->head = 0;
+ jobq_header->tail = 0;
+ wmb(); /* Flush WC buffer for jobq->header */
+
+ return cmdq;
+
+cmdq_free:
+ kfree(cmdq);
+ return NULL;
+}
+
+static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ if (!cmdq)
+ return;
+
+ ivpu_bo_free_internal(cmdq->mem);
+ kfree(cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (!cmdq) {
+ cmdq = ivpu_cmdq_alloc(file_priv, engine);
+ if (!cmdq)
+ return NULL;
+ file_priv->cmdq[engine] = cmdq;
+ }
+
+ if (cmdq->db_registered)
+ return cmdq;
+
+ ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
+ cmdq->mem->vpu_addr, cmdq->mem->base.size);
+ if (ret)
+ return NULL;
+
+ cmdq->db_registered = true;
+
+ return cmdq;
+}
+
+static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (cmdq) {
+ file_priv->cmdq[engine] = NULL;
+ if (cmdq->db_registered)
+ ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
+
+ ivpu_cmdq_free(file_priv, cmdq);
+ }
+}
+
+void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+{
+ int i;
+
+ mutex_lock(&file_priv->lock);
+
+ for (i = 0; i < IVPU_NUM_ENGINES; i++)
+ ivpu_cmdq_release_locked(file_priv, i);
+
+ mutex_unlock(&file_priv->lock);
+}
+
+/*
+ * Mark the doorbell as unregistered and reset job queue pointers.
+ * This function needs to be called when the VPU hardware is restarted
+ * and FW loses job queue state. The next time the job queue is used it
+ * will be registered again.
+ */
+static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (cmdq) {
+ cmdq->db_registered = false;
+ cmdq->jobq->header.head = 0;
+ cmdq->jobq->header.tail = 0;
+ wmb(); /* Flush WC buffer for jobq header */
+ }
+}
+
+static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
+{
+ int i;
+
+ mutex_lock(&file_priv->lock);
+
+ for (i = 0; i < IVPU_NUM_ENGINES; i++)
+ ivpu_cmdq_reset_locked(file_priv, i);
+
+ mutex_unlock(&file_priv->lock);
+}
+
+void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
+{
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
+ if (!file_priv)
+ continue;
+
+ ivpu_cmdq_reset_all(file_priv);
+
+ ivpu_file_priv_put(&file_priv);
+ }
+}
+
+static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ struct ivpu_device *vdev = job->vdev;
+ struct vpu_job_queue_header *header = &cmdq->jobq->header;
+ struct vpu_job_queue_entry *entry;
+ u32 tail = READ_ONCE(header->tail);
+ u32 next_entry = (tail + 1) % cmdq->entry_count;
+
+ /* Check if there is space left in job queue */
+ if (next_entry == header->head) {
+ ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
+ job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
+ return -EBUSY;
+ }
+
+ entry = &cmdq->jobq->job[tail];
+ entry->batch_buf_addr = job->cmd_buf_vpu_addr;
+ entry->job_id = job->job_id;
+ entry->flags = 0;
+ wmb(); /* Ensure that tail is updated after filling entry */
+ header->tail = next_entry;
+ wmb(); /* Flush WC buffer for jobq header */
+
+ return 0;
+}
+
+struct ivpu_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* protects base */
+ struct ivpu_device *vdev;
+};
+
+static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct ivpu_fence, base);
+}
+
+static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);
+
+ return dev_name(ivpu_fence->vdev->drm.dev);
+}
+
+static const struct dma_fence_ops ivpu_fence_ops = {
+ .get_driver_name = ivpu_fence_get_driver_name,
+ .get_timeline_name = ivpu_fence_get_timeline_name,
+};
+
+static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
+{
+ struct ivpu_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->vdev = vdev;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);
+
+ return &fence->base;
+}
+
+static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+{
+ struct ivpu_device *vdev = job->vdev;
+
+ kref_get(&job->ref);
+ *link = job;
+
+ ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
+}
+
+static void job_release(struct kref *ref)
+{
+ struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
+ struct ivpu_device *vdev = job->vdev;
+ u32 i;
+
+ for (i = 0; i < job->bo_count; i++)
+ if (job->bos[i])
+ drm_gem_object_put(&job->bos[i]->base);
+
+ dma_fence_put(job->done_fence);
+ ivpu_file_priv_put(&job->file_priv);
+
+ ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
+ kfree(job);
+
+ /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
+ ivpu_rpm_put(vdev);
+}
+
+static void job_put(struct ivpu_job *job)
+{
+ struct ivpu_device *vdev = job->vdev;
+
+ ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
+ kref_put(&job->ref, job_release);
+}
+
+static struct ivpu_job *
+ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_job *job;
+ size_t buf_size;
+ int ret;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return NULL;
+
+ buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *);
+ job = kzalloc(buf_size, GFP_KERNEL);
+ if (!job)
+ goto err_rpm_put;
+
+ kref_init(&job->ref);
+
+ job->vdev = vdev;
+ job->engine_idx = engine_idx;
+ job->bo_count = bo_count;
+ job->done_fence = ivpu_fence_create(vdev);
+ if (!job->done_fence) {
+ ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
+ goto err_free_job;
+ }
+
+ job->file_priv = ivpu_file_priv_get(file_priv);
+
+ ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
+
+ return job;
+
+err_free_job:
+ kfree(job);
+err_rpm_put:
+ ivpu_rpm_put(vdev);
+ return NULL;
+}
+
+static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ struct ivpu_job *job;
+
+ job = xa_erase(&vdev->submitted_jobs_xa, job_id);
+ if (!job)
+ return -ENOENT;
+
+ if (job->file_priv->has_mmu_faults)
+ job_status = VPU_JSM_STATUS_ABORTED;
+
+ job->bos[CMD_BUF_IDX]->job_status = job_status;
+ dma_fence_signal(job->done_fence);
+
+ ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
+ job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+
+ job_put(job);
+ return 0;
+}
+
+static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
+{
+ struct vpu_ipc_msg_payload_job_done *payload;
+ struct vpu_jsm_msg *job_ret_msg = msg;
+ int ret;
+
+ payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;
+
+ ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+ if (ret)
+ ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
+}
+
+void ivpu_jobs_abort_all(struct ivpu_device *vdev)
+{
+ struct ivpu_job *job;
+ unsigned long id;
+
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+}
+
+static int ivpu_direct_job_submission(struct ivpu_job *job)
+{
+ struct ivpu_file_priv *file_priv = job->file_priv;
+ struct ivpu_device *vdev = job->vdev;
+ struct xa_limit job_id_range;
+ struct ivpu_cmdq *cmdq;
+ int ret;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
+ if (!cmdq) {
+		ivpu_warn(vdev, "Failed to get job queue, ctx %d engine %d\n",
+ file_priv->ctx.id, job->engine_idx);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
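+	/* Job IDs encode the context in the upper bits and an 8-bit per-context job index in the lower bits */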
+ job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
+
+ job_get(job, &job);
+ ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
+ goto err_job_put;
+ }
+
+ ret = ivpu_cmdq_push_job(cmdq, job);
+ if (ret)
+ goto err_xa_erase;
+
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
+ job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
+ job->engine_idx, cmdq->jobq->header.tail);
+
+ if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
+ ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ cmdq->jobq->header.head = cmdq->jobq->header.tail;
+ wmb(); /* Flush WC buffer for jobq header */
+ } else {
+ ivpu_cmdq_ring_db(vdev, cmdq);
+ }
+
+ mutex_unlock(&file_priv->lock);
+ return 0;
+
+err_xa_erase:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_job_put:
+ job_put(job);
+err_unlock:
+ mutex_unlock(&file_priv->lock);
+ return ret;
+}
+
+static int
+ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
+ u32 buf_count, u32 commands_offset)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ww_acquire_ctx acquire_ctx;
+ struct ivpu_bo *bo;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < buf_count; i++) {
+ struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
+
+ if (!obj)
+ return -ENOENT;
+
+ job->bos[i] = to_ivpu_bo(obj);
+
+ ret = ivpu_bo_pin(job->bos[i]);
+ if (ret)
+ return ret;
+ }
+
+ bo = job->bos[CMD_BUF_IDX];
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
+ ivpu_warn(vdev, "Buffer is already in use\n");
+ return -EBUSY;
+ }
+
+ if (commands_offset >= bo->base.size) {
+ ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
+ return -EINVAL;
+ }
+
+ job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
+ return ret;
+ }
+
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ goto unlock_reservations;
+ }
+
+ dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+
+unlock_reservations:
+ drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+
+ wmb(); /* Flush write combining buffers */
+
+ return ret;
+}
+
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_submit *params = data;
+ struct ivpu_job *job;
+ u32 *buf_handles;
+ int idx, ret;
+
+ if (params->engine > DRM_IVPU_ENGINE_COPY)
+ return -EINVAL;
+
+ if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(params->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
+ if (!buf_handles)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf_handles,
+ (void __user *)params->buffers_ptr,
+ params->buffer_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_handles;
+ }
+
+ if (!drm_dev_enter(&vdev->drm, &idx)) {
+ ret = -ENODEV;
+ goto free_handles;
+ }
+
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
+ file_priv->ctx.id, params->buffer_count);
+
+ job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+ if (!job) {
+ ivpu_err(vdev, "Failed to create job\n");
+ ret = -ENOMEM;
+ goto dev_exit;
+ }
+
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
+ params->commands_offset);
+ if (ret) {
+ ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
+ goto job_put;
+ }
+
+ ret = ivpu_direct_job_submission(job);
+ if (ret) {
+ dma_fence_signal(job->done_fence);
+ ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
+ }
+
+job_put:
+ job_put(job);
+dev_exit:
+ drm_dev_exit(idx);
+free_handles:
+ kfree(buf_handles);
+
+ return ret;
+}
+
+static int ivpu_job_done_thread(void *arg)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)arg;
+ struct ivpu_ipc_consumer cons;
+ struct vpu_jsm_msg jsm_msg;
+ bool jobs_submitted;
+ unsigned int timeout;
+ int ret;
+
+ ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
+
+ ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
+
+ while (!kthread_should_stop()) {
+ timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
+ ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
+ if (!ret) {
+ ivpu_job_done_message(vdev, &jsm_msg);
+ } else if (ret == -ETIMEDOUT) {
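+			/* Report a TDR only when jobs were pending before the wait and are still pending now */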
+ if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
+ ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+ }
+ }
+ }
+
+ ivpu_ipc_consumer_del(vdev, &cons);
+
+ ivpu_jobs_abort_all(vdev);
+
+ ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
+ return 0;
+}
+
+int ivpu_job_done_thread_init(struct ivpu_device *vdev)
+{
+ struct task_struct *thread;
+
+ thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
+ if (IS_ERR(thread)) {
+ ivpu_err(vdev, "Failed to start job completion thread\n");
+ return -EIO;
+ }
+
+ get_task_struct(thread);
+ wake_up_process(thread);
+
+ vdev->job_done_thread = thread;
+
+ return 0;
+}
+
+void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+{
+ kthread_stop(vdev->job_done_thread);
+ put_task_struct(vdev->job_done_thread);
+}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
new file mode 100644
index 000000000000..aa1f0b9479b0
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_JOB_H__
+#define __IVPU_JOB_H__
+
+#include <linux/kref.h>
+#include <linux/idr.h>
+
+#include "ivpu_gem.h"
+
+struct ivpu_device;
+struct ivpu_file_priv;
+
+/**
+ * struct ivpu_cmdq - Object representing device queue used to send jobs.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count: Number of job entries in the queue
+ * @db_id: Doorbell assigned to this job queue
+ * @db_registered: True if doorbell is registered in device
+ */
+struct ivpu_cmdq {
+ struct vpu_job_queue *jobq;
+ struct ivpu_bo *mem;
+ u32 entry_count;
+ u32 db_id;
+ bool db_registered;
+};
+
+/**
+ * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
+ * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
+ * This is a unit of execution, tracked by job_id for any status reporting
+ * from the VPU FW through the IPC JOB RET/DONE message.
+ * @ref: Reference count, the job is freed when it drops to zero
+ * @vdev: VPU device the job was submitted to
+ * @file_priv: The client that submitted this job
+ * @done_fence: Fence signalled when the job completes or is aborted
+ * @cmd_buf_vpu_addr: VPU address of the command (batch) buffer for this job
+ * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
+ * @engine_idx: Index of the engine the job was submitted to
+ * @bo_count: Number of buffer objects referenced by @bos
+ * @bos: Buffer objects referenced by the job, @bos[0] is the command buffer
+ */
+struct ivpu_job {
+ struct kref ref;
+ struct ivpu_device *vdev;
+ struct ivpu_file_priv *file_priv;
+ struct dma_fence *done_fence;
+ u64 cmd_buf_vpu_addr;
+ u32 job_id;
+ u32 engine_idx;
+ size_t bo_count;
+ struct ivpu_bo *bos[];
+};
+
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+
+int ivpu_job_done_thread_init(struct ivpu_device *vdev);
+void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
+
+void ivpu_jobs_abort_all(struct ivpu_device *vdev);
+
+#endif /* __IVPU_JOB_H__ */
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
new file mode 100644
index 000000000000..831bfd2b2d39
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_ipc.h"
+#include "ivpu_jsm_msg.h"
+
+int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
+ u64 jobq_base, u32 jobq_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
+ struct vpu_jsm_msg resp;
+ int ret = 0;
+
+ req.payload.register_db.db_idx = db_id;
+ req.payload.register_db.jobq_base = jobq_base;
+ req.payload.register_db.jobq_size = jobq_size;
+ req.payload.register_db.host_ssid = ctx_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
+ return ret;
+ }
+
+ ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
+
+ return 0;
+}
+
+int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
+ struct vpu_jsm_msg resp;
+ int ret = 0;
+
+ req.payload.unregister_db.db_idx = db_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
+ return ret;
+ }
+
+ ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
+
+ return 0;
+}
+
+int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine > VPU_ENGINE_COPY)
+ return -EINVAL;
+
+ req.payload.query_engine_hb.engine_idx = engine;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
+ return ret;
+ }
+
+ *heartbeat = resp.payload.query_engine_hb_done.heartbeat;
+ return ret;
+}
+
+int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine > VPU_ENGINE_COPY)
+ return -EINVAL;
+
+ req.payload.engine_reset.engine_idx = engine;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine > VPU_ENGINE_COPY)
+ return -EINVAL;
+
+ req.payload.engine_preempt.engine_idx = engine;
+ req.payload.engine_preempt.preempt_id = preempt_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
+ return -ENOMEM;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
+ u64 *trace_hw_component_mask)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
+ return ret;
+ }
+
+ *trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
+ *trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
+
+ return ret;
+}
+
+int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
+ u64 trace_hw_component_mask)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.trace_config.trace_level = trace_level;
+ req.payload.trace_config.trace_destination_mask = trace_destination_mask;
+ req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn(vdev, "Failed to set config: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
+ struct vpu_jsm_msg resp;
+
+ req.payload.ssid_release.host_ssid = host_ssid;
+
+ return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
new file mode 100644
index 000000000000..ab50d7b017c1
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_JSM_MSG_H__
+#define __IVPU_JSM_MSG_H__
+
+#include "vpu_jsm_api.h"
+
+int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
+ u64 jobq_base, u32 jobq_size);
+int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id);
+int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat);
+int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine);
+int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id);
+int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size);
+int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
+ u64 *trace_hw_component_mask);
+int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
+ u64 trace_hw_component_mask);
+int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
+#endif
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
new file mode 100644
index 000000000000..694e978aba66
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/highmem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+#include "ivpu_pm.h"
+
+#define IVPU_MMU_IDR0_REF 0x080f3e0f
+#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
+#define IVPU_MMU_IDR1_REF 0x0e739d18
+#define IVPU_MMU_IDR3_REF 0x0000003c
+#define IVPU_MMU_IDR5_REF 0x00040070
+#define IVPU_MMU_IDR5_REF_SIMICS 0x00000075
+#define IVPU_MMU_IDR5_REF_FPGA 0x00800075
+
+#define IVPU_MMU_CDTAB_ENT_SIZE 64
+#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2 8 /* 256 entries */
+#define IVPU_MMU_CDTAB_ENT_COUNT ((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)
+
+#define IVPU_MMU_STREAM_ID0 0
+#define IVPU_MMU_STREAM_ID3 3
+
+#define IVPU_MMU_STRTAB_ENT_SIZE 64
+#define IVPU_MMU_STRTAB_ENT_COUNT 4
+#define IVPU_MMU_STRTAB_CFG_LOG2SIZE 2
+#define IVPU_MMU_STRTAB_CFG IVPU_MMU_STRTAB_CFG_LOG2SIZE
+
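+/*
+ * The command and event queue indices are modeled after ARM SMMUv3 queues:
+ * producer/consumer values carry a wrap bit above the index bits, which
+ * IVPU_MMU_Q_IDX() masks off when addressing the ring buffer.
+ */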
+#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
+#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
+#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
+#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
+#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
+
+#define IVPU_MMU_CMDQ_CMD_SIZE 16
+#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
+
+#define IVPU_MMU_EVTQ_CMD_SIZE 32
+#define IVPU_MMU_EVTQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)
+
+#define IVPU_MMU_CMD_OPCODE GENMASK(7, 0)
+
+#define IVPU_MMU_CMD_SYNC_0_CS GENMASK(13, 12)
+#define IVPU_MMU_CMD_SYNC_0_MSH GENMASK(23, 22)
+#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR GENMASK(27, 24)
+#define IVPU_MMU_CMD_SYNC_0_MSI_DATA GENMASK(63, 32)
+
+#define IVPU_MMU_CMD_CFGI_0_SSEC BIT(10)
+#define IVPU_MMU_CMD_CFGI_0_SSV BIT(11)
+#define IVPU_MMU_CMD_CFGI_0_SSID GENMASK(31, 12)
+#define IVPU_MMU_CMD_CFGI_0_SID GENMASK(63, 32)
+#define IVPU_MMU_CMD_CFGI_1_RANGE GENMASK(4, 0)
+
+#define IVPU_MMU_CMD_TLBI_0_ASID GENMASK(63, 48)
+#define IVPU_MMU_CMD_TLBI_0_VMID GENMASK(47, 32)
+
+#define CMD_PREFETCH_CFG 0x1
+#define CMD_CFGI_STE 0x3
+#define CMD_CFGI_ALL 0x4
+#define CMD_CFGI_CD 0x5
+#define CMD_CFGI_CD_ALL 0x6
+#define CMD_TLBI_NH_ASID 0x11
+#define CMD_TLBI_EL2_ALL 0x20
+#define CMD_TLBI_NSNH_ALL 0x30
+#define CMD_SYNC 0x46
+
+#define IVPU_MMU_EVT_F_UUT 0x01
+#define IVPU_MMU_EVT_C_BAD_STREAMID 0x02
+#define IVPU_MMU_EVT_F_STE_FETCH 0x03
+#define IVPU_MMU_EVT_C_BAD_STE 0x04
+#define IVPU_MMU_EVT_F_BAD_ATS_TREQ 0x05
+#define IVPU_MMU_EVT_F_STREAM_DISABLED 0x06
+#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN 0x07
+#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID 0x08
+#define IVPU_MMU_EVT_F_CD_FETCH 0x09
+#define IVPU_MMU_EVT_C_BAD_CD 0x0a
+#define IVPU_MMU_EVT_F_WALK_EABT 0x0b
+#define IVPU_MMU_EVT_F_TRANSLATION 0x10
+#define IVPU_MMU_EVT_F_ADDR_SIZE 0x11
+#define IVPU_MMU_EVT_F_ACCESS 0x12
+#define IVPU_MMU_EVT_F_PERMISSION 0x13
+#define IVPU_MMU_EVT_F_TLB_CONFLICT 0x20
+#define IVPU_MMU_EVT_F_CFG_CONFLICT 0x21
+#define IVPU_MMU_EVT_E_PAGE_REQUEST 0x24
+#define IVPU_MMU_EVT_F_VMS_FETCH 0x25
+
+#define IVPU_MMU_EVT_OP_MASK GENMASK_ULL(7, 0)
+#define IVPU_MMU_EVT_SSID_MASK GENMASK_ULL(31, 12)
+
+#define IVPU_MMU_Q_BASE_RWA BIT(62)
+#define IVPU_MMU_Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
+#define IVPU_MMU_STRTAB_BASE_RA BIT(62)
+#define IVPU_MMU_STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
+
+#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
+#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
+
+#define IVPU_MMU_CR0_ATSCHK BIT(4)
+#define IVPU_MMU_CR0_CMDQEN BIT(3)
+#define IVPU_MMU_CR0_EVTQEN BIT(2)
+#define IVPU_MMU_CR0_PRIQEN BIT(1)
+#define IVPU_MMU_CR0_SMMUEN BIT(0)
+
+#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
+#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
+#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
+#define IVPU_MMU_CR1_QUEUE_SH GENMASK(5, 4)
+#define IVPU_MMU_CR1_QUEUE_OC GENMASK(3, 2)
+#define IVPU_MMU_CR1_QUEUE_IC GENMASK(1, 0)
+#define IVPU_MMU_CACHE_NC 0
+#define IVPU_MMU_CACHE_WB 1
+#define IVPU_MMU_CACHE_WT 2
+#define IVPU_MMU_SH_NSH 0
+#define IVPU_MMU_SH_OSH 2
+#define IVPU_MMU_SH_ISH 3
+
+#define IVPU_MMU_CMDQ_OP GENMASK_ULL(7, 0)
+
+#define IVPU_MMU_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
+#define IVPU_MMU_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
+#define IVPU_MMU_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
+#define IVPU_MMU_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
+#define IVPU_MMU_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
+#define IVPU_MMU_CD_0_TCR_EPD0 BIT_ULL(14)
+#define IVPU_MMU_CD_0_TCR_EPD1 BIT_ULL(30)
+#define IVPU_MMU_CD_0_ENDI BIT(15)
+#define IVPU_MMU_CD_0_V BIT(31)
+#define IVPU_MMU_CD_0_TCR_IPS GENMASK_ULL(34, 32)
+#define IVPU_MMU_CD_0_TCR_TBI0 BIT_ULL(38)
+#define IVPU_MMU_CD_0_AA64 BIT(41)
+#define IVPU_MMU_CD_0_S BIT(44)
+#define IVPU_MMU_CD_0_R BIT(45)
+#define IVPU_MMU_CD_0_A BIT(46)
+#define IVPU_MMU_CD_0_ASET BIT(47)
+#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
+
+#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
+
+#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
+#define IVPU_MMU_STE_0_S1FMT GENMASK_ULL(5, 4)
+#define IVPU_MMU_STE_0_S1FMT_LINEAR 0
+#define IVPU_MMU_STE_DWORDS 8
+#define IVPU_MMU_STE_0_CFG_S1_TRANS 5
+#define IVPU_MMU_STE_0_CFG GENMASK_ULL(3, 1)
+#define IVPU_MMU_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
+#define IVPU_MMU_STE_0_V BIT(0)
+
+#define IVPU_MMU_STE_1_STRW_NSEL1 0ul
+#define IVPU_MMU_STE_1_CONT GENMASK_ULL(16, 13)
+#define IVPU_MMU_STE_1_STRW GENMASK_ULL(31, 30)
+#define IVPU_MMU_STE_1_PRIVCFG GENMASK_ULL(49, 48)
+#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV 2ul
+#define IVPU_MMU_STE_1_INSTCFG GENMASK_ULL(51, 50)
+#define IVPU_MMU_STE_1_INSTCFG_DATA 2ul
+#define IVPU_MMU_STE_1_MEV BIT(19)
+#define IVPU_MMU_STE_1_S1STALLD BIT(27)
+#define IVPU_MMU_STE_1_S1C_CACHE_NC 0ul
+#define IVPU_MMU_STE_1_S1C_CACHE_WBRA 1ul
+#define IVPU_MMU_STE_1_S1C_CACHE_WT 2ul
+#define IVPU_MMU_STE_1_S1C_CACHE_WB 3ul
+#define IVPU_MMU_STE_1_S1CIR GENMASK_ULL(3, 2)
+#define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4)
+#define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6)
+#define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0)
+#define IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0
+
+#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
+#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
+
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
+
+static char *ivpu_mmu_event_to_str(u32 cmd)
+{
+ switch (cmd) {
+ case IVPU_MMU_EVT_F_UUT:
+ return "Unsupported Upstream Transaction";
+ case IVPU_MMU_EVT_C_BAD_STREAMID:
+ return "Transaction StreamID out of range";
+ case IVPU_MMU_EVT_F_STE_FETCH:
+ return "Fetch of STE caused external abort";
+ case IVPU_MMU_EVT_C_BAD_STE:
+ return "Used STE invalid";
+ case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
+ return "Address Request disallowed for a StreamID";
+ case IVPU_MMU_EVT_F_STREAM_DISABLED:
+ return "Transaction marks non-substream disabled";
+ case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
+ return "MMU bypass is disallowed for this StreamID";
+ case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
+		return "Invalid SubstreamID";
+ case IVPU_MMU_EVT_F_CD_FETCH:
+ return "Fetch of CD caused external abort";
+ case IVPU_MMU_EVT_C_BAD_CD:
+ return "Fetched CD invalid";
+ case IVPU_MMU_EVT_F_WALK_EABT:
+		return "An external abort occurred fetching a TLB";
+ case IVPU_MMU_EVT_F_TRANSLATION:
+ return "Translation fault";
+ case IVPU_MMU_EVT_F_ADDR_SIZE:
+		return "Output address caused address size fault";
+ case IVPU_MMU_EVT_F_ACCESS:
+ return "Access flag fault";
+ case IVPU_MMU_EVT_F_PERMISSION:
+ return "Permission fault occurred on page access";
+ case IVPU_MMU_EVT_F_TLB_CONFLICT:
+ return "A TLB conflict";
+ case IVPU_MMU_EVT_F_CFG_CONFLICT:
+ return "A configuration cache conflict";
+ case IVPU_MMU_EVT_E_PAGE_REQUEST:
+ return "Page request hint from a client device";
+ case IVPU_MMU_EVT_F_VMS_FETCH:
+ return "Fetch of VMS caused external abort";
+ default:
+		return "Unknown event";
+ }
+}
+
+static void ivpu_mmu_config_check(struct ivpu_device *vdev)
+{
+ u32 val_ref;
+ u32 val;
+
+ if (ivpu_is_simics(vdev))
+ val_ref = IVPU_MMU_IDR0_REF_SIMICS;
+ else
+ val_ref = IVPU_MMU_IDR0_REF;
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
+ if (val != val_ref)
+ ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
+ if (val != IVPU_MMU_IDR1_REF)
+ ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
+ if (val != IVPU_MMU_IDR3_REF)
+ ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
+
+ if (ivpu_is_simics(vdev))
+ val_ref = IVPU_MMU_IDR5_REF_SIMICS;
+ else if (ivpu_is_fpga(vdev))
+ val_ref = IVPU_MMU_IDR5_REF_FPGA;
+ else
+ val_ref = IVPU_MMU_IDR5_REF;
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
+ if (val != val_ref)
+ ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
+}
+
+static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;
+
+ cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
+ if (!cdtab->base)
+ return -ENOMEM;
+
+ ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);
+
+ return 0;
+}
+
+static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_strtab *strtab = &mmu->strtab;
+ size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;
+
+ strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
+ if (!strtab->base)
+ return -ENOMEM;
+
+ strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
+ strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
+ strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;
+
+ ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
+ &strtab->dma, &strtab->dma_q, size);
+
+ return 0;
+}
+
+static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_queue *q = &mmu->cmdq;
+
+ q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
+ if (!q->base)
+ return -ENOMEM;
+
+ q->dma_q = IVPU_MMU_Q_BASE_RWA;
+ q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
+ q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
+
+ ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
+ &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);
+
+ return 0;
+}
+
+static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_queue *q = &mmu->evtq;
+
+ q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
+ if (!q->base)
+ return -ENOMEM;
+
+ q->dma_q = IVPU_MMU_Q_BASE_RWA;
+ q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
+ q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
+
+ ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
+ &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);
+
+ return 0;
+}
+
+static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_mmu_cdtab_alloc(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_strtab_alloc(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_cmdq_alloc(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_evtq_alloc(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
+{
+ u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
+ u32 val_ack;
+ int ret;
+
+ REGV_WR32(reg, val);
+
+ ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
+
+ return ret;
+}
+
+static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
+{
+ u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
+ int ret;
+
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
+ if (ret)
+ return ret;
+
+ return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
+}
+
+static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+
+ return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
+ IVPU_MMU_QUEUE_TIMEOUT_US);
+}
+
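+/*
+ * Append a command to the MMU command queue. Each command is written as two
+ * 64-bit words (data0, data1); prod and cons are free-running indices wrapped
+ * by IVPU_MMU_Q_WRAP_MASK, and CIRC_SPACE() prevents overwriting entries the
+ * MMU has not consumed yet.
+ */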
+static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
+{
+ struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
+ u64 *queue_buffer = q->base;
+ int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+
+ if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+ ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
+ return -EBUSY;
+ }
+
+ queue_buffer[idx] = data0;
+ queue_buffer[idx + 1] = data1;
+ q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+
+ ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
+
+ return 0;
+}
+
+static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
+ u64 val;
+ int ret;
+
+ val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) |
+ FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) |
+ FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
+ FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
+
+ ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
+ if (ret)
+ return ret;
+
+ clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
+ REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
+
+ ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
+{
+ u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
+ u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);
+
+ return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
+}
+
+static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
+{
+ u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
+ FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);
+
+ return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
+}
+
+static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
+{
+ u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);
+
+ return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
+}
+
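+/*
+ * Bring the MMU to a known state: clear both queues, disable translation via
+ * CR0, program the table/queue attributes and base registers, then enable the
+ * command queue, invalidate cached configuration and TLB entries, enable the
+ * event queue, ATS checks and interrupts, and finally set SMMUEN.
+ */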
+static int ivpu_mmu_reset(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ u32 val;
+ int ret;
+
+ memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
+ clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
+ mmu->cmdq.prod = 0;
+ mmu->cmdq.cons = 0;
+
+ memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
+ clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
+ mmu->evtq.prod = 0;
+ mmu->evtq.cons = 0;
+
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
+ FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
+ FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
+ FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
+ FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
+ FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
+ REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
+
+ REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
+ REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+
+ REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
+ REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
+ REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
+
+ val = IVPU_MMU_CR0_CMDQEN;
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ return ret;
+
+ REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
+ REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
+ REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
+
+ val |= IVPU_MMU_CR0_EVTQEN;
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ if (ret)
+ return ret;
+
+ val |= IVPU_MMU_CR0_ATSCHK;
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_irqs_setup(vdev);
+ if (ret)
+ return ret;
+
+ val |= IVPU_MMU_CR0_SMMUEN;
+ return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+}
+
+static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_strtab *strtab = &mmu->strtab;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
+ u64 str[2];
+
+ str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
+ FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
+ IVPU_MMU_STE_0_V |
+ (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);
+
+ str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
+ FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
+ FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
+ FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
+ FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
+ FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
+ FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
+ FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
+ IVPU_MMU_STE_1_MEV |
+ IVPU_MMU_STE_1_S1STALLD;
+
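+	/*
+	 * Publish the STE with the word carrying the V (valid) bit last, so the
+	 * MMU never observes a valid but partially written entry.
+	 */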
+ WRITE_ONCE(entry[1], str[1]);
+ WRITE_ONCE(entry[0], str[0]);
+
+ clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
+
+ ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
+}
+
+static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
+{
+ ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
+ ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);
+
+ return 0;
+}
+
+int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&mmu->lock);
+ if (ret)
+ return ret;
+
+ if (!mmu->on) {
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
+ if (ret)
+ goto unlock;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+unlock:
+ mutex_unlock(&mmu->lock);
+ return ret;
+}
+
+static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 cd[4];
+ int ret;
+
+	if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
+ if (cd_dma != 0) {
+ cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) |
+ FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
+ IVPU_MMU_CD_0_TCR_EPD1 |
+ IVPU_MMU_CD_0_AA64 |
+ IVPU_MMU_CD_0_R |
+ IVPU_MMU_CD_0_ASET |
+ IVPU_MMU_CD_0_V;
+ cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
+ cd[2] = 0;
+ cd[3] = 0x0000000000007444;
+
+ /* For global context generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ cd[0] |= IVPU_MMU_CD_0_A;
+ } else {
+ memset(cd, 0, sizeof(cd));
+ }
+
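+	/*
+	 * As with the STE, write cd[0] (which carries the V bit) last so the MMU
+	 * only ever sees a fully initialized context descriptor.
+	 */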
+ WRITE_ONCE(entry[1], cd[1]);
+ WRITE_ONCE(entry[2], cd[2]);
+ WRITE_ONCE(entry[3], cd[3]);
+ WRITE_ONCE(entry[0], cd[0]);
+
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
+
+ ret = mutex_lock_interruptible(&mmu->lock);
+ if (ret)
+ return ret;
+
+ if (!mmu->on) {
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ if (ret)
+ goto unlock;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+unlock:
+ mutex_unlock(&mmu->lock);
+ return ret;
+}
+
+static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
+ if (ret)
+ ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
+{
+ int ret;
+
+ if (ssid == 0) {
+ ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
+ return -EINVAL;
+ }
+
+ ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
+ if (ret)
+ ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
+
+ return ret;
+}
+
+int ivpu_mmu_init(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ int ret;
+
+ ivpu_dbg(vdev, MMU, "Init..\n");
+
+ drmm_mutex_init(&vdev->drm, &mmu->lock);
+ ivpu_mmu_config_check(vdev);
+
+ ret = ivpu_mmu_structs_alloc(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_strtab_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_cd_add_gbl(vdev);
+ if (ret) {
+		ivpu_err(vdev, "Failed to initialize global CD: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_dbg(vdev, MMU, "Init done\n");
+
+ return 0;
+}
+
+int ivpu_mmu_enable(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ int ret;
+
+ mutex_lock(&mmu->lock);
+
+ mmu->on = true;
+
+ ret = ivpu_mmu_reset(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
+ goto err;
+ }
+
+ ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ if (ret)
+ goto err;
+
+ ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
+ if (ret)
+ goto err;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ goto err;
+
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+err:
+ mmu->on = false;
+ mutex_unlock(&mmu->lock);
+ return ret;
+}
+
+void ivpu_mmu_disable(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+
+ mutex_lock(&mmu->lock);
+ mmu->on = false;
+ mutex_unlock(&mmu->lock);
+}
+
+static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
+{
+ u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
+ u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
+ u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
+ u64 in_addr = ((u64)event[5]) << 32 | event[4];
+ u32 sid = event[1];
+
+ ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
+ op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
+}
+
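+/*
+ * Fetch the next unread entry from the event queue: refresh the producer index
+ * from the device, return NULL if the queue is empty, flush the CPU cache lines
+ * covering the entry, then advance and acknowledge the consumer index.
+ */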
+static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
+ u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
+ u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
+
+ evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
+ if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ return NULL;
+
+ clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
+
+ evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
+ REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+
+ return evt;
+}
+
+void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
+{
+ bool schedule_recovery = false;
+ u32 *event;
+ u32 ssid;
+
+ ivpu_dbg(vdev, IRQ, "MMU event queue\n");
+
+ while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
+ ivpu_mmu_dump_event(vdev, event);
+
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ schedule_recovery = true;
+ else
+ ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ }
+
+ if (schedule_recovery)
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
+{
+ u32 gerror_val, gerrorn_val, active;
+
+ ivpu_dbg(vdev, IRQ, "MMU error\n");
+
+ gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
+ gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
+
+ active = gerror_val ^ gerrorn_val;
+ if (!(active & IVPU_MMU_GERROR_ERR_MASK))
+ return;
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
+ ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
+ ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
+ ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
+
+ REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
+}
+
+int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
+{
+ return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
+}
+
+void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
+{
+ ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
+}
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
new file mode 100644
index 000000000000..cb551126806b
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_MMU_H__
+#define __IVPU_MMU_H__
+
+struct ivpu_device;
+
+struct ivpu_mmu_cdtab {
+ void *base;
+ dma_addr_t dma;
+};
+
+struct ivpu_mmu_strtab {
+ void *base;
+ dma_addr_t dma;
+ u64 dma_q;
+ u32 base_cfg;
+};
+
+struct ivpu_mmu_queue {
+ void *base;
+ dma_addr_t dma;
+ u64 dma_q;
+ u32 prod;
+ u32 cons;
+};
+
+struct ivpu_mmu_info {
+ struct mutex lock; /* Protects cdtab, strtab, cmdq, on */
+ struct ivpu_mmu_cdtab cdtab;
+ struct ivpu_mmu_strtab strtab;
+ struct ivpu_mmu_queue cmdq;
+ struct ivpu_mmu_queue evtq;
+ bool on;
+};
+
+int ivpu_mmu_init(struct ivpu_device *vdev);
+void ivpu_mmu_disable(struct ivpu_device *vdev);
+int ivpu_mmu_enable(struct ivpu_device *vdev);
+int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
+void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid);
+int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
+
+void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
+void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+
+#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
new file mode 100644
index 000000000000..8ce9b12ac356
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/highmem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+
+#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30)
+#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
+#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
+#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0)
+#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
+#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
+#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
+#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
+#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
+#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)
+
+#define IVPU_MMU_PAGE_SIZE SZ_4K
+#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
+#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
+#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
+
+#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
+#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
+#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
+#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
+ IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
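+
+/*
+ * Three-level page table: each level holds 512 (9-bit) entries and pages are
+ * 4 KB, so one PTE table maps 2 MB and one PMD table maps 1 GB of VPU address
+ * space. Purely as an illustration, vpu_addr 0xC0603000 decomposes into PGD
+ * index 3 (bits 38:30), PMD index 3 (bits 29:21), PTE index 3 (bits 20:12)
+ * and page offset 0x0 (bits 11:0).
+ */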
+
+static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ dma_addr_t pgd_dma;
+ u64 *pgd;
+
+ pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
+ if (!pgd)
+ return -ENOMEM;
+
+ pgtable->pgd = pgd;
+ pgtable->pgd_dma = pgd_dma;
+
+ return 0;
+}
+
+static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ int pgd_index, pmd_index;
+
+ for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) {
+ u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index];
+ u64 *pmd = pgtable->pgd_entries[pgd_index];
+
+ if (!pmd_entries)
+ continue;
+
+ for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) {
+ if (pmd_entries[pmd_index])
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
+ pmd_entries[pmd_index],
+ pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ }
+
+ kfree(pmd_entries);
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index],
+ pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ }
+
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
+ pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+}
+
+static u64*
+ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index)
+{
+ u64 **pmd_entries;
+ dma_addr_t pmd_dma;
+ u64 *pmd;
+
+ if (pgtable->pgd_entries[pgd_index])
+ return pgtable->pgd_entries[pgd_index];
+
+ pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+ if (!pmd)
+ return NULL;
+
+ pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+ if (!pmd_entries)
+		goto err_free_pmd;
+
+ pgtable->pgd_entries[pgd_index] = pmd;
+ pgtable->pgd_cpu_entries[pgd_index] = pmd_entries;
+ pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pmd;
+
+err_free_pmd:
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
+ return NULL;
+}
+
+static u64*
+ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
+ int pgd_index, int pmd_index)
+{
+ dma_addr_t pte_dma;
+ u64 *pte;
+
+ if (pgtable->pgd_cpu_entries[pgd_index][pmd_index])
+ return pgtable->pgd_cpu_entries[pgd_index][pmd_index];
+
+ pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte;
+ pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pte;
+}
+
+static int
+ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, dma_addr_t dma_addr, int prot)
+{
+ u64 *pte;
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ /* Allocate PMD - second level page table if needed */
+ if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index))
+ return -ENOMEM;
+
+ /* Allocate PTE - third level page table if needed */
+ pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index);
+ if (!pte)
+ return -ENOMEM;
+
+ /* Update PTE - third level page table with DMA address */
+ pte[pte_index] = dma_addr | prot;
+
+ return 0;
+}
+
+static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
+{
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ /* Update PTE with dummy physical address and clear flags */
+ ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID;
+}
+
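+/*
+ * Write back the CPU cache lines of every page-table page covering
+ * [vpu_addr, vpu_addr + size): leaf PTE tables first, then the PMD tables and
+ * finally the PGD, so the device-visible copies are up to date.
+ */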
+static void
+ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+ u64 end_addr = vpu_addr + size;
+ u64 *pgd = ctx->pgtable.pgd;
+
+ /* Align to PMD entry (2 MB) */
+ vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
+
+ while (vpu_addr < end_addr) {
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
+ u64 *pmd = ctx->pgtable.pgd_entries[pgd_index];
+
+ while (vpu_addr < end_addr && vpu_addr < pmd_end) {
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index];
+
+ clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
+ vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+ }
+ clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
+ }
+ clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
+}
+
+static int
+ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
+{
+ while (size) {
+ int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+
+ if (ret)
+ return ret;
+
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ dma_addr += IVPU_MMU_PAGE_SIZE;
+ size -= IVPU_MMU_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+ while (size) {
+ ivpu_mmu_context_unmap_page(ctx, vpu_addr);
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ size -= IVPU_MMU_PAGE_SIZE;
+ }
+}
+
+int
+ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
+{
+ struct scatterlist *sg;
+ int prot;
+ int ret;
+ u64 i;
+
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
+ return -EINVAL;
+ /*
+ * VPU is only 32 bit, but DMA engine is 38 bit
+ * Ranges < 2 GB are reserved for VPU internal registers
+ * Limit range to 8 GB
+ */
+ if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+ return -EINVAL;
+
+ prot = IVPU_MMU_ENTRY_MAPPED;
+ if (llc_coherent)
+ prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
+
+ mutex_lock(&ctx->lock);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ u64 dma_addr = sg_dma_address(sg) - sg->offset;
+ size_t size = sg_dma_len(sg) + sg->offset;
+
+ ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map context pages\n");
+ mutex_unlock(&ctx->lock);
+ return ret;
+ }
+ ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
+ vpu_addr += size;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ return ret;
+}
+
+void
+ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int ret;
+ u64 i;
+
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
+ ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
+
+ mutex_lock(&ctx->lock);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ size_t size = sg_dma_len(sg) + sg->offset;
+
+ ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
+ ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
+ vpu_addr += size;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+}
+
+int
+ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node)
+{
+ lockdep_assert_held(&ctx->lock);
+
+ return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
+ 0, range->start, range->end, DRM_MM_INSERT_BEST);
+}
+
+void
+ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
+{
+ lockdep_assert_held(&ctx->lock);
+
+ drm_mm_remove_node(node);
+}
+
+static int
+ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
+{
+ u64 start, end;
+ int ret;
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->bo_list);
+
+ ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
+ if (ret)
+ return ret;
+
+ if (!context_id) {
+ start = vdev->hw->ranges.global_low.start;
+ end = vdev->hw->ranges.global_high.end;
+ } else {
+ start = vdev->hw->ranges.user_low.start;
+ end = vdev->hw->ranges.user_high.end;
+ }
+
+ drm_mm_init(&ctx->mm, start, end - start);
+ ctx->id = context_id;
+
+ return 0;
+}
+
+static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+{
+ drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);
+
+ mutex_destroy(&ctx->lock);
+ ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
+ drm_mm_takedown(&ctx->mm);
+}
+
+int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+}
+
+void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
+{
+	ivpu_mmu_context_fini(vdev, &vdev->gctx);
+}
+
+void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_file_priv *file_priv;
+
+ xa_lock(&vdev->context_xa);
+
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv)
+ file_priv->has_mmu_faults = true;
+
+ xa_unlock(&vdev->context_xa);
+}
+
+int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
+{
+ int ret;
+
+ drm_WARN_ON(&vdev->drm, !ctx_id);
+
+ ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set page table: %d\n", ret);
+ goto err_context_fini;
+ }
+
+ return 0;
+
+err_context_fini:
+ ivpu_mmu_context_fini(vdev, ctx);
+ return ret;
+}
+
+void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+{
+ drm_WARN_ON(&vdev->drm, !ctx->id);
+
+ ivpu_mmu_clear_pgtable(vdev, ctx->id);
+ ivpu_mmu_context_fini(vdev, ctx);
+}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
new file mode 100644
index 000000000000..ddf11b95023a
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_MMU_CONTEXT_H__
+#define __IVPU_MMU_CONTEXT_H__
+
+#include <drm/drm_mm.h>
+
+struct ivpu_device;
+struct ivpu_file_priv;
+struct ivpu_addr_range;
+
+#define IVPU_MMU_PGTABLE_ENTRIES 512
+
+struct ivpu_mmu_pgtable {
+ u64 **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 *pgd;
+ dma_addr_t pgd_dma;
+};
+
+struct ivpu_mmu_context {
+ struct mutex lock; /* protects: mm, pgtable, bo_list */
+ struct drm_mm mm;
+ struct ivpu_mmu_pgtable pgtable;
+ struct list_head bo_list;
+ u32 id;
+};
+
+int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
+void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
+
+int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
+void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
+
+int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node);
+void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx,
+ struct drm_mm_node *node);
+
+int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
+void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt);
+
+#endif /* __IVPU_MMU_CONTEXT_H__ */
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
new file mode 100644
index 000000000000..aa4d56dc52b3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/reboot.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_fw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+static bool ivpu_disable_recovery;
+module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
+MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
+
+#define PM_RESCHEDULE_LIMIT 5
+
+static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+
+ ivpu_cmdq_reset_all_contexts(vdev);
+ ivpu_ipc_reset(vdev);
+ ivpu_fw_load(vdev);
+ fw->entry_point = fw->cold_boot_entry_point;
+}
+
+static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ struct vpu_boot_params *bp = fw->mem->kvaddr;
+
+ if (!bp->save_restore_ret_address) {
+ ivpu_pm_prepare_cold_boot(vdev);
+ return;
+ }
+
+ ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
+ fw->entry_point = bp->save_restore_ret_address;
+}
+
+static int ivpu_suspend(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_shutdown(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ivpu_resume(struct ivpu_device *vdev)
+{
+ int ret;
+
+retry:
+ ret = ivpu_hw_power_up(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
+ ivpu_hw_power_down(vdev);
+ return ret;
+ }
+
+ ret = ivpu_boot(vdev);
+ if (ret) {
+ ivpu_mmu_disable(vdev);
+ ivpu_hw_power_down(vdev);
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret);
+ ivpu_pm_prepare_cold_boot(vdev);
+ goto retry;
+ } else {
+ ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+ int ret;
+
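+	/*
+	 * pci_try_reset_function() returns -EAGAIN when it cannot take the
+	 * device lock; keep retrying (yielding the CPU in between) unless the
+	 * device has been unplugged in the meantime.
+	 */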
+retry:
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
+ cond_resched();
+ goto retry;
+ }
+
+ if (ret && ret != -EAGAIN)
+ ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+}
+
+void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+{
+ struct ivpu_pm_info *pm = vdev->pm;
+
+ if (ivpu_disable_recovery) {
+ ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
+ return;
+ }
+
+ if (ivpu_is_fpga(vdev)) {
+ ivpu_err(vdev, "Recovery not available on FPGA\n");
+ return;
+ }
+
+ /* Schedule recovery if it's not in progress */
+ if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
+ ivpu_hw_irq_disable(vdev);
+ queue_work(system_long_wq, &pm->recovery_work);
+ }
+}
+
+int ivpu_pm_suspend_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ unsigned long timeout;
+
+ ivpu_dbg(vdev, PM, "Suspend..\n");
+
+ timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
+ while (!ivpu_hw_is_idle(vdev)) {
+ cond_resched();
+ if (time_after_eq(jiffies, timeout)) {
+ ivpu_err(vdev, "Failed to enter idle on system suspend\n");
+ return -EBUSY;
+ }
+ }
+
+ ivpu_suspend(vdev);
+ ivpu_pm_prepare_warm_boot(vdev);
+
+ pci_save_state(to_pci_dev(dev));
+ pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
+
+ ivpu_dbg(vdev, PM, "Suspend done.\n");
+
+ return 0;
+}
+
+int ivpu_pm_resume_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+	ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
+
+ ivpu_dbg(vdev, PM, "Resume..\n");
+
+ pci_set_power_state(to_pci_dev(dev), PCI_D0);
+ pci_restore_state(to_pci_dev(dev));
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume: %d\n", ret);
+
+ ivpu_dbg(vdev, PM, "Resume done.\n");
+
+ return ret;
+}
+
+int ivpu_pm_runtime_suspend_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Runtime suspend..\n");
+
+ if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
+ ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
+ vdev->pm->suspend_reschedule_counter);
+ pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
+ vdev->pm->suspend_reschedule_counter--;
+ return -EAGAIN;
+ }
+
+ ret = ivpu_suspend(vdev);
+ if (ret)
+		ivpu_err(vdev, "Failed to suspend VPU: %d\n", ret);
+
+ if (!vdev->pm->suspend_reschedule_counter) {
+ ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_pm_prepare_cold_boot(vdev);
+ } else {
+ ivpu_pm_prepare_warm_boot(vdev);
+ }
+
+ vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
+
+ return 0;
+}
+
+int ivpu_pm_runtime_resume_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Runtime resume..\n");
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
+
+ ivpu_dbg(vdev, PM, "Runtime resume done.\n");
+
+ return ret;
+}
+
+int ivpu_rpm_get(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(vdev->drm.dev);
+ if (!drm_WARN_ON(&vdev->drm, ret < 0))
+ vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ return ret;
+}
+
+void ivpu_rpm_put(struct ivpu_device *vdev)
+{
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
+
+void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+{
+ struct ivpu_device *vdev = pci_get_drvdata(pdev);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+
+ ivpu_dbg(vdev, PM, "Pre-reset..\n");
+ atomic_set(&vdev->pm->in_reset, 1);
+ ivpu_shutdown(vdev);
+ ivpu_pm_prepare_cold_boot(vdev);
+ ivpu_jobs_abort_all(vdev);
+ ivpu_dbg(vdev, PM, "Pre-reset done.\n");
+}
+
+void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
+{
+ struct ivpu_device *vdev = pci_get_drvdata(pdev);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Post-reset..\n");
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
+ atomic_set(&vdev->pm->in_reset, 0);
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
+
+int ivpu_pm_init(struct ivpu_device *vdev)
+{
+ struct device *dev = vdev->drm.dev;
+ struct ivpu_pm_info *pm = vdev->pm;
+
+ pm->vdev = vdev;
+ pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ atomic_set(&pm->in_reset, 0);
+ INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
+
+ pm_runtime_use_autosuspend(dev);
+
+ if (ivpu_disable_recovery)
+ pm_runtime_set_autosuspend_delay(dev, -1);
+ else if (ivpu_is_silicon(vdev))
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ else
+ pm_runtime_set_autosuspend_delay(dev, 60000);
+
+ return 0;
+}
+
+void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+{
+ cancel_work_sync(&vdev->pm->recovery_work);
+}
+
+void ivpu_pm_enable(struct ivpu_device *vdev)
+{
+ struct device *dev = vdev->drm.dev;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+void ivpu_pm_disable(struct ivpu_device *vdev)
+{
+ pm_runtime_get_noresume(vdev->drm.dev);
+ pm_runtime_forbid(vdev->drm.dev);
+}
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
new file mode 100644
index 000000000000..baca98187255
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_PM_H__
+#define __IVPU_PM_H__
+
+#include <linux/types.h>
+
+struct ivpu_device;
+
+struct ivpu_pm_info {
+ struct ivpu_device *vdev;
+ struct work_struct recovery_work;
+ atomic_t in_reset;
+ bool is_warmboot;
+ u32 suspend_reschedule_counter;
+};
+
+int ivpu_pm_init(struct ivpu_device *vdev);
+void ivpu_pm_enable(struct ivpu_device *vdev);
+void ivpu_pm_disable(struct ivpu_device *vdev);
+void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+
+int ivpu_pm_suspend_cb(struct device *dev);
+int ivpu_pm_resume_cb(struct device *dev);
+int ivpu_pm_runtime_suspend_cb(struct device *dev);
+int ivpu_pm_runtime_resume_cb(struct device *dev);
+
+void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev);
+void ivpu_pm_reset_done_cb(struct pci_dev *pdev);
+
+int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
+void ivpu_rpm_put(struct ivpu_device *vdev);
+
+void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+
+#endif /* __IVPU_PM_H__ */
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
new file mode 100644
index 000000000000..6b71be92ba65
--- /dev/null
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef VPU_BOOT_API_H
+#define VPU_BOOT_API_H
+
+/*
+ * =========== FW API version information beginning ================
+ * The below values will be used to construct the version info this way:
+ * fw_bin_header->api_version[VPU_BOOT_API_VER_INDEX] = (VPU_BOOT_API_VER_MAJOR << 16) |
+ *                                                      VPU_BOOT_API_VER_MINOR;
+ * VPU_BOOT_API_VER_PATCH is ignored; KMD compatibility is not affected if it changes.
+ */
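+
+/*
+ * Purely illustrative (not part of the API itself): a consumer of the packed
+ * word above can recover the advertised version as:
+ *
+ *   major = api_version[VPU_BOOT_API_VER_INDEX] >> 16;
+ *   minor = api_version[VPU_BOOT_API_VER_INDEX] & 0xffff;
+ */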
+
+/*
+ * Major version changes that break backward compatibility.
+ * Major version must start from 1 and can only be incremented.
+ */
+#define VPU_BOOT_API_VER_MAJOR 3
+
+/*
+ * Minor version changes when API backward compatibility is preserved.
+ * Resets to 0 if Major version is incremented.
+ */
+#define VPU_BOOT_API_VER_MINOR 12
+
+/*
+ * API header changed (field names, documentation, formatting) but API itself has not been changed
+ */
+#define VPU_BOOT_API_VER_PATCH 2
+
+/*
+ * Index in the API version table
+ * Must be unique for each API
+ */
+#define VPU_BOOT_API_VER_INDEX 0
+/* ------------ FW API version information end ---------------------*/
+
+#pragma pack(push, 1)
+
+/*
+ * Firmware image header format
+ */
+#define VPU_FW_HEADER_SIZE 4096
+#define VPU_FW_HEADER_VERSION 0x1
+#define VPU_FW_VERSION_SIZE 32
+#define VPU_FW_API_VER_NUM 16
+
+struct vpu_firmware_header {
+ u32 header_version;
+ u32 image_format;
+ u64 image_load_address;
+ u32 image_size;
+ u64 entry_point;
+ u8 vpu_version[VPU_FW_VERSION_SIZE];
+ u32 compression_type;
+ u64 firmware_version_load_address;
+ u32 firmware_version_size;
+ u64 boot_params_load_address;
+ u32 api_version[VPU_FW_API_VER_NUM];
+	/* Size of memory required for firmware execution */
+ u32 runtime_size;
+ u32 shave_nn_fw_size;
+};
+
+/*
+ * Firmware boot parameters format
+ */
+
+#define VPU_BOOT_PLL_COUNT 3
+#define VPU_BOOT_PLL_OUT_COUNT 4
+
+/** Values for boot_type field */
+#define VPU_BOOT_TYPE_COLDBOOT 0
+#define VPU_BOOT_TYPE_WARMBOOT 1
+
+/** Value for the magic field */
+#define VPU_BOOT_PARAMS_MAGIC 0x10000
+
+/** VPU scheduling mode. By default, OS scheduling is used. */
+#define VPU_SCHEDULING_MODE_OS 0
+#define VPU_SCHEDULING_MODE_HW 1
+
+enum VPU_BOOT_L2_CACHE_CFG_TYPE {
+ VPU_BOOT_L2_CACHE_CFG_UPA = 0,
+ VPU_BOOT_L2_CACHE_CFG_NN = 1,
+ VPU_BOOT_L2_CACHE_CFG_NUM = 2
+};
+
+/**
+ * Logging destinations.
+ *
+ * Logging output can be directed to different logging destinations. This enum
+ * defines the list of logging destinations supported by the VPU firmware (NOTE:
+ * a specific VPU FW binary may support only a subset of such output
+ * destinations, depending on the target platform and compile options).
+ */
+enum vpu_trace_destination {
+ VPU_TRACE_DESTINATION_PIPEPRINT = 0x1,
+ VPU_TRACE_DESTINATION_VERBOSE_TRACING = 0x2,
+ VPU_TRACE_DESTINATION_NORTH_PEAK = 0x4,
+};
+
+/*
+ * Processor bit shifts (for loggable HW components).
+ */
+#define VPU_TRACE_PROC_BIT_ARM 0
+#define VPU_TRACE_PROC_BIT_LRT 1
+#define VPU_TRACE_PROC_BIT_LNN 2
+#define VPU_TRACE_PROC_BIT_SHV_0 3
+#define VPU_TRACE_PROC_BIT_SHV_1 4
+#define VPU_TRACE_PROC_BIT_SHV_2 5
+#define VPU_TRACE_PROC_BIT_SHV_3 6
+#define VPU_TRACE_PROC_BIT_SHV_4 7
+#define VPU_TRACE_PROC_BIT_SHV_5 8
+#define VPU_TRACE_PROC_BIT_SHV_6 9
+#define VPU_TRACE_PROC_BIT_SHV_7 10
+#define VPU_TRACE_PROC_BIT_SHV_8 11
+#define VPU_TRACE_PROC_BIT_SHV_9 12
+#define VPU_TRACE_PROC_BIT_SHV_10 13
+#define VPU_TRACE_PROC_BIT_SHV_11 14
+#define VPU_TRACE_PROC_BIT_SHV_12 15
+#define VPU_TRACE_PROC_BIT_SHV_13 16
+#define VPU_TRACE_PROC_BIT_SHV_14 17
+#define VPU_TRACE_PROC_BIT_SHV_15 18
+#define VPU_TRACE_PROC_BIT_ACT_SHV_0 19
+#define VPU_TRACE_PROC_BIT_ACT_SHV_1 20
+#define VPU_TRACE_PROC_BIT_ACT_SHV_2 21
+#define VPU_TRACE_PROC_BIT_ACT_SHV_3 22
+#define VPU_TRACE_PROC_NO_OF_HW_DEVS 23
+
+/* KMB HW component IDs are sequential, so define first and last IDs. */
+#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_LRT
+#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_SHV_15
+
+struct vpu_boot_l2_cache_config {
+ u8 use;
+ u8 cfg;
+};
+
+struct vpu_warm_boot_section {
+ u32 src;
+ u32 dst;
+ u32 size;
+ u32 core_id;
+ u32 is_clear_op;
+};
+
+struct vpu_boot_params {
+ u32 magic;
+ u32 vpu_id;
+ u32 vpu_count;
+ u32 pad0[5];
+ /* Clock frequencies: 0x20 - 0xFF */
+ u32 frequency;
+ u32 pll[VPU_BOOT_PLL_COUNT][VPU_BOOT_PLL_OUT_COUNT];
+ u32 perf_clk_frequency;
+ u32 pad1[42];
+ /* Memory regions: 0x100 - 0x1FF */
+ u64 ipc_header_area_start;
+ u32 ipc_header_area_size;
+ u64 shared_region_base;
+ u32 shared_region_size;
+ u64 ipc_payload_area_start;
+ u32 ipc_payload_area_size;
+ u64 global_aliased_pio_base;
+ u32 global_aliased_pio_size;
+ u32 autoconfig;
+ struct vpu_boot_l2_cache_config cache_defaults[VPU_BOOT_L2_CACHE_CFG_NUM];
+ u64 global_memory_allocator_base;
+ u32 global_memory_allocator_size;
+ /**
+ * ShaveNN FW section VPU base address
+ * On VPU2.7 HW this address must be within 2GB range starting from L2C_PAGE_TABLE base
+ */
+ u64 shave_nn_fw_base;
+ u64 save_restore_ret_address; /* stores the address of FW's restore entry point */
+ u32 pad2[43];
+ /* IRQ re-direct numbers: 0x200 - 0x2FF */
+ s32 watchdog_irq_mss;
+ s32 watchdog_irq_nce;
+ /* ARM -> VPU doorbell interrupt. ARM is notifying VPU of async command or compute job. */
+ u32 host_to_vpu_irq;
+ /* VPU -> ARM job done interrupt. VPU is notifying ARM of compute job completion. */
+ u32 job_done_irq;
+ /* VPU -> ARM IRQ line to use to request MMU update. */
+ u32 mmu_update_request_irq;
+ /* ARM -> VPU IRQ line to use to notify of MMU update completion. */
+ u32 mmu_update_done_irq;
+ /* ARM -> VPU IRQ line to use to request power level change. */
+ u32 set_power_level_irq;
+ /* VPU -> ARM IRQ line to use to notify of power level change completion. */
+ u32 set_power_level_done_irq;
+ /* VPU -> ARM IRQ line to use to notify of VPU idle state change */
+ u32 set_vpu_idle_update_irq;
+ /* VPU -> ARM IRQ line to use to request counter reset. */
+ u32 metric_query_event_irq;
+ /* ARM -> VPU IRQ line to use to notify of counter reset completion. */
+ u32 metric_query_event_done_irq;
+ /* VPU -> ARM IRQ line to use to notify of preemption completion. */
+ u32 preemption_done_irq;
+ /* Padding. */
+ u32 pad3[52];
+ /* Silicon information: 0x300 - 0x3FF */
+ u32 host_version_id;
+ u32 si_stepping;
+ u64 device_id;
+ u64 feature_exclusion;
+ u64 sku;
+ /** PLL ratio for minimum clock frequency */
+ u32 min_freq_pll_ratio;
+ /** PLL ratio for maximum clock frequency */
+ u32 max_freq_pll_ratio;
+ /**
+ * Initial log level threshold (messages with log level severity less than
+ * the threshold will not be logged); applies to every enabled logging
+ * destination and loggable HW component. See 'mvLog_t' enum for acceptable
+ * values.
+ */
+ u32 default_trace_level;
+ u32 boot_type;
+ u64 punit_telemetry_sram_base;
+ u64 punit_telemetry_sram_size;
+ u32 vpu_telemetry_enable;
+ u64 crit_tracing_buff_addr;
+ u32 crit_tracing_buff_size;
+ u64 verbose_tracing_buff_addr;
+ u32 verbose_tracing_buff_size;
+ u64 verbose_tracing_sw_component_mask; /* TO BE REMOVED */
+ /**
+ * Mask of destinations to which logging messages are delivered; bitwise OR
+ * of values defined in vpu_trace_destination enum.
+ */
+ u32 trace_destination_mask;
+ /**
+ * Mask of hardware components for which logging is enabled; bitwise OR of
+ * bits defined by the VPU_TRACE_PROC_BIT_* macros.
+ */
+ u64 trace_hw_component_mask;
+ /** Mask of trace message formats supported by the driver */
+ u64 tracing_buff_message_format_mask;
+ u64 trace_reserved_1[2];
+ /**
+ * Period at which the VPU reads the temp sensor values into MMIO, on
+ * platforms where that is necessary (in ms). 0 to disable reads.
+ */
+ u32 temp_sensor_period_ms;
+ /** PLL ratio for efficient clock frequency */
+ u32 pn_freq_pll_ratio;
+ u32 pad4[28];
+ /* Warm boot information: 0x400 - 0x43F */
+ u32 warm_boot_sections_count;
+ u32 warm_boot_start_address_reference;
+ u32 warm_boot_section_info_address_offset;
+ u32 pad5[13];
+	/* Power state transition timestamps: 0x440 - 0x46F */
+ struct {
+ /* VPU_IDLE -> VPU_ACTIVE transition initiated timestamp */
+ u64 vpu_active_state_requested;
+ /* VPU_IDLE -> VPU_ACTIVE transition completed timestamp */
+ u64 vpu_active_state_achieved;
+ /* VPU_ACTIVE -> VPU_IDLE transition initiated timestamp */
+ u64 vpu_idle_state_requested;
+ /* VPU_ACTIVE -> VPU_IDLE transition completed timestamp */
+ u64 vpu_idle_state_achieved;
+ /* VPU_IDLE -> VPU_STANDBY transition initiated timestamp */
+ u64 vpu_standby_state_requested;
+ /* VPU_IDLE -> VPU_STANDBY transition completed timestamp */
+ u64 vpu_standby_state_achieved;
+ } power_states_timestamps;
+ /* VPU scheduling mode. Values defined by VPU_SCHEDULING_MODE_* macros. */
+ u32 vpu_scheduling_mode;
+ /* Present call period in milliseconds. */
+ u32 vpu_focus_present_timer_ms;
+ /* Unused/reserved: 0x478 - 0xFFF */
+ u32 pad6[738];
+};
+
+/*
+ * Magic numbers set between host and VPU to detect corruption of tracing init
+ */
+
+#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
+
+/* Tracing buffer message format definitions */
+#define VPU_TRACING_FORMAT_STRING 0
+#define VPU_TRACING_FORMAT_MIPI 2
+/*
+ * Header of the tracing buffer.
+ * The below defined header will be stored at the beginning of
+ * each allocated tracing buffer, followed by a series of 256b
+ * of ASCII trace message entries.
+ */
+struct vpu_tracing_buffer_header {
+ /**
+ * Magic number set by host to detect corruption
+ * @see VPU_TRACING_BUFFER_CANARY
+ */
+ u32 host_canary_start;
+ /* offset from start of buffer for trace entries */
+ u32 read_index;
+ u32 pad_to_cache_line_size_0[14];
+ /* End of first cache line */
+
+ /**
+ * Magic number set by host to detect corruption
+ * @see VPU_TRACING_BUFFER_CANARY
+ */
+ u32 vpu_canary_start;
+ /* offset from start of buffer from write start */
+ u32 write_index;
+ /* counter for buffer wrapping */
+ u32 wrap_count;
+ /* legacy field - do not use */
+ u32 reserved_0;
+ /**
+	 * Size of the log buffer including this header (@header_size) and the
+	 * space reserved for all messages. If @alignment is greater than 0,
+	 * @size must be a multiple of @alignment.
+ */
+ u32 size;
+ /* Header version */
+ u16 header_version;
+ /* Header size */
+ u16 header_size;
+ /*
+ * Format of the messages in the trace buffer
+ * 0 - null terminated string
+ * 1 - size + null terminated string
+ * 2 - MIPI-SysT encoding
+ */
+ u32 format;
+ /*
+ * Message alignment
+	 * 0 - messages are placed one after another
+	 * n - every message starts at an offset that is a multiple of n
+ */
+ u32 alignment; /* 64, 128, 256 */
+ /* Name of the logging entity, i.e "LRT", "LNN", "SHV0", etc */
+ char name[16];
+ u32 pad_to_cache_line_size_1[4];
+ /* End of second cache line */
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
new file mode 100644
index 000000000000..2949ec8365bd
--- /dev/null
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -0,0 +1,1008 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+/**
+ * @file
+ * @brief JSM shared definitions
+ *
+ * @ingroup Jsm
+ * @brief JSM shared definitions
+ * @{
+ */
+#ifndef VPU_JSM_API_H
+#define VPU_JSM_API_H
+
+/*
+ * Major version changes that break backward compatibility
+ */
+#define VPU_JSM_API_VER_MAJOR 3
+
+/*
+ * Minor version changes when API backward compatibility is preserved.
+ */
+#define VPU_JSM_API_VER_MINOR 0
+
+/*
+ * API header changed (field names, documentation, formatting) but API itself has not been changed
+ */
+#define VPU_JSM_API_VER_PATCH 1
+
+/*
+ * Index in the API version table
+ */
+#define VPU_JSM_API_VER_INDEX 4
+
+/*
+ * Number of Priority Bands for Hardware Scheduling
+ * Bands: RealTime, Focus, Normal, Idle
+ */
+#define VPU_HWS_NUM_PRIORITY_BANDS 4
+
+/* Max number of impacted contexts that can be dealt with the engine reset command */
+#define VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS 3
+
+/** Pack the API structures for now; once alignment issues are fixed this can be removed */
+#pragma pack(push, 1)
+
+/*
+ * Engine indexes.
+ */
+#define VPU_ENGINE_COMPUTE 0
+#define VPU_ENGINE_COPY 1
+#define VPU_ENGINE_NB 2
+
+/*
+ * VPU status values.
+ */
+#define VPU_JSM_STATUS_SUCCESS 0x0U
+#define VPU_JSM_STATUS_PARSING_ERR 0x1U
+#define VPU_JSM_STATUS_PROCESSING_ERR 0x2U
+#define VPU_JSM_STATUS_PREEMPTED 0x3U
+#define VPU_JSM_STATUS_ABORTED 0x4U
+#define VPU_JSM_STATUS_USER_CTX_VIOL_ERR 0x5U
+#define VPU_JSM_STATUS_GLOBAL_CTX_VIOL_ERR 0x6U
+#define VPU_JSM_STATUS_MVNCI_WRONG_INPUT_FORMAT 0x7U
+#define VPU_JSM_STATUS_MVNCI_UNSUPPORTED_NETWORK_ELEMENT 0x8U
+#define VPU_JSM_STATUS_MVNCI_INVALID_HANDLE 0x9U
+#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
+#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
+#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
+/* Job status returned when the job was preempted mid-inference */
+#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+
+/*
+ * Host <-> VPU IPC channels.
+ * ASYNC commands use a high priority channel, other messages use low-priority ones.
+ */
+#define VPU_IPC_CHAN_ASYNC_CMD 0
+#define VPU_IPC_CHAN_GEN_CMD 10
+#define VPU_IPC_CHAN_JOB_RET 11
+
+/*
+ * Job flags bit masks.
+ */
+#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
+
+/*
+ * Sizes of the reserved areas in jobs, in bytes.
+ */
+#define VPU_JOB_RESERVED_BYTES 16
+/*
+ * Sizes of the reserved areas in job queues, in bytes.
+ */
+#define VPU_JOB_QUEUE_RESERVED_BYTES 52
+
+/*
+ * Max length (including trailing NULL char) of trace entity name (e.g., the
+ * name of a logging destination or a loggable HW component).
+ */
+#define VPU_TRACE_ENTITY_NAME_MAX_LEN 32
+
+/*
+ * Max length (including trailing NULL char) of a dyndbg command.
+ *
+ * NOTE: 96 is used so that the size of 'struct vpu_ipc_msg' in the JSM API is
+ * 128 bytes (multiple of 64 bytes, the cache line size).
+ */
+#define VPU_DYNDBG_CMD_MAX_LEN 96
+
+/*
+ * Job format.
+ */
+struct vpu_job_queue_entry {
+ u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
+ u32 job_id; /**< Job ID */
+ u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ u64 root_page_table_addr; /**< Address of root page table to use for this job */
+ u64 root_page_table_update_counter; /**< Page tables update events counter */
+ u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */
+ u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */
+ u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+};
+
+/*
+ * Job queue control registers.
+ */
+struct vpu_job_queue_header {
+ u32 engine_idx;
+ u32 head;
+ u32 tail;
+ u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+};
+
+/*
+ * Job queue format.
+ */
+struct vpu_job_queue {
+ struct vpu_job_queue_header header;
+ struct vpu_job_queue_entry job[];
+};
+
+/**
+ * Logging entity types.
+ *
+ * This enum defines the different types of entities involved in logging.
+ */
+enum vpu_trace_entity_type {
+ /** Logging destination (entity where logs can be stored / printed). */
+ VPU_TRACE_ENTITY_TYPE_DESTINATION = 1,
+ /** Loggable HW component (HW entity that can be logged). */
+ VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
+};
+
+/*
+ * Host <-> VPU IPC messages types.
+ */
+enum vpu_ipc_msg_type {
+ VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
+ /* IPC Host -> Device, Async commands */
+ VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
+ VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
+ VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
+ VPU_JSM_MSG_SET_POWER_LEVEL = 0x1107,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_OPEN = 0x1108,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_CLOSE = 0x1109,
+ /** Configure logging (used to modify configuration passed in boot params). */
+ VPU_JSM_MSG_TRACE_SET_CONFIG = 0x110a,
+ /** Return current logging configuration. */
+ VPU_JSM_MSG_TRACE_GET_CONFIG = 0x110b,
+ /**
+ * Get masks of destinations and HW components supported by the firmware
+ * (may vary between HW generations and FW compile
+ * time configurations)
+ */
+ VPU_JSM_MSG_TRACE_GET_CAPABILITY = 0x110c,
+ /** Get the name of a destination or HW component. */
+ VPU_JSM_MSG_TRACE_GET_NAME = 0x110d,
+ /**
+	 * Release resources associated with a host ssid. All jobs that belong to the host_ssid
+	 * are aborted and removed from internal scheduling queues. All doorbells assigned
+ * to the host_ssid are unregistered and any internal FW resources belonging to
+ * the host_ssid are released.
+ */
+ VPU_JSM_MSG_SSID_RELEASE = 0x110e,
+ /**
+ * Start collecting metric data.
+ * @see vpu_jsm_metric_streamer_start
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_START = 0x110f,
+ /**
+ * Stop collecting metric data. This command will return success if it is called
+ * for a metric stream that has already been stopped or was never started.
+ * @see vpu_jsm_metric_streamer_stop
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_STOP = 0x1110,
+ /**
+ * Update current and next buffer for metric data collection. This command can
+ * also be used to request information about the number of collected samples
+ * and the amount of data written to the buffer.
+ * @see vpu_jsm_metric_streamer_update
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_UPDATE = 0x1111,
+ /**
+ * Request description of selected metric groups and metric counters within
+ * each group. The VPU will write the description of groups and counters to
+ * the buffer specified in the command structure.
+ * @see vpu_jsm_metric_streamer_start
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
+ /** Control command: Priority band setup */
+ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
+ /** Control command: Create command queue */
+ VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
+ /** Control command: Destroy command queue */
+ VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
+ /** Control command: Set context scheduling properties */
+ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
+ /*
+ * Register a doorbell to notify VPU of new work. The doorbell may later be
+ * deallocated or reassigned to another context.
+ */
+ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
+ /* IPC Host -> Device, General commands */
+ VPU_JSM_MSG_GENERAL_CMD = 0x1200,
+ VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
+ /**
+ * Control dyndbg behavior by executing a dyndbg command; equivalent to
+ * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ */
+ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
+ /* IPC Device -> Host, Job completion */
+ VPU_JSM_MSG_JOB_DONE = 0x2100,
+ /* IPC Device -> Host, Async command completion */
+ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+ VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
+ VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
+ VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
+ VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
+ VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
+ VPU_JSM_MSG_SET_POWER_LEVEL_DONE = 0x2207,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE = 0x2208,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE = 0x2209,
+ /** Response to VPU_JSM_MSG_TRACE_SET_CONFIG. */
+ VPU_JSM_MSG_TRACE_SET_CONFIG_RSP = 0x220a,
+ /** Response to VPU_JSM_MSG_TRACE_GET_CONFIG. */
+ VPU_JSM_MSG_TRACE_GET_CONFIG_RSP = 0x220b,
+ /** Response to VPU_JSM_MSG_TRACE_GET_CAPABILITY. */
+ VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
+ /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
+ VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
+ /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_START.
+ * VPU will return an error result if metric collection cannot be started,
+ * e.g. when the specified metric mask is invalid.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_START_DONE = 0x220f,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_STOP.
+ * Returns information about collected metric data.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE = 0x2210,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_UPDATE.
+ * Returns information about collected metric data.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE = 0x2211,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_INFO.
+ * Returns a description of the metric groups and metric counters.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE = 0x2212,
+ /**
+ * Asynchronous event sent from the VPU to the host either when the current
+ * metric buffer is full or when the VPU has collected a multiple of
+ * @notify_sample_count samples as indicated through the start command
+ * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
+ * metric data.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
+ /** Response to control command: Priority band setup */
+ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
+ /** Response to control command: Create command queue */
+ VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
+ /** Response to control command: Destroy command queue */
+ VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
+ /** Response to control command: Set context scheduling properties */
+ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
+ /* IPC Device -> Host, General command completion */
+ VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
+ VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
+ /** Response to VPU_JSM_MSG_DYNDBG_CONTROL. */
+ VPU_JSM_MSG_DYNDBG_CONTROL_RSP = 0x2301,
+};
+
+enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
+
+/*
+ * Host <-> LRT IPC message payload definitions
+ */
+struct vpu_ipc_msg_payload_engine_reset {
+ /* Engine to be reset. */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_engine_preempt {
+ /* Engine to be preempted. */
+ u32 engine_idx;
+ /* ID of the preemption request. */
+ u32 preempt_id;
+};
+
+/*
+ * @brief Register doorbell command structure.
+ * This structure supports doorbell registration for only OS scheduling.
+ * @see VPU_JSM_MSG_REGISTER_DB
+ */
+struct vpu_ipc_msg_payload_register_db {
+ /* Index of the doorbell to register. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+ /* Virtual address in Global GTT pointing to the start of job queue. */
+ u64 jobq_base;
+ /* Size of the job queue in bytes. */
+ u32 jobq_size;
+ /* Host sub-stream ID for the context assigned to the doorbell. */
+ u32 host_ssid;
+};
+
+/**
+ * @brief Unregister doorbell command structure.
+ * Request structure to unregister a doorbell for both HW and OS scheduling.
+ * @see VPU_JSM_MSG_UNREGISTER_DB
+ */
+struct vpu_ipc_msg_payload_unregister_db {
+ /* Index of the doorbell to unregister. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_query_engine_hb {
+ /* Engine to return heartbeat value. */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_power_level {
+ /**
+ * Requested power level. The power level value is in the
+ * range [0, power_level_count-1] where power_level_count
+ * is the number of available power levels as returned by
+ * the get power level count command. A power level of 0
+ * corresponds to the maximum possible power level, while
+ * power_level_count-1 corresponds to the minimum possible
+ * power level. Values outside of this range are not
+ * considered to be valid.
+ */
+ u32 power_level;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_ssid_release {
+ /* Host sub-stream ID for the context to be released. */
+ u32 host_ssid;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/**
+ * @brief Metric streamer start command structure.
+ * This structure is also used with VPU_JSM_MSG_METRIC_STREAMER_INFO to request metric
+ * groups and metric counters description from the firmware.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_START
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_streamer_start {
+ /**
+ * Bitmask to select the desired metric groups.
+ * A metric group can belong only to one metric streamer instance at a time.
+ * Since each metric streamer instance has a unique set of metric groups, it
+ * can also identify a metric streamer instance if more than one instance was
+ * started. If the VPU device does not support multiple metric streamer instances,
+ * then VPU_JSM_MSG_METRIC_STREAMER_START will return an error even if the second
+ * instance has different groups from the first.
+ */
+ u64 metric_group_mask;
+ /** Sampling rate in nanoseconds. */
+ u64 sampling_rate;
+ /**
+ * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
+ * each time @notify_sample_count samples have been collected or dropped by the VPU.
+ * If set to UINT_MAX the VPU will only generate a notification when the metric
+ * buffer is full. If set to 0 the VPU will never generate a notification.
+ */
+ u32 notify_sample_count;
+ u32 reserved_0;
+ /**
+ * Address and size of the buffer where the VPU will write metric data. The
+ * VPU writes all counters from enabled metric groups one after another. If
+ * there is no space left to write data at the next sample period the VPU
+ * will switch to the next buffer (@see next_buffer_addr) and will optionally
+ * send a notification to the host driver if @notify_sample_count is non-zero.
+ * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ */
+ u64 buffer_addr;
+ u64 buffer_size;
+ /**
+ * Address and size of the next buffer to write metric data to after the initial
+ * buffer is full. If the address is NULL the VPU will stop collecting metric
+ * data.
+ */
+ u64 next_buffer_addr;
+ u64 next_buffer_size;
+};
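+
+/*
+ * Illustrative sketch, not part of the driver: one plausible way a host could
+ * fill this structure for double-buffered collection. All values below are
+ * hypothetical examples, not required settings.
+ *
+ *	struct vpu_jsm_metric_streamer_start start = {
+ *		.metric_group_mask = BIT_ULL(0),	// a single metric group
+ *		.sampling_rate = 1000000,		// 1 ms, expressed in nanoseconds
+ *		.notify_sample_count = 64,		// notification every 64 samples
+ *		.buffer_addr = buf0_addr,
+ *		.buffer_size = buf_size,
+ *		.next_buffer_addr = buf1_addr,		// VPU switches here when buf0 fills
+ *		.next_buffer_size = buf_size,
+ *	};
+ */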
+
+/**
+ * @brief Metric streamer stop command structure.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_STOP
+ */
+struct vpu_jsm_metric_streamer_stop {
+ /** Bitmask to select the desired metric groups. */
+ u64 metric_group_mask;
+};
+
+/**
+ * Provide VPU FW with buffers to write metric data.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE
+ */
+struct vpu_jsm_metric_streamer_update {
+ /** Metric group mask that identifies metric streamer instance. */
+ u64 metric_group_mask;
+ /**
+ * Address and size of the buffer where the VPU will write metric data. If
+ * the buffer address is 0 or the same as the currently used buffer the VPU will
+ * continue writing metric data to the current buffer. In this case the
+ * buffer size is ignored and the size of the current buffer is unchanged.
+ * If the address is non-zero and differs from the current buffer address the
+ * VPU will immediately switch data collection to the new buffer.
+ */
+ u64 buffer_addr;
+ u64 buffer_size;
+ /**
+ * Address and size of the next buffer to write metric data after the initial
+ * buffer is full. If the address is NULL the VPU will stop collecting metric
+ * data but will continue to record dropped samples.
+ *
+ * Note that a hazard is possible if both buffer_addr and next_buffer_addr
+ * are non-zero in the same update request. It is the host's responsibility to ensure
+ * that both addresses make sense even if the VPU just switched to writing samples
+ * from the current to the next buffer.
+ */
+ u64 next_buffer_addr;
+ u64 next_buffer_size;
+};
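+
+/*
+ * Illustrative sketch, not part of the driver: one way a host could re-arm
+ * double buffering after the VPU reports (via a notification or an earlier
+ * response) that it switched to the next buffer. new_buf_addr and
+ * new_buf_size are hypothetical values.
+ *
+ *	struct vpu_jsm_metric_streamer_update update = {
+ *		.metric_group_mask = BIT_ULL(0),	// identifies the streamer instance
+ *		.buffer_addr = 0,			// keep writing to the current buffer
+ *		.next_buffer_addr = new_buf_addr,	// fresh buffer for the next switch
+ *		.next_buffer_size = new_buf_size,
+ *	};
+ */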
+
+struct vpu_ipc_msg_payload_blob_deinit {
+ /* 64-bit unique ID for the blob to be de-initialized. */
+ u64 blob_id;
+};
+
+struct vpu_ipc_msg_payload_job_done {
+ /* Engine to which the job was submitted. */
+ u32 engine_idx;
+ /* Index of the doorbell to which the job was submitted */
+ u32 db_idx;
+ /* ID of the completed job */
+ u32 job_id;
+ /* Status of the completed job */
+ u32 job_status;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+struct vpu_jsm_engine_reset_context {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Flags: 0: cause of hang; 1: collateral damage of reset */
+ u64 flags;
+};
+
+struct vpu_ipc_msg_payload_engine_reset_done {
+ /* Engine ordinal */
+ u32 engine_idx;
+ /* Number of impacted contexts */
+ u32 num_impacted_contexts;
+ /* Array of impacted command queue ids and their flags */
+ struct vpu_jsm_engine_reset_context
+ impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
+};
+
+struct vpu_ipc_msg_payload_engine_preempt_done {
+ /* Engine preempted. */
+ u32 engine_idx;
+ /* ID of the preemption request. */
+ u32 preempt_id;
+};
+
+/**
+ * Response structure for register doorbell command for both OS
+ * and HW scheduling.
+ * @see VPU_JSM_MSG_REGISTER_DB
+ * @see VPU_JSM_MSG_HWS_REGISTER_DB
+ */
+struct vpu_ipc_msg_payload_register_db_done {
+ /* Index of the registered doorbell. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/**
+ * Response structure for unregister doorbell command for both OS
+ * and HW scheduling.
+ * @see VPU_JSM_MSG_UNREGISTER_DB
+ */
+struct vpu_ipc_msg_payload_unregister_db_done {
+ /* Index of the unregistered doorbell. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_query_engine_hb_done {
+ /* Engine returning heartbeat value. */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+ /* Heartbeat value. */
+ u64 heartbeat;
+};
+
+struct vpu_ipc_msg_payload_get_power_level_count_done {
+ /**
+ * Number of supported power levels. The maximum possible
+ * value of power_level_count is 16 but this may vary across
+ * implementations.
+ */
+ u32 power_level_count;
+ /* Reserved */
+ u32 reserved_0;
+ /**
+ * Power consumption limit for each supported power level in
+ * [0-100%] range relative to power level 0.
+ */
+ u8 power_limit[16];
+};
+
+struct vpu_ipc_msg_payload_blob_deinit_done {
+ /* 64-bit unique ID for the blob de-initialized. */
+ u64 blob_id;
+};
+
+/* HWS priority band setup request / response */
+struct vpu_ipc_msg_payload_hws_priority_band_setup {
+ /*
+ * Grace period in 100ns units when preempting another priority band for
+ * this priority band
+ */
+ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ /*
+ * Default quantum in 100ns units for scheduling across processes
+ * within a priority band
+ */
+ u64 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ /*
+ * Default grace period in 100ns units for processes that preempt each
+ * other within a priority band
+ */
+ u64 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ /*
+ * For normal priority band, specifies the target VPU percentage
+ * in situations when it's starved by the focus band.
+ */
+ u32 normal_band_percentage;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/* HWS create command queue request */
+struct vpu_ipc_msg_payload_hws_create_cmdq {
+ /* Process id */
+ u64 process_id;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Command queue base */
+ u64 cmdq_base;
+ /* Command queue size */
+ u32 cmdq_size;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/* HWS create command queue response */
+struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
+ /* Process id */
+ u64 process_id;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/* HWS destroy command queue request / response */
+struct vpu_ipc_msg_payload_hws_destroy_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/* HWS set context scheduling properties request / response */
+struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Priority band to assign to work of this context */
+ u32 priority_band;
+ /* Inside realtime band assigns a further priority */
+ u32 realtime_priority_level;
+ /* Priority relative to other contexts in the same process */
+ u32 in_process_priority;
+ /* Zero padding / Reserved */
+ u32 reserved_1;
+ /* Context quantum relative to other contexts of the same priority in the same process */
+ u64 context_quantum;
+ /* Grace period when preempting context of the same priority within the same process */
+ u64 grace_period_same_priority;
+ /* Grace period when preempting context of a lower priority within the same process */
+ u64 grace_period_lower_priority;
+};
+
+/*
+ * @brief Register doorbell command structure.
+ * This structure supports doorbell registration for both HW and OS scheduling.
+ * Note: Queue base and size are added here so that the same structure can be used for
+ * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
+ * and cmdq_base and cmdq_size will be used. For HW scheduling, cmdq_base and cmdq_size will be
+ * ignored and cmdq_id is used.
+ * @see VPU_JSM_MSG_HWS_REGISTER_DB
+ */
+struct vpu_jsm_hws_register_db {
+ /* Index of the doorbell to register. */
+ u32 db_id;
+ /* Host sub-stream ID for the context assigned to the doorbell. */
+ u32 host_ssid;
+ /* ID of the command queue associated with the doorbell. */
+ u64 cmdq_id;
+ /* Virtual address pointing to the start of command queue. */
+ u64 cmdq_base;
+ /* Size of the command queue in bytes. */
+ u64 cmdq_size;
+};
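+
+/*
+ * Illustrative sketch, not part of the driver: how the two scheduling modes
+ * described in the note above might populate this structure. All values are
+ * hypothetical.
+ *
+ *	// HW scheduling: the queue is identified by cmdq_id; base/size are ignored
+ *	struct vpu_jsm_hws_register_db hw_db = {
+ *		.db_id = 3,
+ *		.host_ssid = ssid,
+ *		.cmdq_id = cmdq_id,
+ *	};
+ *
+ *	// OS scheduling: the queue is described by base/size; cmdq_id is ignored
+ *	struct vpu_jsm_hws_register_db os_db = {
+ *		.db_id = 3,
+ *		.host_ssid = ssid,
+ *		.cmdq_base = jobq_addr,
+ *		.cmdq_size = jobq_size,
+ *	};
+ */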
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_SET_CONFIG[_RSP] and
+ * VPU_JSM_MSG_TRACE_GET_CONFIG_RSP messages.
+ *
+ * The payload is interpreted differently depending on the type of message:
+ *
+ * - For VPU_JSM_MSG_TRACE_SET_CONFIG, the payload specifies the desired
+ * logging configuration to be set.
+ *
+ * - For VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, the payload reports the logging
+ * configuration that was set after a VPU_JSM_MSG_TRACE_SET_CONFIG request.
+ * The host can compare this payload with the one it sent in the
+ * VPU_JSM_MSG_TRACE_SET_CONFIG request to check whether or not the
+ * configuration was set as desired.
+ *
+ * - VPU_JSM_MSG_TRACE_GET_CONFIG_RSP, the payload reports the current logging
+ * configuration.
+ */
+struct vpu_ipc_msg_payload_trace_config {
+ /**
+ * Logging level (currently set or to be set); see 'mvLog_t' enum for
+ * acceptable values. The specified logging level applies to all
+ * destinations and HW components.
+ */
+ u32 trace_level;
+ /**
+ * Bitmask of logging destinations (currently enabled or to be enabled);
+ * bitwise OR of values defined in logging_destination enum.
+ */
+ u32 trace_destination_mask;
+ /**
+ * Bitmask of loggable HW components (currently enabled or to be enabled);
+ * bitwise OR of values defined in loggable_hw_component enum.
+ */
+ u64 trace_hw_component_mask;
+ u64 reserved_0; /**< Reserved for future extensions. */
+};
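+
+/*
+ * Illustrative sketch, not part of the driver: a set-config request that
+ * enables one logging level for every destination and HW component reported
+ * by VPU_JSM_MSG_TRACE_GET_CAPABILITY. The level and mask variables are
+ * hypothetical and would come from an mvLog_t value and a prior capability
+ * response.
+ *
+ *	struct vpu_ipc_msg_payload_trace_config cfg = {
+ *		.trace_level = level,
+ *		.trace_destination_mask = dest_mask,
+ *		.trace_hw_component_mask = hw_mask,
+ *	};
+ */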
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP messages.
+ */
+struct vpu_ipc_msg_payload_trace_capability_rsp {
+ u32 trace_destination_mask; /**< Bitmask of supported logging destinations. */
+ u32 reserved_0;
+ u64 trace_hw_component_mask; /**< Bitmask of supported loggable HW components. */
+ u64 reserved_1; /**< Reserved for future extensions. */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_NAME requests.
+ */
+struct vpu_ipc_msg_payload_trace_get_name {
+ /**
+ * The type of the entity to query name for; see logging_entity_type for
+ * possible values.
+ */
+ u32 entity_type;
+ u32 reserved_0;
+ /**
+ * The ID of the entity to query name for; possible values depend on the
+ * entity type.
+ */
+ u64 entity_id;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_NAME_RSP responses.
+ */
+struct vpu_ipc_msg_payload_trace_get_name_rsp {
+ /**
+ * The type of the entity whose name was queried; see logging_entity_type
+ * for possible values.
+ */
+ u32 entity_type;
+ u32 reserved_0;
+ /**
+ * The ID of the entity whose name was queried; possible values depend on
+ * the entity type.
+ */
+ u64 entity_id;
+ /** Reserved for future extensions. */
+ u64 reserved_1;
+ /** The name of the entity. */
+ char entity_name[VPU_TRACE_ENTITY_NAME_MAX_LEN];
+};
+
+/**
+ * Data sent from the VPU to the host in all metric streamer response messages
+ * and in asynchronous notification.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_START_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION
+ */
+struct vpu_jsm_metric_streamer_done {
+ /** Metric group mask that identifies metric streamer instance. */
+ u64 metric_group_mask;
+ /**
+ * Size in bytes of single sample - total size of all enabled counters.
+ * Some VPU implementations may align sample_size to more than 8 bytes.
+ */
+ u32 sample_size;
+ u32 reserved_0;
+ /**
+ * Number of samples collected since the metric streamer was started.
+ * This will be 0 if the metric streamer was not started.
+ */
+ u32 samples_collected;
+ /**
+ * Number of samples dropped since the metric streamer was started. This
+ * is incremented every time the metric streamer is not able to write
+ * collected samples because the current buffer is full and there is no
+ * next buffer to switch to.
+ */
+ u32 samples_dropped;
+ /** Address of the buffer that contains the latest metric data. */
+ u64 buffer_addr;
+ /**
+ * Number of bytes written into the metric data buffer. In response to the
+ * VPU_JSM_MSG_METRIC_STREAMER_INFO request this field contains the size of
+ * all group and counter descriptors. The size is updated even if the buffer
+ * in the request was NULL or too small to hold descriptors of all counters.
+ */
+ u64 bytes_written;
+};
+
+/**
+ * Metric group description placed in the metric buffer after successful completion
+ * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
+ * @vpu_jsm_metric_counter_descriptor records.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_group_descriptor {
+ /**
+ * Offset to the next metric group (8-byte aligned). If this offset is 0 this
+ * is the last descriptor. The value of metric_info_size must be greater than
+ * or equal to sizeof(struct vpu_jsm_metric_group_descriptor) + name_string_size
+ * + description_string_size and must be 8-byte aligned.
+ */
+ u32 next_metric_group_info_offset;
+ /**
+ * Offset to the first metric counter description record (8-byte aligned).
+ * @see vpu_jsm_metric_counter_descriptor
+ */
+ u32 next_metric_counter_info_offset;
+ /** Index of the group. This corresponds to bit index in metric_group_mask. */
+ u32 group_id;
+ /** Number of counters in the metric group. */
+ u32 num_counters;
+ /** Data size for all counters, must be a multiple of 8 bytes.*/
+ u32 metric_group_data_size;
+ /**
+ * Metric group domain number. Cannot use multiple, simultaneous metric groups
+ * from the same domain.
+ */
+ u32 domain;
+ /**
+ * Counter name string size. The string must include a null termination character.
+ * The FW may use a fixed size name or send a different name for each counter.
+ * If the VPU uses fixed size strings, all characters from the end of the name
+ * to the end of the fixed size character array must be zeroed.
+ */
+ u32 name_string_size;
+ /** Counter description string size, @see name_string_size */
+ u32 description_string_size;
+ u64 reserved_0;
+ /**
+ * Right after this structure, the VPU writes name and description of
+ * the metric group.
+ */
+};
+
+/**
+ * Metric counter description, placed in the buffer after vpu_jsm_metric_group_descriptor.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_counter_descriptor {
+ /**
+ * Offset to the next counter in a group (8-byte aligned). If this offset is
+ * 0 this is the last counter in the group.
+ */
+ u32 next_metric_counter_info_offset;
+ /**
+ * Offset to the counter data from the start of samples in this metric group.
+ * Note that metric_data_offset % metric_data_size must be 0.
+ */
+ u32 metric_data_offset;
+ /** Size of the metric counter data in bytes. */
+ u32 metric_data_size;
+ /** Metric tier, see Level Zero API for definitions. */
+ u32 tier;
+ /** Metric type, see set_metric_type_t for definitions. */
+ u32 metric_type;
+ /** Metric value type, see set_value_type_t for definitions. */
+ u32 metric_value_type;
+ /**
+ * Counter name string size. The string must include a null termination character.
+ * The FW may use a fixed size name or send a different name for each counter.
+ * If the VPU uses fixed size strings, all characters from the end of the name
+ * to the end of the fixed size character array must be zeroed.
+ */
+ u32 name_string_size;
+ /** Counter description string size, @see name_string_size */
+ u32 description_string_size;
+ /** Counter component name string size, @see name_string_size */
+ u32 component_string_size;
+ /** Counter units string size, @see name_string_size */
+ u32 units_string_size;
+ u64 reserved_0;
+ /**
+ * Right after this structure, the VPU writes name, description
+ * component and unit strings.
+ */
+};
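+
+/*
+ * Illustrative sketch, not part of the driver: walking the descriptors written
+ * for VPU_JSM_MSG_METRIC_STREAMER_INFO. It assumes the offsets are relative to
+ * the start of the info buffer ('buf' below, the hypothetical host mapping);
+ * an offset of 0 terminates each list.
+ *
+ *	u32 grp_off = 0;
+ *	do {
+ *		struct vpu_jsm_metric_group_descriptor *grp = buf + grp_off;
+ *		u32 cnt_off = grp->next_metric_counter_info_offset;
+ *
+ *		while (cnt_off) {
+ *			struct vpu_jsm_metric_counter_descriptor *cnt = buf + cnt_off;
+ *			// name, description, component and unit strings follow *cnt
+ *			cnt_off = cnt->next_metric_counter_info_offset;
+ *		}
+ *		grp_off = grp->next_metric_group_info_offset;
+ *	} while (grp_off);
+ */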
+
+/**
+ * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ *
+ * VPU_JSM_MSG_DYNDBG_CONTROL messages are used to control the VPU FW Dynamic
+ * Debug feature, which allows developers to selectively enable / disable
+ * MVLOG_DEBUG messages. This is equivalent to the Dynamic Debug functionality
+ * provided by Linux
+ * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html).
+ * The host can control Dynamic Debug behavior by sending dyndbg commands,
+ * which have the same syntax as Linux dyndbg commands.
+ *
+ * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
+ * still has to set the logging level to MVLOG_DEBUG, using the
+ * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ *
+ * The host can see the current dynamic debug configuration by executing a
+ * special 'show' command. The dyndbg configuration will be printed to the
+ * configured logging destination using MVLOG_INFO logging level.
+ */
+struct vpu_ipc_msg_payload_dyndbg_control {
+ /**
+ * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
+ * string.
+ */
+ char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
+};
+
+/*
+ * Payloads union, used to define complete message format.
+ */
+union vpu_ipc_msg_payload {
+ struct vpu_ipc_msg_payload_engine_reset engine_reset;
+ struct vpu_ipc_msg_payload_engine_preempt engine_preempt;
+ struct vpu_ipc_msg_payload_register_db register_db;
+ struct vpu_ipc_msg_payload_unregister_db unregister_db;
+ struct vpu_ipc_msg_payload_query_engine_hb query_engine_hb;
+ struct vpu_ipc_msg_payload_power_level power_level;
+ struct vpu_jsm_metric_streamer_start metric_streamer_start;
+ struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
+ struct vpu_jsm_metric_streamer_update metric_streamer_update;
+ struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
+ struct vpu_ipc_msg_payload_ssid_release ssid_release;
+ struct vpu_jsm_hws_register_db hws_register_db;
+ struct vpu_ipc_msg_payload_job_done job_done;
+ struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
+ struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
+ struct vpu_ipc_msg_payload_register_db_done register_db_done;
+ struct vpu_ipc_msg_payload_unregister_db_done unregister_db_done;
+ struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
+ struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
+ struct vpu_jsm_metric_streamer_done metric_streamer_done;
+ struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
+ struct vpu_ipc_msg_payload_trace_config trace_config;
+ struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
+ struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
+ struct vpu_ipc_msg_payload_trace_get_name_rsp trace_get_name_rsp;
+ struct vpu_ipc_msg_payload_dyndbg_control dyndbg_control;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup hws_priority_band_setup;
+ struct vpu_ipc_msg_payload_hws_create_cmdq hws_create_cmdq;
+ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp hws_create_cmdq_rsp;
+ struct vpu_ipc_msg_payload_hws_destroy_cmdq hws_destroy_cmdq;
+ struct vpu_ipc_msg_payload_hws_set_context_sched_properties
+ hws_set_context_sched_properties;
+};
+
+/*
+ * Host <-> LRT IPC message base structure.
+ *
+ * NOTE: All instances of this object must be aligned on a 64B boundary
+ * to allow proper handling of VPU cache operations.
+ */
+struct vpu_jsm_msg {
+ /* Reserved */
+ u64 reserved_0;
+ /* Message type, see vpu_ipc_msg_type enum. */
+ u32 type;
+ /* Buffer status, see vpu_ipc_msg_status enum. */
+ u32 status;
+ /*
+ * Request ID, provided by the host in a request message and passed
+ * back by VPU in the response message.
+ */
+ u32 request_id;
+ /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+ u32 result;
+ u64 reserved_1;
+ /* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+ union vpu_ipc_msg_payload payload;
+};
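+
+/*
+ * Illustrative sketch, not part of the driver: a complete request assembled
+ * from the pieces above, here releasing a hypothetical host_ssid. A real
+ * instance would also need the 64-byte alignment called out in the note
+ * above, and request_id would be chosen by the host and matched against the
+ * response.
+ *
+ *	struct vpu_jsm_msg req = {
+ *		.type = VPU_JSM_MSG_SSID_RELEASE,
+ *		.status = VPU_JSM_MSG_ALLOCATED,
+ *		.request_id = 42,
+ *		.payload.ssid_release.host_ssid = ssid,
+ *	};
+ */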
+
+#pragma pack(pop)
+
+#endif
+
+///@}
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
new file mode 100644
index 000000000000..a9f866230058
--- /dev/null
+++ b/drivers/accel/qaic/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Qualcomm Cloud AI accelerators driver
+#
+
+config DRM_ACCEL_QAIC
+ tristate "Qualcomm Cloud AI accelerators"
+ depends on DRM_ACCEL
+ depends on PCI && HAS_IOMEM
+ depends on MHI_BUS
+ depends on MMU
+ select CRC32
+ help
+ Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
+ designed to accelerate Deep Learning inference workloads.
+
+ The driver manages the PCIe devices and provides an IOCTL interface
+ for users to submit workloads to the devices.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qaic.
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
new file mode 100644
index 000000000000..2418418f7a50
--- /dev/null
+++ b/drivers/accel/qaic/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Qualcomm Cloud AI accelerators driver
+#
+
+obj-$(CONFIG_DRM_ACCEL_QAIC) := qaic.o
+
+qaic-y := \
+ mhi_controller.o \
+ qaic_control.o \
+ qaic_data.o \
+ qaic_drv.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
new file mode 100644
index 000000000000..5036e58e7235
--- /dev/null
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/memblock.h>
+#include <linux/mhi.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#include "mhi_controller.h"
+#include "qaic.h"
+
+#define MAX_RESET_TIME_SEC 25
+
+static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
+module_param(mhi_timeout_ms, uint, 0600);
+MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value in milliseconds");
+
+static struct mhi_channel_config aic100_channels[] = {
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 0,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 1,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 2,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 3,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_DIAG",
+ .num = 4,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_DIAG",
+ .num = 5,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 6,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 7,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_QDSS",
+ .num = 8,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_QDSS",
+ .num = 9,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 10,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 11,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 12,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 13,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 14,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 15,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 16,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 17,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_DEBUG",
+ .num = 18,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_DEBUG",
+ .num = 19,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC",
+ .num = 20,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .num = 21,
+ .name = "QAIC_TIMESYNC",
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+};
+
+static struct mhi_event_config aic100_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 0,
+ .channel = U32_MAX,
+ .priority = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config aic100_config = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout_ms */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic100_channels),
+ .ch_cfg = aic100_channels,
+ .num_events = ARRAY_SIZE(aic100_events),
+ .event_cfg = aic100_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+};
+
+static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
+{
+ u32 tmp = readl_relaxed(addr);
+
+ if (tmp == U32_MAX)
+ return -EIO;
+
+ *out = tmp;
+
+ return 0;
+}
+
+static void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 val)
+{
+ writel_relaxed(val, addr);
+}
+
+static int mhi_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void mhi_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback reason)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_cntrl->cntrl_dev));
+
+ /* this event occurs in atomic context */
+ if (reason == MHI_CB_FATAL_ERROR)
+ pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
+ /* this event occurs in non-atomic context */
+ if (reason == MHI_CB_SYS_ERROR)
+ qaic_dev_reset_clean_local_state(qdev, true);
+}
+
+static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ u8 time_sec = 1;
+ int current_ee;
+ int ret;
+
+ /* Reset the device to bring the device in PBL EE */
+ mhi_soc_reset(mhi_cntrl);
+
+ /*
+ * Keep checking the execution environment (EE) at 1 second intervals.
+ */
+ do {
+ msleep(1000);
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ } while (current_ee != MHI_EE_PBL && time_sec++ <= MAX_RESET_TIME_SEC);
+
+ /* If the device is in PBL EE retry power up */
+ if (current_ee == MHI_EE_PBL)
+ ret = mhi_async_power_up(mhi_cntrl);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
+ int mhi_irq)
+{
+ struct mhi_controller *mhi_cntrl;
+ int ret;
+
+ mhi_cntrl = devm_kzalloc(&pci_dev->dev, sizeof(*mhi_cntrl), GFP_KERNEL);
+ if (!mhi_cntrl)
+ return ERR_PTR(-ENOMEM);
+
+ mhi_cntrl->cntrl_dev = &pci_dev->dev;
+
+ /*
+ * Covers the entire possible physical RAM region. The remote side is
+ * going to calculate the size of this range, so subtract 1 to prevent
+ * rollover.
+ */
+ mhi_cntrl->iova_start = 0;
+ mhi_cntrl->iova_stop = PHYS_ADDR_MAX - 1;
+ mhi_cntrl->status_cb = mhi_status_cb;
+ mhi_cntrl->runtime_get = mhi_runtime_get;
+ mhi_cntrl->runtime_put = mhi_runtime_put;
+ mhi_cntrl->read_reg = mhi_read_reg;
+ mhi_cntrl->write_reg = mhi_write_reg;
+ mhi_cntrl->regs = mhi_bar;
+ mhi_cntrl->reg_len = SZ_4K;
+ mhi_cntrl->nr_irqs = 1;
+ mhi_cntrl->irq = devm_kmalloc(&pci_dev->dev, sizeof(*mhi_cntrl->irq), GFP_KERNEL);
+
+ if (!mhi_cntrl->irq)
+ return ERR_PTR(-ENOMEM);
+
+ mhi_cntrl->irq[0] = mhi_irq;
+ mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
+
+ /* use latest configured timeout */
+ aic100_config.timeout_ms = mhi_timeout_ms;
+ ret = mhi_register_controller(mhi_cntrl, &aic100_config);
+ if (ret) {
+ pci_err(pci_dev, "mhi_register_controller failed %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = mhi_prepare_for_power_up(mhi_cntrl);
+ if (ret) {
+ pci_err(pci_dev, "mhi_prepare_for_power_up failed %d\n", ret);
+ goto prepare_power_up_fail;
+ }
+
+ ret = mhi_async_power_up(mhi_cntrl);
+ /*
+ * If EIO is returned it is possible that the device is in the SBL EE, which is
+ * undesired. SoC-reset the device and try to power up again.
+ */
+ if (ret == -EIO && MHI_EE_SBL == mhi_get_exec_env(mhi_cntrl)) {
+ pci_err(pci_dev, "Found device in SBL at MHI init. Attempting a reset.\n");
+ ret = mhi_reset_and_async_power_up(mhi_cntrl);
+ }
+
+ if (ret) {
+ pci_err(pci_dev, "mhi_async_power_up failed %d\n", ret);
+ goto power_up_fail;
+ }
+
+ return mhi_cntrl;
+
+power_up_fail:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+prepare_power_up_fail:
+ mhi_unregister_controller(mhi_cntrl);
+ return ERR_PTR(ret);
+}
+
+void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up)
+{
+ mhi_power_down(mhi_cntrl, link_up);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ mhi_unregister_controller(mhi_cntrl);
+}
+
+void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl)
+{
+ mhi_power_down(mhi_cntrl, true);
+}
+
+void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl)
+{
+ struct pci_dev *pci_dev = container_of(mhi_cntrl->cntrl_dev, struct pci_dev, dev);
+ int ret;
+
+ ret = mhi_async_power_up(mhi_cntrl);
+ if (ret)
+ pci_err(pci_dev, "mhi_async_power_up failed after reset %d\n", ret);
+}
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
new file mode 100644
index 000000000000..2ae45d768e24
--- /dev/null
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef MHICONTROLLERQAIC_H_
+#define MHICONTROLLERQAIC_H_
+
+struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
+ int mhi_irq);
+void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
+void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
+void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
+
+#endif /* MHICONTROLLERQAIC_H_ */
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
new file mode 100644
index 000000000000..f2bd637a0d4e
--- /dev/null
+++ b/drivers/accel/qaic/qaic.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _QAIC_H_
+#define _QAIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/mhi.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+
+#define QAIC_DBC_BASE SZ_128K
+#define QAIC_DBC_SIZE SZ_4K
+
+#define QAIC_NO_PARTITION -1
+
+#define QAIC_DBC_OFF(i) ((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
+
+#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
+
+extern bool datapath_polling;
+
+struct qaic_user {
+ /* Uniquely identifies this user for the device */
+ int handle;
+ struct kref ref_count;
+ /* Char device opened by this user */
+ struct qaic_drm_device *qddev;
+ /* Node in list of users that opened this drm device */
+ struct list_head node;
+ /* SRCU used to synchronize this user during cleanup */
+ struct srcu_struct qddev_lock;
+ atomic_t chunk_id;
+};
+
+struct dma_bridge_chan {
+ /* Pointer to device struct maintained by driver */
+ struct qaic_device *qdev;
+ /* ID of this DMA bridge channel (DBC) */
+ unsigned int id;
+ /* Synchronizes access to xfer_list */
+ spinlock_t xfer_lock;
+ /* Base address of request queue */
+ void *req_q_base;
+ /* Base address of response queue */
+ void *rsp_q_base;
+ /*
+ * Base bus address of request queue. Response queue bus address can be
+ * calculated by adding request queue size to this variable
+ */
+ dma_addr_t dma_addr;
+ /* Total size of request and response queue in bytes */
+ u32 total_size;
+ /* Capacity of request/response queue */
+ u32 nelem;
+ /* The user that opened this DBC */
+ struct qaic_user *usr;
+ /*
+ * Request ID of the next memory handle that goes in the request queue. One
+ * memory handle can enqueue more than one request element; all
+ * requests that belong to the same memory handle share the same request ID.
+ */
+ u16 next_req_id;
+ /* true: DBC is in use; false: DBC not in use */
+ bool in_use;
+ /*
+ * Base address of device registers. Used to read/write request and
+ * response queue's head and tail pointer of this DBC.
+ */
+ void __iomem *dbc_base;
+ /* Head of list where each node is a memory handle queued in request queue */
+ struct list_head xfer_list;
+ /* Synchronizes DBC readers during cleanup */
+ struct srcu_struct ch_lock;
+ /*
+ * When this DBC is released, any thread waiting on this wait queue is
+ * woken up
+ */
+ wait_queue_head_t dbc_release;
+ /* Head of list where each node is a bo associated with this DBC */
+ struct list_head bo_lists;
+ /* The irq line for this DBC. Used for polling */
+ unsigned int irq;
+ /* Polling work item to simulate interrupts */
+ struct work_struct poll_work;
+};
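+
+/*
+ * Illustrative sketch, not part of the driver: the address relationship hinted
+ * at in the dma_addr comment above, assuming the request queue holds nelem
+ * elements of get_dbc_req_elem_size() bytes each.
+ *
+ *	dma_addr_t rsp_q_dma = dbc->dma_addr + dbc->nelem * get_dbc_req_elem_size();
+ */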
+
+struct qaic_device {
+ /* Pointer to base PCI device struct of our physical device */
+ struct pci_dev *pdev;
+ /* Req. ID of request that will be queued next in MHI control device */
+ u32 next_seq_num;
+ /* Base address of bar 0 */
+ void __iomem *bar_0;
+ /* Base address of bar 2 */
+ void __iomem *bar_2;
+ /* Controller structure for MHI devices */
+ struct mhi_controller *mhi_cntrl;
+ /* MHI control channel device */
+ struct mhi_device *cntl_ch;
+ /* List of requests queued in MHI control device */
+ struct list_head cntl_xfer_list;
+ /* Synchronizes MHI control device transactions and its xfer list */
+ struct mutex cntl_mutex;
+ /* Array of DBC struct of this device */
+ struct dma_bridge_chan *dbc;
+ /* Work queue for tasks related to MHI control device */
+ struct workqueue_struct *cntl_wq;
+ /* Synchronizes all the users of device during cleanup */
+ struct srcu_struct dev_lock;
+ /* true: Device under reset; false: Device not under reset */
+ bool in_reset;
+ /*
+ * true: A tx MHI transaction has failed and a rx buffer is still queued
+ * in the control device. Such a buffer is considered a lost rx buffer.
+ * false: No rx buffer is lost in the control device.
+ */
+ bool cntl_lost_buf;
+ /* Maximum number of DBC supported by this device */
+ u32 num_dbc;
+ /* Reference to the drm_device for this device when it is created */
+ struct qaic_drm_device *qddev;
+ /* Generate the CRC of a control message */
+ u32 (*gen_crc)(void *msg);
+ /* Validate the CRC of a control message */
+ bool (*valid_crc)(void *msg);
+};
+
+struct qaic_drm_device {
+ /* Pointer to the root device struct driven by this driver */
+ struct qaic_device *qdev;
+ /*
+ * The physical device can be partitioned into a number of logical devices,
+ * and each logical device is given a partition id. This member stores
+ * that id. QAIC_NO_PARTITION is a sentinel used to mark that this drm
+ * device is the actual physical device.
+ */
+ s32 partition_id;
+ /* Pointer to the drm device struct of this drm device */
+ struct drm_device *ddev;
+ /* Head in list of users who have opened this drm device */
+ struct list_head users;
+ /* Synchronizes access to users list */
+ struct mutex users_mutex;
+};
+
+struct qaic_bo {
+ struct drm_gem_object base;
+ /* Scatter/gather table for allocated/imported BO */
+ struct sg_table *sgt;
+ /* BO size requested by user. GEM object might be bigger in size. */
+ u64 size;
+ /* Head in list of slices of this BO */
+ struct list_head slices;
+ /* Total nents, for all slices of this BO */
+ int total_slice_nents;
+ /*
+ * Direction of transfer. It can assume only two values, DMA_TO_DEVICE and
+ * DMA_FROM_DEVICE.
+ */
+ int dir;
+ /* The pointer of the DBC which operates on this BO */
+ struct dma_bridge_chan *dbc;
+ /* Number of slices that belong to this buffer */
+ u32 nr_slice;
+ /* Number of slices that have been transferred by the DMA engine */
+ u32 nr_slice_xfer_done;
+ /* true = BO is queued for execution, false = BO is not queued */
+ bool queued;
+ /*
+ * If true then user has attached slicing information to this BO by
+ * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
+ */
+ bool sliced;
+ /* Request ID of this BO if it is queued for execution */
+ u16 req_id;
+ /* Handle assigned to this BO */
+ u32 handle;
+ /* Wait on this for completion of DMA transfer of this BO */
+ struct completion xfer_done;
+ /*
+ * Node in linked list where head is dbc->xfer_list.
+ * This linked list contains BOs that are queued for DMA transfer.
+ */
+ struct list_head xfer_list;
+ /*
+ * Node in linked list where head is dbc->bo_lists.
+ * This linked list contains BOs that are associated with the DBC it is
+ * linked to.
+ */
+ struct list_head bo_list;
+ struct {
+ /*
+ * Latest timestamp(ns) at which kernel received a request to
+ * execute this BO
+ */
+ u64 req_received_ts;
+ /*
+ * Latest timestamp(ns) at which kernel enqueued requests of
+ * this BO for execution in DMA queue
+ */
+ u64 req_submit_ts;
+ /*
+ * Latest timestamp(ns) at which kernel received a completion
+ * interrupt for requests of this BO
+ */
+ u64 req_processed_ts;
+ /*
+ * Number of elements already enqueued in DMA queue before
+ * enqueuing requests of this BO
+ */
+ u32 queue_level_before;
+ } perf_stats;
+
+};
+
+struct bo_slice {
+ /* Mapped pages */
+ struct sg_table *sgt;
+ /* Number of requests required to queue in DMA queue */
+ int nents;
+ /* See enum dma_data_direction */
+ int dir;
+ /* Actual requests that will be copied in DMA queue */
+ struct dbc_req *reqs;
+ struct kref ref_count;
+ /* true: No DMA transfer required */
+ bool no_xfer;
+ /* Pointer to the parent BO handle */
+ struct qaic_bo *bo;
+ /* Node in list of slices maintained by parent BO */
+ struct list_head slice;
+ /* Size of this slice in bytes */
+ u64 size;
+ /* Offset of this slice in buffer */
+ u64 offset;
+};
+
+int get_dbc_req_elem_size(void);
+int get_dbc_rsp_elem_size(void);
+int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
+int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);
+
+void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);
+
+int qaic_control_open(struct qaic_device *qdev);
+void qaic_control_close(struct qaic_device *qdev);
+void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);
+
+irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
+irqreturn_t dbc_irq_handler(int irq, void *data);
+int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
+void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
+void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
+void release_dbc(struct qaic_device *qdev, u32 dbc_id);
+
+void wake_all_cntl(struct qaic_device *qdev);
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);
+
+struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+
+int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+void irq_polling_work(struct work_struct *work);
+
+#endif /* _QAIC_H_ */
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
new file mode 100644
index 000000000000..9f216eb6f76e
--- /dev/null
+++ b/drivers/accel/qaic/qaic_control.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <asm/byteorder.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <uapi/drm/qaic_accel.h>
+
+#include "qaic.h"
+
+#define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */
+#define QAIC_DBC_Q_GAP SZ_256
+#define QAIC_DBC_Q_BUF_ALIGN SZ_4K
+#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */
+#define QAIC_WRAPPER_MAX_SIZE SZ_4K
+#define QAIC_MHI_RETRY_WAIT_MS 100
+#define QAIC_MHI_RETRY_MAX 20
+
+static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
+module_param(control_resp_timeout_s, uint, 0600);
+MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
+
+struct manage_msg {
+ u32 len;
+ u32 count;
+ u8 data[];
+};
+
+/*
+ * wire encoding structures for the manage protocol.
+ * All fields are little endian on the wire
+ */
+struct wire_msg_hdr {
+ __le32 crc32; /* crc of everything following this field in the message */
+ __le32 magic_number;
+ __le32 sequence_number;
+ __le32 len; /* length of this message */
+ __le32 count; /* number of transactions in this message */
+ __le32 handle; /* unique id to track the resources consumed */
+ __le32 partition_id; /* partition id for the request (signed) */
+ __le32 padding; /* must be 0 */
+} __packed;
+
+struct wire_msg {
+ struct wire_msg_hdr hdr;
+ u8 data[];
+} __packed;
+
+struct wire_trans_hdr {
+ __le32 type;
+ __le32 len;
+} __packed;
+
+/* Each message sent from driver to device is organized as a list of wrapper_msg */
+struct wrapper_msg {
+ struct list_head list;
+ struct kref ref_count;
+ u32 len; /* length of data to transfer */
+ struct wrapper_list *head;
+ union {
+ struct wire_msg msg;
+ struct wire_trans_hdr trans;
+ };
+};
+
+struct wrapper_list {
+ struct list_head list;
+ spinlock_t lock; /* Protects the list state during additions and removals */
+};
+
+struct wire_trans_passthrough {
+ struct wire_trans_hdr hdr;
+ u8 data[];
+} __packed;
+
+struct wire_addr_size_pair {
+ __le64 addr;
+ __le64 size;
+} __packed;
+
+struct wire_trans_dma_xfer {
+ struct wire_trans_hdr hdr;
+ __le32 tag;
+ __le32 count;
+ __le32 dma_chunk_id;
+ __le32 padding;
+ struct wire_addr_size_pair data[];
+} __packed;
+
+/* Initiated by device to continue the DMA xfer of a large piece of data */
+struct wire_trans_dma_xfer_cont {
+ struct wire_trans_hdr hdr;
+ __le32 dma_chunk_id;
+ __le32 padding;
+ __le64 xferred_size;
+} __packed;
+
+struct wire_trans_activate_to_dev {
+ struct wire_trans_hdr hdr;
+ __le64 req_q_addr;
+ __le64 rsp_q_addr;
+ __le32 req_q_size;
+ __le32 rsp_q_size;
+ __le32 buf_len;
+ __le32 options; /* unused, but BIT(16) has meaning to the device */
+} __packed;
+
+struct wire_trans_activate_from_dev {
+ struct wire_trans_hdr hdr;
+ __le32 status;
+ __le32 dbc_id;
+ __le64 options; /* unused */
+} __packed;
+
+struct wire_trans_deactivate_from_dev {
+ struct wire_trans_hdr hdr;
+ __le32 status;
+ __le32 dbc_id;
+} __packed;
+
+struct wire_trans_terminate_to_dev {
+ struct wire_trans_hdr hdr;
+ __le32 handle;
+ __le32 padding;
+} __packed;
+
+struct wire_trans_terminate_from_dev {
+ struct wire_trans_hdr hdr;
+ __le32 status;
+ __le32 padding;
+} __packed;
+
+struct wire_trans_status_to_dev {
+ struct wire_trans_hdr hdr;
+} __packed;
+
+struct wire_trans_status_from_dev {
+ struct wire_trans_hdr hdr;
+ __le16 major;
+ __le16 minor;
+ __le32 status;
+ __le64 status_flags;
+} __packed;
+
+struct wire_trans_validate_part_to_dev {
+ struct wire_trans_hdr hdr;
+ __le32 part_id;
+ __le32 padding;
+} __packed;
+
+struct wire_trans_validate_part_from_dev {
+ struct wire_trans_hdr hdr;
+ __le32 status;
+ __le32 padding;
+} __packed;
+
+struct xfer_queue_elem {
+ /*
+ * Node in list of ongoing transfer requests on the control channel.
+ * Maintained by root device struct.
+ */
+ struct list_head list;
+ /* Sequence number of this transfer request */
+ u32 seq_num;
+ /* This is used to wait on until completion of transfer request */
+ struct completion xfer_done;
+ /* Received data from device */
+ void *buf;
+};
+
+struct dma_xfer {
+ /* Node in list of DMA transfers which is used for cleanup */
+ struct list_head list;
+ /* SG table of memory used for DMA */
+ struct sg_table *sgt;
+ /* Array of pages used for DMA */
+ struct page **page_list;
+ /* Number of pages used for DMA */
+ unsigned long nr_pages;
+};
+
+struct ioctl_resources {
+ /* List of all DMA transfers which is used later for cleanup */
+ struct list_head dma_xfers;
+ /* Base address of request queue which belongs to a DBC */
+ void *buf;
+ /*
+ * Base bus address of request queue which belongs to a DBC. Response
+ * queue base bus address can be calculated by adding size of request
+ * queue to base bus address of request queue.
+ */
+ dma_addr_t dma_addr;
+ /* Total size of request queue and response queue in byte */
+ u32 total_size;
+ /* Total number of elements that can be queued in each of the request and response queues */
+ u32 nelem;
+ /* Base address of response queue which belongs to a DBC */
+ void *rsp_q_base;
+ /* Status of the NNC message received */
+ u32 status;
+ /* DBC id of the DBC received from device */
+ u32 dbc_id;
+ /*
+ * DMA transfer request messages can be big in size and it may not be
+ * possible to send them in one shot. In such cases the message is
+ * broken into chunks and this field stores the ID of those chunks.
+ */
+ u32 dma_chunk_id;
+ /* Total number of bytes transferred for a DMA xfer request */
+ u64 xferred_dma_size;
+ /* Header of transaction message received from user. Used during DMA xfer request. */
+ void *trans_hdr;
+};
+
+struct resp_work {
+ struct work_struct work;
+ struct qaic_device *qdev;
+ void *buf;
+};
+
+/*
+ * Since we're working with little endian messages, it's useful to be able to
+ * increment without filling a whole line with conversions back and forth just
+ * to add one to a message count.
+ */
+static __le32 incr_le32(__le32 val)
+{
+ return cpu_to_le32(le32_to_cpu(val) + 1);
+}
+
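+/*
+ * Generate the CRC of an outgoing message by walking every wrapper in the
+ * list and folding in the wire data it carries. The header's crc32 field is
+ * still zero at this point; the caller stores the result there afterwards,
+ * mirroring the zero-and-restore done in valid_crc() on the receive side.
+ */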
+static u32 gen_crc(void *msg)
+{
+ struct wrapper_list *wrappers = msg;
+ struct wrapper_msg *w;
+ u32 crc = ~0;
+
+ list_for_each_entry(w, &wrappers->list, list)
+ crc = crc32(crc, &w->msg, w->len);
+
+ return crc ^ ~0;
+}
+
+static u32 gen_crc_stub(void *msg)
+{
+ return 0;
+}
+
+static bool valid_crc(void *msg)
+{
+ struct wire_msg_hdr *hdr = msg;
+ bool ret;
+ u32 crc;
+
+ /*
+ * The output of this algorithm is always converted to the native
+ * endianness.
+ */
+ crc = le32_to_cpu(hdr->crc32);
+ hdr->crc32 = 0;
+ ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
+ hdr->crc32 = cpu_to_le32(crc);
+ return ret;
+}
+
+static bool valid_crc_stub(void *msg)
+{
+ return true;
+}
+
+static void free_wrapper(struct kref *ref)
+{
+ struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);
+
+ list_del(&wrapper->list);
+ kfree(wrapper);
+}
+
+static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
+ struct qaic_user *usr)
+{
+ u32 dbc_id = resources->dbc_id;
+
+ if (resources->buf) {
+ wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
+ qdev->dbc[dbc_id].req_q_base = resources->buf;
+ qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
+ qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
+ qdev->dbc[dbc_id].total_size = resources->total_size;
+ qdev->dbc[dbc_id].nelem = resources->nelem;
+ enable_dbc(qdev, dbc_id, usr);
+ qdev->dbc[dbc_id].in_use = true;
+ resources->buf = NULL;
+ }
+}
+
+static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
+{
+ if (resources->buf)
+ dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
+ resources->dma_addr);
+ resources->buf = NULL;
+}
+
+static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
+{
+ struct dma_xfer *xfer;
+ struct dma_xfer *x;
+ int i;
+
+ list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
+ dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
+ sg_free_table(xfer->sgt);
+ kfree(xfer->sgt);
+ for (i = 0; i < xfer->nr_pages; ++i)
+ put_page(xfer->page_list[i]);
+ kfree(xfer->page_list);
+ list_del(&xfer->list);
+ kfree(xfer);
+ }
+}
+
+static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
+{
+ struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);
+
+ if (!w)
+ return NULL;
+ list_add_tail(&w->list, &wrappers->list);
+ kref_init(&w->ref_count);
+ w->head = wrappers;
+ return w;
+}
+
+static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
+ u32 *user_len)
+{
+ struct qaic_manage_trans_passthrough *in_trans = trans;
+ struct wire_trans_passthrough *out_trans;
+ struct wrapper_msg *trans_wrapper;
+ struct wrapper_msg *wrapper;
+ struct wire_msg *msg;
+ u32 msg_hdr_len;
+
+ wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
+ msg = &wrapper->msg;
+ msg_hdr_len = le32_to_cpu(msg->hdr.len);
+
+ if (in_trans->hdr.len % 8 != 0)
+ return -EINVAL;
+
+ if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_EXT_MSG_LENGTH)
+ return -ENOSPC;
+
+ trans_wrapper = add_wrapper(wrappers,
+ offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
+ if (!trans_wrapper)
+ return -ENOMEM;
+ trans_wrapper->len = in_trans->hdr.len;
+ out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;
+
+ memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
+ msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
+ msg->hdr.count = incr_le32(msg->hdr.count);
+ *user_len += in_trans->hdr.len;
+ out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
+ out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
+
+ return 0;
+}
+
+/* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */
+static int find_and_map_user_pages(struct qaic_device *qdev,
+ struct qaic_manage_trans_dma_xfer *in_trans,
+ struct ioctl_resources *resources, struct dma_xfer *xfer)
+{
+ unsigned long need_pages;
+ struct page **page_list;
+ unsigned long nr_pages;
+ struct sg_table *sgt;
+ u64 xfer_start_addr;
+ int ret;
+ int i;
+
+ xfer_start_addr = in_trans->addr + resources->xferred_dma_size;
+
+ need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
+ resources->xferred_dma_size, PAGE_SIZE);
+
+ nr_pages = need_pages;
+
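+ /*
+ * If the page array cannot be allocated in one go, halve the page count
+ * and retry; whatever does not fit in this chunk is picked up later via a
+ * DMA continuation (the caller sees a return value of 1).
+ */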
+ while (1) {
+ page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
+ if (!page_list) {
+ nr_pages = nr_pages / 2;
+ if (!nr_pages)
+ return -ENOMEM;
+ } else {
+ break;
+ }
+ }
+
+ ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
+ if (ret < 0 || ret != nr_pages) {
+ ret = -EFAULT;
+ goto free_page_list;
+ }
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto put_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
+ offset_in_page(xfer_start_addr),
+ in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto free_sgt;
+ }
+
+ ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
+ if (ret)
+ goto free_table;
+
+ xfer->sgt = sgt;
+ xfer->page_list = page_list;
+ xfer->nr_pages = nr_pages;
+
+ return need_pages > nr_pages ? 1 : 0;
+
+free_table:
+ sg_free_table(sgt);
+free_sgt:
+ kfree(sgt);
+put_pages:
+ for (i = 0; i < nr_pages; ++i)
+ put_page(page_list[i]);
+free_page_list:
+ kfree(page_list);
+ return ret;
+}
+
+/* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */
+static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
+ struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
+ struct wire_trans_dma_xfer **out_trans)
+{
+ struct wrapper_msg *trans_wrapper;
+ struct sg_table *sgt = xfer->sgt;
+ struct wire_addr_size_pair *asp;
+ struct scatterlist *sg;
+ struct wrapper_msg *w;
+ unsigned int dma_len;
+ u64 dma_chunk_len;
+ void *boundary;
+ int nents_dma;
+ int nents;
+ int i;
+
+ nents = sgt->nents;
+ nents_dma = nents;
+ *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
+ for_each_sgtable_sg(sgt, sg, i) {
+ *size -= sizeof(*asp);
+ /* Save 1K for possible follow-up transactions. */
+ if (*size < SZ_1K) {
+ nents_dma = i;
+ break;
+ }
+ }
+
+ trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
+ if (!trans_wrapper)
+ return -ENOMEM;
+ *out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;
+
+ asp = (*out_trans)->data;
+ boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
+ *size = 0;
+
+ dma_len = 0;
+ w = trans_wrapper;
+ dma_chunk_len = 0;
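+ /*
+ * Each entry's size is written one iteration late (carried over in
+ * dma_len) so a fresh addr/size pair, possibly spilling into a new
+ * wrapper, is only started once the previous entry is known to be
+ * non-empty. The final entry is completed after the loop.
+ */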
+ for_each_sg(sgt->sgl, sg, nents_dma, i) {
+ asp->size = cpu_to_le64(dma_len);
+ dma_chunk_len += dma_len;
+ if (dma_len) {
+ asp++;
+ if ((void *)asp + sizeof(*asp) > boundary) {
+ w->len = (void *)asp - (void *)&w->msg;
+ *size += w->len;
+ w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
+ if (!w)
+ return -ENOMEM;
+ boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
+ asp = (struct wire_addr_size_pair *)&w->msg;
+ }
+ }
+ asp->addr = cpu_to_le64(sg_dma_address(sg));
+ dma_len = sg_dma_len(sg);
+ }
+ /* finalize the last segment */
+ asp->size = cpu_to_le64(dma_len);
+ w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
+ *size += w->len;
+ dma_chunk_len += dma_len;
+ resources->xferred_dma_size += dma_chunk_len;
+
+ return nents_dma < nents ? 1 : 0;
+}
+
+static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
+{
+ int i;
+
+ dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
+ sg_free_table(xfer->sgt);
+ kfree(xfer->sgt);
+ for (i = 0; i < xfer->nr_pages; ++i)
+ put_page(xfer->page_list[i]);
+ kfree(xfer->page_list);
+}
+
+static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
+ u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
+{
+ struct qaic_manage_trans_dma_xfer *in_trans = trans;
+ struct wire_trans_dma_xfer *out_trans;
+ struct wrapper_msg *wrapper;
+ struct dma_xfer *xfer;
+ struct wire_msg *msg;
+ bool need_cont_dma;
+ u32 msg_hdr_len;
+ u32 size;
+ int ret;
+
+ wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
+ msg = &wrapper->msg;
+ msg_hdr_len = le32_to_cpu(msg->hdr.len);
+
+ if (msg_hdr_len > (UINT_MAX - QAIC_MANAGE_EXT_MSG_LENGTH))
+ return -EINVAL;
+
+ /* There should be enough space to hold at least one ASP entry. */
+ if (msg_hdr_len + sizeof(*out_trans) + sizeof(struct wire_addr_size_pair) >
+ QAIC_MANAGE_EXT_MSG_LENGTH)
+ return -ENOMEM;
+
+ if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
+ return -EINVAL;
+
+ xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
+ if (!xfer)
+ return -ENOMEM;
+
+ ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
+ if (ret < 0)
+ goto free_xfer;
+
+ need_cont_dma = (bool)ret;
+
+ ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
+ if (ret < 0)
+ goto cleanup_xfer;
+
+ need_cont_dma = need_cont_dma || (bool)ret;
+
+ msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
+ msg->hdr.count = incr_le32(msg->hdr.count);
+
+ out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
+ out_trans->hdr.len = cpu_to_le32(size);
+ out_trans->tag = cpu_to_le32(in_trans->tag);
+ out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
+ sizeof(struct wire_addr_size_pair));
+
+ *user_len += in_trans->hdr.len;
+
+ if (resources->dma_chunk_id) {
+ out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
+ } else if (need_cont_dma) {
+ while (resources->dma_chunk_id == 0)
+ resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);
+
+ out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
+ }
+ resources->trans_hdr = trans;
+
+ list_add(&xfer->list, &resources->dma_xfers);
+ return 0;
+
+cleanup_xfer:
+ cleanup_xfer(qdev, xfer);
+free_xfer:
+ kfree(xfer);
+ return ret;
+}
+
+static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
+ u32 *user_len, struct ioctl_resources *resources)
+{
+ struct qaic_manage_trans_activate_to_dev *in_trans = trans;
+ struct wire_trans_activate_to_dev *out_trans;
+ struct wrapper_msg *trans_wrapper;
+ struct wrapper_msg *wrapper;
+ struct wire_msg *msg;
+ dma_addr_t dma_addr;
+ u32 msg_hdr_len;
+ void *buf;
+ u32 nelem;
+ u32 size;
+ int ret;
+
+ wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
+ msg = &wrapper->msg;
+ msg_hdr_len = le32_to_cpu(msg->hdr.len);
+
+ if (msg_hdr_len + sizeof(*out_trans) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -ENOSPC;
+
+ if (!in_trans->queue_size)
+ return -EINVAL;
+
+ if (in_trans->pad)
+ return -EINVAL;
+
+ nelem = in_trans->queue_size;
+ size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
+ if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
+ return -EINVAL;
+
+ if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
+ return -EINVAL;
+
+ size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);
+
+ buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ trans_wrapper = add_wrapper(wrappers,
+ offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
+ if (!trans_wrapper) {
+ ret = -ENOMEM;
+ goto free_dma;
+ }
+ trans_wrapper->len = sizeof(*out_trans);
+ out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;
+
+ out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
+ out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
+ out_trans->buf_len = cpu_to_le32(size);
+ out_trans->req_q_addr = cpu_to_le64(dma_addr);
+ out_trans->req_q_size = cpu_to_le32(nelem);
+ out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
+ out_trans->rsp_q_size = cpu_to_le32(nelem);
+ out_trans->options = cpu_to_le32(in_trans->options);
+
+ *user_len += in_trans->hdr.len;
+ msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
+ msg->hdr.count = incr_le32(msg->hdr.count);
+
+ resources->buf = buf;
+ resources->dma_addr = dma_addr;
+ resources->total_size = size;
+ resources->nelem = nelem;
+ resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
+ return 0;
+
+free_dma:
+ dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
+ return ret;
+}
+
+static int encode_deactivate(struct qaic_device *qdev, void *trans,
+ u32 *user_len, struct qaic_user *usr)
+{
+ struct qaic_manage_trans_deactivate *in_trans = trans;
+
+ if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
+ return -EINVAL;
+
+ *user_len += in_trans->hdr.len;
+
+ return disable_dbc(qdev, in_trans->dbc_id, usr);
+}
+
+static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
+ u32 *user_len)
+{
+ struct qaic_manage_trans_status_to_dev *in_trans = trans;
+ struct wire_trans_status_to_dev *out_trans;
+ struct wrapper_msg *trans_wrapper;
+ struct wrapper_msg *wrapper;
+ struct wire_msg *msg;
+ u32 msg_hdr_len;
+
+ wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
+ msg = &wrapper->msg;
+ msg_hdr_len = le32_to_cpu(msg->hdr.len);
+
+ if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -ENOSPC;
+
+ trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
+ if (!trans_wrapper)
+ return -ENOMEM;
+
+ trans_wrapper->len = sizeof(*out_trans);
+ out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;
+
+ out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
+ out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
+ msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
+ msg->hdr.count = incr_le32(msg->hdr.count);
+ *user_len += in_trans->hdr.len;
+
+ return 0;
+}
+
+static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
+ struct wrapper_list *wrappers, struct ioctl_resources *resources,
+ struct qaic_user *usr)
+{
+ struct qaic_manage_trans_hdr *trans_hdr;
+ struct wrapper_msg *wrapper;
+ struct wire_msg *msg;
+ u32 user_len = 0;
+ int ret;
+ int i;
+
+ if (!user_msg->count) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
+ msg = &wrapper->msg;
+
+ msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));
+
+ if (resources->dma_chunk_id) {
+ ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
+ msg->hdr.count = cpu_to_le32(1);
+ goto out;
+ }
+
+ for (i = 0; i < user_msg->count; ++i) {
+ if (user_len >= user_msg->len) {
+ ret = -EINVAL;
+ break;
+ }
+ trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
+ if (user_len + trans_hdr->len > user_msg->len) {
+ ret = -EINVAL;
+ break;
+ }
+
+ switch (trans_hdr->type) {
+ case QAIC_TRANS_PASSTHROUGH_FROM_USR:
+ ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
+ break;
+ case QAIC_TRANS_DMA_XFER_FROM_USR:
+ ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
+ break;
+ case QAIC_TRANS_ACTIVATE_FROM_USR:
+ ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
+ break;
+ case QAIC_TRANS_DEACTIVATE_FROM_USR:
+ ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
+ break;
+ case QAIC_TRANS_STATUS_FROM_USR:
+ ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ break;
+ }
+
+ if (user_len != user_msg->len)
+ ret = -EINVAL;
+out:
+ if (ret) {
+ free_dma_xfers(qdev, resources);
+ free_dbc_buf(qdev, resources);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
+ u32 *msg_len)
+{
+ struct qaic_manage_trans_passthrough *out_trans;
+ struct wire_trans_passthrough *in_trans = trans;
+ u32 len;
+
+ out_trans = (void *)user_msg->data + user_msg->len;
+
+ len = le32_to_cpu(in_trans->hdr.len);
+ if (len % 8 != 0)
+ return -EINVAL;
+
+ if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -ENOSPC;
+
+ memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
+ user_msg->len += len;
+ *msg_len += len;
+ out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
+ out_trans->hdr.len = len;
+
+ return 0;
+}
+
+static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
+ u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
+{
+ struct qaic_manage_trans_activate_from_dev *out_trans;
+ struct wire_trans_activate_from_dev *in_trans = trans;
+ u32 len;
+
+ out_trans = (void *)user_msg->data + user_msg->len;
+
+ len = le32_to_cpu(in_trans->hdr.len);
+ if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -ENOSPC;
+
+ user_msg->len += len;
+ *msg_len += len;
+ out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
+ out_trans->hdr.len = len;
+ out_trans->status = le32_to_cpu(in_trans->status);
+ out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
+ out_trans->options = le64_to_cpu(in_trans->options);
+
+ if (!resources->buf)
+ /* how did we get an activate response without a request? */
+ return -EINVAL;
+
+ if (out_trans->dbc_id >= qdev->num_dbc)
+ /*
+ * The device assigned an invalid resource, which should never
+ * happen. Return an error so the user can try to recover.
+ */
+ return -ENODEV;
+
+ if (out_trans->status)
+ /*
+ * Allocating resources failed on the device side. This is not
+ * expected behaviour; the user is expected to handle this situation.
+ */
+ return -ECANCELED;
+
+ resources->status = out_trans->status;
+ resources->dbc_id = out_trans->dbc_id;
+ save_dbc_buf(qdev, resources, usr);
+
+ return 0;
+}
+
+static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
+ struct qaic_user *usr)
+{
+ struct wire_trans_deactivate_from_dev *in_trans = trans;
+ u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
+ u32 status = le32_to_cpu(in_trans->status);
+
+ if (dbc_id >= qdev->num_dbc)
+ /*
+ * The device assigned an invalid resource, which should never
+ * happen. Inject an error so the user can try to recover.
+ */
+ return -ENODEV;
+
+ if (status) {
+ /*
+ * Releasing resources failed on the device side, which puts
+ * us in a bind since they may still be in use, so enable the
+ * dbc. User is expected to retry deactivation.
+ */
+ enable_dbc(qdev, dbc_id, usr);
+ return -ECANCELED;
+ }
+
+ release_dbc(qdev, dbc_id);
+ *msg_len += sizeof(*in_trans);
+
+ return 0;
+}
+
+static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
+ u32 *user_len, struct wire_msg *msg)
+{
+ struct qaic_manage_trans_status_from_dev *out_trans;
+ struct wire_trans_status_from_dev *in_trans = trans;
+ u32 len;
+
+ out_trans = (void *)user_msg->data + user_msg->len;
+
+ len = le32_to_cpu(in_trans->hdr.len);
+ if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -ENOSPC;
+
+ out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
+ out_trans->hdr.len = len;
+ out_trans->major = le16_to_cpu(in_trans->major);
+ out_trans->minor = le16_to_cpu(in_trans->minor);
+ out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
+ out_trans->status = le32_to_cpu(in_trans->status);
+ *user_len += le32_to_cpu(in_trans->hdr.len);
+ user_msg->len += len;
+
+ if (out_trans->status)
+ return -ECANCELED;
+ if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
+ return -EPIPE;
+
+ return 0;
+}
+
+static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
+ struct wire_msg *msg, struct ioctl_resources *resources,
+ struct qaic_user *usr)
+{
+ u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
+ struct wire_trans_hdr *trans_hdr;
+ u32 msg_len = 0;
+ int ret;
+ int i;
+
+ if (msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -EINVAL;
+
+ user_msg->len = 0;
+ user_msg->count = le32_to_cpu(msg->hdr.count);
+
+ for (i = 0; i < user_msg->count; ++i) {
+ trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
+ if (msg_len + le32_to_cpu(trans_hdr->len) > msg_hdr_len)
+ return -EINVAL;
+
+ switch (le32_to_cpu(trans_hdr->type)) {
+ case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
+ ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
+ break;
+ case QAIC_TRANS_ACTIVATE_FROM_DEV:
+ ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
+ break;
+ case QAIC_TRANS_DEACTIVATE_FROM_DEV:
+ ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
+ break;
+ case QAIC_TRANS_STATUS_FROM_DEV:
+ ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
+ bool ignore_signal)
+{
+ struct xfer_queue_elem elem;
+ struct wire_msg *out_buf;
+ struct wrapper_msg *w;
+ int retry_count;
+ long ret;
+
+ if (qdev->in_reset) {
+ mutex_unlock(&qdev->cntl_mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ elem.seq_num = seq_num;
+ elem.buf = NULL;
+ init_completion(&elem.xfer_done);
+ if (likely(!qdev->cntl_lost_buf)) {
+ /*
+ * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
+ * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
+ */
+ out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!out_buf) {
+ mutex_unlock(&qdev->cntl_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
+ QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
+ if (ret) {
+ mutex_unlock(&qdev->cntl_mutex);
+ return ERR_PTR(ret);
+ }
+ } else {
+ /*
+ * We lost a buffer because we queued a recv buf, but then
+ * queuing the corresponding tx buf failed. To try to avoid
+ * a memory leak, let's reclaim it and use it for this
+ * transaction.
+ */
+ qdev->cntl_lost_buf = false;
+ }
+
+ list_for_each_entry(w, &wrappers->list, list) {
+ kref_get(&w->ref_count);
+ retry_count = 0;
+retry:
+ ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
+ list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
+ if (ret) {
+ if (ret == -EAGAIN && retry_count++ < QAIC_MHI_RETRY_MAX) {
+ msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
+ if (!signal_pending(current))
+ goto retry;
+ }
+
+ qdev->cntl_lost_buf = true;
+ kref_put(&w->ref_count, free_wrapper);
+ mutex_unlock(&qdev->cntl_mutex);
+ return ERR_PTR(ret);
+ }
+ }
+
+ list_add_tail(&elem.list, &qdev->cntl_xfer_list);
+ mutex_unlock(&qdev->cntl_mutex);
+
+ if (ignore_signal)
+ ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
+ else
+ ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
+ control_resp_timeout_s * HZ);
+ /*
+ * Not using _interruptible because we have to clean up or we'll
+ * likely cause memory corruption
+ */
+ mutex_lock(&qdev->cntl_mutex);
+ if (!list_empty(&elem.list))
+ list_del(&elem.list);
+ if (!ret && !elem.buf)
+ ret = -ETIMEDOUT;
+ else if (ret > 0 && !elem.buf)
+ ret = -EIO;
+ mutex_unlock(&qdev->cntl_mutex);
+
+ if (ret < 0) {
+ kfree(elem.buf);
+ return ERR_PTR(ret);
+ } else if (!qdev->valid_crc(elem.buf)) {
+ kfree(elem.buf);
+ return ERR_PTR(-EPIPE);
+ }
+
+ return elem.buf;
+}
+
+/* Add a transaction to abort the outstanding DMA continuation */
+static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
+{
+ struct wire_trans_dma_xfer *out_trans;
+ u32 size = sizeof(*out_trans);
+ struct wrapper_msg *wrapper;
+ struct wrapper_msg *w;
+ struct wire_msg *msg;
+
+ wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
+ msg = &wrapper->msg;
+
+ /* Remove all but the first wrapper which has the msg header */
+ list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
+ if (!list_is_first(&wrapper->list, &wrappers->list))
+ kref_put(&wrapper->ref_count, free_wrapper);
+
+ wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
+
+ if (!wrapper)
+ return -ENOMEM;
+
+ out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
+ out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
+ out_trans->hdr.len = cpu_to_le32(size);
+ out_trans->tag = cpu_to_le32(0);
+ out_trans->count = cpu_to_le32(0);
+ out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);
+
+ msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
+ msg->hdr.count = cpu_to_le32(1);
+ wrapper->len = size;
+
+ return 0;
+}
+
+static struct wrapper_list *alloc_wrapper_list(void)
+{
+ struct wrapper_list *wrappers;
+
+ wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
+ if (!wrappers)
+ return NULL;
+ INIT_LIST_HEAD(&wrappers->list);
+ spin_lock_init(&wrappers->lock);
+
+ return wrappers;
+}
+
+static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
+ struct manage_msg *user_msg, struct ioctl_resources *resources,
+ struct wire_msg **rsp)
+{
+ struct wrapper_list *wrappers;
+ struct wrapper_msg *wrapper;
+ struct wrapper_msg *w;
+ bool all_done = false;
+ struct wire_msg *msg;
+ int ret;
+
+ wrappers = alloc_wrapper_list();
+ if (!wrappers)
+ return -ENOMEM;
+
+ wrapper = add_wrapper(wrappers, sizeof(*wrapper));
+ if (!wrapper) {
+ kfree(wrappers);
+ return -ENOMEM;
+ }
+
+ msg = &wrapper->msg;
+ wrapper->len = sizeof(*msg);
+
+ ret = encode_message(qdev, user_msg, wrappers, resources, usr);
+ if (ret && resources->dma_chunk_id)
+ ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
+ if (ret)
+ goto encode_failed;
+
+ ret = mutex_lock_interruptible(&qdev->cntl_mutex);
+ if (ret)
+ goto lock_failed;
+
+ msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
+ msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
+
+ if (usr) {
+ msg->hdr.handle = cpu_to_le32(usr->handle);
+ msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
+ } else {
+ msg->hdr.handle = 0;
+ msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
+ }
+
+ msg->hdr.padding = cpu_to_le32(0);
+ msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
+
+ /* msg_xfer releases the mutex */
+ *rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
+ if (IS_ERR(*rsp))
+ ret = PTR_ERR(*rsp);
+
+lock_failed:
+ free_dma_xfers(qdev, resources);
+encode_failed:
+ spin_lock(&wrappers->lock);
+ list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
+ kref_put(&wrapper->ref_count, free_wrapper);
+ all_done = list_empty(&wrappers->list);
+ spin_unlock(&wrappers->lock);
+ if (all_done)
+ kfree(wrappers);
+
+ return ret;
+}
+
+static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
+{
+ struct wire_trans_dma_xfer_cont *dma_cont = NULL;
+ struct ioctl_resources resources;
+ struct wire_msg *rsp = NULL;
+ int ret;
+
+ memset(&resources, 0, sizeof(struct ioctl_resources));
+
+ INIT_LIST_HEAD(&resources.dma_xfers);
+
+ if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
+ user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
+ return -EINVAL;
+
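+ /*
+ * Large DMA transfers cannot always be described in a single message.
+ * When the encoded chunk was partial, the device answers with a single
+ * dma_xfer_cont transaction echoing the chunk id and the bytes received
+ * so far, and we loop back here to send the next chunk.
+ */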
+dma_xfer_continue:
+ ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
+ if (ret)
+ return ret;
+ /* dma_cont should be the only transaction if present */
+ if (le32_to_cpu(rsp->hdr.count) == 1) {
+ dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
+ if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
+ dma_cont = NULL;
+ }
+ if (dma_cont) {
+ if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
+ le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
+ kfree(rsp);
+ goto dma_xfer_continue;
+ }
+
+ ret = -EINVAL;
+ goto dma_cont_failed;
+ }
+
+ ret = decode_message(qdev, user_msg, rsp, &resources, usr);
+
+dma_cont_failed:
+ free_dbc_buf(qdev, &resources);
+ kfree(rsp);
+ return ret;
+}
+
+int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_manage_msg *user_msg;
+ struct qaic_device *qdev;
+ struct manage_msg *msg;
+ struct qaic_user *usr;
+ u8 __user *user_data;
+ int qdev_rcu_id;
+ int usr_rcu_id;
+ int ret;
+
+ usr = file_priv->driver_priv;
+
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return -ENODEV;
+ }
+
+ qdev = usr->qddev->qdev;
+
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return -ENODEV;
+ }
+
+ user_msg = data;
+
+ if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg->len = user_msg->len;
+ msg->count = user_msg->count;
+
+ user_data = u64_to_user_ptr(user_msg->data);
+
+ if (copy_from_user(msg->data, user_data, user_msg->len)) {
+ ret = -EFAULT;
+ goto free_msg;
+ }
+
+ ret = qaic_manage(qdev, usr, msg);
+
+ /*
+ * If qaic_manage() is successful then we copy the message back to
+ * userspace memory, with one exception for -ECANCELED.
+ * -ECANCELED means that the device has NACKed the message with a
+ * status error code which userspace would like to know.
+ */
+ if (ret == -ECANCELED || !ret) {
+ if (copy_to_user(user_data, msg->data, msg->len)) {
+ ret = -EFAULT;
+ } else {
+ user_msg->len = msg->len;
+ user_msg->count = msg->count;
+ }
+ }
+
+free_msg:
+ kfree(msg);
+out:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
+int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
+{
+ struct qaic_manage_trans_status_from_dev *status_result;
+ struct qaic_manage_trans_status_to_dev *status_query;
+ struct manage_msg *user_msg;
+ int ret;
+
+ user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
+ if (!user_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ user_msg->len = sizeof(*status_query);
+ user_msg->count = 1;
+
+ status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
+ status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
+ status_query->hdr.len = sizeof(status_query->hdr);
+
+ ret = qaic_manage(qdev, usr, user_msg);
+ if (ret)
+ goto kfree_user_msg;
+ status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
+ *major = status_result->major;
+ *minor = status_result->minor;
+
+ if (status_result->status_flags & BIT(0)) { /* device is using CRC */
+ /* qdev->gen_crc already generates CRC by default, so just enable validation */
+ qdev->valid_crc = valid_crc;
+ } else {
+ /* qdev->valid_crc already bypasses validation by default, so just disable generation */
+ qdev->gen_crc = gen_crc_stub;
+ }
+
+kfree_user_msg:
+ kfree(user_msg);
+out:
+ return ret;
+}
+
+static void resp_worker(struct work_struct *work)
+{
+ struct resp_work *resp = container_of(work, struct resp_work, work);
+ struct qaic_device *qdev = resp->qdev;
+ struct wire_msg *msg = resp->buf;
+ struct xfer_queue_elem *elem;
+ struct xfer_queue_elem *i;
+ bool found = false;
+
+ mutex_lock(&qdev->cntl_mutex);
+ list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
+ if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
+ found = true;
+ list_del_init(&elem->list);
+ elem->buf = msg;
+ complete_all(&elem->xfer_done);
+ break;
+ }
+ }
+ mutex_unlock(&qdev->cntl_mutex);
+
+ if (!found)
+ /* request must have timed out, drop packet */
+ kfree(msg);
+
+ kfree(resp);
+}
+
+static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
+{
+ bool all_done = false;
+
+ spin_lock(&wrappers->lock);
+ kref_put(&wrapper->ref_count, free_wrapper);
+ all_done = list_empty(&wrappers->list);
+ spin_unlock(&wrappers->lock);
+
+ if (all_done)
+ kfree(wrappers);
+}
+
+void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct wire_msg *msg = mhi_result->buf_addr;
+ struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);
+
+ free_wrapper_from_list(wrapper->head, wrapper);
+}
+
+void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct wire_msg *msg = mhi_result->buf_addr;
+ struct resp_work *resp;
+
+ if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
+ kfree(msg);
+ return;
+ }
+
+ resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
+ if (!resp) {
+ kfree(msg);
+ return;
+ }
+
+ INIT_WORK(&resp->work, resp_worker);
+ resp->qdev = qdev;
+ resp->buf = msg;
+ queue_work(qdev->cntl_wq, &resp->work);
+}
+
+int qaic_control_open(struct qaic_device *qdev)
+{
+ if (!qdev->cntl_ch)
+ return -ENODEV;
+
+ qdev->cntl_lost_buf = false;
+ /*
+ * By default qaic should assume that device has CRC enabled.
+ * Qaic comes to know if device has CRC enabled or disabled during the
+ * device status transaction, which is the first transaction performed
+ * on control channel.
+ *
+ * So CRC validation of first device status transaction response is
+ * ignored (by calling valid_crc_stub) and is done later during decoding
+ * if device has CRC enabled.
+ * Now that qaic knows whether device has CRC enabled or not it acts
+ * accordingly.
+ */
+ qdev->gen_crc = gen_crc;
+ qdev->valid_crc = valid_crc_stub;
+
+ return mhi_prepare_for_transfer(qdev->cntl_ch);
+}
+
+void qaic_control_close(struct qaic_device *qdev)
+{
+ mhi_unprepare_from_transfer(qdev->cntl_ch);
+}
+
+void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
+{
+ struct wire_trans_terminate_to_dev *trans;
+ struct wrapper_list *wrappers;
+ struct wrapper_msg *wrapper;
+ struct wire_msg *msg;
+ struct wire_msg *rsp;
+
+ wrappers = alloc_wrapper_list();
+ if (!wrappers)
+ return;
+
+ wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
+ if (!wrapper) {
+ kfree(wrappers);
+ return;
+ }
+
+ msg = &wrapper->msg;
+
+ trans = (struct wire_trans_terminate_to_dev *)msg->data;
+
+ trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
+ trans->hdr.len = cpu_to_le32(sizeof(*trans));
+ trans->handle = cpu_to_le32(usr->handle);
+
+ mutex_lock(&qdev->cntl_mutex);
+ wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
+ msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
+ msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
+ msg->hdr.len = cpu_to_le32(wrapper->len);
+ msg->hdr.count = cpu_to_le32(1);
+ msg->hdr.handle = cpu_to_le32(usr->handle);
+ msg->hdr.padding = cpu_to_le32(0);
+ msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
+
+ /*
+ * msg_xfer releases the mutex
+ * We don't care about the return of msg_xfer since we will not do
+ * anything different based on what happens.
+ * We ignore pending signals since one will be set if the user is
+ * killed, and we need to give the device a chance to clean up, otherwise
+ * DMA may still be in progress when we return.
+ */
+ rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
+ if (!IS_ERR(rsp))
+ kfree(rsp);
+ free_wrapper_from_list(wrappers, wrapper);
+}
+
+void wake_all_cntl(struct qaic_device *qdev)
+{
+ struct xfer_queue_elem *elem;
+ struct xfer_queue_elem *i;
+
+ mutex_lock(&qdev->cntl_mutex);
+ list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
+ list_del_init(&elem->list);
+ complete_all(&elem->xfer_done);
+ }
+ mutex_unlock(&qdev->cntl_mutex);
+}
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
new file mode 100644
index 000000000000..c0a574cd1b35
--- /dev/null
+++ b/drivers/accel/qaic/qaic_data.c
@@ -0,0 +1,1902 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
+#include <uapi/drm/qaic_accel.h>
+
+#include "qaic.h"
+
+#define SEM_VAL_MASK GENMASK_ULL(11, 0)
+#define SEM_INDEX_MASK GENMASK_ULL(4, 0)
+#define BULK_XFER BIT(3)
+#define GEN_COMPLETION BIT(4)
+#define INBOUND_XFER 1
+#define OUTBOUND_XFER 2
+#define REQHP_OFF 0x0 /* we read this */
+#define REQTP_OFF 0x4 /* we write this */
+#define RSPHP_OFF 0x8 /* we write this */
+#define RSPTP_OFF 0xc /* we read this */
+
+#define ENCODE_SEM(val, index, sync, cmd, flags) \
+ ({ \
+ FIELD_PREP(GENMASK(11, 0), (val)) | \
+ FIELD_PREP(GENMASK(20, 16), (index)) | \
+ FIELD_PREP(BIT(22), (sync)) | \
+ FIELD_PREP(GENMASK(26, 24), (cmd)) | \
+ FIELD_PREP(GENMASK(30, 29), (flags)) | \
+ FIELD_PREP(BIT(31), (cmd) ? 1 : 0); \
+ })
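+/*
+ * For example, ENCODE_SEM(5, 3, 1, 0, 0) packs value 5 into bits 11:0, index
+ * 3 into bits 20:16 and sets the presync bit 22, giving 0x430005; bit 31
+ * stays clear because the command is 0.
+ */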
+#define NUM_EVENTS 128
+#define NUM_DELAYS 10
+
+static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
+module_param(wait_exec_default_timeout_ms, uint, 0600);
+MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");
+
+static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
+module_param(datapath_poll_interval_us, uint, 0600);
+MODULE_PARM_DESC(datapath_poll_interval_us,
+ "Amount of time to sleep between activity when datapath polling is enabled");
+
+struct dbc_req {
+ /*
+ * A request ID is assigned to each memory handle going into the DMA queue.
+ * As a single memory handle can enqueue multiple elements in the DMA queue,
+ * all of them will have the same request ID.
+ */
+ __le16 req_id;
+ /* Future use */
+ __u8 seq_id;
+ /*
+ * Special encoded variable
+ * 7 0 - Do not force to generate MSI after DMA is completed
+ * 1 - Force to generate MSI after DMA is completed
+ * 6:5 Reserved
+ * 4 1 - Generate completion element in the response queue
+ * 0 - No Completion Code
+ * 3 0 - DMA request is a Link list transfer
+ * 1 - DMA request is a Bulk transfer
+ * 2 Reserved
+ * 1:0 00 - No DMA transfer involved
+ * 01 - DMA transfer is part of inbound transfer
+ * 10 - DMA transfer has outbound transfer
+ * 11 - NA
+ */
+ __u8 cmd;
+ __le32 resv;
+ /* Source address for the transfer */
+ __le64 src_addr;
+ /* Destination address for the transfer */
+ __le64 dest_addr;
+ /* Length of transfer request */
+ __le32 len;
+ __le32 resv2;
+ /* Doorbell address */
+ __le64 db_addr;
+ /*
+ * Special encoded variable
+ * 7 1 - Doorbell(db) write
+ * 0 - No doorbell write
+ * 6:2 Reserved
+ * 1:0 00 - 32 bit access, db address must be aligned to 32bit-boundary
+ * 01 - 16 bit access, db address must be aligned to 16bit-boundary
+ * 10 - 8 bit access, db address must be aligned to 8bit-boundary
+ * 11 - Reserved
+ */
+ __u8 db_len;
+ __u8 resv3;
+ __le16 resv4;
+ /* 32 bit data written to doorbell address */
+ __le32 db_data;
+ /*
+ * Special encoded variable
+ * All the fields of sem_cmdX are passed from user and all are ORed
+ * together to form sem_cmd.
+ * 11:0 Semaphore value
+ * 15:12 Reserved
+ * 20:16 Semaphore index
+ * 21 Reserved
+ * 22 Semaphore Sync
+ * 23 Reserved
+ * 26:24 Semaphore command
+ * 28:27 Reserved
+ * 29 Semaphore DMA out bound sync fence
+ * 30 Semaphore DMA in bound sync fence
+ * 31 Enable semaphore command
+ */
+ __le32 sem_cmd0;
+ __le32 sem_cmd1;
+ __le32 sem_cmd2;
+ __le32 sem_cmd3;
+} __packed;
+
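+/*
+ * With the definitions above, a bulk inbound transfer that also generates a
+ * completion element in the response queue uses
+ * cmd = BULK_XFER | GEN_COMPLETION | INBOUND_XFER, i.e. 0x19, which is how
+ * encode_reqs() builds the field.
+ */
+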
+struct dbc_rsp {
+ /* Request ID of the memory handle whose DMA transaction is completed */
+ __le16 req_id;
+ /* Status of the DMA transaction. 0 : Success, otherwise failure */
+ __le16 status;
+} __packed;
+
+inline int get_dbc_req_elem_size(void)
+{
+ return sizeof(struct dbc_req);
+}
+
+inline int get_dbc_rsp_elem_size(void)
+{
+ return sizeof(struct dbc_rsp);
+}
+
+static void free_slice(struct kref *kref)
+{
+ struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
+
+ list_del(&slice->slice);
+ drm_gem_object_put(&slice->bo->base);
+ sg_free_table(slice->sgt);
+ kfree(slice->sgt);
+ kfree(slice->reqs);
+ kfree(slice);
+}
+
+static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
+ struct sg_table *sgt_in, u64 size, u64 offset)
+{
+ int total_len, len, nents, offf = 0, offl = 0;
+ struct scatterlist *sg, *sgn, *sgf, *sgl;
+ struct sg_table *sgt;
+ int ret, j;
+
+ /* find out number of relevant nents needed for this mem */
+ total_len = 0;
+ sgf = NULL;
+ sgl = NULL;
+ nents = 0;
+
+ size = size ? size : PAGE_SIZE;
+ for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+ len = sg_dma_len(sg);
+
+ if (!len)
+ continue;
+ if (offset >= total_len && offset < total_len + len) {
+ sgf = sg;
+ offf = offset - total_len;
+ }
+ if (sgf)
+ nents++;
+ if (offset + size >= total_len &&
+ offset + size <= total_len + len) {
+ sgl = sg;
+ offl = offset + size - total_len;
+ break;
+ }
+ total_len += len;
+ }
+
+ if (!sgf || !sgl) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ goto free_sgt;
+
+ /* copy relevant sg nodes and fix up page and length */
+ sgn = sgf;
+ for_each_sgtable_sg(sgt, sg, j) {
+ memcpy(sg, sgn, sizeof(*sg));
+ if (sgn == sgf) {
+ sg_dma_address(sg) += offf;
+ sg_dma_len(sg) -= offf;
+ sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
+ } else {
+ offf = 0;
+ }
+ if (sgn == sgl) {
+ sg_dma_len(sg) = offl - offf;
+ sg_set_page(sg, sg_page(sgn), offl - offf, offf);
+ sg_mark_end(sg);
+ break;
+ }
+ sgn = sg_next(sgn);
+ }
+
+ *sgt_out = sgt;
+ return ret;
+
+free_sgt:
+ kfree(sgt);
+out:
+ *sgt_out = NULL;
+ return ret;
+}
+
+static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
+ struct qaic_attach_slice_entry *req)
+{
+ __le64 db_addr = cpu_to_le64(req->db_addr);
+ __le32 db_data = cpu_to_le32(req->db_data);
+ struct scatterlist *sg;
+ __u8 cmd = BULK_XFER;
+ int presync_sem;
+ u64 dev_addr;
+ __u8 db_len;
+ int i;
+
+ if (!slice->no_xfer)
+ cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);
+
+ if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
+ return -EINVAL;
+
+ presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
+ if (presync_sem > 1)
+ return -EINVAL;
+
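+ /* Repurpose presync_sem as a bitmap recording which sem, if any, is the presync */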
+ presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
+ req->sem2.presync << 2 | req->sem3.presync << 3;
+
+ switch (req->db_len) {
+ case 32:
+ db_len = BIT(7);
+ break;
+ case 16:
+ db_len = BIT(7) | 1;
+ break;
+ case 8:
+ db_len = BIT(7) | 2;
+ break;
+ case 0:
+ db_len = 0; /* doorbell is not active for this command */
+ break;
+ default:
+ return -EINVAL; /* should never hit this */
+ }
+
+ /*
+ * When we end up splitting up a single request (i.e. a buf slice) into
+ * multiple DMA requests, we have to manage the sync data carefully.
+ * There can only be one presync sem. That needs to be on every xfer
+ * so that the DMA engine doesn't transfer data before the receiver is
+ * ready. We only do the doorbell and postsync sems after the xfer.
+ * To guarantee previous xfers for the request are complete, we use a
+ * fence.
+ */
+ dev_addr = req->dev_addr;
+ for_each_sgtable_sg(slice->sgt, sg, i) {
+ slice->reqs[i].cmd = cmd;
+ slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
+ sg_dma_address(sg) : dev_addr);
+ slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
+ dev_addr : sg_dma_address(sg));
+ /*
+ * sg_dma_len(sg) returns the size of a DMA segment. The maximum DMA
+ * segment size is set to UINT_MAX by qaic, hence the return value of
+ * sg_dma_len(sg) can never exceed the u32 range. So, by downsizing
+ * we are not corrupting the value.
+ */
+ slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
+ switch (presync_sem) {
+ case BIT(0):
+ slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
+ req->sem0.index,
+ req->sem0.presync,
+ req->sem0.cmd,
+ req->sem0.flags));
+ break;
+ case BIT(1):
+ slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
+ req->sem1.index,
+ req->sem1.presync,
+ req->sem1.cmd,
+ req->sem1.flags));
+ break;
+ case BIT(2):
+ slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
+ req->sem2.index,
+ req->sem2.presync,
+ req->sem2.cmd,
+ req->sem2.flags));
+ break;
+ case BIT(3):
+ slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
+ req->sem3.index,
+ req->sem3.presync,
+ req->sem3.cmd,
+ req->sem3.flags));
+ break;
+ }
+ dev_addr += sg_dma_len(sg);
+ }
+ /* add post transfer stuff to last segment */
+ i--;
+ slice->reqs[i].cmd |= GEN_COMPLETION;
+ slice->reqs[i].db_addr = db_addr;
+ slice->reqs[i].db_len = db_len;
+ slice->reqs[i].db_data = db_data;
+ /*
+ * Add a fence if we have more than one request going to the hardware
+ * representing the entirety of the user request, and the user request
+ * has no presync condition.
+ * Fences are expensive, so we try to avoid them. We rely on the
+ * hardware behavior to avoid needing one when there is a presync
+ * condition. When a presync exists, all requests for that same
+ * presync will be queued into a FIFO. Since we queue the post xfer
+ * activity only on the last request we queue, the hardware will ensure
+ * that the last queued request is processed last, making sure the post
+ * xfer activity happens at the right time without a fence.
+ */
+ if (i && !presync_sem)
+ req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
+ QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
+ slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
+ req->sem0.presync, req->sem0.cmd,
+ req->sem0.flags));
+ slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
+ req->sem1.presync, req->sem1.cmd,
+ req->sem1.flags));
+ slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
+ req->sem2.presync, req->sem2.cmd,
+ req->sem2.flags));
+ slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
+ req->sem3.presync, req->sem3.cmd,
+ req->sem3.flags));
+
+ return 0;
+}
+
+static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
+ struct qaic_attach_slice_entry *slice_ent)
+{
+ struct sg_table *sgt = NULL;
+ struct bo_slice *slice;
+ int ret;
+
+ ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
+ if (ret)
+ goto out;
+
+ slice = kmalloc(sizeof(*slice), GFP_KERNEL);
+ if (!slice) {
+ ret = -ENOMEM;
+ goto free_sgt;
+ }
+
+ slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
+ if (!slice->reqs) {
+ ret = -ENOMEM;
+ goto free_slice;
+ }
+
+ slice->no_xfer = !slice_ent->size;
+ slice->sgt = sgt;
+ slice->nents = sgt->nents;
+ slice->dir = bo->dir;
+ slice->bo = bo;
+ slice->size = slice_ent->size;
+ slice->offset = slice_ent->offset;
+
+ ret = encode_reqs(qdev, slice, slice_ent);
+ if (ret)
+ goto free_req;
+
+ bo->total_slice_nents += sgt->nents;
+ kref_init(&slice->ref_count);
+ drm_gem_object_get(&bo->base);
+ list_add_tail(&slice->slice, &bo->slices);
+
+ return 0;
+
+free_req:
+ kfree(slice->reqs);
+free_slice:
+ kfree(slice);
+free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+out:
+ return ret;
+}
+
+static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
+{
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ struct page **pages;
+ int *pages_order;
+ int buf_extra;
+ int max_order;
+ int nr_pages;
+ int ret = 0;
+ int i, j, k;
+ int order;
+
+ if (size) {
+ nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ /*
+ * calculate how much extra we are going to allocate, to remove
+ * later
+ */
+ buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
+ max_order = min(MAX_ORDER - 1, get_order(size));
+ } else {
+ /* allocate a single page for bookkeeping */
+ nr_pages = 1;
+ buf_extra = 0;
+ max_order = 0;
+ }
+
+ pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages_order = (void *)pages + sizeof(*pages) * nr_pages;
+
+ /*
+ * Allocate requested memory using alloc_pages. It is possible to allocate
+ * the requested memory in multiple chunks by calling alloc_pages
+ * multiple times. Use SG table to handle multiple allocated pages.
+ */
+ i = 0;
+ while (nr_pages > 0) {
+ order = min(get_order(nr_pages * PAGE_SIZE), max_order);
+ while (1) {
+ pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
+ __GFP_NOWARN | __GFP_ZERO |
+ (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
+ order);
+ if (pages[i])
+ break;
+ if (!order--) {
+ ret = -ENOMEM;
+ goto free_partial_alloc;
+ }
+ }
+
+ max_order = order;
+ pages_order[i] = order;
+
+ nr_pages -= 1 << order;
+ if (nr_pages <= 0)
+ /* account for over allocation */
+ buf_extra += abs(nr_pages) * PAGE_SIZE;
+ i++;
+ }
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto free_partial_alloc;
+ }
+
+ if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto free_sgt;
+ }
+
+ /* Populate the SG table with the allocated memory pages */
+ sg = sgt->sgl;
+ for (k = 0; k < i; k++, sg = sg_next(sg)) {
+ /* Last entry requires special handling */
+ if (k < i - 1) {
+ sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
+ } else {
+ sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
+ sg_mark_end(sg);
+ }
+ }
+
+ kvfree(pages);
+ *sgt_out = sgt;
+ return ret;
+
+free_sgt:
+ kfree(sgt);
+free_partial_alloc:
+ for (j = 0; j < i; j++)
+ __free_pages(pages[j], pages_order[j]);
+ kvfree(pages);
+out:
+ *sgt_out = NULL;
+ return ret;
+}
+
+static bool invalid_sem(struct qaic_sem *sem)
+{
+ if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
+ !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
+ sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
+ sem->cmd > QAIC_SEM_WAIT_GT_0)
+ return true;
+ return false;
+}
+
+static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
+ u32 count, u64 total_size)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
+ slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
+ invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
+ invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
+ return -EINVAL;
+
+ if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qaic_free_sgt(struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+
+ for (sg = sgt->sgl; sg; sg = sg_next(sg))
+ if (sg_page(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+
+ drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
+}
+
+static const struct vm_operations_struct drm_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+ unsigned long offset = 0;
+ struct scatterlist *sg;
+ int ret;
+
+ if (obj->import_attach)
+ return -EINVAL;
+
+ for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
+ if (sg_page(sg)) {
+ ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
+ sg->length, vma->vm_page_prot);
+ if (ret)
+ goto out;
+ offset += sg->length;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static void qaic_free_object(struct drm_gem_object *obj)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+
+ if (obj->import_attach) {
+ /* DMABUF/PRIME Path */
+ dma_buf_detach(obj->import_attach->dmabuf, obj->import_attach);
+ dma_buf_put(obj->import_attach->dmabuf);
+ } else {
+ /* Private buffer allocation path */
+ qaic_free_sgt(bo->sgt);
+ }
+
+ drm_gem_object_release(obj);
+ kfree(bo);
+}
+
+static const struct drm_gem_object_funcs qaic_gem_funcs = {
+ .free = qaic_free_object,
+ .print_info = qaic_gem_print_info,
+ .mmap = qaic_gem_object_mmap,
+ .vm_ops = &drm_vm_ops,
+};
+
+static struct qaic_bo *qaic_alloc_init_bo(void)
+{
+ struct qaic_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&bo->slices);
+ init_completion(&bo->xfer_done);
+ complete_all(&bo->xfer_done);
+
+ return bo;
+}
+
+int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_create_bo *args = data;
+ int usr_rcu_id, qdev_rcu_id;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ struct qaic_user *usr;
+ struct qaic_bo *bo;
+ size_t size;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ size = PAGE_ALIGN(args->size);
+ if (size == 0) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ bo = qaic_alloc_init_bo();
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ goto unlock_dev_srcu;
+ }
+ obj = &bo->base;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ obj->funcs = &qaic_gem_funcs;
+ ret = create_sgt(qdev, &bo->sgt, size);
+ if (ret)
+ goto free_bo;
+
+ bo->size = args->size;
+
+ ret = drm_gem_handle_create(file_priv, obj, &args->handle);
+ if (ret)
+ goto free_sgt;
+
+ bo->handle = args->handle;
+ drm_gem_object_put(obj);
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+
+ return 0;
+
+free_sgt:
+ qaic_free_sgt(bo->sgt);
+free_bo:
+ kfree(bo);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
+int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_mmap_bo *args = data;
+ int usr_rcu_id, qdev_rcu_id;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ struct qaic_user *usr;
+ int ret;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock_dev_srcu;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret == 0)
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ drm_gem_object_put(obj);
+
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
+struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_object *obj;
+ struct qaic_bo *bo;
+ size_t size;
+ int ret;
+
+ bo = qaic_alloc_init_bo();
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ goto out;
+ }
+
+ obj = &bo->base;
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto attach_fail;
+ }
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+ if (size == 0) {
+ ret = -EINVAL;
+ goto size_align_fail;
+ }
+
+ drm_gem_private_object_init(dev, obj, size);
+ /*
+ * Skipping dma_buf_map_attachment() as we do not know the direction
+ * just yet. Once the direction is known via the subsequent IOCTL to
+ * attach slicing, we can do it then.
+ */
+
+ obj->funcs = &qaic_gem_funcs;
+ obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
+
+ return obj;
+
+size_align_fail:
+ dma_buf_detach(dma_buf, attach);
+attach_fail:
+ dma_buf_put(dma_buf);
+ kfree(bo);
+out:
+ return ERR_PTR(ret);
+}
+
+static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
+{
+ struct drm_gem_object *obj = &bo->base;
+ struct sg_table *sgt;
+ int ret;
+
+ if (obj->import_attach->dmabuf->size < hdr->size)
+ return -EINVAL;
+
+ sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ return ret;
+ }
+
+ bo->sgt = sgt;
+ bo->size = hdr->size;
+
+ return 0;
+}
+
+static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
+ struct qaic_attach_slice_hdr *hdr)
+{
+ int ret;
+
+ if (bo->size != hdr->size)
+ return -EINVAL;
+
+ ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
+ if (ret)
+ return -EFAULT;
+
+ return 0;
+}
+
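+/* DMA map the BO for the direction given in the slice header. */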
+static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
+ struct qaic_attach_slice_hdr *hdr)
+{
+ int ret;
+
+ if (bo->base.import_attach)
+ ret = qaic_prepare_import_bo(bo, hdr);
+ else
+ ret = qaic_prepare_export_bo(qdev, bo, hdr);
+
+ if (ret == 0)
+ bo->dir = hdr->dir;
+
+ return ret;
+}
+
+static void qaic_unprepare_import_bo(struct qaic_bo *bo)
+{
+ dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
+ bo->sgt = NULL;
+ bo->size = 0;
+}
+
+static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
+{
+ dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
+}
+
+static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
+{
+ if (bo->base.import_attach)
+ qaic_unprepare_import_bo(bo);
+ else
+ qaic_unprepare_export_bo(qdev, bo);
+
+ bo->dir = 0;
+}
+
+static void qaic_free_slices_bo(struct qaic_bo *bo)
+{
+ struct bo_slice *slice, *temp;
+
+ list_for_each_entry_safe(slice, temp, &bo->slices, slice)
+ kref_put(&slice->ref_count, free_slice);
+}
+
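+/*
+ * Build the slice list for the BO from the user supplied slice entries and
+ * verify that the resulting scatter-gather entries fit in the DBC queue.
+ */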
+static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
+ struct qaic_attach_slice_hdr *hdr,
+ struct qaic_attach_slice_entry *slice_ent)
+{
+ int ret, i;
+
+ for (i = 0; i < hdr->count; i++) {
+ ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
+ if (ret) {
+ qaic_free_slices_bo(bo);
+ return ret;
+ }
+ }
+
+ if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
+ qaic_free_slices_bo(bo);
+ return -ENOSPC;
+ }
+
+ bo->sliced = true;
+ bo->nr_slice = hdr->count;
+ list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
+
+ return 0;
+}
+
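+/*
+ * Validate the slicing request from userspace, DMA map the BO, and attach
+ * the requested slices to it.
+ */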
+int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_attach_slice_entry *slice_ent;
+ struct qaic_attach_slice *args = data;
+ struct dma_bridge_chan *dbc;
+ int usr_rcu_id, qdev_rcu_id;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ unsigned long arg_size;
+ struct qaic_user *usr;
+ u8 __user *user_data;
+ struct qaic_bo *bo;
+ int ret;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->hdr.count == 0) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ arg_size = args->hdr.count * sizeof(*slice_ent);
+ if (arg_size / args->hdr.count != sizeof(*slice_ent)) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->hdr.dbc_id >= qdev->num_dbc) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->hdr.size == 0) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ dbc = &qdev->dbc[args->hdr.dbc_id];
+ if (dbc->usr != usr) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->data == 0) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ user_data = u64_to_user_ptr(args->data);
+
+ slice_ent = kzalloc(arg_size, GFP_KERNEL);
+ if (!slice_ent) {
+		ret = -ENOMEM;
+ goto unlock_dev_srcu;
+ }
+
+ ret = copy_from_user(slice_ent, user_data, arg_size);
+ if (ret) {
+ ret = -EFAULT;
+ goto free_slice_ent;
+ }
+
+ ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
+ if (ret)
+ goto free_slice_ent;
+
+ obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto free_slice_ent;
+ }
+
+ bo = to_qaic_bo(obj);
+
+ ret = qaic_prepare_bo(qdev, bo, &args->hdr);
+ if (ret)
+ goto put_bo;
+
+ ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
+ if (ret)
+ goto unprepare_bo;
+
+ if (args->hdr.dir == DMA_TO_DEVICE)
+ dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
+
+ bo->dbc = dbc;
+ drm_gem_object_put(obj);
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+
+ return 0;
+
+unprepare_bo:
+ qaic_unprepare_bo(qdev, bo);
+put_bo:
+ drm_gem_object_put(obj);
+free_slice_ent:
+ kfree(slice_ent);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
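+/*
+ * Copy a slice's DMA requests into the DBC request queue, handling wrap
+ * around of the circular queue. Returns -EAGAIN if there is not enough room.
+ */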
+static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
+ u32 head, u32 *ptail)
+{
+ struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
+ struct dbc_req *reqs = slice->reqs;
+ u32 tail = *ptail;
+ u32 avail;
+
+ avail = head - tail;
+ if (head <= tail)
+ avail += dbc->nelem;
+
+ --avail;
+
+ if (avail < slice->nents)
+ return -EAGAIN;
+
+ if (tail + slice->nents > dbc->nelem) {
+ avail = dbc->nelem - tail;
+ avail = min_t(u32, avail, slice->nents);
+ memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
+ sizeof(*reqs) * avail);
+ reqs += avail;
+ avail = slice->nents - avail;
+ if (avail)
+ memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
+ } else {
+ memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
+ sizeof(*reqs) * slice->nents);
+ }
+
+ *ptail = (tail + slice->nents) % dbc->nelem;
+
+ return 0;
+}
+
+/*
+ * Based on the value of resize we may only need to transmit first_n
+ * entries and the last entry, with last_bytes to send from the last entry.
+ * Note that first_n could be 0.
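+ * For example, with three 4K entries and a resize of 6K, first_n is 1 (the
+ * first entry is copied whole) and last_bytes is 2K (the portion of the
+ * second entry that is still needed).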
+ */
+static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
+ u64 resize, u32 dbc_id, u32 head, u32 *ptail)
+{
+ struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
+ struct dbc_req *reqs = slice->reqs;
+ struct dbc_req *last_req;
+ u32 tail = *ptail;
+ u64 total_bytes;
+ u64 last_bytes;
+ u32 first_n;
+ u32 avail;
+ int ret;
+ int i;
+
+ avail = head - tail;
+ if (head <= tail)
+ avail += dbc->nelem;
+
+ --avail;
+
+ total_bytes = 0;
+ for (i = 0; i < slice->nents; i++) {
+ total_bytes += le32_to_cpu(reqs[i].len);
+ if (total_bytes >= resize)
+ break;
+ }
+
+ if (total_bytes < resize) {
+ /* User space should have used the full buffer path. */
+ ret = -EINVAL;
+ return ret;
+ }
+
+ first_n = i;
+ last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize;
+
+ if (avail < (first_n + 1))
+ return -EAGAIN;
+
+ if (first_n) {
+ if (tail + first_n > dbc->nelem) {
+ avail = dbc->nelem - tail;
+ avail = min_t(u32, avail, first_n);
+ memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
+ sizeof(*reqs) * avail);
+ last_req = reqs + avail;
+ avail = first_n - avail;
+ if (avail)
+ memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
+ } else {
+ memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
+ sizeof(*reqs) * first_n);
+ }
+ }
+
+	/*
+	 * Copy over the last entry. Here we need to adjust len to the leftover
+	 * size, and take the src and dst addresses from the entry being
+	 * truncated (reqs[first_n]).
+	 */
+ last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
+ memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
+
+	/*
+	 * last_bytes holds the size of a DMA segment. The maximum DMA segment
+	 * size is set to UINT_MAX by qaic, so last_bytes can never exceed the
+	 * u32 range and the downcast does not corrupt the value.
+	 */
+ last_req->len = cpu_to_le32((u32)last_bytes);
+ last_req->src_addr = reqs[first_n].src_addr;
+ last_req->dest_addr = reqs[first_n].dest_addr;
+
+ *ptail = (tail + first_n + 1) % dbc->nelem;
+
+ return 0;
+}
+
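+/*
+ * Queue every BO in the execute request on the DBC. On failure, any BOs
+ * already queued are removed from the transfer list and released.
+ */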
+static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
+ struct qaic_execute_entry *exec, unsigned int count,
+ bool is_partial, struct dma_bridge_chan *dbc, u32 head,
+ u32 *tail)
+{
+ struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
+ struct drm_gem_object *obj;
+ struct bo_slice *slice;
+ unsigned long flags;
+ struct qaic_bo *bo;
+ bool queued;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < count; i++) {
+ /*
+		 * The ref count will be decremented once the transfer of this
+		 * buffer is complete; that happens inside dbc_irq_threaded_fn().
+ */
+ obj = drm_gem_object_lookup(file_priv,
+ is_partial ? pexec[i].handle : exec[i].handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto failed_to_send_bo;
+ }
+
+ bo = to_qaic_bo(obj);
+
+ if (!bo->sliced) {
+ ret = -EINVAL;
+ goto failed_to_send_bo;
+ }
+
+ if (is_partial && pexec[i].resize > bo->size) {
+ ret = -EINVAL;
+ goto failed_to_send_bo;
+ }
+
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ queued = bo->queued;
+ bo->queued = true;
+ if (queued) {
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ ret = -EINVAL;
+ goto failed_to_send_bo;
+ }
+
+ bo->req_id = dbc->next_req_id++;
+
+ list_for_each_entry(slice, &bo->slices, slice) {
+ /*
+			 * If this slice does not fall within the given
+			 * resize, skip it and continue with the next slice.
+ */
+ if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset)
+ continue;
+
+ for (j = 0; j < slice->nents; j++)
+ slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
+
+ /*
+			 * If this is a partial execute ioctl and resize has cut
+			 * this slice short, do a partial copy; otherwise do a
+			 * complete copy.
+ */
+ if (is_partial && pexec[i].resize &&
+ pexec[i].resize < slice->offset + slice->size)
+ ret = copy_partial_exec_reqs(qdev, slice,
+ pexec[i].resize - slice->offset,
+ dbc->id, head, tail);
+ else
+ ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
+ if (ret) {
+ bo->queued = false;
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ goto failed_to_send_bo;
+ }
+ }
+ reinit_completion(&bo->xfer_done);
+ list_add_tail(&bo->xfer_list, &dbc->xfer_list);
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
+ }
+
+ return 0;
+
+failed_to_send_bo:
+ if (likely(obj))
+ drm_gem_object_put(obj);
+ for (j = 0; j < i; j++) {
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
+ obj = &bo->base;
+ bo->queued = false;
+ list_del(&bo->xfer_list);
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
+ drm_gem_object_put(obj);
+ }
+ return ret;
+}
+
+static void update_profiling_data(struct drm_file *file_priv,
+ struct qaic_execute_entry *exec, unsigned int count,
+ bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
+{
+ struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
+ struct drm_gem_object *obj;
+ struct qaic_bo *bo;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ /*
+ * Since we already committed the BO to hardware, the only way
+ * this should fail is a pending signal. We can't cancel the
+ * submit to hardware, so we have to just skip the profiling
+ * data. In case the signal is not fatal to the process, we
+ * return success so that the user doesn't try to resubmit.
+ */
+ obj = drm_gem_object_lookup(file_priv,
+ is_partial ? pexec[i].handle : exec[i].handle);
+ if (!obj)
+ break;
+ bo = to_qaic_bo(obj);
+ bo->perf_stats.req_received_ts = received_ts;
+ bo->perf_stats.req_submit_ts = submit_ts;
+ bo->perf_stats.queue_level_before = queue_level;
+ queue_level += bo->total_slice_nents;
+ drm_gem_object_put(obj);
+ }
+}
+
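+/*
+ * Common implementation of the execute and partial execute ioctls. Copies
+ * the request entries from userspace, queues the BOs on the DBC, and then
+ * writes the new request queue tail pointer to submit the work.
+ */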
+static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
+ bool is_partial)
+{
+ struct qaic_partial_execute_entry *pexec;
+ struct qaic_execute *args = data;
+ struct qaic_execute_entry *exec;
+ struct dma_bridge_chan *dbc;
+ int usr_rcu_id, qdev_rcu_id;
+ struct qaic_device *qdev;
+ struct qaic_user *usr;
+ u8 __user *user_data;
+ unsigned long n;
+ u64 received_ts;
+ u32 queue_level;
+ u64 submit_ts;
+ int rcu_id;
+ u32 head;
+ u32 tail;
+ u64 size;
+ int ret;
+
+ received_ts = ktime_get_ns();
+
+ size = is_partial ? sizeof(*pexec) : sizeof(*exec);
+
+ n = (unsigned long)size * args->hdr.count;
+ if (args->hdr.count == 0 || n / args->hdr.count != size)
+ return -EINVAL;
+
+ user_data = u64_to_user_ptr(args->data);
+
+ exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
+ pexec = (struct qaic_partial_execute_entry *)exec;
+ if (!exec)
+ return -ENOMEM;
+
+ if (copy_from_user(exec, user_data, n)) {
+ ret = -EFAULT;
+ goto free_exec;
+ }
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->hdr.dbc_id >= qdev->num_dbc) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ dbc = &qdev->dbc[args->hdr.dbc_id];
+
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (!dbc->usr || dbc->usr->handle != usr->handle) {
+ ret = -EPERM;
+ goto release_ch_rcu;
+ }
+
+ head = readl(dbc->dbc_base + REQHP_OFF);
+ tail = readl(dbc->dbc_base + REQTP_OFF);
+
+ if (head == U32_MAX || tail == U32_MAX) {
+ /* PCI link error */
+ ret = -ENODEV;
+ goto release_ch_rcu;
+ }
+
+ queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
+
+ ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
+ head, &tail);
+ if (ret)
+ goto release_ch_rcu;
+
+ /* Finalize commit to hardware */
+ submit_ts = ktime_get_ns();
+ writel(tail, dbc->dbc_base + REQTP_OFF);
+
+ update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
+ submit_ts, queue_level);
+
+ if (datapath_polling)
+ schedule_work(&dbc->poll_work);
+
+release_ch_rcu:
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+free_exec:
+ kfree(exec);
+ return ret;
+}
+
+int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
+}
+
+int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
+}
+
+/*
+ * Our interrupt handling is a bit more complicated than a simple ideal, but
+ * sadly necessary.
+ *
+ * Each dbc has a completion queue. Entries in the queue correspond to DMA
+ * requests which the device has processed. The hardware already has built-in
+ * irq mitigation. When the device puts an entry into the queue, it will
+ * only trigger an interrupt if the queue was empty. Therefore, when adding
+ * the Nth event to a non-empty queue, the hardware doesn't trigger an
+ * interrupt. This means the host doesn't get additional interrupts signaling
+ * the same thing - the queue has something to process.
+ * This behavior can be overridden in the DMA request.
+ * This means that when the host receives an interrupt, it is required to
+ * drain the queue.
+ *
+ * This behavior is what NAPI attempts to accomplish, although we can't use
+ * NAPI as we don't have a netdev. We use threaded irqs instead.
+ *
+ * However, there is a situation where the host drains the queue fast enough
+ * that every event causes an interrupt. Typically this is not a problem as
+ * the rate of events would be low. However, that is not the case with
+ * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
+ * lprnet, the host receives roughly 80k interrupts per second from the device
+ * (per /proc/interrupts). While NAPI documentation indicates the host should
+ * just chug along, sadly that behavior causes instability in some hosts.
+ *
+ * Therefore, we implement an interrupt disable scheme similar to NAPI. The
+ * key difference is that we will delay after draining the queue for a small
+ * time to allow additional events to come in via polling. Using the above
+ * lprnet workload, this reduces the number of interrupts processed from
+ * ~80k/sec to about 64 in 5 minutes and appears to solve the system
+ * instability.
+ */
+irqreturn_t dbc_irq_handler(int irq, void *data)
+{
+ struct dma_bridge_chan *dbc = data;
+ int rcu_id;
+ u32 head;
+ u32 tail;
+
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+
+ if (!dbc->usr) {
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return IRQ_HANDLED;
+ }
+
+ head = readl(dbc->dbc_base + RSPHP_OFF);
+ if (head == U32_MAX) { /* PCI link error */
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return IRQ_NONE;
+ }
+
+ tail = readl(dbc->dbc_base + RSPTP_OFF);
+ if (tail == U32_MAX) { /* PCI link error */
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return IRQ_NONE;
+ }
+
+ if (head == tail) { /* queue empty */
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return IRQ_NONE;
+ }
+
+ disable_irq_nosync(irq);
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return IRQ_WAKE_THREAD;
+}
+
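+/*
+ * Poll the DBC response queue while transfers are outstanding and wake the
+ * threaded irq handler once the device has produced new completions.
+ */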
+void irq_polling_work(struct work_struct *work)
+{
+ struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
+ unsigned long flags;
+ int rcu_id;
+ u32 head;
+ u32 tail;
+
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+
+ while (1) {
+ if (dbc->qdev->in_reset) {
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return;
+ }
+ if (!dbc->usr) {
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return;
+ }
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ if (list_empty(&dbc->xfer_list)) {
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return;
+ }
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+
+ head = readl(dbc->dbc_base + RSPHP_OFF);
+ if (head == U32_MAX) { /* PCI link error */
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return;
+ }
+
+ tail = readl(dbc->dbc_base + RSPTP_OFF);
+ if (tail == U32_MAX) { /* PCI link error */
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return;
+ }
+
+ if (head != tail) {
+ irq_wake_thread(dbc->irq, dbc);
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return;
+ }
+
+ cond_resched();
+ usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
+ }
+}
+
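+/*
+ * Drain the DBC response queue, completing a BO once responses for all of
+ * its slices have arrived, then re-enable the interrupt (or requeue the
+ * polling work when datapath polling is enabled).
+ */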
+irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
+{
+ struct dma_bridge_chan *dbc = data;
+ int event_count = NUM_EVENTS;
+ int delay_count = NUM_DELAYS;
+ struct qaic_device *qdev;
+ struct qaic_bo *bo, *i;
+ struct dbc_rsp *rsp;
+ unsigned long flags;
+ int rcu_id;
+ u16 status;
+ u16 req_id;
+ u32 head;
+ u32 tail;
+
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+
+ head = readl(dbc->dbc_base + RSPHP_OFF);
+ if (head == U32_MAX) /* PCI link error */
+ goto error_out;
+
+ qdev = dbc->qdev;
+read_fifo:
+
+ if (!event_count) {
+ event_count = NUM_EVENTS;
+ cond_resched();
+ }
+
+ /*
+	 * If this channel isn't assigned, or gets unassigned during processing,
+	 * we have nothing further to do.
+ */
+ if (!dbc->usr)
+ goto error_out;
+
+ tail = readl(dbc->dbc_base + RSPTP_OFF);
+ if (tail == U32_MAX) /* PCI link error */
+ goto error_out;
+
+ if (head == tail) { /* queue empty */
+ if (delay_count) {
+ --delay_count;
+ usleep_range(100, 200);
+ goto read_fifo; /* check for a new event */
+ }
+ goto normal_out;
+ }
+
+ delay_count = NUM_DELAYS;
+ while (head != tail) {
+ if (!event_count)
+ break;
+ --event_count;
+ rsp = dbc->rsp_q_base + head * sizeof(*rsp);
+ req_id = le16_to_cpu(rsp->req_id);
+ status = le16_to_cpu(rsp->status);
+ if (status)
+ pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ /*
+		 * A BO can receive multiple interrupts, since it can be
+		 * divided into multiple slices and receives one interrupt
+		 * per slice. Until interrupts for all the slices have been
+		 * received, we cannot mark the buffer complete.
+ */
+ list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
+ if (bo->req_id == req_id)
+ bo->nr_slice_xfer_done++;
+ else
+ continue;
+
+ if (bo->nr_slice_xfer_done < bo->nr_slice)
+ break;
+
+ /*
+			 * At this point we have received all the interrupts for
+			 * the BO, which means the BO's execution is complete.
+ */
+ dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
+ bo->nr_slice_xfer_done = 0;
+ bo->queued = false;
+ list_del(&bo->xfer_list);
+ bo->perf_stats.req_processed_ts = ktime_get_ns();
+ complete_all(&bo->xfer_done);
+ drm_gem_object_put(&bo->base);
+ break;
+ }
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ head = (head + 1) % dbc->nelem;
+ }
+
+ /*
+ * Update the head pointer of response queue and let the device know
+ * that we have consumed elements from the queue.
+ */
+ writel(head, dbc->dbc_base + RSPHP_OFF);
+
+ /* elements might have been put in the queue while we were processing */
+ goto read_fifo;
+
+normal_out:
+ if (likely(!datapath_polling))
+ enable_irq(irq);
+ else
+ schedule_work(&dbc->poll_work);
+	/* Checking the FIFO and enabling irqs can race, so check for a missed event. */
+ tail = readl(dbc->dbc_base + RSPTP_OFF);
+ if (tail != U32_MAX && head != tail) {
+ if (likely(!datapath_polling))
+ disable_irq_nosync(irq);
+ goto read_fifo;
+ }
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ return IRQ_HANDLED;
+
+error_out:
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ if (likely(!datapath_polling))
+ enable_irq(irq);
+ else
+ schedule_work(&dbc->poll_work);
+
+ return IRQ_HANDLED;
+}
+
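+/* Wait for a BO's execution to complete, or until the timeout expires. */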
+int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_wait *args = data;
+ int usr_rcu_id, qdev_rcu_id;
+ struct dma_bridge_chan *dbc;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ unsigned long timeout;
+ struct qaic_user *usr;
+ struct qaic_bo *bo;
+ int rcu_id;
+ int ret;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->pad != 0) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->dbc_id >= qdev->num_dbc) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ dbc = &qdev->dbc[args->dbc_id];
+
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (dbc->usr != usr) {
+ ret = -EPERM;
+ goto unlock_ch_srcu;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock_ch_srcu;
+ }
+
+ bo = to_qaic_bo(obj);
+ timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
+ timeout = msecs_to_jiffies(timeout);
+ ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto put_obj;
+ }
+ if (ret > 0)
+ ret = 0;
+
+ if (!dbc->usr)
+ ret = -EPERM;
+
+put_obj:
+ drm_gem_object_put(obj);
+unlock_ch_srcu:
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
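+/* Report the per-BO profiling timestamps and queue levels to userspace. */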
+int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_perf_stats_entry *ent = NULL;
+ struct qaic_perf_stats *args = data;
+ int usr_rcu_id, qdev_rcu_id;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ struct qaic_user *usr;
+ struct qaic_bo *bo;
+ int ret, i;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ if (args->hdr.dbc_id >= qdev->num_dbc) {
+ ret = -EINVAL;
+ goto unlock_dev_srcu;
+ }
+
+ ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
+ if (!ent) {
+		ret = -ENOMEM;
+ goto unlock_dev_srcu;
+ }
+
+ ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_ent;
+ }
+
+ for (i = 0; i < args->hdr.count; i++) {
+ obj = drm_gem_object_lookup(file_priv, ent[i].handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto free_ent;
+ }
+ bo = to_qaic_bo(obj);
+ /*
+		 * If the perf stats ioctl is called before the wait ioctl has
+		 * completed, the latency information is invalid.
+ */
+ if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
+ ent[i].device_latency_us = 0;
+ } else {
+ ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
+ bo->perf_stats.req_submit_ts), 1000);
+ }
+ ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
+ bo->perf_stats.req_received_ts), 1000);
+ ent[i].queue_level_before = bo->perf_stats.queue_level_before;
+ ent[i].num_queue_element = bo->total_slice_nents;
+ drm_gem_object_put(obj);
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
+ ret = -EFAULT;
+
+free_ent:
+ kfree(ent);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
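+/* Abort all outstanding transfers on a DBC and complete their BOs. */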
+static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
+{
+ unsigned long flags;
+ struct qaic_bo *bo;
+
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ while (!list_empty(&dbc->xfer_list)) {
+ bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
+ bo->queued = false;
+ list_del(&bo->xfer_list);
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
+ complete_all(&bo->xfer_done);
+ drm_gem_object_put(&bo->base);
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ }
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+}
+
+int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
+{
+ if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
+ return -EPERM;
+
+ qdev->dbc[dbc_id].usr = NULL;
+ synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
+ return 0;
+}
+
+/**
+ * enable_dbc - Enable the DBC. DBCs are disabled by removing the user
+ * context. Adding the user context back to the DBC enables it. This function
+ * trusts the DBC ID passed in and expects the DBC to be disabled.
+ * @qdev: Qranium device handle
+ * @dbc_id: ID of the DBC
+ * @usr: User context
+ */
+void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
+{
+ qdev->dbc[dbc_id].usr = usr;
+}
+
+void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
+{
+ struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
+
+ dbc->usr = NULL;
+ empty_xfer_list(qdev, dbc);
+ synchronize_srcu(&dbc->ch_lock);
+}
+
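+/*
+ * Tear down a DBC: abort outstanding transfers, free the request queue
+ * memory, and return any sliced BOs to their freshly created state.
+ */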
+void release_dbc(struct qaic_device *qdev, u32 dbc_id)
+{
+ struct bo_slice *slice, *slice_temp;
+ struct qaic_bo *bo, *bo_temp;
+ struct dma_bridge_chan *dbc;
+
+ dbc = &qdev->dbc[dbc_id];
+ if (!dbc->in_use)
+ return;
+
+ wakeup_dbc(qdev, dbc_id);
+
+ dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
+ dbc->total_size = 0;
+ dbc->req_q_base = NULL;
+ dbc->dma_addr = 0;
+ dbc->nelem = 0;
+ dbc->usr = NULL;
+
+ list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
+ list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
+ kref_put(&slice->ref_count, free_slice);
+ bo->sliced = false;
+ INIT_LIST_HEAD(&bo->slices);
+ bo->total_slice_nents = 0;
+ bo->dir = 0;
+ bo->dbc = NULL;
+ bo->nr_slice = 0;
+ bo->nr_slice_xfer_done = 0;
+ bo->queued = false;
+ bo->req_id = 0;
+ init_completion(&bo->xfer_done);
+ complete_all(&bo->xfer_done);
+ list_del(&bo->bo_list);
+ bo->perf_stats.req_received_ts = 0;
+ bo->perf_stats.req_submit_ts = 0;
+ bo->perf_stats.req_processed_ts = 0;
+ bo->perf_stats.queue_level_before = 0;
+ }
+
+ dbc->in_use = false;
+ wake_up(&dbc->dbc_release);
+}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
new file mode 100644
index 000000000000..ff80eb571729
--- /dev/null
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <uapi/drm/qaic_accel.h>
+
+#include "mhi_controller.h"
+#include "qaic.h"
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+#define PCI_DEV_AIC100 0xa100
+#define QAIC_NAME "qaic"
+#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
+#define CNTL_MAJOR 5
+#define CNTL_MINOR 0
+
+bool datapath_polling;
+module_param(datapath_polling, bool, 0400);
+MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
+static bool link_up;
+static DEFINE_IDA(qaic_usrs);
+
+static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
+static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
+
+static void free_usr(struct kref *kref)
+{
+ struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
+
+ cleanup_srcu_struct(&usr->qddev_lock);
+ ida_free(&qaic_usrs, usr->handle);
+ kfree(usr);
+}
+
+static int qaic_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct qaic_drm_device *qddev = dev->dev_private;
+ struct qaic_device *qdev = qddev->qdev;
+ struct qaic_user *usr;
+ int rcu_id;
+ int ret;
+
+ rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto dev_unlock;
+ }
+
+ usr = kmalloc(sizeof(*usr), GFP_KERNEL);
+ if (!usr) {
+ ret = -ENOMEM;
+ goto dev_unlock;
+ }
+
+ usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
+ if (usr->handle < 0) {
+ ret = usr->handle;
+ goto free_usr;
+ }
+ usr->qddev = qddev;
+ atomic_set(&usr->chunk_id, 0);
+ init_srcu_struct(&usr->qddev_lock);
+ kref_init(&usr->ref_count);
+
+ ret = mutex_lock_interruptible(&qddev->users_mutex);
+ if (ret)
+ goto cleanup_usr;
+
+ list_add(&usr->node, &qddev->users);
+ mutex_unlock(&qddev->users_mutex);
+
+ file->driver_priv = usr;
+
+ srcu_read_unlock(&qdev->dev_lock, rcu_id);
+ return 0;
+
+cleanup_usr:
+ cleanup_srcu_struct(&usr->qddev_lock);
+free_usr:
+ kfree(usr);
+dev_unlock:
+ srcu_read_unlock(&qdev->dev_lock, rcu_id);
+ return ret;
+}
+
+static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct qaic_user *usr = file->driver_priv;
+ struct qaic_drm_device *qddev;
+ struct qaic_device *qdev;
+ int qdev_rcu_id;
+ int usr_rcu_id;
+ int i;
+
+ qddev = usr->qddev;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (qddev) {
+ qdev = qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (!qdev->in_reset) {
+ qaic_release_usr(qdev, usr);
+ for (i = 0; i < qdev->num_dbc; ++i)
+ if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
+ release_dbc(qdev, i);
+ }
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+
+ mutex_lock(&qddev->users_mutex);
+ if (!list_empty(&usr->node))
+ list_del_init(&usr->node);
+ mutex_unlock(&qddev->users_mutex);
+ }
+
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ kref_put(&usr->ref_count, free_usr);
+
+ file->driver_priv = NULL;
+}
+
+DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);
+
+static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
+};
+
+static const struct drm_driver qaic_accel_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
+
+ .name = QAIC_NAME,
+ .desc = QAIC_DESC,
+ .date = "20190618",
+
+ .fops = &qaic_accel_fops,
+ .open = qaic_open,
+ .postclose = qaic_postclose,
+
+ .ioctls = qaic_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = qaic_gem_prime_import,
+};
+
+static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
+{
+ struct qaic_drm_device *qddev;
+ struct drm_device *ddev;
+ struct device *pdev;
+ int ret;
+
+ /* Hold off implementing partitions until the uapi is determined */
+ if (partition_id != QAIC_NO_PARTITION)
+ return -EINVAL;
+
+ pdev = &qdev->pdev->dev;
+
+ qddev = kzalloc(sizeof(*qddev), GFP_KERNEL);
+ if (!qddev)
+ return -ENOMEM;
+
+ ddev = drm_dev_alloc(&qaic_accel_driver, pdev);
+ if (IS_ERR(ddev)) {
+ ret = PTR_ERR(ddev);
+ goto ddev_fail;
+ }
+
+ ddev->dev_private = qddev;
+ qddev->ddev = ddev;
+
+ qddev->qdev = qdev;
+ qddev->partition_id = partition_id;
+ INIT_LIST_HEAD(&qddev->users);
+ mutex_init(&qddev->users_mutex);
+
+ qdev->qddev = qddev;
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret) {
+ pci_dbg(qdev->pdev, "%s: drm_dev_register failed %d\n", __func__, ret);
+ goto drm_reg_fail;
+ }
+
+ return 0;
+
+drm_reg_fail:
+ mutex_destroy(&qddev->users_mutex);
+ qdev->qddev = NULL;
+ drm_dev_put(ddev);
+ddev_fail:
+ kfree(qddev);
+ return ret;
+}
+
+static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
+{
+ struct qaic_drm_device *qddev;
+ struct qaic_user *usr;
+
+ qddev = qdev->qddev;
+
+ /*
+	 * Existing users get unresolvable errors until they close their FDs.
+	 * We need to sync carefully with users calling close(). The list of
+	 * users can be modified elsewhere when the lock isn't held here, but
+	 * syncing the SRCU with the mutex held could deadlock. Grab the mutex
+	 * so that the list will be unmodified. The user we get will exist as
+	 * long as the lock is held. Signal that the qddev is going away, and
+	 * grab a reference to the user so they don't go away for
+	 * synchronize_srcu(). Then release the mutex to avoid deadlock and
+	 * make sure the user has observed the signal. With the lock released,
+	 * we cannot maintain any state of the user list.
+ */
+ mutex_lock(&qddev->users_mutex);
+ while (!list_empty(&qddev->users)) {
+ usr = list_first_entry(&qddev->users, struct qaic_user, node);
+ list_del_init(&usr->node);
+ kref_get(&usr->ref_count);
+ usr->qddev = NULL;
+ mutex_unlock(&qddev->users_mutex);
+ synchronize_srcu(&usr->qddev_lock);
+ kref_put(&usr->ref_count, free_usr);
+ mutex_lock(&qddev->users_mutex);
+ }
+ mutex_unlock(&qddev->users_mutex);
+
+ if (qddev->ddev) {
+ drm_dev_unregister(qddev->ddev);
+ drm_dev_put(qddev->ddev);
+ }
+
+ kfree(qddev);
+}
+
+static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev;
+ u16 major, minor;
+ int ret;
+
+ /*
+ * Invoking this function indicates that the control channel to the
+ * device is available. We use that as a signal to indicate that
+ * the device side firmware has booted. The device side firmware
+ * manages the device resources, so we need to communicate with it
+ * via the control channel in order to utilize the device. Therefore
+ * we wait until this signal to create the drm dev that userspace will
+ * use to control the device, because without the device side firmware,
+ * userspace can't do anything useful.
+ */
+
+ qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+
+ qdev->in_reset = false;
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->cntl_ch = mhi_dev;
+
+ ret = qaic_control_open(qdev);
+ if (ret) {
+ pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = get_cntl_version(qdev, NULL, &major, &minor);
+ if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
+ pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
+ __func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
+ ret = -EINVAL;
+ goto close_control;
+ }
+
+ ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+
+ return ret;
+
+close_control:
+ qaic_control_close(qdev);
+ return ret;
+}
+
+static void qaic_mhi_remove(struct mhi_device *mhi_dev)
+{
+	/* This is redundant since we have already observed the device crash */
+}
+
+static void qaic_notify_reset(struct qaic_device *qdev)
+{
+ int i;
+
+ qdev->in_reset = true;
+ /* wake up any waiters to avoid waiting for timeouts at sync */
+ wake_all_cntl(qdev);
+ for (i = 0; i < qdev->num_dbc; ++i)
+ wakeup_dbc(qdev, i);
+ synchronize_srcu(&qdev->dev_lock);
+}
+
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
+{
+ int i;
+
+ qaic_notify_reset(qdev);
+
+ /* remove drmdevs to prevent new users from coming in */
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
+
+ /* start tearing things down */
+ for (i = 0; i < qdev->num_dbc; ++i)
+ release_dbc(qdev, i);
+
+ if (exit_reset)
+ qdev->in_reset = false;
+}
+
+static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct qaic_device *qdev;
+ int i;
+
+ qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ if (!qdev)
+ return NULL;
+
+ if (id->device == PCI_DEV_AIC100) {
+ qdev->num_dbc = 16;
+ qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ if (!qdev->dbc)
+ return NULL;
+ }
+
+ qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
+ if (!qdev->cntl_wq)
+ return NULL;
+
+ pci_set_drvdata(pdev, qdev);
+ qdev->pdev = pdev;
+
+ mutex_init(&qdev->cntl_mutex);
+ INIT_LIST_HEAD(&qdev->cntl_xfer_list);
+ init_srcu_struct(&qdev->dev_lock);
+
+ for (i = 0; i < qdev->num_dbc; ++i) {
+ spin_lock_init(&qdev->dbc[i].xfer_lock);
+ qdev->dbc[i].qdev = qdev;
+ qdev->dbc[i].id = i;
+ INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
+ init_srcu_struct(&qdev->dbc[i].ch_lock);
+ init_waitqueue_head(&qdev->dbc[i].dbc_release);
+ INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+ }
+
+ return qdev;
+}
+
+static void cleanup_qdev(struct qaic_device *qdev)
+{
+ int i;
+
+ for (i = 0; i < qdev->num_dbc; ++i)
+ cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
+ cleanup_srcu_struct(&qdev->dev_lock);
+ pci_set_drvdata(qdev->pdev, NULL);
+ destroy_workqueue(qdev->cntl_wq);
+}
+
+static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
+{
+ int bars;
+ int ret;
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ /* make sure the device has the expected BARs */
+ if (bars != (BIT(0) | BIT(2) | BIT(4))) {
+ pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
+ __func__, bars);
+ return -EINVAL;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+ ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+ if (ret)
+ return ret;
+
+ qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
+ if (IS_ERR(qdev->bar_0))
+ return PTR_ERR(qdev->bar_0);
+
+ qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
+ if (IS_ERR(qdev->bar_2))
+ return PTR_ERR(qdev->bar_2);
+
+ /* Managed release since we use pcim_enable_device above */
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
+{
+ int mhi_irq;
+ int ret;
+ int i;
+
+ /* Managed release since we use pcim_enable_device */
+ ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+ if (ret < 0)
+ return ret;
+
+ if (ret < 32) {
+ pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+
+ mhi_irq = pci_irq_vector(pdev, 0);
+ if (mhi_irq < 0)
+ return mhi_irq;
+
+ for (i = 0; i < qdev->num_dbc; ++i) {
+ ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1),
+ dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
+ "qaic_dbc", &qdev->dbc[i]);
+ if (ret)
+ return ret;
+
+ if (datapath_polling) {
+ qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1);
+ disable_irq_nosync(qdev->dbc[i].irq);
+ INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
+ }
+ }
+
+ return mhi_irq;
+}
+
+static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct qaic_device *qdev;
+ int mhi_irq;
+ int ret;
+ int i;
+
+ qdev = create_qdev(pdev, id);
+ if (!qdev)
+ return -ENOMEM;
+
+ ret = init_pci(qdev, pdev);
+ if (ret)
+ goto cleanup_qdev;
+
+ for (i = 0; i < qdev->num_dbc; ++i)
+ qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
+
+ mhi_irq = init_msi(qdev, pdev);
+ if (mhi_irq < 0) {
+ ret = mhi_irq;
+ goto cleanup_qdev;
+ }
+
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq);
+ if (IS_ERR(qdev->mhi_cntrl)) {
+ ret = PTR_ERR(qdev->mhi_cntrl);
+ goto cleanup_qdev;
+ }
+
+ return 0;
+
+cleanup_qdev:
+ cleanup_qdev(qdev);
+ return ret;
+}
+
+static void qaic_pci_remove(struct pci_dev *pdev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(pdev);
+
+ if (!qdev)
+ return;
+
+ qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
+ cleanup_qdev(qdev);
+}
+
+static void qaic_pci_shutdown(struct pci_dev *pdev)
+{
+ /* see qaic_exit for what link_up is doing */
+ link_up = true;
+ qaic_pci_remove(pdev);
+}
+
+static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
+{
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static void qaic_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(pdev);
+
+ qaic_notify_reset(qdev);
+ qaic_mhi_start_reset(qdev->mhi_cntrl);
+ qaic_dev_reset_clean_local_state(qdev, false);
+}
+
+static void qaic_pci_reset_done(struct pci_dev *pdev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(pdev);
+
+ qdev->in_reset = false;
+ qaic_mhi_reset_done(qdev->mhi_cntrl);
+}
+
+static const struct mhi_device_id qaic_mhi_match_table[] = {
+ { .chan = "QAIC_CONTROL", },
+ {},
+};
+
+static struct mhi_driver qaic_mhi_driver = {
+ .id_table = qaic_mhi_match_table,
+ .remove = qaic_mhi_remove,
+ .probe = qaic_mhi_probe,
+ .ul_xfer_cb = qaic_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_mhi",
+ },
+};
+
+static const struct pci_device_id qaic_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, qaic_ids);
+
+static const struct pci_error_handlers qaic_pci_err_handler = {
+ .error_detected = qaic_pci_error_detected,
+ .reset_prepare = qaic_pci_reset_prepare,
+ .reset_done = qaic_pci_reset_done,
+};
+
+static struct pci_driver qaic_pci_driver = {
+ .name = QAIC_NAME,
+ .id_table = qaic_ids,
+ .probe = qaic_pci_probe,
+ .remove = qaic_pci_remove,
+ .shutdown = qaic_pci_shutdown,
+ .err_handler = &qaic_pci_err_handler,
+};
+
+static int __init qaic_init(void)
+{
+ int ret;
+
+ ret = mhi_driver_register(&qaic_mhi_driver);
+ if (ret) {
+ pr_debug("qaic: mhi_driver_register failed %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&qaic_pci_driver);
+ if (ret) {
+ pr_debug("qaic: pci_register_driver failed %d\n", ret);
+ goto free_mhi;
+ }
+
+ return 0;
+
+free_mhi:
+ mhi_driver_unregister(&qaic_mhi_driver);
+ return ret;
+}
+
+static void __exit qaic_exit(void)
+{
+ /*
+ * We assume that qaic_pci_remove() is called due to a hotplug event
+ * which would mean that the link is down, and thus
+ * qaic_mhi_free_controller() should not try to access the device during
+ * cleanup.
+ * We call pci_unregister_driver() below, which also triggers
+ * qaic_pci_remove(), but since this is module exit, we expect the link
+ * to the device to be up, in which case qaic_mhi_free_controller()
+ * should try to access the device during cleanup to put the device in
+ * a sane state.
+ * For that reason, we set link_up here to let qaic_mhi_free_controller
+ * know the expected link state. Since the module is going to be
+ * removed at the end of this, we don't need to worry about
+ * reinitializing the link_up state after the cleanup is done.
+ */
+ link_up = true;
+ pci_unregister_driver(&qaic_pci_driver);
+ mhi_driver_unregister(&qaic_mhi_driver);
+}
+
+module_init(qaic_init);
+module_exit(qaic_exit);
+
+MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
+MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index c4d54a5326b1..06b43b678d6e 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -24,7 +24,6 @@
MODULE_AUTHOR("samuel.thibault@ens-lyon.org");
MODULE_DESCRIPTION("braille device");
-MODULE_LICENSE("GPL");
/*
* Braille device support part.
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 4733fd6334ab..56c073103cbb 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -2490,7 +2490,7 @@ MODULE_PARM_DESC(punc_level, "Controls the level of punctuation spoken as the sc
MODULE_PARM_DESC(reading_punc, "It controls the level of punctuation when reviewing the screen with speakup's screen review commands.");
MODULE_PARM_DESC(cursor_time, "This controls cursor delay when using arrow keys.");
MODULE_PARM_DESC(say_control, "This controls if speakup speaks shift, alt and control when those keys are pressed or not.");
-MODULE_PARM_DESC(say_word_ctl, "Sets thw say_word_ctl on load.");
+MODULE_PARM_DESC(say_word_ctl, "Sets the say_word_ctl on load.");
MODULE_PARM_DESC(no_interrupt, "Controls if typing interrupts output from speakup.");
MODULE_PARM_DESC(key_echo, "Controls if speakup speaks keys when they are typed. One = on zero = off or don't echo keys.");
MODULE_PARM_DESC(cur_phonetic, "Controls if speakup speaks letters phonetically during navigation. One = on zero = off or don't speak phonetically.");
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 08cf8a17754b..07373b3debd1 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -354,6 +354,9 @@ void spk_ttyio_release(struct spk_synth *in_synth)
{
struct tty_struct *tty = in_synth->dev;
+ if (tty == NULL)
+ return;
+
tty_lock(tty);
if (tty->ops->close)
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 3bbe2276cac7..80f945cbec8a 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -83,6 +83,8 @@ static int fch_misc_setup(struct apd_private_data *pdata)
if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) {
clk_data->name = devm_kzalloc(&adev->dev, obj->string.length,
GFP_KERNEL);
+ if (!clk_data->name)
+ return -ENOMEM;
strcpy(clk_data->name, obj->string.pointer);
} else {
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 50540d4d4948..c5598b6d5db8 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <asm/msr.h>
#include <asm/tsc.h>
+#include "internal.h"
struct lpit_residency_info {
struct acpi_generic_address gaddr;
@@ -97,6 +98,12 @@ EXPORT_SYMBOL_GPL(lpit_read_residency_count_address);
static void lpit_update_residency(struct lpit_residency_info *info,
struct acpi_lpit_native *lpit_native)
{
+ struct device *dev_root = bus_get_dev_root(&cpu_subsys);
+
+ /* Silently fail, if cpuidle attribute group is not present */
+ if (!dev_root)
+ return;
+
info->frequency = lpit_native->counter_frequency ?
lpit_native->counter_frequency : tsc_khz * 1000;
if (!info->frequency)
@@ -107,18 +114,18 @@ static void lpit_update_residency(struct lpit_residency_info *info,
info->iomem_addr = ioremap(info->gaddr.address,
info->gaddr.bit_width / 8);
if (!info->iomem_addr)
- return;
+ goto exit;
- /* Silently fail, if cpuidle attribute group is not present */
- sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
+ sysfs_add_file_to_group(&dev_root->kobj,
&dev_attr_low_power_idle_system_residency_us.attr,
"cpuidle");
} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
- /* Silently fail, if cpuidle attribute group is not present */
- sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
+ sysfs_add_file_to_group(&dev_root->kobj,
&dev_attr_low_power_idle_cpu_residency_us.attr,
"cpuidle");
}
+exit:
+ put_device(dev_root);
}
static void lpit_process(u64 begin, u64 end)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f08ffa75f4a7..77186f084d3a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -271,6 +271,12 @@ static const struct lpss_device_desc bsw_pwm_dev_desc = {
.resume_from_noirq = true,
};
+static const struct lpss_device_desc bsw_pwm2_dev_desc = {
+ .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
+ .prv_offset = 0x800,
+ .resume_from_noirq = true,
+};
+
static const struct lpss_device_desc byt_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.clk_con_id = "baudclk",
@@ -368,6 +374,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
/* Braswell LPSS devices */
{ "80862286", LPSS_ADDR(lpss_dma_desc) },
{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+ { "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) },
{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index ffdcfcd4a10d..01abf26764b0 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -348,10 +348,22 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
return false;
}
+/*
+ * If one of the device IDs below is present in the list of device IDs of a
+ * given ACPI device object, the PNP scan handler will not attach to that
+ * object, because there is a proper non-PNP driver in the kernel for the
+ * device represented by it.
+ */
+static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INTC1080"},
+ {"INTC1081"},
+ {""},
+};
+
static int acpi_pnp_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
- return 1;
+ return !!acpi_match_device_ids(adev, acpi_nonpnp_device_ids);
}
static struct acpi_scan_handler acpi_pnp_handler = {
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 6737b1cbf6d6..f9aa02cac6d1 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <acpi/processor.h>
@@ -148,6 +149,34 @@ static int acpi_processor_errata(void)
return result;
}
+/* Create a platform device to represent a CPU frequency control mechanism. */
+static void cpufreq_add_device(const char *name)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(pdev))
+ pr_info("%s device creation failed: %ld\n", name, PTR_ERR(pdev));
+}
+
+#ifdef CONFIG_X86
+/* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */
+static void __init acpi_pcc_cpufreq_init(void)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (acpi_has_method(handle, "PCCH"))
+ cpufreq_add_device("pcc-cpufreq");
+}
+#else
+static void __init acpi_pcc_cpufreq_init(void) {}
+#endif /* CONFIG_X86 */
+
/* Initialization */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
@@ -280,14 +309,22 @@ static int acpi_processor_get_info(struct acpi_device *device)
dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");
pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
- if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
+ if (!cpu0_initialized) {
cpu0_initialized = 1;
/*
* Handle UP system running SMP kernel, with no CPU
* entry in MADT
*/
- if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
+ if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) &&
+ (num_online_cpus() == 1))
pr->id = 0;
+ /*
+ * Check availability of Processor Performance Control by
+ * looking at the presence of the _PCT object under the first
+ * processor definition.
+ */
+ if (acpi_has_method(pr->handle, "_PCT"))
+ cpufreq_add_device("acpi-cpufreq");
}
/*
@@ -686,6 +723,7 @@ void __init acpi_processor_init(void)
acpi_processor_check_duplicates();
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
acpi_scan_add_handler(&processor_container_handler);
+ acpi_pcc_cpufreq_init();
}
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 30d8fd03fec7..62f4364e4460 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -70,16 +70,6 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
-/*
- * Display probing is known to take up to 5 seconds, so delay the fallback
- * backlight registration by 5 seconds + 3 seconds for some extra margin.
- */
-static int register_backlight_delay = 8;
-module_param(register_backlight_delay, int, 0444);
-MODULE_PARM_DESC(register_backlight_delay,
- "Delay in seconds before doing fallback (non GPU driver triggered) "
- "backlight registration, set to 0 to disable.");
-
static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -88,9 +78,6 @@ static LIST_HEAD(video_bus_head);
static int acpi_video_bus_add(struct acpi_device *device);
static void acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
-static void acpi_video_bus_register_backlight_work(struct work_struct *ignored);
-static DECLARE_DELAYED_WORK(video_bus_register_backlight_work,
- acpi_video_bus_register_backlight_work);
/*
* Indices in the _BCL method response: the first two items are special,
@@ -1988,6 +1975,7 @@ static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
+ bool auto_detect;
int error;
acpi_status status;
@@ -2049,10 +2037,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_unlock(&video_list_lock);
/*
- * The userspace visible backlight_device gets registered separately
- * from acpi_video_register_backlight().
+ * If backlight-type auto-detection is used then a native backlight may
+ * show up later and this may change the result from video to native.
+	 * Therefore normally the userspace visible /sys/class/backlight device
+ * gets registered separately by the GPU driver calling
+ * acpi_video_register_backlight() when an internal panel is detected.
+ * Register the backlight now when not using auto-detection, so that
+ * when the kernel cmdline or DMI-quirks are used the backlight will
+ * get registered even if acpi_video_register_backlight() is not called.
*/
acpi_video_run_bcl_for_osi(video);
+ if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
+ !auto_detect)
+ acpi_video_bus_register_backlight(video);
+
acpi_video_bus_add_notify_handler(video);
return 0;
@@ -2089,11 +2087,6 @@ static void acpi_video_bus_remove(struct acpi_device *device)
kfree(video);
}
-static void acpi_video_bus_register_backlight_work(struct work_struct *ignored)
-{
- acpi_video_register_backlight();
-}
-
static int __init is_i740(struct pci_dev *dev)
{
if (dev->device == 0x00D1)
@@ -2204,18 +2197,6 @@ int acpi_video_register(void)
*/
register_count = 1;
- /*
- * acpi_video_bus_add() skips registering the userspace visible
- * backlight_device. The intend is for this to be registered by the
- * drm/kms driver calling acpi_video_register_backlight() *after* it is
- * done setting up its own native backlight device. The delayed work
- * ensures that acpi_video_register_backlight() always gets called
- * eventually, in case there is no drm/kms driver or it is disabled.
- */
- if (register_backlight_delay)
- schedule_delayed_work(&video_bus_register_backlight_work,
- register_backlight_delay * HZ);
-
leave:
mutex_unlock(&register_count_mutex);
return ret;
@@ -2226,7 +2207,6 @@ void acpi_video_unregister(void)
{
mutex_lock(&register_count_mutex);
if (register_count) {
- cancel_delayed_work_sync(&video_bus_register_backlight_work);
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
may_report_brightness_keys = false;
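
The hunk above moves the fallback decision into acpi_video_bus_add(): the backlight is registered immediately only when the "video" type was forced (kernel cmdline or DMI quirk) rather than auto-detected. A minimal stand-alone sketch of that decision, using placeholder helpers instead of the kernel's __acpi_video_get_backlight_type():

#include <stdbool.h>
#include <stdio.h>

enum backlight_type { BACKLIGHT_VIDEO, BACKLIGHT_NATIVE };

/* Placeholder resolver: reports the selected type and whether it came from
 * auto-detection (true) or from a cmdline/DMI override (false). */
static enum backlight_type get_backlight_type(bool forced_video, bool *auto_detect)
{
	*auto_detect = !forced_video;
	return forced_video ? BACKLIGHT_VIDEO : BACKLIGHT_NATIVE;
}

static void bus_add(bool forced_video)
{
	bool auto_detect;

	/* Register right away only when the result cannot change later. */
	if (get_backlight_type(forced_video, &auto_detect) == BACKLIGHT_VIDEO &&
	    !auto_detect)
		puts("registering acpi_video backlight during bus add");
	else
		puts("deferring to acpi_video_register_backlight() from the GPU driver");
}

int main(void)
{
	bus_add(true);   /* e.g. acpi_backlight=video on the cmdline */
	bus_add(false);  /* auto-detection still pending */
	return 0;
}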
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 9e0d95d76fff..30f3fc13c29d 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -3,7 +3,7 @@
# Makefile for ACPICA Core interpreter
#
-ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA
+ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 0a50b4912515..9d4cbd956627 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index bb329e34ee7d..4536dc9d3979 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index 476d21e67767..c6ba6a36cfb5 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index d629716aa5b2..22f1f7a9e5a3 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index fe2c3630a38d..73eecbf62f06 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 922f559a3e59..ddd072cbc738 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 777457a58340..778241173ed4 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6f2787506b50..ebf8fd373cf7 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 6bdf133a2767..955114c926bd 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 901b1543b869..12d4a024f029 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -1122,7 +1122,8 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP 0x90
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x92
+#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 2f3e609df47d..de83dd22292b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 7b27b9cc5916..9448bc026b9b 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 6af5dc995085..1bdfeee5d7c5 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index a224926bd9c8..da96d80e6b3a 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 4511c2bd8bc3..6dad786a382c 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index f7d65a20026b..e64aabe3d33a 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f7749c63d277..d772ff9ca07d 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -306,6 +306,7 @@ extern struct acpi_rsconvert_info acpi_rs_convert_pin_config[];
extern struct acpi_rsconvert_info acpi_rs_convert_pin_group[];
extern struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[];
extern struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[];
+extern struct acpi_rsconvert_info acpi_rs_convert_clock_input[];
/* These resources require separate get/set tables */
@@ -361,6 +362,7 @@ extern struct acpi_rsdump_info acpi_rs_dump_pin_config[];
extern struct acpi_rsdump_info acpi_rs_dump_pin_group[];
extern struct acpi_rsdump_info acpi_rs_dump_pin_group_function[];
extern struct acpi_rsdump_info acpi_rs_dump_pin_group_config[];
+extern struct acpi_rsdump_info acpi_rs_dump_clock_input[];
#endif
#endif /* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index b859de96a1e4..f8fee94ba708 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 1c29325e4c7f..b6ae979b01b6 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 71175b664f49..edfdbbef81c1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -53,6 +53,8 @@ extern const char *acpi_gbl_sb_decode[];
extern const char *acpi_gbl_fc_decode[];
extern const char *acpi_gbl_pt_decode[];
extern const char *acpi_gbl_ptyp_decode[];
+extern const char *acpi_gbl_clock_input_mode[];
+extern const char *acpi_gbl_clock_input_scale[];
#endif
/*
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 62a7ec277513..effe52b40dce 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index b31779ce204a..4e88f9fc2a28 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -70,6 +70,8 @@
#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8And16(1), 16(2) */
#define ACPI_RESTAG_VENDORDATA "_VEN"
+#define ACPI_RESTAG_FQN "_FQN"
+#define ACPI_RESTAG_FQD "_FQD"
/* Default sizes for "small" resource descriptors */
@@ -259,7 +261,10 @@ struct aml_resource_address16 {
struct aml_resource_extended_irq {
AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
u8 interrupt_count;
- u32 interrupts[1];
+ union {
+ u32 interrupt;
+ ACPI_FLEX_ARRAY(u32, interrupts);
+ };
/* res_source_index, res_source optional fields follow */
};
@@ -427,6 +432,20 @@ struct aml_resource_pin_config {
*/
};
+#define AML_RESOURCE_CLOCK_INPUT_REVISION 1 /* ACPI 6.5 */
+
+struct aml_resource_clock_input {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+ u16 flags;
+ u16 frequency_divisor;
+ u32 frequency_numerator;
+ /*
+ * Optional fields follow immediately:
+ * 1) Resource Source index
+ * 2) Resource Source String
+ */
+};
+
#define AML_RESOURCE_PIN_CONFIG_REVISION 1 /* ACPI 6.2 */
struct aml_resource_pin_group {
@@ -533,6 +552,7 @@ union aml_resource {
struct aml_resource_pin_group pin_group;
struct aml_resource_pin_group_function pin_group_function;
struct aml_resource_pin_group_config pin_group_config;
+ struct aml_resource_clock_input clock_input;
/* Utility overlays */
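
The extended-IRQ hunk above replaces the old one-element array with a union wrapping a flexible array member (via the ACPICA ACPI_FLEX_ARRAY helper) so the descriptor keeps its historical size while array-bounds warnings go away. A plain standard-C sketch of the underlying flexible-array idea, without the size-preserving union:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Plain C99 flexible array member; the ACPICA version additionally wraps it
 * in a union with a single u32 so sizeof() of the descriptor is preserved. */
struct ext_irq {
	uint8_t  flags;
	uint8_t  interrupt_count;
	uint32_t interrupts[];   /* was "interrupts[1]" in the old layout */
};

int main(void)
{
	uint8_t count = 3;
	struct ext_irq *r = malloc(sizeof(*r) + count * sizeof(r->interrupts[0]));

	if (!r)
		return 1;

	r->interrupt_count = count;
	for (uint8_t i = 0; i < count; i++)
		r->interrupts[i] = 16 + i;

	printf("last GSI: %" PRIu32 "\n", r->interrupts[r->interrupt_count - 1]);
	free(r);
	return 0;
}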
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 105e6ceaa887..e874c1dddefa 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 3615e1a6efd8..b91155ea9c34 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -652,6 +652,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
object_info =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));
+ if (!object_info)
+ return (AE_NO_MEMORY);
+
/* Walk the namespace from the root */
(void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 2963d1579c05..4354c175e12e 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 8492619149d1..80c69af06948 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 2d99ccf5bde7..c5c8380a3114 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index de175f1b4beb..532401ecdab0 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index dffd54fdbd51..6e0e362e461f 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 9332bc688713..e809c2aed78a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index e3dfc734ace9..555f148d666b 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 2b9b6a974ca9..dd3059000885 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 1624d6e7dc46..ecf793fe9919 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index b082eb942a0f..a43336f05206 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 9f6573646ab5..f7b8496c8bdd 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 778df616aaa0..541235f498c2 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 634b9100f674..1fdd07ae862c 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 0aa735d3b93c..d3841ded3a81 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ds_init_aml_walk);
walk_state->parser_state.aml =
- walk_state->parser_state.aml_start = aml_start;
- walk_state->parser_state.aml_end =
- walk_state->parser_state.pkg_end = aml_start + aml_length;
+ walk_state->parser_state.aml_start =
+ walk_state->parser_state.aml_end =
+ walk_state->parser_state.pkg_end = aml_start;
+ /* Avoid undefined behavior: applying zero offset to null pointer */
+ if (aml_length != 0) {
+ walk_state->parser_state.aml_end += aml_length;
+ walk_state->parser_state.pkg_end += aml_length;
+ }
/* The next_op of the next_walk will be the beginning of the method */
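
The dswstate.c change avoids pointer arithmetic on a possibly-NULL aml_start when the length is zero, which UBSan reports as undefined behaviour. A small self-contained illustration of the same guarded pattern (names here are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void set_window(const uint8_t *start, size_t len,
		       const uint8_t **out_start, const uint8_t **out_end)
{
	*out_start = start;
	*out_end = start;		/* never evaluates "NULL + 0" */

	/* Avoid undefined behavior: only advance when there is a length. */
	if (len != 0)
		*out_end += len;
}

int main(void)
{
	const uint8_t *s, *e;

	set_window(NULL, 0, &s, &e);	/* empty AML block: both stay NULL */
	printf("empty window: %p..%p\n", (void *)s, (void *)e);
	return 0;
}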
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 82d1728b9bc6..9e78c5b9ad52 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -142,9 +142,6 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
status =
acpi_write_bit_register(acpi_gbl_fixed_event_info
[i].enable_register_id,
- (i ==
- ACPI_EVENT_PCIE_WAKE) ?
- ACPI_ENABLE_EVENT :
ACPI_DISABLE_EVENT);
if (ACPI_FAILURE(status)) {
return (status);
@@ -188,11 +185,6 @@ u32 acpi_ev_fixed_event_detect(void)
return (int_status);
}
- if (fixed_enable & ACPI_BITMASK_PCIEXP_WAKE_DISABLE)
- fixed_enable &= ~ACPI_BITMASK_PCIEXP_WAKE_DISABLE;
- else
- fixed_enable |= ACPI_BITMASK_PCIEXP_WAKE_DISABLE;
-
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
"Fixed Event Block: Enable %08X Status %08X\n",
fixed_enable, fixed_status));
@@ -258,9 +250,6 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
if (!acpi_gbl_fixed_event_handlers[event].handler) {
(void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
enable_register_id,
- (event ==
- ACPI_EVENT_PCIE_WAKE) ?
- ACPI_ENABLE_EVENT :
ACPI_DISABLE_EVENT);
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 9aab54797ded..989dc01af03f 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index a6bb480d631c..934b201d3820 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 39fe4566310b..58e1890ab25b 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 2f1a75fee61c..0dbc4d88919a 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index c32eb57aa21d..ee3b1ea656d4 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index be9a05498adc..1c8cb6d924df 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 6172cddc1b39..e68e876d3b84 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index d035092799eb..18fdf2bc2d49 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index ca4ba6b351fe..46d1b3f5582d 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -430,7 +430,7 @@ acpi_ev_data_table_region_setup(acpi_handle handle,
{
union acpi_operand_object *region_desc =
(union acpi_operand_object *)handle;
- struct acpi_data_table_space_context *local_region_context;
+ struct acpi_data_table_mapping *local_region_context;
ACPI_FUNCTION_TRACE(ev_data_table_region_setup);
@@ -445,7 +445,7 @@ acpi_ev_data_table_region_setup(acpi_handle handle,
/* Create a new context */
local_region_context =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_data_table_space_context));
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_data_table_mapping));
if (!(local_region_context)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 18219abba108..24fa6433d562 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 8187b081e0a6..48bf845191d2 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 340947e412bb..4eeeb3b7ab7e 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index a5c19f46ec17..3197e6303c5b 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 66201742f499..2fb78b35565b 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index e82faabdf907..473115309860 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 8de5d47ad485..3729bf3b74f7 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index fb2453fa9442..1bea9d97652c 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 8a99aadb9d15..3f86bfada510 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 24b3d041b3e5..2e2da8790224 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 657f4002f9dc..61ff36189ace 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index d769cea1468b..cf6c812a8b6d 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index b4bac8c60a13..c6f2a9166ac0 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index e9dcfa1e93eb..65c487facdda 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 318eb769058d..9a448165bfeb 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index d108a1a86f12..20fb34b68bee 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index ebf7c89d52d9..743c258bf2e8 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 4b069bd6bc71..d3091f619909 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 2a506ef386cf..1af35e143ba9 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 08f06797386a..08196fa17080 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 4ff35852c0b3..8907b8bf4267 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -509,12 +509,12 @@ acpi_ex_data_table_space_handler(u32 function,
u64 *value,
void *handler_context, void *region_context)
{
- struct acpi_data_table_space_context *mapping;
+ struct acpi_data_table_mapping *mapping;
char *pointer;
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
- mapping = (struct acpi_data_table_space_context *) region_context;
+ mapping = (struct acpi_data_table_mapping *) region_context;
pointer = ACPI_CAST_PTR(char, mapping->pointer) +
(address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index b81506d73447..873de01b8ad2 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 61ee7fb46006..24a78b5e266c 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3342780230af..3a437e6ace5c 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index fd63f2042514..5d99b1a76c83 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index f99e8cf27a6c..575c7a39f1aa 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index c848b328e760..b01ae015e1b5 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 45c757bbf9a9..37c3131a82fa 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 7b5470f404f3..f665ffd9a396 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index b570d7a7e134..f1730221ff13 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 87f01ce1c1aa..f4d4a033f166 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 2f1c2fc8bd2a..790f342dcd25 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index d8597e052912..a9ba9190408b 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 13d54a48e6e9..e0c847ab8324 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 37b3f641feaa..e0921f08b71a 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -311,20 +311,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
[ACPI_EVENT_SLEEP_BUTTON].
status_register_id, ACPI_CLEAR_STATUS);
- /* Enable pcie wake event if support */
- if ((acpi_gbl_FADT.flags & ACPI_FADT_PCI_EXPRESS_WAKE)) {
- (void)
- acpi_write_bit_register(acpi_gbl_fixed_event_info
- [ACPI_EVENT_PCIE_WAKE].
- enable_register_id,
- ACPI_DISABLE_EVENT);
- (void)
- acpi_write_bit_register(acpi_gbl_fixed_event_info
- [ACPI_EVENT_PCIE_WAKE].
- status_register_id,
- ACPI_CLEAR_STATUS);
- }
-
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 46f3ae03ab99..192c04b5a599 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 915b26448d2c..b8de458f0368 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
*
* The table is used to implement the Microsoft port access rules that
* first appeared in Windows XP. Some ports are always illegal, and some
- * ports are only illegal if the BIOS calls _OSI with a win_XP string or
- * later (meaning that the BIOS itelf is post-XP.)
+ * ports are only illegal if the BIOS calls _OSI with nothing newer than
+ * the specific _OSI strings.
*
* This provides ACPICA with the desired port protections and
* Microsoft compatibility.
@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
- if (acpi_gbl_osi_data >= port_info->osi_dependency) {
+ if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
+ acpi_gbl_osi_data == port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
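
With the hwvalid.c change a port is denied either when its table entry is marked always-illegal or when the newest _OSI level the firmware requested matches the entry's dependency exactly, instead of the old greater-or-equal test. A rough sketch with placeholder constants:

#include <stdio.h>

enum { ALWAYS_ILLEGAL = 0, OSI_WIN_XP = 4, OSI_WIN_7 = 7 }; /* placeholders */

struct port_rule {
	unsigned int start;
	unsigned int end;
	int osi_dependency;
};

static int port_denied(const struct port_rule *rule, int newest_osi_requested)
{
	return rule->osi_dependency == ALWAYS_ILLEGAL ||
	       rule->osi_dependency == newest_osi_requested;
}

int main(void)
{
	struct port_rule pit = { 0x40, 0x43, OSI_WIN_XP };

	/* Denied when the firmware asked for exactly XP, allowed otherwise. */
	printf("XP firmware: %d, Win7 firmware: %d\n",
	       port_denied(&pit, OSI_WIN_XP), port_denied(&pit, OSI_WIN_7));
	return 0;
}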
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 55d9b897e70f..c31f803995c6 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index aff51ceea02c..36ea48f64110 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 22586b90e532..3efb46f0dc54 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index b02555fe38f0..7e5a683ae957 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index f154824d4eb6..90a26cb0c472 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index b9a88b7b518b..fa116ebe49a3 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 3e6207ad18d8..86d126fdb27d 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 880260a30c0c..fcb9de0f77a2 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 4b893676ab5c..31e551cf4ea6 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index c0db6690bb32..cf57bd69616d 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 82932c9a774b..dd37fc108fce 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 367fcd201f96..b8657004190d 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* Try to fix if there was no return object. Warning if failed to fix.
*/
if (!return_object) {
- if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
- if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ if (expected_btypes) {
+ if (!(expected_btypes & ACPI_RTYPE_NONE) &&
+ package_index != ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
@@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
if (ACPI_SUCCESS(status)) {
return (AE_OK); /* Repair was successful */
}
- } else {
+ }
+
+ if (expected_btypes != ACPI_RTYPE_NONE) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
"Missing expected return value"));
+ return (AE_AML_NO_RETURN_VALUE);
}
-
- return (AE_AML_NO_RETURN_VALUE);
}
}
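
The nsrepair.c rework changes the no-return-object path so a NULL package element is only repaired when NONE is not an allowed type, and the "Missing expected return value" error is only raised when a return value was actually required. A simplified control-flow sketch with placeholder names:

#include <stdbool.h>
#include <stdio.h>

#define RTYPE_NONE          0x01
#define RTYPE_INTEGER       0x02
#define NOT_PACKAGE_ELEMENT (-1)

static bool try_null_element_repair(void) { return false; } /* placeholder */

static const char *handle_missing_return(unsigned int expected_btypes, int package_index)
{
	if (!expected_btypes)
		return "nothing expected";

	/* A NULL package element may be repairable when NONE is not allowed. */
	if (!(expected_btypes & RTYPE_NONE) &&
	    package_index != NOT_PACKAGE_ELEMENT &&
	    try_null_element_repair())
		return "repaired";

	/* Complain only when a return value was actually required. */
	if (expected_btypes != RTYPE_NONE)
		return "error: missing expected return value";

	return "ok: no return value required";
}

int main(void)
{
	printf("%s\n", handle_missing_return(RTYPE_INTEGER, NOT_PACKAGE_ELEMENT));
	printf("%s\n", handle_missing_return(RTYPE_NONE, NOT_PACKAGE_ELEMENT));
	return 0;
}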
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index dd533c887e3a..1bb7b71f07f1 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -499,7 +499,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
char *source;
char *dest;
- ACPI_FUNCTION_NAME(ns_repair_HID);
+ ACPI_FUNCTION_TRACE(ns_repair_HID);
/* We only care about string _HID objects (not integers) */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index ef531b145add..06ffdb6808f5 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 82a0dae349e2..eee396a77bae 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b2cfdfef3194..5d5bcf165298 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -44,7 +44,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
acpi_status
acpi_get_handle(acpi_handle parent,
- acpi_string pathname, acpi_handle *ret_handle)
+ const char *pathname, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node = NULL;
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index f7ec5606098c..422c074ed289 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 840512fa9fc6..d0fd55636129 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index bca249e67c6b..54471083ba54 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index bef69e87a0a2..09029fe545f1 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index f10afe699ad7..bccf606e08b4 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index ba93f359760a..10a072953d78 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 400f001631ea..a0035bde7556 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 3012a9342367..7f7f5ecd4011 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 49b39aeded12..d550c4af4702 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 7735a01dab90..d92817c72b8d 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index a6509aeb2955..6f4eace0ba69 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 5737c3af1902..fff48001d7ef 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -272,12 +272,17 @@ u8
acpi_rs_get_address_common(struct acpi_resource *resource,
union aml_resource *aml)
{
+ struct aml_resource_address address;
+
ACPI_FUNCTION_ENTRY();
+ /* Avoid undefined behavior: member access within misaligned address */
+
+ memcpy(&address, aml, sizeof(address));
+
/* Validate the Resource Type */
- if ((aml->address.resource_type > 2) &&
- (aml->address.resource_type < 0xC0)) {
+ if ((address.resource_type > 2) && (address.resource_type < 0xC0)) {
return (FALSE);
}
@@ -298,7 +303,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Generic resource type, just grab the type_specific byte */
resource->data.address.info.type_specific =
- aml->address.specific_flags;
+ address.specific_flags;
}
return (TRUE);
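Note on the hunk above: the fix copies the AML descriptor into a naturally aligned local struct before reading its members, because the aml pointer may land at an odd offset inside a byte-packed resource buffer. A minimal kernel-style sketch of the same pattern, with illustrative types and names that are not part of the patch:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical packed descriptor as it appears in a raw AML byte stream. */
struct demo_aml_address {
	u8  descriptor_type;
	u16 resource_length;
	u8  resource_type;
} __packed;

static u8 demo_get_resource_type(const u8 *raw)
{
	struct demo_aml_address local;

	/* Copy into an aligned local first; never dereference members of a
	 * pointer that may sit at a misaligned address in the AML buffer. */
	memcpy(&local, raw, sizeof(local));
	return local.resource_type;
}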
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 90583db459a2..6e7a152d6459 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -320,6 +320,16 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
break;
+ case ACPI_RESOURCE_TYPE_CLOCK_INPUT:
+
+ total_size = (acpi_rs_length)(total_size +
+ resource->data.
+ clock_input.
+ resource_source.
+ string_length);
+
+ break;
+
case ACPI_RESOURCE_TYPE_SERIAL_BUS:
total_size =
@@ -596,15 +606,23 @@ acpi_rs_get_list_length(u8 *aml_buffer,
}
break;
- case ACPI_RESOURCE_NAME_SERIAL_BUS:
+ case ACPI_RESOURCE_NAME_SERIAL_BUS:{
- minimum_aml_resource_length =
- acpi_gbl_resource_aml_serial_bus_sizes
- [aml_resource->common_serial_bus.type];
- extra_struct_bytes +=
- aml_resource->common_serial_bus.resource_length -
- minimum_aml_resource_length;
- break;
+ /* Avoid undefined behavior: member access within misaligned address */
+
+ struct aml_resource_common_serialbus
+ common_serial_bus;
+ memcpy(&common_serial_bus, aml_resource,
+ sizeof(common_serial_bus));
+
+ minimum_aml_resource_length =
+ acpi_gbl_resource_aml_serial_bus_sizes
+ [common_serial_bus.type];
+ extra_struct_bytes +=
+ common_serial_bus.resource_length -
+ minimum_aml_resource_length;
+ break;
+ }
case ACPI_RESOURCE_NAME_PIN_CONFIG:
@@ -650,6 +668,13 @@ acpi_rs_get_list_length(u8 *aml_buffer,
break;
+ case ACPI_RESOURCE_NAME_CLOCK_INPUT:
+ extra_struct_bytes =
+ acpi_rs_stream_option_length(resource_length,
+ minimum_aml_resource_length);
+
+ break;
+
default:
break;
@@ -663,10 +688,16 @@ acpi_rs_get_list_length(u8 *aml_buffer,
*/
if (acpi_ut_get_resource_type(aml_buffer) ==
ACPI_RESOURCE_NAME_SERIAL_BUS) {
+
+ /* Avoid undefined behavior: member access within misaligned address */
+
+ struct aml_resource_common_serialbus common_serial_bus;
+ memcpy(&common_serial_bus, aml_resource,
+ sizeof(common_serial_bus));
+
buffer_size =
acpi_gbl_resource_struct_serial_bus_sizes
- [aml_resource->common_serial_bus.type] +
- extra_struct_bytes;
+ [common_serial_bus.type] + extra_struct_bytes;
} else {
buffer_size =
acpi_gbl_resource_struct_sizes[resource_index] +
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index b8b37449011b..998a79cc09c2 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -301,6 +301,23 @@ struct acpi_rsdump_info acpi_rs_dump_pin_function[10] = {
"VendorData", NULL},
};
+struct acpi_rsdump_info acpi_rs_dump_clock_input[7] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_clock_input),
+ "ClockInput", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(clock_input.revision_id), "RevisionId",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(clock_input.frequency_numerator),
+ "FrequencyNumerator", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(clock_input.frequency_divisor),
+ "FrequencyDivisor", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(clock_input.scale), "Scale",
+ acpi_gbl_clock_input_scale},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(clock_input.mode), "Mode",
+ acpi_gbl_clock_input_mode},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(clock_input.resource_source),
+ "ResourceSource", NULL},
+};
+
struct acpi_rsdump_info acpi_rs_dump_pin_config[11] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_config),
"PinConfig", NULL},
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index eaeb7ab58c2a..ad7465ddfe13 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -49,6 +49,7 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
acpi_rs_convert_pin_group, /* 0x16, ACPI_RESOURCE_TYPE_PIN_GROUP */
acpi_rs_convert_pin_group_function, /* 0x17, ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */
acpi_rs_convert_pin_group_config, /* 0x18, ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */
+ acpi_rs_convert_clock_input, /* 0x19, ACPI_RESOURCE_TYPE_CLOCK_INPUT */
};
/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,6 +95,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_pin_group, /* 0x10, ACPI_RESOURCE_NAME_PIN_GROUP */
acpi_rs_convert_pin_group_function, /* 0x11, ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION */
acpi_rs_convert_pin_group_config, /* 0x12, ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG */
+ acpi_rs_convert_clock_input, /* 0x13, ACPI_RESOURCE_NAME_CLOCK_INPUT */
};
/* Subtype table for serial_bus -- I2C, SPI, UART, and CSI2 */
@@ -136,6 +138,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_pin_group, /* ACPI_RESOURCE_TYPE_PIN_GROUP */
acpi_rs_dump_pin_group_function, /* ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */
acpi_rs_dump_pin_group_config, /* ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */
+ acpi_rs_dump_clock_input, /* ACPI_RESOURCE_TYPE_CLOCK_INPUT */
};
struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
@@ -178,6 +181,7 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
sizeof(struct aml_resource_pin_group), /* ACPI_RESOURCE_TYPE_PIN_GROUP */
sizeof(struct aml_resource_pin_group_function), /* ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */
sizeof(struct aml_resource_pin_group_config), /* ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */
+ sizeof(struct aml_resource_clock_input), /* ACPI_RESOURCE_TYPE_CLOCK_INPUT */
};
const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -221,6 +225,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE(struct acpi_resource_pin_group),
ACPI_RS_SIZE(struct acpi_resource_pin_group_function),
ACPI_RS_SIZE(struct acpi_resource_pin_group_config),
+ ACPI_RS_SIZE(struct acpi_resource_clock_input),
};
const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index e46efaa889cd..164c96e063c6 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -55,15 +55,21 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- if (aml_resource->common_serial_bus.type >
- AML_RESOURCE_MAX_SERIALBUSTYPE) {
+
+ /* Avoid undefined behavior: member access within misaligned address */
+
+ struct aml_resource_common_serialbus common_serial_bus;
+ memcpy(&common_serial_bus, aml_resource,
+ sizeof(common_serial_bus));
+
+ if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
conversion_table = NULL;
} else {
/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
conversion_table =
acpi_gbl_convert_resource_serial_bus_dispatch
- [aml_resource->common_serial_bus.type];
+ [common_serial_bus.type];
}
} else {
conversion_table =
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index c2dd9aae4745..6e8e98cf598d 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -194,7 +194,8 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
case ACPI_RSC_COUNT_SERIAL_VEN:
- item_count = ACPI_GET16(source) - info->value;
+ ACPI_MOVE_16_TO_16(&temp16, source);
+ item_count = temp16 - info->value;
resource->length = resource->length + item_count;
ACPI_SET16(destination, item_count);
@@ -202,9 +203,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
case ACPI_RSC_COUNT_SERIAL_RES:
+ ACPI_MOVE_16_TO_16(&temp16, source);
item_count = (aml_resource_length +
sizeof(struct aml_resource_large_header))
- - ACPI_GET16(source) - info->value;
+ - temp16 - info->value;
resource->length = resource->length + item_count;
ACPI_SET16(destination, item_count);
@@ -289,9 +291,9 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/* Copy the resource_source string */
+ ACPI_MOVE_16_TO_16(&temp16, source);
source =
- ACPI_ADD_PTR(void, aml,
- (ACPI_GET16(source) + info->value));
+ ACPI_ADD_PTR(void, aml, (temp16 + info->value));
acpi_rs_move_data(target, source, item_count,
info->opcode);
break;
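The hunks above replace ACPI_GET16() dereferences with ACPI_MOVE_16_TO_16() into a local temp16, so the 16-bit field is assembled byte-wise rather than loaded through a possibly misaligned u16 pointer. Outside ACPICA the same effect is usually obtained with a memcpy-based fetch; a small sketch under that assumption (the helper name below is mine, not an ACPICA macro):

#include <linux/string.h>
#include <linux/types.h>

/* Fetch a 16-bit value from an arbitrarily aligned buffer position. */
static u16 demo_read_u16(const void *p)
{
	u16 v;

	memcpy(&v, p, sizeof(v));	/* byte copy, no alignment assumption */
	return v;			/* native byte order, as ACPI_GET16 had */
}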
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index f9267956535c..279bfa27da94 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -111,6 +111,55 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
/*******************************************************************************
*
+ * acpi_rs_convert_clock_input
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_clock_input[8] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_CLOCK_INPUT,
+ ACPI_RS_SIZE(struct acpi_resource_clock_input),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_clock_input)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_CLOCK_INPUT,
+ sizeof(struct aml_resource_clock_input),
+ 0}
+ ,
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.clock_input.revision_id),
+ AML_OFFSET(clock_input.revision_id),
+ 1}
+ ,
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.clock_input.mode),
+ AML_OFFSET(clock_input.flags),
+ 0}
+ ,
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.clock_input.scale),
+ AML_OFFSET(clock_input.flags),
+ 1}
+ ,
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.clock_input.frequency_divisor),
+ AML_OFFSET(clock_input.frequency_divisor),
+ 2}
+ ,
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.clock_input.frequency_numerator),
+ AML_OFFSET(clock_input.frequency_numerator),
+ 4}
+ ,
+
+ /* Resource Source */
+ {ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.clock_input.resource_source),
+ 0,
+ sizeof(struct aml_resource_clock_input)}
+ ,
+
+};
+
+/*******************************************************************************
+ *
* acpi_rs_convert_pinfunction
*
******************************************************************************/
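For reference, the flags layout implied by the conversion table above: bit 0 carries Mode and the two bits starting at bit 1 carry Scale, matching the Fixed/Variable and Hz/KHz/MHz string tables added in utresdecode.c further down. A small illustrative decoder, with function and parameter names of my own choosing:

#include <linux/types.h>

/* Unpack the ClockInput flags word as described by the RSC table above. */
static void demo_decode_clock_input_flags(u16 flags, u8 *mode, u8 *scale)
{
	*mode  = flags & 0x1;		/* 0 = Fixed, 1 = Variable */
	*scale = (flags >> 1) & 0x3;	/* 0 = Hz, 1 = KHz, 2 = MHz */
}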
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 1f7677e0dbbe..a1f10e4409a3 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index f04dc6051320..44267a92bce5 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index c31a5ddb0ffd..1c1b2e284bd9 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 499efcaf798d..0dc003c20e4d 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index f07aa9b46f3f..58b02e4b254b 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 17ad9c227d42..bb4a56e5673a 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -165,6 +165,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
{
+ u32 address32;
u64 address64;
/*
@@ -176,8 +177,8 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
* 32-bit platform, RSDT: Return 32-bit table entry
* 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
*/
- return ((acpi_physical_address)
- (*ACPI_CAST_PTR(u32, table_entry)));
+ ACPI_MOVE_32_TO_32(&address32, table_entry);
+ return address32;
} else {
/*
* 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 37da09dca940..275b52dc42e9 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 258796e02be1..0f2a7343de3a 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 53afd75bbc06..5b413bbab338 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 915321806cd7..be94d2fd99a7 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 2bab6017d827..c1fb70457e20 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 72fb7e9ec485..2be37676edd7 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 59c4050b8e91..b054bb5eeaf0 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 5425968dd2a8..85a85f7cf750 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c
index c166e4c05ab6..b483894c3629 100644
--- a/drivers/acpi/acpica/utcksum.c
+++ b/drivers/acpi/acpica/utcksum.c
@@ -3,7 +3,7 @@
*
* Module Name: utcksum - Support generating table checksums
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 63c17f420fb8..2e17e657dfa4 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 64ed546cf19c..1bbba8585fa6 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 3176393a729d..95a4b7509e01 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index df20d46ed8b7..3e5173d03953 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 53afa5edb6ec..820820ea8119 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -186,10 +186,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
ACPI_BITREG_RT_CLOCK_ENABLE,
ACPI_BITMASK_RT_CLOCK_STATUS,
ACPI_BITMASK_RT_CLOCK_ENABLE},
- /* ACPI_EVENT_PCIE_WAKE */ {ACPI_BITREG_PCIEXP_WAKE_STATUS,
- ACPI_BITREG_PCIEXP_WAKE_DISABLE,
- ACPI_BITMASK_PCIEXP_WAKE_STATUS,
- ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
};
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index c811ee2a8160..e62802791dcf 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index b6caab49f1bd..15c2ce91d403 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 18177e4f26f7..92fbaef161a7 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 84abdbf5cfca..ee6d72385c5c 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index d3667bfff401..f4aae8f0d3a8 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index b8ab0a3cb5b9..251bd396c6fd 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 2524f013be7a..29d2977d0746 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index d5aa2109847f..42b30b9f9312 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utresdecode.c b/drivers/acpi/acpica/utresdecode.c
index 85730fcd7d00..d801d9069841 100644
--- a/drivers/acpi/acpica/utresdecode.c
+++ b/drivers/acpi/acpica/utresdecode.c
@@ -284,4 +284,15 @@ const char *acpi_gbl_ptyp_decode[] = {
"Input Schmitt Trigger",
};
+const char *acpi_gbl_clock_input_mode[] = {
+ "Fixed",
+ "Variable",
+};
+
+const char *acpi_gbl_clock_input_scale[] = {
+ "Hz",
+ "KHz",
+ "MHz",
+};
+
#endif
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 16f9a7035b39..cff7901f7866 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -57,6 +57,8 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group),
ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group_function),
ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group_config),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_clock_input),
+
};
const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
@@ -114,6 +116,7 @@ static const u8 acpi_gbl_resource_types[] = {
ACPI_VARIABLE_LENGTH, /* 10 pin_group */
ACPI_VARIABLE_LENGTH, /* 11 pin_group_function */
ACPI_VARIABLE_LENGTH, /* 12 pin_group_config */
+ ACPI_VARIABLE_LENGTH, /* 13 clock_input */
};
/*******************************************************************************
@@ -358,16 +361,20 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ /* Avoid undefined behavior: member access within misaligned address */
+
+ struct aml_resource_common_serialbus common_serial_bus;
+ memcpy(&common_serial_bus, aml_resource,
+ sizeof(common_serial_bus));
+
/* Validate the bus_type field */
- if ((aml_resource->common_serial_bus.type == 0) ||
- (aml_resource->common_serial_bus.type >
- AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ if ((common_serial_bus.type == 0) ||
+ (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- aml_resource->common_serial_bus.
- type));
+ common_serial_bus.type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index a06988ac409d..f5f5da441458 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index e24bc670b53e..8f10b413e928 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 86e76b443da7..aa2e923462b7 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index f2acec3a0ee3..1915bec2b279 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2022, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index ab86b2f4e719..013eb621dc92 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -489,9 +489,15 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- if (val != EINJ_STATUS_SUCCESS)
+ if (val == EINJ_STATUS_FAIL)
return -EBUSY;
+ else if (val == EINJ_STATUS_INVAL)
+ return -EINVAL;
+ /*
+ * If the error was injected into the platform successfully, the injected
+ * error still needs to be triggered.
+ */
rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
if (rc)
return rc;
@@ -584,6 +590,12 @@ static const char * const einj_error_type_string[] = {
"0x00000200\tPlatform Correctable\n",
"0x00000400\tPlatform Uncorrectable non-fatal\n",
"0x00000800\tPlatform Uncorrectable fatal\n",
+ "0x00001000\tCXL.cache Protocol Correctable\n",
+ "0x00002000\tCXL.cache Protocol Uncorrectable non-fatal\n",
+ "0x00004000\tCXL.cache Protocol Uncorrectable fatal\n",
+ "0x00008000\tCXL.mem Protocol Correctable\n",
+ "0x00010000\tCXL.mem Protocol Uncorrectable non-fatal\n",
+ "0x00020000\tCXL.mem Protocol Uncorrectable fatal\n",
};
static int available_error_type_show(struct seq_file *m, void *v)
@@ -616,6 +628,10 @@ static int error_type_set(void *data, u64 val)
u32 available_error_type = 0;
u32 tval, vendor;
+ /* Only low 32 bits for error type are valid */
+ if (val & GENMASK_ULL(63, 32))
+ return -EINVAL;
+
/*
* Vendor defined types have 0x80000000 bit set, and
* are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
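error_type_set() is a debugfs write handler that receives a u64, while EINJ error types only occupy 32 bits, so the new check rejects any value with bits 63:32 set before the vendor/ACPI type checks run. A standalone sketch of that guard, with an illustrative function name:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Reject 64-bit user input whose upper half cannot be a valid error type. */
static int demo_validate_error_type(u64 val)
{
	if (val & GENMASK_ULL(63, 32))
		return -EINVAL;

	return 0;
}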
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
index cf31abd0ed1b..f605302395c3 100644
--- a/drivers/acpi/arm64/agdi.c
+++ b/drivers/acpi/arm64/agdi.c
@@ -64,8 +64,11 @@ static int agdi_remove(struct platform_device *pdev)
int err, i;
err = sdei_event_disable(adata->sdei_event);
- if (err)
- return err;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to disable sdei-event #%d (%pe)\n",
+ adata->sdei_event, ERR_PTR(err));
+ return 0;
+ }
for (i = 0; i < 3; i++) {
err = sdei_event_unregister(adata->sdei_event);
@@ -75,7 +78,11 @@ static int agdi_remove(struct platform_device *pdev)
schedule();
}
- return err;
+ if (err)
+ dev_err(&pdev->dev, "Failed to unregister sdei-event #%d (%pe)\n",
+ adata->sdei_event, ERR_PTR(err));
+
+ return 0;
}
static struct platform_driver agdi_driver = {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index f4badcdde76e..9c67ed02d797 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -42,6 +42,8 @@
#define ACPI_BATTERY_STATE_CHARGING 0x2
#define ACPI_BATTERY_STATE_CRITICAL 0x4
+#define MAX_STRING_LENGTH 64
+
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
@@ -118,10 +120,10 @@ struct acpi_battery {
int capacity_granularity_1;
int capacity_granularity_2;
int alarm;
- char model_number[32];
- char serial_number[32];
- char type[32];
- char oem_info[32];
+ char model_number[MAX_STRING_LENGTH];
+ char serial_number[MAX_STRING_LENGTH];
+ char type[MAX_STRING_LENGTH];
+ char oem_info[MAX_STRING_LENGTH];
int state;
int power_unit;
unsigned long flags;
@@ -437,16 +439,25 @@ static int extract_package(struct acpi_battery *battery,
element = &package->package.elements[i];
if (offsets[i].mode) {
u8 *ptr = (u8 *)battery + offsets[i].offset;
+ u32 len = MAX_STRING_LENGTH;
+
+ switch (element->type) {
+ case ACPI_TYPE_BUFFER:
+ if (len > element->buffer.length + 1)
+ len = element->buffer.length + 1;
+
+ fallthrough;
+ case ACPI_TYPE_STRING:
+ strscpy(ptr, element->string.pointer, len);
- if (element->type == ACPI_TYPE_STRING ||
- element->type == ACPI_TYPE_BUFFER)
- strncpy(ptr, element->string.pointer, 32);
- else if (element->type == ACPI_TYPE_INTEGER) {
- strncpy(ptr, (u8 *)&element->integer.value,
- sizeof(u64));
- ptr[sizeof(u64)] = 0;
- } else
+ break;
+ case ACPI_TYPE_INTEGER:
+ strscpy(ptr, (u8 *)&element->integer.value, sizeof(u64) + 1);
+
+ break;
+ default:
*ptr = 0; /* don't have value */
+ }
} else {
int *x = (int *)((u8 *)battery + offsets[i].offset);
*x = (element->type == ACPI_TYPE_INTEGER) ?
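The battery string handling above moves from fixed 32-byte strncpy() calls to strscpy() with a MAX_STRING_LENGTH destination size, additionally clamped to the ACPI buffer element length so short buffers are not over-read. A sketch of the bounded-copy pattern in isolation; the names and sizes are illustrative and the destination is assumed to be at least DEMO_STRING_LENGTH bytes:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_STRING_LENGTH 64

/* Copy an ACPI string/buffer element into a fixed-size destination field. */
static void demo_copy_string(char *dst, const char *src, size_t src_len)
{
	size_t len = DEMO_STRING_LENGTH;

	/* Do not read past the source element (leave room for the NUL). */
	if (len > src_len + 1)
		len = src_len + 1;

	strscpy(dst, src, len);	/* truncates safely, always NUL-terminates */
}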
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0c05ccde1f7a..d161ff707de4 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -459,85 +459,67 @@ out_free:
Notification Handling
-------------------------------------------------------------------------- */
-/*
- * acpi_bus_notify
- * ---------------
- * Callback for all 'system-level' device notifications (values 0x00-0x7F).
+/**
+ * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
+ * @handle: Target ACPI object.
+ * @type: Notification type.
+ * @data: Ignored.
+ *
+ * This only handles notifications related to device hotplug.
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- bool hotplug_event = false;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
- hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
- hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_WAKE:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
- break;
+ return;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
- break;
+ return;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n");
- break;
+ return;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n");
- break;
+ return;
case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n");
- break;
+ return;
default:
acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
- break;
+ return;
}
adev = acpi_get_acpi_dev(handle);
- if (!adev)
- goto err;
-
- if (adev->dev.driver) {
- struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
-
- if (driver && driver->ops.notify &&
- (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
- driver->ops.notify(adev, type);
- }
- if (!hotplug_event) {
- acpi_put_acpi_dev(adev);
- return;
- }
-
- if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+ if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
acpi_put_acpi_dev(adev);
- err:
- acpi_evaluate_ost(handle, type, ost_code, NULL);
+ acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
@@ -562,42 +544,52 @@ static u32 acpi_device_fixed_event(void *data)
return ACPI_INTERRUPT_HANDLED;
}
-static int acpi_device_install_notify_handler(struct acpi_device *device)
+static int acpi_device_install_notify_handler(struct acpi_device *device,
+ struct acpi_driver *acpi_drv)
{
acpi_status status;
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event,
device);
- else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event,
device);
- else
- status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY,
+ } else {
+ u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+ ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+
+ status = acpi_install_notify_handler(device->handle, type,
acpi_notify_device,
device);
+ }
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
-static void acpi_device_remove_notify_handler(struct acpi_device *device)
+static void acpi_device_remove_notify_handler(struct acpi_device *device,
+ struct acpi_driver *acpi_drv)
{
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event);
- else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event);
- else
- acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ } else {
+ u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+ ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+
+ acpi_remove_notify_handler(device->handle, type,
acpi_notify_device);
+ }
+ acpi_os_wait_events_complete();
}
/* Handle events targeting \_SB device (at present only graceful shutdown) */
@@ -632,8 +624,9 @@ static void acpi_sb_notify(acpi_handle handle, u32 event, void *data)
if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) {
if (!work_busy(&acpi_sb_work))
schedule_work(&acpi_sb_work);
- } else
+ } else {
pr_warn("event %x is not supported by \\_SB device\n", event);
+ }
}
static int __init acpi_setup_sb_notify_handler(void)
@@ -817,9 +810,10 @@ static bool acpi_of_modalias(struct acpi_device *adev,
* @modalias: Pointer to buffer that modalias value will be copied into
* @len: Length of modalias buffer
*
- * This is a counterpart of of_modalias_node() for struct acpi_device objects.
- * If there is a compatible string for @adev, it will be copied to @modalias
- * with the vendor prefix stripped; otherwise, @default_id will be used.
+ * This is a counterpart of of_alias_from_compatible() for struct acpi_device
+ * objects. If there is a compatible string for @adev, it will be copied to
+ * @modalias with the vendor prefix stripped; otherwise, @default_id will be
+ * used.
*/
void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len)
@@ -1014,7 +1008,7 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
-static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int acpi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
}
@@ -1039,7 +1033,7 @@ static int acpi_device_probe(struct device *dev)
acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
- ret = acpi_device_install_notify_handler(acpi_dev);
+ ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
@@ -1062,7 +1056,7 @@ static void acpi_device_remove(struct device *dev)
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
if (acpi_drv->ops.notify)
- acpi_device_remove_notify_handler(acpi_dev);
+ acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
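With this change, a driver that sets ACPI_DRIVER_ALL_NOTIFY_EVENTS gets its ->notify() callback registered with ACPICA for both system and device notify values, instead of system-level events being fanned out from acpi_bus_notify(). A hedged sketch of what such a registration looks like from a driver's point of view; the my_* names are hypothetical:

#include <linux/acpi.h>
#include <linux/errno.h>

static void my_notify(acpi_handle handle, u32 event, void *context)
{
	/* 0x00-0x7F are system notifies, 0x80-0xFF are device notifies. */
}

static int my_install_notify(struct acpi_device *adev, bool all_events)
{
	u32 type = all_events ? ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
	acpi_status status;

	status = acpi_install_notify_handler(adev->handle, type,
					     my_notify, adev);
	return ACPI_FAILURE(status) ? -EINVAL : 0;
}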
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 0f17b1c32718..7ff269a78c20 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -193,7 +193,7 @@ static struct attribute *cppc_attrs[] = {
};
ATTRIBUTE_GROUPS(cppc);
-static struct kobj_type cppc_ktype = {
+static const struct kobj_type cppc_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = cppc_groups,
};
@@ -595,6 +595,7 @@ bool __weak cpc_supported_by_cpu(void)
/**
* pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
+ * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
*
* Check and allocate the cppc_pcc_data memory.
* In some processor configurations it is possible that same subspace
@@ -1154,6 +1155,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
}
/**
+ * cppc_get_epp_perf - Get the epp register value.
+ * @cpunum: CPU from which to get epp preference value.
+ * @epp_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
+{
+ return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+}
+EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
+
+/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
@@ -1365,6 +1379,156 @@ out_err:
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
+/*
+ * Set Energy Performance Preference Register value through
+ * Performance Controls Interface
+ */
+int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_register_resource *epp_set_reg;
+ struct cpc_register_resource *auto_sel_reg;
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+ epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
+
+ if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ if (CPC_SUPPORTED(auto_sel_reg)) {
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+ if (ret)
+ return ret;
+ }
+
+ if (CPC_SUPPORTED(epp_set_reg)) {
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
+ if (ret)
+ return ret;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+ } else {
+ ret = -ENOTSUPP;
+ pr_debug("_CPC in PCC is not supported\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
+
+/**
+ * cppc_get_auto_sel_caps - Read autonomous selection register.
+ * @cpunum : CPU from which to read register.
+ * @perf_caps : struct where autonomous selection register value is updated.
+ */
+int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ struct cpc_register_resource *auto_sel_reg;
+ u64 auto_sel;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+
+ if (!CPC_SUPPORTED(auto_sel_reg))
+ pr_warn_once("Autonomous mode is not unsupported!\n");
+
+ if (CPC_IN_PCC(auto_sel_reg)) {
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret = 0;
+
+ if (pcc_ss_id < 0)
+ return -ENODEV;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
+ cpc_read(cpunum, auto_sel_reg, &auto_sel);
+ perf_caps->auto_sel = (bool)auto_sel;
+ } else {
+ ret = -EIO;
+ }
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+
+/**
+ * cppc_set_auto_sel - Write autonomous selection register.
+ * @cpu : CPU to which to write register.
+ * @enable : the desired value of the autonomous selection register to be updated.
+ */
+int cppc_set_auto_sel(int cpu, bool enable)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_register_resource *auto_sel_reg;
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret = -EINVAL;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+
+ if (CPC_IN_PCC(auto_sel_reg)) {
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ if (CPC_SUPPORTED(auto_sel_reg)) {
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+ if (ret)
+ return ret;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+ } else {
+ ret = -ENOTSUPP;
+ pr_debug("_CPC in PCC is not supported\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
+
/**
* cppc_set_enable - Set to enable CPPC on the processor by writing the
* Continuous Performance Control package EnableRegister field.
@@ -1420,7 +1584,7 @@ EXPORT_SYMBOL_GPL(cppc_set_enable);
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cpc_register_resource *desired_reg;
+ struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
@@ -1431,6 +1595,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
}
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+ min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
+ max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
/*
* This is Phase-I where we want to write to CPC registers
@@ -1439,7 +1605,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* Since read_lock can be acquired by multiple CPUs simultaneously we
* achieve that goal here
*/
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
@@ -1462,13 +1628,19 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
cpc_desc->write_cmd_status = 0;
}
+ cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
+
/*
- * Skip writing MIN/MAX until Linux knows how to come up with
- * useful values.
+ * Only write if min_perf and max_perf are not zero. Some drivers pass zero
+ * value to min and max perf, but they don't mean to set the zero value,
+ * they just don't want to write to those registers.
*/
- cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
+ if (perf_ctrls->min_perf)
+ cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
+ if (perf_ctrls->max_perf)
+ cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
- if (CPC_IN_PCC(desired_reg))
+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
/*
* This is Phase-II where we transfer the ownership of PCC to Platform
@@ -1516,7 +1688,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* case during a CMD_READ and if there are pending writes it delivers
* the write command before servicing the read command
*/
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
/* Update only if there are pending write commands */
if (pcc_ss_data->pending_pcc_write_cmd)
@@ -1536,6 +1708,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
/**
* cppc_get_transition_latency - returns frequency transition latency in ns
+ * @cpu_num: CPU number for per_cpu().
*
* ACPI CPPC does not explicitly specify how a platform can specify the
* transition latency for performance change requests. The closest we have
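The new EPP entry points exported above (cppc_get_epp_perf() and cppc_set_epp_perf()) read and write the Energy Performance Preference register, going through PCC when the register lives there. A minimal caller sketch, assuming the CPPC header and a caller-chosen EPP value; set_cpu_epp() is hypothetical and not part of the patch:

#include <acpi/cppc_acpi.h>

static int set_cpu_epp(int cpu, u64 new_epp)
{
	struct cppc_perf_ctrls ctrls = {};
	u64 cur_epp;
	int ret;

	ret = cppc_get_epp_perf(cpu, &cur_epp);	/* current preference */
	if (ret)
		return ret;

	ctrls.energy_perf = new_epp;
	/* 'true' also writes the autonomous-selection enable where supported */
	return cppc_set_epp_perf(cpu, &ctrls, true);
}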
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 97450f4003cc..f007116a8427 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
}
+/**
+ * acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
+ * @adev: ACPI companion of the target device.
+ *
+ * Evaluate _S0W for @adev and return the value produced by it or return
+ * ACPI_STATE_UNKNOWN on errors (including _S0W not present).
+ */
+u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
+{
+ unsigned long long state;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+ if (ACPI_FAILURE(status))
+ return ACPI_STATE_UNKNOWN;
+
+ return state;
+}
+
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
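acpi_dev_power_state_for_wake(), added above, returns the raw _S0W value (0 = D0 through 4 = D3cold) or ACPI_STATE_UNKNOWN when _S0W is absent or fails. One possible caller, purely as an illustration; can_wake_from_d3() is not part of the patch:

#include <linux/acpi.h>

static bool can_wake_from_d3(struct acpi_device *adev)
{
	u8 state = acpi_dev_power_state_for_wake(adev);

	if (state == ACPI_STATE_UNKNOWN)	/* no _S0W, or evaluation failed */
		return false;

	/* Wakeup signaling works from D3hot (or D3cold when _S0W returns 4). */
	return state >= ACPI_STATE_D3_HOT;
}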
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 120873dad2cc..0fbfbaa8d8e3 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -78,7 +78,7 @@ static void acpi_data_node_release(struct kobject *kobj)
complete(&dn->kobj_done);
}
-static struct kobj_type acpi_data_node_ktype = {
+static const struct kobj_type acpi_data_node_ktype = {
.sysfs_ops = &acpi_data_node_sysfs_ops,
.default_groups = acpi_data_node_default_groups,
.release = acpi_data_node_release,
@@ -133,7 +133,7 @@ static void acpi_hide_nondev_subnodes(struct acpi_device_data *data)
* -EINVAL: output error
* -ENOMEM: output is truncated
*/
-static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias,
int size)
{
int len;
@@ -191,7 +191,7 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
* only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
* ACPI/PNP IDs.
*/
-static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias,
int size)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
@@ -239,7 +239,7 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
return len;
}
-int __acpi_device_uevent_modalias(struct acpi_device *adev,
+int __acpi_device_uevent_modalias(const struct acpi_device *adev,
struct kobj_uevent_env *env)
{
int len;
@@ -277,7 +277,7 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
* Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with
* hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
*/
-int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 105d2e795afa..928899ab9502 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1083,9 +1083,12 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data)
{
- struct acpi_ec_query_handler *handler =
- kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
+ struct acpi_ec_query_handler *handler;
+
+ if (!handle && !func)
+ return -EINVAL;
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler)
return -ENOMEM;
@@ -1097,6 +1100,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
kref_init(&handler->kref);
list_add(&handler->node, &ec->list);
mutex_unlock(&ec->mutex);
+
return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
@@ -1109,9 +1113,16 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
- if (remove_all || query_bit == handler->query_bit) {
+ /*
+ * When remove_all is false, only remove custom query handlers
+ * which have handler->func set. This is done to preserve query
+ * handlers discovered through ACPI, as they should continue handling
+ * EC queries.
+ */
+ if (remove_all || (handler->func && handler->query_bit == query_bit)) {
list_del_init(&handler->node);
list_add(&handler->node, &free_list);
+
}
}
mutex_unlock(&ec->mutex);
@@ -1122,6 +1133,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
acpi_ec_remove_query_handlers(ec, false, query_bit);
+ flush_workqueue(ec_query_wq);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 204fe94c7e45..a194f30876c5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -75,7 +75,8 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
}
#define FIND_CHILD_MIN_SCORE 1
-#define FIND_CHILD_MAX_SCORE 2
+#define FIND_CHILD_MID_SCORE 2
+#define FIND_CHILD_MAX_SCORE 3
static int match_any(struct acpi_device *adev, void *not_used)
{
@@ -96,8 +97,17 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
return -ENODEV;
status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
- if (status == AE_NOT_FOUND)
+ if (status == AE_NOT_FOUND) {
+ /*
+ * Special case: backlight device objects without _STA are
+ * preferred to other objects with the same _ADR value, because
+ * it is more likely that they are actually useful.
+ */
+ if (adev->pnp.type.backlight)
+ return FIND_CHILD_MID_SCORE;
+
return FIND_CHILD_MIN_SCORE;
+ }
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return -ENODEV;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ec584442fb29..06ad497067ac 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -120,7 +120,7 @@ int acpi_bus_register_early_device(int type);
Device Matching and Notification
-------------------------------------------------------------------------- */
struct acpi_device *acpi_companion_match(const struct device *dev);
-int __acpi_device_uevent_modalias(struct acpi_device *adev,
+int __acpi_device_uevent_modalias(const struct acpi_device *adev,
struct kobj_uevent_env *env);
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index a690c7b18623..6677955b4a8e 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -24,6 +24,7 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include <acpi/acpi.h>
+#include "internal.h"
struct acpi_pci_ioapic {
acpi_handle root_handle;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f1cc5ec6a3b6..07204d482968 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -894,7 +894,7 @@ static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
if (flush->header.length < sizeof(*flush))
return 0;
- return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
+ return struct_size(flush, hint_address, flush->hint_count);
}
static bool add_flush(struct acpi_nfit_desc *acpi_desc,
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
+ cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight
@@ -3476,8 +3476,8 @@ static __init int nfit_init(void)
BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64);
BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
- BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
- BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 16);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 8);
BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 605a0c7053be..bba268ecd802 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -718,7 +718,7 @@ static void hmat_register_target_devices(struct memory_target *target)
for (res = target->memregions.child; res; res = res->sibling) {
int target_nid = pxm_to_node(target->memory_pxm);
- hmem_register_device(target_nid, res);
+ hmem_register_resource(target_nid, res);
}
}
@@ -869,4 +869,4 @@ out_put:
acpi_put_table(tbl);
return 0;
}
-device_initcall(hmat_init);
+subsys_initcall(hmat_init);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b3c202d2a433..84030804a763 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -1047,6 +1047,9 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL))
host_bridge->native_dpc = 0;
+ if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL))
+ host_bridge->native_cxl_error = 0;
+
/*
* Evaluate the "PCI Boot Configuration" _DSM Function. If it
* exists and returns 0, we must preserve any PCI resource
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index 27fb6cdad75f..843f678ade0c 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
return -EROFS;
/* changing from read to write with mprotect is not allowed */
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
pfrt_log_dev = to_pfrt_log_dev(file);
diff --git a/drivers/acpi/pmic/intel_pmic_bytcrc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index 9ea79f210965..2b09f8da5400 100644
--- a/drivers/acpi/pmic/intel_pmic_bytcrc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -283,6 +283,7 @@ static const struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
.power_table_count= ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
.thermal_table_count = ARRAY_SIZE(thermal_table),
+ .pmic_i2c_address = 0x6e,
};
static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 418eec523025..c84ef3d15181 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -20,19 +20,19 @@
#define CHTDC_TI_GPADC 0x5a
static struct pmic_table chtdc_ti_power_table[] = {
- { .address = 0x00, .reg = 0x41 },
- { .address = 0x04, .reg = 0x42 },
- { .address = 0x08, .reg = 0x43 },
- { .address = 0x0c, .reg = 0x45 },
- { .address = 0x10, .reg = 0x46 },
- { .address = 0x14, .reg = 0x47 },
- { .address = 0x18, .reg = 0x48 },
- { .address = 0x1c, .reg = 0x49 },
- { .address = 0x20, .reg = 0x4a },
- { .address = 0x24, .reg = 0x4b },
- { .address = 0x28, .reg = 0x4c },
- { .address = 0x2c, .reg = 0x4d },
- { .address = 0x30, .reg = 0x4e },
+ { .address = 0x00, .reg = 0x41 }, /* LDO1 */
+ { .address = 0x04, .reg = 0x42 }, /* LDO2 */
+ { .address = 0x08, .reg = 0x43 }, /* LDO3 */
+ { .address = 0x0c, .reg = 0x45 }, /* LDO5 */
+ { .address = 0x10, .reg = 0x46 }, /* LDO6 */
+ { .address = 0x14, .reg = 0x47 }, /* LDO7 */
+ { .address = 0x18, .reg = 0x48 }, /* LDO8 */
+ { .address = 0x1c, .reg = 0x49 }, /* LDO9 */
+ { .address = 0x20, .reg = 0x4a }, /* LD10 */
+ { .address = 0x24, .reg = 0x4b }, /* LD11 */
+ { .address = 0x28, .reg = 0x4c }, /* LD12 */
+ { .address = 0x2c, .reg = 0x4d }, /* LD13 */
+ { .address = 0x30, .reg = 0x4e }, /* LD14 */
};
static struct pmic_table chtdc_ti_thermal_table[] = {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 23507d29f000..c2c70139c4f1 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -1022,6 +1023,21 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
+ {
+ /*
+ * The Toshiba Click Mini has a CPR3 power-resource which must
+ * be on for the touchscreen to work, but which is not in any
+ * _PR? lists. The other 2 affected power-resources are no-ops.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Click Mini L9W-B"),
+ },
+ },
+ {}
+};
+
/**
* acpi_turn_off_unused_power_resources - Turn off power resources not in use.
*/
@@ -1029,6 +1045,9 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
+ if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ return;
+
mutex_lock(&power_resource_list_lock);
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
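The hunk above gates acpi_turn_off_unused_power_resources() behind a DMI quirk table. A self-contained sketch of that table-match pattern, using invented names (board_quirk, board_has_quirk) rather than the kernel's dmi_check_system() internals:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical quirk entry: all non-NULL fields must match. */
struct board_quirk {
	const char *sys_vendor;
	const char *product_name;
};

static const struct board_quirk leave_power_resources_on[] = {
	{ "TOSHIBA", "SATELLITE Click Mini L9W-B" },
	{ NULL, NULL }		/* terminator */
};

static bool quirk_matches(const struct board_quirk *q,
			  const char *vendor, const char *product)
{
	return (!q->sys_vendor || !strcmp(q->sys_vendor, vendor)) &&
	       (!q->product_name || !strcmp(q->product_name, product));
}

static bool board_has_quirk(const struct board_quirk *table,
			    const char *vendor, const char *product)
{
	for (; table->sys_vendor || table->product_name; table++)
		if (quirk_matches(table, vendor, product))
			return true;
	return false;
}

int main(void)
{
	/* Values that would normally come from the firmware's DMI strings. */
	const char *vendor = "TOSHIBA";
	const char *product = "SATELLITE Click Mini L9W-B";

	if (board_has_quirk(leave_power_resources_on, vendor, product))
		printf("quirk present: leaving unused power resources on\n");
	else
		printf("no quirk: unused power resources may be turned off\n");
	return 0;
}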
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index c91342dcbcd6..a35dd0e41c27 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -81,6 +81,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
* acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache
* @table_hdr: Pointer to the head of the PPTT table
* @local_level: passed res reflects this cache level
+ * @split_levels: Number of split cache levels (data/instruction).
* @res: cache resource in the PPTT we want to walk
* @found: returns a pointer to the requested level if found
* @level: the requested cache level
@@ -100,6 +101,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
*/
static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
unsigned int local_level,
+ unsigned int *split_levels,
struct acpi_subtable_header *res,
struct acpi_pptt_cache **found,
unsigned int level, int type)
@@ -113,8 +115,17 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
while (cache) {
local_level++;
+ if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) {
+ cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
+ continue;
+ }
+
+ if (split_levels &&
+ (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) ||
+ acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR)))
+ *split_levels = local_level;
+
if (local_level == level &&
- cache->flags & ACPI_PPTT_CACHE_TYPE_VALID &&
acpi_pptt_match_type(cache->attributes, type)) {
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
@@ -135,8 +146,8 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
static struct acpi_pptt_cache *
acpi_find_cache_level(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node,
- unsigned int *starting_level, unsigned int level,
- int type)
+ unsigned int *starting_level, unsigned int *split_levels,
+ unsigned int level, int type)
{
struct acpi_subtable_header *res;
unsigned int number_of_levels = *starting_level;
@@ -149,7 +160,8 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
resource++;
local_level = acpi_pptt_walk_cache(table_hdr, *starting_level,
- res, &ret, level, type);
+ split_levels, res, &ret,
+ level, type);
/*
* we are looking for the max depth. Since its potentially
* possible for a given node to have resources with differing
@@ -165,29 +177,29 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
}
/**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
+ * levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
+ * @levels: Number of cache levels on success.
+ * @split_levels: Number of split cache levels (data/instruction) on
+ * success. Can be NULL.
*
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
- * caches that exist across packages). Count the number of cache levels that
- * exist at each level on the way up.
- *
- * Return: Total number of levels found.
+ * caches that exist across packages). Count the number of cache levels and
+ * split cache levels (data/instruction) that exist at each level on the way
+ * up.
*/
-static int acpi_count_levels(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node)
+static void acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *levels, unsigned int *split_levels)
{
- int total_levels = 0;
-
do {
- acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0);
+ acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
-
- return total_levels;
}
/**
@@ -281,19 +293,6 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
return NULL;
}
-static int acpi_find_cache_levels(struct acpi_table_header *table_hdr,
- u32 acpi_cpu_id)
-{
- int number_of_levels = 0;
- struct acpi_pptt_processor *cpu;
-
- cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id);
- if (cpu)
- number_of_levels = acpi_count_levels(table_hdr, cpu);
-
- return number_of_levels;
-}
-
static u8 acpi_cache_type(enum cache_type type)
{
switch (type) {
@@ -334,7 +333,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
while (cpu_node && !found) {
found = acpi_find_cache_level(table_hdr, cpu_node,
- &total_levels, level, acpi_type);
+ &total_levels, NULL, level, acpi_type);
*node = cpu_node;
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
}
@@ -537,16 +536,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
static struct acpi_table_header *acpi_get_pptt(void)
{
static struct acpi_table_header *pptt;
+ static bool is_pptt_checked;
acpi_status status;
/*
* PPTT will be used at runtime on every CPU hotplug in path, so we
* don't need to call acpi_put_table() to release the table mapping.
*/
- if (!pptt) {
+ if (!pptt && !is_pptt_checked) {
status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
if (ACPI_FAILURE(status))
acpi_pptt_warn_missing();
+
+ is_pptt_checked = true;
}
return pptt;
@@ -602,32 +604,48 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
}
/**
- * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
+ * acpi_get_cache_info() - Determine the number of cache levels and
+ * split cache levels (data/instruction) for a PE.
+ * @cpu: Kernel logical CPU number
+ * @levels: Number of cache levels on success.
+ * @split_levels: Number of levels being split (i.e. data/instruction)
+ * on success. Can be NULL.
*
* Given a logical CPU number, returns the number of levels of cache represented
* in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
* indicating we didn't find any cache levels.
*
- * Return: Cache levels visible to this core.
+ * Return: -ENOENT if no PPTT table or no PPTT processor struct found.
+ * 0 on success.
*/
-int acpi_find_last_cache_level(unsigned int cpu)
+int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
+ unsigned int *split_levels)
{
- u32 acpi_cpu_id;
+ struct acpi_pptt_processor *cpu_node;
struct acpi_table_header *table;
- int number_of_levels = 0;
+ u32 acpi_cpu_id;
+
+ *levels = 0;
+ if (split_levels)
+ *split_levels = 0;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
- pr_debug("Cache Setup find last level CPU=%d\n", cpu);
+ pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
- number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
- pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ return -ENOENT;
+
+ acpi_count_levels(table, cpu_node, levels, split_levels);
- return number_of_levels;
+ pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
+ *levels, split_levels ? *split_levels : -1);
+
+ return 0;
}
/**
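The reworked interface above returns 0 or -ENOENT and passes the counts back through out-parameters, with split_levels allowed to be NULL. A hypothetical caller illustrating that contract (get_cache_info() is a stand-in, not the kernel function):

#include <errno.h>
#include <stdio.h>

/* Stand-in for acpi_get_cache_info(): fills out-parameters on success. */
static int get_cache_info(unsigned int cpu, unsigned int *levels,
			  unsigned int *split_levels)
{
	if (cpu > 3)
		return -ENOENT;		/* no topology data for this CPU */

	*levels = 3;			/* e.g. L1 + L2 + L3 */
	if (split_levels)		/* optional: caller may pass NULL */
		*split_levels = 1;	/* only L1 is split I/D */
	return 0;
}

int main(void)
{
	unsigned int levels = 0, split = 0;

	/* Caller that wants both counts. */
	if (!get_cache_info(0, &levels, &split))
		printf("cpu0: %u levels, %u split\n", levels, split);

	/* Caller that only cares about the total level count. */
	if (!get_cache_info(1, &levels, NULL))
		printf("cpu1: %u levels\n", levels);

	return 0;
}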
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 998101cf16e4..3d4c4620f9f9 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -236,6 +236,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
efi_status_t status;
struct prm_context_buffer context;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
+ return AE_NO_HANDLER;
+ }
+
/*
* The returned acpi_status will always be AE_OK. Error values will be
* saved in the first byte of the PRM message buffer to be used by ASL.
@@ -325,6 +330,11 @@ void __init init_prmt(void)
pr_info("PRM: found %u modules\n", mc);
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_err("PRM: EFI runtime services unavailable\n");
+ return;
+ }
+
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_PLATFORM_RT,
&acpi_platformrt_space_handler,
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 1278969eec1f..4bd16b3f0781 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -263,6 +263,12 @@ static int __init acpi_processor_driver_init(void)
if (acpi_disabled)
return 0;
+ if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER)) {
+ acpi_processor_cpufreq_init = true;
+ acpi_processor_ignore_ppc_init();
+ }
+
result = driver_register(&acpi_processor_driver);
if (result < 0)
return result;
@@ -276,12 +282,6 @@ static int __init acpi_processor_driver_init(void)
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
NULL, acpi_soft_cpu_dead);
- if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
- CPUFREQ_POLICY_NOTIFIER)) {
- acpi_processor_cpufreq_init = true;
- acpi_processor_ignore_ppc_init();
- }
-
acpi_processor_throttling_init();
return 0;
err:
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7bf882fcd64b..9718d07cc2a2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -109,8 +109,8 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
- safe_halt();
- local_irq_disable();
+ raw_safe_halt();
+ raw_local_irq_disable();
}
}
@@ -147,7 +147,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
static void __lapic_timer_propagate_broadcast(void *arg)
{
- struct acpi_processor *pr = (struct acpi_processor *) arg;
+ struct acpi_processor *pr = arg;
if (pr->power.timer_broadcast_on_state < INT_MAX)
tick_broadcast_enable();
@@ -523,8 +523,11 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
-static void wait_for_freeze(void)
+static __cpuidle void io_idle(unsigned long addr)
{
+ /* IO port based C-state */
+ inb(addr);
+
#ifdef CONFIG_X86
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -569,9 +572,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else if (cx->entry_method == ACPI_CSTATE_HALT) {
acpi_safe_halt();
} else {
- /* IO port based C-state */
- inb(cx->address);
- wait_for_freeze();
+ io_idle(cx->address);
}
perf_lopwr_cb(false);
@@ -593,8 +594,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
- inb(cx->address);
- wait_for_freeze();
+ io_idle(cx->address);
} else
return -ENODEV;
@@ -607,7 +607,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
return 0;
}
-static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
+static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
@@ -642,6 +642,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
*/
bool dis_bm = pr->flags.bm_control;
+ instrumentation_begin();
+
/* If we can skip BM, demote to a safe state. */
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
dis_bm = false;
@@ -663,11 +665,11 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
- ct_idle_enter();
+ ct_cpuidle_enter();
acpi_idle_do_entry(cx);
- ct_idle_exit();
+ ct_cpuidle_exit();
/* Re-enable bus master arbitration */
if (dis_bm) {
@@ -677,6 +679,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
+ instrumentation_end();
+
return index;
}
@@ -1219,6 +1223,8 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
state->enter = acpi_idle_lpi_enter;
drv->safe_state_index = i;
}
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 8c3f82c9fff3..18fb04523f93 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -14,6 +14,8 @@
#include <linux/acpi.h>
#include <acpi/processor.h>
+#include <xen/xen.h>
+
#include "internal.h"
static bool __init processor_physically_present(acpi_handle handle)
@@ -47,6 +49,15 @@ static bool __init processor_physically_present(acpi_handle handle)
return false;
}
+ if (xen_initial_domain())
+ /*
+ * When running as a Xen dom0 the number of processors Linux
+ * sees can be different from the real number of processors on
+ * the system, and we still need to execute _PDC for all of
+ * them.
+ */
+ return xen_processor_present(acpi_id);
+
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 970f04a958cd..4265814c74f8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -53,6 +53,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
+ s32 qos_value;
+ int index;
int ret;
if (!pr)
@@ -72,17 +74,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
}
}
+ index = ppc;
+
+ if (pr->performance_platform_limit == index ||
+ ppc >= pr->performance->state_count)
+ return 0;
+
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
- (int)ppc, ppc ? "" : "not");
+ index, index ? "is" : "is not");
- pr->performance_platform_limit = (int)ppc;
+ pr->performance_platform_limit = index;
- if (ppc >= pr->performance->state_count ||
- unlikely(!freq_qos_request_active(&pr->perflib_req)))
+ if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
- ret = freq_qos_update_request(&pr->perflib_req,
- pr->performance->states[ppc].core_frequency * 1000);
+ /*
+ * If _PPC returns 0, it means that all of the available states can be
+ * used ("no limit").
+ */
+ if (index == 0)
+ qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ else
+ qos_value = pr->performance->states[index].core_frequency * 1000;
+
+ ret = freq_qos_update_request(&pr->perflib_req, qos_value);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
pr->id, ret);
@@ -166,9 +181,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (!pr)
continue;
+ /*
+ * Reset performance_platform_limit in case there is a stale
+ * value in it, so as to make it match the "no limit" QoS value
+ * below.
+ */
+ pr->performance_platform_limit = 0;
+
ret = freq_qos_add_request(&policy->constraints,
- &pr->perflib_req,
- FREQ_QOS_MAX, INT_MAX);
+ &pr->perflib_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
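The perflib change above maps a _PPC index to a maximum-frequency QoS value, with index 0 meaning "no limit". A small sketch of that mapping under made-up example frequencies; QOS_NO_LIMIT stands in for the kernel's FREQ_QOS_MAX_DEFAULT_VALUE:

#include <limits.h>
#include <stdio.h>

#define QOS_NO_LIMIT INT_MAX	/* stand-in for the "unlimited" QoS maximum */

/* Per-state core frequency in MHz, index 0 being the fastest P-state. */
static const unsigned int core_frequency_mhz[] = { 3200, 2800, 2400, 1600 };
#define STATE_COUNT (sizeof(core_frequency_mhz) / sizeof(core_frequency_mhz[0]))

/*
 * Map a _PPC index to a maximum-frequency QoS value in kHz.
 * Index 0 means the platform imposes no limit.
 */
static int ppc_to_qos_khz(unsigned int ppc)
{
	if (ppc == 0)
		return QOS_NO_LIMIT;
	if (ppc >= STATE_COUNT)
		return -1;	/* out of range: ignore the reported limit */
	return (int)(core_frequency_mhz[ppc] * 1000);
}

int main(void)
{
	for (unsigned int ppc = 0; ppc <= STATE_COUNT; ppc++)
		printf("_PPC=%u -> qos max %d kHz\n", ppc, ppc_to_qos_khz(ppc));
	return 0;
}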
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e534fd49a67e..b7c6287eccca 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -140,9 +140,13 @@ void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
ret = freq_qos_add_request(&policy->constraints,
&pr->thermal_req,
FREQ_QOS_MAX, INT_MAX);
- if (ret < 0)
+ if (ret < 0) {
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+ continue;
+ }
+
+ thermal_cooling_device_update(pr->cdev);
}
}
@@ -153,8 +157,12 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
- if (pr)
- freq_qos_remove_request(&pr->thermal_req);
+ if (!pr)
+ continue;
+
+ freq_qos_remove_request(&pr->thermal_req);
+
+ thermal_cooling_device_update(pr->cdev);
}
}
#else /* ! CONFIG_CPU_FREQ */
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index b8d9eb9a433e..413e4fcadcaf 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -971,60 +971,48 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
enum dev_prop_type proptype, void *val)
{
const union acpi_object *obj;
- int ret;
+ int ret = 0;
- if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
+ if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
- if (ret)
- return ret;
-
- switch (proptype) {
- case DEV_PROP_U8:
- if (obj->integer.value > U8_MAX)
- return -EOVERFLOW;
-
- if (val)
- *(u8 *)val = obj->integer.value;
-
- break;
- case DEV_PROP_U16:
- if (obj->integer.value > U16_MAX)
- return -EOVERFLOW;
-
- if (val)
- *(u16 *)val = obj->integer.value;
-
- break;
- case DEV_PROP_U32:
- if (obj->integer.value > U32_MAX)
- return -EOVERFLOW;
-
- if (val)
- *(u32 *)val = obj->integer.value;
-
- break;
- default:
- if (val)
- *(u64 *)val = obj->integer.value;
-
- break;
- }
-
- if (!val)
- return 1;
- } else if (proptype == DEV_PROP_STRING) {
+ else if (proptype == DEV_PROP_STRING)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ switch (proptype) {
+ case DEV_PROP_U8:
+ if (obj->integer.value > U8_MAX)
+ return -EOVERFLOW;
+ if (val)
+ *(u8 *)val = obj->integer.value;
+ break;
+ case DEV_PROP_U16:
+ if (obj->integer.value > U16_MAX)
+ return -EOVERFLOW;
+ if (val)
+ *(u16 *)val = obj->integer.value;
+ break;
+ case DEV_PROP_U32:
+ if (obj->integer.value > U32_MAX)
+ return -EOVERFLOW;
+ if (val)
+ *(u32 *)val = obj->integer.value;
+ break;
+ case DEV_PROP_U64:
+ if (val)
+ *(u64 *)val = obj->integer.value;
+ break;
+ case DEV_PROP_STRING:
if (val)
*(char **)val = obj->string.pointer;
-
return 1;
- } else {
- ret = -EINVAL;
+ default:
+ return -EINVAL;
}
- return ret;
+
+ /* When no storage is provided, return the number of available values */
+ return val ? 0 : 1;
}
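After the refactor above, acpi_data_prop_read_single() keeps the convention that a NULL destination only probes the property and returns the number of available values (1), while a non-NULL destination receives the range-checked value and 0 is returned. A stand-alone sketch of that convention (read_one_u8() is hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Read a single u8 property value.
 * Returns 0 and stores the value when 'val' is non-NULL,
 * returns 1 (number of available values) when 'val' is NULL,
 * returns -EOVERFLOW when the stored integer does not fit in a u8.
 */
static int read_one_u8(uint64_t stored, uint8_t *val)
{
	if (stored > UINT8_MAX)
		return -EOVERFLOW;
	if (!val)
		return 1;
	*val = (uint8_t)stored;
	return 0;
}

int main(void)
{
	uint8_t v = 0;
	int ret;

	printf("probe: %d\n", read_one_u8(42, NULL));	/* 1: one value available */

	ret = read_one_u8(42, &v);
	printf("read: ret=%d v=%u\n", ret, v);		/* ret=0, v=42 */

	printf("overflow: %d\n", read_one_u8(300, &v));	/* -EOVERFLOW */
	return 0;
}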
#define acpi_copy_property_array_uint(items, val, nval) \
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index f27914aedbd5..e8492b3a393a 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "M17T"),
},
},
+ {
+ .ident = "MEDION S17413",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
+ },
+ },
{ }
};
@@ -432,10 +439,45 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
+ {
+ .ident = "Asus ExpertBook B1502CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B2402CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B2402FBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B2502",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+ },
+ },
{ }
};
-static const struct dmi_system_id lenovo_82ra[] = {
+static const struct dmi_system_id lenovo_laptop[] = {
+ {
+ .ident = "LENOVO IdeaPad Flex 5 14ALC7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
+ },
+ },
{
.ident = "LENOVO IdeaPad Flex 5 16ALC7",
.matches = {
@@ -446,6 +488,34 @@ static const struct dmi_system_id lenovo_82ra[] = {
{ }
};
+static const struct dmi_system_id tongfang_gm_rg[] = {
+ {
+ .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
+ { }
+};
+
+static const struct dmi_system_id maingear_laptop[] = {
+ {
+ .ident = "MAINGEAR Vector Pro 2 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ }
+ },
+ {
+ .ident = "MAINGEAR Vector Pro 2 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
+ },
+ },
+ { }
+};
+
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
@@ -458,8 +528,10 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
- { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
- { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e90752d4f488..94e3c000df2e 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -473,23 +473,32 @@ static const struct device_attribute alarm_attr = {
-------------------------------------------------------------------------- */
static int acpi_battery_read(struct acpi_battery *battery)
{
- int result = 0, saved_present = battery->present;
+ int result, saved_present = battery->present;
u16 state;
if (battery->sbs->manager_present) {
result = acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD,
ACPI_SBS_MANAGER, 0x01, (u8 *)&state);
- if (!result)
- battery->present = state & (1 << battery->id);
- state &= 0x0fff;
+ if (result)
+ return result;
+
+ battery->present = state & (1 << battery->id);
+ if (!battery->present)
+ return 0;
+
+ /* Masking necessary for Smart Battery Selectors */
+ state = 0x0fff;
state |= 1 << (battery->id + 12);
acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD,
ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
- } else if (battery->id == 0)
- battery->present = 1;
-
- if (result || !battery->present)
- return result;
+ } else {
+ if (battery->id == 0) {
+ battery->present = 1;
+ } else {
+ if (!battery->present)
+ return 0;
+ }
+ }
if (saved_present != battery->present) {
battery->update_time = 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 274344434282..0c6f06abe3f4 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1370,9 +1370,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
* Some devices don't reliably have _HIDs & _CIDs, so add
* synthetic HIDs to make sure drivers can find them.
*/
- if (acpi_is_video_device(handle))
+ if (acpi_is_video_device(handle)) {
acpi_add_id(pnp, ACPI_VIDEO_HID);
- else if (acpi_bay_match(handle))
+ pnp->type.backlight = 1;
+ break;
+ }
+ if (acpi_bay_match(handle))
acpi_add_id(pnp, ACPI_BAY_HID);
else if (acpi_dock_match(handle))
acpi_add_id(pnp, ACPI_DOCK_HID);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0b557c0d405e..72470b9f16c4 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -60,13 +60,17 @@ static struct notifier_block tts_notifier = {
.priority = 0,
};
+#ifndef acpi_skip_set_wakeup_address
+#define acpi_skip_set_wakeup_address() false
+#endif
+
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
unsigned long acpi_wakeup_address;
/* do we have a wakeup address for S2 and S3? */
- if (acpi_state == ACPI_STATE_S3) {
+ if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
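The #ifndef block added above lets architecture code supply its own acpi_skip_set_wakeup_address() while everyone else gets a "never skip" default. The same pattern in isolation, as a compilable sketch:

#include <stdbool.h>
#include <stdio.h>

/*
 * Architecture code may define acpi_skip_set_wakeup_address() before this
 * point; when it does not, the default below says "never skip".
 */
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif

int main(void)
{
	if (!acpi_skip_set_wakeup_address())
		printf("would program the S3 wakeup address here\n");
	return 0;
}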
@@ -710,7 +714,13 @@ int acpi_s2idle_begin(void)
int acpi_s2idle_prepare(void)
{
if (acpi_sci_irq_valid()) {
- enable_irq_wake(acpi_sci_irq);
+ int error;
+
+ error = enable_irq_wake(acpi_sci_irq);
+ if (error)
+ pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
+ acpi_sci_irq, error);
+
acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 1eabfcd122ee..cd36a97b0ea2 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -71,7 +71,6 @@ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
/**
* acpi_parse_spcr() - parse ACPI SPCR table and add preferred console
- *
* @enable_earlycon: set up earlycon for the console specified by the table
* @enable_console: setup the console specified by the table.
*
@@ -82,7 +81,6 @@ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
*
* When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called
* from arch initialization code as soon as the DT/ACPI decision is made.
- *
*/
int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
{
@@ -97,9 +95,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
if (acpi_disabled)
return -ENODEV;
- status = acpi_get_table(ACPI_SIG_SPCR, 0,
- (struct acpi_table_header **)&table);
-
+ status = acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&table);
if (ACPI_FAILURE(status))
return -ENOENT;
@@ -110,12 +106,12 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
u32 bit_width = table->serial_port.access_width;
if (bit_width > ACPI_ACCESS_BIT_MAX) {
- pr_err("Unacceptable wide SPCR Access Width. Defaulting to byte size\n");
+ pr_err(FW_BUG "Unacceptable wide SPCR Access Width. Defaulting to byte size\n");
bit_width = ACPI_ACCESS_BIT_DEFAULT;
}
switch (ACPI_ACCESS_BIT_WIDTH((bit_width))) {
default:
- pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
+ pr_err(FW_BUG "Unexpected SPCR Access Width. Defaulting to byte size\n");
fallthrough;
case 8:
iotype = "mmio";
@@ -202,7 +198,8 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
if (xgene_8250_erratum_present(table)) {
iotype = "mmio32";
- /* for xgene v1 and v2 we don't know the clock rate of the
+ /*
+ * For xgene v1 and v2 we don't know the clock rate of the
* UART so don't attempt to change to the baud rate state
* in the table because driver cannot calculate the dividers
*/
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 7db3b530279b..687524b50085 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -458,11 +458,28 @@ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}
+static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr)
+{
+ struct acpi_table_ccel *ccel = th;
+
+ if (ccel->header.length < sizeof(struct acpi_table_ccel) ||
+ !ccel->log_area_start_address || !ccel->log_area_minimum_length) {
+ kfree(data_attr);
+ return -EINVAL;
+ }
+ data_attr->addr = ccel->log_area_start_address;
+ data_attr->attr.size = ccel->log_area_minimum_length;
+ data_attr->attr.attr.name = "CCEL";
+
+ return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
+}
+
static struct acpi_data_obj {
char *name;
int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
{ ACPI_SIG_BERT, acpi_bert_data_init },
+ { ACPI_SIG_CCEL, acpi_ccel_data_init },
};
#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
@@ -953,7 +970,7 @@ static struct attribute *hotplug_profile_attrs[] = {
};
ATTRIBUTE_GROUPS(hotplug_profile);
-static struct kobj_type acpi_hotplug_profile_ktype = {
+static const struct kobj_type acpi_hotplug_profile_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = hotplug_profile_groups,
};
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 5fbc32b802d0..7b4680da57d7 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -555,7 +555,8 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI };
+ ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
+ ACPI_SIG_NBFT };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0b4b844f9d4c..4720a3649a61 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -419,10 +419,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* the next higher trip point
*/
tz->trips.active[i-1].temperature =
- (tz->trips.active[i-2].temperature <
- celsius_to_deci_kelvin(act) ?
- tz->trips.active[i-2].temperature :
- celsius_to_deci_kelvin(act));
+ min_t(unsigned long,
+ tz->trips.active[i-2].temperature,
+ celsius_to_deci_kelvin(act));
break;
} else {
@@ -498,7 +497,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
int result;
if (!tz)
@@ -516,7 +515,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
static int thermal_get_trip_type(struct thermal_zone_device *thermal,
int trip, enum thermal_trip_type *type)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
int i;
if (!tz || trip < 0)
@@ -560,7 +559,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
int trip, int *temp)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
int i;
if (!tz || trip < 0)
@@ -613,7 +612,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
int *temperature)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
if (tz->trips.critical.flags.valid) {
*temperature = deci_kelvin_to_millicelsius_with_offset(
@@ -628,7 +627,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
static int thermal_get_trend(struct thermal_zone_device *thermal,
int trip, enum thermal_trend *trend)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
enum thermal_trip_type type;
int i;
@@ -670,7 +669,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
dev_name(&tz->device->dev),
@@ -679,7 +678,7 @@ static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal)
static void acpi_thermal_zone_device_critical(struct thermal_zone_device *thermal)
{
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
dev_name(&tz->device->dev),
@@ -693,7 +692,7 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
bool bind)
{
struct acpi_device *device = cdev->devdata;
- struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
struct acpi_device *dev;
acpi_handle handle;
int i;
@@ -787,6 +786,32 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.critical = acpi_thermal_zone_device_critical,
};
+static int acpi_thermal_zone_sysfs_add(struct acpi_thermal *tz)
+{
+ struct device *tzdev = thermal_zone_device(tz->thermal_zone);
+ int ret;
+
+ ret = sysfs_create_link(&tz->device->dev.kobj,
+ &tzdev->kobj, "thermal_zone");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&tzdev->kobj,
+ &tz->device->dev.kobj, "device");
+ if (ret)
+ sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
+
+ return ret;
+}
+
+static void acpi_thermal_zone_sysfs_remove(struct acpi_thermal *tz)
+{
+ struct device *tzdev = thermal_zone_device(tz->thermal_zone);
+
+ sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
+ sysfs_remove_link(&tzdev->kobj, "device");
+}
+
static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
{
int trips = 0;
@@ -820,21 +845,15 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
if (IS_ERR(tz->thermal_zone))
return -ENODEV;
- result = sysfs_create_link(&tz->device->dev.kobj,
- &tz->thermal_zone->device.kobj, "thermal_zone");
+ result = acpi_thermal_zone_sysfs_add(tz);
if (result)
goto unregister_tzd;
- result = sysfs_create_link(&tz->thermal_zone->device.kobj,
- &tz->device->dev.kobj, "device");
- if (result)
- goto remove_tz_link;
-
status = acpi_bus_attach_private_data(tz->device->handle,
tz->thermal_zone);
if (ACPI_FAILURE(status)) {
result = -ENODEV;
- goto remove_dev_link;
+ goto remove_links;
}
result = thermal_zone_device_enable(tz->thermal_zone);
@@ -842,16 +861,14 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
goto acpi_bus_detach;
dev_info(&tz->device->dev, "registered as thermal_zone%d\n",
- tz->thermal_zone->id);
+ thermal_zone_device_id(tz->thermal_zone));
return 0;
acpi_bus_detach:
acpi_bus_detach_private_data(tz->device->handle);
-remove_dev_link:
- sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
-remove_tz_link:
- sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
+remove_links:
+ acpi_thermal_zone_sysfs_remove(tz);
unregister_tzd:
thermal_zone_device_unregister(tz->thermal_zone);
@@ -860,8 +877,7 @@ unregister_tzd:
static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
{
- sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
- sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
+ acpi_thermal_zone_sysfs_remove(tz);
thermal_zone_device_unregister(tz->thermal_zone);
tz->thermal_zone = NULL;
acpi_bus_detach_private_data(tz->device->handle);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a934bbc9dd37..bcc25d457581 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
+#include <linux/pnp.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
@@ -49,6 +50,10 @@ static void acpi_video_parse_cmdline(void)
acpi_backlight_cmdline = acpi_backlight_video;
if (!strcmp("native", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_native;
+ if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
+ acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
+ if (!strcmp("apple_gmux", acpi_video_backlight_string))
+ acpi_backlight_cmdline = acpi_backlight_apple_gmux;
if (!strcmp("none", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_none;
}
@@ -125,12 +130,6 @@ static int video_detect_force_native(const struct dmi_system_id *d)
return 0;
}
-static int video_detect_force_none(const struct dmi_system_id *d)
-{
- acpi_backlight_dmi = acpi_backlight_none;
- return 0;
-}
-
static const struct dmi_system_id video_detect_dmi_table[] = {
/*
* Models which should use the vendor backlight interface,
@@ -272,6 +271,29 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
/*
+ * Models which need acpi_video backlight control where the GPU drivers
+ * do not call acpi_video_register_backlight() because no internal panel
+ * is detected. Typically these are all-in-ones (monitors with builtin
+ * PC) where the panel connection shows up as regular DP instead of eDP.
+ */
+ {
+ .callback = video_detect_force_video,
+ /* Apple iMac14,1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
+ },
+ },
+ {
+ .callback = video_detect_force_video,
+ /* Apple iMac14,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
+ },
+ },
+
+ /*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
* when userspace is not handling brightness key events. Disable
@@ -429,7 +451,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
/* Lenovo Ideapad Z570 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
},
},
{
@@ -492,6 +514,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Acer Aspire 3830TG */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Acer Aspire 4810T */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Acer Aspire 5738z */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -579,6 +617,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Asus U46E */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Asus UX303UB */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -587,6 +633,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* HP EliteBook 8460p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* HP Pavilion g6-1d80nr / B4U19UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Samsung N150P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
@@ -678,33 +741,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
-
- /*
- * Desktops which falsely report a backlight and which our heuristics
- * for this do not catch.
- */
{
- .callback = video_detect_force_none,
- /* Dell OptiPlex 9020M */
+ .callback = video_detect_force_native,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
- },
- },
- {
- .callback = video_detect_force_none,
- /* GIGABYTE GB-BXBT-2807 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
- },
- },
- {
- .callback = video_detect_force_none,
- /* MSI MS-7721 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
},
},
{ },
@@ -729,10 +770,11 @@ static bool prefer_native_over_acpi_video(void)
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
*/
-static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
+ static bool apple_gmux_present;
static bool native_available;
static bool init_done;
static long video_caps;
@@ -746,12 +788,16 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
+ apple_gmux_present = apple_gmux_detect(NULL, NULL);
init_done = true;
}
if (native)
native_available = true;
mutex_unlock(&init_mutex);
+ if (auto_detect)
+ *auto_detect = false;
+
/*
* The below heuristics / detection steps are in order of descending
* precedence. The commandline takes precedence over anything else.
@@ -763,11 +809,14 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
+ if (auto_detect)
+ *auto_detect = true;
+
/* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
- if (apple_gmux_present())
+ if (apple_gmux_present)
return acpi_backlight_apple_gmux;
/* Use ACPI video if available, except when native should be preferred. */
@@ -782,15 +831,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
-
-enum acpi_backlight_type acpi_video_get_backlight_type(void)
-{
- return __acpi_video_get_backlight_type(false);
-}
-EXPORT_SYMBOL(acpi_video_get_backlight_type);
-
-bool acpi_video_backlight_use_native(void)
-{
- return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
-}
-EXPORT_SYMBOL(acpi_video_backlight_use_native);
+EXPORT_SYMBOL(__acpi_video_get_backlight_type);
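__acpi_video_get_backlight_type() above picks an interface in strict priority order: kernel command line, then DMI quirks, then auto-detection (nvidia-wmi-ec, apple-gmux, ACPI video, native, vendor fallback). A condensed user-space model of that ordering, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

enum backlight_type {
	BL_UNDEF, BL_VENDOR, BL_VIDEO, BL_NATIVE, BL_NVIDIA_WMI_EC, BL_APPLE_GMUX,
};

struct detect_state {
	enum backlight_type cmdline;	/* acpi_backlight= override */
	enum backlight_type dmi;	/* per-board quirk */
	bool nvidia_wmi_ec_present;
	bool apple_gmux_present;
	bool acpi_video_capable;
	bool native_available;
	bool prefer_native;
};

/* Descending precedence: command line, DMI quirk, then auto-detection. */
static enum backlight_type choose_backlight(const struct detect_state *s)
{
	if (s->cmdline != BL_UNDEF)
		return s->cmdline;
	if (s->dmi != BL_UNDEF)
		return s->dmi;
	if (s->nvidia_wmi_ec_present)
		return BL_NVIDIA_WMI_EC;
	if (s->apple_gmux_present)
		return BL_APPLE_GMUX;
	if (s->acpi_video_capable && !(s->prefer_native && s->native_available))
		return BL_VIDEO;
	if (s->native_available)
		return BL_NATIVE;
	return BL_VENDOR;	/* old hardware: vendor-specific firmware method */
}

int main(void)
{
	struct detect_state s = {
		.cmdline = BL_UNDEF, .dmi = BL_UNDEF,
		.acpi_video_capable = true, .native_available = true,
		.prefer_native = true,
	};

	printf("selected type: %d\n", choose_backlight(&s));	/* BL_NATIVE (3) */
	return 0;
}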
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index ed752cbbe636..c8025921c129 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -328,6 +328,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
u32 epid;
struct viot_endpoint *ep;
+ struct device *aliased_dev = data;
u32 domain_nr = pci_domain_nr(pdev->bus);
list_for_each_entry(ep, &viot_pci_ranges, list) {
@@ -338,7 +339,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
epid = ((domain_nr - ep->segment_start) << 16) +
dev_id - ep->bdf_start + ep->endpoint_id;
- return viot_dev_iommu_init(&pdev->dev, ep->viommu,
+ return viot_dev_iommu_init(aliased_dev, ep->viommu,
epid);
}
}
@@ -372,7 +373,7 @@ int viot_iommu_configure(struct device *dev)
{
if (dev_is_pci(dev))
return pci_for_each_dma_alias(to_pci_dev(dev),
- viot_pci_dev_iommu_init, NULL);
+ viot_pci_dev_iommu_init, dev);
else if (dev_is_platform(dev))
return viot_mmio_dev_iommu_init(to_platform_device(dev));
return -ENODEV;
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index 8812ecd03d55..45d0f16f374f 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -71,13 +71,16 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
if ( key->type != ACPI_TYPE_STRING ||
(val->type != ACPI_TYPE_INTEGER &&
- val->type != ACPI_TYPE_BUFFER))
+ val->type != ACPI_TYPE_BUFFER &&
+ val->type != ACPI_TYPE_STRING))
continue; /* skip invalid properties */
__set_bit(i, valid);
newsize += key->string.length + 1;
if ( val->type == ACPI_TYPE_BUFFER)
newsize += val->buffer.length;
+ else if (val->type == ACPI_TYPE_STRING)
+ newsize += val->string.length + 1;
}
numvalid = bitmap_weight(valid, numprops);
@@ -119,6 +122,12 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
newprops[v].type = val->type;
if (val->type == ACPI_TYPE_INTEGER) {
newprops[v].integer.value = val->integer.value;
+ } else if (val->type == ACPI_TYPE_STRING) {
+ newprops[v].string.length = val->string.length;
+ newprops[v].string.pointer = free_space;
+ memcpy(free_space, val->string.pointer,
+ val->string.length);
+ free_space += val->string.length + 1;
} else {
newprops[v].buffer.length = val->buffer.length;
newprops[v].buffer.pointer = free_space;
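The apple.c hunk above extends the existing two-pass scheme to string values: a first pass measures every value to size a single allocation, a second pass copies each string into the tail of that allocation while advancing a free-space cursor. A compact, hypothetical illustration of the pattern (pack_strings() is not a kernel function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pack 'count' strings into a single allocation and return pointers to them. */
static char **pack_strings(const char *const *src, size_t count)
{
	size_t payload = 0, i;
	char **out;
	char *free_space;

	/* Pass 1: measure, including one NUL terminator per string. */
	for (i = 0; i < count; i++)
		payload += strlen(src[i]) + 1;

	out = malloc(count * sizeof(*out) + payload);
	if (!out)
		return NULL;

	/* Pass 2: copy each string and advance the cursor past its NUL. */
	free_space = (char *)(out + count);
	for (i = 0; i < count; i++) {
		size_t len = strlen(src[i]);

		memcpy(free_space, src[i], len + 1);
		out[i] = free_space;
		free_space += len + 1;
	}
	return out;
}

int main(void)
{
	const char *props[] = { "device-name", "firmware-rev", "panel-id" };
	char **packed = pack_strings(props, 3);

	if (packed) {
		printf("%s / %s / %s\n", packed[0], packed[1], packed[2]);
		free(packed);	/* one allocation frees everything */
	}
	return 0;
}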
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 5350c73564b6..e499c60c4579 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
-static bool prefer_microsoft_dsm_guid __read_mostly;
-module_param(prefer_microsoft_dsm_guid, bool, 0644);
-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
-
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -369,27 +365,15 @@ out:
}
struct amd_lps0_hid_device_data {
- const unsigned int rev_id;
const bool check_off_by_one;
- const bool prefer_amd_guid;
};
static const struct amd_lps0_hid_device_data amd_picasso = {
- .rev_id = 0,
.check_off_by_one = true,
- .prefer_amd_guid = false,
};
static const struct amd_lps0_hid_device_data amd_cezanne = {
- .rev_id = 0,
- .check_off_by_one = false,
- .prefer_amd_guid = false,
-};
-
-static const struct amd_lps0_hid_device_data amd_rembrandt = {
- .rev_id = 2,
.check_off_by_one = false,
- .prefer_amd_guid = true,
};
static const struct acpi_device_id amd_hid_ids[] = {
@@ -397,71 +381,6 @@ static const struct acpi_device_id amd_hid_ids[] = {
{"AMD0005", (kernel_ulong_t)&amd_picasso, },
{"AMDI0005", (kernel_ulong_t)&amd_picasso, },
{"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
- {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
- {}
-};
-
-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
-{
- pr_debug("Preferring Microsoft GUID.\n");
- prefer_microsoft_dsm_guid = true;
- return 0;
-}
-
-static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
- {
- /*
- * ASUS TUF Gaming A17 FA707RE
- * https://bugzilla.kernel.org/show_bug.cgi?id=216101
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
- },
- },
- {
- /* ASUS ROG Zephyrus G14 (2022) */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
- },
- },
- {
- /*
- * Lenovo Yoga Slim 7 Pro X 14ARH7
- * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
- * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82"),
- },
- },
- {
- /*
- * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
- },
- },
- {
- /*
- * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
- },
- },
{}
};
@@ -484,16 +403,14 @@ static int lps0_device_attach(struct acpi_device *adev,
if (dev_id->id[0])
data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
else
- data = &amd_rembrandt;
- rev_id = data->rev_id;
+ data = &amd_cezanne;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
- } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
- !prefer_microsoft_dsm_guid) {
+ } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
@@ -501,8 +418,7 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- if (!prefer_microsoft_dsm_guid)
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
@@ -647,7 +563,6 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
void __init acpi_s2idle_setup(void)
{
- dmi_check_system(s2idle_dmi_table);
acpi_scan_add_handler(&lps0_handler);
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 4e816bb402f6..9c2d6f35f88a 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -143,6 +143,16 @@ static const struct override_status_id override_status_ids[] = {
DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
}),
+
+ /*
+ * The LSM303D on the Lenovo Yoga Tablet 2 series is present
+ * as both ACCL0001 and MAGN0001. As we can only ever register an
+ * i2c client for one of them, ignore MAGN0001.
+ */
+ NOT_PRESENT_ENTRY_HID("MAGN0001", "1", ATOM_SILVERMONT, {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"),
+ }),
};
bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status)
@@ -200,39 +210,29 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* a hardcoded allowlist for D3 support, which was used for these platforms.
*
* This allows quirking on Linux in a similar fashion.
+ *
+ * Cezanne systems shouldn't *normally* need this as the BIOS includes
+ * StorageD3Enable. But for two reasons we have added it.
+ * 1) The BIOS on a number of Dell systems uses the same _ADR value for
+ * the ACPI nodes GPP1.DEV0 and GPP1.NVME, and GPP1.NVME is needed to
+ * get the StorageD3Enable node set properly.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216773
+ * https://bugzilla.kernel.org/show_bug.cgi?id=217003
+ * 2) On at least one HP system StorageD3Enable is missing on the second
+ * NVME disk in the system.
*/
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- {}
-};
-
-static const struct dmi_system_id force_storage_d3_dmi[] = {
- {
- /*
- * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
- * but .NVME is needed to get StorageD3Enable node
- * https://bugzilla.kernel.org/show_bug.cgi?id=216440
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
- }
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"),
- }
- },
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
{}
};
bool force_storage_d3(void)
{
- const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
-
- return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
+ return x86_match_cpu(storage_d3_cpu_ids);
}
/*
@@ -262,6 +262,7 @@ bool force_storage_d3(void)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
@@ -291,13 +292,35 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
+ /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_UART1_TTY_UART2_SKIP |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Lenovo Yoga Book X90F/L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
.matches = {
@@ -305,7 +328,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Lenovo Yoga Tablet 2 1050F/L */
@@ -347,7 +371,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Whitelabel (sold as various brands) TM800A550L */
@@ -424,6 +449,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
return 0;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
+bool acpi_quirk_skip_gpio_event_handlers(void)
+{
+ const struct dmi_system_id *dmi_id;
+ long quirks;
+
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
+ return false;
+
+ quirks = (unsigned long)dmi_id->driver_data;
+ return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
#endif
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
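The helper exported above is meant to be checked by the ACPI GPIO event code before it registers _AEI event handlers; a minimal caller sketch (the function name is invented for illustration, only acpi_quirk_skip_gpio_event_handlers() comes from this diff):

	#include <linux/acpi.h>

	static void example_acpi_gpio_setup_events(struct acpi_device *adev)
	{
		/* Skip declaring ACPI GPIO event handlers on quirked tablets */
		if (acpi_quirk_skip_gpio_event_handlers())
			return;

		/* ... normal event handler registration continues here ... */
	}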
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index ff7454a38058..ce88af9eb562 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -235,9 +235,9 @@ static int amba_match(struct device *dev, struct device_driver *drv)
return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
-static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int amba_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct amba_device *pcdev = to_amba_device(dev);
+ const struct amba_device *pcdev = to_amba_device(dev);
int retval = 0;
retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 0b2c20fddb7c..c0e8b765522d 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -285,5 +285,4 @@ module_platform_driver(tegra_ahb_driver);
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_DESCRIPTION("Tegra AHB driver");
-MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 880224ec6abb..fb56bfc45096 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -277,11 +277,11 @@ _binder_proc_lock(struct binder_proc *proc, int line)
/**
* binder_proc_unlock() - Release spinlock for given binder_proc
- * @proc: struct binder_proc to acquire
+ * @proc: struct binder_proc to acquire
*
* Release lock acquired via binder_proc_lock()
*/
-#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
__releases(&proc->outer_lock)
@@ -378,7 +378,7 @@ _binder_node_inner_lock(struct binder_node *node, int line)
}
/**
- * binder_node_unlock() - Release node and inner locks
+ * binder_node_inner_unlock() - Release node and inner locks
* @node: struct binder_node to acquire
*
* Release lock acquired via binder_node_lock()
@@ -1194,13 +1194,13 @@ static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
}
/**
- * binder_dec_ref() - dec the ref for given handle
+ * binder_dec_ref_olocked() - dec the ref for given handle
* @ref: ref to be decremented
* @strong: if true, strong decrement, else weak
*
* Decrement the ref.
*
- * Return: true if ref is cleaned up and ready to be freed
+ * Return: %true if ref is cleaned up and ready to be freed.
*/
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
@@ -2728,7 +2728,10 @@ binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
*
* Return: 0 if the transaction was successfully queued
* BR_DEAD_REPLY if the target process or thread is dead
- * BR_FROZEN_REPLY if the target process or thread is frozen
+ * BR_FROZEN_REPLY if the target process or thread is frozen and
+ * the sync transaction was rejected
+ * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
+ * and the async transaction was successfully queued
*/
static int binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
@@ -2738,6 +2741,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
struct binder_transaction *t_outdated = NULL;
+ bool frozen = false;
BUG_ON(!node);
binder_node_lock(node);
@@ -2751,15 +2755,16 @@ static int binder_proc_transaction(struct binder_transaction *t,
binder_inner_proc_lock(proc);
if (proc->is_frozen) {
+ frozen = true;
proc->sync_recv |= !oneway;
proc->async_recv |= oneway;
}
- if ((proc->is_frozen && !oneway) || proc->is_dead ||
+ if ((frozen && !oneway) || proc->is_dead ||
(thread && thread->is_dead)) {
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
+ return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
}
if (!thread && !pending_async)
@@ -2770,7 +2775,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
- if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
+ if ((t->flags & TF_UPDATE_TXN) && frozen) {
t_outdated = binder_find_outdated_transaction_ilocked(t,
&node->async_todo);
if (t_outdated) {
@@ -2807,14 +2812,17 @@ static int binder_proc_transaction(struct binder_transaction *t,
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
+ if (oneway && frozen)
+ return BR_TRANSACTION_PENDING_FROZEN;
+
return 0;
}
/**
* binder_get_node_refs_for_txn() - Get required refs on node for txn
* @node: struct binder_node for which to get refs
- * @proc: returns @node->proc if valid
- * @error: if no @proc then returns BR_DEAD_REPLY
+ * @procp: returns @node->proc if valid
+ * @error: if no @procp then returns BR_DEAD_REPLY
*
* User-space normally keeps the node alive when creating a transaction
* since it has a reference to the target. The local strong ref keeps it
@@ -2828,8 +2836,8 @@ static int binder_proc_transaction(struct binder_transaction *t,
* constructing the transaction, so we take that here as well.
*
* Return: The target_node with refs taken or NULL if no @node->proc is NULL.
- * Also sets @proc if valid. If the @node->proc is NULL indicating that the
- * target proc has died, @error is set to BR_DEAD_REPLY
+ * Also sets @procp if valid. If the @node->proc is NULL indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
*/
static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
@@ -3607,9 +3615,17 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- binder_enqueue_thread_work(thread, tcomplete);
return_error = binder_proc_transaction(t, target_proc, NULL);
- if (return_error)
+ /*
+ * Let the caller know when an async transaction reaches a frozen
+ * process and is put in a pending queue, waiting for the target
+ * process to be unfrozen.
+ */
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_enqueue_thread_work(thread, tcomplete);
+ if (return_error &&
+ return_error != BR_TRANSACTION_PENDING_FROZEN)
goto err_dead_proc_or_thread;
}
if (target_thread)
@@ -4440,10 +4456,13 @@ retry:
binder_stat_br(proc, thread, cmd);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE:
+ case BINDER_WORK_TRANSACTION_PENDING:
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
if (proc->oneway_spam_detection_enabled &&
w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
cmd = BR_ONEWAY_SPAM_SUSPECT;
+ else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
+ cmd = BR_TRANSACTION_PENDING_FROZEN;
else
cmd = BR_TRANSACTION_COMPLETE;
binder_inner_proc_unlock(proc);
@@ -5006,20 +5025,14 @@ static __poll_t binder_poll(struct file *filp,
return 0;
}
-static int binder_ioctl_write_read(struct file *filp,
- unsigned int cmd, unsigned long arg,
+static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
- unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (size != sizeof(struct binder_write_read)) {
- ret = -EINVAL;
- goto out;
- }
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
@@ -5296,7 +5309,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
- unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
@@ -5318,7 +5330,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case BINDER_WRITE_READ:
- ret = binder_ioctl_write_read(filp, cmd, arg, thread);
+ ret = binder_ioctl_write_read(filp, arg, thread);
if (ret)
goto err;
break;
@@ -5361,10 +5373,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
- if (size != sizeof(struct binder_version)) {
- ret = -EINVAL;
- goto err;
- }
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
@@ -5572,8 +5580,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
return -EPERM;
}
- vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@@ -6170,6 +6177,7 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
};
static const char * const binder_command_strings[] = {
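On the userspace side, a sender draining its binder read buffer now also has to accept the pending-frozen completion for one-way calls; a rough sketch, with the surrounding loop and variable names assumed:

	uint32_t cmd = *(uint32_t *)ptr;	/* next BR_* code from the read buffer */

	switch (cmd) {
	case BR_TRANSACTION_COMPLETE:
		/* target was not frozen; async transaction handed over as before */
		break;
	case BR_TRANSACTION_PENDING_FROZEN:
		/*
		 * New in this series: the target process is frozen, the async
		 * transaction stays queued and is delivered once the process
		 * is unfrozen. Not an error for the sender.
		 */
		break;
	default:
		/* remaining BR_* codes handled as before */
		break;
	}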
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 4ad42b0f75cd..55a3c3c2409f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1019,7 +1019,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(vma, page_addr, PAGE_SIZE);
+ zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
trace_binder_unmap_user_end(alloc, index);
}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index abe19d88c6ec..28ef5b3704b1 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -133,7 +133,7 @@ enum binder_stat_types {
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
+ atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1];
atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
@@ -152,6 +152,7 @@ struct binder_work {
enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_TRANSACTION_PENDING,
BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 09b2ce7e4c34..76e7d6676657 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -222,14 +222,14 @@ err:
}
/**
- * binderfs_ctl_ioctl - handle binder device node allocation requests
+ * binder_ctl_ioctl - handle binder device node allocation requests
*
* The request handler for the binder-control device. All requests operate on
* the binderfs mount the binder-control device resides in:
* - BINDER_CTL_ADD
* Allocate a new binder device.
*
- * Return: 0 on success, negative errno on failure
+ * Return: %0 on success, negative errno on failure.
*/
static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -352,7 +352,7 @@ static inline bool is_binderfs_control_device(const struct dentry *dentry)
return info->control_dentry == dentry;
}
-static int binderfs_rename(struct user_namespace *mnt_userns,
+static int binderfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -361,7 +361,7 @@ static int binderfs_rename(struct user_namespace *mnt_userns,
is_binderfs_control_device(new_dentry))
return -EPERM;
- return simple_rename(&init_user_ns, old_dir, old_dentry, new_dir,
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
new_dentry, flags);
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index eceaec33af65..42b51c9812a0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -276,6 +276,7 @@ config AHCI_XGENE
config AHCI_QORIQ
tristate "Freescale QorIQ AHCI SATA support"
depends on OF
+ depends on SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
select SATA_HOST
help
This option enables support for the Freescale QorIQ AHCI SoC's
@@ -640,6 +641,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
+ depends on !UML
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -1082,15 +1084,6 @@ config PATA_OPTI
If unsure, say N.
-config PATA_PALMLD
- tristate "Palm LifeDrive PATA support"
- depends on MACH_PALMLD
- help
- This option enables support for Palm LifeDrive's internal ATA
- port via the new ATA layer.
-
- If unsure, say N.
-
config PATA_PCMCIA
tristate "PCMCIA PATA support"
depends on PCMCIA
@@ -1144,16 +1137,6 @@ config PATA_RZ1000
If unsure, say N.
-config PATA_SAMSUNG_CF
- tristate "Samsung SoC PATA support"
- depends on SAMSUNG_DEV_IDE || COMPILE_TEST
- select PATA_TIMINGS
- help
- This option enables basic support for Samsung's S3C/S5P board
- PATA controllers via the new ATA layer
-
- If unsure, say N.
-
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
depends on ISA
@@ -1162,6 +1145,20 @@ config PATA_WINBOND_VLB
Support for the Winbond W83759A controller on Vesa Local Bus
systems.
+config PATA_PARPORT
+ tristate "Parallel port IDE device support"
+ depends on PARPORT_PC
+ help
+ There are many external CD-ROM and disk devices that connect through
+ your computer's parallel port. Most of them are actually IDE devices
+ using a parallel port IDE adapter. This option enables the
+ PATA_PARPORT subsystem which contains drivers for many of these
+ external drives.
+ Read <file:Documentation/admin-guide/blockdev/paride.rst> for more
+ information.
+
+source "drivers/ata/pata_parport/Kconfig"
+
comment "Generic fallback / legacy drivers"
config PATA_ACPI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d2e36d367274..20e6645ab737 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -105,15 +105,15 @@ obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
obj-$(CONFIG_PATA_OPTI) += pata_opti.o
obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
-obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
-obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
obj-$(CONFIG_PATA_PXA) += pata_pxa.o
+obj-$(CONFIG_PATA_PARPORT) += pata_parport/
+
# Should be last but two libata driver
obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
# Should be last but one libata driver
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 7654a40c12b4..547f56341705 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -57,7 +57,7 @@ struct acard_sg {
};
static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
-static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -66,7 +66,7 @@ static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg
static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
#endif
-static struct scsi_host_template acard_ahci_sht = {
+static const struct scsi_host_template acard_ahci_sht = {
AHCI_SHT("acard-ahci"),
};
@@ -248,7 +248,7 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
return AC_ERR_OK;
}
-static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
@@ -263,13 +263,11 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
* Setup FIS.
*/
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
- !(qc->flags & ATA_QCFLAG_FAILED)) {
+ !(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
-
- return true;
}
static int acard_ahci_port_start(struct ata_port *ap)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 0cfd0ec6229b..addba109406b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -83,6 +83,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -100,7 +101,7 @@ static int ahci_pci_device_resume(struct device *dev);
#endif
#endif /* CONFIG_PM */
-static struct scsi_host_template ahci_sht = {
+static const struct scsi_host_template ahci_sht = {
AHCI_SHT("ahci"),
};
@@ -676,6 +677,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ /*
+ * If platform firmware failed to enable ports, try to enable
+ * them here.
+ */
+ ahci_intel_pcs_quirk(pdev, hpriv);
+
+ return 0;
+}
+
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@@ -870,7 +890,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@@ -906,7 +926,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
@@ -1784,12 +1804,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
- /*
- * If platform firmware failed to enable ports, try to enable
- * them here.
- */
- ahci_intel_pcs_quirk(pdev, hpriv);
-
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@@ -1899,7 +1913,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ff8e6ae1c636..4bae95b06ae3 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -430,7 +430,7 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
void ahci_print_info(struct ata_host *host, const char *scc_s);
-int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
+int ahci_host_activate(struct ata_host *host, const struct scsi_host_template *sht);
void ahci_error_handler(struct ata_port *ap);
u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 6f216eb25610..4e3dc2b6d67f 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -417,7 +417,7 @@ out_disable_clks:
return ret;
}
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index cb24ecf36faf..bc027468decb 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -185,7 +185,7 @@ static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
}
}
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index dc8a019b8340..ca0924dc5bd2 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -153,7 +153,7 @@ static const struct ata_port_info ahci_da850_port_info = {
.port_ops = &ahci_da850_port_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index d26efcd20f64..b08547b877a1 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -134,7 +134,7 @@ static const struct ata_port_info ahci_dm816_port_info = {
.port_ops = &ahci_dm816_port_ops,
};
-static struct scsi_host_template ahci_dm816_platform_sht = {
+static const struct scsi_host_template ahci_dm816_platform_sht = {
AHCI_SHT(AHCI_DM816_DRV_NAME),
};
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index 8fb66860db31..4bfbb09cdc02 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -398,7 +398,7 @@ static const struct ata_port_info ahci_dwc_port_info = {
.port_ops = &ahci_dwc_port_ops,
};
-static struct scsi_host_template ahci_dwc_scsi_info = {
+static const struct scsi_host_template ahci_dwc_scsi_info = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index a950767f7948..3a8c248e7c0e 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -418,7 +418,7 @@ static int __sata_ahci_read_temperature(void *dev, int *temp)
static int sata_ahci_read_temperature(struct thermal_zone_device *tz, int *temp)
{
- return __sata_ahci_read_temperature(tz->devdata, temp);
+ return __sata_ahci_read_temperature(thermal_zone_device_priv(tz), temp);
}
static ssize_t sata_ahci_show_temp(struct device *dev,
@@ -979,7 +979,7 @@ static u32 imx_ahci_parse_props(struct device *dev,
return reg_value;
}
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index c056378e3e72..0bf83a297091 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -37,7 +37,7 @@ static const struct ata_port_info ahci_port_info = {
.port_ops = &ahci_platform_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
@@ -106,7 +106,7 @@ static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
struct device_node *np = dev->of_node;
/* enable SATA function if needed */
- if (of_find_property(np, "mediatek,phy-mode", NULL)) {
+ if (of_property_present(np, "mediatek,phy-mode")) {
plat->mode = syscon_regmap_lookup_by_phandle(
np, "mediatek,phy-mode");
if (IS_ERR(plat->mode)) {
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 22ecc4f3ae79..596cf017f427 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -178,7 +178,7 @@ static const struct ata_port_info ahci_mvebu_port_info = {
.port_ops = &ahci_platform_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
index b9460b91288f..5021ab3ede49 100644
--- a/drivers/ata/ahci_octeon.c
+++ b/drivers/ata/ahci_octeon.c
@@ -73,11 +73,6 @@ static int ahci_octeon_probe(struct platform_device *pdev)
return 0;
}
-static int ahci_octeon_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id octeon_ahci_match[] = {
{ .compatible = "cavium,octeon-7130-sata-uctl", },
{ /* sentinel */ }
@@ -86,7 +81,6 @@ MODULE_DEVICE_TABLE(of, octeon_ahci_match);
static struct platform_driver ahci_octeon_driver = {
.probe = ahci_octeon_probe,
- .remove = ahci_octeon_remove,
.driver = {
.name = "octeon-ahci",
.of_match_table = octeon_ahci_match,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 8f5572a9f8f1..299ee686ac49 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -36,7 +36,7 @@ static const struct ata_port_info ahci_port_info_nolpm = {
.port_ops = &ahci_platform_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 9cf9bf36a874..0ba764d283c8 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -159,7 +159,7 @@ static const struct ata_port_info ahci_qoriq_port_info = {
.port_ops = &ahci_qoriq_ops,
};
-static struct scsi_host_template ahci_qoriq_sht = {
+static const struct scsi_host_template ahci_qoriq_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index ced12705ed9d..9eda7bbd2151 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -72,7 +72,7 @@ static const struct ata_port_info ahci_port_seattle_info = {
.port_ops = &ahci_seattle_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 8607b68eee53..f2c1edb36986 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -138,7 +138,7 @@ static const struct ata_port_info st_ahci_port_info = {
.port_ops = &st_ahci_port_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index c7273c1cb0c7..076c12b4ba08 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -206,7 +206,7 @@ static const struct ata_port_info ahci_sunxi_port_info = {
.port_ops = &ahci_platform_ops,
};
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 4fb94db1217d..8e5e2b359f2d 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -506,7 +506,7 @@ static const struct of_device_id tegra_ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 1e08704d5117..83f5ff54ef5b 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -710,7 +710,7 @@ static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
}
-static struct scsi_host_template ahci_platform_sht = {
+static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 20a32e4d501d..2f57ec00ab82 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -95,7 +95,7 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
return 0;
}
-static struct scsi_host_template generic_sht = {
+static const struct scsi_host_template generic_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ade5e894563b..ec3c5bd1f813 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1059,7 +1059,7 @@ static u8 piix_vmw_bmdma_status(struct ata_port *ap)
return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
}
-static struct scsi_host_template piix_sht = {
+static const struct scsi_host_template piix_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -1095,7 +1095,7 @@ static struct attribute *piix_sidpr_shost_attrs[] = {
ATTRIBUTE_GROUPS(piix_sidpr_shost);
-static struct scsi_host_template piix_sidpr_sht = {
+static const struct scsi_host_template piix_sidpr_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.shost_groups = piix_sidpr_shost_groups,
};
@@ -1645,7 +1645,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
- struct scsi_host_template *sht = &piix_sht;
+ const struct scsi_host_template *sht = &piix_sht;
unsigned long port_flags;
struct ata_host *host;
struct piix_host_priv *hpriv;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 29acc35bf4a6..9c2cb6cbea76 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -55,7 +55,8 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
@@ -157,6 +158,7 @@ struct ata_port_operations ahci_ops = {
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.qc_fill_rtf = ahci_qc_fill_rtf,
+ .qc_ncq_fill_rtf = ahci_qc_ncq_fill_rtf,
.freeze = ahci_freeze,
.thaw = ahci_thaw,
@@ -1847,18 +1849,47 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_port_abort(ap);
}
-static void ahci_handle_port_interrupt(struct ata_port *ap,
- void __iomem *port_mmio, u32 status)
+static void ahci_qc_complete(struct ata_port *ap, void __iomem *port_mmio)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
u32 qc_active = 0;
int rc;
+ /*
+ * pp->active_link is not reliable once FBS is enabled, both
+ * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+ * NCQ and non-NCQ commands may be in flight at the same time.
+ */
+ if (pp->fbs_enabled) {
+ if (ap->qc_active) {
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+ }
+ } else {
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+ }
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+ if (unlikely(rc < 0 && !(ap->pflags & ATA_PFLAG_RESETTING))) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
+
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+ void __iomem *port_mmio, u32 status)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
/* ignore BAD_PMP while resetting */
- if (unlikely(resetting))
+ if (unlikely(ap->pflags & ATA_PFLAG_RESETTING))
status &= ~PORT_IRQ_BAD_PMP;
if (sata_lpm_ignore_phy_events(&ap->link)) {
@@ -1867,6 +1898,12 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
}
if (unlikely(status & PORT_IRQ_ERROR)) {
+ /*
+ * Before getting the error notification, we may have
+ * received SDB FISes notifying successful completions.
+ * Handle these first and then handle the error.
+ */
+ ahci_qc_complete(ap, port_mmio);
ahci_error_intr(ap, status);
return;
}
@@ -1903,32 +1940,8 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
}
}
- /* pp->active_link is not reliable once FBS is enabled, both
- * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
- * NCQ and non-NCQ commands may be in flight at the same time.
- */
- if (pp->fbs_enabled) {
- if (ap->qc_active) {
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
- }
- } else {
- /* pp->active_link is valid iff any command is in flight */
- if (ap->qc_active && pp->active_link->sactive)
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- else
- qc_active = readl(port_mmio + PORT_CMD_ISSUE);
- }
-
-
- rc = ata_qc_complete_multiple(ap, qc_active);
-
- /* while resetting, invalid completions are expected */
- if (unlikely(rc < 0 && !resetting)) {
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- }
+ /* Handle completed commands */
+ ahci_qc_complete(ap, port_mmio);
}
static void ahci_port_intr(struct ata_port *ap)
@@ -2053,11 +2066,18 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ahci_qc_issue);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
+ /*
+ * rtf may already be filled (e.g. for successful NCQ commands).
+ * If that is the case, we have nothing to do.
+ */
+ if (qc->flags & ATA_QCFLAG_RTF_FILLED)
+ return;
+
if (pp->fbs_enabled)
rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
@@ -2068,9 +2088,12 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
* Setup FIS.
*/
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
- !(qc->flags & ATA_QCFLAG_FAILED)) {
+ !(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ return;
+ }
/*
* For NCQ commands, we never get a D2H FIS, so reading the D2H Register
@@ -2080,15 +2103,85 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
* instead. However, the SDB FIS does not contain the LBA, so we can't
* use the ata_tf_from_fis() helper.
*/
- } else if (ata_is_ncq(qc->tf.protocol)) {
+ if (ata_is_ncq(qc->tf.protocol)) {
const u8 *fis = rx_fis + RX_FIS_SDB;
+ /*
+ * Successful NCQ commands have been filled already.
+ * A failed NCQ command will read the status here.
+ * (Note that a failed NCQ command will get a more specific
+ * error when reading the NCQ Command Error log.)
+ */
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
- } else
- ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ return;
+ }
+
+ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+}
+
+static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ const u8 *fis;
+
+ /* No outstanding commands. */
+ if (!ap->qc_active)
+ return;
+
+ /*
+ * FBS not enabled, so read status and error once, since they are shared
+ * for all QCs.
+ */
+ if (!pp->fbs_enabled) {
+ u8 status, error;
+
+ /* No outstanding NCQ commands. */
+ if (!pp->active_link->sactive)
+ return;
+
+ fis = pp->rx_fis + RX_FIS_SDB;
+ status = fis[2];
+ error = fis[3];
- return true;
+ while (done_mask) {
+ struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs64(done_mask);
+
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ qc->result_tf.status = status;
+ qc->result_tf.error = error;
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ }
+ done_mask &= ~(1ULL << tag);
+ }
+
+ return;
+ }
+
+ /*
+ * FBS enabled, so read the status and error for each QC, since the QCs
+ * can belong to different PMP links. (Each PMP link has its own FIS
+ * Receive Area.)
+ */
+ while (done_mask) {
+ struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs64(done_mask);
+
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ fis = pp->rx_fis;
+ fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+ fis += RX_FIS_SDB;
+ qc->result_tf.status = fis[2];
+ qc->result_tf.error = fis[3];
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ }
+ done_mask &= ~(1ULL << tag);
+ }
}
static void ahci_freeze(struct ata_port *ap)
@@ -2138,7 +2231,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
ahci_kick_engine(ap);
}
@@ -2599,7 +2692,7 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
EXPORT_SYMBOL_GPL(ahci_set_em_messages);
static int ahci_host_activate_multi_irqs(struct ata_host *host,
- struct scsi_host_template *sht)
+ const struct scsi_host_template *sht)
{
struct ahci_host_priv *hpriv = host->private_data;
int i, rc;
@@ -2643,7 +2736,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
+int ahci_host_activate(struct ata_host *host, const struct scsi_host_template *sht)
{
struct ahci_host_priv *hpriv = host->private_data;
int irq = hpriv->irq;
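The new qc_ncq_fill_rtf port operation is optional for low-level drivers; one that wants per-tag NCQ result taskfiles would wire it up next to the existing callback, roughly as below (the example_* names are placeholders, sata_port_ops is the generic libata base):

	static void example_qc_fill_rtf(struct ata_queued_cmd *qc);		/* now returns void */
	static void example_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask);

	static struct ata_port_operations example_port_ops = {
		.inherits		= &sata_port_ops,
		.qc_fill_rtf		= example_qc_fill_rtf,
		.qc_ncq_fill_rtf	= example_qc_ncq_fill_rtf,	/* called from ata_qc_complete_multiple() */
	};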
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index b9e336bacf17..9a8d43f54adc 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -363,7 +363,7 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
switch (rc) {
case -ENOSYS:
/* No PHY support. Check if PHY is required. */
- if (of_find_property(node, "phys", NULL)) {
+ if (of_property_present(node, "phys")) {
dev_err(dev,
"couldn't get PHY in node %pOFn: ENOSYS\n",
node);
@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
- struct scsi_host_template *sht)
+ const struct scsi_host_template *sht)
{
struct device *dev = &pdev->dev;
struct ata_port_info pi = *pi_template;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 884ae73b11ea..8bf612bdd61a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -552,7 +552,7 @@ static const u8 ata_rw_cmds[] = {
0,
0,
0,
- ATA_CMD_WRITE_MULTI_FUA_EXT,
+ 0,
/* pio */
ATA_CMD_PIO_READ,
ATA_CMD_PIO_WRITE,
@@ -574,17 +574,18 @@ static const u8 ata_rw_cmds[] = {
};
/**
- * ata_rwcmd_protocol - set taskfile r/w commands and protocol
- * @tf: command to examine and configure
- * @dev: device tf belongs to
+ * ata_set_rwcmd_protocol - set taskfile r/w command and protocol
+ * @dev: target device for the taskfile
+ * @tf: taskfile to examine and configure
*
- * Examine the device configuration and tf->flags to calculate
- * the proper read/write commands and protocol to use.
+ * Examine the device configuration and tf->flags to determine
+ * the proper read/write command and protocol to use for @tf.
*
* LOCKING:
* caller.
*/
-static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
+static bool ata_set_rwcmd_protocol(struct ata_device *dev,
+ struct ata_taskfile *tf)
{
u8 cmd;
@@ -607,11 +608,12 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
}
cmd = ata_rw_cmds[index + fua + lba48 + write];
- if (cmd) {
- tf->command = cmd;
- return 0;
- }
- return -1;
+ if (!cmd)
+ return false;
+
+ tf->command = cmd;
+
+ return true;
}
/**
@@ -725,7 +727,8 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
} else if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
- if (lba_28_ok(block, n_block)) {
+ /* We need LBA48 for FUA writes */
+ if (!(tf->flags & ATA_TFLAG_FUA) && lba_28_ok(block, n_block)) {
/* use LBA28 */
tf->device |= (block >> 24) & 0xf;
} else if (lba_48_ok(block, n_block)) {
@@ -740,11 +743,12 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
- } else
+ } else {
/* request too large even for LBA48 */
return -ERANGE;
+ }
- if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
return -EINVAL;
tf->nsect = n_block & 0xff;
@@ -762,7 +766,7 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
if (!lba_28_ok(block, n_block))
return -ERANGE;
- if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
return -EINVAL;
/* Convert LBA to CHS */
@@ -1590,7 +1594,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->ops->post_internal_cmd(qc);
/* perform minimal error analysis */
- if (qc->flags & ATA_QCFLAG_FAILED) {
+ if (qc->flags & ATA_QCFLAG_EH) {
if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
@@ -2420,6 +2424,28 @@ static void ata_dev_config_chs(struct ata_device *dev)
dev->heads, dev->sectors);
}
+static void ata_dev_config_fua(struct ata_device *dev)
+{
+ /* Ignore FUA support if its use is disabled globally */
+ if (!libata_fua)
+ goto nofua;
+
+ /* Ignore devices without support for WRITE DMA FUA EXT */
+ if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
+ goto nofua;
+
+ /* Ignore known bad devices and devices that lack NCQ support */
+ if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
+ goto nofua;
+
+ dev->flags |= ATA_DFLAG_FUA;
+
+ return;
+
+nofua:
+ dev->flags &= ~ATA_DFLAG_FUA;
+}
+
static void ata_dev_config_devslp(struct ata_device *dev)
{
u8 *sata_setting = dev->link->ap->sector_buf;
@@ -2508,7 +2534,8 @@ static void ata_dev_print_features(struct ata_device *dev)
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s%s\n",
+ dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
@@ -2669,6 +2696,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_chs(dev);
}
+ ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
@@ -3109,7 +3137,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
- else
+ else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */
@@ -4045,6 +4073,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI },
+ { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NO_NCQ_ON_ATI, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
@@ -4103,6 +4134,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+ /* Buggy FUA */
+ { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
+ { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
+ { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
+ { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
+
/* End Marker */
{ }
};
@@ -4683,10 +4720,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
/* XXX: New EH and old EH use different mechanisms to
* synchronize EH with regular execution path.
*
- * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
+ * In new EH, a qc owned by EH is marked with ATA_QCFLAG_EH.
* Normal execution path is responsible for not accessing a
- * failed qc. libata core enforces the rule by returning NULL
- * from ata_qc_from_tag() for failed qcs.
+ * qc owned by EH. libata core enforces the rule by returning NULL
+ * from ata_qc_from_tag() for qcs owned by EH.
*
* Old EH depends on ata_qc_complete() nullifying completion
* requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
@@ -4698,7 +4735,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_eh_info *ehi = &dev->link->eh_info;
if (unlikely(qc->err_mask))
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
/*
* Finish internal commands without any further processing
@@ -4715,7 +4752,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* Non-internal qc has failed. Fill the result TF and
* summon EH.
*/
- if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
+ if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
fill_result_tf(qc);
trace_ata_qc_complete_failed(qc);
ata_qc_schedule_eh(qc);
@@ -5738,7 +5775,7 @@ static void async_port_probe(void *data, async_cookie_t cookie)
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
{
int i, rc;
@@ -5846,7 +5883,7 @@ EXPORT_SYMBOL_GPL(ata_host_register);
*/
int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
- struct scsi_host_template *sht)
+ const struct scsi_host_template *sht)
{
int i, rc;
char *irq_desc;
@@ -6214,6 +6251,7 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
+ force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),
force_horkage_on(disable, ATA_HORKAGE_DISABLE),
};
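With the force_horkage_onoff(fua, ...) entry above, FUA use can also be toggled per port or device from the kernel command line through the usual libata.force syntax; assuming the generated keywords follow the existing no<name>/<name> pattern, something like

	libata.force=1.00:nofua

would keep FUA disabled for device 1.00.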
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 34303ce67c14..a6c901811802 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -565,17 +565,23 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
{
int i;
unsigned long flags;
+ struct scsi_cmnd *scmd, *tmp;
+ int nr_timedout = 0;
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
+ if (!ap->ops->error_handler)
+ return;
+
/* synchronize with host lock and sort out timeouts */
- /* For new EH, all qcs are finished in one of three ways -
+ /*
+ * For new EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
* Both completions can race against SCSI timeout. When normal
* completion wins, the qc never reaches EH. When error
- * completion wins, the qc has ATA_QCFLAG_FAILED set.
+ * completion wins, the qc has ATA_QCFLAG_EH set.
*
* When SCSI timeout wins, things are a bit more complex.
* Normal or error completion can occur after the timeout but
@@ -584,64 +590,61 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
* timed out iff its associated qc is active and not failed.
*/
spin_lock_irqsave(ap->lock, flags);
- if (ap->ops->error_handler) {
- struct scsi_cmnd *scmd, *tmp;
- int nr_timedout = 0;
-
- /* This must occur under the ap->lock as we don't want
- a polled recovery to race the real interrupt handler
-
- The lost_interrupt handler checks for any completed but
- non-notified command and completes much like an IRQ handler.
- We then fall into the error recovery code which will treat
- this as if normal completion won the race */
-
- if (ap->ops->lost_interrupt)
- ap->ops->lost_interrupt(ap);
+ /*
+ * This must occur under the ap->lock as we don't want
+ * a polled recovery to race the real interrupt handler
+ *
+ * The lost_interrupt handler checks for any completed but
+ * non-notified command and completes much like an IRQ handler.
+ *
+ * We then fall into the error recovery code which will treat
+ * this as if normal completion won the race
+ */
+ if (ap->ops->lost_interrupt)
+ ap->ops->lost_interrupt(ap);
- list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
- struct ata_queued_cmd *qc;
+ list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
+ struct ata_queued_cmd *qc;
- ata_qc_for_each_raw(ap, qc, i) {
- if (qc->flags & ATA_QCFLAG_ACTIVE &&
- qc->scsicmd == scmd)
- break;
- }
+ ata_qc_for_each_raw(ap, qc, i) {
+ if (qc->flags & ATA_QCFLAG_ACTIVE &&
+ qc->scsicmd == scmd)
+ break;
+ }
- if (i < ATA_MAX_QUEUE) {
- /* the scmd has an associated qc */
- if (!(qc->flags & ATA_QCFLAG_FAILED)) {
- /* which hasn't failed yet, timeout */
- qc->err_mask |= AC_ERR_TIMEOUT;
- qc->flags |= ATA_QCFLAG_FAILED;
- nr_timedout++;
- }
- } else {
- /* Normal completion occurred after
- * SCSI timeout but before this point.
- * Successfully complete it.
- */
- scmd->retries = scmd->allowed;
- scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+ if (i < ATA_MAX_QUEUE) {
+ /* the scmd has an associated qc */
+ if (!(qc->flags & ATA_QCFLAG_EH)) {
+ /* which hasn't failed yet, timeout */
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ qc->flags |= ATA_QCFLAG_EH;
+ nr_timedout++;
}
+ } else {
+ /* Normal completion occurred after
+ * SCSI timeout but before this point.
+ * Successfully complete it.
+ */
+ scmd->retries = scmd->allowed;
+ scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
+ }
- /* If we have timed out qcs. They belong to EH from
- * this point but the state of the controller is
- * unknown. Freeze the port to make sure the IRQ
- * handler doesn't diddle with those qcs. This must
- * be done atomically w.r.t. setting QCFLAG_FAILED.
- */
- if (nr_timedout)
- __ata_port_freeze(ap);
+ /*
+ * If we have timed out qcs, they belong to EH from
+ * this point but the state of the controller is
+ * unknown. Freeze the port to make sure the IRQ
+ * handler doesn't diddle with those qcs. This must
+ * be done atomically w.r.t. setting ATA_QCFLAG_EH.
+ */
+ if (nr_timedout)
+ __ata_port_freeze(ap);
+ /* initialize eh_tries */
+ ap->eh_tries = ATA_EH_MAX_TRIES;
- /* initialize eh_tries */
- ap->eh_tries = ATA_EH_MAX_TRIES;
- }
spin_unlock_irqrestore(ap->lock, flags);
-
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
@@ -911,12 +914,12 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
WARN_ON(!ap->ops->error_handler);
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
ata_eh_set_pending(ap, 1);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
- * Note that ATA_QCFLAG_FAILED is unconditionally set after
+ * Note that ATA_QCFLAG_EH is unconditionally set after
* this function completes.
*/
blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
@@ -994,7 +997,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && (!link || qc->dev->link == link)) {
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
ata_qc_complete(qc);
nr_aborted++;
}
@@ -1954,7 +1957,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
all_err_mask |= ehc->i.err_mask;
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
qc->flags & ATA_QCFLAG_RETRY ||
ata_dev_phys_link(qc->dev) != link)
continue;
@@ -2232,7 +2235,7 @@ static void ata_eh_link_report(struct ata_link *link)
desc = ehc->i.desc;
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
qc->err_mask == AC_ERR_DEV))
@@ -2298,7 +2301,7 @@ static void ata_eh_link_report(struct ata_link *link)
char data_buf[20] = "";
char cdb_buf[70] = "";
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
continue;
@@ -3802,7 +3805,7 @@ void ata_eh_finish(struct ata_port *ap)
/* retry or finish qcs */
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED))
+ if (!(qc->flags & ATA_QCFLAG_EH))
continue;
if (qc->err_mask) {
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 18ef14e749a0..f3e7396e3191 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -655,6 +655,9 @@ int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
return -EINVAL;
}
+ if (ap->ops->qc_ncq_fill_rtf)
+ ap->ops->qc_ncq_fill_rtf(ap, done_mask);
+
while (done_mask) {
struct ata_queued_cmd *qc;
unsigned int tag = __ffs64(done_mask);
@@ -1429,7 +1432,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
/* has LLDD analyzed already? */
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED))
+ if (!(qc->flags & ATA_QCFLAG_EH))
continue;
if (qc->err_mask)
@@ -1477,7 +1480,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
}
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link)
continue;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cbb3a7a50816..7bb12deab70c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -383,8 +383,12 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[4], *argbuf = NULL;
int argsize = 0;
- enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .sense = sensebuf,
+ .sense_len = sizeof(sensebuf),
+ };
int cmd_result;
if (arg == NULL)
@@ -407,11 +411,9 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
block count in sector count field */
- data_dir = DMA_FROM_DEVICE;
} else {
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
- data_dir = DMA_NONE;
}
scsi_cmd[0] = ATA_16;
@@ -429,9 +431,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
- cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
- sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
-
+ cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, argbuf,
+ argsize, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
@@ -491,6 +492,11 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .sense = sensebuf,
+ .sense_len = sizeof(sensebuf),
+ };
if (arg == NULL)
return -EINVAL;
@@ -513,9 +519,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
- cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
- sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
-
+ cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, NULL,
+ 0, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
@@ -1654,7 +1659,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
- int need_sense = (qc->err_mask != 0);
+ int need_sense = (qc->err_mask != 0) &&
+ !(qc->flags & ATA_QCFLAG_SENSE_VALID);
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
@@ -1668,12 +1674,11 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense))
ata_gen_passthru_sense(qc);
- else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- cmd->result = SAM_STAT_CHECK_CONDITION;
else if (need_sense)
ata_gen_ata_sense(qc);
else
- cmd->result = SAM_STAT_GOOD;
+ /* Keep the SCSI ML and status byte, clear host byte. */
+ cmd->result &= 0x0000ffff;
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap, &qc->result_tf);
@@ -2240,30 +2245,6 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
return sizeof(def_rw_recovery_mpage);
}
-/*
- * We can turn this into a real blacklist if it's needed, for now just
- * blacklist any Maxtor BANC1G10 revision firmware
- */
-static int ata_dev_supports_fua(u16 *id)
-{
- unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
-
- if (!libata_fua)
- return 0;
- if (!ata_id_has_fua(id))
- return 0;
-
- ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
- ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
-
- if (strcmp(model, "Maxtor"))
- return 1;
- if (strcmp(fw, "BANC1G10"))
- return 1;
-
- return 0; /* blacklisted */
-}
-
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: device IDENTIFY data / SCSI command of interest.
@@ -2287,7 +2268,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
};
u8 pg, spg;
unsigned int ebd, page_control, six_byte;
- u8 dpofua, bp = 0xff;
+ u8 dpofua = 0, bp = 0xff;
u16 fp;
six_byte = (scsicmd[0] == MODE_SENSE);
@@ -2350,9 +2331,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
goto invalid_fld;
}
- dpofua = 0;
- if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
- (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
+ if (dev->flags & ATA_DFLAG_FUA)
dpofua = 1 << 4;
if (six_byte) {
@@ -3266,11 +3245,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
u8 supported = 0;
unsigned int err = 0;
- if (cdb[2] != 1) {
+ if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
err = 2;
goto out;
}
+
switch (cdb[3]) {
case INQUIRY:
case MODE_SENSE:
@@ -4206,7 +4186,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
scsi_done(cmd);
}
-int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+int ata_scsi_add_hosts(struct ata_host *host, const struct scsi_host_template *sht)
{
int i, rc;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 153f49e00713..9d28badfe41d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1377,14 +1377,10 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
*
* LOCKING:
* spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * true indicating that result TF is successfully filled.
*/
-bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
+void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
- return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
@@ -2073,7 +2069,7 @@ void ata_sff_error_handler(struct ata_port *ap)
unsigned long flags;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ if (qc && !(qc->flags & ATA_QCFLAG_EH))
qc = NULL;
spin_lock_irqsave(ap->lock, flags);
@@ -2285,7 +2281,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
*/
int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
- struct scsi_host_template *sht)
+ const struct scsi_host_template *sht)
{
struct device *dev = host->dev;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2382,7 +2378,7 @@ static const struct ata_port_info *ata_sff_find_valid_pi(
static int ata_pci_init_one(struct pci_dev *pdev,
const struct ata_port_info * const *ppi,
- struct scsi_host_template *sht, void *host_priv,
+ const struct scsi_host_template *sht, void *host_priv,
int hflags, bool bmdma)
{
struct device *dev = &pdev->dev;
@@ -2456,7 +2452,7 @@ out:
*/
int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const *ppi,
- struct scsi_host_template *sht, void *host_priv, int hflag)
+ const struct scsi_host_template *sht, void *host_priv, int hflag)
{
return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
}
@@ -2796,7 +2792,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
bool thaw = false;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ if (qc && !(qc->flags & ATA_QCFLAG_EH))
qc = NULL;
/* reset PIO HSM and stop DMA engine */
@@ -3179,7 +3175,7 @@ EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
*/
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht, void *host_priv,
+ const struct scsi_host_template *sht, void *host_priv,
int hflags)
{
return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
index e0e4d0d5a100..9b5363fd0ab0 100644
--- a/drivers/ata/libata-trace.c
+++ b/drivers/ata/libata-trace.c
@@ -142,7 +142,7 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
trace_seq_printf(p, "QUIET ");
if (qc_flags & ATA_QCFLAG_RETRY)
trace_seq_printf(p, "RETRY ");
- if (qc_flags & ATA_QCFLAG_FAILED)
+ if (qc_flags & ATA_QCFLAG_EH)
trace_seq_printf(p, "FAILED ");
if (qc_flags & ATA_QCFLAG_SENSE_VALID)
trace_seq_printf(p, "SENSE_VALID ");
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 2cd6124a01e8..926d0d33cd29 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -111,7 +111,7 @@ static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev);
extern int ata_scsi_add_hosts(struct ata_host *host,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index f8706ee427d2..ab38871b5e00 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
return ata_bmdma_port_start(ap);
}
-static struct scsi_host_template pacpi_sht = {
+static const struct scsi_host_template pacpi_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 76ad0e73fe2a..bb790edd6036 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -355,7 +355,7 @@ static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
ata_sff_postreset(link, classes);
}
-static struct scsi_host_template ali_sht = {
+static const struct scsi_host_template ali_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index f216f9d7b9ec..5b02b89748b7 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -388,7 +388,7 @@ static void nv_host_stop(struct ata_host *host)
pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
}
-static struct scsi_host_template amd_sht = {
+static const struct scsi_host_template amd_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index e89617ed9175..6ab294322e79 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -218,7 +218,7 @@ struct arasan_cf_dev {
struct ata_queued_cmd *qc;
};
-static struct scsi_host_template arasan_cf_sht = {
+static const struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
.dma_boundary = 0xFFFFFFFFUL,
};
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 20a8f31a3f57..40544282f455 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -292,7 +292,7 @@ static int artop6210_qc_defer(struct ata_queued_cmd *qc)
return 0;
}
-static struct scsi_host_template artop_sht = {
+static const struct scsi_host_template artop_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index efdb94cff68b..8c5cc803aab3 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -251,7 +251,7 @@ static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
ata_bmdma_stop(qc);
}
-static struct scsi_host_template atiixp_sht = {
+static const struct scsi_host_template atiixp_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 779d660415c8..aaef5924f636 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -259,7 +259,7 @@ static int atp867x_cable_detect(struct ata_port *ap)
return ATA_CBL_PATA_UNK;
}
-static struct scsi_host_template atp867x_sht = {
+static const struct scsi_host_template atp867x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_buddha.c b/drivers/ata/pata_buddha.c
index 27d4c417fc60..49bc619b83e2 100644
--- a/drivers/ata/pata_buddha.c
+++ b/drivers/ata/pata_buddha.c
@@ -57,7 +57,7 @@ static unsigned int xsurf_bases[2] = {
XSURF_BASE1, XSURF_BASE2
};
-static struct scsi_host_template pata_buddha_sht = {
+static const struct scsi_host_template pata_buddha_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 1a3372a72213..45a7217b136e 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -172,7 +172,7 @@ static bool cmd640_sff_irq_check(struct ata_port *ap)
return irq_stat & irq_mask;
}
-static struct scsi_host_template cmd640_sht = {
+static const struct scsi_host_template cmd640_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 5baa4a7819c1..fafea2b79145 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -319,7 +319,7 @@ static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
ata_bmdma_stop(qc);
}
-static struct scsi_host_template cmd64x_sht = {
+static const struct scsi_host_template cmd64x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index f4289a532f87..422d42761a1d 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -94,7 +94,7 @@ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
cs5520_set_timings(ap, adev, adev->pio_mode);
}
-static struct scsi_host_template cs5520_sht = {
+static const struct scsi_host_template cs5520_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index d5b7ac14e78f..1e67b0f8db43 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -146,7 +146,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
return ata_bmdma_qc_issue(qc);
}
-static struct scsi_host_template cs5530_sht = {
+static const struct scsi_host_template cs5530_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index c2c3238ff84b..d793fc441b46 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -141,7 +141,7 @@ static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
}
-static struct scsi_host_template cs5535_sht = {
+static const struct scsi_host_template cs5535_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index ab47aeb5587f..b811efd2cc34 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -217,7 +217,7 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
cs5536_write(pdev, ETC, etc);
}
-static struct scsi_host_template cs5536_sht = {
+static const struct scsi_host_template cs5536_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 3be5d52a777b..ae347b5c2871 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -115,7 +115,7 @@ static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
outb(0x50, 0x23);
}
-static struct scsi_host_template cy82c693_sht = {
+static const struct scsi_host_template cy82c693_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 21da59f35b41..2e6eccf2902f 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -234,7 +234,7 @@ static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
spin_unlock_irqrestore(&efar_lock, flags);
}
-static struct scsi_host_template efar_sht = {
+static const struct scsi_host_template efar_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 47845d920075..c6e043e05d43 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -872,7 +872,7 @@ static int ep93xx_pata_port_start(struct ata_port *ap)
return 0;
}
-static struct scsi_host_template ep93xx_pata_sht = {
+static const struct scsi_host_template ep93xx_pata_sht = {
ATA_BASE_SHT(DRV_NAME),
/* ep93xx dma implementation limit */
.sg_tablesize = 32,
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 823c88622e34..996516e64f13 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -33,7 +33,7 @@
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
-static struct scsi_host_template pata_falcon_sht = {
+static const struct scsi_host_template pata_falcon_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 88924b5daa1a..6f6734c09b11 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -84,7 +84,7 @@ struct ftide010 {
#define FTIDE010_CLK_MOD_DEV0_UDMA_EN BIT(4)
#define FTIDE010_CLK_MOD_DEV1_UDMA_EN BIT(5)
-static struct scsi_host_template pata_ftide010_sht = {
+static const struct scsi_host_template pata_ftide010_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index 65bc9f3042ce..e5aa07f92106 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -35,7 +35,7 @@
#define GAYLE_CONTROL 0x101a
-static struct scsi_host_template pata_gayle_sht = {
+static const struct scsi_host_template pata_gayle_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 7e441fb304d3..bdccd1ba1524 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -312,7 +312,7 @@ static int hpt366_prereset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
-static struct scsi_host_template hpt36x_sht = {
+static const struct scsi_host_template hpt36x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index ce3c5eaa7e76..c0329cf01135 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -526,7 +526,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
}
-static struct scsi_host_template hpt37x_sht = {
+static const struct scsi_host_template hpt37x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 617c95522f43..5b1ecccf3c83 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -337,7 +337,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
return ata_bmdma_qc_issue(qc);
}
-static struct scsi_host_template hpt3x2n_sht = {
+static const struct scsi_host_template hpt3x2n_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 83974d5eb387..d65c586b5ad0 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -136,7 +136,7 @@ static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
#endif /* CONFIG_PATA_HPT3X3_DMA */
-static struct scsi_host_template hpt3x3_sht = {
+static const struct scsi_host_template hpt3x3_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 498383cb6e29..9cfb064782c3 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -298,7 +298,7 @@ static int icside_dma_init(struct pata_icside_info *info)
}
-static struct scsi_host_template pata_icside_sht = {
+static const struct scsi_host_template pata_icside_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 150939275b1b..4013f28679a9 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -97,7 +97,7 @@ static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
}
-static struct scsi_host_template pata_imx_sht = {
+static const struct scsi_host_template pata_imx_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 43bb224430d3..25a63d043c8e 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -20,7 +20,7 @@
#define DRV_NAME "pata_isapnp"
#define DRV_VERSION "0.2.5"
-static struct scsi_host_template isapnp_sht = {
+static const struct scsi_host_template isapnp_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 8a3e8778163c..b7ac56103c8a 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -228,7 +228,7 @@ static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x48, udma_enable);
}
-static struct scsi_host_template it8213_sht = {
+static const struct scsi_host_template it8213_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 8a5b4e0079ab..2fe3fb6102ce 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -800,7 +800,7 @@ static int it821x_rdc_cable(struct ata_port *ap)
return ATA_CBL_PATA80;
}
-static struct scsi_host_template it821x_sht = {
+static const struct scsi_host_template it821x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index e225913a619d..99a2ce723495 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <scsi/scsi_host.h>
@@ -173,7 +174,7 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
return words << 1;
}
-static struct scsi_host_template ixp4xx_sht = {
+static const struct scsi_host_template ixp4xx_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index d1b3ce8958dd..f51fb8219762 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -107,7 +107,7 @@ static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
/* No PIO or DMA methods needed for this device */
-static struct scsi_host_template jmicron_sht = {
+static const struct scsi_host_template jmicron_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 03c580625c2c..448a511cbc17 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -250,7 +250,7 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
return 0;
}
-static struct scsi_host_template legacy_sht = {
+static const struct scsi_host_template legacy_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 9ccaac9e2bc3..17f6ccee53c7 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -21,6 +21,7 @@
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/scatterlist.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/pci.h>
@@ -908,7 +909,7 @@ static int pata_macio_do_resume(struct pata_macio_priv *priv)
}
#endif /* CONFIG_PM_SLEEP */
-static struct scsi_host_template pata_macio_sht = {
+static const struct scsi_host_template pata_macio_sht = {
__ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 014ccb0f45dc..8119caaad605 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -92,7 +92,7 @@ static int marvell_cable_detect(struct ata_port *ap)
/* No PIO or DMA methods needed for this device */
-static struct scsi_host_template marvell_sht = {
+static const struct scsi_host_template marvell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3ebd6522a1fd..66c9dea4ea6e 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -606,7 +606,7 @@ mpc52xx_ata_task_irq(int irq, void *vpriv)
return IRQ_HANDLED;
}
-static struct scsi_host_template mpc52xx_ata_sht = {
+static const struct scsi_host_template mpc52xx_ata_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 8fda0e32c1ab..69e4baf27d72 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -136,7 +136,7 @@ static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc)
return ata_sff_qc_issue(qc);
}
-static struct scsi_host_template mpiix_sht = {
+static const struct scsi_host_template mpiix_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 06929e77c491..c0b2897fcf40 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -31,7 +31,7 @@ static unsigned int netcell_read_id(struct ata_device *adev,
return err_mask;
}
-static struct scsi_host_template netcell_sht = {
+static const struct scsi_host_template netcell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index f9255d6fd194..76a91013d27d 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -77,7 +77,7 @@ static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
}
}
-static struct scsi_host_template ninja32_sht = {
+static const struct scsi_host_template ninja32_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index ca3ab2736fef..44cc24d21d5f 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -114,7 +114,7 @@ static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc)
return ata_sff_qc_issue(qc);
}
-static struct scsi_host_template ns87410_sht = {
+static const struct scsi_host_template ns87410_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 9dd6bffefb48..d60e1f69d7b0 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -320,7 +320,7 @@ static struct ata_port_operations ns87560_pata_ops = {
};
#endif
-static struct scsi_host_template ns87415_sht = {
+static const struct scsi_host_template ns87415_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 35608a0cf552..b1ce9f1761af 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -58,7 +58,7 @@ struct octeon_cf_port {
u64 dma_base;
};
-static struct scsi_host_template octeon_cf_sht = {
+static const struct scsi_host_template octeon_cf_sht = {
ATA_PIO_SHT(DRV_NAME),
};
@@ -67,7 +67,7 @@ module_param(enable_dma, int, 0444);
MODULE_PARM_DESC(enable_dma,
"Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
-/**
+/*
* Convert nanosecond based time to setting used in the
* boot bus timing register, based on timing multiple
*/
@@ -114,7 +114,7 @@ static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
-/**
+/*
* Called after libata determines the needed PIO mode. This
* function programs the Octeon bootbus regions to support the
* timing requirements of the PIO mode.
@@ -278,7 +278,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
-/**
+/*
* Handle an 8 bit I/O request.
*
* @qc: Queued command
@@ -317,7 +317,7 @@ static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
return buflen;
}
-/**
+/*
* Handle a 16 bit I/O request.
*
* @qc: Queued command
@@ -372,7 +372,7 @@ static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
return buflen;
}
-/**
+/*
* Read the taskfile for 16bit non-True IDE only.
*/
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
@@ -453,7 +453,7 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
return 0;
}
-/**
+/*
* Load the taskfile for 16bit non-True IDE only. The device_addr is
* not loaded, we do this as part of octeon_cf_exec_command16.
*/
@@ -525,7 +525,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
ap->ops->sff_exec_command(ap, &qc->tf);
}
-/**
+/*
* Start a DMA transfer that was already setup
*
* @qc: Information about the DMA
@@ -580,7 +580,7 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
-/**
+/*
*
* LOCKING:
* spin_lock_irqsave(host lock)
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index ac5a633c00a5..178b28eff170 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -15,7 +15,7 @@
#define DRV_NAME "pata_of_platform"
-static struct scsi_host_template pata_platform_sht = {
+static const struct scsi_host_template pata_platform_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 22a020374410..dca82d92b004 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -204,7 +204,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
}
-static struct scsi_host_template oldpiix_sht = {
+static const struct scsi_host_template oldpiix_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 01976c4e4033..3d23f57eb128 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -148,7 +148,7 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
opti_write_reg(ap, 0x85, CNTRL_REG);
}
-static struct scsi_host_template opti_sht = {
+static const struct scsi_host_template opti_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index ad1090b90e52..dfc36b4ec9c6 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -334,7 +334,7 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
return rc;
}
-static struct scsi_host_template optidma_sht = {
+static const struct scsi_host_template optidma_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
deleted file mode 100644
index 51caa2a427dd..000000000000
--- a/drivers/ata/pata_palmld.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/ata/pata_palmld.c
- *
- * Driver for IDE channel in Palm LifeDrive
- *
- * Based on research of:
- * Alex Osborne <ato@meshy.org>
- *
- * Rewrite for mainline:
- * Marek Vasut <marek.vasut@gmail.com>
- *
- * Rewritten version based on pata_ixp4xx_cf.c:
- * ixp4xx PATA/Compact Flash driver
- * Copyright (C) 2006-07 Tower Technologies
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/libata.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-
-#include <scsi/scsi_host.h>
-
-#define DRV_NAME "pata_palmld"
-
-struct palmld_pata {
- struct ata_host *host;
- struct gpio_desc *power;
- struct gpio_desc *reset;
-};
-
-static struct scsi_host_template palmld_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations palmld_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_data_xfer = ata_sff_data_xfer32,
- .cable_detect = ata_cable_40wire,
-};
-
-static int palmld_pata_probe(struct platform_device *pdev)
-{
- struct palmld_pata *lda;
- struct ata_port *ap;
- void __iomem *mem;
- struct device *dev = &pdev->dev;
- int ret;
-
- lda = devm_kzalloc(dev, sizeof(*lda), GFP_KERNEL);
- if (!lda)
- return -ENOMEM;
-
- /* allocate host */
- lda->host = ata_host_alloc(dev, 1);
- if (!lda->host)
- return -ENOMEM;
-
- /* remap drive's physical memory address */
- mem = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mem))
- return PTR_ERR(mem);
-
- /* request and activate power and reset GPIOs */
- lda->power = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
- if (IS_ERR(lda->power))
- return PTR_ERR(lda->power);
- lda->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(lda->reset)) {
- gpiod_set_value(lda->power, 0);
- return PTR_ERR(lda->reset);
- }
-
- /* Assert reset to reset the drive */
- gpiod_set_value(lda->reset, 1);
- msleep(30);
- gpiod_set_value(lda->reset, 0);
- msleep(30);
-
- /* setup the ata port */
- ap = lda->host->ports[0];
- ap->ops = &palmld_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_PIO_POLLING;
-
- /* memory mapping voodoo */
- ap->ioaddr.cmd_addr = mem + 0x10;
- ap->ioaddr.altstatus_addr = mem + 0xe;
- ap->ioaddr.ctl_addr = mem + 0xe;
-
- /* start the port */
- ata_sff_std_ports(&ap->ioaddr);
-
- /* activate host */
- ret = ata_host_activate(lda->host, 0, NULL, IRQF_TRIGGER_RISING,
- &palmld_sht);
- /* power down on failure */
- if (ret) {
- gpiod_set_value(lda->power, 0);
- return ret;
- }
-
- platform_set_drvdata(pdev, lda);
- return 0;
-}
-
-static int palmld_pata_remove(struct platform_device *pdev)
-{
- struct palmld_pata *lda = platform_get_drvdata(pdev);
-
- ata_platform_remove_one(pdev);
-
- /* power down the HDD */
- gpiod_set_value(lda->power, 0);
-
- return 0;
-}
-
-static struct platform_driver palmld_pata_platform_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = palmld_pata_probe,
- .remove = palmld_pata_remove,
-};
-
-module_platform_driver(palmld_pata_platform_driver);
-
-MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PalmLD PATA driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_parport/Kconfig b/drivers/ata/pata_parport/Kconfig
new file mode 100644
index 000000000000..2c953f5d1396
--- /dev/null
+++ b/drivers/ata/pata_parport/Kconfig
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0
+
+comment "Parallel IDE protocol modules"
+ depends on PATA_PARPORT
+
+config PATA_PARPORT_ATEN
+ tristate "ATEN EH-100 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the ATEN EH-100 parallel port IDE
+ protocol. This protocol is used in some inexpensive low performance
+ parallel port kits made in Hong Kong.
+
+config PATA_PARPORT_BPCK
+ tristate "MicroSolutions backpack (Series 5) protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Micro Solutions BACKPACK
+ parallel port Series 5 IDE protocol. (Most BACKPACK drives made
+ before 1999 were Series 5) Series 5 drives will NOT always have the
+	  Series noted on the bottom of the drive. Series 6 drives will.
+
+ In other words, if your BACKPACK drive doesn't say "Series 6" on the
+ bottom, enable this option.
+
+config PATA_PARPORT_BPCK6
+ tristate "MicroSolutions backpack (Series 6) protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Micro Solutions BACKPACK
+ parallel port Series 6 IDE protocol. (Most BACKPACK drives made
+ after 1999 were Series 6) Series 6 drives will have the Series noted
+	  on the bottom of the drive. Series 5 drives don't always have it
+ noted.
+
+ In other words, if your BACKPACK drive says "Series 6" on the
+ bottom, enable this option.
+
+config PATA_PARPORT_COMM
+ tristate "DataStor Commuter protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Commuter parallel port IDE
+ protocol from DataStor.
+
+config PATA_PARPORT_DSTR
+ tristate "DataStor EP-2000 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the EP-2000 parallel port IDE
+	  protocol from DataStor.
+
+config PATA_PARPORT_FIT2
+ tristate "FIT TD-2000 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the TD-2000 parallel port IDE
+ protocol from Fidelity International Technology. This is a simple
+ (low speed) adapter that is used in some portable hard drives.
+
+config PATA_PARPORT_FIT3
+ tristate "FIT TD-3000 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the TD-3000 parallel port IDE
+ protocol from Fidelity International Technology. This protocol is
+ used in newer models of their portable disk, CD-ROM and PD/CD
+ devices.
+
+config PATA_PARPORT_EPAT
+ tristate "Shuttle EPAT/EPEZ protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the EPAT parallel port IDE protocol.
+ EPAT is a parallel port IDE adapter manufactured by Shuttle
+ Technology and widely used in devices from major vendors such as
+ Hewlett-Packard, SyQuest, Imation and Avatar.
+
+config PATA_PARPORT_EPATC8
+ bool "Support c7/c8 chips"
+ depends on PATA_PARPORT_EPAT
+ help
+ This option enables support for the newer Shuttle EP1284 (aka c7 and
+ c8) chip. You need this if you are using any recent Imation SuperDisk
+ (LS-120) drive.
+
+config PATA_PARPORT_EPIA
+ tristate "Shuttle EPIA protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the (obsolete) EPIA parallel port
+ IDE protocol from Shuttle Technology. This adapter can still be
+ found in some no-name kits.
+
+config PATA_PARPORT_FRIQ
+ tristate "Freecom IQ ASIC-2 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for version 2 of the Freecom IQ parallel
+ port IDE adapter. This adapter is used by the Maxell Superdisk
+ drive.
+
+config PATA_PARPORT_FRPW
+ tristate "FreeCom power protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Freecom power parallel port IDE
+ protocol.
+
+config PATA_PARPORT_KBIC
+ tristate "KingByte KBIC-951A/971A protocols"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the KBIC-951A and KBIC-971A parallel
+ port IDE protocols from KingByte Information Corp. KingByte's
+ adapters appear in many no-name portable disk and CD-ROM products,
+ especially in Europe.
+
+config PATA_PARPORT_KTTI
+ tristate "KT PHd protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the "PHd" parallel port IDE protocol
+ from KT Technology. This is a simple (low speed) adapter that is
+ used in some 2.5" portable hard drives.
+
+config PATA_PARPORT_ON20
+ tristate "OnSpec 90c20 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the (obsolete) 90c20 parallel port
+ IDE protocol from OnSpec (often marketed under the ValuStore brand
+ name).
+
+config PATA_PARPORT_ON26
+ tristate "OnSpec 90c26 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the 90c26 parallel port IDE protocol
+ from OnSpec Electronics (often marketed under the ValuStore brand
+ name).
diff --git a/drivers/ata/pata_parport/Makefile b/drivers/ata/pata_parport/Makefile
new file mode 100644
index 000000000000..0932c8d55b91
--- /dev/null
+++ b/drivers/ata/pata_parport/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PATA_PARPORT) += pata_parport.o
+
+obj-$(CONFIG_PATA_PARPORT_ATEN) += aten.o
+obj-$(CONFIG_PATA_PARPORT_BPCK) += bpck.o
+obj-$(CONFIG_PATA_PARPORT_COMM) += comm.o
+obj-$(CONFIG_PATA_PARPORT_DSTR) += dstr.o
+obj-$(CONFIG_PATA_PARPORT_KBIC) += kbic.o
+obj-$(CONFIG_PATA_PARPORT_EPAT) += epat.o
+obj-$(CONFIG_PATA_PARPORT_EPIA) += epia.o
+obj-$(CONFIG_PATA_PARPORT_FRPW) += frpw.o
+obj-$(CONFIG_PATA_PARPORT_FRIQ) += friq.o
+obj-$(CONFIG_PATA_PARPORT_FIT2) += fit2.o
+obj-$(CONFIG_PATA_PARPORT_FIT3) += fit3.o
+obj-$(CONFIG_PATA_PARPORT_ON20) += on20.o
+obj-$(CONFIG_PATA_PARPORT_ON26) += on26.o
+obj-$(CONFIG_PATA_PARPORT_KTTI) += ktti.o
+obj-$(CONFIG_PATA_PARPORT_BPCK6) += bpck6.o
diff --git a/drivers/block/paride/aten.c b/drivers/ata/pata_parport/aten.c
index 2695465568ad..1bd248c42f8b 100644
--- a/drivers/block/paride/aten.c
+++ b/drivers/ata/pata_parport/aten.c
@@ -9,14 +9,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.05 init_proto, release_proto
-
-*/
-
-#define ATEN_VERSION "1.01"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -24,8 +16,7 @@
#include <linux/wait.h>
#include <linux/types.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88)
@@ -35,7 +26,7 @@
static int cont_map[2] = { 0x08, 0x20 };
-static void aten_write_regr( PIA *pi, int cont, int regr, int val)
+static void aten_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -44,7 +35,7 @@ static void aten_write_regr( PIA *pi, int cont, int regr, int val)
w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc);
}
-static int aten_read_regr( PIA *pi, int cont, int regr )
+static int aten_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -67,7 +58,7 @@ static int aten_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void aten_read_block( PIA *pi, char * buf, int count )
+static void aten_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b, c, d;
@@ -95,7 +86,7 @@ static void aten_read_block( PIA *pi, char * buf, int count )
}
}
-static void aten_write_block( PIA *pi, char * buf, int count )
+static void aten_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -107,28 +98,25 @@ static void aten_write_block( PIA *pi, char * buf, int count )
w2(0xc);
}
-static void aten_connect ( PIA *pi )
+static void aten_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xc);
}
-static void aten_disconnect ( PIA *pi )
+static void aten_disconnect(struct pi_adapter *pi)
{ w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void aten_log_adapter( PIA *pi, char * scratch, int verbose )
+static void aten_log_adapter(struct pi_adapter *pi)
{ char *mode_string[2] = {"4-bit","8-bit"};
- printk("%s: aten %s, ATEN EH-100 at 0x%x, ",
- pi->device,ATEN_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "ATEN EH-100 at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol aten = {
@@ -147,16 +135,5 @@ static struct pi_protocol aten = {
.log_adapter = aten_log_adapter,
};
-static int __init aten_init(void)
-{
- return paride_register(&aten);
-}
-
-static void __exit aten_exit(void)
-{
- paride_unregister( &aten );
-}
-
MODULE_LICENSE("GPL");
-module_init(aten_init)
-module_exit(aten_exit)
+module_pata_parport_driver(aten);
diff --git a/drivers/block/paride/bpck.c b/drivers/ata/pata_parport/bpck.c
index d880a9465e9b..1c5035a09554 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/ata/pata_parport/bpck.c
@@ -7,15 +7,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.05 init_proto, release_proto, pi->delay
- 1.02 GRG 1998.08.15 default pi->delay returned to 4
-
-*/
-
-#define BPCK_VERSION "1.02"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -23,8 +14,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#undef r2
#undef w2
@@ -46,7 +36,7 @@
static int cont_map[3] = { 0x40, 0x48, 0 };
-static int bpck_read_regr( PIA *pi, int cont, int regr )
+static int bpck_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int r, l, h;
@@ -77,7 +67,7 @@ static int bpck_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void bpck_write_regr( PIA *pi, int cont, int regr, int val )
+static void bpck_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -106,7 +96,7 @@ static void bpck_write_regr( PIA *pi, int cont, int regr, int val )
#define WR(r,v) bpck_write_regr(pi,2,r,v)
#define RR(r) (bpck_read_regr(pi,2,r))
-static void bpck_write_block( PIA *pi, char * buf, int count )
+static void bpck_write_block(struct pi_adapter *pi, char *buf, int count)
{ int i;
@@ -147,7 +137,7 @@ static void bpck_write_block( PIA *pi, char * buf, int count )
}
}
-static void bpck_read_block( PIA *pi, char * buf, int count )
+static void bpck_read_block(struct pi_adapter *pi, char *buf, int count)
{ int i, l, h;
@@ -194,7 +184,7 @@ static void bpck_read_block( PIA *pi, char * buf, int count )
}
}
-static int bpck_probe_unit ( PIA *pi )
+static int bpck_probe_unit(struct pi_adapter *pi)
{ int o1, o0, f7, id;
int t, s;
@@ -217,7 +207,7 @@ static int bpck_probe_unit ( PIA *pi )
return 1;
}
-static void bpck_connect ( PIA *pi )
+static void bpck_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
w0(0xff-pi->unit); w2(4); w0(pi->unit);
@@ -241,24 +231,24 @@ static void bpck_connect ( PIA *pi )
WR(5,8);
- if (pi->devtype == PI_PCD) {
+/* if (pi->devtype == PI_PCD) { possibly wrong, purpose unknown */
WR(0x46,0x10); /* fiddle with ESS logic ??? */
WR(0x4c,0x38);
WR(0x4d,0x88);
WR(0x46,0xa0);
WR(0x41,0);
WR(0x4e,8);
- }
+/* }*/
}
-static void bpck_disconnect ( PIA *pi )
+static void bpck_disconnect(struct pi_adapter *pi)
{ w0(0);
if (pi->mode >= 2) { w2(9); w2(0); } else t2(2);
w2(0x4c); w0(pi->saved_r0);
}
-static void bpck_force_spp ( PIA *pi )
+static void bpck_force_spp(struct pi_adapter *pi)
/* This fakes the EPP protocol to turn off EPP ... */
@@ -276,7 +266,7 @@ static void bpck_force_spp ( PIA *pi )
#define TEST_LEN 16
-static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
+static int bpck_test_proto(struct pi_adapter *pi)
{ int i, e, l, h, om;
char buf[TEST_LEN];
@@ -334,19 +324,16 @@ static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
}
- if (verbose) {
- printk("%s: bpck: 0x%x unit %d mode %d: ",
- pi->device,pi->port,pi->unit,pi->mode);
- for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]);
- printk("\n");
- }
+ dev_dbg(&pi->dev, "bpck: 0x%x unit %d mode %d: ",
+ pi->port, pi->unit, pi->mode);
+ print_hex_dump_debug("bpck: ", DUMP_PREFIX_NONE, TEST_LEN, 1, buf, TEST_LEN, false);
e = 0;
for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++;
return e;
}
-static void bpck_read_eeprom ( PIA *pi, char * buf )
+static void bpck_read_eeprom(struct pi_adapter *pi, char *buf)
{ int i, j, k, p, v, f, om, od;
@@ -397,7 +384,7 @@ static void bpck_read_eeprom ( PIA *pi, char * buf )
pi->mode = om; pi->delay = od;
}
-static int bpck_test_port ( PIA *pi ) /* check for 8-bit port */
+static int bpck_test_port(struct pi_adapter *pi) /* check for 8-bit port */
{ int i, r, m;
@@ -416,31 +403,17 @@ static int bpck_test_port ( PIA *pi ) /* check for 8-bit port */
return 5;
}
-static void bpck_log_adapter( PIA *pi, char * scratch, int verbose )
+static void bpck_log_adapter(struct pi_adapter *pi)
{ char *mode_string[5] = { "4-bit","8-bit","EPP-8",
"EPP-16","EPP-32" };
-
-#ifdef DUMP_EEPROM
- int i;
-#endif
+ char scratch[128];
bpck_read_eeprom(pi,scratch);
-
-#ifdef DUMP_EEPROM
- if (verbose) {
- for(i=0;i<128;i++)
- if ((scratch[i] < ' ') || (scratch[i] > '~'))
- scratch[i] = '.';
- printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch);
- printk("%s: %64.64s\n",pi->device,&scratch[64]);
- }
-#endif
-
- printk("%s: bpck %s, backpack %8.8s unit %d",
- pi->device,BPCK_VERSION,&scratch[110],pi->unit);
- printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port,
- pi->mode,mode_string[pi->mode],pi->delay);
+ print_hex_dump_bytes("bpck EEPROM: ", DUMP_PREFIX_NONE, scratch, 128);
+ dev_info(&pi->dev, "backpack %8.8s unit %d at 0x%x, mode %d (%s), delay %d\n",
+ &scratch[110], pi->unit, pi->port, pi->mode,
+ mode_string[pi->mode], pi->delay);
}
static struct pi_protocol bpck = {
@@ -462,16 +435,5 @@ static struct pi_protocol bpck = {
.log_adapter = bpck_log_adapter,
};
-static int __init bpck_init(void)
-{
- return paride_register(&bpck);
-}
-
-static void __exit bpck_exit(void)
-{
- paride_unregister(&bpck);
-}
-
MODULE_LICENSE("GPL");
-module_init(bpck_init)
-module_exit(bpck_exit)
+module_pata_parport_driver(bpck);
diff --git a/drivers/ata/pata_parport/bpck6.c b/drivers/ata/pata_parport/bpck6.c
new file mode 100644
index 000000000000..76febd07a9bb
--- /dev/null
+++ b/drivers/ata/pata_parport/bpck6.c
@@ -0,0 +1,460 @@
+/*
+ backpack.c (c) 2001 Micro Solutions Inc.
+ Released under the terms of the GNU General Public license
+
+ backpack.c is a low-level protocol driver for the Micro Solutions
+ "BACKPACK" parallel port IDE adapter
+ (Works on Series 6 drives)
+
+ Written by: Ken Hahn (linux-dev@micro-solutions.com)
+ Clive Turvey (linux-dev@micro-solutions.com)
+
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/parport.h>
+#include "pata_parport.h"
+
+/* 60772 Commands */
+#define ACCESS_REG 0x00
+#define ACCESS_PORT 0x40
+
+#define ACCESS_READ 0x00
+#define ACCESS_WRITE 0x20
+
+/* 60772 Command Prefix */
+#define CMD_PREFIX_SET 0xe0 // Special command that modifies next command's operation
+#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
+ #define PREFIX_IO16 0x01 // perform 16-bit wide I/O
+ #define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
+ #define PREFIX_BLK 0x08 // enable block transfer mode
+
+/* 60772 Registers */
+#define REG_STATUS 0x00 // status register
+ #define STATUS_IRQA 0x01 // Peripheral IRQA line
+ #define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
+#define REG_VERSION 0x01 // PPC version register (read)
+#define REG_HWCFG 0x02 // Hardware Config register
+#define REG_RAMSIZE 0x03 // Size of RAM Buffer
+ #define RAMSIZE_128K 0x02
+#define REG_EEPROM 0x06 // EEPROM control register
+ #define EEPROM_SK 0x01 // eeprom SK bit
+ #define EEPROM_DI 0x02 // eeprom DI bit
+ #define EEPROM_CS 0x04 // eeprom CS bit
+ #define EEPROM_EN 0x08 // eeprom output enable
+#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
+
+/* flags */
+#define fifo_wait 0x10
+
+/* DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES */
+#define PPCMODE_UNI_SW 0
+#define PPCMODE_UNI_FW 1
+#define PPCMODE_BI_SW 2
+#define PPCMODE_BI_FW 3
+#define PPCMODE_EPP_BYTE 4
+#define PPCMODE_EPP_WORD 5
+#define PPCMODE_EPP_DWORD 6
+
+static int mode_map[] = { PPCMODE_UNI_FW, PPCMODE_BI_FW, PPCMODE_EPP_BYTE,
+ PPCMODE_EPP_WORD, PPCMODE_EPP_DWORD };
+
+static void bpck6_send_cmd(struct pi_adapter *pi, u8 cmd)
+{
+ switch (mode_map[pi->mode]) {
+ case PPCMODE_UNI_SW:
+ case PPCMODE_UNI_FW:
+ case PPCMODE_BI_SW:
+ case PPCMODE_BI_FW:
+ parport_write_data(pi->pardev->port, cmd);
+ parport_frob_control(pi->pardev->port, 0, PARPORT_CONTROL_AUTOFD);
+ break;
+ case PPCMODE_EPP_BYTE:
+ case PPCMODE_EPP_WORD:
+ case PPCMODE_EPP_DWORD:
+ pi->pardev->port->ops->epp_write_addr(pi->pardev->port, &cmd, 1, 0);
+ break;
+ }
+}
+
+static u8 bpck6_rd_data_byte(struct pi_adapter *pi)
+{
+ u8 data = 0;
+
+ switch (mode_map[pi->mode]) {
+ case PPCMODE_UNI_SW:
+ case PPCMODE_UNI_FW:
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_INIT);
+ data = parport_read_status(pi->pardev->port);
+ data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE);
+ data |= parport_read_status(pi->pardev->port) & 0xB8;
+ break;
+ case PPCMODE_BI_SW:
+ case PPCMODE_BI_FW:
+ parport_data_reverse(pi->pardev->port);
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE | PARPORT_CONTROL_INIT);
+ data = parport_read_data(pi->pardev->port);
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE, 0);
+ parport_data_forward(pi->pardev->port);
+ break;
+ case PPCMODE_EPP_BYTE:
+ case PPCMODE_EPP_WORD:
+ case PPCMODE_EPP_DWORD:
+ pi->pardev->port->ops->epp_read_data(pi->pardev->port, &data, 1, 0);
+ break;
+ }
+
+ return data;
+}
+
+static void bpck6_wr_data_byte(struct pi_adapter *pi, u8 data)
+{
+ switch (mode_map[pi->mode]) {
+ case PPCMODE_UNI_SW:
+ case PPCMODE_UNI_FW:
+ case PPCMODE_BI_SW:
+ case PPCMODE_BI_FW:
+ parport_write_data(pi->pardev->port, data);
+ parport_frob_control(pi->pardev->port, 0, PARPORT_CONTROL_INIT);
+ break;
+ case PPCMODE_EPP_BYTE:
+ case PPCMODE_EPP_WORD:
+ case PPCMODE_EPP_DWORD:
+ pi->pardev->port->ops->epp_write_data(pi->pardev->port, &data, 1, 0);
+ break;
+ }
+}
+
+static int bpck6_read_regr(struct pi_adapter *pi, int cont, int reg)
+{
+ u8 port = cont ? reg | 8 : reg;
+
+ bpck6_send_cmd(pi, port | ACCESS_PORT | ACCESS_READ);
+ return bpck6_rd_data_byte(pi);
+}
+
+static void bpck6_write_regr(struct pi_adapter *pi, int cont, int reg, int val)
+{
+ u8 port = cont ? reg | 8 : reg;
+
+ bpck6_send_cmd(pi, port | ACCESS_PORT | ACCESS_WRITE);
+ bpck6_wr_data_byte(pi, val);
+}
+
+static void bpck6_wait_for_fifo(struct pi_adapter *pi)
+{
+ int i;
+
+ if (pi->private & fifo_wait) {
+ for (i = 0; i < 20; i++)
+ parport_read_status(pi->pardev->port);
+ }
+}
+
+static void bpck6_write_block(struct pi_adapter *pi, char *buf, int len)
+{
+ u8 this, last;
+
+ bpck6_send_cmd(pi, REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE);
+ bpck6_wr_data_byte(pi, (u8)len);
+ bpck6_wr_data_byte(pi, (u8)(len >> 8));
+ bpck6_wr_data_byte(pi, 0);
+
+ bpck6_send_cmd(pi, CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK);
+ bpck6_send_cmd(pi, ATA_REG_DATA | ACCESS_PORT | ACCESS_WRITE);
+
+ switch (mode_map[pi->mode]) {
+ case PPCMODE_UNI_SW:
+ case PPCMODE_BI_SW:
+ while (len--) {
+ parport_write_data(pi->pardev->port, *buf++);
+ parport_frob_control(pi->pardev->port, 0,
+ PARPORT_CONTROL_INIT);
+ }
+ break;
+ case PPCMODE_UNI_FW:
+ case PPCMODE_BI_FW:
+ bpck6_send_cmd(pi, CMD_PREFIX_SET | PREFIX_FASTWR);
+
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE);
+
+ last = *buf;
+
+ parport_write_data(pi->pardev->port, last);
+
+ while (len) {
+ this = *buf++;
+ len--;
+
+ if (this == last) {
+ parport_frob_control(pi->pardev->port, 0,
+ PARPORT_CONTROL_INIT);
+ } else {
+ parport_write_data(pi->pardev->port, this);
+ last = this;
+ }
+ }
+
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
+ 0);
+ bpck6_send_cmd(pi, CMD_PREFIX_RESET | PREFIX_FASTWR);
+ break;
+ case PPCMODE_EPP_BYTE:
+ pi->pardev->port->ops->epp_write_data(pi->pardev->port, buf,
+ len, PARPORT_EPP_FAST_8);
+ bpck6_wait_for_fifo(pi);
+ break;
+ case PPCMODE_EPP_WORD:
+ pi->pardev->port->ops->epp_write_data(pi->pardev->port, buf,
+ len, PARPORT_EPP_FAST_16);
+ bpck6_wait_for_fifo(pi);
+ break;
+ case PPCMODE_EPP_DWORD:
+ pi->pardev->port->ops->epp_write_data(pi->pardev->port, buf,
+ len, PARPORT_EPP_FAST_32);
+ bpck6_wait_for_fifo(pi);
+ break;
+ }
+
+ bpck6_send_cmd(pi, CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK);
+}
+
+static void bpck6_read_block(struct pi_adapter *pi, char *buf, int len)
+{
+ bpck6_send_cmd(pi, REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE);
+ bpck6_wr_data_byte(pi, (u8)len);
+ bpck6_wr_data_byte(pi, (u8)(len >> 8));
+ bpck6_wr_data_byte(pi, 0);
+
+ bpck6_send_cmd(pi, CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK);
+ bpck6_send_cmd(pi, ATA_REG_DATA | ACCESS_PORT | ACCESS_READ);
+
+ switch (mode_map[pi->mode]) {
+ case PPCMODE_UNI_SW:
+ case PPCMODE_UNI_FW:
+ while (len) {
+ u8 d;
+
+ parport_frob_control(pi->pardev->port,
+ PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_INIT); /* DATA STROBE */
+ d = parport_read_status(pi->pardev->port);
+ d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
+ parport_frob_control(pi->pardev->port,
+ PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE);
+ d |= parport_read_status(pi->pardev->port) & 0xB8;
+ *buf++ = d;
+ len--;
+ }
+ break;
+ case PPCMODE_BI_SW:
+ case PPCMODE_BI_FW:
+ parport_data_reverse(pi->pardev->port);
+ while (len) {
+ parport_frob_control(pi->pardev->port,
+ PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE | PARPORT_CONTROL_INIT);
+ *buf++ = parport_read_data(pi->pardev->port);
+ len--;
+ }
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
+ 0);
+ parport_data_forward(pi->pardev->port);
+ break;
+ case PPCMODE_EPP_BYTE:
+ pi->pardev->port->ops->epp_read_data(pi->pardev->port, buf, len,
+ PARPORT_EPP_FAST_8);
+ break;
+ case PPCMODE_EPP_WORD:
+ pi->pardev->port->ops->epp_read_data(pi->pardev->port, buf, len,
+ PARPORT_EPP_FAST_16);
+ break;
+ case PPCMODE_EPP_DWORD:
+ pi->pardev->port->ops->epp_read_data(pi->pardev->port, buf, len,
+ PARPORT_EPP_FAST_32);
+ break;
+ }
+
+ bpck6_send_cmd(pi, CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK);
+}
+
+static int bpck6_open(struct pi_adapter *pi)
+{
+ u8 i, j, k;
+
+ pi->saved_r0 = parport_read_data(pi->pardev->port);
+ pi->saved_r2 = parport_read_control(pi->pardev->port) & 0x5F;
+
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT,
+ PARPORT_CONTROL_SELECT);
+ if (pi->saved_r0 == 'b')
+ parport_write_data(pi->pardev->port, 'x');
+ parport_write_data(pi->pardev->port, 'b');
+ parport_write_data(pi->pardev->port, 'p');
+ parport_write_data(pi->pardev->port, pi->unit);
+ parport_write_data(pi->pardev->port, ~pi->unit);
+
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT, 0);
+ parport_write_control(pi->pardev->port, PARPORT_CONTROL_INIT);
+
+ i = mode_map[pi->mode] & 0x0C;
+ if (i == 0)
+ i = (mode_map[pi->mode] & 2) | 1;
+ parport_write_data(pi->pardev->port, i);
+
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT,
+ PARPORT_CONTROL_SELECT);
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_AUTOFD);
+
+ j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
+ k = parport_read_status(pi->pardev->port) & 0xB8;
+ if (j != k)
+ goto fail;
+
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_AUTOFD, 0);
+ k = (parport_read_status(pi->pardev->port) & 0xB8) ^ 0xB8;
+ if (j != k)
+ goto fail;
+
+ if (i & 4) // EPP
+ parport_frob_control(pi->pardev->port,
+ PARPORT_CONTROL_SELECT | PARPORT_CONTROL_INIT, 0);
+ else // PPC/ECP
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT, 0);
+
+ pi->private = 0;
+
+ bpck6_send_cmd(pi, ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE);
+ bpck6_wr_data_byte(pi, RAMSIZE_128K);
+
+ bpck6_send_cmd(pi, ACCESS_REG | ACCESS_READ | REG_VERSION);
+ if ((bpck6_rd_data_byte(pi) & 0x3F) == 0x0C)
+ pi->private |= fifo_wait;
+
+ return 1;
+
+fail:
+ parport_write_control(pi->pardev->port, pi->saved_r2);
+ parport_write_data(pi->pardev->port, pi->saved_r0);
+
+ return 0; // FAIL
+}
+
+static void bpck6_deselect(struct pi_adapter *pi)
+{
+ if (mode_map[pi->mode] & 4) // EPP
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_INIT,
+ PARPORT_CONTROL_INIT);
+ else // PPC/ECP
+ parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT,
+ PARPORT_CONTROL_SELECT);
+
+ parport_write_data(pi->pardev->port, pi->saved_r0);
+ parport_write_control(pi->pardev->port,
+ pi->saved_r2 | PARPORT_CONTROL_SELECT);
+ parport_write_control(pi->pardev->port, pi->saved_r2);
+}
+
+static void bpck6_wr_extout(struct pi_adapter *pi, u8 regdata)
+{
+ bpck6_send_cmd(pi, REG_VERSION | ACCESS_REG | ACCESS_WRITE);
+ bpck6_wr_data_byte(pi, (u8)((regdata & 0x03) << 6));
+}
+
+static void bpck6_connect(struct pi_adapter *pi)
+{
+ dev_dbg(&pi->dev, "connect\n");
+
+ bpck6_open(pi);
+ bpck6_wr_extout(pi, 0x3);
+}
+
+static void bpck6_disconnect(struct pi_adapter *pi)
+{
+ dev_dbg(&pi->dev, "disconnect\n");
+ bpck6_wr_extout(pi, 0x0);
+ bpck6_deselect(pi);
+}
+
+static int bpck6_test_port(struct pi_adapter *pi) /* check for 8-bit port */
+{
+ dev_dbg(&pi->dev, "PARPORT indicates modes=%x for lp=0x%lx\n",
+ pi->pardev->port->modes, pi->pardev->port->base);
+
+ /* look at the parport device to see what modes we can use */
+ if (pi->pardev->port->modes & PARPORT_MODE_EPP)
+ return 5; /* Can do EPP */
+ if (pi->pardev->port->modes & PARPORT_MODE_TRISTATE)
+ return 2;
+ return 1; /* Just flat SPP */
+}
+
+static int bpck6_probe_unit(struct pi_adapter *pi)
+{
+ int out, saved_mode;
+
+ dev_dbg(&pi->dev, "PROBE UNIT %x on port:%x\n", pi->unit, pi->port);
+
+ saved_mode = pi->mode;
+ /*LOWER DOWN TO UNIDIRECTIONAL*/
+ pi->mode = 0;
+
+ out = bpck6_open(pi);
+
+ dev_dbg(&pi->dev, "ppc_open returned %2x\n", out);
+
+ if(out)
+ {
+ bpck6_deselect(pi);
+ dev_dbg(&pi->dev, "leaving probe\n");
+ pi->mode = saved_mode;
+ return(1);
+ }
+ else
+ {
+ dev_dbg(&pi->dev, "Failed open\n");
+ pi->mode = saved_mode;
+ return(0);
+ }
+}
+
+static void bpck6_log_adapter(struct pi_adapter *pi)
+{
+ char *mode_string[5]=
+ {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
+
+ dev_info(&pi->dev, "Micro Solutions BACKPACK Drive unit %d at 0x%x, mode:%d (%s), delay %d\n",
+ pi->unit, pi->port, pi->mode, mode_string[pi->mode], pi->delay);
+}
+
+static struct pi_protocol bpck6 = {
+ .owner = THIS_MODULE,
+ .name = "bpck6",
+ .max_mode = 5,
+ .epp_first = 2, /* 2-5 use epp (need 8 ports) */
+ .max_units = 255,
+ .write_regr = bpck6_write_regr,
+ .read_regr = bpck6_read_regr,
+ .write_block = bpck6_write_block,
+ .read_block = bpck6_read_block,
+ .connect = bpck6_connect,
+ .disconnect = bpck6_disconnect,
+ .test_port = bpck6_test_port,
+ .probe_unit = bpck6_probe_unit,
+ .log_adapter = bpck6_log_adapter,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Micro Solutions Inc.");
+MODULE_DESCRIPTION("BACKPACK Protocol module, compatible with PARIDE");
+module_pata_parport_driver(bpck6);
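Most of the converted protocol modules above and below replace their hand-written module_init()/module_exit() pairs with module_pata_parport_driver(). The macro itself lives in drivers/ata/pata_parport/pata_parport.h, which is not shown in this section; the following is only a sketch of how such a helper is conventionally built on the kernel's generic module_driver() macro, not a quote of the header:

	/* sketch, assuming the helper wraps module_driver() as usual */
	#define module_pata_parport_driver(__protocol) \
		module_driver(__protocol, pata_parport_register_driver, \
			      pata_parport_unregister_driver)

module_driver() (from <linux/device/driver.h>) expands to a module_init()/module_exit() pair that calls the given register/unregister functions on the named object, which is why drivers registering a single protocol no longer need any init/exit code of their own. epat.c and kbic.c below keep explicit init/exit functions because they do extra work there: a config-dependent module parameter default and registration of two protocols, respectively.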
diff --git a/drivers/block/paride/comm.c b/drivers/ata/pata_parport/comm.c
index 9bcd35495323..4c2f9ad60ad8 100644
--- a/drivers/block/paride/comm.c
+++ b/drivers/ata/pata_parport/comm.c
@@ -8,14 +8,6 @@
use this adapter.
*/
-/* Changes:
-
- 1.01 GRG 1998.05.05 init_proto, release_proto
-
-*/
-
-#define COMM_VERSION "1.01"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -23,8 +15,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
@@ -42,7 +33,7 @@
static int cont_map[2] = { 0x08, 0x10 };
-static int comm_read_regr( PIA *pi, int cont, int regr )
+static int comm_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int l, h, r;
@@ -68,7 +59,7 @@ static int comm_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void comm_write_regr( PIA *pi, int cont, int regr, int val )
+static void comm_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -87,7 +78,7 @@ static void comm_write_regr( PIA *pi, int cont, int regr, int val )
}
}
-static void comm_connect ( PIA *pi )
+static void comm_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
@@ -98,14 +89,14 @@ static void comm_connect ( PIA *pi )
w2(4); w0(0xe0); w2(0xc); w2(0xc); w2(4);
}
-static void comm_disconnect ( PIA *pi )
+static void comm_disconnect(struct pi_adapter *pi)
{ w2(0); w2(0); w2(0); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void comm_read_block( PIA *pi, char * buf, int count )
+static void comm_read_block(struct pi_adapter *pi, char *buf, int count)
{ int i, l, h;
@@ -146,7 +137,7 @@ static void comm_read_block( PIA *pi, char * buf, int count )
/* NB: Watch out for the byte swapped writes ! */
-static void comm_write_block( PIA *pi, char * buf, int count )
+static void comm_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -165,26 +156,26 @@ static void comm_write_block( PIA *pi, char * buf, int count )
break;
case 3: w3(0x48); (void)r1();
- for (k=0;k<count/2;k++) w4w(pi_swab16(buf,k));
+ for (k = 0; k < count / 2; k++)
+ w4w(swab16(((u16 *)buf)[k]));
break;
case 4: w3(0x48); (void)r1();
- for (k=0;k<count/4;k++) w4l(pi_swab32(buf,k));
+ for (k = 0; k < count / 4; k++)
+ w4l(swab16(((u16 *)buf)[2 * k]) |
+ swab16(((u16 *)buf)[2 * k + 1]) << 16);
break;
}
}
-static void comm_log_adapter( PIA *pi, char * scratch, int verbose )
+static void comm_log_adapter(struct pi_adapter *pi)
{ char *mode_string[5] = {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
- printk("%s: comm %s, DataStor Commuter at 0x%x, ",
- pi->device,COMM_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol comm = {
@@ -203,16 +194,5 @@ static struct pi_protocol comm = {
.log_adapter = comm_log_adapter,
};
-static int __init comm_init(void)
-{
- return paride_register(&comm);
-}
-
-static void __exit comm_exit(void)
-{
- paride_unregister(&comm);
-}
-
MODULE_LICENSE("GPL");
-module_init(comm_init)
-module_exit(comm_exit)
+module_pata_parport_driver(comm);
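The old paride pi_swab16()/pi_swab32() helpers are gone; the EPP-16 and EPP-32 write paths here (and in kbic.c below) now spell the byte swapping out with swab16() from <linux/swab.h>. A worked example, assuming a little-endian host (these adapters are classic PC parallel-port peripherals), of what the open-coded expressions produce:

	/*
	 * buf[] = { 0x01, 0x02, 0x03, 0x04 }
	 *
	 * EPP-16: swab16(((u16 *)buf)[0]) == swab16(0x0201) == 0x0102,
	 *         so the 16-bit write puts 0x02 then 0x01 on the wire.
	 *
	 * EPP-32: swab16(0x0201) | swab16(0x0403) << 16 == 0x03040102,
	 *         i.e. bytes 0x02 0x01 0x04 0x03 -- each 16-bit word is
	 *         byte-swapped but the words keep their order, unlike a
	 *         full swab32(), which would give 0x04 0x03 0x02 0x01.
	 */

This is the adapter behaviour the "watch out for the byte swapped writes" comment above warns about.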
diff --git a/drivers/block/paride/dstr.c b/drivers/ata/pata_parport/dstr.c
index accc5c777cbb..2524684be206 100644
--- a/drivers/block/paride/dstr.c
+++ b/drivers/ata/pata_parport/dstr.c
@@ -7,14 +7,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
-
-*/
-
-#define DSTR_VERSION "1.01"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -22,8 +14,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
@@ -44,7 +35,7 @@
static int cont_map[2] = { 0x20, 0x40 };
-static int dstr_read_regr( PIA *pi, int cont, int regr )
+static int dstr_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -71,7 +62,7 @@ static int dstr_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void dstr_write_regr( PIA *pi, int cont, int regr, int val )
+static void dstr_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -98,21 +89,21 @@ static void dstr_write_regr( PIA *pi, int cont, int regr, int val )
w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);w0(0x78);\
w0(x);w2(5);w2(4);
-static void dstr_connect ( PIA *pi )
+static void dstr_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4); CCP(0xe0); w0(0xff);
}
-static void dstr_disconnect ( PIA *pi )
+static void dstr_disconnect(struct pi_adapter *pi)
{ CCP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void dstr_read_block( PIA *pi, char * buf, int count )
+static void dstr_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b;
@@ -154,7 +145,7 @@ static void dstr_read_block( PIA *pi, char * buf, int count )
}
}
-static void dstr_write_block( PIA *pi, char * buf, int count )
+static void dstr_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -190,16 +181,13 @@ static void dstr_write_block( PIA *pi, char * buf, int count )
}
-static void dstr_log_adapter( PIA *pi, char * scratch, int verbose )
+static void dstr_log_adapter(struct pi_adapter *pi)
{ char *mode_string[5] = {"4-bit","8-bit","EPP-8",
"EPP-16","EPP-32"};
- printk("%s: dstr %s, DataStor EP2000 at 0x%x, ",
- pi->device,DSTR_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "DataStor EP2000 at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol dstr = {
@@ -218,16 +206,5 @@ static struct pi_protocol dstr = {
.log_adapter = dstr_log_adapter,
};
-static int __init dstr_init(void)
-{
- return paride_register(&dstr);
-}
-
-static void __exit dstr_exit(void)
-{
- paride_unregister(&dstr);
-}
-
MODULE_LICENSE("GPL");
-module_init(dstr_init)
-module_exit(dstr_exit)
+module_pata_parport_driver(dstr);
diff --git a/drivers/block/paride/epat.c b/drivers/ata/pata_parport/epat.c
index 1bcdff77322e..b146999368ae 100644
--- a/drivers/block/paride/epat.c
+++ b/drivers/ata/pata_parport/epat.c
@@ -9,15 +9,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
- 1.02 Joshua b. Jore CPP(renamed), epat_connect, epat_disconnect
-
-*/
-
-#define EPAT_VERSION "1.02"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -25,8 +16,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0))
#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0))
@@ -44,7 +34,7 @@ MODULE_PARM_DESC(epatc8, "support for the Shuttle EP1284 chip, "
static int cont_map[3] = { 0x18, 0x10, 0 };
-static void epat_write_regr( PIA *pi, int cont, int regr, int val)
+static void epat_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -65,7 +55,7 @@ static void epat_write_regr( PIA *pi, int cont, int regr, int val)
}
}
-static int epat_read_regr( PIA *pi, int cont, int regr )
+static int epat_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -94,7 +84,7 @@ static int epat_read_regr( PIA *pi, int cont, int regr )
return -1; /* never gets here */
}
-static void epat_read_block( PIA *pi, char * buf, int count )
+static void epat_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, ph, a, b;
@@ -159,7 +149,7 @@ static void epat_read_block( PIA *pi, char * buf, int count )
}
}
-static void epat_write_block( PIA *pi, char * buf, int count )
+static void epat_write_block(struct pi_adapter *pi, char *buf, int count)
{ int ph, k;
@@ -210,7 +200,7 @@ static void epat_write_block( PIA *pi, char * buf, int count )
#define CPP(x) w2(4);w0(0x22);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
-static void epat_connect ( PIA *pi )
+static void epat_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
@@ -242,16 +232,17 @@ static void epat_connect ( PIA *pi )
}
}
-static void epat_disconnect (PIA *pi)
+static void epat_disconnect(struct pi_adapter *pi)
{ CPP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static int epat_test_proto( PIA *pi, char * scratch, int verbose )
+static int epat_test_proto(struct pi_adapter *pi)
{ int k, j, f, cc;
int e[2] = {0,0};
+ char scratch[512];
epat_connect(pi);
cc = RR(0xd);
@@ -279,15 +270,13 @@ static int epat_test_proto( PIA *pi, char * scratch, int verbose )
}
epat_disconnect(pi);
- if (verbose) {
- printk("%s: epat: port 0x%x, mode %d, ccr %x, test=(%d,%d,%d)\n",
- pi->device,pi->port,pi->mode,cc,e[0],e[1],f);
- }
+ dev_dbg(&pi->dev, "epat: port 0x%x, mode %d, ccr %x, test=(%d,%d,%d)\n",
+ pi->port, pi->mode, cc, e[0], e[1], f);
return (e[0] && e[1]) || f;
}
-static void epat_log_adapter( PIA *pi, char * scratch, int verbose )
+static void epat_log_adapter(struct pi_adapter *pi)
{ int ver;
char *mode_string[6] =
@@ -298,11 +287,8 @@ static void epat_log_adapter( PIA *pi, char * scratch, int verbose )
ver = RR(0xb);
epat_disconnect(pi);
- printk("%s: epat %s, Shuttle EPAT chip %x at 0x%x, ",
- pi->device,EPAT_VERSION,ver,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "Shuttle EPAT chip %x at 0x%x, mode %d (%s), delay %d\n",
+ ver, pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol epat = {
@@ -324,15 +310,15 @@ static struct pi_protocol epat = {
static int __init epat_init(void)
{
-#ifdef CONFIG_PARIDE_EPATC8
+#ifdef CONFIG_PATA_PARPORT_EPATC8
epatc8 = 1;
#endif
- return paride_register(&epat);
+ return pata_parport_register_driver(&epat);
}
static void __exit epat_exit(void)
{
- paride_unregister(&epat);
+ pata_parport_unregister_driver(&epat);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/epia.c b/drivers/ata/pata_parport/epia.c
index fb0e782d055e..f6db2f79fe99 100644
--- a/drivers/block/paride/epia.c
+++ b/drivers/ata/pata_parport/epia.c
@@ -10,15 +10,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
- 1.02 GRG 1998.06.17 support older versions of EPIA
-
-*/
-
-#define EPIA_VERSION "1.02"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -26,8 +17,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
/* mode codes: 0 nybble reads on port 1, 8-bit writes
1 5/3 reads on ports 1 & 2, 8-bit writes
@@ -46,7 +36,7 @@
static int cont_map[2] = { 0, 0x80 };
-static int epia_read_regr( PIA *pi, int cont, int regr )
+static int epia_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -79,7 +69,7 @@ static int epia_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void epia_write_regr( PIA *pi, int cont, int regr, int val)
+static void epia_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -110,7 +100,7 @@ static void epia_write_regr( PIA *pi, int cont, int regr, int val)
2048 byte reads (the last two being used in the CDrom drivers.
*/
-static void epia_connect ( PIA *pi )
+static void epia_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
@@ -124,7 +114,7 @@ static void epia_connect ( PIA *pi )
WR(0x86,8);
}
-static void epia_disconnect ( PIA *pi )
+static void epia_disconnect(struct pi_adapter *pi)
{ /* WR(0x84,0x10); */
w0(pi->saved_r0);
@@ -133,7 +123,7 @@ static void epia_disconnect ( PIA *pi )
w2(pi->saved_r2);
}
-static void epia_read_block( PIA *pi, char * buf, int count )
+static void epia_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, ph, a, b;
@@ -193,7 +183,7 @@ static void epia_read_block( PIA *pi, char * buf, int count )
}
}
-static void epia_write_block( PIA *pi, char * buf, int count )
+static void epia_write_block(struct pi_adapter *pi, char *buf, int count)
{ int ph, k, last, d;
@@ -234,10 +224,11 @@ static void epia_write_block( PIA *pi, char * buf, int count )
}
-static int epia_test_proto( PIA *pi, char * scratch, int verbose )
+static int epia_test_proto(struct pi_adapter *pi)
{ int j, k, f;
int e[2] = {0,0};
+ char scratch[512];
epia_connect(pi);
for (j=0;j<2;j++) {
@@ -262,26 +253,21 @@ static int epia_test_proto( PIA *pi, char * scratch, int verbose )
WR(0x84,0);
epia_disconnect(pi);
- if (verbose) {
- printk("%s: epia: port 0x%x, mode %d, test=(%d,%d,%d)\n",
- pi->device,pi->port,pi->mode,e[0],e[1],f);
- }
+ dev_dbg(&pi->dev, "epia: port 0x%x, mode %d, test=(%d,%d,%d)\n",
+ pi->port, pi->mode, e[0], e[1], f);
return (e[0] && e[1]) || f;
}
-static void epia_log_adapter( PIA *pi, char * scratch, int verbose )
+static void epia_log_adapter(struct pi_adapter *pi)
{ char *mode_string[6] = {"4-bit","5/3","8-bit",
"EPP-8","EPP-16","EPP-32"};
- printk("%s: epia %s, Shuttle EPIA at 0x%x, ",
- pi->device,EPIA_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "Shuttle EPIA at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol epia = {
@@ -301,16 +287,5 @@ static struct pi_protocol epia = {
.log_adapter = epia_log_adapter,
};
-static int __init epia_init(void)
-{
- return paride_register(&epia);
-}
-
-static void __exit epia_exit(void)
-{
- paride_unregister(&epia);
-}
-
MODULE_LICENSE("GPL");
-module_init(epia_init)
-module_exit(epia_exit)
+module_pata_parport_driver(epia);
diff --git a/drivers/block/paride/fit2.c b/drivers/ata/pata_parport/fit2.c
index 381283753ae4..fd3b2ce426a5 100644
--- a/drivers/block/paride/fit2.c
+++ b/drivers/ata/pata_parport/fit2.c
@@ -13,8 +13,6 @@
*/
-#define FIT2_VERSION "1.0"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -22,8 +20,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
@@ -37,13 +34,13 @@ devices.
*/
-static void fit2_write_regr( PIA *pi, int cont, int regr, int val)
+static void fit2_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ if (cont == 1) return;
w2(0xc); w0(regr); w2(4); w0(val); w2(5); w0(0); w2(4);
}
-static int fit2_read_regr( PIA *pi, int cont, int regr )
+static int fit2_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -61,7 +58,7 @@ static int fit2_read_regr( PIA *pi, int cont, int regr )
}
-static void fit2_read_block( PIA *pi, char * buf, int count )
+static void fit2_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b, c, d;
@@ -87,7 +84,7 @@ static void fit2_read_block( PIA *pi, char * buf, int count )
}
-static void fit2_write_block( PIA *pi, char * buf, int count )
+static void fit2_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -100,23 +97,24 @@ static void fit2_write_block( PIA *pi, char * buf, int count )
w2(4);
}
-static void fit2_connect ( PIA *pi )
+static void fit2_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xcc);
}
-static void fit2_disconnect ( PIA *pi )
+static void fit2_disconnect(struct pi_adapter *pi)
{ w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void fit2_log_adapter( PIA *pi, char * scratch, int verbose )
+static void fit2_log_adapter(struct pi_adapter *pi)
-{ printk("%s: fit2 %s, FIT 2000 adapter at 0x%x, delay %d\n",
- pi->device,FIT2_VERSION,pi->port,pi->delay);
+{
+ dev_info(&pi->dev, "FIT 2000 adapter at 0x%x, delay %d\n",
+ pi->port, pi->delay);
}
@@ -136,16 +134,5 @@ static struct pi_protocol fit2 = {
.log_adapter = fit2_log_adapter,
};
-static int __init fit2_init(void)
-{
- return paride_register(&fit2);
-}
-
-static void __exit fit2_exit(void)
-{
- paride_unregister(&fit2);
-}
-
MODULE_LICENSE("GPL");
-module_init(fit2_init)
-module_exit(fit2_exit)
+module_pata_parport_driver(fit2);
diff --git a/drivers/block/paride/fit3.c b/drivers/ata/pata_parport/fit3.c
index 275d269458eb..75df656ac472 100644
--- a/drivers/block/paride/fit3.c
+++ b/drivers/ata/pata_parport/fit3.c
@@ -17,8 +17,6 @@
*/
-#define FIT3_VERSION "1.0"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -26,8 +24,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0))
@@ -39,7 +36,7 @@
*/
-static void fit3_write_regr( PIA *pi, int cont, int regr, int val)
+static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ if (cont == 1) return;
@@ -59,7 +56,7 @@ static void fit3_write_regr( PIA *pi, int cont, int regr, int val)
}
}
-static int fit3_read_regr( PIA *pi, int cont, int regr )
+static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b;
@@ -92,7 +89,7 @@ static int fit3_read_regr( PIA *pi, int cont, int regr )
}
-static void fit3_read_block( PIA *pi, char * buf, int count )
+static void fit3_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b, c, d;
@@ -131,7 +128,7 @@ static void fit3_read_block( PIA *pi, char * buf, int count )
}
}
-static void fit3_write_block( PIA *pi, char * buf, int count )
+static void fit3_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -152,7 +149,7 @@ static void fit3_write_block( PIA *pi, char * buf, int count )
}
}
-static void fit3_connect ( PIA *pi )
+static void fit3_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
@@ -162,22 +159,19 @@ static void fit3_connect ( PIA *pi )
}
}
-static void fit3_disconnect ( PIA *pi )
+static void fit3_disconnect(struct pi_adapter *pi)
{ w2(0xc); w0(0xa); w2(0x8); w2(0xc);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void fit3_log_adapter( PIA *pi, char * scratch, int verbose )
+static void fit3_log_adapter(struct pi_adapter *pi)
{ char *mode_string[3] = {"4-bit","8-bit","EPP"};
- printk("%s: fit3 %s, FIT 3000 adapter at 0x%x, "
- "mode %d (%s), delay %d\n",
- pi->device,FIT3_VERSION,pi->port,
- pi->mode,mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "FIT 3000 adapter at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol fit3 = {
@@ -196,16 +190,5 @@ static struct pi_protocol fit3 = {
.log_adapter = fit3_log_adapter,
};
-static int __init fit3_init(void)
-{
- return paride_register(&fit3);
-}
-
-static void __exit fit3_exit(void)
-{
- paride_unregister(&fit3);
-}
-
MODULE_LICENSE("GPL");
-module_init(fit3_init)
-module_exit(fit3_exit)
+module_pata_parport_driver(fit3);
diff --git a/drivers/block/paride/friq.c b/drivers/ata/pata_parport/friq.c
index 4f2ba244689b..1647264cd9a8 100644
--- a/drivers/block/paride/friq.c
+++ b/drivers/ata/pata_parport/friq.c
@@ -20,13 +20,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.12.20 Added support for soft power switch
-*/
-
-#define FRIQ_VERSION "1.01"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -34,8 +27,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\
w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x);
@@ -48,7 +40,7 @@
static int cont_map[2] = { 0x08, 0x10 };
-static int friq_read_regr( PIA *pi, int cont, int regr )
+static int friq_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int h,l,r;
@@ -63,7 +55,7 @@ static int friq_read_regr( PIA *pi, int cont, int regr )
}
-static void friq_write_regr( PIA *pi, int cont, int regr, int val)
+static void friq_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -74,7 +66,7 @@ static void friq_write_regr( PIA *pi, int cont, int regr, int val)
w2(5);w2(7);w2(5);w2(4);
}
-static void friq_read_block_int( PIA *pi, char * buf, int count, int regr )
+static void friq_read_block_int(struct pi_adapter *pi, char *buf, int count, int regr)
{ int h, l, k, ph;
@@ -129,12 +121,12 @@ static void friq_read_block_int( PIA *pi, char * buf, int count, int regr )
}
}
-static void friq_read_block( PIA *pi, char * buf, int count)
+static void friq_read_block(struct pi_adapter *pi, char *buf, int count)
{ friq_read_block_int(pi,buf,count,0x08);
}
-static void friq_write_block( PIA *pi, char * buf, int count )
+static void friq_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -166,24 +158,25 @@ static void friq_write_block( PIA *pi, char * buf, int count )
}
}
-static void friq_connect ( PIA *pi )
+static void friq_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
-static void friq_disconnect ( PIA *pi )
+static void friq_disconnect(struct pi_adapter *pi)
{ CMD(0x20);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static int friq_test_proto( PIA *pi, char * scratch, int verbose )
+static int friq_test_proto(struct pi_adapter *pi)
{ int j, k, r;
int e[2] = {0,0};
+ char scratch[512];
pi->saved_r0 = r0();
w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */
@@ -207,24 +200,20 @@ static int friq_test_proto( PIA *pi, char * scratch, int verbose )
for (k=0;k<128;k++) if (scratch[k] != k) r++;
friq_disconnect(pi);
- if (verbose) {
- printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n",
- pi->device,pi->port,pi->mode,e[0],e[1],r);
- }
+ dev_dbg(&pi->dev, "friq: port 0x%x, mode %d, test=(%d,%d,%d)\n",
+ pi->port, pi->mode, e[0], e[1], r);
return (r || (e[0] && e[1]));
}
-static void friq_log_adapter( PIA *pi, char * scratch, int verbose )
+static void friq_log_adapter(struct pi_adapter *pi)
{ char *mode_string[6] = {"4-bit","8-bit",
"EPP-8","EPP-16","EPP-32"};
- printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device,
- FRIQ_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
+ dev_info(&pi->dev, "Freecom IQ ASIC-2 adapter at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
pi->private = 1;
friq_connect(pi);
@@ -233,7 +222,7 @@ static void friq_log_adapter( PIA *pi, char * scratch, int verbose )
}
-static void friq_release_proto( PIA *pi)
+static void friq_release_proto(struct pi_adapter *pi)
{
if (pi->private) { /* turn off the power */
friq_connect(pi);
@@ -261,16 +250,5 @@ static struct pi_protocol friq = {
.release_proto = friq_release_proto,
};
-static int __init friq_init(void)
-{
- return paride_register(&friq);
-}
-
-static void __exit friq_exit(void)
-{
- paride_unregister(&friq);
-}
-
MODULE_LICENSE("GPL");
-module_init(friq_init)
-module_exit(friq_exit)
+module_pata_parport_driver(friq);
diff --git a/drivers/block/paride/frpw.c b/drivers/ata/pata_parport/frpw.c
index c3cde364603a..3ec0abf16fa6 100644
--- a/drivers/block/paride/frpw.c
+++ b/drivers/ata/pata_parport/frpw.c
@@ -13,18 +13,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
- fix chip detect
- added EPP-16 and EPP-32
- 1.02 GRG 1998.09.23 added hard reset to initialisation process
- 1.03 GRG 1998.12.14 made hard reset conditional
-
-*/
-
-#define FRPW_VERSION "1.03"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -32,8 +20,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define cec4 w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4);
#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0))
@@ -44,7 +31,7 @@
static int cont_map[2] = { 0x08, 0x10 };
-static int frpw_read_regr( PIA *pi, int cont, int regr )
+static int frpw_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int h,l,r;
@@ -60,7 +47,7 @@ static int frpw_read_regr( PIA *pi, int cont, int regr )
}
-static void frpw_write_regr( PIA *pi, int cont, int regr, int val)
+static void frpw_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -71,7 +58,7 @@ static void frpw_write_regr( PIA *pi, int cont, int regr, int val)
w2(5);w2(7);w2(5);w2(4);
}
-static void frpw_read_block_int( PIA *pi, char * buf, int count, int regr )
+static void frpw_read_block_int(struct pi_adapter *pi, char *buf, int count, int regr)
{ int h, l, k, ph;
@@ -132,12 +119,12 @@ static void frpw_read_block_int( PIA *pi, char * buf, int count, int regr )
}
}
-static void frpw_read_block( PIA *pi, char * buf, int count)
+static void frpw_read_block(struct pi_adapter *pi, char *buf, int count)
{ frpw_read_block_int(pi,buf,count,0x08);
}
-static void frpw_write_block( PIA *pi, char * buf, int count )
+static void frpw_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -170,14 +157,14 @@ static void frpw_write_block( PIA *pi, char * buf, int count )
}
}
-static void frpw_connect ( PIA *pi )
+static void frpw_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
-static void frpw_disconnect ( PIA *pi )
+static void frpw_disconnect(struct pi_adapter *pi)
{ w2(4); w0(0x20); cec4;
w0(pi->saved_r0);
@@ -188,7 +175,7 @@ static void frpw_disconnect ( PIA *pi )
between the Xilinx and ASIC implementations of the Freecom adapter.
*/
-static int frpw_test_pnp ( PIA *pi )
+static int frpw_test_pnp(struct pi_adapter *pi)
/* returns chip_type: 0 = Xilinx, 1 = ASIC */
@@ -221,25 +208,22 @@ static int frpw_test_pnp ( PIA *pi )
a hack :-(
*/
-static int frpw_test_proto( PIA *pi, char * scratch, int verbose )
+static int frpw_test_proto(struct pi_adapter *pi)
{ int j, k, r;
int e[2] = {0,0};
+ char scratch[512];
if ((pi->private>>1) != pi->port)
pi->private = frpw_test_pnp(pi) + 2*pi->port;
if (((pi->private%2) == 0) && (pi->mode > 2)) {
- if (verbose)
- printk("%s: frpw: Xilinx does not support mode %d\n",
- pi->device, pi->mode);
+ dev_dbg(&pi->dev, "frpw: Xilinx does not support mode %d\n", pi->mode);
return 1;
}
if (((pi->private%2) == 1) && (pi->mode == 2)) {
- if (verbose)
- printk("%s: frpw: ASIC does not support mode 2\n",
- pi->device);
+ dev_dbg(&pi->dev, "frpw: ASIC does not support mode 2\n");
return 1;
}
@@ -260,25 +244,21 @@ static int frpw_test_proto( PIA *pi, char * scratch, int verbose )
for (k=0;k<128;k++) if (scratch[k] != k) r++;
frpw_disconnect(pi);
- if (verbose) {
- printk("%s: frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n",
- pi->device,pi->port,(pi->private%2),pi->mode,e[0],e[1],r);
- }
+ dev_dbg(&pi->dev, "frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n",
+ pi->port, (pi->private%2), pi->mode, e[0], e[1], r);
return (r || (e[0] && e[1]));
}
-static void frpw_log_adapter( PIA *pi, char * scratch, int verbose )
+static void frpw_log_adapter(struct pi_adapter *pi)
{ char *mode_string[6] = {"4-bit","8-bit","EPP",
"EPP-8","EPP-16","EPP-32"};
- printk("%s: frpw %s, Freecom (%s) adapter at 0x%x, ", pi->device,
- FRPW_VERSION,((pi->private%2) == 0)?"Xilinx":"ASIC",pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "Freecom (%s) adapter at 0x%x, mode %d (%s), delay %d\n",
+ ((pi->private % 2) == 0) ? "Xilinx" : "ASIC",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol frpw = {
@@ -298,16 +278,5 @@ static struct pi_protocol frpw = {
.log_adapter = frpw_log_adapter,
};
-static int __init frpw_init(void)
-{
- return paride_register(&frpw);
-}
-
-static void __exit frpw_exit(void)
-{
- paride_unregister(&frpw);
-}
-
MODULE_LICENSE("GPL");
-module_init(frpw_init)
-module_exit(frpw_exit)
+module_pata_parport_driver(frpw);
diff --git a/drivers/block/paride/kbic.c b/drivers/ata/pata_parport/kbic.c
index 35999c415ee3..8213e62f8f00 100644
--- a/drivers/block/paride/kbic.c
+++ b/drivers/ata/pata_parport/kbic.c
@@ -12,14 +12,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
-
-*/
-
-#define KBIC_VERSION "1.01"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -27,8 +19,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define r12w() (delay_p,inw(pi->port+1)&0xffff)
@@ -42,7 +33,7 @@
static int cont_map[2] = { 0x80, 0x40 };
-static int kbic_read_regr( PIA *pi, int cont, int regr )
+static int kbic_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, s;
@@ -72,7 +63,7 @@ static int kbic_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void kbic_write_regr( PIA *pi, int cont, int regr, int val)
+static void kbic_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int s;
@@ -96,14 +87,14 @@ static void kbic_write_regr( PIA *pi, int cont, int regr, int val)
}
}
-static void k951_connect ( PIA *pi )
+static void k951_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
-static void k951_disconnect ( PIA *pi )
+static void k951_disconnect(struct pi_adapter *pi)
{ w0(pi->saved_r0);
w2(pi->saved_r2);
@@ -112,7 +103,7 @@ static void k951_disconnect ( PIA *pi )
#define CCP(x) w2(0xc4);w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);\
w0(0x78);w0(x);w2(0xc5);w2(0xc4);w0(0xff);
-static void k971_connect ( PIA *pi )
+static void k971_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
@@ -120,7 +111,7 @@ static void k971_connect ( PIA *pi )
w2(4);
}
-static void k971_disconnect ( PIA *pi )
+static void k971_disconnect(struct pi_adapter *pi)
{ CCP(0x30);
w0(pi->saved_r0);
@@ -131,7 +122,7 @@ static void k971_disconnect ( PIA *pi )
have this property.
*/
-static void kbic_read_block( PIA *pi, char * buf, int count )
+static void kbic_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b;
@@ -189,7 +180,7 @@ static void kbic_read_block( PIA *pi, char * buf, int count )
}
}
-static void kbic_write_block( PIA *pi, char * buf, int count )
+static void kbic_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -213,12 +204,15 @@ static void kbic_write_block( PIA *pi, char * buf, int count )
break;
case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
- for(k=0;k<count/2;k++) w4w(pi_swab16(buf,k));
+ for (k = 0; k < count / 2; k++)
+ w4w(swab16(((u16 *)buf)[k]));
w2(4); w2(0); w2(4);
break;
case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
- for(k=0;k<count/4;k++) w4l(pi_swab32(buf,k));
+ for (k = 0; k < count / 4; k++)
+ w4l(swab16(((u16 *)buf)[2 * k]) |
+ swab16(((u16 *)buf)[2 * k + 1]) << 16);
w2(4); w2(0); w2(4);
break;
@@ -226,27 +220,23 @@ static void kbic_write_block( PIA *pi, char * buf, int count )
}
-static void kbic_log_adapter( PIA *pi, char * scratch,
- int verbose, char * chip )
+static void kbic_log_adapter(struct pi_adapter *pi, char *chip)
{ char *mode_string[6] = {"4-bit","5/3","8-bit",
"EPP-8","EPP_16","EPP-32"};
- printk("%s: kbic %s, KingByte %s at 0x%x, ",
- pi->device,KBIC_VERSION,chip,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "KingByte %s at 0x%x, mode %d (%s), delay %d\n",
+ chip, pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
-static void k951_log_adapter( PIA *pi, char * scratch, int verbose )
-
-{ kbic_log_adapter(pi,scratch,verbose,"KBIC-951A");
+static void k951_log_adapter(struct pi_adapter *pi)
+{
+ kbic_log_adapter(pi, "KBIC-951A");
}
-static void k971_log_adapter( PIA *pi, char * scratch, int verbose )
-
-{ kbic_log_adapter(pi,scratch,verbose,"KBIC-971A");
+static void k971_log_adapter(struct pi_adapter *pi)
+{
+ kbic_log_adapter(pi, "KBIC-971A");
}
static struct pi_protocol k951 = {
@@ -285,19 +275,19 @@ static int __init kbic_init(void)
{
int rv;
- rv = paride_register(&k951);
+ rv = pata_parport_register_driver(&k951);
if (rv < 0)
return rv;
- rv = paride_register(&k971);
+ rv = pata_parport_register_driver(&k971);
if (rv < 0)
- paride_unregister(&k951);
+ pata_parport_unregister_driver(&k951);
return rv;
}
static void __exit kbic_exit(void)
{
- paride_unregister(&k951);
- paride_unregister(&k971);
+ pata_parport_unregister_driver(&k951);
+ pata_parport_unregister_driver(&k971);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/ktti.c b/drivers/ata/pata_parport/ktti.c
index 117ab0e8ccf0..4890b1f12348 100644
--- a/drivers/block/paride/ktti.c
+++ b/drivers/ata/pata_parport/ktti.c
@@ -9,8 +9,6 @@
*/
-#define KTTI_VERSION "1.0"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -18,8 +16,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
@@ -29,7 +26,7 @@
static int cont_map[2] = { 0x10, 0x08 };
-static void ktti_write_regr( PIA *pi, int cont, int regr, int val)
+static void ktti_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -39,7 +36,7 @@ static void ktti_write_regr( PIA *pi, int cont, int regr, int val)
w0(val); w2(3); w0(0); w2(6); w2(0xb);
}
-static int ktti_read_regr( PIA *pi, int cont, int regr )
+static int ktti_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -51,7 +48,7 @@ static int ktti_read_regr( PIA *pi, int cont, int regr )
}
-static void ktti_read_block( PIA *pi, char * buf, int count )
+static void ktti_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b;
@@ -64,7 +61,7 @@ static void ktti_read_block( PIA *pi, char * buf, int count )
}
}
-static void ktti_write_block( PIA *pi, char * buf, int count )
+static void ktti_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -76,25 +73,25 @@ static void ktti_write_block( PIA *pi, char * buf, int count )
}
}
-static void ktti_connect ( PIA *pi )
+static void ktti_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xb); w2(0xa); w0(0); w2(3); w2(6);
}
-static void ktti_disconnect ( PIA *pi )
+static void ktti_disconnect(struct pi_adapter *pi)
{ w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void ktti_log_adapter( PIA *pi, char * scratch, int verbose )
-
-{ printk("%s: ktti %s, KT adapter at 0x%x, delay %d\n",
- pi->device,KTTI_VERSION,pi->port,pi->delay);
+static void ktti_log_adapter(struct pi_adapter *pi)
+{
+ dev_info(&pi->dev, "KT adapter at 0x%x, delay %d\n",
+ pi->port, pi->delay);
}
static struct pi_protocol ktti = {
@@ -113,16 +110,5 @@ static struct pi_protocol ktti = {
.log_adapter = ktti_log_adapter,
};
-static int __init ktti_init(void)
-{
- return paride_register(&ktti);
-}
-
-static void __exit ktti_exit(void)
-{
- paride_unregister(&ktti);
-}
-
MODULE_LICENSE("GPL");
-module_init(ktti_init)
-module_exit(ktti_exit)
+module_pata_parport_driver(ktti);
diff --git a/drivers/block/paride/on20.c b/drivers/ata/pata_parport/on20.c
index 0173697a1a4d..276ace12d490 100644
--- a/drivers/block/paride/on20.c
+++ b/drivers/ata/pata_parport/on20.c
@@ -6,14 +6,6 @@
Onspec 90c20 parallel to IDE adapter.
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
-
-*/
-
-#define ON20_VERSION "1.01"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -21,8 +13,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
#define op(f) w2(4);w0(f);w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
#define vl(v) w2(4);w0(v);w2(5);w2(7);w2(5);w2(4);
@@ -33,7 +24,7 @@
cont = 1 - access the IDE command set
*/
-static int on20_read_regr( PIA *pi, int cont, int regr )
+static int on20_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int h,l, r ;
@@ -56,7 +47,7 @@ static int on20_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void on20_write_regr( PIA *pi, int cont, int regr, int val )
+static void on20_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -67,7 +58,7 @@ static void on20_write_regr( PIA *pi, int cont, int regr, int val )
op(0); vl(val);
}
-static void on20_connect ( PIA *pi)
+static void on20_connect(struct pi_adapter *pi)
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
@@ -77,14 +68,14 @@ static void on20_connect ( PIA *pi)
else { op(2); vl(0); op(2); vl(8); }
}
-static void on20_disconnect ( PIA *pi )
+static void on20_disconnect(struct pi_adapter *pi)
{ w2(4);w0(7);w2(4);w2(0xc);w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
-static void on20_read_block( PIA *pi, char * buf, int count )
+static void on20_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, l, h;
@@ -101,7 +92,7 @@ static void on20_read_block( PIA *pi, char * buf, int count )
w2(4);
}
-static void on20_write_block( PIA *pi, char * buf, int count )
+static void on20_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -111,15 +102,12 @@ static void on20_write_block( PIA *pi, char * buf, int count )
w2(4);
}
-static void on20_log_adapter( PIA *pi, char * scratch, int verbose )
+static void on20_log_adapter(struct pi_adapter *pi)
{ char *mode_string[2] = {"4-bit","8-bit"};
- printk("%s: on20 %s, OnSpec 90c20 at 0x%x, ",
- pi->device,ON20_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "OnSpec 90c20 at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol on20 = {
@@ -138,16 +126,5 @@ static struct pi_protocol on20 = {
.log_adapter = on20_log_adapter,
};
-static int __init on20_init(void)
-{
- return paride_register(&on20);
-}
-
-static void __exit on20_exit(void)
-{
- paride_unregister(&on20);
-}
-
MODULE_LICENSE("GPL");
-module_init(on20_init)
-module_exit(on20_exit)
+module_pata_parport_driver(on20);
diff --git a/drivers/block/paride/on26.c b/drivers/ata/pata_parport/on26.c
index 95ba256921f2..dc47a54b121f 100644
--- a/drivers/block/paride/on26.c
+++ b/drivers/ata/pata_parport/on26.c
@@ -7,17 +7,6 @@
*/
-/* Changes:
-
- 1.01 GRG 1998.05.06 init_proto, release_proto
- 1.02 GRG 1998.09.23 updates for the -E rev chip
- 1.03 GRG 1998.12.14 fix for slave drives
- 1.04 GRG 1998.12.20 yet another bug fix
-
-*/
-
-#define ON26_VERSION "1.04"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -25,8 +14,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
-
-#include "paride.h"
+#include "pata_parport.h"
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
@@ -44,7 +32,7 @@
cont = 1 - access the IDE command set
*/
-static int on26_read_regr( PIA *pi, int cont, int regr )
+static int on26_read_regr(struct pi_adapter *pi, int cont, int regr)
{ int a, b, r;
@@ -73,7 +61,7 @@ static int on26_read_regr( PIA *pi, int cont, int regr )
return -1;
}
-static void on26_write_regr( PIA *pi, int cont, int regr, int val )
+static void on26_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{ int r;
@@ -99,7 +87,7 @@ static void on26_write_regr( PIA *pi, int cont, int regr, int val )
#define CCP(x) w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
-static void on26_connect ( PIA *pi )
+static void on26_connect(struct pi_adapter *pi)
{ int x;
@@ -113,7 +101,7 @@ static void on26_connect ( PIA *pi )
w0(2); P1; w0(x); P2;
}
-static void on26_disconnect ( PIA *pi )
+static void on26_disconnect(struct pi_adapter *pi)
{ if (pi->mode >= 2) { w3(4); w3(4); w3(4); w3(4); }
else { w0(4); P1; w0(4); P1; }
@@ -124,7 +112,7 @@ static void on26_disconnect ( PIA *pi )
#define RESET_WAIT 200
-static int on26_test_port( PIA *pi) /* hard reset */
+static int on26_test_port(struct pi_adapter *pi) /* hard reset */
{ int i, m, d, x=0, y=0;
@@ -167,7 +155,7 @@ static int on26_test_port( PIA *pi) /* hard reset */
}
if (i == RESET_WAIT)
- printk("on26: Device reset failed (%x,%x)\n",x,y);
+ dev_err(&pi->dev, "on26: Device reset failed (%x,%x)\n", x, y);
w0(4); P1; w0(4); P1;
}
@@ -183,7 +171,7 @@ static int on26_test_port( PIA *pi) /* hard reset */
}
-static void on26_read_block( PIA *pi, char * buf, int count )
+static void on26_read_block(struct pi_adapter *pi, char *buf, int count)
{ int k, a, b;
@@ -232,7 +220,7 @@ static void on26_read_block( PIA *pi, char * buf, int count )
}
}
-static void on26_write_block( PIA *pi, char * buf, int count )
+static void on26_write_block(struct pi_adapter *pi, char *buf, int count)
{ int k;
@@ -275,16 +263,13 @@ static void on26_write_block( PIA *pi, char * buf, int count )
}
-static void on26_log_adapter( PIA *pi, char * scratch, int verbose )
+static void on26_log_adapter(struct pi_adapter *pi)
{ char *mode_string[5] = {"4-bit","8-bit","EPP-8",
"EPP-16","EPP-32"};
- printk("%s: on26 %s, OnSpec 90c26 at 0x%x, ",
- pi->device,ON26_VERSION,pi->port);
- printk("mode %d (%s), delay %d\n",pi->mode,
- mode_string[pi->mode],pi->delay);
-
+ dev_info(&pi->dev, "OnSpec 90c26 at 0x%x, mode %d (%s), delay %d\n",
+ pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol on26 = {
@@ -304,16 +289,5 @@ static struct pi_protocol on26 = {
.log_adapter = on26_log_adapter,
};
-static int __init on26_init(void)
-{
- return paride_register(&on26);
-}
-
-static void __exit on26_exit(void)
-{
- paride_unregister(&on26);
-}
-
MODULE_LICENSE("GPL");
-module_init(on26_init)
-module_exit(on26_exit)
+module_pata_parport_driver(on26);
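Before the new core file below, a summary of the protocol driver interface that all of the conversions above target: every callback now takes a struct pi_adapter *, and the scratch/verbose parameters of the old PIA-based prototypes are gone. The listing is assembled from the conversions in this patch; the authoritative definition is in drivers/ata/pata_parport/pata_parport.h (not part of this section), so field order and any members not exercised above are an assumption:

	/* assembled from the callbacks used in this patch; layout is a sketch */
	struct pi_protocol {
		struct module *owner;
		struct device_driver driver;
		char *name;
		int max_mode;
		int epp_first;		/* modes >= this use EPP (need 8 ports) */
		int default_delay;
		int max_units;
		void (*write_regr)(struct pi_adapter *pi, int cont, int regr, int val);
		int (*read_regr)(struct pi_adapter *pi, int cont, int regr);
		void (*write_block)(struct pi_adapter *pi, char *buf, int count);
		void (*read_block)(struct pi_adapter *pi, char *buf, int count);
		void (*connect)(struct pi_adapter *pi);
		void (*disconnect)(struct pi_adapter *pi);
		int (*test_port)(struct pi_adapter *pi);
		int (*probe_unit)(struct pi_adapter *pi);
		int (*test_proto)(struct pi_adapter *pi);
		void (*log_adapter)(struct pi_adapter *pi);
		int (*init_proto)(struct pi_adapter *pi);
		void (*release_proto)(struct pi_adapter *pi);
	};

The core below calls these through pi->proto: connect/disconnect around every claimed parport session, write_regr/read_regr for taskfile access, and read_block/write_block for PIO data transfer.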
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
new file mode 100644
index 000000000000..1af64d435d3c
--- /dev/null
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Ondrej Zary
+ * based on paride.c by Grant R. Guenther <grant@torque.net>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/parport.h>
+#include "pata_parport.h"
+
+#define DRV_NAME "pata_parport"
+
+static DEFINE_IDR(parport_list);
+static DEFINE_IDR(protocols);
+static DEFINE_IDA(pata_parport_bus_dev_ids);
+static DEFINE_MUTEX(pi_mutex);
+
+static bool probe = true;
+module_param(probe, bool, 0644);
+MODULE_PARM_DESC(probe, "Enable automatic device probing (0=off, 1=on [default])");
+
+/*
+ * libata drivers cannot sleep so this driver claims parport before activating
+ * the ata host and keeps it claimed (and protocol connected) until the ata
+ * host is removed. Unfortunately, this means that you cannot use any chained
+ * devices (neither other pata_parport devices nor a printer).
+ */
+static void pi_connect(struct pi_adapter *pi)
+{
+ parport_claim_or_block(pi->pardev);
+ pi->proto->connect(pi);
+}
+
+static void pi_disconnect(struct pi_adapter *pi)
+{
+ pi->proto->disconnect(pi);
+ parport_release(pi->pardev);
+}
+
+static void pata_parport_dev_select(struct ata_port *ap, unsigned int device)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+ u8 tmp;
+
+ if (device == 0)
+ tmp = ATA_DEVICE_OBS;
+ else
+ tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+ pi->proto->write_regr(pi, 0, ATA_REG_DEVICE, tmp);
+ ata_sff_pause(ap);
+}
+
+static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+ u8 nsect, lbal;
+
+ pata_parport_dev_select(ap, device);
+
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
+
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55);
+
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
+
+ nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+ lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+
+ return (nsect == 0x55) && (lbal == 0xaa);
+}
+
+static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ /* software reset. causes dev0 to be selected */
+ pi->proto->write_regr(pi, 1, 6, ap->ctl);
+ udelay(20);
+ pi->proto->write_regr(pi, 1, 6, ap->ctl | ATA_SRST);
+ udelay(20);
+ pi->proto->write_regr(pi, 1, 6, ap->ctl);
+ ap->last_ctl = ap->ctl;
+
+ /* wait the port to become ready */
+ return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+static int pata_parport_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0/1 are present */
+ if (pata_parport_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (pata_parport_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ pata_parport_dev_select(ap, 0);
+
+ /* issue bus reset */
+ rc = pata_parport_bus_softreset(ap, devmask, deadline);
+ if (rc && rc != -ENODEV) {
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&link->device[0],
+ devmask & (1 << 0), &err);
+ if (err != 0x81)
+ classes[1] = ata_sff_dev_classify(&link->device[1],
+ devmask & (1 << 1), &err);
+
+ return 0;
+}
+
+static u8 pata_parport_check_status(struct ata_port *ap)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ return pi->proto->read_regr(pi, 0, ATA_REG_STATUS);
+}
+
+static u8 pata_parport_check_altstatus(struct ata_port *ap)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ return pi->proto->read_regr(pi, 1, 6);
+}
+
+static void pata_parport_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ if (tf->ctl != ap->last_ctl) {
+ pi->proto->write_regr(pi, 1, 6, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (tf->flags & ATA_TFLAG_ISADDR) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ pi->proto->write_regr(pi, 0, ATA_REG_FEATURE,
+ tf->hob_feature);
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT,
+ tf->hob_nsect);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL,
+ tf->hob_lbal);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAM,
+ tf->hob_lbam);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAH,
+ tf->hob_lbah);
+ }
+ pi->proto->write_regr(pi, 0, ATA_REG_FEATURE, tf->feature);
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, tf->nsect);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, tf->lbal);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAM, tf->lbam);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAH, tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ pi->proto->write_regr(pi, 0, ATA_REG_DEVICE, tf->device);
+
+ ata_wait_idle(ap);
+}
+
+static void pata_parport_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ tf->status = pi->proto->read_regr(pi, 0, ATA_REG_STATUS);
+ tf->error = pi->proto->read_regr(pi, 0, ATA_REG_ERR);
+ tf->nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+ tf->lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+ tf->lbam = pi->proto->read_regr(pi, 0, ATA_REG_LBAM);
+ tf->lbah = pi->proto->read_regr(pi, 0, ATA_REG_LBAH);
+ tf->device = pi->proto->read_regr(pi, 0, ATA_REG_DEVICE);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ pi->proto->write_regr(pi, 1, 6, tf->ctl | ATA_HOB);
+ tf->hob_feature = pi->proto->read_regr(pi, 0, ATA_REG_ERR);
+ tf->hob_nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+ tf->hob_lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+ tf->hob_lbam = pi->proto->read_regr(pi, 0, ATA_REG_LBAM);
+ tf->hob_lbah = pi->proto->read_regr(pi, 0, ATA_REG_LBAH);
+ pi->proto->write_regr(pi, 1, 6, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+static void pata_parport_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ pi->proto->write_regr(pi, 0, ATA_REG_CMD, tf->command);
+ ata_sff_pause(ap);
+}
+
+static unsigned int pata_parport_data_xfer(struct ata_queued_cmd *qc,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct ata_port *ap = qc->dev->link->ap;
+ struct pi_adapter *pi = ap->host->private_data;
+
+ if (rw == READ)
+ pi->proto->read_block(pi, buf, buflen);
+ else
+ pi->proto->write_block(pi, buf, buflen);
+
+ return buflen;
+}
+
+static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
+{
+ int count;
+ struct ata_port *ap;
+ struct pi_adapter *pi;
+ char junk[2];
+
+ /* We only need to flush incoming data when a command was running */
+ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+ return;
+
+ ap = qc->ap;
+ pi = ap->host->private_data;
+ /* Drain up to 64K of data before we give up this recovery method */
+ for (count = 0; (pata_parport_check_status(ap) & ATA_DRQ)
+ && count < 65536; count += 2) {
+ pi->proto->read_block(pi, junk, 2);
+ }
+
+ if (count)
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static struct ata_port_operations pata_parport_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .softreset = pata_parport_softreset,
+ .hardreset = NULL,
+
+ .sff_dev_select = pata_parport_dev_select,
+ .sff_check_status = pata_parport_check_status,
+ .sff_check_altstatus = pata_parport_check_altstatus,
+ .sff_tf_load = pata_parport_tf_load,
+ .sff_tf_read = pata_parport_tf_read,
+ .sff_exec_command = pata_parport_exec_command,
+ .sff_data_xfer = pata_parport_data_xfer,
+ .sff_drain_fifo = pata_parport_drain_fifo,
+};
+
+static const struct ata_port_info pata_parport_port_info = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
+ .pio_mask = ATA_PIO0,
+ /* No DMA */
+ .port_ops = &pata_parport_port_ops,
+};
+
+static void pi_release(struct pi_adapter *pi)
+{
+ parport_unregister_device(pi->pardev);
+ if (pi->proto->release_proto)
+ pi->proto->release_proto(pi);
+ module_put(pi->proto->owner);
+}
+
+static int default_test_proto(struct pi_adapter *pi)
+{
+ int j, k;
+ int e[2] = { 0, 0 };
+
+ pi->proto->connect(pi);
+
+ for (j = 0; j < 2; j++) {
+ pi->proto->write_regr(pi, 0, 6, 0xa0 + j * 0x10);
+ for (k = 0; k < 256; k++) {
+ pi->proto->write_regr(pi, 0, 2, k ^ 0xaa);
+ pi->proto->write_regr(pi, 0, 3, k ^ 0x55);
+ if (pi->proto->read_regr(pi, 0, 2) != (k ^ 0xaa))
+ e[j]++;
+ }
+ }
+ pi->proto->disconnect(pi);
+
+ dev_dbg(&pi->dev, "%s: port 0x%x, mode %d, test=(%d,%d)\n",
+ pi->proto->name, pi->port, pi->mode, e[0], e[1]);
+
+ return e[0] && e[1]; /* not here if both > 0 */
+}
+
+static int pi_test_proto(struct pi_adapter *pi)
+{
+ int res;
+
+ parport_claim_or_block(pi->pardev);
+ if (pi->proto->test_proto)
+ res = pi->proto->test_proto(pi);
+ else
+ res = default_test_proto(pi);
+ parport_release(pi->pardev);
+
+ return res;
+}
+
+static bool pi_probe_mode(struct pi_adapter *pi, int max)
+{
+ int best, range;
+
+ if (pi->mode != -1) {
+ if (pi->mode >= max)
+ return false;
+ range = 3;
+ if (pi->mode >= pi->proto->epp_first)
+ range = 8;
+ if (range == 8 && pi->port % 8)
+ return false;
+ return !pi_test_proto(pi);
+ }
+ best = -1;
+ for (pi->mode = 0; pi->mode < max; pi->mode++) {
+ range = 3;
+ if (pi->mode >= pi->proto->epp_first)
+ range = 8;
+ if (range == 8 && pi->port % 8)
+ break;
+ if (!pi_test_proto(pi))
+ best = pi->mode;
+ }
+ pi->mode = best;
+ return best > -1;
+}
+
+static bool pi_probe_unit(struct pi_adapter *pi, int unit)
+{
+ int max, s, e;
+
+ s = unit;
+ e = s + 1;
+
+ if (s == -1) {
+ s = 0;
+ e = pi->proto->max_units;
+ }
+
+ if (pi->proto->test_port) {
+ parport_claim_or_block(pi->pardev);
+ max = pi->proto->test_port(pi);
+ parport_release(pi->pardev);
+ } else {
+ max = pi->proto->max_mode;
+ }
+
+ if (pi->proto->probe_unit) {
+ parport_claim_or_block(pi->pardev);
+ for (pi->unit = s; pi->unit < e; pi->unit++) {
+ if (pi->proto->probe_unit(pi)) {
+ parport_release(pi->pardev);
+ return pi_probe_mode(pi, max);
+ }
+ }
+ parport_release(pi->pardev);
+ return false;
+ }
+
+ return pi_probe_mode(pi, max);
+}
+
+static void pata_parport_dev_release(struct device *dev)
+{
+ struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
+
+ ida_free(&pata_parport_bus_dev_ids, dev->id);
+ kfree(pi);
+}
+
+static void pata_parport_bus_release(struct device *dev)
+{
+ /* nothing to do here but required to avoid warning on device removal */
+}
+
+static struct bus_type pata_parport_bus_type = {
+ .name = DRV_NAME,
+};
+
+static struct device pata_parport_bus = {
+ .init_name = DRV_NAME,
+ .release = pata_parport_bus_release,
+};
+
+static const struct scsi_host_template pata_parport_sht = {
+ PATA_PARPORT_SHT("pata_parport")
+};
+
+struct pi_device_match {
+ struct parport *parport;
+ struct pi_protocol *proto;
+};
+
+static int pi_find_dev(struct device *dev, void *data)
+{
+ struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
+ struct pi_device_match *match = data;
+
+ return pi->pardev->port == match->parport && pi->proto == match->proto;
+}
+
+static struct pi_adapter *pi_init_one(struct parport *parport,
+ struct pi_protocol *pr, int mode, int unit, int delay)
+{
+ struct pardev_cb par_cb = { };
+ const struct ata_port_info *ppi[] = { &pata_parport_port_info };
+ struct ata_host *host;
+ struct pi_adapter *pi;
+ struct pi_device_match match = { .parport = parport, .proto = pr };
+ int id;
+
+ /*
+ * Abort if there's a device already registered on the same parport
+ * using the same protocol.
+ */
+ if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
+ return NULL;
+
+ id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
+ pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
+ if (!pi) {
+ ida_free(&pata_parport_bus_dev_ids, id);
+ return NULL;
+ }
+
+ /* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
+ pi->dev.parent = &pata_parport_bus;
+ pi->dev.bus = &pata_parport_bus_type;
+ pi->dev.driver = &pr->driver;
+ pi->dev.release = pata_parport_dev_release;
+ pi->dev.id = id;
+ dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
+ if (device_register(&pi->dev)) {
+ put_device(&pi->dev);
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
+ return NULL;
+ }
+
+ pi->proto = pr;
+
+ if (!try_module_get(pi->proto->owner))
+ goto out_unreg_dev;
+ if (pi->proto->init_proto && pi->proto->init_proto(pi) < 0)
+ goto out_module_put;
+
+ pi->delay = (delay == -1) ? pi->proto->default_delay : delay;
+ pi->mode = mode;
+ pi->port = parport->base;
+
+ par_cb.private = pi;
+ pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id);
+ if (!pi->pardev)
+ goto out_module_put;
+
+ if (!pi_probe_unit(pi, unit)) {
+ dev_info(&pi->dev, "Adapter not found\n");
+ goto out_unreg_parport;
+ }
+
+ pi->proto->log_adapter(pi);
+
+ host = ata_host_alloc_pinfo(&pi->pardev->dev, ppi, 1);
+ if (!host)
+ goto out_unreg_parport;
+ dev_set_drvdata(&pi->dev, host);
+ host->private_data = pi;
+
+ ata_port_desc(host->ports[0], "port %s", pi->pardev->port->name);
+ ata_port_desc(host->ports[0], "protocol %s", pi->proto->name);
+
+ pi_connect(pi);
+ if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
+ goto out_disconnect;
+
+ return pi;
+
+out_disconnect:
+ pi_disconnect(pi);
+out_unreg_parport:
+ parport_unregister_device(pi->pardev);
+ if (pi->proto->release_proto)
+ pi->proto->release_proto(pi);
+out_module_put:
+ module_put(pi->proto->owner);
+out_unreg_dev:
+ device_unregister(&pi->dev);
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
+ return NULL;
+}
+
+int pata_parport_register_driver(struct pi_protocol *pr)
+{
+ int error;
+ struct parport *parport;
+ int port_num;
+
+ pr->driver.bus = &pata_parport_bus_type;
+ pr->driver.name = pr->name;
+ error = driver_register(&pr->driver);
+ if (error)
+ return error;
+
+ mutex_lock(&pi_mutex);
+ error = idr_alloc(&protocols, pr, 0, 0, GFP_KERNEL);
+ if (error < 0) {
+ driver_unregister(&pr->driver);
+ mutex_unlock(&pi_mutex);
+ return error;
+ }
+
+ pr_info("pata_parport: protocol %s registered\n", pr->name);
+
+ if (probe) {
+ /* probe all parports using this protocol */
+ idr_for_each_entry(&parport_list, parport, port_num)
+ pi_init_one(parport, pr, -1, -1, -1);
+ }
+ mutex_unlock(&pi_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pata_parport_register_driver);
+
+void pata_parport_unregister_driver(struct pi_protocol *pr)
+{
+ struct pi_protocol *pr_iter;
+ int id = -1;
+
+ mutex_lock(&pi_mutex);
+ idr_for_each_entry(&protocols, pr_iter, id) {
+ if (pr_iter == pr)
+ break;
+ }
+ idr_remove(&protocols, id);
+ mutex_unlock(&pi_mutex);
+ driver_unregister(&pr->driver);
+}
+EXPORT_SYMBOL_GPL(pata_parport_unregister_driver);
+
+static ssize_t new_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ char port[12] = "auto";
+ char protocol[8] = "auto";
+ int mode = -1, unit = -1, delay = -1;
+ struct pi_protocol *pr, *pr_wanted;
+ struct device_driver *drv;
+ struct parport *parport;
+ int port_num, port_wanted, pr_num;
+ bool ok = false;
+
+ if (sscanf(buf, "%11s %7s %d %d %d",
+ port, protocol, &mode, &unit, &delay) < 1)
+ return -EINVAL;
+
+ if (sscanf(port, "parport%u", &port_wanted) < 1) {
+ if (strcmp(port, "auto")) {
+ pr_err("invalid port name %s\n", port);
+ return -EINVAL;
+ }
+ port_wanted = -1;
+ }
+
+ drv = driver_find(protocol, &pata_parport_bus_type);
+ if (!drv) {
+ if (strcmp(protocol, "auto")) {
+ pr_err("protocol %s not found\n", protocol);
+ return -EINVAL;
+ }
+ pr_wanted = NULL;
+ } else {
+ pr_wanted = container_of(drv, struct pi_protocol, driver);
+ }
+
+ mutex_lock(&pi_mutex);
+ /* walk all parports */
+ idr_for_each_entry(&parport_list, parport, port_num) {
+ if (port_num == port_wanted || port_wanted == -1) {
+ parport = parport_find_number(port_num);
+ if (!parport) {
+ pr_err("no such port %s\n", port);
+ mutex_unlock(&pi_mutex);
+ return -ENODEV;
+ }
+ /* walk all protocols */
+ idr_for_each_entry(&protocols, pr, pr_num) {
+ if (pr == pr_wanted || !pr_wanted)
+ if (pi_init_one(parport, pr, mode, unit,
+ delay))
+ ok = true;
+ }
+ parport_put_port(parport);
+ }
+ }
+ mutex_unlock(&pi_mutex);
+ if (!ok)
+ return -ENODEV;
+
+ return count;
+}
+static BUS_ATTR_WO(new_device);
+
+static void pi_remove_one(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pi_adapter *pi = host->private_data;
+
+ ata_host_detach(host);
+ pi_disconnect(pi);
+ pi_release(pi);
+ device_unregister(dev);
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
+}
+
+static ssize_t delete_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct device *dev;
+
+ mutex_lock(&pi_mutex);
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (!dev) {
+ mutex_unlock(&pi_mutex);
+ return -ENODEV;
+ }
+
+ pi_remove_one(dev);
+ put_device(dev);
+ mutex_unlock(&pi_mutex);
+
+ return count;
+}
+static BUS_ATTR_WO(delete_device);
+
+static void pata_parport_attach(struct parport *port)
+{
+ struct pi_protocol *pr;
+ int pr_num, id;
+
+ mutex_lock(&pi_mutex);
+ id = idr_alloc(&parport_list, port, port->number, port->number,
+ GFP_KERNEL);
+ if (id < 0) {
+ mutex_unlock(&pi_mutex);
+ return;
+ }
+
+ if (probe) {
+ /* probe this port using all protocols */
+ idr_for_each_entry(&protocols, pr, pr_num)
+ pi_init_one(port, pr, -1, -1, -1);
+ }
+ mutex_unlock(&pi_mutex);
+}
+
+static int pi_remove_port(struct device *dev, void *p)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pi_adapter *pi = host->private_data;
+
+ if (pi->pardev->port == p)
+ pi_remove_one(dev);
+
+ return 0;
+}
+
+static void pata_parport_detach(struct parport *port)
+{
+ mutex_lock(&pi_mutex);
+ bus_for_each_dev(&pata_parport_bus_type, NULL, port, pi_remove_port);
+ idr_remove(&parport_list, port->number);
+ mutex_unlock(&pi_mutex);
+}
+
+static struct parport_driver pata_parport_driver = {
+ .name = DRV_NAME,
+ .match_port = pata_parport_attach,
+ .detach = pata_parport_detach,
+ .devmodel = true,
+};
+
+static __init int pata_parport_init(void)
+{
+ int error;
+
+ error = bus_register(&pata_parport_bus_type);
+ if (error) {
+ pr_err("failed to register pata_parport bus, error: %d\n", error);
+ return error;
+ }
+
+ error = device_register(&pata_parport_bus);
+ if (error) {
+ pr_err("failed to register pata_parport bus, error: %d\n", error);
+ goto out_unregister_bus;
+ }
+
+ error = bus_create_file(&pata_parport_bus_type, &bus_attr_new_device);
+ if (error) {
+ pr_err("unable to create sysfs file, error: %d\n", error);
+ goto out_unregister_dev;
+ }
+
+ error = bus_create_file(&pata_parport_bus_type, &bus_attr_delete_device);
+ if (error) {
+ pr_err("unable to create sysfs file, error: %d\n", error);
+ goto out_remove_new;
+ }
+
+ error = parport_register_driver(&pata_parport_driver);
+ if (error) {
+ pr_err("unable to register parport driver, error: %d\n", error);
+ goto out_remove_del;
+ }
+
+ return 0;
+
+out_remove_del:
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device);
+out_remove_new:
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device);
+out_unregister_dev:
+ device_unregister(&pata_parport_bus);
+out_unregister_bus:
+ bus_unregister(&pata_parport_bus_type);
+ return error;
+}
+
+static __exit void pata_parport_exit(void)
+{
+ parport_unregister_driver(&pata_parport_driver);
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device);
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device);
+ device_unregister(&pata_parport_bus);
+ bus_unregister(&pata_parport_bus_type);
+}
+
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("driver for parallel port ATA adapters");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("paride");
+
+module_init(pata_parport_init);
+module_exit(pata_parport_exit);
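For reference, the new_device bus attribute added above parses up to five whitespace-separated fields, "port protocol mode unit delay". "auto" may be given for either of the first two, and any of the trailing integers may be omitted (or passed as -1) to leave the choice to the driver and the protocol defaults. A minimal user-space sketch follows; the sysfs path is an assumption based on the bus being registered under the name "pata_parport" (the value DRV_NAME is expected to expand to):

// Hypothetical user-space sketch: ask the driver to probe parport0 with
// every registered protocol, using default mode, unit and delay.
// The path assumes the bus name is "pata_parport".
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/bus/pata_parport/new_device", O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return 1;
        /* fields: port protocol mode unit delay; "auto" / -1 mean defaults */
        n = write(fd, "parport0 auto", 13);
        close(fd);
        return n < 0;
}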
diff --git a/drivers/ata/pata_parport/pata_parport.h b/drivers/ata/pata_parport/pata_parport.h
new file mode 100644
index 000000000000..bbfa4e63ee85
--- /dev/null
+++ b/drivers/ata/pata_parport/pata_parport.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * pata_parport.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
+ * Under the terms of the GPL.
+ *
+ * This file defines the interface for parallel port IDE adapter chip drivers.
+ */
+
+#ifndef LINUX_PATA_PARPORT_H
+#define LINUX_PATA_PARPORT_H
+
+#include <linux/libata.h>
+
+struct pi_adapter {
+ struct device dev;
+ struct pi_protocol *proto; /* adapter protocol */
+ int port; /* base address of parallel port */
+ int mode; /* transfer mode in use */
+ int delay; /* adapter delay setting */
+ int unit; /* unit number for chained adapters */
+ int saved_r0; /* saved state of data port (register 0) */
+ int saved_r2; /* saved state of control port (register 2) */
+ unsigned long private; /* for protocol module */
+ struct pardevice *pardev; /* pointer to pardevice */
+};
+
+/* registers are addressed as (cont,regr)
+ * cont: 0 for command register file, 1 for control register(s)
+ * regr: 0-7 for register number.
+ */
+
+/* macros and functions exported to the protocol modules */
+#define delay_p (pi->delay ? udelay(pi->delay) : (void)0)
+#define out_p(offs, byte) do { outb(byte, pi->port + offs); delay_p; } while (0)
+#define in_p(offs) (delay_p, inb(pi->port + offs))
+
+#define w0(byte) out_p(0, byte)
+#define r0() in_p(0)
+#define w1(byte) out_p(1, byte)
+#define r1() in_p(1)
+#define w2(byte) out_p(2, byte)
+#define r2() in_p(2)
+#define w3(byte) out_p(3, byte)
+#define w4(byte) out_p(4, byte)
+#define r4() in_p(4)
+#define w4w(data) do { outw(data, pi->port + 4); delay_p; } while (0)
+#define w4l(data) do { outl(data, pi->port + 4); delay_p; } while (0)
+#define r4w() (delay_p, inw(pi->port + 4))
+#define r4l() (delay_p, inl(pi->port + 4))
+
+struct pi_protocol {
+ char name[8];
+
+ int max_mode;
+ int epp_first; /* modes >= this use 8 ports */
+
+ int default_delay;
+ int max_units; /* max chained units probed for */
+
+ void (*write_regr)(struct pi_adapter *pi, int cont, int regr, int val);
+ int (*read_regr)(struct pi_adapter *pi, int cont, int regr);
+ void (*write_block)(struct pi_adapter *pi, char *buf, int count);
+ void (*read_block)(struct pi_adapter *pi, char *buf, int count);
+
+ void (*connect)(struct pi_adapter *pi);
+ void (*disconnect)(struct pi_adapter *pi);
+
+ int (*test_port)(struct pi_adapter *pi);
+ int (*probe_unit)(struct pi_adapter *pi);
+ int (*test_proto)(struct pi_adapter *pi);
+ void (*log_adapter)(struct pi_adapter *pi);
+
+ int (*init_proto)(struct pi_adapter *pi);
+ void (*release_proto)(struct pi_adapter *pi);
+ struct module *owner;
+ struct device_driver driver;
+ struct scsi_host_template sht;
+};
+
+#define PATA_PARPORT_SHT ATA_PIO_SHT
+
+int pata_parport_register_driver(struct pi_protocol *pr);
+void pata_parport_unregister_driver(struct pi_protocol *pr);
+
+/**
+ * module_pata_parport_driver() - Helper macro for registering a pata_parport driver
+ * @__pi_protocol: pi_protocol struct
+ *
+ * Helper macro for pata_parport drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_pata_parport_driver(__pi_protocol) \
+ module_driver(__pi_protocol, pata_parport_register_driver, pata_parport_unregister_driver)
+
+#endif /* LINUX_PATA_PARPORT_H */
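To make the interface above concrete, here is a minimal sketch of a hypothetical protocol module ("fakeproto"). The register access sequences are invented purely for illustration; a real adapter has its own connect/disconnect handshake and addressing scheme, and real protocol drivers usually add faster multi-byte block modes and a test_proto hook. The structure layout and the module_pata_parport_driver() usage, however, follow the header above.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hypothetical protocol module sketch for an imaginary "fakeproto" adapter.
 * The register sequences below are invented for illustration only.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "pata_parport.h"

static void fakeproto_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
        w0(regr + (cont ? 8 : 0));      /* latch the register address */
        w2(0x04);                       /* invented strobe sequence */
        w0(val);                        /* write the data byte */
        w2(0x05);
}

static int fakeproto_read_regr(struct pi_adapter *pi, int cont, int regr)
{
        w0(regr + (cont ? 8 : 0));      /* latch the register address */
        w2(0x24);                       /* invented "turn around" sequence */
        return r1() & 0xff;             /* byte comes back on the status lines */
}

static void fakeproto_write_block(struct pi_adapter *pi, char *buf, int count)
{
        int i;

        for (i = 0; i < count; i++)     /* real adapters add faster modes */
                fakeproto_write_regr(pi, 0, 0, buf[i]);
}

static void fakeproto_read_block(struct pi_adapter *pi, char *buf, int count)
{
        int i;

        for (i = 0; i < count; i++)
                buf[i] = fakeproto_read_regr(pi, 0, 0);
}

static void fakeproto_connect(struct pi_adapter *pi)
{
        pi->saved_r0 = r0();            /* save parport state, restored below */
        pi->saved_r2 = r2();
}

static void fakeproto_disconnect(struct pi_adapter *pi)
{
        w0(pi->saved_r0);
        w2(pi->saved_r2);
}

static void fakeproto_log_adapter(struct pi_adapter *pi)
{
        dev_info(&pi->dev, "fakeproto adapter at 0x%x, delay %d\n",
                 pi->port, pi->delay);
}

static struct pi_protocol fakeproto = {
        .owner = THIS_MODULE,
        .name = "fakeproto",
        .max_mode = 1,
        .epp_first = 2,                 /* no EPP modes in this sketch */
        .default_delay = 1,
        .max_units = 1,
        .write_regr = fakeproto_write_regr,
        .read_regr = fakeproto_read_regr,
        .write_block = fakeproto_write_block,
        .read_block = fakeproto_read_block,
        .connect = fakeproto_connect,
        .disconnect = fakeproto_disconnect,
        .log_adapter = fakeproto_log_adapter,
        .sht = { PATA_PARPORT_SHT("pata_fakeproto") },
};

MODULE_DESCRIPTION("fakeproto protocol driver sketch");
MODULE_LICENSE("GPL");
module_pata_parport_driver(fakeproto);

Loading such a module calls pata_parport_register_driver(), which, with probing enabled, immediately walks every known parport and tries the new protocol on it, exactly as in the pata_parport.c code above.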
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 8eb066abbd9c..5b602206c522 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -132,7 +132,7 @@ static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
}
-static struct scsi_host_template pcmcia_sht = {
+static const struct scsi_host_template pcmcia_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 4191aa61c8e4..6820c5597b14 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -122,7 +122,7 @@ static struct pci_driver pdc2027x_pci_driver = {
#endif
};
-static struct scsi_host_template pdc2027x_sht = {
+static const struct scsi_host_template pdc2027x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index f894ff2de0a9..a32723e46357 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -289,7 +289,7 @@ static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
return 1;
}
-static struct scsi_host_template pdc202xx_sht = {
+static const struct scsi_host_template pdc202xx_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 389b63b13c70..ced906bf56be 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -62,7 +62,7 @@ static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev)
}
-static struct scsi_host_template tosh_sht = {
+static const struct scsi_host_template tosh_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 21fb059859bd..87479bc893b2 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -45,7 +45,7 @@ static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unu
return 0;
}
-static struct scsi_host_template pata_platform_sht = {
+static const struct scsi_host_template pata_platform_sht = {
ATA_PIO_SHT(DRV_NAME),
};
@@ -97,7 +97,7 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
int __pata_platform_probe(struct device *dev, struct resource *io_res,
struct resource *ctl_res, struct resource *irq_res,
unsigned int ioport_shift, int __pio_mask,
- struct scsi_host_template *sht, bool use16bit)
+ const struct scsi_host_template *sht, bool use16bit)
{
struct ata_host *host;
struct ata_port *ap;
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 985f42c4fd70..ea402e02c46e 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -136,7 +136,7 @@ static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
return -EOPNOTSUPP;
}
-static struct scsi_host_template pxa_ata_sht = {
+static const struct scsi_host_template pxa_ata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 3aca8fe3fdb6..84b001097093 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -183,7 +183,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
}
-static struct scsi_host_template radisys_sht = {
+static const struct scsi_host_template radisys_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 2e110aefe59b..3974d294a341 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -73,7 +73,7 @@ static struct ata_port_operations rb532_pata_port_ops = {
/* ------------------------------------------------------------------------ */
-static struct scsi_host_template rb532_pata_sht = {
+static const struct scsi_host_template rb532_pata_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index ecb229c2c1a2..0a9689862f71 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -288,7 +288,7 @@ static const struct ata_port_info rdc_port_info = {
.port_ops = &rdc_pata_ops,
};
-static struct scsi_host_template rdc_sht = {
+static const struct scsi_host_template rdc_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index fb00c3e5fd19..8e2606793091 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -50,7 +50,7 @@ static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
}
-static struct scsi_host_template rz1000_sht = {
+static const struct scsi_host_template rz1000_sht = {
ATA_PIO_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
deleted file mode 100644
index aba1536ddd44..000000000000
--- a/drivers/ata/pata_samsung_cf.c
+++ /dev/null
@@ -1,662 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * PATA driver for Samsung SoCs.
- * Supports CF Interface in True IDE mode. Currently only PIO mode has been
- * implemented; UDMA support has to be added.
- *
- * Based on:
- * PATA driver for AT91SAM9260 Static Memory Controller
- * PATA driver for Toshiba SCC controller
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/ata-samsung_cf.h>
-
-#define DRV_NAME "pata_samsung_cf"
-#define DRV_VERSION "0.1"
-
-#define S3C_CFATA_REG(x) (x)
-#define S3C_CFATA_MUX S3C_CFATA_REG(0x0)
-#define S3C_ATA_CTRL S3C_CFATA_REG(0x0)
-#define S3C_ATA_CMD S3C_CFATA_REG(0x8)
-#define S3C_ATA_IRQ S3C_CFATA_REG(0x10)
-#define S3C_ATA_IRQ_MSK S3C_CFATA_REG(0x14)
-#define S3C_ATA_CFG S3C_CFATA_REG(0x18)
-
-#define S3C_ATA_PIO_TIME S3C_CFATA_REG(0x2c)
-#define S3C_ATA_PIO_DTR S3C_CFATA_REG(0x54)
-#define S3C_ATA_PIO_FED S3C_CFATA_REG(0x58)
-#define S3C_ATA_PIO_SCR S3C_CFATA_REG(0x5c)
-#define S3C_ATA_PIO_LLR S3C_CFATA_REG(0x60)
-#define S3C_ATA_PIO_LMR S3C_CFATA_REG(0x64)
-#define S3C_ATA_PIO_LHR S3C_CFATA_REG(0x68)
-#define S3C_ATA_PIO_DVR S3C_CFATA_REG(0x6c)
-#define S3C_ATA_PIO_CSD S3C_CFATA_REG(0x70)
-#define S3C_ATA_PIO_DAD S3C_CFATA_REG(0x74)
-#define S3C_ATA_PIO_RDATA S3C_CFATA_REG(0x7c)
-
-#define S3C_CFATA_MUX_TRUEIDE 0x01
-#define S3C_ATA_CFG_SWAP 0x40
-#define S3C_ATA_CFG_IORDYEN 0x02
-
-enum s3c_cpu_type {
- TYPE_S3C64XX,
- TYPE_S5PV210,
-};
-
-/*
- * struct s3c_ide_info - S3C PATA instance.
- * @clk: The clock resource for this controller.
- * @ide_addr: The area mapped for the hardware registers.
- * @sfr_addr: The area mapped for the special function registers.
- * @irq: The IRQ number we are using.
- * @cpu_type: The exact type of this controller.
- * @fifo_status_reg: The ATA_FIFO_STATUS register offset.
- */
-struct s3c_ide_info {
- struct clk *clk;
- void __iomem *ide_addr;
- void __iomem *sfr_addr;
- int irq;
- enum s3c_cpu_type cpu_type;
- unsigned int fifo_status_reg;
-};
-
-static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
-{
- u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG);
- reg = mode ? (reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP);
- writel(reg, s3c_ide_regbase + S3C_ATA_CFG);
-}
-
-static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
-{
- /* Select true-ide as the internal operating mode */
- writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
- s3c_ide_sfrbase + S3C_CFATA_MUX);
-}
-
-static unsigned long
-pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
-{
- int t1 = ata->setup;
- int t2 = ata->act8b;
- int t2i = ata->rec8b;
- ulong piotime;
-
- piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf);
-
- return piotime;
-}
-
-static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct s3c_ide_info *info = ap->host->private_data;
- struct ata_timing timing;
- int cycle_time;
- ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
- ulong piotime;
-
- /* Enables IORDY if mode requires it */
- if (ata_pio_need_iordy(adev))
- ata_cfg |= S3C_ATA_CFG_IORDYEN;
- else
- ata_cfg &= ~S3C_ATA_CFG_IORDYEN;
-
- cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));
-
- ata_timing_compute(adev, adev->pio_mode, &timing,
- cycle_time * 1000, 0);
-
- piotime = pata_s3c_setup_timing(info, &timing);
-
- writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
- writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
-}
-
-/*
- * Waits until the IDE controller is able to perform next read/write
- * operation to the disk. Needed for 64XX series boards only.
- */
-static int wait_for_host_ready(struct s3c_ide_info *info)
-{
- ulong timeout;
- void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;
-
- /* wait for maximum of 20 msec */
- timeout = jiffies + msecs_to_jiffies(20);
- while (time_before(jiffies, timeout)) {
- if ((readl(fifo_reg) >> 28) == 0)
- return 0;
- }
- return -EBUSY;
-}
-
-/*
- * Writes to one of the task file registers.
- */
-static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
-{
- struct s3c_ide_info *info = host->private_data;
-
- wait_for_host_ready(info);
- writeb(addr, reg);
-}
-
-/*
- * Reads from one of the task file registers.
- */
-static u8 ata_inb(struct ata_host *host, void __iomem *reg)
-{
- struct s3c_ide_info *info = host->private_data;
- u8 temp;
-
- wait_for_host_ready(info);
- (void) readb(reg);
- wait_for_host_ready(info);
- temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
- return temp;
-}
-
-/*
- * pata_s3c_tf_load - send taskfile registers to host controller
- */
-static void pata_s3c_tf_load(struct ata_port *ap,
- const struct ata_taskfile *tf)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
-
- if (tf->ctl != ap->last_ctl) {
- ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
- ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
- }
-
- if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
- ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
- ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
- ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
- ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
- ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
- }
-
- if (is_addr) {
- ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
- ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
- ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
- ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
- ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
- }
-
- if (tf->flags & ATA_TFLAG_DEVICE)
- ata_outb(ap->host, tf->device, ioaddr->device_addr);
-
- ata_wait_idle(ap);
-}
-
-/*
- * pata_s3c_tf_read - input device's ATA taskfile shadow registers
- */
-static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- tf->error = ata_inb(ap->host, ioaddr->error_addr);
- tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
- tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
- tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
- tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
- tf->device = ata_inb(ap->host, ioaddr->device_addr);
-
- if (tf->flags & ATA_TFLAG_LBA48) {
- ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
- tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
- tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
- tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
- tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
- tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
- ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
- ap->last_ctl = tf->ctl;
- }
-}
-
-/*
- * pata_s3c_exec_command - issue ATA command to host controller
- */
-static void pata_s3c_exec_command(struct ata_port *ap,
- const struct ata_taskfile *tf)
-{
- ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
- ata_sff_pause(ap);
-}
-
-/*
- * pata_s3c_check_status - Read device status register
- */
-static u8 pata_s3c_check_status(struct ata_port *ap)
-{
- return ata_inb(ap->host, ap->ioaddr.status_addr);
-}
-
-/*
- * pata_s3c_check_altstatus - Read alternate device status register
- */
-static u8 pata_s3c_check_altstatus(struct ata_port *ap)
-{
- return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
-}
-
-/*
- * pata_s3c_data_xfer - Transfer data by PIO
- */
-static unsigned int pata_s3c_data_xfer(struct ata_queued_cmd *qc,
- unsigned char *buf, unsigned int buflen, int rw)
-{
- struct ata_port *ap = qc->dev->link->ap;
- struct s3c_ide_info *info = ap->host->private_data;
- void __iomem *data_addr = ap->ioaddr.data_addr;
- unsigned int words = buflen >> 1, i;
- u16 *data_ptr = (u16 *)buf;
-
- /* Requires wait same as in ata_inb/ata_outb */
- if (rw == READ)
- for (i = 0; i < words; i++, data_ptr++) {
- wait_for_host_ready(info);
- (void) readw(data_addr);
- wait_for_host_ready(info);
- *data_ptr = readw(info->ide_addr
- + S3C_ATA_PIO_RDATA);
- }
- else
- for (i = 0; i < words; i++, data_ptr++) {
- wait_for_host_ready(info);
- writew(*data_ptr, data_addr);
- }
-
- if (buflen & 0x01)
- dev_err(ap->dev, "unexpected trailing data\n");
-
- return words << 1;
-}
-
-/*
- * pata_s3c_dev_select - Select device on ATA bus
- */
-static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
-{
- u8 tmp = ATA_DEVICE_OBS;
-
- if (device != 0)
- tmp |= ATA_DEV1;
-
- ata_outb(ap->host, tmp, ap->ioaddr.device_addr);
- ata_sff_pause(ap);
-}
-
-/*
- * pata_s3c_devchk - PATA device presence detection
- */
-static bool pata_s3c_devchk(struct ata_port *ap, unsigned int device)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 nsect, lbal;
-
- pata_s3c_dev_select(ap, device);
-
- ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
- ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
-
- ata_outb(ap->host, 0xaa, ioaddr->nsect_addr);
- ata_outb(ap->host, 0x55, ioaddr->lbal_addr);
-
- ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
- ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
-
- nsect = ata_inb(ap->host, ioaddr->nsect_addr);
- lbal = ata_inb(ap->host, ioaddr->lbal_addr);
-
- if ((nsect == 0x55) && (lbal == 0xaa))
- return true; /* we found a device */
-
- return false; /* nothing found */
-}
-
-/*
- * pata_s3c_wait_after_reset - wait for devices to become ready after reset
- */
-static int pata_s3c_wait_after_reset(struct ata_link *link,
- unsigned long deadline)
-{
- int rc;
-
- ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
-
- /* always check readiness of the master device */
- rc = ata_sff_wait_ready(link, deadline);
- /* -ENODEV means the odd clown forgot the D7 pulldown resistor
- * and TF status is 0xff, bail out on it too.
- */
- if (rc)
- return rc;
-
- return 0;
-}
-
-/*
- * pata_s3c_bus_softreset - PATA device software reset
- */
-static int pata_s3c_bus_softreset(struct ata_port *ap,
- unsigned long deadline)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- /* software reset. causes dev0 to be selected */
- ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
- udelay(20);
- ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
- udelay(20);
- ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
- ap->last_ctl = ap->ctl;
-
- return pata_s3c_wait_after_reset(&ap->link, deadline);
-}
-
-/*
- * pata_s3c_softreset - reset host port via ATA SRST
- */
-static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- unsigned int devmask = 0;
- int rc;
- u8 err;
-
- /* determine if device 0 is present */
- if (pata_s3c_devchk(ap, 0))
- devmask |= (1 << 0);
-
- /* select device 0 again */
- pata_s3c_dev_select(ap, 0);
-
- /* issue bus reset */
- rc = pata_s3c_bus_softreset(ap, deadline);
- /* if link is occupied, -ENODEV too is an error */
- if (rc && rc != -ENODEV) {
- ata_link_err(link, "SRST failed (errno=%d)\n", rc);
- return rc;
- }
-
- /* determine by signature whether we have ATA or ATAPI devices */
- classes[0] = ata_sff_dev_classify(&ap->link.device[0],
- devmask & (1 << 0), &err);
-
- return 0;
-}
-
-/*
- * pata_s3c_set_devctl - Write device control register
- */
-static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
-{
- ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
-}
-
-static struct scsi_host_template pata_s3c_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations pata_s3c_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_check_status = pata_s3c_check_status,
- .sff_check_altstatus = pata_s3c_check_altstatus,
- .sff_tf_load = pata_s3c_tf_load,
- .sff_tf_read = pata_s3c_tf_read,
- .sff_data_xfer = pata_s3c_data_xfer,
- .sff_exec_command = pata_s3c_exec_command,
- .sff_dev_select = pata_s3c_dev_select,
- .sff_set_devctl = pata_s3c_set_devctl,
- .softreset = pata_s3c_softreset,
- .set_piomode = pata_s3c_set_piomode,
-};
-
-static struct ata_port_operations pata_s5p_port_ops = {
- .inherits = &ata_sff_port_ops,
- .set_piomode = pata_s3c_set_piomode,
-};
-
-static void pata_s3c_enable(void __iomem *s3c_ide_regbase, bool state)
-{
- u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
- temp = state ? (temp | 1) : (temp & ~1);
- writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
-}
-
-static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct s3c_ide_info *info = host->private_data;
- u32 reg;
-
- reg = readl(info->ide_addr + S3C_ATA_IRQ);
- writel(reg, info->ide_addr + S3C_ATA_IRQ);
-
- return ata_sff_interrupt(irq, dev_instance);
-}
-
-static void pata_s3c_hwinit(struct s3c_ide_info *info,
- struct s3c_ide_platdata *pdata)
-{
- switch (info->cpu_type) {
- case TYPE_S3C64XX:
- /* Configure as big endian */
- pata_s3c_cfg_mode(info->sfr_addr);
- pata_s3c_set_endian(info->ide_addr, 1);
- pata_s3c_enable(info->ide_addr, true);
- msleep(100);
-
- /* Remove IRQ Status */
- writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
- writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
- break;
-
- case TYPE_S5PV210:
- /* Configure as little endian */
- pata_s3c_set_endian(info->ide_addr, 0);
- pata_s3c_enable(info->ide_addr, true);
- msleep(100);
-
- /* Remove IRQ Status */
- writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
- writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
- break;
-
- default:
- BUG();
- }
-}
-
-static int __init pata_s3c_probe(struct platform_device *pdev)
-{
- struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct device *dev = &pdev->dev;
- struct s3c_ide_info *info;
- struct resource *res;
- struct ata_port *ap;
- struct ata_host *host;
- enum s3c_cpu_type cpu_type;
- int ret;
-
- cpu_type = platform_get_device_id(pdev)->driver_data;
-
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->irq = platform_get_irq(pdev, 0);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- info->ide_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(info->ide_addr))
- return PTR_ERR(info->ide_addr);
-
- info->clk = devm_clk_get(&pdev->dev, "cfcon");
- if (IS_ERR(info->clk)) {
- dev_err(dev, "failed to get access to cf controller clock\n");
- ret = PTR_ERR(info->clk);
- info->clk = NULL;
- return ret;
- }
-
- clk_enable(info->clk);
-
- /* init ata host */
- host = ata_host_alloc(dev, 1);
- if (!host) {
- dev_err(dev, "failed to allocate ide host\n");
- ret = -ENOMEM;
- goto stop_clk;
- }
-
- ap = host->ports[0];
- ap->pio_mask = ATA_PIO4;
-
- if (cpu_type == TYPE_S3C64XX) {
- ap->ops = &pata_s3c_port_ops;
- info->sfr_addr = info->ide_addr + 0x1800;
- info->ide_addr += 0x1900;
- info->fifo_status_reg = 0x94;
- } else {
- ap->ops = &pata_s5p_port_ops;
- info->fifo_status_reg = 0x84;
- }
-
- info->cpu_type = cpu_type;
-
- if (info->irq <= 0) {
- ap->flags |= ATA_FLAG_PIO_POLLING;
- info->irq = 0;
- ata_port_desc(ap, "no IRQ, using PIO polling\n");
- }
-
- ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD;
- ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
- ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
- ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
- ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
- ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
- ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
- ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
- ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
- ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD;
- ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD;
- ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD;
- ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD;
-
- ata_port_desc(ap, "mmio cmd 0x%llx ",
- (unsigned long long)res->start);
-
- host->private_data = info;
-
- if (pdata && pdata->setup_gpio)
- pdata->setup_gpio();
-
- /* Set endianness and enable the interface */
- pata_s3c_hwinit(info, pdata);
-
- ret = ata_host_activate(host, info->irq,
- info->irq ? pata_s3c_irq : NULL,
- 0, &pata_s3c_sht);
- if (ret)
- goto stop_clk;
-
- return 0;
-
-stop_clk:
- clk_disable(info->clk);
- return ret;
-}
-
-static int __exit pata_s3c_remove(struct platform_device *pdev)
-{
- struct ata_host *host = platform_get_drvdata(pdev);
- struct s3c_ide_info *info = host->private_data;
-
- ata_host_detach(host);
-
- clk_disable(info->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int pata_s3c_suspend(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_suspend(host, PMSG_SUSPEND);
- return 0;
-}
-
-static int pata_s3c_resume(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
- struct s3c_ide_platdata *pdata = dev_get_platdata(dev);
- struct s3c_ide_info *info = host->private_data;
-
- pata_s3c_hwinit(info, pdata);
- ata_host_resume(host);
-
- return 0;
-}
-
-static const struct dev_pm_ops pata_s3c_pm_ops = {
- .suspend = pata_s3c_suspend,
- .resume = pata_s3c_resume,
-};
-#endif
-
-/* driver device registration */
-static const struct platform_device_id pata_s3c_driver_ids[] = {
- {
- .name = "s3c64xx-pata",
- .driver_data = TYPE_S3C64XX,
- }, {
- .name = "s5pv210-pata",
- .driver_data = TYPE_S5PV210,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids);
-
-static struct platform_driver pata_s3c_driver = {
- .remove = __exit_p(pata_s3c_remove),
- .id_table = pata_s3c_driver_ids,
- .driver = {
- .name = DRV_NAME,
-#ifdef CONFIG_PM_SLEEP
- .pm = &pata_s3c_pm_ops,
-#endif
- },
-};
-
-module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
-
-MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
-MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index f28daf62a37d..a388dfb97ad8 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -192,7 +192,7 @@ static int sc1200_qc_defer(struct ata_queued_cmd *qc)
return 0;
}
-static struct scsi_host_template sc1200_sht = {
+static const struct scsi_host_template sc1200_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 4f9c2aefd807..8356f1f2a025 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -57,7 +57,7 @@ static struct pci_driver sch_pci_driver = {
#endif
};
-static struct scsi_host_template sch_sht = {
+static const struct scsi_host_template sch_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index c0bc4af0d196..549ff24a9823 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -252,13 +252,13 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
pci_write_config_byte(pdev, 0x54, ultra_cfg);
}
-static struct scsi_host_template serverworks_osb4_sht = {
+static const struct scsi_host_template serverworks_osb4_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
-static struct scsi_host_template serverworks_csb_sht = {
+static const struct scsi_host_template serverworks_csb_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -413,7 +413,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
}
};
const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
- struct scsi_host_template *sht = &serverworks_csb_sht;
+ const struct scsi_host_template *sht = &serverworks_csb_sht;
int rc;
rc = pcim_enable_device(pdev);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 67ef2e26d7df..abe64b5f83cf 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -223,7 +223,7 @@ static bool sil680_sff_irq_check(struct ata_port *ap)
return val & 0x08;
}
-static struct scsi_host_template sil680_sht = {
+static const struct scsi_host_template sil680_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 92e4cf05de2c..31de06b66221 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -539,7 +539,7 @@ static unsigned int sis_133_mode_filter(struct ata_device *adev, unsigned int ma
return mask;
}
-static struct scsi_host_template sis_sht = {
+static const struct scsi_host_template sis_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 8487470e2e01..3b62ea482f1a 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -238,7 +238,7 @@ static bool sl82c105_sff_irq_check(struct ata_port *ap)
return val & mask;
}
-static struct scsi_host_template sl82c105_sht = {
+static const struct scsi_host_template sl82c105_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 782162d2f3f8..26d448a869e2 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -160,7 +160,7 @@ static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
}
-static struct scsi_host_template triflex_sht = {
+static const struct scsi_host_template triflex_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 34f00f389932..696b99720dcb 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -443,7 +443,7 @@ static int via_port_start(struct ata_port *ap)
return 0;
}
-static struct scsi_host_template via_sht = {
+static const struct scsi_host_template via_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 35b823ac20c9..8e6b2599f0d5 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -123,7 +123,7 @@ static void adma_freeze(struct ata_port *ap);
static void adma_thaw(struct ata_port *ap);
static int adma_prereset(struct ata_link *link, unsigned long deadline);
-static struct scsi_host_template adma_ata_sht = {
+static const struct scsi_host_template adma_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ADMA_DMA_BOUNDARY,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 21d77633a98f..fabdd1e380f9 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -810,7 +810,7 @@ static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
struct device *dev = hsdev->dev;
#ifdef CONFIG_SATA_DWC_OLD_DMA
- if (!of_find_property(dev->of_node, "dmas", NULL))
+ if (!of_property_present(dev->of_node, "dmas"))
return sata_dwc_dma_get_channel_old(hsdevp);
#endif
@@ -1076,7 +1076,7 @@ static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
/*
* scsi mid-layer and libata interface structures
*/
-static struct scsi_host_template sata_dwc_sht = {
+static const struct scsi_host_template sata_dwc_sht = {
ATA_NCQ_SHT(DRV_NAME),
/*
* test-only: Currently this driver doesn't handle NCQ
@@ -1180,7 +1180,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
}
#ifdef CONFIG_SATA_DWC_OLD_DMA
- if (!of_find_property(np, "dmas", NULL)) {
+ if (!of_property_present(np, "dmas")) {
err = sata_dwc_dma_init_old(ofdev, hsdev);
if (err)
return err;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b9a4f68b371d..ccd99b9aa9ff 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -566,7 +566,7 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
return 0;
}
-static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct sata_fsl_port_priv *pp = qc->ap->private_data;
struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
@@ -577,7 +577,6 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
cd = pp->cmdentry + tag;
ata_tf_from_fis(cd->sfis, &qc->result_tf);
- return true;
}
static int sata_fsl_scr_write(struct ata_link *link,
@@ -1042,7 +1041,7 @@ static void sata_fsl_error_handler(struct ata_port *ap)
static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
{
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
@@ -1377,7 +1376,7 @@ static void sata_fsl_host_stop(struct ata_host *host)
/*
* scsi mid-layer and libata interface structures
*/
-static struct scsi_host_template sata_fsl_sht = {
+static const struct scsi_host_template sata_fsl_sht = {
ATA_NCQ_SHT_QD("sata_fsl", SATA_FSL_QUEUE_DEPTH),
.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
.dma_boundary = ATA_DMA_BOUNDARY,
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index dfbf9493e451..8237ece4a46f 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -438,7 +438,7 @@ static const struct ata_port_info ahci_highbank_port_info = {
.port_ops = &ahci_highbank_ops,
};
-static struct scsi_host_template ahci_highbank_platform_sht = {
+static const struct scsi_host_template ahci_highbank_platform_sht = {
AHCI_SHT("sata_highbank"),
};
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 11e518f0111c..2c8c78ed86c1 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -242,7 +242,7 @@ struct inic_port_priv {
dma_addr_t cpb_tbl_dma;
};
-static struct scsi_host_template inic_sht = {
+static const struct scsi_host_template inic_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
@@ -566,7 +566,7 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
tf->status = readb(port_base + PORT_TF_COMMAND);
}
-static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ata_taskfile *rtf = &qc->result_tf;
struct ata_taskfile tf;
@@ -580,12 +580,10 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
*/
inic_tf_read(qc->ap, &tf);
- if (!(tf.status & ATA_ERR))
- return false;
-
- rtf->status = tf.status;
- rtf->error = tf.error;
- return true;
+ if (tf.status & ATA_ERR) {
+ rtf->status = tf.status;
+ rtf->error = tf.error;
+ }
}
static void inic_freeze(struct ata_port *ap)
@@ -672,7 +670,7 @@ static void inic_error_handler(struct ata_port *ap)
static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
inic_reset_port(inic_port_base(qc->ap));
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index e3cff01201b8..d404e631d152 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -659,13 +659,13 @@ static u8 mv_sff_check_status(struct ata_port *ap);
* PRDs for 64K boundaries in mv_fill_sg().
*/
#ifdef CONFIG_PCI
-static struct scsi_host_template mv5_sht = {
+static const struct scsi_host_template mv5_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
};
#endif
-static struct scsi_host_template mv6_sht = {
+static const struct scsi_host_template mv6_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 9b2d289e89e1..abf5651c87ab 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -371,11 +371,11 @@ static struct pci_driver nv_pci_driver = {
.remove = ata_pci_remove_one,
};
-static struct scsi_host_template nv_sht = {
+static const struct scsi_host_template nv_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
-static struct scsi_host_template nv_adma_sht = {
+static const struct scsi_host_template nv_adma_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
@@ -386,7 +386,7 @@ static struct scsi_host_template nv_adma_sht = {
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
-static struct scsi_host_template nv_swncq_sht = {
+static const struct scsi_host_template nv_swncq_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
@@ -520,7 +520,7 @@ static struct ata_port_operations nv_swncq_ops = {
struct nv_pi_priv {
irq_handler_t irq_handler;
- struct scsi_host_template *sht;
+ const struct scsi_host_template *sht;
};
#define NV_PI_PRIV(_irq_handler, _sht) \
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 9cd7d8b71361..2df1a070b25a 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -158,7 +158,7 @@ static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
-static struct scsi_host_template pdc_ata_sht = {
+static const struct scsi_host_template pdc_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = PDC_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
@@ -828,7 +828,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
pdc_reset_port(ap);
}
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 8ca0810aad26..8a6286159044 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -108,7 +108,7 @@ static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);
-static struct scsi_host_template qs_ata_sht = {
+static const struct scsi_host_template qs_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = QS_MAX_PRD,
.dma_boundary = QS_DMA_BOUNDARY,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 0195eb29f6c2..34790f15c1b8 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -608,7 +608,7 @@ static u8 sata_rcar_bmdma_status(struct ata_port *ap)
return host_stat;
}
-static struct scsi_host_template sata_rcar_sht = {
+static const struct scsi_host_template sata_rcar_sht = {
ATA_BASE_SHT(DRV_NAME),
/*
* This controller allows transfer chunks up to 512MB which cross 64KB
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3b989a52879d..cc77c0248284 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -156,7 +156,7 @@ static struct pci_driver sil_pci_driver = {
#endif
};
-static struct scsi_host_template sil_sht = {
+static const struct scsi_host_template sil_sht = {
ATA_BASE_SHT(DRV_NAME),
/** These controllers support Large Block Transfer which allows
transfer chunks up to 2GB and which cross 64KB boundaries,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 2fef6ce93f07..e72a0257990d 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -328,7 +328,7 @@ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
-static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
@@ -373,7 +373,7 @@ static struct pci_driver sil24_pci_driver = {
#endif
};
-static struct scsi_host_template sil24_sht = {
+static const struct scsi_host_template sil24_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
@@ -901,10 +901,9 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
return 0;
}
-static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
- return true;
}
static void sil24_pmp_attach(struct ata_port *ap)
@@ -1185,7 +1184,7 @@ static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
+ if ((qc->flags & ATA_QCFLAG_EH) && sil24_init_port(ap))
ata_eh_freeze_port(ap);
}
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 316237362aa9..ef8724986de3 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -72,7 +72,7 @@ static struct pci_driver sis_pci_driver = {
#endif
};
-static struct scsi_host_template sis_sht = {
+static const struct scsi_host_template sis_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 2e3418a82b44..c47c3fb434d5 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -330,7 +330,7 @@ static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
return 0;
}
-static struct scsi_host_template k2_sata_sht = {
+static const struct scsi_host_template k2_sata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.show_info = k2_sata_show_info,
};
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index ab70cbc78f96..ccc016072637 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -226,7 +226,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
-static struct scsi_host_template pdc_sata_sht = {
+static const struct scsi_host_template pdc_sata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
@@ -866,7 +866,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
pdc_reset_port(ap);
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 815e6af75310..60ea45926cd1 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -59,7 +59,7 @@ static struct pci_driver uli_pci_driver = {
.remove = ata_pci_remove_one,
};
-static struct scsi_host_template uli_sht = {
+static const struct scsi_host_template uli_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index c7891cc84ea0..57cbf2cef618 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -107,7 +107,7 @@ static struct pci_driver svia_pci_driver = {
.remove = ata_pci_remove_one,
};
-static struct scsi_host_template svia_sht = {
+static const struct scsi_host_template svia_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 87e4ed66b306..d39b87537168 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -277,7 +277,7 @@ out:
}
-static struct scsi_host_template vsc_sata_sht = {
+static const struct scsi_host_template vsc_sata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index eec0cc2144e0..e327a0229dc1 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card)
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
}
+ kfree(vc);
}
}
}
@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card)
return 0;
}
+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+ struct vc_map *vc = card->vcs[0];
+
+ free_scq(card, vc->scq);
+ kfree(vc);
+}
+
static int
idt77252_dev_open(struct idt77252_dev *card)
{
@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
struct idt77252_dev *card = dev->dev_data;
u32 conf;
+ close_card_ubr0(card);
close_card_oam(card);
conf = SAR_CFG_RXPTH | /* enable receive path */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 8b2a0eb3f32a..d56a5d508ccd 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -322,8 +322,10 @@ fail1:
static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct hd44780_common *hdc = lcd->drvdata;
charlcd_unregister(lcd);
+ kfree(hdc->hd44780);
kfree(lcd->drvdata);
kfree(lcd);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 6f04b831a5c0..2b8fd6bb7da0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -230,4 +230,16 @@ config GENERIC_ARCH_NUMA
Enable support for generic NUMA implementation. Currently, RISC-V
and ARM64 use it.
+config FW_DEVLINK_SYNC_STATE_TIMEOUT
+ bool "sync_state() behavior defaults to timeout instead of strict"
+ help
+ This is the build-time equivalent of adding the kernel command line
+ parameter "fw_devlink.sync_state=timeout": give up waiting on consumers
+ and call sync_state() on any devices that haven't yet received their
+ sync_state() calls after deferred_probe_timeout has expired, or by
+ late_initcall() if !CONFIG_MODULES. You will almost always want to
+ select N here unless you have already successfully tested with the
+ command line option on every system/board your kernel is expected to
+ work on.
+
endmenu
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e7d6e6657ffa..b741b5ba82bd 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -736,7 +736,7 @@ void update_siblings_masks(unsigned int cpuid)
ret = detect_cache_attributes(cpuid);
if (ret && ret != -ENOENT)
- pr_info("Early cacheinfo failed, ret = %d\n", ret);
+ pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
/* update core and thread sibling masks */
for_each_online_cpu(cpu) {
@@ -825,7 +825,7 @@ __weak int __init parse_acpi_topology(void)
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
- int ret;
+ int cpu, ret;
reset_cpu_topology();
ret = parse_acpi_topology();
@@ -835,9 +835,18 @@ void __init init_cpu_topology(void)
if (ret) {
/*
* Discard anything that was parsed if we hit an error so we
- * don't use partial information.
+ * don't use partial information. But do not return yet to give
+ * arch-specific early cache level detection a chance to run.
*/
reset_cpu_topology();
+ }
+
+ for_each_possible_cpu(cpu) {
+ ret = fetch_cache_info(cpu);
+ if (!ret)
+ continue;
+ else if (ret != -ENOENT)
+ pr_err("Early cacheinfo failed, ret = %d\n", ret);
return;
}
}
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 8c5e65930617..4d4c2c8d26c4 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -185,7 +185,7 @@ static int auxiliary_match(struct device *dev, struct device_driver *drv)
return !!auxiliary_match_id(auxdrv->id_table, auxdev);
}
-static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int auxiliary_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const char *name, *p;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 7d4803c03d3e..eb4c0ace9242 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -27,11 +27,13 @@
* on this bus.
* @bus - pointer back to the struct bus_type that this structure is associated
* with.
+ * @dev_root: Default device to use as the parent.
*
* @glue_dirs - "glue" directory to put in-between the parent device to
* avoid namespace conflicts
* @class - pointer back to the struct class that this structure is associated
* with.
+ * @lock_key: Lock class key for use by the lock validator
*
* This structure is the one that is the actual kobject allowing struct
* bus_type/class to be statically allocated safely. Nothing outside of the
@@ -48,12 +50,30 @@ struct subsys_private {
struct klist klist_drivers;
struct blocking_notifier_head bus_notifier;
unsigned int drivers_autoprobe:1;
- struct bus_type *bus;
+ const struct bus_type *bus;
+ struct device *dev_root;
struct kset glue_dirs;
- struct class *class;
+ const struct class *class;
+
+ struct lock_class_key lock_key;
};
-#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
+#define to_subsys_private(obj) container_of_const(obj, struct subsys_private, subsys.kobj)
+
+static inline struct subsys_private *subsys_get(struct subsys_private *sp)
+{
+ if (sp)
+ kset_get(&sp->subsys);
+ return sp;
+}
+
+static inline void subsys_put(struct subsys_private *sp)
+{
+ if (sp)
+ kset_put(&sp->subsys);
+}
+
+struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
struct kobject kobj;
@@ -107,65 +127,73 @@ struct device_private {
container_of(obj, struct device_private, knode_class)
/* initialisation functions */
-extern int devices_init(void);
-extern int buses_init(void);
-extern int classes_init(void);
-extern int firmware_init(void);
+int devices_init(void);
+int buses_init(void);
+int classes_init(void);
+int firmware_init(void);
#ifdef CONFIG_SYS_HYPERVISOR
-extern int hypervisor_init(void);
+int hypervisor_init(void);
#else
static inline int hypervisor_init(void) { return 0; }
#endif
-extern int platform_bus_init(void);
-extern void cpu_dev_init(void);
-extern void container_dev_init(void);
+int platform_bus_init(void);
+void cpu_dev_init(void);
+void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
-extern void auxiliary_bus_init(void);
+void auxiliary_bus_init(void);
#else
static inline void auxiliary_bus_init(void) { }
#endif
struct kobject *virtual_device_parent(struct device *dev);
-extern int bus_add_device(struct device *dev);
-extern void bus_probe_device(struct device *dev);
-extern void bus_remove_device(struct device *dev);
+int bus_add_device(struct device *dev);
+void bus_probe_device(struct device *dev);
+void bus_remove_device(struct device *dev);
+void bus_notify(struct device *dev, enum bus_notifier_event value);
+bool bus_is_registered(const struct bus_type *bus);
-extern int bus_add_driver(struct device_driver *drv);
-extern void bus_remove_driver(struct device_driver *drv);
-extern void device_release_driver_internal(struct device *dev,
- struct device_driver *drv,
- struct device *parent);
+int bus_add_driver(struct device_driver *drv);
+void bus_remove_driver(struct device_driver *drv);
+void device_release_driver_internal(struct device *dev, struct device_driver *drv,
+ struct device *parent);
-extern void driver_detach(struct device_driver *drv);
-extern void driver_deferred_probe_del(struct device *dev);
-extern void device_set_deferred_probe_reason(const struct device *dev,
- struct va_format *vaf);
+void driver_detach(struct device_driver *drv);
+void driver_deferred_probe_del(struct device *dev);
+void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
static inline int driver_match_device(struct device_driver *drv,
struct device *dev)
{
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
-extern int driver_add_groups(struct device_driver *drv,
- const struct attribute_group **groups);
-extern void driver_remove_groups(struct device_driver *drv,
- const struct attribute_group **groups);
+static inline void dev_sync_state(struct device *dev)
+{
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+}
+
+int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups);
+void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
-extern int devres_release_all(struct device *dev);
-extern void device_block_probing(void);
-extern void device_unblock_probing(void);
-extern void deferred_probe_extend_timeout(void);
-extern void driver_deferred_probe_trigger(void);
+int devres_release_all(struct device *dev);
+void device_block_probing(void);
+void device_unblock_probing(void);
+void deferred_probe_extend_timeout(void);
+void driver_deferred_probe_trigger(void);
+const char *device_get_devnode(const struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid, const char **tmp);
/* /sys/devices directory */
extern struct kset *devices_kset;
-extern void devices_kset_move_last(struct device *dev);
+void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-extern void module_add_driver(struct module *mod, struct device_driver *drv);
-extern void module_remove_driver(struct device_driver *drv);
+void module_add_driver(struct module *mod, struct device_driver *drv);
+void module_remove_driver(struct device_driver *drv);
#else
static inline void module_add_driver(struct module *mod,
struct device_driver *drv) { }
@@ -173,23 +201,34 @@ static inline void module_remove_driver(struct device_driver *drv) { }
#endif
#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_init(void);
+int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
+#ifdef CONFIG_BLOCK
+extern struct class block_class;
+static inline bool is_blockdev(struct device *dev)
+{
+ return dev->class == &block_class;
+}
+#else
+static inline bool is_blockdev(struct device *dev) { return false; }
+#endif
+
/* Device links support */
-extern int device_links_read_lock(void);
-extern void device_links_read_unlock(int idx);
-extern int device_links_read_lock_held(void);
-extern int device_links_check_suppliers(struct device *dev);
-extern void device_links_force_bind(struct device *dev);
-extern void device_links_driver_bound(struct device *dev);
-extern void device_links_driver_cleanup(struct device *dev);
-extern void device_links_no_driver(struct device *dev);
-extern bool device_links_busy(struct device *dev);
-extern void device_links_unbind_consumers(struct device *dev);
-extern void fw_devlink_drivers_done(void);
+int device_links_read_lock(void);
+void device_links_read_unlock(int idx);
+int device_links_read_lock_held(void);
+int device_links_check_suppliers(struct device *dev);
+void device_links_force_bind(struct device *dev);
+void device_links_driver_bound(struct device *dev);
+void device_links_driver_cleanup(struct device *dev);
+void device_links_no_driver(struct device *dev);
+bool device_links_busy(struct device *dev);
+void device_links_unbind_consumers(struct device *dev);
+void fw_devlink_drivers_done(void);
+void fw_devlink_probing_done(void);
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 4ec6dbab73be..84a21084d67d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -6,6 +6,7 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007 Novell Inc.
+ * Copyright (c) 2023 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
*/
#include <linux/async.h>
@@ -24,6 +25,9 @@
/* /sys/devices/system */
static struct kset *system_kset;
+/* /sys/bus */
+static struct kset *bus_kset;
+
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
/*
@@ -39,19 +43,63 @@ static struct kset *system_kset;
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
-static struct bus_type *bus_get(struct bus_type *bus)
+/**
+ * bus_to_subsys - Turn a struct bus_type into a struct subsys_private
+ *
+ * @bus: pointer to the struct bus_type to look up
+ *
+ * The driver core internals need to work on the subsys_private structure, not
+ * the external struct bus_type pointer. This function walks the list of
+ * registered busses in the system, finds the matching one and returns the
+ * internal struct subsys_private that relates to that bus.
+ *
+ * Note, the reference count of the return value is INCREMENTED if it is not
+ * NULL. A call to subsys_put() must be done when finished with the pointer in
+ * order for it to be properly freed.
+ */
+static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
- if (bus) {
- kset_get(&bus->p->subsys);
- return bus;
+ struct subsys_private *sp = NULL;
+ struct kobject *kobj;
+
+ if (!bus || !bus_kset)
+ return NULL;
+
+ spin_lock(&bus_kset->list_lock);
+
+ if (list_empty(&bus_kset->list))
+ goto done;
+
+ list_for_each_entry(kobj, &bus_kset->list, entry) {
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+
+ sp = container_of_const(kset, struct subsys_private, subsys);
+ if (sp->bus == bus)
+ goto done;
}
+ sp = NULL;
+done:
+ sp = subsys_get(sp);
+ spin_unlock(&bus_kset->list_lock);
+ return sp;
+}
+
+static const struct bus_type *bus_get(const struct bus_type *bus)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ if (sp)
+ return bus;
return NULL;
}
-static void bus_put(struct bus_type *bus)
+static void bus_put(const struct bus_type *bus)
{
- if (bus)
- kset_put(&bus->p->subsys);
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ /* two puts are required as the call to bus_to_subsys incremented it again */
+ subsys_put(sp);
+ subsys_put(sp);
}
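/*
 * Illustrative sketch (editor's addition, not part of the patch): the lookup
 * convention used throughout bus.c after this change. bus_to_subsys() returns
 * the subsys_private with its reference count already incremented, so every
 * internal helper pairs the lookup with subsys_put(). The helper name below
 * is hypothetical.
 */
static int example_bus_helper(const struct bus_type *bus)
{
	struct subsys_private *sp = bus_to_subsys(bus);	/* takes a reference */
	int ret = -EINVAL;

	if (!sp)
		return ret;	/* bus not (or no longer) registered */

	/* ... operate on sp->subsys, sp->klist_devices, sp->klist_drivers ... */
	ret = 0;

	subsys_put(sp);		/* drop the reference taken by bus_to_subsys() */
	return ret;
}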
static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
@@ -91,7 +139,7 @@ static void driver_release(struct kobject *kobj)
kfree(drv_priv);
}
-static struct kobj_type driver_ktype = {
+static const struct kobj_type driver_ktype = {
.sysfs_ops = &driver_sysfs_ops,
.release = driver_release,
};
@@ -128,37 +176,42 @@ static const struct sysfs_ops bus_sysfs_ops = {
.store = bus_attr_store,
};
-int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
+int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr)
{
+ struct subsys_private *sp = bus_to_subsys(bus);
int error;
- if (bus_get(bus)) {
- error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
- bus_put(bus);
- } else
- error = -EINVAL;
+
+ if (!sp)
+ return -EINVAL;
+
+ error = sysfs_create_file(&sp->subsys.kobj, &attr->attr);
+
+ subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_create_file);
-void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr)
{
- if (bus_get(bus)) {
- sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
- bus_put(bus);
- }
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ if (!sp)
+ return;
+
+ sysfs_remove_file(&sp->subsys.kobj, &attr->attr);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_remove_file);
static void bus_release(struct kobject *kobj)
{
struct subsys_private *priv = to_subsys_private(kobj);
- struct bus_type *bus = priv->bus;
+ lockdep_unregister_key(&priv->lock_key);
kfree(priv);
- bus->p = NULL;
}
-static struct kobj_type bus_ktype = {
+static const struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
.release = bus_release,
};
@@ -176,13 +229,11 @@ static const struct kset_uevent_ops bus_uevent_ops = {
.filter = bus_uevent_filter,
};
-static struct kset *bus_kset;
-
/* Manually detach a device from its associated driver. */
static ssize_t unbind_store(struct device_driver *drv, const char *buf,
size_t count)
{
- struct bus_type *bus = bus_get(drv->bus);
+ const struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
int err = -ENODEV;
@@ -205,7 +256,7 @@ static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
static ssize_t bind_store(struct device_driver *drv, const char *buf,
size_t count)
{
- struct bus_type *bus = bus_get(drv->bus);
+ const struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
int err = -ENODEV;
@@ -223,22 +274,37 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
-static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
+static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf)
{
- return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe);
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int ret;
+
+ if (!sp)
+ return -EINVAL;
+
+ ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe);
+ subsys_put(sp);
+ return ret;
}
-static ssize_t drivers_autoprobe_store(struct bus_type *bus,
+static ssize_t drivers_autoprobe_store(const struct bus_type *bus,
const char *buf, size_t count)
{
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ if (!sp)
+ return -EINVAL;
+
if (buf[0] == '0')
- bus->p->drivers_autoprobe = 0;
+ sp->drivers_autoprobe = 0;
else
- bus->p->drivers_autoprobe = 1;
+ sp->drivers_autoprobe = 1;
+
+ subsys_put(sp);
return count;
}
-static ssize_t drivers_probe_store(struct bus_type *bus,
+static ssize_t drivers_probe_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
@@ -285,21 +351,23 @@ static struct device *next_device(struct klist_iter *i)
* to retain this data, it should do so, and increment the reference
* count in the supplied callback.
*/
-int bus_for_each_dev(struct bus_type *bus, struct device *start,
+int bus_for_each_dev(const struct bus_type *bus, struct device *start,
void *data, int (*fn)(struct device *, void *))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
int error = 0;
- if (!bus || !bus->p)
+ if (!sp)
return -EINVAL;
- klist_iter_init_node(&bus->p->klist_devices, &i,
+ klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
+ subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_dev);
@@ -319,67 +387,28 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
-struct device *bus_find_device(struct bus_type *bus,
+struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
- if (!bus || !bus->p)
+ if (!sp)
return NULL;
- klist_iter_init_node(&bus->p->klist_devices, &i,
+ klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while ((dev = next_device(&i)))
if (match(dev, data) && get_device(dev))
break;
klist_iter_exit(&i);
+ subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(bus_find_device);
-/**
- * subsys_find_device_by_id - find a device with a specific enumeration number
- * @subsys: subsystem
- * @id: index 'id' in struct device
- * @hint: device to check first
- *
- * Check the hint's next object and if it is a match return it directly,
- * otherwise, fall back to a full list search. Either way a reference for
- * the returned object is taken.
- */
-struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
- struct device *hint)
-{
- struct klist_iter i;
- struct device *dev;
-
- if (!subsys)
- return NULL;
-
- if (hint) {
- klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
- dev = next_device(&i);
- if (dev && dev->id == id && get_device(dev)) {
- klist_iter_exit(&i);
- return dev;
- }
- klist_iter_exit(&i);
- }
-
- klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
- while ((dev = next_device(&i))) {
- if (dev->id == id && get_device(dev)) {
- klist_iter_exit(&i);
- return dev;
- }
- }
- klist_iter_exit(&i);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
-
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -411,21 +440,23 @@ static struct device_driver *next_driver(struct klist_iter *i)
* in the callback. It must also be sure to increment the refcount
* so it doesn't disappear before returning to the caller.
*/
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device_driver *drv;
int error = 0;
- if (!bus)
+ if (!sp)
return -EINVAL;
- klist_iter_init_node(&bus->p->klist_drivers, &i,
+ klist_iter_init_node(&sp->klist_drivers, &i,
start ? &start->p->knode_bus : NULL);
while ((drv = next_driver(&i)) && !error)
error = fn(drv, data);
klist_iter_exit(&i);
+ subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_drv);
@@ -440,32 +471,46 @@ EXPORT_SYMBOL_GPL(bus_for_each_drv);
*/
int bus_add_device(struct device *dev)
{
- struct bus_type *bus = bus_get(dev->bus);
- int error = 0;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+ int error;
- if (bus) {
- pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
- error = device_add_groups(dev, bus->dev_groups);
- if (error)
- goto out_put;
- error = sysfs_create_link(&bus->p->devices_kset->kobj,
- &dev->kobj, dev_name(dev));
- if (error)
- goto out_groups;
- error = sysfs_create_link(&dev->kobj,
- &dev->bus->p->subsys.kobj, "subsystem");
- if (error)
- goto out_subsys;
- klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
+ if (!sp) {
+ /*
+ * This is a normal operation for many devices that do not
+ * have a bus assigned to them; just say that all went
+ * well.
+ */
+ return 0;
}
+
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the device is removed from the bus
+ */
+
+ pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev));
+
+ error = device_add_groups(dev, sp->bus->dev_groups);
+ if (error)
+ goto out_put;
+
+ error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_groups;
+
+ error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
+ if (error)
+ goto out_subsys;
+
+ klist_add_tail(&dev->p->knode_bus, &sp->klist_devices);
return 0;
out_subsys:
- sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
+ sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
out_groups:
- device_remove_groups(dev, bus->dev_groups);
+ device_remove_groups(dev, sp->bus->dev_groups);
out_put:
- bus_put(dev->bus);
+ subsys_put(sp);
return error;
}
@@ -477,20 +522,21 @@ out_put:
*/
void bus_probe_device(struct device *dev)
{
- struct bus_type *bus = dev->bus;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
- if (!bus)
+ if (!sp)
return;
- if (bus->p->drivers_autoprobe)
+ if (sp->drivers_autoprobe)
device_initial_probe(dev);
- mutex_lock(&bus->p->mutex);
- list_for_each_entry(sif, &bus->p->interfaces, node)
+ mutex_lock(&sp->mutex);
+ list_for_each_entry(sif, &sp->interfaces, node)
if (sif->add_dev)
sif->add_dev(dev, sif);
- mutex_unlock(&bus->p->mutex);
+ mutex_unlock(&sp->mutex);
+ subsys_put(sp);
}
/**
@@ -505,21 +551,20 @@ void bus_probe_device(struct device *dev)
*/
void bus_remove_device(struct device *dev)
{
- struct bus_type *bus = dev->bus;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
- if (!bus)
+ if (!sp)
return;
- mutex_lock(&bus->p->mutex);
- list_for_each_entry(sif, &bus->p->interfaces, node)
+ mutex_lock(&sp->mutex);
+ list_for_each_entry(sif, &sp->interfaces, node)
if (sif->remove_dev)
sif->remove_dev(dev, sif);
- mutex_unlock(&bus->p->mutex);
+ mutex_unlock(&sp->mutex);
sysfs_remove_link(&dev->kobj, "subsystem");
- sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
- dev_name(dev));
+ sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
device_remove_groups(dev, dev->bus->dev_groups);
if (klist_node_attached(&dev->p->knode_bus))
klist_del(&dev->p->knode_bus);
@@ -527,7 +572,14 @@ void bus_remove_device(struct device *dev)
pr_debug("bus: '%s': remove device %s\n",
dev->bus->name, dev_name(dev));
device_release_driver(dev);
- bus_put(dev->bus);
+
+ /*
+ * Decrement the reference count twice, once for the bus_to_subsys()
+ * call in the start of this function, and the second one from the
+ * reference increment in bus_add_device()
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
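/*
 * Editor's illustration (not part of the patch): the reference taken by
 * bus_to_subsys() in bus_add_device() is intentionally not dropped there;
 * it is held for as long as the device sits on the bus and is released by
 * the second subsys_put() above. As a worked refcount trace:
 *
 *   bus_add_device(dev):     bus_to_subsys()  ->  +1  (kept)
 *   ...device lives on the bus...
 *   bus_remove_device(dev):  bus_to_subsys()  ->  +1
 *                            subsys_put()     ->  -1  (this function's lookup)
 *                            subsys_put()     ->  -1  (bus_add_device()'s hold)
 */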
static int __must_check add_bind_files(struct device_driver *drv)
@@ -552,7 +604,7 @@ static void remove_bind_files(struct device_driver *drv)
static BUS_ATTR_WO(drivers_probe);
static BUS_ATTR_RW(drivers_autoprobe);
-static int add_probe_files(struct bus_type *bus)
+static int add_probe_files(const struct bus_type *bus)
{
int retval;
@@ -567,7 +619,7 @@ out:
return retval;
}
-static void remove_probe_files(struct bus_type *bus)
+static void remove_probe_files(const struct bus_type *bus)
{
bus_remove_file(bus, &bus_attr_drivers_autoprobe);
bus_remove_file(bus, &bus_attr_drivers_probe);
@@ -589,15 +641,18 @@ static DRIVER_ATTR_WO(uevent);
*/
int bus_add_driver(struct device_driver *drv)
{
- struct bus_type *bus;
+ struct subsys_private *sp = bus_to_subsys(drv->bus);
struct driver_private *priv;
int error = 0;
- bus = bus_get(drv->bus);
- if (!bus)
+ if (!sp)
return -EINVAL;
- pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the driver is removed from the bus
+ */
+ pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
@@ -607,14 +662,14 @@ int bus_add_driver(struct device_driver *drv)
klist_init(&priv->klist_devices, NULL, NULL);
priv->driver = drv;
drv->p = priv;
- priv->kobj.kset = bus->p->drivers_kset;
+ priv->kobj.kset = sp->drivers_kset;
error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
"%s", drv->name);
if (error)
goto out_unregister;
- klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
- if (drv->bus->p->drivers_autoprobe) {
+ klist_add_tail(&priv->knode_bus, &sp->klist_drivers);
+ if (sp->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_del_list;
@@ -626,7 +681,7 @@ int bus_add_driver(struct device_driver *drv)
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
- error = driver_add_groups(drv, bus->drv_groups);
+ error = driver_add_groups(drv, sp->bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
@@ -651,7 +706,7 @@ out_unregister:
/* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
- bus_put(bus);
+ subsys_put(sp);
return error;
}
@@ -665,19 +720,29 @@ out_put_bus:
*/
void bus_remove_driver(struct device_driver *drv)
{
- if (!drv->bus)
+ struct subsys_private *sp = bus_to_subsys(drv->bus);
+
+ if (!sp)
return;
+ pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name);
+
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
- driver_remove_groups(drv, drv->bus->drv_groups);
+ driver_remove_groups(drv, sp->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
- pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
driver_detach(drv);
module_remove_driver(drv);
kobject_put(&drv->p->kobj);
- bus_put(drv->bus);
+
+ /*
+ * Decrement the reference count twice, once for the bus_to_subsys()
+ * call in the start of this function, and the second one from the
+ * reference increment in bus_add_driver()
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
/* Helper for bus_rescan_devices's iter */
@@ -704,7 +769,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* attached and rescan it against existing drivers to see if it matches
* any by calling device_attach() for the unbound devices.
*/
-int bus_rescan_devices(struct bus_type *bus)
+int bus_rescan_devices(const struct bus_type *bus)
{
return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
}
@@ -727,18 +792,6 @@ int device_reprobe(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_reprobe);
-static int bus_add_groups(struct bus_type *bus,
- const struct attribute_group **groups)
-{
- return sysfs_create_groups(&bus->p->subsys.kobj, groups);
-}
-
-static void bus_remove_groups(struct bus_type *bus,
- const struct attribute_group **groups)
-{
- sysfs_remove_groups(&bus->p->subsys.kobj, groups);
-}
-
static void klist_devices_get(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
@@ -755,13 +808,21 @@ static void klist_devices_put(struct klist_node *n)
put_device(dev);
}
-static ssize_t bus_uevent_store(struct bus_type *bus,
+static ssize_t bus_uevent_store(const struct bus_type *bus,
const char *buf, size_t count)
{
- int rc;
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int ret;
- rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
- return rc ? rc : count;
+ if (!sp)
+ return -EINVAL;
+
+ ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count);
+ subsys_put(sp);
+
+ if (ret)
+ return ret;
+ return count;
}
/*
* "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
@@ -780,27 +841,28 @@ static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL,
* infrastructure, then register the children subsystems it has:
* the devices and drivers that belong to the subsystem.
*/
-int bus_register(struct bus_type *bus)
+int bus_register(const struct bus_type *bus)
{
int retval;
struct subsys_private *priv;
- struct lock_class_key *key = &bus->lock_key;
+ struct kobject *bus_kobj;
+ struct lock_class_key *key;
priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = bus;
- bus->p = priv;
BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
- retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
+ bus_kobj = &priv->subsys.kobj;
+ retval = kobject_set_name(bus_kobj, "%s", bus->name);
if (retval)
goto out;
- priv->subsys.kobj.kset = bus_kset;
- priv->subsys.kobj.ktype = &bus_ktype;
+ bus_kobj->kset = bus_kset;
+ bus_kobj->ktype = &bus_ktype;
priv->drivers_autoprobe = 1;
retval = kset_register(&priv->subsys);
@@ -811,21 +873,21 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_uevent_fail;
- priv->devices_kset = kset_create_and_add("devices", NULL,
- &priv->subsys.kobj);
+ priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj);
if (!priv->devices_kset) {
retval = -ENOMEM;
goto bus_devices_fail;
}
- priv->drivers_kset = kset_create_and_add("drivers", NULL,
- &priv->subsys.kobj);
+ priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj);
if (!priv->drivers_kset) {
retval = -ENOMEM;
goto bus_drivers_fail;
}
INIT_LIST_HEAD(&priv->interfaces);
+ key = &priv->lock_key;
+ lockdep_register_key(key);
__mutex_init(&priv->mutex, "subsys mutex", key);
klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
klist_init(&priv->klist_drivers, NULL, NULL);
@@ -834,7 +896,7 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_probe_files_fail;
- retval = bus_add_groups(bus, bus->bus_groups);
+ retval = sysfs_create_groups(bus_kobj, bus->bus_groups);
if (retval)
goto bus_groups_fail;
@@ -844,16 +906,15 @@ int bus_register(struct bus_type *bus)
bus_groups_fail:
remove_probe_files(bus);
bus_probe_files_fail:
- kset_unregister(bus->p->drivers_kset);
+ kset_unregister(priv->drivers_kset);
bus_drivers_fail:
- kset_unregister(bus->p->devices_kset);
+ kset_unregister(priv->devices_kset);
bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
- kset_unregister(&bus->p->subsys);
+ kset_unregister(&priv->subsys);
out:
- kfree(bus->p);
- bus->p = NULL;
+ kfree(priv);
return retval;
}
EXPORT_SYMBOL_GPL(bus_register);
@@ -865,43 +926,82 @@ EXPORT_SYMBOL_GPL(bus_register);
* Unregister the child subsystems and the bus itself.
* Finally, we call bus_put() to release the refcount
*/
-void bus_unregister(struct bus_type *bus)
+void bus_unregister(const struct bus_type *bus)
{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct kobject *bus_kobj;
+
+ if (!sp)
+ return;
+
pr_debug("bus: '%s': unregistering\n", bus->name);
- if (bus->dev_root)
- device_unregister(bus->dev_root);
- bus_remove_groups(bus, bus->bus_groups);
+ if (sp->dev_root)
+ device_unregister(sp->dev_root);
+
+ bus_kobj = &sp->subsys.kobj;
+ sysfs_remove_groups(bus_kobj, bus->bus_groups);
remove_probe_files(bus);
- kset_unregister(bus->p->drivers_kset);
- kset_unregister(bus->p->devices_kset);
bus_remove_file(bus, &bus_attr_uevent);
- kset_unregister(&bus->p->subsys);
+
+ kset_unregister(sp->drivers_kset);
+ kset_unregister(sp->devices_kset);
+ kset_unregister(&sp->subsys);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_unregister);
-int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int retval;
+
+ if (!sp)
+ return -EINVAL;
+
+ retval = blocking_notifier_chain_register(&sp->bus_notifier, nb);
+ subsys_put(sp);
+ return retval;
}
EXPORT_SYMBOL_GPL(bus_register_notifier);
-int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int retval;
+
+ if (!sp)
+ return -EINVAL;
+ retval = blocking_notifier_chain_unregister(&sp->bus_notifier, nb);
+ subsys_put(sp);
+ return retval;
}
EXPORT_SYMBOL_GPL(bus_unregister_notifier);
-struct kset *bus_get_kset(struct bus_type *bus)
+void bus_notify(struct device *dev, enum bus_notifier_event value)
{
- return &bus->p->subsys;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
+ blocking_notifier_call_chain(&sp->bus_notifier, value, dev);
+ subsys_put(sp);
}
-EXPORT_SYMBOL_GPL(bus_get_kset);
-struct klist *bus_get_device_klist(struct bus_type *bus)
+struct kset *bus_get_kset(const struct bus_type *bus)
{
- return &bus->p->klist_devices;
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct kset *kset;
+
+ if (!sp)
+ return NULL;
+
+ kset = &sp->subsys;
+ subsys_put(sp);
+
+ return kset;
}
-EXPORT_SYMBOL_GPL(bus_get_device_klist);
+EXPORT_SYMBOL_GPL(bus_get_kset);
/*
* Yes, this forcibly breaks the klist abstraction temporarily. It
@@ -934,13 +1034,16 @@ void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
LIST_HEAD(sorted_devices);
struct klist_node *n, *tmp;
struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
- device_klist = bus_get_device_klist(bus);
+ if (!sp)
+ return;
+ device_klist = &sp->klist_devices;
spin_lock(&device_klist->k_lock);
list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
@@ -950,13 +1053,19 @@ void bus_sort_breadthfirst(struct bus_type *bus,
}
list_splice(&sorted_devices, &device_klist->k_list);
spin_unlock(&device_klist->k_lock);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
+struct subsys_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
/**
* subsys_dev_iter_init - initialize subsys device iterator
* @iter: subsys iterator to initialize
- * @subsys: the subsys we wanna iterate over
+ * @sp: the subsys private (i.e. bus) we want to iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
@@ -965,17 +1074,16 @@ EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
-void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
- struct device *start, const struct device_type *type)
+static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp,
+ struct device *start, const struct device_type *type)
{
struct klist_node *start_knode = NULL;
if (start)
start_knode = &start->p->knode_bus;
- klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+ klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
/**
* subsys_dev_iter_next - iterate to the next device
@@ -989,7 +1097,7 @@ EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
* free to do whatever it wants to do with the device including
* calling back into subsys code.
*/
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;
@@ -1003,7 +1111,6 @@ struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
return dev;
}
}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
/**
* subsys_dev_iter_exit - finish iteration
@@ -1012,34 +1119,38 @@ EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+static void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
int subsys_interface_register(struct subsys_interface *sif)
{
- struct bus_type *subsys;
+ struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return -ENODEV;
- subsys = bus_get(sif->subsys);
- if (!subsys)
+ sp = bus_to_subsys(sif->subsys);
+ if (!sp)
return -EINVAL;
- mutex_lock(&subsys->p->mutex);
- list_add_tail(&sif->node, &subsys->p->interfaces);
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the interface is removed from the bus
+ */
+
+ mutex_lock(&sp->mutex);
+ list_add_tail(&sif->node, &sp->interfaces);
if (sif->add_dev) {
- subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->add_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
- mutex_unlock(&subsys->p->mutex);
+ mutex_unlock(&sp->mutex);
return 0;
}
@@ -1047,26 +1158,34 @@ EXPORT_SYMBOL_GPL(subsys_interface_register);
void subsys_interface_unregister(struct subsys_interface *sif)
{
- struct bus_type *subsys;
+ struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return;
- subsys = sif->subsys;
+ sp = bus_to_subsys(sif->subsys);
+ if (!sp)
+ return;
- mutex_lock(&subsys->p->mutex);
+ mutex_lock(&sp->mutex);
list_del_init(&sif->node);
if (sif->remove_dev) {
- subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->remove_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
- mutex_unlock(&subsys->p->mutex);
-
- bus_put(subsys);
+ mutex_unlock(&sp->mutex);
+
+ /*
+ * Decrement the reference count twice, once for the bus_to_subsys()
+ * call in the start of this function, and the second one from the
+ * reference increment in subsys_interface_register()
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(subsys_interface_unregister);
@@ -1079,6 +1198,7 @@ static int subsys_register(struct bus_type *subsys,
const struct attribute_group **groups,
struct kobject *parent_of_root)
{
+ struct subsys_private *sp;
struct device *dev;
int err;
@@ -1086,6 +1206,12 @@ static int subsys_register(struct bus_type *subsys,
if (err < 0)
return err;
+ sp = bus_to_subsys(subsys);
+ if (!sp) {
+ err = -EINVAL;
+ goto err_sp;
+ }
+
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!dev) {
err = -ENOMEM;
@@ -1104,7 +1230,8 @@ static int subsys_register(struct bus_type *subsys,
if (err < 0)
goto err_dev_reg;
- subsys->dev_root = dev;
+ sp->dev_root = dev;
+ subsys_put(sp);
return 0;
err_dev_reg:
@@ -1113,6 +1240,8 @@ err_dev_reg:
err_name:
kfree(dev);
err_dev:
+ subsys_put(sp);
+err_sp:
bus_unregister(subsys);
return err;
}
@@ -1166,6 +1295,82 @@ int subsys_virtual_register(struct bus_type *subsys,
}
EXPORT_SYMBOL_GPL(subsys_virtual_register);
+/**
+ * driver_find - locate driver on a bus by its name.
+ * @name: name of the driver.
+ * @bus: bus to scan for the driver.
+ *
+ * Call kset_find_obj() to iterate over list of drivers on
+ * a bus to find driver by name. Return driver if found.
+ *
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
+ */
+struct device_driver *driver_find(const char *name, const struct bus_type *bus)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct kobject *k;
+ struct driver_private *priv;
+
+ if (!sp)
+ return NULL;
+
+ k = kset_find_obj(sp->drivers_kset, name);
+ subsys_put(sp);
+ if (!k)
+ return NULL;
+
+ priv = to_driver(k);
+
+ /* Drop reference added by kset_find_obj() */
+ kobject_put(k);
+ return priv->driver;
+}
+EXPORT_SYMBOL_GPL(driver_find);
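/*
 * Illustrative use of driver_find() (editor's sketch, not part of the patch).
 * Because no locking protects the returned driver, callers typically use it
 * only for short-lived checks such as name-collision detection during
 * registration. "example_bus_type" and the helper are hypothetical;
 * <linux/device.h> is assumed.
 */
extern struct bus_type example_bus_type;

static bool example_driver_name_taken(const char *name)
{
	return driver_find(name, &example_bus_type) != NULL;
}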
+
+/*
+ * Warning: the value could change to "removed" instantly after this function returns, so be
+ * very careful when using it...
+ */
+bool bus_is_registered(const struct bus_type *bus)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ bool is_initialized = false;
+
+ if (sp) {
+ is_initialized = true;
+ subsys_put(sp);
+ }
+ return is_initialized;
+}
+
+/**
+ * bus_get_dev_root - return a pointer to the "device root" of a bus
+ * @bus: bus to return the device root of.
+ *
+ * If a bus has a "device root" structure, return it, WITH THE REFERENCE
+ * COUNT INCREMENTED.
+ *
+ * Note, when finished with the device, a call to put_device() is required.
+ *
+ * If the device root is not present (or bus is not a valid pointer), NULL
+ * will be returned.
+ */
+struct device *bus_get_dev_root(const struct bus_type *bus)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct device *dev_root;
+
+ if (!sp)
+ return NULL;
+
+ dev_root = get_device(sp->dev_root);
+ subsys_put(sp);
+ return dev_root;
+}
+EXPORT_SYMBOL_GPL(bus_get_dev_root);
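/*
 * Illustrative use of bus_get_dev_root() (editor's sketch, not part of the
 * patch): the returned device carries a reference, so it is always paired
 * with put_device(). "example_bus_type" is hypothetical.
 */
static void example_use_dev_root(void)
{
	struct device *dev_root = bus_get_dev_root(&example_bus_type);

	if (!dev_root)
		return;		/* no device root, or bus not registered */

	/* ... create sysfs attributes or child devices under dev_root ... */

	put_device(dev_root);	/* drop the reference taken above */
}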
+
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 950b22cdb5f7..bba3482ddeb8 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -14,7 +14,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -28,6 +28,9 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define per_cpu_cacheinfo_idx(cpu, idx) \
(per_cpu_cacheinfo(cpu) + (idx))
+/* Set if no cache information is found in DT/ACPI. */
+static bool use_arch_info;
+
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
return ci_cacheinfo(cpu);
@@ -38,11 +41,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
/*
* For non DT/ACPI systems, assume unique level 1 caches,
- * system-wide shared caches for all other levels. This will be used
- * only if arch specific code has not populated shared_cpu_map
+ * system-wide shared caches for all other levels.
*/
- if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
- return !(this_leaf->level == 1);
+ if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
+ use_arch_info)
+ return (this_leaf->level != 1) && (sib_leaf->level != 1);
if ((sib_leaf->attributes & CACHE_ID) &&
(this_leaf->attributes & CACHE_ID))
@@ -79,6 +82,9 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
}
#ifdef CONFIG_OF
+
+static bool of_check_cache_nodes(struct device_node *np);
+
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
@@ -206,6 +212,11 @@ static int cache_setup_of_node(unsigned int cpu)
return -ENOENT;
}
+ if (!of_check_cache_nodes(np)) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
prev = np;
while (index < cache_leaves(cpu)) {
@@ -229,8 +240,95 @@ static int cache_setup_of_node(unsigned int cpu)
return 0;
}
+
+static bool of_check_cache_nodes(struct device_node *np)
+{
+ struct device_node *next;
+
+ if (of_property_present(np, "cache-size") ||
+ of_property_present(np, "i-cache-size") ||
+ of_property_present(np, "d-cache-size") ||
+ of_property_present(np, "cache-unified"))
+ return true;
+
+ next = of_find_next_cache_node(np);
+ if (next) {
+ of_node_put(next);
+ return true;
+ }
+
+ return false;
+}
+
+static int of_count_cache_leaves(struct device_node *np)
+{
+ unsigned int leaves = 0;
+
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+
+ if (!leaves) {
+ /* The '[i-|d-|]cache-size' property is required, but
+ * if absent, fall back on the 'cache-unified' property.
+ */
+ if (of_property_read_bool(np, "cache-unified"))
+ return 1;
+ else
+ return 2;
+ }
+
+ return leaves;
+}
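/*
 * Worked example (editor's addition, not part of the patch): how
 * of_count_cache_leaves() maps devicetree properties to a leaf count.
 *
 *   cpu@0 {                          // split L1: separate I and D caches
 *           i-cache-size = <0x8000>;
 *           d-cache-size = <0x8000>; //  -> 2 leaves
 *           next-level-cache = <&l2>;
 *   };
 *   l2: l2-cache {                   // unified L2 cache
 *           compatible = "cache";
 *           cache-unified;
 *           cache-level = <2>;
 *           cache-size = <0x40000>;  //  -> 1 leaf
 *   };
 *
 * A node with no size property at all falls back to "cache-unified":
 * present means 1 leaf, absent means a split cache is assumed (2 leaves).
 */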
+
+int init_of_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
+ unsigned int levels = 0, leaves, level;
+
+ if (!of_check_cache_nodes(np)) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
+ leaves = of_count_cache_leaves(np);
+ if (leaves > 0)
+ levels = 1;
+
+ prev = np;
+ while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
+ if (!of_device_is_compatible(np, "cache"))
+ goto err_out;
+ if (of_property_read_u32(np, "cache-level", &level))
+ goto err_out;
+ if (level <= levels)
+ goto err_out;
+
+ leaves += of_count_cache_leaves(np);
+ levels = level;
+ }
+
+ of_node_put(np);
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+
+ return 0;
+
+err_out:
+ of_node_put(np);
+ return -EINVAL;
+}
+
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+int init_of_cache_level(unsigned int cpu) { return 0; }
#endif
int __weak cache_setup_acpi(unsigned int cpu)
@@ -249,6 +347,10 @@ static int cache_setup_properties(unsigned int cpu)
else if (!acpi_disabled)
ret = cache_setup_acpi(cpu);
+ // Assume there is no cache information available in DT/ACPI from now on.
+ if (ret && use_arch_cache_info())
+ use_arch_info = true;
+
return ret;
}
@@ -256,7 +358,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
- unsigned int index;
+ unsigned int index, sib_index;
int ret = 0;
if (this_cpu_ci->cpu_map_populated)
@@ -267,7 +369,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
* to update the shared cpu_map if the cache attributes were
* populated early before all the cpus are brought online
*/
- if (!last_level_cache_is_valid(cpu)) {
+ if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
ret = cache_setup_properties(cpu);
if (ret)
return ret;
@@ -284,11 +386,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
-
- sib_leaf = per_cpu_cacheinfo_idx(i, index);
- if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
- cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
- cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ break;
+ }
}
}
/* record the maximum cache line size */
@@ -302,7 +406,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
struct cacheinfo *this_leaf, *sib_leaf;
- unsigned int sibling, index;
+ unsigned int sibling, index, sib_index;
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
@@ -313,9 +417,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
- sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
- cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
- cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ break;
+ }
+ }
}
}
}
@@ -326,10 +435,11 @@ static void free_cache_attributes(unsigned int cpu)
return;
cache_shared_cpu_map_remove(cpu);
+}
- kfree(per_cpu_cacheinfo(cpu));
- per_cpu_cacheinfo(cpu) = NULL;
- cache_leaves(cpu) = 0;
+int __weak early_cache_level(unsigned int cpu)
+{
+ return -ENOENT;
}
int __weak init_cache_level(unsigned int cpu)
@@ -342,38 +452,111 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-int detect_cache_attributes(unsigned int cpu)
+static inline
+int allocate_cache_info(int cpu)
{
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct cacheinfo), GFP_ATOMIC);
+ if (!per_cpu_cacheinfo(cpu)) {
+ cache_leaves(cpu) = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int fetch_cache_info(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int levels = 0, split_levels = 0;
int ret;
- /* Since early detection of the cacheinfo is allowed via this
- * function and this also gets called as CPU hotplug callbacks via
- * cacheinfo_cpu_online, the initialisation can be skipped and only
- * CPU maps can be updated as the CPU online status would be update
- * if called via cacheinfo_cpu_online path.
+ if (acpi_disabled) {
+ ret = init_of_cache_level(cpu);
+ } else {
+ ret = acpi_get_cache_info(cpu, &levels, &split_levels);
+ if (!ret) {
+ this_cpu_ci->num_levels = levels;
+ /*
+ * This assumes that:
+ * - there cannot be any split caches (data/instruction)
+ * above a unified cache
+ * - data/instruction caches come in pairs
+ */
+ this_cpu_ci->num_leaves = levels + split_levels;
+ }
+ }
+
+ if (ret || !cache_leaves(cpu)) {
+ ret = early_cache_level(cpu);
+ if (ret)
+ return ret;
+
+ if (!cache_leaves(cpu))
+ return -ENOENT;
+
+ this_cpu_ci->early_ci_levels = true;
+ }
+
+ return allocate_cache_info(cpu);
+}
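/*
 * Worked example (editor's addition, not part of the patch) for the ACPI
 * branch above: with acpi_get_cache_info() reporting levels = 3 and
 * split_levels = 1 (only L1 is split into I and D caches),
 *
 *   num_leaves = levels + split_levels = 3 + 1 = 4   (L1I, L1D, L2, L3)
 *
 * which is consistent with the stated assumptions that split caches never
 * sit above a unified cache and always come in pairs.
 */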
+
+static inline int init_level_allocate_ci(unsigned int cpu)
+{
+ unsigned int early_leaves = cache_leaves(cpu);
+
+ /* Since early initialization/allocation of the cacheinfo is allowed
+ * via fetch_cache_info() and this also gets called as CPU hotplug
+ * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
+ * as it will happen only once (the cacheinfo memory is never freed).
+ * Just populate the cacheinfo. However, if the cacheinfo has been
+ * allocated early through the arch-specific early_cache_level() call,
+ * there is a chance the info is wrong (this can happen on arm64). In
+ * that case, call init_cache_level() anyway to give the arch-specific
+ * code a chance to make things right.
*/
- if (per_cpu_cacheinfo(cpu))
- goto update_cpu_map;
+ if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
+ return 0;
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
- if (per_cpu_cacheinfo(cpu) == NULL) {
- cache_leaves(cpu) = 0;
- return -ENOMEM;
- }
-
/*
- * populate_cache_leaves() may completely setup the cache leaves and
- * shared_cpu_map or it may leave it partially setup.
+ * Now that we have properly initialized the cache level info, make
+ * sure we don't try to do that again the next time we are called
+ * (e.g. as CPU hotplug callbacks).
*/
- ret = populate_cache_leaves(cpu);
+ ci_cacheinfo(cpu)->early_ci_levels = false;
+
+ if (cache_leaves(cpu) <= early_leaves)
+ return 0;
+
+ kfree(per_cpu_cacheinfo(cpu));
+ return allocate_cache_info(cpu);
+}
+
+int detect_cache_attributes(unsigned int cpu)
+{
+ int ret;
+
+ ret = init_level_allocate_ci(cpu);
if (ret)
- goto free_ci;
+ return ret;
+
+ /*
+ * If LLC is valid the cache leaves were already populated so just go to
+ * update the cpu map.
+ */
+ if (!last_level_cache_is_valid(cpu)) {
+ /*
+ * populate_cache_leaves() may completely setup the cache leaves and
+ * shared_cpu_map or it may leave it partially setup.
+ */
+ ret = populate_cache_leaves(cpu);
+ if (ret)
+ goto free_ci;
+ }
-update_cpu_map:
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 86ec554cfe60..ac1808d1a2e8 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -20,8 +20,52 @@
#include <linux/mutex.h>
#include "base.h"
+/* /sys/class */
+static struct kset *class_kset;
+
#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
+/**
+ * class_to_subsys - Turn a struct class into a struct subsys_private
+ *
+ * @class: pointer to the struct bus_type to look up
+ *
+ * The driver core internals need to work on the subsys_private structure, not
+ * the external struct class pointer. This function walks the list of
+ * registered classes in the system and finds the matching one and returns the
+ * internal struct subsys_private that relates to that class.
+ *
+ * Note, the reference count of the return value is INCREMENTED if it is not
+ * NULL. A call to subsys_put() must be done when finished with the pointer in
+ * order for it to be properly freed.
+ */
+struct subsys_private *class_to_subsys(const struct class *class)
+{
+ struct subsys_private *sp = NULL;
+ struct kobject *kobj;
+
+ if (!class || !class_kset)
+ return NULL;
+
+ spin_lock(&class_kset->list_lock);
+
+ if (list_empty(&class_kset->list))
+ goto done;
+
+ list_for_each_entry(kobj, &class_kset->list, entry) {
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+
+ sp = container_of_const(kset, struct subsys_private, subsys);
+ if (sp->class == class)
+ goto done;
+ }
+ sp = NULL;
+done:
+ sp = subsys_get(sp);
+ spin_unlock(&class_kset->list_lock);
+ return sp;
+}
+
static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
@@ -49,7 +93,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
- struct class *class = cp->class;
+ const struct class *class = cp->class;
pr_debug("class '%s': release.\n", class->name);
@@ -59,13 +103,14 @@ static void class_release(struct kobject *kobj)
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
+ lockdep_unregister_key(&cp->lock_key);
kfree(cp);
}
static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj)
{
- struct subsys_private *cp = to_subsys_private(kobj);
- struct class *class = cp->class;
+ const struct subsys_private *cp = to_subsys_private(kobj);
+ const struct class *class = cp->class;
return class->ns_type;
}
@@ -75,48 +120,40 @@ static const struct sysfs_ops class_sysfs_ops = {
.store = class_attr_store,
};
-static struct kobj_type class_ktype = {
+static const struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
.child_ns_type = class_child_ns_type,
};
-/* Hotplug events for classes go to the class subsys */
-static struct kset *class_kset;
-
-
-int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+int class_create_file_ns(const struct class *cls, const struct class_attribute *attr,
const void *ns)
{
+ struct subsys_private *sp = class_to_subsys(cls);
int error;
- if (cls)
- error = sysfs_create_file_ns(&cls->p->subsys.kobj,
- &attr->attr, ns);
- else
- error = -EINVAL;
+ if (!sp)
+ return -EINVAL;
+
+ error = sysfs_create_file_ns(&sp->subsys.kobj, &attr->attr, ns);
+ subsys_put(sp);
+
return error;
}
+EXPORT_SYMBOL_GPL(class_create_file_ns);
-void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr,
const void *ns)
{
- if (cls)
- sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
-}
+ struct subsys_private *sp = class_to_subsys(cls);
-static struct class *class_get(struct class *cls)
-{
- if (cls)
- kset_get(&cls->p->subsys);
- return cls;
-}
+ if (!sp)
+ return;
-static void class_put(struct class *cls)
-{
- if (cls)
- kset_put(&cls->p->subsys);
+ sysfs_remove_file_ns(&sp->subsys.kobj, &attr->attr, ns);
+ subsys_put(sp);
}
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
static struct device *klist_class_to_dev(struct klist_node *n)
{
@@ -138,21 +175,10 @@ static void klist_class_dev_put(struct klist_node *n)
put_device(dev);
}
-static int class_add_groups(struct class *cls,
- const struct attribute_group **groups)
-{
- return sysfs_create_groups(&cls->p->subsys.kobj, groups);
-}
-
-static void class_remove_groups(struct class *cls,
- const struct attribute_group **groups)
-{
- return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
-}
-
-int __class_register(struct class *cls, struct lock_class_key *key)
+int class_register(const struct class *cls)
{
struct subsys_private *cp;
+ struct lock_class_key *key;
int error;
pr_debug("device class '%s': registering\n", cls->name);
@@ -163,6 +189,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->interfaces);
kset_init(&cp->glue_dirs);
+ key = &cp->lock_key;
+ lockdep_register_key(key);
__mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
if (error) {
@@ -170,55 +198,52 @@ int __class_register(struct class *cls, struct lock_class_key *key)
return error;
}
- /* set the default /sys/dev directory for devices of this class */
- if (!cls->dev_kobj)
- cls->dev_kobj = sysfs_dev_char_kobj;
-
-#if defined(CONFIG_BLOCK)
- /* let the block class directory show up in the root of sysfs */
- if (!sysfs_deprecated || cls != &block_class)
- cp->subsys.kobj.kset = class_kset;
-#else
cp->subsys.kobj.kset = class_kset;
-#endif
cp->subsys.kobj.ktype = &class_ktype;
cp->class = cls;
- cls->p = cp;
error = kset_register(&cp->subsys);
- if (error) {
- kfree(cp);
- return error;
- }
- error = class_add_groups(class_get(cls), cls->class_groups);
- class_put(cls);
+ if (error)
+ goto err_out;
+
+ error = sysfs_create_groups(&cp->subsys.kobj, cls->class_groups);
if (error) {
kobject_del(&cp->subsys.kobj);
kfree_const(cp->subsys.kobj.name);
- kfree(cp);
+ goto err_out;
}
+ return 0;
+
+err_out:
+ kfree(cp);
return error;
}
-EXPORT_SYMBOL_GPL(__class_register);
+EXPORT_SYMBOL_GPL(class_register);
-void class_unregister(struct class *cls)
+void class_unregister(const struct class *cls)
{
+ struct subsys_private *sp = class_to_subsys(cls);
+
+ if (!sp)
+ return;
+
pr_debug("device class '%s': unregistering\n", cls->name);
- class_remove_groups(cls, cls->class_groups);
- kset_unregister(&cls->p->subsys);
+
+ sysfs_remove_groups(&sp->subsys.kobj, cls->class_groups);
+ kset_unregister(&sp->subsys);
+ subsys_put(sp);
}
+EXPORT_SYMBOL_GPL(class_unregister);
-static void class_create_release(struct class *cls)
+static void class_create_release(const struct class *cls)
{
pr_debug("%s called for %s\n", __func__, cls->name);
kfree(cls);
}
/**
- * __class_create - create a struct class structure
- * @owner: pointer to the module that is to "own" this struct class
+ * class_create - create a struct class structure
* @name: pointer to a string for the name of this class.
- * @key: the lock_class_key for this class; used by mutex lock debugging
*
* This is used to create a struct class pointer that can then be used
* in calls to device_create().
@@ -228,8 +253,7 @@ static void class_create_release(struct class *cls)
* Note, the pointer created here is to be destroyed when finished by
* making a call to class_destroy().
*/
-struct class *__class_create(struct module *owner, const char *name,
- struct lock_class_key *key)
+struct class *class_create(const char *name)
{
struct class *cls;
int retval;
@@ -241,10 +265,9 @@ struct class *__class_create(struct module *owner, const char *name,
}
cls->name = name;
- cls->owner = owner;
cls->class_release = class_create_release;
- retval = __class_register(cls, key);
+ retval = class_register(cls);
if (retval)
goto error;
@@ -254,7 +277,7 @@ error:
kfree(cls);
return ERR_PTR(retval);
}
-EXPORT_SYMBOL_GPL(__class_create);
+EXPORT_SYMBOL_GPL(class_create);
/**
* class_destroy - destroys a struct class structure
@@ -263,13 +286,14 @@ EXPORT_SYMBOL_GPL(__class_create);
* Note, the pointer to be destroyed must have been created with a call
* to class_create().
*/
-void class_destroy(struct class *cls)
+void class_destroy(const struct class *cls)
{
if (IS_ERR_OR_NULL(cls))
return;
class_unregister(cls);
}
+EXPORT_SYMBOL_GPL(class_destroy);
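/*
 * Illustrative use of the simplified API (editor's sketch, not part of the
 * patch): class_create() now takes only a name, with no module owner or
 * lockdep key. The "example" identifiers are hypothetical; <linux/device.h>
 * and <linux/err.h> are assumed.
 */
static struct class *example_class;

static int __init example_init(void)
{
	example_class = class_create("example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);
	return 0;
}

static void __exit example_exit(void)
{
	class_destroy(example_class);
}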
/**
* class_dev_iter_init - initialize class device iterator
@@ -283,14 +307,18 @@ void class_destroy(struct class *cls)
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
-void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
- struct device *start, const struct device_type *type)
+void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ const struct device *start, const struct device_type *type)
{
+ struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
+ if (!sp)
+ return;
+
if (start)
start_knode = &start->p->knode_class;
- klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
+ klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);
@@ -354,16 +382,17 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* @fn is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
-int class_for_each_device(struct class *class, struct device *start,
+int class_for_each_device(const struct class *class, const struct device *start,
void *data, int (*fn)(struct device *, void *))
{
+ struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
struct device *dev;
int error = 0;
if (!class)
return -EINVAL;
- if (!class->p) {
+ if (!sp) {
WARN(1, "%s called for class '%s' before it was initialized",
__func__, class->name);
return -EINVAL;
@@ -376,6 +405,7 @@ int class_for_each_device(struct class *class, struct device *start,
break;
}
class_dev_iter_exit(&iter);
+ subsys_put(sp);
return error;
}
@@ -401,16 +431,17 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* @match is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
-struct device *class_find_device(struct class *class, struct device *start,
+struct device *class_find_device(const struct class *class, const struct device *start,
const void *data,
int (*match)(struct device *, const void *))
{
+ struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
struct device *dev;
if (!class)
return NULL;
- if (!class->p) {
+ if (!sp) {
WARN(1, "%s called for class '%s' before it was initialized",
__func__, class->name);
return NULL;
@@ -424,6 +455,7 @@ struct device *class_find_device(struct class *class, struct device *start,
}
}
class_dev_iter_exit(&iter);
+ subsys_put(sp);
return dev;
}
@@ -431,54 +463,74 @@ EXPORT_SYMBOL_GPL(class_find_device);
int class_interface_register(struct class_interface *class_intf)
{
- struct class *parent;
+ struct subsys_private *sp;
+ const struct class *parent;
struct class_dev_iter iter;
struct device *dev;
if (!class_intf || !class_intf->class)
return -ENODEV;
- parent = class_get(class_intf->class);
- if (!parent)
+ parent = class_intf->class;
+ sp = class_to_subsys(parent);
+ if (!sp)
return -EINVAL;
- mutex_lock(&parent->p->mutex);
- list_add_tail(&class_intf->node, &parent->p->interfaces);
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the interface is removed in the call to class_interface_unregister()
+ */
+
+ mutex_lock(&sp->mutex);
+ list_add_tail(&class_intf->node, &sp->interfaces);
if (class_intf->add_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
- class_intf->add_dev(dev, class_intf);
+ class_intf->add_dev(dev);
class_dev_iter_exit(&iter);
}
- mutex_unlock(&parent->p->mutex);
+ mutex_unlock(&sp->mutex);
return 0;
}
+EXPORT_SYMBOL_GPL(class_interface_register);
void class_interface_unregister(struct class_interface *class_intf)
{
- struct class *parent = class_intf->class;
+ struct subsys_private *sp;
+ const struct class *parent = class_intf->class;
struct class_dev_iter iter;
struct device *dev;
if (!parent)
return;
- mutex_lock(&parent->p->mutex);
+ sp = class_to_subsys(parent);
+ if (!sp)
+ return;
+
+ mutex_lock(&sp->mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
- class_intf->remove_dev(dev, class_intf);
+ class_intf->remove_dev(dev);
class_dev_iter_exit(&iter);
}
- mutex_unlock(&parent->p->mutex);
+ mutex_unlock(&sp->mutex);
- class_put(parent);
+ /*
+	 * Decrement the reference count twice: once for the class_to_subsys()
+	 * call at the start of this function, and once for the reference taken
+	 * in class_interface_register().
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
+EXPORT_SYMBOL_GPL(class_interface_unregister);
-ssize_t show_class_attr_string(struct class *class,
- struct class_attribute *attr, char *buf)
+ssize_t show_class_attr_string(const struct class *class,
+ const struct class_attribute *attr, char *buf)
{
struct class_attribute_string *cs;
@@ -575,6 +627,31 @@ void class_compat_remove_link(struct class_compat *cls, struct device *dev,
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
+/**
+ * class_is_registered - determine if, at this moment in time, a class is
+ *			 registered in the driver core or not.
+ * @class: the class to check
+ *
+ * Returns a boolean stating whether the class is currently registered in the
+ * driver core or not.  Note that the value could change right after this call
+ * returns, so only use this in places where you "know" it is safe to do so
+ * (usually to determine if the specific class has been registered yet or not).
+ *
+ * Be careful when using this.
+ */
+bool class_is_registered(const struct class *class)
+{
+ struct subsys_private *sp = class_to_subsys(class);
+ bool is_initialized = false;
+
+ if (sp) {
+ is_initialized = true;
+ subsys_put(sp);
+ }
+ return is_initialized;
+}
+EXPORT_SYMBOL_GPL(class_is_registered);
+
int __init classes_init(void)
{
class_kset = kset_create_and_add("class", NULL, NULL);
@@ -582,11 +659,3 @@ int __init classes_init(void)
return -ENOMEM;
return 0;
}
-
-EXPORT_SYMBOL_GPL(class_create_file_ns);
-EXPORT_SYMBOL_GPL(class_remove_file_ns);
-EXPORT_SYMBOL_GPL(class_unregister);
-EXPORT_SYMBOL_GPL(class_destroy);
-
-EXPORT_SYMBOL_GPL(class_interface_register);
-EXPORT_SYMBOL_GPL(class_interface_unregister);
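[Editor's note] The class.c hunks above replace direct class->p dereferences with refcounted class_to_subsys()/subsys_put() lookups, constify the struct class pointers, drop the second argument from the class_interface add_dev()/remove_dev() callbacks, and move the EXPORT_SYMBOL_GPL() annotations next to their definitions. A minimal sketch of a module written against the new interface follows; the my_* names and callback bodies are illustrative assumptions, not part of this diff.

#include <linux/device.h>
#include <linux/device/class.h>
#include <linux/module.h>

/* Hypothetical class; any registered struct class is handled the same way. */
static struct class my_class = {
	.name = "my_class",
};

/* New-style callbacks: no struct class_interface * argument anymore. */
static int my_add_dev(struct device *dev)
{
	dev_info(dev, "device added to my_class\n");
	return 0;
}

static void my_remove_dev(struct device *dev)
{
	dev_info(dev, "device removed from my_class\n");
}

static struct class_interface my_intf = {
	.class      = &my_class,
	.add_dev    = my_add_dev,
	.remove_dev = my_remove_dev,
};

static int __init my_init(void)
{
	int ret;

	ret = class_register(&my_class);
	if (ret)
		return ret;

	/* class_is_registered(&my_class) would now return true. */

	ret = class_interface_register(&my_intf);
	if (ret)
		class_unregister(&my_class);
	return ret;
}

static void __exit my_exit(void)
{
	class_interface_unregister(&my_intf);
	class_unregister(&my_class);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");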
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 5eadeac6c532..7dbf14a1d915 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -125,7 +125,7 @@ static void component_debugfs_add(struct aggregate_device *m)
static void component_debugfs_del(struct aggregate_device *m)
{
- debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
+ debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
}
#else
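[Editor's note] The component.c hunk swaps a debugfs_lookup() + debugfs_remove() pair for debugfs_lookup_and_remove(), which also drops the dentry reference that the bare lookup would otherwise leak. A hedged sketch of the same pattern in a made-up driver (all names are placeholders, not from this diff):

#include <linux/debugfs.h>

static struct dentry *my_dir;

static void my_debugfs_init(void)
{
	my_dir = debugfs_create_dir("my_driver", NULL);	/* hypothetical dir */
}

static void my_debugfs_add(const char *name, u32 *value)
{
	debugfs_create_u32(name, 0444, my_dir, value);
}

static void my_debugfs_del(const char *name)
{
	/*
	 * Looks the entry up under my_dir, removes it and releases the
	 * reference taken by the lookup, all in one call.
	 */
	debugfs_lookup_and_remove(name, my_dir);
}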
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a3e14143ec0c..3dff5037943e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -36,29 +36,17 @@
#include "physical_location.h"
#include "power/power.h"
-#ifdef CONFIG_SYSFS_DEPRECATED
-#ifdef CONFIG_SYSFS_DEPRECATED_V2
-long sysfs_deprecated = 1;
-#else
-long sysfs_deprecated = 0;
-#endif
-static int __init sysfs_deprecated_setup(char *arg)
-{
- return kstrtol(arg, 10, &sysfs_deprecated);
-}
-early_param("sysfs.deprecated", sysfs_deprecated_setup);
-#endif
-
/* Device links support. */
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
+static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
/**
- * fwnode_link_add - Create a link between two fwnode_handles.
+ * __fwnode_link_add - Create a link between two fwnode_handles.
* @con: Consumer end of the link.
* @sup: Supplier end of the link.
*
@@ -74,35 +62,42 @@ static bool fw_devlink_best_effort;
* Attempts to create duplicate links between the same pair of fwnode handles
* are ignored and there is no reference counting.
*/
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+static int __fwnode_link_add(struct fwnode_handle *con,
+ struct fwnode_handle *sup, u8 flags)
{
struct fwnode_link *link;
- int ret = 0;
-
- mutex_lock(&fwnode_link_lock);
list_for_each_entry(link, &sup->consumers, s_hook)
- if (link->consumer == con)
- goto out;
+ if (link->consumer == con) {
+ link->flags |= flags;
+ return 0;
+ }
link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!link)
+ return -ENOMEM;
link->supplier = sup;
INIT_LIST_HEAD(&link->s_hook);
link->consumer = con;
INIT_LIST_HEAD(&link->c_hook);
+ link->flags = flags;
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
- pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n",
con, sup);
-out:
- mutex_unlock(&fwnode_link_lock);
+ return 0;
+}
+
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+{
+ int ret;
+
+ mutex_lock(&fwnode_link_lock);
+ ret = __fwnode_link_add(con, sup, 0);
+ mutex_unlock(&fwnode_link_lock);
return ret;
}
@@ -114,7 +109,7 @@ out:
*/
static void __fwnode_link_del(struct fwnode_link *link)
{
- pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+ pr_debug("%pfwf Dropping the fwnode link to %pfwf\n",
link->consumer, link->supplier);
list_del(&link->s_hook);
list_del(&link->c_hook);
@@ -122,6 +117,19 @@ static void __fwnode_link_del(struct fwnode_link *link)
}
/**
+ * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
+ * @link: the fwnode_link to be marked
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_cycle(struct fwnode_link *link)
+{
+ pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ link->consumer, link->supplier);
+ link->flags |= FWLINK_FLAG_CYCLE;
+}
+
+/**
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
* @fwnode: fwnode whose supplier links need to be deleted
*
@@ -181,7 +189,51 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
-#ifdef CONFIG_SRCU
+/**
+ * __fwnode_links_move_consumers - Move consumers from @from to @to fwnode_handle
+ * @from: move consumers away from this fwnode
+ * @to: move consumers to this fwnode
+ *
+ * Move all consumer links from @from fwnode to @to fwnode.
+ */
+static void __fwnode_links_move_consumers(struct fwnode_handle *from,
+ struct fwnode_handle *to)
+{
+ struct fwnode_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
+ __fwnode_link_add(link->consumer, to, link->flags);
+ __fwnode_link_del(link);
+ }
+}
+
+/**
+ * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
+ * @fwnode: fwnode from which to pick up dangling consumers
+ * @new_sup: fwnode of new supplier
+ *
+ * If the @fwnode has a corresponding struct device and the device supports
+ * probing (that is, added to a bus), then we want to let fw_devlink create
+ * MANAGED device links to this device, so leave @fwnode and its descendant's
+ * fwnode links alone.
+ *
+ * Otherwise, move its consumers to the new supplier @new_sup.
+ */
+static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
+ struct fwnode_handle *new_sup)
+{
+ struct fwnode_handle *child;
+
+ if (fwnode->dev && fwnode->dev->bus)
+ return;
+
+ fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ __fwnode_links_move_consumers(fwnode, new_sup);
+
+ fwnode_for_each_available_child_node(fwnode, child)
+ __fw_devlink_pickup_dangling_consumers(child, new_sup);
+}
+
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
@@ -220,47 +272,6 @@ static void device_link_remove_from_lists(struct device_link *link)
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
}
-#else /* !CONFIG_SRCU */
-static DECLARE_RWSEM(device_links_lock);
-
-static inline void device_links_write_lock(void)
-{
- down_write(&device_links_lock);
-}
-
-static inline void device_links_write_unlock(void)
-{
- up_write(&device_links_lock);
-}
-
-int device_links_read_lock(void)
-{
- down_read(&device_links_lock);
- return 0;
-}
-
-void device_links_read_unlock(int not_used)
-{
- up_read(&device_links_lock);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int device_links_read_lock_held(void)
-{
- return lockdep_is_held(&device_links_lock);
-}
-#endif
-
-static inline void device_link_synchronize_removal(void)
-{
-}
-
-static void device_link_remove_from_lists(struct device_link *link)
-{
- list_del(&link->s_node);
- list_del(&link->c_node);
-}
-#endif /* !CONFIG_SRCU */
static bool device_is_ancestor(struct device *dev, struct device *target)
{
@@ -272,6 +283,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+static inline bool device_link_flag_is_sync_state_only(u32 flags)
+{
+ return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -298,8 +315,7 @@ int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if ((link->flags & ~DL_FLAG_INFERRED) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if (device_link_flag_is_sync_state_only(link->flags))
continue;
if (link->consumer == target)
@@ -372,8 +388,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if ((link->flags & ~DL_FLAG_INFERRED) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if (device_link_flag_is_sync_state_only(link->flags))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
@@ -522,13 +537,11 @@ static void devlink_dev_release(struct device *dev)
static struct class devlink_class = {
.name = "devlink",
- .owner = THIS_MODULE,
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
};
-static int devlink_add_symlinks(struct device *dev,
- struct class_interface *class_intf)
+static int devlink_add_symlinks(struct device *dev)
{
int ret;
size_t len;
@@ -577,8 +590,7 @@ out:
return ret;
}
-static void devlink_remove_symlinks(struct device *dev,
- struct class_interface *class_intf)
+static void devlink_remove_symlinks(struct device *dev)
{
struct device_link *link = to_devlink(dev);
size_t len;
@@ -634,7 +646,8 @@ postcore_initcall(devlink_class_init);
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
DL_FLAG_SYNC_STATE_ONLY | \
- DL_FLAG_INFERRED)
+ DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -703,8 +716,6 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || consumer == supplier ||
flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
- (flags & DL_FLAG_SYNC_STATE_ONLY &&
- (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -720,6 +731,10 @@ struct device_link *device_link_add(struct device *consumer,
if (!(flags & DL_FLAG_STATELESS))
flags |= DL_FLAG_MANAGED;
+ if (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !device_link_flag_is_sync_state_only(flags))
+ return NULL;
+
device_links_write_lock();
device_pm_lock();
@@ -984,6 +999,21 @@ static bool dev_is_best_effort(struct device *dev)
(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}
+static struct fwnode_handle *fwnode_links_check_suppliers(
+ struct fwnode_handle *fwnode)
+{
+ struct fwnode_link *link;
+
+ if (!fwnode || fw_devlink_is_permissive())
+ return NULL;
+
+ list_for_each_entry(link, &fwnode->suppliers, c_hook)
+ if (!(link->flags & FWLINK_FLAG_CYCLE))
+ return link->supplier;
+
+ return NULL;
+}
+
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
@@ -1011,15 +1041,12 @@ int device_links_check_suppliers(struct device *dev)
* probe.
*/
mutex_lock(&fwnode_link_lock);
- if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
- !fw_devlink_is_permissive()) {
- sup_fw = list_first_entry(&dev->fwnode->suppliers,
- struct fwnode_link,
- c_hook)->supplier;
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
if (!dev_is_best_effort(dev)) {
fwnode_ret = -EPROBE_DEFER;
dev_err_probe(dev, -EPROBE_DEFER,
- "wait for supplier %pfwP\n", sup_fw);
+ "wait for supplier %pfwf\n", sup_fw);
} else {
fwnode_ret = -EAGAIN;
}
@@ -1130,10 +1157,7 @@ static void device_links_flush_sync_list(struct list_head *list,
if (dev != dont_lock_dev)
device_lock(dev);
- if (dev->bus->sync_state)
- dev->bus->sync_state(dev);
- else if (dev->driver && dev->driver->sync_state)
- dev->driver->sync_state(dev);
+ dev_sync_state(dev);
if (dev != dont_lock_dev)
device_unlock(dev);
@@ -1204,7 +1228,9 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
bool val;
device_lock(dev);
- val = !list_empty(&dev->fwnode->suppliers);
+ mutex_lock(&fwnode_link_lock);
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
+ mutex_unlock(&fwnode_link_lock);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
@@ -1267,16 +1293,23 @@ void device_links_driver_bound(struct device *dev)
* them. So, fw_devlink no longer needs to create device links to any
* of the device's suppliers.
*
- * Also, if a child firmware node of this bound device is not added as
- * a device by now, assume it is never going to be added and make sure
- * other devices don't defer probe indefinitely by waiting for such a
- * child device.
+ * Also, if a child firmware node of this bound device is not added as a
+ * device by now, assume it is never going to be added. Make this bound
+ * device the fallback supplier to the dangling consumers of the child
+ * firmware node because this bound device is probably implementing the
+ * child firmware node functionality and we don't want the dangling
+ * consumers to defer probe indefinitely waiting for a device for the
+ * child firmware node.
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
+ mutex_lock(&fwnode_link_lock);
fwnode_for_each_available_child_node(dev->fwnode, child)
- fw_devlink_purge_absent_suppliers(child);
+ __fw_devlink_pickup_dangling_consumers(child,
+ dev->fwnode);
+ __fw_devlink_link_to_consumers(dev);
+ mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
@@ -1633,8 +1666,36 @@ static int __init fw_devlink_strict_setup(char *arg)
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
-u32 fw_devlink_get_flags(void)
+#define FW_DEVLINK_SYNC_STATE_STRICT 0
+#define FW_DEVLINK_SYNC_STATE_TIMEOUT 1
+
+#ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT
+static int fw_devlink_sync_state;
+#else
+static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
+#endif
+
+static int __init fw_devlink_sync_state_setup(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "strict") == 0) {
+ fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT;
+ return 0;
+ } else if (strcmp(arg, "timeout") == 0) {
+ fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
+ return 0;
+ }
+ return -EINVAL;
+}
+early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup);
+
+static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
{
+ if (fwlink_flags & FWLINK_FLAG_CYCLE)
+ return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
+
return fw_devlink_flags;
}
@@ -1672,7 +1733,7 @@ static void fw_devlink_relax_link(struct device_link *link)
if (!(link->flags & DL_FLAG_INFERRED))
return;
- if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
+ if (device_link_flag_is_sync_state_only(link->flags))
return;
pm_runtime_drop_link(link);
@@ -1700,6 +1761,44 @@ void fw_devlink_drivers_done(void)
device_links_write_unlock();
}
+static int fw_devlink_dev_sync_state(struct device *dev, void *data)
+{
+ struct device_link *link = to_devlink(dev);
+ struct device *sup = link->supplier;
+
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->status == DL_STATE_ACTIVE || sup->state_synced ||
+ !dev_has_sync_state(sup))
+ return 0;
+
+ if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
+ dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_name(link->consumer));
+ return 0;
+ }
+
+ if (!list_empty(&sup->links.defer_sync))
+ return 0;
+
+ dev_warn(sup, "Timed out. Forcing sync_state()\n");
+ sup->state_synced = true;
+ get_device(sup);
+ list_add_tail(&sup->links.defer_sync, data);
+
+ return 0;
+}
+
+void fw_devlink_probing_done(void)
+{
+ LIST_HEAD(sync_list);
+
+ device_links_write_lock();
+ class_for_each_device(&devlink_class, NULL, &sync_list,
+ fw_devlink_dev_sync_state);
+ device_links_write_unlock();
+ device_links_flush_sync_list(&sync_list, NULL);
+}
+
/**
* wait_for_init_devices_probe - Try to probe any device needed for init
*
@@ -1769,44 +1868,138 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
+
+static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+ bool ret;
+
+ if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
+ return false;
+
+ dev = get_dev_from_fwnode(fwnode);
+ ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
+ put_device(dev);
+
+ return ret;
+}
+
+static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (fwnode_init_without_drv(parent)) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
- * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
- * @con: Device to check dependencies for.
- * @sup: Device to check against.
- *
- * Check if @sup depends on @con or any device dependent on it (its child or
- * its consumer etc). When such a cyclic dependency is found, convert all
- * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
- * This is the equivalent of doing fw_devlink=permissive just between the
- * devices in the cycle. We need to do this because, at this point, fw_devlink
- * can't tell which of these dependencies is not a real dependency.
- *
- * Return 1 if a cycle is found. Otherwise, return 0.
+ * __fw_devlink_relax_cycles - Relax and mark dependency cycles.
+ * @con: Potential consumer device.
+ * @sup_handle: Potential supplier's fwnode.
+ *
+ * Needs to be called with fwnode_link_lock and the device links lock held.
+ *
+ * Check if @sup_handle or any of its ancestors or suppliers directly/indirectly
+ * depend on @con. This function can detect multiple cycles between @sup_handle
+ * and @con. When such dependency cycles are found, convert all device links
+ * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark
+ * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are
+ * converted into a device link in the future, they are created as
+ * SYNC_STATE_ONLY device links. This is the equivalent of doing
+ * fw_devlink=permissive just between the devices in the cycle. We need to do
+ * this because, at this point, fw_devlink can't tell which of these
+ * dependencies is not a real dependency.
+ *
+ * Return true if one or more cycles were found. Otherwise, return false.
*/
-static int fw_devlink_relax_cycle(struct device *con, void *sup)
+static bool __fw_devlink_relax_cycles(struct device *con,
+ struct fwnode_handle *sup_handle)
{
- struct device_link *link;
- int ret;
+ struct device *sup_dev = NULL, *par_dev = NULL;
+ struct fwnode_link *link;
+ struct device_link *dev_link;
+ bool ret = false;
- if (con == sup)
- return 1;
+ if (!sup_handle)
+ return false;
- ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
- if (ret)
- return ret;
+ /*
+ * We aren't trying to find all cycles. Just a cycle between con and
+ * sup_handle.
+ */
+ if (sup_handle->flags & FWNODE_FLAG_VISITED)
+ return false;
- list_for_each_entry(link, &con->links.consumers, s_node) {
- if ((link->flags & ~DL_FLAG_INFERRED) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
- continue;
+ sup_handle->flags |= FWNODE_FLAG_VISITED;
- if (!fw_devlink_relax_cycle(link->consumer, sup))
- continue;
+ sup_dev = get_dev_from_fwnode(sup_handle);
- ret = 1;
+ /* Termination condition. */
+ if (sup_dev == con) {
+ ret = true;
+ goto out;
+ }
- fw_devlink_relax_link(link);
+ /*
+ * If sup_dev is bound to a driver and @con hasn't started binding to a
+ * driver, sup_dev can't be a consumer of @con. So, no need to check
+ * further.
+ */
+ if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
+ con->links.status == DL_DEV_NO_DRIVER) {
+ ret = false;
+ goto out;
+ }
+
+ list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
+ if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ __fwnode_link_cycle(link);
+ ret = true;
+ }
+ }
+
+ /*
+ * Give priority to device parent over fwnode parent to account for any
+ * quirks in how fwnodes are converted to devices.
+ */
+ if (sup_dev)
+ par_dev = get_device(sup_dev->parent);
+ else
+ par_dev = fwnode_get_next_parent_dev(sup_handle);
+
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ ret = true;
+
+ if (!sup_dev)
+ goto out;
+
+ list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
+ /*
+ * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
+ * such due to a cycle.
+ */
+ if (device_link_flag_is_sync_state_only(dev_link->flags) &&
+ !(dev_link->flags & DL_FLAG_CYCLE))
+ continue;
+
+ if (__fw_devlink_relax_cycles(con,
+ dev_link->supplier->fwnode)) {
+ fw_devlink_relax_link(dev_link);
+ dev_link->flags |= DL_FLAG_CYCLE;
+ ret = true;
+ }
}
+
+out:
+ sup_handle->flags &= ~FWNODE_FLAG_VISITED;
+ put_device(sup_dev);
+ put_device(par_dev);
return ret;
}
@@ -1814,7 +2007,7 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con: consumer device for the device link
* @sup_handle: fwnode handle of supplier
- * @flags: devlink flags
+ * @link: fwnode link that's being converted to a device link
*
* This function will try to create a device link between the consumer device
* @con and the supplier device represented by @sup_handle.
@@ -1831,10 +2024,17 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
* possible to do that in the future
*/
static int fw_devlink_create_devlink(struct device *con,
- struct fwnode_handle *sup_handle, u32 flags)
+ struct fwnode_handle *sup_handle,
+ struct fwnode_link *link)
{
struct device *sup_dev;
int ret = 0;
+ u32 flags;
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
/*
* In some cases, a device P might also be a supplier to its child node
@@ -1855,7 +2055,26 @@ static int fw_devlink_create_devlink(struct device *con,
fwnode_is_ancestor_of(sup_handle, con->fwnode))
return -EINVAL;
- sup_dev = get_dev_from_fwnode(sup_handle);
+ /*
+	 * SYNC_STATE_ONLY device links don't block probing and support cycles.
+ * So cycle detection isn't necessary and shouldn't be done.
+ */
+ if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(con, sup_handle)) {
+ __fwnode_link_cycle(link);
+ flags = fw_devlink_get_flags(link->flags);
+ dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
+ sup_handle);
+ }
+ device_links_write_unlock();
+ }
+
+ if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
+ sup_dev = fwnode_get_next_parent_dev(sup_handle);
+ else
+ sup_dev = get_dev_from_fwnode(sup_handle);
+
if (sup_dev) {
/*
* If it's one of those drivers that don't actually bind to
@@ -1864,71 +2083,34 @@ static int fw_devlink_create_devlink(struct device *con,
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
+ dev_dbg(con,
+ "Not linking %pfwf - dev might never probe\n",
+ sup_handle);
ret = -EINVAL;
goto out;
}
- /*
- * If this fails, it is due to cycles in device links. Just
- * give up on this link and treat it as invalid.
- */
- if (!device_link_add(con, sup_dev, flags) &&
- !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
- dev_info(con, "Fixing up cyclic dependency with %s\n",
- dev_name(sup_dev));
- device_links_write_lock();
- fw_devlink_relax_cycle(con, sup_dev);
- device_links_write_unlock();
- device_link_add(con, sup_dev,
- FW_DEVLINK_FLAGS_PERMISSIVE);
+ if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
+ dev_err(con, "Failed to create device link (0x%x) with %s\n",
+ flags, dev_name(sup_dev));
ret = -EINVAL;
}
goto out;
}
- /* Supplier that's already initialized without a struct device. */
- if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
- return -EINVAL;
-
/*
- * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
- * cycles. So cycle detection isn't necessary and shouldn't be
- * done.
+ * Supplier or supplier's ancestor already initialized without a struct
+ * device or being probed by a driver.
*/
- if (flags & DL_FLAG_SYNC_STATE_ONLY)
- return -EAGAIN;
-
- /*
- * If we can't find the supplier device from its fwnode, it might be
- * due to a cyclic dependency between fwnodes. Some of these cycles can
- * be broken by applying logic. Check for these types of cycles and
- * break them so that devices in the cycle probe properly.
- *
- * If the supplier's parent is dependent on the consumer, then the
- * consumer and supplier have a cyclic dependency. Since fw_devlink
- * can't tell which of the inferred dependencies are incorrect, don't
- * enforce probe ordering between any of the devices in this cyclic
- * dependency. Do this by relaxing all the fw_devlink device links in
- * this cycle and by treating the fwnode link between the consumer and
- * the supplier as an invalid dependency.
- */
- sup_dev = fwnode_get_next_parent_dev(sup_handle);
- if (sup_dev && device_is_dependent(con, sup_dev)) {
- dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
- sup_handle, dev_name(sup_dev));
- device_links_write_lock();
- fw_devlink_relax_cycle(con, sup_dev);
- device_links_write_unlock();
- ret = -EINVAL;
- } else {
- /*
- * Can't check for cycles or no cycles. So let's try
- * again later.
- */
- ret = -EAGAIN;
+ if (fwnode_init_without_drv(sup_handle) ||
+ fwnode_ancestor_init_without_drv(sup_handle)) {
+ dev_dbg(con, "Not linking %pfwf - might never become dev\n",
+ sup_handle);
+ return -EINVAL;
}
+ ret = -EAGAIN;
out:
put_device(sup_dev);
return ret;
@@ -1956,7 +2138,6 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
struct fwnode_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
- u32 dl_flags = fw_devlink_get_flags();
struct device *con_dev;
bool own_link = true;
int ret;
@@ -1986,14 +2167,13 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
con_dev = NULL;
} else {
own_link = false;
- dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
}
}
if (!con_dev)
continue;
- ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
+ ret = fw_devlink_create_devlink(con_dev, fwnode, link);
put_device(con_dev);
if (!own_link || ret == -EAGAIN)
continue;
@@ -2013,10 +2193,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
*
* The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
* and the real suppliers of @dev. Once these device links are created, the
- * fwnode links are deleted. When such device links are successfully created,
- * this function is called recursively on those supplier devices. This is
- * needed to detect and break some invalid cycles in fwnode links. See
- * fw_devlink_create_devlink() for more details.
+ * fwnode links are deleted.
*
* In addition, it also looks at all the suppliers of the entire fwnode tree
* because some of the child devices of @dev that have not been added yet
@@ -2034,44 +2211,16 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
bool own_link = (dev->fwnode == fwnode);
struct fwnode_link *link, *tmp;
struct fwnode_handle *child = NULL;
- u32 dl_flags;
-
- if (own_link)
- dl_flags = fw_devlink_get_flags();
- else
- dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
- struct device *sup_dev;
struct fwnode_handle *sup = link->supplier;
- ret = fw_devlink_create_devlink(dev, sup, dl_flags);
+ ret = fw_devlink_create_devlink(dev, sup, link);
if (!own_link || ret == -EAGAIN)
continue;
__fwnode_link_del(link);
-
- /* If no device link was created, nothing more to do. */
- if (ret)
- continue;
-
- /*
- * If a device link was successfully created to a supplier, we
- * now need to try and link the supplier to all its suppliers.
- *
- * This is needed to detect and delete false dependencies in
- * fwnode links that haven't been converted to a device link
- * yet. See comments in fw_devlink_create_devlink() for more
- * details on the false dependency.
- *
- * Without deleting these false dependencies, some devices will
- * never probe because they'll keep waiting for their false
- * dependency fwnode links to be converted to device links.
- */
- sup_dev = get_dev_from_fwnode(sup);
- __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
- put_device(sup_dev);
}
/*
@@ -2104,8 +2253,12 @@ static void fw_devlink_link_device(struct device *dev)
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
-struct kobject *sysfs_dev_char_kobj;
-struct kobject *sysfs_dev_block_kobj;
+
+/* /sys/dev/char */
+static struct kobject *sysfs_dev_char_kobj;
+
+/* /sys/dev/block */
+static struct kobject *sysfs_dev_block_kobj;
static DEFINE_MUTEX(device_hotplug_lock);
@@ -2354,7 +2507,7 @@ static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t
dev->class->get_ownership(dev, uid, gid);
}
-static struct kobj_type device_ktype = {
+static const struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
@@ -2387,9 +2540,9 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
-static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
+static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
- struct device *dev = kobj_to_dev(kobj);
+ const struct device *dev = kobj_to_dev(kobj);
int retval = 0;
/* add device node properties if present */
@@ -2674,7 +2827,7 @@ EXPORT_SYMBOL_GPL(devm_device_add_groups);
static int device_add_attrs(struct device *dev)
{
- struct class *class = dev->class;
+ const struct class *class = dev->class;
const struct device_type *type = dev->type;
int error;
@@ -2741,7 +2894,7 @@ static int device_add_attrs(struct device *dev)
static void device_remove_attrs(struct device *dev)
{
- struct class *class = dev->class;
+ const struct class *class = dev->class;
const struct device_type *type = dev->type;
if (dev->physical_location) {
@@ -2974,7 +3127,7 @@ struct kobject *virtual_device_parent(struct device *dev)
struct class_dir {
struct kobject kobj;
- struct class *class;
+ const struct class *class;
};
#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
@@ -2992,14 +3145,14 @@ struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *ko
return dir->class->ns_type;
}
-static struct kobj_type class_dir_ktype = {
+static const struct kobj_type class_dir_ktype = {
.release = class_dir_release,
.sysfs_ops = &kobj_sysfs_ops,
.child_ns_type = class_dir_child_ns_type
};
-static struct kobject *
-class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+static struct kobject *class_dir_create_and_add(struct subsys_private *sp,
+ struct kobject *parent_kobj)
{
struct class_dir *dir;
int retval;
@@ -3008,12 +3161,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
if (!dir)
return ERR_PTR(-ENOMEM);
- dir->class = class;
+ dir->class = sp->class;
kobject_init(&dir->kobj, &class_dir_ktype);
- dir->kobj.kset = &class->p->glue_dirs;
+ dir->kobj.kset = &sp->glue_dirs;
- retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
return ERR_PTR(retval);
@@ -3026,20 +3179,13 @@ static DEFINE_MUTEX(gdp_mutex);
static struct kobject *get_device_parent(struct device *dev,
struct device *parent)
{
- if (dev->class) {
- struct kobject *kobj = NULL;
+ struct subsys_private *sp = class_to_subsys(dev->class);
+ struct kobject *kobj = NULL;
+
+ if (sp) {
struct kobject *parent_kobj;
struct kobject *k;
-#ifdef CONFIG_BLOCK
- /* block disks show up in /sys/block */
- if (sysfs_deprecated && dev->class == &block_class) {
- if (parent && parent->class == &block_class)
- return &parent->kobj;
- return &block_class.p->subsys.kobj;
- }
-#endif
-
/*
* If we have no parent, we live in "virtual".
* Class-devices with a non class-device as parent, live
@@ -3047,36 +3193,47 @@ static struct kobject *get_device_parent(struct device *dev,
*/
if (parent == NULL)
parent_kobj = virtual_device_parent(dev);
- else if (parent->class && !dev->class->ns_type)
+ else if (parent->class && !dev->class->ns_type) {
+ subsys_put(sp);
return &parent->kobj;
- else
+ } else {
parent_kobj = &parent->kobj;
+ }
mutex_lock(&gdp_mutex);
/* find our class-directory at the parent and reference it */
- spin_lock(&dev->class->p->glue_dirs.list_lock);
- list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
+ spin_lock(&sp->glue_dirs.list_lock);
+ list_for_each_entry(k, &sp->glue_dirs.list, entry)
if (k->parent == parent_kobj) {
kobj = kobject_get(k);
break;
}
- spin_unlock(&dev->class->p->glue_dirs.list_lock);
+ spin_unlock(&sp->glue_dirs.list_lock);
if (kobj) {
mutex_unlock(&gdp_mutex);
+ subsys_put(sp);
return kobj;
}
/* or create a new class-directory at the parent device */
- k = class_dir_create_and_add(dev->class, parent_kobj);
+ k = class_dir_create_and_add(sp, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
mutex_unlock(&gdp_mutex);
+ subsys_put(sp);
return k;
}
/* subsystems can specify a default root directory for their devices */
- if (!parent && dev->bus && dev->bus->dev_root)
- return &dev->bus->dev_root->kobj;
+ if (!parent && dev->bus) {
+ struct device *dev_root = bus_get_dev_root(dev->bus);
+
+ if (dev_root) {
+ kobj = &dev_root->kobj;
+ put_device(dev_root);
+ return kobj;
+ }
+ }
if (parent)
return &parent->kobj;
@@ -3086,10 +3243,23 @@ static struct kobject *get_device_parent(struct device *dev,
static inline bool live_in_glue_dir(struct kobject *kobj,
struct device *dev)
{
- if (!kobj || !dev->class ||
- kobj->kset != &dev->class->p->glue_dirs)
+ struct subsys_private *sp;
+ bool retval;
+
+ if (!kobj || !dev->class)
return false;
- return true;
+
+ sp = class_to_subsys(dev->class);
+ if (!sp)
+ return false;
+
+ if (kobj->kset == &sp->glue_dirs)
+ retval = true;
+ else
+ retval = false;
+
+ subsys_put(sp);
+ return retval;
}
static inline struct kobject *get_glue_dir(struct device *dev)
@@ -3186,6 +3356,7 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
static int device_add_class_symlinks(struct device *dev)
{
struct device_node *of_node = dev_of_node(dev);
+ struct subsys_private *sp;
int error;
if (of_node) {
@@ -3195,12 +3366,11 @@ static int device_add_class_symlinks(struct device *dev)
/* An error here doesn't warrant bringing down the device */
}
- if (!dev->class)
+ sp = class_to_subsys(dev->class);
+ if (!sp)
return 0;
- error = sysfs_create_link(&dev->kobj,
- &dev->class->p->subsys.kobj,
- "subsystem");
+ error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
if (error)
goto out_devnode;
@@ -3211,46 +3381,38 @@ static int device_add_class_symlinks(struct device *dev)
goto out_subsys;
}
-#ifdef CONFIG_BLOCK
- /* /sys/block has directories and does not need symlinks */
- if (sysfs_deprecated && dev->class == &block_class)
- return 0;
-#endif
-
/* link in the class directory pointing to the device */
- error = sysfs_create_link(&dev->class->p->subsys.kobj,
- &dev->kobj, dev_name(dev));
+ error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
if (error)
goto out_device;
-
- return 0;
+ goto exit;
out_device:
sysfs_remove_link(&dev->kobj, "device");
-
out_subsys:
sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
sysfs_remove_link(&dev->kobj, "of_node");
+exit:
+ subsys_put(sp);
return error;
}
static void device_remove_class_symlinks(struct device *dev)
{
+ struct subsys_private *sp = class_to_subsys(dev->class);
+
if (dev_of_node(dev))
sysfs_remove_link(&dev->kobj, "of_node");
- if (!dev->class)
+ if (!sp)
return;
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(&dev->kobj, "subsystem");
-#ifdef CONFIG_BLOCK
- if (sysfs_deprecated && dev->class == &block_class)
- return;
-#endif
- sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
+ sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
+ subsys_put(sp);
}
/**
@@ -3270,27 +3432,13 @@ int dev_set_name(struct device *dev, const char *fmt, ...)
}
EXPORT_SYMBOL_GPL(dev_set_name);
-/**
- * device_to_dev_kobj - select a /sys/dev/ directory for the device
- * @dev: device
- *
- * By default we select char/ for new entries. Setting class->dev_obj
- * to NULL prevents an entry from being created. class->dev_kobj must
- * be set (or cleared) before any devices are registered to the class
- * otherwise device_create_sys_dev_entry() and
- * device_remove_sys_dev_entry() will disagree about the presence of
- * the link.
- */
+/* select a /sys/dev/ directory for the device */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
- struct kobject *kobj;
-
- if (dev->class)
- kobj = dev->class->dev_kobj;
+ if (is_blockdev(dev))
+ return sysfs_dev_block_kobj;
else
- kobj = sysfs_dev_char_kobj;
-
- return kobj;
+ return sysfs_dev_char_kobj;
}
static int device_create_sys_dev_entry(struct device *dev)
@@ -3359,6 +3507,7 @@ static int device_private_init(struct device *dev)
*/
int device_add(struct device *dev)
{
+ struct subsys_private *sp;
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
@@ -3413,7 +3562,7 @@ int device_add(struct device *dev)
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error) {
- glue_dir = get_glue_dir(dev);
+ glue_dir = kobj;
goto Error;
}
@@ -3453,10 +3602,7 @@ int device_add(struct device *dev)
/* Notify clients of device addition. This call must come
* after dpm_sysfs_add() and before kobject_uevent().
*/
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_ADD_DEVICE, dev);
-
+ bus_notify(dev, BUS_NOTIFY_ADD_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_ADD);
/*
@@ -3490,18 +3636,18 @@ int device_add(struct device *dev)
klist_add_tail(&dev->p->knode_parent,
&parent->p->klist_children);
- if (dev->class) {
- mutex_lock(&dev->class->p->mutex);
+ sp = class_to_subsys(dev->class);
+ if (sp) {
+ mutex_lock(&sp->mutex);
/* tie the class to the device */
- klist_add_tail(&dev->p->knode_class,
- &dev->class->p->klist_devices);
+ klist_add_tail(&dev->p->knode_class, &sp->klist_devices);
/* notify any interfaces that the device is here */
- list_for_each_entry(class_intf,
- &dev->class->p->interfaces, node)
+ list_for_each_entry(class_intf, &sp->interfaces, node)
if (class_intf->add_dev)
- class_intf->add_dev(dev, class_intf);
- mutex_unlock(&dev->class->p->mutex);
+ class_intf->add_dev(dev);
+ mutex_unlock(&sp->mutex);
+ subsys_put(sp);
}
done:
put_device(dev);
@@ -3513,6 +3659,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
+ dev->driver = NULL;
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
@@ -3620,6 +3767,7 @@ EXPORT_SYMBOL_GPL(kill_device);
*/
void device_del(struct device *dev)
{
+ struct subsys_private *sp;
struct device *parent = dev->parent;
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
@@ -3636,9 +3784,7 @@ void device_del(struct device *dev)
* before dpm_sysfs_remove().
*/
noio_flag = memalloc_noio_save();
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DEL_DEVICE, dev);
+ bus_notify(dev, BUS_NOTIFY_DEL_DEVICE);
dpm_sysfs_remove(dev);
if (parent)
@@ -3648,18 +3794,20 @@ void device_del(struct device *dev)
device_remove_sys_dev_entry(dev);
device_remove_file(dev, &dev_attr_dev);
}
- if (dev->class) {
+
+ sp = class_to_subsys(dev->class);
+ if (sp) {
device_remove_class_symlinks(dev);
- mutex_lock(&dev->class->p->mutex);
+ mutex_lock(&sp->mutex);
/* notify any interfaces that the device is now gone */
- list_for_each_entry(class_intf,
- &dev->class->p->interfaces, node)
+ list_for_each_entry(class_intf, &sp->interfaces, node)
if (class_intf->remove_dev)
- class_intf->remove_dev(dev, class_intf);
+ class_intf->remove_dev(dev);
/* remove the device from the class list */
klist_del(&dev->p->knode_class);
- mutex_unlock(&dev->class->p->mutex);
+ mutex_unlock(&sp->mutex);
+ subsys_put(sp);
}
device_remove_file(dev, &dev_attr_uevent);
device_remove_attrs(dev);
@@ -3669,9 +3817,7 @@ void device_del(struct device *dev)
device_platform_notify_remove(dev);
device_links_purge(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_REMOVED_DEVICE, dev);
+ bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
@@ -3739,7 +3885,7 @@ static struct device *next_device(struct klist_iter *i)
* a name. This memory is returned in tmp and needs to be
* freed by the caller.
*/
-const char *device_get_devnode(struct device *dev,
+const char *device_get_devnode(const struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid,
const char **tmp)
{
@@ -4124,7 +4270,7 @@ static void device_create_release(struct device *dev)
}
static __printf(6, 0) struct device *
-device_create_groups_vargs(struct class *class, struct device *parent,
+device_create_groups_vargs(const struct class *class, struct device *parent,
dev_t devt, void *drvdata,
const struct attribute_group **groups,
const char *fmt, va_list args)
@@ -4184,11 +4330,8 @@ error:
* pointer.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
*/
-struct device *device_create(struct class *class, struct device *parent,
+struct device *device_create(const struct class *class, struct device *parent,
dev_t devt, void *drvdata, const char *fmt, ...)
{
va_list vargs;
@@ -4225,11 +4368,8 @@ EXPORT_SYMBOL_GPL(device_create);
* pointer.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
*/
-struct device *device_create_with_groups(struct class *class,
+struct device *device_create_with_groups(const struct class *class,
struct device *parent, dev_t devt,
void *drvdata,
const struct attribute_group **groups,
@@ -4254,7 +4394,7 @@ EXPORT_SYMBOL_GPL(device_create_with_groups);
* This call unregisters and cleans up a device that was created with a
* call to device_create().
*/
-void device_destroy(struct class *class, dev_t devt)
+void device_destroy(const struct class *class, dev_t devt)
{
struct device *dev;
@@ -4276,9 +4416,12 @@ EXPORT_SYMBOL_GPL(device_destroy);
* on the same device to ensure that new_name is valid and
* won't conflict with other devices.
*
- * Note: Don't call this function. Currently, the networking layer calls this
- * function, but that will change. The following text from Kay Sievers offers
- * some insight:
+ * Note: given that some subsystems (networking and infiniband) use this
+ * function, with no immediate plans for this to change, we cannot assume or
+ * require that this function not be called at all.
+ *
+ * However, if you're writing new code, do not call this function. The following
+ * text from Kay Sievers offers some insight:
*
* Renaming devices is racy at many levels, symlinks and other stuff are not
* replaced atomically, and you get a "move" uevent, but it's not easy to
@@ -4292,13 +4435,6 @@ EXPORT_SYMBOL_GPL(device_destroy);
* kernel device renaming. Besides that, it's not even implemented now for
* other things than (driver-core wise very simple) network devices.
*
- * We are currently about to change network renaming in udev to completely
- * disallow renaming of devices in the same namespace as the kernel uses,
- * because we can't solve the problems properly, that arise with swapping names
- * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
- * be allowed to some other name than eth[0-9]*, for the aforementioned
- * reasons.
- *
* Make up a "real" name in the driver before you register anything, or add
* some other attributes for userspace to find the device, or use udev to add
* symlinks -- but never rename kernel devices later, it's a complete mess. We
@@ -4324,9 +4460,16 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
- kobj, old_device_name,
+ struct subsys_private *sp = class_to_subsys(dev->class);
+
+ if (!sp) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
+ subsys_put(sp);
if (error)
goto out;
}
@@ -4451,7 +4594,7 @@ static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
{
struct kobject *kobj = &dev->kobj;
- struct class *class = dev->class;
+ const struct class *class = dev->class;
const struct device_type *type = dev->type;
int error;
@@ -4509,6 +4652,7 @@ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
int error;
struct kobject *kobj = &dev->kobj;
+ struct subsys_private *sp;
dev = get_device(dev);
if (!dev)
@@ -4545,21 +4689,19 @@ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
if (error)
goto out;
-#ifdef CONFIG_BLOCK
- if (sysfs_deprecated && dev->class == &block_class)
- goto out;
-#endif
-
/*
* Change the owner of the symlink located in the class directory of
* the device class associated with @dev which points to the actual
* directory entry for @dev to @kuid/@kgid. This ensures that the
* symlink shows the same permissions as its target.
*/
- error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
- dev_name(dev), kuid, kgid);
- if (error)
+ sp = class_to_subsys(dev->class);
+ if (!sp) {
+ error = -EINVAL;
goto out;
+ }
+ error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid);
+ subsys_put(sp);
out:
put_device(dev);
@@ -4858,9 +5000,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+
+ /* Skip nullifying fn->secondary if the primary is shared */
+ if (parent && fn == parent->fwnode)
+ return;
+
/* Set fn->secondary = NULL, so fn remains the primary fwnode */
- if (!(parent && fn == parent->fwnode))
- fn->secondary = NULL;
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
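[Editor's note] Among other things, the core.c hunks add fw_devlink_probing_done() and a fw_devlink.sync_state= parameter: in the default "timeout" mode, suppliers whose consumers never probed get their sync_state() callback forced after the deferred-probe timeout instead of being held back indefinitely. Suppliers opt in simply by implementing sync_state(); a minimal, hypothetical platform driver showing the hook is sketched below — the compatible string, names, and log messages are assumptions, not taken from this diff.

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int my_supplier_probe(struct platform_device *pdev)
{
	/* Leave boot-time state untouched until consumers have probed. */
	dev_info(&pdev->dev, "probed, waiting for consumers\n");
	return 0;
}

/*
 * Called once all consumers have probed, or forced by
 * fw_devlink_probing_done() after the deferred-probe timeout when
 * fw_devlink.sync_state=timeout (the default in this series).
 */
static void my_supplier_sync_state(struct device *dev)
{
	dev_info(dev, "all consumers ready, dropping boot-time state\n");
}

static const struct of_device_id my_supplier_of_match[] = {
	{ .compatible = "vendor,my-supplier" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, my_supplier_of_match);

static struct platform_driver my_supplier_driver = {
	.probe = my_supplier_probe,
	.driver = {
		.name = "my-supplier",
		.of_match_table = my_supplier_of_match,
		.sync_state = my_supplier_sync_state,
	},
};
module_platform_driver(my_supplier_driver);
MODULE_LICENSE("GPL");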
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c98849577d4..c1815b9dae68 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -125,17 +125,6 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-struct bus_type cpu_subsys = {
- .name = "cpu",
- .dev_name = "cpu",
- .match = cpu_subsys_match,
-#ifdef CONFIG_HOTPLUG_CPU
- .online = cpu_subsys_online,
- .offline = cpu_subsys_offline,
-#endif
-};
-EXPORT_SYMBOL_GPL(cpu_subsys);
-
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
@@ -336,7 +325,7 @@ static ssize_t print_cpu_modalias(struct device *dev,
return len;
}
-static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (buf) {
@@ -348,6 +337,20 @@ static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif
+struct bus_type cpu_subsys = {
+ .name = "cpu",
+ .dev_name = "cpu",
+ .match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+ .online = cpu_subsys_online,
+ .offline = cpu_subsys_offline,
+#endif
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ .uevent = cpu_uevent,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -368,9 +371,6 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
-#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
- cpu->dev.bus->uevent = cpu_uevent;
-#endif
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
cpu->dev.groups = hotplugable_cpu_attr_groups;
@@ -487,7 +487,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
bool cpu_is_hotpluggable(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable
+ && tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
@@ -610,9 +611,13 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {
static void __init cpu_register_vulnerabilities(void)
{
- if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
- &cpu_root_vulnerabilities_group))
- pr_err("Unable to register CPU vulnerabilities\n");
+ struct device *dev = bus_get_dev_root(&cpu_subsys);
+
+ if (dev) {
+ if (sysfs_create_group(&dev->kobj, &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+ put_device(dev);
+ }
}
#else
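[Editor's note] The cpu.c hunk above shows the new way to reach a bus's root device now that direct dev_root access is going away: bus_get_dev_root() returns a referenced device (or NULL), and the caller must drop that reference with put_device(). A small sketch of the same pattern; the helper name and attribute group are placeholders, not part of this diff.

#include <linux/cpu.h>
#include <linux/device.h>

static int my_register_cpu_root_attrs(const struct attribute_group *grp)
{
	struct device *dev = bus_get_dev_root(&cpu_subsys);
	int ret = -ENODEV;

	if (dev) {
		/* grp ends up under /sys/devices/system/cpu/ */
		ret = sysfs_create_group(&dev->kobj, grp);
		put_device(dev);	/* drop the reference taken above */
	}
	return ret;
}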
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e9b2f9c25efe..9c09ca5c4ab6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -257,13 +257,11 @@ static int deferred_devs_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
#ifdef CONFIG_MODULES
-int driver_deferred_probe_timeout = 10;
+static int driver_deferred_probe_timeout = 10;
#else
-int driver_deferred_probe_timeout;
+static int driver_deferred_probe_timeout;
#endif
-EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-
static int __init deferred_probe_timeout_setup(char *str)
{
int timeout;
@@ -317,6 +315,8 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
+
+ fw_devlink_probing_done();
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -366,13 +366,17 @@ static int deferred_probe_initcall(void)
schedule_delayed_work(&deferred_probe_timeout_work,
driver_deferred_probe_timeout * HZ);
}
+
+ if (!IS_ENABLED(CONFIG_MODULES))
+ fw_devlink_probing_done();
+
return 0;
}
late_initcall(deferred_probe_initcall);
static void __exit deferred_probe_exit(void)
{
- debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
+ debugfs_lookup_and_remove("devices_deferred", NULL);
}
__exitcall(deferred_probe_exit);
@@ -413,10 +417,7 @@ static void driver_bound(struct device *dev)
driver_deferred_probe_del(dev);
driver_deferred_probe_trigger();
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_BOUND_DRIVER, dev);
-
+ bus_notify(dev, BUS_NOTIFY_BOUND_DRIVER);
kobject_uevent(&dev->kobj, KOBJ_BIND);
}
@@ -435,9 +436,7 @@ static int driver_sysfs_add(struct device *dev)
{
int ret;
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_BIND_DRIVER, dev);
+ bus_notify(dev, BUS_NOTIFY_BIND_DRIVER);
ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
kobject_name(&dev->kobj));
@@ -502,9 +501,8 @@ int device_bind_driver(struct device *dev)
device_links_force_bind(dev);
driver_bound(dev);
}
- else if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ else
+ bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND);
return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
@@ -512,6 +510,27 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+static ssize_t state_synced_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+
+ if (strcmp("1", buf))
+ return -EINVAL;
+
+ device_lock(dev);
+ if (!dev->state_synced) {
+ dev->state_synced = true;
+ dev_sync_state(dev);
+ } else {
+ ret = -EINVAL;
+ }
+ device_unlock(dev);
+
+ return ret ? ret : count;
+}
+
static ssize_t state_synced_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -523,7 +542,7 @@ static ssize_t state_synced_show(struct device *dev,
return sysfs_emit(buf, "%u\n", val);
}
-static DEVICE_ATTR_RO(state_synced);
+static DEVICE_ATTR_RW(state_synced);
static void device_unbind_cleanup(struct device *dev)
{
@@ -695,9 +714,7 @@ dev_groups_failed:
probe_failed:
driver_sysfs_remove(dev);
sysfs_failed:
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
pinctrl_bind_failed:
@@ -718,7 +735,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
calltime = ktime_get();
ret = really_probe(dev, drv);
rettime = ktime_get();
- pr_debug("probe of %s returned %d after %lld usecs\n",
+ /*
+ * Don't change this to pr_debug() because that requires
+ * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
+ * kernel commandline to print this all the time at the debug level.
+ */
+ printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
dev_name(dev), ret, ktime_us_delta(rettime, calltime));
return ret;
}
@@ -1243,10 +1265,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
driver_sysfs_remove(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_UNBIND_DRIVER,
- dev);
+ bus_notify(dev, BUS_NOTIFY_UNBIND_DRIVER);
pm_runtime_put_sync(dev);
@@ -1260,11 +1279,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_UNBOUND_DRIVER,
- dev);
+ bus_notify(dev, BUS_NOTIFY_UNBOUND_DRIVER);
kobject_uevent(&dev->kobj, KOBJ_UNBIND);
}
}
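[Editor's note] The dd.c hunks make the per-device state_synced attribute writable: the store handler accepts exactly the string "1", marks the device as synced and triggers its sync_state() once; any other value, or a second write, returns -EINVAL. A hedged userspace sketch follows; the sysfs path is a made-up example — substitute a device whose driver or bus implements sync_state().

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Example path only. */
	const char *attr = "/sys/devices/platform/my-supplier/state_synced";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Write "1" with no trailing newline; the handler does strcmp("1", buf). */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}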
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 1c06781f7114..91536ee05f14 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -167,7 +167,7 @@ static int devcd_free(struct device *dev, void *data)
return 0;
}
-static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
+static ssize_t disabled_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", devcd_disabled);
@@ -197,7 +197,7 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
* so, above situation would not occur.
*/
-static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
+static ssize_t disabled_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
long tmp = simple_strtol(buf, NULL, 10);
@@ -226,7 +226,6 @@ ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
- .owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
.class_groups = devcd_class_groups,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c0e100074aa3..5c998cfac335 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -722,20 +722,21 @@ static void devm_action_release(struct device *dev, void *res)
}
/**
- * devm_add_action() - add a custom action to list of managed resources
+ * __devm_add_action() - add a custom action to list of managed resources
* @dev: Device that owns the action
* @action: Function that should be called
* @data: Pointer to data passed to @action implementation
+ * @name: Name of the resource (for debugging purposes)
*
* This adds a custom action to the list of managed resources so that
* it gets executed as part of standard resource unwinding.
*/
-int devm_add_action(struct device *dev, void (*action)(void *), void *data)
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
struct action_devres *devres;
- devres = devres_alloc(devm_action_release,
- sizeof(struct action_devres), GFP_KERNEL);
+ devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
+ GFP_KERNEL, NUMA_NO_NODE, name);
if (!devres)
return -ENOMEM;
@@ -745,7 +746,7 @@ int devm_add_action(struct device *dev, void (*action)(void *), void *data)
devres_add(dev, devres);
return 0;
}
-EXPORT_SYMBOL_GPL(devm_add_action);
+EXPORT_SYMBOL_GPL(__devm_add_action);
/**
* devm_remove_action() - removes previously added custom action
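As a usage illustration for the renamed helper (not part of the patch): a minimal sketch of a consumer registering a managed undo action through the devm_add_action_or_reset() wrapper, which is assumed to keep routing through __devm_add_action() so the action name is recorded for devres debugging. The driver and clock handling below are hypothetical.

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical undo action run automatically when the device is unbound. */
static void example_clk_disable(void *data)
{
	clk_disable_unprepare(data);
}

static int example_setup_clock(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * Register the undo action; on failure the action runs immediately.
	 * With the change above, the action's name should end up recorded in
	 * the devres entry for debugging.
	 */
	return devm_add_action_or_reset(dev, example_clk_disable, clk);
}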
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index e4bffeabf344..b848764ef018 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -13,6 +13,8 @@
* overwrite the default setting if needed.
*/
+#define pr_fmt(fmt) "devtmpfs: " fmt
+
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
@@ -92,15 +94,6 @@ static struct file_system_type dev_fs_type = {
.mount = public_dev_mount,
};
-#ifdef CONFIG_BLOCK
-static inline int is_blockdev(struct device *dev)
-{
- return dev->class == &block_class;
-}
-#else
-static inline int is_blockdev(struct device *dev) { return 0; }
-#endif
-
static int devtmpfs_submit_req(struct req *req, const char *tmp)
{
init_completion(&req->done);
@@ -173,7 +166,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode);
+ err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
@@ -223,7 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode,
+ err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
dev->devt);
if (!err) {
struct iattr newattrs;
@@ -233,7 +226,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
inode_lock(d_inode(dentry));
- notify_change(&init_user_ns, dentry, &newattrs, NULL);
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
@@ -254,7 +247,7 @@ static int dev_rmdir(const char *name)
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry),
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
dentry);
else
err = -EPERM;
@@ -341,9 +334,9 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
- notify_change(&init_user_ns, dentry, &newattrs, NULL);
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
- err = vfs_unlink(&init_user_ns, d_inode(parent.dentry),
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
@@ -376,9 +369,9 @@ int __init devtmpfs_mount(void)
err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
- printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+ pr_info("error mounting %d\n", err);
else
- printk(KERN_INFO "devtmpfs: mounted\n");
+ pr_info("mounted\n");
return err;
}
@@ -460,14 +453,12 @@ int __init devtmpfs_init(void)
mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
if (IS_ERR(mnt)) {
- printk(KERN_ERR "devtmpfs: unable to create devtmpfs %ld\n",
- PTR_ERR(mnt));
+ pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
err = register_filesystem(&dev_fs_type);
if (err) {
- printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
- "type %i\n", err);
+ pr_err("unable to register devtmpfs type %d\n", err);
return err;
}
@@ -480,12 +471,12 @@ int __init devtmpfs_init(void)
}
if (err) {
- printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
+ pr_err("unable to create devtmpfs %d\n", err);
unregister_filesystem(&dev_fs_type);
thread = NULL;
return err;
}
- printk(KERN_INFO "devtmpfs: initialized\n");
+ pr_info("initialized\n");
return 0;
}
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 676b6275d5b5..c8436c26ed6a 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -224,7 +224,7 @@ int driver_register(struct device_driver *drv)
int ret;
struct device_driver *other;
- if (!drv->bus->p) {
+ if (!bus_is_registered(drv->bus)) {
pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n",
drv->name, drv->bus->name);
return -EINVAL;
@@ -274,30 +274,3 @@ void driver_unregister(struct device_driver *drv)
bus_remove_driver(drv);
}
EXPORT_SYMBOL_GPL(driver_unregister);
-
-/**
- * driver_find - locate driver on a bus by its name.
- * @name: name of the driver.
- * @bus: bus to scan for the driver.
- *
- * Call kset_find_obj() to iterate over list of drivers on
- * a bus to find driver by name. Return driver if found.
- *
- * This routine provides no locking to prevent the driver it returns
- * from being unregistered or unloaded while the caller is using it.
- * The caller is responsible for preventing this.
- */
-struct device_driver *driver_find(const char *name, struct bus_type *bus)
-{
- struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
- struct driver_private *priv;
-
- if (k) {
- /* Drop reference added by kset_find_obj() */
- kobject_put(k);
- priv = to_driver(k);
- return priv->driver;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(driver_find);
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 5166b323a0f8..5ca00e02fe82 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -3,6 +3,8 @@ menu "Firmware loader"
config FW_LOADER
tristate "Firmware loading facility" if EXPERT
+ select CRYPTO_HASH if FW_LOADER_DEBUG
+ select CRYPTO_SHA256 if FW_LOADER_DEBUG
default y
help
This enables the firmware loading facility in the kernel. The kernel
@@ -24,6 +26,17 @@ config FW_LOADER
You also want to be sure to enable this built-in if you are going to
enable built-in firmware (CONFIG_EXTRA_FIRMWARE).
+config FW_LOADER_DEBUG
+ bool "Log filenames and checksums for loaded firmware"
+ depends on CRYPTO = FW_LOADER || CRYPTO=y
+ depends on DYNAMIC_DEBUG
+ depends on FW_LOADER
+ default FW_LOADER
+ help
+ Select this option to use dynamic debug to log firmware filenames and
+ SHA256 checksums to the kernel log for each firmware file that is
+ loaded.
+
if FW_LOADER
config FW_LOADER_PAGED_BUF
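Usage note (an assumption based on standard dynamic debug syntax, not part of the patch): because the checksum is emitted with dev_dbg(), it stays silent by default. Booting with dyndbg="func fw_log_firmware_info +p" on the kernel command line, or writing the same query to the dynamic_debug control file in debugfs, should enable the per-file log lines.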
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 017c4cdb219e..9d79d5ad9102 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -493,9 +493,9 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
const void *in_buffer))
{
size_t size;
- int i, len;
+ int i, len, maxlen = 0;
int rc = -ENOENT;
- char *path;
+ char *path, *nt = NULL;
size_t msize = INT_MAX;
void *buffer = NULL;
@@ -518,8 +518,17 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
if (!fw_path[i][0])
continue;
- len = snprintf(path, PATH_MAX, "%s/%s%s",
- fw_path[i], fw_priv->fw_name, suffix);
+ /* strip off \n from customized path */
+ maxlen = strlen(fw_path[i]);
+ if (i == 0) {
+ nt = strchr(fw_path[i], '\n');
+ if (nt)
+ maxlen = nt - fw_path[i];
+ }
+
+ len = snprintf(path, PATH_MAX, "%.*s/%s%s",
+ maxlen, fw_path[i],
+ fw_priv->fw_name, suffix);
if (len >= PATH_MAX) {
rc = -ENAMETOOLONG;
break;
@@ -791,6 +800,50 @@ static void fw_abort_batch_reqs(struct firmware *fw)
mutex_unlock(&fw_lock);
}
+#if defined(CONFIG_FW_LOADER_DEBUG)
+#include <crypto/hash.h>
+#include <crypto/sha2.h>
+
+static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
+{
+ struct shash_desc *shash;
+ struct crypto_shash *alg;
+ u8 *sha256buf;
+ char *outbuf;
+
+ alg = crypto_alloc_shash("sha256", 0, 0);
+	if (IS_ERR(alg))
+ return;
+
+ sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
+ outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
+ shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
+ if (!sha256buf || !outbuf || !shash)
+ goto out_free;
+
+ shash->tfm = alg;
+
+ if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
+ goto out_shash;
+
+ for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
+ sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
+ outbuf[SHA256_BLOCK_SIZE] = 0;
+ dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
+
+out_shash:
+ crypto_free_shash(alg);
+out_free:
+ kfree(shash);
+ kfree(outbuf);
+ kfree(sha256buf);
+}
+#else
+static void fw_log_firmware_info(const struct firmware *fw, const char *name,
+ struct device *device)
+{}
+#endif
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -861,11 +914,13 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
revert_creds(old_cred);
put_cred(kern_cred);
- out:
+out:
if (ret < 0) {
fw_abort_batch_reqs(fw);
release_firmware(fw);
fw = NULL;
+ } else {
+ fw_log_firmware_info(fw, name, device);
}
*firmware_p = fw;
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index 56911d75b90a..c9c93b47d9a5 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -25,7 +25,7 @@ void __fw_load_abort(struct fw_priv *fw_priv)
}
#ifdef CONFIG_FW_LOADER_USER_HELPER
-static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+static ssize_t timeout_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
@@ -44,7 +44,7 @@ static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
*
* Note: zero means 'wait forever'.
**/
-static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fe98fb8d94e5..b456ac213610 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -115,18 +115,13 @@ unsigned long __weak memory_block_size_bytes(void)
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
-/*
- * Show the first physical section index (number) of this memory block.
- */
+/* Show the memory block ID, relative to the memory block size */
static ssize_t phys_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
- unsigned long phys_index;
-
- phys_index = mem->start_section_nr / sections_per_block;
- return sysfs_emit(buf, "%08lx\n", phys_index);
+ return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
}
/*
diff --git a/drivers/base/node.c b/drivers/base/node.c
index faf3597a96da..b46db17124f3 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -587,6 +587,9 @@ static const struct attribute_group *node_dev_groups[] = {
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
&arch_node_dev_group,
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ &memory_failure_attr_group,
+#endif
NULL
};
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 87af641cfe1a..951819e71b4a 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -24,8 +24,11 @@ bool dev_add_physical_location(struct device *dev)
dev->physical_location =
kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
- if (!dev->physical_location)
+ if (!dev->physical_location) {
+ ACPI_FREE(pld);
return false;
+ }
+
dev->physical_location->panel = pld->panel;
dev->physical_location->vertical_position = pld->vertical_position;
dev->physical_location->horizontal_position = pld->horizontal_position;
diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h
index 82cde9f1b161..3f3f61307998 100644
--- a/drivers/base/physical_location.h
+++ b/drivers/base/physical_location.h
@@ -8,7 +8,7 @@
#include <linux/device.h>
#ifdef CONFIG_ACPI
-extern bool dev_add_physical_location(struct device *dev);
+bool dev_add_physical_location(struct device *dev);
extern const struct attribute_group dev_attr_physical_location_group;
#else
static inline bool dev_add_physical_location(struct device *dev) { return false; };
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 5883e7634a2b..f37ad34c80ec 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -324,6 +324,7 @@ void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int vir
struct platform_msi_priv_data *data = domain->host_data;
msi_lock_descs(data->dev);
+ msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
irq_domain_free_irqs_common(domain, virq, nr_irqs);
msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
msi_unlock_descs(data->dev);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 968f3d71eeab..77510e4f47de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -441,11 +441,9 @@ static int __platform_get_irq_byname(struct platform_device *dev,
struct resource *r;
int ret;
- if (!dev->dev.of_node || IS_ENABLED(CONFIG_OF_IRQ)) {
- ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
- if (ret > 0 || ret == -EPROBE_DEFER)
- return ret;
- }
+ ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
+ if (ret > 0 || ret == -EPROBE_DEFER)
+ return ret;
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
if (r) {
@@ -499,6 +497,8 @@ EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
 * platform_add_devices - add a number of platform devices
* @devs: array of platform devices to add
* @num: number of platform devices in array
+ *
+ * Return: 0 on success, negative error number on failure.
*/
int platform_add_devices(struct platform_device **devs, int num)
{
@@ -883,6 +883,13 @@ static int platform_probe_fail(struct platform_device *pdev)
return -ENXIO;
}
+static int is_bound_to_driver(struct device *dev, void *driver)
+{
+ if (dev->driver == driver)
+ return 1;
+ return 0;
+}
+
/**
* __platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
@@ -906,7 +913,7 @@ static int platform_probe_fail(struct platform_device *pdev)
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
int (*probe)(struct platform_device *), struct module *module)
{
- int retval, code;
+ int retval;
if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
@@ -932,24 +939,21 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
- retval = code = __platform_driver_register(drv, module);
+ retval = __platform_driver_register(drv, module);
if (retval)
return retval;
- /*
- * Fixup that section violation, being paranoid about code scanning
- * the list of drivers in order to probe new devices. Check to see
- * if the probe was successful, and make sure any forced probes of
- * new devices fail.
- */
- spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
+ /* Force all new probes of this driver to fail */
drv->probe = platform_probe_fail;
- if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
- retval = -ENODEV;
- spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
- if (code != retval)
+ /* Walk all platform devices and see if any actually bound to this driver.
+ * If not, return an error as the device should have done so by now.
+ */
+ if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) {
+ retval = -ENODEV;
platform_driver_unregister(drv);
+ }
+
return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
@@ -1353,9 +1357,9 @@ static int platform_match(struct device *dev, struct device_driver *drv)
return (strcmp(pdev->name, drv->name) == 0);
}
-static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device *pdev = to_platform_device(dev);
int rc;
/* Some devices have extra OF data and an OF-style MODALIAS */
@@ -1416,7 +1420,9 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- if (drv->remove) {
+ if (drv->remove_new) {
+ drv->remove_new(dev);
+ } else if (drv->remove) {
int ret = drv->remove(dev);
if (ret)
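To illustrate the callback handled by the hunk above (not part of the patch), a minimal sketch of a platform driver wired up through the new .remove_new member; all names are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* resource setup would go here */
	return 0;
}

/* New-style remove callback: returns void, so there is no error to ignore. */
static void example_remove(struct platform_device *pdev)
{
	/* teardown that cannot meaningfully fail */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example-device",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");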
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 967bcf9d415e..32084e38b73d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -220,13 +220,10 @@ static void genpd_debug_add(struct generic_pm_domain *genpd);
static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
- struct dentry *d;
-
if (!genpd_debugfs_dir)
return;
- d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
- debugfs_remove(d);
+ debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}
static void genpd_update_accounting(struct generic_pm_domain *genpd)
@@ -519,6 +516,31 @@ ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
+/*
+ * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
+ *
+ * @dev: A device that is attached to the genpd.
+ *
+ * Allows a consumer of the genpd to notify the provider that the next power off
+ * should be synchronous.
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ */
+void dev_pm_genpd_synced_poweroff(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ genpd_lock(genpd);
+ genpd->synced_poweroff = true;
+ genpd_unlock(genpd);
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -562,6 +584,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
out:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ genpd->synced_poweroff = false;
return 0;
err:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
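A minimal usage sketch for the new helper (not part of the patch), assuming the consumer calls it from its runtime-suspend path; the callback and ops names are hypothetical.

#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/*
	 * Notify the PM domain provider that the next power off of this
	 * domain should be performed synchronously.
	 */
	dev_pm_genpd_synced_poweroff(dev);

	/* device-specific quiescing would go here */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	RUNTIME_PM_OPS(example_runtime_suspend, NULL, NULL)
};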
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c50139207794..f85f3515c258 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -679,7 +679,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
int error;
error = device_resume_noirq(dev, pm_transition, true);
@@ -816,7 +816,7 @@ Out:
static void async_resume_early(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
int error;
error = device_resume_early(dev, pm_transition, true);
@@ -980,7 +980,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
static void async_resume(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
int error;
error = device_resume(dev, pm_transition, true);
@@ -1269,7 +1269,7 @@ Complete:
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
int error;
error = __device_suspend_noirq(dev, pm_transition, true);
@@ -1450,7 +1450,7 @@ Complete:
static void async_suspend_late(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
int error;
error = __device_suspend_late(dev, pm_transition, true);
@@ -1727,7 +1727,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static void async_suspend(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
int error;
error = __device_suspend(dev, pm_transition, true);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 50e726b6c2cf..4545669cb973 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -468,7 +468,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle_rcuidle(dev, rpmflags);
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -508,7 +508,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
@@ -530,7 +530,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
@@ -562,7 +562,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend_rcuidle(dev, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -713,7 +713,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
@@ -765,7 +765,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume_rcuidle(dev, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error) {
@@ -935,7 +935,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -1091,7 +1091,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1129,7 +1129,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1212,7 +1212,7 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
@@ -1576,7 +1576,7 @@ void pm_runtime_allow(struct device *dev)
if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else if (ret > 0)
- trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+ trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1646,7 +1646,7 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
}
}
@@ -1864,6 +1864,10 @@ static bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
+ *
+ * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
+ * state where this function has called the ->runtime_suspend callback but the
+ * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 924fac493c4f..6732ed2869f9 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -210,7 +210,7 @@ void wakeup_source_sysfs_remove(struct wakeup_source *ws)
static int __init wakeup_sources_sysfs_init(void)
{
- wakeup_class = class_create(THIS_MODULE, "wakeup");
+ wakeup_class = class_create("wakeup");
return PTR_ERR_OR_ZERO(wakeup_class);
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index bbb3e499ff4a..f6117ec9805c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -37,8 +37,10 @@ EXPORT_SYMBOL_GPL(__dev_fwnode_const);
* @propname: Name of the property
*
* Check if property @propname is present in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
*/
-bool device_property_present(struct device *dev, const char *propname)
+bool device_property_present(const struct device *dev, const char *propname)
{
return fwnode_property_present(dev_fwnode(dev), propname);
}
@@ -48,6 +50,8 @@ EXPORT_SYMBOL_GPL(device_property_present);
* fwnode_property_present - check if a property of a firmware node is present
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
*/
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
@@ -86,7 +90,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_read_u8_array(struct device *dev, const char *propname,
+int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval)
{
return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval);
@@ -114,7 +118,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_read_u16_array(struct device *dev, const char *propname,
+int device_property_read_u16_array(const struct device *dev, const char *propname,
u16 *val, size_t nval)
{
return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval);
@@ -142,7 +146,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_read_u32_array(struct device *dev, const char *propname,
+int device_property_read_u32_array(const struct device *dev, const char *propname,
u32 *val, size_t nval)
{
return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval);
@@ -170,7 +174,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_read_u64_array(struct device *dev, const char *propname,
+int device_property_read_u64_array(const struct device *dev, const char *propname,
u64 *val, size_t nval)
{
return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval);
@@ -198,7 +202,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_read_string_array(struct device *dev, const char *propname,
+int device_property_read_string_array(const struct device *dev, const char *propname,
const char **val, size_t nval)
{
return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval);
@@ -220,7 +224,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array);
* %-EPROTO or %-EILSEQ if the property type is not a string.
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_read_string(struct device *dev, const char *propname,
+int device_property_read_string(const struct device *dev, const char *propname,
const char **val)
{
return fwnode_property_read_string(dev_fwnode(dev), propname, val);
@@ -242,7 +246,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
-int device_property_match_string(struct device *dev, const char *propname,
+int device_property_match_string(const struct device *dev, const char *propname,
const char *string)
{
return fwnode_property_match_string(dev_fwnode(dev), propname, string);
@@ -508,10 +512,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* Obtain a reference based on a named property in an fwnode, with
* integer arguments.
*
- * Caller is responsible to call fwnode_handle_put() on the returned
- * args->fwnode pointer.
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * @args->fwnode pointer.
*
- * Returns: %0 on success
+ * Return: %0 on success
* %-ENOENT when the index is out of bounds, the index has an empty
* reference or the property was not found
* %-EINVAL on parse error
@@ -547,8 +551,11 @@ EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
*
* @index can be used when the named reference holds a table of references.
*
- * Returns pointer to the reference fwnode, or ERR_PTR. Caller is responsible to
- * call fwnode_handle_put() on the returned fwnode pointer.
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: a pointer to the reference fwnode, when found. Otherwise,
+ * returns an error pointer.
*/
struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *name,
@@ -567,7 +574,7 @@ EXPORT_SYMBOL_GPL(fwnode_find_reference);
* fwnode_get_name - Return the name of a node
* @fwnode: The firmware node
*
- * Returns a pointer to the node name.
+ * Return: a pointer to the node name, or %NULL.
*/
const char *fwnode_get_name(const struct fwnode_handle *fwnode)
{
@@ -579,7 +586,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_name);
* fwnode_get_name_prefix - Return the prefix of node for printing purposes
* @fwnode: The firmware node
*
- * Returns the prefix of a node, intended to be printed right before the node.
+ * Return: the prefix of a node, intended to be printed right before the node.
* The prefix works also as a separator between the nodes.
*/
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
@@ -591,7 +598,10 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
 * fwnode_get_parent - Return parent firmware node
* @fwnode: Firmware whose parent is retrieved
*
- * Return parent firmware node of the given node if possible or %NULL if no
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
@@ -608,8 +618,12 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent);
* on the passed node, making it suitable for iterating through a
* node's parents.
*
- * Returns a node pointer with refcount incremented, use
- * fwnode_handle_put() on it when done.
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @fwnode
+ * unconditionally.
+ *
+ * Return: parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
*/
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
{
@@ -629,10 +643,12 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
* firmware node that has a corresponding struct device and returns that struct
* device.
*
- * The caller of this function is expected to call put_device() on the returned
- * device when they are done.
+ * The caller is responsible for calling put_device() on the returned device
+ * pointer.
+ *
+ * Return: a pointer to the device of the @fwnode's closest ancestor.
*/
-struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
+struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
struct device *dev;
@@ -651,7 +667,7 @@ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
- * Returns the number of parents a node has.
+ * Return: the number of parents a node has.
*/
unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
@@ -670,12 +686,12 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents);
* @fwnode: The node the parent of which is requested
* @depth: Distance of the parent from the node
*
- * Returns the nth parent of a node. If there is no parent at the requested
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the nth parent of a node. If there is no parent at the requested
* @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
* fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
- *
- * The caller is responsible for calling fwnode_handle_put() for the returned
- * node.
*/
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
unsigned int depth)
@@ -700,9 +716,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
*
* A node is considered an ancestor of itself too.
*
- * Returns true if @ancestor is an ancestor of @child. Otherwise, returns false.
+ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
*/
-bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child)
+bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child)
{
struct fwnode_handle *parent;
@@ -725,6 +741,10 @@ bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @child
+ * unconditionally.
*/
struct fwnode_handle *
fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
@@ -735,10 +755,13 @@ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
/**
- * fwnode_get_next_available_child_node - Return the next
- * available child node handle for a node
+ * fwnode_get_next_available_child_node - Return the next available child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @child
+ * unconditionally.
*/
struct fwnode_handle *
fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
@@ -762,7 +785,11 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
- * @child: Handle to one of the device's child nodes or a null handle.
+ * @child: Handle to one of the device's child nodes or a %NULL handle.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @child
+ * unconditionally.
*/
struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child)
@@ -787,6 +814,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
* fwnode_get_named_child_node - Return first matching named child node handle
* @fwnode: Firmware node to find the named child node for.
* @childname: String to match child node name against.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
*/
struct fwnode_handle *
fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
@@ -800,6 +830,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_child_node);
* device_get_named_child_node - Return first matching named child node handle
* @dev: Device to find the named child node for.
* @childname: String to match child node name against.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
*/
struct fwnode_handle *device_get_named_child_node(const struct device *dev,
const char *childname)
@@ -812,7 +845,10 @@ EXPORT_SYMBOL_GPL(device_get_named_child_node);
* fwnode_handle_get - Obtain a reference to a device node
* @fwnode: Pointer to the device node to obtain the reference to.
*
- * Returns the fwnode handle.
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the fwnode handle.
*/
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
{
@@ -841,6 +877,8 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*
+ * Return: true if device is available for use. Otherwise, returns false.
+ *
* For fwnode node types that don't implement the .device_is_available()
* operation, this function returns true.
*/
@@ -859,6 +897,8 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
* device_get_child_node_count - return the number of child nodes for device
 * @dev: Device to count the child nodes for
+ *
+ * Return: the number of child nodes for a given device.
*/
unsigned int device_get_child_node_count(const struct device *dev)
{
@@ -895,7 +935,7 @@ EXPORT_SYMBOL_GPL(device_get_dma_attr);
* 'phy-connection-type', and return its index in phy_modes table, or errno in
* error case.
*/
-int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
+int fwnode_get_phy_mode(const struct fwnode_handle *fwnode)
{
const char *pm;
int err, i;
@@ -934,7 +974,7 @@ EXPORT_SYMBOL_GPL(device_get_phy_mode);
* @fwnode: Pointer to the firmware node
* @index: Index of the IO range
*
- * Returns a pointer to the mapped memory.
+ * Return: a pointer to the mapped memory.
*/
void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
{
@@ -947,8 +987,8 @@ EXPORT_SYMBOL(fwnode_iomap);
* @fwnode: Pointer to the firmware node
* @index: Zero-based index of the IRQ
*
- * Returns Linux IRQ number on success. Other values are determined
- * accordingly to acpi_/of_ irq_get() operation.
+ * Return: Linux IRQ number on success. Other values are determined
+ * according to acpi_irq_get() or of_irq_get() operation.
*/
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
{
@@ -967,8 +1007,7 @@ EXPORT_SYMBOL(fwnode_irq_get);
* number of the IRQ resource corresponding to the index of the matched
* string.
*
- * Return:
- * Linux IRQ number on success, or negative errno otherwise.
+ * Return: Linux IRQ number on success, or negative errno otherwise.
*/
int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name)
{
@@ -990,33 +1029,43 @@ EXPORT_SYMBOL(fwnode_irq_get_byname);
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
- * Returns an endpoint firmware node pointer or %NULL if no more endpoints
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @prev
+ * unconditionally.
+ *
+ * Return: an endpoint firmware node pointer or %NULL if no more endpoints
* are available.
*/
struct fwnode_handle *
fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
+ struct fwnode_handle *ep, *port_parent = NULL;
const struct fwnode_handle *parent;
- struct fwnode_handle *ep;
/*
* If this function is in a loop and the previous iteration returned
* an endpoint from fwnode->secondary, then we need to use the secondary
* as parent rather than @fwnode.
*/
- if (prev)
- parent = fwnode_graph_get_port_parent(prev);
- else
+ if (prev) {
+ port_parent = fwnode_graph_get_port_parent(prev);
+ parent = port_parent;
+ } else {
parent = fwnode;
+ }
if (IS_ERR_OR_NULL(parent))
return NULL;
ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
if (ep)
- return ep;
+ goto out_put_port_parent;
+
+ ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
- return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+out_put_port_parent:
+ fwnode_handle_put(port_parent);
+ return ep;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
@@ -1024,6 +1073,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
* fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
* @endpoint: Endpoint firmware node of the port
*
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
* Return: the firmware node of the device the @endpoint belongs to.
*/
struct fwnode_handle *
@@ -1045,6 +1097,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote device the @fwnode points to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
@@ -1065,6 +1120,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote port the @fwnode points to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
@@ -1078,6 +1136,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote endpoint the @fwnode points to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
@@ -1105,8 +1166,11 @@ static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
* @endpoint: identifier of the endpoint node under the port node
* @flags: fwnode lookup flags
*
- * Return the fwnode handle of the local endpoint corresponding the port and
- * endpoint IDs or NULL if not found.
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the fwnode handle of the local endpoint corresponding to the port and
+ * endpoint IDs or %NULL if not found.
*
* If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
* has not been found, look for the closest endpoint ID greater than the
@@ -1114,9 +1178,6 @@ static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
*
* Does not return endpoints that belong to disabled devices or endpoints that
* are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
- *
- * The returned endpoint needs to be released by calling fwnode_handle_put() on
- * it when it is not needed any more.
*/
struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
@@ -1174,7 +1235,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
* If FWNODE_GRAPH_DEVICE_DISABLED flag is specified, also unconnected endpoints
* and endpoints connected to disabled devices are counted.
*/
-unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode,
+unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode,
unsigned long flags)
{
struct fwnode_handle *ep;
@@ -1322,7 +1383,8 @@ EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
* @fwnode and other device nodes. @match will be used to convert the
* connection description to data the caller is expecting to be returned
* through the @matches array.
- * If @matches is NULL @matches_len is ignored and the total number of resolved
+ *
+ * If @matches is %NULL @matches_len is ignored and the total number of resolved
* matches is returned.
*
* Return: Number of matches resolved, or negative errno.
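Given the reference-counting rules the updated kernel-doc above spells out (the iterator puts @prev itself and the caller owns the returned handle), a short iteration sketch follows; it is not part of the patch, and in-tree code would normally use fwnode_graph_for_each_endpoint() or fwnode_graph_get_endpoint_count() instead.

#include <linux/fwnode.h>
#include <linux/property.h>

static unsigned int example_count_endpoints(const struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ep = NULL;
	unsigned int count = 0;

	/*
	 * Each call drops the reference on the handle passed in as @prev and
	 * returns a new counted reference (or NULL at the end), so the loop
	 * needs no explicit fwnode_handle_put() unless it exits early while
	 * still holding a handle.
	 */
	while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep)))
		count++;

	return count;
}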
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index cd4bb642b9de..33a8366e22a5 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -9,10 +9,12 @@ config REGMAP
select MDIO_BUS if REGMAP_MDIO
bool
-config REGCACHE_COMPRESSED
- select LZO_COMPRESS
- select LZO_DECOMPRESS
- bool
+config REGMAP_KUNIT
+ tristate "KUnit tests for regmap"
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select REGMAP
+ select REGMAP_RAM
config REGMAP_AC97
tristate
@@ -46,6 +48,9 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
+config REGMAP_RAM
+ tristate
+
config REGMAP_SOUNDWIRE
tristate
depends on SOUNDWIRE
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 6990de7ca9a9..f6c6cb017200 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -3,11 +3,12 @@
CFLAGS_regmap.o := -I$(src)
obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o
-obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o regcache-maple.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+obj-$(CONFIG_REGMAP_KUNIT) += regmap-kunit.o
obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_RAM) += regmap-ram.o
obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index da8996e7a1f1..9bd0dfd1e259 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -31,8 +31,8 @@ struct regmap_format {
size_t buf_size;
size_t reg_bytes;
size_t pad_bytes;
- size_t reg_downshift;
size_t val_bytes;
+ s8 reg_shift;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
@@ -270,6 +270,7 @@ unsigned int regcache_get_val(struct regmap *map, const void *base,
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len, bool noinc);
@@ -281,7 +282,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_config *config);
extern struct regcache_ops regcache_rbtree_ops;
-extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_maple_ops;
extern struct regcache_ops regcache_flat_ops;
static inline const char *regmap_name(const struct regmap *map)
@@ -307,4 +308,23 @@ static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
return reg >> map->reg_stride_order;
}
+struct regmap_ram_data {
+	unsigned int *vals;  /* Allocated by caller */
+ bool *read;
+ bool *written;
+};
+
+/*
+ * Create a test register map with data stored in RAM, not intended
+ * for practical use.
+ */
+struct regmap *__regmap_init_ram(const struct regmap_config *config,
+ struct regmap_ram_data *data,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+#define regmap_init_ram(config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+
+
#endif
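Not part of the patch: a rough sketch of how the RAM-backed map might be set up, based only on the declarations above (the new REGMAP_KUNIT tests are presumably the in-tree user); the register layout and helper name are illustrative.

#include <linux/err.h>
#include <linux/slab.h>

#include "internal.h"

static struct regmap *example_init_test_map(struct regmap_ram_data *data)
{
	static const struct regmap_config config = {
		.reg_bits = 16,
		.val_bits = 16,
		.max_register = 15,
	};

	/* The backing value storage must be provided by the caller. */
	data->vals = kcalloc(config.max_register + 1, sizeof(*data->vals),
			     GFP_KERNEL);
	if (!data->vals)
		return ERR_PTR(-ENOMEM);

	return regmap_init_ram(&config, data);
}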
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
deleted file mode 100644
index 7886303eb026..000000000000
--- a/drivers/base/regmap/regcache-lzo.c
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// Register cache access API - LZO caching support
-//
-// Copyright 2011 Wolfson Microelectronics plc
-//
-// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
-
-#include <linux/device.h>
-#include <linux/lzo.h>
-#include <linux/slab.h>
-
-#include "internal.h"
-
-static int regcache_lzo_exit(struct regmap *map);
-
-struct regcache_lzo_ctx {
- void *wmem;
- void *dst;
- const void *src;
- size_t src_len;
- size_t dst_len;
- size_t decompressed_size;
- unsigned long *sync_bmp;
- int sync_bmp_nbits;
-};
-
-#define LZO_BLOCK_NUM 8
-static int regcache_lzo_block_count(struct regmap *map)
-{
- return LZO_BLOCK_NUM;
-}
-
-static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
-{
- lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!lzo_ctx->wmem)
- return -ENOMEM;
- return 0;
-}
-
-static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
-{
- size_t compress_size;
- int ret;
-
- ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
- lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
- if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
- return -EINVAL;
- lzo_ctx->dst_len = compress_size;
- return 0;
-}
-
-static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
-{
- size_t dst_len;
- int ret;
-
- dst_len = lzo_ctx->dst_len;
- ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
- lzo_ctx->dst, &dst_len);
- if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
- return -EINVAL;
- return 0;
-}
-
-static int regcache_lzo_compress_cache_block(struct regmap *map,
- struct regcache_lzo_ctx *lzo_ctx)
-{
- int ret;
-
- lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
- lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
- if (!lzo_ctx->dst) {
- lzo_ctx->dst_len = 0;
- return -ENOMEM;
- }
-
- ret = regcache_lzo_compress(lzo_ctx);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int regcache_lzo_decompress_cache_block(struct regmap *map,
- struct regcache_lzo_ctx *lzo_ctx)
-{
- int ret;
-
- lzo_ctx->dst_len = lzo_ctx->decompressed_size;
- lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
- if (!lzo_ctx->dst) {
- lzo_ctx->dst_len = 0;
- return -ENOMEM;
- }
-
- ret = regcache_lzo_decompress(lzo_ctx);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int regcache_lzo_get_blkindex(struct regmap *map,
- unsigned int reg)
-{
- return ((reg / map->reg_stride) * map->cache_word_size) /
- DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map));
-}
-
-static inline int regcache_lzo_get_blkpos(struct regmap *map,
- unsigned int reg)
-{
- return (reg / map->reg_stride) %
- (DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map)) /
- map->cache_word_size);
-}
-
-static inline int regcache_lzo_get_blksize(struct regmap *map)
-{
- return DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map));
-}
-
-static int regcache_lzo_init(struct regmap *map)
-{
- struct regcache_lzo_ctx **lzo_blocks;
- size_t bmp_size;
- int ret, i, blksize, blkcount;
- const char *p, *end;
- unsigned long *sync_bmp;
-
- ret = 0;
-
- blkcount = regcache_lzo_block_count(map);
- map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
- GFP_KERNEL);
- if (!map->cache)
- return -ENOMEM;
- lzo_blocks = map->cache;
-
- /*
- * allocate a bitmap to be used when syncing the cache with
- * the hardware. Each time a register is modified, the corresponding
- * bit is set in the bitmap, so we know that we have to sync
- * that register.
- */
- bmp_size = map->num_reg_defaults_raw;
- sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
- if (!sync_bmp) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* allocate the lzo blocks and initialize them */
- for (i = 0; i < blkcount; i++) {
- lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
- GFP_KERNEL);
- if (!lzo_blocks[i]) {
- bitmap_free(sync_bmp);
- ret = -ENOMEM;
- goto err;
- }
- lzo_blocks[i]->sync_bmp = sync_bmp;
- lzo_blocks[i]->sync_bmp_nbits = bmp_size;
- /* alloc the working space for the compressed block */
- ret = regcache_lzo_prepare(lzo_blocks[i]);
- if (ret < 0)
- goto err;
- }
-
- blksize = regcache_lzo_get_blksize(map);
- p = map->reg_defaults_raw;
- end = map->reg_defaults_raw + map->cache_size_raw;
- /* compress the register map and fill the lzo blocks */
- for (i = 0; i < blkcount; i++, p += blksize) {
- lzo_blocks[i]->src = p;
- if (p + blksize > end)
- lzo_blocks[i]->src_len = end - p;
- else
- lzo_blocks[i]->src_len = blksize;
- ret = regcache_lzo_compress_cache_block(map,
- lzo_blocks[i]);
- if (ret < 0)
- goto err;
- lzo_blocks[i]->decompressed_size =
- lzo_blocks[i]->src_len;
- }
-
- return 0;
-err:
- regcache_lzo_exit(map);
- return ret;
-}
-
-static int regcache_lzo_exit(struct regmap *map)
-{
- struct regcache_lzo_ctx **lzo_blocks;
- int i, blkcount;
-
- lzo_blocks = map->cache;
- if (!lzo_blocks)
- return 0;
-
- blkcount = regcache_lzo_block_count(map);
- /*
- * the pointer to the bitmap used for syncing the cache
- * is shared amongst all lzo_blocks. Ensure it is freed
- * only once.
- */
- if (lzo_blocks[0])
- bitmap_free(lzo_blocks[0]->sync_bmp);
- for (i = 0; i < blkcount; i++) {
- if (lzo_blocks[i]) {
- kfree(lzo_blocks[i]->wmem);
- kfree(lzo_blocks[i]->dst);
- }
- /* each lzo_block is a pointer returned by kmalloc or NULL */
- kfree(lzo_blocks[i]);
- }
- kfree(lzo_blocks);
- map->cache = NULL;
- return 0;
-}
-
-static int regcache_lzo_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
-{
- struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
- int ret, blkindex, blkpos;
- size_t tmp_dst_len;
- void *tmp_dst;
-
- /* index of the compressed lzo block */
- blkindex = regcache_lzo_get_blkindex(map, reg);
- /* register index within the decompressed block */
- blkpos = regcache_lzo_get_blkpos(map, reg);
- lzo_blocks = map->cache;
- lzo_block = lzo_blocks[blkindex];
-
- /* save the pointer and length of the compressed block */
- tmp_dst = lzo_block->dst;
- tmp_dst_len = lzo_block->dst_len;
-
- /* prepare the source to be the compressed block */
- lzo_block->src = lzo_block->dst;
- lzo_block->src_len = lzo_block->dst_len;
-
- /* decompress the block */
- ret = regcache_lzo_decompress_cache_block(map, lzo_block);
- if (ret >= 0)
- /* fetch the value from the cache */
- *value = regcache_get_val(map, lzo_block->dst, blkpos);
-
- kfree(lzo_block->dst);
- /* restore the pointer and length of the compressed block */
- lzo_block->dst = tmp_dst;
- lzo_block->dst_len = tmp_dst_len;
-
- return ret;
-}
-
-static int regcache_lzo_write(struct regmap *map,
- unsigned int reg, unsigned int value)
-{
- struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
- int ret, blkindex, blkpos;
- size_t tmp_dst_len;
- void *tmp_dst;
-
- /* index of the compressed lzo block */
- blkindex = regcache_lzo_get_blkindex(map, reg);
- /* register index within the decompressed block */
- blkpos = regcache_lzo_get_blkpos(map, reg);
- lzo_blocks = map->cache;
- lzo_block = lzo_blocks[blkindex];
-
- /* save the pointer and length of the compressed block */
- tmp_dst = lzo_block->dst;
- tmp_dst_len = lzo_block->dst_len;
-
- /* prepare the source to be the compressed block */
- lzo_block->src = lzo_block->dst;
- lzo_block->src_len = lzo_block->dst_len;
-
- /* decompress the block */
- ret = regcache_lzo_decompress_cache_block(map, lzo_block);
- if (ret < 0) {
- kfree(lzo_block->dst);
- goto out;
- }
-
- /* write the new value to the cache */
- if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
- kfree(lzo_block->dst);
- goto out;
- }
-
- /* prepare the source to be the decompressed block */
- lzo_block->src = lzo_block->dst;
- lzo_block->src_len = lzo_block->dst_len;
-
- /* compress the block */
- ret = regcache_lzo_compress_cache_block(map, lzo_block);
- if (ret < 0) {
- kfree(lzo_block->dst);
- kfree(lzo_block->src);
- goto out;
- }
-
- /* set the bit so we know we have to sync this register */
- set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
- kfree(tmp_dst);
- kfree(lzo_block->src);
- return 0;
-out:
- lzo_block->dst = tmp_dst;
- lzo_block->dst_len = tmp_dst_len;
- return ret;
-}
-
-static int regcache_lzo_sync(struct regmap *map, unsigned int min,
- unsigned int max)
-{
- struct regcache_lzo_ctx **lzo_blocks;
- unsigned int val;
- int i;
- int ret;
-
- lzo_blocks = map->cache;
- i = min;
- for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
- lzo_blocks[0]->sync_bmp_nbits) {
- if (i > max)
- continue;
-
- ret = regcache_read(map, i, &val);
- if (ret)
- return ret;
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, i);
- if (ret > 0 && val == map->reg_defaults[ret].def)
- continue;
-
- map->cache_bypass = true;
- ret = _regmap_write(map, i, val);
- map->cache_bypass = false;
- if (ret)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- i, val);
- }
-
- return 0;
-}
-
-struct regcache_ops regcache_lzo_ops = {
- .type = REGCACHE_COMPRESSED,
- .name = "lzo",
- .init = regcache_lzo_init,
- .exit = regcache_lzo_exit,
- .read = regcache_lzo_read,
- .write = regcache_lzo_write,
- .sync = regcache_lzo_sync
-};
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
new file mode 100644
index 000000000000..9b1b559107ef
--- /dev/null
+++ b/drivers/base/regmap/regcache-maple.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - maple tree based cache
+//
+// Copyright 2023 Arm, Ltd
+//
+// Author: Mark Brown <broonie@kernel.org>
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/maple_tree.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_maple_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ struct maple_tree *mt = map->cache;
+ MA_STATE(mas, mt, reg, reg);
+ unsigned long *entry;
+
+ rcu_read_lock();
+
+ entry = mas_walk(&mas);
+ if (!entry) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *value = entry[reg - mas.index];
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int regcache_maple_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ struct maple_tree *mt = map->cache;
+ MA_STATE(mas, mt, reg, reg);
+ unsigned long *entry, *upper, *lower;
+ unsigned long index, last;
+ size_t lower_sz, upper_sz;
+ int ret;
+
+ rcu_read_lock();
+
+ entry = mas_walk(&mas);
+ if (entry) {
+ entry[reg - mas.index] = val;
+ rcu_read_unlock();
+ return 0;
+ }
+
+ /* Any adjacent entries to extend/merge? */
+ mas_set_range(&mas, reg - 1, reg + 1);
+ index = reg;
+ last = reg;
+
+ lower = mas_find(&mas, reg - 1);
+ if (lower) {
+ index = mas.index;
+ lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
+ }
+
+ upper = mas_find(&mas, reg + 1);
+ if (upper) {
+ last = mas.last;
+ upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
+ }
+
+ rcu_read_unlock();
+
+ entry = kmalloc((last - index + 1) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ if (lower)
+ memcpy(entry, lower, lower_sz);
+ entry[reg - index] = val;
+ if (upper)
+ memcpy(&entry[reg - index + 1], upper, upper_sz);
+
+ /*
+ * This is safe because the regmap lock means the Maple lock
+ * is redundant, but we need to take it due to lockdep asserts
+ * in the maple tree code.
+ */
+ mas_lock(&mas);
+
+ mas_set_range(&mas, index, last);
+ ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
+
+ mas_unlock(&mas);
+
+ if (ret == 0) {
+ kfree(lower);
+ kfree(upper);
+ }
+
+ return ret;
+}
+
+static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct maple_tree *mt = map->cache;
+ MA_STATE(mas, mt, min, max);
+ unsigned long *entry, *lower, *upper;
+ unsigned long lower_index, lower_last;
+ unsigned long upper_index, upper_last;
+ int ret = 0;
+
+ lower = NULL;
+ upper = NULL;
+
+ mas_lock(&mas);
+
+ mas_for_each(&mas, entry, max) {
+ /*
+ * This is safe because the regmap lock means the
+ * Maple lock is redundant, but we need to take it due
+ * to lockdep asserts in the maple tree code.
+ */
+ mas_unlock(&mas);
+
+ /* Do we need to save any of this entry? */
+ if (mas.index < min) {
+ lower_index = mas.index;
+ lower_last = min - 1;
+
+ lower = kmemdup(entry, ((min - mas.index) *
+ sizeof(unsigned long)),
+ GFP_KERNEL);
+ if (!lower) {
+ ret = -ENOMEM;
+ goto out_unlocked;
+ }
+ }
+
+ if (mas.last > max) {
+ upper_index = max + 1;
+ upper_last = mas.last;
+
+ upper = kmemdup(&entry[max - mas.index + 1],
+ ((mas.last - max) *
+ sizeof(unsigned long)),
+ GFP_KERNEL);
+ if (!upper) {
+ ret = -ENOMEM;
+ goto out_unlocked;
+ }
+ }
+
+ kfree(entry);
+ mas_lock(&mas);
+ mas_erase(&mas);
+
+ /* Insert new nodes with the saved data */
+ if (lower) {
+ mas_set_range(&mas, lower_index, lower_last);
+ ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
+ if (ret != 0)
+ goto out;
+ lower = NULL;
+ }
+
+ if (upper) {
+ mas_set_range(&mas, upper_index, upper_last);
+ ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
+ if (ret != 0)
+ goto out;
+ upper = NULL;
+ }
+ }
+
+out:
+ mas_unlock(&mas);
+out_unlocked:
+ kfree(lower);
+ kfree(upper);
+
+ return ret;
+}
+
+static int regcache_maple_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct maple_tree *mt = map->cache;
+ unsigned long *entry;
+ MA_STATE(mas, mt, min, max);
+ unsigned long lmin = min;
+ unsigned long lmax = max;
+ unsigned int r;
+ int ret = 0;
+
+ map->cache_bypass = true;
+
+ rcu_read_lock();
+
+ mas_for_each(&mas, entry, max) {
+ for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
+ ret = regcache_sync_val(map, r, entry[r - mas.index]);
+ if (ret != 0)
+ goto out;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+
+ map->cache_bypass = false;
+
+ return ret;
+}
+
+static int regcache_maple_exit(struct regmap *map)
+{
+ struct maple_tree *mt = map->cache;
+ MA_STATE(mas, mt, 0, UINT_MAX);
+ unsigned long *entry;
+
+ /* if we've already been called then just return */
+ if (!mt)
+ return 0;
+
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, UINT_MAX)
+ kfree(entry);
+ __mt_destroy(mt);
+ mas_unlock(&mas);
+
+ kfree(mt);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_maple_init(struct regmap *map)
+{
+ struct maple_tree *mt;
+ int i;
+ int ret;
+
+ mt = kmalloc(sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return -ENOMEM;
+ map->cache = mt;
+
+ mt_init(mt);
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_maple_write(map,
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_maple_exit(map);
+ return ret;
+}
+
+struct regcache_ops regcache_maple_ops = {
+ .type = REGCACHE_MAPLE,
+ .name = "maple",
+ .init = regcache_maple_init,
+ .exit = regcache_maple_exit,
+ .read = regcache_maple_read,
+ .write = regcache_maple_write,
+ .drop = regcache_maple_drop,
+ .sync = regcache_maple_sync,
+};
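
The new cache is selected purely through regmap_config, so a driver needs nothing beyond the cache_type field to use it. A minimal, hypothetical sketch of opting into the maple tree cache (the device parameters below are invented for illustration):

	#include <linux/regmap.h>

	/* Hypothetical device with 8-bit registers, cached in a maple tree */
	static const struct regmap_config example_maple_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0xff,
		.cache_type	= REGCACHE_MAPLE,
	};

Once a register has been written or populated from reg_defaults, later reads are satisfied from the tree without touching the hardware, as exercised by the KUnit tests added further down in this change.
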
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 362e043e26d8..029564695dbb 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -17,9 +17,7 @@
static const struct regcache_ops *cache_types[] = {
&regcache_rbtree_ops,
-#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
- &regcache_lzo_ops,
-#endif
+ &regcache_maple_ops,
&regcache_flat_ops,
};
@@ -148,7 +146,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
break;
if (i == ARRAY_SIZE(cache_types)) {
- dev_err(map->dev, "Could not match compress type: %d\n",
+ dev_err(map->dev, "Could not match cache type: %d\n",
map->cache_type);
return -EINVAL;
}
@@ -242,7 +240,7 @@ int regcache_read(struct regmap *map,
int ret;
if (map->cache_type == REGCACHE_NONE)
- return -ENOSYS;
+ return -EINVAL;
BUG_ON(!map->cache_ops);
@@ -311,6 +309,8 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
continue;
ret = regcache_read(map, reg, &val);
+ if (ret == -ENOENT)
+ continue;
if (ret)
return ret;
@@ -349,6 +349,9 @@ int regcache_sync(struct regmap *map)
const char *name;
bool bypass;
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@@ -418,6 +421,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
const char *name;
bool bypass;
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@@ -672,6 +678,30 @@ static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
return test_bit(idx, cache_present);
}
+int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ if (!regcache_reg_needs_sync(map, reg, val))
+ return 0;
+
+ map->cache_bypass = true;
+
+ ret = _regmap_write(map, reg, val);
+
+ map->cache_bypass = false;
+
+ if (ret != 0) {
+ dev_err(map->dev, "Unable to sync register %#x. %d\n",
+ reg, ret);
+ return ret;
+ }
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ reg, val);
+
+ return 0;
+}
+
static int regcache_sync_block_single(struct regmap *map, void *block,
unsigned long *cache_present,
unsigned int block_base,
@@ -688,21 +718,9 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
continue;
val = regcache_get_val(map, block, i);
- if (!regcache_reg_needs_sync(map, regtmp, val))
- continue;
-
- map->cache_bypass = true;
-
- ret = _regmap_write(map, regtmp, val);
-
- map->cache_bypass = false;
- if (ret != 0) {
- dev_err(map->dev, "Unable to sync register %#x. %d\n",
- regtmp, ret);
+ ret = regcache_sync_val(map, regtmp, val);
+ if (ret != 0)
return ret;
- }
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- regtmp, val);
}
return 0;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 817eda2075aa..c491fabe3617 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -226,8 +226,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
- count = PAGE_SIZE << (MAX_ORDER - 1);
+ if (count > (PAGE_SIZE << MAX_ORDER))
+ count = PAGE_SIZE << MAX_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
@@ -373,8 +373,8 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
- count = PAGE_SIZE << (MAX_ORDER - 1);
+ if (count > (PAGE_SIZE << MAX_ORDER))
+ count = PAGE_SIZE << MAX_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index a8f185430a07..b99bb2369fff 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -189,12 +189,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (!d->type_buf_def[i])
continue;
reg = d->get_irq_reg(d, d->chip->type_base, i);
- if (d->chip->type_invert)
- ret = regmap_update_bits(d->map, reg,
- d->type_buf_def[i], ~d->type_buf[i]);
- else
- ret = regmap_update_bits(d->map, reg,
- d->type_buf_def[i], d->type_buf[i]);
+ ret = regmap_update_bits(d->map, reg,
+ d->type_buf_def[i], d->type_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to sync type in %x\n",
reg);
@@ -333,8 +329,8 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
}
if (d->chip->set_type_config) {
- ret = d->chip->set_type_config(d->config_buf, type,
- irq_data, reg);
+ ret = d->chip->set_type_config(d->config_buf, type, irq_data,
+ reg, d->chip->irq_drv_data);
if (ret)
return ret;
}
@@ -437,7 +433,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* possible in order to reduce the I/O overheads.
*/
- if (chip->num_main_regs) {
+ if (chip->no_status) {
+ /* no status register so default to all active */
+ memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
+ } else if (chip->num_main_regs) {
unsigned int max_main_bits;
unsigned long size;
@@ -655,13 +654,15 @@ EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
* @type: The requested IRQ type.
* @irq_data: The IRQ being configured.
* @idx: Index of the irq's config registers within each array `buf[i]`
+ * @irq_drv_data: Driver specific IRQ data
*
* This is a &struct regmap_irq_chip->set_type_config callback suitable for
* chips with one config register. Register values are updated according to
* the &struct regmap_irq_type data associated with an IRQ.
*/
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
- const struct regmap_irq *irq_data, int idx)
+ const struct regmap_irq *irq_data,
+ int idx, void *irq_drv_data)
{
const struct regmap_irq_type *t = &irq_data->type;
@@ -882,20 +883,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
*/
dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
- /* Might as well warn about mask_invert while we're at it... */
- if (chip->mask_invert)
- dev_warn(map->dev, "mask_invert=true ignored");
-
- d->mask_base = chip->unmask_base;
- d->unmask_base = chip->mask_base;
- } else if (chip->mask_invert) {
- /*
- * Swap the roles of mask_base and unmask_base if the bits are
- * inverted. This is deprecated, drivers should use unmask_base
- * directly.
- */
- dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");
-
d->mask_base = chip->unmask_base;
d->unmask_base = chip->mask_base;
} else {
@@ -967,12 +954,17 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
continue;
/* Ack masked but set interrupts */
- reg = d->get_irq_reg(d, d->chip->status_base, i);
- ret = regmap_read(map, reg, &d->status_buf[i]);
- if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- goto err_alloc;
+ if (d->chip->no_status) {
+ /* no status register so default to all active */
+ d->status_buf[i] = GENMASK(31, 0);
+ } else {
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
+ ret = regmap_read(map, reg, &d->status_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n",
+ ret);
+ goto err_alloc;
+ }
}
if (chip->status_invert)
@@ -1028,9 +1020,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
ret = regmap_read(map, reg, &d->type_buf_def[i]);
- if (d->chip->type_invert)
- d->type_buf_def[i] = ~d->type_buf_def[i];
-
if (ret) {
dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
reg, ret);
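
The set_type_config() callback now receives the chip's irq_drv_data pointer as a final argument, matching regmap_irq_set_type_config_simple() above. A hedged sketch of a driver-side callback using the new signature (the private structure and register values are hypothetical, not taken from any real driver):

	#include <linux/irq.h>
	#include <linux/regmap.h>

	/* Hypothetical per-chip data passed via regmap_irq_chip.irq_drv_data */
	struct example_irq_priv {
		unsigned int edge_val;
		unsigned int level_val;
	};

	static int example_set_type_config(unsigned int **buf, unsigned int type,
					   const struct regmap_irq *irq_data,
					   int idx, void *irq_drv_data)
	{
		struct example_irq_priv *priv = irq_drv_data;

		/* Program the config word for this IRQ's trigger type */
		switch (type) {
		case IRQ_TYPE_EDGE_RISING:
			buf[0][idx] |= priv->edge_val;
			break;
		case IRQ_TYPE_LEVEL_HIGH:
			buf[0][idx] |= priv->level_val;
			break;
		default:
			return -EINVAL;
		}

		return 0;
	}

Drivers that implemented the old four-argument prototype have to be converted in lockstep with this signature change.
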
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
new file mode 100644
index 000000000000..f76d41688134
--- /dev/null
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// regmap KUnit tests
+//
+// Copyright 2023 Arm Ltd
+
+#include <kunit/test.h>
+#include "internal.h"
+
+#define BLOCK_TEST_SIZE 12
+
+static const struct regmap_config test_regmap_config = {
+ .max_register = BLOCK_TEST_SIZE,
+ .reg_stride = 1,
+ .val_bits = sizeof(unsigned int) * 8,
+};
+
+struct regcache_types {
+ enum regcache_type type;
+ const char *name;
+};
+
+static void case_to_desc(const struct regcache_types *t, char *desc)
+{
+ strcpy(desc, t->name);
+}
+
+static const struct regcache_types regcache_types_list[] = {
+ { REGCACHE_NONE, "none" },
+ { REGCACHE_FLAT, "flat" },
+ { REGCACHE_RBTREE, "rbtree" },
+ { REGCACHE_MAPLE, "maple" },
+};
+
+KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
+
+static const struct regcache_types real_cache_types_list[] = {
+ { REGCACHE_FLAT, "flat" },
+ { REGCACHE_RBTREE, "rbtree" },
+ { REGCACHE_MAPLE, "maple" },
+};
+
+KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
+
+static const struct regcache_types sparse_cache_types_list[] = {
+ { REGCACHE_RBTREE, "rbtree" },
+ { REGCACHE_MAPLE, "maple" },
+};
+
+KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
+
+static struct regmap *gen_regmap(struct regmap_config *config,
+ struct regmap_ram_data **data)
+{
+ unsigned int *buf;
+ struct regmap *ret;
+ size_t size = (config->max_register + 1) * sizeof(unsigned int);
+ int i;
+ struct reg_default *defaults;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ get_random_bytes(buf, size);
+
+ *data = kzalloc(sizeof(**data), GFP_KERNEL);
+ if (!(*data))
+ return ERR_PTR(-ENOMEM);
+ (*data)->vals = buf;
+
+ if (config->num_reg_defaults) {
+ defaults = kcalloc(config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!defaults)
+ return ERR_PTR(-ENOMEM);
+ config->reg_defaults = defaults;
+
+ for (i = 0; i < config->num_reg_defaults; i++) {
+ defaults[i].reg = i * config->reg_stride;
+ defaults[i].def = buf[i * config->reg_stride];
+ }
+ }
+
+ ret = regmap_init_ram(config, *data);
+ if (IS_ERR(ret)) {
+ kfree(buf);
+ kfree(*data);
+ }
+
+ return ret;
+}
+
+static void basic_read_write(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val, rval;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* If we write a value to a register we can read it back */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, val, rval);
+
+ /* If using a cache the cache satisfied the read */
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
+
+ regmap_exit(map);
+}
+
+static void bulk_write(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /*
+ * Data written via the bulk API can be read back with single
+ * reads.
+ */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
+
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+
+ regmap_exit(map);
+}
+
+static void bulk_read(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Data written as single writes can be read via the bulk API */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+
+ regmap_exit(map);
+}
+
+static void reg_defaults(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Read back the expected default data */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+ /* The data should have been read from cache if there was one */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+}
+
+static void reg_defaults_read_dev(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* We should have read the cache defaults back from the map */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
+ data->read[i] = false;
+ }
+
+ /* Read back the expected default data */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+ /* The data should have been read from cache if there was one */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+}
+
+static void register_patch(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ struct reg_sequence patch[2];
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ /* We need defaults so readback works */
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Stash the original values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+
+ /* Patch a couple of values */
+ patch[0].reg = 2;
+ patch[0].def = rval[2] + 1;
+ patch[0].delay_us = 0;
+ patch[1].reg = 5;
+ patch[1].def = rval[5] + 1;
+ patch[1].delay_us = 0;
+ KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
+ ARRAY_SIZE(patch)));
+
+ /* Only the patched registers are written */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ switch (i) {
+ case 2:
+ case 5:
+ KUNIT_EXPECT_TRUE(test, data->written[i]);
+ KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+ break;
+ }
+ }
+
+ regmap_exit(map);
+}
+
+static void stride(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval;
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.reg_stride = 2;
+ config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Only even registers can be accessed, try both read and write */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ data->read[i] = false;
+ data->written[i] = false;
+
+ if (i % 2) {
+ KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
+ KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ } else {
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
+ KUNIT_EXPECT_EQ(test, data->vals[i], rval);
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
+ data->read[i]);
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
+ KUNIT_EXPECT_TRUE(test, data->written[i]);
+ }
+ }
+
+ regmap_exit(map);
+}
+
+static struct regmap_range_cfg test_range = {
+ .selector_reg = 1,
+ .selector_mask = 0xff,
+
+ .window_start = 4,
+ .window_len = 10,
+
+ .range_min = 20,
+ .range_max = 40,
+};
+
+static bool test_range_volatile(struct device *dev, unsigned int reg)
+{
+ if (reg >= test_range.window_start &&
+ reg < test_range.window_start + test_range.window_len)
+ return true;
+
+ if (reg >= test_range.range_min && reg <= test_range.range_max)
+ return true;
+
+ return false;
+}
+
+static void basic_ranges(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.volatile_reg = test_range_volatile;
+ config.ranges = &test_range;
+ config.num_ranges = 1;
+ config.max_register = test_range.range_max;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = test_range.range_min; i < test_range.range_max; i++) {
+ data->read[i] = false;
+ data->written[i] = false;
+ }
+
+ /* Reset the page to a non-zero value to trigger a change */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
+ test_range.range_max));
+
+ /* Check we set the page and use the window for writes */
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
+ test_range.range_min +
+ test_range.window_len,
+ 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ /* Same for reads */
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
+ test_range.range_min +
+ test_range.window_len,
+ &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ /* No physical access triggered in the virtual range */
+ for (i = test_range.range_min; i < test_range.range_max; i++) {
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ }
+
+ regmap_exit(map);
+}
+
+/* Try to stress dynamic creation of cache data structures */
+static void stress_insert(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval, *vals;
+ size_t buf_sz;
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.max_register = 300;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ vals = kunit_kcalloc(test, config.max_register, sizeof(*vals),
+ GFP_KERNEL);
+ KUNIT_ASSERT_FALSE(test, vals == NULL);
+ buf_sz = config.max_register * sizeof(*vals);
+
+ get_random_bytes(vals, buf_sz);
+
+ /* Write data into the map/cache in ever decreasing strides */
+ for (i = 0; i < config.max_register; i += 100)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i += 50)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i += 25)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i += 10)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i += 5)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i += 3)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i += 2)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+ for (i = 0; i < config.max_register; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+
+ /* Do reads from the cache (if there is one) match? */
+ for (i = 0; i < config.max_register; i++) {
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
+ KUNIT_EXPECT_EQ(test, rval, vals[i]);
+ KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ }
+
+ regmap_exit(map);
+}
+
+static void cache_bypass(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val, rval;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Ensure the cache has a value in it */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+
+ /* Bypass then write a different value */
+ regcache_cache_bypass(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
+
+ /* Read the bypassed value */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, val + 1, rval);
+ KUNIT_EXPECT_EQ(test, data->vals[0], rval);
+
+ /* Disable bypass, the cache should still return the original value */
+ regcache_cache_bypass(map, false);
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, val, rval);
+
+ regmap_exit(map);
+}
+
+static void cache_sync(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Put some data into the cache */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[i] = false;
+
+ /* Trash the data on the device itself then resync */
+ regcache_mark_dirty(map);
+ memset(data->vals, 0, sizeof(val));
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Did we just write the correct data out? */
+ KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, true, data->written[i]);
+
+ regmap_exit(map);
+}
+
+static void cache_sync_defaults(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Change the value of one register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
+
+ /* Resync */
+ regcache_mark_dirty(map);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Did we just sync the one register we touched? */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
+
+ regmap_exit(map);
+}
+
+static void cache_sync_patch(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ struct reg_sequence patch[2];
+ unsigned int rval[BLOCK_TEST_SIZE], val;
+ int i;
+
+ /* We need defaults so readback works */
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Stash the original values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+
+ /* Patch a couple of values */
+ patch[0].reg = 2;
+ patch[0].def = rval[2] + 1;
+ patch[0].delay_us = 0;
+ patch[1].reg = 5;
+ patch[1].def = rval[5] + 1;
+ patch[1].delay_us = 0;
+ KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
+ ARRAY_SIZE(patch)));
+
+ /* Sync the cache */
+ regcache_mark_dirty(map);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* The patch should be on the device but not in the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, val, rval[i]);
+
+ switch (i) {
+ case 2:
+ case 5:
+ KUNIT_EXPECT_EQ(test, true, data->written[i]);
+ KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+ break;
+ default:
+ KUNIT_EXPECT_EQ(test, false, data->written[i]);
+ KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+ break;
+ }
+ }
+
+ regmap_exit(map);
+}
+
+static void cache_drop(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ data->read[i] = false;
+ }
+ KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+ /* Drop some registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
+
+ /* Reread and check only the dropped registers hit the device. */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
+ KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+ regmap_exit(map);
+}
+
+static struct kunit_case regmap_test_cases[] = {
+ KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+ {}
+};
+
+static struct kunit_suite regmap_test_suite = {
+ .name = "regmap",
+ .test_cases = regmap_test_cases,
+};
+kunit_test_suite(regmap_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index f7293040a2b1..6aa6a2409478 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -10,31 +10,21 @@
/* Clause-45 mask includes the device type (5 bit) and actual register number (16 bit) */
#define REGNUM_C45_MASK GENMASK(20, 0)
-static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int *val)
+static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
{
+ struct mdio_device *mdio_dev = context;
int ret;
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
ret = mdiodev_read(mdio_dev, reg);
if (ret < 0)
return ret;
*val = ret & REGVAL_MASK;
- return 0;
-}
-
-static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
-{
- return mdiodev_write(mdio_dev, reg, val);
-}
-
-static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
-{
- struct mdio_device *mdio_dev = context;
-
- if (unlikely(reg & ~REGNUM_C22_MASK))
- return -ENXIO;
- return regmap_mdio_read(mdio_dev, reg, val);
+ return 0;
}
static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
@@ -55,21 +45,36 @@ static const struct regmap_bus regmap_mdio_c22_bus = {
static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
{
struct mdio_device *mdio_dev = context;
+ unsigned int devad;
+ int ret;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
- return regmap_mdio_read(mdio_dev, MII_ADDR_C45 | reg, val);
+ devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
+ reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
+
+ ret = mdiodev_c45_read(mdio_dev, devad, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & REGVAL_MASK;
+
+ return 0;
}
static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
{
struct mdio_device *mdio_dev = context;
+ unsigned int devad;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
- return regmap_mdio_write(mdio_dev, MII_ADDR_C45 | reg, val);
+ devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
+ reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
+
+ return mdiodev_c45_write(mdio_dev, devad, reg, val);
}
static const struct regmap_bus regmap_mdio_c45_bus = {
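
For Clause-45 the register argument now packs the MMD (device) address above the 16-bit register number, which the callbacks above split back out with REGMAP_MDIO_C45_DEVAD_SHIFT. A rough sketch of how a user of such a regmap would form an address, assuming that shift macro and the standard MMD constants from <linux/mdio.h> are visible to the caller (the register number is illustrative):

	#include <linux/mdio.h>
	#include <linux/regmap.h>

	/* Sketch: read a register from the PMA/PMD MMD through a C45 regmap */
	static int example_c45_read(struct regmap *map, unsigned int *val)
	{
		unsigned int reg = (MDIO_MMD_PMAPMD << REGMAP_MDIO_C45_DEVAD_SHIFT) |
				   0x0100;

		return regmap_read(map, reg, val);
	}

Addresses with bits set above REGNUM_C45_MASK are still rejected with -ENXIO before the devad/regnum split is performed.
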
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
new file mode 100644
index 000000000000..85f34a5dee04
--- /dev/null
+++ b/drivers/base/regmap/regmap-ram.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - Memory region
+//
+// This is intended for testing only
+//
+// Copyright (c) 2023, Arm Ltd
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#include "internal.h"
+
+static int regmap_ram_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct regmap_ram_data *data = context;
+
+ data->vals[reg] = val;
+ data->written[reg] = true;
+
+ return 0;
+}
+
+static int regmap_ram_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct regmap_ram_data *data = context;
+
+ *val = data->vals[reg];
+ data->read[reg] = true;
+
+ return 0;
+}
+
+static void regmap_ram_free_context(void *context)
+{
+ struct regmap_ram_data *data = context;
+
+ kfree(data->vals);
+ kfree(data->read);
+ kfree(data->written);
+ kfree(data);
+}
+
+static const struct regmap_bus regmap_ram = {
+ .fast_io = true,
+ .reg_write = regmap_ram_write,
+ .reg_read = regmap_ram_read,
+ .free_context = regmap_ram_free_context,
+};
+
+struct regmap *__regmap_init_ram(const struct regmap_config *config,
+ struct regmap_ram_data *data,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap *map;
+
+ if (!config->max_register) {
+ pr_crit("No max_register specified for RAM regmap\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
+ GFP_KERNEL);
+ if (!data->read)
+ return ERR_PTR(-ENOMEM);
+
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
+ GFP_KERNEL);
+ if (!data->written)
+ return ERR_PTR(-ENOMEM);
+
+ map = __regmap_init(NULL, &regmap_ram, data, config,
+ lock_key, lock_name);
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(__regmap_init_ram);
+
+MODULE_LICENSE("GPL v2");
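
As used by the KUnit tests above, a RAM-backed map is created through the regmap_init_ram() wrapper with a caller-supplied regmap_ram_data whose value buffer the caller allocates; the bus itself only fills in the read/written tracking arrays. A minimal sketch under those assumptions:

	#include <linux/err.h>
	#include <linux/slab.h>

	#include "internal.h"

	/* Sketch: build a small RAM-backed map for a test */
	static struct regmap *example_ram_map(struct regmap_ram_data **datap)
	{
		static const struct regmap_config cfg = {
			.val_bits = 32,
			.reg_stride = 1,
			.max_register = 15,
		};
		struct regmap_ram_data *data;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);

		/* The caller provides the register value storage */
		data->vals = kcalloc(cfg.max_register + 1, sizeof(unsigned int),
				     GFP_KERNEL);
		if (!data->vals) {
			kfree(data);
			return ERR_PTR(-ENOMEM);
		}

		*datap = data;
		return regmap_init_ram(&cfg, data);
	}

On regmap_exit() the bus frees the data structure and all three arrays through its free_context() hook, so the caller only cleans up on the error paths shown here.
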
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 81b0327f719d..09899ae99fc1 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -6,44 +6,53 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
+#include <linux/types.h>
#include "internal.h"
-static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+static int regmap_sdw_write(void *context, const void *val_buf, size_t val_size)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ /* First word of buffer contains the destination address */
+ u32 addr = le32_to_cpu(*(const __le32 *)val_buf);
+ const u8 *val = val_buf;
- return sdw_write_no_pm(slave, reg, val);
+ return sdw_nwrite_no_pm(slave, addr, val_size - sizeof(addr), val + sizeof(addr));
}
-static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+static int regmap_sdw_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read;
+ u32 addr = le32_to_cpu(*(const __le32 *)reg_buf);
- read = sdw_read_no_pm(slave, reg);
- if (read < 0)
- return read;
+ return sdw_nwrite_no_pm(slave, addr, val_size, val_buf);
+}
- *val = read;
- return 0;
+static int regmap_sdw_read(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ u32 addr = le32_to_cpu(*(const __le32 *)reg_buf);
+
+ return sdw_nread_no_pm(slave, addr, val_size, val_buf);
}
static const struct regmap_bus regmap_sdw = {
- .reg_read = regmap_sdw_read,
- .reg_write = regmap_sdw_write,
+ .write = regmap_sdw_write,
+ .gather_write = regmap_sdw_gather_write,
+ .read = regmap_sdw_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
static int regmap_sdw_config_check(const struct regmap_config *config)
{
- /* All register are 8-bits wide as per MIPI Soundwire 1.0 Spec */
- if (config->val_bits != 8)
- return -ENOTSUPP;
-
- /* Registers are 32 bits wide */
+ /* Register addresses are 32 bits wide */
if (config->reg_bits != 32)
return -ENOTSUPP;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d12d669157f2..db7851f0e3b8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -814,7 +814,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
- map->format.reg_downshift = config->reg_downshift;
+ map->format.reg_shift = config->reg_shift;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
config->val_bits + config->pad_bits, 8);
@@ -1676,6 +1676,18 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
buf[i] |= (mask >> (8 * i)) & 0xff;
}
+static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
+{
+ reg += map->reg_base;
+
+ if (map->format.reg_shift > 0)
+ reg >>= map->format.reg_shift;
+ else if (map->format.reg_shift < 0)
+ reg <<= -(map->format.reg_shift);
+
+ return reg;
+}
+
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
const void *val, size_t val_len, bool noinc)
{
@@ -1753,8 +1765,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
return ret;
}
- reg += map->reg_base;
- reg >>= map->format.reg_downshift;
+ reg = regmap_reg_addr(map, reg);
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->write_flag_mask);
@@ -1924,8 +1935,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
return ret;
}
- reg += map->reg_base;
- reg >>= map->format.reg_downshift;
+ reg = regmap_reg_addr(map, reg);
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map, reg, 1);
@@ -1941,7 +1951,17 @@ static int _regmap_bus_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct regmap *map = context;
+ struct regmap_range_node *range;
+ int ret;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ reg = regmap_reg_addr(map, reg);
return map->bus->reg_write(map->bus_context, reg, val);
}
@@ -2492,8 +2512,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
unsigned int reg = regs[i].reg;
unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
- reg += map->reg_base;
- reg >>= map->format.reg_downshift;
+ reg = regmap_reg_addr(map, reg);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -2819,8 +2838,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
- reg += map->reg_base;
- reg >>= map->format.reg_downshift;
+ reg = regmap_reg_addr(map, reg);
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->read_flag_mask);
@@ -2839,7 +2857,17 @@ static int _regmap_bus_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct regmap *map = context;
+ struct regmap_range_node *range;
+ int ret;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+ reg = regmap_reg_addr(map, reg);
return map->bus->reg_read(map->bus_context, reg, val);
}
@@ -3231,6 +3259,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
*change = false;
if (regmap_volatile(map, reg) && map->reg_update_bits) {
+ reg = regmap_reg_addr(map, reg);
ret = map->reg_update_bits(map->bus_context, reg, mask, val);
if (ret == 0 && change)
*change = true;
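
regmap_reg_addr() above applies reg_base first and then interprets the renamed reg_shift field with a sign convention: positive values shift the address down, negative values shift it up. A hedged sketch of a config setting the field directly (any helper macros for expressing the shift are outside this hunk):

	/*
	 * Sketch: device addressed on the bus in bytes while the regmap
	 * register numbers count 32-bit words, so each register number is
	 * shifted up by two bits to form the bus address.
	 */
	static const struct regmap_config example_shift_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_base	= 0x1000,	/* added before the shift */
		.reg_shift	= -2,		/* negative: shift up */
		.max_register	= 0x3ff,
	};
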
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 22130b5f789d..8dec5228fde3 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include <linux/init.h>
+#include <linux/of.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/idr.h>
@@ -30,6 +31,7 @@ struct soc_device {
static struct bus_type soc_bus_type = {
.name = "soc",
};
+static bool soc_bus_registered;
static DEVICE_ATTR(machine, 0444, soc_info_show, NULL);
static DEVICE_ATTR(family, 0444, soc_info_show, NULL);
@@ -109,6 +111,18 @@ static void soc_release(struct device *dev)
kfree(soc_dev);
}
+static void soc_device_get_machine(struct soc_device_attribute *soc_dev_attr)
+{
+ struct device_node *np;
+
+ if (soc_dev_attr->machine)
+ return;
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+}
+
static struct soc_device_attribute *early_soc_dev_attr;
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
@@ -117,7 +131,9 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
const struct attribute_group **soc_attr_groups;
int ret;
- if (!soc_bus_type.p) {
+ soc_device_get_machine(soc_dev_attr);
+
+ if (!soc_bus_registered) {
if (early_soc_dev_attr)
return ERR_PTR(-EBUSY);
early_soc_dev_attr = soc_dev_attr;
@@ -183,6 +199,7 @@ static int __init soc_bus_register(void)
ret = bus_register(&soc_bus_type);
if (ret)
return ret;
+ soc_bus_registered = true;
if (early_soc_dev_attr)
return PTR_ERR(soc_device_register(early_soc_dev_attr));
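
With this change a NULL soc_dev_attr->machine is filled in from the root devicetree node's "model" property before the device is registered. A hedged sketch of a platform caller that can now omit the field (the family string is invented for illustration):

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/sys_soc.h>

	static int example_register_soc(void)
	{
		struct soc_device_attribute *attr;
		struct soc_device *soc_dev;

		attr = kzalloc(sizeof(*attr), GFP_KERNEL);
		if (!attr)
			return -ENOMEM;

		attr->family = "Example SoC";
		/* .machine left NULL: taken from the DT root "model" property */

		soc_dev = soc_device_register(attr);
		if (IS_ERR(soc_dev)) {
			kfree(attr);
			return PTR_ERR(soc_dev);
		}

		return 0;
	}
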
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 0a482212c7e8..1886995a0b3a 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -760,7 +760,7 @@ static void software_node_release(struct kobject *kobj)
kfree(swnode);
}
-static struct kobj_type software_node_type = {
+static const struct kobj_type software_node_type = {
.release = software_node_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -820,67 +820,6 @@ swnode_register(const struct software_node *node, struct swnode *parent,
}
/**
- * software_node_register_nodes - Register an array of software nodes
- * @nodes: Zero terminated array of software nodes to be registered
- *
- * Register multiple software nodes at once. If any node in the array
- * has its .parent pointer set (which can only be to another software_node),
- * then its parent **must** have been registered before it is; either outside
- * of this function or by ordering the array such that parent comes before
- * child.
- */
-int software_node_register_nodes(const struct software_node *nodes)
-{
- int ret;
- int i;
-
- for (i = 0; nodes[i].name; i++) {
- const struct software_node *parent = nodes[i].parent;
-
- if (parent && !software_node_to_swnode(parent)) {
- ret = -EINVAL;
- goto err_unregister_nodes;
- }
-
- ret = software_node_register(&nodes[i]);
- if (ret)
- goto err_unregister_nodes;
- }
-
- return 0;
-
-err_unregister_nodes:
- software_node_unregister_nodes(nodes);
- return ret;
-}
-EXPORT_SYMBOL_GPL(software_node_register_nodes);
-
-/**
- * software_node_unregister_nodes - Unregister an array of software nodes
- * @nodes: Zero terminated array of software nodes to be unregistered
- *
- * Unregister multiple software nodes at once. If parent pointers are set up
- * in any of the software nodes then the array **must** be ordered such that
- * parents come before their children.
- *
- * NOTE: If you are uncertain whether the array is ordered such that
- * parents will be unregistered before their children, it is wiser to
- * remove the nodes individually, in the correct order (child before
- * parent).
- */
-void software_node_unregister_nodes(const struct software_node *nodes)
-{
- unsigned int i = 0;
-
- while (nodes[i].name)
- i++;
-
- while (i--)
- software_node_unregister(&nodes[i]);
-}
-EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
-
-/**
* software_node_register_node_group - Register a group of software nodes
* @node_group: NULL terminated array of software node pointers to be registered
*
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
index 6071d5bc128c..dd2b606d76a3 100644
--- a/drivers/base/test/property-entry-test.c
+++ b/drivers/base/test/property-entry-test.c
@@ -405,20 +405,18 @@ static void pe_test_move_inline_str(struct kunit *test)
/* Handling of reference properties */
static void pe_test_reference(struct kunit *test)
{
- static const struct software_node nodes[] = {
- { .name = "1", },
- { .name = "2", },
- { }
- };
+ static const struct software_node node1 = { .name = "1" };
+ static const struct software_node node2 = { .name = "2" };
+ static const struct software_node *group[] = { &node1, &node2, NULL };
static const struct software_node_ref_args refs[] = {
- SOFTWARE_NODE_REFERENCE(&nodes[0]),
- SOFTWARE_NODE_REFERENCE(&nodes[1], 3, 4),
+ SOFTWARE_NODE_REFERENCE(&node1),
+ SOFTWARE_NODE_REFERENCE(&node2, 3, 4),
};
const struct property_entry entries[] = {
- PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
- PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF("ref-1", &node1),
+ PROPERTY_ENTRY_REF("ref-2", &node2, 1, 2),
PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
{ }
};
@@ -427,7 +425,7 @@ static void pe_test_reference(struct kunit *test)
struct fwnode_reference_args ref;
int error;
- error = software_node_register_nodes(nodes);
+ error = software_node_register_node_group(group);
KUNIT_ASSERT_EQ(test, error, 0);
node = fwnode_create_software_node(entries, NULL);
@@ -436,7 +434,7 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
0, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1);
KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
/* wrong index */
@@ -447,7 +445,7 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
1, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
@@ -455,7 +453,7 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
3, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
@@ -470,14 +468,14 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
0, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1);
KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
/* second reference in the array */
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
2, 1, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
@@ -488,7 +486,7 @@ static void pe_test_reference(struct kunit *test)
KUNIT_EXPECT_NE(test, error, 0);
fwnode_remove_software_node(node);
- software_node_unregister_nodes(nodes);
+ software_node_unregister_node_group(group);
}
static struct kunit_case property_entry_test_cases[] = {
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 4d1976ca5072..929410d0dd6f 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -145,7 +145,7 @@ static int __init test_async_probe_init(void)
calltime = ktime_get();
for_each_online_cpu(cpu) {
nid = cpu_to_node(cpu);
- pdev = &sync_dev[sync_id];
+ pdev = &async_dev[async_id];
*pdev = test_platform_device_register_node("test_async_driver",
async_id,
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index ccc86206e508..09ee2a1e35bb 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -155,12 +155,27 @@ static int transport_add_class_device(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
+ struct transport_class *tclass = class_to_transport_class(cont->class);
int error = attribute_container_add_class_device(classdev);
struct transport_container *tcont =
attribute_container_to_transport_container(cont);
- if (!error && tcont->statistics)
+ if (error)
+ goto err_remove;
+
+ if (tcont->statistics) {
error = sysfs_create_group(&classdev->kobj, tcont->statistics);
+ if (error)
+ goto err_del;
+ }
+
+ return 0;
+
+err_del:
+ attribute_container_class_device_del(classdev);
+err_remove:
+ if (tclass->remove)
+ tclass->remove(tcont, dev, classdev);
return error;
}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 4f01e6b17bb9..9be0806eb033 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -46,12 +46,6 @@ static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
dev->id.id == BCMA_CORE_USB20_HOST;
}
-static inline u32 mips_read32(struct bcma_drv_mips *mcore,
- u16 offset)
-{
- return bcma_read32(mcore->core, offset);
-}
-
static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
{
u32 flag;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 0a8469e0b13a..7061d3ee836a 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_device.h>
#include <linux/of_platform.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
@@ -28,7 +29,7 @@ static DEFINE_MUTEX(bcma_buses_mutex);
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
-static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
+static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -140,17 +141,17 @@ static struct device_node *bcma_of_find_child_device(struct device *parent,
struct bcma_device *core)
{
struct device_node *node;
- u64 size;
- const __be32 *reg;
+ int ret;
if (!parent->of_node)
return NULL;
for_each_child_of_node(parent->of_node, node) {
- reg = of_get_address(node, 0, &size, NULL);
- if (!reg)
+ struct resource res;
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret)
continue;
- if (of_translate_address(node, reg) == core->addr)
+ if (res.start == core->addr)
return node;
}
return NULL;
@@ -627,9 +628,9 @@ static void bcma_device_remove(struct device *dev)
put_device(dev);
}
-static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+ const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev);
return add_uevent_var(env,
"MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a2184b428493..5b9d4aaebb81 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -103,35 +103,6 @@ config GDROM
Most users will want to say "Y" here.
You can also build this as a module which will be called gdrom.
-config PARIDE
- tristate "Parallel port IDE device support"
- depends on PARPORT_PC
- help
- There are many external CD-ROM and disk devices that connect through
- your computer's parallel port. Most of them are actually IDE devices
- using a parallel port IDE adapter. This option enables the PARIDE
- subsystem which contains drivers for many of these external drives.
- Read <file:Documentation/admin-guide/blockdev/paride.rst> for more information.
-
- If you have said Y to the "Parallel-port support" configuration
- option, you may share a single port between your printer and other
- parallel port devices. Answer Y to build PARIDE support into your
- kernel, or M if you would like to build it as a loadable module. If
- your parallel port support is in a loadable module, you must build
- PARIDE as a module. If you built PARIDE support into your kernel,
- you may still build the individual protocol modules and high-level
- drivers as loadable modules. If you build this support as a module,
- it will be called paride.
-
- To use the PARIDE support, you must say Y or M here and also to at
- least one high-level driver (e.g. "Parallel port IDE disks",
- "Parallel port ATAPI CD-ROMs", "Parallel port ATAPI disks" etc.) and
- to at least one protocol driver (e.g. "ATEN EH-100 protocol",
- "MicroSolutions backpack protocol", "DataStor Commuter protocol"
- etc.).
-
-source "drivers/block/paride/Kconfig"
-
source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
@@ -285,6 +256,49 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
+config CDROM_PKTCDVD
+ tristate "Packet writing on CD/DVD media (DEPRECATED)"
+ depends on !UML
+ depends on SCSI
+ select CDROM
+ help
+ Note: This driver is deprecated and will be removed from the
+ kernel in the near future!
+
+ If you have a CDROM/DVD drive that supports packet writing, say
+ Y to include support. It should work with any MMC/Mt Fuji
+ compliant ATAPI or SCSI drive, which is just about any newer
+ DVD/CD writer.
+
+ Currently only writing to CD-RW, DVD-RW, DVD+RW and DVD-RAM discs
+ is possible.
+ DVD-RW discs must be in restricted overwrite mode.
+
+ See the file <file:Documentation/cdrom/packet-writing.rst>
+ for further information on the use of this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+ int "Free buffers for data gathering"
+ depends on CDROM_PKTCDVD
+ default "8"
+ help
+ This controls the maximum number of active concurrent packets. More
+ concurrent packets can increase write performance, but also require
+ more memory. Each concurrent packet requires approximately 64 KB of
+ non-swappable kernel memory, which is allocated when a disc is opened
+ for writing.
+
+config CDROM_PKTCDVD_WCACHE
+ bool "Enable write caching"
+ depends on CDROM_PKTCDVD
+ help
+ If enabled, write caching will be set for the CD-R/W device. For now
+ this option is dangerous unless the CD-RW media is known good, as we
+ don't do deferred write error handling yet.
+
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
@@ -371,6 +385,23 @@ config BLK_DEV_UBLK
can handle batch more effectively, but task_work_add() isn't exported
for module, so ublk has to be built to kernel.
+config BLKDEV_UBLK_LEGACY_OPCODES
+ bool "Support legacy command opcode"
+ depends on BLK_DEV_UBLK
+ default y
+ help
+ The ublk driver initially used a plain command opcode encoding, which
+ turned out to be a bad choice. A traditional ioctl command opcode
+ encodes more information and defines each code uniquely, so opcode
+ conflicts are avoided, the driver can easily reject wrong commands,
+ and it may help the security subsystem audit io_uring commands.
+
+ Say Y if your application still uses the legacy command opcodes.
+
+ Say N if you do not need the legacy command opcodes. Saying N is
+ suggested once your application (the ublk server) has switched to
+ the ioctl command encoding.
+
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV
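To make concrete what the BLKDEV_UBLK_LEGACY_OPCODES help text above means by the ioctl-style encoding carrying more information: an _IOWR() opcode packs the transfer direction, a type magic, the command number and the argument size into one 32-bit value, so two subsystems picking the same small number no longer collide. A standalone sketch (the 'u' magic and struct below are hypothetical, not the real ublk UAPI):

    #include <stdio.h>
    #include <stdint.h>
    #include <linux/ioctl.h>

    struct example_cmd {
            uint64_t addr;
            uint32_t len;
    };

    /* Direction (read+write), magic 'u', number 0x01 and
     * sizeof(struct example_cmd) are all encoded in the opcode. */
    #define EXAMPLE_CMD_START  _IOWR('u', 0x01, struct example_cmd)

    int main(void)
    {
            printf("plain opcode: 0x%02x\n", 0x01);
            printf("ioctl opcode: 0x%08lx\n",
                   (unsigned long)EXAMPLE_CMD_START);
            return 0;
    }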
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 962ee65d8ca3..101612cba303 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
+obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 7a368c90467d..4c666f72203f 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -290,7 +290,7 @@ aoechr_init(void)
}
init_completion(&emsgs_comp);
spin_lock_init(&emsgs_lock);
- aoe_class = class_create(THIS_MODULE, "aoe");
+ aoe_class = class_create("aoe");
if (IS_ERR(aoe_class)) {
unregister_chrdev(AOE_MAJOR, "aoechr");
return PTR_ERR(aoe_class);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 20acc4a1fd6d..bcad9b926b0c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -78,32 +78,25 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
}
/*
- * Look up and return a brd's page for a given sector.
- * If one does not exist, allocate an empty page, and insert that. Then
- * return it.
+ * Insert a new page for a given sector, if one does not already exist.
*/
-static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
pgoff_t idx;
struct page *page;
- gfp_t gfp_flags;
+ int ret = 0;
page = brd_lookup_page(brd, sector);
if (page)
- return page;
+ return 0;
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
- page = alloc_page(gfp_flags);
+ page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
if (!page)
- return NULL;
+ return -ENOMEM;
- if (radix_tree_preload(GFP_NOIO)) {
+ if (radix_tree_maybe_preload(gfp)) {
__free_page(page);
- return NULL;
+ return -ENOMEM;
}
spin_lock(&brd->brd_lock);
@@ -112,16 +105,17 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
if (radix_tree_insert(&brd->brd_pages, idx, page)) {
__free_page(page);
page = radix_tree_lookup(&brd->brd_pages, idx);
- BUG_ON(!page);
- BUG_ON(page->index != idx);
+ if (!page)
+ ret = -ENOMEM;
+ else if (page->index != idx)
+ ret = -EIO;
} else {
brd->brd_nr_pages++;
}
spin_unlock(&brd->brd_lock);
radix_tree_preload_end();
-
- return page;
+ return ret;
}
/*
@@ -170,20 +164,22 @@ static void brd_free_pages(struct brd_device *brd)
/*
* copy_to_brd_setup must be called before copy_to_brd. It may sleep.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
+static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
+ gfp_t gfp)
{
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
size_t copy;
+ int ret;
copy = min_t(size_t, n, PAGE_SIZE - offset);
- if (!brd_insert_page(brd, sector))
- return -ENOSPC;
+ ret = brd_insert_page(brd, sector, gfp);
+ if (ret)
+ return ret;
if (copy < n) {
sector += copy >> SECTOR_SHIFT;
- if (!brd_insert_page(brd, sector))
- return -ENOSPC;
+ ret = brd_insert_page(brd, sector, gfp);
}
- return 0;
+ return ret;
}
/*
@@ -256,20 +252,26 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, enum req_op op,
+ unsigned int len, unsigned int off, blk_opf_t opf,
sector_t sector)
{
void *mem;
int err = 0;
- if (op_is_write(op)) {
- err = copy_to_brd_setup(brd, sector, len);
+ if (op_is_write(opf)) {
+ /*
+ * Must use NOIO because we don't want to recurse back into the
+ * block or filesystem layers from page reclaim.
+ */
+ gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
+
+ err = copy_to_brd_setup(brd, sector, len, gfp);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!op_is_write(op)) {
+ if (!op_is_write(opf)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -298,8 +300,12 @@ static void brd_submit_bio(struct bio *bio)
(len & (SECTOR_SIZE - 1)));
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio_op(bio), sector);
+ bio->bi_opf, sector);
if (err) {
+ if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
bio_io_error(bio);
return;
}
@@ -309,23 +315,9 @@ static void brd_submit_bio(struct bio *bio)
bio_endio(bio);
}
-static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, enum req_op op)
-{
- struct brd_device *brd = bdev->bd_disk->private_data;
- int err;
-
- if (PageTransHuge(page))
- return -ENOTSUPP;
- err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
- page_endio(page, op_is_write(op), err);
- return err;
-}
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.submit_bio = brd_submit_bio,
- .rw_page = brd_rw_page,
};
/*
@@ -411,7 +403,8 @@ static int brd_alloc(int i)
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
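The brd changes above map REQ_NOWAIT to a GFP_NOWAIT allocation and complete the bio with bio_wouldblock_error() (BLK_STS_AGAIN) rather than sleeping. A sketch of how that surfaces to userspace, assuming a brd device at /dev/ram0 and a kernel/glibc with pwritev2() and RWF_NOWAIT: a nowait write that would have to block completes with -EAGAIN and the caller retries without the flag.

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
            void *buf = aligned_alloc(4096, 4096);  /* O_DIRECT alignment */
            struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
            int fd;

            if (!buf)
                    return 1;
            memset(buf, 0, 4096);

            fd = open("/dev/ram0", O_WRONLY | O_DIRECT); /* assumed brd dev */
            if (fd < 0) {
                    free(buf);
                    return 1;
            }

            if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
                    fprintf(stderr, "would block, retry without RWF_NOWAIT\n");

            close(fd);
            free(buf);
            return 0;
    }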
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index c93e462130ff..67a8b352a1d5 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-drbd-y := drbd_bitmap.o drbd_proc.o
+drbd-y := drbd_buildtag.o drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
drbd-y += drbd_interval.o drbd_state.o
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 429255876800..64b3a1c76f03 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -735,8 +735,9 @@ static bool update_rs_extent(struct drbd_device *device,
return false;
}
-void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
+void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go)
{
+ struct drbd_device *device = peer_device->device;
unsigned long now = jiffies;
unsigned long last = device->rs_mark_time[device->rs_last_mark];
int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
@@ -819,7 +820,7 @@ static int update_sync_bits(struct drbd_device *device,
if (mode == SET_IN_SYNC) {
unsigned long still_to_go = drbd_bm_total_weight(device);
bool rs_is_done = (still_to_go <= device->rs_failed);
- drbd_advance_rs_marks(device, still_to_go);
+ drbd_advance_rs_marks(first_peer_device(device), still_to_go);
if (cleared || rs_is_done)
maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
} else if (mode == RECORD_RS_FAILED)
@@ -843,10 +844,11 @@ static bool plausible_request_size(int size)
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
*/
-int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
+int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
enum update_sync_bits_mode mode)
{
/* Is called from worker and receiver context _only_ */
+ struct drbd_device *device = peer_device->device;
unsigned long sbnr, ebnr, lbnr;
unsigned long count = 0;
sector_t esector, nr_sectors;
@@ -1009,14 +1011,15 @@ retry:
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
-int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
+int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
{
+ struct drbd_device *device = peer_device->device;
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
- bool throttle = drbd_rs_should_slow_down(device, sector, true);
+ bool throttle = drbd_rs_should_slow_down(peer_device, sector, true);
/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
* not yet BME_LOCKED) extent needs to be kicked out explicitly if we
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 289876ffbc31..6ac8c54b44c7 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1216,7 +1216,9 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @device: DRBD device.
*/
-int drbd_bm_read(struct drbd_device *device) __must_hold(local)
+int drbd_bm_read(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
+
{
return bm_rw(device, BM_AIO_READ, 0);
}
@@ -1227,7 +1229,8 @@ int drbd_bm_read(struct drbd_device *device) __must_hold(local)
*
* Will only write pages that have changed since last IO.
*/
-int drbd_bm_write(struct drbd_device *device) __must_hold(local)
+int drbd_bm_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, 0, 0);
}
@@ -1238,7 +1241,8 @@ int drbd_bm_write(struct drbd_device *device) __must_hold(local)
*
* Will write all pages.
*/
-int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
+int drbd_bm_write_all(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}
@@ -1264,7 +1268,8 @@ int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_ho
* verify is aborted due to a failed peer disk, while local IO continues, or
* pending resync acks are still being processed.
*/
-int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
+int drbd_bm_write_copy_pages(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
diff --git a/drivers/block/drbd/drbd_buildtag.c b/drivers/block/drbd/drbd_buildtag.c
new file mode 100644
index 000000000000..cb1aa66d7d5d
--- /dev/null
+++ b/drivers/block/drbd/drbd_buildtag.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/drbd_config.h>
+#include <linux/module.h>
+
+const char *drbd_buildtag(void)
+{
+ /* DRBD built from external sources carries a reference to the
+ * git hash of the source code here.
+ */
+
+ static char buildtag[38] = "\0uilt-in";
+
+ if (buildtag[0] == 0) {
+#ifdef MODULE
+ sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+ buildtag[0] = 'b';
+#endif
+ }
+
+ return buildtag;
+}
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index a72c096aa5b1..12460b584bcb 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -844,7 +844,7 @@ static int drbd_version_show(struct seq_file *m, void *ignored)
{
seq_printf(m, "# %s\n", drbd_buildtag());
seq_printf(m, "VERSION=%s\n", REL_VERSION);
- seq_printf(m, "API_VERSION=%u\n", API_VERSION);
+ seq_printf(m, "API_VERSION=%u\n", GENL_MAGIC_VERSION);
seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
return 0;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ae713338aa46..a30a5ed811be 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -34,21 +34,12 @@
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
+#include <linux/drbd_config.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"
#include "drbd_polymorph_printk.h"
-#ifdef __CHECKER__
-# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
-# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
-# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
-#else
-# define __protected_by(x)
-# define __protected_read_by(x)
-# define __protected_write_by(x)
-#endif
-
/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
@@ -75,6 +66,7 @@ extern int drbd_proc_details;
struct drbd_device;
struct drbd_connection;
+struct drbd_peer_device;
/* Defines to control fault insertion */
enum {
@@ -135,8 +127,8 @@ struct bm_xfer_ctx {
unsigned bytes[2];
};
-extern void INFO_bm_xfer_stats(struct drbd_device *device,
- const char *direction, struct bm_xfer_ctx *c);
+extern void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
+ const char *direction, struct bm_xfer_ctx *c);
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
@@ -550,9 +542,10 @@ struct drbd_md_io {
struct bm_io_work {
struct drbd_work w;
+ struct drbd_peer_device *peer_device;
char *why;
enum bm_flag flags;
- int (*io_fn)(struct drbd_device *device);
+ int (*io_fn)(struct drbd_device *device, struct drbd_peer_device *peer_device);
void (*done)(struct drbd_device *device, int rv);
};
@@ -774,7 +767,7 @@ struct drbd_device {
unsigned long flags;
/* configured by drbdsetup */
- struct drbd_backing_dev *ldev __protected_by(local);
+ struct drbd_backing_dev *ldev;
sector_t p_size; /* partner's disk size */
struct request_queue *rq_queue;
@@ -1050,7 +1043,7 @@ extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
-extern int drbd_send_bitmap(struct drbd_device *device);
+extern int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
@@ -1074,17 +1067,22 @@ extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
void (*done)(struct drbd_device *, int),
- char *why, enum bm_flag flags);
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device);
extern int drbd_bitmap_io(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags);
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags);
-extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
-extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device);
+extern int drbd_bmio_set_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
+extern int drbd_bmio_clear_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
/* Meta data layout
*
@@ -1293,14 +1291,18 @@ extern void _drbd_bm_set_bits(struct drbd_device *device,
const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
-extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_read(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
-extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
-extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
-extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_all(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t drbd_bm_capacity(struct drbd_device *device);
@@ -1431,21 +1433,24 @@ void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
-extern int drbd_resync_finished(struct drbd_device *device);
+extern int drbd_resync_finished(struct drbd_peer_device *peer_device);
/* maybe rather drbd_main.c ? */
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
-extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
+extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device,
+ sector_t sector, int size);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
-extern void drbd_rs_controller_reset(struct drbd_device *device);
+extern void drbd_rs_controller_reset(struct drbd_peer_device *peer_device);
-static inline void ov_out_of_sync_print(struct drbd_device *device)
+static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
+
if (device->ov_last_oos_size) {
- drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
+ drbd_err(peer_device, "Out of sync: start=%llu, size=%lu (sectors)\n",
(unsigned long long)device->ov_last_oos_start,
(unsigned long)device->ov_last_oos_size);
}
@@ -1484,7 +1489,7 @@ extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
-extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
+extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_peer_request *peer_req);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
@@ -1540,22 +1545,22 @@ extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
-extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
-extern void drbd_rs_failed_io(struct drbd_device *device,
+extern void drbd_rs_failed_io(struct drbd_peer_device *peer_device,
sector_t sector, int size);
-extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
+extern void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go);
enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
-extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
+extern int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
enum update_sync_bits_mode mode);
-#define drbd_set_in_sync(device, sector, size) \
- __drbd_change_sync(device, sector, size, SET_IN_SYNC)
-#define drbd_set_out_of_sync(device, sector, size) \
- __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
-#define drbd_rs_failed_io(device, sector, size) \
- __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
+#define drbd_set_in_sync(peer_device, sector, size) \
+ __drbd_change_sync(peer_device, sector, size, SET_IN_SYNC)
+#define drbd_set_out_of_sync(peer_device, sector, size) \
+ __drbd_change_sync(peer_device, sector, size, SET_OUT_OF_SYNC)
+#define drbd_rs_failed_io(peer_device, sector, size) \
+ __drbd_change_sync(peer_device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);
@@ -1927,18 +1932,14 @@ static inline void inc_ap_pending(struct drbd_device *device)
atomic_inc(&device->ap_pending_cnt);
}
-#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
- if (atomic_read(&device->which) < 0) \
- drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
- func, line, \
- atomic_read(&device->which))
-
-#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
-static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
+#define dec_ap_pending(device) ((void)expect((device), __dec_ap_pending(device) >= 0))
+static inline int __dec_ap_pending(struct drbd_device *device)
{
- if (atomic_dec_and_test(&device->ap_pending_cnt))
+ int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
+
+ if (ap_pending_cnt == 0)
wake_up(&device->misc_wait);
- ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
+ return ap_pending_cnt;
}
/* counts how many resync-related answers we still expect from the peer
@@ -1947,16 +1948,16 @@ static inline void _dec_ap_pending(struct drbd_device *device, const char *func,
* C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
* (or P_NEG_ACK with ID_SYNCER)
*/
-static inline void inc_rs_pending(struct drbd_device *device)
+static inline void inc_rs_pending(struct drbd_peer_device *peer_device)
{
- atomic_inc(&device->rs_pending_cnt);
+ atomic_inc(&peer_device->device->rs_pending_cnt);
}
-#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
-static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
+#define dec_rs_pending(peer_device) \
+ ((void)expect((peer_device), __dec_rs_pending(peer_device) >= 0))
+static inline int __dec_rs_pending(struct drbd_peer_device *peer_device)
{
- atomic_dec(&device->rs_pending_cnt);
- ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
+ return atomic_dec_return(&peer_device->device->rs_pending_cnt);
}
/* counts how many answers we still need to send to the peer.
@@ -1973,18 +1974,16 @@ static inline void inc_unacked(struct drbd_device *device)
atomic_inc(&device->unacked_cnt);
}
-#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
-static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
+#define dec_unacked(device) ((void)expect(device, __dec_unacked(device) >= 0))
+static inline int __dec_unacked(struct drbd_device *device)
{
- atomic_dec(&device->unacked_cnt);
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
+ return atomic_dec_return(&device->unacked_cnt);
}
-#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
-static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
+#define sub_unacked(device, n) ((void)expect(device, __sub_unacked(device) >= 0))
+static inline int __sub_unacked(struct drbd_device *device, int n)
{
- atomic_sub(n, &device->unacked_cnt);
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
+ return atomic_sub_return(n, &device->unacked_cnt);
}
static inline bool is_sync_target_state(enum drbd_conns connection_state)
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
index 5024ffd6143d..873beda6de24 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -58,7 +58,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
* drbd_contains_interval - check if a tree contains a given interval
* @root: red black tree root
* @sector: start sector of @interval
- * @interval: may not be a valid pointer
+ * @interval: may be an invalid pointer
*
* Returns if the tree contains the node @interval with start sector @start.
* Does not dereference @interval until @interval is known to be a valid object
@@ -95,6 +95,10 @@ drbd_contains_interval(struct rb_root *root, sector_t sector,
void
drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
{
+ /* avoid endless loop */
+ if (drbd_interval_empty(this))
+ return;
+
rb_erase_augmented(&this->rb, root, &augment_callbacks);
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e43dfb9eb6ad..83987e7a5ef2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -231,9 +231,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
}
req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
+ struct drbd_peer_device *peer_device;
if (req->epoch != expect_epoch)
break;
- _req_mod(req, BARRIER_ACKED);
+ peer_device = conn_peer_device(connection, req->device->vnr);
+ _req_mod(req, BARRIER_ACKED, peer_device);
}
spin_unlock_irq(&connection->resource->req_lock);
@@ -256,10 +258,13 @@ bail:
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
+ struct drbd_peer_device *peer_device;
struct drbd_request *req, *r;
- list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
- _req_mod(req, what);
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
+ peer_device = conn_peer_device(connection, req->device->vnr);
+ _req_mod(req, what, peer_device);
+ }
}
void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
@@ -297,7 +302,7 @@ void tl_abort_disk_io(struct drbd_device *device)
continue;
if (req->device != device)
continue;
- _req_mod(req, ABORT_DISK_IO);
+ _req_mod(req, ABORT_DISK_IO, NULL);
}
spin_unlock_irq(&connection->resource->req_lock);
}
@@ -1198,10 +1203,11 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_peer_device *peer_device, struct bm_xfer_ctx *c)
{
- struct drbd_socket *sock = &first_peer_device(device)->connection->data;
- unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
+ struct drbd_device *device = peer_device->device;
+ struct drbd_socket *sock = &peer_device->connection->data;
+ unsigned int header_size = drbd_header_size(peer_device->connection);
struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err;
@@ -1212,7 +1218,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
if (len) {
dcbp_set_code(p, RLE_VLI_Bits);
- err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
+ err = __send_command(peer_device->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0);
c->packets[0]++;
@@ -1233,7 +1239,8 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
len = num_words * sizeof(*p);
if (len)
drbd_bm_get_lel(device, c->word_offset, num_words, p);
- err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_BITMAP,
+ len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -1245,7 +1252,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
}
if (!err) {
if (len == 0) {
- INFO_bm_xfer_stats(device, "send", c);
+ INFO_bm_xfer_stats(peer_device, "send", c);
return 0;
} else
return 1;
@@ -1254,7 +1261,8 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
}
/* See the comment at receive_bitmap() */
-static int _drbd_send_bitmap(struct drbd_device *device)
+static int _drbd_send_bitmap(struct drbd_device *device,
+ struct drbd_peer_device *peer_device)
{
struct bm_xfer_ctx c;
int err;
@@ -1266,7 +1274,7 @@ static int _drbd_send_bitmap(struct drbd_device *device)
if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
drbd_bm_set_all(device);
- if (drbd_bm_write(device)) {
+ if (drbd_bm_write(device, peer_device)) {
/* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
@@ -1285,20 +1293,20 @@ static int _drbd_send_bitmap(struct drbd_device *device)
};
do {
- err = send_bitmap_rle_or_plain(device, &c);
+ err = send_bitmap_rle_or_plain(peer_device, &c);
} while (err > 0);
return err == 0;
}
-int drbd_send_bitmap(struct drbd_device *device)
+int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device)
{
- struct drbd_socket *sock = &first_peer_device(device)->connection->data;
+ struct drbd_socket *sock = &peer_device->connection->data;
int err = -1;
mutex_lock(&sock->mutex);
if (sock->socket)
- err = !_drbd_send_bitmap(device);
+ err = !_drbd_send_bitmap(device, peer_device);
mutex_unlock(&sock->mutex);
return err;
}
@@ -2899,7 +2907,7 @@ static int __init drbd_init(void)
pr_info("initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
- API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
+ GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
pr_info("%s\n", drbd_buildtag());
pr_info("registered as block device major %d\n", DRBD_MAJOR);
return 0; /* Success! */
@@ -3406,7 +3414,9 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
+int drbd_bmio_set_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
+
{
int rv = -EIO;
@@ -3414,7 +3424,7 @@ int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
drbd_md_sync(device);
drbd_bm_set_all(device);
- rv = drbd_bm_write(device);
+ rv = drbd_bm_write(device, peer_device);
if (!rv) {
drbd_md_clear_flag(device, MDF_FULL_SYNC);
@@ -3430,11 +3440,13 @@ int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
+int drbd_bmio_clear_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
+
{
drbd_resume_al(device);
drbd_bm_clear_all(device);
- return drbd_bm_write(device);
+ return drbd_bm_write(device, peer_device);
}
static int w_bitmap_io(struct drbd_work *w, int unused)
@@ -3453,7 +3465,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
if (get_ldev(device)) {
drbd_bm_lock(device, work->why, work->flags);
- rv = work->io_fn(device);
+ rv = work->io_fn(device, work->peer_device);
drbd_bm_unlock(device);
put_ldev(device);
}
@@ -3488,11 +3500,12 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
* put_ldev().
*/
void drbd_queue_bitmap_io(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
void (*done)(struct drbd_device *, int),
- char *why, enum bm_flag flags)
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device)
{
- D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
+ D_ASSERT(device, current == peer_device->connection->worker.task);
D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
@@ -3501,6 +3514,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
why, device->bm_io_work.why);
+ device->bm_io_work.peer_device = peer_device;
device->bm_io_work.io_fn = io_fn;
device->bm_io_work.done = done;
device->bm_io_work.why = why;
@@ -3512,7 +3526,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
* application IO does not conflict anyways. */
if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
- drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ drbd_queue_work(&peer_device->connection->sender_work,
&device->bm_io_work.w);
}
spin_unlock_irq(&device->resource->req_lock);
@@ -3528,8 +3542,10 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
* freezes application IO while that the actual IO operations runs. This
* functions MAY NOT be called from worker context.
*/
-int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags)
+int drbd_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device)
{
/* Only suspend io, if some operation is supposed to be locked out */
const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
@@ -3541,7 +3557,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
drbd_suspend_io(device);
drbd_bm_lock(device, why, flags);
- rv = io_fn(device);
+ rv = io_fn(device, peer_device);
drbd_bm_unlock(device);
if (do_suspend_io)
@@ -3776,24 +3792,6 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type)
}
#endif
-const char *drbd_buildtag(void)
-{
- /* DRBD built from external sources has here a reference to the
- git hash of the source code. */
-
- static char buildtag[38] = "\0uilt-in";
-
- if (buildtag[0] == 0) {
-#ifdef MODULE
- sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
-#else
- buildtag[0] = 'b';
-#endif
- }
-
- return buildtag;
-}
-
module_init(drbd_init)
module_exit(drbd_cleanup)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 60757ac31701..1a5d3d72d91d 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1053,7 +1053,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
- "size changed", BM_LOCKED_MASK);
+ "size changed", BM_LOCKED_MASK, NULL);
/* on-disk bitmap and activity log is authoritative again
* (unless there was an IO error meanwhile...) */
@@ -1615,7 +1615,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
drbd_send_sync_param(peer_device);
}
- kvfree_rcu(old_disk_conf);
+ kvfree_rcu_mightsleep(old_disk_conf);
kfree(old_plan);
mod_timer(&device->request_timer, jiffies + HZ);
goto success;
@@ -2027,13 +2027,15 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
drbd_info(device, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
- "set_n_write from attaching", BM_LOCKED_MASK)) {
+ "set_n_write from attaching", BM_LOCKED_MASK,
+ NULL)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
} else {
if (drbd_bitmap_io(device, &drbd_bm_read,
- "read from attaching", BM_LOCKED_MASK)) {
+ "read from attaching", BM_LOCKED_MASK,
+ NULL)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
@@ -2446,7 +2448,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
- kvfree_rcu(old_net_conf);
+ kvfree_rcu_mightsleep(old_net_conf);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
struct drbd_peer_device *peer_device;
@@ -2860,7 +2862,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
new_disk_conf->disk_size = (sector_t)rs.resize_size;
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&device->resource->conf_update);
- kvfree_rcu(old_disk_conf);
+ kvfree_rcu_mightsleep(old_disk_conf);
new_disk_conf = NULL;
}
@@ -2972,7 +2974,7 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
- "set_n_write from invalidate", BM_LOCKED_MASK))
+ "set_n_write from invalidate", BM_LOCKED_MASK, NULL))
retcode = ERR_IO_MD_DISK;
}
} else
@@ -3005,11 +3007,12 @@ out:
return 0;
}
-static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
+static int drbd_bmio_set_susp_al(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
int rv;
- rv = drbd_bmio_set_n_write(device);
+ rv = drbd_bmio_set_n_write(device, peer_device);
drbd_suspend_al(device);
return rv;
}
@@ -3052,7 +3055,7 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
"set_n_write from invalidate_peer",
- BM_LOCKED_SET_ALLOWED))
+ BM_LOCKED_SET_ALLOWED, NULL))
retcode = ERR_IO_MD_DISK;
}
} else
@@ -4148,7 +4151,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
if (args.clear_bm) {
err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
- "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
+ "clear_n_write from new_c_uuid", BM_LOCKED_MASK, NULL);
if (err) {
drbd_err(device, "Writing bitmap failed with %d\n", err);
retcode = ERR_IO_MD_DISK;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2227fb0db1ce..1d0feafceadc 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -228,7 +228,7 @@ int drbd_seq_show(struct seq_file *seq, void *v)
};
seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
- API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX, drbd_buildtag());
+ GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX, drbd_buildtag());
/*
cs .. connection state
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 757f4692b5bd..8c2bc47de473 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1283,7 +1283,7 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
- REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
+ REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
if (!octx) {
@@ -2044,11 +2044,11 @@ static int e_end_resync_block(struct drbd_work *w, int unused)
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(device, sector, peer_req->i.size);
+ drbd_set_in_sync(peer_device, sector, peer_req->i.size);
err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(device, sector, peer_req->i.size);
+ drbd_rs_failed_io(peer_device, sector, peer_req->i.size);
err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
}
@@ -2067,7 +2067,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
if (!peer_req)
goto fail;
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
inc_unacked(device);
/* corresponding dec_unacked() in e_end_resync_block()
@@ -2138,7 +2138,7 @@ static int receive_DataReply(struct drbd_connection *connection, struct packet_i
err = recv_dless_read(peer_device, req, sector, pi->size);
if (!err)
- req_mod(req, DATA_RECEIVED);
+ req_mod(req, DATA_RECEIVED, peer_device);
/* else: nothing. handled from drbd_disconnect...
* I don't think we may complete this just yet
* in case we are "on-disconnect: freeze" */
@@ -2196,7 +2196,7 @@ static void restart_conflicting_writes(struct drbd_device *device,
continue;
/* as it is RQ_POSTPONED, this will cause it to
* be queued on the retry workqueue. */
- __req_mod(req, CONFLICT_RESOLVED, NULL);
+ __req_mod(req, CONFLICT_RESOLVED, NULL, NULL);
}
}
@@ -2220,7 +2220,7 @@ static int e_end_block(struct drbd_work *w, int cancel)
P_RS_WRITE_ACK : P_WRITE_ACK;
err = drbd_send_ack(peer_device, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(device, sector, peer_req->i.size);
+ drbd_set_in_sync(peer_device, sector, peer_req->i.size);
} else {
err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
@@ -2420,6 +2420,7 @@ static blk_opf_t wire_flags_to_bio(struct drbd_connection *connection, u32 dpf)
static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
unsigned int size)
{
+ struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_interval *i;
repeat:
@@ -2433,7 +2434,7 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
if (!(req->rq_state & RQ_POSTPONED))
continue;
req->rq_state &= ~RQ_POSTPONED;
- __req_mod(req, NEG_ACKED, &m);
+ __req_mod(req, NEG_ACKED, peer_device, &m);
spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
complete_master_bio(device, &m);
@@ -2690,7 +2691,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
if (device->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+ drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
drbd_al_begin_io(device, &peer_req->i);
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
@@ -2729,9 +2730,10 @@ out_interrupted:
* The current sync rate used here uses only the most recent two step marks,
* to have a short time average so we can react faster.
*/
-bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
+bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
bool throttle_if_app_is_waiting)
{
+ struct drbd_device *device = peer_device->device;
struct lc_element *tmp;
bool throttle = drbd_rs_c_min_rate_throttle(device);
@@ -2843,7 +2845,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
break;
case P_OV_REPLY:
verb = 0;
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
@@ -2914,7 +2916,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
/* track progress, we may need to throttle */
atomic_add(size >> 9, &device->rs_sect_in);
peer_req->w.cb = w_e_end_ov_reply;
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
goto submit_for_resync;
@@ -2977,7 +2979,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
update_receiver_timing_details(connection, drbd_rs_should_slow_down);
if (device->state.peer != R_PRIMARY
- && drbd_rs_should_slow_down(device, sector, false))
+ && drbd_rs_should_slow_down(peer_device, sector, false))
schedule_timeout_uninterruptible(HZ/10);
update_receiver_timing_details(connection, drbd_rs_begin_io);
if (drbd_rs_begin_io(device, sector))
@@ -3226,10 +3228,11 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
-1096 requires proto 96
*/
-static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
+static int drbd_uuid_compare(struct drbd_peer_device *const peer_device,
+ enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
{
- struct drbd_peer_device *const peer_device = first_peer_device(device);
- struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
+ struct drbd_connection *const connection = peer_device->connection;
+ struct drbd_device *device = peer_device->device;
u64 self, peer;
int i, j;
@@ -3465,7 +3468,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
drbd_uuid_dump(device, "peer", device->p_uuid,
device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
- hg = drbd_uuid_compare(device, peer_role, &rule_nr);
+ hg = drbd_uuid_compare(peer_device, peer_role, &rule_nr);
spin_unlock_irq(&device->ldev->md.uuid_lock);
drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -3591,7 +3594,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
if (abs(hg) >= 2) {
drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
- BM_LOCKED_SET_ALLOWED))
+ BM_LOCKED_SET_ALLOWED, NULL))
return C_MASK;
}
@@ -3759,7 +3762,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
drbd_info(connection, "peer data-integrity-alg: %s\n",
integrity_alg[0] ? integrity_alg : "(none)");
- kvfree_rcu(old_net_conf);
+ kvfree_rcu_mightsleep(old_net_conf);
return 0;
disconnect_rcu_unlock:
@@ -4127,7 +4130,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&connection->resource->conf_update);
- kvfree_rcu(old_disk_conf);
+ kvfree_rcu_mightsleep(old_disk_conf);
drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
(unsigned long)p_usize, (unsigned long)my_usize);
@@ -4270,7 +4273,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from receive_uuids",
- BM_LOCKED_TEST_ALLOWED);
+ BM_LOCKED_TEST_ALLOWED, NULL);
_drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
_drbd_uuid_set(device, UI_BITMAP, 0);
_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
@@ -4448,7 +4451,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
else if (os.conn >= C_SYNC_SOURCE &&
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(device) <= device->rs_failed)
- drbd_resync_finished(device);
+ drbd_resync_finished(peer_device);
return 0;
}
}
@@ -4456,8 +4459,8 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
/* explicit verify finished notification, stop sector reached. */
if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
- ov_out_of_sync_print(device);
- drbd_resync_finished(device);
+ ov_out_of_sync_print(peer_device);
+ drbd_resync_finished(peer_device);
return 0;
}
@@ -4766,11 +4769,11 @@ decode_bitmap_c(struct drbd_peer_device *peer_device,
return -EIO;
}
-void INFO_bm_xfer_stats(struct drbd_device *device,
+void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
+ unsigned int header_size = drbd_header_size(peer_device->connection);
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
unsigned int plain =
header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
@@ -4794,7 +4797,7 @@ void INFO_bm_xfer_stats(struct drbd_device *device,
r = 1000;
r = 1000 - r;
- drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
+ drbd_info(peer_device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
"total %u; compression: %u.%u%%\n",
direction,
c->bytes[1], c->packets[1],
@@ -4872,12 +4875,12 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
goto out;
}
- INFO_bm_xfer_stats(device, "receive", &c);
+ INFO_bm_xfer_stats(peer_device, "receive", &c);
if (device->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- err = drbd_send_bitmap(device);
+ err = drbd_send_bitmap(device, peer_device);
if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
@@ -4935,7 +4938,7 @@ static int receive_out_of_sync(struct drbd_connection *connection, struct packet
drbd_conn_str(device->state.conn));
}
- drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
+ drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
return 0;
}
@@ -4956,7 +4959,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
if (get_ldev(device)) {
struct drbd_peer_request *peer_req;
@@ -5214,7 +5217,7 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
if (get_ldev(device)) {
drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
- "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
+ "write from disconnected", BM_LOCKED_CHANGE_ALLOWED, NULL);
put_ldev(device);
}
@@ -5648,22 +5651,23 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
if (get_ldev(device)) {
drbd_rs_complete_io(device, sector);
- drbd_set_in_sync(device, sector, blksize);
+ drbd_set_in_sync(peer_device, sector, blksize);
/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
put_ldev(device);
}
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
atomic_add(blksize >> 9, &device->rs_sect_in);
return 0;
}
static int
-validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
+validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct rb_root *root, const char *func,
enum drbd_req_event what, bool missing_ok)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_request *req;
struct bio_and_error m;
@@ -5673,7 +5677,7 @@ validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t secto
spin_unlock_irq(&device->resource->req_lock);
return -EIO;
}
- __req_mod(req, what, &m);
+ __req_mod(req, what, peer_device, &m);
spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
@@ -5698,8 +5702,8 @@ static int got_BlockAck(struct drbd_connection *connection, struct packet_info *
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
- drbd_set_in_sync(device, sector, blksize);
- dec_rs_pending(device);
+ drbd_set_in_sync(peer_device, sector, blksize);
+ dec_rs_pending(peer_device);
return 0;
}
switch (pi->cmd) {
@@ -5722,7 +5726,7 @@ static int got_BlockAck(struct drbd_connection *connection, struct packet_info *
BUG();
}
- return validate_req_change_req_state(device, p->block_id, sector,
+ return validate_req_change_req_state(peer_device, p->block_id, sector,
&device->write_requests, __func__,
what, false);
}
@@ -5744,12 +5748,12 @@ static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
- dec_rs_pending(device);
- drbd_rs_failed_io(device, sector, size);
+ dec_rs_pending(peer_device);
+ drbd_rs_failed_io(peer_device, sector, size);
return 0;
}
- err = validate_req_change_req_state(device, p->block_id, sector,
+ err = validate_req_change_req_state(peer_device, p->block_id, sector,
&device->write_requests, __func__,
NEG_ACKED, true);
if (err) {
@@ -5758,7 +5762,7 @@ static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi
request is no longer in the collision hash. */
/* In Protocol B we might already have got a P_RECV_ACK
but then get a P_NEG_ACK afterwards. */
- drbd_set_out_of_sync(device, sector, size);
+ drbd_set_out_of_sync(peer_device, sector, size);
}
return 0;
}
@@ -5780,7 +5784,7 @@ static int got_NegDReply(struct drbd_connection *connection, struct packet_info
drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
- return validate_req_change_req_state(device, p->block_id, sector,
+ return validate_req_change_req_state(peer_device, p->block_id, sector,
&device->read_requests, __func__,
NEG_ACKED, false);
}
@@ -5803,13 +5807,13 @@ static int got_NegRSDReply(struct drbd_connection *connection, struct packet_inf
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
if (get_ldev_if_state(device, D_FAILED)) {
drbd_rs_complete_io(device, sector);
switch (pi->cmd) {
case P_NEG_RS_DREPLY:
- drbd_rs_failed_io(device, sector, size);
+ drbd_rs_failed_io(peer_device, sector, size);
break;
case P_RS_CANCEL:
break;
@@ -5866,21 +5870,21 @@ static int got_OVResult(struct drbd_connection *connection, struct packet_info *
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_out_of_sync_found(device, sector, size);
+ drbd_ov_out_of_sync_found(peer_device, sector, size);
else
- ov_out_of_sync_print(device);
+ ov_out_of_sync_print(peer_device);
if (!get_ldev(device))
return 0;
drbd_rs_complete_io(device, sector);
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
--device->ov_left;
/* let's advance progress step marks only for every other megabyte */
if ((device->ov_left & 0x200) == 0x200)
- drbd_advance_rs_marks(device, device->ov_left);
+ drbd_advance_rs_marks(peer_device, device->ov_left);
if (device->ov_left == 0) {
dw = kmalloc(sizeof(*dw), GFP_NOIO);
@@ -5890,8 +5894,8 @@ static int got_OVResult(struct drbd_connection *connection, struct packet_info *
drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
} else {
drbd_err(device, "kmalloc(dw) failed.");
- ov_out_of_sync_print(device);
- drbd_resync_finished(device);
+ ov_out_of_sync_print(peer_device);
+ drbd_resync_finished(peer_device);
}
}
put_ldev(device);
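The drbd_receiver.c hunks above all apply the same conversion: helpers that used to take a struct drbd_device and re-derive the peer via first_peer_device() now receive the struct drbd_peer_device directly and look the device up through it. A minimal sketch of the pattern, with invented helper names (illustrative only, not part of the patch):

/* old shape: device passed in, peer re-derived on every call */
static void sample_helper_old(struct drbd_device *device, sector_t sector)
{
	struct drbd_connection *connection =
		first_peer_device(device)->connection;

	drbd_info(device, "connection %p, sector %llu\n",
		  connection, (unsigned long long)sector);
}

/* new shape: the caller resolves the peer once and passes it down;
 * the device is always reachable as peer_device->device */
static void sample_helper_new(struct drbd_peer_device *peer_device, sector_t sector)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_connection *connection = peer_device->connection;

	drbd_info(device, "connection %p, sector %llu\n",
		  connection, (unsigned long long)sector);
}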
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index eb14ec8ec04c..380e6584a4ee 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -122,12 +122,13 @@ void drbd_req_destroy(struct kref *kref)
* before it even was submitted or sent.
* In that case we do not want to touch the bitmap at all.
*/
+ struct drbd_peer_device *peer_device = first_peer_device(device);
if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(device, req->i.sector, req->i.size);
+ drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(device, req->i.sector, req->i.size);
+ drbd_set_in_sync(peer_device, req->i.sector, req->i.size);
}
/* one might be tempted to move the drbd_al_complete_io
@@ -552,12 +553,15 @@ static inline bool is_pending_write_protocol_A(struct drbd_request *req)
* happen "atomically" within the req_lock,
* and it enforces that we have to think in a very structured manner
* about the "events" that may happen to a request during its life time ...
+ *
+ *
+ * peer_device == NULL means local disk
*/
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
+ struct drbd_peer_device *peer_device,
struct bio_and_error *m)
{
struct drbd_device *const device = req->device;
- struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
struct net_conf *nc;
int p, rv = 0;
@@ -617,7 +621,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case READ_COMPLETED_WITH_ERROR:
- drbd_set_out_of_sync(device, req->i.sector, req->i.size);
+ drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
fallthrough;
@@ -1100,6 +1104,7 @@ static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
static int drbd_process_write_request(struct drbd_request *req)
{
struct drbd_device *device = req->device;
+ struct drbd_peer_device *peer_device = first_peer_device(device);
int remote, send_oos;
remote = drbd_should_do_remote(device->state);
@@ -1115,7 +1120,7 @@ static int drbd_process_write_request(struct drbd_request *req)
/* The only size==0 bios we expect are empty flushes. */
D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
if (remote)
- _req_mod(req, QUEUE_AS_DRBD_BARRIER);
+ _req_mod(req, QUEUE_AS_DRBD_BARRIER, peer_device);
return remote;
}
@@ -1125,10 +1130,10 @@ static int drbd_process_write_request(struct drbd_request *req)
D_ASSERT(device, !(remote && send_oos));
if (remote) {
- _req_mod(req, TO_BE_SENT);
- _req_mod(req, QUEUE_FOR_NET_WRITE);
- } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
- _req_mod(req, QUEUE_FOR_SEND_OOS);
+ _req_mod(req, TO_BE_SENT, peer_device);
+ _req_mod(req, QUEUE_FOR_NET_WRITE, peer_device);
+ } else if (drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size))
+ _req_mod(req, QUEUE_FOR_SEND_OOS, peer_device);
return remote;
}
@@ -1312,6 +1317,7 @@ static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
+ struct drbd_peer_device *peer_device = first_peer_device(device);
const int rw = bio_data_dir(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
@@ -1375,8 +1381,8 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
/* We either have a private_bio, or we can read from remote.
* Otherwise we had done the goto nodata above. */
if (req->private_bio == NULL) {
- _req_mod(req, TO_BE_SENT);
- _req_mod(req, QUEUE_FOR_NET_READ);
+ _req_mod(req, TO_BE_SENT, peer_device);
+ _req_mod(req, QUEUE_FOR_NET_READ, peer_device);
} else
no_remote = true;
}
@@ -1397,7 +1403,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
req->pre_submit_jif = jiffies;
list_add_tail(&req->req_pending_local,
&device->pending_completion[rw == WRITE]);
- _req_mod(req, TO_BE_SUBMITTED);
+ _req_mod(req, TO_BE_SUBMITTED, NULL);
/* but we need to give up the spinlock to submit */
submit_private_bio = true;
} else if (no_remote) {
@@ -1607,6 +1613,8 @@ void drbd_submit_bio(struct bio *bio)
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
/*
* what we "blindly" assume:
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index b4017b5c3fbc..9ae860e7591b 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -267,6 +267,7 @@ struct bio_and_error {
extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
+ struct drbd_peer_device *peer_device,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m);
@@ -280,14 +281,15 @@ extern void drbd_restart_request(struct drbd_request *req);
/* use this if you don't want to deal with calling complete_master_bio()
* outside the spinlock, e.g. when walking some list on cleanup. */
-static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
+static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what,
+ struct drbd_peer_device *peer_device)
{
struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
/* __req_mod possibly frees req, do not touch req after that! */
- rv = __req_mod(req, what, &m);
+ rv = __req_mod(req, what, peer_device, &m);
if (m.bio)
complete_master_bio(device, &m);
@@ -299,7 +301,8 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
* of the lower level driver completion callback, so we need to
* spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
- enum drbd_req_event what)
+ enum drbd_req_event what,
+ struct drbd_peer_device *peer_device)
{
unsigned long flags;
struct drbd_device *device = req->device;
@@ -307,7 +310,7 @@ static inline int req_mod(struct drbd_request *req,
int rv;
spin_lock_irqsave(&device->resource->req_lock, flags);
- rv = __req_mod(req, what, &m);
+ rv = __req_mod(req, what, peer_device, &m);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (m.bio)
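The three entry points in drbd_req.h differ only in locking and completion handling: __req_mod() is the core state machine and expects req_lock to be held, _req_mod() additionally completes the master bio for the caller, and req_mod() wraps the call in spin_lock_irqsave() so it can be used from completion context. With the new parameter, network events name the peer device and purely local transitions pass NULL. A hedged usage sketch (the two wrappers below are invented, not drop-in code):

static void sample_cancel_send(struct drbd_request *req,
			       struct drbd_peer_device *peer_device)
{
	/* a network-related event names the peer it belongs to */
	req_mod(req, SEND_CANCELED, peer_device);
}

static void sample_local_submit(struct drbd_request *req)
{
	/* a local-only transition involves no peer, so NULL is passed */
	req_mod(req, TO_BE_SUBMITTED, NULL);
}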
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 75d13ea0024f..287a8d1d3f70 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1222,9 +1222,11 @@ void drbd_resume_al(struct drbd_device *device)
}
/* helper for _drbd_set_state */
-static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
+static void set_ov_position(struct drbd_peer_device *peer_device, enum drbd_conns cs)
{
- if (first_peer_device(device)->connection->agreed_pro_version < 90)
+ struct drbd_device *device = peer_device->device;
+
+ if (peer_device->connection->agreed_pro_version < 90)
device->ov_start_sector = 0;
device->rs_total = drbd_bm_bits(device);
device->ov_position = 0;
@@ -1387,7 +1389,7 @@ _drbd_set_state(struct drbd_device *device, union drbd_state ns,
unsigned long now = jiffies;
int i;
- set_ov_position(device, ns.conn);
+ set_ov_position(peer_device, ns.conn);
device->rs_start = now;
device->rs_last_sect_ev = 0;
device->ov_last_oos_size = 0;
@@ -1398,7 +1400,7 @@ _drbd_set_state(struct drbd_device *device, union drbd_state ns,
device->rs_mark_time[i] = now;
}
- drbd_rs_controller_reset(device);
+ drbd_rs_controller_reset(peer_device);
if (ns.conn == C_VERIFY_S) {
drbd_info(device, "Starting Online Verify from sector %llu\n",
@@ -1518,8 +1520,9 @@ static void abw_start_sync(struct drbd_device *device, int rv)
}
int drbd_bitmap_io_from_worker(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags)
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device)
{
int rv;
@@ -1529,7 +1532,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
atomic_inc(&device->suspend_cnt);
drbd_bm_lock(device, why, flags);
- rv = io_fn(device);
+ rv = io_fn(device, peer_device);
drbd_bm_unlock(device);
drbd_resume_io(device);
@@ -1809,7 +1812,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
device->state.conn == C_WF_BITMAP_S)
drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
"send_bitmap (WFBitMapS)",
- BM_LOCKED_TEST_ALLOWED);
+ BM_LOCKED_TEST_ALLOWED, peer_device);
/* Lost contact to peer's copy of the data */
if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) {
@@ -1839,7 +1842,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
* No harm done if the bitmap still changes,
* redirtied pages will follow later. */
drbd_bitmap_io_from_worker(device, &drbd_bm_write,
- "demote diskless peer", BM_LOCKED_SET_ALLOWED);
+ "demote diskless peer", BM_LOCKED_SET_ALLOWED, peer_device);
put_ldev(device);
}
@@ -1851,7 +1854,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* No changes to the bitmap expected this time, so assert that,
* even though no harm was done if it did change. */
drbd_bitmap_io_from_worker(device, &drbd_bm_write,
- "demote", BM_LOCKED_TEST_ALLOWED);
+ "demote", BM_LOCKED_TEST_ALLOWED, peer_device);
put_ldev(device);
}
@@ -1888,7 +1891,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* no other bitmap changes expected during this phase */
drbd_queue_bitmap_io(device,
&drbd_bmio_set_n_write, &abw_start_sync,
- "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
+ "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED,
+ peer_device);
/* first half of local IO error, failure to attach,
* or administrative detach */
@@ -2011,7 +2015,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
(ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
- "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED,
+ peer_device);
put_ldev(device);
}
@@ -2071,7 +2076,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
conn_free_crypto(connection);
mutex_unlock(&connection->resource->conf_update);
- kvfree_rcu(old_conf);
+ kvfree_rcu_mightsleep(old_conf);
}
if (ns_max.susp_fen) {
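In drbd_state.c the bitmap I/O plumbing grows the same parameter: drbd_bitmap_io_from_worker() and drbd_queue_bitmap_io() now pass a struct drbd_peer_device through to the callback, so every io_fn has the type int (*)(struct drbd_device *, struct drbd_peer_device *), and callers without a peer at hand (such as the disconnect path in drbd_receiver.c above) pass NULL. A minimal sketch of a conforming callback and its invocation (invented names, signatures as in the hunks above):

static int sample_bm_io(struct drbd_device *device,
			struct drbd_peer_device *peer_device)
{
	/* peer_device may be NULL when the write is not tied to a peer */
	return drbd_bm_write(device, peer_device);
}

static void sample_flush_bitmap(struct drbd_device *device,
				struct drbd_peer_device *peer_device)
{
	drbd_bitmap_io_from_worker(device, &sample_bm_io, "sample flush",
				   BM_LOCKED_TEST_ALLOWED, peer_device);
}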
diff --git a/drivers/block/drbd/drbd_vli.h b/drivers/block/drbd/drbd_vli.h
index 1ee81e3c2152..941c511cc4da 100644
--- a/drivers/block/drbd/drbd_vli.h
+++ b/drivers/block/drbd/drbd_vli.h
@@ -327,7 +327,7 @@ static inline int bitstream_get_bits(struct bitstream *bs, u64 *out, int bits)
*/
static inline int vli_encode_bits(struct bitstream *bs, u64 in)
{
- u64 code = code;
+ u64 code;
int bits = __vli_encode_bits(&code, in);
if (bits <= 0)
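The drbd_vli.h hunk removes the old "u64 code = code;" self-initialization. That idiom only existed to silence a may-be-used-uninitialized warning; it reads an indeterminate value and newer compilers flag it. Since __vli_encode_bits() stores to *out before the value is ever consumed, and the bits <= 0 error path never reads code, a plain uninitialized declaration is both correct and warning-clean. A sketch of the resulting shape (not a verbatim copy of the function; the error value here is illustrative):

static inline int sample_encode_bits(struct bitstream *bs, u64 in)
{
	u64 code;	/* written by __vli_encode_bits() before any read */
	int bits = __vli_encode_bits(&code, in);

	if (bits <= 0)
		return -EOVERFLOW;	/* 'code' is never read on this path */

	return bitstream_put_bits(bs, code, bits);
}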
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index f46738040d6b..4352a50fbb3f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -28,8 +28,8 @@
#include "drbd_protocol.h"
#include "drbd_req.h"
-static int make_ov_request(struct drbd_device *, int);
-static int make_resync_request(struct drbd_device *, int);
+static int make_ov_request(struct drbd_peer_device *, int);
+static int make_resync_request(struct drbd_peer_device *, int);
/* endio handlers:
* drbd_md_endio (defined here)
@@ -124,7 +124,7 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
* In case of a write error, send the neg ack anyways. */
if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
inc_unacked(device);
- drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+ drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
}
spin_lock_irqsave(&device->resource->req_lock, flags);
@@ -276,7 +276,7 @@ void drbd_request_endio(struct bio *bio)
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
- __req_mod(req, what, &m);
+ __req_mod(req, what, NULL, &m);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
put_ldev(device);
@@ -363,7 +363,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
* drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(device, peer_req);
peer_req = NULL;
- inc_rs_pending(device);
+ inc_rs_pending(peer_device);
err = drbd_send_drequest_csum(peer_device, sector, size,
digest, digest_size,
P_CSUM_RS_REQUEST);
@@ -430,10 +430,10 @@ int w_resync_timer(struct drbd_work *w, int cancel)
switch (device->state.conn) {
case C_VERIFY_S:
- make_ov_request(device, cancel);
+ make_ov_request(first_peer_device(device), cancel);
break;
case C_SYNC_TARGET:
- make_resync_request(device, cancel);
+ make_resync_request(first_peer_device(device), cancel);
break;
}
@@ -493,8 +493,9 @@ struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
return fb;
}
-static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in)
+static int drbd_rs_controller(struct drbd_peer_device *peer_device, unsigned int sect_in)
{
+ struct drbd_device *device = peer_device->device;
struct disk_conf *dc;
unsigned int want; /* The number of sectors we want in-flight */
int req_sect; /* Number of sectors to request in this turn */
@@ -545,8 +546,9 @@ static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in)
return req_sect;
}
-static int drbd_rs_number_requests(struct drbd_device *device)
+static int drbd_rs_number_requests(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
int number, mxb;
@@ -556,7 +558,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
rcu_read_lock();
mxb = drbd_get_max_buffers(device) / 2;
if (rcu_dereference(device->rs_plan_s)->size) {
- number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9);
+ number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9);
device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
@@ -580,9 +582,9 @@ static int drbd_rs_number_requests(struct drbd_device *device)
return number;
}
-static int make_resync_request(struct drbd_device *const device, int cancel)
+static int make_resync_request(struct drbd_peer_device *const peer_device, int cancel)
{
- struct drbd_peer_device *const peer_device = first_peer_device(device);
+ struct drbd_device *const device = peer_device->device;
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
unsigned long bit;
sector_t sector;
@@ -598,7 +600,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
if (device->rs_total == 0) {
/* empty resync? */
- drbd_resync_finished(device);
+ drbd_resync_finished(peer_device);
return 0;
}
@@ -618,7 +620,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
}
max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
- number = drbd_rs_number_requests(device);
+ number = drbd_rs_number_requests(peer_device);
if (number <= 0)
goto requeue;
@@ -653,7 +655,7 @@ next_sector:
sector = BM_BIT_TO_SECT(bit);
- if (drbd_try_rs_begin_io(device, sector)) {
+ if (drbd_try_rs_begin_io(peer_device, sector)) {
device->bm_resync_fo = bit;
goto requeue;
}
@@ -729,13 +731,13 @@ next_sector:
} else {
int err;
- inc_rs_pending(device);
+ inc_rs_pending(peer_device);
err = drbd_send_drequest(peer_device,
size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
sector, size, ID_SYNCER);
if (err) {
drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
put_ldev(device);
return err;
}
@@ -760,8 +762,9 @@ next_sector:
return 0;
}
-static int make_ov_request(struct drbd_device *device, int cancel)
+static int make_ov_request(struct drbd_peer_device *peer_device, int cancel)
{
+ struct drbd_device *device = peer_device->device;
int number, i, size;
sector_t sector;
const sector_t capacity = get_capacity(device->vdisk);
@@ -770,7 +773,7 @@ static int make_ov_request(struct drbd_device *device, int cancel)
if (unlikely(cancel))
return 1;
- number = drbd_rs_number_requests(device);
+ number = drbd_rs_number_requests(peer_device);
sector = device->ov_position;
for (i = 0; i < number; i++) {
@@ -788,7 +791,7 @@ static int make_ov_request(struct drbd_device *device, int cancel)
size = BM_BLOCK_SIZE;
- if (drbd_try_rs_begin_io(device, sector)) {
+ if (drbd_try_rs_begin_io(peer_device, sector)) {
device->ov_position = sector;
goto requeue;
}
@@ -796,9 +799,9 @@ static int make_ov_request(struct drbd_device *device, int cancel)
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- inc_rs_pending(device);
+ inc_rs_pending(peer_device);
if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
return 0;
}
sector += BM_SECT_PER_BIT;
@@ -818,8 +821,8 @@ int w_ov_finished(struct drbd_work *w, int cancel)
container_of(w, struct drbd_device_work, w);
struct drbd_device *device = dw->device;
kfree(dw);
- ov_out_of_sync_print(device);
- drbd_resync_finished(device);
+ ov_out_of_sync_print(first_peer_device(device));
+ drbd_resync_finished(first_peer_device(device));
return 0;
}
@@ -831,7 +834,7 @@ static int w_resync_finished(struct drbd_work *w, int cancel)
struct drbd_device *device = dw->device;
kfree(dw);
- drbd_resync_finished(device);
+ drbd_resync_finished(first_peer_device(device));
return 0;
}
@@ -846,9 +849,10 @@ static void ping_peer(struct drbd_device *device)
test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}
-int drbd_resync_finished(struct drbd_device *device)
+int drbd_resync_finished(struct drbd_peer_device *peer_device)
{
- struct drbd_connection *connection = first_peer_device(device)->connection;
+ struct drbd_device *device = peer_device->device;
+ struct drbd_connection *connection = peer_device->connection;
unsigned long db, dt, dbdt;
unsigned long n_oos;
union drbd_state os, ns;
@@ -1129,7 +1133,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(device->state.pdsk >= D_INCONSISTENT)) {
- inc_rs_pending(device);
+ inc_rs_pending(peer_device);
if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
err = drbd_send_rs_deallocated(peer_device, peer_req);
else
@@ -1148,7 +1152,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
- drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
+ drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
dec_unacked(device);
@@ -1199,12 +1203,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
}
if (eq) {
- drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
+ drbd_set_in_sync(peer_device, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
} else {
- inc_rs_pending(device);
+ inc_rs_pending(peer_device);
peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
@@ -1257,10 +1261,10 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
* drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(device, peer_req);
peer_req = NULL;
- inc_rs_pending(device);
+ inc_rs_pending(peer_device);
err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
if (err)
- dec_rs_pending(device);
+ dec_rs_pending(peer_device);
kfree(digest);
out:
@@ -1270,15 +1274,16 @@ out:
return err;
}
-void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
+ struct drbd_device *device = peer_device->device;
if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
device->ov_last_oos_size += size>>9;
} else {
device->ov_last_oos_start = sector;
device->ov_last_oos_size = size>>9;
}
- drbd_set_out_of_sync(device, sector, size);
+ drbd_set_out_of_sync(peer_device, sector, size);
}
int w_e_end_ov_reply(struct drbd_work *w, int cancel)
@@ -1328,9 +1333,9 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
* drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(device, peer_req);
if (!eq)
- drbd_ov_out_of_sync_found(device, sector, size);
+ drbd_ov_out_of_sync_found(peer_device, sector, size);
else
- ov_out_of_sync_print(device);
+ ov_out_of_sync_print(peer_device);
err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
@@ -1341,14 +1346,14 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
/* let's advance progress step marks only for every other megabyte */
if ((device->ov_left & 0x200) == 0x200)
- drbd_advance_rs_marks(device, device->ov_left);
+ drbd_advance_rs_marks(peer_device, device->ov_left);
stop_sector_reached = verify_can_do_stop_sector(device) &&
(sector + (size>>9)) >= device->ov_stop_sector;
if (device->ov_left == 0 || stop_sector_reached) {
- ov_out_of_sync_print(device);
- drbd_resync_finished(device);
+ ov_out_of_sync_print(peer_device);
+ drbd_resync_finished(peer_device);
}
return err;
@@ -1425,7 +1430,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- req_mod(req, SEND_CANCELED);
+ req_mod(req, SEND_CANCELED, peer_device);
return 0;
}
req->pre_send_jif = jiffies;
@@ -1437,7 +1442,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
maybe_send_barrier(connection, req->epoch);
err = drbd_send_out_of_sync(peer_device, req);
- req_mod(req, OOS_HANDED_TO_NETWORK);
+ req_mod(req, OOS_HANDED_TO_NETWORK, peer_device);
return err;
}
@@ -1457,7 +1462,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- req_mod(req, SEND_CANCELED);
+ req_mod(req, SEND_CANCELED, peer_device);
return 0;
}
req->pre_send_jif = jiffies;
@@ -1467,7 +1472,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
connection->send.current_epoch_writes++;
err = drbd_send_dblock(peer_device, req);
- req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
if (do_send_unplug && !err)
pd_send_unplug_remote(peer_device);
@@ -1490,7 +1495,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- req_mod(req, SEND_CANCELED);
+ req_mod(req, SEND_CANCELED, peer_device);
return 0;
}
req->pre_send_jif = jiffies;
@@ -1502,7 +1507,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
(unsigned long)req);
- req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
if (do_send_unplug && !err)
pd_send_unplug_remote(peer_device);
@@ -1668,8 +1673,9 @@ void drbd_resync_after_changed(struct drbd_device *device)
} while (changed);
}
-void drbd_rs_controller_reset(struct drbd_device *device)
+void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
struct fifo_buffer *plan;
@@ -1891,10 +1897,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
}
- drbd_resync_finished(device);
+ drbd_resync_finished(peer_device);
}
- drbd_rs_controller_reset(device);
+ drbd_rs_controller_reset(peer_device);
/* ns.conn may already be != device->state.conn,
* we may have been paused in between, or become paused until
* the timer triggers.
@@ -1909,8 +1915,9 @@ out:
mutex_unlock(device->state_mutex);
}
-static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done)
+static void update_on_disk_bitmap(struct drbd_peer_device *peer_device, bool resync_done)
{
+ struct drbd_device *device = peer_device->device;
struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
device->rs_last_bcast = jiffies;
@@ -1919,7 +1926,7 @@ static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done)
drbd_bm_write_lazy(device, 0);
if (resync_done && is_sync_state(device->state.conn))
- drbd_resync_finished(device);
+ drbd_resync_finished(peer_device);
drbd_bcast_event(device, &sib);
/* update timestamp, in case it took a while to write out stuff */
@@ -1945,6 +1952,7 @@ static void drbd_ldev_destroy(struct drbd_device *device)
static void go_diskless(struct drbd_device *device)
{
+ struct drbd_peer_device *peer_device = first_peer_device(device);
D_ASSERT(device, device->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
@@ -1970,7 +1978,7 @@ static void go_diskless(struct drbd_device *device)
* Any modifications would not be expected anymore, though.
*/
if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
- "detach", BM_LOCKED_TEST_ALLOWED)) {
+ "detach", BM_LOCKED_TEST_ALLOWED, peer_device)) {
if (test_bit(WAS_READ_ERROR, &device->flags)) {
drbd_md_set_flag(device, MDF_FULL_SYNC);
drbd_md_sync(device);
@@ -2017,7 +2025,7 @@ static void do_device_work(struct drbd_device *device, const unsigned long todo)
do_md_sync(device);
if (test_bit(RS_DONE, &todo) ||
test_bit(RS_PROGRESS, &todo))
- update_on_disk_bitmap(device, test_bit(RS_DONE, &todo));
+ update_on_disk_bitmap(first_peer_device(device), test_bit(RS_DONE, &todo));
if (test_bit(GO_DISKLESS, &todo))
go_diskless(device);
if (test_bit(DESTROY_DISK, &todo))
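In drbd_worker.c the worker resolves the peer device once and hands it to the request builders, the resync controller and the progress helpers, instead of letting each of them call first_peer_device() again. A condensed sketch of the dispatch done by w_resync_timer()/do_device_work() above (illustrative; the real w_resync_timer() receives a struct drbd_work, not the device):

static int sample_resync_tick(struct drbd_device *device, int cancel)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);

	switch (device->state.conn) {
	case C_VERIFY_S:
		make_ov_request(peer_device, cancel);
		break;
	case C_SYNC_TARGET:
		make_resync_request(peer_device, cancel);
		break;
	default:
		break;
	}
	return 0;
}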
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 487840e3564d..cec2c20f5e59 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3108,7 +3108,7 @@ loop:
ptr->resultcode = 0;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
- if (ptr->length <= 0 || ptr->length >= MAX_LEN)
+ if (ptr->length <= 0 || ptr->length > MAX_LEN)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1518a6423279..bc31bb7072a2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -90,7 +90,7 @@ struct loop_cmd {
};
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
-#define LOOP_DEFAULT_HW_Q_DEPTH (128)
+#define LOOP_DEFAULT_HW_Q_DEPTH 128
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
@@ -977,13 +977,13 @@ loop_set_status_from_info(struct loop_device *lo,
return -EINVAL;
}
+ /* Avoid assigning overflow values */
+ if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
+ return -EOVERFLOW;
+
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
- /* loff_t vars have been assigned __u64 */
- if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
- return -EOVERFLOW;
-
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
@@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- /* suppress uevents while reconfiguring the device */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
-
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
}
+ /* suppress uevents while reconfiguring the device */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
@@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ /* enable and uncork uevent now that we are done */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+
loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
+
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
- error = 0;
-done:
- /* enable and uncork uevent now that we are done */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
- return error;
+ return 0;
out_unlock:
loop_global_unlock(lo, is_loop);
@@ -1130,7 +1130,7 @@ out_putf:
fput(file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- goto done;
+ return error;
}
static void __loop_clr_fd(struct loop_device *lo, bool release)
@@ -1792,9 +1792,15 @@ static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
{
- int ret = kstrtoint(s, 10, &hw_queue_depth);
+ int qd, ret;
- return (ret || (hw_queue_depth < 1)) ? -EINVAL : 0;
+ ret = kstrtoint(s, 0, &qd);
+ if (ret < 0)
+ return ret;
+ if (qd < 1)
+ return -EINVAL;
+ hw_queue_depth = qd;
+ return 0;
}
static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
@@ -1803,7 +1809,7 @@ static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
};
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
-MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 128");
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1853,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
+ struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
+ struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
+ const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
- if (cmd->blkcg_css)
- kthread_associate_blkcg(cmd->blkcg_css);
- if (cmd->memcg_css)
+ if (cmd_blkcg_css)
+ kthread_associate_blkcg(cmd_blkcg_css);
+ if (cmd_memcg_css)
old_memcg = set_active_memcg(
- mem_cgroup_from_css(cmd->memcg_css));
+ mem_cgroup_from_css(cmd_memcg_css));
+ /*
+ * do_req_filebacked() may call blk_mq_complete_request() synchronously
+ * or asynchronously if using aio. Hence, do not touch 'cmd' after
+ * do_req_filebacked() has returned unless we are sure that 'cmd' has
+ * not yet been completed.
+ */
ret = do_req_filebacked(lo, rq);
- if (cmd->blkcg_css)
+ if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
- if (cmd->memcg_css) {
+ if (cmd_memcg_css) {
set_active_memcg(old_memcg);
- css_put(cmd->memcg_css);
+ css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
- if (!cmd->use_aio || ret) {
+ if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
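The loop_handle_cmd() hunk is about object lifetime rather than cgroups: with aio, do_req_filebacked() can lead to the request being completed before it returns, after which 'cmd' must not be dereferenced. The fix therefore snapshots everything still needed afterwards (the blkcg/memcg css pointers and use_aio) into locals before the call, and touches cmd again only on paths where it is known not to have completed. The same defensive pattern in miniature (invented names; the two prototypes are placeholders, not loop code):

struct sample_cmd {
	bool use_aio;
	int ret;
};

int sample_do_io(struct sample_cmd *cmd);		/* placeholder for do_req_filebacked() */
void sample_complete(struct sample_cmd *cmd, int ret);	/* placeholder for the blk-mq completion */

static void sample_handle(struct sample_cmd *cmd)
{
	/* snapshot before the call: with aio the request may complete, and
	 * the per-command data become invalid, as soon as the I/O is issued */
	const bool use_aio = cmd->use_aio;
	int ret = sample_do_io(cmd);

	/* only a request that was not handed to aio, or whose submission
	 * failed, is still guaranteed to be live here */
	if (!use_aio || ret)
		sample_complete(cmd, ret);
}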
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 592cfa8b765a..65ecde3e2a5b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -325,6 +325,9 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
if (blk_validate_block_size(blksize))
return -EINVAL;
+ if (bytesize < 0)
+ return -EINVAL;
+
nbd->config->bytesize = bytesize;
nbd->config->blksize_bits = __ffs(blksize);
@@ -606,7 +609,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.len = htonl(size);
}
handle = nbd_cmd_handle(cmd);
- memcpy(request.handle, &handle, sizeof(handle));
+ request.cookie = cpu_to_be64(handle);
trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
@@ -618,7 +621,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
trace_nbd_header_sent(req, handle);
if (result < 0) {
if (was_interrupted(result)) {
- /* If we havne't sent anything we can just return BUSY,
+ /* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
* sure we only allow this req to be sent until we are
* completely done.
@@ -732,7 +735,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
u32 tag;
int ret = 0;
- memcpy(&handle, reply->handle, sizeof(handle));
+ handle = be64_to_cpu(reply->cookie);
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -1111,6 +1114,9 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct nbd_sock *nsock;
int err;
+ /* Arg will be cast to int, check it to avoid overflow */
+ if (arg > INT_MAX)
+ return -EINVAL;
sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
@@ -1660,7 +1666,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
- if (!dir) {
+ if (IS_ERR(dir)) {
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
nbd_name(nbd));
return -EIO;
@@ -1686,7 +1692,7 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
- if (!dbg_dir)
+ if (IS_ERR(dbg_dir))
return -EIO;
nbd_dbg_dir = dbg_dir;
@@ -1799,7 +1805,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
@@ -1934,11 +1939,11 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
}
- if (!info->attrs[NBD_ATTR_SOCKETS]) {
+ if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
pr_err("must specify at least one socket\n");
return -EINVAL;
}
- if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
+ if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
pr_err("must specify a size in bytes for the device\n");
return -EINVAL;
}
@@ -2123,7 +2128,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
- if (!info->attrs[NBD_ATTR_INDEX]) {
+ if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
pr_err("must specify an index to disconnect\n");
return -EINVAL;
}
@@ -2161,7 +2166,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
- if (!info->attrs[NBD_ATTR_INDEX]) {
+ if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
pr_err("must specify a device to reconfigure\n");
return -EINVAL;
}
@@ -2325,6 +2330,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.resv_start_op = NBD_CMD_STATUS + 1,
.maxattr = NBD_ATTR_MAX,
+ .netnsok = 1,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
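The nbd change replaces the memcpy() of the 64-bit command handle into a raw handle[8] byte array with an explicit big-endian cookie field (request.cookie = cpu_to_be64(handle) on send, handle = be64_to_cpu(reply->cookie) on receive), which keeps the wire format independent of host endianness and lets sparse check the conversions. The remaining nbd hunks are input hardening (negative size, socket fd range, GENL_REQ_ATTR_CHECK) and debugfs error-pointer checks. A minimal standalone sketch of the byte-order pattern (type and function names invented):

#include <linux/types.h>
#include <asm/byteorder.h>

struct sample_wire_hdr {
	__be64 cookie;			/* always big endian on the wire */
};

static void sample_pack_cookie(struct sample_wire_hdr *hdr, u64 handle)
{
	hdr->cookie = cpu_to_be64(handle);
}

static u64 sample_unpack_cookie(const struct sample_wire_hdr *hdr)
{
	return be64_to_cpu(hdr->cookie);
}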
diff --git a/drivers/block/null_blk/Kconfig b/drivers/block/null_blk/Kconfig
index 6bf1f8ca20a2..ff23bb9346d0 100644
--- a/drivers/block/null_blk/Kconfig
+++ b/drivers/block/null_blk/Kconfig
@@ -9,4 +9,4 @@ config BLK_DEV_NULL_BLK
config BLK_DEV_NULL_BLK_FAULT_INJECTION
bool "Support fault injection for Null test block driver"
- depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION_CONFIGFS
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 7d28e3aa406c..b3fedafe301e 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -250,7 +250,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
- return item ? container_of(item, struct nullb_device, item) : NULL;
+ return item ? container_of(to_config_group(item), struct nullb_device, group) : NULL;
}
static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
@@ -593,8 +593,29 @@ static const struct config_item_type nullb_device_type = {
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+
+static void nullb_add_fault_config(struct nullb_device *dev)
+{
+ fault_config_init(&dev->timeout_config, "timeout_inject");
+ fault_config_init(&dev->requeue_config, "requeue_inject");
+ fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject");
+
+ configfs_add_default_group(&dev->timeout_config.group, &dev->group);
+ configfs_add_default_group(&dev->requeue_config.group, &dev->group);
+ configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
+}
+
+#else
+
+static void nullb_add_fault_config(struct nullb_device *dev)
+{
+}
+
+#endif
+
static struct
-config_item *nullb_group_make_item(struct config_group *group, const char *name)
+config_group *nullb_group_make_group(struct config_group *group, const char *name)
{
struct nullb_device *dev;
@@ -605,9 +626,10 @@ config_item *nullb_group_make_item(struct config_group *group, const char *name)
if (!dev)
return ERR_PTR(-ENOMEM);
- config_item_init_type_name(&dev->item, name, &nullb_device_type);
+ config_group_init_type_name(&dev->group, name, &nullb_device_type);
+ nullb_add_fault_config(dev);
- return &dev->item;
+ return &dev->group;
}
static void
@@ -645,7 +667,7 @@ static struct configfs_attribute *nullb_group_attrs[] = {
};
static struct configfs_group_operations nullb_group_ops = {
- .make_item = nullb_group_make_item,
+ .make_group = nullb_group_make_group,
.drop_item = nullb_group_drop_item,
};
@@ -676,6 +698,13 @@ static struct nullb_device *null_alloc_dev(void)
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
+
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ dev->timeout_config.attr = null_timeout_attr;
+ dev->requeue_config.attr = null_requeue_attr;
+ dev->init_hctx_fault_config.attr = null_init_hctx_attr;
+#endif
+
INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
if (badblocks_init(&dev->badblocks, 0)) {
@@ -1030,8 +1059,8 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
if (!t_page)
return -ENOMEM;
- src = kmap_atomic(c_page->page);
- dst = kmap_atomic(t_page->page);
+ src = kmap_local_page(c_page->page);
+ dst = kmap_local_page(t_page->page);
for (i = 0; i < PAGE_SECTORS;
i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
@@ -1043,8 +1072,8 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
}
}
- kunmap_atomic(dst);
- kunmap_atomic(src);
+ kunmap_local(dst);
+ kunmap_local(src);
ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
null_free_page(ret);
@@ -1112,7 +1141,6 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
size_t temp, count = 0;
unsigned int offset;
struct nullb_page *t_page;
- void *dst, *src;
while (count < n) {
temp = min_t(size_t, nullb->dev->blocksize, n - count);
@@ -1126,11 +1154,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
if (!t_page)
return -ENOSPC;
- src = kmap_atomic(source);
- dst = kmap_atomic(t_page->page);
- memcpy(dst + offset, src + off + count, temp);
- kunmap_atomic(dst);
- kunmap_atomic(src);
+ memcpy_page(t_page->page, offset, source, off + count, temp);
__set_bit(sector & SECTOR_MASK, t_page->bitmap);
@@ -1149,7 +1173,6 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
size_t temp, count = 0;
unsigned int offset;
struct nullb_page *t_page;
- void *dst, *src;
while (count < n) {
temp = min_t(size_t, nullb->dev->blocksize, n - count);
@@ -1158,16 +1181,11 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
t_page = null_lookup_page(nullb, sector, false,
!null_cache_active(nullb));
- dst = kmap_atomic(dest);
- if (!t_page) {
- memset(dst + off + count, 0, temp);
- goto next;
- }
- src = kmap_atomic(t_page->page);
- memcpy(dst + off + count, src + offset, temp);
- kunmap_atomic(src);
-next:
- kunmap_atomic(dst);
+ if (t_page)
+ memcpy_page(dest, off + count, t_page->page, offset,
+ temp);
+ else
+ zero_user(dest, off + count, temp);
count += temp;
sector += temp >> SECTOR_SHIFT;
@@ -1178,11 +1196,7 @@ next:
static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off)
{
- void *dst;
-
- dst = kmap_atomic(page);
- memset(dst + off, 0xFF, len);
- kunmap_atomic(dst);
+ memset_page(page, off, 0xff, len);
}
blk_status_t null_handle_discard(struct nullb_device *dev,
@@ -1413,8 +1427,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
- if (likely(!blk_should_fake_timeout(cmd->rq->q)))
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*
@@ -1530,24 +1543,48 @@ static void null_submit_bio(struct bio *bio)
null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
}
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+
+static bool should_timeout_request(struct request *rq)
+{
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct nullb_device *dev = cmd->nq->dev;
+
+ return should_fail(&dev->timeout_config.attr, 1);
+}
+
+static bool should_requeue_request(struct request *rq)
+{
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct nullb_device *dev = cmd->nq->dev;
+
+ return should_fail(&dev->requeue_config.attr, 1);
+}
+
+static bool should_init_hctx_fail(struct nullb_device *dev)
+{
+ return should_fail(&dev->init_hctx_fault_config.attr, 1);
+}
+
+#else
+
static bool should_timeout_request(struct request *rq)
{
-#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
- if (g_timeout_str[0])
- return should_fail(&null_timeout_attr, 1);
-#endif
return false;
}
static bool should_requeue_request(struct request *rq)
{
-#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
- if (g_requeue_str[0])
- return should_fail(&null_requeue_attr, 1);
-#endif
return false;
}
+static bool should_init_hctx_fail(struct nullb_device *dev)
+{
+ return false;
+}
+
+#endif
+
static void null_map_queues(struct blk_mq_tag_set *set)
{
struct nullb *nullb = set->driver_data;
@@ -1658,12 +1695,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+ const struct blk_mq_queue_data *bd)
{
- struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct request *rq = bd->rq;
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_queue *nq = hctx->driver_data;
- sector_t nr_sectors = blk_rq_sectors(bd->rq);
- sector_t sector = blk_rq_pos(bd->rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ sector_t sector = blk_rq_pos(rq);
const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
@@ -1672,14 +1710,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
- cmd->rq = bd->rq;
+ cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
- cmd->fake_timeout = should_timeout_request(bd->rq);
+ cmd->fake_timeout = should_timeout_request(rq) ||
+ blk_should_fake_timeout(rq->q);
- blk_mq_start_request(bd->rq);
+ blk_mq_start_request(rq);
- if (should_requeue_request(bd->rq)) {
+ if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
* driver driven requeue path
@@ -1687,22 +1726,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
nq->requeue_selection++;
if (nq->requeue_selection & 1)
return BLK_STS_RESOURCE;
- else {
- blk_mq_requeue_request(bd->rq, true);
- return BLK_STS_OK;
- }
+ blk_mq_requeue_request(rq, true);
+ return BLK_STS_OK;
}
if (is_poll) {
spin_lock(&nq->poll_lock);
- list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+ list_add_tail(&rq->queuelist, &nq->poll_list);
spin_unlock(&nq->poll_lock);
return BLK_STS_OK;
}
if (cmd->fake_timeout)
return BLK_STS_OK;
- return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+ return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
}
static void cleanup_queue(struct nullb_queue *nq)
@@ -1744,10 +1781,8 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
struct nullb *nullb = hctx->queue->queuedata;
struct nullb_queue *nq;
-#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
- if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1))
+ if (should_init_hctx_fail(nullb->dev))
return -EFAULT;
-#endif
nq = &nullb->queues[hctx_idx];
hctx->driver_data = nq;
@@ -1965,6 +2000,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
static int null_validate_conf(struct nullb_device *dev)
{
+ if (dev->queue_mode == NULL_Q_RQ) {
+ pr_err("legacy IO path is no longer available\n");
+ return -EINVAL;
+ }
+
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
@@ -2067,9 +2107,6 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
- if (!null_setup_fault())
- goto out_cleanup_tags;
-
nullb->tag_set->timeout = 5 * HZ;
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
if (IS_ERR(nullb->disk)) {
@@ -2107,7 +2144,6 @@ static int null_add_dev(struct nullb_device *dev)
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
@@ -2123,8 +2159,7 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_physical_block_size(nullb->q, dev->blocksize);
if (!dev->max_sectors)
dev->max_sectors = queue_max_hw_sectors(nullb->q);
- dev->max_sectors = min_t(unsigned int, dev->max_sectors,
- BLK_DEF_MAX_SECTORS);
+ dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
if (dev->virt_boundary)
@@ -2132,10 +2167,10 @@ static int null_add_dev(struct nullb_device *dev)
null_config_discard(nullb);
- if (config_item_name(&dev->item)) {
+ if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
snprintf(nullb->disk_name, sizeof(nullb->disk_name),
- "%s", config_item_name(&dev->item));
+ "%s", config_item_name(&dev->group.cg_item));
} else {
sprintf(nullb->disk_name, "nullb%d", nullb->index);
}
@@ -2235,6 +2270,9 @@ static int __init null_init(void)
g_home_node = NUMA_NO_NODE;
}
+ if (!null_setup_fault())
+ return -EINVAL;
+
if (g_queue_mode == NULL_Q_RQ) {
pr_err("legacy IO path is no longer available\n");
return -EINVAL;
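The null_blk rework converts each nullb_device from a bare config_item into a config_group and hangs three fault_config default groups off it, so the timeout/requeue/init_hctx fault injection is configured per device through configfs rather than through the global module parameters (which null_setup_fault() now validates once at module init). A condensed sketch of the registration and the hot-path check (mirrors nullb_add_fault_config() and should_timeout_request() above; error handling omitted):

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static void sample_add_fault_groups(struct nullb_device *dev)
{
	/* creates a timeout_inject/ directory with the usual fault_attr
	 * attributes (probability, interval, times, ...) under the device */
	fault_config_init(&dev->timeout_config, "timeout_inject");
	configfs_add_default_group(&dev->timeout_config.group, &dev->group);
}

static bool sample_should_timeout(struct nullb_device *dev)
{
	return should_fail(&dev->timeout_config.attr, 1);
}
#endif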
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index eb5972c50be8..929f659dd255 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -69,7 +69,12 @@ enum {
struct nullb_device {
struct nullb *nullb;
- struct config_item item;
+ struct config_group group;
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ struct fault_config timeout_config;
+ struct fault_config requeue_config;
+ struct fault_config init_hctx_fault_config;
+#endif
struct radix_tree_root data; /* data stored in the disk */
struct radix_tree_root cache; /* disk cache data */
unsigned long flags; /* device flags */
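Embedding a struct config_group (rather than a bare config_item) in nullb_device is what makes the per-device fault-injection directories above possible; the accessor accordingly goes through to_config_group() before container_of(), as the main.c hunk shows. A minimal sketch of that accessor pattern (paraphrasing to_nullb_device()):

static inline struct nullb_device *sample_to_nullb_device(struct config_item *item)
{
	if (!item)
		return NULL;

	/* the config_item sits inside the embedded config_group, which in
	 * turn sits inside struct nullb_device */
	return container_of(to_config_group(item), struct nullb_device, group);
}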
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
deleted file mode 100644
index a295634597ba..000000000000
--- a/drivers/block/paride/Kconfig
+++ /dev/null
@@ -1,302 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# PARIDE configuration
-#
-# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
-# PARIDE must also be a module.
-# PARIDE only supports PC style parports. Tough for USB or other parports...
-
-comment "Parallel IDE high-level drivers"
- depends on PARIDE
-
-config PARIDE_PD
- tristate "Parallel port IDE disks"
- depends on PARIDE
- help
- This option enables the high-level driver for IDE-type disk devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port IDE driver, otherwise you should answer M to build
- it as a loadable module. The module will be called pd. You
- must also have at least one parallel port protocol driver in your
- system. Among the devices supported by this driver are the SyQuest
- EZ-135, EZ-230 and SparQ drives, the Avatar Shark and the backpack
- hard drives from MicroSolutions.
-
-config PARIDE_PCD
- tristate "Parallel port ATAPI CD-ROMs"
- depends on PARIDE
- select CDROM
- help
- This option enables the high-level driver for ATAPI CD-ROM devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port ATAPI CD-ROM driver, otherwise you should answer M to
- build it as a loadable module. The module will be called pcd. You
- must also have at least one parallel port protocol driver in your
- system. Among the devices supported by this driver are the
- MicroSolutions backpack CD-ROM drives and the Freecom Power CD. If
- you have such a CD-ROM drive, you should also say Y or M to "ISO
- 9660 CD-ROM file system support" below, because that's the file
- system used on CD-ROMs.
-
-config PARIDE_PF
- tristate "Parallel port ATAPI disks"
- depends on PARIDE
- help
- This option enables the high-level driver for ATAPI disk devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port ATAPI disk driver, otherwise you should answer M
- to build it as a loadable module. The module will be called pf.
- You must also have at least one parallel port protocol driver in
- your system. Among the devices supported by this driver are the
- MicroSolutions backpack PD/CD drive and the Imation Superdisk
- LS-120 drive.
-
-config PARIDE_PT
- tristate "Parallel port ATAPI tapes"
- depends on PARIDE
- help
- This option enables the high-level driver for ATAPI tape devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port ATAPI disk driver, otherwise you should answer M
- to build it as a loadable module. The module will be called pt.
- You must also have at least one parallel port protocol driver in
- your system. Among the devices supported by this driver is the
- parallel port version of the HP 5GB drive.
-
-config PARIDE_PG
- tristate "Parallel port generic ATAPI devices"
- depends on PARIDE
- help
- This option enables a special high-level driver for generic ATAPI
- devices connected through a parallel port. The driver allows user
- programs, such as cdrtools, to send ATAPI commands directly to a
- device.
-
- If you chose to build PARIDE support into your kernel, you may
- answer Y here to build in the parallel port generic ATAPI driver,
- otherwise you should answer M to build it as a loadable module. The
- module will be called pg.
-
- You must also have at least one parallel port protocol driver in
- your system.
-
- This driver implements an API loosely related to the generic SCSI
- driver. See <file:include/linux/pg.h>. for details.
-
- You can obtain the most recent version of cdrtools from
- <ftp://ftp.berlios.de/pub/cdrecord/>. Versions 1.6.1a3 and
- later fully support this driver.
-
-comment "Parallel IDE protocol modules"
- depends on PARIDE
-
-config PARIDE_ATEN
- tristate "ATEN EH-100 protocol"
- depends on PARIDE
- help
- This option enables support for the ATEN EH-100 parallel port IDE
- protocol. This protocol is used in some inexpensive low performance
- parallel port kits made in Hong Kong. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- protocol driver, otherwise you should answer M to build it as a
- loadable module. The module will be called aten. You must also
- have a high-level driver for the type of device that you want to
- support.
-
-config PARIDE_BPCK
- tristate "MicroSolutions backpack (Series 5) protocol"
- depends on PARIDE
- help
- This option enables support for the Micro Solutions BACKPACK
- parallel port Series 5 IDE protocol. (Most BACKPACK drives made
- before 1999 were Series 5) Series 5 drives will NOT always have the
- Series noted on the bottom of the drive. Series 6 drivers will.
-
- In other words, if your BACKPACK drive doesn't say "Series 6" on the
- bottom, enable this option.
-
- If you chose to build PARIDE support into your kernel, you may
- answer Y here to build in the protocol driver, otherwise you should
- answer M to build it as a loadable module. The module will be
- called bpck. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_BPCK6
- tristate "MicroSolutions backpack (Series 6) protocol"
- depends on PARIDE && !64BIT
- help
- This option enables support for the Micro Solutions BACKPACK
- parallel port Series 6 IDE protocol. (Most BACKPACK drives made
- after 1999 were Series 6) Series 6 drives will have the Series noted
- on the bottom of the drive. Series 5 drives don't always have it
- noted.
-
- In other words, if your BACKPACK drive says "Series 6" on the
- bottom, enable this option.
-
- If you chose to build PARIDE support into your kernel, you may
- answer Y here to build in the protocol driver, otherwise you should
- answer M to build it as a loadable module. The module will be
- called bpck6. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_COMM
- tristate "DataStor Commuter protocol"
- depends on PARIDE
- help
- This option enables support for the Commuter parallel port IDE
- protocol from DataStor. If you chose to build PARIDE support
- into your kernel, you may answer Y here to build in the protocol
- driver, otherwise you should answer M to build it as a loadable
- module. The module will be called comm. You must also have
- a high-level driver for the type of device that you want to support.
-
-config PARIDE_DSTR
- tristate "DataStor EP-2000 protocol"
- depends on PARIDE
- help
- This option enables support for the EP-2000 parallel port IDE
- protocol from DataStor. If you chose to build PARIDE support
- into your kernel, you may answer Y here to build in the protocol
- driver, otherwise you should answer M to build it as a loadable
- module. The module will be called dstr. You must also have
- a high-level driver for the type of device that you want to support.
-
-config PARIDE_FIT2
- tristate "FIT TD-2000 protocol"
- depends on PARIDE
- help
- This option enables support for the TD-2000 parallel port IDE
- protocol from Fidelity International Technology. This is a simple
- (low speed) adapter that is used in some portable hard drives. If
- you chose to build PARIDE support into your kernel, you may answer Y
- here to build in the protocol driver, otherwise you should answer M
- to build it as a loadable module. The module will be called fit2.
- You must also have a high-level driver for the type of device that
- you want to support.
-
-config PARIDE_FIT3
- tristate "FIT TD-3000 protocol"
- depends on PARIDE
- help
- This option enables support for the TD-3000 parallel port IDE
- protocol from Fidelity International Technology. This protocol is
- used in newer models of their portable disk, CD-ROM and PD/CD
- devices. If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called fit3. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_EPAT
- tristate "Shuttle EPAT/EPEZ protocol"
- depends on PARIDE
- help
- This option enables support for the EPAT parallel port IDE protocol.
- EPAT is a parallel port IDE adapter manufactured by Shuttle
- Technology and widely used in devices from major vendors such as
- Hewlett-Packard, SyQuest, Imation and Avatar. If you chose to build
- PARIDE support into your kernel, you may answer Y here to build in
- the protocol driver, otherwise you should answer M to build it as a
- loadable module. The module will be called epat. You must also
- have a high-level driver for the type of device that you want to
- support.
-
-config PARIDE_EPATC8
- bool "Support c7/c8 chips"
- depends on PARIDE_EPAT
- help
- This option enables support for the newer Shuttle EP1284 (aka c7 and
- c8) chip. You need this if you are using any recent Imation SuperDisk
- (LS-120) drive.
-
-config PARIDE_EPIA
- tristate "Shuttle EPIA protocol"
- depends on PARIDE
- help
- This option enables support for the (obsolete) EPIA parallel port
- IDE protocol from Shuttle Technology. This adapter can still be
- found in some no-name kits. If you chose to build PARIDE support
- into your kernel, you may answer Y here to build in the protocol
- driver, otherwise you should answer M to build it as a loadable
- module. The module will be called epia. You must also have a
- high-level driver for the type of device that you want to support.
-
-config PARIDE_FRIQ
- tristate "Freecom IQ ASIC-2 protocol"
- depends on PARIDE
- help
- This option enables support for version 2 of the Freecom IQ parallel
- port IDE adapter. This adapter is used by the Maxell Superdisk
- drive. If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called friq. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_FRPW
- tristate "FreeCom power protocol"
- depends on PARIDE
- help
- This option enables support for the Freecom power parallel port IDE
- protocol. If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called frpw. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_KBIC
- tristate "KingByte KBIC-951A/971A protocols"
- depends on PARIDE
- help
- This option enables support for the KBIC-951A and KBIC-971A parallel
- port IDE protocols from KingByte Information Corp. KingByte's
- adapters appear in many no-name portable disk and CD-ROM products,
- especially in Europe. If you chose to build PARIDE support into your
- kernel, you may answer Y here to build in the protocol driver,
- otherwise you should answer M to build it as a loadable module. The
- module will be called kbic. You must also have a high-level driver
- for the type of device that you want to support.
-
-config PARIDE_KTTI
- tristate "KT PHd protocol"
- depends on PARIDE
- help
- This option enables support for the "PHd" parallel port IDE protocol
- from KT Technology. This is a simple (low speed) adapter that is
- used in some 2.5" portable hard drives. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- protocol driver, otherwise you should answer M to build it as a
- loadable module. The module will be called ktti. You must also
- have a high-level driver for the type of device that you want to
- support.
-
-config PARIDE_ON20
- tristate "OnSpec 90c20 protocol"
- depends on PARIDE
- help
- This option enables support for the (obsolete) 90c20 parallel port
- IDE protocol from OnSpec (often marketed under the ValuStore brand
- name). If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will
- be called on20. You must also have a high-level driver for the
- type of device that you want to support.
-
-config PARIDE_ON26
- tristate "OnSpec 90c26 protocol"
- depends on PARIDE
- help
- This option enables support for the 90c26 parallel port IDE protocol
- from OnSpec Electronics (often marketed under the ValuStore brand
- name). If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called on26. You must also have a high-level driver for the type
- of device that you want to support.
-
-#
diff --git a/drivers/block/paride/Makefile b/drivers/block/paride/Makefile
deleted file mode 100644
index cf1742a8475e..000000000000
--- a/drivers/block/paride/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Parallel port IDE device drivers.
-#
-# 7 October 2000, Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
-# Rewritten to use lists instead of if-statements.
-#
-
-obj-$(CONFIG_PARIDE) += paride.o
-obj-$(CONFIG_PARIDE_ATEN) += aten.o
-obj-$(CONFIG_PARIDE_BPCK) += bpck.o
-obj-$(CONFIG_PARIDE_COMM) += comm.o
-obj-$(CONFIG_PARIDE_DSTR) += dstr.o
-obj-$(CONFIG_PARIDE_KBIC) += kbic.o
-obj-$(CONFIG_PARIDE_EPAT) += epat.o
-obj-$(CONFIG_PARIDE_EPIA) += epia.o
-obj-$(CONFIG_PARIDE_FRPW) += frpw.o
-obj-$(CONFIG_PARIDE_FRIQ) += friq.o
-obj-$(CONFIG_PARIDE_FIT2) += fit2.o
-obj-$(CONFIG_PARIDE_FIT3) += fit3.o
-obj-$(CONFIG_PARIDE_ON20) += on20.o
-obj-$(CONFIG_PARIDE_ON26) += on26.o
-obj-$(CONFIG_PARIDE_KTTI) += ktti.o
-obj-$(CONFIG_PARIDE_BPCK6) += bpck6.o
-obj-$(CONFIG_PARIDE_PD) += pd.o
-obj-$(CONFIG_PARIDE_PCD) += pcd.o
-obj-$(CONFIG_PARIDE_PF) += pf.o
-obj-$(CONFIG_PARIDE_PT) += pt.o
-obj-$(CONFIG_PARIDE_PG) += pg.o
diff --git a/drivers/block/paride/Transition-notes b/drivers/block/paride/Transition-notes
deleted file mode 100644
index 70374907c020..000000000000
--- a/drivers/block/paride/Transition-notes
+++ /dev/null
@@ -1,128 +0,0 @@
-Lemma 1:
- If ps_tq is scheduled, ps_tq_active is 1. ps_tq_int() can be called
- only when ps_tq_active is 1.
-Proof: All assignments to ps_tq_active and all scheduling of ps_tq happen
- under ps_spinlock. There are three places where that can happen:
- one in ps_set_intr() (A) and two in ps_tq_int() (B and C).
- Consider the sequence of these events. A can not be preceded by
- anything except B, since it is under if (!ps_tq_active) under
- ps_spinlock. C is always preceded by B, since we can't reach it
- other than through B and we don't drop ps_spinlock between them.
- IOW, the sequence is A?(BA|BC|B)*. OTOH, number of B can not exceed
- the sum of numbers of A and C, since each call of ps_tq_int() is
- the result of ps_tq execution. Therefore, the sequence starts with
- A and each B is preceded by either A or C. Moments when we enter
- ps_tq_int() are sandwiched between {A,C} and B in that sequence,
- since at any time number of B can not exceed the number of these
- moments which, in turn, can not exceed the number of A and C.
- In other words, the sequence of events is (A or C set ps_tq_active to
- 1 and schedule ps_tq, ps_tq is executed, ps_tq_int() is entered,
- B resets ps_tq_active)*.
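(For readers less familiar with the code, here is a minimal sketch of the handshake the lemma reasons about. It is not the driver's actual code and the delayed-work plumbing is only illustrative; the point is that ps_tq_active is written, and ps_tq scheduled, only under ps_spinlock, and that the work function clears the flag (B) before possibly setting it again and rescheduling (C).)

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void ps_tq_int(struct work_struct *work);

static DEFINE_SPINLOCK(ps_spinlock);
static int ps_tq_active;
static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);

static void ps_set_intr(void)                   /* "A" in the proof */
{
        unsigned long flags;

        spin_lock_irqsave(&ps_spinlock, flags);
        if (!ps_tq_active) {
                ps_tq_active = 1;
                schedule_delayed_work(&ps_tq, 1);
        }
        spin_unlock_irqrestore(&ps_spinlock, flags);
}

static void ps_tq_int(struct work_struct *work)
{
        unsigned long flags;
        int done = 0;                           /* stand-in for the real completion test */

        spin_lock_irqsave(&ps_spinlock, flags);
        ps_tq_active = 0;                       /* "B": reset before deciding what's next */
        if (!done) {
                ps_tq_active = 1;               /* "C": go around again */
                schedule_delayed_work(&ps_tq, 1);
        }
        spin_unlock_irqrestore(&ps_spinlock, flags);
}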
-
-
-Consider the following area:
- * in do_pd_request1(): to calls of pi_do_claimed() and return in
- case when pd_req is NULL.
- * in next_request(): to call of do_pd_request1()
- * in do_pd_read(): to call of ps_set_intr()
- * in do_pd_read_start(): to calls of pi_do_claimed(), next_request()
-and ps_set_intr()
- * in do_pd_read_drq(): to calls of pi_do_claimed() and next_request()
- * in do_pd_write(): to call of ps_set_intr()
- * in do_pd_write_start(): to calls of pi_do_claimed(), next_request()
-and ps_set_intr()
- * in do_pd_write_done(): to calls of pi_do_claimed() and next_request()
- * in ps_set_intr(): to check for ps_tq_active and to scheduling
- ps_tq if ps_tq_active was 0.
- * in ps_tq_int(): from the moment when we acquire ps_spinlock to the
- return, call of con() or scheduling ps_tq.
- * in pi_schedule_claimed() when called from pi_do_claimed() called from
- pd.c, everything until returning 1 or setting ->claim_cont
- on the path that returns 0
- * in pi_do_claimed() when called from pd.c, everything until the call
- of pi_schedule_claimed(), plus everything until the call of cont() if
- pi_schedule_claimed() has returned 1.
- * in pi_wake_up() called for PIA that belongs to pd.c, everything from
- the moment when pi_spinlock has been acquired.
-
-Lemma 2:
- 1) at any time at most one thread of execution can be in that area or
- be preempted there.
- 2) When there is such a thread, pd_busy is set or pd_lock is held by
- that thread.
- 3) When there is such a thread, ps_tq_active is 0 or ps_spinlock is
- held by that thread.
- 4) When there is such a thread, all PIA belonging to pd.c have NULL
- ->claim_cont or pi_spinlock is held by thread in question.
-
-Proof: consider the first moment when the above is not true.
-
-(1) can become not true if some thread enters that area while another is there.
- a) do_pd_request1() can be called from next_request() or do_pd_request()
- In the first case the thread was already in the area. In the second,
- the thread was holding pd_lock and found pd_busy not set, which would
- mean that (2) was already not true.
- b) ps_set_intr() and pi_schedule_claimed() can be called only from the
- area.
- c) pi_do_claimed() is called by pd.c only from the area.
- d) ps_tq_int() can enter the area only when the thread is holding
- ps_spinlock and ps_tq_active is 1 (due to Lemma 1). It means that
- (3) was already not true.
- e) do_pd_{read,write}* could be called only from the area. The only
- case that needs consideration is call from pi_wake_up() and there
- we would have to be called for the PIA that got ->claim_cont
- from pd.c. That could happen only if pi_do_claimed() had been
- called from pd.c for that PIA, which happens only for PIA belonging
- to pd.c.
- f) pi_wake_up() can enter the area only when the thread is holding
- pi_spinlock and ->claim_cont is non-NULL for PIA belonging to
- pd.c. It means that (4) was already not true.
-
-(2) can become not true only when pd_lock is released by the thread in question.
- Indeed, pd_busy is reset only in the area and thread that resets
- it is holding pd_lock. The only place within the area where we
- release pd_lock is in pd_next_buf() (called from within the area).
- But that code does not reset pd_busy, so pd_busy would have to be
- 0 when pd_next_buf() had acquired pd_lock. If it became 0 while
- we were acquiring the lock, (1) would be already false, since
- the thread that had reset it would be in the area simultaneously.
- If it was 0 before we tried to acquire pd_lock, (2) would be
- already false.
-
-For similar reasons, (3) can become not true only when ps_spinlock is released
-by the thread in question. However, all such places within the area are right
-after resetting ps_tq_active to 0.
-
-(4) is done the same way - all places where we release pi_spinlock within
-the area are either after resetting ->claim_cont to NULL while holding
-pi_spinlock, or after not touching ->claim_cont since acquiring pi_spinlock
-also in the area. The only place where ->claim_cont is made non-NULL is
-in the area, under pi_spinlock and we do not release it until after leaving
-the area.
-
-QED.
-
-
-Corollary 1: ps_tq_active can be killed. Indeed, the only place where we
-check its value is in ps_set_intr() and if it had been non-zero at that
-point, we would have violated either (2.1) (if it was set while ps_set_intr()
-was acquiring ps_spinlock) or (2.3) (if it was set when we started to
-acquire ps_spinlock).
-
-Corollary 2: ps_spinlock can be killed. Indeed, Lemma 1 and Lemma 2 show
-that the only possible contention is between scheduling ps_tq followed by
-immediate release of spinlock and beginning of execution of ps_tq on
-another CPU.
-
-Corollary 3: assignment to pd_busy in do_pd_read_start() and do_pd_write_start()
-can be killed. Indeed, we are not holding pd_lock and thus pd_busy is already
-1 here.
-
-Corollary 4: in ps_tq_int() uses of con can be replaced with uses of
-ps_continuation, since the latter is changed only from the area.
-We don't need to reset it to NULL, since we are guaranteed that there
-will be a call of ps_set_intr() before we look at ps_continuation again.
-We can remove the check for ps_continuation being NULL for the same
-reason - the value is guaranteed to be set by the last ps_set_intr() and
-we never pass it NULL. Assignments in the beginning of ps_set_intr()
-can be taken to callers as long as they remain within the area.
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
deleted file mode 100644
index ec64e7f5d1ce..000000000000
--- a/drivers/block/paride/bpck6.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- backpack.c (c) 2001 Micro Solutions Inc.
- Released under the terms of the GNU General Public license
-
- backpack.c is a low-level protocol driver for the Micro Solutions
- "BACKPACK" parallel port IDE adapter
- (Works on Series 6 drives)
-
- Written by: Ken Hahn (linux-dev@micro-solutions.com)
- Clive Turvey (linux-dev@micro-solutions.com)
-
-*/
-
-/*
- This is Ken's linux wrapper for the PPC library
- Version 1.0.0 is the backpack driver for which source is not available
- Version 2.0.0 is the first to have source released
- Version 2.0.1 is the "Cox-ified" source code
- Version 2.0.2 - fixed version string usage, and made ppc functions static
-*/
-
-
-#define BACKPACK_VERSION "2.0.2"
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <linux/parport.h>
-
-#include "ppc6lnx.c"
-#include "paride.h"
-
-/* PARAMETERS */
-static bool verbose; /* set this to 1 to see debugging messages and whatnot */
-
-
-#define PPCSTRUCT(pi) ((Interface *)(pi->private))
-
-/****************************************************************/
-/*
- ATAPI CDROM DRIVE REGISTERS
-*/
-#define ATAPI_DATA 0 /* data port */
-#define ATAPI_ERROR 1 /* error register (read) */
-#define ATAPI_FEATURES 1 /* feature register (write) */
-#define ATAPI_INT_REASON 2 /* interrupt reason register */
-#define ATAPI_COUNT_LOW 4 /* byte count register (low) */
-#define ATAPI_COUNT_HIGH 5 /* byte count register (high) */
-#define ATAPI_DRIVE_SEL 6 /* drive select register */
-#define ATAPI_STATUS 7 /* status port (read) */
-#define ATAPI_COMMAND 7 /* command port (write) */
-#define ATAPI_ALT_STATUS 0x0e /* alternate status reg (read) */
-#define ATAPI_DEVICE_CONTROL 0x0e /* device control (write) */
-/****************************************************************/
-
-static int bpck6_read_regr(PIA *pi, int cont, int reg)
-{
- unsigned int out;
-
- /* check for bad settings */
- if (reg<0 || reg>7 || cont<0 || cont>2)
- {
- return(-1);
- }
- out=ppc6_rd_port(PPCSTRUCT(pi),cont?reg|8:reg);
- return(out);
-}
-
-static void bpck6_write_regr(PIA *pi, int cont, int reg, int val)
-{
- /* check for bad settings */
- if (reg>=0 && reg<=7 && cont>=0 && cont<=1)
- {
- ppc6_wr_port(PPCSTRUCT(pi),cont?reg|8:reg,(u8)val);
- }
-}
-
-static void bpck6_write_block( PIA *pi, char * buf, int len )
-{
- ppc6_wr_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1);
-}
-
-static void bpck6_read_block( PIA *pi, char * buf, int len )
-{
- ppc6_rd_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1);
-}
-
-static void bpck6_connect ( PIA *pi )
-{
- if(verbose)
- {
- printk(KERN_DEBUG "connect\n");
- }
-
- if(pi->mode >=2)
- {
- PPCSTRUCT(pi)->mode=4+pi->mode-2;
- }
- else if(pi->mode==1)
- {
- PPCSTRUCT(pi)->mode=3;
- }
- else
- {
- PPCSTRUCT(pi)->mode=1;
- }
-
- ppc6_open(PPCSTRUCT(pi));
- ppc6_wr_extout(PPCSTRUCT(pi),0x3);
-}
-
-static void bpck6_disconnect ( PIA *pi )
-{
- if(verbose)
- {
- printk("disconnect\n");
- }
- ppc6_wr_extout(PPCSTRUCT(pi),0x0);
- ppc6_close(PPCSTRUCT(pi));
-}
-
-static int bpck6_test_port ( PIA *pi ) /* check for 8-bit port */
-{
- if(verbose)
- {
- printk(KERN_DEBUG "PARPORT indicates modes=%x for lp=0x%lx\n",
- ((struct pardevice*)(pi->pardev))->port->modes,
- ((struct pardevice *)(pi->pardev))->port->base);
- }
-
- /*copy over duplicate stuff.. initialize state info*/
- PPCSTRUCT(pi)->ppc_id=pi->unit;
- PPCSTRUCT(pi)->lpt_addr=pi->port;
-
- /* look at the parport device to see what modes we can use */
- if(((struct pardevice *)(pi->pardev))->port->modes &
- (PARPORT_MODE_EPP)
- )
- {
- return 5; /* Can do EPP*/
- }
- else if(((struct pardevice *)(pi->pardev))->port->modes &
- (PARPORT_MODE_TRISTATE)
- )
- {
- return 2;
- }
- else /*Just flat SPP*/
- {
- return 1;
- }
-}
-
-static int bpck6_probe_unit ( PIA *pi )
-{
- int out;
-
- if(verbose)
- {
- printk(KERN_DEBUG "PROBE UNIT %x on port:%x\n",pi->unit,pi->port);
- }
-
- /*SET PPC UNIT NUMBER*/
- PPCSTRUCT(pi)->ppc_id=pi->unit;
-
- /*LOWER DOWN TO UNIDIRECTIONAL*/
- PPCSTRUCT(pi)->mode=1;
-
- out=ppc6_open(PPCSTRUCT(pi));
-
- if(verbose)
- {
- printk(KERN_DEBUG "ppc_open returned %2x\n",out);
- }
-
- if(out)
- {
- ppc6_close(PPCSTRUCT(pi));
- if(verbose)
- {
- printk(KERN_DEBUG "leaving probe\n");
- }
- return(1);
- }
- else
- {
- if(verbose)
- {
- printk(KERN_DEBUG "Failed open\n");
- }
- return(0);
- }
-}
-
-static void bpck6_log_adapter( PIA *pi, char * scratch, int verbose )
-{
- char *mode_string[5]=
- {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
-
- printk("%s: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n",pi->device);
- printk("%s: Copyright 2001 by Micro Solutions, Inc., DeKalb IL.\n",pi->device);
- printk("%s: BACKPACK %s, Micro Solutions BACKPACK Drive at 0x%x\n",
- pi->device,BACKPACK_VERSION,pi->port);
- printk("%s: Unit: %d Mode:%d (%s) Delay %d\n",pi->device,
- pi->unit,pi->mode,mode_string[pi->mode],pi->delay);
-}
-
-static int bpck6_init_proto(PIA *pi)
-{
- Interface *p = kzalloc(sizeof(Interface), GFP_KERNEL);
-
- if (p) {
- pi->private = (unsigned long)p;
- return 0;
- }
-
- printk(KERN_ERR "%s: ERROR COULDN'T ALLOCATE MEMORY\n", pi->device);
- return -1;
-}
-
-static void bpck6_release_proto(PIA *pi)
-{
- kfree((void *)(pi->private));
-}
-
-static struct pi_protocol bpck6 = {
- .owner = THIS_MODULE,
- .name = "bpck6",
- .max_mode = 5,
- .epp_first = 2, /* 2-5 use epp (need 8 ports) */
- .max_units = 255,
- .write_regr = bpck6_write_regr,
- .read_regr = bpck6_read_regr,
- .write_block = bpck6_write_block,
- .read_block = bpck6_read_block,
- .connect = bpck6_connect,
- .disconnect = bpck6_disconnect,
- .test_port = bpck6_test_port,
- .probe_unit = bpck6_probe_unit,
- .log_adapter = bpck6_log_adapter,
- .init_proto = bpck6_init_proto,
- .release_proto = bpck6_release_proto,
-};
-
-static int __init bpck6_init(void)
-{
- printk(KERN_INFO "bpck6: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n");
- printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n");
- if(verbose)
- printk(KERN_DEBUG "bpck6: verbose debug enabled.\n");
- return paride_register(&bpck6);
-}
-
-static void __exit bpck6_exit(void)
-{
- paride_unregister(&bpck6);
-}
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Micro Solutions Inc.");
-MODULE_DESCRIPTION("BACKPACK Protocol module, compatible with PARIDE");
-module_param(verbose, bool, 0644);
-module_init(bpck6_init)
-module_exit(bpck6_exit)
diff --git a/drivers/block/paride/mkd b/drivers/block/paride/mkd
deleted file mode 100644
index 6d0d802479ea..000000000000
--- a/drivers/block/paride/mkd
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# mkd -- a script to create the device special files for the PARIDE subsystem
-#
-# block devices: pd (45), pcd (46), pf (47)
-# character devices: pt (96), pg (97)
-#
-function mkdev {
- mknod $1 $2 $3 $4 ; chmod 0660 $1 ; chown root:disk $1
-}
-#
-function pd {
- D=$( printf \\$( printf "x%03x" $[ $1 + 97 ] ) )
- mkdev pd$D b 45 $[ $1 * 16 ]
- for P in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
- do mkdev pd$D$P b 45 $[ $1 * 16 + $P ]
- done
-}
-#
-cd /dev
-#
-for u in 0 1 2 3 ; do pd $u ; done
-for u in 0 1 2 3 ; do mkdev pcd$u b 46 $u ; done
-for u in 0 1 2 3 ; do mkdev pf$u b 47 $u ; done
-for u in 0 1 2 3 ; do mkdev pt$u c 96 $u ; done
-for u in 0 1 2 3 ; do mkdev npt$u c 96 $[ $u + 128 ] ; done
-for u in 0 1 2 3 ; do mkdev pg$u c 97 $u ; done
-#
-# end of mkd
-
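(Worked example of the minor-number scheme the script encodes: pd units get 16 minors each on block major 45, so partition 3 of the second drive is pdb3 at minor 1 * 16 + 3 = 19; pcd/pf/pt/pg units map one-to-one to minors on majors 46, 47, 96 and 97, with the no-rewind npt nodes at minor + 128 on character major 96.)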
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
deleted file mode 100644
index 0e287993b778..000000000000
--- a/drivers/block/paride/paride.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- paride.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the base module for the family of device drivers
- that support parallel port IDE devices.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.05.03 Use spinlocks
- 1.02 GRG 1998.05.05 init_proto, release_proto, ktti
- 1.03 GRG 1998.08.15 eliminate compiler warning
- 1.04 GRG 1998.11.28 added support for FRIQ
- 1.05 TMW 2000.06.06 use parport_find_number instead of
- parport_enumerate
- 1.06 TMW 2001.03.26 more sane parport-or-not resource management
-*/
-
-#define PI_VERSION "1.06"
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/sched.h> /* TASK_* */
-#include <linux/parport.h>
-#include <linux/slab.h>
-
-#include "paride.h"
-
-MODULE_LICENSE("GPL");
-
-#define MAX_PROTOS 32
-
-static struct pi_protocol *protocols[MAX_PROTOS];
-
-static DEFINE_SPINLOCK(pi_spinlock);
-
-void pi_write_regr(PIA * pi, int cont, int regr, int val)
-{
- pi->proto->write_regr(pi, cont, regr, val);
-}
-
-EXPORT_SYMBOL(pi_write_regr);
-
-int pi_read_regr(PIA * pi, int cont, int regr)
-{
- return pi->proto->read_regr(pi, cont, regr);
-}
-
-EXPORT_SYMBOL(pi_read_regr);
-
-void pi_write_block(PIA * pi, char *buf, int count)
-{
- pi->proto->write_block(pi, buf, count);
-}
-
-EXPORT_SYMBOL(pi_write_block);
-
-void pi_read_block(PIA * pi, char *buf, int count)
-{
- pi->proto->read_block(pi, buf, count);
-}
-
-EXPORT_SYMBOL(pi_read_block);
-
-static void pi_wake_up(void *p)
-{
- PIA *pi = (PIA *) p;
- unsigned long flags;
- void (*cont) (void) = NULL;
-
- spin_lock_irqsave(&pi_spinlock, flags);
-
- if (pi->claim_cont && !parport_claim(pi->pardev)) {
- cont = pi->claim_cont;
- pi->claim_cont = NULL;
- pi->claimed = 1;
- }
-
- spin_unlock_irqrestore(&pi_spinlock, flags);
-
- wake_up(&(pi->parq));
-
- if (cont)
- cont();
-}
-
-int pi_schedule_claimed(PIA * pi, void (*cont) (void))
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pi_spinlock, flags);
- if (pi->pardev && parport_claim(pi->pardev)) {
- pi->claim_cont = cont;
- spin_unlock_irqrestore(&pi_spinlock, flags);
- return 0;
- }
- pi->claimed = 1;
- spin_unlock_irqrestore(&pi_spinlock, flags);
- return 1;
-}
-EXPORT_SYMBOL(pi_schedule_claimed);
-
-void pi_do_claimed(PIA * pi, void (*cont) (void))
-{
- if (pi_schedule_claimed(pi, cont))
- cont();
-}
-
-EXPORT_SYMBOL(pi_do_claimed);
-
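(A hedged sketch of how a high-level driver is meant to use this; the names my_start/my_issue_request are hypothetical, but the shape matches pcd.c further down, where pcd_request() hands pcd_start to pi_do_claimed(). The caller never blocks on the parallel port: if parport_claim() fails, the continuation is parked in ->claim_cont and pi_wake_up() invokes it once the port becomes free.)

#include "paride.h"

static void my_start(void)              /* continuation: runs with the port claimed */
{
        /* safe to touch the adapter registers here */
}

static void my_issue_request(PIA *pi)
{
        /* runs my_start() immediately if the port could be claimed,
           otherwise defers it until pi_wake_up() fires */
        pi_do_claimed(pi, my_start);
}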
-static void pi_claim(PIA * pi)
-{
- if (pi->claimed)
- return;
- pi->claimed = 1;
- if (pi->pardev)
- wait_event(pi->parq,
- !parport_claim((struct pardevice *) pi->pardev));
-}
-
-static void pi_unclaim(PIA * pi)
-{
- pi->claimed = 0;
- if (pi->pardev)
- parport_release((struct pardevice *) (pi->pardev));
-}
-
-void pi_connect(PIA * pi)
-{
- pi_claim(pi);
- pi->proto->connect(pi);
-}
-
-EXPORT_SYMBOL(pi_connect);
-
-void pi_disconnect(PIA * pi)
-{
- pi->proto->disconnect(pi);
- pi_unclaim(pi);
-}
-
-EXPORT_SYMBOL(pi_disconnect);
-
-static void pi_unregister_parport(PIA * pi)
-{
- if (pi->pardev) {
- parport_unregister_device((struct pardevice *) (pi->pardev));
- pi->pardev = NULL;
- }
-}
-
-void pi_release(PIA * pi)
-{
- pi_unregister_parport(pi);
- if (pi->proto->release_proto)
- pi->proto->release_proto(pi);
- module_put(pi->proto->owner);
-}
-
-EXPORT_SYMBOL(pi_release);
-
-static int default_test_proto(PIA * pi, char *scratch, int verbose)
-{
- int j, k;
- int e[2] = { 0, 0 };
-
- pi->proto->connect(pi);
-
- for (j = 0; j < 2; j++) {
- pi_write_regr(pi, 0, 6, 0xa0 + j * 0x10);
- for (k = 0; k < 256; k++) {
- pi_write_regr(pi, 0, 2, k ^ 0xaa);
- pi_write_regr(pi, 0, 3, k ^ 0x55);
- if (pi_read_regr(pi, 0, 2) != (k ^ 0xaa))
- e[j]++;
- }
- }
- pi->proto->disconnect(pi);
-
- if (verbose)
- printk("%s: %s: port 0x%x, mode %d, test=(%d,%d)\n",
- pi->device, pi->proto->name, pi->port,
- pi->mode, e[0], e[1]);
-
- return (e[0] && e[1]); /* not here if both > 0 */
-}
-
-static int pi_test_proto(PIA * pi, char *scratch, int verbose)
-{
- int res;
-
- pi_claim(pi);
- if (pi->proto->test_proto)
- res = pi->proto->test_proto(pi, scratch, verbose);
- else
- res = default_test_proto(pi, scratch, verbose);
- pi_unclaim(pi);
-
- return res;
-}
-
-int paride_register(PIP * pr)
-{
- int k;
-
- for (k = 0; k < MAX_PROTOS; k++)
- if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
- printk("paride: %s protocol already registered\n",
- pr->name);
- return -1;
- }
- k = 0;
- while ((k < MAX_PROTOS) && (protocols[k]))
- k++;
- if (k == MAX_PROTOS) {
- printk("paride: protocol table full\n");
- return -1;
- }
- protocols[k] = pr;
- pr->index = k;
- printk("paride: %s registered as protocol %d\n", pr->name, k);
- return 0;
-}
-
-EXPORT_SYMBOL(paride_register);
-
-void paride_unregister(PIP * pr)
-{
- if (!pr)
- return;
- if (protocols[pr->index] != pr) {
- printk("paride: %s not registered\n", pr->name);
- return;
- }
- protocols[pr->index] = NULL;
-}
-
-EXPORT_SYMBOL(paride_unregister);
-
-static int pi_register_parport(PIA *pi, int verbose, int unit)
-{
- struct parport *port;
- struct pardev_cb par_cb;
-
- port = parport_find_base(pi->port);
- if (!port)
- return 0;
- memset(&par_cb, 0, sizeof(par_cb));
- par_cb.wakeup = pi_wake_up;
- par_cb.private = (void *)pi;
- pi->pardev = parport_register_dev_model(port, pi->device, &par_cb,
- unit);
- parport_put_port(port);
- if (!pi->pardev)
- return 0;
-
- init_waitqueue_head(&pi->parq);
-
- if (verbose)
- printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
-
- pi->parname = (char *) port->name;
-
- return 1;
-}
-
-static int pi_probe_mode(PIA * pi, int max, char *scratch, int verbose)
-{
- int best, range;
-
- if (pi->mode != -1) {
- if (pi->mode >= max)
- return 0;
- range = 3;
- if (pi->mode >= pi->proto->epp_first)
- range = 8;
- if ((range == 8) && (pi->port % 8))
- return 0;
- pi->reserved = range;
- return (!pi_test_proto(pi, scratch, verbose));
- }
- best = -1;
- for (pi->mode = 0; pi->mode < max; pi->mode++) {
- range = 3;
- if (pi->mode >= pi->proto->epp_first)
- range = 8;
- if ((range == 8) && (pi->port % 8))
- break;
- pi->reserved = range;
- if (!pi_test_proto(pi, scratch, verbose))
- best = pi->mode;
- }
- pi->mode = best;
- return (best > -1);
-}
-
-static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose)
-{
- int max, s, e;
-
- s = unit;
- e = s + 1;
-
- if (s == -1) {
- s = 0;
- e = pi->proto->max_units;
- }
-
- if (!pi_register_parport(pi, verbose, s))
- return 0;
-
- if (pi->proto->test_port) {
- pi_claim(pi);
- max = pi->proto->test_port(pi);
- pi_unclaim(pi);
- } else
- max = pi->proto->max_mode;
-
- if (pi->proto->probe_unit) {
- pi_claim(pi);
- for (pi->unit = s; pi->unit < e; pi->unit++)
- if (pi->proto->probe_unit(pi)) {
- pi_unclaim(pi);
- if (pi_probe_mode(pi, max, scratch, verbose))
- return 1;
- pi_unregister_parport(pi);
- return 0;
- }
- pi_unclaim(pi);
- pi_unregister_parport(pi);
- return 0;
- }
-
- if (!pi_probe_mode(pi, max, scratch, verbose)) {
- pi_unregister_parport(pi);
- return 0;
- }
- return 1;
-
-}
-
-int pi_init(PIA * pi, int autoprobe, int port, int mode,
- int unit, int protocol, int delay, char *scratch,
- int devtype, int verbose, char *device)
-{
- int p, k, s, e;
- int lpts[7] = { 0x3bc, 0x378, 0x278, 0x268, 0x27c, 0x26c, 0 };
-
- s = protocol;
- e = s + 1;
-
- if (!protocols[0])
- request_module("paride_protocol");
-
- if (autoprobe) {
- s = 0;
- e = MAX_PROTOS;
- } else if ((s < 0) || (s >= MAX_PROTOS) || (port <= 0) ||
- (!protocols[s]) || (unit < 0) ||
- (unit >= protocols[s]->max_units)) {
- printk("%s: Invalid parameters\n", device);
- return 0;
- }
-
- for (p = s; p < e; p++) {
- struct pi_protocol *proto = protocols[p];
- if (!proto)
- continue;
- /* still racy */
- if (!try_module_get(proto->owner))
- continue;
- pi->proto = proto;
- pi->private = 0;
- if (proto->init_proto && proto->init_proto(pi) < 0) {
- pi->proto = NULL;
- module_put(proto->owner);
- continue;
- }
- if (delay == -1)
- pi->delay = pi->proto->default_delay;
- else
- pi->delay = delay;
- pi->devtype = devtype;
- pi->device = device;
-
- pi->parname = NULL;
- pi->pardev = NULL;
- init_waitqueue_head(&pi->parq);
- pi->claimed = 0;
- pi->claim_cont = NULL;
-
- pi->mode = mode;
- if (port != -1) {
- pi->port = port;
- if (pi_probe_unit(pi, unit, scratch, verbose))
- break;
- pi->port = 0;
- } else {
- k = 0;
- while ((pi->port = lpts[k++]))
- if (pi_probe_unit
- (pi, unit, scratch, verbose))
- break;
- if (pi->port)
- break;
- }
- if (pi->proto->release_proto)
- pi->proto->release_proto(pi);
- module_put(proto->owner);
- }
-
- if (!pi->port) {
- if (autoprobe)
- printk("%s: Autoprobe failed\n", device);
- else
- printk("%s: Adapter not found\n", device);
- return 0;
- }
-
- if (pi->parname)
- printk("%s: Sharing %s at 0x%x\n", pi->device,
- pi->parname, pi->port);
-
- pi->proto->log_adapter(pi, scratch, verbose);
-
- return 1;
-}
-
-EXPORT_SYMBOL(pi_init);
-
-static int pi_probe(struct pardevice *par_dev)
-{
- struct device_driver *drv = par_dev->dev.driver;
- int len = strlen(drv->name);
-
- if (strncmp(par_dev->name, drv->name, len))
- return -ENODEV;
-
- return 0;
-}
-
-void *pi_register_driver(char *name)
-{
- struct parport_driver *parp_drv;
- int ret;
-
- parp_drv = kzalloc(sizeof(*parp_drv), GFP_KERNEL);
- if (!parp_drv)
- return NULL;
-
- parp_drv->name = name;
- parp_drv->probe = pi_probe;
- parp_drv->devmodel = true;
-
- ret = parport_register_driver(parp_drv);
- if (ret) {
- kfree(parp_drv);
- return NULL;
- }
- return (void *)parp_drv;
-}
-EXPORT_SYMBOL(pi_register_driver);
-
-void pi_unregister_driver(void *_drv)
-{
- struct parport_driver *drv = _drv;
-
- parport_unregister_driver(drv);
- kfree(drv);
-}
-EXPORT_SYMBOL(pi_unregister_driver);
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
deleted file mode 100644
index ddb9e589da7f..000000000000
--- a/drivers/block/paride/paride.h
+++ /dev/null
@@ -1,172 +0,0 @@
-#ifndef __DRIVERS_PARIDE_H__
-#define __DRIVERS_PARIDE_H__
-
-/*
- paride.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GPL.
-
- This file defines the interface between the high-level parallel
- IDE device drivers (pd, pf, pcd, pt) and the adapter chips.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.05.05 init_proto, release_proto
-*/
-
-#define PARIDE_H_VERSION "1.01"
-
-/* Some adapters need to know what kind of device they are in
-
- Values for devtype:
-*/
-
-#define PI_PD 0 /* IDE disk */
-#define PI_PCD 1 /* ATAPI CDrom */
-#define PI_PF 2 /* ATAPI disk */
-#define PI_PT 3 /* ATAPI tape */
-#define PI_PG 4 /* ATAPI generic */
-
-/* The paride module contains no state, instead the drivers allocate
- a pi_adapter data structure and pass it to paride in every operation.
-
-*/
-
-struct pi_adapter {
-
- struct pi_protocol *proto; /* adapter protocol */
- int port; /* base address of parallel port */
- int mode; /* transfer mode in use */
- int delay; /* adapter delay setting */
- int devtype; /* device type: PI_PD etc. */
- char *device; /* name of driver */
- int unit; /* unit number for chained adapters */
- int saved_r0; /* saved port state */
- int saved_r2; /* saved port state */
- int reserved; /* number of ports reserved */
- unsigned long private; /* for protocol module */
-
- wait_queue_head_t parq; /* wait queue for parport sharing */
- void *pardev; /* pointer to pardevice */
- char *parname; /* parport name */
- int claimed; /* parport has already been claimed */
- void (*claim_cont)(void); /* continuation for parport wait */
-};
-
-typedef struct pi_adapter PIA;
-
-/* functions exported by paride to the high level drivers */
-
-extern int pi_init(PIA *pi,
- int autoprobe, /* 1 to autoprobe */
- int port, /* base port address */
- int mode, /* -1 for autoprobe */
- int unit, /* unit number, if supported */
- int protocol, /* protocol to use */
- int delay, /* -1 to use adapter specific default */
- char * scratch, /* address of 512 byte buffer */
- int devtype, /* device type: PI_PD, PI_PCD, etc ... */
- int verbose, /* log verbose data while probing */
- char *device /* name of the driver */
- ); /* returns 0 on failure, 1 on success */
-
-extern void pi_release(PIA *pi);
-
-/* registers are addressed as (cont,regr)
-
- cont: 0 for command register file, 1 for control register(s)
- regr: 0-7 for register number.
-
-*/
-
-extern void pi_write_regr(PIA *pi, int cont, int regr, int val);
-
-extern int pi_read_regr(PIA *pi, int cont, int regr);
-
-extern void pi_write_block(PIA *pi, char * buf, int count);
-
-extern void pi_read_block(PIA *pi, char * buf, int count);
-
-extern void pi_connect(PIA *pi);
-
-extern void pi_disconnect(PIA *pi);
-
-extern void pi_do_claimed(PIA *pi, void (*cont)(void));
-extern int pi_schedule_claimed(PIA *pi, void (*cont)(void));
-
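(A hedged sketch of this interface from a high-level driver's side; the adapter/scratch/"pcdX" names and the parameter values are only examples and error handling is omitted. Register (cont = 1, regr = 6) is the ATAPI alternate status register, which is exactly how pcd.c's status_reg() reads it.)

#include <linux/errno.h>
#include "paride.h"

static struct pi_adapter adapter;
static char scratch[512];               /* 512-byte buffer pi_init() asks for */
static char name[] = "pcdX";            /* hypothetical driver name */

static int my_attach(void)
{
        int status;

        /* autoprobe port, mode, unit, protocol and delay */
        if (!pi_init(&adapter, 1, -1, -1, -1, -1, -1,
                     scratch, PI_PCD, 1, name))
                return -ENODEV;

        pi_connect(&adapter);
        status = pi_read_regr(&adapter, 1, 6);  /* alternate status */
        pi_disconnect(&adapter);

        pi_release(&adapter);
        return (status & 0x80) ? -EBUSY : 0;    /* 0x80 == IDE_BUSY in pcd.c */
}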
-/* macros and functions exported to the protocol modules */
-
-#define delay_p (pi->delay?udelay(pi->delay):(void)0)
-#define out_p(offs,byte) outb(byte,pi->port+offs); delay_p;
-#define in_p(offs) (delay_p,inb(pi->port+offs))
-
-#define w0(byte) {out_p(0,byte);}
-#define r0() (in_p(0) & 0xff)
-#define w1(byte) {out_p(1,byte);}
-#define r1() (in_p(1) & 0xff)
-#define w2(byte) {out_p(2,byte);}
-#define r2() (in_p(2) & 0xff)
-#define w3(byte) {out_p(3,byte);}
-#define w4(byte) {out_p(4,byte);}
-#define r4() (in_p(4) & 0xff)
-#define w4w(data) {outw(data,pi->port+4); delay_p;}
-#define w4l(data) {outl(data,pi->port+4); delay_p;}
-#define r4w() (delay_p,inw(pi->port+4)&0xffff)
-#define r4l() (delay_p,inl(pi->port+4)&0xffffffff)
-
-static inline u16 pi_swab16( char *b, int k)
-
-{ union { u16 u; char t[2]; } r;
-
- r.t[0]=b[2*k+1]; r.t[1]=b[2*k];
- return r.u;
-}
-
-static inline u32 pi_swab32( char *b, int k)
-
-{ union { u32 u; char f[4]; } r;
-
- r.f[0]=b[4*k+1]; r.f[1]=b[4*k];
- r.f[2]=b[4*k+3]; r.f[3]=b[4*k+2];
- return r.u;
-}
-
-struct pi_protocol {
-
- char name[8]; /* name for this protocol */
- int index; /* index into protocol table */
-
- int max_mode; /* max mode number */
- int epp_first; /* modes >= this use 8 ports */
-
- int default_delay; /* delay parameter if not specified */
- int max_units; /* max chained units probed for */
-
- void (*write_regr)(PIA *,int,int,int);
- int (*read_regr)(PIA *,int,int);
- void (*write_block)(PIA *,char *,int);
- void (*read_block)(PIA *,char *,int);
-
- void (*connect)(PIA *);
- void (*disconnect)(PIA *);
-
- int (*test_port)(PIA *);
- int (*probe_unit)(PIA *);
- int (*test_proto)(PIA *,char *,int);
- void (*log_adapter)(PIA *,char *,int);
-
- int (*init_proto)(PIA *);
- void (*release_proto)(PIA *);
- struct module *owner;
-};
-
-typedef struct pi_protocol PIP;
-
-extern int paride_register( PIP * );
-extern void paride_unregister ( PIP * );
-void *pi_register_driver(char *);
-void pi_unregister_driver(void *);
-
-#endif /* __DRIVERS_PARIDE_H__ */
-/* end of paride.h */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
deleted file mode 100644
index a5ab40784119..000000000000
--- a/drivers/block/paride/pcd.c
+++ /dev/null
@@ -1,1042 +0,0 @@
-/*
- pcd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is a high-level driver for parallel port ATAPI CD-ROM
- drives based on chips supported by the paride module.
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI CD-ROM drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The behaviour of the pcd driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-6 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI CD-ROMs can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer: the larger it is,
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (46) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pcd")
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- nice This parameter controls the driver's use of
- idle CPU time, at the expense of some speed.
-
- If this driver is built into the kernel, you can use the
- following kernel command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pcd.drive0
- pcd.drive1
- pcd.drive2
- pcd.drive3
- pcd.nice
-
- In addition, you can use the parameter pcd.disable to disable
- the driver entirely.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.01.24 Added test unit ready support
- 1.02 GRG 1998.05.06 Changes to pcd_completion, ready_wait,
- and loosen interpretation of ATAPI
- standard for clearing error status.
- Use spinlocks. Eliminate sti().
- 1.03 GRG 1998.06.16 Eliminated an Ugh
- 1.04 GRG 1998.08.15 Added extra debugging, improvements to
- pcd_completion, use HZ in loop timing
- 1.05 GRG 1998.08.16 Conformed to "Uniform CD-ROM" standard
- 1.06 GRG 1998.08.19 Added audio ioctl support
- 1.07 GRG 1998.09.24 Increased reset timeout, added jumbo support
-
-*/
-
-#define PCD_VERSION "1.07"
-#define PCD_MAJOR 46
-#define PCD_NAME "pcd"
-#define PCD_UNITS 4
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is off
- by default.
-
-*/
-
-static int verbose = 0;
-static int major = PCD_MAJOR;
-static char *name = PCD_NAME;
-static int nice = 0;
-static int disable = 0;
-
-static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
-
-static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
-static int pcd_drive_count;
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/cdrom.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-
-static DEFINE_MUTEX(pcd_mutex);
-static DEFINE_SPINLOCK(pcd_lock);
-
-module_param(verbose, int, 0644);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param(nice, int, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-#include "pseudo.h"
-
-#define PCD_RETRIES 5
-#define PCD_TMO 800 /* timeout in jiffies */
-#define PCD_DELAY 50 /* spin delay in uS */
-#define PCD_READY_TMO 20 /* in seconds */
-#define PCD_RESET_TMO 100 /* in tenths of a second */
-
-#define PCD_SPIN (1000000*PCD_TMO)/(HZ*PCD_DELAY)
-
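(PCD_SPIN is just PCD_TMO re-expressed as a number of PCD_DELAY-microsecond polls: with PCD_TMO = 800 jiffies and PCD_DELAY = 50 us, a kernel running at, say, HZ = 250 gives 1000000 * 800 / (250 * 50) = 64000 polls, i.e. the same 3.2 seconds as the jiffies figure.)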
-#define IDE_ERR 0x01
-#define IDE_DRQ 0x08
-#define IDE_READY 0x40
-#define IDE_BUSY 0x80
-
-static int pcd_open(struct cdrom_device_info *cdi, int purpose);
-static void pcd_release(struct cdrom_device_info *cdi);
-static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
-static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
- unsigned int clearing, int slot_nr);
-static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
-static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
-static int pcd_drive_reset(struct cdrom_device_info *cdi);
-static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn);
-static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
- unsigned int cmd, void *arg);
-static int pcd_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc);
-
-static void do_pcd_read_drq(void);
-static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd);
-static void do_pcd_read(void);
-
-struct pcd_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int drive; /* master/slave */
- int last_sense; /* result of last request sense */
- int changed; /* media change seen */
- int present; /* does this unit exist ? */
- char *name; /* pcd0, pcd1, etc */
- struct cdrom_device_info info; /* uniform cdrom interface */
- struct gendisk *disk;
- struct blk_mq_tag_set tag_set;
- struct list_head rq_list;
-};
-
-static struct pcd_unit pcd[PCD_UNITS];
-
-static char pcd_scratch[64];
-static char pcd_buffer[2048]; /* raw block buffer */
-static int pcd_bufblk = -1; /* block in buffer, in CD units,
- -1 for nothing there. See also
- pd_unit.
- */
-
-/* the variables below are used mainly in the I/O request engine, which
- processes only one request at a time.
-*/
-
-static struct pcd_unit *pcd_current; /* current request's drive */
-static struct request *pcd_req;
-static int pcd_retries; /* retries on current request */
-static int pcd_busy; /* request being processed ? */
-static int pcd_sector; /* address of next requested sector */
-static int pcd_count; /* number of blocks still to do */
-static char *pcd_buf; /* buffer for request in progress */
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static int pcd_block_open(struct block_device *bdev, fmode_t mode)
-{
- struct pcd_unit *cd = bdev->bd_disk->private_data;
- int ret;
-
- bdev_check_media_change(bdev);
-
- mutex_lock(&pcd_mutex);
- ret = cdrom_open(&cd->info, bdev, mode);
- mutex_unlock(&pcd_mutex);
-
- return ret;
-}
-
-static void pcd_block_release(struct gendisk *disk, fmode_t mode)
-{
- struct pcd_unit *cd = disk->private_data;
- mutex_lock(&pcd_mutex);
- cdrom_release(&cd->info, mode);
- mutex_unlock(&pcd_mutex);
-}
-
-static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- struct pcd_unit *cd = bdev->bd_disk->private_data;
- int ret;
-
- mutex_lock(&pcd_mutex);
- ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
- mutex_unlock(&pcd_mutex);
-
- return ret;
-}
-
-static unsigned int pcd_block_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct pcd_unit *cd = disk->private_data;
- return cdrom_check_events(&cd->info, clearing);
-}
-
-static const struct block_device_operations pcd_bdops = {
- .owner = THIS_MODULE,
- .open = pcd_block_open,
- .release = pcd_block_release,
- .ioctl = pcd_block_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = blkdev_compat_ptr_ioctl,
-#endif
- .check_events = pcd_block_check_events,
-};
-
-static const struct cdrom_device_ops pcd_dops = {
- .open = pcd_open,
- .release = pcd_release,
- .drive_status = pcd_drive_status,
- .check_events = pcd_check_events,
- .tray_move = pcd_tray_move,
- .lock_door = pcd_lock_door,
- .get_mcn = pcd_get_mcn,
- .reset = pcd_drive_reset,
- .audio_ioctl = pcd_audio_ioctl,
- .generic_packet = pcd_packet,
- .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
- CDC_MCN | CDC_MEDIA_CHANGED | CDC_RESET |
- CDC_PLAY_AUDIO | CDC_GENERIC_PACKET | CDC_CD_R |
- CDC_CD_RW,
-};
-
-static const struct blk_mq_ops pcd_mq_ops = {
- .queue_rq = pcd_queue_rq,
-};
-
-static int pcd_open(struct cdrom_device_info *cdi, int purpose)
-{
- struct pcd_unit *cd = cdi->handle;
- if (!cd->present)
- return -ENODEV;
- return 0;
-}
-
-static void pcd_release(struct cdrom_device_info *cdi)
-{
-}
-
-static inline int status_reg(struct pcd_unit *cd)
-{
- return pi_read_regr(cd->pi, 1, 6);
-}
-
-static inline int read_reg(struct pcd_unit *cd, int reg)
-{
- return pi_read_regr(cd->pi, 0, reg);
-}
-
-static inline void write_reg(struct pcd_unit *cd, int reg, int val)
-{
- pi_write_regr(cd->pi, 0, reg, val);
-}
-
-static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
-{
- int j, r, e, s, p;
-
- j = 0;
- while ((((r = status_reg(cd)) & go) || (stop && (!(r & stop))))
- && (j++ < PCD_SPIN))
- udelay(PCD_DELAY);
-
- if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) {
- s = read_reg(cd, 7);
- e = read_reg(cd, 1);
- p = read_reg(cd, 2);
- if (j > PCD_SPIN)
- e |= 0x100;
- if (fun)
- printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
- " loop=%d phase=%d\n",
- cd->name, fun, msg, r, s, e, j, p);
- return (s << 8) + r;
- }
- return 0;
-}
-
-static int pcd_command(struct pcd_unit *cd, char *cmd, int dlen, char *fun)
-{
- pi_connect(cd->pi);
-
- write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
-
- if (pcd_wait(cd, IDE_BUSY | IDE_DRQ, 0, fun, "before command")) {
- pi_disconnect(cd->pi);
- return -1;
- }
-
- write_reg(cd, 4, dlen % 256);
- write_reg(cd, 5, dlen / 256);
- write_reg(cd, 7, 0xa0); /* ATAPI packet command */
-
- if (pcd_wait(cd, IDE_BUSY, IDE_DRQ, fun, "command DRQ")) {
- pi_disconnect(cd->pi);
- return -1;
- }
-
- if (read_reg(cd, 2) != 1) {
- printk("%s: %s: command phase error\n", cd->name, fun);
- pi_disconnect(cd->pi);
- return -1;
- }
-
- pi_write_block(cd->pi, cmd, 12);
-
- return 0;
-}
-
-static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun)
-{
- int r, d, p, n, k, j;
-
- r = -1;
- k = 0;
- j = 0;
-
- if (!pcd_wait(cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR,
- fun, "completion")) {
- r = 0;
- while (read_reg(cd, 7) & IDE_DRQ) {
- d = read_reg(cd, 4) + 256 * read_reg(cd, 5);
- n = (d + 3) & 0xfffc;
- p = read_reg(cd, 2) & 3;
-
- if ((p == 2) && (n > 0) && (j == 0)) {
- pi_read_block(cd->pi, buf, n);
- if (verbose > 1)
- printk("%s: %s: Read %d bytes\n",
- cd->name, fun, n);
- r = 0;
- j++;
- } else {
- if (verbose > 1)
- printk
- ("%s: %s: Unexpected phase %d, d=%d, k=%d\n",
- cd->name, fun, p, d, k);
- if (verbose < 2)
- printk_once(
- "%s: WARNING: ATAPI phase errors\n",
- cd->name);
- mdelay(1);
- }
- if (k++ > PCD_TMO) {
- printk("%s: Stuck DRQ\n", cd->name);
- break;
- }
- if (pcd_wait
- (cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, fun,
- "completion")) {
- r = -1;
- break;
- }
- }
- }
-
- pi_disconnect(cd->pi);
-
- return r;
-}
-
-static void pcd_req_sense(struct pcd_unit *cd, char *fun)
-{
- char rs_cmd[12] = { 0x03, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
- char buf[16];
- int r, c;
-
- r = pcd_command(cd, rs_cmd, 16, "Request sense");
- mdelay(1);
- if (!r)
- pcd_completion(cd, buf, "Request sense");
-
- cd->last_sense = -1;
- c = 2;
- if (!r) {
- if (fun)
- printk("%s: %s: Sense key: %x, ASC: %x, ASQ: %x\n",
- cd->name, fun, buf[2] & 0xf, buf[12], buf[13]);
- c = buf[2] & 0xf;
- cd->last_sense =
- c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16);
- }
- if ((c == 2) || (c == 6))
- cd->changed = 1;
-}
-
-static int pcd_atapi(struct pcd_unit *cd, char *cmd, int dlen, char *buf, char *fun)
-{
- int r;
-
- r = pcd_command(cd, cmd, dlen, fun);
- mdelay(1);
- if (!r)
- r = pcd_completion(cd, buf, fun);
- if (r)
- pcd_req_sense(cd, fun);
-
- return r;
-}
-
-static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
-{
- return pcd_atapi(cdi->handle, cgc->cmd, cgc->buflen, cgc->buffer,
- "generic packet");
-}
-
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
-static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
- unsigned int clearing, int slot_nr)
-{
- struct pcd_unit *cd = cdi->handle;
- int res = cd->changed;
- if (res)
- cd->changed = 0;
- return res ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)
-{
- char un_cmd[12] = { 0x1e, 0, 0, 0, lock, 0, 0, 0, 0, 0, 0, 0 };
-
- return pcd_atapi(cdi->handle, un_cmd, 0, pcd_scratch,
- lock ? "lock door" : "unlock door");
-}
-
-static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
-{
- char ej_cmd[12] = { 0x1b, 0, 0, 0, 3 - position, 0, 0, 0, 0, 0, 0, 0 };
-
- return pcd_atapi(cdi->handle, ej_cmd, 0, pcd_scratch,
- position ? "eject" : "close tray");
-}
-
-static void pcd_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-static int pcd_reset(struct pcd_unit *cd)
-{
- int i, k, flg;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
-
- pi_connect(cd->pi);
- write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
- write_reg(cd, 7, 8);
-
- pcd_sleep(20 * HZ / 1000); /* delay a bit */
-
- k = 0;
- while ((k++ < PCD_RESET_TMO) && (status_reg(cd) & IDE_BUSY))
- pcd_sleep(HZ / 10);
-
- flg = 1;
- for (i = 0; i < 5; i++)
- flg &= (read_reg(cd, i + 1) == expect[i]);
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", cd->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", read_reg(cd, i + 1));
- if (!flg)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(cd->pi);
- return flg - 1;
-}
-
-static int pcd_drive_reset(struct cdrom_device_info *cdi)
-{
- return pcd_reset(cdi->handle);
-}
-
-static int pcd_ready_wait(struct pcd_unit *cd, int tmo)
-{
- char tr_cmd[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, p;
-
- k = 0;
- while (k < tmo) {
- cd->last_sense = 0;
- pcd_atapi(cd, tr_cmd, 0, NULL, DBMSG("test unit ready"));
- p = cd->last_sense;
- if (!p)
- return 0;
- if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
- return p;
- k++;
- pcd_sleep(HZ);
- }
- return 0x000020; /* timeout */
-}
-
-static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
-{
- char rc_cmd[12] = { 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- struct pcd_unit *cd = cdi->handle;
-
- if (pcd_ready_wait(cd, PCD_READY_TMO))
- return CDS_DRIVE_NOT_READY;
- if (pcd_atapi(cd, rc_cmd, 8, pcd_scratch, DBMSG("check media")))
- return CDS_NO_DISC;
- return CDS_DISC_OK;
-}
-
-static int pcd_identify(struct pcd_unit *cd)
-{
- char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char id[18];
- int k, s;
-
- pcd_bufblk = -1;
-
- s = pcd_atapi(cd, id_cmd, 36, pcd_buffer, "identify");
-
- if (s)
- return -1;
- if ((pcd_buffer[0] & 0x1f) != 5) {
- if (verbose)
- printk("%s: %s is not a CD-ROM\n",
- cd->name, cd->drive ? "Slave" : "Master");
- return -1;
- }
- memcpy(id, pcd_buffer + 16, 16);
- id[16] = 0;
- k = 16;
- while ((k >= 0) && (id[k] <= 0x20)) {
- id[k] = 0;
- k--;
- }
-
- printk("%s: %s: %s\n", cd->name, cd->drive ? "Slave" : "Master", id);
-
- return 0;
-}
-
-/*
- * returns 0, with id set if drive is detected, otherwise an error code.
- */
-static int pcd_probe(struct pcd_unit *cd, int ms)
-{
- if (ms == -1) {
- for (cd->drive = 0; cd->drive <= 1; cd->drive++)
- if (!pcd_reset(cd) && !pcd_identify(cd))
- return 0;
- } else {
- cd->drive = ms;
- if (!pcd_reset(cd) && !pcd_identify(cd))
- return 0;
- }
- return -ENODEV;
-}
-
-static int pcd_probe_capabilities(struct pcd_unit *cd)
-{
- char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
- char buffer[32];
- int ret;
-
- ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
- if (ret)
- return ret;
-
- /* we should now have the cap page */
- if ((buffer[11] & 1) == 0)
- cd->info.mask |= CDC_CD_R;
- if ((buffer[11] & 2) == 0)
- cd->info.mask |= CDC_CD_RW;
- if ((buffer[12] & 1) == 0)
- cd->info.mask |= CDC_PLAY_AUDIO;
- if ((buffer[14] & 1) == 0)
- cd->info.mask |= CDC_LOCK;
- if ((buffer[14] & 8) == 0)
- cd->info.mask |= CDC_OPEN_TRAY;
- if ((buffer[14] >> 6) == 0)
- cd->info.mask |= CDC_CLOSE_TRAY;
-
- return 0;
-}
-
-/* I/O request processing */
-static int pcd_queue;
-
-static int set_next_request(void)
-{
- struct pcd_unit *cd;
- int old_pos = pcd_queue;
-
- do {
- cd = &pcd[pcd_queue];
- if (++pcd_queue == PCD_UNITS)
- pcd_queue = 0;
- if (cd->present && !list_empty(&cd->rq_list)) {
- pcd_req = list_first_entry(&cd->rq_list, struct request,
- queuelist);
- list_del_init(&pcd_req->queuelist);
- blk_mq_start_request(pcd_req);
- break;
- }
- } while (pcd_queue != old_pos);
-
- return pcd_req != NULL;
-}
-
-static void pcd_request(void)
-{
- struct pcd_unit *cd;
-
- if (pcd_busy)
- return;
-
- if (!pcd_req && !set_next_request())
- return;
-
- cd = pcd_req->q->disk->private_data;
- if (cd != pcd_current)
- pcd_bufblk = -1;
- pcd_current = cd;
- pcd_sector = blk_rq_pos(pcd_req);
- pcd_count = blk_rq_cur_sectors(pcd_req);
- pcd_buf = bio_data(pcd_req->bio);
- pcd_busy = 1;
- ps_set_intr(do_pcd_read, NULL, 0, nice);
-}
-
-static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct pcd_unit *cd = hctx->queue->queuedata;
-
- if (rq_data_dir(bd->rq) != READ) {
- blk_mq_start_request(bd->rq);
- return BLK_STS_IOERR;
- }
-
- spin_lock_irq(&pcd_lock);
- list_add_tail(&bd->rq->queuelist, &cd->rq_list);
- pcd_request();
- spin_unlock_irq(&pcd_lock);
-
- return BLK_STS_OK;
-}
-
-static inline void next_request(blk_status_t err)
-{
- unsigned long saved_flags;
-
- spin_lock_irqsave(&pcd_lock, saved_flags);
- if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) {
- __blk_mq_end_request(pcd_req, err);
- pcd_req = NULL;
- }
- pcd_busy = 0;
- pcd_request();
- spin_unlock_irqrestore(&pcd_lock, saved_flags);
-}
-
-static int pcd_ready(void)
-{
- return (((status_reg(pcd_current) & (IDE_BUSY | IDE_DRQ)) == IDE_DRQ));
-}
-
-static void pcd_transfer(void)
-{
-
- while (pcd_count && (pcd_sector / 4 == pcd_bufblk)) {
- int o = (pcd_sector % 4) * 512;
- memcpy(pcd_buf, pcd_buffer + o, 512);
- pcd_count--;
- pcd_buf += 512;
- pcd_sector++;
- }
-}
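For illustration only (values invented for this note): pcd_transfer() above serves 512-byte sectors out of the 2048-byte CD frame cached in pcd_buffer, using pcd_sector / 4 as the frame number and (pcd_sector % 4) * 512 as the byte offset within it, so sector 4103 would be copied from frame 1025 at offset 1536.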
-
-static void pcd_start(void)
-{
- int b, i;
- char rd_cmd[12] = { 0xa8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 };
-
- pcd_bufblk = pcd_sector / 4;
- b = pcd_bufblk;
- for (i = 0; i < 4; i++) {
- rd_cmd[5 - i] = b & 0xff;
- b = b >> 8;
- }
-
- if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
- pcd_bufblk = -1;
- next_request(BLK_STS_IOERR);
- return;
- }
-
- mdelay(1);
-
- ps_set_intr(do_pcd_read_drq, pcd_ready, PCD_TMO, nice);
-}
-
-static void do_pcd_read(void)
-{
- pcd_busy = 1;
- pcd_retries = 0;
- pcd_transfer();
- if (!pcd_count) {
- next_request(0);
- return;
- }
-
- pi_do_claimed(pcd_current->pi, pcd_start);
-}
-
-static void do_pcd_read_drq(void)
-{
- unsigned long saved_flags;
-
- if (pcd_completion(pcd_current, pcd_buffer, "read block")) {
- if (pcd_retries < PCD_RETRIES) {
- mdelay(1);
- pcd_retries++;
- pi_do_claimed(pcd_current->pi, pcd_start);
- return;
- }
- pcd_bufblk = -1;
- next_request(BLK_STS_IOERR);
- return;
- }
-
- do_pcd_read();
- spin_lock_irqsave(&pcd_lock, saved_flags);
- pcd_request();
- spin_unlock_irqrestore(&pcd_lock, saved_flags);
-}
-
-/* the audio_ioctl stuff is adapted from sr_ioctl.c */
-
-static int pcd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
-{
- struct pcd_unit *cd = cdi->handle;
-
- switch (cmd) {
-
- case CDROMREADTOCHDR:
-
- {
- char cmd[12] =
- { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
- 0, 0, 0 };
- struct cdrom_tochdr *tochdr =
- (struct cdrom_tochdr *) arg;
- char buffer[32];
- int r;
-
- r = pcd_atapi(cd, cmd, 12, buffer, "read toc header");
-
- tochdr->cdth_trk0 = buffer[2];
- tochdr->cdth_trk1 = buffer[3];
-
- return r ? -EIO : 0;
- }
-
- case CDROMREADTOCENTRY:
-
- {
- char cmd[12] =
- { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
- 0, 0, 0 };
-
- struct cdrom_tocentry *tocentry =
- (struct cdrom_tocentry *) arg;
- unsigned char buffer[32];
- int r;
-
- cmd[1] =
- (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
- cmd[6] = tocentry->cdte_track;
-
- r = pcd_atapi(cd, cmd, 12, buffer, "read toc entry");
-
- tocentry->cdte_ctrl = buffer[5] & 0xf;
- tocentry->cdte_adr = buffer[5] >> 4;
- tocentry->cdte_datamode =
- (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
- if (tocentry->cdte_format == CDROM_MSF) {
- tocentry->cdte_addr.msf.minute = buffer[9];
- tocentry->cdte_addr.msf.second = buffer[10];
- tocentry->cdte_addr.msf.frame = buffer[11];
- } else
- tocentry->cdte_addr.lba =
- (((((buffer[8] << 8) + buffer[9]) << 8)
- + buffer[10]) << 8) + buffer[11];
-
- return r ? -EIO : 0;
- }
-
- default:
-
- return -ENOSYS;
- }
-}
-
-static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
-{
- char cmd[12] =
- { GPCMD_READ_SUBCHANNEL, 0, 0x40, 2, 0, 0, 0, 0, 24, 0, 0, 0 };
- char buffer[32];
-
- if (pcd_atapi(cdi->handle, cmd, 24, buffer, "get mcn"))
- return -EIO;
-
- memcpy(mcn->medium_catalog_number, buffer + 9, 13);
- mcn->medium_catalog_number[13] = 0;
-
- return 0;
-}
-
-static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
- int mode, int unit, int protocol, int delay, int ms)
-{
- struct gendisk *disk;
- int ret;
-
- ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
- if (ret)
- return ret;
-
- disk = blk_mq_alloc_disk(&cd->tag_set, cd);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_free_tag_set;
- }
-
- INIT_LIST_HEAD(&cd->rq_list);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- cd->disk = disk;
- cd->pi = &cd->pia;
- cd->present = 0;
- cd->last_sense = 0;
- cd->changed = 1;
- cd->drive = (*drives[cd - pcd])[D_SLV];
-
- cd->name = &cd->info.name[0];
- snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
- cd->info.ops = &pcd_dops;
- cd->info.handle = cd;
- cd->info.speed = 0;
- cd->info.capacity = 1;
- cd->info.mask = 0;
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, cd->name); /* umm... */
- disk->fops = &pcd_bdops;
- disk->flags |= GENHD_FL_NO_PART;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- disk->event_flags = DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
-
- if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
- pcd_buffer, PI_PCD, verbose, cd->name)) {
- ret = -ENODEV;
- goto out_free_disk;
- }
- ret = pcd_probe(cd, ms);
- if (ret)
- goto out_pi_release;
-
- cd->present = 1;
- pcd_probe_capabilities(cd);
- ret = register_cdrom(cd->disk, &cd->info);
- if (ret)
- goto out_pi_release;
- ret = add_disk(cd->disk);
- if (ret)
- goto out_unreg_cdrom;
- return 0;
-
-out_unreg_cdrom:
- unregister_cdrom(&cd->info);
-out_pi_release:
- pi_release(cd->pi);
-out_free_disk:
- put_disk(cd->disk);
-out_free_tag_set:
- blk_mq_free_tag_set(&cd->tag_set);
- return ret;
-}
-
-static int __init pcd_init(void)
-{
- int found = 0, unit;
-
- if (disable)
- return -EINVAL;
-
- if (register_blkdev(major, name))
- return -EBUSY;
-
- pr_info("%s: %s version %s, major %d, nice %d\n",
- name, name, PCD_VERSION, major, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- goto out_unregister_blkdev;
- }
-
- for (unit = 0; unit < PCD_UNITS; unit++) {
- if ((*drives[unit])[D_PRT])
- pcd_drive_count++;
- }
-
- if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1))
- found++;
- } else {
- for (unit = 0; unit < PCD_UNITS; unit++) {
- struct pcd_unit *cd = &pcd[unit];
- int *conf = *drives[unit];
-
- if (!conf[D_PRT])
- continue;
- if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- conf[D_SLV]))
- found++;
- }
- }
-
- if (!found) {
- pr_info("%s: No CD-ROM drive found\n", name);
- goto out_unregister_pi_driver;
- }
-
- return 0;
-
-out_unregister_pi_driver:
- pi_unregister_driver(par_drv);
-out_unregister_blkdev:
- unregister_blkdev(major, name);
- return -ENODEV;
-}
-
-static void __exit pcd_exit(void)
-{
- struct pcd_unit *cd;
- int unit;
-
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->present)
- continue;
-
- unregister_cdrom(&cd->info);
- del_gendisk(cd->disk);
- pi_release(cd->pi);
- put_disk(cd->disk);
-
- blk_mq_free_tag_set(&cd->tag_set);
- }
- pi_unregister_driver(par_drv);
- unregister_blkdev(major, name);
-}
-
-MODULE_LICENSE("GPL");
-module_init(pcd_init)
-module_exit(pcd_exit)
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
deleted file mode 100644
index f8a75bc90f70..000000000000
--- a/drivers/block/paride/pd.c
+++ /dev/null
@@ -1,1032 +0,0 @@
-/*
- pd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the high-level driver for parallel port IDE hard
- drives based on chips supported by the paride module.
-
- By default, the driver will autoprobe for a single parallel
- port IDE drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The behaviour of the pd driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-8 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <geo> this defaults to 0 to indicate that the driver
- should use the CHS geometry provided by the drive
- itself. If set to 1, the driver will provide
- a logical geometry with 64 heads and 32 sectors
- per track, to be consistent with most SCSI
- drivers. (0 if not given)
-
- <sby> set this to zero to disable the power saving
- standby mode, if needed. (1 if not given)
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- <slv> IDE disks can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
-
- major You may use this parameter to override the
- default major number (45) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pd")
-
- cluster The driver will attempt to aggregate requests
- for adjacent blocks into larger multi-block
- clusters. The maximum cluster size (in 512
- byte sectors) is set with this parameter.
- (default 64)
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- nice This parameter controls the driver's use of
- idle CPU time, at the expense of some speed.
-
- If this driver is built into the kernel, you can use the
- following kernel command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pd.drive0
- pd.drive1
- pd.drive2
- pd.drive3
- pd.cluster
- pd.nice
-
- In addition, you can use the parameter pd.disable to disable
- the driver entirely.
-
-*/
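For illustration only (the port address is hypothetical): with the 8-integer format documented above, a single master drive on a parallel port at 0x378 using protocol 0 and otherwise default settings could be given as drive0=0x378,0,0,-1,0,1,-1,0 when loading the module, or as pd.drive0=0x378,0,0,-1,0,1,-1,0 on the kernel command line when the driver is built in.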
-
-/* Changes:
-
- 1.01 GRG 1997.01.24 Restored pd_reset()
- Added eject ioctl
- 1.02 GRG 1998.05.06 SMP spinlock changes,
- Added slave support
- 1.03 GRG 1998.06.16 Eliminate an Ugh.
- 1.04 GRG 1998.08.15 Extra debugging, use HZ in loop timing
- 1.05 GRG 1998.09.24 Added jumbo support
-
-*/
-
-#define PD_VERSION "1.05"
-#define PD_MAJOR 45
-#define PD_NAME "pd"
-#define PD_UNITS 4
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is off
- by default.
-
-*/
-#include <linux/types.h>
-
-static int verbose = 0;
-static int major = PD_MAJOR;
-static char *name = PD_NAME;
-static int cluster = 64;
-static int nice = 0;
-static int disable = 0;
-
-static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-
-static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
-
-/* end of parameters */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/cdrom.h> /* for the eject ioctl */
-#include <linux/blk-mq.h>
-#include <linux/blkpg.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-
-static DEFINE_MUTEX(pd_mutex);
-static DEFINE_SPINLOCK(pd_lock);
-
-module_param(verbose, int, 0);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param(cluster, int, 0);
-module_param(nice, int, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-
-#define PD_BITS 4
-
-/* numbers for "SCSI" geometry */
-
-#define PD_LOG_HEADS 64
-#define PD_LOG_SECTS 32
-
-#define PD_ID_OFF 54
-#define PD_ID_LEN 14
-
-#define PD_MAX_RETRIES 5
-#define PD_TMO 800 /* interrupt timeout in jiffies */
-#define PD_SPIN_DEL 50 /* spin delay in micro-seconds */
-
-#define PD_SPIN (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
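PD_SPIN converts the PD_TMO timeout (given in jiffies) into a count of PD_SPIN_DEL-microsecond polling steps; assuming HZ is 250, for example, it evaluates to 1000000 * 800 / (250 * 50) = 64000 iterations of 50 us, roughly 3.2 seconds of polling.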
-
-#define STAT_ERR 0x00001
-#define STAT_INDEX 0x00002
-#define STAT_ECC 0x00004
-#define STAT_DRQ 0x00008
-#define STAT_SEEK 0x00010
-#define STAT_WRERR 0x00020
-#define STAT_READY 0x00040
-#define STAT_BUSY 0x00080
-
-#define ERR_AMNF 0x00100
-#define ERR_TK0NF 0x00200
-#define ERR_ABRT 0x00400
-#define ERR_MCR 0x00800
-#define ERR_IDNF 0x01000
-#define ERR_MC 0x02000
-#define ERR_UNC 0x04000
-#define ERR_TMO 0x10000
-
-#define IDE_READ 0x20
-#define IDE_WRITE 0x30
-#define IDE_READ_VRFY 0x40
-#define IDE_INIT_DEV_PARMS 0x91
-#define IDE_STANDBY 0x96
-#define IDE_ACKCHANGE 0xdb
-#define IDE_DOORLOCK 0xde
-#define IDE_DOORUNLOCK 0xdf
-#define IDE_IDENTIFY 0xec
-#define IDE_EJECT 0xed
-
-#define PD_NAMELEN 8
-
-struct pd_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int access; /* count of active opens ... */
- int capacity; /* Size of this volume in sectors */
- int heads; /* physical geometry */
- int sectors;
- int cylinders;
- int can_lba;
- int drive; /* master=0 slave=1 */
- int changed; /* Have we seen a disk change ? */
- int removable; /* removable media device ? */
- int standby;
- int alt_geom;
- char name[PD_NAMELEN]; /* pda, pdb, etc ... */
- struct gendisk *gd;
- struct blk_mq_tag_set tag_set;
- struct list_head rq_list;
-};
-
-static struct pd_unit pd[PD_UNITS];
-
-struct pd_req {
- /* for REQ_OP_DRV_IN: */
- enum action (*func)(struct pd_unit *disk);
-};
-
-static char pd_scratch[512]; /* scratch block buffer */
-
-static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
- "READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
- "IDNF", "MC", "UNC", "???", "TMO"
-};
-
-static void *par_drv; /* reference of parport driver */
-
-static inline int status_reg(struct pd_unit *disk)
-{
- return pi_read_regr(disk->pi, 1, 6);
-}
-
-static inline int read_reg(struct pd_unit *disk, int reg)
-{
- return pi_read_regr(disk->pi, 0, reg);
-}
-
-static inline void write_status(struct pd_unit *disk, int val)
-{
- pi_write_regr(disk->pi, 1, 6, val);
-}
-
-static inline void write_reg(struct pd_unit *disk, int reg, int val)
-{
- pi_write_regr(disk->pi, 0, reg, val);
-}
-
-static inline u8 DRIVE(struct pd_unit *disk)
-{
- return 0xa0+0x10*disk->drive;
-}
-
-/* ide command interface */
-
-static void pd_print_error(struct pd_unit *disk, char *msg, int status)
-{
- int i;
-
- printk("%s: %s: status = 0x%x =", disk->name, msg, status);
- for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
- if (status & (1 << i))
- printk(" %s", pd_errs[i]);
- printk("\n");
-}
-
-static void pd_reset(struct pd_unit *disk)
-{ /* called only for MASTER drive */
- write_status(disk, 4);
- udelay(50);
- write_status(disk, 0);
- udelay(250);
-}
-
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
-static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
-{ /* polled wait */
- int k, r, e;
-
- k = 0;
- while (k < PD_SPIN) {
- r = status_reg(disk);
- k++;
- if (((r & w) == w) && !(r & STAT_BUSY))
- break;
- udelay(PD_SPIN_DEL);
- }
- e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
- if (k >= PD_SPIN)
- e |= ERR_TMO;
- if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
- pd_print_error(disk, msg, e);
- return e;
-}
-
-static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
-{
- write_reg(disk, 6, DRIVE(disk) + h);
- write_reg(disk, 1, 0); /* the IDE task file */
- write_reg(disk, 2, n);
- write_reg(disk, 3, s);
- write_reg(disk, 4, c0);
- write_reg(disk, 5, c1);
- write_reg(disk, 7, func);
-
- udelay(1);
-}
-
-static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
-{
- int c1, c0, h, s;
-
- if (disk->can_lba) {
- s = block & 255;
- c0 = (block >>= 8) & 255;
- c1 = (block >>= 8) & 255;
- h = ((block >>= 8) & 15) + 0x40;
- } else {
- s = (block % disk->sectors) + 1;
- h = (block /= disk->sectors) % disk->heads;
- c0 = (block /= disk->heads) % 256;
- c1 = (block >>= 8);
- }
- pd_send_command(disk, count, s, h, c0, c1, func);
-}
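The non-LBA branch above packs a linear block number into cylinder/head/sector register values. A minimal stand-alone sketch of the same arithmetic (the 16-head, 63-sector geometry and the block number are example values, not taken from the driver):

#include <stdio.h>

/* Mirrors the CHS branch of pd_ide_command(): linear block -> C/H/S. */
static void chs_split(int block, int heads, int sectors,
                      int *s, int *h, int *c0, int *c1)
{
	*s = (block % sectors) + 1;	/* sector numbers start at 1 */
	block /= sectors;
	*h = block % heads;
	block /= heads;
	*c0 = block % 256;		/* cylinder, low byte */
	*c1 = block >> 8;		/* cylinder, high byte */
}

int main(void)
{
	int s, h, c0, c1;

	chs_split(10000, 16, 63, &s, &h, &c0, &c1);
	printf("block 10000 -> cyl %d, head %d, sector %d\n",
	       (c1 << 8) | c0, h, s);
	return 0;
}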
-
-/* The i/o request engine */
-
-enum action {Fail = 0, Ok = 1, Hold, Wait};
-
-static struct request *pd_req; /* current request */
-static enum action (*phase)(void);
-
-static void run_fsm(void);
-
-static void ps_tq_int(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
-
-static void schedule_fsm(void)
-{
- if (!nice)
- schedule_delayed_work(&fsm_tq, 0);
- else
- schedule_delayed_work(&fsm_tq, nice-1);
-}
-
-static void ps_tq_int(struct work_struct *work)
-{
- run_fsm();
-}
-
-static enum action do_pd_io_start(void);
-static enum action pd_special(void);
-static enum action do_pd_read_start(void);
-static enum action do_pd_write_start(void);
-static enum action do_pd_read_drq(void);
-static enum action do_pd_write_done(void);
-
-static int pd_queue;
-static int pd_claimed;
-
-static struct pd_unit *pd_current; /* current request's drive */
-static PIA *pi_current; /* current request's PIA */
-
-static int set_next_request(void)
-{
- struct gendisk *disk;
- struct request_queue *q;
- int old_pos = pd_queue;
-
- do {
- disk = pd[pd_queue].gd;
- q = disk ? disk->queue : NULL;
- if (++pd_queue == PD_UNITS)
- pd_queue = 0;
- if (q) {
- struct pd_unit *disk = q->queuedata;
-
- if (list_empty(&disk->rq_list))
- continue;
-
- pd_req = list_first_entry(&disk->rq_list,
- struct request,
- queuelist);
- list_del_init(&pd_req->queuelist);
- blk_mq_start_request(pd_req);
- break;
- }
- } while (pd_queue != old_pos);
-
- return pd_req != NULL;
-}
-
-static void run_fsm(void)
-{
- while (1) {
- enum action res;
- int stop = 0;
-
- if (!phase) {
- pd_current = pd_req->q->disk->private_data;
- pi_current = pd_current->pi;
- phase = do_pd_io_start;
- }
-
- switch (pd_claimed) {
- case 0:
- pd_claimed = 1;
- if (!pi_schedule_claimed(pi_current, run_fsm))
- return;
- fallthrough;
- case 1:
- pd_claimed = 2;
- pi_current->proto->connect(pi_current);
- }
-
- switch(res = phase()) {
- case Ok: case Fail: {
- blk_status_t err;
-
- err = res == Ok ? 0 : BLK_STS_IOERR;
- pi_disconnect(pi_current);
- pd_claimed = 0;
- phase = NULL;
- spin_lock_irq(&pd_lock);
- if (!blk_update_request(pd_req, err,
- blk_rq_cur_bytes(pd_req))) {
- __blk_mq_end_request(pd_req, err);
- pd_req = NULL;
- stop = !set_next_request();
- }
- spin_unlock_irq(&pd_lock);
- if (stop)
- return;
- }
- fallthrough;
- case Hold:
- schedule_fsm();
- return;
- case Wait:
- pi_disconnect(pi_current);
- pd_claimed = 0;
- }
- }
-}
-
-static int pd_retries = 0; /* i/o error retry count */
-static int pd_block; /* address of next requested block */
-static int pd_count; /* number of blocks still to do */
-static int pd_run; /* sectors in current cluster */
-static char *pd_buf; /* buffer for request in progress */
-
-static enum action do_pd_io_start(void)
-{
- switch (req_op(pd_req)) {
- case REQ_OP_DRV_IN:
- phase = pd_special;
- return pd_special();
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- pd_block = blk_rq_pos(pd_req);
- pd_count = blk_rq_cur_sectors(pd_req);
- if (pd_block + pd_count > get_capacity(pd_req->q->disk))
- return Fail;
- pd_run = blk_rq_sectors(pd_req);
- pd_buf = bio_data(pd_req->bio);
- pd_retries = 0;
- if (req_op(pd_req) == REQ_OP_READ)
- return do_pd_read_start();
- else
- return do_pd_write_start();
- default:
- break;
- }
- return Fail;
-}
-
-static enum action pd_special(void)
-{
- struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
-
- return req->func(pd_current);
-}
-
-static int pd_next_buf(void)
-{
- unsigned long saved_flags;
-
- pd_count--;
- pd_run--;
- pd_buf += 512;
- pd_block++;
- if (!pd_run)
- return 1;
- if (pd_count)
- return 0;
- spin_lock_irqsave(&pd_lock, saved_flags);
- if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
- __blk_mq_end_request(pd_req, 0);
- pd_req = NULL;
- pd_count = 0;
- pd_buf = NULL;
- } else {
- pd_count = blk_rq_cur_sectors(pd_req);
- pd_buf = bio_data(pd_req->bio);
- }
- spin_unlock_irqrestore(&pd_lock, saved_flags);
- return !pd_count;
-}
-
-static unsigned long pd_timeout;
-
-static enum action do_pd_read_start(void)
-{
- if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- return Wait;
- }
- return Fail;
- }
- pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
- phase = do_pd_read_drq;
- pd_timeout = jiffies + PD_TMO;
- return Hold;
-}
-
-static enum action do_pd_write_start(void)
-{
- if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- return Wait;
- }
- return Fail;
- }
- pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
- while (1) {
- if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- return Wait;
- }
- return Fail;
- }
- pi_write_block(pd_current->pi, pd_buf, 512);
- if (pd_next_buf())
- break;
- }
- phase = do_pd_write_done;
- pd_timeout = jiffies + PD_TMO;
- return Hold;
-}
-
-static inline int pd_ready(void)
-{
- return !(status_reg(pd_current) & STAT_BUSY);
-}
-
-static enum action do_pd_read_drq(void)
-{
- if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
- return Hold;
-
- while (1) {
- if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- phase = do_pd_read_start;
- return Wait;
- }
- return Fail;
- }
- pi_read_block(pd_current->pi, pd_buf, 512);
- if (pd_next_buf())
- break;
- }
- return Ok;
-}
-
-static enum action do_pd_write_done(void)
-{
- if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
- return Hold;
-
- if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- phase = do_pd_write_start;
- return Wait;
- }
- return Fail;
- }
- return Ok;
-}
-
-/* special io requests */
-
-/* According to the ATA standard, the default CHS geometry should be
- available following a reset. Some Western Digital drives come up
- in a mode where only LBA addresses are accepted until the device
- parameters are initialised.
-*/
-
-static void pd_init_dev_parms(struct pd_unit *disk)
-{
- pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
- pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
- IDE_INIT_DEV_PARMS);
- udelay(300);
- pd_wait_for(disk, 0, "Initialise device parameters");
-}
-
-static enum action pd_door_lock(struct pd_unit *disk)
-{
- if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
- pd_wait_for(disk, STAT_READY, "Lock done");
- }
- return Ok;
-}
-
-static enum action pd_door_unlock(struct pd_unit *disk)
-{
- if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
- pd_wait_for(disk, STAT_READY, "Lock done");
- }
- return Ok;
-}
-
-static enum action pd_eject(struct pd_unit *disk)
-{
- pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
- pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
- pd_wait_for(disk, 0, DBMSG("before eject"));
- pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
- pd_wait_for(disk, 0, DBMSG("after eject"));
- return Ok;
-}
-
-static enum action pd_media_check(struct pd_unit *disk)
-{
- int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
- if (!(r & STAT_ERR)) {
- pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
- r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
- } else
- disk->changed = 1; /* say changed if other error */
- if (r & ERR_MC) {
- disk->changed = 1;
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
- pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
- pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
- r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
- }
- return Ok;
-}
-
-static void pd_standby_off(struct pd_unit *disk)
-{
- pd_wait_for(disk, 0, DBMSG("before STANDBY"));
- pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
- pd_wait_for(disk, 0, DBMSG("after STANDBY"));
-}
-
-static enum action pd_identify(struct pd_unit *disk)
-{
- int j;
- char id[PD_ID_LEN + 1];
-
-/* WARNING: here there may be dragons. reset() applies to both drives,
- but we call it only on probing the MASTER. This should allow most
- common configurations to work, but be warned that a reset can clear
- settings on the SLAVE drive.
-*/
-
- if (disk->drive == 0)
- pd_reset(disk);
-
- write_reg(disk, 6, DRIVE(disk));
- pd_wait_for(disk, 0, DBMSG("before IDENT"));
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);
-
- if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
- return Fail;
- pi_read_block(disk->pi, pd_scratch, 512);
- disk->can_lba = pd_scratch[99] & 2;
- disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
- disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
- disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
- if (disk->can_lba)
- disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
- else
- disk->capacity = disk->sectors * disk->heads * disk->cylinders;
-
- for (j = 0; j < PD_ID_LEN; j++)
- id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
- j = PD_ID_LEN - 1;
- while ((j >= 0) && (id[j] <= 0x20))
- j--;
- j++;
- id[j] = 0;
-
- disk->removable = pd_scratch[0] & 0x80;
-
- printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
- disk->name, id,
- disk->drive ? "slave" : "master",
- disk->capacity, disk->capacity / 2048,
- disk->cylinders, disk->heads, disk->sectors,
- disk->removable ? "removable" : "fixed");
-
- if (disk->capacity)
- pd_init_dev_parms(disk);
- if (!disk->standby)
- pd_standby_off(disk);
-
- return Ok;
-}
-
-/* end of io request engine */
-
-static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct pd_unit *disk = hctx->queue->queuedata;
-
- spin_lock_irq(&pd_lock);
- if (!pd_req) {
- pd_req = bd->rq;
- blk_mq_start_request(pd_req);
- } else
- list_add_tail(&bd->rq->queuelist, &disk->rq_list);
- spin_unlock_irq(&pd_lock);
-
- run_fsm();
- return BLK_STS_OK;
-}
-
-static int pd_special_command(struct pd_unit *disk,
- enum action (*func)(struct pd_unit *disk))
-{
- struct request *rq;
- struct pd_req *req;
-
- rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- req = blk_mq_rq_to_pdu(rq);
-
- req->func = func;
- blk_execute_rq(rq, false);
- blk_mq_free_request(rq);
- return 0;
-}
-
-/* kernel glue structures */
-
-static int pd_open(struct block_device *bdev, fmode_t mode)
-{
- struct pd_unit *disk = bdev->bd_disk->private_data;
-
- mutex_lock(&pd_mutex);
- disk->access++;
-
- if (disk->removable) {
- pd_special_command(disk, pd_media_check);
- pd_special_command(disk, pd_door_lock);
- }
- mutex_unlock(&pd_mutex);
- return 0;
-}
-
-static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct pd_unit *disk = bdev->bd_disk->private_data;
-
- if (disk->alt_geom) {
- geo->heads = PD_LOG_HEADS;
- geo->sectors = PD_LOG_SECTS;
- geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
- } else {
- geo->heads = disk->heads;
- geo->sectors = disk->sectors;
- geo->cylinders = disk->cylinders;
- }
-
- return 0;
-}
-
-static int pd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct pd_unit *disk = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case CDROMEJECT:
- mutex_lock(&pd_mutex);
- if (disk->access == 1)
- pd_special_command(disk, pd_eject);
- mutex_unlock(&pd_mutex);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static void pd_release(struct gendisk *p, fmode_t mode)
-{
- struct pd_unit *disk = p->private_data;
-
- mutex_lock(&pd_mutex);
- if (!--disk->access && disk->removable)
- pd_special_command(disk, pd_door_unlock);
- mutex_unlock(&pd_mutex);
-}
-
-static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
-{
- struct pd_unit *disk = p->private_data;
- int r;
- if (!disk->removable)
- return 0;
- pd_special_command(disk, pd_media_check);
- r = disk->changed;
- disk->changed = 0;
- return r ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static const struct block_device_operations pd_fops = {
- .owner = THIS_MODULE,
- .open = pd_open,
- .release = pd_release,
- .ioctl = pd_ioctl,
- .compat_ioctl = pd_ioctl,
- .getgeo = pd_getgeo,
- .check_events = pd_check_events,
-};
-
-/* probing */
-
-static const struct blk_mq_ops pd_mq_ops = {
- .queue_rq = pd_queue_rq,
-};
-
-static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
- int mode, int unit, int protocol, int delay)
-{
- int index = disk - pd;
- int *parm = *drives[index];
- struct gendisk *p;
- int ret;
-
- disk->pi = &disk->pia;
- disk->access = 0;
- disk->changed = 1;
- disk->capacity = 0;
- disk->drive = parm[D_SLV];
- snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + index);
- disk->alt_geom = parm[D_GEO];
- disk->standby = parm[D_SBY];
- INIT_LIST_HEAD(&disk->rq_list);
-
- if (!pi_init(disk->pi, autoprobe, port, mode, unit, protocol, delay,
- pd_scratch, PI_PD, verbose, disk->name))
- return -ENXIO;
-
- memset(&disk->tag_set, 0, sizeof(disk->tag_set));
- disk->tag_set.ops = &pd_mq_ops;
- disk->tag_set.cmd_size = sizeof(struct pd_req);
- disk->tag_set.nr_hw_queues = 1;
- disk->tag_set.nr_maps = 1;
- disk->tag_set.queue_depth = 2;
- disk->tag_set.numa_node = NUMA_NO_NODE;
- disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- ret = blk_mq_alloc_tag_set(&disk->tag_set);
- if (ret)
- goto pi_release;
-
- p = blk_mq_alloc_disk(&disk->tag_set, disk);
- if (IS_ERR(p)) {
- ret = PTR_ERR(p);
- goto free_tag_set;
- }
- disk->gd = p;
-
- strcpy(p->disk_name, disk->name);
- p->fops = &pd_fops;
- p->major = major;
- p->first_minor = (disk - pd) << PD_BITS;
- p->minors = 1 << PD_BITS;
- p->events = DISK_EVENT_MEDIA_CHANGE;
- p->private_data = disk;
- blk_queue_max_hw_sectors(p->queue, cluster);
- blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
-
- if (disk->drive == -1) {
- for (disk->drive = 0; disk->drive <= 1; disk->drive++) {
- ret = pd_special_command(disk, pd_identify);
- if (ret == 0)
- break;
- }
- } else {
- ret = pd_special_command(disk, pd_identify);
- }
- if (ret)
- goto put_disk;
- set_capacity(disk->gd, disk->capacity);
- ret = add_disk(disk->gd);
- if (ret)
- goto cleanup_disk;
- return 0;
-cleanup_disk:
- put_disk(disk->gd);
-put_disk:
- put_disk(p);
- disk->gd = NULL;
-free_tag_set:
- blk_mq_free_tag_set(&disk->tag_set);
-pi_release:
- pi_release(disk->pi);
- return ret;
-}
-
-static int __init pd_init(void)
-{
- int found = 0, unit, pd_drive_count = 0;
- struct pd_unit *disk;
-
- if (disable)
- return -ENODEV;
-
- if (register_blkdev(major, name))
- return -ENODEV;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PD_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- goto out_unregister_blkdev;
- }
-
- for (unit = 0; unit < PD_UNITS; unit++) {
- int *parm = *drives[unit];
-
- if (parm[D_PRT])
- pd_drive_count++;
- }
-
- if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- if (!pd_probe_drive(pd, 1, -1, -1, -1, -1, -1))
- found++;
- } else {
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- int *parm = *drives[unit];
- if (!parm[D_PRT])
- continue;
- if (!pd_probe_drive(disk, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY]))
- found++;
- }
- }
- if (!found) {
- printk("%s: no valid drive found\n", name);
- goto out_pi_unregister_driver;
- }
-
- return 0;
-
-out_pi_unregister_driver:
- pi_unregister_driver(par_drv);
-out_unregister_blkdev:
- unregister_blkdev(major, name);
- return -ENODEV;
-}
-
-static void __exit pd_exit(void)
-{
- struct pd_unit *disk;
- int unit;
- unregister_blkdev(major, name);
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- struct gendisk *p = disk->gd;
- if (p) {
- disk->gd = NULL;
- del_gendisk(p);
- put_disk(p);
- blk_mq_free_tag_set(&disk->tag_set);
- pi_release(disk->pi);
- }
- }
-}
-
-MODULE_LICENSE("GPL");
-module_init(pd_init)
-module_exit(pd_exit)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
deleted file mode 100644
index eec1b9fde245..000000000000
--- a/drivers/block/paride/pf.c
+++ /dev/null
@@ -1,1057 +0,0 @@
-/*
- pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the high-level driver for parallel port ATAPI disk
- drives based on chips supported by the paride module.
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI disk drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The behaviour of the pf driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-7 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI drives can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <lun> Some ATAPI devices support multiple LUNs.
- One example is the ATAPI PD/CD drive from
- Matshita/Panasonic. This device has a
- CD drive on LUN 0 and a PD drive on LUN 1.
- By default, the driver will search for the
- first LUN with a supported device. Set
- this parameter to force it to use a specific
- LUN. (default -1)
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (47) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pf").
-
- cluster The driver will attempt to aggregate requests
- for adjacent blocks into larger multi-block
- clusters. The maximum cluster size (in 512
- byte sectors) is set with this parameter.
- (default 64)
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- nice This parameter controls the driver's use of
- idle CPU time, at the expense of some speed.
-
- If this driver is built into the kernel, you can use the
- following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pf.drive0
- pf.drive1
- pf.drive2
- pf.drive3
- pf.cluster
- pf.nice
-
- In addition, you can use the parameter pf.disable to disable
- the driver entirely.
-
-*/
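For illustration only (the port address is hypothetical): with the 7-integer format documented above, a slave ATAPI disk on LUN 1 behind a port at 0x278 could be given as drive0=0x278,0,0,-1,1,1,-1 when loading the module, or as pf.drive0=0x278,0,0,-1,1,1,-1 when the driver is built into the kernel.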
-
-/* Changes:
-
- 1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti().
- Fix for drives that don't clear STAT_ERR
- until after next CDB delivered.
- Small change in pf_completion to round
- up transfer size.
- 1.02 GRG 1998.06.16 Eliminated an Ugh
- 1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging
- 1.04 GRG 1998.09.24 Added jumbo support
-
-*/
-
-#define PF_VERSION "1.04"
-#define PF_MAJOR 47
-#define PF_NAME "pf"
-#define PF_UNITS 4
-
-#include <linux/types.h>
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is off
- by default.
-
-*/
-
-static bool verbose = 0;
-static int major = PF_MAJOR;
-static char *name = PF_NAME;
-static int cluster = 64;
-static int nice = 0;
-static int disable = 0;
-
-static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
-static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
-static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
-static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };
-
-static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
-static int pf_drive_count;
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/cdrom.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/blkpg.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-
-static DEFINE_MUTEX(pf_mutex);
-static DEFINE_SPINLOCK(pf_spin_lock);
-
-module_param(verbose, bool, 0644);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param(cluster, int, 0);
-module_param(nice, int, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-#include "pseudo.h"
-
-/* constants for faking geometry numbers */
-
-#define PF_FD_MAX 8192 /* use FD geometry under this size */
-#define PF_FD_HDS 2
-#define PF_FD_SPT 18
-#define PF_HD_HDS 64
-#define PF_HD_SPT 32
-
-#define PF_MAX_RETRIES 5
-#define PF_TMO 800 /* interrupt timeout in jiffies */
-#define PF_SPIN_DEL 50 /* spin delay in micro-seconds */
-
-#define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL)
-
-#define STAT_ERR 0x00001
-#define STAT_INDEX 0x00002
-#define STAT_ECC 0x00004
-#define STAT_DRQ 0x00008
-#define STAT_SEEK 0x00010
-#define STAT_WRERR 0x00020
-#define STAT_READY 0x00040
-#define STAT_BUSY 0x00080
-
-#define ATAPI_REQ_SENSE 0x03
-#define ATAPI_LOCK 0x1e
-#define ATAPI_DOOR 0x1b
-#define ATAPI_MODE_SENSE 0x5a
-#define ATAPI_CAPACITY 0x25
-#define ATAPI_IDENTIFY 0x12
-#define ATAPI_READ_10 0x28
-#define ATAPI_WRITE_10 0x2a
-
-static int pf_open(struct block_device *bdev, fmode_t mode);
-static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd);
-static int pf_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static void pf_release(struct gendisk *disk, fmode_t mode);
-
-static void do_pf_read(void);
-static void do_pf_read_start(void);
-static void do_pf_write(void);
-static void do_pf_write_start(void);
-static void do_pf_read_drq(void);
-static void do_pf_write_done(void);
-
-#define PF_NM 0
-#define PF_RO 1
-#define PF_RW 2
-
-#define PF_NAMELEN 8
-
-struct pf_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int removable; /* removable media device ? */
- int media_status; /* media present ? WP ? */
- int drive; /* drive */
- int lun;
- int access; /* count of active opens ... */
- int present; /* device present ? */
- char name[PF_NAMELEN]; /* pf0, pf1, ... */
- struct gendisk *disk;
- struct blk_mq_tag_set tag_set;
- struct list_head rq_list;
-};
-
-static struct pf_unit units[PF_UNITS];
-
-static int pf_identify(struct pf_unit *pf);
-static void pf_lock(struct pf_unit *pf, int func);
-static void pf_eject(struct pf_unit *pf);
-static unsigned int pf_check_events(struct gendisk *disk,
- unsigned int clearing);
-
-static char pf_scratch[512]; /* scratch block buffer */
-
-/* the variables below are used mainly in the I/O request engine, which
- processes only one request at a time.
-*/
-
-static int pf_retries = 0; /* i/o error retry count */
-static int pf_busy = 0; /* request being processed ? */
-static struct request *pf_req; /* current request */
-static int pf_block; /* address of next requested block */
-static int pf_count; /* number of blocks still to do */
-static int pf_run; /* sectors in current cluster */
-static int pf_cmd; /* current command READ/WRITE */
-static struct pf_unit *pf_current;/* unit of current request */
-static int pf_mask; /* stopper for pseudo-int */
-static char *pf_buf; /* buffer for request in progress */
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static const struct block_device_operations pf_fops = {
- .owner = THIS_MODULE,
- .open = pf_open,
- .release = pf_release,
- .ioctl = pf_ioctl,
- .compat_ioctl = pf_ioctl,
- .getgeo = pf_getgeo,
- .check_events = pf_check_events,
-};
-
-static const struct blk_mq_ops pf_mq_ops = {
- .queue_rq = pf_queue_rq,
-};
-
-static int pf_open(struct block_device *bdev, fmode_t mode)
-{
- struct pf_unit *pf = bdev->bd_disk->private_data;
- int ret;
-
- mutex_lock(&pf_mutex);
- pf_identify(pf);
-
- ret = -ENODEV;
- if (pf->media_status == PF_NM)
- goto out;
-
- ret = -EROFS;
- if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
- goto out;
-
- ret = 0;
- pf->access++;
- if (pf->removable)
- pf_lock(pf, 1);
-out:
- mutex_unlock(&pf_mutex);
- return ret;
-}
-
-static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct pf_unit *pf = bdev->bd_disk->private_data;
- sector_t capacity = get_capacity(pf->disk);
-
- if (capacity < PF_FD_MAX) {
- geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
- geo->heads = PF_FD_HDS;
- geo->sectors = PF_FD_SPT;
- } else {
- geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
- geo->heads = PF_HD_HDS;
- geo->sectors = PF_HD_SPT;
- }
-
- return 0;
-}
-
-static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- struct pf_unit *pf = bdev->bd_disk->private_data;
-
- if (cmd != CDROMEJECT)
- return -EINVAL;
-
- if (pf->access != 1)
- return -EBUSY;
- mutex_lock(&pf_mutex);
- pf_eject(pf);
- mutex_unlock(&pf_mutex);
-
- return 0;
-}
-
-static void pf_release(struct gendisk *disk, fmode_t mode)
-{
- struct pf_unit *pf = disk->private_data;
-
- mutex_lock(&pf_mutex);
- if (pf->access <= 0) {
- mutex_unlock(&pf_mutex);
- WARN_ON(1);
- return;
- }
-
- pf->access--;
-
- if (!pf->access && pf->removable)
- pf_lock(pf, 0);
-
- mutex_unlock(&pf_mutex);
-}
-
-static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
-{
- return DISK_EVENT_MEDIA_CHANGE;
-}
-
-static inline int status_reg(struct pf_unit *pf)
-{
- return pi_read_regr(pf->pi, 1, 6);
-}
-
-static inline int read_reg(struct pf_unit *pf, int reg)
-{
- return pi_read_regr(pf->pi, 0, reg);
-}
-
-static inline void write_reg(struct pf_unit *pf, int reg, int val)
-{
- pi_write_regr(pf->pi, 0, reg, val);
-}
-
-static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
-{
- int j, r, e, s, p;
-
- j = 0;
- while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
- && (j++ < PF_SPIN))
- udelay(PF_SPIN_DEL);
-
- if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
- s = read_reg(pf, 7);
- e = read_reg(pf, 1);
- p = read_reg(pf, 2);
- if (j > PF_SPIN)
- e |= 0x100;
- if (fun)
- printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
- " loop=%d phase=%d\n",
- pf->name, fun, msg, r, s, e, j, p);
- return (e << 8) + s;
- }
- return 0;
-}
-
-static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
-{
- pi_connect(pf->pi);
-
- write_reg(pf, 6, 0xa0+0x10*pf->drive);
-
- if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
- pi_disconnect(pf->pi);
- return -1;
- }
-
- write_reg(pf, 4, dlen % 256);
- write_reg(pf, 5, dlen / 256);
- write_reg(pf, 7, 0xa0); /* ATAPI packet command */
-
- if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
- pi_disconnect(pf->pi);
- return -1;
- }
-
- if (read_reg(pf, 2) != 1) {
- printk("%s: %s: command phase error\n", pf->name, fun);
- pi_disconnect(pf->pi);
- return -1;
- }
-
- pi_write_block(pf->pi, cmd, 12);
-
- return 0;
-}
-
-static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
-{
- int r, s, n;
-
- r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- fun, "completion");
-
- if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
- n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
- 3) & 0xfffc);
- pi_read_block(pf->pi, buf, n);
- }
-
- s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
-
- pi_disconnect(pf->pi);
-
- return (r ? r : s);
-}
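The "(... + 3) & 0xfffc" expression above rounds the byte count announced by the drive up to a multiple of four before reading it from the port (a drive reporting 14 bytes, for instance, results in a 16-byte read), matching the 1.01 changelog note about rounding up the transfer size.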
-
-static void pf_req_sense(struct pf_unit *pf, int quiet)
-{
- char rs_cmd[12] =
- { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
- char buf[16];
- int r;
-
- r = pf_command(pf, rs_cmd, 16, "Request sense");
- mdelay(1);
- if (!r)
- pf_completion(pf, buf, "Request sense");
-
- if ((!r) && (!quiet))
- printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
- pf->name, buf[2] & 0xf, buf[12], buf[13]);
-}
-
-static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
-{
- int r;
-
- r = pf_command(pf, cmd, dlen, fun);
- mdelay(1);
- if (!r)
- r = pf_completion(pf, buf, fun);
- if (r)
- pf_req_sense(pf, !fun);
-
- return r;
-}
-
-static void pf_lock(struct pf_unit *pf, int func)
-{
- char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
-
- pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
-}
-
-static void pf_eject(struct pf_unit *pf)
-{
- char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
-
- pf_lock(pf, 0);
- pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
-}
-
-#define PF_RESET_TMO 30 /* in tenths of a second */
-
-static void pf_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-/* the ATAPI standard actually specifies the contents of all 7 registers
- after a reset, but the specification is ambiguous concerning the last
- two bytes, and different drives interpret the standard differently.
- */
-
-static int pf_reset(struct pf_unit *pf)
-{
- int i, k, flg;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
-
- pi_connect(pf->pi);
- write_reg(pf, 6, 0xa0+0x10*pf->drive);
- write_reg(pf, 7, 8);
-
- pf_sleep(20 * HZ / 1000);
-
- k = 0;
- while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
- pf_sleep(HZ / 10);
-
- flg = 1;
- for (i = 0; i < 5; i++)
- flg &= (read_reg(pf, i + 1) == expect[i]);
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", pf->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", read_reg(pf, i + 1));
- if (!flg)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(pf->pi);
- return flg - 1;
-}
-
-static void pf_mode_sense(struct pf_unit *pf)
-{
- char ms_cmd[12] =
- { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
- char buf[8];
-
- pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
- pf->media_status = PF_RW;
- if (buf[3] & 0x80)
- pf->media_status = PF_RO;
-}
-
-static void xs(char *buf, char *targ, int offs, int len)
-{
- int j, k, l;
-
- j = 0;
- l = 0;
- for (k = 0; k < len; k++)
- if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
- l = targ[j++] = buf[k + offs];
- if (l == 0x20)
- j--;
- targ[j] = 0;
-}
-
-static int xl(char *buf, int offs)
-{
- int v, k;
-
- v = 0;
- for (k = 0; k < 4; k++)
- v = v * 256 + (buf[k + offs] & 0xff);
- return v;
-}
-
-static void pf_get_capacity(struct pf_unit *pf)
-{
- char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- char buf[8];
- int bs;
-
- if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
- pf->media_status = PF_NM;
- return;
- }
- set_capacity(pf->disk, xl(buf, 0) + 1);
- bs = xl(buf, 4);
- if (bs != 512) {
- set_capacity(pf->disk, 0);
- if (verbose)
- printk("%s: Drive %d, LUN %d,"
- " unsupported block size %d\n",
- pf->name, pf->drive, pf->lun, bs);
- }
-}
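pf_get_capacity() above decodes a READ CAPACITY reply, whose first four bytes hold the big-endian address of the last readable block and whose next four hold the block length; a hypothetical reply of 00 03 0f 3f 00 00 02 00 would therefore give 0x30f3f + 1 = 200512 sectors of 512 bytes.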
-
-static int pf_identify(struct pf_unit *pf)
-{
- int dt, s;
- char *ms[2] = { "master", "slave" };
- char mf[10], id[18];
- char id_cmd[12] =
- { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char buf[36];
-
- s = pf_atapi(pf, id_cmd, 36, buf, "identify");
- if (s)
- return -1;
-
- dt = buf[0] & 0x1f;
- if ((dt != 0) && (dt != 7)) {
- if (verbose)
- printk("%s: Drive %d, LUN %d, unsupported type %d\n",
- pf->name, pf->drive, pf->lun, dt);
- return -1;
- }
-
- xs(buf, mf, 8, 8);
- xs(buf, id, 16, 16);
-
- pf->removable = (buf[1] & 0x80);
-
- pf_mode_sense(pf);
- pf_mode_sense(pf);
- pf_mode_sense(pf);
-
- pf_get_capacity(pf);
-
- printk("%s: %s %s, %s LUN %d, type %d",
- pf->name, mf, id, ms[pf->drive], pf->lun, dt);
- if (pf->removable)
- printk(", removable");
- if (pf->media_status == PF_NM)
- printk(", no media\n");
- else {
- if (pf->media_status == PF_RO)
- printk(", RO");
- printk(", %llu blocks\n",
- (unsigned long long)get_capacity(pf->disk));
- }
- return 0;
-}
-
-/*
- * returns 0, with id set if drive is detected, otherwise an error code.
- */
-static int pf_probe(struct pf_unit *pf)
-{
- if (pf->drive == -1) {
- for (pf->drive = 0; pf->drive <= 1; pf->drive++)
- if (!pf_reset(pf)) {
- if (pf->lun != -1)
- return pf_identify(pf);
- else
- for (pf->lun = 0; pf->lun < 8; pf->lun++)
- if (!pf_identify(pf))
- return 0;
- }
- } else {
- if (pf_reset(pf))
- return -1;
- if (pf->lun != -1)
- return pf_identify(pf);
- for (pf->lun = 0; pf->lun < 8; pf->lun++)
- if (!pf_identify(pf))
- return 0;
- }
- return -ENODEV;
-}
-
-/* The i/o request engine */
-
-static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
-{
- int i;
- char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
- for (i = 0; i < 4; i++) {
- io_cmd[5 - i] = b & 0xff;
- b = b >> 8;
- }
-
- io_cmd[8] = c & 0xff;
- io_cmd[7] = (c >> 8) & 0xff;
-
- i = pf_command(pf, io_cmd, c * 512, "start i/o");
-
- mdelay(1);
-
- return i;
-}
-
-static int pf_ready(void)
-{
- return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
-}
-
-static int pf_queue;
-
-static int set_next_request(void)
-{
- struct pf_unit *pf;
- int old_pos = pf_queue;
-
- do {
- pf = &units[pf_queue];
- if (++pf_queue == PF_UNITS)
- pf_queue = 0;
- if (pf->present && !list_empty(&pf->rq_list)) {
- pf_req = list_first_entry(&pf->rq_list, struct request,
- queuelist);
- list_del_init(&pf_req->queuelist);
- blk_mq_start_request(pf_req);
- break;
- }
- } while (pf_queue != old_pos);
-
- return pf_req != NULL;
-}
-
-static void pf_end_request(blk_status_t err)
-{
- if (!pf_req)
- return;
- if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
- __blk_mq_end_request(pf_req, err);
- pf_req = NULL;
- }
-}
-
-static void pf_request(void)
-{
- if (pf_busy)
- return;
-repeat:
- if (!pf_req && !set_next_request())
- return;
-
- pf_current = pf_req->q->disk->private_data;
- pf_block = blk_rq_pos(pf_req);
- pf_run = blk_rq_sectors(pf_req);
- pf_count = blk_rq_cur_sectors(pf_req);
-
- if (pf_block + pf_count > get_capacity(pf_req->q->disk)) {
- pf_end_request(BLK_STS_IOERR);
- goto repeat;
- }
-
- pf_cmd = rq_data_dir(pf_req);
- pf_buf = bio_data(pf_req->bio);
- pf_retries = 0;
-
- pf_busy = 1;
- if (pf_cmd == READ)
- pi_do_claimed(pf_current->pi, do_pf_read);
- else if (pf_cmd == WRITE)
- pi_do_claimed(pf_current->pi, do_pf_write);
- else {
- pf_busy = 0;
- pf_end_request(BLK_STS_IOERR);
- goto repeat;
- }
-}
-
-static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct pf_unit *pf = hctx->queue->queuedata;
-
- spin_lock_irq(&pf_spin_lock);
- list_add_tail(&bd->rq->queuelist, &pf->rq_list);
- pf_request();
- spin_unlock_irq(&pf_spin_lock);
-
- return BLK_STS_OK;
-}
-
-static int pf_next_buf(void)
-{
- unsigned long saved_flags;
-
- pf_count--;
- pf_run--;
- pf_buf += 512;
- pf_block++;
- if (!pf_run)
- return 1;
- if (!pf_count) {
- spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(0);
- spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
- if (!pf_req)
- return 1;
- pf_count = blk_rq_cur_sectors(pf_req);
- pf_buf = bio_data(pf_req->bio);
- }
- return 0;
-}
-
-static inline void next_request(blk_status_t err)
-{
- unsigned long saved_flags;
-
- spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(err);
- pf_busy = 0;
- pf_request();
- spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
-}
-
-/* detach from the calling context - in case the spinlock is held */
-static void do_pf_read(void)
-{
- ps_set_intr(do_pf_read_start, NULL, 0, nice);
-}
-
-static void do_pf_read_start(void)
-{
- pf_busy = 1;
-
- if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_read_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pf_mask = STAT_DRQ;
- ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
-}
-
-static void do_pf_read_drq(void)
-{
- while (1) {
- if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
- "read block", "completion") & STAT_ERR) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_req_sense(pf_current, 0);
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_read_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pi_read_block(pf_current->pi, pf_buf, 512);
- if (pf_next_buf())
- break;
- }
- pi_disconnect(pf_current->pi);
- next_request(0);
-}
-
-static void do_pf_write(void)
-{
- ps_set_intr(do_pf_write_start, NULL, 0, nice);
-}
-
-static void do_pf_write_start(void)
-{
- pf_busy = 1;
-
- if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_write_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
-
- while (1) {
- if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
- "write block", "data wait") & STAT_ERR) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_write_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pi_write_block(pf_current->pi, pf_buf, 512);
- if (pf_next_buf())
- break;
- }
- pf_mask = 0;
- ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
-}
-
-static void do_pf_write_done(void)
-{
- if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_write_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pi_disconnect(pf_current->pi);
- next_request(0);
-}
-
-static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
- int mode, int unit, int protocol, int delay, int ms)
-{
- struct gendisk *disk;
- int ret;
-
- ret = blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
- if (ret)
- return ret;
-
- disk = blk_mq_alloc_disk(&pf->tag_set, pf);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_free_tag_set;
- }
- disk->major = major;
- disk->first_minor = pf - units;
- disk->minors = 1;
- strcpy(disk->disk_name, pf->name);
- disk->fops = &pf_fops;
- disk->flags |= GENHD_FL_NO_PART;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- disk->private_data = pf;
-
- blk_queue_max_segments(disk->queue, cluster);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
-
- INIT_LIST_HEAD(&pf->rq_list);
- pf->disk = disk;
- pf->pi = &pf->pia;
- pf->media_status = PF_NM;
- pf->drive = (*drives[disk->first_minor])[D_SLV];
- pf->lun = (*drives[disk->first_minor])[D_LUN];
- snprintf(pf->name, PF_NAMELEN, "%s%d", name, disk->first_minor);
-
- if (!pi_init(pf->pi, autoprobe, port, mode, unit, protocol, delay,
- pf_scratch, PI_PF, verbose, pf->name)) {
- ret = -ENODEV;
- goto out_free_disk;
- }
- ret = pf_probe(pf);
- if (ret)
- goto out_pi_release;
-
- ret = add_disk(disk);
- if (ret)
- goto out_pi_release;
- pf->present = 1;
- return 0;
-
-out_pi_release:
- pi_release(pf->pi);
-out_free_disk:
- put_disk(pf->disk);
-out_free_tag_set:
- blk_mq_free_tag_set(&pf->tag_set);
- return ret;
-}
-
-static int __init pf_init(void)
-{ /* preliminary initialisation */
- struct pf_unit *pf;
- int found = 0, unit;
-
- if (disable)
- return -EINVAL;
-
- if (register_blkdev(major, name))
- return -EBUSY;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PF_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- goto out_unregister_blkdev;
- }
-
- for (unit = 0; unit < PF_UNITS; unit++) {
- if (!(*drives[unit])[D_PRT])
- pf_drive_count++;
- }
-
- pf = units;
- if (pf_drive_count == 0) {
- if (pf_init_unit(pf, 1, -1, -1, -1, -1, -1, verbose))
- found++;
- } else {
- for (unit = 0; unit < PF_UNITS; unit++, pf++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (pf_init_unit(pf, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- verbose))
- found++;
- }
- }
- if (!found) {
- printk("%s: No ATAPI disk detected\n", name);
- goto out_unregister_pi_driver;
- }
- pf_busy = 0;
- return 0;
-
-out_unregister_pi_driver:
- pi_unregister_driver(par_drv);
-out_unregister_blkdev:
- unregister_blkdev(major, name);
- return -ENODEV;
-}
-
-static void __exit pf_exit(void)
-{
- struct pf_unit *pf;
- int unit;
-
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->present)
- continue;
- del_gendisk(pf->disk);
- put_disk(pf->disk);
- blk_mq_free_tag_set(&pf->tag_set);
- pi_release(pf->pi);
- }
-
- unregister_blkdev(major, name);
-}
-
-MODULE_LICENSE("GPL");
-module_init(pf_init)
-module_exit(pf_exit)
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
deleted file mode 100644
index 3b5882bfb736..000000000000
--- a/drivers/block/paride/pg.c
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- pg.c (c) 1998 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- The pg driver provides a simple character device interface for
- sending ATAPI commands to a device. With the exception of the
- ATAPI reset operation, all operations are performed by a pair
- of read and write operations to the appropriate /dev/pgN device.
- A write operation delivers a command and any outbound data in
- a single buffer. Normally, the write will succeed unless the
- device is offline or malfunctioning, or there is already another
- command pending. If the write succeeds, it should be followed
- immediately by a read operation, to obtain any returned data and
- status information. A read will fail if there is no operation
- in progress.
-
- As a special case, the device can be reset with a write operation,
- and in this case, no following read is expected, or permitted.
-
- There are no ioctl() operations. Any single operation
- may transfer at most PG_MAX_DATA bytes. Note that the driver must
- copy the data through an internal buffer. In keeping with all
- current ATAPI devices, command packets are assumed to be exactly
- 12 bytes in length.
-
- To permit future changes to this interface, the headers in the
- read and write buffers contain a single character "magic" flag.
- Currently this flag must be the character "P".
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI device, but if their individual parameters are
- specified, the driver can handle up to 4 devices.
-
- To use this device, you must have the following device
- special files defined:
-
- /dev/pg0 c 97 0
- /dev/pg1 c 97 1
- /dev/pg2 c 97 2
- /dev/pg3 c 97 3
-
- (You'll need to change the 97 to something else if you use
- the 'major' parameter to install the driver on a different
- major number.)
-
- The behaviour of the pg driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-6 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI devices can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (97) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pg").
-
- verbose This parameter controls the amount of logging
- that is done by the driver. Set it to 0 for
- quiet operation, to 1 to enable progress
- messages while the driver probes for devices,
- or to 2 for full debug logging. (default 0)
-
- If this driver is built into the kernel, you can use
- the following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pg.drive0
- pg.drive1
- pg.drive2
- pg.drive3
-
- In addition, you can use the parameter pg.disable to disable
- the driver entirely.
-
-*/
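[Editorial illustration, not part of the deleted file.] The write-then-read protocol described above can be driven from user space with the pg_write_hdr and pg_read_hdr structures that pg_write() and pg_read() parse further down; both come from <linux/pg.h>, which this file includes. A minimal sketch, assuming a device node at /dev/pg0 and an arbitrary 10-second timeout, that issues an ATAPI TEST UNIT READY (an all-zero 12-byte packet) and prints the returned sense key:

/*
 * pg_tur.c - exercise the /dev/pgN interface described above.
 * One write() delivers the command header plus packet; one read()
 * collects the status header (and any data, none for TEST UNIT READY).
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/pg.h>

int main(void)
{
	struct pg_write_hdr whdr;
	struct pg_read_hdr rhdr;
	int fd = open("/dev/pg0", O_RDWR);	/* path is an assumption */

	if (fd < 0) {
		perror("open /dev/pg0");
		return 1;
	}

	memset(&whdr, 0, sizeof(whdr));
	whdr.magic = PG_MAGIC;		/* the 'P' flag checked by pg_write() */
	whdr.func = PG_COMMAND;		/* deliver an ATAPI packet */
	whdr.dlen = 0;			/* TEST UNIT READY moves no data */
	whdr.timeout = 10;		/* seconds */
	/* whdr.packet is already all zeroes == TEST UNIT READY */

	if (write(fd, &whdr, sizeof(whdr)) < 0) {
		perror("write");
		return 1;
	}
	if (read(fd, &rhdr, sizeof(rhdr)) < 0) {
		perror("read");
		return 1;
	}
	printf("sense key 0x%x, %d data bytes, took about %d s\n",
	       rhdr.scsi, rhdr.dlen, rhdr.duration);
	close(fd);
	return 0;
}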
-
-/* Changes:
-
- 1.01 GRG 1998.06.16 Bug fixes
- 1.02 GRG 1998.09.24 Added jumbo support
-
-*/
-
-#define PG_VERSION "1.02"
-#define PG_MAJOR 97
-#define PG_NAME "pg"
-#define PG_UNITS 4
-
-#ifndef PI_PG
-#define PI_PG 4
-#endif
-
-#include <linux/types.h>
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is 0
- by default.
-
-*/
-
-static int verbose;
-static int major = PG_MAJOR;
-static char *name = PG_NAME;
-static int disable = 0;
-
-static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
-
-static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
-static int pg_drive_count;
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mtio.h>
-#include <linux/pg.h>
-#include <linux/device.h>
-#include <linux/sched.h> /* current, TASK_* */
-#include <linux/mutex.h>
-#include <linux/jiffies.h>
-
-#include <linux/uaccess.h>
-
-module_param(verbose, int, 0644);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-
-#define PG_SPIN_DEL 50 /* spin delay in micro-seconds */
-#define PG_SPIN 200
-#define PG_TMO HZ
-#define PG_RESET_TMO 10*HZ
-
-#define STAT_ERR 0x01
-#define STAT_INDEX 0x02
-#define STAT_ECC 0x04
-#define STAT_DRQ 0x08
-#define STAT_SEEK 0x10
-#define STAT_WRERR 0x20
-#define STAT_READY 0x40
-#define STAT_BUSY 0x80
-
-#define ATAPI_IDENTIFY 0x12
-
-static DEFINE_MUTEX(pg_mutex);
-static int pg_open(struct inode *inode, struct file *file);
-static int pg_release(struct inode *inode, struct file *file);
-static ssize_t pg_read(struct file *filp, char __user *buf,
- size_t count, loff_t * ppos);
-static ssize_t pg_write(struct file *filp, const char __user *buf,
- size_t count, loff_t * ppos);
-static int pg_detect(void);
-
-#define PG_NAMELEN 8
-
-struct pg {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int busy; /* write done, read expected */
- int start; /* jiffies at command start */
- int dlen; /* transfer size requested */
- unsigned long timeout; /* timeout requested */
- int status; /* last sense key */
- int drive; /* drive */
- unsigned long access; /* count of active opens ... */
- int present; /* device present ? */
- char *bufptr;
- char name[PG_NAMELEN]; /* pg0, pg1, ... */
-};
-
-static struct pg devices[PG_UNITS];
-
-static int pg_identify(struct pg *dev, int log);
-
-static char pg_scratch[512]; /* scratch block buffer */
-
-static struct class *pg_class;
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static const struct file_operations pg_fops = {
- .owner = THIS_MODULE,
- .read = pg_read,
- .write = pg_write,
- .open = pg_open,
- .release = pg_release,
- .llseek = noop_llseek,
-};
-
-static void pg_init_units(void)
-{
- int unit;
-
- pg_drive_count = 0;
- for (unit = 0; unit < PG_UNITS; unit++) {
- int *parm = *drives[unit];
- struct pg *dev = &devices[unit];
- dev->pi = &dev->pia;
- clear_bit(0, &dev->access);
- dev->busy = 0;
- dev->present = 0;
- dev->bufptr = NULL;
- dev->drive = parm[D_SLV];
- snprintf(dev->name, PG_NAMELEN, "%s%c", name, 'a'+unit);
- if (parm[D_PRT])
- pg_drive_count++;
- }
-}
-
-static inline int status_reg(struct pg *dev)
-{
- return pi_read_regr(dev->pi, 1, 6);
-}
-
-static inline int read_reg(struct pg *dev, int reg)
-{
- return pi_read_regr(dev->pi, 0, reg);
-}
-
-static inline void write_reg(struct pg *dev, int reg, int val)
-{
- pi_write_regr(dev->pi, 0, reg, val);
-}
-
-static inline u8 DRIVE(struct pg *dev)
-{
- return 0xa0+0x10*dev->drive;
-}
-
-static void pg_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
-{
- int j, r, e, s, p, to;
-
- dev->status = 0;
-
- j = 0;
- while ((((r = status_reg(dev)) & go) || (stop && (!(r & stop))))
- && time_before(jiffies, tmo)) {
- if (j++ < PG_SPIN)
- udelay(PG_SPIN_DEL);
- else
- pg_sleep(1);
- }
-
- to = time_after_eq(jiffies, tmo);
-
- if ((r & (STAT_ERR & stop)) || to) {
- s = read_reg(dev, 7);
- e = read_reg(dev, 1);
- p = read_reg(dev, 2);
- if (verbose > 1)
- printk("%s: %s: stat=0x%x err=0x%x phase=%d%s\n",
- dev->name, msg, s, e, p, to ? " timeout" : "");
- if (to)
- e |= 0x100;
- dev->status = (e >> 4) & 0xff;
- return -1;
- }
- return 0;
-}
-
-static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo)
-{
- int k;
-
- pi_connect(dev->pi);
-
- write_reg(dev, 6, DRIVE(dev));
-
- if (pg_wait(dev, STAT_BUSY | STAT_DRQ, 0, tmo, "before command"))
- goto fail;
-
- write_reg(dev, 4, dlen % 256);
- write_reg(dev, 5, dlen / 256);
- write_reg(dev, 7, 0xa0); /* ATAPI packet command */
-
- if (pg_wait(dev, STAT_BUSY, STAT_DRQ, tmo, "command DRQ"))
- goto fail;
-
- if (read_reg(dev, 2) != 1) {
- printk("%s: command phase error\n", dev->name);
- goto fail;
- }
-
- pi_write_block(dev->pi, cmd, 12);
-
- if (verbose > 1) {
- printk("%s: Command sent, dlen=%d packet= ", dev->name, dlen);
- for (k = 0; k < 12; k++)
- printk("%02x ", cmd[k] & 0xff);
- printk("\n");
- }
- return 0;
-fail:
- pi_disconnect(dev->pi);
- return -1;
-}
-
-static int pg_completion(struct pg *dev, char *buf, unsigned long tmo)
-{
- int r, d, n, p;
-
- r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- tmo, "completion");
-
- dev->dlen = 0;
-
- while (read_reg(dev, 7) & STAT_DRQ) {
- d = (read_reg(dev, 4) + 256 * read_reg(dev, 5));
- n = ((d + 3) & 0xfffc);
- p = read_reg(dev, 2) & 3;
- if (p == 0)
- pi_write_block(dev->pi, buf, n);
- if (p == 2)
- pi_read_block(dev->pi, buf, n);
- if (verbose > 1)
- printk("%s: %s %d bytes\n", dev->name,
- p ? "Read" : "Write", n);
- dev->dlen += (1 - p) * d;
- buf += d;
- r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- tmo, "completion");
- }
-
- pi_disconnect(dev->pi);
-
- return r;
-}
-
-static int pg_reset(struct pg *dev)
-{
- int i, k, err;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
- int got[5];
-
- pi_connect(dev->pi);
- write_reg(dev, 6, DRIVE(dev));
- write_reg(dev, 7, 8);
-
- pg_sleep(20 * HZ / 1000);
-
- k = 0;
- while ((k++ < PG_RESET_TMO) && (status_reg(dev) & STAT_BUSY))
- pg_sleep(1);
-
- for (i = 0; i < 5; i++)
- got[i] = read_reg(dev, i + 1);
-
- err = memcmp(expect, got, sizeof(got)) ? -1 : 0;
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", dev->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", got[i]);
- if (err)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(dev->pi);
- return err;
-}
-
-static void xs(char *buf, char *targ, int len)
-{
- char l = '\0';
- int k;
-
- for (k = 0; k < len; k++) {
- char c = *buf++;
- if (c != ' ' && c != l)
- l = *targ++ = c;
- }
- if (l == ' ')
- targ--;
- *targ = '\0';
-}
-
-static int pg_identify(struct pg *dev, int log)
-{
- int s;
- char *ms[2] = { "master", "slave" };
- char mf[10], id[18];
- char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char buf[36];
-
- s = pg_command(dev, id_cmd, 36, jiffies + PG_TMO);
- if (s)
- return -1;
- s = pg_completion(dev, buf, jiffies + PG_TMO);
- if (s)
- return -1;
-
- if (log) {
- xs(buf + 8, mf, 8);
- xs(buf + 16, id, 16);
- printk("%s: %s %s, %s\n", dev->name, mf, id, ms[dev->drive]);
- }
-
- return 0;
-}
-
-/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
- */
-static int pg_probe(struct pg *dev)
-{
- if (dev->drive == -1) {
- for (dev->drive = 0; dev->drive <= 1; dev->drive++)
- if (!pg_reset(dev))
- return pg_identify(dev, 1);
- } else {
- if (!pg_reset(dev))
- return pg_identify(dev, 1);
- }
- return -1;
-}
-
-static int pg_detect(void)
-{
- struct pg *dev = &devices[0];
- int k, unit;
-
- printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- k = 0;
- if (pg_drive_count == 0) {
- if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch,
- PI_PG, verbose, dev->name)) {
- if (!pg_probe(dev)) {
- dev->present = 1;
- k++;
- } else
- pi_release(dev->pi);
- }
-
- } else
- for (unit = 0; unit < PG_UNITS; unit++, dev++) {
- int *parm = *drives[unit];
- if (!parm[D_PRT])
- continue;
- if (pi_init(dev->pi, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY],
- pg_scratch, PI_PG, verbose, dev->name)) {
- if (!pg_probe(dev)) {
- dev->present = 1;
- k++;
- } else
- pi_release(dev->pi);
- }
- }
-
- if (k)
- return 0;
-
- pi_unregister_driver(par_drv);
- printk("%s: No ATAPI device detected\n", name);
- return -1;
-}
-
-static int pg_open(struct inode *inode, struct file *file)
-{
- int unit = iminor(inode) & 0x7f;
- struct pg *dev = &devices[unit];
- int ret = 0;
-
- mutex_lock(&pg_mutex);
- if ((unit >= PG_UNITS) || (!dev->present)) {
- ret = -ENODEV;
- goto out;
- }
-
- if (test_and_set_bit(0, &dev->access)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (dev->busy) {
- pg_reset(dev);
- dev->busy = 0;
- }
-
- pg_identify(dev, (verbose > 1));
-
- dev->bufptr = kmalloc(PG_MAX_DATA, GFP_KERNEL);
- if (dev->bufptr == NULL) {
- clear_bit(0, &dev->access);
- printk("%s: buffer allocation failed\n", dev->name);
- ret = -ENOMEM;
- goto out;
- }
-
- file->private_data = dev;
-
-out:
- mutex_unlock(&pg_mutex);
- return ret;
-}
-
-static int pg_release(struct inode *inode, struct file *file)
-{
- struct pg *dev = file->private_data;
-
- kfree(dev->bufptr);
- dev->bufptr = NULL;
- clear_bit(0, &dev->access);
-
- return 0;
-}
-
-static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
-{
- struct pg *dev = filp->private_data;
- struct pg_write_hdr hdr;
- int hs = sizeof (hdr);
-
- if (dev->busy)
- return -EBUSY;
- if (count < hs)
- return -EINVAL;
-
- if (copy_from_user(&hdr, buf, hs))
- return -EFAULT;
-
- if (hdr.magic != PG_MAGIC)
- return -EINVAL;
- if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
- return -EINVAL;
- if ((count - hs) > PG_MAX_DATA)
- return -EINVAL;
-
- if (hdr.func == PG_RESET) {
- if (count != hs)
- return -EINVAL;
- if (pg_reset(dev))
- return -EIO;
- return count;
- }
-
- if (hdr.func != PG_COMMAND)
- return -EINVAL;
-
- dev->start = jiffies;
- dev->timeout = hdr.timeout * HZ + HZ / 2 + jiffies;
-
- if (pg_command(dev, hdr.packet, hdr.dlen, jiffies + PG_TMO)) {
- if (dev->status & 0x10)
- return -ETIME;
- return -EIO;
- }
-
- dev->busy = 1;
-
- if (copy_from_user(dev->bufptr, buf + hs, count - hs))
- return -EFAULT;
- return count;
-}
-
-static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
-{
- struct pg *dev = filp->private_data;
- struct pg_read_hdr hdr;
- int hs = sizeof (hdr);
- int copy;
-
- if (!dev->busy)
- return -EINVAL;
- if (count < hs)
- return -EINVAL;
-
- dev->busy = 0;
-
- if (pg_completion(dev, dev->bufptr, dev->timeout))
- if (dev->status & 0x10)
- return -ETIME;
-
- memset(&hdr, 0, sizeof(hdr));
- hdr.magic = PG_MAGIC;
- hdr.dlen = dev->dlen;
- copy = 0;
-
- if (hdr.dlen < 0) {
- hdr.dlen = -1 * hdr.dlen;
- copy = hdr.dlen;
- if (copy > (count - hs))
- copy = count - hs;
- }
-
- hdr.duration = (jiffies - dev->start + HZ / 2) / HZ;
- hdr.scsi = dev->status & 0x0f;
-
- if (copy_to_user(buf, &hdr, hs))
- return -EFAULT;
- if (copy > 0)
- if (copy_to_user(buf + hs, dev->bufptr, copy))
- return -EFAULT;
- return copy + hs;
-}
-
-static int __init pg_init(void)
-{
- int unit;
- int err;
-
- if (disable){
- err = -EINVAL;
- goto out;
- }
-
- pg_init_units();
-
- if (pg_detect()) {
- err = -ENODEV;
- goto out;
- }
-
- err = register_chrdev(major, name, &pg_fops);
- if (err < 0) {
- printk("pg_init: unable to get major number %d\n", major);
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- pi_release(dev->pi);
- }
- goto out;
- }
- major = err; /* In case the user specified `major=0' (dynamic) */
- pg_class = class_create(THIS_MODULE, "pg");
- if (IS_ERR(pg_class)) {
- err = PTR_ERR(pg_class);
- goto out_chrdev;
- }
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- device_create(pg_class, NULL, MKDEV(major, unit), NULL,
- "pg%u", unit);
- }
- err = 0;
- goto out;
-
-out_chrdev:
- unregister_chrdev(major, "pg");
-out:
- return err;
-}
-
-static void __exit pg_exit(void)
-{
- int unit;
-
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- device_destroy(pg_class, MKDEV(major, unit));
- }
- class_destroy(pg_class);
- unregister_chrdev(major, name);
-
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- pi_release(dev->pi);
- }
-}
-
-MODULE_LICENSE("GPL");
-module_init(pg_init)
-module_exit(pg_exit)
diff --git a/drivers/block/paride/ppc6lnx.c b/drivers/block/paride/ppc6lnx.c
deleted file mode 100644
index 5e5521d3b1dd..000000000000
--- a/drivers/block/paride/ppc6lnx.c
+++ /dev/null
@@ -1,726 +0,0 @@
-/*
- ppc6lnx.c (c) 2001 Micro Solutions Inc.
- Released under the terms of the GNU General Public license
-
- ppc6lnx.c is part of the protocol driver for the Micro Solutions
- "BACKPACK" parallel port IDE adapter
- (Works on Series 6 drives)
-
-*/
-
-//***************************************************************************
-
-// PPC 6 Code in C sanitized for LINUX
-// Original x86 ASM by Ron, Converted to C by Clive
-
-//***************************************************************************
-
-
-#define port_stb 1
-#define port_afd 2
-#define cmd_stb port_afd
-#define port_init 4
-#define data_stb port_init
-#define port_sel 8
-#define port_int 16
-#define port_dir 0x20
-
-#define ECR_EPP 0x80
-#define ECR_BI 0x20
-
-//***************************************************************************
-
-// 60772 Commands
-
-#define ACCESS_REG 0x00
-#define ACCESS_PORT 0x40
-
-#define ACCESS_READ 0x00
-#define ACCESS_WRITE 0x20
-
-// 60772 Command Prefix
-
-#define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation
-#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
- #define PREFIX_IO16 0x01 // perform 16-bit wide I/O
- #define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
- #define PREFIX_BLK 0x08 // enable block transfer mode
-
-// 60772 Registers
-
-#define REG_STATUS 0x00 // status register
- #define STATUS_IRQA 0x01 // Peripheral IRQA line
- #define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
-#define REG_VERSION 0x01 // PPC version register (read)
-#define REG_HWCFG 0x02 // Hardware Config register
-#define REG_RAMSIZE 0x03 // Size of RAM Buffer
- #define RAMSIZE_128K 0x02
-#define REG_EEPROM 0x06 // EEPROM control register
- #define EEPROM_SK 0x01 // eeprom SK bit
- #define EEPROM_DI 0x02 // eeprom DI bit
- #define EEPROM_CS 0x04 // eeprom CS bit
- #define EEPROM_EN 0x08 // eeprom output enable
-#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
-
-//***************************************************************************
-
-typedef struct ppc_storage {
- u16 lpt_addr; // LPT base address
- u8 ppc_id;
- u8 mode; // operating mode
- // 0 = PPC Uni SW
- // 1 = PPC Uni FW
- // 2 = PPC Bi SW
- // 3 = PPC Bi FW
- // 4 = EPP Byte
- // 5 = EPP Word
- // 6 = EPP Dword
- u8 ppc_flags;
- u8 org_data; // original LPT data port contents
- u8 org_ctrl; // original LPT control port contents
- u8 cur_ctrl; // current control port contents
-} Interface;
-
-//***************************************************************************
-
-// ppc_flags
-
-#define fifo_wait 0x10
-
-//***************************************************************************
-
-// DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES
-
-#define PPCMODE_UNI_SW 0
-#define PPCMODE_UNI_FW 1
-#define PPCMODE_BI_SW 2
-#define PPCMODE_BI_FW 3
-#define PPCMODE_EPP_BYTE 4
-#define PPCMODE_EPP_WORD 5
-#define PPCMODE_EPP_DWORD 6
-
-//***************************************************************************
-
-static int ppc6_select(Interface *ppc);
-static void ppc6_deselect(Interface *ppc);
-static void ppc6_send_cmd(Interface *ppc, u8 cmd);
-static void ppc6_wr_data_byte(Interface *ppc, u8 data);
-static u8 ppc6_rd_data_byte(Interface *ppc);
-static u8 ppc6_rd_port(Interface *ppc, u8 port);
-static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
-static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
-static void ppc6_wait_for_fifo(Interface *ppc);
-static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
-static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_extout(Interface *ppc, u8 regdata);
-static int ppc6_open(Interface *ppc);
-static void ppc6_close(Interface *ppc);
-
-//***************************************************************************
-
-static int ppc6_select(Interface *ppc)
-{
- u8 i, j, k;
-
- i = inb(ppc->lpt_addr + 1);
-
- if (i & 1)
- outb(i, ppc->lpt_addr + 1);
-
- ppc->org_data = inb(ppc->lpt_addr);
-
- ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl
-
- ppc->cur_ctrl = ppc->org_ctrl;
-
- ppc->cur_ctrl |= port_sel;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- if (ppc->org_data == 'b')
- outb('x', ppc->lpt_addr);
-
- outb('b', ppc->lpt_addr);
- outb('p', ppc->lpt_addr);
- outb(ppc->ppc_id, ppc->lpt_addr);
- outb(~ppc->ppc_id,ppc->lpt_addr);
-
- ppc->cur_ctrl &= ~port_sel;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- i = ppc->mode & 0x0C;
-
- if (i == 0)
- i = (ppc->mode & 2) | 1;
-
- outb(i, ppc->lpt_addr);
-
- ppc->cur_ctrl |= port_sel;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- // DELAY
-
- ppc->cur_ctrl |= port_afd;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
-
- k = inb(ppc->lpt_addr + 1) & 0xB8;
-
- if (j == k)
- {
- ppc->cur_ctrl &= ~port_afd;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8;
-
- if (j == k)
- {
- if (i & 4) // EPP
- ppc->cur_ctrl &= ~(port_sel | port_init);
- else // PPC/ECP
- ppc->cur_ctrl &= ~port_sel;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- return(1);
- }
- }
-
- outb(ppc->org_ctrl, ppc->lpt_addr + 2);
-
- outb(ppc->org_data, ppc->lpt_addr);
-
- return(0); // FAIL
-}
-
-//***************************************************************************
-
-static void ppc6_deselect(Interface *ppc)
-{
- if (ppc->mode & 4) // EPP
- ppc->cur_ctrl |= port_init;
- else // PPC/ECP
- ppc->cur_ctrl |= port_sel;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- outb(ppc->org_data, ppc->lpt_addr);
-
- outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2);
-
- outb(ppc->org_ctrl, ppc->lpt_addr + 2);
-}
-
-//***************************************************************************
-
-static void ppc6_send_cmd(Interface *ppc, u8 cmd)
-{
- switch(ppc->mode)
- {
- case PPCMODE_UNI_SW :
- case PPCMODE_UNI_FW :
- case PPCMODE_BI_SW :
- case PPCMODE_BI_FW :
- {
- outb(cmd, ppc->lpt_addr);
-
- ppc->cur_ctrl ^= cmd_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
-
- case PPCMODE_EPP_BYTE :
- case PPCMODE_EPP_WORD :
- case PPCMODE_EPP_DWORD :
- {
- outb(cmd, ppc->lpt_addr + 3);
-
- break;
- }
- }
-}
-
-//***************************************************************************
-
-static void ppc6_wr_data_byte(Interface *ppc, u8 data)
-{
- switch(ppc->mode)
- {
- case PPCMODE_UNI_SW :
- case PPCMODE_UNI_FW :
- case PPCMODE_BI_SW :
- case PPCMODE_BI_FW :
- {
- outb(data, ppc->lpt_addr);
-
- ppc->cur_ctrl ^= data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
-
- case PPCMODE_EPP_BYTE :
- case PPCMODE_EPP_WORD :
- case PPCMODE_EPP_DWORD :
- {
- outb(data, ppc->lpt_addr + 4);
-
- break;
- }
- }
-}
-
-//***************************************************************************
-
-static u8 ppc6_rd_data_byte(Interface *ppc)
-{
- u8 data = 0;
-
- switch(ppc->mode)
- {
- case PPCMODE_UNI_SW :
- case PPCMODE_UNI_FW :
- {
- ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- // DELAY
-
- data = inb(ppc->lpt_addr + 1);
-
- data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
-
- ppc->cur_ctrl |= port_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- // DELAY
-
- data |= inb(ppc->lpt_addr + 1) & 0xB8;
-
- break;
- }
-
- case PPCMODE_BI_SW :
- case PPCMODE_BI_FW :
- {
- ppc->cur_ctrl |= port_dir;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- data = inb(ppc->lpt_addr);
-
- ppc->cur_ctrl &= ~port_stb;
-
- outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
-
- ppc->cur_ctrl &= ~port_dir;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
-
- case PPCMODE_EPP_BYTE :
- case PPCMODE_EPP_WORD :
- case PPCMODE_EPP_DWORD :
- {
- outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
-
- data = inb(ppc->lpt_addr + 4);
-
- outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
-
- break;
- }
- }
-
- return(data);
-}
-
-//***************************************************************************
-
-static u8 ppc6_rd_port(Interface *ppc, u8 port)
-{
- ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
-
- return(ppc6_rd_data_byte(ppc));
-}
-
-//***************************************************************************
-
-static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
-{
- ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
-
- ppc6_wr_data_byte(ppc, data);
-}
-
-//***************************************************************************
-
-static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
-{
- switch(ppc->mode)
- {
- case PPCMODE_UNI_SW :
- case PPCMODE_UNI_FW :
- {
- while(count)
- {
- u8 d;
-
- ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- // DELAY
-
- d = inb(ppc->lpt_addr + 1);
-
- d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
-
- ppc->cur_ctrl |= port_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- // DELAY
-
- d |= inb(ppc->lpt_addr + 1) & 0xB8;
-
- *data++ = d;
- count--;
- }
-
- break;
- }
-
- case PPCMODE_BI_SW :
- case PPCMODE_BI_FW :
- {
- ppc->cur_ctrl |= port_dir;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- ppc->cur_ctrl |= port_stb;
-
- while(count)
- {
- ppc->cur_ctrl ^= data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- *data++ = inb(ppc->lpt_addr);
- count--;
- }
-
- ppc->cur_ctrl &= ~port_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- ppc->cur_ctrl &= ~port_dir;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
-
- case PPCMODE_EPP_BYTE :
- {
- outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
-
- // DELAY
-
- while(count)
- {
- *data++ = inb(ppc->lpt_addr + 4);
- count--;
- }
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
-
- case PPCMODE_EPP_WORD :
- {
- outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
-
- // DELAY
-
- while(count > 1)
- {
- *((u16 *)data) = inw(ppc->lpt_addr + 4);
- data += 2;
- count -= 2;
- }
-
- while(count)
- {
- *data++ = inb(ppc->lpt_addr + 4);
- count--;
- }
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
-
- case PPCMODE_EPP_DWORD :
- {
- outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
-
- // DELAY
-
- while(count > 3)
- {
- *((u32 *)data) = inl(ppc->lpt_addr + 4);
- data += 4;
- count -= 4;
- }
-
- while(count)
- {
- *data++ = inb(ppc->lpt_addr + 4);
- count--;
- }
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- break;
- }
- }
-
-}
-
-//***************************************************************************
-
-static void ppc6_wait_for_fifo(Interface *ppc)
-{
- int i;
-
- if (ppc->ppc_flags & fifo_wait)
- {
- for(i=0; i<20; i++)
- inb(ppc->lpt_addr + 1);
- }
-}
-
-//***************************************************************************
-
-static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
-{
- switch(ppc->mode)
- {
- case PPCMODE_UNI_SW :
- case PPCMODE_BI_SW :
- {
- while(count--)
- {
- outb(*data++, ppc->lpt_addr);
-
- ppc->cur_ctrl ^= data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
- }
-
- break;
- }
-
- case PPCMODE_UNI_FW :
- case PPCMODE_BI_FW :
- {
- u8 this, last;
-
- ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR));
-
- ppc->cur_ctrl |= port_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- last = *data;
-
- outb(last, ppc->lpt_addr);
-
- while(count)
- {
- this = *data++;
- count--;
-
- if (this == last)
- {
- ppc->cur_ctrl ^= data_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
- }
- else
- {
- outb(this, ppc->lpt_addr);
-
- last = this;
- }
- }
-
- ppc->cur_ctrl &= ~port_stb;
-
- outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
-
- ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR));
-
- break;
- }
-
- case PPCMODE_EPP_BYTE :
- {
- while(count)
- {
- outb(*data++,ppc->lpt_addr + 4);
- count--;
- }
-
- ppc6_wait_for_fifo(ppc);
-
- break;
- }
-
- case PPCMODE_EPP_WORD :
- {
- while(count > 1)
- {
- outw(*((u16 *)data),ppc->lpt_addr + 4);
- data += 2;
- count -= 2;
- }
-
- while(count)
- {
- outb(*data++,ppc->lpt_addr + 4);
- count--;
- }
-
- ppc6_wait_for_fifo(ppc);
-
- break;
- }
-
- case PPCMODE_EPP_DWORD :
- {
- while(count > 3)
- {
- outl(*((u32 *)data),ppc->lpt_addr + 4);
- data += 4;
- count -= 4;
- }
-
- while(count)
- {
- outb(*data++,ppc->lpt_addr + 4);
- count--;
- }
-
- ppc6_wait_for_fifo(ppc);
-
- break;
- }
- }
-}
-
-//***************************************************************************
-
-static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
-{
- length = length << 1;
-
- ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
- ppc6_wr_data_byte(ppc,(u8)length);
- ppc6_wr_data_byte(ppc,(u8)(length >> 8));
- ppc6_wr_data_byte(ppc,0);
-
- ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
-
- ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ));
-
- ppc6_rd_data_blk(ppc, data, length);
-
- ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
-}
-
-//***************************************************************************
-
-static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
-{
- length = length << 1;
-
- ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
- ppc6_wr_data_byte(ppc,(u8)length);
- ppc6_wr_data_byte(ppc,(u8)(length >> 8));
- ppc6_wr_data_byte(ppc,0);
-
- ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
-
- ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE));
-
- ppc6_wr_data_blk(ppc, data, length);
-
- ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
-}
-
-//***************************************************************************
-
-static void ppc6_wr_extout(Interface *ppc, u8 regdata)
-{
- ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
-
- ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6));
-}
-
-//***************************************************************************
-
-static int ppc6_open(Interface *ppc)
-{
- int ret;
-
- ret = ppc6_select(ppc);
-
- if (ret == 0)
- return(ret);
-
- ppc->ppc_flags &= ~fifo_wait;
-
- ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE));
- ppc6_wr_data_byte(ppc, RAMSIZE_128K);
-
- ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION));
-
- if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C)
- ppc->ppc_flags |= fifo_wait;
-
- return(ret);
-}
-
-//***************************************************************************
-
-static void ppc6_close(Interface *ppc)
-{
- ppc6_deselect(ppc);
-}
-
-//***************************************************************************
-
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
deleted file mode 100644
index bc3703294143..000000000000
--- a/drivers/block/paride/pseudo.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- pseudo.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the "pseudo-interrupt" logic for parallel port drivers.
-
- This module is #included into each driver. It makes one
- function available:
-
- ps_set_intr( void (*continuation)(void),
- int (*ready)(void),
- int timeout,
- int nice )
-
- Which will arrange for ready() to be evaluated frequently and
- when either it returns true, or timeout jiffies have passed,
- continuation() will be invoked.
-
- If nice is 1, the test will be done approximately once a
- jiffy. If nice is 0, the test will also be done whenever
- the scheduler runs (by adding it to a task queue). If
- nice is greater than 1, the test will be done once every
- (nice-1) jiffies.
-
-*/
-
-/* Changes:
-
- 1.01 1998.05.03 Switched from cli()/sti() to spinlocks
- 1.02 1998.12.14 Added support for nice > 1
-*/
-
-#define PS_VERSION "1.02"
-
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-
-static void ps_tq_int(struct work_struct *work);
-
-static void (* ps_continuation)(void);
-static int (* ps_ready)(void);
-static unsigned long ps_timeout;
-static int ps_tq_active = 0;
-static int ps_nice = 0;
-
-static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
-
-static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
-
-static void ps_set_intr(void (*continuation)(void),
- int (*ready)(void),
- int timeout, int nice)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ps_spinlock,flags);
-
- ps_continuation = continuation;
- ps_ready = ready;
- ps_timeout = jiffies + timeout;
- ps_nice = nice;
-
- if (!ps_tq_active) {
- ps_tq_active = 1;
- if (!ps_nice)
- schedule_delayed_work(&ps_tq, 0);
- else
- schedule_delayed_work(&ps_tq, ps_nice-1);
- }
- spin_unlock_irqrestore(&ps_spinlock,flags);
-}
-
-static void ps_tq_int(struct work_struct *work)
-{
- void (*con)(void);
- unsigned long flags;
-
- spin_lock_irqsave(&ps_spinlock,flags);
-
- con = ps_continuation;
- ps_tq_active = 0;
-
- if (!con) {
- spin_unlock_irqrestore(&ps_spinlock,flags);
- return;
- }
- if (!ps_ready || ps_ready() || time_after_eq(jiffies, ps_timeout)) {
- ps_continuation = NULL;
- spin_unlock_irqrestore(&ps_spinlock,flags);
- con();
- return;
- }
- ps_tq_active = 1;
- if (!ps_nice)
- schedule_delayed_work(&ps_tq, 0);
- else
- schedule_delayed_work(&ps_tq, ps_nice-1);
- spin_unlock_irqrestore(&ps_spinlock,flags);
-}
-
-/* end of pseudo.h */
-
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
deleted file mode 100644
index e815312a00ad..000000000000
--- a/drivers/block/paride/pt.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- pt.c (c) 1998 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the high-level driver for parallel port ATAPI tape
- drives based on chips supported by the paride module.
-
- The driver implements both rewinding and non-rewinding
- devices, filemarks, and the rewind ioctl. It allocates
- a small internal "bounce buffer" for each open device, but
- otherwise expects buffering and blocking to be done at the
- user level. As with most block-structured tapes, short
- writes are padded to full tape blocks, so reading back a file
- may return more data than was actually written.
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI tape drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The rewinding devices are named /dev/pt0, /dev/pt1, ...
- while the non-rewinding devices are /dev/npt0, /dev/npt1, etc.
-
- The behaviour of the pt driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-6 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI devices can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (96) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pt").
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- If this driver is built into the kernel, you can use
- the following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pt.drive0
- pt.drive1
- pt.drive2
- pt.drive3
-
- In addition, you can use the parameter pt.disable to disable
- the driver entirely.
-
-*/
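[Editorial illustration, not part of the deleted file.] Besides plain read() and write(), the only control interface this driver exposes is the MTIOCTOP ioctl handled in pt_ioctl() below, which implements MTREW and MTWEOF. A minimal user-space sketch, assuming the first rewinding device node /dev/pt0 from the naming scheme described above:

/* pt_rewind.c - rewind a parallel-port ATAPI tape via MTIOCTOP. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

int main(void)
{
	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
	int fd = open("/dev/pt0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/pt0");
		return 1;
	}
	/* MTIOCTOP with MTREW maps onto pt_rewind() in the driver below;
	 * MTWEOF would write a filemark via pt_write_fm() instead. */
	if (ioctl(fd, MTIOCTOP, &op) < 0) {
		perror("MTIOCTOP(MTREW)");
		return 1;
	}
	close(fd);
	return 0;
}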
-
-/* Changes:
-
- 1.01 GRG 1998.05.06 Round up transfer size, fix ready_wait,
- loosened interpretation of the ATAPI standard
- for clearing error status.
- Eliminate sti();
- 1.02 GRG 1998.06.16 Eliminate an Ugh.
- 1.03 GRG 1998.08.15 Adjusted PT_TMO, use HZ in loop timing,
- extra debugging
- 1.04 GRG 1998.09.24 Repair minor coding error, added jumbo support
-
-*/
-
-#define PT_VERSION "1.04"
-#define PT_MAJOR 96
-#define PT_NAME "pt"
-#define PT_UNITS 4
-
-#include <linux/types.h>
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is 0
- by default.
-
-*/
-
-static int verbose = 0;
-static int major = PT_MAJOR;
-static char *name = PT_NAME;
-static int disable = 0;
-
-static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
-
-static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
-
-#define D_PRT 0
-#define D_PRO 1
-#define D_UNI 2
-#define D_MOD 3
-#define D_SLV 4
-#define D_DLY 5
-
-#define DU (*drives[unit])
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mtio.h>
-#include <linux/device.h>
-#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
-#include <linux/mutex.h>
-
-#include <linux/uaccess.h>
-
-module_param(verbose, int, 0);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-
-#define PT_MAX_RETRIES 5
-#define PT_TMO 3000 /* interrupt timeout in jiffies */
-#define PT_SPIN_DEL 50 /* spin delay in micro-seconds */
-#define PT_RESET_TMO 30 /* 30 seconds */
-#define PT_READY_TMO 60 /* 60 seconds */
-#define PT_REWIND_TMO 1200 /* 20 minutes */
-
-#define PT_SPIN ((1000000/(HZ*PT_SPIN_DEL))*PT_TMO)
-
-#define STAT_ERR 0x00001
-#define STAT_INDEX 0x00002
-#define STAT_ECC 0x00004
-#define STAT_DRQ 0x00008
-#define STAT_SEEK 0x00010
-#define STAT_WRERR 0x00020
-#define STAT_READY 0x00040
-#define STAT_BUSY 0x00080
-#define STAT_SENSE 0x1f000
-
-#define ATAPI_TEST_READY 0x00
-#define ATAPI_REWIND 0x01
-#define ATAPI_REQ_SENSE 0x03
-#define ATAPI_READ_6 0x08
-#define ATAPI_WRITE_6 0x0a
-#define ATAPI_WFM 0x10
-#define ATAPI_IDENTIFY 0x12
-#define ATAPI_MODE_SENSE 0x1a
-#define ATAPI_LOG_SENSE 0x4d
-
-static DEFINE_MUTEX(pt_mutex);
-static int pt_open(struct inode *inode, struct file *file);
-static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static int pt_release(struct inode *inode, struct file *file);
-static ssize_t pt_read(struct file *filp, char __user *buf,
- size_t count, loff_t * ppos);
-static ssize_t pt_write(struct file *filp, const char __user *buf,
- size_t count, loff_t * ppos);
-static int pt_detect(void);
-
-/* bits in tape->flags */
-
-#define PT_MEDIA 1
-#define PT_WRITE_OK 2
-#define PT_REWIND 4
-#define PT_WRITING 8
-#define PT_READING 16
-#define PT_EOF 32
-
-#define PT_NAMELEN 8
-#define PT_BUFSIZE 16384
-
-struct pt_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int flags; /* various state flags */
- int last_sense; /* result of last request sense */
- int drive; /* drive */
- atomic_t available; /* 1 if access is available 0 otherwise */
- int bs; /* block size */
- int capacity; /* Size of tape in KB */
- int present; /* device present ? */
- char *bufptr;
- char name[PT_NAMELEN]; /* pf0, pf1, ... */
-};
-
-static int pt_identify(struct pt_unit *tape);
-
-static struct pt_unit pt[PT_UNITS];
-
-static char pt_scratch[512]; /* scratch block buffer */
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static const struct file_operations pt_fops = {
- .owner = THIS_MODULE,
- .read = pt_read,
- .write = pt_write,
- .unlocked_ioctl = pt_ioctl,
- .open = pt_open,
- .release = pt_release,
- .llseek = noop_llseek,
-};
-
-/* sysfs class support */
-static struct class *pt_class;
-
-static inline int status_reg(struct pi_adapter *pi)
-{
- return pi_read_regr(pi, 1, 6);
-}
-
-static inline int read_reg(struct pi_adapter *pi, int reg)
-{
- return pi_read_regr(pi, 0, reg);
-}
-
-static inline void write_reg(struct pi_adapter *pi, int reg, int val)
-{
- pi_write_regr(pi, 0, reg, val);
-}
-
-static inline u8 DRIVE(struct pt_unit *tape)
-{
- return 0xa0+0x10*tape->drive;
-}
-
-static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
-{
- int j, r, e, s, p;
- struct pi_adapter *pi = tape->pi;
-
- j = 0;
- while ((((r = status_reg(pi)) & go) || (stop && (!(r & stop))))
- && (j++ < PT_SPIN))
- udelay(PT_SPIN_DEL);
-
- if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) {
- s = read_reg(pi, 7);
- e = read_reg(pi, 1);
- p = read_reg(pi, 2);
- if (j > PT_SPIN)
- e |= 0x100;
- if (fun)
- printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
- " loop=%d phase=%d\n",
- tape->name, fun, msg, r, s, e, j, p);
- return (e << 8) + s;
- }
- return 0;
-}
-
-static int pt_command(struct pt_unit *tape, char *cmd, int dlen, char *fun)
-{
- struct pi_adapter *pi = tape->pi;
- pi_connect(pi);
-
- write_reg(pi, 6, DRIVE(tape));
-
- if (pt_wait(tape, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
- pi_disconnect(pi);
- return -1;
- }
-
- write_reg(pi, 4, dlen % 256);
- write_reg(pi, 5, dlen / 256);
- write_reg(pi, 7, 0xa0); /* ATAPI packet command */
-
- if (pt_wait(tape, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
- pi_disconnect(pi);
- return -1;
- }
-
- if (read_reg(pi, 2) != 1) {
- printk("%s: %s: command phase error\n", tape->name, fun);
- pi_disconnect(pi);
- return -1;
- }
-
- pi_write_block(pi, cmd, 12);
-
- return 0;
-}
-
-static int pt_completion(struct pt_unit *tape, char *buf, char *fun)
-{
- struct pi_adapter *pi = tape->pi;
- int r, s, n, p;
-
- r = pt_wait(tape, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- fun, "completion");
-
- if (read_reg(pi, 7) & STAT_DRQ) {
- n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) +
- 3) & 0xfffc);
- p = read_reg(pi, 2) & 3;
- if (p == 0)
- pi_write_block(pi, buf, n);
- if (p == 2)
- pi_read_block(pi, buf, n);
- }
-
- s = pt_wait(tape, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
-
- pi_disconnect(pi);
-
- return (r ? r : s);
-}
-
-static void pt_req_sense(struct pt_unit *tape, int quiet)
-{
- char rs_cmd[12] = { ATAPI_REQ_SENSE, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
- char buf[16];
- int r;
-
- r = pt_command(tape, rs_cmd, 16, "Request sense");
- mdelay(1);
- if (!r)
- pt_completion(tape, buf, "Request sense");
-
- tape->last_sense = -1;
- if (!r) {
- if (!quiet)
- printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
- tape->name, buf[2] & 0xf, buf[12], buf[13]);
- tape->last_sense = (buf[2] & 0xf) | ((buf[12] & 0xff) << 8)
- | ((buf[13] & 0xff) << 16);
- }
-}
-
-static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *fun)
-{
- int r;
-
- r = pt_command(tape, cmd, dlen, fun);
- mdelay(1);
- if (!r)
- r = pt_completion(tape, buf, fun);
- if (r)
- pt_req_sense(tape, !fun);
-
- return r;
-}
-
-static void pt_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
-{
- struct pi_adapter *pi = tape->pi;
- int k, e, s;
-
- k = 0;
- e = 0;
- s = 0;
- while (k < tmo) {
- pt_sleep(pause);
- k++;
- pi_connect(pi);
- write_reg(pi, 6, DRIVE(tape));
- s = read_reg(pi, 7);
- e = read_reg(pi, 1);
- pi_disconnect(pi);
- if (s & (STAT_ERR | STAT_SEEK))
- break;
- }
- if ((k >= tmo) || (s & STAT_ERR)) {
- if (k >= tmo)
- printk("%s: %s DSC timeout\n", tape->name, msg);
- else
- printk("%s: %s stat=0x%x err=0x%x\n", tape->name, msg, s,
- e);
- pt_req_sense(tape, 0);
- return 0;
- }
- return 1;
-}
-
-static void pt_media_access_cmd(struct pt_unit *tape, int tmo, char *cmd, char *fun)
-{
- if (pt_command(tape, cmd, 0, fun)) {
- pt_req_sense(tape, 0);
- return;
- }
- pi_disconnect(tape->pi);
- pt_poll_dsc(tape, HZ, tmo, fun);
-}
-
-static void pt_rewind(struct pt_unit *tape)
-{
- char rw_cmd[12] = { ATAPI_REWIND, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
- pt_media_access_cmd(tape, PT_REWIND_TMO, rw_cmd, "rewind");
-}
-
-static void pt_write_fm(struct pt_unit *tape)
-{
- char wm_cmd[12] = { ATAPI_WFM, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 };
-
- pt_media_access_cmd(tape, PT_TMO, wm_cmd, "write filemark");
-}
-
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
-static int pt_reset(struct pt_unit *tape)
-{
- struct pi_adapter *pi = tape->pi;
- int i, k, flg;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
-
- pi_connect(pi);
- write_reg(pi, 6, DRIVE(tape));
- write_reg(pi, 7, 8);
-
- pt_sleep(20 * HZ / 1000);
-
- k = 0;
- while ((k++ < PT_RESET_TMO) && (status_reg(pi) & STAT_BUSY))
- pt_sleep(HZ / 10);
-
- flg = 1;
- for (i = 0; i < 5; i++)
- flg &= (read_reg(pi, i + 1) == expect[i]);
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", tape->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", read_reg(pi, i + 1));
- if (!flg)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(pi);
- return flg - 1;
-}
-
-static int pt_ready_wait(struct pt_unit *tape, int tmo)
-{
- char tr_cmd[12] = { ATAPI_TEST_READY, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, p;
-
- k = 0;
- while (k < tmo) {
- tape->last_sense = 0;
- pt_atapi(tape, tr_cmd, 0, NULL, DBMSG("test unit ready"));
- p = tape->last_sense;
- if (!p)
- return 0;
- if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
- return p;
- k++;
- pt_sleep(HZ);
- }
- return 0x000020; /* timeout */
-}
-
-static void xs(char *buf, char *targ, int offs, int len)
-{
- int j, k, l;
-
- j = 0;
- l = 0;
- for (k = 0; k < len; k++)
- if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
- l = targ[j++] = buf[k + offs];
- if (l == 0x20)
- j--;
- targ[j] = 0;
-}
-
-static int xn(char *buf, int offs, int size)
-{
- int v, k;
-
- v = 0;
- for (k = 0; k < size; k++)
- v = v * 256 + (buf[k + offs] & 0xff);
- return v;
-}
-
-static int pt_identify(struct pt_unit *tape)
-{
- int dt, s;
- char *ms[2] = { "master", "slave" };
- char mf[10], id[18];
- char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char ms_cmd[12] =
- { ATAPI_MODE_SENSE, 0, 0x2a, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char ls_cmd[12] =
- { ATAPI_LOG_SENSE, 0, 0x71, 0, 0, 0, 0, 0, 36, 0, 0, 0 };
- char buf[36];
-
- s = pt_atapi(tape, id_cmd, 36, buf, "identify");
- if (s)
- return -1;
-
- dt = buf[0] & 0x1f;
- if (dt != 1) {
- if (verbose)
- printk("%s: Drive %d, unsupported type %d\n",
- tape->name, tape->drive, dt);
- return -1;
- }
-
- xs(buf, mf, 8, 8);
- xs(buf, id, 16, 16);
-
- tape->flags = 0;
- tape->capacity = 0;
- tape->bs = 0;
-
- if (!pt_ready_wait(tape, PT_READY_TMO))
- tape->flags |= PT_MEDIA;
-
- if (!pt_atapi(tape, ms_cmd, 36, buf, "mode sense")) {
- if (!(buf[2] & 0x80))
- tape->flags |= PT_WRITE_OK;
- tape->bs = xn(buf, 10, 2);
- }
-
- if (!pt_atapi(tape, ls_cmd, 36, buf, "log sense"))
- tape->capacity = xn(buf, 24, 4);
-
- printk("%s: %s %s, %s", tape->name, mf, id, ms[tape->drive]);
- if (!(tape->flags & PT_MEDIA))
- printk(", no media\n");
- else {
- if (!(tape->flags & PT_WRITE_OK))
- printk(", RO");
- printk(", blocksize %d, %d MB\n", tape->bs, tape->capacity / 1024);
- }
-
- return 0;
-}
-
-
-/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
- */
-static int pt_probe(struct pt_unit *tape)
-{
- if (tape->drive == -1) {
- for (tape->drive = 0; tape->drive <= 1; tape->drive++)
- if (!pt_reset(tape))
- return pt_identify(tape);
- } else {
- if (!pt_reset(tape))
- return pt_identify(tape);
- }
- return -1;
-}
-
-static int pt_detect(void)
-{
- struct pt_unit *tape;
- int specified = 0, found = 0;
- int unit;
-
- printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- specified = 0;
- for (unit = 0; unit < PT_UNITS; unit++) {
- struct pt_unit *tape = &pt[unit];
- tape->pi = &tape->pia;
- atomic_set(&tape->available, 1);
- tape->flags = 0;
- tape->last_sense = 0;
- tape->present = 0;
- tape->bufptr = NULL;
- tape->drive = DU[D_SLV];
- snprintf(tape->name, PT_NAMELEN, "%s%d", name, unit);
- if (!DU[D_PRT])
- continue;
- specified++;
- if (pi_init(tape->pi, 0, DU[D_PRT], DU[D_MOD], DU[D_UNI],
- DU[D_PRO], DU[D_DLY], pt_scratch, PI_PT,
- verbose, tape->name)) {
- if (!pt_probe(tape)) {
- tape->present = 1;
- found++;
- } else
- pi_release(tape->pi);
- }
- }
- if (specified == 0) {
- tape = pt;
- if (pi_init(tape->pi, 1, -1, -1, -1, -1, -1, pt_scratch,
- PI_PT, verbose, tape->name)) {
- if (!pt_probe(tape)) {
- tape->present = 1;
- found++;
- } else
- pi_release(tape->pi);
- }
-
- }
- if (found)
- return 0;
-
- pi_unregister_driver(par_drv);
- printk("%s: No ATAPI tape drive detected\n", name);
- return -1;
-}
-
-static int pt_open(struct inode *inode, struct file *file)
-{
- int unit = iminor(inode) & 0x7F;
- struct pt_unit *tape = pt + unit;
- int err;
-
- mutex_lock(&pt_mutex);
- if (unit >= PT_UNITS || (!tape->present)) {
- mutex_unlock(&pt_mutex);
- return -ENODEV;
- }
-
- err = -EBUSY;
- if (!atomic_dec_and_test(&tape->available))
- goto out;
-
- pt_identify(tape);
-
- err = -ENODEV;
- if (!(tape->flags & PT_MEDIA))
- goto out;
-
- err = -EROFS;
- if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & FMODE_WRITE))
- goto out;
-
- if (!(iminor(inode) & 128))
- tape->flags |= PT_REWIND;
-
- err = -ENOMEM;
- tape->bufptr = kmalloc(PT_BUFSIZE, GFP_KERNEL);
- if (tape->bufptr == NULL) {
- printk("%s: buffer allocation failed\n", tape->name);
- goto out;
- }
-
- file->private_data = tape;
- mutex_unlock(&pt_mutex);
- return 0;
-
-out:
- atomic_inc(&tape->available);
- mutex_unlock(&pt_mutex);
- return err;
-}
-
-static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct pt_unit *tape = file->private_data;
- struct mtop __user *p = (void __user *)arg;
- struct mtop mtop;
-
- switch (cmd) {
- case MTIOCTOP:
- if (copy_from_user(&mtop, p, sizeof(struct mtop)))
- return -EFAULT;
-
- switch (mtop.mt_op) {
-
- case MTREW:
- mutex_lock(&pt_mutex);
- pt_rewind(tape);
- mutex_unlock(&pt_mutex);
- return 0;
-
- case MTWEOF:
- mutex_lock(&pt_mutex);
- pt_write_fm(tape);
- mutex_unlock(&pt_mutex);
- return 0;
-
- default:
- /* FIXME: rate limit ?? */
- printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name,
- mtop.mt_op);
- return -EINVAL;
- }
-
- default:
- return -ENOTTY;
- }
-}
-
-static int
-pt_release(struct inode *inode, struct file *file)
-{
- struct pt_unit *tape = file->private_data;
-
- if (atomic_read(&tape->available) > 1)
- return -EINVAL;
-
- if (tape->flags & PT_WRITING)
- pt_write_fm(tape);
-
- if (tape->flags & PT_REWIND)
- pt_rewind(tape);
-
- kfree(tape->bufptr);
- tape->bufptr = NULL;
-
- atomic_inc(&tape->available);
-
- return 0;
-
-}
-
-static ssize_t pt_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
-{
- struct pt_unit *tape = filp->private_data;
- struct pi_adapter *pi = tape->pi;
- char rd_cmd[12] = { ATAPI_READ_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, n, r, p, s, t, b;
-
- if (!(tape->flags & (PT_READING | PT_WRITING))) {
- tape->flags |= PT_READING;
- if (pt_atapi(tape, rd_cmd, 0, NULL, "start read-ahead"))
- return -EIO;
- } else if (tape->flags & PT_WRITING)
- return -EIO;
-
- if (tape->flags & PT_EOF)
- return 0;
-
- t = 0;
-
- while (count > 0) {
-
- if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "read"))
- return -EIO;
-
- n = count;
- if (n > 32768)
- n = 32768; /* max per command */
- b = (n - 1 + tape->bs) / tape->bs;
- n = b * tape->bs; /* rounded up to even block */
-
- rd_cmd[4] = b;
-
- r = pt_command(tape, rd_cmd, n, "read");
-
- mdelay(1);
-
- if (r) {
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- while (1) {
-
- r = pt_wait(tape, STAT_BUSY,
- STAT_DRQ | STAT_ERR | STAT_READY,
- DBMSG("read DRQ"), "");
-
- if (r & STAT_SENSE) {
- pi_disconnect(pi);
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- if (r)
- tape->flags |= PT_EOF;
-
- s = read_reg(pi, 7);
-
- if (!(s & STAT_DRQ))
- break;
-
- n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
- p = (read_reg(pi, 2) & 3);
- if (p != 2) {
- pi_disconnect(pi);
- printk("%s: Phase error on read: %d\n", tape->name,
- p);
- return -EIO;
- }
-
- while (n > 0) {
- k = n;
- if (k > PT_BUFSIZE)
- k = PT_BUFSIZE;
- pi_read_block(pi, tape->bufptr, k);
- n -= k;
- b = k;
- if (b > count)
- b = count;
- if (copy_to_user(buf + t, tape->bufptr, b)) {
- pi_disconnect(pi);
- return -EFAULT;
- }
- t += b;
- count -= b;
- }
-
- }
- pi_disconnect(pi);
- if (tape->flags & PT_EOF)
- break;
- }
-
- return t;
-
-}
-
-static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
-{
- struct pt_unit *tape = filp->private_data;
- struct pi_adapter *pi = tape->pi;
- char wr_cmd[12] = { ATAPI_WRITE_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, n, r, p, s, t, b;
-
- if (!(tape->flags & PT_WRITE_OK))
- return -EROFS;
-
- if (!(tape->flags & (PT_READING | PT_WRITING))) {
- tape->flags |= PT_WRITING;
- if (pt_atapi
- (tape, wr_cmd, 0, NULL, "start buffer-available mode"))
- return -EIO;
- } else if (tape->flags & PT_READING)
- return -EIO;
-
- if (tape->flags & PT_EOF)
- return -ENOSPC;
-
- t = 0;
-
- while (count > 0) {
-
- if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "write"))
- return -EIO;
-
- n = count;
- if (n > 32768)
- n = 32768; /* max per command */
- b = (n - 1 + tape->bs) / tape->bs;
- n = b * tape->bs; /* rounded up to even block */
-
- wr_cmd[4] = b;
-
- r = pt_command(tape, wr_cmd, n, "write");
-
- mdelay(1);
-
- if (r) { /* error delivering command only */
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- while (1) {
-
- r = pt_wait(tape, STAT_BUSY,
- STAT_DRQ | STAT_ERR | STAT_READY,
- DBMSG("write DRQ"), NULL);
-
- if (r & STAT_SENSE) {
- pi_disconnect(pi);
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- if (r)
- tape->flags |= PT_EOF;
-
- s = read_reg(pi, 7);
-
- if (!(s & STAT_DRQ))
- break;
-
- n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
- p = (read_reg(pi, 2) & 3);
- if (p != 0) {
- pi_disconnect(pi);
- printk("%s: Phase error on write: %d \n",
- tape->name, p);
- return -EIO;
- }
-
- while (n > 0) {
- k = n;
- if (k > PT_BUFSIZE)
- k = PT_BUFSIZE;
- b = k;
- if (b > count)
- b = count;
- if (copy_from_user(tape->bufptr, buf + t, b)) {
- pi_disconnect(pi);
- return -EFAULT;
- }
- pi_write_block(pi, tape->bufptr, k);
- t += b;
- count -= b;
- n -= k;
- }
-
- }
- pi_disconnect(pi);
- if (tape->flags & PT_EOF)
- break;
- }
-
- return t;
-}
-
-static int __init pt_init(void)
-{
- int unit;
- int err;
-
- if (disable) {
- err = -EINVAL;
- goto out;
- }
-
- if (pt_detect()) {
- err = -ENODEV;
- goto out;
- }
-
- err = register_chrdev(major, name, &pt_fops);
- if (err < 0) {
- printk("pt_init: unable to get major number %d\n", major);
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present)
- pi_release(pt[unit].pi);
- goto out;
- }
- major = err;
- pt_class = class_create(THIS_MODULE, "pt");
- if (IS_ERR(pt_class)) {
- err = PTR_ERR(pt_class);
- goto out_chrdev;
- }
-
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present) {
- device_create(pt_class, NULL, MKDEV(major, unit), NULL,
- "pt%d", unit);
- device_create(pt_class, NULL, MKDEV(major, unit + 128),
- NULL, "pt%dn", unit);
- }
- goto out;
-
-out_chrdev:
- unregister_chrdev(major, "pt");
-out:
- return err;
-}
-
-static void __exit pt_exit(void)
-{
- int unit;
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present) {
- device_destroy(pt_class, MKDEV(major, unit));
- device_destroy(pt_class, MKDEV(major, unit + 128));
- }
- class_destroy(pt_class);
- unregister_chrdev(major, name);
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present)
- pi_release(pt[unit].pi);
-}
-
-MODULE_LICENSE("GPL");
-module_init(pt_init)
-module_exit(pt_exit)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
new file mode 100644
index 000000000000..d5d7884cedd4
--- /dev/null
+++ b/drivers/block/pktcdvd.c
@@ -0,0 +1,2934 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
+ * DVD-RAM devices.
+ *
+ * Theory of operation:
+ *
+ * At the lowest level, there is the standard driver for the CD/DVD device,
+ * such as drivers/scsi/sr.c. This driver can handle read and write requests,
+ * but it doesn't know anything about the special restrictions that apply to
+ * packet writing. One restriction is that write requests must be aligned to
+ * packet boundaries on the physical media, and the size of a write request
+ * must be equal to the packet size. Another restriction is that a
+ * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
+ * command, if the previous command was a write.
+ *
+ * The purpose of the packet writing driver is to hide these restrictions from
+ * higher layers, such as file systems, and present a block device that can be
+ * randomly read and written using 2kB-sized blocks.
+ *
+ * The lowest layer in the packet writing driver is the packet I/O scheduler.
+ * Its data is defined by the struct packet_iosched and includes two bio
+ * queues with pending read and write requests. These queues are processed
+ * by the pkt_iosched_process_queue() function. The write requests in this
+ * queue are already properly aligned and sized. This layer is responsible for
+ * issuing the flush cache commands and scheduling the I/O in a good order.
+ *
+ * The next layer transforms unaligned write requests to aligned writes. This
+ * transformation requires reading missing pieces of data from the underlying
+ * block device, assembling the pieces to full packets and queuing them to the
+ * packet I/O scheduler.
+ *
+ * At the top layer there is a custom ->submit_bio function that forwards
+ * read requests directly to the iosched queue and puts write requests in the
+ * unaligned write queue. A kernel thread performs the necessary read
+ * gathering to convert the unaligned writes to aligned writes and then feeds
+ * them to the packet I/O scheduler.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pktcdvd.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/freezer.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/backing-dev.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/uaccess.h>
+
+#define DRIVER_NAME "pktcdvd"
+
+#define pkt_err(pd, fmt, ...) \
+ pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...) \
+ pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...) \
+do { \
+ if (level == 2 && PACKET_DEBUG >= 2) \
+ pr_notice("%s: %s():" fmt, \
+ pd->name, __func__, ##__VA_ARGS__); \
+ else if (level == 1 && PACKET_DEBUG >= 1) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
+} while (0)
+
+#define MAX_SPEED 0xffff
+
+static DEFINE_MUTEX(pktcdvd_mutex);
+static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
+static struct proc_dir_entry *pkt_proc;
+static int pktdev_major;
+static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
+static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
+static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
+static mempool_t psd_pool;
+static struct bio_set pkt_bio_set;
+
+/* /sys/class/pktcdvd */
+static struct class class_pktcdvd;
+static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
+
+/* forward declaration */
+static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
+static int pkt_remove_dev(dev_t pkt_dev);
+static int pkt_seq_show(struct seq_file *m, void *p);
+
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
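+
+/*
+ * Illustrative example (editor's note, hypothetical numbers): with a
+ * 64 KiB packet, pd->settings.size is 128 sectors, so for pd->offset == 0
+ * a bio starting at sector 300 falls in zone 300 & ~127 == 256.
+ */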
+
+/**********************************************************
+ * sysfs interface for pktcdvd
+ * by (C) 2006 Thomas Maier <balagi@justmail.de>
+
+ /sys/class/pktcdvd/pktcdvd[0-7]/
+ stat/reset
+ stat/packets_started
+ stat/packets_finished
+ stat/kb_written
+ stat/kb_read
+ stat/kb_read_gather
+ write_queue/size
+ write_queue/congestion_off
+ write_queue/congestion_on
+ **********************************************************/
+
+static ssize_t packets_started_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
+}
+static DEVICE_ATTR_RO(packets_started);
+
+static ssize_t packets_finished_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
+}
+static DEVICE_ATTR_RO(packets_finished);
+
+static ssize_t kb_written_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
+}
+static DEVICE_ATTR_RO(kb_written);
+
+static ssize_t kb_read_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
+}
+static DEVICE_ATTR_RO(kb_read);
+
+static ssize_t kb_read_gather_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
+}
+static DEVICE_ATTR_RO(kb_read_gather);
+
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ if (len > 0) {
+ pd->stats.pkt_started = 0;
+ pd->stats.pkt_ended = 0;
+ pd->stats.secs_w = 0;
+ pd->stats.secs_rg = 0;
+ pd->stats.secs_r = 0;
+ }
+ return len;
+}
+static DEVICE_ATTR_WO(reset);
+
+static struct attribute *pkt_stat_attrs[] = {
+ &dev_attr_packets_finished.attr,
+ &dev_attr_packets_started.attr,
+ &dev_attr_kb_read.attr,
+ &dev_attr_kb_written.attr,
+ &dev_attr_kb_read_gather.attr,
+ &dev_attr_reset.attr,
+ NULL,
+};
+
+static const struct attribute_group pkt_stat_group = {
+ .name = "stat",
+ .attrs = pkt_stat_attrs,
+};
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
+ spin_unlock(&pd->lock);
+ return n;
+}
+static DEVICE_ATTR_RO(size);
+
+static void init_write_congestion_marks(int* lo, int* hi)
+{
+ if (*hi > 0) {
+ *hi = max(*hi, 500);
+ *hi = min(*hi, 1000000);
+ if (*lo <= 0)
+ *lo = *hi - 100;
+ else {
+ *lo = min(*lo, *hi - 100);
+ *lo = max(*lo, 100);
+ }
+ } else {
+ *hi = -1;
+ *lo = -1;
+ }
+}
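+
+/*
+ * Illustrative example (editor's note, hypothetical values): writing 200 to
+ * congestion_on while congestion_off is unset (<= 0) yields *hi == 500
+ * (clamped to the 500..1000000 range) and *lo == 400; a non-positive *hi
+ * disables both marks (-1/-1).
+ */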
+
+static ssize_t congestion_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
+
+ if (sscanf(buf, "%d", &val) == 1) {
+ spin_lock(&pd->lock);
+ pd->write_congestion_off = val;
+ init_write_congestion_marks(&pd->write_congestion_off,
+ &pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ }
+ return len;
+}
+static DEVICE_ATTR_RW(congestion_off);
+
+static ssize_t congestion_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
+
+ if (sscanf(buf, "%d", &val) == 1) {
+ spin_lock(&pd->lock);
+ pd->write_congestion_on = val;
+ init_write_congestion_marks(&pd->write_congestion_off,
+ &pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ }
+ return len;
+}
+static DEVICE_ATTR_RW(congestion_on);
+
+static struct attribute *pkt_wq_attrs[] = {
+ &dev_attr_congestion_on.attr,
+ &dev_attr_congestion_off.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group pkt_wq_group = {
+ .name = "write_queue",
+ .attrs = pkt_wq_attrs,
+};
+
+static const struct attribute_group *pkt_groups[] = {
+ &pkt_stat_group,
+ &pkt_wq_group,
+ NULL,
+};
+
+static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
+{
+ if (class_is_registered(&class_pktcdvd)) {
+ pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
+ MKDEV(0, 0), pd, pkt_groups,
+ "%s", pd->name);
+ if (IS_ERR(pd->dev))
+ pd->dev = NULL;
+ }
+}
+
+static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
+{
+ if (class_is_registered(&class_pktcdvd))
+ device_unregister(pd->dev);
+}
+
+
+/********************************************************************
+ /sys/class/pktcdvd/
+ add map block device
+ remove unmap packet dev
+ device_map show mappings
+ *******************************************************************/
+
+static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
+ char *data)
+{
+ int n = 0;
+ int idx;
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ for (idx = 0; idx < MAX_WRITERS; idx++) {
+ struct pktcdvd_device *pd = pkt_devs[idx];
+ if (!pd)
+ continue;
+ n += sprintf(data+n, "%s %u:%u %u:%u\n",
+ pd->name,
+ MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
+ MAJOR(pd->bdev->bd_dev),
+ MINOR(pd->bdev->bd_dev));
+ }
+ mutex_unlock(&ctl_mutex);
+ return n;
+}
+static CLASS_ATTR_RO(device_map);
+
+static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int major, minor;
+
+ if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+ /* pkt_setup_dev() expects caller to hold reference to self */
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ pkt_setup_dev(MKDEV(major, minor), NULL);
+
+ module_put(THIS_MODULE);
+
+ return count;
+ }
+
+ return -EINVAL;
+}
+static CLASS_ATTR_WO(add);
+
+static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int major, minor;
+ if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+ pkt_remove_dev(MKDEV(major, minor));
+ return count;
+ }
+ return -EINVAL;
+}
+static CLASS_ATTR_WO(remove);
+
+static struct attribute *class_pktcdvd_attrs[] = {
+ &class_attr_add.attr,
+ &class_attr_remove.attr,
+ &class_attr_device_map.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(class_pktcdvd);
+
+static struct class class_pktcdvd = {
+ .name = DRIVER_NAME,
+ .class_groups = class_pktcdvd_groups,
+};
+
+static int pkt_sysfs_init(void)
+{
+ /*
+ * create control files in sysfs
+ * /sys/class/pktcdvd/...
+ */
+ return class_register(&class_pktcdvd);
+}
+
+static void pkt_sysfs_cleanup(void)
+{
+ class_unregister(&class_pktcdvd);
+}
+
+/********************************************************************
+ entries in debugfs
+
+ /sys/kernel/debug/pktcdvd[0-7]/
+ info
+
+ *******************************************************************/
+
+static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
+{
+ return pkt_seq_show(m, p);
+}
+
+static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pkt_debugfs_seq_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = pkt_debugfs_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
+{
+ if (!pkt_debugfs_root)
+ return;
+ pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
+ if (!pd->dfs_d_root)
+ return;
+
+ pd->dfs_f_info = debugfs_create_file("info", 0444,
+ pd->dfs_d_root, pd, &debug_fops);
+}
+
+static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
+{
+ if (!pkt_debugfs_root)
+ return;
+ debugfs_remove(pd->dfs_f_info);
+ debugfs_remove(pd->dfs_d_root);
+ pd->dfs_f_info = NULL;
+ pd->dfs_d_root = NULL;
+}
+
+static void pkt_debugfs_init(void)
+{
+ pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
+}
+
+static void pkt_debugfs_cleanup(void)
+{
+ debugfs_remove(pkt_debugfs_root);
+ pkt_debugfs_root = NULL;
+}
+
+/* ----------------------------------------------------------*/
+
+
+static void pkt_bio_finished(struct pktcdvd_device *pd)
+{
+ BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
+ if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
+ pkt_dbg(2, pd, "queue empty\n");
+ atomic_set(&pd->iosched.attention, 1);
+ wake_up(&pd->wqueue);
+ }
+}
+
+/*
+ * Allocate a packet_data struct
+ */
+static struct packet_data *pkt_alloc_packet_data(int frames)
+{
+ int i;
+ struct packet_data *pkt;
+
+ pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
+ if (!pkt)
+ goto no_pkt;
+
+ pkt->frames = frames;
+ pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
+ if (!pkt->w_bio)
+ goto no_bio;
+
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
+ pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!pkt->pages[i])
+ goto no_page;
+ }
+
+ spin_lock_init(&pkt->lock);
+ bio_list_init(&pkt->orig_bios);
+
+ for (i = 0; i < frames; i++) {
+ pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
+ if (!pkt->r_bios[i])
+ goto no_rd_bio;
+ }
+
+ return pkt;
+
+no_rd_bio:
+ for (i = 0; i < frames; i++)
+ kfree(pkt->r_bios[i]);
+no_page:
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
+ if (pkt->pages[i])
+ __free_page(pkt->pages[i]);
+ kfree(pkt->w_bio);
+no_bio:
+ kfree(pkt);
+no_pkt:
+ return NULL;
+}
+
+/*
+ * Free a packet_data struct
+ */
+static void pkt_free_packet_data(struct packet_data *pkt)
+{
+ int i;
+
+ for (i = 0; i < pkt->frames; i++)
+ kfree(pkt->r_bios[i]);
+ for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
+ __free_page(pkt->pages[i]);
+ kfree(pkt->w_bio);
+ kfree(pkt);
+}
+
+static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *next;
+
+ BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
+
+ list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
+ pkt_free_packet_data(pkt);
+ }
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+}
+
+static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
+{
+ struct packet_data *pkt;
+
+ BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
+ while (nr_packets > 0) {
+ pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
+ if (!pkt) {
+ pkt_shrink_pktlist(pd);
+ return 0;
+ }
+ pkt->id = nr_packets;
+ pkt->pd = pd;
+ list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+ nr_packets--;
+ }
+ return 1;
+}
+
+static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
+{
+ struct rb_node *n = rb_next(&node->rb_node);
+ if (!n)
+ return NULL;
+ return rb_entry(n, struct pkt_rb_node, rb_node);
+}
+
+static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+ rb_erase(&node->rb_node, &pd->bio_queue);
+ mempool_free(node, &pd->rb_pool);
+ pd->bio_queue_size--;
+ BUG_ON(pd->bio_queue_size < 0);
+}
+
+/*
+ * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
+ */
+static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
+{
+ struct rb_node *n = pd->bio_queue.rb_node;
+ struct rb_node *next;
+ struct pkt_rb_node *tmp;
+
+ if (!n) {
+ BUG_ON(pd->bio_queue_size > 0);
+ return NULL;
+ }
+
+ for (;;) {
+ tmp = rb_entry(n, struct pkt_rb_node, rb_node);
+ if (s <= tmp->bio->bi_iter.bi_sector)
+ next = n->rb_left;
+ else
+ next = n->rb_right;
+ if (!next)
+ break;
+ n = next;
+ }
+
+ if (s > tmp->bio->bi_iter.bi_sector) {
+ tmp = pkt_rbtree_next(tmp);
+ if (!tmp)
+ return NULL;
+ }
+ BUG_ON(s > tmp->bio->bi_iter.bi_sector);
+ return tmp;
+}
+
+/*
+ * Insert a node into the pd->bio_queue rb tree.
+ */
+static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+ struct rb_node **p = &pd->bio_queue.rb_node;
+ struct rb_node *parent = NULL;
+ sector_t s = node->bio->bi_iter.bi_sector;
+ struct pkt_rb_node *tmp;
+
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
+ if (s < tmp->bio->bi_iter.bi_sector)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &pd->bio_queue);
+ pd->bio_queue_size++;
+}
+
+/*
+ * Send a packet_command to the underlying block device and
+ * wait for completion.
+ */
+static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+ struct request_queue *q = bdev_get_queue(pd->bdev);
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+ int ret = 0;
+
+ rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ if (cgc->buflen) {
+ ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ GFP_NOIO);
+ if (ret)
+ goto out;
+ }
+
+ scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+ memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
+
+ rq->timeout = 60*HZ;
+ if (cgc->quiet)
+ rq->rq_flags |= RQF_QUIET;
+
+ blk_execute_rq(rq, false);
+ if (scmd->result)
+ ret = -EIO;
+out:
+ blk_mq_free_request(rq);
+ return ret;
+}
+
+static const char *sense_key_string(__u8 index)
+{
+ static const char * const info[] = {
+ "No sense", "Recovered error", "Not ready",
+ "Medium error", "Hardware error", "Illegal request",
+ "Unit attention", "Data protect", "Blank check",
+ };
+
+ return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
+/*
+ * A generic sense dump / resolve mechanism should be implemented across
+ * all ATAPI + SCSI devices.
+ */
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+ struct packet_command *cgc)
+{
+ struct scsi_sense_hdr *sshdr = cgc->sshdr;
+
+ if (sshdr)
+ pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+ CDROM_PACKET_SIZE, cgc->cmd,
+ sshdr->sense_key, sshdr->asc, sshdr->ascq,
+ sense_key_string(sshdr->sense_key));
+ else
+ pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
+}
+
+/*
+ * flush the drive cache to media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+ cgc.quiet = 1;
+
+ /*
+ * the IMMED bit -- we default to not setting it; although setting it
+ * would allow a much faster close, leaving it clear is safer
+ */
+#if 0
+ cgc.cmd[1] = 1 << 1;
+#endif
+ return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * speeds are passed in kB/s (1x CD speed is about 177 kB/s)
+ */
+static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
+ unsigned write_speed, unsigned read_speed)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ int ret;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.sshdr = &sshdr;
+ cgc.cmd[0] = GPCMD_SET_SPEED;
+ cgc.cmd[2] = (read_speed >> 8) & 0xff;
+ cgc.cmd[3] = read_speed & 0xff;
+ cgc.cmd[4] = (write_speed >> 8) & 0xff;
+ cgc.cmd[5] = write_speed & 0xff;
+
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ pkt_dump_sense(pd, &cgc);
+
+ return ret;
+}
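+
+/*
+ * Illustrative example (editor's note): the 16x fallback used by
+ * pkt_open_write() arrives here as write_speed == 16 * 177 == 2832 kB/s,
+ * which is packed big-endian as cmd[4] == 0x0b, cmd[5] == 0x10.
+ */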
+
+/*
+ * Queue a bio for processing by the low-level CD device. Must be called
+ * from process context.
+ */
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
+{
+ spin_lock(&pd->iosched.lock);
+ if (bio_data_dir(bio) == READ)
+ bio_list_add(&pd->iosched.read_queue, bio);
+ else
+ bio_list_add(&pd->iosched.write_queue, bio);
+ spin_unlock(&pd->iosched.lock);
+
+ atomic_set(&pd->iosched.attention, 1);
+ wake_up(&pd->wqueue);
+}
+
+/*
+ * Process the queued read/write requests. This function handles special
+ * requirements for CDRW drives:
+ * - A cache flush command must be inserted before a read request if the
+ * previous request was a write.
+ * - Switching between reading and writing is slow, so don't do it more often
+ * than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ * writes will never be interrupted by a read, but if the drive has to seek
+ * before the next write, switch to reading instead if there are any pending
+ * read requests.
+ * - Set the read speed according to current usage pattern. When only reading
+ * from the device, it's best to use the highest possible read speed, but
+ * when switching often between reading and writing, it's better to have the
+ * same read and write speeds.
+ */
+static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
+{
+
+ if (atomic_read(&pd->iosched.attention) == 0)
+ return;
+ atomic_set(&pd->iosched.attention, 0);
+
+ for (;;) {
+ struct bio *bio;
+ int reads_queued, writes_queued;
+
+ spin_lock(&pd->iosched.lock);
+ reads_queued = !bio_list_empty(&pd->iosched.read_queue);
+ writes_queued = !bio_list_empty(&pd->iosched.write_queue);
+ spin_unlock(&pd->iosched.lock);
+
+ if (!reads_queued && !writes_queued)
+ break;
+
+ if (pd->iosched.writing) {
+ int need_write_seek = 1;
+ spin_lock(&pd->iosched.lock);
+ bio = bio_list_peek(&pd->iosched.write_queue);
+ spin_unlock(&pd->iosched.lock);
+ if (bio && (bio->bi_iter.bi_sector ==
+ pd->iosched.last_write))
+ need_write_seek = 0;
+ if (need_write_seek && reads_queued) {
+ if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+ pkt_dbg(2, pd, "write, waiting\n");
+ break;
+ }
+ pkt_flush_cache(pd);
+ pd->iosched.writing = 0;
+ }
+ } else {
+ if (!reads_queued && writes_queued) {
+ if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+ pkt_dbg(2, pd, "read, waiting\n");
+ break;
+ }
+ pd->iosched.writing = 1;
+ }
+ }
+
+ spin_lock(&pd->iosched.lock);
+ if (pd->iosched.writing)
+ bio = bio_list_pop(&pd->iosched.write_queue);
+ else
+ bio = bio_list_pop(&pd->iosched.read_queue);
+ spin_unlock(&pd->iosched.lock);
+
+ if (!bio)
+ continue;
+
+ if (bio_data_dir(bio) == READ)
+ pd->iosched.successive_reads +=
+ bio->bi_iter.bi_size >> 10;
+ else {
+ pd->iosched.successive_reads = 0;
+ pd->iosched.last_write = bio_end_sector(bio);
+ }
+ if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
+ if (pd->read_speed == pd->write_speed) {
+ pd->read_speed = MAX_SPEED;
+ pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+ }
+ } else {
+ if (pd->read_speed != pd->write_speed) {
+ pd->read_speed = pd->write_speed;
+ pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+ }
+ }
+
+ atomic_inc(&pd->cdrw.pending_bios);
+ submit_bio_noacct(bio);
+ }
+}
+
+/*
+ * Special care is needed if the underlying block device has a small
+ * max_phys_segments value.
+ */
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
+{
+ if ((pd->settings.size << 9) / CD_FRAMESIZE
+ <= queue_max_segments(q)) {
+ /*
+ * The cdrom device can handle one segment/frame
+ */
+ clear_bit(PACKET_MERGE_SEGS, &pd->flags);
+ return 0;
+ } else if ((pd->settings.size << 9) / PAGE_SIZE
+ <= queue_max_segments(q)) {
+ /*
+ * We can handle this case at the expense of some extra memory
+ * copies during write operations
+ */
+ set_bit(PACKET_MERGE_SEGS, &pd->flags);
+ return 0;
+ } else {
+ pkt_err(pd, "cdrom max_phys_segments too small\n");
+ return -EIO;
+ }
+}
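+
+/*
+ * Illustrative example (editor's note, assuming 4 KiB pages and a 64 KiB
+ * packet, i.e. settings.size == 128): the packet spans 32 CD_FRAMESIZE
+ * frames and 16 pages, so a queue allowing fewer than 32 but at least 16
+ * segments takes the PACKET_MERGE_SEGS path above.
+ */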
+
+static void pkt_end_io_read(struct bio *bio)
+{
+ struct packet_data *pkt = bio->bi_private;
+ struct pktcdvd_device *pd = pkt->pd;
+ BUG_ON(!pd);
+
+ pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+ bio, (unsigned long long)pkt->sector,
+ (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
+
+ if (bio->bi_status)
+ atomic_inc(&pkt->io_errors);
+ bio_uninit(bio);
+ if (atomic_dec_and_test(&pkt->io_wait)) {
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+ }
+ pkt_bio_finished(pd);
+}
+
+static void pkt_end_io_packet_write(struct bio *bio)
+{
+ struct packet_data *pkt = bio->bi_private;
+ struct pktcdvd_device *pd = pkt->pd;
+ BUG_ON(!pd);
+
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
+
+ pd->stats.pkt_ended++;
+
+ bio_uninit(bio);
+ pkt_bio_finished(pd);
+ atomic_dec(&pkt->io_wait);
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+}
+
+/*
+ * Schedule reads for the holes in a packet
+ */
+static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ int frames_read = 0;
+ struct bio *bio;
+ int f;
+ char written[PACKET_MAX_SIZE];
+
+ BUG_ON(bio_list_empty(&pkt->orig_bios));
+
+ atomic_set(&pkt->io_wait, 0);
+ atomic_set(&pkt->io_errors, 0);
+
+ /*
+ * Figure out which frames we need to read before we can write.
+ */
+ memset(written, 0, sizeof(written));
+ spin_lock(&pkt->lock);
+ bio_list_for_each(bio, &pkt->orig_bios) {
+ int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+ (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
+ pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
+ BUG_ON(first_frame < 0);
+ BUG_ON(first_frame + num_frames > pkt->frames);
+ for (f = first_frame; f < first_frame + num_frames; f++)
+ written[f] = 1;
+ }
+ spin_unlock(&pkt->lock);
+
+ if (pkt->cache_valid) {
+ pkt_dbg(2, pd, "zone %llx cached\n",
+ (unsigned long long)pkt->sector);
+ goto out_account;
+ }
+
+ /*
+ * Schedule reads for missing parts of the packet.
+ */
+ for (f = 0; f < pkt->frames; f++) {
+ int p, offset;
+
+ if (written[f])
+ continue;
+
+ bio = pkt->r_bios[f];
+ bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
+ bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_end_io = pkt_end_io_read;
+ bio->bi_private = pkt;
+
+ p = (f * CD_FRAMESIZE) / PAGE_SIZE;
+ offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
+ f, pkt->pages[p], offset);
+ if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
+ BUG();
+
+ atomic_inc(&pkt->io_wait);
+ pkt_queue_bio(pd, bio);
+ frames_read++;
+ }
+
+out_account:
+ pkt_dbg(2, pd, "need %d frames for zone %llx\n",
+ frames_read, (unsigned long long)pkt->sector);
+ pd->stats.pkt_started++;
+ pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
+}
+
+/*
+ * Find a packet matching zone, or the least recently used packet if
+ * there is no match.
+ */
+static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
+{
+ struct packet_data *pkt;
+
+ list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
+ if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
+ list_del_init(&pkt->list);
+ if (pkt->sector != zone)
+ pkt->cache_valid = 0;
+ return pkt;
+ }
+ }
+ BUG();
+ return NULL;
+}
+
+static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ if (pkt->cache_valid) {
+ list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+ } else {
+ list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
+ }
+}
+
+static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
+{
+#if PACKET_DEBUG > 1
+ static const char *state_name[] = {
+ "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
+ };
+ enum packet_data_state old_state = pkt->state;
+ pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+ pkt->id, (unsigned long long)pkt->sector,
+ state_name[old_state], state_name[state]);
+#endif
+ pkt->state = state;
+}
+
+/*
+ * Scan the work queue to see if we can start a new packet.
+ * Returns non-zero if any work was done.
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *p;
+ struct bio *bio = NULL;
+ sector_t zone = 0; /* Suppress gcc warning */
+ struct pkt_rb_node *node, *first_node;
+ struct rb_node *n;
+
+ atomic_set(&pd->scan_queue, 0);
+
+ if (list_empty(&pd->cdrw.pkt_free_list)) {
+ pkt_dbg(2, pd, "no pkt\n");
+ return 0;
+ }
+
+ /*
+ * Try to find a zone we are not already working on.
+ */
+ spin_lock(&pd->lock);
+ first_node = pkt_rbtree_find(pd, pd->current_sector);
+ if (!first_node) {
+ n = rb_first(&pd->bio_queue);
+ if (n)
+ first_node = rb_entry(n, struct pkt_rb_node, rb_node);
+ }
+ node = first_node;
+ while (node) {
+ bio = node->bio;
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
+ list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
+ if (p->sector == zone) {
+ bio = NULL;
+ goto try_next_bio;
+ }
+ }
+ break;
+try_next_bio:
+ node = pkt_rbtree_next(node);
+ if (!node) {
+ n = rb_first(&pd->bio_queue);
+ if (n)
+ node = rb_entry(n, struct pkt_rb_node, rb_node);
+ }
+ if (node == first_node)
+ node = NULL;
+ }
+ spin_unlock(&pd->lock);
+ if (!bio) {
+ pkt_dbg(2, pd, "no bio\n");
+ return 0;
+ }
+
+ pkt = pkt_get_packet_data(pd, zone);
+
+ pd->current_sector = zone + pd->settings.size;
+ pkt->sector = zone;
+ BUG_ON(pkt->frames != pd->settings.size >> 2);
+ pkt->write_size = 0;
+
+ /*
+ * Scan work queue for bios in the same zone and link them
+ * to this packet.
+ */
+ spin_lock(&pd->lock);
+ pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
+ while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
+ bio = node->bio;
+ pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+ get_zone(bio->bi_iter.bi_sector, pd));
+ if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
+ break;
+ pkt_rbtree_erase(pd, node);
+ spin_lock(&pkt->lock);
+ bio_list_add(&pkt->orig_bios, bio);
+ pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
+ spin_unlock(&pkt->lock);
+ }
+ /* check write congestion marks, and if bio_queue_size is
+ * below the congestion-off mark, wake up any waiters
+ */
+ if (pd->congested &&
+ pd->bio_queue_size <= pd->write_congestion_off) {
+ pd->congested = false;
+ wake_up_var(&pd->congested);
+ }
+ spin_unlock(&pd->lock);
+
+ pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
+ pkt_set_state(pkt, PACKET_WAITING_STATE);
+ atomic_set(&pkt->run_sm, 1);
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_add(&pkt->list, &pd->cdrw.pkt_active_list);
+ spin_unlock(&pd->cdrw.active_list_lock);
+
+ return 1;
+}
+
+/**
+ * bio_list_copy_data - copy contents of data buffers from one chain of bios to
+ * another
+ * @dst: destination bio list
+ * @src: source bio list
+ *
+ * Stops when it reaches the end of either the @src list or @dst list - that is,
+ * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
+ * bios).
+ */
+static void bio_list_copy_data(struct bio *dst, struct bio *src)
+{
+ struct bvec_iter src_iter = src->bi_iter;
+ struct bvec_iter dst_iter = dst->bi_iter;
+
+ while (1) {
+ if (!src_iter.bi_size) {
+ src = src->bi_next;
+ if (!src)
+ break;
+
+ src_iter = src->bi_iter;
+ }
+
+ if (!dst_iter.bi_size) {
+ dst = dst->bi_next;
+ if (!dst)
+ break;
+
+ dst_iter = dst->bi_iter;
+ }
+
+ bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
+ }
+}
+
+/*
+ * Assemble a bio to write one packet and queue the bio for processing
+ * by the underlying block device.
+ */
+static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ int f;
+
+ bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
+ REQ_OP_WRITE);
+ pkt->w_bio->bi_iter.bi_sector = pkt->sector;
+ pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->w_bio->bi_private = pkt;
+
+ /* XXX: locking? */
+ for (f = 0; f < pkt->frames; f++) {
+ struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+ if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
+ BUG();
+ }
+ pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
+
+ /*
+ * Fill-in bvec with data from orig_bios.
+ */
+ spin_lock(&pkt->lock);
+ bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
+
+ pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
+ spin_unlock(&pkt->lock);
+
+ pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
+ pkt->write_size, (unsigned long long)pkt->sector);
+
+ if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
+ pkt->cache_valid = 1;
+ else
+ pkt->cache_valid = 0;
+
+ /* Start the write request */
+ atomic_set(&pkt->io_wait, 1);
+ pkt_queue_bio(pd, pkt->w_bio);
+}
+
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
+{
+ struct bio *bio;
+
+ if (status)
+ pkt->cache_valid = 0;
+
+ /* Finish all bios corresponding to this packet */
+ while ((bio = bio_list_pop(&pkt->orig_bios))) {
+ bio->bi_status = status;
+ bio_endio(bio);
+ }
+}
+
+static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ pkt_dbg(2, pd, "pkt %d\n", pkt->id);
+
+ for (;;) {
+ switch (pkt->state) {
+ case PACKET_WAITING_STATE:
+ if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
+ return;
+
+ pkt->sleep_time = 0;
+ pkt_gather_data(pd, pkt);
+ pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
+ break;
+
+ case PACKET_READ_WAIT_STATE:
+ if (atomic_read(&pkt->io_wait) > 0)
+ return;
+
+ if (atomic_read(&pkt->io_errors) > 0) {
+ pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+ } else {
+ pkt_start_write(pd, pkt);
+ }
+ break;
+
+ case PACKET_WRITE_WAIT_STATE:
+ if (atomic_read(&pkt->io_wait) > 0)
+ return;
+
+ if (!pkt->w_bio->bi_status) {
+ pkt_set_state(pkt, PACKET_FINISHED_STATE);
+ } else {
+ pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+ }
+ break;
+
+ case PACKET_RECOVERY_STATE:
+ pkt_dbg(2, pd, "No recovery possible\n");
+ pkt_set_state(pkt, PACKET_FINISHED_STATE);
+ break;
+
+ case PACKET_FINISHED_STATE:
+ pkt_finish_packet(pkt, pkt->w_bio->bi_status);
+ return;
+
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+static void pkt_handle_packets(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *next;
+
+ /*
+ * Run state machine for active packets
+ */
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (atomic_read(&pkt->run_sm) > 0) {
+ atomic_set(&pkt->run_sm, 0);
+ pkt_run_state_machine(pd, pkt);
+ }
+ }
+
+ /*
+ * Move no longer active packets to the free list
+ */
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
+ if (pkt->state == PACKET_FINISHED_STATE) {
+ list_del(&pkt->list);
+ pkt_put_packet_data(pd, pkt);
+ pkt_set_state(pkt, PACKET_IDLE_STATE);
+ atomic_set(&pd->scan_queue, 1);
+ }
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+static void pkt_count_states(struct pktcdvd_device *pd, int *states)
+{
+ struct packet_data *pkt;
+ int i;
+
+ for (i = 0; i < PACKET_NUM_STATES; i++)
+ states[i] = 0;
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ states[pkt->state]++;
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+/*
+ * kcdrwd is woken up when writes have been queued for one of our
+ * registered devices
+ */
+static int kcdrwd(void *foobar)
+{
+ struct pktcdvd_device *pd = foobar;
+ struct packet_data *pkt;
+ long min_sleep_time, residue;
+
+ set_user_nice(current, MIN_NICE);
+ set_freezable();
+
+ for (;;) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * Wait until there is something to do
+ */
+ add_wait_queue(&pd->wqueue, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* Check if we need to run pkt_handle_queue */
+ if (atomic_read(&pd->scan_queue) > 0)
+ goto work_to_do;
+
+ /* Check if we need to run the state machine for some packet */
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (atomic_read(&pkt->run_sm) > 0)
+ goto work_to_do;
+ }
+
+ /* Check if we need to process the iosched queues */
+ if (atomic_read(&pd->iosched.attention) != 0)
+ goto work_to_do;
+
+ /* Otherwise, go to sleep */
+ if (PACKET_DEBUG > 1) {
+ int states[PACKET_NUM_STATES];
+ pkt_count_states(pd, states);
+ pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2],
+ states[3], states[4], states[5]);
+ }
+
+ min_sleep_time = MAX_SCHEDULE_TIMEOUT;
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
+ min_sleep_time = pkt->sleep_time;
+ }
+
+ pkt_dbg(2, pd, "sleeping\n");
+ residue = schedule_timeout(min_sleep_time);
+ pkt_dbg(2, pd, "wake up\n");
+
+ /* make swsusp happy with our thread */
+ try_to_freeze();
+
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (!pkt->sleep_time)
+ continue;
+ pkt->sleep_time -= min_sleep_time - residue;
+ if (pkt->sleep_time <= 0) {
+ pkt->sleep_time = 0;
+ atomic_inc(&pkt->run_sm);
+ }
+ }
+
+ if (kthread_should_stop())
+ break;
+ }
+work_to_do:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&pd->wqueue, &wait);
+
+ if (kthread_should_stop())
+ break;
+
+ /*
+ * if pkt_handle_queue returns true, we can queue
+ * another request.
+ */
+ while (pkt_handle_queue(pd))
+ ;
+
+ /*
+ * Handle packet state machine
+ */
+ pkt_handle_packets(pd);
+
+ /*
+ * Handle iosched queues
+ */
+ pkt_iosched_process_queue(pd);
+ }
+
+ return 0;
+}
+
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+ pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+ pd->settings.fp ? "Fixed" : "Variable",
+ pd->settings.size >> 2,
+ pd->settings.block_mode == 8 ? '1' : '2');
+}
+
+static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
+{
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+
+ cgc->cmd[0] = GPCMD_MODE_SENSE_10;
+ cgc->cmd[2] = page_code | (page_control << 6);
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_READ;
+ return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+ memset(cgc->buffer, 0, 2);
+ cgc->cmd[0] = GPCMD_MODE_SELECT_10;
+ cgc->cmd[1] = 0x10; /* PF */
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_WRITE;
+ return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
+{
+ struct packet_command cgc;
+ int ret;
+
+ /* set up command and get the disc info */
+ init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_DISC_INFO;
+ cgc.cmd[8] = cgc.buflen = 2;
+ cgc.quiet = 1;
+
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ return ret;
+
+ /* not all drives have the same disc_info length, so requeue
+ * packet with the length the drive tells us it can supply
+ */
+ cgc.buflen = be16_to_cpu(di->disc_information_length) +
+ sizeof(di->disc_information_length);
+
+ if (cgc.buflen > sizeof(disc_information))
+ cgc.buflen = sizeof(disc_information);
+
+ cgc.cmd[8] = cgc.buflen;
+ return pkt_generic_packet(pd, &cgc);
+}
+
+static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
+{
+ struct packet_command cgc;
+ int ret;
+
+ init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
+ cgc.cmd[1] = type & 3;
+ cgc.cmd[4] = (track & 0xff00) >> 8;
+ cgc.cmd[5] = track & 0xff;
+ cgc.cmd[8] = 8;
+ cgc.quiet = 1;
+
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ return ret;
+
+ cgc.buflen = be16_to_cpu(ti->track_information_length) +
+ sizeof(ti->track_information_length);
+
+ if (cgc.buflen > sizeof(track_information))
+ cgc.buflen = sizeof(track_information);
+
+ cgc.cmd[8] = cgc.buflen;
+ return pkt_generic_packet(pd, &cgc);
+}
+
+static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
+ long *last_written)
+{
+ disc_information di;
+ track_information ti;
+ __u32 last_track;
+ int ret;
+
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret)
+ return ret;
+
+ last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
+ return ret;
+
+ /* if this track is blank, try the previous. */
+ if (ti.blank) {
+ last_track--;
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
+ return ret;
+ }
+
+ /* if last recorded field is valid, return it. */
+ if (ti.lra_v) {
+ *last_written = be32_to_cpu(ti.last_rec_address);
+ } else {
+ /* make it up instead */
+ *last_written = be32_to_cpu(ti.track_start) +
+ be32_to_cpu(ti.track_size);
+ if (ti.free_blocks)
+ *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+ }
+ return 0;
+}
+
+/*
+ * write mode select package based on pd->settings
+ */
+static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ write_param_page *wp;
+ char buffer[128];
+ int ret, size;
+
+ /* doesn't apply to DVD+RW or DVD-RAM */
+ if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
+ return 0;
+
+ memset(buffer, 0, sizeof(buffer));
+ init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
+ pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+ if (size > sizeof(buffer))
+ size = sizeof(buffer);
+
+ /*
+ * now get it all
+ */
+ init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ /*
+ * write page is offset header + block descriptor length
+ */
+ wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
+
+ wp->fp = pd->settings.fp;
+ wp->track_mode = pd->settings.track_mode;
+ wp->write_type = pd->settings.write_type;
+ wp->data_block_type = pd->settings.block_mode;
+
+ wp->multi_session = 0;
+
+#ifdef PACKET_USE_LS
+ wp->link_size = 7;
+ wp->ls_v = 1;
+#endif
+
+ if (wp->data_block_type == PACKET_BLOCK_MODE1) {
+ wp->session_format = 0;
+ wp->subhdr2 = 0x20;
+ } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
+ wp->session_format = 0x20;
+ wp->subhdr2 = 8;
+#if 0
+ wp->mcn[0] = 0x80;
+ memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
+#endif
+ } else {
+ /*
+ * paranoia
+ */
+ pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
+ return 1;
+ }
+ wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
+
+ cgc.buflen = cgc.cmd[8] = size;
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ pkt_print_settings(pd);
+ return 0;
+}
+
+/*
+ * 1 -- we can write to this track, 0 -- we can't
+ */
+static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
+{
+ switch (pd->mmc3_profile) {
+ case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
+ /* The track is always writable on DVD+RW/DVD-RAM */
+ return 1;
+ default:
+ break;
+ }
+
+ if (!ti->packet || !ti->fp)
+ return 0;
+
+ /*
+ * "good" settings as per Mt Fuji.
+ */
+ if (ti->rt == 0 && ti->blank == 0)
+ return 1;
+
+ if (ti->rt == 0 && ti->blank == 1)
+ return 1;
+
+ if (ti->rt == 1 && ti->blank == 0)
+ return 1;
+
+ pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+ return 0;
+}
+
+/*
+ * 1 -- we can write to this disc, 0 -- we can't
+ */
+static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
+{
+ switch (pd->mmc3_profile) {
+ case 0x0a: /* CD-RW */
+ case 0xffff: /* MMC3 not supported */
+ break;
+ case 0x1a: /* DVD+RW */
+ case 0x13: /* DVD-RW */
+ case 0x12: /* DVD-RAM */
+ return 1;
+ default:
+ pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+ pd->mmc3_profile);
+ return 0;
+ }
+
+ /*
+ * for disc type 0xff we should probably reserve a new track.
+ * but i'm not sure, should we leave this to user apps? probably.
+ */
+ if (di->disc_type == 0xff) {
+ pkt_notice(pd, "unknown disc - no track?\n");
+ return 0;
+ }
+
+ if (di->disc_type != 0x20 && di->disc_type != 0) {
+ pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
+ return 0;
+ }
+
+ if (di->erasable == 0) {
+ pkt_notice(pd, "disc not erasable\n");
+ return 0;
+ }
+
+ if (di->border_status == PACKET_SESSION_RESERVED) {
+ pkt_err(pd, "can't write to last track (reserved)\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ unsigned char buf[12];
+ disc_information di;
+ track_information ti;
+ int ret, track;
+
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+ cgc.cmd[8] = 8;
+ ret = pkt_generic_packet(pd, &cgc);
+ pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
+
+ memset(&di, 0, sizeof(disc_information));
+ memset(&ti, 0, sizeof(track_information));
+
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret) {
+ pkt_err(pd, "failed get_disc\n");
+ return ret;
+ }
+
+ if (!pkt_writable_disc(pd, &di))
+ return -EROFS;
+
+ pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
+
+ track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
+ ret = pkt_get_track_info(pd, track, 1, &ti);
+ if (ret) {
+ pkt_err(pd, "failed get_track\n");
+ return ret;
+ }
+
+ if (!pkt_writable_track(pd, &ti)) {
+ pkt_err(pd, "can't write to this track\n");
+ return -EROFS;
+ }
+
+ /*
+ * we keep the packet size in 512-byte units, which makes it easier
+ * to deal with request calculations.
+ */
+ pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
+ if (pd->settings.size == 0) {
+ pkt_notice(pd, "detected zero packet size!\n");
+ return -ENXIO;
+ }
+ if (pd->settings.size > PACKET_MAX_SECTORS) {
+ pkt_err(pd, "packet size is too big\n");
+ return -EROFS;
+ }
+ pd->settings.fp = ti.fp;
+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+
+ if (ti.nwa_v) {
+ pd->nwa = be32_to_cpu(ti.next_writable);
+ set_bit(PACKET_NWA_VALID, &pd->flags);
+ }
+
+ /*
+ * in theory we could use lra on -RW media as well and just zero
+ * blocks that haven't been written yet, but in practice that
+ * is just a no-go. we'll use that for -R, naturally.
+ */
+ if (ti.lra_v) {
+ pd->lra = be32_to_cpu(ti.last_rec_address);
+ set_bit(PACKET_LRA_VALID, &pd->flags);
+ } else {
+ pd->lra = 0xffffffff;
+ set_bit(PACKET_LRA_VALID, &pd->flags);
+ }
+
+ /*
+ * fine for now
+ */
+ pd->settings.link_loss = 7;
+ pd->settings.write_type = 0; /* packet */
+ pd->settings.track_mode = ti.track_mode;
+
+ /*
+ * mode1 or mode2 disc
+ */
+ switch (ti.data_mode) {
+ case PACKET_MODE1:
+ pd->settings.block_mode = PACKET_BLOCK_MODE1;
+ break;
+ case PACKET_MODE2:
+ pd->settings.block_mode = PACKET_BLOCK_MODE2;
+ break;
+ default:
+ pkt_err(pd, "unknown data mode\n");
+ return -EROFS;
+ }
+ return 0;
+}
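+
+/*
+ * Illustrative example (editor's note, hypothetical media): a fixed packet
+ * size of 32 frames (32 * 2 KiB = 64 KiB) becomes settings.size == 128 in
+ * the driver's 512-byte sector units after the << 2 conversion above.
+ */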
+
+/*
+ * enable/disable write caching on drive
+ */
+static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ unsigned char buf[64];
+ bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
+ int ret;
+
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ cgc.buflen = pd->mode_offset + 12;
+
+ /*
+ * caching mode page might not be there, so quiet this command
+ */
+ cgc.quiet = 1;
+
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * use drive write caching -- we need deferred error handling to be
+ * able to successfully recover with this option (drive will return good
+ * status as soon as the cdb is validated).
+ */
+ buf[pd->mode_offset + 10] |= (set << 2);
+
+ cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
+ pkt_err(pd, "write caching control failed\n");
+ pkt_dump_sense(pd, &cgc);
+ } else if (!ret && set)
+ pkt_notice(pd, "enabled write caching\n");
+ return ret;
+}
+
+static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+ cgc.cmd[4] = lockflag ? 1 : 0;
+ return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * Returns drive maximum write speed
+ */
+static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
+ unsigned *write_speed)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ unsigned char buf[256+18];
+ unsigned char *cap_buf;
+ int ret, offset;
+
+ cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
+ cgc.sshdr = &sshdr;
+
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+ if (ret) {
+ cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
+ sizeof(struct mode_page_header);
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+ }
+
+ offset = 20; /* Obsoleted field, used by older drives */
+ if (cap_buf[1] >= 28)
+ offset = 28; /* Current write speed selected */
+ if (cap_buf[1] >= 30) {
+ /* If the drive reports at least one "Logical Unit Write
+ * Speed Performance Descriptor Block", use the information
+ * in the first block. (contains the highest speed)
+ */
+ int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
+ if (num_spdb > 0)
+ offset = 34;
+ }
+
+ *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
+ return 0;
+}
+
+/* These tables from cdrecord - I don't have orange book */
+/* standard speed CD-RW (1-4x) */
+static char clv_to_speed[16] = {
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
+ 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* high speed CD-RW (-10x) */
+static char hs_clv_to_speed[16] = {
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
+ 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* ultra high speed CD-RW */
+static char us_clv_to_speed[16] = {
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
+ 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
+};
+
+/*
+ * reads the maximum media speed from ATIP
+ */
+static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
+ unsigned *speed)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ unsigned char buf[64];
+ unsigned int size, st, sp;
+ int ret;
+
+ init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] = 2;
+ cgc.cmd[2] = 4; /* READ ATIP */
+ cgc.cmd[8] = 2;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+ size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
+ if (size > sizeof(buf))
+ size = sizeof(buf);
+
+ init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] = 2;
+ cgc.cmd[2] = 4;
+ cgc.cmd[8] = size;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ if (!(buf[6] & 0x40)) {
+ pkt_notice(pd, "disc type is not CD-RW\n");
+ return 1;
+ }
+ if (!(buf[6] & 0x4)) {
+ pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
+ return 1;
+ }
+
+ st = (buf[6] >> 3) & 0x7; /* disc sub-type */
+
+ sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
+
+ /* Info from cdrecord */
+ switch (st) {
+ case 0: /* standard speed */
+ *speed = clv_to_speed[sp];
+ break;
+ case 1: /* high speed */
+ *speed = hs_clv_to_speed[sp];
+ break;
+ case 2: /* ultra high speed */
+ *speed = us_clv_to_speed[sp];
+ break;
+ default:
+ pkt_notice(pd, "unknown disc sub-type %d\n", st);
+ return 1;
+ }
+ if (*speed) {
+ pkt_info(pd, "maximum media speed: %d\n", *speed);
+ return 0;
+ } else {
+ pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
+ return 1;
+ }
+}
+
+static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ int ret;
+
+ pkt_dbg(2, pd, "Performing OPC\n");
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.sshdr = &sshdr;
+ cgc.timeout = 60*HZ;
+ cgc.cmd[0] = GPCMD_SEND_OPC;
+ cgc.cmd[1] = 1;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+}
+
+static int pkt_open_write(struct pktcdvd_device *pd)
+{
+ int ret;
+ unsigned int write_speed, media_write_speed, read_speed;
+
+ ret = pkt_probe_settings(pd);
+ if (ret) {
+ pkt_dbg(2, pd, "failed probe\n");
+ return ret;
+ }
+
+ ret = pkt_set_write_settings(pd);
+ if (ret) {
+ pkt_dbg(1, pd, "failed saving write settings\n");
+ return -EIO;
+ }
+
+ pkt_write_caching(pd);
+
+ ret = pkt_get_max_speed(pd, &write_speed);
+ if (ret)
+ write_speed = 16 * 177;
+ switch (pd->mmc3_profile) {
+ case 0x13: /* DVD-RW */
+ case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
+ pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
+ break;
+ default:
+ ret = pkt_media_speed(pd, &media_write_speed);
+ if (ret)
+ media_write_speed = 16;
+ write_speed = min(write_speed, media_write_speed * 177);
+ pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
+ break;
+ }
+ read_speed = write_speed;
+
+ ret = pkt_set_speed(pd, write_speed, read_speed);
+ if (ret) {
+ pkt_dbg(1, pd, "couldn't set write speed\n");
+ return -EIO;
+ }
+ pd->write_speed = write_speed;
+ pd->read_speed = read_speed;
+
+ ret = pkt_perform_opc(pd);
+ if (ret) {
+ pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
+ }
+
+ return 0;
+}
+
+/*
+ * called at open time.
+ */
+static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
+{
+ int ret;
+ long lba;
+ struct request_queue *q;
+ struct block_device *bdev;
+
+ /*
+	 * We need to re-open the cdrom device without O_NONBLOCK to be able
+	 * to read from and write to it. It is already opened in O_NONBLOCK
+	 * mode, so the open should not fail.
+ */
+ bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ goto out;
+ }
+
+ ret = pkt_get_last_written(pd, &lba);
+ if (ret) {
+ pkt_err(pd, "pkt_get_last_written failed\n");
+ goto out_putdev;
+ }
+
+ set_capacity(pd->disk, lba << 2);
+ set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
+
+ q = bdev_get_queue(pd->bdev);
+ if (write) {
+ ret = pkt_open_write(pd);
+ if (ret)
+ goto out_putdev;
+ /*
+ * Some CDRW drives can not handle writes larger than one packet,
+ * even if the size is a multiple of the packet size.
+ */
+ blk_queue_max_hw_sectors(q, pd->settings.size);
+ set_bit(PACKET_WRITABLE, &pd->flags);
+ } else {
+ pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+ clear_bit(PACKET_WRITABLE, &pd->flags);
+ }
+
+ ret = pkt_set_segment_merging(pd, q);
+ if (ret)
+ goto out_putdev;
+
+ if (write) {
+ if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+ pkt_err(pd, "not enough memory for buffers\n");
+ ret = -ENOMEM;
+ goto out_putdev;
+ }
+ pkt_info(pd, "%lukB available on disc\n", lba << 1);
+ }
+
+ return 0;
+
+out_putdev:
+ blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
+out:
+ return ret;
+}
+
+/*
+ * called when the device is closed. makes sure that the device flushes
+ * the internal cache before we close.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+ if (flush && pkt_flush_cache(pd))
+ pkt_dbg(1, pd, "not flushing cache\n");
+
+ pkt_lock_door(pd, 0);
+
+ pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+
+ pkt_shrink_pktlist(pd);
+}
+
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
+{
+ if (dev_minor >= MAX_WRITERS)
+ return NULL;
+
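+	/* clamp the index under speculation (Spectre v1 hardening) */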
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
+ return pkt_devs[dev_minor];
+}
+
+static int pkt_open(struct block_device *bdev, fmode_t mode)
+{
+ struct pktcdvd_device *pd = NULL;
+ int ret;
+
+ mutex_lock(&pktcdvd_mutex);
+ mutex_lock(&ctl_mutex);
+ pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
+ if (!pd) {
+ ret = -ENODEV;
+ goto out;
+ }
+ BUG_ON(pd->refcnt < 0);
+
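+	/* only the first opener performs the full device setup below */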
+ pd->refcnt++;
+ if (pd->refcnt > 1) {
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(PACKET_WRITABLE, &pd->flags)) {
+ ret = -EBUSY;
+ goto out_dec;
+ }
+ } else {
+ ret = pkt_open_dev(pd, mode & FMODE_WRITE);
+ if (ret)
+ goto out_dec;
+ /*
+ * needed here as well, since ext2 (among others) may change
+ * the blocksize at mount time
+ */
+ set_blocksize(bdev, CD_FRAMESIZE);
+ }
+
+ mutex_unlock(&ctl_mutex);
+ mutex_unlock(&pktcdvd_mutex);
+ return 0;
+
+out_dec:
+ pd->refcnt--;
+out:
+ mutex_unlock(&ctl_mutex);
+ mutex_unlock(&pktcdvd_mutex);
+ return ret;
+}
+
+static void pkt_close(struct gendisk *disk, fmode_t mode)
+{
+ struct pktcdvd_device *pd = disk->private_data;
+
+ mutex_lock(&pktcdvd_mutex);
+ mutex_lock(&ctl_mutex);
+ pd->refcnt--;
+ BUG_ON(pd->refcnt < 0);
+ if (pd->refcnt == 0) {
+ int flush = test_bit(PACKET_WRITABLE, &pd->flags);
+ pkt_release_dev(pd, flush);
+ }
+ mutex_unlock(&ctl_mutex);
+ mutex_unlock(&pktcdvd_mutex);
+}
+
+
+static void pkt_end_io_read_cloned(struct bio *bio)
+{
+ struct packet_stacked_data *psd = bio->bi_private;
+ struct pktcdvd_device *pd = psd->pd;
+
+ psd->bio->bi_status = bio->bi_status;
+ bio_put(bio);
+ bio_endio(psd->bio);
+ mempool_free(psd, &psd_pool);
+ pkt_bio_finished(pd);
+}
+
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
+{
+ struct bio *cloned_bio =
+ bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
+ struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
+
+ psd->pd = pd;
+ psd->bio = bio;
+ cloned_bio->bi_private = psd;
+ cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+ pd->stats.secs_r += bio_sectors(bio);
+ pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd = q->queuedata;
+ sector_t zone;
+ struct packet_data *pkt;
+ int was_empty, blocked_bio;
+ struct pkt_rb_node *node;
+
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
+
+ /*
+ * If we find a matching packet in state WAITING or READ_WAIT, we can
+ * just append this bio to that packet.
+ */
+ spin_lock(&pd->cdrw.active_list_lock);
+ blocked_bio = 0;
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (pkt->sector == zone) {
+ spin_lock(&pkt->lock);
+ if ((pkt->state == PACKET_WAITING_STATE) ||
+ (pkt->state == PACKET_READ_WAIT_STATE)) {
+ bio_list_add(&pkt->orig_bios, bio);
+ pkt->write_size +=
+ bio->bi_iter.bi_size / CD_FRAMESIZE;
+ if ((pkt->write_size >= pkt->frames) &&
+ (pkt->state == PACKET_WAITING_STATE)) {
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+ }
+ spin_unlock(&pkt->lock);
+ spin_unlock(&pd->cdrw.active_list_lock);
+ return;
+ } else {
+ blocked_bio = 1;
+ }
+ spin_unlock(&pkt->lock);
+ }
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+
+ /*
+ * Test if there is enough room left in the bio work queue
+	 * (queue size >= congestion-on mark).
+	 * If not, wait until the work queue size drops below the
+	 * congestion-off mark.
+ */
+ spin_lock(&pd->lock);
+ if (pd->write_congestion_on > 0
+ && pd->bio_queue_size >= pd->write_congestion_on) {
+ struct wait_bit_queue_entry wqe;
+
+ init_wait_var_entry(&wqe, &pd->congested, 0);
+ for (;;) {
+ prepare_to_wait_event(__var_waitqueue(&pd->congested),
+ &wqe.wq_entry,
+ TASK_UNINTERRUPTIBLE);
+ if (pd->bio_queue_size <= pd->write_congestion_off)
+ break;
+ pd->congested = true;
+ spin_unlock(&pd->lock);
+ schedule();
+ spin_lock(&pd->lock);
+ }
+ }
+ spin_unlock(&pd->lock);
+
+ /*
+ * No matching packet found. Store the bio in the work queue.
+ */
+ node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
+ node->bio = bio;
+ spin_lock(&pd->lock);
+ BUG_ON(pd->bio_queue_size < 0);
+ was_empty = (pd->bio_queue_size == 0);
+ pkt_rbtree_insert(pd, node);
+ spin_unlock(&pd->lock);
+
+ /*
+ * Wake up the worker thread.
+ */
+ atomic_set(&pd->scan_queue, 1);
+ if (was_empty) {
+ /* This wake_up is required for correct operation */
+ wake_up(&pd->wqueue);
+ } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
+ /*
+ * This wake up is not required for correct operation,
+ * but improves performance in some cases.
+ */
+ wake_up(&pd->wqueue);
+ }
+}
+
+static void pkt_submit_bio(struct bio *bio)
+{
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
+ struct bio *split;
+
+ bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
+
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ (unsigned long long)bio_end_sector(bio));
+
+ /*
+ * Clone READ bios so we can have our own bi_end_io callback.
+ */
+ if (bio_data_dir(bio) == READ) {
+ pkt_make_request_read(pd, bio);
+ return;
+ }
+
+ if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_iter.bi_sector);
+ goto end_io;
+ }
+
+ if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+ pkt_err(pd, "wrong bio size\n");
+ goto end_io;
+ }
+
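+	/*
+	 * Split the write at the packet (zone) boundary so that each part
+	 * maps to a single packet; the queue limits guarantee a bio spans
+	 * at most two adjacent zones (see the BUG_ON below).
+	 */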
+ do {
+ sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+ sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+ if (last_zone != zone) {
+ BUG_ON(last_zone != zone + pd->settings.size);
+
+ split = bio_split(bio, last_zone -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, &pkt_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
+ } while (split != bio);
+
+ return;
+end_io:
+ bio_io_error(bio);
+}
+
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+ struct request_queue *q = pd->disk->queue;
+
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
+ blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
+ q->queuedata = pd;
+}
+
+static int pkt_seq_show(struct seq_file *m, void *p)
+{
+ struct pktcdvd_device *pd = m->private;
+ char *msg;
+ int states[PACKET_NUM_STATES];
+
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
+
+ seq_printf(m, "\nSettings:\n");
+ seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
+
+ if (pd->settings.write_type == 0)
+ msg = "Packet";
+ else
+ msg = "Unknown";
+ seq_printf(m, "\twrite type:\t\t%s\n", msg);
+
+ seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+ seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+ seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+ if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+ msg = "Mode 1";
+ else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+ msg = "Mode 2";
+ else
+ msg = "Unknown";
+ seq_printf(m, "\tblock mode:\t\t%s\n", msg);
+
+ seq_printf(m, "\nStatistics:\n");
+ seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
+ seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
+ seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
+ seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
+ seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
+
+ seq_printf(m, "\nMisc:\n");
+ seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
+ seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
+ seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
+ seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
+ seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
+ seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+ seq_printf(m, "\nQueue state:\n");
+ seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
+ seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
+ seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
+
+ pkt_count_states(pd, states);
+ seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2], states[3], states[4], states[5]);
+
+ seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
+ pd->write_congestion_off,
+ pd->write_congestion_on);
+ return 0;
+}
+
+static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+{
+ int i;
+ struct block_device *bdev;
+ struct scsi_device *sdev;
+
+ if (pd->pkt_dev == dev) {
+ pkt_err(pd, "recursive setup not allowed\n");
+ return -EBUSY;
+ }
+ for (i = 0; i < MAX_WRITERS; i++) {
+ struct pktcdvd_device *pd2 = pkt_devs[i];
+ if (!pd2)
+ continue;
+ if (pd2->bdev->bd_dev == dev) {
+ pkt_err(pd, "%pg already setup\n", pd2->bdev);
+ return -EBUSY;
+ }
+ if (pd2->pkt_dev == dev) {
+ pkt_err(pd, "can't chain pktcdvd devices\n");
+ return -EBUSY;
+ }
+ }
+
+ bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ if (!sdev) {
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ return -EINVAL;
+ }
+ put_device(&sdev->sdev_gendev);
+
+ /* This is safe, since we have a reference from open(). */
+ __module_get(THIS_MODULE);
+
+ pd->bdev = bdev;
+ set_blocksize(bdev, CD_FRAMESIZE);
+
+ pkt_init_queue(pd);
+
+ atomic_set(&pd->cdrw.pending_bios, 0);
+ pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
+ if (IS_ERR(pd->cdrw.thread)) {
+ pkt_err(pd, "can't start kernel thread\n");
+ goto out_mem;
+ }
+
+ proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
+ pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
+ return 0;
+
+out_mem:
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return -ENOMEM;
+}
+
+static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ struct pktcdvd_device *pd = bdev->bd_disk->private_data;
+ int ret;
+
+ pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+ cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+
+ mutex_lock(&pktcdvd_mutex);
+ switch (cmd) {
+ case CDROMEJECT:
+ /*
+ * The door gets locked when the device is opened, so we
+ * have to unlock it or else the eject command fails.
+ */
+ if (pd->refcnt == 1)
+ pkt_lock_door(pd, 0);
+ fallthrough;
+ /*
+ * forward selected CDROM ioctls to CD-ROM, for UDF
+ */
+ case CDROMMULTISESSION:
+ case CDROMREADTOCENTRY:
+ case CDROM_LAST_WRITTEN:
+ case CDROM_SEND_PACKET:
+ case SCSI_IOCTL_SEND_COMMAND:
+ if (!bdev->bd_disk->fops->ioctl)
+ ret = -ENOTTY;
+ else
+ ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
+ break;
+ default:
+ pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
+ ret = -ENOTTY;
+ }
+ mutex_unlock(&pktcdvd_mutex);
+
+ return ret;
+}
+
+static unsigned int pkt_check_events(struct gendisk *disk,
+ unsigned int clearing)
+{
+ struct pktcdvd_device *pd = disk->private_data;
+ struct gendisk *attached_disk;
+
+ if (!pd)
+ return 0;
+ if (!pd->bdev)
+ return 0;
+ attached_disk = pd->bdev->bd_disk;
+ if (!attached_disk || !attached_disk->fops->check_events)
+ return 0;
+ return attached_disk->fops->check_events(attached_disk, clearing);
+}
+
+static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
+}
+
+static const struct block_device_operations pktcdvd_ops = {
+ .owner = THIS_MODULE,
+ .submit_bio = pkt_submit_bio,
+ .open = pkt_open,
+ .release = pkt_close,
+ .ioctl = pkt_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+ .check_events = pkt_check_events,
+ .devnode = pkt_devnode,
+};
+
+/*
+ * Set up mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
+{
+ int idx;
+ int ret = -ENOMEM;
+ struct pktcdvd_device *pd;
+ struct gendisk *disk;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ for (idx = 0; idx < MAX_WRITERS; idx++)
+ if (!pkt_devs[idx])
+ break;
+ if (idx == MAX_WRITERS) {
+ pr_err("max %d writers supported\n", MAX_WRITERS);
+ ret = -EBUSY;
+ goto out_mutex;
+ }
+
+ pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+ if (!pd)
+ goto out_mutex;
+
+ ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
+ sizeof(struct pkt_rb_node));
+ if (ret)
+ goto out_mem;
+
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+ INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+ spin_lock_init(&pd->cdrw.active_list_lock);
+
+ spin_lock_init(&pd->lock);
+ spin_lock_init(&pd->iosched.lock);
+ bio_list_init(&pd->iosched.read_queue);
+ bio_list_init(&pd->iosched.write_queue);
+ sprintf(pd->name, DRIVER_NAME"%d", idx);
+ init_waitqueue_head(&pd->wqueue);
+ pd->bio_queue = RB_ROOT;
+
+ pd->write_congestion_on = write_congestion_on;
+ pd->write_congestion_off = write_congestion_off;
+
+ ret = -ENOMEM;
+ disk = blk_alloc_disk(NUMA_NO_NODE);
+ if (!disk)
+ goto out_mem;
+ pd->disk = disk;
+ disk->major = pktdev_major;
+ disk->first_minor = idx;
+ disk->minors = 1;
+ disk->fops = &pktcdvd_ops;
+ disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
+ strcpy(disk->disk_name, pd->name);
+ disk->private_data = pd;
+
+ pd->pkt_dev = MKDEV(pktdev_major, idx);
+ ret = pkt_new_dev(pd, dev);
+ if (ret)
+ goto out_mem2;
+
+ /* inherit events of the host device */
+ disk->events = pd->bdev->bd_disk->events;
+
+ ret = add_disk(disk);
+ if (ret)
+ goto out_mem2;
+
+ pkt_sysfs_dev_new(pd);
+ pkt_debugfs_dev_new(pd);
+
+ pkt_devs[idx] = pd;
+ if (pkt_dev)
+ *pkt_dev = pd->pkt_dev;
+
+ mutex_unlock(&ctl_mutex);
+ return 0;
+
+out_mem2:
+ put_disk(disk);
+out_mem:
+ mempool_exit(&pd->rb_pool);
+ kfree(pd);
+out_mutex:
+ mutex_unlock(&ctl_mutex);
+ pr_err("setup of pktcdvd device failed\n");
+ return ret;
+}
+
+/*
+ * Tear down mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_remove_dev(dev_t pkt_dev)
+{
+ struct pktcdvd_device *pd;
+ int idx;
+ int ret = 0;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ for (idx = 0; idx < MAX_WRITERS; idx++) {
+ pd = pkt_devs[idx];
+ if (pd && (pd->pkt_dev == pkt_dev))
+ break;
+ }
+ if (idx == MAX_WRITERS) {
+ pr_debug("dev not setup\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (pd->refcnt > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (!IS_ERR(pd->cdrw.thread))
+ kthread_stop(pd->cdrw.thread);
+
+ pkt_devs[idx] = NULL;
+
+ pkt_debugfs_dev_remove(pd);
+ pkt_sysfs_dev_remove(pd);
+
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
+
+ remove_proc_entry(pd->name, pkt_proc);
+ pkt_dbg(1, pd, "writer unmapped\n");
+
+ del_gendisk(pd->disk);
+ put_disk(pd->disk);
+
+ mempool_exit(&pd->rb_pool);
+ kfree(pd);
+
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+
+out:
+ mutex_unlock(&ctl_mutex);
+ return ret;
+}
+
+static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
+{
+ struct pktcdvd_device *pd;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
+ if (pd) {
+ ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+ ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+ } else {
+ ctrl_cmd->dev = 0;
+ ctrl_cmd->pkt_dev = 0;
+ }
+ ctrl_cmd->num_devices = MAX_WRITERS;
+
+ mutex_unlock(&ctl_mutex);
+}
+
+static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct pkt_ctrl_command ctrl_cmd;
+ int ret = 0;
+ dev_t pkt_dev = 0;
+
+ if (cmd != PACKET_CTRL_CMD)
+ return -ENOTTY;
+
+ if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
+ return -EFAULT;
+
+ switch (ctrl_cmd.command) {
+ case PKT_CTRL_CMD_SETUP:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
+ ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
+ break;
+ case PKT_CTRL_CMD_TEARDOWN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
+ break;
+ case PKT_CTRL_CMD_STATUS:
+ pkt_get_status(&ctrl_cmd);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
+ return -EFAULT;
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations pkt_ctl_fops = {
+ .open = nonseekable_open,
+ .unlocked_ioctl = pkt_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pkt_ctl_compat_ioctl,
+#endif
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+};
+
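+/* the control node is typically exposed as /dev/pktcdvd/control via devtmpfs/udev */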
+static struct miscdevice pkt_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DRIVER_NAME,
+ .nodename = "pktcdvd/control",
+ .fops = &pkt_ctl_fops
+};
+
+static int __init pkt_init(void)
+{
+ int ret;
+
+ mutex_init(&ctl_mutex);
+
+ ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
+ sizeof(struct packet_stacked_data));
+ if (ret)
+ return ret;
+ ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
+ if (ret) {
+ mempool_exit(&psd_pool);
+ return ret;
+ }
+
+ ret = register_blkdev(pktdev_major, DRIVER_NAME);
+ if (ret < 0) {
+ pr_err("unable to register block device\n");
+ goto out2;
+ }
+ if (!pktdev_major)
+ pktdev_major = ret;
+
+ ret = pkt_sysfs_init();
+ if (ret)
+ goto out;
+
+ pkt_debugfs_init();
+
+ ret = misc_register(&pkt_misc);
+ if (ret) {
+ pr_err("unable to register misc device\n");
+ goto out_misc;
+ }
+
+ pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
+
+ return 0;
+
+out_misc:
+ pkt_debugfs_cleanup();
+ pkt_sysfs_cleanup();
+out:
+ unregister_blkdev(pktdev_major, DRIVER_NAME);
+out2:
+ mempool_exit(&psd_pool);
+ bioset_exit(&pkt_bio_set);
+ return ret;
+}
+
+static void __exit pkt_exit(void)
+{
+ remove_proc_entry("driver/"DRIVER_NAME, NULL);
+ misc_deregister(&pkt_misc);
+
+ pkt_debugfs_cleanup();
+ pkt_sysfs_cleanup();
+
+ unregister_blkdev(pktdev_major, DRIVER_NAME);
+ mempool_exit(&psd_pool);
+ bioset_exit(&pkt_bio_set);
+}
+
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+MODULE_LICENSE("GPL");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c76e0148eada..38d42af01b25 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -586,8 +586,6 @@ static void ps3vram_submit_bio(struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
- bio = bio_split_to_limits(bio);
-
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
bio_list_add(&priv->list, bio);
@@ -747,9 +745,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
gendisk->private_data = dev;
strscpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
- blk_queue_max_segments(gendisk->queue, BLK_MAX_SEGMENTS);
- blk_queue_max_segment_size(gendisk->queue, BLK_MAX_SEGMENT_SIZE);
- blk_queue_max_hw_sectors(gendisk->queue, BLK_SAFE_MAX_SECTORS);
dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 04453f4a319c..84ad3b17956f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -491,12 +491,12 @@ static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
-static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
-static ssize_t remove_store(struct bus_type *bus, const char *buf,
+static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count);
+static ssize_t remove_store(const struct bus_type *bus, const char *buf,
size_t count);
-static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
+static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
size_t count);
-static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
+static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
@@ -538,7 +538,7 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
return is_lock_owner;
}
-static ssize_t supported_features_show(struct bus_type *bus, char *buf)
+static ssize_t supported_features_show(const struct bus_type *bus, char *buf)
{
return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
@@ -3068,13 +3068,12 @@ static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
for (i = 0; i < obj_req->copyup_bvec_count; i++) {
unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
+ struct page *page = alloc_page(GFP_NOIO);
- obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
- if (!obj_req->copyup_bvecs[i].bv_page)
+ if (!page)
return -ENOMEM;
- obj_req->copyup_bvecs[i].bv_offset = 0;
- obj_req->copyup_bvecs[i].bv_len = len;
+ bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
obj_overlap -= len;
}
@@ -5292,8 +5291,7 @@ static void rbd_dev_release(struct device *dev)
module_put(THIS_MODULE);
}
-static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
- struct rbd_spec *spec)
+static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
@@ -5338,9 +5336,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->dev.parent = &rbd_root_dev;
device_initialize(&rbd_dev->dev);
- rbd_dev->rbd_client = rbdc;
- rbd_dev->spec = spec;
-
return rbd_dev;
}
@@ -5353,12 +5348,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
{
struct rbd_device *rbd_dev;
- rbd_dev = __rbd_dev_create(rbdc, spec);
+ rbd_dev = __rbd_dev_create(spec);
if (!rbd_dev)
return NULL;
- rbd_dev->opts = opts;
-
/* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS),
@@ -5375,6 +5368,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* we have a ref from do_rbd_add() */
__module_get(THIS_MODULE);
+ rbd_dev->rbd_client = rbdc;
+ rbd_dev->spec = spec;
+ rbd_dev->opts = opts;
+
dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev;
@@ -6736,7 +6733,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
goto out_err;
}
- parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
+ parent = __rbd_dev_create(rbd_dev->parent_spec);
if (!parent) {
ret = -ENOMEM;
goto out_err;
@@ -6746,8 +6743,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
* Images related by parent/child relationships always share
* rbd_client and spec/parent_spec, so bump their refcounts.
*/
- __rbd_get_client(rbd_dev->rbd_client);
- rbd_spec_get(rbd_dev->parent_spec);
+ parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
+ parent->spec = rbd_spec_get(rbd_dev->parent_spec);
__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
@@ -6970,9 +6967,7 @@ err_out_format:
return ret;
}
-static ssize_t do_rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t do_rbd_add(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct ceph_options *ceph_opts = NULL;
@@ -7084,18 +7079,18 @@ err_out_args:
goto out;
}
-static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
+static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
{
if (single_major)
return -EINVAL;
- return do_rbd_add(bus, buf, count);
+ return do_rbd_add(buf, count);
}
-static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
+static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- return do_rbd_add(bus, buf, count);
+ return do_rbd_add(buf, count);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
@@ -7125,9 +7120,7 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
}
}
-static ssize_t do_rbd_remove(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t do_rbd_remove(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct list_head *tmp;
@@ -7199,18 +7192,18 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
return count;
}
-static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
+static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
{
if (single_major)
return -EINVAL;
- return do_rbd_remove(bus, buf, count);
+ return do_rbd_remove(buf, count);
}
-static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
+static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- return do_rbd_remove(bus, buf, count);
+ return do_rbd_remove(buf, count);
}
/*
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index e7c7d9a68168..8c6087949794 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -646,7 +646,7 @@ int rnbd_clt_create_sysfs_files(void)
{
int err;
- rnbd_dev_class = class_create(THIS_MODULE, "rnbd-client");
+ rnbd_dev_class = class_create("rnbd-client");
if (IS_ERR(rnbd_dev_class))
return PTR_ERR(rnbd_dev_class);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 78334da74d8b..5eb8c7855970 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_alloc;
}
- ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+ ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
GFP_KERNEL);
if (ret < 0) {
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index ea7ac8bca63c..da1d0542d7e2 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -241,7 +241,7 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
bio_opf = REQ_OP_WRITE;
break;
case RNBD_OP_FLUSH:
- bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
break;
case RNBD_OP_DISCARD:
bio_opf = REQ_OP_DISCARD;
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index 297a6924ff4e..d5d9267e1fa5 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -215,7 +215,7 @@ int rnbd_srv_create_sysfs_files(void)
{
int err;
- rnbd_dev_class = class_create(THIS_MODULE, "rnbd-server");
+ rnbd_dev_class = class_create("rnbd-server");
if (IS_ERR(rnbd_dev_class))
return PTR_ERR(rnbd_dev_class);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index fb855da971ee..9fa821fa76b0 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
print_version();
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e9de9d846b73..c7ed5d69e9ee 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -42,6 +42,7 @@
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/task_work.h>
+#include <linux/namei.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS (1U << MINORBITS)
@@ -51,10 +52,13 @@
| UBLK_F_URING_CMD_COMP_IN_TASK \
| UBLK_F_NEED_GET_DATA \
| UBLK_F_USER_RECOVERY \
- | UBLK_F_USER_RECOVERY_REISSUE)
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_UNPRIVILEGED_DEV \
+ | UBLK_F_CMD_IOCTL_ENCODE)
/* All UBLK_PARAM_TYPE_* should be included here */
-#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
+#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
+ UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)
struct ublk_rq_data {
struct llist_node node;
@@ -125,6 +129,7 @@ struct ublk_queue {
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
+ bool timeout;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
struct ublk_io ios[];
@@ -137,7 +142,7 @@ struct ublk_device {
char *__queues;
- unsigned short queue_size;
+ unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -147,6 +152,7 @@ struct ublk_device {
#define UB_STATE_OPEN 0
#define UB_STATE_USED 1
+#define UB_STATE_DELETED 2
unsigned long state;
int ub_number;
@@ -159,7 +165,7 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
- atomic_t nr_aborted_queues;
+ unsigned int nr_privileged_daemon;
/*
* Our ubq->daemon may be killed without any notification, so
@@ -185,6 +191,15 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+/*
+ * Maximum number of ublk devices allowed to be added.
+ *
+ * This could be extended to a per-user limit in the future, or even be
+ * controlled by cgroup.
+ */
+static unsigned int ublks_max = 64;
+static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+
static struct miscdevice ublk_misc;
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
@@ -233,7 +248,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic;
- if (p->logical_bs_shift > PAGE_SHIFT)
+ if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift)
@@ -255,6 +270,10 @@ static int ublk_validate_params(const struct ublk_device *ub)
return -EINVAL;
}
+ /* dev_t is read-only */
+ if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
+ return -EINVAL;
+
return 0;
}
@@ -281,9 +300,7 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
- if (ubq->flags & UBLK_F_NEED_GET_DATA)
- return true;
- return false;
+ return ubq->flags & UBLK_F_NEED_GET_DATA;
}
static struct ublk_device *ublk_get_device(struct ublk_device *ub)
@@ -306,7 +323,7 @@ static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
static inline bool ublk_rq_has_data(const struct request *rq)
{
- return rq->bio && bio_has_data(rq->bio);
+ return bio_has_data(rq->bio);
}
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
@@ -332,25 +349,19 @@ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
static inline bool ublk_queue_can_use_recovery_reissue(
struct ublk_queue *ubq)
{
- if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
- return true;
- return false;
+ return (ubq->flags & UBLK_F_USER_RECOVERY) &&
+ (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
}
static inline bool ublk_queue_can_use_recovery(
struct ublk_queue *ubq)
{
- if (ubq->flags & UBLK_F_USER_RECOVERY)
- return true;
- return false;
+ return ubq->flags & UBLK_F_USER_RECOVERY;
}
static inline bool ublk_can_use_recovery(struct ublk_device *ub)
{
- if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
- return true;
- return false;
+ return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
}
static void ublk_free_disk(struct gendisk *disk)
@@ -361,18 +372,59 @@ static void ublk_free_disk(struct gendisk *disk)
put_device(&ub->cdev_dev);
}
+static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
+ unsigned int *owner_gid)
+{
+ kuid_t uid;
+ kgid_t gid;
+
+ current_uid_gid(&uid, &gid);
+
+ *owner_uid = from_kuid(&init_user_ns, uid);
+ *owner_gid = from_kgid(&init_user_ns, gid);
+}
+
+static int ublk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct ublk_device *ub = bdev->bd_disk->private_data;
+
+ if (capable(CAP_SYS_ADMIN))
+ return 0;
+
+ /*
+	 * For an unprivileged device, only the owner may open the
+	 * disk. Otherwise it could become a trap set by a malicious
+	 * user who deliberately grants this disk's privileges to
+	 * other users.
+	 *
+	 * This is also reasonable because anyone can create an
+	 * unprivileged device without needing anyone else's grant.
+ */
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
+ unsigned int curr_uid, curr_gid;
+
+ ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
+
+ if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
+ ub->dev_info.owner_gid)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
static const struct block_device_operations ub_fops = {
.owner = THIS_MODULE,
+ .open = ublk_open,
.free_disk = ublk_free_disk,
};
#define UBLK_MAX_PIN_PAGES 32
struct ublk_map_data {
- const struct ublk_queue *ubq;
const struct request *rq;
- const struct ublk_io *io;
- unsigned max_bytes;
+ unsigned long ubuf;
+ unsigned int len;
};
struct ublk_io_iter {
@@ -429,18 +481,17 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
return done;
}
-static inline int ublk_copy_user_pages(struct ublk_map_data *data,
- bool to_vm)
+static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
{
const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
- const unsigned long start_vm = data->io->addr;
+ const unsigned long start_vm = data->ubuf;
unsigned int done = 0;
struct ublk_io_iter iter = {
.pg_off = start_vm & (PAGE_SIZE - 1),
.bio = data->rq->bio,
.iter = data->rq->bio->bi_iter,
};
- const unsigned int nr_pages = round_up(data->max_bytes +
+ const unsigned int nr_pages = round_up(data->len +
(start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
while (done < nr_pages) {
@@ -453,42 +504,49 @@ static inline int ublk_copy_user_pages(struct ublk_map_data *data,
iter.pages);
if (iter.nr_pages <= 0)
return done == 0 ? iter.nr_pages : done;
- len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+ len = ublk_copy_io_pages(&iter, data->len, to_vm);
for (i = 0; i < iter.nr_pages; i++) {
if (to_vm)
set_page_dirty(iter.pages[i]);
put_page(iter.pages[i]);
}
- data->max_bytes -= len;
+ data->len -= len;
done += iter.nr_pages;
}
return done;
}
+static inline bool ublk_need_map_req(const struct request *req)
+{
+ return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
+}
+
+static inline bool ublk_need_unmap_req(const struct request *req)
+{
+ return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
+}
+
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
+
/*
* no zero copy, we delay copy WRITE request data into ublksrv
* context and the big benefit is that pinning pages in current
* context is pretty fast, see ublk_pin_user_pages
*/
- if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
- return rq_bytes;
-
- if (ublk_rq_has_data(req)) {
+ if (ublk_need_map_req(req)) {
struct ublk_map_data data = {
- .ubq = ubq,
.rq = req,
- .io = io,
- .max_bytes = rq_bytes,
+ .ubuf = io->addr,
+ .len = rq_bytes,
};
ublk_copy_user_pages(&data, true);
- return rq_bytes - data.max_bytes;
+ return rq_bytes - data.len;
}
return rq_bytes;
}
@@ -499,19 +557,18 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+ if (ublk_need_unmap_req(req)) {
struct ublk_map_data data = {
- .ubq = ubq,
.rq = req,
- .io = io,
- .max_bytes = io->res,
+ .ubuf = io->addr,
+ .len = io->res,
};
WARN_ON_ONCE(io->res > rq_bytes);
ublk_copy_user_pages(&data, false);
- return io->res - data.max_bytes;
+ return io->res - data.len;
}
return rq_bytes;
}
@@ -596,26 +653,25 @@ static void ublk_complete_rq(struct request *req)
struct ublk_queue *ubq = req->mq_hctx->driver_data;
struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
+ blk_status_t res = BLK_STS_OK;
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
if (io->res < 0) {
- blk_mq_end_request(req, errno_to_blk_status(io->res));
- return;
+ res = errno_to_blk_status(io->res);
+ goto exit;
}
/*
- * FLUSH or DISCARD usually won't return bytes returned, so end them
+	 * FLUSH, DISCARD or WRITE_ZEROES usually have no bytes to return, so end them
* directly.
*
	 * Neither of them needs unmapping.
*/
- if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
- blk_mq_end_request(req, BLK_STS_OK);
- return;
- }
+ if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
+ goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
unmapped_bytes = ublk_unmap_io(ubq, req, io);
@@ -632,6 +688,10 @@ static void ublk_complete_rq(struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+
+ return;
+exit:
+ blk_mq_end_request(req, res);
}
/*
@@ -656,7 +716,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
}
}
-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+static void ubq_complete_io_cmd(struct ublk_io *io, int res,
+ unsigned issue_flags)
{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -668,7 +729,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0);
+ io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -685,7 +746,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
-static inline void __ublk_rq_task_work(struct request *req)
+static inline void __ublk_rq_task_work(struct request *req,
+ unsigned issue_flags)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
@@ -710,9 +772,7 @@ static inline void __ublk_rq_task_work(struct request *req)
return;
}
- if (ublk_need_get_data(ubq) &&
- (req_op(req) == REQ_OP_WRITE ||
- req_op(req) == REQ_OP_FLUSH)) {
+ if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
/*
* We have not handled UBLK_IO_NEED_GET_DATA command yet,
	 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
@@ -723,7 +783,7 @@ static inline void __ublk_rq_task_work(struct request *req)
pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
__func__, io->cmd->cmd_op, ubq->q_id,
req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
return;
}
/*
@@ -761,17 +821,18 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
+ unsigned issue_flags)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
io_cmds = llist_reverse_order(io_cmds);
llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+ __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}
static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
@@ -783,12 +844,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_forward_io_cmds(ubq);
+ ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -797,8 +858,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
struct ublk_rq_data, work);
struct request *req = blk_mq_rq_from_pdu(data);
struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ unsigned issue_flags = IO_URING_F_UNLOCKED;
- ublk_forward_io_cmds(ubq);
+ ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@@ -837,6 +899,22 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
}
}
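+/*
+ * An unprivileged daemon can't be trusted to complete requests, so a
+ * request timeout kills it (once) instead of re-arming the timer;
+ * other devices simply get the timer reset.
+ */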
+static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+{
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+
+ if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
+ if (!ubq->timeout) {
+ send_sig(SIGKILL, ubq->ubq_daemon, 0);
+ ubq->timeout = true;
+ }
+
+ return BLK_EH_DONE;
+ }
+
+ return BLK_EH_RESET_TIMER;
+}
+
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -896,6 +974,7 @@ static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
.init_hctx = ublk_init_hctx,
.init_request = ublk_init_rq,
+ .timeout = ublk_timeout,
};
static int ublk_ch_open(struct inode *inode, struct file *filp)
@@ -956,7 +1035,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
}
static void ublk_commit_completion(struct ublk_device *ub,
- struct ublksrv_io_cmd *ub_cmd)
+ const struct ublksrv_io_cmd *ub_cmd)
{
u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
struct ublk_queue *ubq = ublk_get_queue(ub, qid);
@@ -1052,7 +1131,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
+ IO_URING_F_UNLOCKED);
}
/* all io commands are canceled */
@@ -1179,6 +1259,9 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
ubq->ubq_daemon = current;
get_task_struct(ubq->ubq_daemon);
ub->nr_queues_ready++;
+
+ if (capable(CAP_SYS_ADMIN))
+ ub->nr_privileged_daemon++;
}
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
complete_all(&ub->completion);
@@ -1194,23 +1277,35 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req);
}
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
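+/*
+ * Command opcodes use the _IO()-style 'u' encoding when
+ * UBLK_F_CMD_IOCTL_ENCODE is set; the legacy plain-integer opcodes
+ * (type 0) are only accepted when CONFIG_BLKDEV_UBLK_LEGACY_OPCODES
+ * is enabled.
+ */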
+static inline int ublk_check_cmd_op(u32 cmd_op)
+{
+ u32 ioc_type = _IOC_TYPE(cmd_op);
+
+ if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
+ return -EOPNOTSUPP;
+
+ if (ioc_type != 'u' && ioc_type != 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
+ const struct ublksrv_io_cmd *ub_cmd)
{
- struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
+ struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
- if (!(issue_flags & IO_URING_F_SQE128))
- goto out;
-
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;
@@ -1237,10 +1332,15 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
* iff the driver have set the UBLK_IO_FLAG_NEED_GET_DATA.
*/
if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
- ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+ ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
+ goto out;
+
+ ret = ublk_check_cmd_op(cmd_op);
+ if (ret)
goto out;
- switch (cmd_op) {
+ ret = -EINVAL;
+ switch (_IOC_NR(cmd_op)) {
case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
if (ublk_queue_ready(ubq)) {
@@ -1253,8 +1353,8 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
*/
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
- /* FETCH_RQ has to provide IO buffer */
- if (!ub_cmd->addr)
+ /* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
+ if (!ub_cmd->addr && !ublk_need_get_data(ubq))
goto out;
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
@@ -1263,8 +1363,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- /* FETCH_RQ has to provide IO buffer */
- if (!ub_cmd->addr)
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
+	 * not enabled or the request is a READ.
+ */
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
goto out;
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
@@ -1287,12 +1391,29 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0);
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
}
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ /*
+ * Not necessary for async retry, but let's keep it simple and always
+ * copy the values to avoid any potential reuse.
+ */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+ const struct ublksrv_io_cmd ub_cmd = {
+ .q_id = READ_ONCE(ub_src->q_id),
+ .tag = READ_ONCE(ub_src->tag),
+ .result = READ_ONCE(ub_src->result),
+ .addr = READ_ONCE(ub_src->addr)
+ };
+
+ return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
@@ -1433,6 +1554,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
ret = cdev_device_add(&ub->cdev, dev);
if (ret)
goto fail;
+
+ ublks_added++;
return 0;
fail:
put_device(dev);
@@ -1475,6 +1598,7 @@ static void ublk_remove(struct ublk_device *ub)
cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
put_device(&ub->cdev_dev);
+ ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -1493,21 +1617,16 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
-static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
- struct ublk_device *ub;
struct gendisk *disk;
int ret = -EINVAL;
if (ublksrv_pid <= 0)
return -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
wait_for_completion_interruptible(&ub->completion);
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
@@ -1519,7 +1638,7 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
goto out_unlock;
}
- disk = blk_mq_alloc_disk(&ub->tag_set, ub);
+ disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_unlock;
@@ -1535,38 +1654,42 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
if (ret)
goto out_put_disk;
+	/* don't probe partitions if any ubq daemon is untrusted */
+ if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+
get_device(&ub->cdev_dev);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = add_disk(disk);
if (ret) {
/*
* Has to drop the reference since ->free_disk won't be
* called in case of add_disk failure.
*/
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
out_put_disk:
if (ret)
put_disk(disk);
out_unlock:
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
-static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
- struct ublk_device *ub;
cpumask_var_t cpumask;
unsigned long queue;
unsigned int retlen;
unsigned int i;
- int ret = -EINVAL;
-
+ int ret;
+
if (header->len * BITS_PER_BYTE < nr_cpu_ids)
return -EINVAL;
if (header->len & (sizeof(unsigned long)-1))
@@ -1574,17 +1697,12 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
if (!header->addr)
return -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
queue = header->data[0];
if (queue >= ub->dev_info.nr_hw_queues)
- goto out_put_device;
+ return -EINVAL;
- ret = -ENOMEM;
if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
- goto out_put_device;
+ return -ENOMEM;
for_each_possible_cpu(i) {
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
@@ -1602,8 +1720,6 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
ret = 0;
out_free_cpumask:
free_cpumask_var(cpumask);
-out_put_device:
- ublk_put_device(ub);
return ret;
}
@@ -1617,7 +1733,7 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@@ -1630,19 +1746,46 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
__func__, header->queue_id);
return -EINVAL;
}
+
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
- ublk_dump_dev_info(&info);
+
+ if (capable(CAP_SYS_ADMIN))
+ info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
+ else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
+ return -EPERM;
+
+ /*
+	 * An unprivileged device can't be trusted, and RECOVERY and
+	 * RECOVERY_REISSUE may still hang error handling, so the recovery
+	 * features can't be supported for unprivileged ublk for now.
+	 *
+	 * TODO: provide forward progress for the RECOVERY handler, so
+	 * that unprivileged devices can benefit from it too.
+ */
+ if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
+ UBLK_F_USER_RECOVERY);
+
+ /* the created device is always owned by current user */
+ ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
+
if (header->dev_id != info.dev_id) {
pr_warn("%s: dev id not match %u %u\n",
__func__, header->dev_id, info.dev_id);
return -EINVAL;
}
+ ublk_dump_dev_info(&info);
+
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
+ ret = -EACCES;
+ if (ublks_added >= ublks_max)
+ goto out_unlock;
+
ret = -ENOMEM;
ub = kzalloc(sizeof(*ub), GFP_KERNEL);
if (!ub)
@@ -1673,6 +1816,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
+ ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
+
/* We are not ready to support zero copy */
ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
@@ -1724,88 +1869,100 @@ static inline bool ublk_idr_freed(int id)
return ptr == NULL;
}
-static int ublk_ctrl_del_dev(int idx)
+static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
{
- struct ublk_device *ub;
+ struct ublk_device *ub = *p_ub;
+ int idx = ub->ub_number;
int ret;
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
- ub = ublk_get_device_from_id(idx);
- if (ub) {
+ if (!test_bit(UB_STATE_DELETED, &ub->state)) {
ublk_remove(ub);
- ublk_put_device(ub);
- ret = 0;
- } else {
- ret = -ENODEV;
+ set_bit(UB_STATE_DELETED, &ub->state);
}
+ /* Mark the reference as consumed */
+ *p_ub = NULL;
+ ublk_put_device(ub);
+ mutex_unlock(&ublk_ctl_mutex);
+
/*
* Wait until the idr is removed, then it can be reused after
* DEL_DEV command is returned.
+ *
+	 * If we return because of a user interrupt, a future delete command
+	 * may still arrive:
+	 *
+	 * - if the device number isn't freed yet, this device won't (and
+	 *   needn't) be deleted again, since UB_STATE_DELETED is set and the
+	 *   device will be released once the last reference is dropped
+	 *
+	 * - if the device number is already freed, we will not find this
+	 *   device via ublk_get_device_from_id()
*/
- if (!ret)
- wait_event(ublk_idr_wq, ublk_idr_freed(idx));
- mutex_unlock(&ublk_ctl_mutex);
+ wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
- return ret;
+ return 0;
}
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
__func__, cmd->cmd_op, header->dev_id, header->queue_id,
header->data[0], header->addr, header->len);
}
-static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
- struct ublk_device *ub;
-
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
- ublk_put_device(ub);
return 0;
}
-static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
- struct ublk_device *ub;
- int ret = 0;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
return -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
- ret = -EFAULT;
- ublk_put_device(ub);
+ return -EFAULT;
- return ret;
+ return 0;
+}
+
+/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
+static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
+{
+ ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
+ ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
+
+ if (ub->ub_disk) {
+ ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
+ ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
+ } else {
+ ub->params.devt.disk_major = 0;
+ ub->params.devt.disk_minor = 0;
+ }
+ ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}
-static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_params(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
- struct ublk_device *ub;
int ret;
if (header->len <= sizeof(ph) || !header->addr)
@@ -1820,27 +1977,23 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
mutex_lock(&ub->mutex);
+ ublk_ctrl_fill_params_devt(ub);
if (copy_to_user(argp, &ub->params, ph.len))
ret = -EFAULT;
else
ret = 0;
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
-static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+static int ublk_ctrl_set_params(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
- struct ublk_device *ub;
int ret = -EFAULT;
if (header->len <= sizeof(ph) || !header->addr)
@@ -1855,10 +2008,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
@@ -1869,9 +2018,10 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
+ if (ret)
+ ub->params.types = 0;
}
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
@@ -1887,6 +2037,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
put_task_struct(ubq->ubq_daemon);
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
ubq->ubq_daemon = NULL;
+ ubq->timeout = false;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -1898,17 +2049,13 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
-static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_recovery(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
- struct ublk_device *ub;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
int i;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return ret;
-
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
@@ -1936,25 +2083,21 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
+ ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
-static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
+static int ublk_ctrl_end_recovery(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
- struct ublk_device *ub;
int ret = -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return ret;
-
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until new ubq_daemon sending all FETCH_REQ */
@@ -1982,62 +2125,191 @@ static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
+ return ret;
+}
+
+/*
+ * All control commands are sent via /dev/ublk-control, so we have to check
+ * the destination device's permission
+ */
+static int ublk_char_dev_permission(struct ublk_device *ub,
+ const char *dev_path, int mask)
+{
+ int err;
+ struct path path;
+ struct kstat stat;
+
+ err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
+ if (err)
+ return err;
+
+ err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ if (err)
+ goto exit;
+
+ err = -EPERM;
+ if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
+ goto exit;
+
+ err = inode_permission(&nop_mnt_idmap,
+ d_backing_inode(path.dentry), mask);
+exit:
+ path_put(&path);
+ return err;
+}
+
+static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
+ bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ char *dev_path = NULL;
+ int ret = 0;
+ int mask;
+
+ if (!unprivileged) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /*
+ * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
+ * char_dev_path in its payload too, since userspace may not
+ * know whether the specified device was created in
+ * unprivileged mode.
+ */
+ if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
+ return 0;
+ }
+
+ /*
+ * User has to provide the char device path for unprivileged ublk
+ *
+ * header->addr always points to the dev path buffer, and
+ * header->dev_path_len records the length of the dev path buffer.
+ */
+ if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
+ return -EINVAL;
+
+ if (header->len < header->dev_path_len)
+ return -EINVAL;
+
+ dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
+ if (!dev_path)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(dev_path, argp, header->dev_path_len))
+ goto exit;
+ dev_path[header->dev_path_len] = 0;
+
+ ret = -EINVAL;
+ switch (_IOC_NR(cmd->cmd_op)) {
+ case UBLK_CMD_GET_DEV_INFO:
+ case UBLK_CMD_GET_DEV_INFO2:
+ case UBLK_CMD_GET_QUEUE_AFFINITY:
+ case UBLK_CMD_GET_PARAMS:
+ mask = MAY_READ;
+ break;
+ case UBLK_CMD_START_DEV:
+ case UBLK_CMD_STOP_DEV:
+ case UBLK_CMD_ADD_DEV:
+ case UBLK_CMD_DEL_DEV:
+ case UBLK_CMD_SET_PARAMS:
+ case UBLK_CMD_START_USER_RECOVERY:
+ case UBLK_CMD_END_USER_RECOVERY:
+ mask = MAY_READ | MAY_WRITE;
+ break;
+ default:
+ goto exit;
+ }
+
+ ret = ublk_char_dev_permission(ub, dev_path, mask);
+ if (!ret) {
+ header->len -= header->dev_path_len;
+ header->addr += header->dev_path_len;
+ }
+ pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
+ __func__, ub->ub_number, cmd->cmd_op,
+ ub->dev_info.owner_uid, ub->dev_info.owner_gid,
+ dev_path, ret);
+exit:
+ kfree(dev_path);
return ret;
}
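/*
 * A minimal userspace sketch of the payload layout implied by
 * ublk_ctrl_uring_cmd_permission() above: for an unprivileged device the
 * char-device path occupies the first dev_path_len bytes of the buffer at
 * header->addr, and once the permission check passes the kernel strips it
 * (addr += dev_path_len, len -= dev_path_len) before handling the command
 * against the remaining bytes.  The ublksrv_ctrl_cmd/ublksrv_ctrl_dev_info
 * names follow <linux/ublk_cmd.h> as used in this hunk; the /dev/ublkcN
 * naming and the io_uring plumbing around this helper are assumptions.
 */
static void fill_get_dev_info2_payload(struct ublksrv_ctrl_cmd *header,
				       int dev_id, char *buf, size_t buf_size)
{
	/* the device path comes first ... */
	int path_len = snprintf(buf, buf_size, "/dev/ublkc%d", dev_id);

	header->dev_id = dev_id;
	header->dev_path_len = path_len;
	/* ... then the buffer the kernel fills with ublksrv_ctrl_dev_info */
	header->addr = (__u64)(uintptr_t)buf;
	header->len = path_len + sizeof(struct ublksrv_ctrl_dev_info);
}
/* The same path-first layout applies to every command once UBLK_F_UNPRIVILEGED_DEV is set. */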
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
+ struct ublk_device *ub = NULL;
+ u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;
+ if (issue_flags & IO_URING_F_NONBLOCK)
+ return -EAGAIN;
+
ublk_ctrl_cmd_dump(cmd);
if (!(issue_flags & IO_URING_F_SQE128))
goto out;
- ret = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
+ ret = ublk_check_cmd_op(cmd_op);
+ if (ret)
goto out;
- ret = -ENODEV;
- switch (cmd->cmd_op) {
+ if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
+ ret = -ENODEV;
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ goto out;
+
+ ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
+ if (ret)
+ goto put_dev;
+ }
+
+ switch (_IOC_NR(cmd_op)) {
case UBLK_CMD_START_DEV:
- ret = ublk_ctrl_start_dev(cmd);
+ ret = ublk_ctrl_start_dev(ub, cmd);
break;
case UBLK_CMD_STOP_DEV:
- ret = ublk_ctrl_stop_dev(cmd);
+ ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
- ret = ublk_ctrl_get_dev_info(cmd);
+ case UBLK_CMD_GET_DEV_INFO2:
+ ret = ublk_ctrl_get_dev_info(ub, cmd);
break;
case UBLK_CMD_ADD_DEV:
ret = ublk_ctrl_add_dev(cmd);
break;
case UBLK_CMD_DEL_DEV:
- ret = ublk_ctrl_del_dev(header->dev_id);
+ ret = ublk_ctrl_del_dev(&ub);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
- ret = ublk_ctrl_get_queue_affinity(cmd);
+ ret = ublk_ctrl_get_queue_affinity(ub, cmd);
break;
case UBLK_CMD_GET_PARAMS:
- ret = ublk_ctrl_get_params(cmd);
+ ret = ublk_ctrl_get_params(ub, cmd);
break;
case UBLK_CMD_SET_PARAMS:
- ret = ublk_ctrl_set_params(cmd);
+ ret = ublk_ctrl_set_params(ub, cmd);
break;
case UBLK_CMD_START_USER_RECOVERY:
- ret = ublk_ctrl_start_recovery(cmd);
+ ret = ublk_ctrl_start_recovery(ub, cmd);
break;
case UBLK_CMD_END_USER_RECOVERY:
- ret = ublk_ctrl_end_recovery(cmd);
+ ret = ublk_ctrl_end_recovery(ub, cmd);
break;
default:
+ ret = -ENOTSUPP;
break;
}
+
+ put_dev:
+ if (ub)
+ ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0);
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;
@@ -2070,7 +2342,7 @@ static int __init ublk_init(void)
if (ret)
goto unregister_mis;
- ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
+ ublk_chr_class = class_create("ublk-char");
if (IS_ERR(ublk_chr_class)) {
ret = PTR_ERR(ublk_chr_class);
goto free_chrdev_region;
@@ -2089,13 +2361,12 @@ static void __exit ublk_exit(void)
struct ublk_device *ub;
int id;
- class_destroy(ublk_chr_class);
-
- misc_deregister(&ublk_misc);
-
idr_for_each_entry(&ublk_index_idr, ub, id)
ublk_remove(ub);
+ class_destroy(ublk_chr_class);
+ misc_deregister(&ublk_misc);
+
idr_destroy(&ublk_index_idr);
unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}
@@ -2103,5 +2374,8 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
+module_param(ublks_max, int, 0444);
+MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
+
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 68bd2f7961b3..2b918e28acaa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -15,6 +15,7 @@
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
+#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
#define PART_BITS 4
@@ -80,22 +81,49 @@ struct virtio_blk {
int num_vqs;
int io_queues[HCTX_MAX_TYPES];
struct virtio_blk_vq *vqs;
+
+ /* For zoned device */
+ unsigned int zone_sectors;
};
struct virtblk_req {
+ /* Out header */
struct virtio_blk_outhdr out_hdr;
- u8 status;
+
+ /* In header */
+ union {
+ u8 status;
+
+ /*
+ * The zone append command has an extended in header.
+ * The status field in zone_append_in_hdr must always
+ * be the last byte.
+ */
+ struct {
+ __virtio64 sector;
+ u8 status;
+ } zone_append;
+ } in_hdr;
+
+ size_t in_hdr_len;
+
struct sg_table sg_table;
struct scatterlist sg[];
};
-static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
+static inline blk_status_t virtblk_result(u8 status)
{
- switch (vbr->status) {
+ switch (status) {
case VIRTIO_BLK_S_OK:
return BLK_STS_OK;
case VIRTIO_BLK_S_UNSUPP:
return BLK_STS_NOTSUPP;
+ case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
+ return BLK_STS_ZONE_OPEN_RESOURCE;
+ case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE:
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+ case VIRTIO_BLK_S_IOERR:
+ case VIRTIO_BLK_S_ZONE_UNALIGNED_WP:
default:
return BLK_STS_IOERR;
}
@@ -111,11 +139,11 @@ static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
- struct scatterlist hdr, status, *sgs[3];
+ struct scatterlist out_hdr, in_hdr, *sgs[3];
unsigned int num_out = 0, num_in = 0;
- sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
- sgs[num_out++] = &hdr;
+ sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+ sgs[num_out++] = &out_hdr;
if (vbr->sg_table.nents) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
@@ -124,8 +152,8 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
sgs[num_out + num_in++] = vbr->sg_table.sgl;
}
- sg_init_one(&status, &vbr->status, sizeof(vbr->status));
- sgs[num_out + num_in++] = &status;
+ sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
+ sgs[num_out + num_in++] = &in_hdr;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
@@ -170,9 +198,7 @@ static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool un
WARN_ON_ONCE(n != segments);
- req->special_vec.bv_page = virt_to_page(range);
- req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range) * segments;
+ bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return 0;
@@ -214,21 +240,25 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
struct request *req,
struct virtblk_req *vbr)
{
+ size_t in_hdr_len = sizeof(vbr->in_hdr.status);
bool unmap = false;
u32 type;
+ u64 sector = 0;
- vbr->out_hdr.sector = 0;
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
+ return BLK_STS_NOTSUPP;
+
+ /* Set fields for all request types */
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
switch (req_op(req)) {
case REQ_OP_READ:
type = VIRTIO_BLK_T_IN;
- vbr->out_hdr.sector = cpu_to_virtio64(vdev,
- blk_rq_pos(req));
+ sector = blk_rq_pos(req);
break;
case REQ_OP_WRITE:
type = VIRTIO_BLK_T_OUT;
- vbr->out_hdr.sector = cpu_to_virtio64(vdev,
- blk_rq_pos(req));
+ sector = blk_rq_pos(req);
break;
case REQ_OP_FLUSH:
type = VIRTIO_BLK_T_FLUSH;
@@ -243,16 +273,45 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
case REQ_OP_SECURE_ERASE:
type = VIRTIO_BLK_T_SECURE_ERASE;
break;
- case REQ_OP_DRV_IN:
- type = VIRTIO_BLK_T_GET_ID;
+ case REQ_OP_ZONE_OPEN:
+ type = VIRTIO_BLK_T_ZONE_OPEN;
+ sector = blk_rq_pos(req);
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ type = VIRTIO_BLK_T_ZONE_CLOSE;
+ sector = blk_rq_pos(req);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ type = VIRTIO_BLK_T_ZONE_FINISH;
+ sector = blk_rq_pos(req);
+ break;
+ case REQ_OP_ZONE_APPEND:
+ type = VIRTIO_BLK_T_ZONE_APPEND;
+ sector = blk_rq_pos(req);
+ in_hdr_len = sizeof(vbr->in_hdr.zone_append);
break;
+ case REQ_OP_ZONE_RESET:
+ type = VIRTIO_BLK_T_ZONE_RESET;
+ sector = blk_rq_pos(req);
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ type = VIRTIO_BLK_T_ZONE_RESET_ALL;
+ break;
+ case REQ_OP_DRV_IN:
+ /*
+ * Out header has already been prepared by the caller (virtblk_get_id()
+ * or virtblk_submit_zone_report()), nothing to do here.
+ */
+ return 0;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
}
+ /* Set fields for non-REQ_OP_DRV_IN request types */
+ vbr->in_hdr_len = in_hdr_len;
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
- vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
+ vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector);
if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
type == VIRTIO_BLK_T_SECURE_ERASE) {
@@ -263,42 +322,89 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
return 0;
}
+/*
+ * The status byte is always the last byte of the virtblk request
+ * in-header. This helper fetches its value for all in-header formats
+ * that are currently defined.
+ */
+static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
+{
+ return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
+}
+
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
- blk_mq_end_request(req, virtblk_result(vbr));
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = virtio64_to_cpu(vblk->vdev,
+ vbr->in_hdr.zone_append.sector);
+
+ blk_mq_end_request(req, status);
+}
+
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+ virtblk_cleanup_cmd(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_handle_req(struct virtio_blk_vq *vq,
+ struct io_comp_batch *iob)
+{
+ struct virtblk_req *vbr;
+ int req_done = 0;
+ unsigned int len;
+
+ while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+
+ if (likely(!blk_should_fake_timeout(req->q)) &&
+ !blk_mq_complete_request_remote(req) &&
+ !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
+ virtblk_complete_batch))
+ virtblk_request_done(req);
+ req_done++;
+ }
+
+ return req_done;
}
static void virtblk_done(struct virtqueue *vq)
{
struct virtio_blk *vblk = vq->vdev->priv;
- bool req_done = false;
- int qid = vq->index;
- struct virtblk_req *vbr;
+ struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index];
+ int req_done = 0;
unsigned long flags;
- unsigned int len;
+ DEFINE_IO_COMP_BATCH(iob);
- spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
+ spin_lock_irqsave(&vblk_vq->lock, flags);
do {
virtqueue_disable_cb(vq);
- while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- struct request *req = blk_mq_rq_from_pdu(vbr);
+ req_done += virtblk_handle_req(vblk_vq, &iob);
- if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
- req_done = true;
- }
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
- /* In case queue is stopped waiting for more buffers. */
- if (req_done)
+ if (req_done) {
+ if (!rq_list_empty(iob.req_list))
+ iob.complete(&iob);
+
+ /* In case queue is stopped waiting for more buffers. */
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
- spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+ }
+ spin_unlock_irqrestore(&vblk_vq->lock, flags);
}
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -315,22 +421,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
virtqueue_notify(vq->vq);
}
+static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
+{
+ virtblk_cleanup_cmd(req);
+ switch (rc) {
+ case -ENOSPC:
+ return BLK_STS_DEV_RESOURCE;
+ case -ENOMEM:
+ return BLK_STS_RESOURCE;
+ default:
+ return BLK_STS_IOERR;
+ }
+}
+
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
struct virtio_blk *vblk,
struct request *req,
struct virtblk_req *vbr)
{
blk_status_t status;
+ int num;
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
return status;
- vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
- if (unlikely(vbr->sg_table.nents < 0)) {
- virtblk_cleanup_cmd(req);
- return BLK_STS_RESOURCE;
- }
+ num = virtblk_map_data(hctx, req, vbr);
+ if (unlikely(num < 0))
+ return virtblk_fail_to_queue(req, -ENOMEM);
+ vbr->sg_table.nents = num;
blk_mq_start_request(req);
@@ -364,15 +483,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
virtblk_unmap_data(req, vbr);
- virtblk_cleanup_cmd(req);
- switch (err) {
- case -ENOSPC:
- return BLK_STS_DEV_RESOURCE;
- case -ENOMEM:
- return BLK_STS_RESOURCE;
- default:
- return BLK_STS_IOERR;
- }
+ return virtblk_fail_to_queue(req, err);
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -452,6 +563,338 @@ static void virtio_queue_rqs(struct request **rqlist)
*rqlist = requeue_list;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
+ unsigned int nr_zones,
+ size_t *buflen)
+{
+ struct request_queue *q = vblk->disk->queue;
+ size_t bufsize;
+ void *buf;
+
+ nr_zones = min_t(unsigned int, nr_zones,
+ get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
+
+ bufsize = sizeof(struct virtio_blk_zone_report) +
+ nr_zones * sizeof(struct virtio_blk_zone_descriptor);
+ bufsize = min_t(size_t, bufsize,
+ queue_max_hw_sectors(q) << SECTOR_SHIFT);
+ bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+
+ while (bufsize >= sizeof(struct virtio_blk_zone_report)) {
+ buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
+
+ return NULL;
+}
+
+static int virtblk_submit_zone_report(struct virtio_blk *vblk,
+ char *report_buf, size_t report_len,
+ sector_t sector)
+{
+ struct request_queue *q = vblk->disk->queue;
+ struct request *req;
+ struct virtblk_req *vbr;
+ int err;
+
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ vbr = blk_mq_rq_to_pdu(req);
+ vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
+ vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
+
+ err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+ if (err)
+ goto out;
+
+ blk_execute_rq(req, false);
+ err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
+out:
+ blk_mq_free_request(req);
+ return err;
+}
+
+static int virtblk_parse_zone(struct virtio_blk *vblk,
+ struct virtio_blk_zone_descriptor *entry,
+ unsigned int idx, report_zones_cb cb, void *data)
+{
+ struct blk_zone zone = { };
+
+ zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
+ if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
+ zone.len = vblk->zone_sectors;
+ else
+ zone.len = get_capacity(vblk->disk) - zone.start;
+ zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
+ zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
+
+ switch (entry->z_type) {
+ case VIRTIO_BLK_ZT_SWR:
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ break;
+ case VIRTIO_BLK_ZT_SWP:
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
+ break;
+ case VIRTIO_BLK_ZT_CONV:
+ zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ break;
+ default:
+ dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
+ zone.start, entry->z_type);
+ return -EIO;
+ }
+
+ switch (entry->z_state) {
+ case VIRTIO_BLK_ZS_EMPTY:
+ zone.cond = BLK_ZONE_COND_EMPTY;
+ break;
+ case VIRTIO_BLK_ZS_CLOSED:
+ zone.cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case VIRTIO_BLK_ZS_FULL:
+ zone.cond = BLK_ZONE_COND_FULL;
+ zone.wp = zone.start + zone.len;
+ break;
+ case VIRTIO_BLK_ZS_EOPEN:
+ zone.cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case VIRTIO_BLK_ZS_IOPEN:
+ zone.cond = BLK_ZONE_COND_IMP_OPEN;
+ break;
+ case VIRTIO_BLK_ZS_NOT_WP:
+ zone.cond = BLK_ZONE_COND_NOT_WP;
+ break;
+ case VIRTIO_BLK_ZS_RDONLY:
+ zone.cond = BLK_ZONE_COND_READONLY;
+ zone.wp = ULONG_MAX;
+ break;
+ case VIRTIO_BLK_ZS_OFFLINE:
+ zone.cond = BLK_ZONE_COND_OFFLINE;
+ zone.wp = ULONG_MAX;
+ break;
+ default:
+ dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
+ zone.start, entry->z_state);
+ return -EIO;
+ }
+
+ /*
+ * The callback below checks the validity of the reported
+ * entry data, no need to further validate it here.
+ */
+ return cb(&zone, idx, data);
+}
+
+static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb,
+ void *data)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ struct virtio_blk_zone_report *report;
+ unsigned long long nz, i;
+ size_t buflen;
+ unsigned int zone_idx = 0;
+ int ret;
+
+ if (WARN_ON_ONCE(!vblk->zone_sectors))
+ return -EOPNOTSUPP;
+
+ report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
+ if (!report)
+ return -ENOMEM;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (!vblk->vdev) {
+ ret = -ENXIO;
+ goto fail_report;
+ }
+
+ while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
+ memset(report, 0, buflen);
+
+ ret = virtblk_submit_zone_report(vblk, (char *)report,
+ buflen, sector);
+ if (ret)
+ goto fail_report;
+
+ nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
+ nr_zones);
+ if (!nz)
+ break;
+
+ for (i = 0; i < nz && zone_idx < nr_zones; i++) {
+ ret = virtblk_parse_zone(vblk, &report->zones[i],
+ zone_idx, cb, data);
+ if (ret)
+ goto fail_report;
+
+ sector = virtio64_to_cpu(vblk->vdev,
+ report->zones[i].z_start) +
+ vblk->zone_sectors;
+ zone_idx++;
+ }
+ }
+
+ if (zone_idx > 0)
+ ret = zone_idx;
+ else
+ ret = -EINVAL;
+fail_report:
+ mutex_unlock(&vblk->vdev_mutex);
+ kvfree(report);
+ return ret;
+}
+
+static void virtblk_revalidate_zones(struct virtio_blk *vblk)
+{
+ u8 model;
+
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ zoned.model, &model);
+ switch (model) {
+ default:
+ dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
+ fallthrough;
+ case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
+ return;
+ case VIRTIO_BLK_Z_HM:
+ WARN_ON_ONCE(!vblk->zone_sectors);
+ if (!blk_revalidate_disk_zones(vblk->disk, NULL))
+ set_capacity_and_notify(vblk->disk, 0);
+ }
+}
+
+static int virtblk_probe_zoned_device(struct virtio_device *vdev,
+ struct virtio_blk *vblk,
+ struct request_queue *q)
+{
+ u32 v, wg;
+ u8 model;
+ int ret;
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ zoned.model, &model);
+
+ switch (model) {
+ case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ /* Present the host-aware device as non-zoned */
+ return 0;
+ case VIRTIO_BLK_Z_HM:
+ break;
+ default:
+ dev_err(&vdev->dev, "unsupported zone model %d\n", model);
+ return -EINVAL;
+ }
+
+ dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
+
+ disk_set_zoned(vblk->disk, BLK_ZONED_HM);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ zoned.max_open_zones, &v);
+ disk_set_max_open_zones(vblk->disk, v);
+ dev_dbg(&vdev->dev, "max open zones = %u\n", v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ zoned.max_active_zones, &v);
+ disk_set_max_active_zones(vblk->disk, v);
+ dev_dbg(&vdev->dev, "max active zones = %u\n", v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ zoned.write_granularity, &wg);
+ if (!wg) {
+ dev_warn(&vdev->dev, "zero write granularity reported\n");
+ return -ENODEV;
+ }
+ blk_queue_physical_block_size(q, wg);
+ blk_queue_io_min(q, wg);
+
+ dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
+
+ /*
+ * The virtio ZBD specification doesn't require zone sizes to be a
+ * power-of-two number of sectors, but the code in this driver expects that.
+ */
+ virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
+ &vblk->zone_sectors);
+ if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
+ dev_err(&vdev->dev,
+ "zoned device with non power of two zone size %u\n",
+ vblk->zone_sectors);
+ return -ENODEV;
+ }
+ dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
+
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
+ dev_warn(&vblk->vdev->dev,
+ "ignoring negotiated F_DISCARD for zoned device\n");
+ blk_queue_max_discard_sectors(q, 0);
+ }
+
+ ret = blk_revalidate_disk_zones(vblk->disk, NULL);
+ if (!ret) {
+ virtio_cread(vdev, struct virtio_blk_config,
+ zoned.max_append_sectors, &v);
+ if (!v) {
+ dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
+ return -ENODEV;
+ }
+ if ((v << SECTOR_SHIFT) < wg) {
+ dev_err(&vdev->dev,
+ "write granularity %u exceeds max_append_sectors %u limit\n",
+ wg, v);
+ return -ENODEV;
+ }
+
+ blk_queue_max_zone_append_sectors(q, v);
+ dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
+ }
+
+ return ret;
+}
+
+#else
+
+/*
+ * Zoned block device support is not configured in this kernel.
+ * Host-managed zoned devices can't be supported, but others are
+ * good to go as regular block devices.
+ */
+#define virtblk_report_zones NULL
+
+static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
+{
+}
+
+static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
+ struct virtio_blk *vblk, struct request_queue *q)
+{
+ u8 model;
+
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
+ if (model == VIRTIO_BLK_Z_HM) {
+ dev_err(&vdev->dev,
+ "virtio_blk: zoned devices are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -459,18 +902,24 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct virtio_blk *vblk = disk->private_data;
struct request_queue *q = vblk->disk->queue;
struct request *req;
+ struct virtblk_req *vbr;
int err;
req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ vbr = blk_mq_rq_to_pdu(req);
+ vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
+ vbr->out_hdr.sector = 0;
+
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
blk_execute_rq(req, false);
- err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
+ err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
@@ -521,6 +970,7 @@ static const struct block_device_operations virtblk_fops = {
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
.free_disk = virtblk_free_disk,
+ .report_zones = virtblk_report_zones,
};
static int index_to_minor(int index)
@@ -591,6 +1041,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct virtio_blk *vblk =
container_of(work, struct virtio_blk, config_work);
+ virtblk_revalidate_zones(vblk);
virtblk_update_capacity(vblk, true);
}
@@ -832,36 +1283,15 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
}
}
-static void virtblk_complete_batch(struct io_comp_batch *iob)
-{
- struct request *req;
-
- rq_list_for_each(&iob->req_list, req) {
- virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
- virtblk_cleanup_cmd(req);
- }
- blk_mq_end_request_batch(iob);
-}
-
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
- struct virtblk_req *vbr;
unsigned long flags;
- unsigned int len;
int found = 0;
spin_lock_irqsave(&vq->lock, flags);
-
- while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
- struct request *req = blk_mq_rq_from_pdu(vbr);
-
- found++;
- if (!blk_mq_add_to_batch(req, iob, vbr->status,
- virtblk_complete_batch))
- blk_mq_complete_request(req);
- }
+ found = virtblk_handle_req(vq, iob);
if (found)
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
@@ -991,7 +1421,7 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_max_segments(q, sg_elems);
/* No real sector limit. */
- blk_queue_max_hw_sectors(q, -1U);
+ blk_queue_max_hw_sectors(q, UINT_MAX);
max_size = virtio_max_dma_size(vdev);
@@ -1147,6 +1577,16 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
+ /*
+ * All steps that follow use the VQs, so they need to be
+ * placed after the virtio_device_ready() call above.
+ */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
+ err = virtblk_probe_zoned_device(vdev, vblk, q);
+ if (err)
+ goto out_cleanup_disk;
+ }
+
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;
@@ -1247,7 +1687,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
- VIRTIO_BLK_F_SECURE_ERASE,
+ VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};
static struct virtio_driver virtio_blk = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index a5cf7f1e871c..c362f4ad80ab 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -239,9 +239,9 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring,
atomic_dec(&ring->persistent_gnt_in_use);
}
-static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
- unsigned int num)
+static void free_persistent_gnts(struct xen_blkif_ring *ring)
{
+ struct rb_root *root = &ring->persistent_gnts;
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
@@ -249,6 +249,9 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
+ if (RB_EMPTY_ROOT(root))
+ return;
+
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
@@ -277,9 +280,11 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
rb_erase(&persistent_gnt->node, root);
kfree(persistent_gnt);
- num--;
+ ring->persistent_gnt_c--;
}
- BUG_ON(num != 0);
+
+ BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+ BUG_ON(ring->persistent_gnt_c != 0);
}
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
@@ -631,12 +636,7 @@ purge_gnt_list:
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
/* Free all persistent grant pages */
- if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
- free_persistent_gnts(ring, &ring->persistent_gnts,
- ring->persistent_gnt_c);
-
- BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
- ring->persistent_gnt_c = 0;
+ free_persistent_gnts(ring);
/* Since we are shutting down remove all pages from the buffer */
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
@@ -891,7 +891,7 @@ next:
out:
for (i = last_map; i < num; i++) {
/* Don't zap current batch's valid persistent grants. */
- if(i >= map_until)
+ if (i >= map_until)
pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
}
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
bio_put(bio);
}
+static void blkif_get_x86_32_req(struct blkif_request *dst,
+ const struct blkif_x86_32_request *src)
+{
+ unsigned int i, n;
+
+ dst->operation = READ_ONCE(src->operation);
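	/*
	 * The source request lives in the ring page shared with the frontend,
	 * so the operation and segment counts are fetched once (READ_ONCE())
	 * and clamped with min_t()/min() before the copy loops below; this
	 * guards against the frontend changing the values between reads
	 * (a double-fetch).  Rationale inferred from the READ_ONCE()/clamping
	 * pattern here, not spelled out in the patch itself.
	 */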
+
+ switch (dst->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+ dst->u.rw.nr_segments);
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments =
+ READ_ONCE(src->u.indirect.nr_segments);
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ n = min(MAX_INDIRECT_PAGES,
+ INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < n; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
+
+ default:
+ /*
+ * Don't know how to translate this op. Only get the
+ * ID so failure can be reported to the frontend.
+ */
+ dst->u.other.id = src->u.other.id;
+ break;
+ }
+}
+
+static void blkif_get_x86_64_req(struct blkif_request *dst,
+ const struct blkif_x86_64_request *src)
+{
+ unsigned int i, n;
+
+ dst->operation = READ_ONCE(src->operation);
+
+ switch (dst->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+ dst->u.rw.nr_segments);
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments =
+ READ_ONCE(src->u.indirect.nr_segments);
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ n = min(MAX_INDIRECT_PAGES,
+ INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < n; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
+ default:
+ /*
+ * Don't know how to translate this op. Only get the
+ * ID so failure can be reported to the frontend.
+ */
+ dst->u.other.id = src->u.other.id;
+ break;
+ }
+}
/*
* Function to copy the from the ring buffer the 'struct blkif_request'
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index a28473470e66..40f67bfc052d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -296,7 +296,7 @@ struct xen_blkif_ring {
struct work_struct free_work;
/* Thread shutdown wait queue. */
wait_queue_head_t shutdown_wq;
- struct xen_blkif *blkif;
+ struct xen_blkif *blkif;
};
struct xen_blkif {
@@ -315,7 +315,7 @@ struct xen_blkif {
atomic_t drain;
struct work_struct free_work;
- unsigned int nr_ring_pages;
+ unsigned int nr_ring_pages;
bool multi_ref;
/* All rings for this device. */
struct xen_blkif_ring *rings;
@@ -329,7 +329,7 @@ struct seg_buf {
};
struct grant_page {
- struct page *page;
+ struct page *page;
struct persistent_gnt *persistent_gnt;
grant_handle_t handle;
grant_ref_t gref;
@@ -384,7 +384,6 @@ void xen_blkif_xenbus_fini(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
-int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -395,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
-static inline void blkif_get_x86_32_req(struct blkif_request *dst,
- struct blkif_x86_32_request *src)
-{
- int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = READ_ONCE(src->operation);
- switch (dst->operation) {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- case BLKIF_OP_WRITE_BARRIER:
- case BLKIF_OP_FLUSH_DISKCACHE:
- dst->u.rw.nr_segments = src->u.rw.nr_segments;
- dst->u.rw.handle = src->u.rw.handle;
- dst->u.rw.id = src->u.rw.id;
- dst->u.rw.sector_number = src->u.rw.sector_number;
- barrier();
- if (n > dst->u.rw.nr_segments)
- n = dst->u.rw.nr_segments;
- for (i = 0; i < n; i++)
- dst->u.rw.seg[i] = src->u.rw.seg[i];
- break;
- case BLKIF_OP_DISCARD:
- dst->u.discard.flag = src->u.discard.flag;
- dst->u.discard.id = src->u.discard.id;
- dst->u.discard.sector_number = src->u.discard.sector_number;
- dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
- break;
- case BLKIF_OP_INDIRECT:
- dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
- dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
- dst->u.indirect.handle = src->u.indirect.handle;
- dst->u.indirect.id = src->u.indirect.id;
- dst->u.indirect.sector_number = src->u.indirect.sector_number;
- barrier();
- j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
- for (i = 0; i < j; i++)
- dst->u.indirect.indirect_grefs[i] =
- src->u.indirect.indirect_grefs[i];
- break;
- default:
- /*
- * Don't know how to translate this op. Only get the
- * ID so failure can be reported to the frontend.
- */
- dst->u.other.id = src->u.other.id;
- break;
- }
-}
-
-static inline void blkif_get_x86_64_req(struct blkif_request *dst,
- struct blkif_x86_64_request *src)
-{
- int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = READ_ONCE(src->operation);
- switch (dst->operation) {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- case BLKIF_OP_WRITE_BARRIER:
- case BLKIF_OP_FLUSH_DISKCACHE:
- dst->u.rw.nr_segments = src->u.rw.nr_segments;
- dst->u.rw.handle = src->u.rw.handle;
- dst->u.rw.id = src->u.rw.id;
- dst->u.rw.sector_number = src->u.rw.sector_number;
- barrier();
- if (n > dst->u.rw.nr_segments)
- n = dst->u.rw.nr_segments;
- for (i = 0; i < n; i++)
- dst->u.rw.seg[i] = src->u.rw.seg[i];
- break;
- case BLKIF_OP_DISCARD:
- dst->u.discard.flag = src->u.discard.flag;
- dst->u.discard.id = src->u.discard.id;
- dst->u.discard.sector_number = src->u.discard.sector_number;
- dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
- break;
- case BLKIF_OP_INDIRECT:
- dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
- dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
- dst->u.indirect.handle = src->u.indirect.handle;
- dst->u.indirect.id = src->u.indirect.id;
- dst->u.indirect.sector_number = src->u.indirect.sector_number;
- barrier();
- j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
- for (i = 0; i < j; i++)
- dst->u.indirect.indirect_grefs[i] =
- src->u.indirect.indirect_grefs[i];
- break;
- default:
- /*
- * Don't know how to translate this op. Only get the
- * ID so failure can be reported to the frontend.
- */
- dst->u.other.id = src->u.other.id;
- break;
- }
-}
-
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c0227dfa4688..4807af1d5805 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -524,7 +524,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
return 0;
}
-static int xen_blkbk_remove(struct xenbus_device *dev)
+static void xen_blkbk_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -547,8 +547,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
}
-
- return 0;
}
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b28489290323..23ed258b57f0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2467,7 +2467,7 @@ static void blkback_changed(struct xenbus_device *dev,
}
}
-static int blkfront_remove(struct xenbus_device *xbdev)
+static void blkfront_remove(struct xenbus_device *xbdev)
{
struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
@@ -2488,7 +2488,6 @@ static int blkfront_remove(struct xenbus_device *xbdev)
}
kfree(info);
- return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e290d6d97047..f6d90f1ba5cf 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -54,9 +54,8 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio);
-
+static int zram_read_page(struct zram *zram, struct page *page, u32 index,
+ struct bio *parent);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
@@ -148,6 +147,7 @@ static inline bool is_partial_io(struct bio_vec *bvec)
{
return bvec->bv_len != PAGE_SIZE;
}
+#define ZRAM_PARTIAL_IO 1
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -174,36 +174,6 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
return prio & ZRAM_COMP_PRIORITY_MASK;
}
-/*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline bool valid_io_request(struct zram *zram,
- sector_t start, unsigned int size)
-{
- u64 end, bound;
-
- /* unaligned request */
- if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
- return false;
- if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
- return false;
-
- end = start + (size >> SECTOR_SHIFT);
- bound = zram->disksize >> SECTOR_SHIFT;
- /* out of range range */
- if (unlikely(start >= bound || end > bound || start > end))
- return false;
-
- /* I/O request is valid */
- return true;
-}
-
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
-{
- *index += (*offset + bvec->bv_len) / PAGE_SIZE;
- *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
-}
-
static inline void update_used_max(struct zram *zram,
const unsigned long pages)
{
@@ -606,41 +576,16 @@ static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
atomic64_dec(&zram->stats.bd_count);
}
-static void zram_page_end_io(struct bio *bio)
-{
- struct page *page = bio_first_page_all(bio);
-
- page_endio(page, op_is_write(bio_op(bio)),
- blk_status_to_errno(bio->bi_status));
- bio_put(bio);
-}
-
-/*
- * Returns 1 if the submission is successful.
- */
-static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+static void read_from_bdev_async(struct zram *zram, struct page *page,
unsigned long entry, struct bio *parent)
{
struct bio *bio;
- bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
- GFP_NOIO);
- if (!bio)
- return -ENOMEM;
-
+ bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
- bio_put(bio);
- return -EIO;
- }
-
- if (!parent)
- bio->bi_end_io = zram_page_end_io;
- else
- bio_chain(bio, parent);
-
+ __bio_add_page(bio, page, PAGE_SIZE, 0);
+ bio_chain(bio, parent);
submit_bio(bio);
- return 1;
}
#define PAGE_WB_SIG "page_index="
@@ -701,12 +646,6 @@ static ssize_t writeback_store(struct device *dev,
}
for (; nr_pages != 0; index++, nr_pages--) {
- struct bio_vec bvec;
-
- bvec.bv_page = page;
- bvec.bv_len = PAGE_SIZE;
- bvec.bv_offset = 0;
-
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
spin_unlock(&zram->wb_limit_lock);
@@ -750,7 +689,7 @@ static ssize_t writeback_store(struct device *dev,
/* Need for hugepage writeback racing */
zram_set_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
- if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
+ if (zram_read_page(zram, page, index, NULL)) {
zram_slot_lock(zram, index);
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_clear_flag(zram, index, ZRAM_IDLE);
@@ -761,9 +700,8 @@ static ssize_t writeback_store(struct device *dev,
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
+ bio_add_page(&bio, page, PAGE_SIZE, 0);
- bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset);
/*
* XXX: A single page IO would be inefficient for write
* but it would be not bad as starter.
@@ -831,19 +769,20 @@ struct zram_work {
struct work_struct work;
struct zram *zram;
unsigned long entry;
- struct bio *bio;
- struct bio_vec bvec;
+ struct page *page;
+ int error;
};
-#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
struct zram_work *zw = container_of(work, struct zram_work, work);
- struct zram *zram = zw->zram;
- unsigned long entry = zw->entry;
- struct bio *bio = zw->bio;
+ struct bio_vec bv;
+ struct bio bio;
- read_from_bdev_async(zram, &zw->bvec, entry, bio);
+ bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
+ bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
+ __bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
+ zw->error = submit_bio_wait(&bio);
}
/*
@@ -851,45 +790,39 @@ static void zram_sync_read(struct work_struct *work)
* chained IO with parent IO in same context, it's a deadlock. To avoid that,
* use a worker thread context.
*/
-static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *bio)
+static int read_from_bdev_sync(struct zram *zram, struct page *page,
+ unsigned long entry)
{
struct zram_work work;
- work.bvec = *bvec;
+ work.page = page;
work.zram = zram;
work.entry = entry;
- work.bio = bio;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
queue_work(system_unbound_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
- return 1;
-}
-#else
-static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *bio)
-{
- WARN_ON(1);
- return -EIO;
+ return work.error;
}
-#endif
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *parent, bool sync)
+static int read_from_bdev(struct zram *zram, struct page *page,
+ unsigned long entry, struct bio *parent)
{
atomic64_inc(&zram->stats.bd_reads);
- if (sync)
- return read_from_bdev_sync(zram, bvec, entry, parent);
- else
- return read_from_bdev_async(zram, bvec, entry, parent);
+ if (!parent) {
+ if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
+ return -EIO;
+ return read_from_bdev_sync(zram, page, entry);
+ }
+ read_from_bdev_async(zram, page, entry, parent);
+ return 0;
}
#else
static inline void reset_bdev(struct zram *zram) {};
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
- unsigned long entry, struct bio *parent, bool sync)
+static int read_from_bdev(struct zram *zram, struct page *page,
+ unsigned long entry, struct bio *parent)
{
return -EIO;
}
@@ -1140,7 +1073,7 @@ static ssize_t recomp_algorithm_store(struct device *dev,
while (*args) {
args = next_arg(args, &param, &val);
- if (!*val)
+ if (!val || !*val)
return -EINVAL;
if (!strcmp(param, "algo")) {
@@ -1192,10 +1125,9 @@ static ssize_t io_stat_show(struct device *dev,
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu %8llu\n",
+ "%8llu %8llu 0 %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
- (u64)atomic64_read(&zram->stats.invalid_io),
(u64)atomic64_read(&zram->stats.notify_free));
up_read(&zram->init_lock);
@@ -1374,23 +1306,6 @@ out:
}
/*
- * Reads a page from the writeback devices. Corresponding ZRAM slot
- * should be unlocked.
- */
-static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page,
- u32 index, struct bio *bio, bool partial_io)
-{
- struct bio_vec bvec = {
- .bv_page = page,
- .bv_len = PAGE_SIZE,
- .bv_offset = 0,
- };
-
- return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio,
- partial_io);
-}
-
-/*
* Reads (decompresses if needed) a page from zspool (zsmalloc).
* Corresponding ZRAM slot should be locked.
*/
@@ -1439,8 +1354,8 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
return ret;
}
-static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
- struct bio *bio, bool partial_io)
+static int zram_read_page(struct zram *zram, struct page *page, u32 index,
+ struct bio *parent)
{
int ret;
@@ -1450,15 +1365,14 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
- /* Slot should be unlocked before the function call */
+ /*
+ * The slot should be unlocked before reading from the backing
+ * device.
+ */
zram_slot_unlock(zram, index);
- /* A null bio means rw_page was used, we must fallback to bio */
- if (!bio)
- return -EOPNOTSUPP;
-
- ret = zram_bvec_read_from_bdev(zram, page, index, bio,
- partial_io);
+ ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ parent);
}
/* Should NEVER happen. Return bio error if it does. */
@@ -1468,39 +1382,34 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
return ret;
}
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+/*
+ * Use a temporary buffer to decompress the page, as the decompressor
+ * always expects a full page for the output.
+ */
+static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset)
{
+ struct page *page = alloc_page(GFP_NOIO);
int ret;
- struct page *page;
- page = bvec->bv_page;
- if (is_partial_io(bvec)) {
- /* Use a temporary buffer to decompress the page */
- page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
- if (!page)
- return -ENOMEM;
- }
-
- ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
- if (unlikely(ret))
- goto out;
-
- if (is_partial_io(bvec)) {
- void *src = kmap_atomic(page);
+ if (!page)
+ return -ENOMEM;
+ ret = zram_read_page(zram, page, index, NULL);
+ if (likely(!ret))
+ memcpy_to_bvec(bvec, page_address(page) + offset);
+ __free_page(page);
+ return ret;
+}
- memcpy_to_bvec(bvec, src + offset);
- kunmap_atomic(src);
- }
-out:
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
if (is_partial_io(bvec))
- __free_page(page);
-
- return ret;
+ return zram_bvec_read_partial(zram, bvec, index, offset);
+ return zram_read_page(zram, bvec->bv_page, index, bio);
}
-static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, struct bio *bio)
+static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
unsigned long alloced_pages;
@@ -1508,7 +1417,6 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
- struct page *page = bvec->bv_page;
unsigned long element = 0;
enum zram_pageflags flags = 0;
@@ -1626,42 +1534,33 @@ out:
return ret;
}
-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+/*
+ * This is a partial IO. Read the full page before writing the changes.
+ */
+static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
{
+ struct page *page = alloc_page(GFP_NOIO);
int ret;
- struct page *page = NULL;
- struct bio_vec vec;
- vec = *bvec;
- if (is_partial_io(bvec)) {
- void *dst;
- /*
- * This is a partial IO. We need to read the full page
- * before to write the changes.
- */
- page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
- if (!page)
- return -ENOMEM;
-
- ret = __zram_bvec_read(zram, page, index, bio, true);
- if (ret)
- goto out;
-
- dst = kmap_atomic(page);
- memcpy_from_bvec(dst + offset, bvec);
- kunmap_atomic(dst);
+ if (!page)
+ return -ENOMEM;
- vec.bv_page = page;
- vec.bv_len = PAGE_SIZE;
- vec.bv_offset = 0;
+ ret = zram_read_page(zram, page, index, bio);
+ if (!ret) {
+ memcpy_from_bvec(page_address(page) + offset, bvec);
+ ret = zram_write_page(zram, page, index);
}
+ __free_page(page);
+ return ret;
+}
- ret = __zram_bvec_write(zram, &vec, index, bio);
-out:
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
if (is_partial_io(bvec))
- __free_page(page);
- return ret;
+ return zram_bvec_write_partial(zram, bvec, index, offset, bio);
+ return zram_write_page(zram, bvec->bv_page, index);
}
#ifdef CONFIG_ZRAM_MULTI_COMP
@@ -1772,7 +1671,7 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
/*
* No direct reclaim (slow path) for handle allocation and no
- * re-compression attempt (unlike in __zram_bvec_write()) since
+ * re-compression attempt (unlike in zram_write_page()) since
* we already have stored that object in zsmalloc. If we cannot
* alloc memory for recompressed object then we bail out and
* simply keep the old (existing) object in zsmalloc.
@@ -1824,7 +1723,7 @@ static ssize_t recompress_store(struct device *dev,
while (*args) {
args = next_arg(args, &param, &val);
- if (!*val)
+ if (!val || !*val)
return -EINVAL;
if (!strcmp(param, "type")) {
@@ -1932,15 +1831,12 @@ release_init_lock:
}
#endif
-/*
- * zram_bio_discard - handler on discard request
- * @index: physical block index in PAGE_SIZE units
- * @offset: byte offset within physical block
- */
-static void zram_bio_discard(struct zram *zram, u32 index,
- int offset, struct bio *bio)
+static void zram_bio_discard(struct zram *zram, struct bio *bio)
{
size_t n = bio->bi_iter.bi_size;
+ u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
+ SECTOR_SHIFT;
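	/*
	 * Worked example of the arithmetic above, assuming 4 KiB pages
	 * (SECTORS_PER_PAGE == 8, SECTORS_PER_PAGE_SHIFT == 3): a bio starting
	 * at sector 10 yields index = 10 >> 3 = 1 and
	 * offset = (10 & 7) << SECTOR_SHIFT = 1024 bytes into that page.
	 */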
/*
* zram manages data in physical block size units. Because logical block
@@ -1968,80 +1864,58 @@ static void zram_bio_discard(struct zram *zram, u32 index,
index++;
n -= PAGE_SIZE;
}
+
+ bio_endio(bio);
}
-/*
- * Returns errno if it has some problem. Otherwise return 0 or 1.
- * Returns 0 if IO request was done synchronously
- * Returns 1 if IO request was successfully submitted.
- */
-static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, enum req_op op, struct bio *bio)
+static void zram_bio_read(struct zram *zram, struct bio *bio)
{
- int ret;
-
- if (!op_is_write(op)) {
- ret = zram_bvec_read(zram, bvec, index, offset, bio);
- flush_dcache_page(bvec->bv_page);
- } else {
- ret = zram_bvec_write(zram, bvec, index, offset, bio);
- }
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ unsigned long start_time;
- zram_slot_lock(zram, index);
- zram_accessed(zram, index);
- zram_slot_unlock(zram, index);
+ start_time = bio_start_io_acct(bio);
+ bio_for_each_segment(bv, bio, iter) {
+ u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
+ SECTOR_SHIFT;
- if (unlikely(ret < 0)) {
- if (!op_is_write(op))
+ if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads);
- else
- atomic64_inc(&zram->stats.failed_writes);
- }
+ bio->bi_status = BLK_STS_IOERR;
+ break;
+ }
+ flush_dcache_page(bv.bv_page);
- return ret;
+ zram_slot_lock(zram, index);
+ zram_accessed(zram, index);
+ zram_slot_unlock(zram, index);
+ }
+ bio_end_io_acct(bio, start_time);
+ bio_endio(bio);
}
-static void __zram_make_request(struct zram *zram, struct bio *bio)
+static void zram_bio_write(struct zram *zram, struct bio *bio)
{
- int offset;
- u32 index;
- struct bio_vec bvec;
struct bvec_iter iter;
+ struct bio_vec bv;
unsigned long start_time;
- index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (bio->bi_iter.bi_sector &
- (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_WRITE_ZEROES:
- zram_bio_discard(zram, index, offset, bio);
- bio_endio(bio);
- return;
- default:
- break;
- }
-
start_time = bio_start_io_acct(bio);
- bio_for_each_segment(bvec, bio, iter) {
- struct bio_vec bv = bvec;
- unsigned int unwritten = bvec.bv_len;
-
- do {
- bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
- unwritten);
- if (zram_bvec_rw(zram, &bv, index, offset,
- bio_op(bio), bio) < 0) {
- bio->bi_status = BLK_STS_IOERR;
- break;
- }
+ bio_for_each_segment(bv, bio, iter) {
+ u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
+ SECTOR_SHIFT;
- bv.bv_offset += bv.bv_len;
- unwritten -= bv.bv_len;
+ if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
+ atomic64_inc(&zram->stats.failed_writes);
+ bio->bi_status = BLK_STS_IOERR;
+ break;
+ }
- update_position(&index, &offset, &bv);
- } while (unwritten);
+ zram_slot_lock(zram, index);
+ zram_accessed(zram, index);
+ zram_slot_unlock(zram, index);
}
bio_end_io_acct(bio, start_time);
bio_endio(bio);
@@ -2054,14 +1928,21 @@ static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
- if (!valid_io_request(zram, bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size)) {
- atomic64_inc(&zram->stats.invalid_io);
- bio_io_error(bio);
- return;
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ zram_bio_read(zram, bio);
+ break;
+ case REQ_OP_WRITE:
+ zram_bio_write(zram, bio);
+ break;
+ case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_ZEROES:
+ zram_bio_discard(zram, bio);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ bio_endio(bio);
}
-
- __zram_make_request(zram, bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
@@ -2081,61 +1962,6 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram_slot_unlock(zram, index);
}
-static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, enum req_op op)
-{
- int offset, ret;
- u32 index;
- struct zram *zram;
- struct bio_vec bv;
- unsigned long start_time;
-
- if (PageTransHuge(page))
- return -ENOTSUPP;
- zram = bdev->bd_disk->private_data;
-
- if (!valid_io_request(zram, sector, PAGE_SIZE)) {
- atomic64_inc(&zram->stats.invalid_io);
- ret = -EINVAL;
- goto out;
- }
-
- index = sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
-
- bv.bv_page = page;
- bv.bv_len = PAGE_SIZE;
- bv.bv_offset = 0;
-
- start_time = bdev_start_io_acct(bdev->bd_disk->part0,
- SECTORS_PER_PAGE, op, jiffies);
- ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
- bdev_end_io_acct(bdev->bd_disk->part0, op, start_time);
-out:
- /*
- * If I/O fails, just return error(ie, non-zero) without
- * calling page_endio.
- * It causes resubmit the I/O with bio request by upper functions
- * of rw_page(e.g., swap_readpage, __swap_writepage) and
- * bio->bi_end_io does things to handle the error
- * (e.g., SetPageError, set_page_dirty and extra works).
- */
- if (unlikely(ret < 0))
- return ret;
-
- switch (ret) {
- case 0:
- page_endio(page, op_is_write(op), 0);
- break;
- case 1:
- ret = 0;
- break;
- default:
- WARN_ON(1);
- }
- return ret;
-}
-
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
@@ -2290,7 +2116,6 @@ static const struct block_device_operations zram_devops = {
.open = zram_open,
.submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
- .rw_page = zram_rw_page,
.owner = THIS_MODULE
};
@@ -2385,11 +2210,11 @@ static int zram_add(void)
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
- /* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
+ /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
/*
* To ensure that we always get PAGE_SIZE aligned
@@ -2490,8 +2315,8 @@ static int zram_remove(struct zram *zram)
* creates a new un-initialized zram device and returns back this device's
* device_id (or an error code if it fails to create a new device).
*/
-static ssize_t hot_add_show(struct class *class,
- struct class_attribute *attr,
+static ssize_t hot_add_show(const struct class *class,
+ const struct class_attribute *attr,
char *buf)
{
int ret;
@@ -2504,11 +2329,12 @@ static ssize_t hot_add_show(struct class *class,
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+/* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
static struct class_attribute class_attr_hot_add =
__ATTR(hot_add, 0400, hot_add_show, NULL);
-static ssize_t hot_remove_store(struct class *class,
- struct class_attribute *attr,
+static ssize_t hot_remove_store(const struct class *class,
+ const struct class_attribute *attr,
const char *buf,
size_t count)
{
@@ -2547,7 +2373,6 @@ ATTRIBUTE_GROUPS(zram_control_class);
static struct class zram_control_class = {
.name = "zram-control",
- .owner = THIS_MODULE,
.class_groups = zram_control_class_groups,
};
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index c5254626f051..ca7a15bd4845 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -78,7 +78,6 @@ struct zram_stats {
atomic64_t compr_data_size; /* compressed size of pages stored */
atomic64_t failed_reads; /* can happen when memory is too low */
atomic64_t failed_writes; /* can happen when memory is too low */
- atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
atomic64_t same_pages; /* no. of same element filled pages */
atomic64_t huge_pages; /* no. of huge pages */
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 5a1a7bec3c42..bc211c324206 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -363,6 +363,7 @@ config BT_HCIBLUECARD
config BT_HCIVHCI
tristate "HCI VHCI (Virtual HCI device) driver"
+ select WANT_DEV_COREDUMP
help
Bluetooth Virtual HCI device driver.
This driver is required if you want to use HCI Emulation software.
@@ -465,4 +466,17 @@ config BT_VIRTIO
Say Y here to compile support for HCI over Virtio into the
kernel or say M to compile as a module.
+config BT_NXPUART
+ tristate "NXP protocol support"
+ depends on SERIAL_DEV_BUS
+ select CRC32
+ select CRC8
+ help
+ NXP is the serial driver required for NXP Bluetooth
+ devices with a UART interface.
+
+ Say Y here to compile support for the NXP Bluetooth UART device into
+ the kernel, or say M here to compile it as a module (btnxpuart).
+
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index e0b261f24fc9..7a5967e9ac48 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_BT_QCA) += btqca.o
obj-$(CONFIG_BT_MTK) += btmtk.o
obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o
+obj-$(CONFIG_BT_NXPUART) += btnxpuart.o
obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 3006e2a0f37e..de2ea589aa49 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -6,6 +6,7 @@
* Copyright (C) 2015 Intel Corporation
*/
+#include <linux/efi.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/dmi.h>
@@ -34,6 +35,43 @@
/* For kmalloc-ing the fw-name array instead of putting it on the stack */
typedef char bcm_fw_name[BCM_FW_NAME_LEN];
+#ifdef CONFIG_EFI
+static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev)
+{
+ efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f,
+ 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13);
+ bdaddr_t efi_bdaddr, bdaddr;
+ efi_status_t status;
+ unsigned long len;
+ int ret;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return -EOPNOTSUPP;
+
+ len = sizeof(efi_bdaddr);
+ status = efi.get_variable(L"BDADDR", &guid, NULL, &len, &efi_bdaddr);
+ if (status != EFI_SUCCESS)
+ return -ENXIO;
+
+ if (len != sizeof(efi_bdaddr))
+ return -EIO;
+
+ baswap(&bdaddr, &efi_bdaddr);
+
+ ret = btbcm_set_bdaddr(hdev, &bdaddr);
+ if (ret)
+ return ret;
+
+ bt_dev_info(hdev, "BCM: Using EFI device address (%pMR)", &bdaddr);
+ return 0;
+}
+#else
+static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
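The new EFI fallback above reads a 6-byte BDADDR variable and runs it through baswap() before handing it to btbcm_set_bdaddr(), which suggests the variable is stored in the opposite byte order from the kernel's bdaddr_t. A standalone sketch of that byte reversal; the address is hypothetical and the helper below only mirrors what baswap() does, it is not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

/* bdaddr_t is six bytes, least-significant byte first */
struct bdaddr { uint8_t b[6]; };

/* reverse the byte order, mirroring what baswap() does */
static void bdaddr_swap(struct bdaddr *dst, const struct bdaddr *src)
{
	for (int i = 0; i < 6; i++)
		dst->b[i] = src->b[5 - i];
}

int main(void)
{
	/* hypothetical value as it might sit in the EFI variable */
	struct bdaddr efi = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
	struct bdaddr bd;

	bdaddr_swap(&bd, &efi);
	printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
	       bd.b[5], bd.b[4], bd.b[3], bd.b[2], bd.b[1], bd.b[0]);
	return 0;
}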
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -87,9 +125,12 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
!bacmp(&bda->bdaddr, BDADDR_BCM4345C5) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
- bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
- &bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ /* Try falling back to BDADDR EFI variable */
+ if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
+ bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
+ &bda->bdaddr);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
}
kfree_skb(skb);
@@ -511,7 +552,7 @@ static const char *btbcm_get_board_name(struct device *dev)
len = strlen(tmp) + 1;
board_type = devm_kzalloc(dev, len, GFP_KERNEL);
strscpy(board_type, tmp, len);
- for (i = 0; i < board_type[i]; i++) {
+ for (i = 0; i < len; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d4e2cb9a4eb4..d9349ba48281 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/acpi.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -24,6 +25,16 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
+#define BTINTEL_PPAG_NAME "PPAG"
+
+/* structure to store the PPAG data read from ACPI table */
+struct btintel_ppag {
+ u32 domain;
+ u32 mode;
+ acpi_status status;
+ struct hci_dev *hdev;
+};
+
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -32,6 +43,12 @@ struct cmd_write_boot_params {
u8 fw_build_yy;
} __packed;
+static struct {
+ const char *driver_name;
+ u8 hw_variant;
+ u32 fw_build_num;
+} coredump_info;
+
int btintel_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -304,6 +321,9 @@ int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
return -EINVAL;
}
+ coredump_info.hw_variant = ver->hw_variant;
+ coredump_info.fw_build_num = ver->fw_build_num;
+
bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
ver->fw_build_num, ver->fw_build_ww,
@@ -498,6 +518,9 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
return -EINVAL;
}
+ coredump_info.hw_variant = INTEL_HW_VARIANT(version->cnvi_bt);
+ coredump_info.fw_build_num = version->build_num;
+
bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
2000 + (version->timestamp >> 8), version->timestamp & 0xff,
version->build_type, version->build_num);
@@ -1278,6 +1301,65 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
return 0;
}
+static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
+ void **ret)
+{
+ acpi_status status;
+ size_t len;
+ struct btintel_ppag *ppag = data;
+ union acpi_object *p, *elements;
+ struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct hci_dev *hdev = ppag->hdev;
+
+ status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
+ return status;
+ }
+
+ len = strlen(string.pointer);
+ if (len < strlen(BTINTEL_PPAG_NAME)) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+
+ if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+ kfree(string.pointer);
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ppag->status = status;
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
+ return status;
+ }
+
+ p = buffer.pointer;
+ ppag = (struct btintel_ppag *)data;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
+ kfree(buffer.pointer);
+ bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ ppag->status = AE_ERROR;
+ return AE_ERROR;
+ }
+
+ elements = p->package.elements;
+
+ /* PPAG table is located at element[1] */
+ p = &elements[1];
+
+ ppag->domain = (u32)p->package.elements[0].integer.value;
+ ppag->mode = (u32)p->package.elements[1].integer.value;
+ ppag->status = AE_OK;
+ kfree(buffer.pointer);
+ return AE_CTRL_TERMINATE;
+}
+
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
@@ -1392,6 +1474,59 @@ int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
}
EXPORT_SYMBOL_GPL(btintel_set_quality_report);
+static void btintel_coredump(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc4e, 0, NULL, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Coredump failed (%ld)", PTR_ERR(skb));
+ return;
+ }
+
+ kfree_skb(skb);
+}
+
+static void btintel_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ char buf[80];
+
+ snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
+ coredump_info.hw_variant);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n",
+ coredump_info.fw_build_num);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info.driver_name);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor: Intel\n");
+ skb_put_data(skb, buf, strlen(buf));
+}
+
+static int btintel_register_devcoredump_support(struct hci_dev *hdev)
+{
+ struct intel_debug_features features;
+ int err;
+
+ err = btintel_read_debug_features(hdev, &features);
+ if (err) {
+ bt_dev_info(hdev, "Error reading debug features");
+ return err;
+ }
+
+ if (!(features.page1[0] & 0x3f)) {
+ bt_dev_dbg(hdev, "Telemetry exception format not supported");
+ return -EOPNOTSUPP;
+ }
+
+ hci_devcd_register(hdev, btintel_coredump, btintel_dmp_hdr, NULL);
+
+ return err;
+}
+
static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -2251,6 +2386,64 @@ error:
return err;
}
+static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ struct btintel_ppag ppag;
+ struct sk_buff *skb;
+ struct btintel_loc_aware_reg ppag_cmd;
+ acpi_handle handle;
+
+ /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
+ switch (ver->cnvr_top & 0xFFF) {
+ case 0x504: /* Hrp2 */
+ case 0x202: /* Jfp2 */
+ case 0x201: /* Jfp1 */
+ return;
+ }
+
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_info(hdev, "No support for BT device in ACPI firmware");
+ return;
+ }
+
+ memset(&ppag, 0, sizeof(ppag));
+
+ ppag.hdev = hdev;
+ ppag.status = AE_NOT_FOUND;
+ acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
+ btintel_ppag_callback, &ppag, NULL);
+
+ if (ACPI_FAILURE(ppag.status)) {
+ if (ppag.status == AE_NOT_FOUND) {
+ bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
+ return;
+ }
+ return;
+ }
+
+ if (ppag.domain != 0x12) {
+ bt_dev_warn(hdev, "PPAG-BT: domain is not bluetooth");
+ return;
+ }
+
+ /* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */
+ if (!(ppag.mode & BIT(0))) {
+ bt_dev_dbg(hdev, "PPAG-BT: disabled");
+ return;
+ }
+
+ ppag_cmd.mcc = cpu_to_le32(0);
+ ppag_cmd.sel = cpu_to_le32(0); /* 0 - Enable , 1 - Disable, 2 - Testing mode */
+ ppag_cmd.delta = cpu_to_le32(0);
+ skb = __hci_cmd_sync(hdev, 0xfe19, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
+ return;
+ }
+ kfree_skb(skb);
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2297,6 +2490,9 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Set PPAG feature */
+ btintel_set_ppag(hdev, ver);
+
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
if (err)
@@ -2466,6 +2662,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
btintel_set_msft_opcode(hdev, ver.hw_variant);
err = btintel_bootloader_setup(hdev, &ver);
+ btintel_register_devcoredump_support(hdev);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
@@ -2539,6 +2736,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
btintel_set_msft_opcode(hdev, ver.hw_variant);
err = btintel_bootloader_setup(hdev, &ver);
+ btintel_register_devcoredump_support(hdev);
break;
case 0x17:
case 0x18:
@@ -2553,15 +2751,15 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
- /* Valid LE States quirk for GfP */
- if (INTEL_HW_VARIANT(ver_tlv.cnvi_bt) == 0x18)
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ /* Apply LE States quirk from solar onwards */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+ btintel_register_devcoredump_support(hdev);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
@@ -2611,7 +2809,7 @@ static int btintel_shutdown_combined(struct hci_dev *hdev)
return 0;
}
-int btintel_configure_setup(struct hci_dev *hdev)
+int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
{
hdev->manufacturer = 2;
hdev->setup = btintel_setup_combined;
@@ -2620,6 +2818,8 @@ int btintel_configure_setup(struct hci_dev *hdev)
hdev->set_diag = btintel_set_diag_combined;
hdev->set_bdaddr = btintel_set_bdaddr;
+ coredump_info.driver_name = driver_name;
+
return 0;
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index e0060e58573c..d6a1dc8d8a82 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -137,6 +137,19 @@ struct intel_offload_use_cases {
__u8 preset[8];
} __packed;
+struct btintel_loc_aware_reg {
+ __le32 mcc;
+ __le32 sel;
+ __le32 delta;
+} __packed;
+
+#define INTEL_TLV_TYPE_ID 0x01
+
+#define INTEL_TLV_SYSTEM_EXCEPTION 0x00
+#define INTEL_TLV_FATAL_EXCEPTION 0x01
+#define INTEL_TLV_DEBUG_EXCEPTION 0x02
+#define INTEL_TLV_TEST_EXCEPTION 0xDE
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
@@ -206,7 +219,7 @@ int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
int btintel_download_firmware(struct hci_dev *dev, struct intel_version *ver,
const struct firmware *fw, u32 *boot_param);
-int btintel_configure_setup(struct hci_dev *hdev);
+int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name);
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
@@ -287,7 +300,8 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
return -EOPNOTSUPP;
}
-static inline int btintel_configure_setup(struct hci_dev *hdev)
+static inline int btintel_configure_setup(struct hci_dev *hdev,
+ const char *driver_name)
{
return -ENODEV;
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index ba057ebfda5c..d76c799553aa 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -40,7 +40,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
-static const struct of_device_id btmrvl_sdio_of_match_table[] = {
+static const struct of_device_id btmrvl_sdio_of_match_table[] __maybe_unused = {
{ .compatible = "marvell,sd8897-bt" },
{ .compatible = "marvell,sd8997-bt" },
{ }
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index c98691cdbbd5..7680c67cdb35 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -959,16 +959,16 @@ static void btmtkuart_remove(struct serdev_device *serdev)
hci_free_dev(hdev);
}
-static const struct btmtkuart_data mt7622_data = {
+static const struct btmtkuart_data mt7622_data __maybe_unused = {
.fwname = FIRMWARE_MT7622,
};
-static const struct btmtkuart_data mt7663_data = {
+static const struct btmtkuart_data mt7663_data __maybe_unused = {
.flags = BTMTKUART_FLAG_STANDALONE_HW,
.fwname = FIRMWARE_MT7663,
};
-static const struct btmtkuart_data mt7668_data = {
+static const struct btmtkuart_data mt7668_data __maybe_unused = {
.flags = BTMTKUART_FLAG_STANDALONE_HW,
.fwname = FIRMWARE_MT7668,
};
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
new file mode 100644
index 000000000000..3a34d7c1475b
--- /dev/null
+++ b/drivers/bluetooth/btnxpuart.c
@@ -0,0 +1,1352 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NXP Bluetooth driver
+ * Copyright 2023 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/serdev.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <asm/unaligned.h>
+#include <linux/firmware.h>
+#include <linux/string.h>
+#include <linux/crc8.h>
+#include <linux/crc32.h>
+#include <linux/string_helpers.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define MANUFACTURER_NXP 37
+
+#define BTNXPUART_TX_STATE_ACTIVE 1
+#define BTNXPUART_FW_DOWNLOADING 2
+#define BTNXPUART_CHECK_BOOT_SIGNATURE 3
+#define BTNXPUART_SERDEV_OPEN 4
+
+#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin"
+#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin"
+#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin"
+#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin"
+#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se"
+#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin"
+
+#define CHIP_ID_W9098 0x5c03
+#define CHIP_ID_IW416 0x7201
+#define CHIP_ID_IW612 0x7601
+
+#define HCI_NXP_PRI_BAUDRATE 115200
+#define HCI_NXP_SEC_BAUDRATE 3000000
+
+#define MAX_FW_FILE_NAME_LEN 50
+
+/* Default ps timeout period in milliseconds */
+#define PS_DEFAULT_TIMEOUT_PERIOD_MS 2000
+
+/* wakeup methods */
+#define WAKEUP_METHOD_DTR 0
+#define WAKEUP_METHOD_BREAK 1
+#define WAKEUP_METHOD_EXT_BREAK 2
+#define WAKEUP_METHOD_RTS 3
+#define WAKEUP_METHOD_INVALID 0xff
+
+/* power save mode status */
+#define PS_MODE_DISABLE 0
+#define PS_MODE_ENABLE 1
+
+/* Power Save Commands to ps_work_func */
+#define PS_CMD_EXIT_PS 1
+#define PS_CMD_ENTER_PS 2
+
+/* power save state */
+#define PS_STATE_AWAKE 0
+#define PS_STATE_SLEEP 1
+
+/* Bluetooth vendor command : Sleep mode */
+#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23
+/* Bluetooth vendor command : Wakeup method */
+#define HCI_NXP_WAKEUP_METHOD 0xfc53
+/* Bluetooth vendor command : Set operational baudrate */
+#define HCI_NXP_SET_OPER_SPEED 0xfc09
+/* Bluetooth vendor command: Independent Reset */
+#define HCI_NXP_IND_RESET 0xfcfc
+
+/* Bluetooth Power State : Vendor cmd params */
+#define BT_PS_ENABLE 0x02
+#define BT_PS_DISABLE 0x03
+
+/* Bluetooth Host Wakeup Methods */
+#define BT_HOST_WAKEUP_METHOD_NONE 0x00
+#define BT_HOST_WAKEUP_METHOD_DTR 0x01
+#define BT_HOST_WAKEUP_METHOD_BREAK 0x02
+#define BT_HOST_WAKEUP_METHOD_GPIO 0x03
+
+/* Bluetooth Chip Wakeup Methods */
+#define BT_CTRL_WAKEUP_METHOD_DSR 0x00
+#define BT_CTRL_WAKEUP_METHOD_BREAK 0x01
+#define BT_CTRL_WAKEUP_METHOD_GPIO 0x02
+#define BT_CTRL_WAKEUP_METHOD_EXT_BREAK 0x04
+#define BT_CTRL_WAKEUP_METHOD_RTS 0x05
+
+struct ps_data {
+ u8 target_ps_mode; /* ps mode to be set */
+ u8 cur_psmode; /* current ps_mode */
+ u8 ps_state; /* controller's power save state */
+ u8 ps_cmd;
+ u8 h2c_wakeupmode;
+ u8 cur_h2c_wakeupmode;
+ u8 c2h_wakeupmode;
+ u8 c2h_wakeup_gpio;
+ u8 h2c_wakeup_gpio;
+ bool driver_sent_cmd;
+ u16 h2c_ps_interval;
+ u16 c2h_ps_interval;
+ struct hci_dev *hdev;
+ struct work_struct work;
+ struct timer_list ps_timer;
+};
+
+struct wakeup_cmd_payload {
+ u8 c2h_wakeupmode;
+ u8 c2h_wakeup_gpio;
+ u8 h2c_wakeupmode;
+ u8 h2c_wakeup_gpio;
+} __packed;
+
+struct psmode_cmd_payload {
+ u8 ps_cmd;
+ __le16 c2h_ps_interval;
+} __packed;
+
+struct btnxpuart_data {
+ const char *helper_fw_name;
+ const char *fw_name;
+};
+
+struct btnxpuart_dev {
+ struct hci_dev *hdev;
+ struct serdev_device *serdev;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+ struct sk_buff *rx_skb;
+
+ const struct firmware *fw;
+ u8 fw_name[MAX_FW_FILE_NAME_LEN];
+ u32 fw_dnld_v1_offset;
+ u32 fw_v1_sent_bytes;
+ u32 fw_v3_offset_correction;
+ u32 fw_v1_expected_len;
+ wait_queue_head_t fw_dnld_done_wait_q;
+ wait_queue_head_t check_boot_sign_wait_q;
+
+ u32 new_baudrate;
+ u32 current_baudrate;
+ u32 fw_init_baudrate;
+ bool timeout_changed;
+ bool baudrate_changed;
+ bool helper_downloaded;
+
+ struct ps_data psdata;
+ struct btnxpuart_data *nxp_data;
+};
+
+#define NXP_V1_FW_REQ_PKT 0xa5
+#define NXP_V1_CHIP_VER_PKT 0xaa
+#define NXP_V3_FW_REQ_PKT 0xa7
+#define NXP_V3_CHIP_VER_PKT 0xab
+
+#define NXP_ACK_V1 0x5a
+#define NXP_NAK_V1 0xbf
+#define NXP_ACK_V3 0x7a
+#define NXP_NAK_V3 0x7b
+#define NXP_CRC_ERROR_V3 0x7c
+
+#define HDR_LEN 16
+
+#define NXP_RECV_CHIP_VER_V1 \
+ .type = NXP_V1_CHIP_VER_PKT, \
+ .hlen = 4, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = 4
+
+#define NXP_RECV_FW_REQ_V1 \
+ .type = NXP_V1_FW_REQ_PKT, \
+ .hlen = 4, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = 4
+
+#define NXP_RECV_CHIP_VER_V3 \
+ .type = NXP_V3_CHIP_VER_PKT, \
+ .hlen = 4, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = 4
+
+#define NXP_RECV_FW_REQ_V3 \
+ .type = NXP_V3_FW_REQ_PKT, \
+ .hlen = 9, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = 9
+
+struct v1_data_req {
+ __le16 len;
+ __le16 len_comp;
+} __packed;
+
+struct v1_start_ind {
+ __le16 chip_id;
+ __le16 chip_id_comp;
+} __packed;
+
+struct v3_data_req {
+ __le16 len;
+ __le32 offset;
+ __le16 error;
+ u8 crc;
+} __packed;
+
+struct v3_start_ind {
+ __le16 chip_id;
+ u8 loader_ver;
+ u8 crc;
+} __packed;
+
+/* UART register addresses of BT chip */
+#define CLKDIVADDR 0x7f00008f
+#define UARTDIVADDR 0x7f000090
+#define UARTMCRADDR 0x7f000091
+#define UARTREINITADDR 0x7f000092
+#define UARTICRADDR 0x7f000093
+#define UARTFCRADDR 0x7f000094
+
+#define MCR 0x00000022
+#define INIT 0x00000001
+#define ICR 0x000000c7
+#define FCR 0x000000c7
+
+#define POLYNOMIAL8 0x07
+
+struct uart_reg {
+ __le32 address;
+ __le32 value;
+} __packed;
+
+struct uart_config {
+ struct uart_reg clkdiv;
+ struct uart_reg uartdiv;
+ struct uart_reg mcr;
+ struct uart_reg re_init;
+ struct uart_reg icr;
+ struct uart_reg fcr;
+ __be32 crc;
+} __packed;
+
+struct nxp_bootloader_cmd {
+ __le32 header;
+ __le32 arg;
+ __le32 payload_len;
+ __be32 crc;
+} __packed;
+
+static u8 crc8_table[CRC8_TABLE_SIZE];
+
+/* Default configurations */
+#define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK
+#define DEFAULT_PS_MODE PS_MODE_DISABLE
+#define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE
+
+static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
+ u32 plen,
+ void *param)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+ struct sk_buff *skb;
+
+ /* set flag to prevent nxp_enqueue from parsing values from this command and
+ * calling hci_cmd_sync_queue() again.
+ */
+ psdata->driver_sent_cmd = true;
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ psdata->driver_sent_cmd = false;
+
+ return skb;
+}
+
+static void btnxpuart_tx_wakeup(struct btnxpuart_dev *nxpdev)
+{
+ if (schedule_work(&nxpdev->tx_work))
+ set_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state);
+}
+
+/* NXP Power Save Feature */
+static void ps_start_timer(struct btnxpuart_dev *nxpdev)
+{
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (!psdata)
+ return;
+
+ if (psdata->cur_psmode == PS_MODE_ENABLE)
+ mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval));
+}
+
+static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
+{
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ flush_work(&psdata->work);
+ del_timer_sync(&psdata->ps_timer);
+}
+
+static void ps_control(struct hci_dev *hdev, u8 ps_state)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+ int status;
+
+ if (psdata->ps_state == ps_state ||
+ !test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
+ return;
+
+ switch (psdata->cur_h2c_wakeupmode) {
+ case WAKEUP_METHOD_DTR:
+ if (ps_state == PS_STATE_AWAKE)
+ status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0);
+ else
+ status = serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR);
+ break;
+ case WAKEUP_METHOD_BREAK:
+ default:
+ if (ps_state == PS_STATE_AWAKE)
+ status = serdev_device_break_ctl(nxpdev->serdev, 0);
+ else
+ status = serdev_device_break_ctl(nxpdev->serdev, -1);
+ bt_dev_dbg(hdev, "Set UART break: %s, status=%d",
+ str_on_off(ps_state == PS_STATE_SLEEP), status);
+ break;
+ }
+ if (!status)
+ psdata->ps_state = ps_state;
+ if (ps_state == PS_STATE_AWAKE)
+ btnxpuart_tx_wakeup(nxpdev);
+}
+
+static void ps_work_func(struct work_struct *work)
+{
+ struct ps_data *data = container_of(work, struct ps_data, work);
+
+ if (data->ps_cmd == PS_CMD_ENTER_PS && data->cur_psmode == PS_MODE_ENABLE)
+ ps_control(data->hdev, PS_STATE_SLEEP);
+ else if (data->ps_cmd == PS_CMD_EXIT_PS)
+ ps_control(data->hdev, PS_STATE_AWAKE);
+}
+
+static void ps_timeout_func(struct timer_list *t)
+{
+ struct ps_data *data = from_timer(data, t, ps_timer);
+ struct hci_dev *hdev = data->hdev;
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ if (test_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state)) {
+ ps_start_timer(nxpdev);
+ } else {
+ data->ps_cmd = PS_CMD_ENTER_PS;
+ schedule_work(&data->work);
+ }
+}
+
+static int ps_init_work(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS;
+ psdata->ps_state = PS_STATE_AWAKE;
+ psdata->target_ps_mode = DEFAULT_PS_MODE;
+ psdata->hdev = hdev;
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
+ psdata->c2h_wakeup_gpio = 0xff;
+
+ switch (DEFAULT_H2C_WAKEUP_MODE) {
+ case WAKEUP_METHOD_DTR:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
+ break;
+ case WAKEUP_METHOD_BREAK:
+ default:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK;
+ break;
+ }
+ psdata->cur_psmode = PS_MODE_DISABLE;
+ psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
+ INIT_WORK(&psdata->work, ps_work_func);
+
+ return 0;
+}
+
+static void ps_init_timer(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
+}
+
+static void ps_wakeup(struct btnxpuart_dev *nxpdev)
+{
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (psdata->ps_state != PS_STATE_AWAKE) {
+ psdata->ps_cmd = PS_CMD_EXIT_PS;
+ schedule_work(&psdata->work);
+ }
+}
+
+static int send_ps_cmd(struct hci_dev *hdev, void *data)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+ struct psmode_cmd_payload pcmd;
+ struct sk_buff *skb;
+ u8 *status;
+
+ if (psdata->target_ps_mode == PS_MODE_ENABLE)
+ pcmd.ps_cmd = BT_PS_ENABLE;
+ else
+ pcmd.ps_cmd = BT_PS_DISABLE;
+ pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval);
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ status = skb_pull_data(skb, 1);
+ if (status) {
+ if (!*status)
+ psdata->cur_psmode = psdata->target_ps_mode;
+ else
+ psdata->target_ps_mode = psdata->cur_psmode;
+ if (psdata->cur_psmode == PS_MODE_ENABLE)
+ ps_start_timer(nxpdev);
+ else
+ ps_wakeup(nxpdev);
+ bt_dev_dbg(hdev, "Power Save mode response: status=%d, ps_mode=%d",
+ *status, psdata->cur_psmode);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+ struct wakeup_cmd_payload pcmd;
+ struct sk_buff *skb;
+ u8 *status;
+
+ pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
+ pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
+ switch (psdata->h2c_wakeupmode) {
+ case WAKEUP_METHOD_DTR:
+ pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
+ break;
+ case WAKEUP_METHOD_BREAK:
+ default:
+ pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK;
+ break;
+ }
+ pcmd.h2c_wakeup_gpio = 0xff;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ status = skb_pull_data(skb, 1);
+ if (status) {
+ if (*status == 0)
+ psdata->cur_h2c_wakeupmode = psdata->h2c_wakeupmode;
+ else
+ psdata->h2c_wakeupmode = psdata->cur_h2c_wakeupmode;
+ bt_dev_dbg(hdev, "Set Wakeup Method response: status=%d, h2c_wakeupmode=%d",
+ *status, psdata->cur_h2c_wakeupmode);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static void ps_init(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS);
+ usleep_range(5000, 10000);
+ serdev_device_set_tiocm(nxpdev->serdev, TIOCM_RTS, 0);
+ usleep_range(5000, 10000);
+
+ switch (psdata->h2c_wakeupmode) {
+ case WAKEUP_METHOD_DTR:
+ serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR);
+ serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0);
+ break;
+ case WAKEUP_METHOD_BREAK:
+ default:
+ serdev_device_break_ctl(nxpdev->serdev, -1);
+ usleep_range(5000, 10000);
+ serdev_device_break_ctl(nxpdev->serdev, 0);
+ usleep_range(5000, 10000);
+ break;
+ }
+ if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
+ hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
+ if (psdata->cur_psmode != psdata->target_ps_mode)
+ hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
+}
+
+/* NXP Firmware Download Feature */
+static int nxp_download_firmware(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ int err = 0;
+
+ nxpdev->fw_dnld_v1_offset = 0;
+ nxpdev->fw_v1_sent_bytes = 0;
+ nxpdev->fw_v1_expected_len = HDR_LEN;
+ nxpdev->fw_v3_offset_correction = 0;
+ nxpdev->baudrate_changed = false;
+ nxpdev->timeout_changed = false;
+ nxpdev->helper_downloaded = false;
+
+ serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
+ serdev_device_set_flow_control(nxpdev->serdev, false);
+ nxpdev->current_baudrate = HCI_NXP_PRI_BAUDRATE;
+
+ /* Wait till FW is downloaded and CTS becomes low */
+ err = wait_event_interruptible_timeout(nxpdev->fw_dnld_done_wait_q,
+ !test_bit(BTNXPUART_FW_DOWNLOADING,
+ &nxpdev->tx_state),
+ msecs_to_jiffies(60000));
+ if (err == 0) {
+ bt_dev_err(hdev, "FW Download Timeout.");
+ return -ETIMEDOUT;
+ }
+
+ serdev_device_set_flow_control(nxpdev->serdev, true);
+ err = serdev_device_wait_for_cts(nxpdev->serdev, 1, 60000);
+ if (err < 0) {
+ bt_dev_err(hdev, "CTS is still high. FW Download failed.");
+ return err;
+ }
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+
+ /* Allow the downloaded FW to initialize */
+ usleep_range(800 * USEC_PER_MSEC, 1 * USEC_PER_SEC);
+
+ return 0;
+}
+
+static void nxp_send_ack(u8 ack, struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ u8 ack_nak[2];
+ int len = 1;
+
+ ack_nak[0] = ack;
+ if (ack == NXP_ACK_V3) {
+ ack_nak[1] = crc8(crc8_table, ack_nak, 1, 0xff);
+ len = 2;
+ }
+ serdev_device_write_buf(nxpdev->serdev, ack_nak, len);
+}
+
+static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct nxp_bootloader_cmd nxp_cmd5;
+ struct uart_config uart_config;
+
+ if (req_len == sizeof(nxp_cmd5)) {
+ nxp_cmd5.header = __cpu_to_le32(5);
+ nxp_cmd5.arg = 0;
+ nxp_cmd5.payload_len = __cpu_to_le32(sizeof(uart_config));
+ /* FW expects swapped CRC bytes */
+ nxp_cmd5.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd5,
+ sizeof(nxp_cmd5) - 4));
+
+ serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd5, sizeof(nxp_cmd5));
+ nxpdev->fw_v3_offset_correction += req_len;
+ } else if (req_len == sizeof(uart_config)) {
+ uart_config.clkdiv.address = __cpu_to_le32(CLKDIVADDR);
+ uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
+ uart_config.uartdiv.address = __cpu_to_le32(UARTDIVADDR);
+ uart_config.uartdiv.value = __cpu_to_le32(1);
+ uart_config.mcr.address = __cpu_to_le32(UARTMCRADDR);
+ uart_config.mcr.value = __cpu_to_le32(MCR);
+ uart_config.re_init.address = __cpu_to_le32(UARTREINITADDR);
+ uart_config.re_init.value = __cpu_to_le32(INIT);
+ uart_config.icr.address = __cpu_to_le32(UARTICRADDR);
+ uart_config.icr.value = __cpu_to_le32(ICR);
+ uart_config.fcr.address = __cpu_to_le32(UARTFCRADDR);
+ uart_config.fcr.value = __cpu_to_le32(FCR);
+ /* FW expects swapped CRC bytes */
+ uart_config.crc = __cpu_to_be32(crc32_be(0UL, (char *)&uart_config,
+ sizeof(uart_config) - 4));
+
+ serdev_device_write_buf(nxpdev->serdev, (u8 *)&uart_config, sizeof(uart_config));
+ serdev_device_wait_until_sent(nxpdev->serdev, 0);
+ nxpdev->fw_v3_offset_correction += req_len;
+ return true;
+ }
+ return false;
+}
+
+static bool nxp_fw_change_timeout(struct hci_dev *hdev, u16 req_len)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct nxp_bootloader_cmd nxp_cmd7;
+
+ if (req_len != sizeof(nxp_cmd7))
+ return false;
+
+ nxp_cmd7.header = __cpu_to_le32(7);
+ nxp_cmd7.arg = __cpu_to_le32(0x70);
+ nxp_cmd7.payload_len = 0;
+ /* FW expects swapped CRC bytes */
+ nxp_cmd7.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd7,
+ sizeof(nxp_cmd7) - 4));
+ serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd7, sizeof(nxp_cmd7));
+ serdev_device_wait_until_sent(nxpdev->serdev, 0);
+ nxpdev->fw_v3_offset_correction += req_len;
+ return true;
+}
+
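nxp_fw_change_timeout() and nxp_fw_change_baudrate() above both append a CRC computed with crc32_be() over everything except the trailing 4-byte field, stored big-endian ("swapped CRC bytes"). A userspace approximation, assuming crc32_be() is an MSB-first CRC-32 with polynomial 0x04C11DB7, no reflection and no final XOR, seeded with 0 as in the driver; it also assumes a little-endian host so the struct fields land in wire order:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bit-by-bit MSB-first CRC-32, matching the crc32_be() semantics assumed here */
static uint32_t crc32_be_sw(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint32_t)*buf++ << 24;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04C11DB7u
						  : crc << 1;
	}
	return crc;
}

struct bootloader_cmd {		/* mirrors struct nxp_bootloader_cmd */
	uint32_t header;	/* little-endian on the wire */
	uint32_t arg;
	uint32_t payload_len;
	uint32_t crc;		/* big-endian, computed over the first 12 bytes */
};

int main(void)
{
	struct bootloader_cmd cmd = { .header = 7, .arg = 0x70, .payload_len = 0 };
	uint8_t wire[16];

	memcpy(wire, &cmd, 12);			/* assumes a little-endian host */
	uint32_t crc = crc32_be_sw(0, wire, 12);
	wire[12] = crc >> 24;			/* store CRC big-endian */
	wire[13] = crc >> 16;
	wire[14] = crc >> 8;
	wire[15] = crc;
	printf("cmd7 CRC = 0x%08x\n", (unsigned)crc);
	return 0;
}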
+static u32 nxp_get_data_len(const u8 *buf)
+{
+ struct nxp_bootloader_cmd *hdr = (struct nxp_bootloader_cmd *)buf;
+
+ return __le32_to_cpu(hdr->payload_len);
+}
+
+static bool is_fw_downloading(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+}
+
+static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
+{
+ if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) {
+ clear_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state);
+ wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
+ return false;
+ }
+ return is_fw_downloading(nxpdev);
+}
+
+static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ int err = 0;
+
+ if (!strlen(nxpdev->fw_name)) {
+ snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name);
+
+ bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name);
+ err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name);
+ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ }
+ }
+ return err;
+}
+
+/* for legacy chipsets with V1 bootloader */
+static int nxp_recv_chip_ver_v1(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct v1_start_ind *req;
+ __u16 chip_id;
+
+ req = skb_pull_data(skb, sizeof(*req));
+ if (!req)
+ goto free_skb;
+
+ chip_id = le16_to_cpu(req->chip_id ^ req->chip_id_comp);
+ if (chip_id == 0xffff) {
+ nxpdev->fw_dnld_v1_offset = 0;
+ nxpdev->fw_v1_sent_bytes = 0;
+ nxpdev->fw_v1_expected_len = HDR_LEN;
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ nxp_send_ack(NXP_ACK_V1, hdev);
+ }
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct btnxpuart_data *nxp_data = nxpdev->nxp_data;
+ struct v1_data_req *req;
+ __u16 len;
+
+ if (!process_boot_signature(nxpdev))
+ goto free_skb;
+
+ req = skb_pull_data(skb, sizeof(*req));
+ if (!req)
+ goto free_skb;
+
+ len = __le16_to_cpu(req->len ^ req->len_comp);
+ if (len != 0xffff) {
+ bt_dev_dbg(hdev, "ERR: Send NAK");
+ nxp_send_ack(NXP_NAK_V1, hdev);
+ goto free_skb;
+ }
+ nxp_send_ack(NXP_ACK_V1, hdev);
+
+ len = __le16_to_cpu(req->len);
+
+ if (!nxp_data->helper_fw_name) {
+ if (!nxpdev->timeout_changed) {
+ nxpdev->timeout_changed = nxp_fw_change_timeout(hdev,
+ len);
+ goto free_skb;
+ }
+ if (!nxpdev->baudrate_changed) {
+ nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev,
+ len);
+ if (nxpdev->baudrate_changed) {
+ serdev_device_set_baudrate(nxpdev->serdev,
+ HCI_NXP_SEC_BAUDRATE);
+ serdev_device_set_flow_control(nxpdev->serdev, true);
+ nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ }
+ goto free_skb;
+ }
+ }
+
+ if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) {
+ if (nxp_request_firmware(hdev, nxp_data->fw_name))
+ goto free_skb;
+ } else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
+ if (nxp_request_firmware(hdev, nxp_data->helper_fw_name))
+ goto free_skb;
+ }
+
+ if (!len) {
+ bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
+ nxpdev->fw->size);
+ if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
+ nxpdev->helper_downloaded = true;
+ serdev_device_wait_until_sent(nxpdev->serdev, 0);
+ serdev_device_set_baudrate(nxpdev->serdev,
+ HCI_NXP_SEC_BAUDRATE);
+ serdev_device_set_flow_control(nxpdev->serdev, true);
+ } else {
+ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
+ }
+ goto free_skb;
+ }
+ if (len & 0x01) {
+ /* The CRC did not match at the other end.
+ * Simply send the same bytes again.
+ */
+ len = nxpdev->fw_v1_sent_bytes;
+ bt_dev_dbg(hdev, "CRC error. Resend %d bytes of FW.", len);
+ } else {
+ nxpdev->fw_dnld_v1_offset += nxpdev->fw_v1_sent_bytes;
+
+ /* The FW bin file is made up of many blocks of
+ * 16 byte header and payload data chunks. If the
+ * FW has requested a header, read the payload length
+ * info from the header, before sending the header.
+ * In the next iteration, the FW should request the
+ * payload data chunk, which should be equal to the
+ * payload length read from header. If there is a
+ * mismatch, clearly the driver and FW are out of sync,
+ * and we need to re-send the previous header again.
+ */
+ if (len == nxpdev->fw_v1_expected_len) {
+ if (len == HDR_LEN)
+ nxpdev->fw_v1_expected_len = nxp_get_data_len(nxpdev->fw->data +
+ nxpdev->fw_dnld_v1_offset);
+ else
+ nxpdev->fw_v1_expected_len = HDR_LEN;
+ } else if (len == HDR_LEN) {
+ /* FW download out of sync. Send previous chunk again */
+ nxpdev->fw_dnld_v1_offset -= nxpdev->fw_v1_sent_bytes;
+ nxpdev->fw_v1_expected_len = HDR_LEN;
+ }
+ }
+
+ if (nxpdev->fw_dnld_v1_offset + len <= nxpdev->fw->size)
+ serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data +
+ nxpdev->fw_dnld_v1_offset, len);
+ nxpdev->fw_v1_sent_bytes = len;
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
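In the V1 bootloader exchange handled above, each 16-bit value (the firmware request length here, the chip id in nxp_recv_chip_ver_v1()) arrives together with its one's complement, and the driver only ACKs when value XOR complement equals 0xffff. A minimal sketch of that check on made-up request data; wire endianness (the driver's __le16_to_cpu()) is ignored for brevity:

#include <stdint.h>
#include <stdio.h>

struct v1_req {			/* mirrors struct v1_data_req */
	uint16_t len;		/* requested length */
	uint16_t len_comp;	/* one's complement of len */
};

/* returns the requested length, or -1 if the complement check fails */
static int v1_check_req(const struct v1_req *req)
{
	if ((uint16_t)(req->len ^ req->len_comp) != 0xffff)
		return -1;		/* corrupted request: send NXP_NAK_V1 */
	return req->len;		/* valid: send NXP_ACK_V1, then len bytes */
}

int main(void)
{
	struct v1_req good = { .len = 16, .len_comp = (uint16_t)~16 };
	struct v1_req bad  = { .len = 16, .len_comp = 0x1234 };

	printf("good -> %d\n", v1_check_req(&good));	/* 16 */
	printf("bad  -> %d\n", v1_check_req(&bad));	/* -1 */
	return 0;
}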
+static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid)
+{
+ char *fw_name = NULL;
+
+ switch (chipid) {
+ case CHIP_ID_W9098:
+ fw_name = FIRMWARE_W9098;
+ break;
+ case CHIP_ID_IW416:
+ fw_name = FIRMWARE_IW416;
+ break;
+ case CHIP_ID_IW612:
+ fw_name = FIRMWARE_IW612;
+ break;
+ default:
+ bt_dev_err(hdev, "Unknown chip signature %04x", chipid);
+ break;
+ }
+ return fw_name;
+}
+
+static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req));
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ u16 chip_id;
+
+ if (!process_boot_signature(nxpdev))
+ goto free_skb;
+
+ chip_id = le16_to_cpu(req->chip_id);
+ if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev,
+ chip_id)))
+ nxp_send_ack(NXP_ACK_V3, hdev);
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct v3_data_req *req;
+ __u16 len;
+ __u32 offset;
+
+ if (!process_boot_signature(nxpdev))
+ goto free_skb;
+
+ req = skb_pull_data(skb, sizeof(*req));
+ if (!req || !nxpdev->fw)
+ goto free_skb;
+
+ nxp_send_ack(NXP_ACK_V3, hdev);
+
+ len = __le16_to_cpu(req->len);
+
+ if (!nxpdev->timeout_changed) {
+ nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len);
+ goto free_skb;
+ }
+
+ if (!nxpdev->baudrate_changed) {
+ nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len);
+ if (nxpdev->baudrate_changed) {
+ serdev_device_set_baudrate(nxpdev->serdev,
+ HCI_NXP_SEC_BAUDRATE);
+ serdev_device_set_flow_control(nxpdev->serdev, true);
+ nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ }
+ goto free_skb;
+ }
+
+ if (req->len == 0) {
+ bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
+ nxpdev->fw->size);
+ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
+ goto free_skb;
+ }
+ if (req->error)
+ bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip",
+ req->error);
+
+ offset = __le32_to_cpu(req->offset);
+ if (offset < nxpdev->fw_v3_offset_correction) {
+ /* This scenario should ideally never occur. But if it ever does,
+ * FW is out of sync and needs a power cycle.
+ */
+ bt_dev_err(hdev, "Something went wrong during FW download");
+ bt_dev_err(hdev, "Please power cycle and try again");
+ goto free_skb;
+ }
+
+ serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset -
+ nxpdev->fw_v3_offset_correction, len);
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __le32 new_baudrate = __cpu_to_le32(nxpdev->new_baudrate);
+ struct ps_data *psdata = &nxpdev->psdata;
+ struct sk_buff *skb;
+ u8 *status;
+
+ if (!psdata)
+ return 0;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ status = (u8 *)skb_pull_data(skb, 1);
+ if (status) {
+ if (*status == 0) {
+ serdev_device_set_baudrate(nxpdev->serdev, nxpdev->new_baudrate);
+ nxpdev->current_baudrate = nxpdev->new_baudrate;
+ }
+ bt_dev_dbg(hdev, "Set baudrate response: status=%d, baudrate=%d",
+ *status, nxpdev->new_baudrate);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int nxp_set_ind_reset(struct hci_dev *hdev, void *data)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct sk_buff *skb;
+ u8 *status;
+ u8 pcmd = 0;
+ int err = 0;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ status = skb_pull_data(skb, 1);
+ if (!status || *status)
+ goto free_skb;
+
+ set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ err = nxp_download_firmware(hdev);
+ if (err < 0)
+ goto free_skb;
+ serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
+ nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
+ if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
+ nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+ hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
+ hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
+
+free_skb:
+ kfree_skb(skb);
+ return err;
+}
+
+/* NXP protocol */
+static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev)
+{
+ serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
+ serdev_device_set_flow_control(nxpdev->serdev, true);
+ set_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state);
+
+ return wait_event_interruptible_timeout(nxpdev->check_boot_sign_wait_q,
+ !test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE,
+ &nxpdev->tx_state),
+ msecs_to_jiffies(1000));
+}
+
+static int nxp_setup(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ int err = 0;
+
+ set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ init_waitqueue_head(&nxpdev->fw_dnld_done_wait_q);
+ init_waitqueue_head(&nxpdev->check_boot_sign_wait_q);
+
+ if (nxp_check_boot_sign(nxpdev)) {
+ bt_dev_dbg(hdev, "Need FW Download.");
+ err = nxp_download_firmware(hdev);
+ if (err < 0)
+ return err;
+ } else {
+ bt_dev_dbg(hdev, "FW already running.");
+ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ }
+
+ device_property_read_u32(&nxpdev->serdev->dev, "fw-init-baudrate",
+ &nxpdev->fw_init_baudrate);
+ if (!nxpdev->fw_init_baudrate)
+ nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE;
+ serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
+ nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
+
+ if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
+ nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
+ hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL);
+ }
+
+ ps_init(hdev);
+
+ return 0;
+}
+
+static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+ skb_queue_tail(&nxpdev->txq, skb);
+ btnxpuart_tx_wakeup(nxpdev);
+ return 0;
+}
+
+static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+ struct hci_command_hdr *hdr;
+ struct psmode_cmd_payload ps_parm;
+ struct wakeup_cmd_payload wakeup_parm;
+ __le32 baudrate_parm;
+
+ /* if vendor commands are received from user space (e.g. hcitool), update
+ * driver flags accordingly and ask driver to re-send the command to FW.
+ * In case the payload for any command does not match expected payload
+ * length, let the firmware and user space program handle it, or throw
+ * an error.
+ */
+ if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT && !psdata->driver_sent_cmd) {
+ hdr = (struct hci_command_hdr *)skb->data;
+ if (hdr->plen != (skb->len - HCI_COMMAND_HDR_SIZE))
+ return btnxpuart_queue_skb(hdev, skb);
+
+ switch (__le16_to_cpu(hdr->opcode)) {
+ case HCI_NXP_AUTO_SLEEP_MODE:
+ if (hdr->plen == sizeof(ps_parm)) {
+ memcpy(&ps_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen);
+ if (ps_parm.ps_cmd == BT_PS_ENABLE)
+ psdata->target_ps_mode = PS_MODE_ENABLE;
+ else if (ps_parm.ps_cmd == BT_PS_DISABLE)
+ psdata->target_ps_mode = PS_MODE_DISABLE;
+ psdata->c2h_ps_interval = __le16_to_cpu(ps_parm.c2h_ps_interval);
+ hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
+ goto free_skb;
+ }
+ break;
+ case HCI_NXP_WAKEUP_METHOD:
+ if (hdr->plen == sizeof(wakeup_parm)) {
+ memcpy(&wakeup_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen);
+ psdata->c2h_wakeupmode = wakeup_parm.c2h_wakeupmode;
+ psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio;
+ psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio;
+ switch (wakeup_parm.h2c_wakeupmode) {
+ case BT_CTRL_WAKEUP_METHOD_DSR:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
+ break;
+ case BT_CTRL_WAKEUP_METHOD_BREAK:
+ default:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK;
+ break;
+ }
+ hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
+ goto free_skb;
+ }
+ break;
+ case HCI_NXP_SET_OPER_SPEED:
+ if (hdr->plen == sizeof(baudrate_parm)) {
+ memcpy(&baudrate_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen);
+ nxpdev->new_baudrate = __le32_to_cpu(baudrate_parm);
+ hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL);
+ goto free_skb;
+ }
+ break;
+ case HCI_NXP_IND_RESET:
+ if (hdr->plen == 1) {
+ hci_cmd_sync_queue(hdev, nxp_set_ind_reset, NULL, NULL);
+ goto free_skb;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return btnxpuart_queue_skb(hdev, skb);
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static struct sk_buff *nxp_dequeue(void *data)
+{
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data;
+
+ ps_wakeup(nxpdev);
+ ps_start_timer(nxpdev);
+ return skb_dequeue(&nxpdev->txq);
+}
+
+/* btnxpuart based on serdev */
+static void btnxpuart_tx_work(struct work_struct *work)
+{
+ struct btnxpuart_dev *nxpdev = container_of(work, struct btnxpuart_dev,
+ tx_work);
+ struct serdev_device *serdev = nxpdev->serdev;
+ struct hci_dev *hdev = nxpdev->hdev;
+ struct sk_buff *skb;
+ int len;
+
+ while ((skb = nxp_dequeue(nxpdev))) {
+ len = serdev_device_write_buf(serdev, skb->data, skb->len);
+ hdev->stat.byte_tx += len;
+
+ skb_pull(skb, len);
+ if (skb->len > 0) {
+ skb_queue_head(&nxpdev->txq, skb);
+ break;
+ }
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+ clear_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state);
+}
+
+static int btnxpuart_open(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ int err = 0;
+
+ err = serdev_device_open(nxpdev->serdev);
+ if (err) {
+ bt_dev_err(hdev, "Unable to open UART device %s",
+ dev_name(&nxpdev->serdev->dev));
+ } else {
+ set_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
+ }
+ return err;
+}
+
+static int btnxpuart_close(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ ps_wakeup(nxpdev);
+ serdev_device_close(nxpdev->serdev);
+ clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
+ return 0;
+}
+
+static int btnxpuart_flush(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ /* Flush any pending characters */
+ serdev_device_write_flush(nxpdev->serdev);
+ skb_queue_purge(&nxpdev->txq);
+
+ cancel_work_sync(&nxpdev->tx_work);
+
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+
+ return 0;
+}
+
+static const struct h4_recv_pkt nxp_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 },
+ { NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 },
+ { NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 },
+ { NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 },
+};
+
+static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+{
+ struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
+
+ ps_start_timer(nxpdev);
+
+ nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
+ if (IS_ERR(nxpdev->rx_skb)) {
+ int err = PTR_ERR(nxpdev->rx_skb);
+ /* Safe to ignore out-of-sync bootloader signatures */
+ if (is_fw_downloading(nxpdev))
+ return count;
+ bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
+ nxpdev->rx_skb = NULL;
+ return err;
+ }
+ nxpdev->hdev->stat.byte_rx += count;
+ return count;
+}
+
+static void btnxpuart_write_wakeup(struct serdev_device *serdev)
+{
+ serdev_device_write_wakeup(serdev);
+}
+
+static const struct serdev_device_ops btnxpuart_client_ops = {
+ .receive_buf = btnxpuart_receive_buf,
+ .write_wakeup = btnxpuart_write_wakeup,
+};
+
+static int nxp_serdev_probe(struct serdev_device *serdev)
+{
+ struct hci_dev *hdev;
+ struct btnxpuart_dev *nxpdev;
+
+ nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
+ if (!nxpdev)
+ return -ENOMEM;
+
+ nxpdev->nxp_data = (struct btnxpuart_data *)device_get_match_data(&serdev->dev);
+
+ nxpdev->serdev = serdev;
+ serdev_device_set_drvdata(serdev, nxpdev);
+
+ serdev_device_set_client_ops(serdev, &btnxpuart_client_ops);
+
+ INIT_WORK(&nxpdev->tx_work, btnxpuart_tx_work);
+ skb_queue_head_init(&nxpdev->txq);
+
+ crc8_populate_msb(crc8_table, POLYNOMIAL8);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&serdev->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ nxpdev->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, nxpdev);
+
+ hdev->manufacturer = MANUFACTURER_NXP;
+ hdev->open = btnxpuart_open;
+ hdev->close = btnxpuart_close;
+ hdev->flush = btnxpuart_flush;
+ hdev->setup = nxp_setup;
+ hdev->send = nxp_enqueue;
+ SET_HCIDEV_DEV(hdev, &serdev->dev);
+
+ if (hci_register_dev(hdev) < 0) {
+ dev_err(&serdev->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return -ENODEV;
+ }
+
+ ps_init_work(hdev);
+ ps_init_timer(hdev);
+
+ return 0;
+}
+
+static void nxp_serdev_remove(struct serdev_device *serdev)
+{
+ struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
+ struct hci_dev *hdev = nxpdev->hdev;
+
+ /* Restore FW baudrate to fw_init_baudrate if changed.
+ * This will ensure FW baudrate is in sync with
+ * driver baudrate in case this driver is re-inserted.
+ */
+ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+
+ ps_cancel_timer(nxpdev);
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+static struct btnxpuart_data w8987_data = {
+ .helper_fw_name = NULL,
+ .fw_name = FIRMWARE_W8987,
+};
+
+static struct btnxpuart_data w8997_data = {
+ .helper_fw_name = FIRMWARE_HELPER,
+ .fw_name = FIRMWARE_W8997,
+};
+
+static const struct of_device_id nxpuart_of_match_table[] = {
+ { .compatible = "nxp,88w8987-bt", .data = &w8987_data },
+ { .compatible = "nxp,88w8997-bt", .data = &w8997_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nxpuart_of_match_table);
+
+static struct serdev_device_driver nxp_serdev_driver = {
+ .probe = nxp_serdev_probe,
+ .remove = nxp_serdev_remove,
+ .driver = {
+ .name = "btnxpuart",
+ .of_match_table = of_match_ptr(nxpuart_of_match_table),
+ },
+};
+
+module_serdev_device_driver(nxp_serdev_driver);
+
+MODULE_AUTHOR("Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>");
+MODULE_DESCRIPTION("NXP Bluetooth Serial driver");
+MODULE_LICENSE("GPL");
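For readers new to the shared H4 reassembly helper used by nxp_recv_pkts[] and h4_recv_buf() above, below is a minimal illustrative sketch, not part of this patch, of how a vendor-specific packet descriptor is declared: .type selects the packet-type byte, .hlen/.loff/.lsize describe where the little-endian payload-length field sits in the header, .maxlen bounds the frame, and .recv is called once a complete frame has been reassembled. EXAMPLE_VND_PKT and example_recv_vnd() are hypothetical names.

/* Assumes "h4_recv.h", <net/bluetooth/bluetooth.h> and
 * <net/bluetooth/hci_core.h> are already included, as in btnxpuart.c.
 */
#define EXAMPLE_VND_PKT		0xfe	/* hypothetical packet-type byte */

static int example_recv_vnd(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real handler would parse the vendor frame here. */
	kfree_skb(skb);
	return 0;
}

static const struct h4_recv_pkt example_recv_pkts[] = {
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
	/* 4-byte header with a 2-byte LE length field at offset 2 */
	{ .type = EXAMPLE_VND_PKT, .hlen = 4, .loff = 2, .lsize = 2,
	  .maxlen = 0xffff, .recv = example_recv_vnd },
};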
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index c9064d34d830..fd0941fe8608 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -614,6 +614,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
config.type = ELF_TYPE_PATCH;
snprintf(config.fwname, sizeof(config.fwname),
"qca/msbtfw%02x.mbn", rom_ver);
+ } else if (soc_type == QCA_WCN6855) {
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
@@ -648,6 +651,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
else if (soc_type == QCA_WCN6750)
snprintf(config.fwname, sizeof(config.fwname),
"qca/msnv%02x.bin", rom_ver);
+ else if (soc_type == QCA_WCN6855)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
@@ -685,11 +691,17 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
- if (soc_type == QCA_WCN3991 || soc_type == QCA_WCN6750) {
+ switch (soc_type) {
+ case QCA_WCN3991:
+ case QCA_WCN6750:
+ case QCA_WCN6855:
/* get fw build info */
err = qca_read_fw_build_info(hdev);
if (err < 0)
return err;
+ break;
+ default:
+ break;
}
bt_dev_info(hdev, "QCA setup on UART is completed");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 61e9a50e66ae..b884095bcd9d 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -147,6 +147,7 @@ enum qca_btsoc_type {
QCA_WCN3991,
QCA_QCA6390,
QCA_WCN6750,
+ QCA_WCN6855,
};
#if IS_ENABLED(CONFIG_BT_QCA)
@@ -168,6 +169,10 @@ static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
{
return soc_type == QCA_WCN6750;
}
+static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type)
+{
+ return soc_type == QCA_WCN6855;
+}
#else
@@ -206,6 +211,11 @@ static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
return false;
}
+static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type)
+{
+ return false;
+}
+
static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 2acb719e596f..11c7e04bf394 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
return 0;
}
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ int ret;
+
+ ret = qca_set_bdaddr_rome(hdev, bdaddr);
+ if (ret)
+ return ret;
+
+ /* The firmware stops responding for a while after setting the bdaddr,
+ * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+ */
+ usleep_range(1000, 10000);
+ return 0;
+}
+
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
- hdev->set_bdaddr = qca_set_bdaddr_rome;
+ hdev->set_bdaddr = btqcomsmd_set_bdaddr;
ret = hci_register_dev(hdev);
if (ret < 0)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 69c3fe649ca7..2915c82d719d 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -17,19 +17,26 @@
#define VERSION "0.1"
+#define RTL_CHIP_8723CS_CG 3
+#define RTL_CHIP_8723CS_VF 4
+#define RTL_CHIP_8723CS_XX 5
#define RTL_EPATCH_SIGNATURE "Realtech"
+#define RTL_EPATCH_SIGNATURE_V2 "RTBTCore"
+#define RTL_ROM_LMP_8703B 0x8703
#define RTL_ROM_LMP_8723A 0x1200
#define RTL_ROM_LMP_8723B 0x8723
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822
#define RTL_ROM_LMP_8852A 0x8852
+#define RTL_ROM_LMP_8851B 0x8851
#define RTL_CONFIG_MAGIC 0x8723ab55
#define IC_MATCH_FL_LMPSUBV (1 << 0)
#define IC_MATCH_FL_HCIREV (1 << 1)
#define IC_MATCH_FL_HCIVER (1 << 2)
#define IC_MATCH_FL_HCIBUS (1 << 3)
+#define IC_MATCH_FL_CHIP_TYPE (1 << 4)
#define IC_INFO(lmps, hcir, hciv, bus) \
.match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | \
IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, \
@@ -38,6 +45,14 @@
.hci_ver = (hciv), \
.hci_bus = (bus)
+#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}})
+#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}})
+#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}})
+
+#define RTL_PATCH_SNIPPETS 0x01
+#define RTL_PATCH_DUMMY_HEADER 0x02
+#define RTL_PATCH_SECURITY_HEADER 0x03
+
enum btrtl_chip_id {
CHIP_ID_8723A,
CHIP_ID_8723B,
@@ -51,6 +66,7 @@ enum btrtl_chip_id {
CHIP_ID_8852A = 18,
CHIP_ID_8852B = 20,
CHIP_ID_8852C = 25,
+ CHIP_ID_8851B = 36,
};
struct id_table {
@@ -59,6 +75,7 @@ struct id_table {
__u16 hci_rev;
__u8 hci_ver;
__u8 hci_bus;
+ __u8 chip_type;
bool config_needed;
bool has_rom_version;
bool has_msft_ext;
@@ -75,6 +92,8 @@ struct btrtl_device_info {
int cfg_len;
bool drop_fw;
int project_id;
+ u8 key_id;
+ struct list_head patch_subsecs;
};
static const struct id_table ic_id_table[] = {
@@ -99,6 +118,39 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8723b_fw.bin",
.cfg_name = "rtl_bt/rtl8723b_config" },
+ /* 8723CS-CG */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
+ IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8703B,
+ .chip_type = RTL_CHIP_8723CS_CG,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723cs_cg_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723cs_cg_config" },
+
+ /* 8723CS-VF */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
+ IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8703B,
+ .chip_type = RTL_CHIP_8723CS_VF,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723cs_vf_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723cs_vf_config" },
+
+ /* 8723CS-XX */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
+ IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8703B,
+ .chip_type = RTL_CHIP_8723CS_XX,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723cs_xx_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723cs_xx_config" },
+
/* 8723D */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB),
.config_needed = true,
@@ -128,6 +180,14 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8821c_fw.bin",
.cfg_name = "rtl_bt/rtl8821c_config" },
+ /* 8821CS */
+ { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_UART),
+ .config_needed = true,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8821cs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8821cs_config" },
+
/* 8761A */
{ IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB),
.config_needed = false,
@@ -190,6 +250,14 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8852au_fw.bin",
.cfg_name = "rtl_bt/rtl8852au_config" },
+ /* 8852B with UART interface */
+ { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_UART),
+ .config_needed = true,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8852bs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8852bs_config" },
+
/* 8852B */
{ IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB),
.config_needed = false,
@@ -205,10 +273,19 @@ static const struct id_table ic_id_table[] = {
.has_msft_ext = true,
.fw_name = "rtl_bt/rtl8852cu_fw.bin",
.cfg_name = "rtl_bt/rtl8852cu_config" },
+
+ /* 8851B */
+ { IC_INFO(RTL_ROM_LMP_8851B, 0xb, 0xc, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = false,
+ .fw_name = "rtl_bt/rtl8851bu_fw.bin",
+ .cfg_name = "rtl_bt/rtl8851bu_config" },
};
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
- u8 hci_ver, u8 hci_bus)
+ u8 hci_ver, u8 hci_bus,
+ u8 chip_type)
{
int i;
@@ -225,6 +302,9 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
(ic_id_table[i].hci_bus != hci_bus))
continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_CHIP_TYPE) &&
+ (ic_id_table[i].chip_type != chip_type))
+ continue;
break;
}
@@ -284,6 +364,227 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
return 0;
}
+static int btrtl_vendor_read_reg16(struct hci_dev *hdev,
+ struct rtl_vendor_cmd *cmd, u8 *rp)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc61, sizeof(*cmd), cmd,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ rtl_dev_err(hdev, "RTL: Read reg16 failed (%d)", err);
+ return err;
+ }
+
+ if (skb->len != 3 || skb->data[0]) {
+ bt_dev_err(hdev, "RTL: Read reg16 length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ if (rp)
+ memcpy(rp, skb->data + 1, 2);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static void *rtl_iov_pull_data(struct rtl_iovec *iov, u32 len)
+{
+ void *data = iov->data;
+
+ if (iov->len < len)
+ return NULL;
+
+ iov->data += len;
+ iov->len -= len;
+
+ return data;
+}
+
+static void btrtl_insert_ordered_subsec(struct rtl_subsection *node,
+ struct btrtl_device_info *btrtl_dev)
+{
+ struct list_head *pos;
+ struct list_head *next;
+ struct rtl_subsection *subsec;
+
+ list_for_each_safe(pos, next, &btrtl_dev->patch_subsecs) {
+ subsec = list_entry(pos, struct rtl_subsection, list);
+ if (subsec->prio >= node->prio)
+ break;
+ }
+ __list_add(&node->list, pos->prev, pos);
+}
+
+static int btrtl_parse_section(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev, u32 opcode,
+ u8 *data, u32 len)
+{
+ struct rtl_section_hdr *hdr;
+ struct rtl_subsection *subsec;
+ struct rtl_common_subsec *common_subsec;
+ struct rtl_sec_hdr *sec_hdr;
+ int i;
+ u8 *ptr;
+ u16 num_subsecs;
+ u32 subsec_len;
+ int rc = 0;
+ struct rtl_iovec iov = {
+ .data = data,
+ .len = len,
+ };
+
+ hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
+ if (!hdr)
+ return -EINVAL;
+ num_subsecs = le16_to_cpu(hdr->num);
+
+ for (i = 0; i < num_subsecs; i++) {
+ common_subsec = rtl_iov_pull_data(&iov, sizeof(*common_subsec));
+ if (!common_subsec)
+ break;
+ subsec_len = le32_to_cpu(common_subsec->len);
+
+ rtl_dev_dbg(hdev, "subsec, eco 0x%02x, len %08x",
+ common_subsec->eco, subsec_len);
+
+ ptr = rtl_iov_pull_data(&iov, subsec_len);
+ if (!ptr)
+ break;
+
+ if (common_subsec->eco != btrtl_dev->rom_version + 1)
+ continue;
+
+ switch (opcode) {
+ case RTL_PATCH_SECURITY_HEADER:
+ sec_hdr = (void *)common_subsec;
+ if (sec_hdr->key_id != btrtl_dev->key_id)
+ continue;
+ break;
+ }
+
+ subsec = kzalloc(sizeof(*subsec), GFP_KERNEL);
+ if (!subsec)
+ return -ENOMEM;
+ subsec->opcode = opcode;
+ subsec->prio = common_subsec->prio;
+ subsec->len = subsec_len;
+ subsec->data = ptr;
+ btrtl_insert_ordered_subsec(subsec, btrtl_dev);
+ rc += subsec_len;
+ }
+
+ return rc;
+}
+
+static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned char **_buf)
+{
+ struct rtl_epatch_header_v2 *hdr;
+ int rc;
+ u8 reg_val[2];
+ u8 key_id;
+ u32 num_sections;
+ struct rtl_section *section;
+ struct rtl_subsection *entry, *tmp;
+ u32 section_len;
+ u32 opcode;
+ int len = 0;
+ int i;
+ u8 *ptr;
+ struct rtl_iovec iov = {
+ .data = btrtl_dev->fw_data,
+ .len = btrtl_dev->fw_len - 7, /* Cut the tail */
+ };
+
+ rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
+ if (rc < 0)
+ return -EIO;
+ key_id = reg_val[0];
+
+ rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id);
+
+ btrtl_dev->key_id = key_id;
+
+ hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
+ if (!hdr)
+ return -EINVAL;
+ num_sections = le32_to_cpu(hdr->num_sections);
+
+ rtl_dev_dbg(hdev, "FW version %08x-%08x", *((u32 *)hdr->fw_version),
+ *((u32 *)(hdr->fw_version + 4)));
+
+ for (i = 0; i < num_sections; i++) {
+ section = rtl_iov_pull_data(&iov, sizeof(*section));
+ if (!section)
+ break;
+ section_len = le32_to_cpu(section->len);
+ opcode = le32_to_cpu(section->opcode);
+
+ rtl_dev_dbg(hdev, "opcode 0x%04x", section->opcode);
+
+ ptr = rtl_iov_pull_data(&iov, section_len);
+ if (!ptr)
+ break;
+
+ switch (opcode) {
+ case RTL_PATCH_SNIPPETS:
+ rc = btrtl_parse_section(hdev, btrtl_dev, opcode,
+ ptr, section_len);
+ break;
+ case RTL_PATCH_SECURITY_HEADER:
+ /* If key_id from chip is zero, ignore all security
+ * headers.
+ */
+ if (!key_id)
+ break;
+ rc = btrtl_parse_section(hdev, btrtl_dev, opcode,
+ ptr, section_len);
+ break;
+ case RTL_PATCH_DUMMY_HEADER:
+ rc = btrtl_parse_section(hdev, btrtl_dev, opcode,
+ ptr, section_len);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ if (rc < 0) {
+ rtl_dev_err(hdev, "RTL: Parse section (%u) err %d",
+ opcode, rc);
+ return rc;
+ }
+ len += rc;
+ }
+
+ if (!len)
+ return -ENODATA;
+
+ /* Allocate memory and copy all found subsections. */
+ ptr = kvmalloc(len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ len = 0;
+ list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) {
+ rtl_dev_dbg(hdev, "RTL: opcode %08x, addr %p, len 0x%x",
+ entry->opcode, entry->data, entry->len);
+ memcpy(ptr + len, entry->data, entry->len);
+ len += entry->len;
+ }
+
+ if (!len)
+ return -EPERM;
+
+ *_buf = ptr;
+ return len;
+}
+
static int rtlbt_parse_firmware(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev,
unsigned char **_buf)
@@ -307,6 +608,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8723B, 1 },
{ RTL_ROM_LMP_8821A, 2 },
{ RTL_ROM_LMP_8761A, 3 },
+ { RTL_ROM_LMP_8703B, 7 },
{ RTL_ROM_LMP_8822B, 8 },
{ RTL_ROM_LMP_8723B, 9 }, /* 8723D */
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
@@ -315,9 +617,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8852A, 18 }, /* 8852A */
{ RTL_ROM_LMP_8852A, 20 }, /* 8852B */
{ RTL_ROM_LMP_8852A, 25 }, /* 8852C */
+ { RTL_ROM_LMP_8851B, 36 }, /* 8851B */
};
- min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+ if (btrtl_dev->fw_len <= 8)
+ return -EINVAL;
+
+ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8))
+ min_size = sizeof(struct rtl_epatch_header) +
+ sizeof(extension_sig) + 3;
+ else if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8))
+ min_size = sizeof(struct rtl_epatch_header_v2) +
+ sizeof(extension_sig) + 3;
+ else
+ return -EINVAL;
+
if (btrtl_dev->fw_len < min_size)
return -EINVAL;
@@ -382,12 +696,14 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
return -EINVAL;
}
- epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
- if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+ if (memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8) != 0) {
+ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8))
+ return rtlbt_parse_firmware_v2(hdev, btrtl_dev, _buf);
rtl_dev_err(hdev, "bad EPATCH signature");
return -EINVAL;
}
+ epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
num_patches = le16_to_cpu(epatch_info->num_patches);
BT_DBG("fw_version=%x, num_patches=%d",
le32_to_cpu(epatch_info->fw_version), num_patches);
@@ -451,6 +767,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
int frag_len = RTL_FRAG_LEN;
int ret = 0;
int i;
+ int j = 0;
struct sk_buff *skb;
struct hci_rp_read_local_version *rp;
@@ -461,17 +778,16 @@ static int rtl_download_firmware(struct hci_dev *hdev,
for (i = 0; i < frag_num; i++) {
struct sk_buff *skb;
- BT_DBG("download fw (%d/%d)", i, frag_num);
-
- if (i > 0x7f)
- dl_cmd->index = (i & 0x7f) + 1;
- else
- dl_cmd->index = i;
+ dl_cmd->index = j++;
+ if (dl_cmd->index == 0x7f)
+ j = 1;
if (i == (frag_num - 1)) {
dl_cmd->index |= 0x80; /* data end */
frag_len = fw_len % RTL_FRAG_LEN;
}
+ rtl_dev_dbg(hdev, "download fw (%d/%d). index = %d", i,
+ frag_num, dl_cmd->index);
memcpy(dl_cmd->data, data, frag_len);
/* Send download command */
@@ -587,10 +903,60 @@ out:
return ret;
}
+static bool rtl_has_chip_type(u16 lmp_subver)
+{
+ switch (lmp_subver) {
+ case RTL_ROM_LMP_8703B:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int rtl_read_chip_type(struct hci_dev *hdev, u8 *type)
+{
+ struct rtl_chip_type_evt *chip_type;
+ struct sk_buff *skb;
+ const unsigned char cmd_buf[] = {0x00, 0x94, 0xa0, 0x00, 0xb0};
+
+ /* Read RTL chip type command */
+ skb = __hci_cmd_sync(hdev, 0xfc61, 5, cmd_buf, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ rtl_dev_err(hdev, "Read chip type failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ chip_type = skb_pull_data(skb, sizeof(*chip_type));
+ if (!chip_type) {
+ rtl_dev_err(hdev, "RTL chip type event length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ rtl_dev_info(hdev, "chip_type status=%x type=%x",
+ chip_type->status, chip_type->type);
+
+ *type = chip_type->type & 0x0f;
+
+ kfree_skb(skb);
+ return 0;
+}
+
void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
+ struct rtl_subsection *entry, *tmp;
+
kvfree(btrtl_dev->fw_data);
kvfree(btrtl_dev->cfg_data);
+
+ list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
kfree(btrtl_dev);
}
EXPORT_SYMBOL_GPL(btrtl_free);
@@ -603,10 +969,11 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
struct hci_rp_read_local_version *resp;
char cfg_name[40];
u16 hci_rev, lmp_subver;
- u8 hci_ver;
+ u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
u16 opcode;
u8 cmd[2];
+ u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
if (!btrtl_dev) {
@@ -614,6 +981,31 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
goto err_alloc;
}
+ INIT_LIST_HEAD(&btrtl_dev->patch_subsecs);
+
+check_version:
+ ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_SUBVER, reg_val);
+ if (ret < 0)
+ goto err_free;
+ lmp_subver = get_unaligned_le16(reg_val);
+
+ if (lmp_subver == RTL_ROM_LMP_8822B) {
+ ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_REV, reg_val);
+ if (ret < 0)
+ goto err_free;
+ hci_rev = get_unaligned_le16(reg_val);
+
+ /* 8822E */
+ if (hci_rev == 0x000e) {
+ hci_ver = 0x0c;
+ lmp_ver = 0x0c;
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev,
+ hci_ver, hdev->bus,
+ chip_type);
+ goto next;
+ }
+ }
+
skb = btrtl_read_local_version(hdev);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
@@ -621,19 +1013,32 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
}
resp = (struct hci_rp_read_local_version *)skb->data;
- rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x",
- resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
- hci_ver = resp->hci_ver;
- hci_rev = le16_to_cpu(resp->hci_rev);
+ hci_ver = resp->hci_ver;
+ hci_rev = le16_to_cpu(resp->hci_rev);
+ lmp_ver = resp->lmp_ver;
lmp_subver = le16_to_cpu(resp->lmp_subver);
+ kfree_skb(skb);
+
+ if (rtl_has_chip_type(lmp_subver)) {
+ ret = rtl_read_chip_type(hdev, &chip_type);
+ if (ret)
+ goto err_free;
+ }
+
btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
- hdev->bus);
+ hdev->bus, chip_type);
- if (!btrtl_dev->ic_info)
+next:
+ rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x",
+ hci_ver, hci_rev,
+ lmp_ver, lmp_subver);
+
+ if (!btrtl_dev->ic_info && !btrtl_dev->drop_fw)
btrtl_dev->drop_fw = true;
+ else
+ btrtl_dev->drop_fw = false;
if (btrtl_dev->drop_fw) {
opcode = hci_opcode_pack(0x3f, 0x66);
@@ -642,41 +1047,25 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb)
- goto out_free;
+ goto err_free;
skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
- hdev->send(hdev, skb);
+ ret = hdev->send(hdev, skb);
+ if (ret < 0) {
+ bt_dev_err(hdev, "sending frame failed (%d)", ret);
+ kfree_skb(skb);
+ goto err_free;
+ }
/* Ensure the above vendor command is sent to controller and
* process has done.
*/
msleep(200);
- /* Read the local version again. Expect to have the vanilla
- * version as cold boot.
- */
- skb = btrtl_read_local_version(hdev);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- goto err_free;
- }
-
- resp = (struct hci_rp_read_local_version *)skb->data;
- rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x",
- resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
-
- hci_ver = resp->hci_ver;
- hci_rev = le16_to_cpu(resp->hci_rev);
- lmp_subver = le16_to_cpu(resp->lmp_subver);
-
- btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
- hdev->bus);
+ goto check_version;
}
-out_free:
- kfree_skb(skb);
if (!btrtl_dev->ic_info) {
rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
@@ -755,6 +1144,8 @@ int btrtl_download_firmware(struct hci_dev *hdev,
case RTL_ROM_LMP_8761A:
case RTL_ROM_LMP_8822B:
case RTL_ROM_LMP_8852A:
+ case RTL_ROM_LMP_8703B:
+ case RTL_ROM_LMP_8851B:
return btrtl_setup_rtl8723b(hdev, btrtl_dev);
default:
rtl_dev_info(hdev, "assuming no firmware upload needed");
@@ -779,6 +1170,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852A:
case CHIP_ID_8852B:
case CHIP_ID_8852C:
+ case CHIP_ID_8851B:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
@@ -795,6 +1187,22 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
rtl_dev_dbg(hdev, "WBS supported not enabled.");
break;
}
+
+ if (!btrtl_dev->ic_info)
+ return;
+
+ switch (btrtl_dev->ic_info->lmp_subver) {
+ case RTL_ROM_LMP_8703B:
+ /* 8723CS reports two pages for local ext features,
+ * but it doesn't support any features from page 2 -
+ * it either responds with garbage or with error status
+ */
+ set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
+ &hdev->quirks);
+ break;
+ default:
+ break;
+ }
}
EXPORT_SYMBOL_GPL(btrtl_set_quirks);
@@ -953,6 +1361,12 @@ MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
@@ -963,7 +1377,11 @@ MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin");
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index ebf0101c959b..adb4c2c9abc5 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -14,6 +14,11 @@
struct btrtl_device_info;
+struct rtl_chip_type_evt {
+ __u8 status;
+ __u8 type;
+} __packed;
+
struct rtl_download_cmd {
__u8 index;
__u8 data[RTL_FRAG_LEN];
@@ -44,7 +49,58 @@ struct rtl_vendor_config_entry {
struct rtl_vendor_config {
__le32 signature;
__le16 total_len;
- struct rtl_vendor_config_entry entry[];
+ __u8 entry[];
+} __packed;
+
+struct rtl_epatch_header_v2 {
+ __u8 signature[8];
+ __u8 fw_version[8];
+ __le32 num_sections;
+} __packed;
+
+struct rtl_section {
+ __le32 opcode;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct rtl_section_hdr {
+ __le16 num;
+ __le16 reserved;
+} __packed;
+
+struct rtl_common_subsec {
+ __u8 eco;
+ __u8 prio;
+ __u8 cb[2];
+ __le32 len;
+ __u8 data[];
+};
+
+struct rtl_sec_hdr {
+ __u8 eco;
+ __u8 prio;
+ __u8 key_id;
+ __u8 reserved;
+ __le32 len;
+ __u8 data[];
+} __packed;
+
+struct rtl_subsection {
+ struct list_head list;
+ u32 opcode;
+ u32 len;
+ u8 prio;
+ u8 *data;
+};
+
+struct rtl_iovec {
+ u8 *data;
+ u32 len;
+};
+
+struct rtl_vendor_cmd {
+ __u8 param[5];
} __packed;
enum {
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 795be33f2892..f19d31ee37ea 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -357,6 +357,7 @@ static void btsdio_remove(struct sdio_func *func)
if (!data)
return;
+ cancel_work_sync(&data->work);
hdev = data->hdev;
sdio_set_drvdata(func, NULL);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2ad4efdd9e40..2a8e2bb038f5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -64,6 +64,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24)
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
+#define BTUSB_ACTIONS_SEMI BIT(27)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -492,6 +493,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
+ /* Realtek 8821CE Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -535,6 +540,10 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -553,6 +562,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional MediaTek MT7668 Bluetooth devices */
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
@@ -566,6 +578,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -604,6 +619,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -677,6 +695,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Actions Semiconductor ATS2851 based devices */
+ { USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -712,6 +733,16 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
{}
};
+struct qca_dump_info {
+ /* fields for dump collection */
+ u16 id_vendor;
+ u16 id_product;
+ u32 fw_version;
+ u32 controller_id;
+ u32 ram_dump_size;
+ u16 ram_dump_seqno;
+};
+
#define BTUSB_MAX_ISOC_FRAMES 10
#define BTUSB_INTR_RUNNING 0
@@ -731,6 +762,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_WAKEUP_AUTOSUSPEND 14
#define BTUSB_USE_ALT3_FOR_WBS 15
#define BTUSB_ALT6_CONTINUOUS_TX 16
+#define BTUSB_HW_SSR_ACTIVE 17
struct btusb_data {
struct hci_dev *hdev;
@@ -793,6 +825,8 @@ struct btusb_data {
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
unsigned cmd_timeout_cnt;
+
+ struct qca_dump_info qca_dump;
};
static void btusb_reset(struct hci_dev *hdev)
@@ -883,6 +917,11 @@ static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
+ if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
+ bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
+ return;
+ }
+
if (++data->cmd_timeout_cnt < 5)
return;
@@ -1039,21 +1078,11 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
hci_skb_expect(skb) -= len;
if (skb->len == HCI_ACL_HDR_SIZE) {
- __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
__le16 dlen = hci_acl_hdr(skb)->dlen;
- __u8 type;
/* Complete ACL header */
hci_skb_expect(skb) = __le16_to_cpu(dlen);
- /* Detect if ISO packet has been sent over bulk */
- if (hci_conn_num(data->hdev, ISO_LINK)) {
- type = hci_conn_lookup_type(data->hdev,
- hci_handle(handle));
- if (type == ISO_LINK)
- hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
- }
-
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
kfree_skb(skb);
skb = NULL;
@@ -2375,16 +2404,47 @@ static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
return btusb_recv_bulk(data, buffer, count);
}
+static int btusb_intel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct intel_tlv *tlv = (void *)&skb->data[5];
+
+ /* The first event is always an event type TLV */
+ if (tlv->type != INTEL_TLV_TYPE_ID)
+ goto recv_frame;
+
+ switch (tlv->val[0]) {
+ case INTEL_TLV_SYSTEM_EXCEPTION:
+ case INTEL_TLV_FATAL_EXCEPTION:
+ case INTEL_TLV_DEBUG_EXCEPTION:
+ case INTEL_TLV_TEST_EXCEPTION:
+ /* Generate devcoredump from exception */
+ if (!hci_devcd_init(hdev, skb->len)) {
+ hci_devcd_append(hdev, skb);
+ hci_devcd_complete(hdev);
+ } else {
+ bt_dev_err(hdev, "Failed to generate devcoredump");
+ kfree_skb(skb);
+ }
+ return 0;
+ default:
+ bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
+ }
+
+recv_frame:
+ return hci_recv_frame(hdev, skb);
+}
+
static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
- struct hci_event_hdr *hdr = (void *)skb->data;
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
- if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
- hdr->plen > 0) {
- const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
- unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+ if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+ hdr->plen > 0) {
+ const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+ unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+ if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
switch (skb->data[2]) {
case 0x02:
/* When switching to the operational firmware
@@ -2403,6 +2463,15 @@ static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
}
+
+ /* Handle all diagnostics events separately. May still call
+ * hci_recv_frame.
+ */
+ if (len >= sizeof(diagnostics_hdr) &&
+ memcmp(&skb->data[2], diagnostics_hdr,
+ sizeof(diagnostics_hdr)) == 0) {
+ return btusb_intel_diagnostics(hdev, skb);
+ }
}
return hci_recv_frame(hdev, skb);
@@ -3243,6 +3312,202 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
return 0;
}
+#define QCA_MEMDUMP_ACL_HANDLE 0x2EDD
+#define QCA_MEMDUMP_SIZE_MAX 0x100000
+#define QCA_MEMDUMP_VSE_CLASS 0x01
+#define QCA_MEMDUMP_MSG_TYPE 0x08
+#define QCA_MEMDUMP_PKT_SIZE 248
+#define QCA_LAST_SEQUENCE_NUM 0xffff
+
+struct qca_dump_hdr {
+ u8 vse_class;
+ u8 msg_type;
+ __le16 seqno;
+ u8 reserved;
+ union {
+ u8 data[0];
+ struct {
+ __le32 ram_dump_size;
+ u8 data0[0];
+ } __packed;
+ };
+} __packed;
+
+
+static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ char buf[128];
+ struct btusb_data *btdata = hci_get_drvdata(hdev);
+
+ snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n",
+ btdata->qca_dump.controller_id);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n",
+ btdata->qca_dump.fw_version);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\nVendor: qca\n",
+ btusb_driver.name);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "VID: 0x%x\nPID:0x%x\n",
+ btdata->qca_dump.id_vendor, btdata->qca_dump.id_product);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Lmp Subversion: 0x%x\n",
+ hdev->lmp_subver);
+ skb_put_data(skb, buf, strlen(buf));
+}
+
+static void btusb_coredump_qca(struct hci_dev *hdev)
+{
+ static const u8 param[] = { 0x26 };
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
+ kfree_skb(skb);
+}
+
+/*
+ * Return values:
+ *  == 0: not a dump packet
+ *   < 0: failed to handle a dump packet
+ *   > 0: dump packet handled
+ */
+static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ int ret = 1;
+ u8 pkt_type;
+ u8 *sk_ptr;
+ unsigned int sk_len;
+ u16 seqno;
+ u32 dump_size;
+
+ struct hci_event_hdr *event_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct btusb_data *btdata = hci_get_drvdata(hdev);
+ struct usb_device *udev = btdata->udev;
+
+ pkt_type = hci_skb_pkt_type(skb);
+ sk_ptr = skb->data;
+ sk_len = skb->len;
+
+ if (pkt_type == HCI_ACLDATA_PKT) {
+ acl_hdr = hci_acl_hdr(skb);
+ if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+ return 0;
+ sk_ptr += HCI_ACL_HDR_SIZE;
+ sk_len -= HCI_ACL_HDR_SIZE;
+ event_hdr = (struct hci_event_hdr *)sk_ptr;
+ } else {
+ event_hdr = hci_event_hdr(skb);
+ }
+
+ if ((event_hdr->evt != HCI_VENDOR_PKT)
+ || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+ return 0;
+
+ sk_ptr += HCI_EVENT_HDR_SIZE;
+ sk_len -= HCI_EVENT_HDR_SIZE;
+
+ dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+ if ((sk_len < offsetof(struct qca_dump_hdr, data))
+ || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
+ || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ return 0;
+
+ /* This is a dump packet from here on */
+ seqno = le16_to_cpu(dump_hdr->seqno);
+ if (seqno == 0) {
+ set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
+ dump_size = le32_to_cpu(dump_hdr->ram_dump_size);
+ if (!dump_size || (dump_size > QCA_MEMDUMP_SIZE_MAX)) {
+ ret = -EILSEQ;
+ bt_dev_err(hdev, "Invalid memdump size(%u)",
+ dump_size);
+ goto out;
+ }
+
+ ret = hci_devcd_init(hdev, dump_size);
+ if (ret < 0) {
+ bt_dev_err(hdev, "memdump init error(%d)", ret);
+ goto out;
+ }
+
+ btdata->qca_dump.ram_dump_size = dump_size;
+ btdata->qca_dump.ram_dump_seqno = 0;
+ sk_ptr += offsetof(struct qca_dump_hdr, data0);
+ sk_len -= offsetof(struct qca_dump_hdr, data0);
+
+ usb_disable_autosuspend(udev);
+ bt_dev_info(hdev, "%s memdump size(%u)\n",
+ (pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
+ dump_size);
+ } else {
+ sk_ptr += offsetof(struct qca_dump_hdr, data);
+ sk_len -= offsetof(struct qca_dump_hdr, data);
+ }
+
+ if (!btdata->qca_dump.ram_dump_size) {
+ ret = -EINVAL;
+ bt_dev_err(hdev, "memdump is not active");
+ goto out;
+ }
+
+ if ((seqno > btdata->qca_dump.ram_dump_seqno + 1) && (seqno != QCA_LAST_SEQUENCE_NUM)) {
+ dump_size = QCA_MEMDUMP_PKT_SIZE * (seqno - btdata->qca_dump.ram_dump_seqno - 1);
+ hci_devcd_append_pattern(hdev, 0x0, dump_size);
+ bt_dev_err(hdev,
+ "expected memdump seqno(%u) is not received(%u)\n",
+ btdata->qca_dump.ram_dump_seqno, seqno);
+ btdata->qca_dump.ram_dump_seqno = seqno;
+ kfree_skb(skb);
+ return ret;
+ }
+
+ skb_pull(skb, skb->len - sk_len);
+ hci_devcd_append(hdev, skb);
+ btdata->qca_dump.ram_dump_seqno++;
+ if (seqno == QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_info(hdev,
+ "memdump done: pkts(%u), total(%u)\n",
+ btdata->qca_dump.ram_dump_seqno, btdata->qca_dump.ram_dump_size);
+
+ hci_devcd_complete(hdev);
+ goto out;
+ }
+ return ret;
+
+out:
+ if (btdata->qca_dump.ram_dump_size)
+ usb_enable_autosuspend(udev);
+ btdata->qca_dump.ram_dump_size = 0;
+ btdata->qca_dump.ram_dump_seqno = 0;
+ clear_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
+
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret;
+}
+
+static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (handle_dump_pkt_qca(hdev, skb))
+ return 0;
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (handle_dump_pkt_qca(hdev, skb))
+ return 0;
+ return hci_recv_frame(hdev, skb);
+}
+
+
#define QCA_DFU_PACKET_LEN 4096
#define QCA_GET_TARGET_VERSION 0x09
@@ -3577,6 +3842,9 @@ static int btusb_setup_qca(struct hci_dev *hdev)
if (err < 0)
return err;
+ btdata->qca_dump.fw_version = le32_to_cpu(ver.patch_version);
+ btdata->qca_dump.controller_id = le32_to_cpu(ver.rom_version);
+
if (!(status & QCA_SYSCFG_UPDATED)) {
err = btusb_setup_qca_load_nvm(hdev, &ver, info);
if (err < 0)
@@ -3830,13 +4098,9 @@ static int btusb_probe(struct usb_interface *intf,
BT_DBG("intf %p id %p", intf, id);
- /* interface numbers are hardcoded in the spec */
- if (intf->cur_altsetting->desc.bInterfaceNumber != 0) {
- if (!(id->driver_info & BTUSB_IFNUM_2))
- return -ENODEV;
- if (intf->cur_altsetting->desc.bInterfaceNumber != 2)
- return -ENODEV;
- }
+ if ((id->driver_info & BTUSB_IFNUM_2) &&
+ (intf->cur_altsetting->desc.bInterfaceNumber != 2))
+ return -ENODEV;
ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -4011,7 +4275,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Combined Intel Device setup to support multiple setup routine */
if (id->driver_info & BTUSB_INTEL_COMBINED) {
- err = btintel_configure_setup(hdev);
+ err = btintel_configure_setup(hdev, btusb_driver.name);
if (err)
goto out_free_dev;
@@ -4070,6 +4334,11 @@ static int btusb_probe(struct usb_interface *intf,
}
if (id->driver_info & BTUSB_QCA_WCN6855) {
+ data->qca_dump.id_vendor = id->idVendor;
+ data->qca_dump.id_product = id->idProduct;
+ data->recv_event = btusb_recv_evt_qca;
+ data->recv_acl = btusb_recv_acl_qca;
+ hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL);
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
@@ -4098,6 +4367,14 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
}
+ if (id->driver_info & BTUSB_ACTIONS_SEMI) {
+ /* Support is advertised, but not implemented */
+ set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ }
+
if (!reset)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
@@ -4383,6 +4660,17 @@ done:
}
#endif
+#ifdef CONFIG_DEV_COREDUMP
+static void btusb_coredump(struct device *dev)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+ struct hci_dev *hdev = data->hdev;
+
+ if (hdev->dump.coredump)
+ hdev->dump.coredump(hdev);
+}
+#endif
+
static struct usb_driver btusb_driver = {
.name = "btusb",
.probe = btusb_probe,
@@ -4394,6 +4682,14 @@ static struct usb_driver btusb_driver = {
.id_table = btusb_table,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
+
+#ifdef CONFIG_DEV_COREDUMP
+ .drvwrap = {
+ .driver = {
+ .coredump = btusb_coredump,
+ },
+ },
+#endif
};
module_usb_driver(btusb_driver);
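For context on the QCA memdump and Intel exception paths added above, and not as part of the patch: both feed the HCI devcoredump facility through the same three-step sequence. The helper names are the real hci_core APIs; the wrapper function is illustrative only and ignores the append return value for brevity, as the driver code above does.

static void example_emit_devcoredump(struct hci_dev *hdev,
				     struct sk_buff *skb, u32 total_size)
{
	/* Reserve room for the whole dump; fails if no coredump handler is
	 * registered or a dump is already in progress.
	 */
	if (hci_devcd_init(hdev, total_size)) {
		kfree_skb(skb);
		return;
	}

	hci_devcd_append(hdev, skb);	/* queues the skb; takes ownership */
	hci_devcd_complete(hdev);	/* hands the collected data to devcoredump */
}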
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 2b6c0e1922cb..83bf5d4330c4 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -55,12 +55,14 @@
* @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
* @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable
* @max_autobaud_speed: max baudrate supported by device in autobaud mode
+ * @max_speed: max baudrate supported
*/
struct bcm_device_data {
bool no_early_set_baudrate;
bool drive_rts_on_open;
bool no_uart_clock_set;
u32 max_autobaud_speed;
+ u32 max_speed;
};
/**
@@ -888,7 +890,7 @@ unlock:
#endif
/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */
-static struct gpiod_lookup_table asus_tf103c_irq_gpios = {
+static struct gpiod_lookup_table irq_on_int33fc02_pin17_gpios = {
.dev_id = "serial0-0",
.table = {
GPIO_LOOKUP("INT33FC:02", 17, "host-wakeup-alt", GPIO_ACTIVE_HIGH),
@@ -898,12 +900,31 @@ static struct gpiod_lookup_table asus_tf103c_irq_gpios = {
static const struct dmi_system_id bcm_broken_irq_dmi_table[] = {
{
+ .ident = "Acer Iconia One 7 B1-750",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = &irq_on_int33fc02_pin17_gpios,
+ },
+ {
.ident = "Asus TF103C",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
- .driver_data = &asus_tf103c_irq_gpios,
+ .driver_data = &irq_on_int33fc02_pin17_gpios,
+ },
+ {
+ .ident = "Lenovo Yoga Tablet 2 830F/L / 1050F/L",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Partial match on beginning of BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = &irq_on_int33fc02_pin17_gpios,
},
{
.ident = "Meegopad T08",
@@ -1300,6 +1321,12 @@ static const struct hci_uart_proto bcm_proto = {
};
#ifdef CONFIG_ACPI
+
+/* bcm43430a0/a1 BT does not support 48MHz UART clock, limit to 2000000 baud */
+static struct bcm_device_data bcm43430_device_data = {
+ .max_speed = 2000000,
+};
+
static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E00" },
{ "BCM2E01" },
@@ -1414,19 +1441,19 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E71" },
{ "BCM2E72" },
{ "BCM2E73" },
- { "BCM2E74" },
- { "BCM2E75" },
+ { "BCM2E74", (long)&bcm43430_device_data },
+ { "BCM2E75", (long)&bcm43430_device_data },
{ "BCM2E76" },
{ "BCM2E77" },
{ "BCM2E78" },
{ "BCM2E79" },
{ "BCM2E7A" },
- { "BCM2E7B" },
+ { "BCM2E7B", (long)&bcm43430_device_data },
{ "BCM2E7C" },
{ "BCM2E7D" },
{ "BCM2E7E" },
{ "BCM2E7F" },
- { "BCM2E80" },
+ { "BCM2E80", (long)&bcm43430_device_data },
{ "BCM2E81" },
{ "BCM2E82" },
{ "BCM2E83" },
@@ -1435,7 +1462,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E86" },
{ "BCM2E87" },
{ "BCM2E88" },
- { "BCM2E89" },
+ { "BCM2E89", (long)&bcm43430_device_data },
{ "BCM2E8A" },
{ "BCM2E8B" },
{ "BCM2E8C" },
@@ -1444,29 +1471,30 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E90" },
{ "BCM2E92" },
{ "BCM2E93" },
- { "BCM2E94" },
+ { "BCM2E94", (long)&bcm43430_device_data },
{ "BCM2E95" },
{ "BCM2E96" },
{ "BCM2E97" },
{ "BCM2E98" },
- { "BCM2E99" },
+ { "BCM2E99", (long)&bcm43430_device_data },
{ "BCM2E9A" },
- { "BCM2E9B" },
+ { "BCM2E9B", (long)&bcm43430_device_data },
{ "BCM2E9C" },
{ "BCM2E9D" },
+ { "BCM2E9F", (long)&bcm43430_device_data },
{ "BCM2EA0" },
{ "BCM2EA1" },
- { "BCM2EA2" },
- { "BCM2EA3" },
+ { "BCM2EA2", (long)&bcm43430_device_data },
+ { "BCM2EA3", (long)&bcm43430_device_data },
{ "BCM2EA4" },
{ "BCM2EA5" },
{ "BCM2EA6" },
{ "BCM2EA7" },
{ "BCM2EA8" },
{ "BCM2EA9" },
- { "BCM2EAA" },
- { "BCM2EAB" },
- { "BCM2EAC" },
+ { "BCM2EAA", (long)&bcm43430_device_data },
+ { "BCM2EAB", (long)&bcm43430_device_data },
+ { "BCM2EAC", (long)&bcm43430_device_data },
{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
@@ -1535,6 +1563,8 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
bcmdev->drive_rts_on_open = data->drive_rts_on_open;
bcmdev->no_uart_clock_set = data->no_uart_clock_set;
+ if (data->max_speed && bcmdev->oper_speed > data->max_speed)
+ bcmdev->oper_speed = data->max_speed;
}
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
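A short sketch, not part of the patch and assuming the usual device_get_match_data() lookup in the probe path, of how the per-ACPI-ID bcm43430_device_data above reaches the driver: the match entry's .driver_data pointer is returned for the matched device, so the max_speed clamp only affects the bcm43430-based IDs.

static void example_clamp_oper_speed(struct serdev_device *serdev,
				     u32 *oper_speed)
{
	const struct bcm_device_data *data;

	data = device_get_match_data(&serdev->dev);
	if (data && data->max_speed && *oper_speed > data->max_speed)
		*oper_speed = data->max_speed;
}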
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 6455bc4fb5bb..fefc37b98b4a 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -463,6 +463,8 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
H5_HDR_SEQ(hdr), h5->tx_ack);
+ set_bit(H5_TX_ACK_REQ, &h5->flags);
+ hci_uart_tx_wakeup(hu);
h5_reset_rx(h5);
return 0;
}
@@ -936,6 +938,8 @@ static int h5_btrtl_setup(struct h5 *h5)
err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
/* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000);
+ if (err)
+ goto out_free;
btrtl_set_quirks(h5->hu->hdev, btrtl_dev);
@@ -1100,6 +1104,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
.data = (const void *)&h5_data_rtl8822cs },
{ .compatible = "realtek,rtl8723bs-bt",
.data = (const void *)&h5_data_rtl8723bs },
+ { .compatible = "realtek,rtl8723cs-bt",
+ .data = (const void *)&h5_data_rtl8723bs },
{ .compatible = "realtek,rtl8723ds-bt",
.data = (const void *)&h5_data_rtl8723bs },
#endif
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 865112e96ff9..efdda2c3fce8 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -323,9 +323,9 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
/* Disable hardware flow control */
ktermios = tty->termios;
ktermios.c_cflag &= ~CRTSCTS;
- status = tty_set_termios(tty, &ktermios);
+ tty_set_termios(tty, &ktermios);
BT_DBG("Disabling hardware flow control: %s",
- status ? "failed" : "success");
+ (tty->termios.c_cflag & CRTSCTS) ? "failed" : "success");
/* Clear RTS to prevent the device from sending */
/* Most UARTs need OUT2 to enable interrupts */
@@ -357,9 +357,9 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
/* Re-enable hardware flow control */
ktermios = tty->termios;
ktermios.c_cflag |= CRTSCTS;
- status = tty_set_termios(tty, &ktermios);
+ tty_set_termios(tty, &ktermios);
BT_DBG("Enabling hardware flow control: %s",
- status ? "failed" : "success");
+ !(tty->termios.c_cflag & CRTSCTS) ? "failed" : "success");
}
}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 5abc01a2acf7..4a0b5c3160c2 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -786,7 +786,7 @@ MODULE_DEVICE_TABLE(of, hci_ti_of_match);
static struct serdev_device_driver hci_ti_drv = {
.driver = {
.name = "hci-ti",
- .of_match_table = of_match_ptr(hci_ti_of_match),
+ .of_match_table = hci_ti_of_match,
},
.probe = hci_ti_probe,
.remove = hci_ti_remove,
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index fbc3f7c3a5c7..e08222395772 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -27,10 +27,12 @@
#define MRVL_ACK 0x5A
#define MRVL_NAK 0xBF
#define MRVL_RAW_DATA 0x1F
+#define MRVL_SET_BAUDRATE 0xFC09
enum {
STATE_CHIP_VER_PENDING,
STATE_FW_REQ_PENDING,
+ STATE_FW_LOADED,
};
struct mrvl_data {
@@ -254,6 +256,14 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
+ /* We might receive some noise when there is no firmware loaded. Therefore,
+ * we drop data if the firmware is not loaded yet and if there is no fw load
+ * request pending.
+ */
+ if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags) &&
+ !test_bit(STATE_FW_LOADED, &mrvl->flags))
+ return count;
+
mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
mrvl_recv_pkts,
ARRAY_SIZE(mrvl_recv_pkts));
@@ -354,6 +364,7 @@ static int mrvl_load_firmware(struct hci_dev *hdev, const char *name)
static int mrvl_setup(struct hci_uart *hu)
{
int err;
+ struct mrvl_data *mrvl = hu->priv;
hci_uart_set_flow_control(hu, true);
@@ -367,9 +378,9 @@ static int mrvl_setup(struct hci_uart *hu)
hci_uart_wait_until_sent(hu);
if (hu->serdev)
- serdev_device_set_baudrate(hu->serdev, 3000000);
+ serdev_device_set_baudrate(hu->serdev, hu->oper_speed);
else
- hci_uart_set_baudrate(hu, 3000000);
+ hci_uart_set_baudrate(hu, hu->oper_speed);
hci_uart_set_flow_control(hu, false);
@@ -377,13 +388,54 @@ static int mrvl_setup(struct hci_uart *hu)
if (err)
return err;
+ set_bit(STATE_FW_LOADED, &mrvl->flags);
+
+ return 0;
+}
+
+static int mrvl_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ int err;
+ struct mrvl_data *mrvl = hu->priv;
+ __le32 speed_le = cpu_to_le32(speed);
+
+ /* The firmware might be loaded by the Wi-Fi driver over SDIO. We wait up
+ * to 10s for CTS to be asserted; once it is, we know the firmware is
+ * ready.
+ */
+ err = serdev_device_wait_for_cts(hu->serdev, true, 10000);
+ if (err) {
+ bt_dev_err(hu->hdev, "Wait for CTS failed with %d\n", err);
+ return err;
+ }
+
+ set_bit(STATE_FW_LOADED, &mrvl->flags);
+
+ err = __hci_cmd_sync_status(hu->hdev, MRVL_SET_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
+ if (err) {
+ bt_dev_err(hu->hdev, "send command failed: %d", err);
+ return err;
+ }
+
+ serdev_device_set_baudrate(hu->serdev, speed);
+
+ /* A command has to be sent to the Bluetooth module so that the driver
+ * detects it again after the baudrate change. hci_serdev provides for
+ * this: setting HCI_UART_VND_DETECT triggers a dummy local version read.
+ */
+ set_bit(HCI_UART_VND_DETECT, &hu->hdev_flags);
+
return 0;
}
-static const struct hci_uart_proto mrvl_proto = {
+static const struct hci_uart_proto mrvl_proto_8897 = {
.id = HCI_UART_MRVL,
.name = "Marvell",
.init_speed = 115200,
+ .oper_speed = 3000000,
.open = mrvl_open,
.close = mrvl_close,
.flush = mrvl_flush,
@@ -393,18 +445,37 @@ static const struct hci_uart_proto mrvl_proto = {
.dequeue = mrvl_dequeue,
};
+static const struct hci_uart_proto mrvl_proto_8997 = {
+ .id = HCI_UART_MRVL,
+ .name = "Marvell 8997",
+ .init_speed = 115200,
+ .oper_speed = 3000000,
+ .open = mrvl_open,
+ .close = mrvl_close,
+ .flush = mrvl_flush,
+ .set_baudrate = mrvl_set_baudrate,
+ .recv = mrvl_recv,
+ .enqueue = mrvl_enqueue,
+ .dequeue = mrvl_dequeue,
+};
+
static int mrvl_serdev_probe(struct serdev_device *serdev)
{
struct mrvl_serdev *mrvldev;
+ const struct hci_uart_proto *mrvl_proto = device_get_match_data(&serdev->dev);
mrvldev = devm_kzalloc(&serdev->dev, sizeof(*mrvldev), GFP_KERNEL);
if (!mrvldev)
return -ENOMEM;
+ mrvldev->hu.oper_speed = mrvl_proto->oper_speed;
+ if (mrvl_proto->set_baudrate)
+ of_property_read_u32(serdev->dev.of_node, "max-speed", &mrvldev->hu.oper_speed);
+
mrvldev->hu.serdev = serdev;
serdev_device_set_drvdata(serdev, mrvldev);
- return hci_uart_register_device(&mrvldev->hu, &mrvl_proto);
+ return hci_uart_register_device(&mrvldev->hu, mrvl_proto);
}
static void mrvl_serdev_remove(struct serdev_device *serdev)
@@ -414,13 +485,12 @@ static void mrvl_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&mrvldev->hu);
}
-#ifdef CONFIG_OF
-static const struct of_device_id mrvl_bluetooth_of_match[] = {
- { .compatible = "mrvl,88w8897" },
+static const struct of_device_id __maybe_unused mrvl_bluetooth_of_match[] = {
+ { .compatible = "mrvl,88w8897", .data = &mrvl_proto_8897},
+ { .compatible = "mrvl,88w8997", .data = &mrvl_proto_8997},
{ },
};
MODULE_DEVICE_TABLE(of, mrvl_bluetooth_of_match);
-#endif
static struct serdev_device_driver mrvl_serdev_driver = {
.probe = mrvl_serdev_probe,
@@ -435,12 +505,12 @@ int __init mrvl_init(void)
{
serdev_device_driver_register(&mrvl_serdev_driver);
- return hci_uart_register_proto(&mrvl_proto);
+ return hci_uart_register_proto(&mrvl_proto_8897);
}
int __exit mrvl_deinit(void)
{
serdev_device_driver_unregister(&mrvl_serdev_driver);
- return hci_uart_unregister_proto(&mrvl_proto);
+ return hci_uart_unregister_proto(&mrvl_proto_8897);
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 6eddc23e49d9..1b064504b388 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -128,13 +128,13 @@ struct qca_memdump_event_hdr {
__u8 evt;
__u8 plen;
__u16 opcode;
- __u16 seq_no;
+ __le16 seq_no;
__u8 reserved;
} __packed;
struct qca_dump_size {
- u32 dump_size;
+ __le32 dump_size;
} __packed;
struct qca_data {
@@ -1317,7 +1317,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
/* Give the controller time to process the request */
if (qca_is_wcn399x(qca_soc_type(hu)) ||
- qca_is_wcn6750(qca_soc_type(hu)))
+ qca_is_wcn6750(qca_soc_type(hu)) ||
+ qca_is_wcn6855(qca_soc_type(hu)))
usleep_range(1000, 10000);
else
msleep(300);
@@ -1394,7 +1395,8 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
if (qca_is_wcn399x(qca_soc_type(hu)) ||
- qca_is_wcn6750(qca_soc_type(hu))) {
+ qca_is_wcn6750(qca_soc_type(hu)) ||
+ qca_is_wcn6855(qca_soc_type(hu))) {
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
@@ -1428,7 +1430,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
* changing the baudrate of chip and host.
*/
if (qca_is_wcn399x(soc_type) ||
- qca_is_wcn6750(soc_type))
+ qca_is_wcn6750(soc_type) ||
+ qca_is_wcn6855(soc_type))
hci_uart_set_flow_control(hu, true);
if (soc_type == QCA_WCN3990) {
@@ -1446,7 +1449,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
error:
if (qca_is_wcn399x(soc_type) ||
- qca_is_wcn6750(soc_type))
+ qca_is_wcn6750(soc_type) ||
+ qca_is_wcn6855(soc_type))
hci_uart_set_flow_control(hu, false);
if (soc_type == QCA_WCN3990) {
@@ -1588,10 +1592,11 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
- /* UART driver handles the interrupt from BT SoC.So we need to use
- * device handle of UART driver to get the status of device may wakeup.
+ /* A BT SoC attached through the serial bus is handled by the serdev driver.
+ * So we need to use the device handle of the serdev driver to get the
+ * wakeup status of the device.
*/
- wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
+ wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
return wakeup;
@@ -1681,7 +1686,8 @@ static int qca_power_on(struct hci_dev *hdev)
return 0;
if (qca_is_wcn399x(soc_type) ||
- qca_is_wcn6750(soc_type)) {
+ qca_is_wcn6750(soc_type) ||
+ qca_is_wcn6855(soc_type)) {
ret = qca_regulator_init(hu);
} else {
qcadev = serdev_device_get_drvdata(hu->serdev);
@@ -1722,7 +1728,8 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "setting up %s",
qca_is_wcn399x(soc_type) ? "wcn399x" :
- (soc_type == QCA_WCN6750) ? "wcn6750" : "ROME/QCA6390");
+ (soc_type == QCA_WCN6750) ? "wcn6750" :
+ (soc_type == QCA_WCN6855) ? "wcn6855" : "ROME/QCA6390");
qca->memdump_state = QCA_MEMDUMP_IDLE;
@@ -1734,7 +1741,8 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
if (qca_is_wcn399x(soc_type) ||
- qca_is_wcn6750(soc_type)) {
+ qca_is_wcn6750(soc_type) ||
+ qca_is_wcn6855(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
hci_set_aosp_capable(hdev);
@@ -1756,7 +1764,8 @@ retry:
}
if (!(qca_is_wcn399x(soc_type) ||
- qca_is_wcn6750(soc_type))) {
+ qca_is_wcn6750(soc_type) ||
+ qca_is_wcn6855(soc_type))) {
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1826,7 +1835,7 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
-static const struct qca_device_data qca_soc_data_wcn3990 = {
+static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1837,7 +1846,7 @@ static const struct qca_device_data qca_soc_data_wcn3990 = {
.num_vregs = 4,
};
-static const struct qca_device_data qca_soc_data_wcn3991 = {
+static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = {
.soc_type = QCA_WCN3991,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1849,7 +1858,7 @@ static const struct qca_device_data qca_soc_data_wcn3991 = {
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
-static const struct qca_device_data qca_soc_data_wcn3998 = {
+static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
{ "vddio", 10000 },
@@ -1860,12 +1869,12 @@ static const struct qca_device_data qca_soc_data_wcn3998 = {
.num_vregs = 4,
};
-static const struct qca_device_data qca_soc_data_qca6390 = {
+static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
.soc_type = QCA_QCA6390,
.num_vregs = 0,
};
-static const struct qca_device_data qca_soc_data_wcn6750 = {
+static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = {
.soc_type = QCA_WCN6750,
.vregs = (struct qca_vreg []) {
{ "vddio", 5000 },
@@ -1882,6 +1891,20 @@ static const struct qca_device_data qca_soc_data_wcn6750 = {
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
+static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = {
+ .soc_type = QCA_WCN6855,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 5000 },
+ { "vddbtcxmx", 126000 },
+ { "vddrfacmn", 12500 },
+ { "vddrfa0p8", 102000 },
+ { "vddrfa1p7", 302000 },
+ { "vddrfa1p2", 257000 },
+ },
+ .num_vregs = 6,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1911,7 +1934,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
qca_regulator_disable(qcadev);
- } else if (soc_type == QCA_WCN6750) {
+ } else if (soc_type == QCA_WCN6750 || soc_type == QCA_WCN6855) {
gpiod_set_value_cansleep(qcadev->bt_en, 0);
msleep(100);
qca_regulator_disable(qcadev);
@@ -2046,7 +2069,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (data &&
(qca_is_wcn399x(data->soc_type) ||
- qca_is_wcn6750(data->soc_type))) {
+ qca_is_wcn6750(data->soc_type) ||
+ qca_is_wcn6855(data->soc_type))) {
qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
@@ -2066,14 +2090,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) {
+ if (IS_ERR_OR_NULL(qcadev->bt_en) &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
power_ctrl_enabled = false;
}
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750)
+ if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855))
dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
@@ -2149,8 +2177,9 @@ static void qca_serdev_remove(struct serdev_device *serdev)
struct qca_power *power = qcadev->bt_power;
if ((qca_is_wcn399x(qcadev->btsoc_type) ||
- qca_is_wcn6750(qcadev->btsoc_type)) &&
- power->vregs_on)
+ qca_is_wcn6750(qcadev->btsoc_type) ||
+ qca_is_wcn6855(qcadev->btsoc_type)) &&
+ power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
@@ -2164,10 +2193,17 @@ static void qca_serdev_shutdown(struct device *dev)
int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
struct serdev_device *serdev = to_serdev_device(dev);
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
+ struct hci_dev *hdev = hu->hdev;
+ struct qca_data *qca = hu->priv;
const u8 ibs_wake_cmd[] = { 0xFD };
const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
if (qcadev->btsoc_type == QCA_QCA6390) {
+ if (test_bit(QCA_BT_OFF, &qca->flags) ||
+ !test_bit(HCI_RUNNING, &hdev->flags))
+ return;
+
serdev_device_write_flush(serdev);
ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
sizeof(ibs_wake_cmd));
@@ -2327,6 +2363,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750},
+ { .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index c443c3b0a4da..40e2b9fa11a2 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -278,6 +278,104 @@ static int vhci_setup(struct hci_dev *hdev)
return 0;
}
+static void vhci_coredump(struct hci_dev *hdev)
+{
+ /* No need to do anything */
+}
+
+static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ char buf[80];
+
+ snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor: vhci\n");
+ skb_put_data(skb, buf, strlen(buf));
+}
+
+#define MAX_COREDUMP_LINE_LEN 40
+
+struct devcoredump_test_data {
+ enum devcoredump_state state;
+ unsigned int timeout;
+ char data[MAX_COREDUMP_LINE_LEN];
+};
+
+static inline void force_devcd_timeout(struct hci_dev *hdev,
+ unsigned int timeout)
+{
+#ifdef CONFIG_DEV_COREDUMP
+ hdev->dump.timeout = msecs_to_jiffies(timeout * 1000);
+#endif
+}
+
+static ssize_t force_devcd_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ struct hci_dev *hdev = data->hdev;
+ struct sk_buff *skb = NULL;
+ struct devcoredump_test_data dump_data;
+ size_t data_size;
+ int ret;
+
+ if (count < offsetof(struct devcoredump_test_data, data) ||
+ count > sizeof(dump_data))
+ return -EINVAL;
+
+ if (copy_from_user(&dump_data, user_buf, count))
+ return -EFAULT;
+
+ data_size = count - offsetof(struct devcoredump_test_data, data);
+ skb = alloc_skb(data_size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ skb_put_data(skb, &dump_data.data, data_size);
+
+ hci_devcd_register(hdev, vhci_coredump, vhci_coredump_hdr, NULL);
+
+ /* Force the devcoredump timeout */
+ if (dump_data.timeout)
+ force_devcd_timeout(hdev, dump_data.timeout);
+
+ ret = hci_devcd_init(hdev, skb->len);
+ if (ret) {
+ BT_ERR("Failed to generate devcoredump");
+ kfree_skb(skb);
+ return ret;
+ }
+
+ hci_devcd_append(hdev, skb);
+
+ switch (dump_data.state) {
+ case HCI_DEVCOREDUMP_DONE:
+ hci_devcd_complete(hdev);
+ break;
+ case HCI_DEVCOREDUMP_ABORT:
+ hci_devcd_abort(hdev);
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ /* Do nothing */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations force_devcoredump_fops = {
+ .open = simple_open,
+ .write = force_devcd_write,
+};
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -355,6 +453,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
&aosp_capable_fops);
+ debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
+ &force_devcoredump_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
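Note on the new debugfs hook: a write to force_devcoredump is parsed as a devcoredump_test_data record and replayed through hci_devcd_init()/hci_devcd_append(), then completed, aborted or left to time out according to the requested state, which makes it a self-contained way to exercise the HCI devcoredump core from a virtual controller. A minimal userspace sketch follows; the struct layout mirrors devcoredump_test_data above, while the debugfs path and the numeric enum devcoredump_state value are assumptions for illustration only.

/* Hypothetical test client for the vhci force_devcoredump file.
 * Layout mirrors devcoredump_test_data; the state value and the
 * hci0 debugfs path are assumptions, not a stable ABI.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct devcd_req {
	unsigned int state;	/* enum devcoredump_state value */
	unsigned int timeout;	/* seconds; 0 keeps the driver default */
	char data[40];		/* payload, MAX_COREDUMP_LINE_LEN bytes */
};

int main(void)
{
	struct devcd_req req = { .state = 2 /* assumed HCI_DEVCOREDUMP_DONE */ };
	int fd;

	strncpy(req.data, "vhci test dump line\n", sizeof(req.data) - 1);
	fd = open("/sys/kernel/debug/bluetooth/hci0/force_devcoredump", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing the whole struct triggers init/append/complete in the driver */
	if (write(fd, &req, sizeof(req)) < 0)
		perror("write");
	close(fd);
	return 0;
}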
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7bfe998f3514..fcfa280df98a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -81,7 +81,7 @@ config MOXTET
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC)
- depends on HAS_IOMEM
+ depends on HAS_IOPORT
select INDIRECT_PIO if ARM64
help
Driver to enable I/O access to devices attached to the Low Pin
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 2344d560b144..b715c8ab36e8 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -126,4 +126,3 @@ static struct platform_driver integrator_ap_lm_driver = {
module_platform_driver(integrator_ap_lm_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Integrator AP Logical Module driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index b0c3704777e9..b6dfe4340da2 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -401,12 +401,10 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
struct device_node *dn = pdev->dev.of_node;
struct brcmstb_gisb_arb_device *gdev;
const struct of_device_id *of_id;
- struct resource *r;
int err, timeout_irq, tea_irq, bp_irq;
unsigned int num_masters, j = 0;
int i, first, last;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
timeout_irq = platform_get_irq(pdev, 0);
tea_irq = platform_get_irq(pdev, 1);
bp_irq = platform_get_irq(pdev, 2);
@@ -418,7 +416,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
mutex_init(&gdev->lock);
INIT_LIST_HEAD(&gdev->next);
- gdev->base = devm_ioremap_resource(&pdev->dev, r);
+ gdev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(gdev->base))
return PTR_ERR(gdev->base);
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index 63b1b4a76671..e97c1d1c7578 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -416,4 +416,3 @@ module_platform_driver(bt1_apb_driver);
MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index 70e49a6e5374..4007e7322cf2 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -309,4 +309,3 @@ module_platform_driver(bt1_axi_driver);
MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 774f307844b4..653e2d4c116f 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -124,9 +124,9 @@ out:
/*
* fsl_mc_bus_uevent - callback invoked when a device is added
*/
-static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
mc_dev->obj_desc.vendor,
@@ -231,7 +231,7 @@ exit:
return 0;
}
-static ssize_t rescan_store(struct bus_type *bus,
+static ssize_t rescan_store(const struct bus_type *bus,
const char *buf, size_t count)
{
unsigned long val;
@@ -284,7 +284,7 @@ exit:
return 0;
}
-static ssize_t autorescan_store(struct bus_type *bus,
+static ssize_t autorescan_store(const struct bus_type *bus,
const char *buf, size_t count)
{
bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
@@ -292,7 +292,7 @@ static ssize_t autorescan_store(struct bus_type *bus,
return count;
}
-static ssize_t autorescan_show(struct bus_type *bus, char *buf)
+static ssize_t autorescan_show(const struct bus_type *bus, char *buf)
{
bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
return strlen(buf);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 828c66bbaa67..52a5d0447390 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -86,8 +87,8 @@ MODULE_DEVICE_TABLE(of, weim_id_table);
static int imx_weim_gpr_setup(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct property *prop;
- const __be32 *p;
+ struct of_range_parser parser;
+ struct of_range range;
struct regmap *gpr;
u32 gprvals[4] = {
05, /* CS0(128M) CS1(0M) CS2(0M) CS3(0M) */
@@ -106,13 +107,13 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
return 0;
}
- of_property_for_each_u32(np, "ranges", prop, p, val) {
- if (i % 4 == 0) {
- cs = val;
- } else if (i % 4 == 3 && val) {
- val = (val / SZ_32M) | 1;
- gprval |= val << cs * 3;
- }
+ if (of_range_parser_init(&parser, np))
+ goto err;
+
+ for_each_of_range(&parser, &range) {
+ cs = range.bus_addr >> 32;
+ val = (range.size / SZ_32M) | 1;
+ gprval |= val << cs * 3;
i++;
}
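As a worked example of the new loop (arithmetic derived only from the code above, not taken from the patch description): a ranges entry describing a 128 MiB window on chip select 0 gives

  cs      = range.bus_addr >> 32        = 0
  val     = (range.size / SZ_32M) | 1   = (0x8000000 / 0x2000000) | 1 = 5
  gprval |= val << cs * 3               = 5

i.e. each chip select owns a 3-bit field in the GPR value, and the computed 5 reproduces the documented default gprvals[0] = 05 for "CS0(128M)".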
@@ -204,8 +205,8 @@ static int weim_parse_dt(struct platform_device *pdev)
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
+ int ret = 0, have_child = 0;
struct device_node *child;
- int ret, have_child = 0;
struct weim_priv *priv;
void __iomem *base;
u32 reg;
@@ -263,7 +264,6 @@ static int weim_parse_dt(struct platform_device *pdev)
static int weim_probe(struct platform_device *pdev)
{
struct weim_priv *priv;
- struct resource *res;
struct clk *clk;
void __iomem *base;
int ret;
@@ -273,8 +273,7 @@ static int weim_probe(struct platform_device *pdev)
return -ENOMEM;
/* get the resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -331,6 +330,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
"Failed to setup timing for '%pOF'\n", rd->dn);
if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+ /*
+ * Clear the flag before adding the device so that
+ * fw_devlink doesn't skip adding consumers to this
+ * device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
index 91db001eb69a..f5ba6bee6fd8 100644
--- a/drivers/bus/intel-ixp4xx-eb.c
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -423,4 +423,3 @@ static struct platform_driver ixp4xx_exp_driver = {
module_platform_driver(ixp4xx_exp_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 46981331b38f..354204b0ef3a 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,5 +1,5 @@
# Host MHI stack
-obj-y += host/
+obj-$(CONFIG_MHI_BUS) += host/
# Endpoint MHI stack
-obj-y += ep/
+obj-$(CONFIG_MHI_BUS_EP) += ep/
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 1dc8a3557a46..600881808982 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -123,6 +123,13 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
int ret;
ch_id = MHI_TRE_GET_CMD_CHID(el);
+
+ /* Check if the channel is supported by the controller */
+ if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
+ dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
+ return -ENODEV;
+ }
+
mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
@@ -196,9 +203,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
/* Send channel disconnect status to client drivers */
- result.transaction_status = -ENOTCONN;
- result.bytes_xferd = 0;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
/* Set channel state to STOP */
mhi_chan->state = MHI_CH_STATE_STOP;
@@ -217,7 +226,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mutex_unlock(&mhi_chan->lock);
break;
case MHI_PKT_TYPE_RESET_CHAN_CMD:
- dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
if (!ch_ring->started) {
dev_err(dev, "Channel (%u) not opened\n", ch_id);
return -ENODEV;
@@ -228,9 +237,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mhi_ep_ring_reset(mhi_cntrl, ch_ring);
/* Send channel disconnect status to client driver */
- result.transaction_status = -ENOTCONN;
- result.bytes_xferd = 0;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
/* Set channel state to DISABLED */
mhi_chan->state = MHI_CH_STATE_DISABLED;
@@ -691,7 +702,7 @@ static void mhi_ep_cmd_ring_worker(struct work_struct *work)
el = &ring->ring_cache[ring->rd_offset];
ret = mhi_ep_process_cmd_ring(ring, el);
- if (ret)
+ if (ret && ret != -ENODEV)
dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
mhi_ep_ring_inc_index(ring);
@@ -719,24 +730,37 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
list_del(&itr->node);
ring = itr->ring;
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ mutex_lock(&chan->lock);
+
+ /*
+ * The ring could've stopped while we waited to grab the (chan->lock), so do
+ * a sanity check before going further.
+ */
+ if (!ring->started) {
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ continue;
+ }
+
/* Update the write offset for the ring */
ret = mhi_ep_update_wr_offset(ring);
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
+ mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
/* Sanity check to make sure there are elements in the ring */
if (ring->rd_offset == ring->wr_offset) {
+ mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
el = &ring->ring_cache[ring->rd_offset];
- chan = &mhi_cntrl->mhi_chan[ring->ch_id];
- mutex_lock(&chan->lock);
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
ret = mhi_ep_process_ch_ring(ring, el);
if (ret) {
@@ -973,44 +997,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
static void mhi_ep_reset_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state cur_state;
- int ret;
- mhi_ep_abort_transfer(mhi_cntrl);
+ mhi_ep_power_down(mhi_cntrl);
+
+ mutex_lock(&mhi_cntrl->state_lock);
- spin_lock_bh(&mhi_cntrl->state_lock);
/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
mhi_ep_mmio_reset(mhi_cntrl);
cur_state = mhi_cntrl->mhi_state;
- spin_unlock_bh(&mhi_cntrl->state_lock);
/*
* Only proceed further if the reset is due to SYS_ERR. The host will
* issue reset during shutdown also and we don't need to do re-init in
* that case.
*/
- if (cur_state == MHI_STATE_SYS_ERR) {
- mhi_ep_mmio_init(mhi_cntrl);
-
- /* Set AMSS EE before signaling ready state */
- mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
-
- /* All set, notify the host that we are ready */
- ret = mhi_ep_set_ready_state(mhi_cntrl);
- if (ret)
- return;
-
- dev_dbg(dev, "READY state notification sent to the host\n");
-
- ret = mhi_ep_enable(mhi_cntrl);
- if (ret) {
- dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
- return;
- }
+ if (cur_state == MHI_STATE_SYS_ERR)
+ mhi_ep_power_up(mhi_cntrl);
- enable_irq(mhi_cntrl->irq);
- }
+ mutex_unlock(&mhi_cntrl->state_lock);
}
/*
@@ -1089,11 +1094,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
- if (mhi_cntrl->enabled)
+ if (mhi_cntrl->enabled) {
mhi_ep_abort_transfer(mhi_cntrl);
-
- kfree(mhi_cntrl->mhi_event);
- disable_irq(mhi_cntrl->irq);
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+ }
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);
@@ -1119,6 +1124,7 @@ void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
/* Set channel state to SUSPENDED */
+ mhi_chan->state = MHI_CH_STATE_SUSPENDED;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
@@ -1148,6 +1154,7 @@ void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
/* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
@@ -1381,8 +1388,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
- spin_lock_init(&mhi_cntrl->state_lock);
spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->state_lock);
mutex_init(&mhi_cntrl->event_lock);
/* Set MHI version and AMSS EE before enumeration */
@@ -1543,9 +1550,9 @@ void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
-static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
mhi_dev->name);
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
index 3655c19e23c7..fd200b2ac0bb 100644
--- a/drivers/bus/mhi/ep/sm.c
+++ b/drivers/bus/mhi/ep/sm.c
@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
int ret;
/* If MHI is in M3, resume suspended channels */
- spin_lock_bh(&mhi_cntrl->state_lock);
+ mutex_lock(&mhi_cntrl->state_lock);
+
old_state = mhi_cntrl->mhi_state;
if (old_state == MHI_STATE_M3)
mhi_ep_resume_channels(mhi_cntrl);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
- spin_unlock_bh(&mhi_cntrl->state_lock);
-
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
- return ret;
+ goto err_unlock;
}
/* Signal host that the device moved to M0 */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
if (ret) {
dev_err(dev, "Failed sending M0 state change event\n");
- return ret;
+ goto err_unlock;
}
if (old_state == MHI_STATE_READY) {
@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
if (ret) {
dev_err(dev, "Failed sending AMSS EE event\n");
- return ret;
+ goto err_unlock;
}
}
- return 0;
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
}
int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
- spin_lock_bh(&mhi_cntrl->state_lock);
- ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
- spin_unlock_bh(&mhi_cntrl->state_lock);
+ mutex_lock(&mhi_cntrl->state_lock);
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
- return ret;
+ goto err_unlock;
}
mhi_ep_suspend_channels(mhi_cntrl);
@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
if (ret) {
dev_err(dev, "Failed sending M3 state change event\n");
- return ret;
+ goto err_unlock;
}
- return 0;
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
}
int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
enum mhi_state mhi_state;
int ret, is_ready;
- spin_lock_bh(&mhi_cntrl->state_lock);
+ mutex_lock(&mhi_cntrl->state_lock);
+
/* Ensure that the MHISTATUS is set to RESET by host */
mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
if (mhi_state != MHI_STATE_RESET || is_ready) {
dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
- spin_unlock_bh(&mhi_cntrl->state_lock);
- return -EIO;
+ ret = -EIO;
+ goto err_unlock;
}
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
- spin_unlock_bh(&mhi_cntrl->state_lock);
-
if (ret)
mhi_ep_handle_syserr(mhi_cntrl);
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
return ret;
}
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index 1c69feee1703..d2a19b07ccb8 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -391,6 +391,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
const char *fw_name;
void *buf;
dma_addr_t dma_addr;
@@ -508,14 +509,18 @@ error_ready_state:
}
error_fw_load:
- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
- wake_up_all(&mhi_cntrl->state_event);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_FW_DL_ERR)
+ wake_up_all(&mhi_cntrl->state_event);
}
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
{
struct image_info *image_info = mhi_cntrl->fbc_image;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
int ret;
if (!image_info)
@@ -526,8 +531,11 @@ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
&image_info->mhi_buf[image_info->entries - 1]);
if (ret) {
dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
- wake_up_all(&mhi_cntrl->state_event);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_FW_DL_ERR)
+ wake_up_all(&mhi_cntrl->state_event);
}
return ret;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index bf672de35131..f72fcb66f408 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -516,6 +516,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
return -EIO;
}
+ if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
+ dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
+ return -ERANGE;
+ }
+
/* Setup wake db */
mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
mhi_cntrl->wake_set = false;
@@ -532,6 +538,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
return -EIO;
}
+ if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
+ dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
+ return -ERANGE;
+ }
+
/* Setup event db address for each ev_ring */
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
@@ -1100,7 +1112,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
if (bhi_off >= mhi_cntrl->reg_len) {
dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
bhi_off, mhi_cntrl->reg_len);
- ret = -EINVAL;
+ ret = -ERANGE;
goto error_reg_offset;
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
@@ -1117,7 +1129,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
dev_err(dev,
"BHIe offset: 0x%x is out of range: 0x%zx\n",
bhie_off, mhi_cntrl->reg_len);
- ret = -EINVAL;
+ ret = -ERANGE;
goto error_reg_offset;
}
mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
@@ -1395,9 +1407,9 @@ void mhi_driver_unregister(struct mhi_driver *mhi_drv)
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);
-static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mhi_device *mhi_dev = to_mhi_device(dev);
+ const struct mhi_device *mhi_dev = to_mhi_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
mhi_dev->name);
@@ -1449,4 +1461,4 @@ postcore_initcall(mhi_init);
module_exit(mhi_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MHI Host Interface");
+MODULE_DESCRIPTION("Modem Host Interface");
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index df0fbfee7b78..74a75439c713 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -503,7 +503,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+ if (pm_state != MHI_PM_SYS_ERR_DETECT)
goto exit_intvec;
switch (ee) {
@@ -961,7 +961,9 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
}
read_lock_bh(&mhi_cntrl->pm_lock);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+
+ /* Ring EV DB only if there is any pending element to process */
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
@@ -1031,7 +1033,9 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
count++;
}
read_lock_bh(&mhi_cntrl->pm_lock);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+
+ /* Ring EV DB only if there is any pending element to process */
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
@@ -1679,18 +1683,3 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
-
-int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
-{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
- struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
- int ret;
-
- spin_lock_bh(&mhi_event->lock);
- ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
- spin_unlock_bh(&mhi_event->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(mhi_poll);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index f39657f71483..db0a0b062d8e 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -8,7 +8,6 @@
* Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
*/
-#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mhi.h>
@@ -344,8 +343,6 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
- MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
- MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -366,6 +363,15 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.event_cfg = mhi_foxconn_sdx55_events,
};
+static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
+ .name = "foxconn-sdx24",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.name = "foxconn-sdx55",
.fw = "qcom/sdx55m/sbl1.mbn",
@@ -590,6 +596,15 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W373 (sdx62) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* T99W510 (sdx24), variant 1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ /* T99W510 (sdx24), variant 2 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ /* T99W510 (sdx24), variant 3 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -903,11 +918,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_pdev->pci_state = pci_store_saved_state(pdev);
pci_load_saved_state(pdev, NULL);
- pci_enable_pcie_error_reporting(pdev);
-
err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
if (err)
- goto err_disable_reporting;
+ return err;
/* MHI bus does not power up the controller by default */
err = mhi_prepare_for_power_up(mhi_cntrl);
@@ -941,8 +954,6 @@ err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
mhi_unregister_controller(mhi_cntrl);
-err_disable_reporting:
- pci_disable_pcie_error_reporting(pdev);
return err;
}
@@ -965,7 +976,6 @@ static void mhi_pci_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
mhi_unregister_controller(mhi_cntrl);
- pci_disable_pcie_error_reporting(pdev);
}
static void mhi_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index fca0d0669aa9..554e1992edd4 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -67,9 +67,9 @@ static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
}
-static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mips_cdmm_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ const struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
int retval = 0;
retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index d51573ac525e..00cb792bda18 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1179,74 +1179,32 @@ static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
return 0;
}
-static int __init
-mbus_parse_ranges(struct device_node *node,
- int *addr_cells, int *c_addr_cells, int *c_size_cells,
- int *cell_count, const __be32 **ranges_start,
- const __be32 **ranges_end)
-{
- const __be32 *prop;
- int ranges_len, tuple_len;
-
- /* Allow a node with no 'ranges' property */
- *ranges_start = of_get_property(node, "ranges", &ranges_len);
- if (*ranges_start == NULL) {
- *addr_cells = *c_addr_cells = *c_size_cells = *cell_count = 0;
- *ranges_start = *ranges_end = NULL;
- return 0;
- }
- *ranges_end = *ranges_start + ranges_len / sizeof(__be32);
-
- *addr_cells = of_n_addr_cells(node);
-
- prop = of_get_property(node, "#address-cells", NULL);
- *c_addr_cells = be32_to_cpup(prop);
-
- prop = of_get_property(node, "#size-cells", NULL);
- *c_size_cells = be32_to_cpup(prop);
-
- *cell_count = *addr_cells + *c_addr_cells + *c_size_cells;
- tuple_len = (*cell_count) * sizeof(__be32);
-
- if (ranges_len % tuple_len) {
- pr_warn("malformed ranges entry '%pOFn'\n", node);
- return -EINVAL;
- }
- return 0;
-}
-
static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
struct device_node *np)
{
- int addr_cells, c_addr_cells, c_size_cells;
- int i, ret, cell_count;
- const __be32 *r, *ranges_start, *ranges_end;
+ int ret;
+ struct of_range_parser parser;
+ struct of_range range;
- ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells,
- &c_size_cells, &cell_count,
- &ranges_start, &ranges_end);
+ ret = of_range_parser_init(&parser, np);
if (ret < 0)
- return ret;
+ return 0;
- for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) {
- u32 windowid, base, size;
+ for_each_of_range(&parser, &range) {
+ u32 windowid = upper_32_bits(range.bus_addr);
u8 target, attr;
/*
* An entry with a non-zero custom field does not
* correspond to a static window, so skip it.
*/
- windowid = of_read_number(r, 1);
if (CUSTOM(windowid))
continue;
target = TARGET(windowid);
attr = ATTR(windowid);
- base = of_read_number(r + c_addr_cells, addr_cells);
- size = of_read_number(r + c_addr_cells + addr_cells,
- c_size_cells);
- ret = mbus_dt_setup_win(mbus, base, size, target, attr);
+ ret = mbus_dt_setup_win(mbus, range.cpu_addr, range.size, target, attr);
if (ret < 0)
return ret;
}
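Several conversions in this series (mvebu-mbus above, ti-sysc and uniphier-system-bus further down) replace open-coded "ranges" walking with the generic of_range parser. A minimal sketch of that pattern, for orientation only (the function name and the printout are illustrative, not part of any of the patched drivers):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

/* Walk every "ranges" entry of @np with the generic parser instead of
 * hand-rolling the #address-cells/#size-cells arithmetic.
 */
static int example_walk_ranges(struct device_node *np)
{
	struct of_range_parser parser;
	struct of_range range;
	int err;

	err = of_range_parser_init(&parser, np);
	if (err)
		return err;	/* node has no usable "ranges" property */

	for_each_of_range(&parser, &range) {
		/* bus_addr: child-side address folded into a u64,
		 * cpu_addr: translated parent/CPU address, size: window length.
		 */
		pr_info("bus %#llx -> cpu %#llx, %llu bytes\n",
			(unsigned long long)range.bus_addr,
			(unsigned long long)range.cpu_addr,
			(unsigned long long)range.size);
	}

	return 0;
}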
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index 663c82749222..c1fef1b4bd89 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -403,4 +403,3 @@ static struct platform_driver qcom_ebi2_driver = {
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index eedeb29a5ff3..3fef18a43c01 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -386,4 +386,3 @@ module_platform_driver(qcom_ssc_block_bus_driver);
MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 6b8d6257ed8a..4da77ca7b75a 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Simple Power-Managed Bus Driver
*
@@ -8,17 +9,24 @@
* for more details.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+struct simple_pm_bus {
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
static int simple_pm_bus_probe(struct platform_device *pdev)
{
const struct device *dev = &pdev->dev;
const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
struct device_node *np = dev->of_node;
const struct of_device_id *match;
+ struct simple_pm_bus *bus;
/*
* Allow user to use driver_override to bind this driver to a
@@ -44,6 +52,16 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
return -ENODEV;
}
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->num_clks = devm_clk_bulk_get_all(&pdev->dev, &bus->clks);
+ if (bus->num_clks < 0)
+ return dev_err_probe(&pdev->dev, bus->num_clks, "failed to get clocks\n");
+
+ dev_set_drvdata(&pdev->dev, bus);
+
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_enable(&pdev->dev);
@@ -67,6 +85,34 @@ static int simple_pm_bus_remove(struct platform_device *pdev)
return 0;
}
+static int simple_pm_bus_runtime_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(bus->num_clks, bus->clks);
+
+ return 0;
+}
+
+static int simple_pm_bus_runtime_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(bus->num_clks, bus->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops simple_pm_bus_pm_ops = {
+ RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
static const struct of_device_id simple_pm_bus_of_match[] = {
@@ -85,6 +131,7 @@ static struct platform_driver simple_pm_bus_driver = {
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
+ .pm = pm_ptr(&simple_pm_bus_pm_ops),
},
};
@@ -92,4 +139,3 @@ module_platform_driver(simple_pm_bus_driver);
MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 3aa91aed3bf7..696c0aefb0ca 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -172,12 +172,17 @@ static void sunxi_rsb_device_remove(struct device *dev)
drv->remove(to_sunxi_rsb_device(dev));
}
+static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return of_device_uevent_modalias(dev, env);
+}
+
static struct bus_type sunxi_rsb_bus = {
.name = RSB_CTRL_NAME,
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
- .uevent = of_device_uevent_modalias,
+ .uevent = sunxi_rsb_device_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
@@ -857,7 +862,13 @@ static int __init sunxi_rsb_init(void)
return ret;
}
- return platform_driver_register(&sunxi_rsb_driver);
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
}
module_init(sunxi_rsb_init);
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index 662266719682..e3506ef37051 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -9,7 +9,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 6afae9897843..6c49de37d5e9 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -648,43 +648,20 @@ static int sysc_init_resets(struct sysc *ddata)
static int sysc_parse_and_check_child_range(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
- const __be32 *ranges;
- u32 nr_addr, nr_size;
- int len, error;
-
- ranges = of_get_property(np, "ranges", &len);
- if (!ranges) {
- dev_err(ddata->dev, "missing ranges for %pOF\n", np);
-
- return -ENOENT;
- }
-
- len /= sizeof(*ranges);
-
- if (len < 3) {
- dev_err(ddata->dev, "incomplete ranges for %pOF\n", np);
-
- return -EINVAL;
- }
-
- error = of_property_read_u32(np, "#address-cells", &nr_addr);
- if (error)
- return -ENOENT;
+ struct of_range_parser parser;
+ struct of_range range;
+ int error;
- error = of_property_read_u32(np, "#size-cells", &nr_size);
+ error = of_range_parser_init(&parser, np);
if (error)
- return -ENOENT;
-
- if (nr_addr != 1 || nr_size != 1) {
- dev_err(ddata->dev, "invalid ranges for %pOF\n", np);
+ return error;
- return -EINVAL;
+ for_each_of_range(&parser, &range) {
+ ddata->module_pa = range.cpu_addr;
+ ddata->module_size = range.size;
+ break;
}
- ranges++;
- ddata->module_pa = of_translate_address(np, ranges++);
- ddata->module_size = be32_to_cpup(ranges);
-
return 0;
}
@@ -913,7 +890,7 @@ static int sysc_check_registers(struct sysc *ddata)
* within the interconnect target module range. For example, SGX has
* them at offset 0x1fc00 in the 32MB module address space. And cpsw
* has them at offset 0x1200 in the CPSW_WR child. Usually the
- * the interconnect target module registers are at the beginning of
+ * interconnect target module registers are at the beginning of
* the module range though.
*/
static int sysc_ioremap(struct sysc *ddata)
@@ -964,7 +941,7 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
sysc_check_children(ddata);
- if (!of_get_property(np, "reg", NULL))
+ if (!of_property_present(np, "reg"))
return 0;
error = sysc_parse_registers(ddata);
@@ -2530,11 +2507,9 @@ static struct dev_pm_domain sysc_child_pm_domain = {
static void sysc_reinit_modules(struct sysc_soc_info *soc)
{
struct sysc_module *module;
- struct list_head *pos;
struct sysc *ddata;
- list_for_each(pos, &sysc_soc->restored_modules) {
- module = list_entry(pos, struct sysc_module, node);
+ list_for_each_entry(module, &sysc_soc->restored_modules, node) {
ddata = module->ddata;
sysc_reinit_module(ddata, ddata->enabled);
}
@@ -3214,12 +3189,10 @@ static void sysc_cleanup_static_data(void)
static int sysc_check_disabled_devices(struct sysc *ddata)
{
struct sysc_address *disabled_module;
- struct list_head *pos;
int error = 0;
mutex_lock(&sysc_soc->list_lock);
- list_for_each(pos, &sysc_soc->disabled_modules) {
- disabled_module = list_entry(pos, struct sysc_address, node);
+ list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
if (ddata->module_pa == disabled_module->base) {
dev_dbg(ddata->dev, "module disabled for this SoC\n");
error = -ENODEV;
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index f70dedace20b..cb5c89ce7b86 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -176,10 +176,9 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_system_bus_priv *priv;
- const __be32 *ranges;
- u32 cells, addr, size;
- u64 paddr;
- int pna, bank, rlen, rone, ret;
+ struct of_range_parser parser;
+ struct of_range range;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -191,48 +190,17 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
priv->dev = dev;
- pna = of_n_addr_cells(dev->of_node);
-
- ret = of_property_read_u32(dev->of_node, "#address-cells", &cells);
- if (ret) {
- dev_err(dev, "failed to get #address-cells\n");
- return ret;
- }
- if (cells != 2) {
- dev_err(dev, "#address-cells must be 2\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32(dev->of_node, "#size-cells", &cells);
- if (ret) {
- dev_err(dev, "failed to get #size-cells\n");
+ ret = of_range_parser_init(&parser, dev->of_node);
+ if (ret)
return ret;
- }
- if (cells != 1) {
- dev_err(dev, "#size-cells must be 1\n");
- return -EINVAL;
- }
- ranges = of_get_property(dev->of_node, "ranges", &rlen);
- if (!ranges) {
- dev_err(dev, "failed to get ranges property\n");
- return -ENOENT;
- }
-
- rlen /= sizeof(*ranges);
- rone = pna + 2;
-
- for (; rlen >= rone; rlen -= rone) {
- bank = be32_to_cpup(ranges++);
- addr = be32_to_cpup(ranges++);
- paddr = of_translate_address(dev->of_node, ranges);
- if (paddr == OF_BAD_ADDR)
+ for_each_of_range(&parser, &range) {
+ if (range.cpu_addr == OF_BAD_ADDR)
return -EINVAL;
- ranges += pna;
- size = be32_to_cpup(ranges++);
-
- ret = uniphier_system_bus_add_bank(priv, bank, addr,
- paddr, size);
+ ret = uniphier_system_bus_add_bank(priv,
+ upper_32_bits(range.bus_addr),
+ lower_32_bits(range.bus_addr),
+ range.cpu_addr, range.size);
if (ret)
return ret;
}
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index a58ac0c8e282..472a570bd53a 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vexpress.h>
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
new file mode 100644
index 000000000000..a08958485e31
--- /dev/null
+++ b/drivers/cdx/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# CDX bus configuration
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+config CDX_BUS
+ bool "CDX Bus driver"
+ depends on OF && ARM64
+ help
+ Driver to enable the Composable DMA Transfer (CDX) Bus. The CDX bus
+ exposes Fabric devices which use composable DMA IP to the
+ APU. The CDX bus provides a mechanism for scanning and probing
+ of CDX devices. CDX devices are memory mapped on the system bus
+ for embedded CPUs. The CDX bus uses the CDX controller and firmware
+ to scan these CDX devices.
+
+source "drivers/cdx/controller/Kconfig"
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
new file mode 100644
index 000000000000..0324e4914f6e
--- /dev/null
+++ b/drivers/cdx/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for CDX
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+obj-$(CONFIG_CDX_BUS) += cdx.o controller/
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
new file mode 100644
index 000000000000..38511fd36325
--- /dev/null
+++ b/drivers/cdx/cdx.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CDX bus driver.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * Architecture Overview
+ * =====================
+ * CDX is a Hardware Architecture designed for AMD FPGA devices. It
+ * consists of a sophisticated mechanism for interaction between the FPGA,
+ * Firmware and the APUs (Application CPUs).
+ *
+ * Firmware resides on the RPU (Realtime CPUs), which interacts with
+ * the FPGA program manager and the APUs. The RPU provides a memory-mapped
+ * interface (RPU if) which is used to communicate with the APUs.
+ *
+ * The diagram below shows an overview of the CDX architecture:
+ *
+ * +--------------------------------------+
+ * |    Application CPUs (APU)            |
+ * |                                      |
+ * |                    CDX device drivers|
+ * |     Linux OS                |        |
+ * |                        CDX bus       |
+ * |                             |        |
+ * |                     CDX controller   |
+ * |                             |        |
+ * +-----------------------------|--------+
+ *                               | (discover, config,
+ *                               |  reset, rescan)
+ *                               |
+ * +------------------------| RPU if |----+
+ * |                             |        |
+ * |                             V        |
+ * |          Realtime CPUs (RPU)         |
+ * |                                      |
+ * +--------------------------------------+
+ *                       |
+ * +---------------------|----------------+
+ * |        FPGA         |                |
+ * |      +-----------------------+       |
+ * |      |            |          |       |
+ * |  +-------+    +-------+  +-------+   |
+ * |  | dev 1 |    | dev 2 |  | dev 3 |   |
+ * |  +-------+    +-------+  +-------+   |
+ * +--------------------------------------+
+ *
+ * The RPU firmware extracts the device information from the loaded FPGA
+ * image and implements a mechanism that allows the APU drivers to
+ * enumerate such devices (device personality and resource details) via
+ * a dedicated communication channel. RPU mediates operations such as
+ * discover, reset and rescan of the FPGA devices for the APU. This is
+ * done using the memory-mapped interface provided by the RPU to the APU.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/xarray.h>
+#include <linux/cdx/cdx_bus.h>
+#include "cdx.h"
+
+/* Default DMA mask for devices on a CDX bus */
+#define CDX_DEFAULT_DMA_MASK (~0ULL)
+#define MAX_CDX_CONTROLLERS 16
+
+/* CDX controllers registered with the CDX bus */
+static DEFINE_XARRAY_ALLOC(cdx_controllers);
+
+/**
+ * cdx_dev_reset - Reset a CDX device
+ * @dev: CDX device
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int cdx_dev_reset(struct device *dev)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config = {0};
+ struct cdx_driver *cdx_drv;
+ int ret;
+
+ cdx_drv = to_cdx_driver(dev->driver);
+ /* Notify driver that device is being reset */
+ if (cdx_drv && cdx_drv->reset_prepare)
+ cdx_drv->reset_prepare(cdx_dev);
+
+ dev_config.type = CDX_DEV_RESET_CONF;
+ ret = cdx->ops->dev_configure(cdx, cdx_dev->bus_num,
+ cdx_dev->dev_num, &dev_config);
+ if (ret)
+ dev_err(dev, "cdx device reset failed\n");
+
+ /* Notify driver that device reset is complete */
+ if (cdx_drv && cdx_drv->reset_done)
+ cdx_drv->reset_done(cdx_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdx_dev_reset);
+
+/**
+ * cdx_unregister_device - Unregister a CDX device
+ * @dev: CDX device
+ * @data: This is always passed as NULL, and is not used in this API,
+ * but is required here as the bus_for_each_dev() API expects
+ * the passed function (cdx_unregister_device) to have this
+ * as an argument.
+ *
+ * Return: 0 on success.
+ */
+static int cdx_unregister_device(struct device *dev,
+ void *data)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ kfree(cdx_dev->driver_override);
+ cdx_dev->driver_override = NULL;
+ /*
+ * Do not free cdx_dev here as it would be freed in
+ * cdx_device_release() called from within put_device().
+ */
+ device_del(&cdx_dev->dev);
+ put_device(&cdx_dev->dev);
+
+ return 0;
+}
+
+static void cdx_unregister_devices(struct bus_type *bus)
+{
+ /* Unregister all the devices attached to the cdx bus */
+ bus_for_each_dev(bus, NULL, NULL, cdx_unregister_device);
+}
+
+/**
+ * cdx_match_one_device - Tell if a CDX device structure has a matching
+ * CDX device id structure
+ * @id: single CDX device id structure to match
+ * @dev: the CDX device structure to match against
+ *
+ * Return: matching cdx_device_id structure or NULL if there is no match.
+ */
+static inline const struct cdx_device_id *
+cdx_match_one_device(const struct cdx_device_id *id,
+ const struct cdx_device *dev)
+{
+ /* Use vendor ID and device ID for matching */
+ if ((id->vendor == CDX_ANY_ID || id->vendor == dev->vendor) &&
+ (id->device == CDX_ANY_ID || id->device == dev->device))
+ return id;
+ return NULL;
+}
+
+/**
+ * cdx_match_id - See if a CDX device matches a given cdx_id table
+ * @ids: array of CDX device ID structures to search in
+ * @dev: the CDX device structure to match against.
+ *
+ * Used by a driver to check whether a CDX device is in its list of
+ * supported devices. Returns the matching cdx_device_id structure or
+ * NULL if there is no match.
+ *
+ * Return: matching cdx_device_id structure or NULL if there is no match.
+ */
+static inline const struct cdx_device_id *
+cdx_match_id(const struct cdx_device_id *ids, struct cdx_device *dev)
+{
+ if (ids) {
+ while (ids->vendor || ids->device) {
+ if (cdx_match_one_device(ids, dev))
+ return ids;
+ ids++;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * cdx_bus_match - device to driver matching callback
+ * @dev: the cdx device to match against
+ * @drv: the device driver to search for matching cdx device
+ * structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static int cdx_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_driver *cdx_drv = to_cdx_driver(drv);
+ const struct cdx_device_id *found_id = NULL;
+ const struct cdx_device_id *ids;
+
+ ids = cdx_drv->match_id_table;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (cdx_dev->driver_override && strcmp(cdx_dev->driver_override, drv->name))
+ return false;
+
+ found_id = cdx_match_id(ids, cdx_dev);
+ if (!found_id)
+ return false;
+
+ do {
+ /*
+ * In case override_only was set, enforce driver_override
+ * matching.
+ */
+ if (!found_id->override_only)
+ return true;
+ if (cdx_dev->driver_override)
+ return true;
+
+ ids = found_id + 1;
+ found_id = cdx_match_id(ids, cdx_dev);
+ } while (found_id);
+
+ return false;
+}
+
+static int cdx_probe(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ int error;
+
+ error = cdx_drv->probe(cdx_dev);
+ if (error) {
+ dev_err_probe(dev, error, "%s failed\n", __func__);
+ return error;
+ }
+
+ return 0;
+}
+
+static void cdx_remove(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ if (cdx_drv && cdx_drv->remove)
+ cdx_drv->remove(cdx_dev);
+}
+
+static void cdx_shutdown(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ if (cdx_drv && cdx_drv->shutdown)
+ cdx_drv->shutdown(cdx_dev);
+}
+
+static int cdx_dma_configure(struct device *dev)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ u32 input_id = cdx_dev->req_id;
+ int ret;
+
+ ret = of_dma_configure_id(dev, dev->parent->of_node, 0, &input_id);
+ if (ret && ret != -EPROBE_DEFER) {
+ dev_err(dev, "of_dma_configure_id() failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* show configuration fields */
+#define cdx_config_attr(field, format_string) \
+static ssize_t \
+field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct cdx_device *cdx_dev = to_cdx_device(dev); \
+ return sysfs_emit(buf, format_string, cdx_dev->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+cdx_config_attr(vendor, "0x%04x\n");
+cdx_config_attr(device, "0x%04x\n");
+
+static ssize_t remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+
+ if (kstrtobool(buf, &val) < 0)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ if (device_remove_file_self(dev, attr)) {
+ int ret;
+
+ ret = cdx_unregister_device(dev, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(remove);
+
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ if (kstrtobool(buf, &val) < 0)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = cdx_dev_reset(dev);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ int ret;
+
+ if (WARN_ON(dev->bus != &cdx_bus_type))
+ return -EINVAL;
+
+ ret = driver_set_override(dev, &cdx_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *cdx_dev_attrs[] = {
+ &dev_attr_remove.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cdx_dev);
+
+static ssize_t rescan_store(const struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct cdx_controller *cdx;
+ unsigned long index;
+ bool val;
+
+ if (kstrtobool(buf, &val) < 0)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ /* Unregister all the devices on the bus */
+ cdx_unregister_devices(&cdx_bus_type);
+
+ /* Rescan all the devices */
+ xa_for_each(&cdx_controllers, index, cdx) {
+ int ret;
+
+ ret = cdx->ops->scan(cdx);
+ if (ret)
+ dev_err(cdx->dev, "cdx bus scanning failed\n");
+ }
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static struct attribute *cdx_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cdx_bus);
+
+struct bus_type cdx_bus_type = {
+ .name = "cdx",
+ .match = cdx_bus_match,
+ .probe = cdx_probe,
+ .remove = cdx_remove,
+ .shutdown = cdx_shutdown,
+ .dma_configure = cdx_dma_configure,
+ .bus_groups = cdx_bus_groups,
+ .dev_groups = cdx_dev_groups,
+};
+EXPORT_SYMBOL_GPL(cdx_bus_type);
+
+int __cdx_driver_register(struct cdx_driver *cdx_driver,
+ struct module *owner)
+{
+ int error;
+
+ cdx_driver->driver.owner = owner;
+ cdx_driver->driver.bus = &cdx_bus_type;
+
+ error = driver_register(&cdx_driver->driver);
+ if (error) {
+ pr_err("driver_register() failed for %s: %d\n",
+ cdx_driver->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cdx_driver_register);
+
+void cdx_driver_unregister(struct cdx_driver *cdx_driver)
+{
+ driver_unregister(&cdx_driver->driver);
+}
+EXPORT_SYMBOL_GPL(cdx_driver_unregister);
+
+static void cdx_device_release(struct device *dev)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ kfree(cdx_dev);
+}
+
+int cdx_device_add(struct cdx_dev_params *dev_params)
+{
+ struct cdx_controller *cdx = dev_params->cdx;
+ struct device *parent = cdx->dev;
+ struct cdx_device *cdx_dev;
+ int ret;
+
+ cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL);
+ if (!cdx_dev)
+ return -ENOMEM;
+
+ /* Populate resource */
+ memcpy(cdx_dev->res, dev_params->res, sizeof(struct resource) *
+ dev_params->res_count);
+ cdx_dev->res_count = dev_params->res_count;
+
+ /* Populate CDX dev params */
+ cdx_dev->req_id = dev_params->req_id;
+ cdx_dev->vendor = dev_params->vendor;
+ cdx_dev->device = dev_params->device;
+ cdx_dev->bus_num = dev_params->bus_num;
+ cdx_dev->dev_num = dev_params->dev_num;
+ cdx_dev->cdx = dev_params->cdx;
+ cdx_dev->dma_mask = CDX_DEFAULT_DMA_MASK;
+
+ /* Initialize generic device */
+ device_initialize(&cdx_dev->dev);
+ cdx_dev->dev.parent = parent;
+ cdx_dev->dev.bus = &cdx_bus_type;
+ cdx_dev->dev.dma_mask = &cdx_dev->dma_mask;
+ cdx_dev->dev.release = cdx_device_release;
+
+ /* Set Name */
+ dev_set_name(&cdx_dev->dev, "cdx-%02x:%02x",
+ ((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
+ cdx_dev->dev_num);
+
+ ret = device_add(&cdx_dev->dev);
+ if (ret) {
+ dev_err(&cdx_dev->dev,
+ "cdx device add failed: %d", ret);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ /*
+ * Do not free cdx_dev here as it would be freed in
+ * cdx_device_release() called from put_device().
+ */
+ put_device(&cdx_dev->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdx_device_add);
+
+int cdx_register_controller(struct cdx_controller *cdx)
+{
+ int ret;
+
+ ret = xa_alloc(&cdx_controllers, &cdx->id, cdx,
+ XA_LIMIT(0, MAX_CDX_CONTROLLERS - 1), GFP_KERNEL);
+ if (ret) {
+ dev_err(cdx->dev,
+ "No free index available. Maximum controllers already registered\n");
+ cdx->id = (u8)MAX_CDX_CONTROLLERS;
+ return ret;
+ }
+
+ /* Scan all the devices */
+ cdx->ops->scan(cdx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cdx_register_controller);
+
+void cdx_unregister_controller(struct cdx_controller *cdx)
+{
+ if (cdx->id >= MAX_CDX_CONTROLLERS)
+ return;
+
+ device_for_each_child(cdx->dev, NULL, cdx_unregister_device);
+ xa_erase(&cdx_controllers, cdx->id);
+}
+EXPORT_SYMBOL_GPL(cdx_unregister_controller);
+
+static int __init cdx_bus_init(void)
+{
+ return bus_register(&cdx_bus_type);
+}
+postcore_initcall(cdx_bus_init);
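
To make the match and probe flow above concrete, the following is a minimal
sketch of a client driver binding to a device on this bus. The vendor/device
IDs are hypothetical, and a cdx_driver_register() convenience macro wrapping
__cdx_driver_register(drv, THIS_MODULE) is assumed to be provided by
<linux/cdx/cdx_bus.h>.

	#include <linux/module.h>
	#include <linux/cdx/cdx_bus.h>

	/* Hypothetical IDs, for illustration only */
	static const struct cdx_device_id demo_cdx_ids[] = {
		{ .vendor = 0x10ee, .device = 0x8084 },
		{ }
	};

	static int demo_cdx_probe(struct cdx_device *cdx_dev)
	{
		dev_info(&cdx_dev->dev, "bound CDX device %04x:%04x\n",
			 cdx_dev->vendor, cdx_dev->device);
		return 0;
	}

	static void demo_cdx_remove(struct cdx_device *cdx_dev)
	{
		dev_info(&cdx_dev->dev, "unbound\n");
	}

	static struct cdx_driver demo_cdx_driver = {
		.driver		= { .name = "demo-cdx" },
		.probe		= demo_cdx_probe,
		.remove		= demo_cdx_remove,
		.match_id_table	= demo_cdx_ids,
	};

	static int __init demo_cdx_init(void)
	{
		/* Assumed convenience wrapper around __cdx_driver_register() */
		return cdx_driver_register(&demo_cdx_driver);
	}
	module_init(demo_cdx_init);

	static void __exit demo_cdx_exit(void)
	{
		cdx_driver_unregister(&demo_cdx_driver);
	}
	module_exit(demo_cdx_exit);

	MODULE_LICENSE("GPL");
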
diff --git a/drivers/cdx/cdx.h b/drivers/cdx/cdx.h
new file mode 100644
index 000000000000..c436ac7ac86f
--- /dev/null
+++ b/drivers/cdx/cdx.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Header file for the CDX Bus
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _CDX_H_
+#define _CDX_H_
+
+#include <linux/cdx/cdx_bus.h>
+
+/**
+ * struct cdx_dev_params - CDX device parameters
+ * @cdx: CDX controller associated with the device
+ * @parent: Associated CDX controller
+ * @vendor: Vendor ID for CDX device
+ * @device: Device ID for CDX device
+ * @bus_num: Bus number for this CDX device
+ * @dev_num: Device number for this device
+ * @res: array of MMIO region entries
+ * @res_count: number of valid MMIO regions
+ * @req_id: Requestor ID associated with CDX device
+ */
+struct cdx_dev_params {
+ struct cdx_controller *cdx;
+ u16 vendor;
+ u16 device;
+ u8 bus_num;
+ u8 dev_num;
+ struct resource res[MAX_CDX_DEV_RESOURCES];
+ u8 res_count;
+ u32 req_id;
+};
+
+/**
+ * cdx_register_controller - Register a CDX controller and its ports
+ * on the CDX bus.
+ * @cdx: The CDX controller to register
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int cdx_register_controller(struct cdx_controller *cdx);
+
+/**
+ * cdx_unregister_controller - Unregister a CDX controller
+ * @cdx: The CDX controller to unregister
+ */
+void cdx_unregister_controller(struct cdx_controller *cdx);
+
+/**
+ * cdx_device_add - Add a CDX device. This function adds a CDX device
+ * on the CDX bus as per the device parameters provided
+ * by caller. It also creates and registers an associated
+ * Linux generic device.
+ * @dev_params: device parameters associated with the device to be created.
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int cdx_device_add(struct cdx_dev_params *dev_params);
+
+#endif /* _CDX_H_ */
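
As a rough illustration of how a controller implementation is expected to fill
this structure and hand it to cdx_device_add(), the sketch below describes one
hypothetical device; the IDs, requester ID and MMIO window are made-up values.

	#include <linux/ioport.h>
	#include "cdx.h"

	static int demo_add_one_device(struct cdx_controller *cdx)
	{
		struct cdx_dev_params params = { 0 };

		params.cdx = cdx;
		params.vendor = 0x10ee;		/* hypothetical vendor ID */
		params.device = 0x0001;		/* hypothetical device ID */
		params.bus_num = 0;
		params.dev_num = 0;
		params.req_id = 0x250;		/* SMMU StreamID / ITS DeviceID */

		/* One 4K MMIO window, expressed as a bus address */
		params.res[0] = (struct resource) {
			.start	= 0xe4000000,
			.end	= 0xe4000fff,
			.flags	= IORESOURCE_MEM,
		};
		params.res_count = 1;

		return cdx_device_add(&params);
	}
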
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
new file mode 100644
index 000000000000..c3e3b9ff8dfe
--- /dev/null
+++ b/drivers/cdx/controller/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# CDX controller configuration
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+if CDX_BUS
+
+config CDX_CONTROLLER
+ tristate "CDX bus controller"
+ select REMOTEPROC
+ select RPMSG
+ help
+	  The CDX controller drives the CDX bus. It interacts with
+	  firmware to discover the hardware devices and registers them
+	  with the CDX bus. Say Y to enable the CDX hardware driver.
+
+ If unsure, say N.
+
+config MCDI_LOGGING
+ bool "MCDI Logging for the CDX controller"
+ depends on CDX_CONTROLLER
+ help
+	  Enable MCDI logging for the CDX controller, for
+	  debugging purposes.
+
+ If unsure, say N.
+
+endif
diff --git a/drivers/cdx/controller/Makefile b/drivers/cdx/controller/Makefile
new file mode 100644
index 000000000000..f071be411d96
--- /dev/null
+++ b/drivers/cdx/controller/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for CDX controller drivers
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+obj-$(CONFIG_CDX_CONTROLLER) += cdx-controller.o
+cdx-controller-objs := cdx_controller.o cdx_rpmsg.o mcdi.o mcdi_functions.o
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
new file mode 100644
index 000000000000..567f8ec47582
--- /dev/null
+++ b/drivers/cdx/controller/bitfield.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_BITFIELD_H
+#define CDX_BITFIELD_H
+
+#include <linux/bitfield.h>
+
+/* Lowest bit numbers and widths */
+#define CDX_DWORD_LBN 0
+#define CDX_DWORD_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define CDX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+struct cdx_dword {
+ __le32 cdx_u32;
+};
+
+/* Value expanders for printk */
+#define CDX_DWORD_VAL(dword) \
+ ((unsigned int)le32_to_cpu((dword).cdx_u32))
+
+/*
+ * Extract the named bit field from a 32-bit little-endian dword
+ */
+#define CDX_DWORD_FIELD(dword, field) \
+ (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
+ le32_to_cpu((dword).cdx_u32)))
+
+/*
+ * Create the value of the named bit field, shifted into its position
+ * within the dword.
+ */
+#define CDX_INSERT_FIELD(field, value) \
+ (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
+ CDX_LOW_BIT(field)), value))
+
+/*
+ * OR together up to seven named bit fields, each shifted into its
+ * position within the dword.
+ */
+#define CDX_INSERT_FIELDS(field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7) \
+ (CDX_INSERT_FIELD(field1, (value1)) | \
+ CDX_INSERT_FIELD(field2, (value2)) | \
+ CDX_INSERT_FIELD(field3, (value3)) | \
+ CDX_INSERT_FIELD(field4, (value4)) | \
+ CDX_INSERT_FIELD(field5, (value5)) | \
+ CDX_INSERT_FIELD(field6, (value6)) | \
+ CDX_INSERT_FIELD(field7, (value7)))
+
+#define CDX_POPULATE_DWORD(dword, ...) \
+ (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
+
+/* Populate a dword field with various numbers of arguments */
+#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
+#define CDX_POPULATE_DWORD_6(dword, ...) \
+ CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_5(dword, ...) \
+ CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_4(dword, ...) \
+ CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_3(dword, ...) \
+ CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_2(dword, ...) \
+ CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_1(dword, ...) \
+ CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_SET_DWORD(dword) \
+ CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
+
+#endif /* CDX_BITFIELD_H */
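
A short sketch of how these helpers compose and decode a dword: the
MCDI_HEADER_* field definitions are taken from mc_cdx_pcol.h later in this
series, and the function itself exists purely for illustration.

	#include "bitfield.h"
	#include "mc_cdx_pcol.h"

	static unsigned int demo_bitfield_usage(void)
	{
		struct cdx_dword hdr;

		/* Set two named fields; all other bits are left at zero */
		CDX_POPULATE_DWORD_2(hdr,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_SEQ, 5);

		/* Read a field back out of the little-endian dword */
		return CDX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);	/* 5 */
	}
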
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
new file mode 100644
index 000000000000..dc52f95f8978
--- /dev/null
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CDX host controller driver for AMD versal-net platform.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/cdx/cdx_bus.h>
+
+#include "cdx_controller.h"
+#include "../cdx.h"
+#include "mcdi_functions.h"
+#include "mcdi.h"
+
+static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
+static void cdx_mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ if (cdx_rpmsg_send(cdx, hdr, hdr_len, sdu, sdu_len))
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data\n");
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = cdx_mcdi_rpc_timeout,
+ .mcdi_request = cdx_mcdi_request,
+};
+
+void cdx_rpmsg_post_probe(struct cdx_controller *cdx)
+{
+ /* Register CDX controller with CDX bus driver */
+ if (cdx_register_controller(cdx))
+ dev_err(cdx->dev, "Failed to register CDX controller\n");
+}
+
+void cdx_rpmsg_pre_remove(struct cdx_controller *cdx)
+{
+ cdx_unregister_controller(cdx);
+ cdx_mcdi_wait_for_quiescence(cdx->priv, MCDI_RPC_TIMEOUT);
+}
+
+static int cdx_configure_device(struct cdx_controller *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_device_config *dev_config)
+{
+ int ret = 0;
+
+ switch (dev_config->type) {
+ case CDX_DEV_RESET_CONF:
+ ret = cdx_mcdi_reset_device(cdx->priv, bus_num, dev_num);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cdx_scan_devices(struct cdx_controller *cdx)
+{
+ struct cdx_mcdi *cdx_mcdi = cdx->priv;
+ u8 bus_num, dev_num, num_cdx_bus;
+ int ret;
+
+ /* MCDI FW Read: Fetch the number of CDX buses on this controller */
+ ret = cdx_mcdi_get_num_buses(cdx_mcdi);
+ if (ret < 0) {
+ dev_err(cdx->dev,
+ "Get number of CDX buses failed: %d\n", ret);
+ return ret;
+ }
+ num_cdx_bus = (u8)ret;
+
+ for (bus_num = 0; bus_num < num_cdx_bus; bus_num++) {
+ u8 num_cdx_dev;
+
+ /* MCDI FW Read: Fetch the number of devices present */
+ ret = cdx_mcdi_get_num_devs(cdx_mcdi, bus_num);
+ if (ret < 0) {
+ dev_err(cdx->dev,
+ "Get devices on CDX bus %d failed: %d\n", bus_num, ret);
+ continue;
+ }
+ num_cdx_dev = (u8)ret;
+
+ for (dev_num = 0; dev_num < num_cdx_dev; dev_num++) {
+ struct cdx_dev_params dev_params;
+
+ /* MCDI FW: Get the device config */
+ ret = cdx_mcdi_get_dev_config(cdx_mcdi, bus_num,
+ dev_num, &dev_params);
+ if (ret) {
+ dev_err(cdx->dev,
+ "CDX device config get failed for %d(bus):%d(dev), %d\n",
+ bus_num, dev_num, ret);
+ continue;
+ }
+ dev_params.cdx = cdx;
+
+ /* Add the device to the cdx bus */
+ ret = cdx_device_add(&dev_params);
+ if (ret) {
+ dev_err(cdx->dev, "registering cdx dev: %d failed: %d\n",
+ dev_num, ret);
+ continue;
+ }
+
+ dev_dbg(cdx->dev, "CDX dev: %d on cdx bus: %d created\n",
+ dev_num, bus_num);
+ }
+ }
+
+ return 0;
+}
+
+static struct cdx_ops cdx_ops = {
+ .scan = cdx_scan_devices,
+ .dev_configure = cdx_configure_device,
+};
+
+static int xlnx_cdx_probe(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx;
+ struct cdx_mcdi *cdx_mcdi;
+ int ret;
+
+ cdx_mcdi = kzalloc(sizeof(*cdx_mcdi), GFP_KERNEL);
+ if (!cdx_mcdi)
+ return -ENOMEM;
+
+ /* Store the MCDI ops */
+ cdx_mcdi->mcdi_ops = &mcdi_ops;
+ /* MCDI FW: Initialize the FW path */
+ ret = cdx_mcdi_init(cdx_mcdi);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "MCDI Initialization failed\n");
+ goto mcdi_init_fail;
+ }
+
+ cdx = kzalloc(sizeof(*cdx), GFP_KERNEL);
+ if (!cdx) {
+ ret = -ENOMEM;
+ goto cdx_alloc_fail;
+ }
+ platform_set_drvdata(pdev, cdx);
+
+ cdx->dev = &pdev->dev;
+ cdx->priv = cdx_mcdi;
+ cdx->ops = &cdx_ops;
+
+ ret = cdx_setup_rpmsg(pdev);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to register CDX RPMsg transport\n");
+ goto cdx_rpmsg_fail;
+ }
+
+ dev_info(&pdev->dev, "Successfully registered CDX controller with RPMsg as transport\n");
+ return 0;
+
+cdx_rpmsg_fail:
+ kfree(cdx);
+cdx_alloc_fail:
+ cdx_mcdi_finish(cdx_mcdi);
+mcdi_init_fail:
+ kfree(cdx_mcdi);
+
+ return ret;
+}
+
+static int xlnx_cdx_remove(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx = platform_get_drvdata(pdev);
+ struct cdx_mcdi *cdx_mcdi = cdx->priv;
+
+ cdx_destroy_rpmsg(pdev);
+
+ kfree(cdx);
+
+ cdx_mcdi_finish(cdx_mcdi);
+ kfree(cdx_mcdi);
+
+ return 0;
+}
+
+static const struct of_device_id cdx_match_table[] = {
+ {.compatible = "xlnx,versal-net-cdx",},
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, cdx_match_table);
+
+static struct platform_driver cdx_pdriver = {
+ .driver = {
+ .name = "cdx-controller",
+ .pm = NULL,
+ .of_match_table = cdx_match_table,
+ },
+ .probe = xlnx_cdx_probe,
+ .remove = xlnx_cdx_remove,
+};
+
+static int __init cdx_controller_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cdx_pdriver);
+ if (ret)
+ pr_err("platform_driver_register() failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit cdx_controller_exit(void)
+{
+ platform_driver_unregister(&cdx_pdriver);
+}
+
+module_init(cdx_controller_init);
+module_exit(cdx_controller_exit);
+
+MODULE_AUTHOR("AMD Inc.");
+MODULE_DESCRIPTION("CDX controller for AMD devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cdx/controller/cdx_controller.h b/drivers/cdx/controller/cdx_controller.h
new file mode 100644
index 000000000000..43b7c742df87
--- /dev/null
+++ b/drivers/cdx/controller/cdx_controller.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Header file for the CDX Controller
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _CDX_CONTROLLER_H_
+#define _CDX_CONTROLLER_H_
+
+#include <linux/cdx/cdx_bus.h>
+#include "mcdi_functions.h"
+
+void cdx_rpmsg_post_probe(struct cdx_controller *cdx);
+
+void cdx_rpmsg_pre_remove(struct cdx_controller *cdx);
+
+int cdx_rpmsg_send(struct cdx_mcdi *cdx_mcdi,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+
+void cdx_rpmsg_read_resp(struct cdx_mcdi *cdx_mcdi,
+ struct cdx_dword *outbuf, size_t offset,
+ size_t outlen);
+
+int cdx_setup_rpmsg(struct platform_device *pdev);
+
+void cdx_destroy_rpmsg(struct platform_device *pdev);
+
+#endif /* _CDX_CONTROLLER_H_ */
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
new file mode 100644
index 000000000000..f37e639d6ce3
--- /dev/null
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for CDX bus.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/rpmsg.h>
+#include <linux/remoteproc.h>
+#include <linux/of_platform.h>
+#include <linux/cdx/cdx_bus.h>
+#include <linux/module.h>
+
+#include "../cdx.h"
+#include "cdx_controller.h"
+#include "mcdi_functions.h"
+#include "mcdi.h"
+
+static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
+ { .name = "mcdi_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, cdx_rpmsg_id_table);
+
+int cdx_rpmsg_send(struct cdx_mcdi *cdx_mcdi,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ unsigned char *send_buf;
+ int ret;
+
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return -ENOMEM;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx_mcdi->ept, send_buf, hdr_len + sdu_len);
+ kfree(send_buf);
+
+ return ret;
+}
+
+static int cdx_attach_to_rproc(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+ struct device *dev;
+ struct rproc *rp;
+ int ret;
+
+ dev = &pdev->dev;
+ cdx_c = platform_get_drvdata(pdev);
+ cdx_mcdi = cdx_c->priv;
+
+ r5_core_node = of_parse_phandle(dev->of_node, "xlnx,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "xlnx,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+ rp = rproc_get_by_phandle(r5_core_node->phandle);
+ if (!rp) {
+ ret = -EPROBE_DEFER;
+ goto pdev_err;
+ }
+
+ /* Attach to remote processor */
+ ret = rproc_boot(rp);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ rproc_put(rp);
+ goto pdev_err;
+ }
+
+ cdx_mcdi->r5_rproc = rp;
+pdev_err:
+ of_node_put(r5_core_node);
+ return ret;
+}
+
+static void cdx_detach_to_r5(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+
+ cdx_c = platform_get_drvdata(pdev);
+ cdx_mcdi = cdx_c->priv;
+
+ rproc_detach(cdx_mcdi->r5_rproc);
+ rproc_put(cdx_mcdi->r5_rproc);
+}
+
+static int cdx_rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct cdx_controller *cdx_c = dev_get_drvdata(&rpdev->dev);
+ struct cdx_mcdi *cdx_mcdi = cdx_c->priv;
+
+ if (len > MCDI_BUF_LEN)
+ return -EINVAL;
+
+ cdx_mcdi_process_cmd(cdx_mcdi, (struct cdx_dword *)data, len);
+
+ return 0;
+}
+
+static void cdx_rpmsg_post_probe_work(struct work_struct *work)
+{
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+
+ cdx_mcdi = container_of(work, struct cdx_mcdi, work);
+ cdx_c = dev_get_drvdata(&cdx_mcdi->rpdev->dev);
+ cdx_rpmsg_post_probe(cdx_c);
+}
+
+static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo = {0};
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+
+ cdx_c = (struct cdx_controller *)cdx_rpmsg_id_table[0].driver_data;
+ cdx_mcdi = cdx_c->priv;
+
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
+ strlen(cdx_rpmsg_id_table[0].name));
+
+ cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
+ if (!cdx_mcdi->ept) {
+ dev_err_probe(&rpdev->dev, -ENXIO,
+ "Failed to create ept for channel %s\n",
+ chinfo.name);
+		return -ENXIO;
+ }
+
+ cdx_mcdi->rpdev = rpdev;
+ dev_set_drvdata(&rpdev->dev, cdx_c);
+
+ schedule_work(&cdx_mcdi->work);
+ return 0;
+}
+
+static void cdx_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct cdx_controller *cdx_c = dev_get_drvdata(&rpdev->dev);
+ struct cdx_mcdi *cdx_mcdi = cdx_c->priv;
+
+ flush_work(&cdx_mcdi->work);
+ cdx_rpmsg_pre_remove(cdx_c);
+
+ rpmsg_destroy_ept(cdx_mcdi->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver cdx_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .id_table = cdx_rpmsg_id_table,
+ .probe = cdx_rpmsg_probe,
+ .remove = cdx_rpmsg_remove,
+ .callback = cdx_rpmsg_cb,
+};
+
+int cdx_setup_rpmsg(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+ int ret;
+
+ /* Attach to remote processor */
+ ret = cdx_attach_to_rproc(pdev);
+ if (ret)
+ return ret;
+
+ cdx_c = platform_get_drvdata(pdev);
+ cdx_mcdi = cdx_c->priv;
+
+ /* Register RPMsg driver */
+ cdx_rpmsg_id_table[0].driver_data = (kernel_ulong_t)cdx_c;
+
+ INIT_WORK(&cdx_mcdi->work, cdx_rpmsg_post_probe_work);
+ ret = register_rpmsg_driver(&cdx_rpmsg_driver);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register cdx RPMsg driver: %d\n", ret);
+ cdx_detach_to_r5(pdev);
+ }
+
+ return ret;
+}
+
+void cdx_destroy_rpmsg(struct platform_device *pdev)
+{
+ unregister_rpmsg_driver(&cdx_rpmsg_driver);
+
+ cdx_detach_to_r5(pdev);
+}
diff --git a/drivers/cdx/controller/mc_cdx_pcol.h b/drivers/cdx/controller/mc_cdx_pcol.h
new file mode 100644
index 000000000000..4ccb7b52951b
--- /dev/null
+++ b/drivers/cdx/controller/mc_cdx_pcol.h
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+
+/* The current version of the MCDI protocol. */
+#define MCDI_PCOL_VERSION 2
+
+/*
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/* MCDI request/response header fields */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
+/*
+ * The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without a
+ * MCDI header. An event can be distinguished from a MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+
+/*
+ * the errno value may be followed by the (0-based) number of the
+ * first argument that could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* MC_CMD_ERR MCDI error codes. */
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 0x1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 0x2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 0x4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 0x5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 0x6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 0xb
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 0xc
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 0xd
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 0x10
+/* No such device */
+#define MC_CMD_ERR_ENODEV 0x13
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 0x16
+/* No space */
+#define MC_CMD_ERR_ENOSPC 0x1c
+/* Read-only */
+#define MC_CMD_ERR_EROFS 0x1e
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 0x20
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 0x22
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 0x23
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 0x26
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 0x3e
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 0x43
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 0x47
+/* Bad message */
+#define MC_CMD_ERR_EBADMSG 0x4a
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 0x5f
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 0x63
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 0x6b
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 0x72
+/* Stale handle. The handle references resource that no longer exists */
+#define MC_CMD_ERR_ESTALE 0x74
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/*
+ * The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. Should only ever be seen over
+ * the UART.
+ */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/*
+ * Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. May also be returned for other operations such as
+ * sub-variant switching.
+ */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set doesn't exist */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/*
+ * Returned by MC_CMD_TESTASSERT if the action that should have caused an
+ * assertion failed to do so.
+ */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/*
+ * This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed.
+ */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/*
+ * The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport.
+ */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/*
+ * The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary
+ */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/*
+ * The operation could not complete because some PIO buffers are
+ * allocated
+ */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_ENUM_BUSES
+ * CDX bus hosts devices (functions) that are implemented using the Composable
+ * DMA subsystem and directly mapped into the memory space of the FPGA PSX
+ * Application Processors (APUs). As such, they only apply to the PSX APU side,
+ * not the host (PCIe). Unlike PCIe, these devices have no native configuration
+ * space or enumeration mechanism, so this message set provides a minimal
+ * interface for discovery and management (bus reset, FLR, BME) of such
+ * devices. This command returns the number of CDX buses present in the system.
+ */
+#define MC_CMD_CDX_BUS_ENUM_BUSES 0x1
+#define MC_CMD_CDX_BUS_ENUM_BUSES_MSGSET 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_BUS_ENUM_BUSES_IN msgrequest */
+#define MC_CMD_CDX_BUS_ENUM_BUSES_IN_LEN 0
+
+/* MC_CMD_CDX_BUS_ENUM_BUSES_OUT msgresponse */
+#define MC_CMD_CDX_BUS_ENUM_BUSES_OUT_LEN 4
+/*
+ * Number of CDX buses present in the system. Buses are numbered 0 to
+ * BUS_COUNT-1
+ */
+#define MC_CMD_CDX_BUS_ENUM_BUSES_OUT_BUS_COUNT_OFST 0
+#define MC_CMD_CDX_BUS_ENUM_BUSES_OUT_BUS_COUNT_LEN 4
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_ENUM_DEVICES
+ * Enumerate CDX bus devices on a given bus
+ */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES 0x2
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_MSGSET 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_BUS_ENUM_DEVICES_IN msgrequest */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_IN_LEN 4
+/*
+ * Bus number to enumerate, in range 0 to BUS_COUNT-1, as returned by
+ * MC_CMD_CDX_BUS_ENUM_BUSES_OUT
+ */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_IN_BUS_LEN 4
+
+/* MC_CMD_CDX_BUS_ENUM_DEVICES_OUT msgresponse */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN 4
+/*
+ * Number of devices present on the bus. Devices on the bus are numbered 0 to
+ * DEVICE_COUNT-1. Returns EAGAIN if number of devices unknown or if the target
+ * devices are not ready (e.g. undergoing a bus reset)
+ */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT_OFST 0
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT_LEN 4
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_GET_DEVICE_CONFIG
+ * Returns device identification and MMIO/MSI resource data for a CDX device.
+ * The expected usage is for the caller to first retrieve the number of devices
+ * on the bus using MC_CMD_BUS_ENUM_DEVICES, then loop through the range (0,
+ * DEVICE_COUNT - 1), retrieving device resource data. May return EAGAIN if the
+ * number of exposed devices or device resources change during enumeration (due
+ * to e.g. a PL reload / bus reset), in which case the caller is expected to
+ * restart the enumeration loop. MMIO addresses are specified in terms of bus
+ * addresses (prior to any potential IOMMU translation). For versal-net, these
+ * are equivalent to APU physical addresses. Implementation note - for this to
+ * work, the implementation needs to keep state (generation count) per client.
+ */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG 0x3
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_MSGSET 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN msgrequest */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_LEN 8
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_DEVICE_LEN 4
+
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT msgresponse */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN 88
+/* 16-bit Vendor identifier, compliant with PCI-SIG VendorID assignment. */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID_OFST 0
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID_LEN 2
+/* 16-bit Device ID assigned by the vendor */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID_OFST 2
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID_LEN 2
+/*
+ * 16-bit Subsystem Vendor ID, compliant with PCI-SIG VendorID assignment.
+ * For further device differentiation, as required. 0 if unused.
+ */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_VENDOR_ID_OFST 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_VENDOR_ID_LEN 2
+/*
+ * 16-bit Subsystem Device ID assigned by the vendor. For further device
+ * differentiation, as required. 0 if unused.
+ */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_DEVICE_ID_OFST 6
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_DEVICE_ID_LEN 2
+/* 24-bit Device Class code, compliant with PCI-SIG Device Class codes */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS_OFST 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS_LEN 3
+/* 8-bit vendor-assigned revision */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION_OFST 11
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION_LEN 1
+/* Reserved (alignment) */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_RESERVED_OFST 12
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_RESERVED_LEN 4
+/* MMIO region 0 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_OFST 16
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_OFST 16
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_LBN 128
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_OFST 20
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_LBN 160
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_WIDTH 32
+/* MMIO region 0 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_OFST 24
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_OFST 24
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_LBN 192
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_OFST 28
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_LBN 224
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_WIDTH 32
+/* MMIO region 1 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_OFST 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_OFST 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_LBN 256
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_OFST 36
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_LBN 288
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_WIDTH 32
+/* MMIO region 1 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_OFST 40
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_OFST 40
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_LBN 320
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_OFST 44
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_LBN 352
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_WIDTH 32
+/* MMIO region 2 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_OFST 48
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_OFST 48
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_LBN 384
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_OFST 52
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_LBN 416
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_WIDTH 32
+/* MMIO region 2 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_OFST 56
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_OFST 56
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_LBN 448
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_OFST 60
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_LBN 480
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_WIDTH 32
+/* MMIO region 3 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_OFST 64
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_OFST 64
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_LBN 512
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_OFST 68
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_LBN 544
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_WIDTH 32
+/* MMIO region 3 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_OFST 72
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_OFST 72
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_LBN 576
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_OFST 76
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_LBN 608
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_WIDTH 32
+/* MSI vector count */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT_OFST 80
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT_LEN 4
+/* Requester ID used by device (SMMU StreamID, GIC ITS DeviceID) */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_OFST 84
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_LEN 4
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_RESET
+ * After this call completes, device DMA and interrupts are quiesced, device
+ * logic is reset in a hardware-specific way and DMA bus mastering is disabled.
+ */
+#define MC_CMD_CDX_DEVICE_RESET 0x6
+#define MC_CMD_CDX_DEVICE_RESET_MSGSET 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_RESET_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_RESET_IN_LEN 8
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_RESET_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_RESET_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_RESET_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_RESET_IN_DEVICE_LEN 4
+
+/*
+ * MC_CMD_CDX_DEVICE_RESET_OUT msgresponse: The device is quiesced and all
+ * pending device initiated DMA has completed.
+ */
+#define MC_CMD_CDX_DEVICE_RESET_OUT_LEN 0
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_CONTROL_SET
+ * If BUS_MASTER is set to disabled, device DMA and interrupts are quiesced.
+ * Pending DMA requests and MSI interrupts are flushed and no further DMA or
+ * interrupts are issued after this command returns. If BUS_MASTER is set to
+ * enabled, device is allowed to initiate DMA. Whether interrupts are enabled
+ * also depends on the value of MSI_ENABLE bit. Note that, in this case, the
+ * device may start DMA before the host receives and processes the MCDI
+ * response. MSI_ENABLE masks or unmasks device interrupts only. Note that for
+ * interrupts to be delivered to the host, both BUS_MASTER and MSI_ENABLE need
+ * to be set. MMIO_REGIONS_ENABLE enables or disables host accesses to device
+ * MMIO regions. Note that an implementation is allowed to permanently set this
+ * bit to 1, in which case MC_CMD_CDX_DEVICE_CONTROL_GET will always return 1
+ * for this bit, regardless of the value set here.
+ */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET 0x7
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_MSGSET 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_CONTROL_SET_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_LEN 12
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_DEVICE_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_FLAGS_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_FLAGS_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_LBN 0
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_LBN 1
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MMIO_REGIONS_ENABLE_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MMIO_REGIONS_ENABLE_LBN 2
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MMIO_REGIONS_ENABLE_WIDTH 1
+
+/* MC_CMD_CDX_DEVICE_CONTROL_SET_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_OUT_LEN 0
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_CONTROL_GET
+ * Returns device DMA, interrupt and MMIO region access control bits. See
+ * MC_CMD_CDX_DEVICE_CONTROL_SET for definition of the available control bits.
+ */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET 0x8
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_MSGSET 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_CONTROL_GET_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_LEN 8
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_DEVICE_LEN 4
+
+/* MC_CMD_CDX_DEVICE_CONTROL_GET_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_FLAGS_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_FLAGS_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_BUS_MASTER_ENABLE_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_BUS_MASTER_ENABLE_LBN 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_BUS_MASTER_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MSI_ENABLE_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MSI_ENABLE_LBN 1
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MSI_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_LBN 2
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_WIDTH 1
+
+/***********************************/
+/* MC_CMD_V2_EXTN - Encapsulation for a v2 extended command */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/*
+ * enum: MCDI command directed to versal-net. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
+
+#endif /* MC_CDX_PCOL_H */
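
To show how the MMIO region fields of the GET_DEVICE_CONFIG response map onto
a Linux resource, the sketch below decodes region 0 with plain unaligned
little-endian loads. The in-tree controller code uses the MCDI accessor
helpers from mcdi.h for this instead, so treat this only as an illustration of
the response layout.

	#include <asm/unaligned.h>
	#include <linux/ioport.h>
	#include "mc_cdx_pcol.h"

	static void demo_parse_region0(const u8 *outbuf, struct resource *res)
	{
		u64 base, size;

		base = get_unaligned_le64(outbuf +
			MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_OFST);
		size = get_unaligned_le64(outbuf +
			MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_OFST);

		if (size) {				/* 0 means unused */
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM;
		}
	}
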
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
new file mode 100644
index 000000000000..a211a2ca762e
--- /dev/null
+++ b/drivers/cdx/controller/mcdi.c
@@ -0,0 +1,903 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management-Controller-to-Driver Interface
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <net/netevent.h>
+#include <linux/log2.h>
+#include <linux/net_tstamp.h>
+#include <linux/wait.h>
+
+#include "bitfield.h"
+#include "mcdi.h"
+
+struct cdx_mcdi_copy_buffer {
+ struct cdx_dword buffer[DIV_ROUND_UP(MCDI_CTL_SDU_LEN_MAX, 4)];
+};
+
+#ifdef CONFIG_MCDI_LOGGING
+#define LOG_LINE_MAX (1024 - 32)
+#endif
+
+static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
+static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
+static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
+ struct cdx_mcdi_cmd *cmd,
+ unsigned int *handle);
+static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ bool allow_retry);
+static void cdx_mcdi_cmd_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd);
+static bool cdx_mcdi_complete_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct cdx_dword *outbuf,
+ int len,
+ struct list_head *cleanup_list);
+static void cdx_mcdi_timeout_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list);
+static void cdx_mcdi_cmd_work(struct work_struct *context);
+static void cdx_mcdi_mode_fail(struct cdx_mcdi *cdx, struct list_head *cleanup_list);
+static void _cdx_mcdi_display_error(struct cdx_mcdi *cdx, unsigned int cmd,
+ size_t inlen, int raw, int arg, int err_no);
+
+static bool cdx_cmd_cancelled(struct cdx_mcdi_cmd *cmd)
+{
+ return cmd->state == MCDI_STATE_RUNNING_CANCELLED;
+}
+
+static void cdx_mcdi_cmd_release(struct kref *ref)
+{
+ kfree(container_of(ref, struct cdx_mcdi_cmd, ref));
+}
+
+static unsigned int cdx_mcdi_cmd_handle(struct cdx_mcdi_cmd *cmd)
+{
+ return cmd->handle;
+}
+
+static void _cdx_mcdi_remove_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list)
+{
+ /* if cancelled, the completers have already been called */
+ if (cdx_cmd_cancelled(cmd))
+ return;
+
+ if (cmd->completer) {
+ list_add_tail(&cmd->cleanup_list, cleanup_list);
+ ++mcdi->outstanding_cleanups;
+ kref_get(&cmd->ref);
+ }
+}
+
+static void cdx_mcdi_remove_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list)
+{
+ list_del(&cmd->list);
+ _cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+ cmd->state = MCDI_STATE_FINISHED;
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ if (list_empty(&mcdi->cmd_list))
+ wake_up(&mcdi->cmd_complete_wq);
+}
+
+static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ if (!cdx->mcdi_ops->mcdi_rpc_timeout)
+ return MCDI_RPC_TIMEOUT;
+ else
+ return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
+}
+
+int cdx_mcdi_init(struct cdx_mcdi *cdx)
+{
+ struct cdx_mcdi_iface *mcdi;
+ int rc = -ENOMEM;
+
+ cdx->mcdi = kzalloc(sizeof(*cdx->mcdi), GFP_KERNEL);
+ if (!cdx->mcdi)
+ goto fail;
+
+ mcdi = cdx_mcdi_if(cdx);
+ mcdi->cdx = cdx;
+
+#ifdef CONFIG_MCDI_LOGGING
+ mcdi->logging_buffer = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail2;
+#endif
+ mcdi->workqueue = alloc_ordered_workqueue("mcdi_wq", 0);
+ if (!mcdi->workqueue)
+ goto fail3;
+ mutex_init(&mcdi->iface_lock);
+ mcdi->mode = MCDI_MODE_EVENTS;
+ INIT_LIST_HEAD(&mcdi->cmd_list);
+ init_waitqueue_head(&mcdi->cmd_complete_wq);
+
+ mcdi->new_epoch = true;
+
+ return 0;
+fail3:
+#ifdef CONFIG_MCDI_LOGGING
+ kfree(mcdi->logging_buffer);
+fail2:
+#endif
+ kfree(cdx->mcdi);
+ cdx->mcdi = NULL;
+fail:
+ return rc;
+}
+
+void cdx_mcdi_finish(struct cdx_mcdi *cdx)
+{
+ struct cdx_mcdi_iface *mcdi;
+
+ mcdi = cdx_mcdi_if(cdx);
+ if (!mcdi)
+ return;
+
+ cdx_mcdi_wait_for_cleanup(cdx);
+
+#ifdef CONFIG_MCDI_LOGGING
+ kfree(mcdi->logging_buffer);
+#endif
+
+ destroy_workqueue(mcdi->workqueue);
+ kfree(cdx->mcdi);
+ cdx->mcdi = NULL;
+}
+
+static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
+{
+ bool flushed;
+
+ mutex_lock(&mcdi->iface_lock);
+ flushed = list_empty(&mcdi->cmd_list) &&
+ (ignore_cleanups || !mcdi->outstanding_cleanups);
+ mutex_unlock(&mcdi->iface_lock);
+ return flushed;
+}
+
+/* Wait for outstanding MCDI commands to complete. */
+static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+
+ if (!mcdi)
+ return;
+
+ wait_event(mcdi->cmd_complete_wq,
+ cdx_mcdi_flushed(mcdi, false));
+}
+
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int rc = 0;
+
+ if (!mcdi)
+ return -EINVAL;
+
+ flush_workqueue(mcdi->workqueue);
+
+ add_wait_queue(&mcdi->cmd_complete_wq, &wait);
+
+ while (!cdx_mcdi_flushed(mcdi, true)) {
+ rc = wait_woken(&wait, TASK_IDLE, timeout_jiffies);
+ if (rc)
+ continue;
+ break;
+ }
+
+ remove_wait_queue(&mcdi->cmd_complete_wq, &wait);
+
+ if (rc > 0)
+ rc = 0;
+ else if (rc == 0)
+ rc = -ETIMEDOUT;
+
+ return rc;
+}
+
+static u8 cdx_mcdi_payload_csum(const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ u8 *p = (u8 *)hdr;
+ u8 csum = 0;
+ int i;
+
+ for (i = 0; i < hdr_len; i++)
+ csum += p[i];
+
+ p = (u8 *)sdu;
+ for (i = 0; i < sdu_len; i++)
+ csum += p[i];
+
+ return ~csum & 0xff;
+}
+
+static void cdx_mcdi_send_request(struct cdx_mcdi *cdx,
+ struct cdx_mcdi_cmd *cmd)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ const struct cdx_dword *inbuf = cmd->inbuf;
+ size_t inlen = cmd->inlen;
+ struct cdx_dword hdr[2];
+ size_t hdr_len;
+ bool not_epoch;
+ u32 xflags;
+#ifdef CONFIG_MCDI_LOGGING
+ char *buf;
+#endif
+
+ if (!mcdi)
+ return;
+#ifdef CONFIG_MCDI_LOGGING
+ buf = mcdi->logging_buffer; /* LOG_LINE_MAX bytes */
+#endif
+
+ mcdi->prev_seq = cmd->seq;
+ mcdi->seq_held_by[cmd->seq] = cmd;
+ mcdi->db_held_by = cmd;
+ cmd->started = jiffies;
+
+ not_epoch = !mcdi->new_epoch;
+ xflags = 0;
+
+ /* MCDI v2 */
+ WARN_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ CDX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, cmd->seq,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, not_epoch);
+ CDX_POPULATE_DWORD_3(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd->cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen,
+ MC_CMD_V2_EXTN_IN_MESSAGE_TYPE,
+ MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM);
+ hdr_len = 8;
+
+#ifdef CONFIG_MCDI_LOGGING
+ if (!WARN_ON_ONCE(!buf)) {
+ const struct cdx_dword *frags[] = { hdr, inbuf };
+ const size_t frag_len[] = { hdr_len, round_up(inlen, 4) };
+ int bytes = 0;
+ int i, j;
+
+ for (j = 0; j < ARRAY_SIZE(frags); j++) {
+ const struct cdx_dword *frag;
+
+ frag = frags[j];
+ for (i = 0;
+ i < frag_len[j] / 4;
+ i++) {
+ /*
+ * Do not exceed the internal printk limit.
+ * The string before that is just over 70 bytes.
+ */
+ if ((bytes + 75) > LOG_LINE_MAX) {
+ pr_info("MCDI RPC REQ:%s \\\n", buf);
+ bytes = 0;
+ }
+ bytes += snprintf(buf + bytes,
+ LOG_LINE_MAX - bytes, " %08x",
+ le32_to_cpu(frag[i].cdx_u32));
+ }
+ }
+
+ pr_info("MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+ hdr[0].cdx_u32 |= (__force __le32)(cdx_mcdi_payload_csum(hdr, hdr_len, inbuf, inlen) <<
+ MCDI_HEADER_XFLAGS_LBN);
+ cdx->mcdi_ops->mcdi_request(cdx, hdr, hdr_len, inbuf, inlen);
+
+ mcdi->new_epoch = false;
+}
+
+static int cdx_mcdi_errno(struct cdx_mcdi *cdx, unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ case MC_CMD_ERR_QUEUE_FULL:
+ return mcdi_err;
+ case MC_CMD_ERR_EPERM:
+ return -EPERM;
+ case MC_CMD_ERR_ENOENT:
+ return -ENOENT;
+ case MC_CMD_ERR_EINTR:
+ return -EINTR;
+ case MC_CMD_ERR_EAGAIN:
+ return -EAGAIN;
+ case MC_CMD_ERR_EACCES:
+ return -EACCES;
+ case MC_CMD_ERR_EBUSY:
+ return -EBUSY;
+ case MC_CMD_ERR_EINVAL:
+ return -EINVAL;
+ case MC_CMD_ERR_ERANGE:
+ return -ERANGE;
+ case MC_CMD_ERR_EDEADLK:
+ return -EDEADLK;
+ case MC_CMD_ERR_ENOSYS:
+ return -EOPNOTSUPP;
+ case MC_CMD_ERR_ETIME:
+ return -ETIME;
+ case MC_CMD_ERR_EALREADY:
+ return -EALREADY;
+ case MC_CMD_ERR_ENOSPC:
+ return -ENOSPC;
+ case MC_CMD_ERR_ENOMEM:
+ return -ENOMEM;
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return -EAGAIN;
+ default:
+ return -EPROTO;
+ }
+}
+
+static void cdx_mcdi_process_cleanup_list(struct cdx_mcdi *cdx,
+ struct list_head *cleanup_list)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ unsigned int cleanups = 0;
+
+ if (!mcdi)
+ return;
+
+ while (!list_empty(cleanup_list)) {
+ struct cdx_mcdi_cmd *cmd =
+ list_first_entry(cleanup_list,
+ struct cdx_mcdi_cmd, cleanup_list);
+ cmd->completer(cdx, cmd->cookie, cmd->rc,
+ cmd->outbuf, cmd->outlen);
+ list_del(&cmd->cleanup_list);
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ ++cleanups;
+ }
+
+ if (cleanups) {
+ bool all_done;
+
+ mutex_lock(&mcdi->iface_lock);
+ CDX_WARN_ON_PARANOID(cleanups > mcdi->outstanding_cleanups);
+ all_done = (mcdi->outstanding_cleanups -= cleanups) == 0;
+ mutex_unlock(&mcdi->iface_lock);
+ if (all_done)
+ wake_up(&mcdi->cmd_complete_wq);
+ }
+}
+
+static void _cdx_mcdi_cancel_cmd(struct cdx_mcdi_iface *mcdi,
+ unsigned int handle,
+ struct list_head *cleanup_list)
+{
+ struct cdx_mcdi_cmd *cmd;
+
+ list_for_each_entry(cmd, &mcdi->cmd_list, list)
+ if (cdx_mcdi_cmd_handle(cmd) == handle) {
+ switch (cmd->state) {
+ case MCDI_STATE_QUEUED:
+ case MCDI_STATE_RETRY:
+ pr_debug("command %#x inlen %zu cancelled in queue\n",
+ cmd->cmd, cmd->inlen);
+ /* if not yet running, properly cancel it */
+ cmd->rc = -EPIPE;
+ cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+ break;
+ case MCDI_STATE_RUNNING:
+ case MCDI_STATE_RUNNING_CANCELLED:
+ case MCDI_STATE_FINISHED:
+ default:
+ /* invalid state? */
+ WARN_ON(1);
+ }
+ break;
+ }
+}
+
+static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ LIST_HEAD(cleanup_list);
+
+ if (!mcdi)
+ return;
+
+ mutex_lock(&mcdi->iface_lock);
+ cdx_mcdi_timeout_cmd(mcdi, cmd, &cleanup_list);
+ mutex_unlock(&mcdi->iface_lock);
+ cdx_mcdi_process_cleanup_list(cdx, &cleanup_list);
+}
+
+struct cdx_mcdi_blocking_data {
+ struct kref ref;
+ bool done;
+ wait_queue_head_t wq;
+ int rc;
+ struct cdx_dword *outbuf;
+ size_t outlen;
+ size_t outlen_actual;
+};
+
+static void cdx_mcdi_blocking_data_release(struct kref *ref)
+{
+ kfree(container_of(ref, struct cdx_mcdi_blocking_data, ref));
+}
+
+static void cdx_mcdi_rpc_completer(struct cdx_mcdi *cdx, unsigned long cookie,
+ int rc, struct cdx_dword *outbuf,
+ size_t outlen_actual)
+{
+ struct cdx_mcdi_blocking_data *wait_data =
+ (struct cdx_mcdi_blocking_data *)cookie;
+
+ wait_data->rc = rc;
+ memcpy(wait_data->outbuf, outbuf,
+ min(outlen_actual, wait_data->outlen));
+ wait_data->outlen_actual = outlen_actual;
+ /* ensure rc, outbuf and outlen_actual are visible before done is set */
+ smp_wmb();
+ wait_data->done = true;
+ wake_up(&wait_data->wq);
+ kref_put(&wait_data->ref, cdx_mcdi_blocking_data_release);
+}
+
+static int cdx_mcdi_rpc_sync(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct cdx_mcdi_blocking_data *wait_data;
+ struct cdx_mcdi_cmd *cmd_item;
+ unsigned int handle;
+ int rc;
+
+ if (outlen_actual)
+ *outlen_actual = 0;
+
+ wait_data = kmalloc(sizeof(*wait_data), GFP_KERNEL);
+ if (!wait_data)
+ return -ENOMEM;
+
+ cmd_item = kmalloc(sizeof(*cmd_item), GFP_KERNEL);
+ if (!cmd_item) {
+ kfree(wait_data);
+ return -ENOMEM;
+ }
+
+ kref_init(&wait_data->ref);
+ wait_data->done = false;
+ init_waitqueue_head(&wait_data->wq);
+ wait_data->outbuf = outbuf;
+ wait_data->outlen = outlen;
+
+ kref_init(&cmd_item->ref);
+ cmd_item->quiet = quiet;
+ cmd_item->cookie = (unsigned long)wait_data;
+ cmd_item->completer = &cdx_mcdi_rpc_completer;
+ cmd_item->cmd = cmd;
+ cmd_item->inlen = inlen;
+ cmd_item->inbuf = inbuf;
+
+ /* Claim an extra reference for the completer to put. */
+ kref_get(&wait_data->ref);
+ rc = cdx_mcdi_rpc_async_internal(cdx, cmd_item, &handle);
+ if (rc) {
+ kref_put(&wait_data->ref, cdx_mcdi_blocking_data_release);
+ goto out;
+ }
+
+ if (!wait_event_timeout(wait_data->wq, wait_data->done,
+ cdx_mcdi_rpc_timeout(cdx, cmd)) &&
+ !wait_data->done) {
+ pr_err("MC command 0x%x inlen %zu timed out (sync)\n",
+ cmd, inlen);
+
+ cdx_mcdi_cancel_cmd(cdx, cmd_item);
+
+ wait_data->rc = -ETIMEDOUT;
+ wait_data->outlen_actual = 0;
+ }
+
+ if (outlen_actual)
+ *outlen_actual = wait_data->outlen_actual;
+ rc = wait_data->rc;
+
+out:
+ kref_put(&wait_data->ref, cdx_mcdi_blocking_data_release);
+
+ return rc;
+}
+
+static bool cdx_mcdi_get_seq(struct cdx_mcdi_iface *mcdi, unsigned char *seq)
+{
+ *seq = mcdi->prev_seq;
+ do {
+ *seq = (*seq + 1) % ARRAY_SIZE(mcdi->seq_held_by);
+ } while (mcdi->seq_held_by[*seq] && *seq != mcdi->prev_seq);
+ return !mcdi->seq_held_by[*seq];
+}
+
+static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
+ struct cdx_mcdi_cmd *cmd,
+ unsigned int *handle)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ LIST_HEAD(cleanup_list);
+
+ if (!mcdi) {
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ return -ENETDOWN;
+ }
+
+ if (mcdi->mode == MCDI_MODE_FAIL) {
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ return -ENETDOWN;
+ }
+
+ cmd->mcdi = mcdi;
+ INIT_WORK(&cmd->work, cdx_mcdi_cmd_work);
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_LIST_HEAD(&cmd->cleanup_list);
+ cmd->rc = 0;
+ cmd->outbuf = NULL;
+ cmd->outlen = 0;
+
+ queue_work(mcdi->workqueue, &cmd->work);
+ return 0;
+}
+
+static void cdx_mcdi_cmd_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd)
+{
+ struct cdx_mcdi *cdx = mcdi->cdx;
+ u8 seq;
+
+ if (!mcdi->db_held_by &&
+ cdx_mcdi_get_seq(mcdi, &seq)) {
+ cmd->seq = seq;
+ cmd->reboot_seen = false;
+ cdx_mcdi_send_request(cdx, cmd);
+ cmd->state = MCDI_STATE_RUNNING;
+ } else {
+ cmd->state = MCDI_STATE_QUEUED;
+ }
+}
+
+/* try to advance other commands */
+static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ bool allow_retry)
+{
+ struct cdx_mcdi_cmd *cmd, *tmp;
+
+ list_for_each_entry_safe(cmd, tmp, &mcdi->cmd_list, list)
+ if (cmd->state == MCDI_STATE_QUEUED ||
+ (cmd->state == MCDI_STATE_RETRY && allow_retry))
+ cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
+}
+
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
+{
+ struct cdx_mcdi_iface *mcdi;
+ struct cdx_mcdi_cmd *cmd;
+ LIST_HEAD(cleanup_list);
+ unsigned int respseq;
+
+ if (!len || !outbuf) {
+ pr_err("Got empty MC response\n");
+ return;
+ }
+
+ mcdi = cdx_mcdi_if(cdx);
+ if (!mcdi)
+ return;
+
+ respseq = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_SEQ);
+
+ mutex_lock(&mcdi->iface_lock);
+ cmd = mcdi->seq_held_by[respseq];
+
+ if (cmd) {
+ if (cmd->state == MCDI_STATE_FINISHED) {
+ mutex_unlock(&mcdi->iface_lock);
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ return;
+ }
+
+ cdx_mcdi_complete_cmd(mcdi, cmd, outbuf, len, &cleanup_list);
+ } else {
+ pr_err("MC response unexpected for seq : %0X\n", respseq);
+ }
+
+ mutex_unlock(&mcdi->iface_lock);
+
+ cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
+}
+
+static void cdx_mcdi_cmd_work(struct work_struct *context)
+{
+ struct cdx_mcdi_cmd *cmd =
+ container_of(context, struct cdx_mcdi_cmd, work);
+ struct cdx_mcdi_iface *mcdi = cmd->mcdi;
+
+ mutex_lock(&mcdi->iface_lock);
+
+ cmd->handle = mcdi->prev_handle++;
+ list_add_tail(&cmd->list, &mcdi->cmd_list);
+ cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
+
+ mutex_unlock(&mcdi->iface_lock);
+}
+
+/*
+ * Returns true if the MCDI module is finished with the command.
+ * (examples of false would be if the command was proxied, or it was
+ * rejected by the MC due to lack of resources and requeued).
+ */
+static bool cdx_mcdi_complete_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct cdx_dword *outbuf,
+ int len,
+ struct list_head *cleanup_list)
+{
+ size_t resp_hdr_len, resp_data_len;
+ struct cdx_mcdi *cdx = mcdi->cdx;
+ unsigned int respcmd, error;
+ bool completed = false;
+ int rc;
+
+ /* ensure the command can't go away before this function returns */
+ kref_get(&cmd->ref);
+
+ respcmd = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_CODE);
+ error = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_ERROR);
+
+ if (respcmd != MC_CMD_V2_EXTN) {
+ resp_hdr_len = 4;
+ resp_data_len = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_DATALEN);
+ } else {
+ resp_data_len = 0;
+ resp_hdr_len = 8;
+ if (len >= 8)
+ resp_data_len =
+ CDX_DWORD_FIELD(outbuf[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if ((resp_hdr_len + resp_data_len) > len) {
+ pr_warn("Incomplete MCDI response received %d. Expected %zu\n",
+ len, (resp_hdr_len + resp_data_len));
+ resp_data_len = 0;
+ }
+
+#ifdef CONFIG_MCDI_LOGGING
+ if (!WARN_ON_ONCE(!mcdi->logging_buffer)) {
+ char *log = mcdi->logging_buffer;
+ int i, bytes = 0;
+ size_t rlen;
+
+ WARN_ON_ONCE(resp_hdr_len % 4);
+
+ rlen = resp_hdr_len / 4 + DIV_ROUND_UP(resp_data_len, 4);
+
+ for (i = 0; i < rlen; i++) {
+ if ((bytes + 75) > LOG_LINE_MAX) {
+ pr_info("MCDI RPC RESP:%s \\\n", log);
+ bytes = 0;
+ }
+ bytes += snprintf(log + bytes, LOG_LINE_MAX - bytes,
+ " %08x", le32_to_cpu(outbuf[i].cdx_u32));
+ }
+
+ pr_info("MCDI RPC RESP:%s\n", log);
+ }
+#endif
+
+ if (error && resp_data_len == 0) {
+ /* MC rebooted during command */
+ rc = -EIO;
+ } else {
+ if (WARN_ON_ONCE(error && resp_data_len < 4))
+ resp_data_len = 4;
+ if (error) {
+ rc = CDX_DWORD_FIELD(outbuf[resp_hdr_len / 4], CDX_DWORD);
+ if (!cmd->quiet) {
+ int err_arg = 0;
+
+ if (resp_data_len >= MC_CMD_ERR_ARG_OFST + 4) {
+ int offset = (resp_hdr_len + MC_CMD_ERR_ARG_OFST) / 4;
+
+ err_arg = CDX_DWORD_VAL(outbuf[offset]);
+ }
+
+ _cdx_mcdi_display_error(cdx, cmd->cmd,
+ cmd->inlen, rc, err_arg,
+ cdx_mcdi_errno(cdx, rc));
+ }
+ rc = cdx_mcdi_errno(cdx, rc);
+ } else {
+ rc = 0;
+ }
+ }
+
+ /* free doorbell */
+ if (mcdi->db_held_by == cmd)
+ mcdi->db_held_by = NULL;
+
+ if (cdx_cmd_cancelled(cmd)) {
+ list_del(&cmd->list);
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ completed = true;
+ } else if (rc == MC_CMD_ERR_QUEUE_FULL) {
+ cmd->state = MCDI_STATE_RETRY;
+ } else {
+ cmd->rc = rc;
+ cmd->outbuf = outbuf + DIV_ROUND_UP(resp_hdr_len, 4);
+ cmd->outlen = resp_data_len;
+ cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+ completed = true;
+ }
+
+ /* free sequence number */
+ mcdi->seq_held_by[cmd->seq] = NULL;
+
+ cdx_mcdi_start_or_queue(mcdi, rc != MC_CMD_ERR_QUEUE_FULL);
+
+ /* wake up anyone waiting for flush */
+ wake_up(&mcdi->cmd_complete_wq);
+
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+
+ return completed;
+}
+
+static void cdx_mcdi_timeout_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list)
+{
+ struct cdx_mcdi *cdx = mcdi->cdx;
+
+ pr_err("MC command 0x%x inlen %zu state %d timed out after %u ms\n",
+ cmd->cmd, cmd->inlen, cmd->state,
+ jiffies_to_msecs(jiffies - cmd->started));
+
+ cmd->rc = -ETIMEDOUT;
+ cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+
+ cdx_mcdi_mode_fail(cdx, cleanup_list);
+}
+
+/**
+ * cdx_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @cdx: CDX MCDI interface through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ * response is longer than @outlen & ~3, it will be truncated
+ * to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ * length. May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in process
+ * context.
+ *
+ * Return: A negative error code, or zero if successful. The error
+ * code may come from the MCDI response or may indicate a failure
+ * to communicate with the MC. In the former case, the response
+ * will still be copied to @outbuf and *@outlen_actual will be
+ * set accordingly. In the latter case, *@outlen_actual will be
+ * set to zero.
+ */
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
+
+/**
+ * cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @cdx: CDX MCDI interface through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in process context, when one of the following occurs:
+ * (a) the completion event is received (in process context)
+ * (b) event queues are disabled (in the process that disables them)
+ */
+int
+cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ struct cdx_mcdi_cmd *cmd_item =
+ kmalloc(sizeof(struct cdx_mcdi_cmd) + inlen, GFP_ATOMIC);
+
+ if (!cmd_item)
+ return -ENOMEM;
+
+ kref_init(&cmd_item->ref);
+ cmd_item->quiet = true;
+ cmd_item->cookie = cookie;
+ cmd_item->completer = complete;
+ cmd_item->cmd = cmd;
+ cmd_item->inlen = inlen;
+ /* inbuf is probably not valid after return, so take a copy */
+ cmd_item->inbuf = (struct cdx_dword *)(cmd_item + 1);
+ memcpy(cmd_item + 1, inbuf, inlen);
+
+ return cdx_mcdi_rpc_async_internal(cdx, cmd_item, NULL);
+}
+
+static void _cdx_mcdi_display_error(struct cdx_mcdi *cdx, unsigned int cmd,
+ size_t inlen, int raw, int arg, int err_no)
+{
+ pr_err("MC command 0x%x inlen %d failed err_no=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, err_no, raw, arg);
+}
+
+/*
+ * Set MCDI mode to fail to prevent any new commands, then cancel any
+ * outstanding commands.
+ * Caller must hold the mcdi iface_lock.
+ */
+static void cdx_mcdi_mode_fail(struct cdx_mcdi *cdx, struct list_head *cleanup_list)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+
+ if (!mcdi)
+ return;
+
+ mcdi->mode = MCDI_MODE_FAIL;
+
+ while (!list_empty(&mcdi->cmd_list)) {
+ struct cdx_mcdi_cmd *cmd;
+
+ cmd = list_first_entry(&mcdi->cmd_list, struct cdx_mcdi_cmd,
+ list);
+ _cdx_mcdi_cancel_cmd(mcdi, cdx_mcdi_cmd_handle(cmd), cleanup_list);
+ }
+}
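For illustration, an asynchronous caller would pair cdx_mcdi_rpc_async() with a completer matching the cdx_mcdi_async_completer typedef declared in mcdi.h below. This is only a sketch: the my_enum_done()/my_enum_devices() helpers are hypothetical, while the command and field names are the bus-enumeration ones used later in this series.

static void my_enum_done(struct cdx_mcdi *cdx, unsigned long cookie, int rc,
			 struct cdx_dword *outbuf, size_t outlen_actual)
{
	/* Runs exactly once, in process context, on completion or cancellation */
	if (rc || outlen_actual < MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN)
		pr_warn("device enumeration failed: %d\n", rc);
	else
		pr_info("%u devices found\n",
			MCDI_DWORD(outbuf, CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT));
}

static int my_enum_devices(struct cdx_mcdi *cdx, u8 bus_num)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_ENUM_DEVICES_IN_LEN);

	MCDI_SET_DWORD(inbuf, CDX_BUS_ENUM_DEVICES_IN_BUS, bus_num);
	/* inbuf is copied internally, so a stack buffer is fine here */
	return cdx_mcdi_rpc_async(cdx, MC_CMD_CDX_BUS_ENUM_DEVICES,
				  inbuf, sizeof(inbuf), my_enum_done, 0);
}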
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
new file mode 100644
index 000000000000..0bfbeab04e43
--- /dev/null
+++ b/drivers/cdx/controller/mcdi.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_H
+#define CDX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "bitfield.h"
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**
+ * enum cdx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum cdx_mcdi_mode {
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT (60 * HZ)
+#define MCDI_RPC_POST_RST_TIME (10 * HZ)
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+/**
+ * enum cdx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
+ * @MCDI_STATE_RETRY: Command was submitted but the MC rejected it with no
+ * resources, as the MC has too many outstanding commands. The command will
+ * be retried once another command completes.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
+ * the command.
+ * @MCDI_STATE_FINISHED: Processing of this command has completed.
+ */
+enum cdx_mcdi_cmd_state {
+ MCDI_STATE_QUEUED,
+ MCDI_STATE_RETRY,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_CANCELLED,
+ MCDI_STATE_FINISHED,
+};
+
+/**
+ * struct cdx_mcdi - CDX MCDI firmware interface, used to interact
+ * with the CDX controller.
+ * @mcdi: MCDI interface
+ * @mcdi_ops: MCDI operations
+ * @r5_rproc: R5 Remoteproc device handle
+ * @rpdev: RPMsg device
+ * @ept: RPMsg endpoint
+ * @work: Post probe work
+ */
+struct cdx_mcdi {
+ /* MCDI interface */
+ struct cdx_mcdi_data *mcdi;
+ const struct cdx_mcdi_ops *mcdi_ops;
+
+ struct rproc *r5_rproc;
+ struct rpmsg_device *rpdev;
+ struct rpmsg_endpoint *ept;
+ struct work_struct work;
+};
+
+struct cdx_mcdi_ops {
+ void (*mcdi_request)(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+ unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
+};
+
+typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
+ unsigned long cookie, int rc,
+ struct cdx_dword *outbuf,
+ size_t outlen_actual);
+
+/**
+ * struct cdx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ * in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ * and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: inbuf length
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ * to prevent duplicates
+ * @seq: Sequence number
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @completer: Completion function
+ * @handle: Command handle
+ * @cmd: Command number
+ * @rc: Return code
+ * @outlen: Length of output buffer
+ * @outbuf: Output buffer
+ */
+struct cdx_mcdi_cmd {
+ struct kref ref;
+ struct list_head list;
+ struct list_head cleanup_list;
+ struct work_struct work;
+ struct cdx_mcdi_iface *mcdi;
+ enum cdx_mcdi_cmd_state state;
+ size_t inlen;
+ const struct cdx_dword *inbuf;
+ bool quiet;
+ bool reboot_seen;
+ u8 seq;
+ unsigned long started;
+ unsigned long cookie;
+ cdx_mcdi_async_completer *completer;
+ unsigned int handle;
+ unsigned int cmd;
+ int rc;
+ size_t outlen;
+ struct cdx_dword *outbuf;
+ /* followed by inbuf data if necessary */
+};
+
+/**
+ * struct cdx_mcdi_iface - MCDI protocol context
+ * @cdx: The associated CDX MCDI device context
+ * @iface_lock: Serialise access to this structure
+ * @outstanding_cleanups: Count of cleanups
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_handle: The last used command handle
+ * @mode: Current MCDI transaction mode: wait for event completions, or fail-fast
+ * @prev_seq: The last used sequence number
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @logging_buffer: Buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: Whether to trace MCDI
+ */
+struct cdx_mcdi_iface {
+ struct cdx_mcdi *cdx;
+ /* Serialise access */
+ struct mutex iface_lock;
+ unsigned int outstanding_cleanups;
+ struct list_head cmd_list;
+ struct workqueue_struct *workqueue;
+ wait_queue_head_t cmd_complete_wq;
+ struct cdx_mcdi_cmd *db_held_by;
+ struct cdx_mcdi_cmd *seq_held_by[16];
+ unsigned int prev_handle;
+ enum cdx_mcdi_mode mode;
+ u8 prev_seq;
+ bool new_epoch;
+#ifdef CONFIG_MCDI_LOGGING
+ bool logging_enabled;
+ char *logging_buffer;
+#endif
+};
+
+/**
+ * struct cdx_mcdi_data - extra state for CDX controllers that implement MCDI
+ * @iface: Interface/protocol state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct cdx_mcdi_data {
+ struct cdx_mcdi_iface iface;
+ u32 fn_flags;
+};
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_init(struct cdx_mcdi *cdx);
+void cdx_mcdi_finish(struct cdx_mcdi *cdx);
+
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
+ (_ofst))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
+#define MCDI_DWORD(_buf, _field) \
+ CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDI_H */
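As a usage note, a controller driver is expected to bracket its use of this interface with cdx_mcdi_init() and cdx_mcdi_finish(). The teardown ordering below is only a sketch; the one-second timeout and the dev pointer are arbitrary examples, not values mandated by this patch.

	int ret;

	/* flush queued submissions and wait for in-flight commands to drain */
	ret = cdx_mcdi_wait_for_quiescence(cdx, msecs_to_jiffies(1000));
	if (ret)
		dev_warn(dev, "MCDI commands still pending: %d\n", ret);

	/* waits for outstanding cleanups, then frees cdx->mcdi */
	cdx_mcdi_finish(cdx);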
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
new file mode 100644
index 000000000000..0158f26533dd
--- /dev/null
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+
+#include "mcdi.h"
+#include "mcdi_functions.h"
+
+int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_ENUM_BUSES_OUT_LEN);
+ size_t outlen;
+ int ret;
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_ENUM_BUSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_BUS_ENUM_BUSES_OUT_LEN)
+ return -EIO;
+
+ return MCDI_DWORD(outbuf, CDX_BUS_ENUM_BUSES_OUT_BUS_COUNT);
+}
+
+int cdx_mcdi_get_num_devs(struct cdx_mcdi *cdx, int bus_num)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_ENUM_DEVICES_IN_LEN);
+ size_t outlen;
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_ENUM_DEVICES_IN_BUS, bus_num);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_ENUM_DEVICES, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN)
+ return -EIO;
+
+ return MCDI_DWORD(outbuf, CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT);
+}
+
+int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_dev_params *dev_params)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_LEN);
+ struct resource *res = &dev_params->res[0];
+ size_t outlen;
+ u32 req_id;
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_GET_DEVICE_CONFIG_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_BUS_GET_DEVICE_CONFIG_IN_DEVICE, dev_num);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN)
+ return -EIO;
+
+ dev_params->bus_num = bus_num;
+ dev_params->dev_num = dev_num;
+
+ req_id = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID);
+ dev_params->req_id = req_id;
+
+ dev_params->res_count = 0;
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ dev_params->vendor = MCDI_WORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID);
+ dev_params->device = MCDI_WORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID);
+
+ return 0;
+}
+
+int cdx_mcdi_reset_device(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_RESET_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_RESET_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_RESET_IN_DEVICE, dev_num);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
new file mode 100644
index 000000000000..7440ace5539a
--- /dev/null
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Header file for MCDI FW interaction for CDX bus.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_FUNCTIONS_H
+#define CDX_MCDI_FUNCTIONS_H
+
+#include "mcdi.h"
+#include "../cdx.h"
+
+/**
+ * cdx_mcdi_get_num_buses - Get the total number of buses on
+ * the controller.
+ * @cdx: pointer to MCDI interface.
+ *
+ * Return: total number of buses available on the controller,
+ * <0 on failure
+ */
+int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx);
+
+/**
+ * cdx_mcdi_get_num_devs - Get the total number of devices on
+ * a particular bus of the controller.
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ *
+ * Return: total number of devices available on the bus, <0 on failure
+ */
+int cdx_mcdi_get_num_devs(struct cdx_mcdi *cdx, int bus_num);
+
+/**
+ * cdx_mcdi_get_dev_config - Get configuration for a particular
+ * bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @dev_params: Pointer to cdx_dev_params, populated by this function
+ * with the configuration corresponding to the provided
+ * bus_num:dev_num.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_dev_params *dev_params);
+
+/**
+ * cdx_mcdi_reset_device - Reset cdx device represented by bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_reset_device(struct cdx_mcdi *cdx,
+ u8 bus_num, u8 dev_num);
+
+#endif /* CDX_MCDI_FUNCTIONS_H */
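Taken together, these helpers lend themselves to a discovery loop along the following lines. This is only a sketch: my_scan_controller() and my_register_device() are hypothetical stand-ins for whatever the bus driver does with the returned cdx_dev_params.

static int my_scan_controller(struct cdx_mcdi *cdx)
{
	int num_buses, num_devs, bus, dev;

	num_buses = cdx_mcdi_get_num_buses(cdx);
	if (num_buses < 0)
		return num_buses;

	for (bus = 0; bus < num_buses; bus++) {
		num_devs = cdx_mcdi_get_num_devs(cdx, bus);
		if (num_devs < 0)
			continue;

		for (dev = 0; dev < num_devs; dev++) {
			struct cdx_dev_params params;

			if (cdx_mcdi_get_dev_config(cdx, bus, dev, &params))
				continue;
			my_register_device(&params);	/* hypothetical */
		}
	}

	return 0;
}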
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 30fe9848dac1..801d6c83f896 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -247,8 +247,6 @@ config SONYPI
To compile this driver as a module, choose M here: the
module will be called sonypi.
-source "drivers/char/pcmcia/Kconfig"
-
config MWAVE
tristate "ACP Modem (Mwave) support"
depends on X86 && TTY
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 1b35d1724565..c5f532e412f1 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_TELCLOCK) += tlclk.o
obj-$(CONFIG_MWAVE) += mwave/
obj-y += agp/
-obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index bb09d64cd51e..8771dcc9b8e2 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -236,6 +236,12 @@ void agp3_generic_tlbflush(struct agp_memory *mem);
int agp3_generic_configure(void);
void agp3_generic_cleanup(void);
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
/* aperture sizes have been standardised since v3 */
#define AGP_GENERIC_SIZES_ENTRIES 11
extern const struct aper_size_info_16 agp3_generic_sizes[];
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 36203d3fa6ea..69314532f38c 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -197,8 +197,10 @@ static int __init applicom_init(void)
if (!pci_match_id(applicom_pci_tbl, dev))
continue;
- if (pci_enable_device(dev))
+ if (pci_enable_device(dev)) {
+ pci_dev_put(dev);
return -EIO;
+ }
RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
@@ -207,6 +209,7 @@ static int __init applicom_init(void)
"space at 0x%llx\n",
(unsigned long long)pci_resource_start(dev, 0));
pci_disable_device(dev);
+ pci_dev_put(dev);
return -EIO;
}
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index d5f943938427..ff429ba02fa4 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -293,7 +293,7 @@ static int __init bsr_init(void)
if (!np)
goto out_err;
- bsr_class = class_create(THIS_MODULE, "bsr");
+ bsr_class = class_create("bsr");
if (IS_ERR(bsr_class)) {
printk(KERN_ERR "class_create() failed for bsr_class\n");
ret = PTR_ERR(bsr_class);
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 06749e295ada..b3eaf3e5ef2e 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -504,7 +504,7 @@ static int __init dsp56k_init_driver(void)
printk("DSP56k driver: Unable to register driver\n");
return -ENODEV;
}
- dsp56k_class = class_create(THIS_MODULE, "dsp56k");
+ dsp56k_class = class_create("dsp56k");
if (IS_ERR(dsp56k_class)) {
err = PTR_ERR(dsp56k_class);
goto out_chrdev;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 3da8e85f8aae..4fdf07ae3c54 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -549,6 +549,16 @@ config HW_RANDOM_CN10K
To compile this driver as a module, choose M here.
The module will be called cn10k_rng. If unsure, say Y.
+config HW_RANDOM_JH7110
+ tristate "StarFive JH7110 Random Number Generator support"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ help
+ This driver provides support for the True Random Number
+ Generator in StarFive JH7110 SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called jh7110-trng.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3e948cf04476..09bde4a0f971 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
+obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c
new file mode 100644
index 000000000000..38474d48a25e
--- /dev/null
+++ b/drivers/char/hw_random/jh7110-trng.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TRNG driver for the StarFive JH7110 SoC
+ *
+ * Copyright (C) 2022 StarFive Technology Co.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+
+/* trng register offset */
+#define STARFIVE_CTRL 0x00
+#define STARFIVE_STAT 0x04
+#define STARFIVE_MODE 0x08
+#define STARFIVE_SMODE 0x0C
+#define STARFIVE_IE 0x10
+#define STARFIVE_ISTAT 0x14
+#define STARFIVE_RAND0 0x20
+#define STARFIVE_RAND1 0x24
+#define STARFIVE_RAND2 0x28
+#define STARFIVE_RAND3 0x2C
+#define STARFIVE_RAND4 0x30
+#define STARFIVE_RAND5 0x34
+#define STARFIVE_RAND6 0x38
+#define STARFIVE_RAND7 0x3C
+#define STARFIVE_AUTO_RQSTS 0x60
+#define STARFIVE_AUTO_AGE 0x64
+
+/* CTRL CMD */
+#define STARFIVE_CTRL_EXEC_NOP 0x0
+#define STARFIVE_CTRL_GENE_RANDNUM 0x1
+#define STARFIVE_CTRL_EXEC_RANDRESEED 0x2
+
+/* STAT */
+#define STARFIVE_STAT_NONCE_MODE BIT(2)
+#define STARFIVE_STAT_R256 BIT(3)
+#define STARFIVE_STAT_MISSION_MODE BIT(8)
+#define STARFIVE_STAT_SEEDED BIT(9)
+#define STARFIVE_STAT_LAST_RESEED(x) ((x) << 16)
+#define STARFIVE_STAT_SRVC_RQST BIT(27)
+#define STARFIVE_STAT_RAND_GENERATING BIT(30)
+#define STARFIVE_STAT_RAND_SEEDING BIT(31)
+
+/* MODE */
+#define STARFIVE_MODE_R256 BIT(3)
+
+/* SMODE */
+#define STARFIVE_SMODE_NONCE_MODE BIT(2)
+#define STARFIVE_SMODE_MISSION_MODE BIT(8)
+#define STARFIVE_SMODE_MAX_REJECTS(x) ((x) << 16)
+
+/* IE */
+#define STARFIVE_IE_RAND_RDY_EN BIT(0)
+#define STARFIVE_IE_SEED_DONE_EN BIT(1)
+#define STARFIVE_IE_LFSR_LOCKUP_EN BIT(4)
+#define STARFIVE_IE_GLBL_EN BIT(31)
+
+#define STARFIVE_IE_ALL (STARFIVE_IE_GLBL_EN | \
+ STARFIVE_IE_RAND_RDY_EN | \
+ STARFIVE_IE_SEED_DONE_EN | \
+ STARFIVE_IE_LFSR_LOCKUP_EN)
+
+/* ISTAT */
+#define STARFIVE_ISTAT_RAND_RDY BIT(0)
+#define STARFIVE_ISTAT_SEED_DONE BIT(1)
+#define STARFIVE_ISTAT_LFSR_LOCKUP BIT(4)
+
+#define STARFIVE_RAND_LEN sizeof(u32)
+
+#define to_trng(p) container_of(p, struct starfive_trng, rng)
+
+enum reseed {
+ RANDOM_RESEED,
+ NONCE_RESEED,
+};
+
+enum mode {
+ PRNG_128BIT,
+ PRNG_256BIT,
+};
+
+struct starfive_trng {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *ahb;
+ struct reset_control *rst;
+ struct hwrng rng;
+ struct completion random_done;
+ struct completion reseed_done;
+ u32 mode;
+ u32 mission;
+ u32 reseed;
+ /* protects against concurrent write to ctrl register */
+ spinlock_t write_lock;
+};
+
+static u16 autoreq;
+module_param(autoreq, ushort, 0);
+MODULE_PARM_DESC(autoreq, "Auto-reseed after the number of random number requests from the host reaches the specified count:\n"
+ " 0 - disable counter\n"
+ " other - reload value for internal counter");
+
+static u16 autoage;
+module_param(autoage, ushort, 0);
+MODULE_PARM_DESC(autoage, "Auto-reseed after the specified timer counts down to 0:\n"
+ " 0 - disable timer\n"
+ " other - reload value for internal timer");
+
+static inline int starfive_trng_wait_idle(struct starfive_trng *trng)
+{
+ u32 stat;
+
+ return readl_relaxed_poll_timeout(trng->base + STARFIVE_STAT, stat,
+ !(stat & (STARFIVE_STAT_RAND_GENERATING |
+ STARFIVE_STAT_RAND_SEEDING)),
+ 10, 100000);
+}
+
+static inline void starfive_trng_irq_mask_clear(struct starfive_trng *trng)
+{
+ /* clear register: ISTAT */
+ u32 data = readl(trng->base + STARFIVE_ISTAT);
+
+ writel(data, trng->base + STARFIVE_ISTAT);
+}
+
+static int starfive_trng_cmd(struct starfive_trng *trng, u32 cmd, bool wait)
+{
+ int wait_time = 1000;
+
+ /* allow up to 40 us for wait == 0 */
+ if (!wait)
+ wait_time = 40;
+
+ switch (cmd) {
+ case STARFIVE_CTRL_GENE_RANDNUM:
+ reinit_completion(&trng->random_done);
+ spin_lock_irq(&trng->write_lock);
+ writel(cmd, trng->base + STARFIVE_CTRL);
+ spin_unlock_irq(&trng->write_lock);
+ if (!wait_for_completion_timeout(&trng->random_done, usecs_to_jiffies(wait_time)))
+ return -ETIMEDOUT;
+ break;
+ case STARFIVE_CTRL_EXEC_RANDRESEED:
+ reinit_completion(&trng->reseed_done);
+ spin_lock_irq(&trng->write_lock);
+ writel(cmd, trng->base + STARFIVE_CTRL);
+ spin_unlock_irq(&trng->write_lock);
+ if (!wait_for_completion_timeout(&trng->reseed_done, usecs_to_jiffies(wait_time)))
+ return -ETIMEDOUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int starfive_trng_init(struct hwrng *rng)
+{
+ struct starfive_trng *trng = to_trng(rng);
+ u32 mode, intr = 0;
+
+ /* setup Auto Request/Age register */
+ writel(autoage, trng->base + STARFIVE_AUTO_AGE);
+ writel(autoreq, trng->base + STARFIVE_AUTO_RQSTS);
+
+ /* clear register: ISTAT */
+ starfive_trng_irq_mask_clear(trng);
+
+ intr |= STARFIVE_IE_ALL;
+ writel(intr, trng->base + STARFIVE_IE);
+
+ mode = readl(trng->base + STARFIVE_MODE);
+
+ switch (trng->mode) {
+ case PRNG_128BIT:
+ mode &= ~STARFIVE_MODE_R256;
+ break;
+ case PRNG_256BIT:
+ mode |= STARFIVE_MODE_R256;
+ break;
+ default:
+ mode |= STARFIVE_MODE_R256;
+ break;
+ }
+
+ writel(mode, trng->base + STARFIVE_MODE);
+
+ return starfive_trng_cmd(trng, STARFIVE_CTRL_EXEC_RANDRESEED, 1);
+}
+
+static irqreturn_t starfive_trng_irq(int irq, void *priv)
+{
+ u32 status;
+ struct starfive_trng *trng = (struct starfive_trng *)priv;
+
+ status = readl(trng->base + STARFIVE_ISTAT);
+ if (status & STARFIVE_ISTAT_RAND_RDY) {
+ writel(STARFIVE_ISTAT_RAND_RDY, trng->base + STARFIVE_ISTAT);
+ complete(&trng->random_done);
+ }
+
+ if (status & STARFIVE_ISTAT_SEED_DONE) {
+ writel(STARFIVE_ISTAT_SEED_DONE, trng->base + STARFIVE_ISTAT);
+ complete(&trng->reseed_done);
+ }
+
+ if (status & STARFIVE_ISTAT_LFSR_LOCKUP) {
+ writel(STARFIVE_ISTAT_LFSR_LOCKUP, trng->base + STARFIVE_ISTAT);
+ /* SEU occurred, reseeding required */
+ spin_lock(&trng->write_lock);
+ writel(STARFIVE_CTRL_EXEC_RANDRESEED, trng->base + STARFIVE_CTRL);
+ spin_unlock(&trng->write_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void starfive_trng_cleanup(struct hwrng *rng)
+{
+ struct starfive_trng *trng = to_trng(rng);
+
+ writel(0, trng->base + STARFIVE_CTRL);
+
+ reset_control_assert(trng->rst);
+ clk_disable_unprepare(trng->hclk);
+ clk_disable_unprepare(trng->ahb);
+}
+
+static int starfive_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct starfive_trng *trng = to_trng(rng);
+ int ret;
+
+ pm_runtime_get_sync(trng->dev);
+
+ if (trng->mode == PRNG_256BIT)
+ max = min_t(size_t, max, (STARFIVE_RAND_LEN * 8));
+ else
+ max = min_t(size_t, max, (STARFIVE_RAND_LEN * 4));
+
+ if (wait) {
+ ret = starfive_trng_wait_idle(trng);
+ if (ret)
+ return -ETIMEDOUT;
+ }
+
+ ret = starfive_trng_cmd(trng, STARFIVE_CTRL_GENE_RANDNUM, wait);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, trng->base + STARFIVE_RAND0, max);
+
+ pm_runtime_put_sync_autosuspend(trng->dev);
+
+ return max;
+}
+
+static int starfive_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ int irq;
+ struct starfive_trng *trng;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
+ "Error remapping memory for platform device.\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ init_completion(&trng->random_done);
+ init_completion(&trng->reseed_done);
+ spin_lock_init(&trng->write_lock);
+
+ ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name,
+ (void *)trng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register interrupt handler\n");
+
+ trng->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(trng->hclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->hclk),
+ "Error getting hardware reference clock\n");
+
+ trng->ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(trng->ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->ahb),
+ "Error getting ahb reference clock\n");
+
+ trng->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(trng->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->rst),
+ "Error getting hardware reset line\n");
+
+ clk_prepare_enable(trng->hclk);
+ clk_prepare_enable(trng->ahb);
+ reset_control_deassert(trng->rst);
+
+ trng->rng.name = dev_driver_string(&pdev->dev);
+ trng->rng.init = starfive_trng_init;
+ trng->rng.cleanup = starfive_trng_cleanup;
+ trng->rng.read = starfive_trng_read;
+
+ trng->mode = PRNG_256BIT;
+ trng->mission = 1;
+ trng->reseed = RANDOM_RESEED;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+
+ reset_control_assert(trng->rst);
+ clk_disable_unprepare(trng->ahb);
+ clk_disable_unprepare(trng->hclk);
+
+ return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
+ }
+
+ return 0;
+}
+
+static int __maybe_unused starfive_trng_suspend(struct device *dev)
+{
+ struct starfive_trng *trng = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(trng->hclk);
+ clk_disable_unprepare(trng->ahb);
+
+ return 0;
+}
+
+static int __maybe_unused starfive_trng_resume(struct device *dev)
+{
+ struct starfive_trng *trng = dev_get_drvdata(dev);
+
+ clk_prepare_enable(trng->hclk);
+ clk_prepare_enable(trng->ahb);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend,
+ starfive_trng_resume);
+
+static const struct of_device_id trng_dt_ids[] __maybe_unused = {
+ { .compatible = "starfive,jh7110-trng" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, trng_dt_ids);
+
+static struct platform_driver starfive_trng_driver = {
+ .probe = starfive_trng_probe,
+ .driver = {
+ .name = "jh7110-trng",
+ .pm = &starfive_trng_pm_ops,
+ .of_match_table = of_match_ptr(trng_dt_ids),
+ },
+};
+
+module_platform_driver(starfive_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("StarFive True Random Number Generator");
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 8bb30282ca46..a4eb8e35f13d 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -18,9 +18,7 @@
struct meson_rng_data {
void __iomem *base;
- struct platform_device *pdev;
struct hwrng rng;
- struct clk *core_clk;
};
static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -33,47 +31,28 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
return sizeof(u32);
}
-static void meson_rng_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
- int ret;
+ struct clk *core_clk;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->pdev = pdev;
-
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->core_clk = devm_clk_get_optional(dev, "core");
- if (IS_ERR(data->core_clk))
- return dev_err_probe(dev, PTR_ERR(data->core_clk),
+ core_clk = devm_clk_get_optional_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk),
"Failed to get core clock\n");
- if (data->core_clk) {
- ret = clk_prepare_enable(data->core_clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
- data->core_clk);
- if (ret)
- return ret;
- }
-
data->rng.name = pdev->name;
data->rng.read = meson_rng_read;
- platform_set_drvdata(pdev, data);
-
return devm_hwrng_register(dev, &data->rng);
}
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 008e6db9ce01..7c8f3cb7c6af 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -84,7 +84,6 @@ struct xgene_rng_dev {
unsigned long failure_ts;/* First failure timestamp */
struct timer_list failure_timer;
struct device *dev;
- struct clk *clk;
};
static void xgene_rng_expired_timer(struct timer_list *t)
@@ -200,7 +199,7 @@ static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
{
- struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+ struct xgene_rng_dev *ctx = id;
/* RNG Alarm Counter overflow */
xgene_rng_chk_overflow(ctx);
@@ -314,6 +313,7 @@ static struct hwrng xgene_rng_func = {
static int xgene_rng_probe(struct platform_device *pdev)
{
struct xgene_rng_dev *ctx;
+ struct clk *clk;
int rc = 0;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -337,58 +337,36 @@ static int xgene_rng_probe(struct platform_device *pdev)
rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
dev_name(&pdev->dev), ctx);
- if (rc) {
- dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
- return rc;
- }
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "Could not request RNG alarm IRQ\n");
/* Enable IP clock */
- ctx->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ctx->clk)) {
- dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
- } else {
- rc = clk_prepare_enable(ctx->clk);
- if (rc) {
- dev_warn(&pdev->dev,
- "clock prepare enable failed for RNG");
- return rc;
- }
- }
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Couldn't get the clock for RNG\n");
xgene_rng_func.priv = (unsigned long) ctx;
rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func);
- if (rc) {
- dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
- if (!IS_ERR(ctx->clk))
- clk_disable_unprepare(ctx->clk);
- return rc;
- }
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "RNG registering failed\n");
rc = device_init_wakeup(&pdev->dev, 1);
- if (rc) {
- dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
- rc);
- if (!IS_ERR(ctx->clk))
- clk_disable_unprepare(ctx->clk);
- return rc;
- }
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "RNG device_init_wakeup failed\n");
return 0;
}
static int xgene_rng_remove(struct platform_device *pdev)
{
- struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
int rc;
rc = device_init_wakeup(&pdev->dev, 0);
if (rc)
dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
- if (!IS_ERR(ctx->clk))
- clk_disable_unprepare(ctx->clk);
- return rc;
+ return 0;
}
static const struct of_device_id xgene_rng_of_match[] = {
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index b6c0d35fc1a5..f4adc6feb3b2 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -162,7 +162,8 @@ config IPMI_KCS_BMC_SERIO
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+ depends on MFD_SYSCON
+ select REGMAP_MMIO
tristate "BT IPMI bmc driver"
help
Provides a driver for the BT (Block Transfer) IPMI interface
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index d160fa4c73fe..73e5a9e28f85 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -860,7 +860,7 @@ static int __init init_ipmi_devintf(void)
pr_info("ipmi device interface\n");
- ipmi_class = class_create(THIS_MODULE, "ipmi");
+ ipmi_class = class_create("ipmi");
if (IS_ERR(ipmi_class)) {
pr_err("ipmi: can't register device class\n");
return PTR_ERR(ipmi_class);
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 7c1aee5e11b7..3f1c9f1573e7 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -27,7 +27,7 @@ MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
static unsigned int retry_time_ms = 250;
module_param(retry_time_ms, uint, 0644);
-MODULE_PARM_DESC(max_retries, "Timeout time between retries, in milliseconds.");
+MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds.");
static unsigned int max_retries = 1;
module_param(max_retries, uint, 0644);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 163ec9749e55..870659d91db2 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -659,20 +659,6 @@ static struct ctl_table ipmi_table[] = {
{ }
};
-static struct ctl_table ipmi_dir_table[] = {
- { .procname = "ipmi",
- .mode = 0555,
- .child = ipmi_table },
- { }
-};
-
-static struct ctl_table ipmi_root_table[] = {
- { .procname = "dev",
- .mode = 0555,
- .child = ipmi_dir_table },
- { }
-};
-
static struct ctl_table_header *ipmi_table_header;
#endif /* CONFIG_PROC_FS */
@@ -689,7 +675,7 @@ static int __init ipmi_poweroff_init(void)
pr_info("Power cycle is enabled\n");
#ifdef CONFIG_PROC_FS
- ipmi_table_header = register_sysctl_table(ipmi_root_table);
+ ipmi_table_header = register_sysctl("dev/ipmi", ipmi_table);
if (!ipmi_table_header) {
pr_err("Unable to register powercycle sysctl\n");
rv = -ENOMEM;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 4bfd1e306616..3b921c78ba08 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -74,7 +74,8 @@
/*
* Timer values
*/
-#define SSIF_MSG_USEC 60000 /* 60ms between message tries. */
+#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
+#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
/* How many times to we retry sending/receiving the message. */
@@ -82,7 +83,9 @@
#define SSIF_RECV_RETRIES 250
#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
/*
@@ -92,7 +95,7 @@
#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
enum ssif_intf_state {
- SSIF_NORMAL,
+ SSIF_IDLE,
SSIF_GETTING_FLAGS,
SSIF_GETTING_EVENTS,
SSIF_CLEARING_FLAGS,
@@ -100,8 +103,8 @@ enum ssif_intf_state {
/* FIXME - add watchdog stuff. */
};
-#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
- && (ssif)->curr_msg == NULL)
+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
+ && (ssif)->curr_msg == NULL)
/*
* Indexes into stats[] in ssif_info below.
@@ -229,6 +232,9 @@ struct ssif_info {
bool got_alert;
bool waiting_alert;
+ /* Used to inform the timeout that it should do a resend. */
+ bool do_resend;
+
/*
* If set to true, this will request events the next time the
* state machine is idle.
@@ -241,12 +247,6 @@ struct ssif_info {
*/
bool req_flags;
- /*
- * Used to perform timer operations when run-to-completion
- * mode is on. This is a countdown timer.
- */
- int rtc_us_timer;
-
/* Used for sending/receiving data. +1 for the length. */
unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
unsigned int data_len;
@@ -348,9 +348,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
- * message lock. Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
*/
static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@@ -367,7 +367,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
if (start_send(ssif_info, msg, 3) != 0) {
/* Error, just go to normal state. */
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
}
}
@@ -382,7 +382,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
mb[1] = IPMI_GET_MSG_FLAGS_CMD;
if (start_send(ssif_info, mb, 2) != 0)
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
}
static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
@@ -393,7 +393,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->curr_msg = NULL;
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
ipmi_free_smi_msg(msg);
}
@@ -407,7 +407,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -430,7 +430,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -448,9 +448,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
- * message lock. Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
*/
static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@@ -466,7 +466,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
/* Events available. */
start_event_fetch(ssif_info, flags);
else {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
}
}
@@ -530,7 +530,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
static void start_get(struct ssif_info *ssif_info)
{
- ssif_info->rtc_us_timer = 0;
ssif_info->multi_pos = 0;
ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
@@ -538,22 +537,30 @@ static void start_get(struct ssif_info *ssif_info)
ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
}
+static void start_resend(struct ssif_info *ssif_info);
+
static void retry_timeout(struct timer_list *t)
{
struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
- bool waiting;
+ bool waiting, resend;
if (ssif_info->stopping)
return;
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ resend = ssif_info->do_resend;
+ ssif_info->do_resend = false;
waiting = ssif_info->waiting_alert;
ssif_info->waiting_alert = false;
ipmi_ssif_unlock_cond(ssif_info, flags);
if (waiting)
start_get(ssif_info);
+ if (resend) {
+ start_resend(ssif_info);
+ ssif_inc_stat(ssif_info, send_retries);
+ }
}
static void watch_timeout(struct timer_list *t)
@@ -568,7 +575,7 @@ static void watch_timeout(struct timer_list *t)
if (ssif_info->watch_timeout) {
mod_timer(&ssif_info->watch_timer,
jiffies + ssif_info->watch_timeout);
- if (SSIF_IDLE(ssif_info)) {
+ if (IS_SSIF_IDLE(ssif_info)) {
start_flag_fetch(ssif_info, flags); /* Releases lock */
return;
}
@@ -602,8 +609,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
start_get(ssif_info);
}
-static int start_resend(struct ssif_info *ssif_info);
-
static void msg_done_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len)
{
@@ -622,7 +627,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->waiting_alert = true;
- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_JIFFIES);
@@ -756,7 +760,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
switch (ssif_info->ssif_state) {
- case SSIF_NORMAL:
+ case SSIF_IDLE:
ipmi_ssif_unlock_cond(ssif_info, flags);
if (!msg)
break;
@@ -774,7 +778,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
* Error fetching flags, or invalid length,
* just give up for now.
*/
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Error getting flags: %d %d, %x\n",
@@ -782,9 +786,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
/*
- * Don't abort here, maybe it was a queued
- * response to a previous command.
+ * Recv error response, give up.
*/
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Invalid response getting flags: %x %x\n",
@@ -809,7 +813,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
"Invalid response clearing flags: %x %x\n",
data[0], data[1]);
}
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
break;
@@ -887,7 +891,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
- if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
if (ssif_info->req_events)
start_event_fetch(ssif_info, flags);
else if (ssif_info->req_flags)
@@ -909,31 +913,23 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
if (result < 0) {
ssif_info->retries_left--;
if (ssif_info->retries_left > 0) {
- if (!start_resend(ssif_info)) {
- ssif_inc_stat(ssif_info, send_retries);
- return;
- }
- /* request failed, just return the error. */
- ssif_inc_stat(ssif_info, send_errors);
-
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "%s: Out of retries\n", __func__);
- msg_done_handler(ssif_info, -EIO, NULL, 0);
+ /*
+ * Wait the retry timeout time per the spec,
+ * then redo the send.
+ */
+ ssif_info->do_resend = true;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_REQ_RETRY_JIFFIES);
return;
}
ssif_inc_stat(ssif_info, send_errors);
- /*
- * Got an error on transmit, let the done routine
- * handle it.
- */
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
dev_dbg(&ssif_info->client->dev,
- "%s: Error %d\n", __func__, result);
+ "%s: Out of retries\n", __func__);
- msg_done_handler(ssif_info, result, NULL, 0);
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
return;
}
@@ -987,7 +983,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
/* Wait a jiffie then request the next message */
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_PART_JIFFIES);
@@ -996,7 +991,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
}
}
-static int start_resend(struct ssif_info *ssif_info)
+static void start_resend(struct ssif_info *ssif_info)
{
int command;
@@ -1021,7 +1016,6 @@ static int start_resend(struct ssif_info *ssif_info)
ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
- return 0;
}
static int start_send(struct ssif_info *ssif_info,
@@ -1036,7 +1030,8 @@ static int start_send(struct ssif_info *ssif_info,
ssif_info->retries_left = SSIF_SEND_RETRIES;
memcpy(ssif_info->data + 1, data, len);
ssif_info->data_len = len;
- return start_resend(ssif_info);
+ start_resend(ssif_info);
+ return 0;
}
/* Must be called with the message lock held. */
@@ -1046,7 +1041,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
unsigned long oflags;
restart:
- if (!SSIF_IDLE(ssif_info)) {
+ if (!IS_SSIF_IDLE(ssif_info)) {
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -1269,7 +1264,7 @@ static void shutdown_ssif(void *send_info)
dev_set_drvdata(&ssif_info->client->dev, NULL);
/* make sure the driver is not looking for flags any more. */
- while (ssif_info->ssif_state != SSIF_NORMAL)
+ while (ssif_info->ssif_state != SSIF_IDLE)
schedule_timeout(1);
ssif_info->stopping = true;
@@ -1286,11 +1281,8 @@ static void ssif_remove(struct i2c_client *client)
struct ssif_info *ssif_info = i2c_get_clientdata(client);
struct ssif_addr_info *addr_info;
- if (!ssif_info)
- return;
-
/*
- * After this point, we won't deliver anything asychronously
+ * After this point, we won't deliver anything asynchronously
* to the message handler. We can unregister ourself.
*/
ipmi_unregister_smi(ssif_info->intf);
@@ -1334,8 +1326,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
if (ret) {
retry_cnt--;
- if (retry_cnt > 0)
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
goto retry1;
+ }
return -ENODEV;
}
@@ -1476,8 +1470,10 @@ retry_write:
32, msg);
if (ret) {
retry_cnt--;
- if (retry_cnt > 0)
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
goto retry_write;
+ }
dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
return ret;
}
@@ -1839,7 +1835,7 @@ static int ssif_probe(struct i2c_client *client)
}
spin_lock_init(&ssif_info->lock);
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
@@ -2074,9 +2070,6 @@ static int ssif_platform_remove(struct platform_device *dev)
{
struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev);
- if (!addr_info)
- return 0;
-
mutex_lock(&ssif_infos_mutex);
list_del(&addr_info->link);
kfree(addr_info);
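The ipmi_ssif changes above move request retries out of the I2C completion path: on a write failure, msg_written_handler() now only sets do_resend and arms retry_timer, and retry_timeout() performs the resend after the SSIF spec's T6 delay (SSIF_REQ_RETRY_JIFFIES). A stripped-down sketch of that deferred-resend pattern is shown below; the structure and function names are illustrative, not the driver's own.

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct resend_ctx {
	struct timer_list retry_timer;
	spinlock_t lock;
	bool do_resend;
};

static void issue_send(struct resend_ctx *ctx)
{
	/* Start the (possibly retried) write here. */
}

static void resend_timeout(struct timer_list *t)
{
	struct resend_ctx *ctx = from_timer(ctx, t, retry_timer);
	unsigned long flags;
	bool resend;

	/* Consume the flag under the lock, then act on it outside. */
	spin_lock_irqsave(&ctx->lock, flags);
	resend = ctx->do_resend;
	ctx->do_resend = false;
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (resend)
		issue_send(ctx);
}

static void write_failed(struct resend_ctx *ctx)
{
	/* Defer the resend to the timer instead of retrying inline. */
	ctx->do_resend = true;
	mod_timer(&ctx->retry_timer, jiffies + msecs_to_jiffies(60));
}

static void resend_init(struct resend_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	timer_setup(&ctx->retry_timer, resend_timeout, 0);
}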
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 38aad99ebb61..70cfc5140c2c 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -1049,7 +1049,7 @@ static int __init lp_init(void)
return -EIO;
}
- lp_class = class_create(THIS_MODULE, "printer");
+ lp_class = class_create("printer");
if (IS_ERR(lp_class)) {
err = PTR_ERR(lp_class);
goto out_reg;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 83bf2a4dcb57..f494d31f2b98 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@ static unsigned zero_mmap_capabilities(struct file *file)
/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
- return vma->vm_flags & VM_MAYSHARE;
+ return is_nommu_shared_mapping(vma->vm_flags);
}
#else
@@ -762,7 +762,7 @@ static int __init chr_dev_init(void)
if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR);
- mem_class = class_create(THIS_MODULE, "mem");
+ mem_class = class_create("mem");
if (IS_ERR(mem_class))
return PTR_ERR(mem_class);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 7a1388b0572b..1c44c29a666e 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -286,7 +286,7 @@ static int __init misc_init(void)
struct proc_dir_entry *ret;
ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
- misc_class = class_create(THIS_MODULE, "misc");
+ misc_class = class_create("misc");
err = PTR_ERR(misc_class);
if (IS_ERR(misc_class))
goto fail_remove;
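The lp, mem, and misc hunks above all apply the same conversion: class_create() no longer takes a module owner argument, only the class name. A minimal sketch of the updated call, with an illustrative class name:

#include <linux/device.h>
#include <linux/err.h>

static struct class *example_class;

static int __init example_class_init(void)
{
	/* Only the class name is passed; the owning module is implied. */
	example_class = class_create("example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);
	return 0;
}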
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index f8231e2e84be..b35f651837c8 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
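In the mem.c and mspec.c hunks above, open-coded tests and writes of vma->vm_flags are replaced by the is_nommu_shared_mapping() helper and the vm_flags_set() accessor. A small sketch of the accessor in an mmap handler, with illustrative names:

#include <linux/fs.h>
#include <linux/mm.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Set VMA flags through the accessor rather than writing vm_flags directly. */
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}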
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
deleted file mode 100644
index f5d589b2be44..000000000000
--- a/drivers/char/pcmcia/Kconfig
+++ /dev/null
@@ -1,68 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# PCMCIA character device configuration
-#
-
-menu "PCMCIA character devices"
- depends on PCMCIA!=n
-
-config SYNCLINK_CS
- tristate "SyncLink PC Card support"
- depends on PCMCIA && TTY
- help
- Enable support for the SyncLink PC Card serial adapter, running
- asynchronous and HDLC communications up to 512Kbps. The port is
- selectable for RS-232, V.35, RS-449, RS-530, and X.21
-
- This driver may be built as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want).
- The module will be called synclink_cs. If you want to do that, say M
- here.
-
-config CARDMAN_4000
- tristate "Omnikey Cardman 4000 support"
- depends on PCMCIA
- select BITREVERSE
- help
- Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
- reader.
-
- This kernel driver requires additional userspace support, either
- by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/),
- or via the cm4000 backend of OpenCT (http://www.opensc-project.org/opensc).
-
-config CARDMAN_4040
- tristate "Omnikey CardMan 4040 support"
- depends on PCMCIA
- help
- Enable support for the Omnikey CardMan 4040 PCMCIA Smartcard
- reader.
-
- This card is basically a USB CCID device connected to a FIFO
- in I/O space. To use the kernel driver, you will need either the
- PC/SC ifdhandler provided from the Omnikey homepage
- (http://www.omnikey.com/), or a current development version of OpenCT
- (http://www.opensc-project.org/opensc).
-
-config SCR24X
- tristate "SCR24x Chip Card Interface support"
- depends on PCMCIA
- help
- Enable support for the SCR24x PCMCIA Chip Card Interface.
-
- To compile this driver as a module, choose M here.
- The module will be called scr24x_cs..
-
- If unsure say N.
-
-config IPWIRELESS
- tristate "IPWireless 3G UMTS PCMCIA card support"
- depends on PCMCIA && NETDEVICES && TTY
- select PPP
- help
- This is a driver for 3G UMTS PCMCIA card from IPWireless company. In
- some countries (for example Czech Republic, T-Mobile ISP) this card
- is shipped for service called UMTS 4G.
-
-endmenu
-
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
deleted file mode 100644
index 024eed1c4ca5..000000000000
--- a/drivers/char/pcmcia/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# drivers/char/pcmcia/Makefile
-#
-# Makefile for the Linux PCMCIA char device drivers.
-#
-
-obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
-obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
-obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
-obj-$(CONFIG_SCR24X) += scr24x_cs.o
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
deleted file mode 100644
index adaec8fd4b16..000000000000
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ /dev/null
@@ -1,1910 +0,0 @@
- /*
- * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000"
- *
- * cm4000_cs.c support.linux@omnikey.com
- *
- * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files
- * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files
- * Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality
- * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionailty
- * Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments
- *
- * current version: 2.4.0gm4
- *
- * (C) 2000,2001,2002,2003,2004 Omnikey AG
- *
- * (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
- * - Adhere to Kernel process/coding-style.rst
- * - Port to 2.6.13 "new" style PCMCIA
- * - Check for copy_{from,to}_user return values
- * - Use nonseekable_open()
- * - add class interface for udev device creation
- *
- * All rights reserved. Licensed under dual BSD/GPL license.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/bitrev.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-
-#include <linux/cm4000_cs.h>
-
-/* #define ATR_CSUM */
-
-#define reader_to_dev(x) (&x->p_dev->dev)
-
-/* n (debug level) is ignored */
-/* additional debug output may be enabled by re-compiling with
- * CM4000_DEBUG set */
-/* #define CM4000_DEBUG */
-#define DEBUGP(n, rdr, x, args...) do { \
- dev_dbg(reader_to_dev(rdr), "%s:" x, \
- __func__ , ## args); \
- } while (0)
-
-static DEFINE_MUTEX(cmm_mutex);
-
-#define T_1SEC (HZ)
-#define T_10MSEC msecs_to_jiffies(10)
-#define T_20MSEC msecs_to_jiffies(20)
-#define T_40MSEC msecs_to_jiffies(40)
-#define T_50MSEC msecs_to_jiffies(50)
-#define T_100MSEC msecs_to_jiffies(100)
-#define T_500MSEC msecs_to_jiffies(500)
-
-static void cm4000_release(struct pcmcia_device *link);
-
-static int major; /* major number we get from the kernel */
-
-/* note: the first state has to have number 0 always */
-
-#define M_FETCH_ATR 0
-#define M_TIMEOUT_WAIT 1
-#define M_READ_ATR_LEN 2
-#define M_READ_ATR 3
-#define M_ATR_PRESENT 4
-#define M_BAD_CARD 5
-#define M_CARDOFF 6
-
-#define LOCK_IO 0
-#define LOCK_MONITOR 1
-
-#define IS_AUTOPPS_ACT 6
-#define IS_PROCBYTE_PRESENT 7
-#define IS_INVREV 8
-#define IS_ANY_T0 9
-#define IS_ANY_T1 10
-#define IS_ATR_PRESENT 11
-#define IS_ATR_VALID 12
-#define IS_CMM_ABSENT 13
-#define IS_BAD_LENGTH 14
-#define IS_BAD_CSUM 15
-#define IS_BAD_CARD 16
-
-#define REG_FLAGS0(x) (x + 0)
-#define REG_FLAGS1(x) (x + 1)
-#define REG_NUM_BYTES(x) (x + 2)
-#define REG_BUF_ADDR(x) (x + 3)
-#define REG_BUF_DATA(x) (x + 4)
-#define REG_NUM_SEND(x) (x + 5)
-#define REG_BAUDRATE(x) (x + 6)
-#define REG_STOPBITS(x) (x + 7)
-
-struct cm4000_dev {
- struct pcmcia_device *p_dev;
-
- unsigned char atr[MAX_ATR];
- unsigned char rbuf[512];
- unsigned char sbuf[512];
-
- wait_queue_head_t devq; /* when removing cardman must not be
- zeroed! */
-
- wait_queue_head_t ioq; /* if IO is locked, wait on this Q */
- wait_queue_head_t atrq; /* wait for ATR valid */
- wait_queue_head_t readq; /* used by write to wake blk.read */
-
- /* warning: do not move this struct group.
- * initialising to zero depends on it - see ZERO_DEV below. */
- struct_group(init,
- unsigned char atr_csum;
- unsigned char atr_len_retry;
- unsigned short atr_len;
- unsigned short rlen; /* bytes avail. after write */
- unsigned short rpos; /* latest read pos. write zeroes */
- unsigned char procbyte; /* T=0 procedure byte */
- unsigned char mstate; /* state of card monitor */
- unsigned char cwarn; /* slow down warning */
- unsigned char flags0; /* cardman IO-flags 0 */
- unsigned char flags1; /* cardman IO-flags 1 */
- unsigned int mdelay; /* variable monitor speeds, in jiffies */
-
- unsigned int baudv; /* baud value for speed */
- unsigned char ta1;
- unsigned char proto; /* T=0, T=1, ... */
- unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent
- access */
-
- unsigned char pts[4];
-
- struct timer_list timer; /* used to keep monitor running */
- int monitor_running;
- );
-};
-
-#define ZERO_DEV(dev) memset(&((dev)->init), 0, sizeof((dev)->init))
-
-static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
-static struct class *cmm_class;
-
-/* This table doesn't use spaces after the comma between fields and thus
- * violates process/coding-style.rst. However, I don't really think wrapping it around will
- * make it any clearer to read -HW */
-static unsigned char fi_di_table[10][14] = {
-/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */
-/*DI */
-/* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
-/* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11},
-/* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11},
-/* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3},
-/* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4},
-/* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5},
-/* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6},
-/* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
-/* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8},
-/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9}
-};
-
-#ifndef CM4000_DEBUG
-#define xoutb outb
-#define xinb inb
-#else
-static inline void xoutb(unsigned char val, unsigned short port)
-{
- pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
- outb(val, port);
-}
-static inline unsigned char xinb(unsigned short port)
-{
- unsigned char val;
-
- val = inb(port);
- pr_debug("%.2x=inb(%.4x)\n", val, port);
-
- return val;
-}
-#endif
-
-static inline unsigned char invert_revert(unsigned char ch)
-{
- return bitrev8(~ch);
-}
-
-static void str_invert_revert(unsigned char *b, int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- b[i] = invert_revert(b[i]);
-}
-
-#define ATRLENCK(dev,pos) \
- if (pos>=dev->atr_len || pos>=MAX_ATR) \
- goto return_0;
-
-static unsigned int calc_baudv(unsigned char fidi)
-{
- unsigned int wcrcf, wbrcf, fi_rfu, di_rfu;
-
- fi_rfu = 372;
- di_rfu = 1;
-
- /* FI */
- switch ((fidi >> 4) & 0x0F) {
- case 0x00:
- wcrcf = 372;
- break;
- case 0x01:
- wcrcf = 372;
- break;
- case 0x02:
- wcrcf = 558;
- break;
- case 0x03:
- wcrcf = 744;
- break;
- case 0x04:
- wcrcf = 1116;
- break;
- case 0x05:
- wcrcf = 1488;
- break;
- case 0x06:
- wcrcf = 1860;
- break;
- case 0x07:
- wcrcf = fi_rfu;
- break;
- case 0x08:
- wcrcf = fi_rfu;
- break;
- case 0x09:
- wcrcf = 512;
- break;
- case 0x0A:
- wcrcf = 768;
- break;
- case 0x0B:
- wcrcf = 1024;
- break;
- case 0x0C:
- wcrcf = 1536;
- break;
- case 0x0D:
- wcrcf = 2048;
- break;
- default:
- wcrcf = fi_rfu;
- break;
- }
-
- /* DI */
- switch (fidi & 0x0F) {
- case 0x00:
- wbrcf = di_rfu;
- break;
- case 0x01:
- wbrcf = 1;
- break;
- case 0x02:
- wbrcf = 2;
- break;
- case 0x03:
- wbrcf = 4;
- break;
- case 0x04:
- wbrcf = 8;
- break;
- case 0x05:
- wbrcf = 16;
- break;
- case 0x06:
- wbrcf = 32;
- break;
- case 0x07:
- wbrcf = di_rfu;
- break;
- case 0x08:
- wbrcf = 12;
- break;
- case 0x09:
- wbrcf = 20;
- break;
- default:
- wbrcf = di_rfu;
- break;
- }
-
- return (wcrcf / wbrcf);
-}
-
-static unsigned short io_read_num_rec_bytes(unsigned int iobase,
- unsigned short *s)
-{
- unsigned short tmp;
-
- tmp = *s = 0;
- do {
- *s = tmp;
- tmp = inb(REG_NUM_BYTES(iobase)) |
- (inb(REG_FLAGS0(iobase)) & 4 ? 0x100 : 0);
- } while (tmp != *s);
-
- return *s;
-}
-
-static int parse_atr(struct cm4000_dev *dev)
-{
- unsigned char any_t1, any_t0;
- unsigned char ch, ifno;
- int ix, done;
-
- DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len);
-
- if (dev->atr_len < 3) {
- DEBUGP(5, dev, "parse_atr: atr_len < 3\n");
- return 0;
- }
-
- if (dev->atr[0] == 0x3f)
- set_bit(IS_INVREV, &dev->flags);
- else
- clear_bit(IS_INVREV, &dev->flags);
- ix = 1;
- ifno = 1;
- ch = dev->atr[1];
- dev->proto = 0; /* XXX PROTO */
- any_t1 = any_t0 = done = 0;
- dev->ta1 = 0x11; /* defaults to 9600 baud */
- do {
- if (ifno == 1 && (ch & 0x10)) {
- /* read first interface byte and TA1 is present */
- dev->ta1 = dev->atr[2];
- DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1);
- ifno++;
- } else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */
- dev->ta1 = 0x11;
- ifno++;
- }
-
- DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0);
- ix += ((ch & 0x10) >> 4) /* no of int.face chars */
- +((ch & 0x20) >> 5)
- + ((ch & 0x40) >> 6)
- + ((ch & 0x80) >> 7);
- /* ATRLENCK(dev,ix); */
- if (ch & 0x80) { /* TDi */
- ch = dev->atr[ix];
- if ((ch & 0x0f)) {
- any_t1 = 1;
- DEBUGP(5, dev, "card is capable of T=1\n");
- } else {
- any_t0 = 1;
- DEBUGP(5, dev, "card is capable of T=0\n");
- }
- } else
- done = 1;
- } while (!done);
-
- DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n",
- ix, dev->atr[1] & 15, any_t1);
- if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) {
- DEBUGP(5, dev, "length error\n");
- return 0;
- }
- if (any_t0)
- set_bit(IS_ANY_T0, &dev->flags);
-
- if (any_t1) { /* compute csum */
- dev->atr_csum = 0;
-#ifdef ATR_CSUM
- for (i = 1; i < dev->atr_len; i++)
- dev->atr_csum ^= dev->atr[i];
- if (dev->atr_csum) {
- set_bit(IS_BAD_CSUM, &dev->flags);
- DEBUGP(5, dev, "bad checksum\n");
- goto return_0;
- }
-#endif
- if (any_t0 == 0)
- dev->proto = 1; /* XXX PROTO */
- set_bit(IS_ANY_T1, &dev->flags);
- }
-
- return 1;
-}
-
-struct card_fixup {
- char atr[12];
- u_int8_t atr_len;
- u_int8_t stopbits;
-};
-
-static struct card_fixup card_fixups[] = {
- { /* ACOS */
- .atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 },
- .atr_len = 7,
- .stopbits = 0x03,
- },
- { /* Motorola */
- .atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07,
- 0x41, 0x81, 0x81 },
- .atr_len = 11,
- .stopbits = 0x04,
- },
-};
-
-static void set_cardparameter(struct cm4000_dev *dev)
-{
- int i;
- unsigned int iobase = dev->p_dev->resource[0]->start;
- u_int8_t stopbits = 0x02; /* ISO default */
-
- DEBUGP(3, dev, "-> set_cardparameter\n");
-
- dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8);
- xoutb(dev->flags1, REG_FLAGS1(iobase));
- DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1);
-
- /* set baudrate */
- xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase));
-
- DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv,
- ((dev->baudv - 1) & 0xFF));
-
- /* set stopbits */
- for (i = 0; i < ARRAY_SIZE(card_fixups); i++) {
- if (!memcmp(dev->atr, card_fixups[i].atr,
- card_fixups[i].atr_len))
- stopbits = card_fixups[i].stopbits;
- }
- xoutb(stopbits, REG_STOPBITS(iobase));
-
- DEBUGP(3, dev, "<- set_cardparameter\n");
-}
-
-static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
-{
-
- unsigned long tmp, i;
- unsigned short num_bytes_read;
- unsigned char pts_reply[4];
- ssize_t rc;
- unsigned int iobase = dev->p_dev->resource[0]->start;
-
- rc = 0;
-
- DEBUGP(3, dev, "-> set_protocol\n");
- DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, "
- "ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, "
- "ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol,
- (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2,
- ptsreq->pts3);
-
- /* Fill PTS structure */
- dev->pts[0] = 0xff;
- dev->pts[1] = 0x00;
- tmp = ptsreq->protocol;
- while ((tmp = (tmp >> 1)) > 0)
- dev->pts[1]++;
- dev->proto = dev->pts[1]; /* Set new protocol */
- dev->pts[1] = (0x01 << 4) | (dev->pts[1]);
-
- /* Correct Fi/Di according to CM4000 Fi/Di table */
- DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1);
- /* set Fi/Di according to ATR TA(1) */
- dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F];
-
- /* Calculate PCK character */
- dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2];
-
- DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n",
- dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]);
-
- /* check card convention */
- if (test_bit(IS_INVREV, &dev->flags))
- str_invert_revert(dev->pts, 4);
-
- /* reset SM */
- xoutb(0x80, REG_FLAGS0(iobase));
-
- /* Enable access to the message buffer */
- DEBUGP(5, dev, "Enable access to the messages buffer\n");
- dev->flags1 = 0x20 /* T_Active */
- | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */
- | ((dev->baudv >> 8) & 0x01); /* MSB-baud */
- xoutb(dev->flags1, REG_FLAGS1(iobase));
-
- DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n",
- dev->flags1);
-
- /* write challenge to the buffer */
- DEBUGP(5, dev, "Write challenge to buffer: ");
- for (i = 0; i < 4; i++) {
- xoutb(i, REG_BUF_ADDR(iobase));
- xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */
-#ifdef CM4000_DEBUG
- pr_debug("0x%.2x ", dev->pts[i]);
- }
- pr_debug("\n");
-#else
- }
-#endif
-
- /* set number of bytes to write */
- DEBUGP(5, dev, "Set number of bytes to write\n");
- xoutb(0x04, REG_NUM_SEND(iobase));
-
- /* Trigger CARDMAN CONTROLLER */
- xoutb(0x50, REG_FLAGS0(iobase));
-
- /* Monitor progress */
- /* wait for xmit done */
- DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n");
-
- for (i = 0; i < 100; i++) {
- if (inb(REG_FLAGS0(iobase)) & 0x08) {
- DEBUGP(5, dev, "NumRecBytes is valid\n");
- break;
- }
- usleep_range(10000, 11000);
- }
- if (i == 100) {
- DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
- "valid\n");
- rc = -EIO;
- goto exit_setprotocol;
- }
-
- DEBUGP(5, dev, "Reading NumRecBytes\n");
- for (i = 0; i < 100; i++) {
- io_read_num_rec_bytes(iobase, &num_bytes_read);
- if (num_bytes_read >= 4) {
- DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
- if (num_bytes_read > 4) {
- rc = -EIO;
- goto exit_setprotocol;
- }
- break;
- }
- usleep_range(10000, 11000);
- }
-
- /* check whether it is a short PTS reply? */
- if (num_bytes_read == 3)
- i = 0;
-
- if (i == 100) {
- DEBUGP(5, dev, "Timeout reading num_bytes_read\n");
- rc = -EIO;
- goto exit_setprotocol;
- }
-
- DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n");
- xoutb(0x80, REG_FLAGS0(iobase));
-
- /* Read PPS reply */
- DEBUGP(5, dev, "Read PPS reply\n");
- for (i = 0; i < num_bytes_read; i++) {
- xoutb(i, REG_BUF_ADDR(iobase));
- pts_reply[i] = inb(REG_BUF_DATA(iobase));
- }
-
-#ifdef CM4000_DEBUG
- DEBUGP(2, dev, "PTSreply: ");
- for (i = 0; i < num_bytes_read; i++) {
- pr_debug("0x%.2x ", pts_reply[i]);
- }
- pr_debug("\n");
-#endif /* CM4000_DEBUG */
-
- DEBUGP(5, dev, "Clear Tactive in Flags1\n");
- xoutb(0x20, REG_FLAGS1(iobase));
-
- /* Compare ptsreq and ptsreply */
- if ((dev->pts[0] == pts_reply[0]) &&
- (dev->pts[1] == pts_reply[1]) &&
- (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) {
- /* setcardparameter according to PPS */
- dev->baudv = calc_baudv(dev->pts[2]);
- set_cardparameter(dev);
- } else if ((dev->pts[0] == pts_reply[0]) &&
- ((dev->pts[1] & 0xef) == pts_reply[1]) &&
- ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) {
- /* short PTS reply, set card parameter to default values */
- dev->baudv = calc_baudv(0x11);
- set_cardparameter(dev);
- } else
- rc = -EIO;
-
-exit_setprotocol:
- DEBUGP(3, dev, "<- set_protocol\n");
- return rc;
-}
-
-static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev)
-{
-
- /* note: statemachine is assumed to be reset */
- if (inb(REG_FLAGS0(iobase)) & 8) {
- clear_bit(IS_ATR_VALID, &dev->flags);
- set_bit(IS_CMM_ABSENT, &dev->flags);
- return 0; /* detect CMM = 1 -> failure */
- }
- /* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */
- xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase));
- if ((inb(REG_FLAGS0(iobase)) & 8) == 0) {
- clear_bit(IS_ATR_VALID, &dev->flags);
- set_bit(IS_CMM_ABSENT, &dev->flags);
- return 0; /* detect CMM=0 -> failure */
- }
- /* clear detectCMM again by restoring original flags1 */
- xoutb(dev->flags1, REG_FLAGS1(iobase));
- return 1;
-}
-
-static void terminate_monitor(struct cm4000_dev *dev)
-{
-
- /* tell the monitor to stop and wait until
- * it terminates.
- */
- DEBUGP(3, dev, "-> terminate_monitor\n");
- wait_event_interruptible(dev->devq,
- test_and_set_bit(LOCK_MONITOR,
- (void *)&dev->flags));
-
- /* now, LOCK_MONITOR has been set.
- * allow a last cycle in the monitor.
- * the monitor will indicate that it has
- * finished by clearing this bit.
- */
- DEBUGP(5, dev, "Now allow last cycle of monitor!\n");
- while (test_bit(LOCK_MONITOR, (void *)&dev->flags))
- msleep(25);
-
- DEBUGP(5, dev, "Delete timer\n");
- del_timer_sync(&dev->timer);
-#ifdef CM4000_DEBUG
- dev->monitor_running = 0;
-#endif
-
- DEBUGP(3, dev, "<- terminate_monitor\n");
-}
-
-/*
- * monitor the card every 50msec. as a side-effect, retrieve the
- * atr once a card is inserted. another side-effect of retrieving the
- * atr is that the card will be powered on, so there is no need to
- * power on the card explicitly from the application: the driver
- * is already doing that for you.
- */
-
-static void monitor_card(struct timer_list *t)
-{
- struct cm4000_dev *dev = from_timer(dev, t, timer);
- unsigned int iobase = dev->p_dev->resource[0]->start;
- unsigned short s;
- struct ptsreq ptsreq;
- int i, atrc;
-
- DEBUGP(7, dev, "-> monitor_card\n");
-
- /* if someone has set the lock for us: we're done! */
- if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) {
- DEBUGP(4, dev, "About to stop monitor\n");
- /* no */
- dev->rlen =
- dev->rpos =
- dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
- dev->mstate = M_FETCH_ATR;
- clear_bit(LOCK_MONITOR, &dev->flags);
- /* close et al. are sleeping on devq, so wake it */
- wake_up_interruptible(&dev->devq);
- DEBUGP(2, dev, "<- monitor_card (we are done now)\n");
- return;
- }
-
- /* try to lock io: if it is already locked, just add another timer */
- if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) {
- DEBUGP(4, dev, "Couldn't get IO lock\n");
- goto return_with_timer;
- }
-
- /* is a card/a reader inserted at all ? */
- dev->flags0 = xinb(REG_FLAGS0(iobase));
- DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0);
- DEBUGP(7, dev, "smartcard present: %s\n",
- dev->flags0 & 1 ? "yes" : "no");
- DEBUGP(7, dev, "cardman present: %s\n",
- dev->flags0 == 0xff ? "no" : "yes");
-
- if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
- || dev->flags0 == 0xff) { /* no cardman inserted */
- /* no */
- dev->rlen =
- dev->rpos =
- dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
- dev->mstate = M_FETCH_ATR;
-
- dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */
-
- if (dev->flags0 == 0xff) {
- DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n");
- set_bit(IS_CMM_ABSENT, &dev->flags);
- } else if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
- DEBUGP(4, dev, "clear IS_CMM_ABSENT bit "
- "(card is removed)\n");
- clear_bit(IS_CMM_ABSENT, &dev->flags);
- }
-
- goto release_io;
- } else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) {
- /* cardman and card present but cardman was absent before
- * (after suspend with inserted card) */
- DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n");
- clear_bit(IS_CMM_ABSENT, &dev->flags);
- }
-
- if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
- DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n");
- goto release_io;
- }
-
- switch (dev->mstate) {
- case M_CARDOFF: {
- unsigned char flags0;
-
- DEBUGP(4, dev, "M_CARDOFF\n");
- flags0 = inb(REG_FLAGS0(iobase));
- if (flags0 & 0x02) {
- /* wait until Flags0 indicate power is off */
- dev->mdelay = T_10MSEC;
- } else {
- /* Flags0 indicate power off and no card inserted now;
- * Reset CARDMAN CONTROLLER */
- xoutb(0x80, REG_FLAGS0(iobase));
-
- /* prepare for fetching ATR again: after card off ATR
- * is read again automatically */
- dev->rlen =
- dev->rpos =
- dev->atr_csum =
- dev->atr_len_retry = dev->cwarn = 0;
- dev->mstate = M_FETCH_ATR;
-
- /* minimal gap between CARDOFF and read ATR is 50msec */
- dev->mdelay = T_50MSEC;
- }
- break;
- }
- case M_FETCH_ATR:
- DEBUGP(4, dev, "M_FETCH_ATR\n");
- xoutb(0x80, REG_FLAGS0(iobase));
- DEBUGP(4, dev, "Reset BAUDV to 9600\n");
- dev->baudv = 0x173; /* 9600 */
- xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */
- xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */
- xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud
- value */
- /* warm start vs. power on: */
- xoutb(dev->flags0 & 2 ? 0x46 : 0x44, REG_FLAGS0(iobase));
- dev->mdelay = T_40MSEC;
- dev->mstate = M_TIMEOUT_WAIT;
- break;
- case M_TIMEOUT_WAIT:
- DEBUGP(4, dev, "M_TIMEOUT_WAIT\n");
- /* numRecBytes */
- io_read_num_rec_bytes(iobase, &dev->atr_len);
- dev->mdelay = T_10MSEC;
- dev->mstate = M_READ_ATR_LEN;
- break;
- case M_READ_ATR_LEN:
- DEBUGP(4, dev, "M_READ_ATR_LEN\n");
- /* infinite loop possible, since there is no timeout */
-
-#define MAX_ATR_LEN_RETRY 100
-
- if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) {
- if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */
- dev->mdelay = T_10MSEC;
- dev->mstate = M_READ_ATR;
- }
- } else {
- dev->atr_len = s;
- dev->atr_len_retry = 0; /* set new timeout */
- }
-
- DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len);
- break;
- case M_READ_ATR:
- DEBUGP(4, dev, "M_READ_ATR\n");
- xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
- for (i = 0; i < dev->atr_len; i++) {
- xoutb(i, REG_BUF_ADDR(iobase));
- dev->atr[i] = inb(REG_BUF_DATA(iobase));
- }
- /* Deactivate T_Active flags */
- DEBUGP(4, dev, "Deactivate T_Active flags\n");
- dev->flags1 = 0x01;
- xoutb(dev->flags1, REG_FLAGS1(iobase));
-
- /* atr is present (which doesn't mean it's valid) */
- set_bit(IS_ATR_PRESENT, &dev->flags);
- if (dev->atr[0] == 0x03)
- str_invert_revert(dev->atr, dev->atr_len);
- atrc = parse_atr(dev);
- if (atrc == 0) { /* atr invalid */
- dev->mdelay = 0;
- dev->mstate = M_BAD_CARD;
- } else {
- dev->mdelay = T_50MSEC;
- dev->mstate = M_ATR_PRESENT;
- set_bit(IS_ATR_VALID, &dev->flags);
- }
-
- if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
- DEBUGP(4, dev, "monitor_card: ATR valid\n");
- /* if ta1 == 0x11, no PPS necessary (default values) */
- /* do not do PPS with multi protocol cards */
- if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) &&
- (dev->ta1 != 0x11) &&
- !(test_bit(IS_ANY_T0, &dev->flags) &&
- test_bit(IS_ANY_T1, &dev->flags))) {
- DEBUGP(4, dev, "Perform AUTOPPS\n");
- set_bit(IS_AUTOPPS_ACT, &dev->flags);
- ptsreq.protocol = (0x01 << dev->proto);
- ptsreq.flags = 0x01;
- ptsreq.pts1 = 0x00;
- ptsreq.pts2 = 0x00;
- ptsreq.pts3 = 0x00;
- if (set_protocol(dev, &ptsreq) == 0) {
- DEBUGP(4, dev, "AUTOPPS ret SUCC\n");
- clear_bit(IS_AUTOPPS_ACT, &dev->flags);
- wake_up_interruptible(&dev->atrq);
- } else {
- DEBUGP(4, dev, "AUTOPPS failed: "
- "repower using defaults\n");
- /* prepare for repowering */
- clear_bit(IS_ATR_PRESENT, &dev->flags);
- clear_bit(IS_ATR_VALID, &dev->flags);
- dev->rlen =
- dev->rpos =
- dev->atr_csum =
- dev->atr_len_retry = dev->cwarn = 0;
- dev->mstate = M_FETCH_ATR;
-
- dev->mdelay = T_50MSEC;
- }
- } else {
- /* for cards which use slightly different
- * params (extra guard time) */
- set_cardparameter(dev);
- if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1)
- DEBUGP(4, dev, "AUTOPPS already active "
- "2nd try:use default values\n");
- if (dev->ta1 == 0x11)
- DEBUGP(4, dev, "No AUTOPPS necessary "
- "TA(1)==0x11\n");
- if (test_bit(IS_ANY_T0, &dev->flags)
- && test_bit(IS_ANY_T1, &dev->flags))
- DEBUGP(4, dev, "Do NOT perform AUTOPPS "
- "with multiprotocol cards\n");
- clear_bit(IS_AUTOPPS_ACT, &dev->flags);
- wake_up_interruptible(&dev->atrq);
- }
- } else {
- DEBUGP(4, dev, "ATR invalid\n");
- wake_up_interruptible(&dev->atrq);
- }
- break;
- case M_BAD_CARD:
- DEBUGP(4, dev, "M_BAD_CARD\n");
- /* slow down warning, but prompt immediately after insertion */
- if (dev->cwarn == 0 || dev->cwarn == 10) {
- set_bit(IS_BAD_CARD, &dev->flags);
- dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
- if (test_bit(IS_BAD_CSUM, &dev->flags)) {
- DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
- "be zero) failed\n", dev->atr_csum);
- }
-#ifdef CM4000_DEBUG
- else if (test_bit(IS_BAD_LENGTH, &dev->flags)) {
- DEBUGP(4, dev, "ATR length error\n");
- } else {
- DEBUGP(4, dev, "card damaged or wrong way "
- "inserted\n");
- }
-#endif
- dev->cwarn = 0;
- wake_up_interruptible(&dev->atrq); /* wake open */
- }
- dev->cwarn++;
- dev->mdelay = T_100MSEC;
- dev->mstate = M_FETCH_ATR;
- break;
- default:
- DEBUGP(7, dev, "Unknown action\n");
- break; /* nothing */
- }
-
-release_io:
- DEBUGP(7, dev, "release_io\n");
- clear_bit(LOCK_IO, &dev->flags);
- wake_up_interruptible(&dev->ioq); /* whoever needs IO */
-
-return_with_timer:
- DEBUGP(7, dev, "<- monitor_card (returns with timer)\n");
- mod_timer(&dev->timer, jiffies + dev->mdelay);
- clear_bit(LOCK_MONITOR, &dev->flags);
-}
-
-/* Interface to userland (file_operations) */
-
-static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
- loff_t *ppos)
-{
- struct cm4000_dev *dev = filp->private_data;
- unsigned int iobase = dev->p_dev->resource[0]->start;
- ssize_t rc;
- int i, j, k;
-
- DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid);
-
- if (count == 0) /* according to manpage */
- return 0;
-
- if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
- test_bit(IS_CMM_ABSENT, &dev->flags))
- return -ENODEV;
-
- if (test_bit(IS_BAD_CSUM, &dev->flags))
- return -EIO;
-
- /* also see the note about this in cmm_write */
- if (wait_event_interruptible
- (dev->atrq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- return -ERESTARTSYS;
- }
-
- if (test_bit(IS_ATR_VALID, &dev->flags) == 0)
- return -EIO;
-
- /* this one implements blocking IO */
- if (wait_event_interruptible
- (dev->readq,
- ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) {
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- return -ERESTARTSYS;
- }
-
- /* lock io */
- if (wait_event_interruptible
- (dev->ioq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- return -ERESTARTSYS;
- }
-
- rc = 0;
- dev->flags0 = inb(REG_FLAGS0(iobase));
- if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
- || dev->flags0 == 0xff) { /* no cardman inserted */
- clear_bit(IS_ATR_VALID, &dev->flags);
- if (dev->flags0 & 1) {
- set_bit(IS_CMM_ABSENT, &dev->flags);
- rc = -ENODEV;
- } else {
- rc = -EIO;
- }
- goto release_io;
- }
-
- DEBUGP(4, dev, "begin read answer\n");
- j = min(count, (size_t)(dev->rlen - dev->rpos));
- k = dev->rpos;
- if (k + j > 255)
- j = 256 - k;
- DEBUGP(4, dev, "read1 j=%d\n", j);
- for (i = 0; i < j; i++) {
- xoutb(k++, REG_BUF_ADDR(iobase));
- dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
- }
- j = min(count, (size_t)(dev->rlen - dev->rpos));
- if (k + j > 255) {
- DEBUGP(4, dev, "read2 j=%d\n", j);
- dev->flags1 |= 0x10; /* MSB buf addr set */
- xoutb(dev->flags1, REG_FLAGS1(iobase));
- for (; i < j; i++) {
- xoutb(k++, REG_BUF_ADDR(iobase));
- dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
- }
- }
-
- if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) {
- DEBUGP(4, dev, "T=0 and count > buffer\n");
- dev->rbuf[i] = dev->rbuf[i - 1];
- dev->rbuf[i - 1] = dev->procbyte;
- j++;
- }
- count = j;
-
- dev->rpos = dev->rlen + 1;
-
- /* Clear T1Active */
- DEBUGP(4, dev, "Clear T1Active\n");
- dev->flags1 &= 0xdf;
- xoutb(dev->flags1, REG_FLAGS1(iobase));
-
- xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */
- /* last check before exit */
- if (!io_detect_cm4000(iobase, dev)) {
- rc = -ENODEV;
- goto release_io;
- }
-
- if (test_bit(IS_INVREV, &dev->flags) && count > 0)
- str_invert_revert(dev->rbuf, count);
-
- if (copy_to_user(buf, dev->rbuf, count))
- rc = -EFAULT;
-
-release_io:
- clear_bit(LOCK_IO, &dev->flags);
- wake_up_interruptible(&dev->ioq);
-
- DEBUGP(2, dev, "<- cmm_read returns: rc = %zi\n",
- (rc < 0 ? rc : count));
- return rc < 0 ? rc : count;
-}
-
-static ssize_t cmm_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct cm4000_dev *dev = filp->private_data;
- unsigned int iobase = dev->p_dev->resource[0]->start;
- unsigned short s;
- unsigned char infolen;
- unsigned char sendT0;
- unsigned short nsend;
- unsigned short nr;
- ssize_t rc;
- int i;
-
- DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid);
-
- if (count == 0) /* according to manpage */
- return 0;
-
- if (dev->proto == 0 && count < 4) {
- /* T0 must have at least 4 bytes */
- DEBUGP(4, dev, "T0 short write\n");
- return -EIO;
- }
-
- nr = count & 0x1ff; /* max bytes to write */
-
- sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0;
-
- if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
- test_bit(IS_CMM_ABSENT, &dev->flags))
- return -ENODEV;
-
- if (test_bit(IS_BAD_CSUM, &dev->flags)) {
- DEBUGP(4, dev, "bad csum\n");
- return -EIO;
- }
-
- /*
- * wait for atr to become valid.
- * note: it is important to lock this code. if we dont, the monitor
- * could be run between test_bit and the call to sleep on the
- * atr-queue. if *then* the monitor detects atr valid, it will wake up
- * any process on the atr-queue, *but* since we have been interrupted,
- * we do not yet sleep on this queue. this would result in a missed
- * wake_up and the calling process would sleep forever (until
- * interrupted). also, do *not* restore_flags before sleep_on, because
- * this could result in the same situation!
- */
- if (wait_event_interruptible
- (dev->atrq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- return -ERESTARTSYS;
- }
-
- if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */
- DEBUGP(4, dev, "invalid ATR\n");
- return -EIO;
- }
-
- /* lock io */
- if (wait_event_interruptible
- (dev->ioq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- return -ERESTARTSYS;
- }
-
- if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count)))
- return -EFAULT;
-
- rc = 0;
- dev->flags0 = inb(REG_FLAGS0(iobase));
- if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
- || dev->flags0 == 0xff) { /* no cardman inserted */
- clear_bit(IS_ATR_VALID, &dev->flags);
- if (dev->flags0 & 1) {
- set_bit(IS_CMM_ABSENT, &dev->flags);
- rc = -ENODEV;
- } else {
- DEBUGP(4, dev, "IO error\n");
- rc = -EIO;
- }
- goto release_io;
- }
-
- xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
-
- if (!io_detect_cm4000(iobase, dev)) {
- rc = -ENODEV;
- goto release_io;
- }
-
- /* reflect T=0 send/read mode in flags1 */
- dev->flags1 |= (sendT0);
-
- set_cardparameter(dev);
-
- /* dummy read, reset flag procedure received */
- inb(REG_FLAGS1(iobase));
-
- dev->flags1 = 0x20 /* T_Active */
- | (sendT0)
- | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)/* inverse parity */
- | (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */
- DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1);
- xoutb(dev->flags1, REG_FLAGS1(iobase));
-
- /* xmit data */
- DEBUGP(4, dev, "Xmit data\n");
- for (i = 0; i < nr; i++) {
- if (i >= 256) {
- dev->flags1 = 0x20 /* T_Active */
- | (sendT0) /* SendT0 */
- /* inverse parity: */
- | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)
- | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */
- | 0x10; /* set address high */
- DEBUGP(4, dev, "dev->flags = 0x%.2x - set address "
- "high\n", dev->flags1);
- xoutb(dev->flags1, REG_FLAGS1(iobase));
- }
- if (test_bit(IS_INVREV, &dev->flags)) {
- DEBUGP(4, dev, "Apply inverse convention for 0x%.2x "
- "-> 0x%.2x\n", (unsigned char)dev->sbuf[i],
- invert_revert(dev->sbuf[i]));
- xoutb(i, REG_BUF_ADDR(iobase));
- xoutb(invert_revert(dev->sbuf[i]),
- REG_BUF_DATA(iobase));
- } else {
- xoutb(i, REG_BUF_ADDR(iobase));
- xoutb(dev->sbuf[i], REG_BUF_DATA(iobase));
- }
- }
- DEBUGP(4, dev, "Xmit done\n");
-
- if (dev->proto == 0) {
- /* T=0 proto: 0 byte reply */
- if (nr == 4) {
- DEBUGP(4, dev, "T=0 assumes 0 byte reply\n");
- xoutb(i, REG_BUF_ADDR(iobase));
- if (test_bit(IS_INVREV, &dev->flags))
- xoutb(0xff, REG_BUF_DATA(iobase));
- else
- xoutb(0x00, REG_BUF_DATA(iobase));
- }
-
- /* numSendBytes */
- if (sendT0)
- nsend = nr;
- else {
- if (nr == 4)
- nsend = 5;
- else {
- nsend = 5 + (unsigned char)dev->sbuf[4];
- if (dev->sbuf[4] == 0)
- nsend += 0x100;
- }
- }
- } else
- nsend = nr;
-
- /* T0: output procedure byte */
- if (test_bit(IS_INVREV, &dev->flags)) {
- DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) "
- "0x%.2x\n", invert_revert(dev->sbuf[1]));
- xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase));
- } else {
- DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]);
- xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase));
- }
-
- DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n",
- (unsigned char)(nsend & 0xff));
- xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase));
-
- DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n",
- 0x40 /* SM_Active */
- | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */
- |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */
- |(nsend & 0x100) >> 8 /* MSB numSendBytes */ );
- xoutb(0x40 /* SM_Active */
- | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */
- |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */
- |(nsend & 0x100) >> 8, /* MSB numSendBytes */
- REG_FLAGS0(iobase));
-
- /* wait for xmit done */
- if (dev->proto == 1) {
- DEBUGP(4, dev, "Wait for xmit done\n");
- for (i = 0; i < 1000; i++) {
- if (inb(REG_FLAGS0(iobase)) & 0x08)
- break;
- msleep_interruptible(10);
- }
- if (i == 1000) {
- DEBUGP(4, dev, "timeout waiting for xmit done\n");
- rc = -EIO;
- goto release_io;
- }
- }
-
- /* T=1: wait for infoLen */
-
- infolen = 0;
- if (dev->proto) {
- /* wait until infoLen is valid */
- for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */
- io_read_num_rec_bytes(iobase, &s);
- if (s >= 3) {
- infolen = inb(REG_FLAGS1(iobase));
- DEBUGP(4, dev, "infolen=%d\n", infolen);
- break;
- }
- msleep_interruptible(10);
- }
- if (i == 6000) {
- DEBUGP(4, dev, "timeout waiting for infoLen\n");
- rc = -EIO;
- goto release_io;
- }
- } else
- clear_bit(IS_PROCBYTE_PRESENT, &dev->flags);
-
- /* numRecBytes | bit9 of numRecytes */
- io_read_num_rec_bytes(iobase, &dev->rlen);
- for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */
- if (dev->proto) {
- if (dev->rlen >= infolen + 4)
- break;
- }
- msleep_interruptible(10);
- /* numRecBytes | bit9 of numRecytes */
- io_read_num_rec_bytes(iobase, &s);
- if (s > dev->rlen) {
- DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n");
- i = 0; /* reset timeout */
- dev->rlen = s;
- }
- /* T=0: we are done when numRecBytes doesn't
- * increment any more and NoProcedureByte
- * is set and numRecBytes == bytes sent + 6
- * (header bytes + data + 1 for sw2)
- * except when the card replies an error
- * which means, no data will be sent back.
- */
- else if (dev->proto == 0) {
- if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) {
- /* no procedure byte received since last read */
- DEBUGP(1, dev, "NoProcedure byte set\n");
- /* i=0; */
- } else {
- /* procedure byte received since last read */
- DEBUGP(1, dev, "NoProcedure byte unset "
- "(reset timeout)\n");
- dev->procbyte = inb(REG_FLAGS1(iobase));
- DEBUGP(1, dev, "Read procedure byte 0x%.2x\n",
- dev->procbyte);
- i = 0; /* resettimeout */
- }
- if (inb(REG_FLAGS0(iobase)) & 0x08) {
- DEBUGP(1, dev, "T0Done flag (read reply)\n");
- break;
- }
- }
- if (dev->proto)
- infolen = inb(REG_FLAGS1(iobase));
- }
- if (i == 600) {
- DEBUGP(1, dev, "timeout waiting for numRecBytes\n");
- rc = -EIO;
- goto release_io;
- } else {
- if (dev->proto == 0) {
- DEBUGP(1, dev, "Wait for T0Done bit to be set\n");
- for (i = 0; i < 1000; i++) {
- if (inb(REG_FLAGS0(iobase)) & 0x08)
- break;
- msleep_interruptible(10);
- }
- if (i == 1000) {
- DEBUGP(1, dev, "timeout waiting for T0Done\n");
- rc = -EIO;
- goto release_io;
- }
-
- dev->procbyte = inb(REG_FLAGS1(iobase));
- DEBUGP(4, dev, "Read procedure byte 0x%.2x\n",
- dev->procbyte);
-
- io_read_num_rec_bytes(iobase, &dev->rlen);
- DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen);
-
- }
- }
- /* T=1: read offset=zero, T=0: read offset=after challenge */
- dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 5 : nr;
- DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n",
- dev->rlen, dev->rpos, nr);
-
-release_io:
- DEBUGP(4, dev, "Reset SM\n");
- xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
-
- if (rc < 0) {
- DEBUGP(4, dev, "Write failed but clear T_Active\n");
- dev->flags1 &= 0xdf;
- xoutb(dev->flags1, REG_FLAGS1(iobase));
- }
-
- clear_bit(LOCK_IO, &dev->flags);
- wake_up_interruptible(&dev->ioq);
- wake_up_interruptible(&dev->readq); /* tell read we have data */
-
- /* ITSEC E2: clear write buffer */
- memset((char *)dev->sbuf, 0, 512);
-
- /* return error or actually written bytes */
- DEBUGP(2, dev, "<- cmm_write\n");
- return rc < 0 ? rc : nr;
-}
-
-static void start_monitor(struct cm4000_dev *dev)
-{
- DEBUGP(3, dev, "-> start_monitor\n");
- if (!dev->monitor_running) {
- DEBUGP(5, dev, "create, init and add timer\n");
- timer_setup(&dev->timer, monitor_card, 0);
- dev->monitor_running = 1;
- mod_timer(&dev->timer, jiffies);
- } else
- DEBUGP(5, dev, "monitor already running\n");
- DEBUGP(3, dev, "<- start_monitor\n");
-}
-
-static void stop_monitor(struct cm4000_dev *dev)
-{
- DEBUGP(3, dev, "-> stop_monitor\n");
- if (dev->monitor_running) {
- DEBUGP(5, dev, "stopping monitor\n");
- terminate_monitor(dev);
- /* reset monitor SM */
- clear_bit(IS_ATR_VALID, &dev->flags);
- clear_bit(IS_ATR_PRESENT, &dev->flags);
- } else
- DEBUGP(5, dev, "monitor already stopped\n");
- DEBUGP(3, dev, "<- stop_monitor\n");
-}
-
-static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct cm4000_dev *dev = filp->private_data;
- unsigned int iobase = dev->p_dev->resource[0]->start;
- struct inode *inode = file_inode(filp);
- struct pcmcia_device *link;
- int rc;
- void __user *argp = (void __user *)arg;
-#ifdef CM4000_DEBUG
- char *ioctl_names[CM_IOC_MAXNR + 1] = {
- [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS",
- [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR",
- [_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF",
- [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS",
- [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL",
- };
- DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
- iminor(inode), ioctl_names[_IOC_NR(cmd)]);
-#endif
-
- mutex_lock(&cmm_mutex);
- rc = -ENODEV;
- link = dev_table[iminor(inode)];
- if (!pcmcia_dev_present(link)) {
- DEBUGP(4, dev, "DEV_OK false\n");
- goto out;
- }
-
- if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
- DEBUGP(4, dev, "CMM_ABSENT flag set\n");
- goto out;
- }
- rc = -EINVAL;
-
- if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
- DEBUGP(4, dev, "ioctype mismatch\n");
- goto out;
- }
- if (_IOC_NR(cmd) > CM_IOC_MAXNR) {
- DEBUGP(4, dev, "iocnr mismatch\n");
- goto out;
- }
- rc = 0;
-
- switch (cmd) {
- case CM_IOCGSTATUS:
- DEBUGP(4, dev, " ... in CM_IOCGSTATUS\n");
- {
- int status;
-
- /* clear other bits, but leave inserted & powered as
- * they are */
- status = dev->flags0 & 3;
- if (test_bit(IS_ATR_PRESENT, &dev->flags))
- status |= CM_ATR_PRESENT;
- if (test_bit(IS_ATR_VALID, &dev->flags))
- status |= CM_ATR_VALID;
- if (test_bit(IS_CMM_ABSENT, &dev->flags))
- status |= CM_NO_READER;
- if (test_bit(IS_BAD_CARD, &dev->flags))
- status |= CM_BAD_CARD;
- if (copy_to_user(argp, &status, sizeof(int)))
- rc = -EFAULT;
- }
- break;
- case CM_IOCGATR:
- DEBUGP(4, dev, "... in CM_IOCGATR\n");
- {
- struct atreq __user *atreq = argp;
- int tmp;
- /* allow nonblocking io and being interrupted */
- if (wait_event_interruptible
- (dev->atrq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
- != 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- rc = -EAGAIN;
- else
- rc = -ERESTARTSYS;
- break;
- }
-
- rc = -EFAULT;
- if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {
- tmp = -1;
- if (copy_to_user(&(atreq->atr_len), &tmp,
- sizeof(int)))
- break;
- } else {
- if (copy_to_user(atreq->atr, dev->atr,
- dev->atr_len))
- break;
-
- tmp = dev->atr_len;
- if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int)))
- break;
- }
- rc = 0;
- break;
- }
- case CM_IOCARDOFF:
-
-#ifdef CM4000_DEBUG
- DEBUGP(4, dev, "... in CM_IOCARDOFF\n");
- if (dev->flags0 & 0x01) {
- DEBUGP(4, dev, " Card inserted\n");
- } else {
- DEBUGP(2, dev, " No card inserted\n");
- }
- if (dev->flags0 & 0x02) {
- DEBUGP(4, dev, " Card powered\n");
- } else {
- DEBUGP(2, dev, " Card not powered\n");
- }
-#endif
-
- /* is a card inserted and powered? */
- if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) {
-
- /* get IO lock */
- if (wait_event_interruptible
- (dev->ioq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
- == 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- rc = -EAGAIN;
- else
- rc = -ERESTARTSYS;
- break;
- }
- /* Set Flags0 = 0x42 */
- DEBUGP(4, dev, "Set Flags0=0x42 \n");
- xoutb(0x42, REG_FLAGS0(iobase));
- clear_bit(IS_ATR_PRESENT, &dev->flags);
- clear_bit(IS_ATR_VALID, &dev->flags);
- dev->mstate = M_CARDOFF;
- clear_bit(LOCK_IO, &dev->flags);
- if (wait_event_interruptible
- (dev->atrq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_bit(IS_ATR_VALID, (void *)&dev->flags) !=
- 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- rc = -EAGAIN;
- else
- rc = -ERESTARTSYS;
- break;
- }
- }
- /* release lock */
- clear_bit(LOCK_IO, &dev->flags);
- wake_up_interruptible(&dev->ioq);
-
- rc = 0;
- break;
- case CM_IOCSPTS:
- {
- struct ptsreq krnptsreq;
-
- if (copy_from_user(&krnptsreq, argp,
- sizeof(struct ptsreq))) {
- rc = -EFAULT;
- break;
- }
-
- rc = 0;
- DEBUGP(4, dev, "... in CM_IOCSPTS\n");
- /* wait for ATR to get valid */
- if (wait_event_interruptible
- (dev->atrq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
- != 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- rc = -EAGAIN;
- else
- rc = -ERESTARTSYS;
- break;
- }
- /* get IO lock */
- if (wait_event_interruptible
- (dev->ioq,
- ((filp->f_flags & O_NONBLOCK)
- || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
- == 0)))) {
- if (filp->f_flags & O_NONBLOCK)
- rc = -EAGAIN;
- else
- rc = -ERESTARTSYS;
- break;
- }
-
- if ((rc = set_protocol(dev, &krnptsreq)) != 0) {
- /* auto power_on again */
- dev->mstate = M_FETCH_ATR;
- clear_bit(IS_ATR_VALID, &dev->flags);
- }
- /* release lock */
- clear_bit(LOCK_IO, &dev->flags);
- wake_up_interruptible(&dev->ioq);
-
- }
- break;
-#ifdef CM4000_DEBUG
- case CM_IOSDBGLVL:
- rc = -ENOTTY;
- break;
-#endif
- default:
- DEBUGP(4, dev, "... in default (unknown IOCTL code)\n");
- rc = -ENOTTY;
- }
-out:
- mutex_unlock(&cmm_mutex);
- return rc;
-}
-
-static int cmm_open(struct inode *inode, struct file *filp)
-{
- struct cm4000_dev *dev;
- struct pcmcia_device *link;
- int minor = iminor(inode);
- int ret;
-
- if (minor >= CM4000_MAX_DEV)
- return -ENODEV;
-
- mutex_lock(&cmm_mutex);
- link = dev_table[minor];
- if (link == NULL || !pcmcia_dev_present(link)) {
- ret = -ENODEV;
- goto out;
- }
-
- if (link->open) {
- ret = -EBUSY;
- goto out;
- }
-
- dev = link->priv;
- filp->private_data = dev;
-
- DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n",
- imajor(inode), minor, current->comm, current->pid);
-
- /* init device variables, they may be "polluted" after close
- * or, the device may never have been closed (i.e. open failed)
- */
-
- ZERO_DEV(dev);
-
- /* opening will always block since the
- * monitor will be started by open, which
- * means we have to wait for ATR becoming
- * valid = block until valid (or card
- * inserted)
- */
- if (filp->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto out;
- }
-
- dev->mdelay = T_50MSEC;
-
- /* start monitoring the cardstatus */
- start_monitor(dev);
-
- link->open = 1; /* only one open per device */
-
- DEBUGP(2, dev, "<- cmm_open\n");
- ret = stream_open(inode, filp);
-out:
- mutex_unlock(&cmm_mutex);
- return ret;
-}
-
-static int cmm_close(struct inode *inode, struct file *filp)
-{
- struct cm4000_dev *dev;
- struct pcmcia_device *link;
- int minor = iminor(inode);
-
- if (minor >= CM4000_MAX_DEV)
- return -ENODEV;
-
- link = dev_table[minor];
- if (link == NULL)
- return -ENODEV;
-
- dev = link->priv;
-
- DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n",
- imajor(inode), minor);
-
- stop_monitor(dev);
-
- ZERO_DEV(dev);
-
- link->open = 0; /* only one open per device */
- wake_up(&dev->devq); /* socket removed? */
-
- DEBUGP(2, dev, "cmm_close\n");
- return 0;
-}
-
-static void cmm_cm4000_release(struct pcmcia_device * link)
-{
- struct cm4000_dev *dev = link->priv;
-
- /* don't terminate the monitor, rather rely on
- * close doing that for us.
- */
- DEBUGP(3, dev, "-> cmm_cm4000_release\n");
- while (link->open) {
- printk(KERN_INFO MODULE_NAME ": delaying release until "
- "process has terminated\n");
- /* note: don't interrupt us:
- * close the applications which own
- * the devices _first_ !
- */
- wait_event(dev->devq, (link->open == 0));
- }
- /* dev->devq=NULL; this cannot be zeroed earlier */
- DEBUGP(3, dev, "<- cmm_cm4000_release\n");
- return;
-}
-
-/*==== Interface to PCMCIA Layer =======================================*/
-
-static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- return pcmcia_request_io(p_dev);
-}
-
-static int cm4000_config(struct pcmcia_device * link, int devno)
-{
- link->config_flags |= CONF_AUTO_SET_IO;
-
- /* read the config-tuples */
- if (pcmcia_loop_config(link, cm4000_config_check, NULL))
- goto cs_release;
-
- if (pcmcia_enable_device(link))
- goto cs_release;
-
- return 0;
-
-cs_release:
- cm4000_release(link);
- return -ENODEV;
-}
-
-static int cm4000_suspend(struct pcmcia_device *link)
-{
- struct cm4000_dev *dev;
-
- dev = link->priv;
- stop_monitor(dev);
-
- return 0;
-}
-
-static int cm4000_resume(struct pcmcia_device *link)
-{
- struct cm4000_dev *dev;
-
- dev = link->priv;
- if (link->open)
- start_monitor(dev);
-
- return 0;
-}
-
-static void cm4000_release(struct pcmcia_device *link)
-{
- cmm_cm4000_release(link); /* delay release until device closed */
- pcmcia_disable_device(link);
-}
-
-static int cm4000_probe(struct pcmcia_device *link)
-{
- struct cm4000_dev *dev;
- int i, ret;
-
- for (i = 0; i < CM4000_MAX_DEV; i++)
- if (dev_table[i] == NULL)
- break;
-
- if (i == CM4000_MAX_DEV) {
- printk(KERN_NOTICE MODULE_NAME ": all devices in use\n");
- return -ENODEV;
- }
-
- /* create a new cm4000_cs device */
- dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL);
- if (dev == NULL)
- return -ENOMEM;
-
- dev->p_dev = link;
- link->priv = dev;
- dev_table[i] = link;
-
- init_waitqueue_head(&dev->devq);
- init_waitqueue_head(&dev->ioq);
- init_waitqueue_head(&dev->atrq);
- init_waitqueue_head(&dev->readq);
-
- ret = cm4000_config(link, i);
- if (ret) {
- dev_table[i] = NULL;
- kfree(dev);
- return ret;
- }
-
- device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i);
-
- return 0;
-}
-
-static void cm4000_detach(struct pcmcia_device *link)
-{
- struct cm4000_dev *dev = link->priv;
- int devno;
-
- /* find device */
- for (devno = 0; devno < CM4000_MAX_DEV; devno++)
- if (dev_table[devno] == link)
- break;
- if (devno == CM4000_MAX_DEV)
- return;
-
- stop_monitor(dev);
-
- cm4000_release(link);
-
- dev_table[devno] = NULL;
- kfree(dev);
-
- device_destroy(cmm_class, MKDEV(major, devno));
-
- return;
-}
-
-static const struct file_operations cm4000_fops = {
- .owner = THIS_MODULE,
- .read = cmm_read,
- .write = cmm_write,
- .unlocked_ioctl = cmm_ioctl,
- .open = cmm_open,
- .release= cmm_close,
- .llseek = no_llseek,
-};
-
-static const struct pcmcia_device_id cm4000_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002),
- PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, cm4000_ids);
-
-static struct pcmcia_driver cm4000_driver = {
- .owner = THIS_MODULE,
- .name = "cm4000_cs",
- .probe = cm4000_probe,
- .remove = cm4000_detach,
- .suspend = cm4000_suspend,
- .resume = cm4000_resume,
- .id_table = cm4000_ids,
-};
-
-static int __init cmm_init(void)
-{
- int rc;
-
- cmm_class = class_create(THIS_MODULE, "cardman_4000");
- if (IS_ERR(cmm_class))
- return PTR_ERR(cmm_class);
-
- major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
- if (major < 0) {
- printk(KERN_WARNING MODULE_NAME
- ": could not get major number\n");
- class_destroy(cmm_class);
- return major;
- }
-
- rc = pcmcia_register_driver(&cm4000_driver);
- if (rc < 0) {
- unregister_chrdev(major, DEVICE_NAME);
- class_destroy(cmm_class);
- return rc;
- }
-
- return 0;
-}
-
-static void __exit cmm_exit(void)
-{
- pcmcia_unregister_driver(&cm4000_driver);
- unregister_chrdev(major, DEVICE_NAME);
- class_destroy(cmm_class);
-};
-
-module_init(cmm_init);
-module_exit(cmm_exit);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
deleted file mode 100644
index 827711911da4..000000000000
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ /dev/null
@@ -1,684 +0,0 @@
-/*
- * A driver for the Omnikey PCMCIA smartcard reader CardMan 4040
- *
- * (c) 2000-2004 Omnikey AG (http://www.omnikey.com/)
- *
- * (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
- * - add support for poll()
- * - driver cleanup
- * - add waitqueues
- * - adhere to linux kernel coding style and policies
- * - support 2.6.13 "new style" pcmcia interface
- * - add class interface for udev device creation
- *
- * The device basically is a USB CCID compliant device that has been
- * attached to an I/O-Mapped FIFO.
- *
- * All rights reserved, Dual BSD/GPL Licensed.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-
-#include "cm4040_cs.h"
-
-
-#define reader_to_dev(x) (&x->p_dev->dev)
-
-/* n (debug level) is ignored */
-/* additional debug output may be enabled by re-compiling with
- * CM4040_DEBUG set */
-/* #define CM4040_DEBUG */
-#define DEBUGP(n, rdr, x, args...) do { \
- dev_dbg(reader_to_dev(rdr), "%s:" x, \
- __func__ , ## args); \
- } while (0)
-
-static DEFINE_MUTEX(cm4040_mutex);
-
-#define CCID_DRIVER_BULK_DEFAULT_TIMEOUT (150*HZ)
-#define CCID_DRIVER_ASYNC_POWERUP_TIMEOUT (35*HZ)
-#define CCID_DRIVER_MINIMUM_TIMEOUT (3*HZ)
-#define READ_WRITE_BUFFER_SIZE 512
-#define POLL_LOOP_COUNT 1000
-
-/* how often to poll for fifo status change */
-#define POLL_PERIOD msecs_to_jiffies(10)
-
-static void reader_release(struct pcmcia_device *link);
-
-static int major;
-static struct class *cmx_class;
-
-#define BS_READABLE 0x01
-#define BS_WRITABLE 0x02
-
-struct reader_dev {
- struct pcmcia_device *p_dev;
- wait_queue_head_t devq;
- wait_queue_head_t poll_wait;
- wait_queue_head_t read_wait;
- wait_queue_head_t write_wait;
- unsigned long buffer_status;
- unsigned long timeout;
- unsigned char s_buf[READ_WRITE_BUFFER_SIZE];
- unsigned char r_buf[READ_WRITE_BUFFER_SIZE];
- struct timer_list poll_timer;
-};
-
-static struct pcmcia_device *dev_table[CM_MAX_DEV];
-
-#ifndef CM4040_DEBUG
-#define xoutb outb
-#define xinb inb
-#else
-static inline void xoutb(unsigned char val, unsigned short port)
-{
- pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
- outb(val, port);
-}
-
-static inline unsigned char xinb(unsigned short port)
-{
- unsigned char val;
-
- val = inb(port);
- pr_debug("%.2x=inb(%.4x)\n", val, port);
- return val;
-}
-#endif
-
-/* poll the device fifo status register. not to be confused with
- * the poll syscall. */
-static void cm4040_do_poll(struct timer_list *t)
-{
- struct reader_dev *dev = from_timer(dev, t, poll_timer);
- unsigned int obs = xinb(dev->p_dev->resource[0]->start
- + REG_OFFSET_BUFFER_STATUS);
-
- if ((obs & BSR_BULK_IN_FULL)) {
- set_bit(BS_READABLE, &dev->buffer_status);
- DEBUGP(4, dev, "waking up read_wait\n");
- wake_up_interruptible(&dev->read_wait);
- } else
- clear_bit(BS_READABLE, &dev->buffer_status);
-
- if (!(obs & BSR_BULK_OUT_FULL)) {
- set_bit(BS_WRITABLE, &dev->buffer_status);
- DEBUGP(4, dev, "waking up write_wait\n");
- wake_up_interruptible(&dev->write_wait);
- } else
- clear_bit(BS_WRITABLE, &dev->buffer_status);
-
- if (dev->buffer_status)
- wake_up_interruptible(&dev->poll_wait);
-
- mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
-}
-
-static void cm4040_stop_poll(struct reader_dev *dev)
-{
- del_timer_sync(&dev->poll_timer);
-}
-
-static int wait_for_bulk_out_ready(struct reader_dev *dev)
-{
- int i, rc;
- int iobase = dev->p_dev->resource[0]->start;
-
- for (i = 0; i < POLL_LOOP_COUNT; i++) {
- if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
- & BSR_BULK_OUT_FULL) == 0) {
- DEBUGP(4, dev, "BulkOut empty (i=%d)\n", i);
- return 1;
- }
- }
-
- DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n",
- dev->timeout);
- rc = wait_event_interruptible_timeout(dev->write_wait,
- test_and_clear_bit(BS_WRITABLE,
- &dev->buffer_status),
- dev->timeout);
-
- if (rc > 0)
- DEBUGP(4, dev, "woke up: BulkOut empty\n");
- else if (rc == 0)
- DEBUGP(4, dev, "woke up: BulkOut full, returning 0 :(\n");
- else if (rc < 0)
- DEBUGP(4, dev, "woke up: signal arrived\n");
-
- return rc;
-}
-
-/* Write to Sync Control Register */
-static int write_sync_reg(unsigned char val, struct reader_dev *dev)
-{
- int iobase = dev->p_dev->resource[0]->start;
- int rc;
-
- rc = wait_for_bulk_out_ready(dev);
- if (rc <= 0)
- return rc;
-
- xoutb(val, iobase + REG_OFFSET_SYNC_CONTROL);
- rc = wait_for_bulk_out_ready(dev);
- if (rc <= 0)
- return rc;
-
- return 1;
-}
-
-static int wait_for_bulk_in_ready(struct reader_dev *dev)
-{
- int i, rc;
- int iobase = dev->p_dev->resource[0]->start;
-
- for (i = 0; i < POLL_LOOP_COUNT; i++) {
- if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
- & BSR_BULK_IN_FULL) == BSR_BULK_IN_FULL) {
- DEBUGP(3, dev, "BulkIn full (i=%d)\n", i);
- return 1;
- }
- }
-
- DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n",
- dev->timeout);
- rc = wait_event_interruptible_timeout(dev->read_wait,
- test_and_clear_bit(BS_READABLE,
- &dev->buffer_status),
- dev->timeout);
- if (rc > 0)
- DEBUGP(4, dev, "woke up: BulkIn full\n");
- else if (rc == 0)
- DEBUGP(4, dev, "woke up: BulkIn not full, returning 0 :(\n");
- else if (rc < 0)
- DEBUGP(4, dev, "woke up: signal arrived\n");
-
- return rc;
-}
-
-static ssize_t cm4040_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct reader_dev *dev = filp->private_data;
- int iobase = dev->p_dev->resource[0]->start;
- size_t bytes_to_read;
- unsigned long i;
- size_t min_bytes_to_read;
- int rc;
-
- DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid);
-
- if (count == 0)
- return 0;
-
- if (count < 10)
- return -EFAULT;
-
- if (filp->f_flags & O_NONBLOCK) {
- DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
- DEBUGP(2, dev, "<- cm4040_read (failure)\n");
- return -EAGAIN;
- }
-
- if (!pcmcia_dev_present(dev->p_dev))
- return -ENODEV;
-
- for (i = 0; i < 5; i++) {
- rc = wait_for_bulk_in_ready(dev);
- if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
- DEBUGP(2, dev, "<- cm4040_read (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- return -EIO;
- }
- dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN);
-#ifdef CM4040_DEBUG
- pr_debug("%lu:%2x ", i, dev->r_buf[i]);
- }
- pr_debug("\n");
-#else
- }
-#endif
-
- bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]);
-
- DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read);
-
- min_bytes_to_read = min(count, bytes_to_read + 5);
- min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
-
- DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read);
-
- for (i = 0; i < (min_bytes_to_read-5); i++) {
- rc = wait_for_bulk_in_ready(dev);
- if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
- DEBUGP(2, dev, "<- cm4040_read (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- return -EIO;
- }
- dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN);
-#ifdef CM4040_DEBUG
- pr_debug("%lu:%2x ", i, dev->r_buf[i]);
- }
- pr_debug("\n");
-#else
- }
-#endif
-
- *ppos = min_bytes_to_read;
- if (copy_to_user(buf, dev->r_buf, min_bytes_to_read))
- return -EFAULT;
-
- rc = wait_for_bulk_in_ready(dev);
- if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
- DEBUGP(2, dev, "<- cm4040_read (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- return -EIO;
- }
-
- rc = write_sync_reg(SCR_READER_TO_HOST_DONE, dev);
- if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2x\n", rc);
- DEBUGP(2, dev, "<- cm4040_read (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- else
- return -EIO;
- }
-
- xinb(iobase + REG_OFFSET_BULK_IN);
-
- DEBUGP(2, dev, "<- cm4040_read (successfully)\n");
- return min_bytes_to_read;
-}
-
-static ssize_t cm4040_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct reader_dev *dev = filp->private_data;
- int iobase = dev->p_dev->resource[0]->start;
- ssize_t rc;
- int i;
- unsigned int bytes_to_write;
-
- DEBUGP(2, dev, "-> cm4040_write(%s,%d)\n", current->comm, current->pid);
-
- if (count == 0) {
- DEBUGP(2, dev, "<- cm4040_write empty read (successfully)\n");
- return 0;
- }
-
- if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
- DEBUGP(2, dev, "<- cm4040_write buffersize=%zd < 5\n", count);
- return -EIO;
- }
-
- if (filp->f_flags & O_NONBLOCK) {
- DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
- DEBUGP(4, dev, "<- cm4040_write (failure)\n");
- return -EAGAIN;
- }
-
- if (!pcmcia_dev_present(dev->p_dev))
- return -ENODEV;
-
- bytes_to_write = count;
- if (copy_from_user(dev->s_buf, buf, bytes_to_write))
- return -EFAULT;
-
- switch (dev->s_buf[0]) {
- case CMD_PC_TO_RDR_XFRBLOCK:
- case CMD_PC_TO_RDR_SECURE:
- case CMD_PC_TO_RDR_TEST_SECURE:
- case CMD_PC_TO_RDR_OK_SECURE:
- dev->timeout = CCID_DRIVER_BULK_DEFAULT_TIMEOUT;
- break;
-
- case CMD_PC_TO_RDR_ICCPOWERON:
- dev->timeout = CCID_DRIVER_ASYNC_POWERUP_TIMEOUT;
- break;
-
- case CMD_PC_TO_RDR_GETSLOTSTATUS:
- case CMD_PC_TO_RDR_ICCPOWEROFF:
- case CMD_PC_TO_RDR_GETPARAMETERS:
- case CMD_PC_TO_RDR_RESETPARAMETERS:
- case CMD_PC_TO_RDR_SETPARAMETERS:
- case CMD_PC_TO_RDR_ESCAPE:
- case CMD_PC_TO_RDR_ICCCLOCK:
- default:
- dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
- break;
- }
-
- rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
- if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
- DEBUGP(2, dev, "<- cm4040_write (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- else
- return -EIO;
- }
-
- DEBUGP(4, dev, "start \n");
-
- for (i = 0; i < bytes_to_write; i++) {
- rc = wait_for_bulk_out_ready(dev);
- if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
- rc);
- DEBUGP(2, dev, "<- cm4040_write (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- else
- return -EIO;
- }
-
- xoutb(dev->s_buf[i],iobase + REG_OFFSET_BULK_OUT);
- }
- DEBUGP(4, dev, "end\n");
-
- rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
-
- if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
- DEBUGP(2, dev, "<- cm4040_write (failed)\n");
- if (rc == -ERESTARTSYS)
- return rc;
- else
- return -EIO;
- }
-
- DEBUGP(2, dev, "<- cm4040_write (successfully)\n");
- return count;
-}
-
-static __poll_t cm4040_poll(struct file *filp, poll_table *wait)
-{
- struct reader_dev *dev = filp->private_data;
- __poll_t mask = 0;
-
- poll_wait(filp, &dev->poll_wait, wait);
-
- if (test_and_clear_bit(BS_READABLE, &dev->buffer_status))
- mask |= EPOLLIN | EPOLLRDNORM;
- if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status))
- mask |= EPOLLOUT | EPOLLWRNORM;
-
- DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask);
-
- return mask;
-}
-
-static int cm4040_open(struct inode *inode, struct file *filp)
-{
- struct reader_dev *dev;
- struct pcmcia_device *link;
- int minor = iminor(inode);
- int ret;
-
- if (minor >= CM_MAX_DEV)
- return -ENODEV;
-
- mutex_lock(&cm4040_mutex);
- link = dev_table[minor];
- if (link == NULL || !pcmcia_dev_present(link)) {
- ret = -ENODEV;
- goto out;
- }
-
- if (link->open) {
- ret = -EBUSY;
- goto out;
- }
-
- dev = link->priv;
- filp->private_data = dev;
-
- if (filp->f_flags & O_NONBLOCK) {
- DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
- ret = -EAGAIN;
- goto out;
- }
-
- link->open = 1;
-
- mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
-
- DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
- ret = nonseekable_open(inode, filp);
-out:
- mutex_unlock(&cm4040_mutex);
- return ret;
-}
-
-static int cm4040_close(struct inode *inode, struct file *filp)
-{
- struct reader_dev *dev = filp->private_data;
- struct pcmcia_device *link;
- int minor = iminor(inode);
-
- DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode),
- iminor(inode));
-
- if (minor >= CM_MAX_DEV)
- return -ENODEV;
-
- link = dev_table[minor];
- if (link == NULL)
- return -ENODEV;
-
- cm4040_stop_poll(dev);
-
- link->open = 0;
- wake_up(&dev->devq);
-
- DEBUGP(2, dev, "<- cm4040_close\n");
- return 0;
-}
-
-static void cm4040_reader_release(struct pcmcia_device *link)
-{
- struct reader_dev *dev = link->priv;
-
- DEBUGP(3, dev, "-> cm4040_reader_release\n");
- while (link->open) {
- DEBUGP(3, dev, MODULE_NAME ": delaying release "
- "until process has terminated\n");
- wait_event(dev->devq, (link->open == 0));
- }
- DEBUGP(3, dev, "<- cm4040_reader_release\n");
- return;
-}
-
-static int cm4040_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- return pcmcia_request_io(p_dev);
-}
-
-
-static int reader_config(struct pcmcia_device *link, int devno)
-{
- struct reader_dev *dev;
- int fail_rc;
-
- link->config_flags |= CONF_AUTO_SET_IO;
-
- if (pcmcia_loop_config(link, cm4040_config_check, NULL))
- goto cs_release;
-
- fail_rc = pcmcia_enable_device(link);
- if (fail_rc != 0) {
- dev_info(&link->dev, "pcmcia_enable_device failed 0x%x\n",
- fail_rc);
- goto cs_release;
- }
-
- dev = link->priv;
-
- DEBUGP(2, dev, "device " DEVICE_NAME "%d at %pR\n", devno,
- link->resource[0]);
- DEBUGP(2, dev, "<- reader_config (succ)\n");
-
- return 0;
-
-cs_release:
- reader_release(link);
- return -ENODEV;
-}
-
-static void reader_release(struct pcmcia_device *link)
-{
- cm4040_reader_release(link);
- pcmcia_disable_device(link);
-}
-
-static int reader_probe(struct pcmcia_device *link)
-{
- struct reader_dev *dev;
- int i, ret;
-
- for (i = 0; i < CM_MAX_DEV; i++) {
- if (dev_table[i] == NULL)
- break;
- }
-
- if (i == CM_MAX_DEV)
- return -ENODEV;
-
- dev = kzalloc(sizeof(struct reader_dev), GFP_KERNEL);
- if (dev == NULL)
- return -ENOMEM;
-
- dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
- dev->buffer_status = 0;
-
- link->priv = dev;
- dev->p_dev = link;
-
- dev_table[i] = link;
-
- init_waitqueue_head(&dev->devq);
- init_waitqueue_head(&dev->poll_wait);
- init_waitqueue_head(&dev->read_wait);
- init_waitqueue_head(&dev->write_wait);
- timer_setup(&dev->poll_timer, cm4040_do_poll, 0);
-
- ret = reader_config(link, i);
- if (ret) {
- dev_table[i] = NULL;
- kfree(dev);
- return ret;
- }
-
- device_create(cmx_class, NULL, MKDEV(major, i), NULL, "cmx%d", i);
-
- return 0;
-}
-
-static void reader_detach(struct pcmcia_device *link)
-{
- struct reader_dev *dev = link->priv;
- int devno;
-
- /* find device */
- for (devno = 0; devno < CM_MAX_DEV; devno++) {
- if (dev_table[devno] == link)
- break;
- }
- if (devno == CM_MAX_DEV)
- return;
-
- reader_release(link);
-
- dev_table[devno] = NULL;
- kfree(dev);
-
- device_destroy(cmx_class, MKDEV(major, devno));
-
- return;
-}
-
-static const struct file_operations reader_fops = {
- .owner = THIS_MODULE,
- .read = cm4040_read,
- .write = cm4040_write,
- .open = cm4040_open,
- .release = cm4040_close,
- .poll = cm4040_poll,
- .llseek = no_llseek,
-};
-
-static const struct pcmcia_device_id cm4040_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200),
- PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040",
- 0xE32CDD8C, 0x8F23318B),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, cm4040_ids);
-
-static struct pcmcia_driver reader_driver = {
- .owner = THIS_MODULE,
- .name = "cm4040_cs",
- .probe = reader_probe,
- .remove = reader_detach,
- .id_table = cm4040_ids,
-};
-
-static int __init cm4040_init(void)
-{
- int rc;
-
- cmx_class = class_create(THIS_MODULE, "cardman_4040");
- if (IS_ERR(cmx_class))
- return PTR_ERR(cmx_class);
-
- major = register_chrdev(0, DEVICE_NAME, &reader_fops);
- if (major < 0) {
- printk(KERN_WARNING MODULE_NAME
- ": could not get major number\n");
- class_destroy(cmx_class);
- return major;
- }
-
- rc = pcmcia_register_driver(&reader_driver);
- if (rc < 0) {
- unregister_chrdev(major, DEVICE_NAME);
- class_destroy(cmx_class);
- return rc;
- }
-
- return 0;
-}
-
-static void __exit cm4040_exit(void)
-{
- pcmcia_unregister_driver(&reader_driver);
- unregister_chrdev(major, DEVICE_NAME);
- class_destroy(cmx_class);
-}
-
-module_init(cm4040_init);
-module_exit(cm4040_exit);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/pcmcia/cm4040_cs.h b/drivers/char/pcmcia/cm4040_cs.h
deleted file mode 100644
index e2ffff995d51..000000000000
--- a/drivers/char/pcmcia/cm4040_cs.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CM4040_H_
-#define _CM4040_H_
-
-#define CM_MAX_DEV 4
-
-#define DEVICE_NAME "cmx"
-#define MODULE_NAME "cm4040_cs"
-
-#define REG_OFFSET_BULK_OUT 0
-#define REG_OFFSET_BULK_IN 0
-#define REG_OFFSET_BUFFER_STATUS 1
-#define REG_OFFSET_SYNC_CONTROL 2
-
-#define BSR_BULK_IN_FULL 0x02
-#define BSR_BULK_OUT_FULL 0x01
-
-#define SCR_HOST_TO_READER_START 0x80
-#define SCR_ABORT 0x40
-#define SCR_EN_NOTIFY 0x20
-#define SCR_ACK_NOTIFY 0x10
-#define SCR_READER_TO_HOST_DONE 0x08
-#define SCR_HOST_TO_READER_DONE 0x04
-#define SCR_PULSE_INTERRUPT 0x02
-#define SCR_POWER_DOWN 0x01
-
-
-#define CMD_PC_TO_RDR_ICCPOWERON 0x62
-#define CMD_PC_TO_RDR_GETSLOTSTATUS 0x65
-#define CMD_PC_TO_RDR_ICCPOWEROFF 0x63
-#define CMD_PC_TO_RDR_SECURE 0x69
-#define CMD_PC_TO_RDR_GETPARAMETERS 0x6C
-#define CMD_PC_TO_RDR_RESETPARAMETERS 0x6D
-#define CMD_PC_TO_RDR_SETPARAMETERS 0x61
-#define CMD_PC_TO_RDR_XFRBLOCK 0x6F
-#define CMD_PC_TO_RDR_ESCAPE 0x6B
-#define CMD_PC_TO_RDR_ICCCLOCK 0x6E
-#define CMD_PC_TO_RDR_TEST_SECURE 0x74
-#define CMD_PC_TO_RDR_OK_SECURE 0x89
-
-
-#define CMD_RDR_TO_PC_SLOTSTATUS 0x81
-#define CMD_RDR_TO_PC_DATABLOCK 0x80
-#define CMD_RDR_TO_PC_PARAMETERS 0x82
-#define CMD_RDR_TO_PC_ESCAPE 0x83
-#define CMD_RDR_TO_PC_OK_SECURE 0x89
-
-#endif /* _CM4040_H_ */
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
deleted file mode 100644
index 1bdce08fae3d..000000000000
--- a/drivers/char/pcmcia/scr24x_cs.c
+++ /dev/null
@@ -1,359 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SCR24x PCMCIA Smart Card Reader Driver
- *
- * Copyright (C) 2005-2006 TL Sudheendran
- * Copyright (C) 2016 Lubomir Rintel
- *
- * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran.
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/cdev.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-#define CCID_HEADER_SIZE 10
-#define CCID_LENGTH_OFFSET 1
-#define CCID_MAX_LEN 271
-
-#define SCR24X_DATA(n) (1 + n)
-#define SCR24X_CMD_STATUS 7
-#define CMD_START 0x40
-#define CMD_WRITE_BYTE 0x41
-#define CMD_READ_BYTE 0x42
-#define STATUS_BUSY 0x80
-
-struct scr24x_dev {
- struct device *dev;
- struct cdev c_dev;
- unsigned char buf[CCID_MAX_LEN];
- int devno;
- struct mutex lock;
- struct kref refcnt;
- u8 __iomem *regs;
-};
-
-#define SCR24X_DEVS 8
-static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS);
-
-static struct class *scr24x_class;
-static dev_t scr24x_devt;
-
-static void scr24x_delete(struct kref *kref)
-{
- struct scr24x_dev *dev = container_of(kref, struct scr24x_dev,
- refcnt);
-
- kfree(dev);
-}
-
-static int scr24x_wait_ready(struct scr24x_dev *dev)
-{
- u_char status;
- int timeout = 100;
-
- do {
- status = ioread8(dev->regs + SCR24X_CMD_STATUS);
- if (!(status & STATUS_BUSY))
- return 0;
-
- msleep(20);
- } while (--timeout);
-
- return -EIO;
-}
-
-static int scr24x_open(struct inode *inode, struct file *filp)
-{
- struct scr24x_dev *dev = container_of(inode->i_cdev,
- struct scr24x_dev, c_dev);
-
- kref_get(&dev->refcnt);
- filp->private_data = dev;
-
- return stream_open(inode, filp);
-}
-
-static int scr24x_release(struct inode *inode, struct file *filp)
-{
- struct scr24x_dev *dev = filp->private_data;
-
- /* We must not take the dev->lock here as scr24x_delete()
- * might be called to remove the dev structure altogether.
- * We don't need the lock anyway, since after the reference
- * acquired in probe() is released in remove() the chrdev
- * is already unregistered and no one can possibly acquire
- * a reference via open() anymore. */
- kref_put(&dev->refcnt, scr24x_delete);
- return 0;
-}
-
-static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit)
-{
- size_t i, y;
- int ret;
-
- for (i = offset; i < limit; i += 5) {
- iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS);
- ret = scr24x_wait_ready(dev);
- if (ret < 0)
- return ret;
-
- for (y = 0; y < 5 && i + y < limit; y++)
- dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y));
- }
-
- return 0;
-}
-
-static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct scr24x_dev *dev = filp->private_data;
- int ret;
- int len;
-
- if (count < CCID_HEADER_SIZE)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
-
- if (!dev->dev) {
- ret = -ENODEV;
- goto out;
- }
-
- ret = scr24x_wait_ready(dev);
- if (ret < 0)
- goto out;
- len = CCID_HEADER_SIZE;
- ret = read_chunk(dev, 0, len);
- if (ret < 0)
- goto out;
-
- len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET]));
- if (len > sizeof(dev->buf)) {
- ret = -EIO;
- goto out;
- }
- ret = read_chunk(dev, CCID_HEADER_SIZE, len);
- if (ret < 0)
- goto out;
-
- if (len < count)
- count = len;
-
- if (copy_to_user(buf, dev->buf, count)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = count;
-out:
- mutex_unlock(&dev->lock);
- return ret;
-}
-
-static ssize_t scr24x_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct scr24x_dev *dev = filp->private_data;
- size_t i, y;
- int ret;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
-
- if (!dev->dev) {
- ret = -ENODEV;
- goto out;
- }
-
- if (count > sizeof(dev->buf)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (copy_from_user(dev->buf, buf, count)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = scr24x_wait_ready(dev);
- if (ret < 0)
- goto out;
-
- iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS);
- ret = scr24x_wait_ready(dev);
- if (ret < 0)
- goto out;
-
- for (i = 0; i < count; i += 5) {
- for (y = 0; y < 5 && i + y < count; y++)
- iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y));
-
- iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS);
- ret = scr24x_wait_ready(dev);
- if (ret < 0)
- goto out;
- }
-
- ret = count;
-out:
- mutex_unlock(&dev->lock);
- return ret;
-}
-
-static const struct file_operations scr24x_fops = {
- .owner = THIS_MODULE,
- .read = scr24x_read,
- .write = scr24x_write,
- .open = scr24x_open,
- .release = scr24x_release,
- .llseek = no_llseek,
-};
-
-static int scr24x_config_check(struct pcmcia_device *link, void *priv_data)
-{
- if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11)
- return -ENODEV;
- return pcmcia_request_io(link);
-}
-
-static int scr24x_probe(struct pcmcia_device *link)
-{
- struct scr24x_dev *dev;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS);
- if (dev->devno >= SCR24X_DEVS) {
- ret = -EBUSY;
- goto err;
- }
-
- mutex_init(&dev->lock);
- kref_init(&dev->refcnt);
-
- link->priv = dev;
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, scr24x_config_check, NULL);
- if (ret < 0)
- goto err;
-
- dev->dev = &link->dev;
- dev->regs = devm_ioport_map(&link->dev,
- link->resource[PCMCIA_IOPORT_0]->start,
- resource_size(link->resource[PCMCIA_IOPORT_0]));
- if (!dev->regs) {
- ret = -EIO;
- goto err;
- }
-
- cdev_init(&dev->c_dev, &scr24x_fops);
- dev->c_dev.owner = THIS_MODULE;
- ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
- if (ret < 0)
- goto err;
-
- ret = pcmcia_enable_device(link);
- if (ret < 0) {
- pcmcia_disable_device(link);
- goto err;
- }
-
- device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno),
- NULL, "scr24x%d", dev->devno);
-
- dev_info(&link->dev, "SCR24x Chip Card Interface\n");
- return 0;
-
-err:
- if (dev->devno < SCR24X_DEVS)
- clear_bit(dev->devno, scr24x_minors);
- kfree (dev);
- return ret;
-}
-
-static void scr24x_remove(struct pcmcia_device *link)
-{
- struct scr24x_dev *dev = (struct scr24x_dev *)link->priv;
-
- device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno));
- mutex_lock(&dev->lock);
- pcmcia_disable_device(link);
- cdev_del(&dev->c_dev);
- clear_bit(dev->devno, scr24x_minors);
- dev->dev = NULL;
- mutex_unlock(&dev->lock);
-
- kref_put(&dev->refcnt, scr24x_delete);
-}
-
-static const struct pcmcia_device_id scr24x_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader",
- 0x53cb94f9, 0xbfdf89a5),
- PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3),
- PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de),
- PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, scr24x_ids);
-
-static struct pcmcia_driver scr24x_driver = {
- .owner = THIS_MODULE,
- .name = "scr24x_cs",
- .probe = scr24x_probe,
- .remove = scr24x_remove,
- .id_table = scr24x_ids,
-};
-
-static int __init scr24x_init(void)
-{
- int ret;
-
- scr24x_class = class_create(THIS_MODULE, "scr24x");
- if (IS_ERR(scr24x_class))
- return PTR_ERR(scr24x_class);
-
- ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x");
- if (ret < 0) {
- class_destroy(scr24x_class);
- return ret;
- }
-
- ret = pcmcia_register_driver(&scr24x_driver);
- if (ret < 0) {
- unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
- class_destroy(scr24x_class);
- }
-
- return ret;
-}
-
-static void __exit scr24x_exit(void)
-{
- pcmcia_unregister_driver(&scr24x_driver);
- unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
- class_destroy(scr24x_class);
-}
-
-module_init(scr24x_init);
-module_exit(scr24x_exit);
-
-MODULE_AUTHOR("Lubomir Rintel");
-MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
deleted file mode 100644
index b2735be81ab2..000000000000
--- a/drivers/char/pcmcia/synclink_cs.c
+++ /dev/null
@@ -1,4292 +0,0 @@
-/*
- * linux/drivers/char/pcmcia/synclink_cs.c
- *
- * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $
- *
- * Device driver for Microgate SyncLink PC Card
- * multiprotocol serial adapter.
- *
- * written by Paul Fulghum for Microgate Corporation
- * paulkf@microgate.com
- *
- * Microgate and SyncLink are trademarks of Microgate Corporation
- *
- * This code is released under the GNU General Public License (GPL)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq))
-#if defined(__i386__)
-# define BREAKPOINT() asm(" int $3");
-#else
-# define BREAKPOINT() { }
-#endif
-
-#define MAX_DEVICE_COUNT 4
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/ioctl.h>
-#include <linux/synclink.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/dma.h>
-#include <linux/bitops.h>
-#include <asm/types.h>
-#include <linux/termios.h>
-#include <linux/workqueue.h>
-#include <linux/hdlc.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
-#define SYNCLINK_GENERIC_HDLC 1
-#else
-#define SYNCLINK_GENERIC_HDLC 0
-#endif
-
-#define GET_USER(error,value,addr) error = get_user(value,addr)
-#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
-#define PUT_USER(error,value,addr) error = put_user(value,addr)
-#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
-
-#include <linux/uaccess.h>
-
-static MGSL_PARAMS default_params = {
- MGSL_MODE_HDLC, /* unsigned long mode */
- 0, /* unsigned char loopback; */
- HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
- HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
- 0, /* unsigned long clock_speed; */
- 0xff, /* unsigned char addr_filter; */
- HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
- HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
- HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
- 9600, /* unsigned long data_rate; */
- 8, /* unsigned char data_bits; */
- 1, /* unsigned char stop_bits; */
- ASYNC_PARITY_NONE /* unsigned char parity; */
-};
-
-typedef struct {
- int count;
- unsigned char status;
- char data[1];
-} RXBUF;
-
-/* The queue of BH actions to be performed */
-
-#define BH_RECEIVE 1
-#define BH_TRANSMIT 2
-#define BH_STATUS 4
-
-#define IO_PIN_SHUTDOWN_LIMIT 100
-
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
-struct _input_signal_events {
- int ri_up;
- int ri_down;
- int dsr_up;
- int dsr_down;
- int dcd_up;
- int dcd_down;
- int cts_up;
- int cts_down;
-};
-
-
-/*
- * Device instance data structure
- */
-
-typedef struct _mgslpc_info {
- struct tty_port port;
- void *if_ptr; /* General purpose pointer (used by SPPP) */
- int magic;
- int line;
-
- struct mgsl_icount icount;
-
- int timeout;
- int x_char; /* xon/xoff character */
- unsigned char read_status_mask;
- unsigned char ignore_status_mask;
-
- unsigned char *tx_buf;
- int tx_put;
- int tx_get;
- int tx_count;
-
- /* circular list of fixed length rx buffers */
-
- unsigned char *rx_buf; /* memory allocated for all rx buffers */
- int rx_buf_total_size; /* size of memory allocated for rx buffers */
- int rx_put; /* index of next empty rx buffer */
- int rx_get; /* index of next full rx buffer */
- int rx_buf_size; /* size in bytes of single rx buffer */
- int rx_buf_count; /* total number of rx buffers */
- int rx_frame_count; /* number of full rx buffers */
-
- wait_queue_head_t status_event_wait_q;
- wait_queue_head_t event_wait_q;
- struct timer_list tx_timer; /* HDLC transmit timeout timer */
- struct _mgslpc_info *next_device; /* device list link */
-
- unsigned short imra_value;
- unsigned short imrb_value;
- unsigned char pim_value;
-
- spinlock_t lock;
- struct work_struct task; /* task structure for scheduling bh */
-
- u32 max_frame_size;
-
- u32 pending_bh;
-
- bool bh_running;
- bool bh_requested;
-
- int dcd_chkcount; /* check counts to prevent */
- int cts_chkcount; /* too many IRQs if a signal */
- int dsr_chkcount; /* is floating */
- int ri_chkcount;
-
- bool rx_enabled;
- bool rx_overflow;
-
- bool tx_enabled;
- bool tx_active;
- bool tx_aborting;
- u32 idle_mode;
-
- int if_mode; /* serial interface selection (RS-232, v.35 etc) */
-
- char device_name[25]; /* device instance name */
-
- unsigned int io_base; /* base I/O address of adapter */
- unsigned int irq_level;
-
- MGSL_PARAMS params; /* communications parameters */
-
- unsigned char serial_signals; /* current serial signal states */
-
- bool irq_occurred; /* for diagnostics use */
- char testing_irq;
- unsigned int init_error; /* startup error (DIAGS) */
-
- char *flag_buf;
- bool drop_rts_on_tx_done;
-
- struct _input_signal_events input_signal_events;
-
- /* PCMCIA support */
- struct pcmcia_device *p_dev;
- int stop;
-
- /* SPPP/Cisco HDLC device parts */
- int netcount;
- spinlock_t netlock;
-
-#if SYNCLINK_GENERIC_HDLC
- struct net_device *netdev;
-#endif
-
-} MGSLPC_INFO;
-
-#define MGSLPC_MAGIC 0x5402
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define TXBUFSIZE 4096
-
-
-#define CHA 0x00 /* channel A offset */
-#define CHB 0x40 /* channel B offset */
-
-/*
- * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it.
- */
-#undef PVR
-
-#define RXFIFO 0
-#define TXFIFO 0
-#define STAR 0x20
-#define CMDR 0x20
-#define RSTA 0x21
-#define PRE 0x21
-#define MODE 0x22
-#define TIMR 0x23
-#define XAD1 0x24
-#define XAD2 0x25
-#define RAH1 0x26
-#define RAH2 0x27
-#define DAFO 0x27
-#define RAL1 0x28
-#define RFC 0x28
-#define RHCR 0x29
-#define RAL2 0x29
-#define RBCL 0x2a
-#define XBCL 0x2a
-#define RBCH 0x2b
-#define XBCH 0x2b
-#define CCR0 0x2c
-#define CCR1 0x2d
-#define CCR2 0x2e
-#define CCR3 0x2f
-#define VSTR 0x34
-#define BGR 0x34
-#define RLCR 0x35
-#define AML 0x36
-#define AMH 0x37
-#define GIS 0x38
-#define IVA 0x38
-#define IPC 0x39
-#define ISR 0x3a
-#define IMR 0x3a
-#define PVR 0x3c
-#define PIS 0x3d
-#define PIM 0x3d
-#define PCR 0x3e
-#define CCR4 0x3f
-
-// IMR/ISR
-
-#define IRQ_BREAK_ON BIT15 // rx break detected
-#define IRQ_DATAOVERRUN BIT14 // receive data overflow
-#define IRQ_ALLSENT BIT13 // all sent
-#define IRQ_UNDERRUN BIT12 // transmit data underrun
-#define IRQ_TIMER BIT11 // timer interrupt
-#define IRQ_CTS BIT10 // CTS status change
-#define IRQ_TXREPEAT BIT9 // tx message repeat
-#define IRQ_TXFIFO BIT8 // transmit pool ready
-#define IRQ_RXEOM BIT7 // receive message end
-#define IRQ_EXITHUNT BIT6 // receive frame start
-#define IRQ_RXTIME BIT6 // rx char timeout
-#define IRQ_DCD BIT2 // carrier detect status change
-#define IRQ_OVERRUN BIT1 // receive frame overflow
-#define IRQ_RXFIFO BIT0 // receive pool full
-
-// STAR
-
-#define XFW BIT6 // transmit FIFO write enable
-#define CEC BIT2 // command executing
-#define CTS BIT1 // CTS state
-
-#define PVR_DTR BIT0
-#define PVR_DSR BIT1
-#define PVR_RI BIT2
-#define PVR_AUTOCTS BIT3
-#define PVR_RS232 0x20 /* 0010b */
-#define PVR_V35 0xe0 /* 1110b */
-#define PVR_RS422 0x40 /* 0100b */
-
-/* Register access functions */
-
-#define write_reg(info, reg, val) outb((val),(info)->io_base + (reg))
-#define read_reg(info, reg) inb((info)->io_base + (reg))
-
-#define read_reg16(info, reg) inw((info)->io_base + (reg))
-#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
-
-#define set_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
- (unsigned char) (read_reg(info, (reg)) | (mask)))
-#define clear_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
- (unsigned char) (read_reg(info, (reg)) & ~(mask)))
-/*
- * interrupt enable/disable routines
- */
-static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask)
-{
- if (channel == CHA) {
- info->imra_value |= mask;
- write_reg16(info, CHA + IMR, info->imra_value);
- } else {
- info->imrb_value |= mask;
- write_reg16(info, CHB + IMR, info->imrb_value);
- }
-}
-static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask)
-{
- if (channel == CHA) {
- info->imra_value &= ~mask;
- write_reg16(info, CHA + IMR, info->imra_value);
- } else {
- info->imrb_value &= ~mask;
- write_reg16(info, CHB + IMR, info->imrb_value);
- }
-}
-
-#define port_irq_disable(info, mask) \
- { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
-
-#define port_irq_enable(info, mask) \
- { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
-
-static void rx_start(MGSLPC_INFO *info);
-static void rx_stop(MGSLPC_INFO *info);
-
-static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty);
-static void tx_stop(MGSLPC_INFO *info);
-static void tx_set_idle(MGSLPC_INFO *info);
-
-static void get_signals(MGSLPC_INFO *info);
-static void set_signals(MGSLPC_INFO *info);
-
-static void reset_device(MGSLPC_INFO *info);
-
-static void hdlc_mode(MGSLPC_INFO *info);
-static void async_mode(MGSLPC_INFO *info);
-
-static void tx_timeout(struct timer_list *t);
-
-static int carrier_raised(struct tty_port *port);
-static void dtr_rts(struct tty_port *port, int onoff);
-
-#if SYNCLINK_GENERIC_HDLC
-#define dev_to_port(D) (dev_to_hdlc(D)->priv)
-static void hdlcdev_tx_done(MGSLPC_INFO *info);
-static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
-static int hdlcdev_init(MGSLPC_INFO *info);
-static void hdlcdev_exit(MGSLPC_INFO *info);
-#endif
-
-static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit);
-
-static bool register_test(MGSLPC_INFO *info);
-static bool irq_test(MGSLPC_INFO *info);
-static int adapter_test(MGSLPC_INFO *info);
-
-static int claim_resources(MGSLPC_INFO *info);
-static void release_resources(MGSLPC_INFO *info);
-static int mgslpc_add_device(MGSLPC_INFO *info);
-static void mgslpc_remove_device(MGSLPC_INFO *info);
-
-static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
-static void rx_reset_buffers(MGSLPC_INFO *info);
-static int rx_alloc_buffers(MGSLPC_INFO *info);
-static void rx_free_buffers(MGSLPC_INFO *info);
-
-static irqreturn_t mgslpc_isr(int irq, void *dev_id);
-
-/*
- * Bottom half interrupt handlers
- */
-static void bh_handler(struct work_struct *work);
-static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty);
-static void bh_status(MGSLPC_INFO *info);
-
-/*
- * ioctl handlers
- */
-static int tiocmget(struct tty_struct *tty);
-static int tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount);
-static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params);
-static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty);
-static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode);
-static int set_txidle(MGSLPC_INFO *info, int idle_mode);
-static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty);
-static int tx_abort(MGSLPC_INFO *info);
-static int set_rxenable(MGSLPC_INFO *info, int enable);
-static int wait_events(MGSLPC_INFO *info, int __user *mask);
-
-static MGSLPC_INFO *mgslpc_device_list = NULL;
-static int mgslpc_device_count = 0;
-
-/*
- * Set this param to non-zero to load eax with the
- * .text section address and breakpoint on module load.
- * This is useful for use with gdb and add-symbol-file command.
- */
-static bool break_on_load;
-
-/*
- * Driver major number, defaults to zero to get auto
- * assigned major number. May be forced as module parameter.
- */
-static int ttymajor=0;
-
-static int debug_level = 0;
-static int maxframe[MAX_DEVICE_COUNT] = {0,};
-
-module_param(break_on_load, bool, 0);
-module_param(ttymajor, int, 0);
-module_param(debug_level, int, 0);
-module_param_array(maxframe, int, NULL, 0);
-
-MODULE_LICENSE("GPL");
-
-static char *driver_name = "SyncLink PC Card driver";
-static char *driver_version = "$Revision: 4.34 $";
-
-static struct tty_driver *serial_driver;
-
-/* number of characters left in xmit buffer before we ask for more */
-#define WAKEUP_CHARS 256
-
-static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty);
-static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout);
-
-/* PCMCIA prototypes */
-
-static int mgslpc_config(struct pcmcia_device *link);
-static void mgslpc_release(u_long arg);
-static void mgslpc_detach(struct pcmcia_device *p_dev);
-
-/*
- * 1st function defined in .text section. Calling this function in
- * init_module() followed by a breakpoint allows a remote debugger
- * (gdb) to get the .text address for the add-symbol-file command.
- * This allows remote debugging of dynamically loadable modules.
- */
-static void* mgslpc_get_text_ptr(void)
-{
- return mgslpc_get_text_ptr;
-}
-
-/**
- * line discipline callback wrappers
- *
- * The wrappers maintain line discipline references
- * while calling into the line discipline.
- *
- * ldisc_receive_buf - pass receive data to line discipline
- */
-
-static void ldisc_receive_buf(struct tty_struct *tty,
- const __u8 *data, char *flags, int count)
-{
- struct tty_ldisc *ld;
- if (!tty)
- return;
- ld = tty_ldisc_ref(tty);
- if (ld) {
- if (ld->ops->receive_buf)
- ld->ops->receive_buf(tty, data, flags, count);
- tty_ldisc_deref(ld);
- }
-}
-
-static const struct tty_port_operations mgslpc_port_ops = {
- .carrier_raised = carrier_raised,
- .dtr_rts = dtr_rts
-};
-
-static int mgslpc_probe(struct pcmcia_device *link)
-{
- MGSLPC_INFO *info;
- int ret;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_attach\n");
-
- info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
- if (!info) {
- printk("Error can't allocate device instance data\n");
- return -ENOMEM;
- }
-
- info->magic = MGSLPC_MAGIC;
- tty_port_init(&info->port);
- info->port.ops = &mgslpc_port_ops;
- INIT_WORK(&info->task, bh_handler);
- info->max_frame_size = 4096;
- init_waitqueue_head(&info->status_event_wait_q);
- init_waitqueue_head(&info->event_wait_q);
- spin_lock_init(&info->lock);
- spin_lock_init(&info->netlock);
- memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
- info->idle_mode = HDLC_TXIDLE_FLAGS;
- info->imra_value = 0xffff;
- info->imrb_value = 0xffff;
- info->pim_value = 0xff;
-
- info->p_dev = link;
- link->priv = info;
-
- /* Initialize the struct pcmcia_device structure */
-
- ret = mgslpc_config(link);
- if (ret != 0)
- goto failed;
-
- ret = mgslpc_add_device(info);
- if (ret != 0)
- goto failed_release;
-
- return 0;
-
-failed_release:
- mgslpc_release((u_long)link);
-failed:
- tty_port_destroy(&info->port);
- kfree(info);
- return ret;
-}
-
-/* Card has been inserted.
- */
-
-static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
-{
- return pcmcia_request_io(p_dev);
-}
-
-static int mgslpc_config(struct pcmcia_device *link)
-{
- MGSLPC_INFO *info = link->priv;
- int ret;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_config(0x%p)\n", link);
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
- if (ret != 0)
- goto failed;
-
- link->config_index = 8;
- link->config_regs = PRESENT_OPTION;
-
- ret = pcmcia_request_irq(link, mgslpc_isr);
- if (ret)
- goto failed;
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- info->io_base = link->resource[0]->start;
- info->irq_level = link->irq;
- return 0;
-
-failed:
- mgslpc_release((u_long)link);
- return -ENODEV;
-}
-
-/* Card has been removed.
- * Unregister device and release PCMCIA configuration.
- * If device is open, postpone until it is closed.
- */
-static void mgslpc_release(u_long arg)
-{
- struct pcmcia_device *link = (struct pcmcia_device *)arg;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_release(0x%p)\n", link);
-
- pcmcia_disable_device(link);
-}
-
-static void mgslpc_detach(struct pcmcia_device *link)
-{
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_detach(0x%p)\n", link);
-
- ((MGSLPC_INFO *)link->priv)->stop = 1;
- mgslpc_release((u_long)link);
-
- mgslpc_remove_device((MGSLPC_INFO *)link->priv);
-}
-
-static int mgslpc_suspend(struct pcmcia_device *link)
-{
- MGSLPC_INFO *info = link->priv;
-
- info->stop = 1;
-
- return 0;
-}
-
-static int mgslpc_resume(struct pcmcia_device *link)
-{
- MGSLPC_INFO *info = link->priv;
-
- info->stop = 0;
-
- return 0;
-}
-
-
-static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
- char *name, const char *routine)
-{
-#ifdef MGSLPC_PARANOIA_CHECK
- static const char *badmagic =
- "Warning: bad magic number for mgsl struct (%s) in %s\n";
- static const char *badinfo =
- "Warning: null mgslpc_info for (%s) in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return true;
- }
- if (info->magic != MGSLPC_MAGIC) {
- printk(badmagic, name, routine);
- return true;
- }
-#else
- if (!info)
- return true;
-#endif
- return false;
-}
-
-
-#define CMD_RXFIFO BIT7 // release current rx FIFO
-#define CMD_RXRESET BIT6 // receiver reset
-#define CMD_RXFIFO_READ BIT5
-#define CMD_START_TIMER BIT4
-#define CMD_TXFIFO BIT3 // release current tx FIFO
-#define CMD_TXEOM BIT1 // transmit end message
-#define CMD_TXRESET BIT0 // transmit reset
-
-static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
-{
- int i = 0;
- /* wait for command completion */
- while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
- udelay(1);
- if (i++ == 1000)
- return false;
- }
- return true;
-}
-
-static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
-{
- wait_command_complete(info, channel);
- write_reg(info, (unsigned char) (channel + CMDR), cmd);
-}
-
-static void tx_pause(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
- return;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_pause(%s)\n", info->device_name);
-
- spin_lock_irqsave(&info->lock, flags);
- if (info->tx_enabled)
- tx_stop(info);
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-static void tx_release(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
- return;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_release(%s)\n", info->device_name);
-
- spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-/* Return next bottom half action to perform.
- * or 0 if nothing to do.
- */
-static int bh_action(MGSLPC_INFO *info)
-{
- unsigned long flags;
- int rc = 0;
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (info->pending_bh & BH_RECEIVE) {
- info->pending_bh &= ~BH_RECEIVE;
- rc = BH_RECEIVE;
- } else if (info->pending_bh & BH_TRANSMIT) {
- info->pending_bh &= ~BH_TRANSMIT;
- rc = BH_TRANSMIT;
- } else if (info->pending_bh & BH_STATUS) {
- info->pending_bh &= ~BH_STATUS;
- rc = BH_STATUS;
- }
-
- if (!rc) {
- /* Mark BH routine as complete */
- info->bh_running = false;
- info->bh_requested = false;
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
-
- return rc;
-}
-
-static void bh_handler(struct work_struct *work)
-{
- MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
- struct tty_struct *tty;
- int action;
-
- if (debug_level >= DEBUG_LEVEL_BH)
- printk("%s(%d):bh_handler(%s) entry\n",
- __FILE__,__LINE__,info->device_name);
-
- info->bh_running = true;
- tty = tty_port_tty_get(&info->port);
-
- while((action = bh_action(info)) != 0) {
-
- /* Process work item */
- if (debug_level >= DEBUG_LEVEL_BH)
- printk("%s(%d):bh_handler() work item action=%d\n",
- __FILE__,__LINE__,action);
-
- switch (action) {
-
- case BH_RECEIVE:
- while(rx_get_frame(info, tty));
- break;
- case BH_TRANSMIT:
- bh_transmit(info, tty);
- break;
- case BH_STATUS:
- bh_status(info);
- break;
- default:
- /* unknown work item ID */
- printk("Unknown work item ID=%08X!\n", action);
- break;
- }
- }
-
- tty_kref_put(tty);
- if (debug_level >= DEBUG_LEVEL_BH)
- printk("%s(%d):bh_handler(%s) exit\n",
- __FILE__,__LINE__,info->device_name);
-}
-
-static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- if (debug_level >= DEBUG_LEVEL_BH)
- printk("bh_transmit() entry on %s\n", info->device_name);
-
- if (tty)
- tty_wakeup(tty);
-}
-
-static void bh_status(MGSLPC_INFO *info)
-{
- info->ri_chkcount = 0;
- info->dsr_chkcount = 0;
- info->dcd_chkcount = 0;
- info->cts_chkcount = 0;
-}
-
-/* eom: non-zero = end of frame */
-static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
-{
- unsigned char data[2];
- unsigned char fifo_count, read_count, i;
- RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
-
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
-
- if (!info->rx_enabled)
- return;
-
- if (info->rx_frame_count >= info->rx_buf_count) {
- /* no more free buffers */
- issue_command(info, CHA, CMD_RXRESET);
- info->pending_bh |= BH_RECEIVE;
- info->rx_overflow = true;
- info->icount.buf_overrun++;
- return;
- }
-
- if (eom) {
- /* end of frame, get FIFO count from RBCL register */
- fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
- if (fifo_count == 0)
- fifo_count = 32;
- } else
- fifo_count = 32;
-
- do {
- if (fifo_count == 1) {
- read_count = 1;
- data[0] = read_reg(info, CHA + RXFIFO);
- } else {
- read_count = 2;
- *((unsigned short *) data) = read_reg16(info, CHA + RXFIFO);
- }
- fifo_count -= read_count;
- if (!fifo_count && eom)
- buf->status = data[--read_count];
-
- for (i = 0; i < read_count; i++) {
- if (buf->count >= info->max_frame_size) {
- /* frame too large, reset receiver and reset current buffer */
- issue_command(info, CHA, CMD_RXRESET);
- buf->count = 0;
- return;
- }
- *(buf->data + buf->count) = data[i];
- buf->count++;
- }
- } while (fifo_count);
-
- if (eom) {
- info->pending_bh |= BH_RECEIVE;
- info->rx_frame_count++;
- info->rx_put++;
- if (info->rx_put >= info->rx_buf_count)
- info->rx_put = 0;
- }
- issue_command(info, CHA, CMD_RXFIFO);
-}
-
-static void rx_ready_async(MGSLPC_INFO *info, int tcd)
-{
- struct tty_port *port = &info->port;
- unsigned char data, status, flag;
- int fifo_count;
- int work = 0;
- struct mgsl_icount *icount = &info->icount;
-
- if (tcd) {
- /* early termination, get FIFO count from RBCL register */
- fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
-
- /* Zero fifo count could mean 0 or 32 bytes available.
- * If BIT5 of STAR is set then at least 1 byte is available.
- */
- if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5))
- fifo_count = 32;
- } else
- fifo_count = 32;
-
- tty_buffer_request_room(port, fifo_count);
- /* Flush received async data to receive data buffer. */
- while (fifo_count) {
- data = read_reg(info, CHA + RXFIFO);
- status = read_reg(info, CHA + RXFIFO);
- fifo_count -= 2;
-
- icount->rx++;
- flag = TTY_NORMAL;
-
- // if no parity/framing error then save data
- // BIT7:parity error
- // BIT6:framing error
-
- if (status & (BIT7 | BIT6)) {
- if (status & BIT7)
- icount->parity++;
- else
- icount->frame++;
-
- /* discard char if tty control flags say so */
- if (status & info->ignore_status_mask)
- continue;
-
- status &= info->read_status_mask;
-
- if (status & BIT7)
- flag = TTY_PARITY;
- else if (status & BIT6)
- flag = TTY_FRAME;
- }
- work += tty_insert_flip_char(port, data, flag);
- }
- issue_command(info, CHA, CMD_RXFIFO);
-
- if (debug_level >= DEBUG_LEVEL_ISR) {
- printk("%s(%d):rx_ready_async",
- __FILE__,__LINE__);
- printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
- __FILE__,__LINE__,icount->rx,icount->brk,
- icount->parity,icount->frame,icount->overrun);
- }
-
- if (work)
- tty_flip_buffer_push(port);
-}
-
-
-static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- if (!info->tx_active)
- return;
-
- info->tx_active = false;
- info->tx_aborting = false;
-
- if (info->params.mode == MGSL_MODE_ASYNC)
- return;
-
- info->tx_count = info->tx_put = info->tx_get = 0;
- del_timer(&info->tx_timer);
-
- if (info->drop_rts_on_tx_done) {
- get_signals(info);
- if (info->serial_signals & SerialSignal_RTS) {
- info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
- }
- info->drop_rts_on_tx_done = false;
- }
-
-#if SYNCLINK_GENERIC_HDLC
- if (info->netcount)
- hdlcdev_tx_done(info);
- else
-#endif
- {
- if (tty && (tty->flow.stopped || tty->hw_stopped)) {
- tx_stop(info);
- return;
- }
- info->pending_bh |= BH_TRANSMIT;
- }
-}
-
-static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- unsigned char fifo_count = 32;
- int c;
-
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
-
- if (info->params.mode == MGSL_MODE_HDLC) {
- if (!info->tx_active)
- return;
- } else {
- if (tty && (tty->flow.stopped || tty->hw_stopped)) {
- tx_stop(info);
- return;
- }
- if (!info->tx_count)
- info->tx_active = false;
- }
-
- if (!info->tx_count)
- return;
-
- while (info->tx_count && fifo_count) {
- c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get)));
-
- if (c == 1) {
- write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get));
- } else {
- write_reg16(info, CHA + TXFIFO,
- *((unsigned short*)(info->tx_buf + info->tx_get)));
- }
- info->tx_count -= c;
- info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1);
- fifo_count -= c;
- }
-
- if (info->params.mode == MGSL_MODE_ASYNC) {
- if (info->tx_count < WAKEUP_CHARS)
- info->pending_bh |= BH_TRANSMIT;
- issue_command(info, CHA, CMD_TXFIFO);
- } else {
- if (info->tx_count)
- issue_command(info, CHA, CMD_TXFIFO);
- else
- issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM);
- }
-}
-
-static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- get_signals(info);
- if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
- irq_disable(info, CHB, IRQ_CTS);
- info->icount.cts++;
- if (info->serial_signals & SerialSignal_CTS)
- info->input_signal_events.cts_up++;
- else
- info->input_signal_events.cts_down++;
- wake_up_interruptible(&info->status_event_wait_q);
- wake_up_interruptible(&info->event_wait_q);
-
- if (tty && tty_port_cts_enabled(&info->port)) {
- if (tty->hw_stopped) {
- if (info->serial_signals & SerialSignal_CTS) {
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("CTS tx start...");
- tty->hw_stopped = 0;
- tx_start(info, tty);
- info->pending_bh |= BH_TRANSMIT;
- return;
- }
- } else {
- if (!(info->serial_signals & SerialSignal_CTS)) {
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("CTS tx stop...");
- tty->hw_stopped = 1;
- tx_stop(info);
- }
- }
- }
- info->pending_bh |= BH_STATUS;
-}
-
-static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- get_signals(info);
- if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
- irq_disable(info, CHB, IRQ_DCD);
- info->icount.dcd++;
- if (info->serial_signals & SerialSignal_DCD) {
- info->input_signal_events.dcd_up++;
- }
- else
- info->input_signal_events.dcd_down++;
-#if SYNCLINK_GENERIC_HDLC
- if (info->netcount) {
- if (info->serial_signals & SerialSignal_DCD)
- netif_carrier_on(info->netdev);
- else
- netif_carrier_off(info->netdev);
- }
-#endif
- wake_up_interruptible(&info->status_event_wait_q);
- wake_up_interruptible(&info->event_wait_q);
-
- if (tty_port_check_carrier(&info->port)) {
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s CD now %s...", info->device_name,
- (info->serial_signals & SerialSignal_DCD) ? "on" : "off");
- if (info->serial_signals & SerialSignal_DCD)
- wake_up_interruptible(&info->port.open_wait);
- else {
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("doing serial hangup...");
- if (tty)
- tty_hangup(tty);
- }
- }
- info->pending_bh |= BH_STATUS;
-}
-
-static void dsr_change(MGSLPC_INFO *info)
-{
- get_signals(info);
- if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
- port_irq_disable(info, PVR_DSR);
- info->icount.dsr++;
- if (info->serial_signals & SerialSignal_DSR)
- info->input_signal_events.dsr_up++;
- else
- info->input_signal_events.dsr_down++;
- wake_up_interruptible(&info->status_event_wait_q);
- wake_up_interruptible(&info->event_wait_q);
- info->pending_bh |= BH_STATUS;
-}
-
-static void ri_change(MGSLPC_INFO *info)
-{
- get_signals(info);
- if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
- port_irq_disable(info, PVR_RI);
- info->icount.rng++;
- if (info->serial_signals & SerialSignal_RI)
- info->input_signal_events.ri_up++;
- else
- info->input_signal_events.ri_down++;
- wake_up_interruptible(&info->status_event_wait_q);
- wake_up_interruptible(&info->event_wait_q);
- info->pending_bh |= BH_STATUS;
-}
-
-/* Interrupt service routine entry point.
- *
- * Arguments:
- *
- * irq interrupt number that caused interrupt
- * dev_id device ID supplied during interrupt registration
- */
-static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
-{
- MGSLPC_INFO *info = dev_id;
- struct tty_struct *tty;
- unsigned short isr;
- unsigned char gis, pis;
- int count=0;
-
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("mgslpc_isr(%d) entry.\n", info->irq_level);
-
- if (!(info->p_dev->_locked))
- return IRQ_HANDLED;
-
- tty = tty_port_tty_get(&info->port);
-
- spin_lock(&info->lock);
-
- while ((gis = read_reg(info, CHA + GIS))) {
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis);
-
- if ((gis & 0x70) || count > 1000) {
- printk("synclink_cs:hardware failed or ejected\n");
- break;
- }
- count++;
-
- if (gis & (BIT1 | BIT0)) {
- isr = read_reg16(info, CHB + ISR);
- if (isr & IRQ_DCD)
- dcd_change(info, tty);
- if (isr & IRQ_CTS)
- cts_change(info, tty);
- }
- if (gis & (BIT3 | BIT2))
- {
- isr = read_reg16(info, CHA + ISR);
- if (isr & IRQ_TIMER) {
- info->irq_occurred = true;
- irq_disable(info, CHA, IRQ_TIMER);
- }
-
- /* receive IRQs */
- if (isr & IRQ_EXITHUNT) {
- info->icount.exithunt++;
- wake_up_interruptible(&info->event_wait_q);
- }
- if (isr & IRQ_BREAK_ON) {
- info->icount.brk++;
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
- }
- if (isr & IRQ_RXTIME) {
- issue_command(info, CHA, CMD_RXFIFO_READ);
- }
- if (isr & (IRQ_RXEOM | IRQ_RXFIFO)) {
- if (info->params.mode == MGSL_MODE_HDLC)
- rx_ready_hdlc(info, isr & IRQ_RXEOM);
- else
- rx_ready_async(info, isr & IRQ_RXEOM);
- }
-
- /* transmit IRQs */
- if (isr & IRQ_UNDERRUN) {
- if (info->tx_aborting)
- info->icount.txabort++;
- else
- info->icount.txunder++;
- tx_done(info, tty);
- }
- else if (isr & IRQ_ALLSENT) {
- info->icount.txok++;
- tx_done(info, tty);
- }
- else if (isr & IRQ_TXFIFO)
- tx_ready(info, tty);
- }
- if (gis & BIT7) {
- pis = read_reg(info, CHA + PIS);
- if (pis & BIT1)
- dsr_change(info);
- if (pis & BIT2)
- ri_change(info);
- }
- }
-
- /* Request bottom half processing if there's something
- * for it to do and the bh is not already running
- */
-
- if (info->pending_bh && !info->bh_running && !info->bh_requested) {
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):%s queueing bh task.\n",
- __FILE__,__LINE__,info->device_name);
- schedule_work(&info->task);
- info->bh_requested = true;
- }
-
- spin_unlock(&info->lock);
- tty_kref_put(tty);
-
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):mgslpc_isr(%d)exit.\n",
- __FILE__, __LINE__, info->irq_level);
-
- return IRQ_HANDLED;
-}
-
-/* Initialize and start device.
- */
-static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
-{
- int retval = 0;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
-
- if (tty_port_initialized(&info->port))
- return 0;
-
- if (!info->tx_buf) {
- /* allocate a page of memory for a transmit buffer */
- info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
- if (!info->tx_buf) {
- printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
- __FILE__, __LINE__, info->device_name);
- return -ENOMEM;
- }
- }
-
- info->pending_bh = 0;
-
- memset(&info->icount, 0, sizeof(info->icount));
-
- timer_setup(&info->tx_timer, tx_timeout, 0);
-
- /* Allocate and claim adapter resources */
- retval = claim_resources(info);
-
- /* perform existence check and diagnostics */
- if (!retval)
- retval = adapter_test(info);
-
- if (retval) {
- if (capable(CAP_SYS_ADMIN) && tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
- release_resources(info);
- return retval;
- }
-
- /* program hardware for current parameters */
- mgslpc_change_params(info, tty);
-
- if (tty)
- clear_bit(TTY_IO_ERROR, &tty->flags);
-
- tty_port_set_initialized(&info->port, 1);
-
- return 0;
-}
-
-/* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware
- */
-static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
-{
- unsigned long flags;
-
- if (!tty_port_initialized(&info->port))
- return;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_shutdown(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- /* clear status wait queue because status changes */
- /* can't happen after shutting down the hardware */
- wake_up_interruptible(&info->status_event_wait_q);
- wake_up_interruptible(&info->event_wait_q);
-
- del_timer_sync(&info->tx_timer);
-
- if (info->tx_buf) {
- free_page((unsigned long) info->tx_buf);
- info->tx_buf = NULL;
- }
-
- spin_lock_irqsave(&info->lock, flags);
-
- rx_stop(info);
- tx_stop(info);
-
- /* TODO:disable interrupts instead of reset to preserve signal states */
- reset_device(info);
-
- if (!tty || C_HUPCL(tty)) {
- info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
-
- release_resources(info);
-
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- tty_port_set_initialized(&info->port, 0);
-}
-
-static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
-
- rx_stop(info);
- tx_stop(info);
- info->tx_count = info->tx_put = info->tx_get = 0;
-
- if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
- hdlc_mode(info);
- else
- async_mode(info);
-
- set_signals(info);
-
- info->dcd_chkcount = 0;
- info->cts_chkcount = 0;
- info->ri_chkcount = 0;
- info->dsr_chkcount = 0;
-
- irq_enable(info, CHB, IRQ_DCD | IRQ_CTS);
- port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
- get_signals(info);
-
- if (info->netcount || (tty && C_CREAD(tty)))
- rx_start(info);
-
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-/* Reconfigure adapter based on new parameters
- */
-static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- unsigned cflag;
- int bits_per_char;
-
- if (!tty)
- return;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_change_params(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- cflag = tty->termios.c_cflag;
-
- /* if B0 rate (hangup) specified then negate RTS and DTR */
- /* otherwise assert RTS and DTR */
- if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
- else
- info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-
- /* byte size and parity */
- if ((cflag & CSIZE) != CS8) {
- cflag &= ~CSIZE;
- cflag |= CS7;
- tty->termios.c_cflag = cflag;
- }
- info->params.data_bits = tty_get_char_size(cflag);
-
- if (cflag & CSTOPB)
- info->params.stop_bits = 2;
- else
- info->params.stop_bits = 1;
-
- info->params.parity = ASYNC_PARITY_NONE;
- if (cflag & PARENB) {
- if (cflag & PARODD)
- info->params.parity = ASYNC_PARITY_ODD;
- else
- info->params.parity = ASYNC_PARITY_EVEN;
- if (cflag & CMSPAR)
- info->params.parity = ASYNC_PARITY_SPACE;
- }
-
- /* calculate number of jiffies to transmit a full
- * FIFO (32 bytes) at specified data rate
- */
- bits_per_char = info->params.data_bits +
- info->params.stop_bits + 1;
-
- /* if port data rate is set to 460800 or less then
- * allow tty settings to override, otherwise keep the
- * current data rate.
- */
- if (info->params.data_rate <= 460800) {
- info->params.data_rate = tty_get_baud_rate(tty);
- }
-
- if (info->params.data_rate) {
- info->timeout = (32*HZ*bits_per_char) /
- info->params.data_rate;
- }
- info->timeout += HZ/50; /* Add .02 seconds of slop */
-
- tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
- tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
-
- /* process tty input control flags */
-
- info->read_status_mask = 0;
- if (I_INPCK(tty))
- info->read_status_mask |= BIT7 | BIT6;
- if (I_IGNPAR(tty))
- info->ignore_status_mask |= BIT7 | BIT6;
-
- mgslpc_program_hw(info, tty);
-}
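
The timeout computed above is the worst-case number of jiffies needed to drain
a full 32-byte FIFO at the configured rate, plus 0.02 s of slack. A stand-alone
sketch of the same arithmetic (plain C; the HZ value and line settings below are
example values only):

/* Illustrative only: mirrors the timeout arithmetic above for 9600 bps 8N1,
 * assuming HZ = 100 purely for the example. */
#include <stdio.h>

int main(void)
{
	const unsigned int HZ = 100;			/* example tick rate */
	unsigned int data_bits = 8, stop_bits = 1;
	unsigned int data_rate = 9600;
	unsigned int bits_per_char = data_bits + stop_bits + 1;	/* +1 start bit */
	unsigned int timeout;

	/* jiffies to send a full 32-byte FIFO, plus 0.02 s of slop */
	timeout = (32 * HZ * bits_per_char) / data_rate + HZ / 50;
	printf("timeout = %u jiffies (~%u ms)\n", timeout, timeout * 1000 / HZ);
	return 0;
}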
-
-/* Add a character to the transmit buffer
- */
-static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO) {
- printk("%s(%d):mgslpc_put_char(%d) on %s\n",
- __FILE__, __LINE__, ch, info->device_name);
- }
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
- return 0;
-
- if (!info->tx_buf)
- return 0;
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
- if (info->tx_count < TXBUFSIZE - 1) {
- info->tx_buf[info->tx_put++] = ch;
- info->tx_put &= TXBUFSIZE-1;
- info->tx_count++;
- }
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
- return 1;
-}
-
-/* Enable transmitter so remaining characters in the
- * transmit buffer are sent.
- */
-static void mgslpc_flush_chars(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
- __FILE__, __LINE__, info->device_name, info->tx_count);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
- return;
-
- if (info->tx_count <= 0 || tty->flow.stopped ||
- tty->hw_stopped || !info->tx_buf)
- return;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
- __FILE__, __LINE__, info->device_name);
-
- spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-/* Send a block of data
- *
- * Arguments:
- *
- * tty pointer to tty information structure
- * buf pointer to buffer containing send data
- * count size of send data in bytes
- *
- * Returns: number of characters written
- */
-static int mgslpc_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
-{
- int c, ret = 0;
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_write(%s) count=%d\n",
- __FILE__, __LINE__, info->device_name, count);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
- !info->tx_buf)
- goto cleanup;
-
- if (info->params.mode == MGSL_MODE_HDLC) {
- if (count > TXBUFSIZE) {
- ret = -EIO;
- goto cleanup;
- }
- if (info->tx_active)
- goto cleanup;
- else if (info->tx_count)
- goto start;
- }
-
- for (;;) {
- c = min(count,
- min(TXBUFSIZE - info->tx_count - 1,
- TXBUFSIZE - info->tx_put));
- if (c <= 0)
- break;
-
- memcpy(info->tx_buf + info->tx_put, buf, c);
-
- spin_lock_irqsave(&info->lock, flags);
- info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
- info->tx_count += c;
- spin_unlock_irqrestore(&info->lock, flags);
-
- buf += c;
- count -= c;
- ret += c;
- }
-start:
- if (info->tx_count && !tty->flow.stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-cleanup:
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_write(%s) returning=%d\n",
- __FILE__, __LINE__, info->device_name, ret);
- return ret;
-}
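
mgslpc_write() and tx_ready() treat tx_buf as a power-of-two ring: tx_put and
tx_get wrap with a mask of TXBUFSIZE - 1, and one slot is always left unused so
tx_count never exceeds TXBUFSIZE - 1. A minimal sketch of that indexing scheme
(plain C with a hypothetical 16-byte buffer; the driver's actual TXBUFSIZE is
defined elsewhere in this file):

/* Illustrative ring-buffer indexing as used by tx_put/tx_get above.
 * BUFSIZE must be a power of two for the mask to work; 16 is arbitrary. */
#include <stdio.h>

#define BUFSIZE 16

static unsigned char buf[BUFSIZE];
static unsigned int put, get, count;

static int ring_put(unsigned char ch)
{
	if (count >= BUFSIZE - 1)	/* keep one slot free, as the driver does */
		return 0;
	buf[put] = ch;
	put = (put + 1) & (BUFSIZE - 1);
	count++;
	return 1;
}

static int ring_get(unsigned char *ch)
{
	if (!count)
		return 0;
	*ch = buf[get];
	get = (get + 1) & (BUFSIZE - 1);
	count--;
	return 1;
}

int main(void)
{
	unsigned char ch;

	for (ch = 'a'; ch <= 'z'; ch++)
		if (!ring_put(ch))
			break;
	while (ring_get(&ch))
		putchar(ch);
	putchar('\n');
	return 0;
}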
-
-/* Return the count of free bytes in transmit buffer
- */
-static unsigned int mgslpc_write_room(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- int ret;
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room"))
- return 0;
-
- if (info->params.mode == MGSL_MODE_HDLC) {
- /* HDLC (frame oriented) mode */
- if (info->tx_active)
- return 0;
- else
- return HDLC_MAX_FRAME_SIZE;
- } else {
- ret = TXBUFSIZE - info->tx_count - 1;
- if (ret < 0)
- ret = 0;
- }
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_write_room(%s)=%d\n",
- __FILE__, __LINE__, info->device_name, ret);
- return ret;
-}
-
-/* Return the count of bytes in transmit buffer
- */
-static unsigned int mgslpc_chars_in_buffer(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned int rc;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
- return 0;
-
- if (info->params.mode == MGSL_MODE_HDLC)
- rc = info->tx_active ? info->max_frame_size : 0;
- else
- rc = info->tx_count;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_chars_in_buffer(%s)=%u\n",
- __FILE__, __LINE__, info->device_name, rc);
-
- return rc;
-}
-
-/* Discard all data in the send buffer
- */
-static void mgslpc_flush_buffer(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
- __FILE__, __LINE__, info->device_name);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
- info->tx_count = info->tx_put = info->tx_get = 0;
- del_timer(&info->tx_timer);
- spin_unlock_irqrestore(&info->lock, flags);
-
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-}
-
-/* Send a high-priority XON/XOFF character
- */
-static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
- __FILE__, __LINE__, info->device_name, ch);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
- return;
-
- info->x_char = ch;
- if (ch) {
- spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-}
-
-/* Signal remote device to throttle send data (our receive data)
- */
-static void mgslpc_throttle(struct tty_struct * tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_throttle(%s) entry\n",
- __FILE__, __LINE__, info->device_name);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
- return;
-
- if (I_IXOFF(tty))
- mgslpc_send_xchar(tty, STOP_CHAR(tty));
-
- if (C_CRTSCTS(tty)) {
- spin_lock_irqsave(&info->lock, flags);
- info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-}
-
-/* Signal remote device to stop throttling send data (our receive data)
- */
-static void mgslpc_unthrottle(struct tty_struct * tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
- __FILE__, __LINE__, info->device_name);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
- return;
-
- if (I_IXOFF(tty)) {
- if (info->x_char)
- info->x_char = 0;
- else
- mgslpc_send_xchar(tty, START_CHAR(tty));
- }
-
- if (C_CRTSCTS(tty)) {
- spin_lock_irqsave(&info->lock, flags);
- info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-}
-
-/* get the current serial statistics
- */
-static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount)
-{
- int err;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("get_stats(%s)\n", info->device_name);
- if (!user_icount) {
- memset(&info->icount, 0, sizeof(info->icount));
- } else {
- COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
- if (err)
- return -EFAULT;
- }
- return 0;
-}
-
-/* get the current serial parameters
- */
-static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
-{
- int err;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("get_params(%s)\n", info->device_name);
- COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
- if (err)
- return -EFAULT;
- return 0;
-}
-
-/* set the serial parameters
- *
- * Arguments:
- *
- * info pointer to device instance data
- * new_params user buffer containing new serial params
- *
- * Returns: 0 if success, otherwise error code
- */
-static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
-{
- unsigned long flags;
- MGSL_PARAMS tmp_params;
- int err;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
- info->device_name);
- COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
- if (err) {
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):set_params(%s) user buffer copy failed\n",
- __FILE__, __LINE__, info->device_name);
- return -EFAULT;
- }
-
- spin_lock_irqsave(&info->lock, flags);
- memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
- spin_unlock_irqrestore(&info->lock, flags);
-
- mgslpc_change_params(info, tty);
-
- return 0;
-}
-
-static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
-{
- int err;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode);
- COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
- if (err)
- return -EFAULT;
- return 0;
-}
-
-static int set_txidle(MGSLPC_INFO * info, int idle_mode)
-{
- unsigned long flags;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
- spin_lock_irqsave(&info->lock, flags);
- info->idle_mode = idle_mode;
- tx_set_idle(info);
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
-{
- int err;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("get_interface(%s)=%d\n", info->device_name, info->if_mode);
- COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int));
- if (err)
- return -EFAULT;
- return 0;
-}
-
-static int set_interface(MGSLPC_INFO * info, int if_mode)
-{
- unsigned long flags;
- unsigned char val;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("set_interface(%s,%d)\n", info->device_name, if_mode);
- spin_lock_irqsave(&info->lock, flags);
- info->if_mode = if_mode;
-
- val = read_reg(info, PVR) & 0x0f;
- switch (info->if_mode)
- {
- case MGSL_INTERFACE_RS232: val |= PVR_RS232; break;
- case MGSL_INTERFACE_V35: val |= PVR_V35; break;
- case MGSL_INTERFACE_RS422: val |= PVR_RS422; break;
- }
- write_reg(info, PVR, val);
-
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
-{
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("set_txenable(%s,%d)\n", info->device_name, enable);
-
- spin_lock_irqsave(&info->lock, flags);
- if (enable) {
- if (!info->tx_enabled)
- tx_start(info, tty);
- } else {
- if (info->tx_enabled)
- tx_stop(info);
- }
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-static int tx_abort(MGSLPC_INFO * info)
-{
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_abort(%s)\n", info->device_name);
-
- spin_lock_irqsave(&info->lock, flags);
- if (info->tx_active && info->tx_count &&
- info->params.mode == MGSL_MODE_HDLC) {
- /* clear data count so FIFO is not filled on next IRQ.
- * This results in an underrun, which aborts the transmission.
- */
- info->tx_count = info->tx_put = info->tx_get = 0;
- info->tx_aborting = true;
- }
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-static int set_rxenable(MGSLPC_INFO * info, int enable)
-{
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("set_rxenable(%s,%d)\n", info->device_name, enable);
-
- spin_lock_irqsave(&info->lock, flags);
- if (enable) {
- if (!info->rx_enabled)
- rx_start(info);
- } else {
- if (info->rx_enabled)
- rx_stop(info);
- }
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-/* wait for specified event to occur
- *
- * Arguments: info pointer to device instance data
- * mask pointer to bitmask of events to wait for
- * Return Value: 0 if successful, with the bit mask updated to
- * the set of events that triggered,
- * otherwise error code
- */
-static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
-{
- unsigned long flags;
- int s;
- int rc=0;
- struct mgsl_icount cprev, cnow;
- int events;
- int mask;
- struct _input_signal_events oldsigs, newsigs;
- DECLARE_WAITQUEUE(wait, current);
-
- COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
- if (rc)
- return -EFAULT;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("wait_events(%s,%d)\n", info->device_name, mask);
-
- spin_lock_irqsave(&info->lock, flags);
-
- /* return immediately if state matches requested events */
- get_signals(info);
- s = info->serial_signals;
- events = mask &
- ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
- ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
- ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
- ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
- if (events) {
- spin_unlock_irqrestore(&info->lock, flags);
- goto exit;
- }
-
- /* save current irq counts */
- cprev = info->icount;
- oldsigs = info->input_signal_events;
-
- if ((info->params.mode == MGSL_MODE_HDLC) &&
- (mask & MgslEvent_ExitHuntMode))
- irq_enable(info, CHA, IRQ_EXITHUNT);
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&info->event_wait_q, &wait);
-
- spin_unlock_irqrestore(&info->lock, flags);
-
-
- for(;;) {
- schedule();
- if (signal_pending(current)) {
- rc = -ERESTARTSYS;
- break;
- }
-
- /* get current irq counts */
- spin_lock_irqsave(&info->lock, flags);
- cnow = info->icount;
- newsigs = info->input_signal_events;
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock, flags);
-
- /* if no change, wait aborted for some reason */
- if (newsigs.dsr_up == oldsigs.dsr_up &&
- newsigs.dsr_down == oldsigs.dsr_down &&
- newsigs.dcd_up == oldsigs.dcd_up &&
- newsigs.dcd_down == oldsigs.dcd_down &&
- newsigs.cts_up == oldsigs.cts_up &&
- newsigs.cts_down == oldsigs.cts_down &&
- newsigs.ri_up == oldsigs.ri_up &&
- newsigs.ri_down == oldsigs.ri_down &&
- cnow.exithunt == cprev.exithunt &&
- cnow.rxidle == cprev.rxidle) {
- rc = -EIO;
- break;
- }
-
- events = mask &
- ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
- (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
- (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
- (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
- (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
- (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
- (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
- (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
- (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
- (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
- if (events)
- break;
-
- cprev = cnow;
- oldsigs = newsigs;
- }
-
- remove_wait_queue(&info->event_wait_q, &wait);
- set_current_state(TASK_RUNNING);
-
- if (mask & MgslEvent_ExitHuntMode) {
- spin_lock_irqsave(&info->lock, flags);
- if (!waitqueue_active(&info->event_wait_q))
- irq_disable(info, CHA, IRQ_EXITHUNT);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-exit:
- if (rc == 0)
- PUT_USER(rc, events, mask_ptr);
- return rc;
-}
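
From user space this path is reached through the MGSL_IOCWAITEVENT ioctl: the
caller passes a pointer to an int holding a mask of MgslEvent_* bits and, on
success, the same int is overwritten with the events that actually occurred. A
minimal usage sketch (assumes the definitions from <linux/synclink.h>; the
device path and error handling are placeholders):

/* Illustrative user-space use of MGSL_IOCWAITEVENT. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/synclink.h>

int main(void)
{
	int fd = open("/dev/ttySLP0", O_RDWR | O_NONBLOCK);	/* hypothetical node */
	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;

	if (fd < 0)
		return 1;
	/* sleeps until a requested event occurs; on return the mask holds
	 * the subset of events that triggered */
	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
		printf("events=0x%x\n", events);
	close(fd);
	return 0;
}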
-
-static int modem_input_wait(MGSLPC_INFO *info,int arg)
-{
- unsigned long flags;
- int rc;
- struct mgsl_icount cprev, cnow;
- DECLARE_WAITQUEUE(wait, current);
-
- /* save current irq counts */
- spin_lock_irqsave(&info->lock, flags);
- cprev = info->icount;
- add_wait_queue(&info->status_event_wait_q, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock, flags);
-
- for(;;) {
- schedule();
- if (signal_pending(current)) {
- rc = -ERESTARTSYS;
- break;
- }
-
- /* get new irq counts */
- spin_lock_irqsave(&info->lock, flags);
- cnow = info->icount;
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock, flags);
-
- /* if no change, wait aborted for some reason */
- if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
- cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
- rc = -EIO;
- break;
- }
-
- /* check for change in caller specified modem input */
- if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
- (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
- (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
- (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
- rc = 0;
- break;
- }
-
- cprev = cnow;
- }
- remove_wait_queue(&info->status_event_wait_q, &wait);
- set_current_state(TASK_RUNNING);
- return rc;
-}
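
The same status counters also back the standard TIOCMIWAIT ioctl handled here:
the argument is a TIOCM_* bit mask passed by value, and the call sleeps until
one of the selected modem inputs changes. A minimal usage sketch (device path is
illustrative):

/* Illustrative: wait for a DCD or CTS transition, then read the line state. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	int fd = open("/dev/ttySLP0", O_RDWR | O_NONBLOCK);	/* hypothetical node */
	int status;

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0 &&
	    ioctl(fd, TIOCMGET, &status) == 0)
		printf("DCD=%d CTS=%d\n",
		       !!(status & TIOCM_CD), !!(status & TIOCM_CTS));
	close(fd);
	return 0;
}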
-
-/* return the state of the serial control and status signals
- */
-static int tiocmget(struct tty_struct *tty)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned int result;
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
-
- result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
- ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
- ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
- ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
- ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
- ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):%s tiocmget() value=%08X\n",
- __FILE__, __LINE__, info->device_name, result);
- return result;
-}
-
-/* set modem control signals (DTR/RTS)
- */
-static int tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):%s tiocmset(%x,%x)\n",
- __FILE__, __LINE__, info->device_name, set, clear);
-
- if (set & TIOCM_RTS)
- info->serial_signals |= SerialSignal_RTS;
- if (set & TIOCM_DTR)
- info->serial_signals |= SerialSignal_DTR;
- if (clear & TIOCM_RTS)
- info->serial_signals &= ~SerialSignal_RTS;
- if (clear & TIOCM_DTR)
- info->serial_signals &= ~SerialSignal_DTR;
-
- spin_lock_irqsave(&info->lock, flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
-
- return 0;
-}
-
-/* Set or clear transmit break condition
- *
- * Arguments: tty pointer to tty instance data
- * break_state -1=set break condition, 0=clear
- */
-static int mgslpc_break(struct tty_struct *tty, int break_state)
-{
- MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_break(%s,%d)\n",
- __FILE__, __LINE__, info->device_name, break_state);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
- return -EINVAL;
-
- spin_lock_irqsave(&info->lock, flags);
- if (break_state == -1)
- set_reg_bits(info, CHA+DAFO, BIT6);
- else
- clear_reg_bits(info, CHA+DAFO, BIT6);
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-static int mgslpc_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount)
-{
- MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
- struct mgsl_icount cnow; /* kernel counter temps */
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- cnow = info->icount;
- spin_unlock_irqrestore(&info->lock, flags);
-
- icount->cts = cnow.cts;
- icount->dsr = cnow.dsr;
- icount->rng = cnow.rng;
- icount->dcd = cnow.dcd;
- icount->rx = cnow.rx;
- icount->tx = cnow.tx;
- icount->frame = cnow.frame;
- icount->overrun = cnow.overrun;
- icount->parity = cnow.parity;
- icount->brk = cnow.brk;
- icount->buf_overrun = cnow.buf_overrun;
-
- return 0;
-}
-
-/* Service an IOCTL request
- *
- * Arguments:
- *
- * tty pointer to tty instance data
- * cmd IOCTL command code
- * arg command argument/context
- *
- * Return Value: 0 if success, otherwise error code
- */
-static int mgslpc_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
- void __user *argp = (void __user *)arg;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
- info->device_name, cmd);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
- return -ENODEV;
-
- if (cmd != TIOCMIWAIT) {
- if (tty_io_error(tty))
- return -EIO;
- }
-
- switch (cmd) {
- case MGSL_IOCGPARAMS:
- return get_params(info, argp);
- case MGSL_IOCSPARAMS:
- return set_params(info, argp, tty);
- case MGSL_IOCGTXIDLE:
- return get_txidle(info, argp);
- case MGSL_IOCSTXIDLE:
- return set_txidle(info, (int)arg);
- case MGSL_IOCGIF:
- return get_interface(info, argp);
- case MGSL_IOCSIF:
- return set_interface(info,(int)arg);
- case MGSL_IOCTXENABLE:
- return set_txenable(info,(int)arg, tty);
- case MGSL_IOCRXENABLE:
- return set_rxenable(info,(int)arg);
- case MGSL_IOCTXABORT:
- return tx_abort(info);
- case MGSL_IOCGSTATS:
- return get_stats(info, argp);
- case MGSL_IOCWAITEVENT:
- return wait_events(info, argp);
- case TIOCMIWAIT:
- return modem_input_wait(info,(int)arg);
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
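
The driver-specific commands all pass through this switch; MGSL_IOCGPARAMS and
MGSL_IOCSPARAMS, for example, copy an entire MGSL_PARAMS structure between user
space and info->params. A minimal user-space sketch (assumes the MGSL_PARAMS
layout from <linux/synclink.h>; the device path is illustrative):

/* Illustrative: read the current parameters, switch to async 19200 bps 8N1,
 * and write them back. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/synclink.h>

int main(void)
{
	int fd = open("/dev/ttySLP0", O_RDWR | O_NONBLOCK);	/* hypothetical node */
	MGSL_PARAMS params;

	if (fd < 0)
		return 1;
	if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
		params.mode = MGSL_MODE_ASYNC;
		params.data_rate = 19200;
		params.data_bits = 8;
		params.stop_bits = 1;
		params.parity = ASYNC_PARITY_NONE;
		if (ioctl(fd, MGSL_IOCSPARAMS, &params) == 0)
			printf("params updated\n");
	}
	close(fd);
	return 0;
}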
-
-/* Set new termios settings
- *
- * Arguments:
- *
- * tty pointer to tty structure
- * termios pointer to buffer to hold returned old termios
- */
-static void mgslpc_set_termios(struct tty_struct *tty,
- const struct ktermios *old_termios)
-{
- MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
- tty->driver->name);
-
- /* just return if nothing has changed */
- if ((tty->termios.c_cflag == old_termios->c_cflag)
- && (RELEVANT_IFLAG(tty->termios.c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
- mgslpc_change_params(info, tty);
-
- /* Handle transition to B0 status */
- if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
- info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- spin_lock_irqsave(&info->lock, flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-
- /* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
- info->serial_signals |= SerialSignal_DTR;
- if (!C_CRTSCTS(tty) || !tty_throttled(tty))
- info->serial_signals |= SerialSignal_RTS;
- spin_lock_irqsave(&info->lock, flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-
- /* Handle turning off CRTSCTS */
- if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
- tty->hw_stopped = 0;
- tx_release(tty);
- }
-}
-
-static void mgslpc_close(struct tty_struct *tty, struct file * filp)
-{
- MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
- struct tty_port *port = &info->port;
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close"))
- return;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
- __FILE__, __LINE__, info->device_name, port->count);
-
- if (tty_port_close_start(port, tty, filp) == 0)
- goto cleanup;
-
- if (tty_port_initialized(port))
- mgslpc_wait_until_sent(tty, info->timeout);
-
- mgslpc_flush_buffer(tty);
-
- tty_ldisc_flush(tty);
- shutdown(info, tty);
-
- tty_port_close_end(port, tty);
- tty_port_tty_set(port, NULL);
-cleanup:
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
- tty->driver->name, port->count);
-}
-
-/* Wait until the transmitter is empty.
- */
-static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long orig_jiffies, char_time;
-
- if (!info)
- return;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
- __FILE__, __LINE__, info->device_name);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
- return;
-
- if (!tty_port_initialized(&info->port))
- goto exit;
-
- orig_jiffies = jiffies;
-
- /* Set check interval to 1/5 of estimated time to
- * send a character, and make it at least 1. The check
- * interval should also be less than the timeout.
- * Note: use tight timings here to satisfy the NIST-PCTS.
- */
-
- if (info->params.data_rate) {
- char_time = info->timeout/(32 * 5);
- if (!char_time)
- char_time++;
- } else
- char_time = 1;
-
- if (timeout)
- char_time = min_t(unsigned long, char_time, timeout);
-
- if (info->params.mode == MGSL_MODE_HDLC) {
- while (info->tx_active) {
- msleep_interruptible(jiffies_to_msecs(char_time));
- if (signal_pending(current))
- break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
- break;
- }
- } else {
- while ((info->tx_count || info->tx_active) &&
- info->tx_enabled) {
- msleep_interruptible(jiffies_to_msecs(char_time));
- if (signal_pending(current))
- break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
- break;
- }
- }
-
-exit:
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
- __FILE__, __LINE__, info->device_name);
-}
-
-/* Called by tty_hangup() when a hangup is signaled.
- * This is the same as closing all open files for the port.
- */
-static void mgslpc_hangup(struct tty_struct *tty)
-{
- MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_hangup(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
- return;
-
- mgslpc_flush_buffer(tty);
- shutdown(info, tty);
- tty_port_hangup(&info->port);
-}
-
-static int carrier_raised(struct tty_port *port)
-{
- MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
-
- if (info->serial_signals & SerialSignal_DCD)
- return 1;
- return 0;
-}
-
-static void dtr_rts(struct tty_port *port, int onoff)
-{
- MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- if (onoff)
- info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
- else
- info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-
-static int mgslpc_open(struct tty_struct *tty, struct file * filp)
-{
- MGSLPC_INFO *info;
- struct tty_port *port;
- int retval, line;
- unsigned long flags;
-
- /* verify range of specified line number */
- line = tty->index;
- if (line >= mgslpc_device_count) {
- printk("%s(%d):mgslpc_open with invalid line #%d.\n",
- __FILE__, __LINE__, line);
- return -ENODEV;
- }
-
- /* find the info structure for the specified line */
- info = mgslpc_device_list;
- while(info && info->line != line)
- info = info->next_device;
- if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open"))
- return -ENODEV;
-
- port = &info->port;
- tty->driver_data = info;
- tty_port_tty_set(port, tty);
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
- __FILE__, __LINE__, tty->driver->name, port->count);
-
- spin_lock_irqsave(&info->netlock, flags);
- if (info->netcount) {
- retval = -EBUSY;
- spin_unlock_irqrestore(&info->netlock, flags);
- goto cleanup;
- }
- spin_lock(&port->lock);
- port->count++;
- spin_unlock(&port->lock);
- spin_unlock_irqrestore(&info->netlock, flags);
-
- if (port->count == 1) {
- /* 1st open on this device, init hardware */
- retval = startup(info, tty);
- if (retval < 0)
- goto cleanup;
- }
-
- retval = tty_port_block_til_ready(&info->port, tty, filp);
- if (retval) {
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):block_til_ready(%s) returned %d\n",
- __FILE__, __LINE__, info->device_name, retval);
- goto cleanup;
- }
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_open(%s) success\n",
- __FILE__, __LINE__, info->device_name);
- retval = 0;
-
-cleanup:
- return retval;
-}
-
-/*
- * /proc fs routines....
- */
-
-static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
-{
- char stat_buf[30];
- unsigned long flags;
-
- seq_printf(m, "%s:io:%04X irq:%d",
- info->device_name, info->io_base, info->irq_level);
-
- /* output current serial signal states */
- spin_lock_irqsave(&info->lock, flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
-
- stat_buf[0] = 0;
- stat_buf[1] = 0;
- if (info->serial_signals & SerialSignal_RTS)
- strcat(stat_buf, "|RTS");
- if (info->serial_signals & SerialSignal_CTS)
- strcat(stat_buf, "|CTS");
- if (info->serial_signals & SerialSignal_DTR)
- strcat(stat_buf, "|DTR");
- if (info->serial_signals & SerialSignal_DSR)
- strcat(stat_buf, "|DSR");
- if (info->serial_signals & SerialSignal_DCD)
- strcat(stat_buf, "|CD");
- if (info->serial_signals & SerialSignal_RI)
- strcat(stat_buf, "|RI");
-
- if (info->params.mode == MGSL_MODE_HDLC) {
- seq_printf(m, " HDLC txok:%d rxok:%d",
- info->icount.txok, info->icount.rxok);
- if (info->icount.txunder)
- seq_printf(m, " txunder:%d", info->icount.txunder);
- if (info->icount.txabort)
- seq_printf(m, " txabort:%d", info->icount.txabort);
- if (info->icount.rxshort)
- seq_printf(m, " rxshort:%d", info->icount.rxshort);
- if (info->icount.rxlong)
- seq_printf(m, " rxlong:%d", info->icount.rxlong);
- if (info->icount.rxover)
- seq_printf(m, " rxover:%d", info->icount.rxover);
- if (info->icount.rxcrc)
- seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
- } else {
- seq_printf(m, " ASYNC tx:%d rx:%d",
- info->icount.tx, info->icount.rx);
- if (info->icount.frame)
- seq_printf(m, " fe:%d", info->icount.frame);
- if (info->icount.parity)
- seq_printf(m, " pe:%d", info->icount.parity);
- if (info->icount.brk)
- seq_printf(m, " brk:%d", info->icount.brk);
- if (info->icount.overrun)
- seq_printf(m, " oe:%d", info->icount.overrun);
- }
-
- /* Append serial signal status to end */
- seq_printf(m, " %s\n", stat_buf+1);
-
- seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
- info->tx_active,info->bh_requested,info->bh_running,
- info->pending_bh);
-}
-
-/* Called to print information about devices
- */
-static int mgslpc_proc_show(struct seq_file *m, void *v)
-{
- MGSLPC_INFO *info;
-
- seq_printf(m, "synclink driver:%s\n", driver_version);
-
- info = mgslpc_device_list;
- while (info) {
- line_info(m, info);
- info = info->next_device;
- }
- return 0;
-}
-
-static int rx_alloc_buffers(MGSLPC_INFO *info)
-{
- /* each buffer has header and data */
- info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size;
-
- /* calculate total allocation size for 8 buffers */
- info->rx_buf_total_size = info->rx_buf_size * 8;
-
- /* limit total allocated memory */
- if (info->rx_buf_total_size > 0x10000)
- info->rx_buf_total_size = 0x10000;
-
- /* calculate number of buffers */
- info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size;
-
- info->rx_buf = kmalloc(info->rx_buf_total_size, GFP_KERNEL);
- if (info->rx_buf == NULL)
- return -ENOMEM;
-
- /* unused flag buffer to satisfy receive_buf calling interface */
- info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
- if (!info->flag_buf) {
- kfree(info->rx_buf);
- info->rx_buf = NULL;
- return -ENOMEM;
- }
-
- rx_reset_buffers(info);
- return 0;
-}
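
rx_alloc_buffers() carves a single allocation of at most 64 KB into
rx_buf_count fixed-size slots, each holding an RXBUF header followed by
max_frame_size data bytes. A quick worked example of that sizing (the header
size below is a placeholder, since sizeof(RXBUF) depends on the structure
definition):

/* Illustrative only: rx buffer sizing for max_frame_size = 4096, assuming
 * an 8-byte RXBUF header purely for the arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned int max_frame_size = 4096;
	unsigned int hdr = 8;				/* hypothetical sizeof(RXBUF) */
	unsigned int buf_size = hdr + max_frame_size;	/* 4104 */
	unsigned int total = buf_size * 8;		/* 32832 */

	if (total > 0x10000)
		total = 0x10000;			/* cap at 64 KB */
	printf("buffers: %u of %u bytes each\n", total / buf_size, buf_size);
	return 0;
}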
-
-static void rx_free_buffers(MGSLPC_INFO *info)
-{
- kfree(info->rx_buf);
- info->rx_buf = NULL;
- kfree(info->flag_buf);
- info->flag_buf = NULL;
-}
-
-static int claim_resources(MGSLPC_INFO *info)
-{
- if (rx_alloc_buffers(info) < 0) {
- printk("Can't allocate rx buffer %s\n", info->device_name);
- release_resources(info);
- return -ENODEV;
- }
- return 0;
-}
-
-static void release_resources(MGSLPC_INFO *info)
-{
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("release_resources(%s)\n", info->device_name);
- rx_free_buffers(info);
-}
-
-/* Add the specified device instance data structure to the
- * global linked list of devices and increment the device count.
- *
- * Arguments: info pointer to device instance data
- */
-static int mgslpc_add_device(MGSLPC_INFO *info)
-{
- MGSLPC_INFO *current_dev = NULL;
- struct device *tty_dev;
- int ret;
-
- info->next_device = NULL;
- info->line = mgslpc_device_count;
- sprintf(info->device_name,"ttySLP%d",info->line);
-
- if (info->line < MAX_DEVICE_COUNT) {
- if (maxframe[info->line])
- info->max_frame_size = maxframe[info->line];
- }
-
- mgslpc_device_count++;
-
- if (!mgslpc_device_list)
- mgslpc_device_list = info;
- else {
- current_dev = mgslpc_device_list;
- while (current_dev->next_device)
- current_dev = current_dev->next_device;
- current_dev->next_device = info;
- }
-
- if (info->max_frame_size < 4096)
- info->max_frame_size = 4096;
- else if (info->max_frame_size > 65535)
- info->max_frame_size = 65535;
-
- printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
- info->device_name, info->io_base, info->irq_level);
-
-#if SYNCLINK_GENERIC_HDLC
- ret = hdlcdev_init(info);
- if (ret != 0)
- goto failed;
-#endif
-
- tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
- &info->p_dev->dev);
- if (IS_ERR(tty_dev)) {
- ret = PTR_ERR(tty_dev);
-#if SYNCLINK_GENERIC_HDLC
- hdlcdev_exit(info);
-#endif
- goto failed;
- }
-
- return 0;
-
-failed:
- if (current_dev)
- current_dev->next_device = NULL;
- else
- mgslpc_device_list = NULL;
- mgslpc_device_count--;
- return ret;
-}
-
-static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
-{
- MGSLPC_INFO *info = mgslpc_device_list;
- MGSLPC_INFO *last = NULL;
-
- while(info) {
- if (info == remove_info) {
- if (last)
- last->next_device = info->next_device;
- else
- mgslpc_device_list = info->next_device;
- tty_unregister_device(serial_driver, info->line);
-#if SYNCLINK_GENERIC_HDLC
- hdlcdev_exit(info);
-#endif
- release_resources(info);
- tty_port_destroy(&info->port);
- kfree(info);
- mgslpc_device_count--;
- return;
- }
- last = info;
- info = info->next_device;
- }
-}
-
-static const struct pcmcia_device_id mgslpc_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids);
-
-static struct pcmcia_driver mgslpc_driver = {
- .owner = THIS_MODULE,
- .name = "synclink_cs",
- .probe = mgslpc_probe,
- .remove = mgslpc_detach,
- .id_table = mgslpc_ids,
- .suspend = mgslpc_suspend,
- .resume = mgslpc_resume,
-};
-
-static const struct tty_operations mgslpc_ops = {
- .open = mgslpc_open,
- .close = mgslpc_close,
- .write = mgslpc_write,
- .put_char = mgslpc_put_char,
- .flush_chars = mgslpc_flush_chars,
- .write_room = mgslpc_write_room,
- .chars_in_buffer = mgslpc_chars_in_buffer,
- .flush_buffer = mgslpc_flush_buffer,
- .ioctl = mgslpc_ioctl,
- .throttle = mgslpc_throttle,
- .unthrottle = mgslpc_unthrottle,
- .send_xchar = mgslpc_send_xchar,
- .break_ctl = mgslpc_break,
- .wait_until_sent = mgslpc_wait_until_sent,
- .set_termios = mgslpc_set_termios,
- .stop = tx_pause,
- .start = tx_release,
- .hangup = mgslpc_hangup,
- .tiocmget = tiocmget,
- .tiocmset = tiocmset,
- .get_icount = mgslpc_get_icount,
- .proc_show = mgslpc_proc_show,
-};
-
-static int __init synclink_cs_init(void)
-{
- int rc;
-
- if (break_on_load) {
- mgslpc_get_text_ptr();
- BREAKPOINT();
- }
-
- serial_driver = tty_alloc_driver(MAX_DEVICE_COUNT,
- TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(serial_driver)) {
- rc = PTR_ERR(serial_driver);
- goto err;
- }
-
- /* Initialize the tty_driver structure */
- serial_driver->driver_name = "synclink_cs";
- serial_driver->name = "ttySLP";
- serial_driver->major = ttymajor;
- serial_driver->minor_start = 64;
- serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- serial_driver->subtype = SERIAL_TYPE_NORMAL;
- serial_driver->init_termios = tty_std_termios;
- serial_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- tty_set_operations(serial_driver, &mgslpc_ops);
-
- rc = tty_register_driver(serial_driver);
- if (rc < 0) {
- printk(KERN_ERR "%s(%d):Couldn't register serial driver\n",
- __FILE__, __LINE__);
- goto err_put_tty;
- }
-
- rc = pcmcia_register_driver(&mgslpc_driver);
- if (rc < 0)
- goto err_unreg_tty;
-
- printk(KERN_INFO "%s %s, tty major#%d\n", driver_name, driver_version,
- serial_driver->major);
-
- return 0;
-err_unreg_tty:
- tty_unregister_driver(serial_driver);
-err_put_tty:
- tty_driver_kref_put(serial_driver);
-err:
- return rc;
-}
-
-static void __exit synclink_cs_exit(void)
-{
- pcmcia_unregister_driver(&mgslpc_driver);
- tty_unregister_driver(serial_driver);
- tty_driver_kref_put(serial_driver);
-}
-
-module_init(synclink_cs_init);
-module_exit(synclink_cs_exit);
-
-static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate)
-{
- unsigned int M, N;
- unsigned char val;
-
- /* note: standard BRG mode is broken in the V3.2 chip,
- * so enhanced mode is always used
- */
-
- if (rate) {
- N = 3686400 / rate;
- if (!N)
- N = 1;
- N >>= 1;
- for (M = 1; N > 64 && M < 16; M++)
- N >>= 1;
- N--;
-
- /* BGR[5..0] = N
- * BGR[9..6] = M
- * BGR[7..0] contained in BGR register
- * BGR[9..8] contained in CCR2[7..6]
- * divisor = (N+1)*2^M
- *
- * Note: M *must* not be zero (causes asymmetric duty cycle)
- */
- write_reg(info, (unsigned char) (channel + BGR),
- (unsigned char) ((M << 6) + N));
- val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f;
- val |= ((M << 4) & 0xc0);
- write_reg(info, (unsigned char) (channel + CCR2), val);
- }
-}
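
The loop above searches for M and N such that the generated rate is
3686400 / ((N + 1) * 2^M), with N limited to six bits and M kept non-zero. A
stand-alone sketch of the same search (the 9600 bps rate is just an example):

/* Illustrative only: the M/N search performed by mgslpc_set_rate() above,
 * reproduced as a plain program for rate = 9600 bps. */
#include <stdio.h>

int main(void)
{
	unsigned int rate = 9600;
	unsigned int N = 3686400 / rate;	/* total divisor from 3.6864 MHz */
	unsigned int M;

	if (!N)
		N = 1;
	N >>= 1;
	for (M = 1; N > 64 && M < 16; M++)
		N >>= 1;
	N--;
	/* divisor = (N + 1) * 2^M; prints M=3 N=47 -> 9600 bps */
	printf("M=%u N=%u -> %u bps\n", M, N, 3686400 / ((N + 1) << M));
	return 0;
}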
-
-/* Enable the AUX clock output at the specified frequency.
- */
-static void enable_auxclk(MGSLPC_INFO *info)
-{
- unsigned char val;
-
- /* MODE
- *
- * 07..06 MDS[1..0] 10 = transparent HDLC mode
- * 05 ADM Address Mode, 0 = no addr recognition
- * 04 TMD Timer Mode, 0 = external
- * 03 RAC Receiver Active, 0 = inactive
- * 02 RTS 0=RTS active during xmit, 1=RTS always active
- * 01 TRS Timer Resolution, 1=512
- * 00 TLP Test Loop, 0 = no loop
- *
- * 1000 0010
- */
- val = 0x82;
-
- /* channel B RTS is used to enable AUXCLK driver on SP505 */
- if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
- val |= BIT2;
- write_reg(info, CHB + MODE, val);
-
- /* CCR0
- *
- * 07 PU Power Up, 1=active, 0=power down
- * 06 MCE Master Clock Enable, 1=enabled
- * 05 Reserved, 0
- * 04..02 SC[2..0] Encoding
- * 01..00 SM[1..0] Serial Mode, 00=HDLC
- *
- * 11000000
- */
- write_reg(info, CHB + CCR0, 0xc0);
-
- /* CCR1
- *
- * 07 SFLG Shared Flag, 0 = disable shared flags
- * 06 GALP Go Active On Loop, 0 = not used
- * 05 GLP Go On Loop, 0 = not used
- * 04 ODS Output Driver Select, 1=TxD is push-pull output
- * 03 ITF Interframe Time Fill, 0=mark, 1=flag
- * 02..00 CM[2..0] Clock Mode
- *
- * 0001 0111
- */
- write_reg(info, CHB + CCR1, 0x17);
-
- /* CCR2 (Channel B)
- *
- * 07..06 BGR[9..8] Baud rate bits 9..8
- * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
- * 04 SSEL Clock source select, 1=submode b
- * 03 TOE 0=TxCLK is input, 1=TxCLK is output
- * 02 RWX Read/Write Exchange 0=disabled
- * 01 C32, CRC select, 0=CRC-16, 1=CRC-32
- * 00 DIV, data inversion 0=disabled, 1=enabled
- *
- * 0011 1000
- */
- if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
- write_reg(info, CHB + CCR2, 0x38);
- else
- write_reg(info, CHB + CCR2, 0x30);
-
- /* CCR4
- *
- * 07 MCK4 Master Clock Divide by 4, 1=enabled
- * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
- * 05 TST1 Test Pin, 0=normal operation
- * 04 ICD Invert Carrier Detect, 1=enabled (active low)
- * 03..02 Reserved, must be 0
- * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes
- *
- * 0101 0000
- */
- write_reg(info, CHB + CCR4, 0x50);
-
- /* if auxclk not enabled, set internal BRG so
- * CTS transitions can be detected (requires TxC)
- */
- if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
- mgslpc_set_rate(info, CHB, info->params.clock_speed);
- else
- mgslpc_set_rate(info, CHB, 921600);
-}
-
-static void loopback_enable(MGSLPC_INFO *info)
-{
- unsigned char val;
-
- /* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */
- val = read_reg(info, CHA + CCR1) | (BIT2 | BIT1 | BIT0);
- write_reg(info, CHA + CCR1, val);
-
- /* CCR2:04 SSEL Clock source select, 1=submode b */
- val = read_reg(info, CHA + CCR2) | (BIT4 | BIT5);
- write_reg(info, CHA + CCR2, val);
-
- /* set LinkSpeed if available, otherwise default to 1.8432 Mbps */
- if (info->params.clock_speed)
- mgslpc_set_rate(info, CHA, info->params.clock_speed);
- else
- mgslpc_set_rate(info, CHA, 1843200);
-
- /* MODE:00 TLP Test Loop, 1=loopback enabled */
- val = read_reg(info, CHA + MODE) | BIT0;
- write_reg(info, CHA + MODE, val);
-}
-
-static void hdlc_mode(MGSLPC_INFO *info)
-{
- unsigned char val;
- unsigned char clkmode, clksubmode;
-
- /* disable all interrupts */
- irq_disable(info, CHA, 0xffff);
- irq_disable(info, CHB, 0xffff);
- port_irq_disable(info, 0xff);
-
- /* assume clock mode 0a, rcv=RxC xmt=TxC */
- clkmode = clksubmode = 0;
- if (info->params.flags & HDLC_FLAG_RXC_DPLL
- && info->params.flags & HDLC_FLAG_TXC_DPLL) {
- /* clock mode 7a, rcv = DPLL, xmt = DPLL */
- clkmode = 7;
- } else if (info->params.flags & HDLC_FLAG_RXC_BRG
- && info->params.flags & HDLC_FLAG_TXC_BRG) {
- /* clock mode 7b, rcv = BRG, xmt = BRG */
- clkmode = 7;
- clksubmode = 1;
- } else if (info->params.flags & HDLC_FLAG_RXC_DPLL) {
- if (info->params.flags & HDLC_FLAG_TXC_BRG) {
- /* clock mode 6b, rcv = DPLL, xmt = BRG/16 */
- clkmode = 6;
- clksubmode = 1;
- } else {
- /* clock mode 6a, rcv = DPLL, xmt = TxC */
- clkmode = 6;
- }
- } else if (info->params.flags & HDLC_FLAG_TXC_BRG) {
- /* clock mode 0b, rcv = RxC, xmt = BRG */
- clksubmode = 1;
- }
-
- /* MODE
- *
- * 07..06 MDS[1..0] 10 = transparent HDLC mode
- * 05 ADM Address Mode, 0 = no addr recognition
- * 04 TMD Timer Mode, 0 = external
- * 03 RAC Receiver Active, 0 = inactive
- * 02 RTS 0=RTS active during xmit, 1=RTS always active
- * 01 TRS Timer Resolution, 1=512
- * 00 TLP Test Loop, 0 = no loop
- *
- * 1000 0010
- */
- val = 0x82;
- if (info->params.loopback)
- val |= BIT0;
-
- /* preserve RTS state */
- if (info->serial_signals & SerialSignal_RTS)
- val |= BIT2;
- write_reg(info, CHA + MODE, val);
-
- /* CCR0
- *
- * 07 PU Power Up, 1=active, 0=power down
- * 06 MCE Master Clock Enable, 1=enabled
- * 05 Reserved, 0
- * 04..02 SC[2..0] Encoding
- * 01..00 SM[1..0] Serial Mode, 00=HDLC
- *
- * 11000000
- */
- val = 0xc0;
- switch (info->params.encoding)
- {
- case HDLC_ENCODING_NRZI:
- val |= BIT3;
- break;
- case HDLC_ENCODING_BIPHASE_SPACE:
- val |= BIT4;
- break; // FM0
- case HDLC_ENCODING_BIPHASE_MARK:
- val |= BIT4 | BIT2;
- break; // FM1
- case HDLC_ENCODING_BIPHASE_LEVEL:
- val |= BIT4 | BIT3;
- break; // Manchester
- }
- write_reg(info, CHA + CCR0, val);
-
- /* CCR1
- *
- * 07 SFLG Shared Flag, 0 = disable shared flags
- * 06 GALP Go Active On Loop, 0 = not used
- * 05 GLP Go On Loop, 0 = not used
- * 04 ODS Output Driver Select, 1=TxD is push-pull output
- * 03 ITF Interframe Time Fill, 0=mark, 1=flag
- * 02..00 CM[2..0] Clock Mode
- *
- * 0001 0000
- */
- val = 0x10 + clkmode;
- write_reg(info, CHA + CCR1, val);
-
- /* CCR2
- *
- * 07..06 BGR[9..8] Baud rate bits 9..8
- * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
- * 04 SSEL Clock source select, 1=submode b
- *	03      TOE 0=TxCLK is input, 1=TxCLK is output
- * 02 RWX Read/Write Exchange 0=disabled
- * 01 C32, CRC select, 0=CRC-16, 1=CRC-32
- * 00 DIV, data inversion 0=disabled, 1=enabled
- *
- * 0000 0000
- */
- val = 0x00;
- if (clkmode == 2 || clkmode == 3 || clkmode == 6
- || clkmode == 7 || (clkmode == 0 && clksubmode == 1))
- val |= BIT5;
- if (clksubmode)
- val |= BIT4;
- if (info->params.crc_type == HDLC_CRC_32_CCITT)
- val |= BIT1;
- if (info->params.encoding == HDLC_ENCODING_NRZB)
- val |= BIT0;
- write_reg(info, CHA + CCR2, val);
-
- /* CCR3
- *
- * 07..06 PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8
- * 05 EPT Enable preamble transmission, 1=enabled
- * 04 RADD Receive address pushed to FIFO, 0=disabled
- * 03 CRL CRC Reset Level, 0=FFFF
- * 02 RCRC Rx CRC 0=On 1=Off
- * 01 TCRC Tx CRC 0=On 1=Off
- * 00 PSD DPLL Phase Shift Disable
- *
- * 0000 0000
- */
- val = 0x00;
- if (info->params.crc_type == HDLC_CRC_NONE)
- val |= BIT2 | BIT1;
- if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
- val |= BIT5;
- switch (info->params.preamble_length)
- {
- case HDLC_PREAMBLE_LENGTH_16BITS:
- val |= BIT6;
- break;
- case HDLC_PREAMBLE_LENGTH_32BITS:
- val |= BIT6;
- break;
- case HDLC_PREAMBLE_LENGTH_64BITS:
- val |= BIT7 | BIT6;
- break;
- }
- write_reg(info, CHA + CCR3, val);
-
- /* PRE - Preamble pattern */
- val = 0;
- switch (info->params.preamble)
- {
- case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
- case HDLC_PREAMBLE_PATTERN_10: val = 0xaa; break;
- case HDLC_PREAMBLE_PATTERN_01: val = 0x55; break;
- case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break;
- }
- write_reg(info, CHA + PRE, val);
-
- /* CCR4
- *
- * 07 MCK4 Master Clock Divide by 4, 1=enabled
- * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
- * 05 TST1 Test Pin, 0=normal operation
- *	04      ICD Invert Carrier Detect, 1=enabled (active low)
- * 03..02 Reserved, must be 0
- * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes
- *
- * 0101 0000
- */
- val = 0x50;
- write_reg(info, CHA + CCR4, val);
- if (info->params.flags & HDLC_FLAG_RXC_DPLL)
- mgslpc_set_rate(info, CHA, info->params.clock_speed * 16);
- else
- mgslpc_set_rate(info, CHA, info->params.clock_speed);
-
- /* RLCR Receive length check register
- *
- * 7 1=enable receive length check
- * 6..0 Max frame length = (RL + 1) * 32
- */
- write_reg(info, CHA + RLCR, 0);
-
- /* XBCH Transmit Byte Count High
- *
- * 07 DMA mode, 0 = interrupt driven
- * 06 NRM, 0=ABM (ignored)
- * 05 CAS Carrier Auto Start
- * 04 XC Transmit Continuously (ignored)
- * 03..00 XBC[10..8] Transmit byte count bits 10..8
- *
- * 0000 0000
- */
- val = 0x00;
- if (info->params.flags & HDLC_FLAG_AUTO_DCD)
- val |= BIT5;
- write_reg(info, CHA + XBCH, val);
- enable_auxclk(info);
- if (info->params.loopback || info->testing_irq)
- loopback_enable(info);
- if (info->params.flags & HDLC_FLAG_AUTO_CTS)
- {
- irq_enable(info, CHB, IRQ_CTS);
- /* PVR[3] 1=AUTO CTS active */
- set_reg_bits(info, CHA + PVR, BIT3);
- } else
- clear_reg_bits(info, CHA + PVR, BIT3);
-
- irq_enable(info, CHA,
- IRQ_RXEOM | IRQ_RXFIFO | IRQ_ALLSENT |
- IRQ_UNDERRUN | IRQ_TXFIFO);
- issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
- wait_command_complete(info, CHA);
- read_reg16(info, CHA + ISR); /* clear pending IRQs */
-
- /* Master clock mode enabled above to allow reset commands
- * to complete even if no data clocks are present.
- *
- * Disable master clock mode for normal communications because
- * V3.2 of the ESCC2 has a bug that prevents the transmit all sent
- * IRQ when in master clock mode.
- *
- * Leave master clock mode enabled for IRQ test because the
- * timer IRQ used by the test can only happen in master clock mode.
- */
- if (!info->testing_irq)
- clear_reg_bits(info, CHA + CCR0, BIT6);
-
- tx_set_idle(info);
-
- tx_stop(info);
- rx_stop(info);
-}
-
-static void rx_stop(MGSLPC_INFO *info)
-{
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_stop(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- /* MODE:03 RAC Receiver Active, 0=inactive */
- clear_reg_bits(info, CHA + MODE, BIT3);
-
- info->rx_enabled = false;
- info->rx_overflow = false;
-}
-
-static void rx_start(MGSLPC_INFO *info)
-{
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_start(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- rx_reset_buffers(info);
- info->rx_enabled = false;
- info->rx_overflow = false;
-
- /* MODE:03 RAC Receiver Active, 1=active */
- set_reg_bits(info, CHA + MODE, BIT3);
-
- info->rx_enabled = true;
-}
-
-static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):tx_start(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- if (info->tx_count) {
- /* If auto RTS enabled and RTS is inactive, then assert */
- /* RTS and set a flag indicating that the driver should */
- /* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = false;
-
- if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
- get_signals(info);
- if (!(info->serial_signals & SerialSignal_RTS)) {
- info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
- info->drop_rts_on_tx_done = true;
- }
- }
-
- if (info->params.mode == MGSL_MODE_ASYNC) {
- if (!info->tx_active) {
- info->tx_active = true;
- tx_ready(info, tty);
- }
- } else {
- info->tx_active = true;
- tx_ready(info, tty);
- mod_timer(&info->tx_timer, jiffies +
- msecs_to_jiffies(5000));
- }
- }
-
- if (!info->tx_enabled)
- info->tx_enabled = true;
-}
-
-static void tx_stop(MGSLPC_INFO *info)
-{
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):tx_stop(%s)\n",
- __FILE__, __LINE__, info->device_name);
-
- del_timer(&info->tx_timer);
-
- info->tx_enabled = false;
- info->tx_active = false;
-}
-
-/* Reset the adapter to a known state and prepare it for further use.
- */
-static void reset_device(MGSLPC_INFO *info)
-{
- /* power up both channels (set BIT7) */
- write_reg(info, CHA + CCR0, 0x80);
- write_reg(info, CHB + CCR0, 0x80);
- write_reg(info, CHA + MODE, 0);
- write_reg(info, CHB + MODE, 0);
-
- /* disable all interrupts */
- irq_disable(info, CHA, 0xffff);
- irq_disable(info, CHB, 0xffff);
- port_irq_disable(info, 0xff);
-
- /* PCR Port Configuration Register
- *
- * 07..04 DEC[3..0] Serial I/F select outputs
- * 03 output, 1=AUTO CTS control enabled
- * 02 RI Ring Indicator input 0=active
- * 01 DSR input 0=active
- * 00 DTR output 0=active
- *
- * 0000 0110
- */
- write_reg(info, PCR, 0x06);
-
- /* PVR Port Value Register
- *
- * 07..04 DEC[3..0] Serial I/F select (0000=disabled)
- * 03 AUTO CTS output 1=enabled
- * 02 RI Ring Indicator input
- * 01 DSR input
- * 00 DTR output (1=inactive)
- *
- * 0000 0001
- */
-// write_reg(info, PVR, PVR_DTR);
-
- /* IPC Interrupt Port Configuration
- *
- * 07 VIS 1=Masked interrupts visible
- * 06..05 Reserved, 0
- * 04..03 SLA Slave address, 00 ignored
- * 02 CASM Cascading Mode, 1=daisy chain
- * 01..00 IC[1..0] Interrupt Config, 01=push-pull output, active low
- *
- * 0000 0101
- */
- write_reg(info, IPC, 0x05);
-}
-
-static void async_mode(MGSLPC_INFO *info)
-{
- unsigned char val;
-
- /* disable all interrupts */
- irq_disable(info, CHA, 0xffff);
- irq_disable(info, CHB, 0xffff);
- port_irq_disable(info, 0xff);
-
- /* MODE
- *
- * 07 Reserved, 0
- * 06 FRTS RTS State, 0=active
- * 05 FCTS Flow Control on CTS
- * 04 FLON Flow Control Enable
- * 03 RAC Receiver Active, 0 = inactive
- * 02 RTS 0=Auto RTS, 1=manual RTS
- * 01 TRS Timer Resolution, 1=512
- * 00 TLP Test Loop, 0 = no loop
- *
- * 0000 0110
- */
- val = 0x06;
- if (info->params.loopback)
- val |= BIT0;
-
- /* preserve RTS state */
- if (!(info->serial_signals & SerialSignal_RTS))
- val |= BIT6;
- write_reg(info, CHA + MODE, val);
-
- /* CCR0
- *
- * 07 PU Power Up, 1=active, 0=power down
- * 06 MCE Master Clock Enable, 1=enabled
- * 05 Reserved, 0
- * 04..02 SC[2..0] Encoding, 000=NRZ
- * 01..00 SM[1..0] Serial Mode, 11=Async
- *
- * 1000 0011
- */
- write_reg(info, CHA + CCR0, 0x83);
-
- /* CCR1
- *
- * 07..05 Reserved, 0
- * 04 ODS Output Driver Select, 1=TxD is push-pull output
- * 03 BCR Bit Clock Rate, 1=16x
- * 02..00 CM[2..0] Clock Mode, 111=BRG
- *
- * 0001 1111
- */
- write_reg(info, CHA + CCR1, 0x1f);
-
- /* CCR2 (channel A)
- *
- * 07..06 BGR[9..8] Baud rate bits 9..8
- * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
- * 04 SSEL Clock source select, 1=submode b
- *	03      TOE 0=TxCLK is input, 1=TxCLK is output
- * 02 RWX Read/Write Exchange 0=disabled
- * 01 Reserved, 0
- * 00 DIV, data inversion 0=disabled, 1=enabled
- *
- * 0001 0000
- */
- write_reg(info, CHA + CCR2, 0x10);
-
- /* CCR3
- *
- * 07..01 Reserved, 0
- * 00 PSD DPLL Phase Shift Disable
- *
- * 0000 0000
- */
- write_reg(info, CHA + CCR3, 0);
-
- /* CCR4
- *
- * 07 MCK4 Master Clock Divide by 4, 1=enabled
- * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
- * 05 TST1 Test Pin, 0=normal operation
- *	04      ICD Invert Carrier Detect, 1=enabled (active low)
- * 03..00 Reserved, must be 0
- *
- * 0101 0000
- */
- write_reg(info, CHA + CCR4, 0x50);
- mgslpc_set_rate(info, CHA, info->params.data_rate * 16);
-
- /* DAFO Data Format
- *
- * 07 Reserved, 0
- * 06 XBRK transmit break, 0=normal operation
- * 05 Stop bits (0=1, 1=2)
- * 04..03 PAR[1..0] Parity (01=odd, 10=even)
- * 02 PAREN Parity Enable
- * 01..00 CHL[1..0] Character Length (00=8, 01=7)
- *
- */
- val = 0x00;
- if (info->params.data_bits != 8)
- val |= BIT0; /* 7 bits */
- if (info->params.stop_bits != 1)
- val |= BIT5;
- if (info->params.parity != ASYNC_PARITY_NONE)
- {
- val |= BIT2; /* Parity enable */
- if (info->params.parity == ASYNC_PARITY_ODD)
- val |= BIT3;
- else
- val |= BIT4;
- }
- write_reg(info, CHA + DAFO, val);
-
- /* RFC Rx FIFO Control
- *
- * 07 Reserved, 0
- * 06 DPS, 1=parity bit not stored in data byte
- * 05 DXS, 0=all data stored in FIFO (including XON/XOFF)
- * 04 RFDF Rx FIFO Data Format, 1=status byte stored in FIFO
- * 03..02 RFTH[1..0], rx threshold, 11=16 status + 16 data byte
- * 01 Reserved, 0
- * 00 TCDE Terminate Char Detect Enable, 0=disabled
- *
- * 0101 1100
- */
- write_reg(info, CHA + RFC, 0x5c);
-
- /* RLCR Receive length check register
- *
- * Max frame length = (RL + 1) * 32
- */
- write_reg(info, CHA + RLCR, 0);
-
- /* XBCH Transmit Byte Count High
- *
- * 07 DMA mode, 0 = interrupt driven
- * 06 NRM, 0=ABM (ignored)
- * 05 CAS Carrier Auto Start
- * 04 XC Transmit Continuously (ignored)
- * 03..00 XBC[10..8] Transmit byte count bits 10..8
- *
- * 0000 0000
- */
- val = 0x00;
- if (info->params.flags & HDLC_FLAG_AUTO_DCD)
- val |= BIT5;
- write_reg(info, CHA + XBCH, val);
- if (info->params.flags & HDLC_FLAG_AUTO_CTS)
- irq_enable(info, CHA, IRQ_CTS);
-
- /* MODE:03 RAC Receiver Active, 1=active */
- set_reg_bits(info, CHA + MODE, BIT3);
- enable_auxclk(info);
- if (info->params.flags & HDLC_FLAG_AUTO_CTS) {
- irq_enable(info, CHB, IRQ_CTS);
- /* PVR[3] 1=AUTO CTS active */
- set_reg_bits(info, CHA + PVR, BIT3);
- } else
- clear_reg_bits(info, CHA + PVR, BIT3);
- irq_enable(info, CHA,
- IRQ_RXEOM | IRQ_RXFIFO | IRQ_BREAK_ON | IRQ_RXTIME |
- IRQ_ALLSENT | IRQ_TXFIFO);
- issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
- wait_command_complete(info, CHA);
- read_reg16(info, CHA + ISR); /* clear pending IRQs */
-}
-
-/* Set the HDLC idle mode for the transmitter.
- */
-static void tx_set_idle(MGSLPC_INFO *info)
-{
-	/* Note: ESCC2 only supports flags and ones idle modes */
- if (info->idle_mode == HDLC_TXIDLE_FLAGS)
- set_reg_bits(info, CHA + CCR1, BIT3);
- else
- clear_reg_bits(info, CHA + CCR1, BIT3);
-}
-
-/* get state of the V24 status (input) signals.
- */
-static void get_signals(MGSLPC_INFO *info)
-{
- unsigned char status = 0;
-
- /* preserve RTS and DTR */
- info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
-
- if (read_reg(info, CHB + VSTR) & BIT7)
- info->serial_signals |= SerialSignal_DCD;
- if (read_reg(info, CHB + STAR) & BIT1)
- info->serial_signals |= SerialSignal_CTS;
-
- status = read_reg(info, CHA + PVR);
- if (!(status & PVR_RI))
- info->serial_signals |= SerialSignal_RI;
- if (!(status & PVR_DSR))
- info->serial_signals |= SerialSignal_DSR;
-}
-
-/* Set the state of RTS and DTR based on contents of
- * serial_signals member of device extension.
- */
-static void set_signals(MGSLPC_INFO *info)
-{
- unsigned char val;
-
- val = read_reg(info, CHA + MODE);
- if (info->params.mode == MGSL_MODE_ASYNC) {
- if (info->serial_signals & SerialSignal_RTS)
- val &= ~BIT6;
- else
- val |= BIT6;
- } else {
- if (info->serial_signals & SerialSignal_RTS)
- val |= BIT2;
- else
- val &= ~BIT2;
- }
- write_reg(info, CHA + MODE, val);
-
- if (info->serial_signals & SerialSignal_DTR)
- clear_reg_bits(info, CHA + PVR, PVR_DTR);
- else
- set_reg_bits(info, CHA + PVR, PVR_DTR);
-}
-
-static void rx_reset_buffers(MGSLPC_INFO *info)
-{
- RXBUF *buf;
- int i;
-
- info->rx_put = 0;
- info->rx_get = 0;
- info->rx_frame_count = 0;
- for (i=0 ; i < info->rx_buf_count ; i++) {
- buf = (RXBUF*)(info->rx_buf + (i * info->rx_buf_size));
- buf->status = buf->count = 0;
- }
-}
-
-/* Attempt to return a received HDLC frame
- * Only frames received without errors are returned.
- *
- * Returns true if frame returned, otherwise false
- */
-static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
-{
- unsigned short status;
- RXBUF *buf;
- unsigned int framesize = 0;
- unsigned long flags;
- bool return_frame = false;
-
- if (info->rx_frame_count == 0)
- return false;
-
- buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size));
-
- status = buf->status;
-
- /* 07 VFR 1=valid frame
- * 06 RDO 1=data overrun
- * 05 CRC 1=OK, 0=error
- * 04 RAB 1=frame aborted
- */
- if ((status & 0xf0) != 0xA0) {
- if (!(status & BIT7) || (status & BIT4))
- info->icount.rxabort++;
- else if (status & BIT6)
- info->icount.rxover++;
- else if (!(status & BIT5)) {
- info->icount.rxcrc++;
- if (info->params.crc_type & HDLC_CRC_RETURN_EX)
- return_frame = true;
- }
- framesize = 0;
-#if SYNCLINK_GENERIC_HDLC
- {
- info->netdev->stats.rx_errors++;
- info->netdev->stats.rx_frame_errors++;
- }
-#endif
- } else
- return_frame = true;
-
- if (return_frame)
- framesize = buf->count;
-
- if (debug_level >= DEBUG_LEVEL_BH)
- printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
- __FILE__, __LINE__, info->device_name, status, framesize);
-
- if (debug_level >= DEBUG_LEVEL_DATA)
- trace_block(info, buf->data, framesize, 0);
-
- if (framesize) {
- if ((info->params.crc_type & HDLC_CRC_RETURN_EX &&
- framesize+1 > info->max_frame_size) ||
- framesize > info->max_frame_size)
- info->icount.rxlong++;
- else {
- if (status & BIT5)
- info->icount.rxok++;
-
- if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
- *(buf->data + framesize) = status & BIT5 ? RX_OK:RX_CRC_ERROR;
- ++framesize;
- }
-
-#if SYNCLINK_GENERIC_HDLC
- if (info->netcount)
- hdlcdev_rx(info, buf->data, framesize);
- else
-#endif
- ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize);
- }
- }
-
- spin_lock_irqsave(&info->lock, flags);
- buf->status = buf->count = 0;
- info->rx_frame_count--;
- info->rx_get++;
- if (info->rx_get >= info->rx_buf_count)
- info->rx_get = 0;
- spin_unlock_irqrestore(&info->lock, flags);
-
- return true;
-}
-
-static bool register_test(MGSLPC_INFO *info)
-{
- static unsigned char patterns[] =
- { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
- static unsigned int count = ARRAY_SIZE(patterns);
- unsigned int i;
- bool rc = true;
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- reset_device(info);
-
- for (i = 0; i < count; i++) {
- write_reg(info, XAD1, patterns[i]);
- write_reg(info, XAD2, patterns[(i + 1) % count]);
- if ((read_reg(info, XAD1) != patterns[i]) ||
- (read_reg(info, XAD2) != patterns[(i + 1) % count])) {
- rc = false;
- break;
- }
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
- return rc;
-}
-
-static bool irq_test(MGSLPC_INFO *info)
-{
- unsigned long end_time;
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- reset_device(info);
-
- info->testing_irq = true;
- hdlc_mode(info);
-
- info->irq_occurred = false;
-
- /* init hdlc mode */
-
- irq_enable(info, CHA, IRQ_TIMER);
- write_reg(info, CHA + TIMR, 0); /* 512 cycles */
- issue_command(info, CHA, CMD_START_TIMER);
-
- spin_unlock_irqrestore(&info->lock, flags);
-
- end_time=100;
- while(end_time-- && !info->irq_occurred) {
- msleep_interruptible(10);
- }
-
- info->testing_irq = false;
-
- spin_lock_irqsave(&info->lock, flags);
- reset_device(info);
- spin_unlock_irqrestore(&info->lock, flags);
-
- return info->irq_occurred;
-}
-
-static int adapter_test(MGSLPC_INFO *info)
-{
- if (!register_test(info)) {
- info->init_error = DiagStatus_AddressFailure;
- printk("%s(%d):Register test failure for device %s Addr=%04X\n",
- __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
- return -ENODEV;
- }
-
- if (!irq_test(info)) {
- info->init_error = DiagStatus_IrqFailure;
- printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
- __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
- return -ENODEV;
- }
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):device %s passed diagnostics\n",
- __FILE__, __LINE__, info->device_name);
- return 0;
-}
-
-static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
-{
- int i;
- int linecount;
- if (xmit)
- printk("%s tx data:\n", info->device_name);
- else
- printk("%s rx data:\n", info->device_name);
-
- while(count) {
- if (count > 16)
- linecount = 16;
- else
- linecount = count;
-
- for(i=0;i<linecount;i++)
- printk("%02X ", (unsigned char)data[i]);
- for(;i<17;i++)
- printk(" ");
- for(i=0;i<linecount;i++) {
- if (data[i]>=040 && data[i]<=0176)
- printk("%c", data[i]);
- else
- printk(".");
- }
- printk("\n");
-
- data += linecount;
- count -= linecount;
- }
-}
-
-/* HDLC frame time out
- * update stats and do tx completion processing
- */
-static void tx_timeout(struct timer_list *t)
-{
- MGSLPC_INFO *info = from_timer(info, t, tx_timer);
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):tx_timeout(%s)\n",
- __FILE__, __LINE__, info->device_name);
- if (info->tx_active &&
- info->params.mode == MGSL_MODE_HDLC) {
- info->icount.txtimeout++;
- }
- spin_lock_irqsave(&info->lock, flags);
- info->tx_active = false;
- info->tx_count = info->tx_put = info->tx_get = 0;
-
- spin_unlock_irqrestore(&info->lock, flags);
-
-#if SYNCLINK_GENERIC_HDLC
- if (info->netcount)
- hdlcdev_tx_done(info);
- else
-#endif
- {
- struct tty_struct *tty = tty_port_tty_get(&info->port);
- bh_transmit(info, tty);
- tty_kref_put(tty);
- }
-}
-
-#if SYNCLINK_GENERIC_HDLC
-
-/**
- * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
- * set encoding and frame check sequence (FCS) options
- *
- * dev pointer to network device structure
- * encoding serial encoding setting
- * parity FCS setting
- *
- * returns 0 if success, otherwise error code
- */
-static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- MGSLPC_INFO *info = dev_to_port(dev);
- struct tty_struct *tty;
- unsigned char new_encoding;
- unsigned short new_crctype;
-
- /* return error if TTY interface open */
- if (info->port.count)
- return -EBUSY;
-
- switch (encoding)
- {
- case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
- case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
- case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
- case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
- case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
- default: return -EINVAL;
- }
-
- switch (parity)
- {
- case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
- case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
- case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
- default: return -EINVAL;
- }
-
- info->params.encoding = new_encoding;
- info->params.crc_type = new_crctype;
-
- /* if network interface up, reprogram hardware */
- if (info->netcount) {
- tty = tty_port_tty_get(&info->port);
- mgslpc_program_hw(info, tty);
- tty_kref_put(tty);
- }
-
- return 0;
-}
-
-/**
- * called by generic HDLC layer to send frame
- *
- * skb socket buffer containing HDLC frame
- * dev pointer to network device structure
- */
-static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- MGSLPC_INFO *info = dev_to_port(dev);
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
-
- /* stop sending until this frame completes */
- netif_stop_queue(dev);
-
- /* copy data to device buffers */
- skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
- info->tx_get = 0;
- info->tx_put = info->tx_count = skb->len;
-
- /* update network statistics */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- /* done with socket buffer, so free it */
- dev_kfree_skb(skb);
-
- /* save start time for transmit timeout detection */
- netif_trans_update(dev);
-
- /* start hardware transmitter if necessary */
- spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_active) {
- struct tty_struct *tty = tty_port_tty_get(&info->port);
- tx_start(info, tty);
- tty_kref_put(tty);
- }
- spin_unlock_irqrestore(&info->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-/**
- * called by network layer when interface enabled
- * claim resources and initialize hardware
- *
- * dev pointer to network device structure
- *
- * returns 0 if success, otherwise error code
- */
-static int hdlcdev_open(struct net_device *dev)
-{
- MGSLPC_INFO *info = dev_to_port(dev);
- struct tty_struct *tty;
- int rc;
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
-
- /* generic HDLC layer open processing */
- rc = hdlc_open(dev);
- if (rc != 0)
- return rc;
-
- /* arbitrate between network and tty opens */
- spin_lock_irqsave(&info->netlock, flags);
- if (info->port.count != 0 || info->netcount != 0) {
- printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
- spin_unlock_irqrestore(&info->netlock, flags);
- return -EBUSY;
- }
- info->netcount=1;
- spin_unlock_irqrestore(&info->netlock, flags);
-
- tty = tty_port_tty_get(&info->port);
- /* claim resources and init adapter */
- rc = startup(info, tty);
- if (rc != 0) {
- tty_kref_put(tty);
- spin_lock_irqsave(&info->netlock, flags);
- info->netcount=0;
- spin_unlock_irqrestore(&info->netlock, flags);
- return rc;
- }
- /* assert RTS and DTR, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
- mgslpc_program_hw(info, tty);
- tty_kref_put(tty);
-
- /* enable network layer transmit */
- netif_trans_update(dev);
- netif_start_queue(dev);
-
- /* inform generic HDLC layer of current DCD status */
- spin_lock_irqsave(&info->lock, flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock, flags);
- if (info->serial_signals & SerialSignal_DCD)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- return 0;
-}
-
-/**
- * called by network layer when interface is disabled
- * shutdown hardware and release resources
- *
- * dev pointer to network device structure
- *
- * returns 0 if success, otherwise error code
- */
-static int hdlcdev_close(struct net_device *dev)
-{
- MGSLPC_INFO *info = dev_to_port(dev);
- struct tty_struct *tty = tty_port_tty_get(&info->port);
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
-
- netif_stop_queue(dev);
-
- /* shutdown adapter and release resources */
- shutdown(info, tty);
- tty_kref_put(tty);
- hdlc_close(dev);
-
- spin_lock_irqsave(&info->netlock, flags);
- info->netcount=0;
- spin_unlock_irqrestore(&info->netlock, flags);
-
- return 0;
-}
-
-/**
- * called by network layer to process IOCTL call to network device
- *
- * dev pointer to network device structure
- * ifs pointer to network interface settings structure
- *
- * returns 0 if success, otherwise error code
- */
-static int hdlcdev_wan_ioctl(struct net_device *dev, struct if_settings *ifs)
-{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
- MGSLPC_INFO *info = dev_to_port(dev);
- unsigned int flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
-
- /* return error if TTY interface open */
- if (info->port.count)
- return -EBUSY;
-
- memset(&new_line, 0, size);
-
- switch (ifs->type) {
- case IF_GET_IFACE: /* return current sync_serial_settings */
-
- ifs->type = IF_IFACE_SYNC_SERIAL;
- if (ifs->size < size) {
- ifs->size = size; /* data size wanted */
- return -ENOBUFS;
- }
-
- flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
- HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
- HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
- HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
-
- switch (flags){
- case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
- case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
- case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
- case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
- default: new_line.clock_type = CLOCK_DEFAULT;
- }
-
- new_line.clock_rate = info->params.clock_speed;
- new_line.loopback = info->params.loopback ? 1:0;
-
- if (copy_to_user(line, &new_line, size))
- return -EFAULT;
- return 0;
-
- case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
-
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(&new_line, line, size))
- return -EFAULT;
-
- switch (new_line.clock_type)
- {
- case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
- case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
- case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
- case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
- case CLOCK_DEFAULT: flags = info->params.flags &
- (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
- HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
- HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
- HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
- default: return -EINVAL;
- }
-
- if (new_line.loopback != 0 && new_line.loopback != 1)
- return -EINVAL;
-
- info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
- HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
- HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
- HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
- info->params.flags |= flags;
-
- info->params.loopback = new_line.loopback;
-
- if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
- info->params.clock_speed = new_line.clock_rate;
- else
- info->params.clock_speed = 0;
-
- /* if network interface up, reprogram hardware */
- if (info->netcount) {
- struct tty_struct *tty = tty_port_tty_get(&info->port);
- mgslpc_program_hw(info, tty);
- tty_kref_put(tty);
- }
- return 0;
- default:
- return hdlc_ioctl(dev, ifs);
- }
-}
-
-/**
- * called by network layer when transmit timeout is detected
- *
- * dev pointer to network device structure
- */
-static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- MGSLPC_INFO *info = dev_to_port(dev);
- unsigned long flags;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_tx_timeout(%s)\n", dev->name);
-
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
-
- spin_lock_irqsave(&info->lock, flags);
- tx_stop(info);
- spin_unlock_irqrestore(&info->lock, flags);
-
- netif_wake_queue(dev);
-}
-
-/**
- * called by device driver when transmit completes
- * reenable network layer transmit if stopped
- *
- * info pointer to device instance information
- */
-static void hdlcdev_tx_done(MGSLPC_INFO *info)
-{
- if (netif_queue_stopped(info->netdev))
- netif_wake_queue(info->netdev);
-}
-
-/**
- * called by device driver when frame received
- * pass frame to network layer
- *
- * info pointer to device instance information
- * buf	 pointer to buffer containing frame data
- * size count of data bytes in buf
- */
-static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
-{
- struct sk_buff *skb = dev_alloc_skb(size);
- struct net_device *dev = info->netdev;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_rx(%s)\n", dev->name);
-
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- }
-
- skb_put_data(skb, buf, size);
-
- skb->protocol = hdlc_type_trans(skb, dev);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += size;
-
- netif_rx(skb);
-}
-
-static const struct net_device_ops hdlcdev_ops = {
- .ndo_open = hdlcdev_open,
- .ndo_stop = hdlcdev_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlcdev_wan_ioctl,
- .ndo_tx_timeout = hdlcdev_tx_timeout,
-};
-
-/**
- * called by device driver when adding device instance
- * do generic HDLC initialization
- *
- * info pointer to device instance information
- *
- * returns 0 if success, otherwise error code
- */
-static int hdlcdev_init(MGSLPC_INFO *info)
-{
- int rc;
- struct net_device *dev;
- hdlc_device *hdlc;
-
- /* allocate and initialize network and HDLC layer objects */
-
- dev = alloc_hdlcdev(info);
- if (dev == NULL) {
- printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
- return -ENOMEM;
- }
-
- /* for network layer reporting purposes only */
- dev->base_addr = info->io_base;
- dev->irq = info->irq_level;
-
- /* network layer callbacks and settings */
- dev->netdev_ops = &hdlcdev_ops;
- dev->watchdog_timeo = 10 * HZ;
- dev->tx_queue_len = 50;
-
- /* generic HDLC layer callbacks and settings */
- hdlc = dev_to_hdlc(dev);
- hdlc->attach = hdlcdev_attach;
- hdlc->xmit = hdlcdev_xmit;
-
- /* register objects with HDLC layer */
- rc = register_hdlc_device(dev);
- if (rc) {
- printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
- free_netdev(dev);
- return rc;
- }
-
- info->netdev = dev;
- return 0;
-}
-
-/**
- * called by device driver when removing device instance
- * do generic HDLC cleanup
- *
- * info pointer to device instance information
- */
-static void hdlcdev_exit(MGSLPC_INFO *info)
-{
- unregister_hdlc_device(info->netdev);
- free_netdev(info->netdev);
- info->netdev = NULL;
-}
-
-#endif /* SYNCLINK_GENERIC_HDLC */
-
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 38b46c7d1737..81ed58157b15 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -841,7 +841,7 @@ static int __init ppdev_init(void)
pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
return -EIO;
}
- ppdev_class = class_create(THIS_MODULE, CHRDEV);
+ ppdev_class = class_create(CHRDEV);
if (IS_ERR(ppdev_class)) {
err = PTR_ERR(ppdev_class);
goto out_chrdev;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ce3ccd172cc8..253f2ddb8913 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1311,7 +1311,7 @@ static void __cold try_to_generate_entropy(void)
/* Basic CPU round-robin, which avoids the current CPU. */
do {
cpu = cpumask_next(cpu, &timer_cpus);
- if (cpu == nr_cpumask_bits)
+ if (cpu >= nr_cpu_ids)
cpu = cpumask_first(&timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 0913d3eb8d51..bd757d836c5c 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -14,6 +14,7 @@
* Access to the event log extended by the TCG BIOS of PC platform
*/
+#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -135,7 +136,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmalloc(len, GFP_KERNEL);
+ log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -143,8 +144,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
ret = -EIO;
virt = acpi_os_map_iomem(start, len);
- if (!virt)
+ if (!virt) {
+ dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
+ /* try EFI log next */
+ ret = -ENODEV;
goto err;
+ }
memcpy_fromio(log->bios_event_log, virt, len);
@@ -160,7 +165,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
return format;
err:
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 8512ec76d526..639c3f395a5a 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -36,7 +36,7 @@ static int tpm_bios_measurements_open(struct inode *inode,
inode_unlock(inode);
return -ENODEV;
}
- chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ chip_seqops = inode->i_private;
seqops = chip_seqops->seqops;
chip = chip_seqops->chip;
get_device(&chip->dev);
@@ -55,8 +55,8 @@ static int tpm_bios_measurements_open(struct inode *inode,
static int tpm_bios_measurements_release(struct inode *inode,
struct file *file)
{
- struct seq_file *seq = (struct seq_file *)file->private_data;
- struct tpm_chip *chip = (struct tpm_chip *)seq->private;
+ struct seq_file *seq = file->private_data;
+ struct tpm_chip *chip = seq->private;
put_device(&chip->dev);
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index e6cb9d525e30..4e9d7c2bf32e 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -6,6 +6,7 @@
* Thiebaud Weksteen <tweek@google.com>
*/
+#include <linux/device.h>
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
@@ -55,7 +56,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
+ log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL);
if (!log->bios_event_log) {
ret = -ENOMEM;
goto out;
@@ -76,7 +77,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
MEMREMAP_WB);
if (!final_tbl) {
pr_err("Could not map UEFI TPM final log\n");
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
ret = -ENOMEM;
goto out;
}
@@ -91,11 +92,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
* Allocate memory for the 'combined log' where we will append the
* 'final events log' to.
*/
- tmp = krealloc(log->bios_event_log,
- log_size + final_events_log_size,
- GFP_KERNEL);
+ tmp = devm_krealloc(&chip->dev, log->bios_event_log,
+ log_size + final_events_log_size,
+ GFP_KERNEL);
if (!tmp) {
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index a9ce66d09a75..930fe43d5daf 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -10,13 +10,44 @@
* Read the event log created by the firmware on PPC64
*/
+#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/tpm_eventlog.h>
#include "../tpm.h"
#include "common.h"
+static int tpm_read_log_memory_region(struct tpm_chip *chip)
+{
+ struct device_node *node;
+ struct resource res;
+ int rc;
+
+ node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
+ if (!node)
+ return -ENODEV;
+
+ rc = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (rc)
+ return rc;
+
+ chip->log.bios_event_log = devm_memremap(&chip->dev, res.start, resource_size(&res),
+ MEMREMAP_WB);
+ if (IS_ERR(chip->log.bios_event_log))
+ return -ENOMEM;
+
+ chip->log.bios_event_log_end = chip->log.bios_event_log + resource_size(&res);
+
+ return chip->flags & TPM_CHIP_FLAG_TPM2 ? EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 :
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
+
int tpm_read_log_of(struct tpm_chip *chip)
{
struct device_node *np;
@@ -38,7 +69,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
sizep = of_get_property(np, "linux,sml-size", NULL);
basep = of_get_property(np, "linux,sml-base", NULL);
if (sizep == NULL && basep == NULL)
- return -ENODEV;
+ return tpm_read_log_memory_region(chip);
if (sizep == NULL || basep == NULL)
return -EIO;
@@ -65,7 +96,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
return -EIO;
}
- log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
+ log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 8156bb2af78c..2d28f55ef490 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -101,8 +101,7 @@ static const struct st33zp24_phy_ops i2c_phy_ops = {
* @return: 0 in case of success.
* -1 in other case.
*/
-static int st33zp24_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st33zp24_i2c_probe(struct i2c_client *client)
{
struct st33zp24_i2c_phy *phy;
@@ -139,13 +138,13 @@ static const struct i2c_device_id st33zp24_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
-static const struct of_device_id of_st33zp24_i2c_match[] = {
+static const struct of_device_id of_st33zp24_i2c_match[] __maybe_unused = {
{ .compatible = "st,st33zp24-i2c", },
{}
};
MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
-static const struct acpi_device_id st33zp24_i2c_acpi_match[] = {
+static const struct acpi_device_id st33zp24_i2c_acpi_match[] __maybe_unused = {
{"SMO3324"},
{}
};
@@ -161,7 +160,7 @@ static struct i2c_driver st33zp24_i2c_driver = {
.of_match_table = of_match_ptr(of_st33zp24_i2c_match),
.acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
},
- .probe = st33zp24_i2c_probe,
+ .probe_new = st33zp24_i2c_probe,
.remove = st33zp24_i2c_remove,
.id_table = st33zp24_i2c_id
};
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index 2154059f0235..f5811b301d3b 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -255,13 +255,13 @@ static const struct spi_device_id st33zp24_spi_id[] = {
};
MODULE_DEVICE_TABLE(spi, st33zp24_spi_id);
-static const struct of_device_id of_st33zp24_spi_match[] = {
+static const struct of_device_id of_st33zp24_spi_match[] __maybe_unused = {
{ .compatible = "st,st33zp24-spi", },
{}
};
MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match);
-static const struct acpi_device_id st33zp24_spi_acpi_match[] = {
+static const struct acpi_device_id st33zp24_spi_acpi_match[] __maybe_unused = {
{"SMO3324"},
{}
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 741d8f3e8fb3..c10a4aa97373 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -267,7 +267,6 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
- kfree(chip->log.bios_event_log);
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
kfree(chip->allocated_banks);
@@ -283,7 +282,7 @@ static void tpm_dev_release(struct device *dev)
*
* Return: always 0 (i.e. success)
*/
-static int tpm_class_shutdown(struct device *dev)
+int tpm_class_shutdown(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
@@ -338,7 +337,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
device_initialize(&chip->dev);
chip->dev.class = tpm_class;
- chip->dev.class->shutdown_pre = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
@@ -512,6 +510,63 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
+/*
+ * Some AMD fTPM versions may cause stutter
+ * https://www.amd.com/en/support/kb/faq/pa-410
+ *
+ * Fixes are available in two series of fTPM firmware:
+ * 6.x.y.z series: 6.0.18.6 +
+ * 3.x.y.z series: 3.57.y.5 +
+ */
+static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+{
+ u32 val1, val2;
+ u64 version;
+ int ret;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return false;
+
+ ret = tpm_request_locality(chip);
+ if (ret)
+ return false;
+
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
+ if (ret)
+ goto release;
+ if (val1 != 0x414D4400U /* AMD */) {
+ ret = -ENODEV;
+ goto release;
+ }
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
+ if (ret)
+ goto release;
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
+
+release:
+ tpm_relinquish_locality(chip);
+
+ if (ret)
+ return false;
+
+ version = ((u64)val1 << 32) | val2;
+ if ((version >> 48) == 6) {
+ if (version >= 0x0006000000180006ULL)
+ return false;
+ } else if ((version >> 48) == 3) {
+ if (version >= 0x0003005700000005ULL)
+ return false;
+ } else {
+ return false;
+ }
+
+ dev_warn(&chip->dev,
+ "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
+ version);
+
+ return true;
+}
+
static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
@@ -521,7 +576,8 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int tpm_add_hwrng(struct tpm_chip *chip)
{
- if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
+ tpm_amd_is_rng_defective(chip))
return 0;
snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
@@ -549,6 +605,42 @@ static int tpm_get_pcr_allocation(struct tpm_chip *chip)
}
/*
+ * tpm_chip_bootstrap() - Bootstrap TPM chip after power on
+ * @chip: TPM chip to use.
+ *
+ * Initialize TPM chip after power on. This is a one-shot function: subsequent
+ * calls will have no effect.
+ */
+int tpm_chip_bootstrap(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->flags & TPM_CHIP_FLAG_BOOTSTRAPPED)
+ return 0;
+
+ rc = tpm_chip_start(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_auto_startup(chip);
+ if (rc)
+ goto stop;
+
+ rc = tpm_get_pcr_allocation(chip);
+stop:
+ tpm_chip_stop(chip);
+
+ /*
+	 * Unconditionally set, as driver initialization should cease when the
+	 * bootstrapping process fails.
+ */
+ chip->flags |= TPM_CHIP_FLAG_BOOTSTRAPPED;
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_bootstrap);
+
+/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
*
@@ -563,17 +655,7 @@ int tpm_chip_register(struct tpm_chip *chip)
{
int rc;
- rc = tpm_chip_start(chip);
- if (rc)
- return rc;
- rc = tpm_auto_startup(chip);
- if (rc) {
- tpm_chip_stop(chip);
- return rc;
- }
-
- rc = tpm_get_pcr_allocation(chip);
- tpm_chip_stop(chip);
+ rc = tpm_chip_bootstrap(chip);
if (rc)
return rc;
@@ -625,7 +707,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
+ !tpm_amd_is_rng_defective(chip))
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d69905233aff..4463d0018290 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev)
}
suspended:
- return rc;
+ if (rc)
+ dev_err(dev, "Ignoring error %d while suspending\n", rc);
+ return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
@@ -464,13 +466,15 @@ static int __init tpm_init(void)
{
int rc;
- tpm_class = class_create(THIS_MODULE, "tpm");
+ tpm_class = class_create("tpm");
if (IS_ERR(tpm_class)) {
pr_err("couldn't create tpm class\n");
return PTR_ERR(tpm_class);
}
- tpmrm_class = class_create(THIS_MODULE, "tpmrm");
+ tpm_class->shutdown_pre = tpm_class_shutdown;
+
+ tpmrm_class = class_create("tpmrm");
if (IS_ERR(tpmrm_class)) {
pr_err("couldn't create tpmrm class\n");
rc = PTR_ERR(tpmrm_class);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 24ee4e1cc452..460bb85dd142 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -150,6 +150,79 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
+enum tpm2_pt_props {
+ TPM2_PT_NONE = 0x00000000,
+ TPM2_PT_GROUP = 0x00000100,
+ TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
+ TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
+ TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
+ TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
+ TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
+ TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
+ TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
+ TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
+ TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
+ TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
+ TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
+ TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
+ TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
+ TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
+ TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
+ TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
+ TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
+ TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
+ TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
+ TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
+ TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
+ TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
+ TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
+ TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
+ TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
+ TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
+ TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
+ TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
+ TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
+ TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
+ TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
+ TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
+ TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
+ TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
+ TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
+ TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
+ TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
+ TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
+ TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
+ TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
+ TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
+ TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
+ TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
+ TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
+ TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
+ TPM2_PT_MODES = TPM2_PT_FIXED + 45,
+ TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
+ TPM2_PT_VAR = TPM2_PT_GROUP * 2,
+ TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
+ TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
+ TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
+ TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
+ TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
+ TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
+ TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
+ TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
+ TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
+ TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
+ TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
+ TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
+ TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
+ TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
+ TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
+ TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
+ TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
+ TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
+ TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
+ TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
+ TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
+};
/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
* bytes, but 128 is still a relatively large number of random bytes and
@@ -183,6 +256,7 @@ int tpm1_get_pcr_allocation(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
+int tpm_class_shutdown(struct device *dev);
static inline void tpm_msleep(unsigned int delay_msec)
{
@@ -190,6 +264,7 @@ static inline void tpm_msleep(unsigned int delay_msec)
delay_msec * 1000);
};
+int tpm_chip_bootstrap(struct tpm_chip *chip);
int tpm_chip_start(struct tpm_chip *chip);
void tpm_chip_stop(struct tpm_chip *chip);
struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 65d03867e114..93545be190a5 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -777,10 +777,12 @@ out:
int tpm2_find_cc(struct tpm_chip *chip, u32 cc)
{
+ u32 cc_mask;
int i;
+ cc_mask = 1 << TPM2_CC_ATTR_VENDOR | GENMASK(15, 0);
for (i = 0; i < chip->nr_commands; i++)
- if (cc == (chip->cc_attrs_tbl[i] & GENMASK(15, 0)))
+ if (cc == (chip->cc_attrs_tbl[i] & cc_mask))
return i;
return -1;
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
index ba37e77e8af3..7ac3f69dcf0f 100644
--- a/drivers/char/tpm/tpm_atmel.h
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -26,7 +26,7 @@ struct tpm_atmel_priv {
#ifdef CONFIG_PPC64
-#include <asm/prom.h>
+#include <linux/of.h>
#define atmel_getb(priv, offset) readb(priv->iobase + offset)
#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 7e9da671a0e8..d43a0d7b97a8 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -98,6 +98,8 @@ struct crb_priv {
u8 __iomem *rsp;
u32 cmd_size;
u32 smc_func_id;
+ u32 __iomem *pluton_start_addr;
+ u32 __iomem *pluton_reply_addr;
};
struct tpm2_crb_smc {
@@ -108,6 +110,11 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
+struct tpm2_crb_pluton {
+ u64 start_addr;
+ u64 reply_addr;
+};
+
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -127,6 +134,25 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
return ((ioread32(reg) & mask) == value);
}
+static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
+{
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
+ return -ETIME;
+
+ iowrite32(1, priv->pluton_start_addr);
+ if (wait_for_complete == false)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_start_addr,
+ 0xffffffff, 0, 200))
+ return -ETIME;
+
+ return 0;
+}
+
/**
* __crb_go_idle - request tpm crb device to go the idle state
*
@@ -145,6 +171,8 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*/
static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
+ int rc;
+
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
@@ -152,6 +180,10 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
CRB_CTRL_REQ_GO_IDLE/* mask */,
0, /* value */
@@ -188,12 +220,19 @@ static int crb_go_idle(struct tpm_chip *chip)
*/
static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
+ int rc;
+
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
CRB_CTRL_REQ_CMD_READY /* mask */,
0, /* value */
@@ -371,6 +410,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
return -E2BIG;
}
+ /* Seems to be necessary for every command */
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ __crb_cmd_ready(&chip->dev, priv);
+
memcpy_toio(priv->cmd, buf, len);
/* Make sure that cmd is populated before issuing start. */
@@ -394,7 +437,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
- return rc;
+ if (rc)
+ return rc;
+
+ return crb_try_pluton_doorbell(priv, false);
}
static void crb_cancel(struct tpm_chip *chip)
@@ -524,15 +570,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(iores_array) != IORESOURCE_MEM) {
- dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
- return -EINVAL;
- } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
- IORESOURCE_MEM) {
- dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
- memset(iores_array + TPM_CRB_MAX_RESOURCES,
- 0, sizeof(*iores_array));
- iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ /* Pluton doesn't appear to define ACPI memory regions */
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ }
}
iores = NULL;
@@ -656,6 +705,22 @@ out_relinquish_locality:
return ret;
}
+static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
+{
+ priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->start_addr, 4);
+ if (IS_ERR(priv->pluton_start_addr))
+ return PTR_ERR(priv->pluton_start_addr);
+
+ priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->reply_addr, 4);
+ if (IS_ERR(priv->pluton_reply_addr))
+ return PTR_ERR(priv->pluton_reply_addr);
+
+ return 0;
+}
+
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
@@ -663,6 +728,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
int rc;
@@ -700,6 +766,20 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
+ return -EINVAL;
+ }
+ crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
+ rc = crb_map_pluton(dev, priv, buf, crb_pluton);
+ if (rc)
+ return rc;
+ }
+
priv->sm = sm;
priv->hid = acpi_device_hid(device);
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index deff23bb54bf..528f35b14fb6 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -334,11 +334,11 @@ static int ftpm_tee_remove(struct device *dev)
return 0;
}
-static int ftpm_plat_tee_remove(struct platform_device *pdev)
+static void ftpm_plat_tee_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- return ftpm_tee_remove(dev);
+ ftpm_tee_remove(dev);
}
/**
@@ -367,7 +367,7 @@ static struct platform_driver ftpm_tee_plat_driver = {
},
.shutdown = ftpm_plat_tee_shutdown,
.probe = ftpm_plat_tee_probe,
- .remove = ftpm_plat_tee_remove,
+ .remove_new = ftpm_plat_tee_remove,
};
/* UUID of the fTPM TA */
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 4be3677c1463..8f77154e0550 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -146,8 +146,7 @@ static const struct tpm_class_ops i2c_atmel = {
.req_canceled = i2c_atmel_req_canceled,
};
-static int i2c_atmel_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int i2c_atmel_probe(struct i2c_client *client)
{
struct tpm_chip *chip;
struct device *dev = &client->dev;
@@ -204,7 +203,7 @@ static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
static struct i2c_driver i2c_atmel_driver = {
.id_table = i2c_atmel_id,
- .probe = i2c_atmel_probe,
+ .probe_new = i2c_atmel_probe,
.remove = i2c_atmel_remove,
.driver = {
.name = I2C_DRIVER_NAME,
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index fd3c3661e646..7cdaff52a96d 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -681,8 +681,7 @@ MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
-static int tpm_tis_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tpm_tis_i2c_probe(struct i2c_client *client)
{
int rc;
struct device *dev = &(client->dev);
@@ -717,7 +716,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client)
static struct i2c_driver tpm_tis_i2c_driver = {
.id_table = tpm_tis_i2c_table,
- .probe = tpm_tis_i2c_probe,
+ .probe_new = tpm_tis_i2c_probe,
.remove = tpm_tis_i2c_remove,
.driver = {
.name = "tpm_i2c_infineon",
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 95c37350cc8e..a026e98add50 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -522,9 +522,9 @@ static int get_vid(struct i2c_client *client, u32 *res)
return 0;
}
-static int i2c_nuvoton_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int i2c_nuvoton_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
int rc;
struct tpm_chip *chip;
struct device *dev = &client->dev;
@@ -650,7 +650,7 @@ static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
static struct i2c_driver i2c_nuvoton_driver = {
.id_table = i2c_nuvoton_id,
- .probe = i2c_nuvoton_probe,
+ .probe_new = i2c_nuvoton_probe,
.remove = i2c_nuvoton_remove,
.driver = {
.name = "tpm_i2c_nuvoton",
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index ed5dabd3c72d..7af389806643 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -50,6 +50,45 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * Flush previous write operations with a dummy read operation to the
+ * TPM MMIO base address.
+ */
+static inline void tpm_tis_flush(void __iomem *iobase)
+{
+ ioread8(iobase + TPM_ACCESS(0));
+}
+#else
+#define tpm_tis_flush(iobase) do { } while (0)
+#endif
+
+/*
+ * Write a byte to the TPM MMIO address, and flush the write queue.
+ * The flush ensures that the data is sent immediately over the bus and not
+ * aggregated with further requests and transferred later in a batch. The large
+ * write requests can lead to unwanted latency spikes by blocking the CPU until
+ * the complete batch has been transferred.
+ */
+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
+{
+ iowrite8(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
+/*
+ * Write a 32-bit word to the TPM MMIO address, and flush the write queue.
+ * The flush ensures that the data is sent immediately over the bus and not
+ * aggregated with further requests and transferred later in a batch. The large
+ * write requests can lead to unwanted latency spikes by blocking the CPU until
+ * the complete batch has been transferred.
+ */
+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
+{
+ iowrite32(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
static int interrupts = -1;
module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -186,12 +225,12 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
switch (io_mode) {
case TPM_TIS_PHYS_8:
while (len--)
- iowrite8(*value++, phy->iobase + addr);
+ tpm_tis_iowrite8(*value++, phy->iobase, addr);
break;
case TPM_TIS_PHYS_16:
return -EINVAL;
case TPM_TIS_PHYS_32:
- iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr);
+ tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr);
break;
}
@@ -227,7 +266,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
irq = tpm_info->irq;
if (itpm || is_itpm(ACPI_COMPANION(dev)))
- phy->priv.flags |= TPM_TIS_ITPM_WORKAROUND;
+ set_bit(TPM_TIS_ITPM_WORKAROUND, &phy->priv.flags);
return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
ACPI_HANDLE(dev));
@@ -324,14 +363,12 @@ static int tpm_tis_plat_probe(struct platform_device *pdev)
return tpm_tis_init(&pdev->dev, &tpm_info);
}
-static int tpm_tis_plat_remove(struct platform_device *pdev)
+static void tpm_tis_plat_remove(struct platform_device *pdev)
{
struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -344,7 +381,7 @@ MODULE_DEVICE_TABLE(of, tis_of_platform_match);
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
- .remove = tpm_tis_plat_remove,
+ .remove_new = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 3f98e587b3e8..02945d53fcef 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -44,6 +44,20 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
return false;
}
+static u8 tpm_tis_filter_sts_mask(u8 int_mask, u8 sts_mask)
+{
+ if (!(int_mask & TPM_INTF_STS_VALID_INT))
+ sts_mask &= ~TPM_STS_VALID;
+
+ if (!(int_mask & TPM_INTF_DATA_AVAIL_INT))
+ sts_mask &= ~TPM_STS_DATA_AVAIL;
+
+ if (!(int_mask & TPM_INTF_CMD_READY_INT))
+ sts_mask &= ~TPM_STS_COMMAND_READY;
+
+ return sts_mask;
+}
+
static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
unsigned long timeout, wait_queue_head_t *queue,
bool check_cancel)
@@ -53,41 +67,56 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
long rc;
u8 status;
bool canceled = false;
+ u8 sts_mask;
+ int ret = 0;
/* check current status */
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
- stop = jiffies + timeout;
+ sts_mask = mask & (TPM_STS_VALID | TPM_STS_DATA_AVAIL |
+ TPM_STS_COMMAND_READY);
+ /* check what status changes can be handled by irqs */
+ sts_mask = tpm_tis_filter_sts_mask(priv->int_mask, sts_mask);
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ stop = jiffies + timeout;
+ /* process status changes with irq support */
+ if (sts_mask) {
+ ret = -ETIME;
again:
timeout = stop - jiffies;
if ((long)timeout <= 0)
return -ETIME;
rc = wait_event_interruptible_timeout(*queue,
- wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ wait_for_tpm_stat_cond(chip, sts_mask, check_cancel,
&canceled),
timeout);
if (rc > 0) {
if (canceled)
return -ECANCELED;
- return 0;
+ ret = 0;
}
if (rc == -ERESTARTSYS && freezing(current)) {
clear_thread_flag(TIF_SIGPENDING);
goto again;
}
- } else {
- do {
- usleep_range(priv->timeout_min,
- priv->timeout_max);
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
}
+
+ if (ret)
+ return ret;
+
+ mask &= ~sts_mask;
+ if (!mask) /* all done */
+ return 0;
+ /* process status changes without irq support */
+ do {
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ usleep_range(priv->timeout_min,
+ priv->timeout_max);
+ } while (time_before(jiffies, stop));
return -ETIME;
}
@@ -136,16 +165,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
-static int release_locality(struct tpm_chip *chip, int l)
+static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l)
+{
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+ return 0;
+}
+
+static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+ mutex_lock(&priv->locality_count_mutex);
+ priv->locality_count--;
+ if (priv->locality_count == 0)
+ __tpm_tis_relinquish_locality(priv, l);
+ mutex_unlock(&priv->locality_count_mutex);
return 0;
}
-static int request_locality(struct tpm_chip *chip, int l)
+static int __tpm_tis_request_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop, timeout;
@@ -186,6 +226,20 @@ again:
return -1;
}
+static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ mutex_lock(&priv->locality_count_mutex);
+ if (priv->locality_count == 0)
+ ret = __tpm_tis_request_locality(chip, l);
+ if (!ret)
+ priv->locality_count++;
+ mutex_unlock(&priv->locality_count_mutex);
+ return ret;
+}
+
static u8 tpm_tis_status(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
@@ -351,7 +405,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
size_t count = 0;
- bool itpm = priv->flags & TPM_TIS_ITPM_WORKAROUND;
+ bool itpm = test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
status = tpm_tis_status(chip);
if ((status & TPM_STS_COMMAND_READY) == 0) {
@@ -484,7 +538,8 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
int rc, irq;
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || priv->irq_tested)
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ) ||
+ test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
return tpm_tis_send_main(chip, buf, len);
/* Verify receipt of the expected IRQ */
@@ -494,11 +549,11 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_tis_send_main(chip, buf, len);
priv->irq = irq;
chip->flags |= TPM_CHIP_FLAG_IRQ;
- if (!priv->irq_tested)
+ if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
tpm_msleep(1);
- if (!priv->irq_tested)
+ if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
disable_interrupts(chip);
- priv->irq_tested = true;
+ set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
return rc;
}
@@ -641,7 +696,7 @@ static int probe_itpm(struct tpm_chip *chip)
size_t len = sizeof(cmd_getticks);
u16 vendor;
- if (priv->flags & TPM_TIS_ITPM_WORKAROUND)
+ if (test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags))
return 0;
rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
@@ -652,7 +707,7 @@ static int probe_itpm(struct tpm_chip *chip)
if (vendor != TPM_VID_INTEL)
return 0;
- if (request_locality(chip, 0) != 0)
+ if (tpm_tis_request_locality(chip, 0) != 0)
return -EBUSY;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
@@ -661,19 +716,19 @@ static int probe_itpm(struct tpm_chip *chip)
tpm_tis_ready(chip);
- priv->flags |= TPM_TIS_ITPM_WORKAROUND;
+ set_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0)
dev_info(&chip->dev, "Detected an iTPM.\n");
else {
- priv->flags &= ~TPM_TIS_ITPM_WORKAROUND;
+ clear_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
rc = -EFAULT;
}
out:
tpm_tis_ready(chip);
- release_locality(chip, priv->locality);
+ tpm_tis_relinquish_locality(chip, priv->locality);
return rc;
}
@@ -702,7 +757,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
struct tpm_chip *chip = dev_id;
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
u32 interrupt;
- int i, rc;
+ int rc;
rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
if (rc < 0)
@@ -711,20 +766,19 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
if (interrupt == 0)
return IRQ_NONE;
- priv->irq_tested = true;
+ set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
if (interrupt & TPM_INTF_DATA_AVAIL_INT)
wake_up_interruptible(&priv->read_queue);
- if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
- for (i = 0; i < 5; i++)
- if (check_locality(chip, i))
- break;
+
if (interrupt &
(TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
TPM_INTF_CMD_READY_INT))
wake_up_interruptible(&priv->int_queue);
/* Clear interrupts handled with TPM_EOI */
+ tpm_tis_request_locality(chip, 0);
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
+ tpm_tis_relinquish_locality(chip, 0);
if (rc < 0)
return IRQ_NONE;
@@ -732,25 +786,22 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
{
const char *desc = "attempting to generate an interrupt";
u32 cap2;
cap_t cap;
int ret;
- ret = request_locality(chip, 0);
- if (ret < 0)
- return ret;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
else
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
- release_locality(chip, 0);
-
- return ret;
+ if (ret)
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}
/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -765,60 +816,66 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
int rc;
u32 int_status;
- if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags,
- dev_name(&chip->dev), chip) != 0) {
+
+ rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
+ tis_int_handler, IRQF_ONESHOT | flags,
+ dev_name(&chip->dev), chip);
+ if (rc) {
dev_info(&chip->dev, "Unable to request irq: %d for probe\n",
irq);
return -1;
}
priv->irq = irq;
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
&original_int_vec);
- if (rc < 0)
+ if (rc < 0) {
+ tpm_tis_relinquish_locality(chip, priv->locality);
return rc;
+ }
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
if (rc < 0)
- return rc;
+ goto restore_irqs;
rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
if (rc < 0)
- return rc;
+ goto restore_irqs;
/* Clear all existing */
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
if (rc < 0)
- return rc;
-
+ goto restore_irqs;
/* Turn on */
rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
intmask | TPM_GLOBAL_INT_ENABLE);
if (rc < 0)
- return rc;
+ goto restore_irqs;
- priv->irq_tested = false;
+ clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
/* Generate an interrupt by having the core call through to
* tpm_tis_send
*/
- rc = tpm_tis_gen_interrupt(chip);
- if (rc < 0)
- return rc;
+ tpm_tis_gen_interrupt(chip);
+restore_irqs:
/* tpm_tis_send will either confirm the interrupt is working or it
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- rc = tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
- if (rc < 0)
- return rc;
-
- return 1;
+ tpm_tis_write8(priv, original_int_vec,
+ TPM_INT_VECTOR(priv->locality));
+ rc = -1;
}
- return 0;
+ tpm_tis_relinquish_locality(chip, priv->locality);
+
+ return rc;
}
/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
@@ -932,8 +989,8 @@ static const struct tpm_class_ops tpm_tis = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
- .request_locality = request_locality,
- .relinquish_locality = release_locality,
+ .request_locality = tpm_tis_request_locality,
+ .relinquish_locality = tpm_tis_relinquish_locality,
.clk_enable = tpm_tis_clkrun_enable,
};
@@ -967,6 +1024,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+ priv->locality_count = 0;
+ mutex_init(&priv->locality_count_mutex);
dev_set_drvdata(&chip->dev, priv);
@@ -1009,18 +1068,50 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (rc < 0)
goto out_err;
- intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
- TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
+ /* Figure out the capabilities */
+ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+ if (rc < 0)
+ goto out_err;
+
+ dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+ intfcaps);
+ if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+ dev_dbg(dev, "\tBurst Count Static\n");
+ if (intfcaps & TPM_INTF_CMD_READY_INT) {
+ intmask |= TPM_INTF_CMD_READY_INT;
+ dev_dbg(dev, "\tCommand Ready Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+ dev_dbg(dev, "\tInterrupt Edge Falling\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+ dev_dbg(dev, "\tInterrupt Edge Rising\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+ dev_dbg(dev, "\tInterrupt Level Low\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+ dev_dbg(dev, "\tInterrupt Level High\n");
+ if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) {
+ intmask |= TPM_INTF_LOCALITY_CHANGE_INT;
+ dev_dbg(dev, "\tLocality Change Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_STS_VALID_INT) {
+ intmask |= TPM_INTF_STS_VALID_INT;
+ dev_dbg(dev, "\tSts Valid Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT) {
+ intmask |= TPM_INTF_DATA_AVAIL_INT;
+ dev_dbg(dev, "\tData Avail Int Support\n");
+ }
+
intmask &= ~TPM_GLOBAL_INT_ENABLE;
- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0) {
rc = -ENODEV;
goto out_err;
}
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
- release_locality(chip, 0);
+ tpm_tis_relinquish_locality(chip, 0);
rc = tpm_chip_start(chip);
if (rc)
@@ -1044,35 +1135,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
goto out_err;
}
- /* Figure out the capabilities */
- rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
- if (rc < 0)
- goto out_err;
-
- dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
- intfcaps);
- if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
- dev_dbg(dev, "\tBurst Count Static\n");
- if (intfcaps & TPM_INTF_CMD_READY_INT)
- dev_dbg(dev, "\tCommand Ready Int Support\n");
- if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
- dev_dbg(dev, "\tInterrupt Edge Falling\n");
- if (intfcaps & TPM_INTF_INT_EDGE_RISING)
- dev_dbg(dev, "\tInterrupt Edge Rising\n");
- if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
- dev_dbg(dev, "\tInterrupt Level Low\n");
- if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
- dev_dbg(dev, "\tInterrupt Level High\n");
- if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
- dev_dbg(dev, "\tLocality Change Int Support\n");
- if (intfcaps & TPM_INTF_STS_VALID_INT)
- dev_dbg(dev, "\tSts Valid Int Support\n");
- if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
- dev_dbg(dev, "\tData Avail Int Support\n");
-
/* INTERRUPT Setup */
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ goto out_err;
+
if (irq != -1) {
/*
* Before doing irq testing issue a command to the TPM in polling mode
@@ -1080,13 +1150,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
* proper timeouts for the driver.
*/
- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0)
goto out_err;
rc = tpm_get_timeouts(chip);
- release_locality(chip, 0);
+ tpm_tis_relinquish_locality(chip, 0);
if (rc) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
@@ -1094,17 +1164,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
goto out_err;
}
- if (irq) {
+ if (irq)
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
- if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- dev_err(&chip->dev, FW_BUG
- "TPM interrupt not working, polling instead\n");
+ else
+ tpm_tis_probe_irq(chip, intmask);
- disable_interrupts(chip);
- }
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ priv->int_mask = intmask;
} else {
- tpm_tis_probe_irq(chip, intmask);
+ dev_err(&chip->dev, FW_BUG
+ "TPM interrupt not working, polling instead\n");
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+ disable_interrupts(chip);
+ tpm_tis_relinquish_locality(chip, 0);
}
}
@@ -1143,13 +1219,7 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
if (rc < 0)
goto out;
- rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
- if (rc < 0)
- goto out;
-
- intmask |= TPM_INTF_CMD_READY_INT
- | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
- | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+ intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
@@ -1165,28 +1235,27 @@ int tpm_tis_resume(struct device *dev)
struct tpm_chip *chip = dev_get_drvdata(dev);
int ret;
+ ret = tpm_tis_request_locality(chip, 0);
+ if (ret < 0)
+ return ret;
+
if (chip->flags & TPM_CHIP_FLAG_IRQ)
tpm_tis_reenable_interrupts(chip);
ret = tpm_pm_resume(dev);
if (ret)
- return ret;
+ goto out;
/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- ret = request_locality(chip, 0);
- if (ret < 0)
- return ret;
-
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
tpm1_do_selftest(chip);
+out:
+ tpm_tis_relinquish_locality(chip, 0);
- release_locality(chip, 0);
- }
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);
#endif
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index b68479e0de10..e978f457fd4d 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -87,13 +87,16 @@ enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
TPM_TIS_INVALID_STATUS = BIT(1),
TPM_TIS_DEFAULT_CANCELLATION = BIT(2),
+ TPM_TIS_IRQ_TESTED = BIT(3),
};
struct tpm_tis_data {
u16 manufacturer_id;
+ struct mutex locality_count_mutex;
+ unsigned int locality_count;
int locality;
int irq;
- bool irq_tested;
+ unsigned int int_mask;
unsigned long flags;
void __iomem *ilb_base_addr;
u16 clkrun_enabled;
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index f3a7251c8e38..c8c34adc14c0 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -312,8 +312,7 @@ static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
.verify_crc = tpm_tis_i2c_verify_crc,
};
-static int tpm_tis_i2c_probe(struct i2c_client *dev,
- const struct i2c_device_id *id)
+static int tpm_tis_i2c_probe(struct i2c_client *dev)
{
struct tpm_tis_i2c_phy *phy;
const u8 crc_enable = 1;
@@ -380,7 +379,7 @@ static struct i2c_driver tpm_tis_i2c_driver = {
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_i2c_match),
},
- .probe = tpm_tis_i2c_probe,
+ .probe_new = tpm_tis_i2c_probe,
.remove = tpm_tis_i2c_remove,
.id_table = tpm_tis_i2c_id,
};
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index 77cea5b31c6e..376ae18a04eb 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -100,8 +100,7 @@ static int tpm_cr50_i2c_wait_tpm_ready(struct tpm_chip *chip)
}
/* Wait for interrupt to indicate TPM is ready to respond */
- if (!wait_for_completion_timeout(&priv->tpm_ready,
- msecs_to_jiffies(chip->timeout_a))) {
+ if (!wait_for_completion_timeout(&priv->tpm_ready, chip->timeout_a)) {
dev_warn(&chip->dev, "Timeout waiting for TPM ready\n");
return -ETIMEDOUT;
}
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index a0963a3e92bd..1f5207974a17 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -231,7 +231,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = {
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
-static const struct of_device_id of_tis_spi_match[] = {
+static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
@@ -240,7 +240,7 @@ static const struct of_device_id of_tis_spi_match[] = {
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);
-static const struct acpi_device_id acpi_tis_spi_match[] = {
+static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
{"SMO0768", 0},
{}
};
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
index 679196c61401..49278746b0e2 100644
--- a/drivers/char/tpm/tpm_tis_synquacer.c
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -127,14 +127,12 @@ static int tpm_tis_synquacer_probe(struct platform_device *pdev)
return tpm_tis_synquacer_init(&pdev->dev, &tpm_info);
}
-static int tpm_tis_synquacer_remove(struct platform_device *pdev)
+static void tpm_tis_synquacer_remove(struct platform_device *pdev)
{
struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -155,7 +153,7 @@ MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
static struct platform_driver tis_synquacer_drv = {
.probe = tpm_tis_synquacer_probe,
- .remove = tpm_tis_synquacer_remove,
+ .remove_new = tpm_tis_synquacer_remove,
.driver = {
.name = "tpm_tis_synquacer",
.pm = &tpm_tis_synquacer_pm,
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 379291826261..80cca3b83b22 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -360,14 +360,13 @@ static int tpmfront_probe(struct xenbus_device *dev,
return tpm_chip_register(priv->chip);
}
-static int tpmfront_remove(struct xenbus_device *dev)
+static void tpmfront_remove(struct xenbus_device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
tpm_chip_unregister(chip);
ring_free(priv);
dev_set_drvdata(&chip->dev, NULL);
- return 0;
}
static int tpmfront_resume(struct xenbus_device *dev)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6a821118d553..b65c809a4e97 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1666,9 +1666,8 @@ static void handle_control_message(struct virtio_device *vdev,
"Not enough space to store port name\n");
break;
}
- strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
- name_size - 1);
- port->name[name_size - 1] = 0;
+ strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+ name_size);
/*
* Since we only have one sysfs attribute, 'name',
@@ -2245,7 +2244,7 @@ static int __init virtio_console_init(void)
{
int err;
- pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
+ pdrvdata.class = class_create("virtio-ports");
if (IS_ERR(pdrvdata.class)) {
err = PTR_ERR(pdrvdata.class);
pr_err("Error %d creating virtio-ports class\n", err);
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 74a4928aea1d..a46f637da959 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -856,7 +856,7 @@ static int __init hwicap_module_init(void)
dev_t devt;
int retval;
- icap_class = class_create(THIS_MODULE, "xilinx_config");
+ icap_class = class_create("xilinx_config");
mutex_init(&icap_sem);
devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
index e9a288e61c15..89926fe9d813 100644
--- a/drivers/char/xillybus/xillybus_class.c
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(xillybus_find_inode);
static int __init xillybus_class_init(void)
{
- xillybus_class = class_create(THIS_MODULE, "xillybus");
+ xillybus_class = class_create("xillybus");
if (IS_ERR(xillybus_class)) {
pr_warn("Failed to register xillybus class\n");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index d79905f3e174..016814e15536 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -23,7 +23,6 @@ menuconfig COMMON_CLK
depends on !HAVE_LEGACY_CLK
select HAVE_CLK_PREPARE
select HAVE_CLK
- select SRCU
select RATIONAL
help
The common clock framework is a single definition of struct
@@ -92,7 +91,7 @@ config COMMON_CLK_RK808
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
- depends on REGMAP
+ select REGMAP
default MFD_HI655X_PMIC
help
This driver supports the hi655x PMIC clock. This
@@ -326,6 +325,15 @@ config COMMON_CLK_LOCHNAGAR
This driver supports the clocking features of the Cirrus Logic
Lochnagar audio development board.
+config COMMON_CLK_LOONGSON2
+ bool "Clock driver for Loongson-2 SoC"
+ depends on LOONGARCH || COMPILE_TEST
+ help
+ This driver provides support for the clock controller on the Loongson-2 SoC.
+ The clock controller generates and supplies clocks to various
+ peripherals within the SoC.
+ Say Y here to support the Loongson-2 SoC clock driver.
+
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
@@ -368,6 +376,15 @@ config COMMON_CLK_RS9_PCIE
This driver supports the Renesas 9-series PCIe clock generator
models 9FGV/9DBV/9DMV/9FGL/9DML/9QXL/9SQ.
+config COMMON_CLK_SI521XX
+ tristate "Clock driver for SkyWorks Si521xx PCIe clock generators"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ This driver supports the SkyWorks Si521xx PCIe clock generator
+ models Si52144/Si52146/Si52147.
+
config COMMON_CLK_VC5
tristate "Clock driver for IDT VersaClock 5,6 devices"
depends on I2C
@@ -437,6 +454,16 @@ config COMMON_CLK_K210
help
Support for the Canaan Kendryte K210 RISC-V SoC clocks.
+config COMMON_CLK_SP7021
+ tristate "Clock driver for Sunplus SP7021 SoC"
+ depends on SOC_SP7021 || COMPILE_TEST
+ default SOC_SP7021
+ help
+ This driver supports the Sunplus SP7021 SoC clocks.
+ It implements SP7021 PLLs/gate.
+ Not all features of the PLL are currently supported
+ by the driver.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index e3ca0d058a25..0aebef17edc6 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
obj-$(CONFIG_LMK04832) += clk-lmk04832.o
obj-$(CONFIG_COMMON_CLK_LAN966X) += clk-lan966x.o
obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
+obj-$(CONFIG_MACH_LOONGSON32) += clk-loongson1.o
+obj-$(CONFIG_COMMON_CLK_LOONGSON2) += clk-loongson2.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
@@ -65,6 +67,7 @@ obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
obj-$(CONFIG_COMMON_CLK_SI544) += clk-si544.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
+obj-$(CONFIG_COMMON_CLK_SP7021) += clk-sp7021.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
@@ -72,6 +75,7 @@ obj-$(CONFIG_COMMON_CLK_TPS68470) += clk-tps68470.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_RS9_PCIE) += clk-renesas-pcie.o
+obj-$(CONFIG_COMMON_CLK_SI521XX) += clk-si521xx.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
obj-$(CONFIG_COMMON_CLK_VC7) += clk-versaclock7.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
@@ -93,7 +97,6 @@ obj-y += imx/
obj-y += ingenic/
obj-$(CONFIG_ARCH_K3) += keystone/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
-obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-y += mediatek/
obj-$(CONFIG_ARCH_MESON) += meson/
obj-y += microchip/
@@ -117,7 +120,7 @@ obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_STM32) += stm32/
-obj-$(CONFIG_SOC_STARFIVE) += starfive/
+obj-y += starfive/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-y += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 79301e1c1c36..89061b85e7d2 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -3,7 +3,7 @@
# Makefile for at91 specific clk
#
-obj-y += pmc.o sckc.o dt-compat.o
+obj-y += pmc.o sckc.o
obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o
obj-y += clk-system.o clk-peripheral.o clk-programmable.o
@@ -15,12 +15,12 @@ obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
obj-$(CONFIG_HAVE_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o
-obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o
-obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o
-obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o
-obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o
+obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o dt-compat.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o dt-compat.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o dt-compat.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o dt-compat.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
-obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o
-obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
-obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
+obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 16870943a13e..0b860126d589 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -183,7 +183,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) {
hw = at91_clk_register_system(regmap, at91rm9200_systemck[i].n,
at91rm9200_systemck[i].p,
- at91rm9200_systemck[i].id);
+ at91rm9200_systemck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index 11550e50cd9f..b521f470428f 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -460,7 +460,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
for (i = 0; i < data->num_sck; i++) {
hw = at91_clk_register_system(regmap, data->sck[i].n,
data->sck[i].p,
- data->sck[i].id);
+ data->sck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 8c9344451f46..5099669ddcbd 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -40,9 +40,14 @@ static const struct clk_pll_characteristics plla_characteristics = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} at91sam9g45_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "pck0", .p = "prog0", .id = 8 },
{ .n = "pck1", .p = "prog1", .id = 9 },
@@ -198,7 +203,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9g45_systemck[i].n,
at91sam9g45_systemck[i].p,
- at91sam9g45_systemck[i].id);
+ at91sam9g45_systemck[i].id,
+ at91sam9g45_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 0bb19400d199..08a10e12d08d 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -54,9 +54,14 @@ static const struct clk_pll_characteristics pllb_characteristics = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} at91sam9n12_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "lcdck", .p = "masterck_div", .id = 3 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "udpck", .p = "usbck", .id = 7 },
@@ -223,7 +228,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9n12_systemck[i].n,
at91sam9n12_systemck[i].p,
- at91sam9n12_systemck[i].id);
+ at91sam9n12_systemck[i].id,
+ at91sam9n12_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -236,7 +242,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
at91sam9n12_periphck[i].n,
"masterck_div",
at91sam9n12_periphck[i].id,
- &range, INT_MIN);
+ &range, INT_MIN, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index b992137bab02..1a1b6b2bb0e3 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -160,7 +160,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9rl_systemck[i].n,
at91sam9rl_systemck[i].p,
- at91sam9rl_systemck[i].id);
+ at91sam9rl_systemck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 3857db2e144b..13e589c95907 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -41,9 +41,14 @@ static const struct clk_pll_characteristics plla_characteristics = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} at91sam9x5_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "smdck", .p = "smdclk", .id = 4 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "udpck", .p = "usbck", .id = 7 },
@@ -248,7 +253,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9x5_systemck[i].n,
at91sam9x5_systemck[i].p,
- at91sam9x5_systemck[i].id);
+ at91sam9x5_systemck[i].id,
+ at91sam9x5_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -256,7 +262,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
}
if (has_lcdck) {
- hw = at91_clk_register_system(regmap, "lcdck", "masterck_div", 3);
+ hw = at91_clk_register_system(regmap, "lcdck", "masterck_div",
+ 3, 0);
if (IS_ERR(hw))
goto err_free;
@@ -269,7 +276,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
at91sam9x5_periphck[i].n,
"masterck_div",
at91sam9x5_periphck[i].id,
- &range, INT_MIN);
+ &range, INT_MIN, 0);
if (IS_ERR(hw))
goto err_free;
@@ -282,7 +289,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
extra_pcks[i].n,
"masterck_div",
extra_pcks[i].id,
- &range, INT_MIN);
+ &range, INT_MIN, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 5104d4025484..93ea685e27f6 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -445,7 +445,7 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range,
- int chg_pid)
+ int chg_pid, unsigned long flags)
{
struct clk_sam9x5_peripheral *periph;
struct clk_init_data init;
@@ -462,12 +462,12 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = flags;
if (chg_pid < 0) {
- init.flags = 0;
init.ops = &sam9x5_peripheral_ops;
} else {
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT;
+ init.flags |= CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
init.ops = &sam9x5_peripheral_chg_ops;
}
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index d757003004cb..0882ed01d5c2 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -668,7 +668,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
parent_rate, true);
- if (ret <= 0) {
+ if (ret < 0) {
hw = ERR_PTR(ret);
goto free;
}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 80720fd1a9cf..10193650429e 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -105,7 +105,7 @@ static const struct clk_ops system_ops = {
struct clk_hw * __init
at91_clk_register_system(struct regmap *regmap, const char *name,
- const char *parent_name, u8 id)
+ const char *parent_name, u8 id, unsigned long flags)
{
struct clk_system *sys;
struct clk_hw *hw;
@@ -123,7 +123,7 @@ at91_clk_register_system(struct regmap *regmap, const char *name,
init.ops = &system_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT | flags;
sys->id = id;
sys->hw.init = &init;
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index 85a964cb2d89..97f67e23ef80 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -493,18 +493,28 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
parent_name, id);
} else {
struct clk_range range = CLK_RANGE(0, 0);
+ unsigned long flags = 0;
of_at91_get_clk_range(periphclknp,
"atmel,clk-output-range",
&range);
+ /*
+ * mpddr_clk feeds DDR controller and is enabled by
+ * bootloader thus we need to keep it enabled in case
+ * there is no Linux consumer for it.
+ */
+ if (!strcmp(periphclknp->name, "mpddr_clk"))
+ flags = CLK_IS_CRITICAL;
+
hw = at91_clk_register_sam9x5_peripheral(regmap,
&pmc_pcr_lock,
&dt_pcr_layout,
name,
parent_name,
id, &range,
- INT_MIN);
+ INT_MIN,
+ flags);
}
if (IS_ERR(hw))
@@ -879,6 +889,8 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
return;
for_each_child_of_node(np, sysclknp) {
+ unsigned long flags = 0;
+
if (of_property_read_u32(sysclknp, "reg", &id))
continue;
@@ -887,7 +899,16 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
parent_name = of_clk_get_parent_name(sysclknp, 0);
- hw = at91_clk_register_system(regmap, name, parent_name, id);
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus
+ * we need to keep it enabled in case there is no Linux consumer
+ * for it.
+ */
+ if (!strcmp(sysclknp->name, "ddrck"))
+ flags = CLK_IS_CRITICAL;
+
+ hw = at91_clk_register_system(regmap, name, parent_name, id,
+ flags);
if (IS_ERR(hw))
continue;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index efe4975bddc3..1b3ca7dd9b57 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -199,7 +199,7 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range,
- int chg_pid);
+ int chg_pid, unsigned long flags);
struct clk_hw * __init
at91_clk_register_pll(struct regmap *regmap, const char *name,
@@ -242,7 +242,7 @@ at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
struct clk_hw * __init
at91_clk_register_system(struct regmap *regmap, const char *name,
- const char *parent_name, u8 id);
+ const char *parent_name, u8 id, unsigned long flags);
struct clk_hw * __init
at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 9ea4ce501bad..ac070db58195 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -75,9 +75,14 @@ static const struct clk_pcr_layout sam9x60_pcr_layout = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} sam9x60_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "pck0", .p = "prog0", .id = 8 },
{ .n = "pck1", .p = "prog1", .id = 9 },
@@ -86,6 +91,7 @@ static const struct {
static const struct {
char *n;
+ unsigned long flags;
u8 id;
} sam9x60_periphck[] = {
{ .n = "pioA_clk", .id = 2, },
@@ -132,7 +138,11 @@ static const struct {
{ .n = "pioD_clk", .id = 44, },
{ .n = "tcb1_clk", .id = 45, },
{ .n = "dbgu_clk", .id = 47, },
- { .n = "mpddr_clk", .id = 49, },
+ /*
+ * mpddr_clk feeds DDR controller and is enabled by bootloader thus we
+ * need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 49, .flags = CLK_IS_CRITICAL },
};
static const struct {
@@ -315,7 +325,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) {
hw = at91_clk_register_system(regmap, sam9x60_systemck[i].n,
sam9x60_systemck[i].p,
- sam9x60_systemck[i].id);
+ sam9x60_systemck[i].id,
+ sam9x60_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -328,7 +339,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
sam9x60_periphck[i].n,
"masterck_div",
sam9x60_periphck[i].id,
- &range, INT_MIN);
+ &range, INT_MIN,
+ sam9x60_periphck[i].flags);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 84156dc52bff..c0e3e1a4bbf3 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -40,9 +40,14 @@ static const struct clk_pcr_layout sama5d2_pcr_layout = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} sama5d2_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "lcdck", .p = "masterck_div", .id = 3 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "udpck", .p = "usbck", .id = 7 },
@@ -97,6 +102,7 @@ static const struct {
static const struct {
char *n;
+ unsigned long flags;
u8 id;
} sama5d2_periphck[] = {
{ .n = "dma0_clk", .id = 6, },
@@ -104,7 +110,11 @@ static const struct {
{ .n = "aes_clk", .id = 9, },
{ .n = "aesb_clk", .id = 10, },
{ .n = "sha_clk", .id = 12, },
- { .n = "mpddr_clk", .id = 13, },
+ /*
+ * mpddr_clk feeds DDR controller and is enabled by bootloader thus we
+ * need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 13, .flags = CLK_IS_CRITICAL },
{ .n = "matrix0_clk", .id = 15, },
{ .n = "sdmmc0_hclk", .id = 31, },
{ .n = "sdmmc1_hclk", .id = 32, },
@@ -302,7 +312,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) {
hw = at91_clk_register_system(regmap, sama5d2_systemck[i].n,
sama5d2_systemck[i].p,
- sama5d2_systemck[i].id);
+ sama5d2_systemck[i].id,
+ sama5d2_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -315,7 +326,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
sama5d2_periphck[i].n,
"masterck_div",
sama5d2_periphck[i].id,
- &range, INT_MIN);
+ &range, INT_MIN,
+ sama5d2_periphck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -329,7 +341,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
"h32mxck",
sama5d2_periph32ck[i].id,
&sama5d2_periph32ck[i].r,
- INT_MIN);
+ INT_MIN, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 7e93c6edf305..ad6068b884de 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -40,9 +40,14 @@ static const struct clk_pcr_layout sama5d3_pcr_layout = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} sama5d3_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "lcdck", .p = "masterck_div", .id = 3 },
{ .n = "smdck", .p = "smdclk", .id = 4 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
@@ -56,6 +61,7 @@ static const struct {
char *n;
u8 id;
struct clk_range r;
+ unsigned long flags;
} sama5d3_periphck[] = {
{ .n = "dbgu_clk", .id = 2, },
{ .n = "hsmc_clk", .id = 5, },
@@ -99,7 +105,11 @@ static const struct {
{ .n = "tdes_clk", .id = 44, },
{ .n = "trng_clk", .id = 45, },
{ .n = "fuse_clk", .id = 48, },
- { .n = "mpddr_clk", .id = 49, },
+ /*
+ * mpddr_clk feeds DDR controller and is enabled by bootloader thus we
+ * need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 49, .flags = CLK_IS_CRITICAL },
};
static void __init sama5d3_pmc_setup(struct device_node *np)
@@ -222,7 +232,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) {
hw = at91_clk_register_system(regmap, sama5d3_systemck[i].n,
sama5d3_systemck[i].p,
- sama5d3_systemck[i].id);
+ sama5d3_systemck[i].id,
+ sama5d3_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -236,7 +247,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
"masterck_div",
sama5d3_periphck[i].id,
&sama5d3_periphck[i].r,
- INT_MIN);
+ INT_MIN,
+ sama5d3_periphck[i].flags);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 1a14a9bce308..e876ec971a39 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -39,9 +39,14 @@ static const struct clk_pcr_layout sama5d4_pcr_layout = {
static const struct {
char *n;
char *p;
+ unsigned long flags;
u8 id;
} sama5d4_systemck[] = {
- { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ /*
+ * ddrck feeds DDR controller and is enabled by bootloader thus we need
+ * to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
{ .n = "lcdck", .p = "masterck_div", .id = 3 },
{ .n = "smdck", .p = "smdclk", .id = 4 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
@@ -103,12 +108,17 @@ static const struct {
static const struct {
char *n;
+ unsigned long flags;
u8 id;
} sama5d4_periphck[] = {
{ .n = "dma0_clk", .id = 8 },
{ .n = "cpkcc_clk", .id = 10 },
{ .n = "aesb_clk", .id = 13 },
- { .n = "mpddr_clk", .id = 16 },
+ /*
+ * mpddr_clk feeds DDR controller and is enabled by bootloader thus we
+ * need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 16, .flags = CLK_IS_CRITICAL },
{ .n = "matrix0_clk", .id = 18 },
{ .n = "vdec_clk", .id = 19 },
{ .n = "dma1_clk", .id = 50 },
@@ -245,7 +255,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) {
hw = at91_clk_register_system(regmap, sama5d4_systemck[i].n,
sama5d4_systemck[i].p,
- sama5d4_systemck[i].id);
+ sama5d4_systemck[i].id,
+ sama5d4_systemck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -258,7 +269,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
sama5d4_periphck[i].n,
"masterck_div",
sama5d4_periphck[i].id,
- &range, INT_MIN);
+ &range, INT_MIN,
+ sama5d4_periphck[i].flags);
if (IS_ERR(hw))
goto err_free;
@@ -271,7 +283,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
sama5d4_periph32ck[i].n,
"h32mxck",
sama5d4_periph32ck[i].id,
- &range, INT_MIN);
+ &range, INT_MIN, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 9a213ba9e58b..f135b662f1ff 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -1068,7 +1068,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) {
hw = at91_clk_register_system(regmap, sama7g5_systemck[i].n,
sama7g5_systemck[i].p,
- sama7g5_systemck[i].id);
+ sama7g5_systemck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
@@ -1083,7 +1083,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_periphck[i].id,
&sama7g5_periphck[i].r,
sama7g5_periphck[i].chgp ? 0 :
- INT_MIN);
+ INT_MIN, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index e1fda6ad5cd5..2334e6c334cf 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -198,10 +198,9 @@ static int i2s_pll_clk_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
-static int i2s_pll_clk_remove(struct platform_device *pdev)
+static void i2s_pll_clk_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
- return 0;
}
static const struct of_device_id i2s_pll_clk_id[] = {
@@ -216,7 +215,7 @@ static struct platform_driver i2s_pll_clk_driver = {
.of_match_table = i2s_pll_clk_id,
},
.probe = i2s_pll_clk_probe,
- .remove = i2s_pll_clk_remove,
+ .remove_new = i2s_pll_clk_remove,
};
module_platform_driver(i2s_pll_clk_driver);
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 90fb0e6ff573..242bf5d75bab 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -253,14 +253,8 @@ static int axs10x_pll_clk_probe(struct platform_device *pdev)
return ret;
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
- &pll_clk->hw);
-}
-
-static int axs10x_pll_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- return 0;
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &pll_clk->hw);
}
static void __init of_axs10x_pll_clk_setup(struct device_node *node)
@@ -332,7 +326,6 @@ static struct platform_driver axs10x_pll_clk_driver = {
.of_match_table = axs10x_pll_clk_id,
},
.probe = axs10x_pll_clk_probe,
- .remove = axs10x_pll_clk_remove,
};
builtin_platform_driver(axs10x_pll_clk_driver);
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 77266afb1c79..a972d763eb77 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -37,6 +37,15 @@ config CLK_BCM_63XX_GATE
Enable common clock framework support for Broadcom BCM63xx DSL SoCs
based on the MIPS architecture
+config CLK_BCM63268_TIMER
+ bool "Broadcom BCM63268 timer clock and reset support"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ default BMIPS_GENERIC
+ select RESET_CONTROLLER
+ help
+ Enable timer clock and reset support for Broadcom BCM63268 DSL SoCs
+ based on the MIPS architecture.
+
config CLK_BCM_KONA
bool "Broadcom Kona CCU clock support"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index edb66b44cb27..d0b6f4b1fb08 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CLK_BCM_63XX) += clk-bcm63xx.o
obj-$(CONFIG_CLK_BCM_63XX_GATE) += clk-bcm63xx-gate.o
+obj-$(CONFIG_CLK_BCM63268_TIMER) += clk-bcm63268-timer.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index e63a42618ac2..e4fbbf3c40fe 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -92,15 +92,13 @@ unregister_clk0:
return ret;
};
-static int clk_dvp_remove(struct platform_device *pdev)
+static void clk_dvp_remove(struct platform_device *pdev)
{
struct clk_dvp *dvp = platform_get_drvdata(pdev);
struct clk_hw_onecell_data *data = dvp->data;
clk_hw_unregister_gate(data->hws[1]);
clk_hw_unregister_gate(data->hws[0]);
-
- return 0;
}
static const struct of_device_id clk_dvp_dt_ids[] = {
@@ -111,7 +109,7 @@ MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
static struct platform_driver clk_dvp_driver = {
.probe = clk_dvp_probe,
- .remove = clk_dvp_remove,
+ .remove_new = clk_dvp_remove,
.driver = {
.name = "brcm2711-dvp",
.of_match_table = clk_dvp_dt_ids,
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 290a2846a86b..0fafa5cba442 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -69,4 +69,3 @@ builtin_platform_driver(bcm2835_aux_clk_driver);
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index e74fe6219d14..8dc476ef5bf9 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -2350,4 +2350,3 @@ builtin_platform_driver(bcm2835_clk_driver);
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 clock driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/bcm/clk-bcm63268-timer.c b/drivers/clk/bcm/clk-bcm63268-timer.c
new file mode 100644
index 000000000000..463710d272a1
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm63268-timer.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BCM63268 Timer Clock and Reset Controller Driver
+ *
+ * Copyright (C) 2023 Álvaro Fernández Rojas <noltari@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/bcm63268-clock.h>
+
+#define BCM63268_TIMER_RESET_SLEEP_MIN_US 10000
+#define BCM63268_TIMER_RESET_SLEEP_MAX_US 20000
+
+struct bcm63268_tclkrst_hw {
+ void __iomem *regs;
+ spinlock_t lock;
+
+ struct reset_controller_dev rcdev;
+ struct clk_hw_onecell_data data;
+};
+
+struct bcm63268_tclk_table_entry {
+ const char * const name;
+ u8 bit;
+};
+
+static const struct bcm63268_tclk_table_entry bcm63268_timer_clocks[] = {
+ {
+ .name = "ephy1",
+ .bit = BCM63268_TCLK_EPHY1,
+ }, {
+ .name = "ephy2",
+ .bit = BCM63268_TCLK_EPHY2,
+ }, {
+ .name = "ephy3",
+ .bit = BCM63268_TCLK_EPHY3,
+ }, {
+ .name = "gphy1",
+ .bit = BCM63268_TCLK_GPHY1,
+ }, {
+ .name = "dsl",
+ .bit = BCM63268_TCLK_DSL,
+ }, {
+ .name = "wakeon_ephy",
+ .bit = BCM63268_TCLK_WAKEON_EPHY,
+ }, {
+ .name = "wakeon_dsl",
+ .bit = BCM63268_TCLK_WAKEON_DSL,
+ }, {
+ .name = "fap1_pll",
+ .bit = BCM63268_TCLK_FAP1,
+ }, {
+ .name = "fap2_pll",
+ .bit = BCM63268_TCLK_FAP2,
+ }, {
+ .name = "uto_50",
+ .bit = BCM63268_TCLK_UTO_50,
+ }, {
+ .name = "uto_extin",
+ .bit = BCM63268_TCLK_UTO_EXTIN,
+ }, {
+ .name = "usb_ref",
+ .bit = BCM63268_TCLK_USB_REF,
+ }, {
+ /* sentinel */
+ }
+};
+
+static inline struct bcm63268_tclkrst_hw *
+to_bcm63268_timer_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct bcm63268_tclkrst_hw, rcdev);
+}
+
+static int bcm63268_timer_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct bcm63268_tclkrst_hw *reset = to_bcm63268_timer_reset(rcdev);
+ unsigned long flags;
+ uint32_t val;
+
+ spin_lock_irqsave(&reset->lock, flags);
+ val = __raw_readl(reset->regs);
+ if (assert)
+ val &= ~BIT(id);
+ else
+ val |= BIT(id);
+ __raw_writel(val, reset->regs);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int bcm63268_timer_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return bcm63268_timer_reset_update(rcdev, id, true);
+}
+
+static int bcm63268_timer_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return bcm63268_timer_reset_update(rcdev, id, false);
+}
+
+static int bcm63268_timer_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ bcm63268_timer_reset_update(rcdev, id, true);
+ usleep_range(BCM63268_TIMER_RESET_SLEEP_MIN_US,
+ BCM63268_TIMER_RESET_SLEEP_MAX_US);
+
+ bcm63268_timer_reset_update(rcdev, id, false);
+ /*
+ * Ensure the component is taken out of reset state by sleeping also after
+ * deasserting the reset. Otherwise, the component may not be ready
+ * for operation.
+ */
+ usleep_range(BCM63268_TIMER_RESET_SLEEP_MIN_US,
+ BCM63268_TIMER_RESET_SLEEP_MAX_US);
+
+ return 0;
+}
+
+static int bcm63268_timer_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm63268_tclkrst_hw *reset = to_bcm63268_timer_reset(rcdev);
+
+ return !(__raw_readl(reset->regs) & BIT(id));
+}
+
+static const struct reset_control_ops bcm63268_timer_reset_ops = {
+ .assert = bcm63268_timer_reset_assert,
+ .deassert = bcm63268_timer_reset_deassert,
+ .reset = bcm63268_timer_reset_reset,
+ .status = bcm63268_timer_reset_status,
+};
+
+static int bcm63268_tclk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct bcm63268_tclk_table_entry *entry;
+ struct bcm63268_tclkrst_hw *hw;
+ struct clk_hw *clk;
+ u8 maxbit = 0;
+ int i, ret;
+
+ for (entry = bcm63268_timer_clocks; entry->name; entry++)
+ maxbit = max(maxbit, entry->bit);
+ maxbit++;
+
+ hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit),
+ GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ spin_lock_init(&hw->lock);
+
+ hw->data.num = maxbit;
+ for (i = 0; i < maxbit; i++)
+ hw->data.hws[i] = ERR_PTR(-ENODEV);
+
+ hw->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hw->regs))
+ return PTR_ERR(hw->regs);
+
+ for (entry = bcm63268_timer_clocks; entry->name; entry++) {
+ clk = devm_clk_hw_register_gate(dev, entry->name, NULL, 0,
+ hw->regs, entry->bit,
+ CLK_GATE_BIG_ENDIAN,
+ &hw->lock);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ hw->data.hws[entry->bit] = clk;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &hw->data);
+ if (ret)
+ return ret;
+
+ hw->rcdev.of_node = dev->of_node;
+ hw->rcdev.ops = &bcm63268_timer_reset_ops;
+
+ ret = devm_reset_controller_register(dev, &hw->rcdev);
+ if (ret)
+ dev_err(dev, "Failed to register reset controller\n");
+
+ return 0;
+}
+
+static const struct of_device_id bcm63268_tclk_dt_ids[] = {
+ { .compatible = "brcm,bcm63268-timer-clocks" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm63268_tclk = {
+ .probe = bcm63268_tclk_probe,
+ .driver = {
+ .name = "bcm63268-timer-clock",
+ .of_match_table = bcm63268_tclk_dt_ids,
+ },
+};
+builtin_platform_driver(bcm63268_tclk);
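The new node is both a clock and a reset provider backed by the same register. A consumer would typically acquire and use them as below; this is a hedged sketch with a hypothetical consumer and clock-names entry, not part of this patch:

#include <linux/clk.h>
#include <linux/reset.h>

static int foo_phy_power_on(struct device *dev)
{
	struct reset_control *rst;
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "ephy1");	/* a gate bit in the TCLK register */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Pulses the bit low and back high with the 10-20 ms sleeps above. */
	return reset_control_reset(rst);
}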
diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
index 89297c57881e..0769f98767da 100644
--- a/drivers/clk/bcm/clk-bcm63xx-gate.c
+++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
@@ -541,7 +541,7 @@ out_err:
return ret;
}
-static int clk_bcm63xx_remove(struct platform_device *pdev)
+static void clk_bcm63xx_remove(struct platform_device *pdev)
{
struct clk_bcm63xx_hw *hw = platform_get_drvdata(pdev);
int i;
@@ -552,8 +552,6 @@ static int clk_bcm63xx_remove(struct platform_device *pdev)
if (!IS_ERR(hw->data.hws[i]))
clk_hw_unregister_gate(hw->data.hws[i]);
}
-
- return 0;
}
static const struct of_device_id clk_bcm63xx_dt_ids[] = {
@@ -570,7 +568,7 @@ static const struct of_device_id clk_bcm63xx_dt_ids[] = {
static struct platform_driver clk_bcm63xx = {
.probe = clk_bcm63xx_probe,
- .remove = clk_bcm63xx_remove,
+ .remove_new = clk_bcm63xx_remove,
.driver = {
.name = "bcm63xx-clock",
.of_match_table = clk_bcm63xx_dt_ids,
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index ce2f93479736..eb399a4d141b 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -439,13 +439,11 @@ static int raspberrypi_clk_probe(struct platform_device *pdev)
return 0;
}
-static int raspberrypi_clk_remove(struct platform_device *pdev)
+static void raspberrypi_clk_remove(struct platform_device *pdev)
{
struct raspberrypi_clk *rpi = platform_get_drvdata(pdev);
platform_device_unregister(rpi->cpufreq);
-
- return 0;
}
static const struct of_device_id raspberrypi_clk_match[] = {
@@ -460,7 +458,7 @@ static struct platform_driver raspberrypi_clk_driver = {
.of_match_table = raspberrypi_clk_match,
},
.probe = raspberrypi_clk_probe,
- .remove = raspberrypi_clk_remove,
+ .remove_new = raspberrypi_clk_remove,
};
module_platform_driver(raspberrypi_clk_driver);
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 9c3305bcb27a..a094a2601a37 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -15,7 +15,11 @@
#include "clk-aspeed.h"
-#define ASPEED_G6_NUM_CLKS 71
+/*
+ * This includes the gates (configured from aspeed_g6_gates), plus the
+ * explicitly-configured clocks (ASPEED_CLK_HPLL and up).
+ */
+#define ASPEED_G6_NUM_CLKS 72
#define ASPEED_G6_SILICON_REV 0x014
#define CHIP_REVISION_ID GENMASK(23, 16)
@@ -32,6 +36,20 @@
#define ASPEED_G6_CLK_SELECTION1 0x300
#define ASPEED_G6_CLK_SELECTION2 0x304
#define ASPEED_G6_CLK_SELECTION4 0x310
+#define ASPEED_G6_CLK_SELECTION5 0x314
+#define I3C_CLK_SELECTION_SHIFT 31
+#define I3C_CLK_SELECTION BIT(31)
+#define I3C_CLK_SELECT_HCLK (0 << I3C_CLK_SELECTION_SHIFT)
+#define I3C_CLK_SELECT_APLL_DIV (1 << I3C_CLK_SELECTION_SHIFT)
+#define APLL_DIV_SELECTION_SHIFT 28
+#define APLL_DIV_SELECTION GENMASK(30, 28)
+#define APLL_DIV_2 (0b001 << APLL_DIV_SELECTION_SHIFT)
+#define APLL_DIV_3 (0b010 << APLL_DIV_SELECTION_SHIFT)
+#define APLL_DIV_4 (0b011 << APLL_DIV_SELECTION_SHIFT)
+#define APLL_DIV_5 (0b100 << APLL_DIV_SELECTION_SHIFT)
+#define APLL_DIV_6 (0b101 << APLL_DIV_SELECTION_SHIFT)
+#define APLL_DIV_7 (0b110 << APLL_DIV_SELECTION_SHIFT)
+#define APLL_DIV_8 (0b111 << APLL_DIV_SELECTION_SHIFT)
#define ASPEED_HPLL_PARAM 0x200
#define ASPEED_APLL_PARAM 0x210
@@ -55,6 +73,27 @@ static void __iomem *scu_g6_base;
static u8 soc_rev;
/*
+ * The majority of the clocks in the system are gates paired with a reset
+ * controller that holds the IP in reset; this is represented by the @reset_idx
+ * member of entries here.
+ *
+ * This borrows from clk_hw_register_gate, but registers two 'gates', one
+ * to control the clock enable register and the other to control the reset
+ * IP. This allows us to enforce the ordering:
+ *
+ * 1. Place IP in reset
+ * 2. Enable clock
+ * 3. Delay
+ * 4. Release reset
+ *
+ * Consequently, if reset_idx is set, reset control is implicit: the clock
+ * consumer does not need its own reset handling, as enabling the clock will
+ * also deassert reset.
+ *
+ * There are some gates that do not have an associated reset; these are
+ * handled by using -1 as the index for the reset, and the consumer must
+ * explicitly assert/deassert reset lines as required.
+ *
* Clocks marked with CLK_IS_CRITICAL:
*
* ref0 and ref1 are essential for the SoC to operate
@@ -97,14 +136,13 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
[ASPEED_CLK_GATE_LHCCLK] = { 37, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
/* Reserved 38 RSA: no longer used */
/* Reserved 39 */
- [ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", NULL, 0 }, /* I3C0 */
- [ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", NULL, 0 }, /* I3C1 */
- [ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", NULL, 0 }, /* I3C2 */
- [ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", NULL, 0 }, /* I3C3 */
- [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", NULL, 0 }, /* I3C4 */
- [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", NULL, 0 }, /* I3C5 */
- [ASPEED_CLK_GATE_I3C6CLK] = { 46, 46, "i3c6clk-gate", NULL, 0 }, /* I3C6 */
- [ASPEED_CLK_GATE_I3C7CLK] = { 47, 47, "i3c7clk-gate", NULL, 0 }, /* I3C7 */
+ [ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", "i3cclk", 0 }, /* I3C0 */
+ [ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", "i3cclk", 0 }, /* I3C1 */
+ [ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", "i3cclk", 0 }, /* I3C2 */
+ [ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", "i3cclk", 0 }, /* I3C3 */
+ [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", "i3cclk", 0 }, /* I3C4 */
+ [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", "i3cclk", 0 }, /* I3C5 */
+ /* Reserved: 46 & 47 */
[ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
[ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
[ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
@@ -652,6 +690,9 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
const struct aspeed_gate_data *gd = &aspeed_g6_gates[i];
u32 gate_flags;
+ if (!gd->name)
+ continue;
+
/*
* Special case: the USB port 1 clock (bit 14) is always
* working the opposite way from the other ones.
@@ -772,6 +813,14 @@ static void __init aspeed_g6_cc(struct regmap *map)
/* USB 2.0 port1 phy 40MHz clock */
hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000);
aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw;
+
+ /* i3c clock: source from apll, divide by 8 */
+ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5,
+ I3C_CLK_SELECTION | APLL_DIV_SELECTION,
+ I3C_CLK_SELECT_APLL_DIV | APLL_DIV_8);
+
+ hw = clk_hw_register_fixed_factor(NULL, "i3cclk", "apll", 0, 1, 8);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_I3C] = hw;
};
static void __init aspeed_g6_cc_init(struct device_node *np)
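The enforced ordering spelled out in the comment (assert reset, ungate the clock, wait, release reset) is, conceptually, the sequence below. This is only an illustration with made-up register accessors; the real aspeed_g6 gate op drives the SCU clock-stop and reset registers through regmap:

#include <linux/delay.h>
#include <linux/io.h>

/* Conceptual sketch only; reset_reg/gate_reg stand in for SCU registers. */
static void example_gate_enable(void __iomem *reset_reg, u32 rst_bit,
				void __iomem *gate_reg, u32 gate_bit)
{
	writel(readl(reset_reg) | rst_bit, reset_reg);	/* 1. hold IP in reset */
	writel(readl(gate_reg) & ~gate_bit, gate_reg);	/* 2. enable the clock */
	udelay(10);					/* 3. let it settle */
	writel(readl(reset_reg) & ~rst_bit, reset_reg);	/* 4. release the reset */
}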
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index ac6ff736ac8f..a04a3d38c76e 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -553,15 +553,8 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (ret)
return ret;
- return of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_simple_get,
- &axi_clkgen->clk_hw);
-}
-
-static int axi_clkgen_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
-
- return 0;
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &axi_clkgen->clk_hw);
}
static const struct of_device_id axi_clkgen_ids[] = {
@@ -583,7 +576,6 @@ static struct platform_driver axi_clkgen_driver = {
.of_match_table = axi_clkgen_ids,
},
.probe = axi_clkgen_probe,
- .remove = axi_clkgen_remove,
};
module_platform_driver(axi_clkgen_driver);
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 07e80fe8c310..1afcfdf2e6f9 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -569,18 +569,11 @@ static int axmclk_probe(struct platform_device *pdev)
return ret;
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_axmclk_get, NULL);
-}
-
-static int axmclk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- return 0;
+ return devm_of_clk_add_hw_provider(dev, of_clk_axmclk_get, NULL);
}
static struct platform_driver axmclk_driver = {
.probe = axmclk_probe,
- .remove = axmclk_remove,
.driver = {
.name = "clk-axm5516",
.of_match_table = axmclk_match_table,
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index fad78a22218e..2a19e50fff68 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -949,4 +949,3 @@ module_platform_driver(bm1880_clk_driver);
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Clock driver for Bitmain BM1880 SoC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index 1449d0537674..d8bee8180a6b 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -661,16 +661,10 @@ static int cdce706_probe(struct i2c_client *client)
ret = cdce706_register_clkouts(cdce);
if (ret < 0)
return ret;
- return of_clk_add_hw_provider(client->dev.of_node, of_clk_cdce_get,
- cdce);
+ return devm_of_clk_add_hw_provider(&client->dev, of_clk_cdce_get,
+ cdce);
}
-static void cdce706_remove(struct i2c_client *client)
-{
- of_clk_del_provider(client->dev.of_node);
-}
-
-
#ifdef CONFIG_OF
static const struct of_device_id cdce706_dt_match[] = {
{ .compatible = "ti,cdce706" },
@@ -691,7 +685,6 @@ static struct i2c_driver cdce706_i2c_driver = {
.of_match_table = of_match_ptr(cdce706_dt_match),
},
.probe_new = cdce706_probe,
- .remove = cdce706_remove,
.id_table = cdce706_id,
};
module_i2c_driver(cdce706_i2c_driver);
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 2ef819606c41..1a4e6340f95c 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
pclk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get parent clock %d for %pOF\n",
@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
if (rc < 0)
goto err;
if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
rc = 0;
goto err;
}
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get clock %d for %pOF\n",
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index f734e34735a9..b3e66202b942 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -297,14 +297,12 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
of_fixed_factor_clk_setup);
-static int of_fixed_factor_clk_remove(struct platform_device *pdev)
+static void of_fixed_factor_clk_remove(struct platform_device *pdev)
{
struct clk_hw *clk = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
clk_hw_unregister_fixed_factor(clk);
-
- return 0;
}
static int of_fixed_factor_clk_probe(struct platform_device *pdev)
@@ -336,7 +334,7 @@ static struct platform_driver of_fixed_factor_clk_driver = {
.of_match_table = of_fixed_factor_clk_ids,
},
.probe = of_fixed_factor_clk_probe,
- .remove = of_fixed_factor_clk_remove,
+ .remove_new = of_fixed_factor_clk_remove,
};
builtin_platform_driver(of_fixed_factor_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 5225d17d6b3f..0e08cb22c196 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -71,14 +71,12 @@ static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
return 0;
}
-static int of_fixed_mmio_clk_remove(struct platform_device *pdev)
+static void of_fixed_mmio_clk_remove(struct platform_device *pdev)
{
struct clk_hw *clk = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
clk_hw_unregister_fixed_rate(clk);
-
- return 0;
}
static const struct of_device_id of_fixed_mmio_clk_ids[] = {
@@ -93,10 +91,9 @@ static struct platform_driver of_fixed_mmio_clk_driver = {
.of_match_table = of_fixed_mmio_clk_ids,
},
.probe = of_fixed_mmio_clk_probe,
- .remove = of_fixed_mmio_clk_remove,
+ .remove_new = of_fixed_mmio_clk_remove,
};
module_platform_driver(of_fixed_mmio_clk_driver);
MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 7d775954e26d..3481eb8cdeb3 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -196,14 +196,12 @@ void __init of_fixed_clk_setup(struct device_node *node)
}
CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
-static int of_fixed_clk_remove(struct platform_device *pdev)
+static void of_fixed_clk_remove(struct platform_device *pdev)
{
struct clk_hw *hw = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
clk_hw_unregister_fixed_rate(hw);
-
- return 0;
}
static int of_fixed_clk_probe(struct platform_device *pdev)
@@ -234,7 +232,7 @@ static struct platform_driver of_fixed_clk_driver = {
.of_match_table = of_fixed_clk_ids,
},
.probe = of_fixed_clk_probe,
- .remove = of_fixed_clk_remove,
+ .remove_new = of_fixed_clk_remove,
};
builtin_platform_driver(of_fixed_clk_driver);
#endif
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 6affe3565025..479297763e70 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -71,6 +71,7 @@ static void clk_fd_get_div(struct clk_hw *hw, struct u32_fract *fract)
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long flags = 0;
unsigned long m, n;
+ u32 mmask, nmask;
u32 val;
if (fd->lock)
@@ -85,8 +86,11 @@ static void clk_fd_get_div(struct clk_hw *hw, struct u32_fract *fract)
else
__release(fd->lock);
- m = (val & fd->mmask) >> fd->mshift;
- n = (val & fd->nmask) >> fd->nshift;
+ mmask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
+ nmask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
+
+ m = (val & mmask) >> fd->mshift;
+ n = (val & nmask) >> fd->nshift;
if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
m++;
@@ -166,6 +170,7 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long flags = 0;
unsigned long m, n;
+ u32 mmask, nmask;
u32 val;
rational_best_approximation(rate, parent_rate,
@@ -182,8 +187,11 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(fd->lock);
+ mmask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
+ nmask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
+
val = clk_fd_readl(fd);
- val &= ~(fd->mmask | fd->nmask);
+ val &= ~(mmask | nmask);
val |= (m << fd->mshift) | (n << fd->nshift);
clk_fd_writel(fd, val);
@@ -260,10 +268,8 @@ struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
fd->reg = reg;
fd->mshift = mshift;
fd->mwidth = mwidth;
- fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
fd->nshift = nshift;
fd->nwidth = nwidth;
- fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
fd->flags = clk_divider_flags;
fd->lock = lock;
fd->hw.init = &init;
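With the cached fd->mmask/fd->nmask fields gone, the masks are rebuilt from the shift/width pair wherever they are needed. A worked example with illustrative values:

/* mshift = 16, mwidth = 8:
 *   GENMASK(mwidth - 1, 0) = 0x000000ff
 *   << mshift              = 0x00ff0000
 * so (val & mmask) >> mshift extracts the same field the old fd->mmask did.
 */
u32 mmask = GENMASK(8 - 1, 0) << 16;	/* 0x00ff0000 */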
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
index 6238fcea0467..ee5baf993ff2 100644
--- a/drivers/clk/clk-fsl-sai.c
+++ b/drivers/clk/clk-fsl-sai.c
@@ -88,5 +88,4 @@ module_platform_driver(fsl_sai_clk_driver);
MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
-MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:fsl-sai-clk");
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 60007b508590..33b48ea5ea3d 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -346,14 +346,8 @@ static int hsdk_pll_clk_probe(struct platform_device *pdev)
return ret;
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
- &pll_clk->hw);
-}
-
-static int hsdk_pll_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- return 0;
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &pll_clk->hw);
}
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
@@ -432,6 +426,5 @@ static struct platform_driver hsdk_pll_clk_driver = {
.of_match_table = hsdk_pll_clk_id,
},
.probe = hsdk_pll_clk_probe,
- .remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);
diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
index 67a7cb3503c3..4eed667eddaf 100644
--- a/drivers/clk/clk-k210.c
+++ b/drivers/clk/clk-k210.c
@@ -495,7 +495,7 @@ static unsigned long k210_pll_get_rate(struct clk_hw *hw,
f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
- return (u64)parent_rate * f / (r * od);
+ return div_u64((u64)parent_rate * f, r * od);
}
static const struct clk_ops k210_pll_ops = {
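The open-coded division is replaced because a plain / on a u64 dividend is fine on the K210's 64-bit cores but, on 32-bit COMPILE_TEST builds, typically makes the compiler emit a libgcc helper (__udivdi3) that the kernel does not link against; 64-by-32 divisions therefore go through div_u64(). A sketch of the same arithmetic with hypothetical field values:

#include <linux/math64.h>

unsigned long parent_rate = 26000000;	/* hypothetical 26 MHz input */
u32 f = 60, r = 1, od = 2;		/* hypothetical decoded PLL fields */
unsigned long rate = div_u64((u64)parent_rate * f, r * od);	/* 780000000 */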
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 57485356de4c..afdfee3b365f 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -1522,8 +1522,8 @@ static int lmk04832_probe(struct spi_device *spi)
}
lmk->clk_data->num = info->num_channels;
- ret = of_clk_add_hw_provider(lmk->dev->of_node, of_clk_hw_onecell_get,
- lmk->clk_data);
+ ret = devm_of_clk_add_hw_provider(lmk->dev, of_clk_hw_onecell_get,
+ lmk->clk_data);
if (ret) {
dev_err(lmk->dev, "failed to add provider (%d)\n", ret);
goto err_disable_vco;
@@ -1547,7 +1547,6 @@ static void lmk04832_remove(struct spi_device *spi)
struct lmk04832 *lmk = spi_get_drvdata(spi);
clk_disable_unprepare(lmk->oscin);
- of_clk_del_provider(spi->dev.of_node);
}
static const struct spi_device_id lmk04832_id[] = {
diff --git a/drivers/clk/clk-loongson1.c b/drivers/clk/clk-loongson1.c
new file mode 100644
index 000000000000..a3467aa6790f
--- /dev/null
+++ b/drivers/clk/clk-loongson1.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Clock driver for Loongson-1 SoC
+ *
+ * Copyright (C) 2012-2023 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/printk.h>
+
+#include <dt-bindings/clock/loongson,ls1x-clk.h>
+
+/* Loongson 1 Clock Register Definitions */
+#define CLK_PLL_FREQ 0x0
+#define CLK_PLL_DIV 0x4
+
+static DEFINE_SPINLOCK(ls1x_clk_div_lock);
+
+struct ls1x_clk_pll_data {
+ u32 fixed;
+ u8 shift;
+ u8 int_shift;
+ u8 int_width;
+ u8 frac_shift;
+ u8 frac_width;
+};
+
+struct ls1x_clk_div_data {
+ u8 shift;
+ u8 width;
+ unsigned long flags;
+ const struct clk_div_table *table;
+ u8 bypass_shift;
+ u8 bypass_inv;
+ spinlock_t *lock; /* protect access to DIV registers */
+};
+
+struct ls1x_clk {
+ void __iomem *reg;
+ unsigned int offset;
+ struct clk_hw hw;
+ const void *data;
+};
+
+#define to_ls1x_clk(_hw) container_of(_hw, struct ls1x_clk, hw)
+
+static inline unsigned long ls1x_pll_rate_part(unsigned int val,
+ unsigned int shift,
+ unsigned int width)
+{
+ return (val & GENMASK(shift + width, shift)) >> shift;
+}
+
+static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
+ const struct ls1x_clk_pll_data *d = ls1x_clk->data;
+ u32 val, rate;
+
+ val = readl(ls1x_clk->reg);
+ rate = d->fixed;
+ rate += ls1x_pll_rate_part(val, d->int_shift, d->int_width);
+ if (d->frac_width)
+ rate += ls1x_pll_rate_part(val, d->frac_shift, d->frac_width);
+ rate *= parent_rate;
+ rate >>= d->shift;
+
+ return rate;
+}
+
+static const struct clk_ops ls1x_pll_clk_ops = {
+ .recalc_rate = ls1x_pll_recalc_rate,
+};
+
+static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
+ const struct ls1x_clk_div_data *d = ls1x_clk->data;
+ unsigned int val;
+
+ val = readl(ls1x_clk->reg) >> d->shift;
+ val &= clk_div_mask(d->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, d->table,
+ d->flags, d->width);
+}
+
+static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
+ const struct ls1x_clk_div_data *d = ls1x_clk->data;
+
+ return divider_round_rate(hw, rate, prate, d->table,
+ d->width, d->flags);
+}
+
+static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
+ const struct ls1x_clk_div_data *d = ls1x_clk->data;
+ int val, div_val;
+ unsigned long flags = 0;
+
+ div_val = divider_get_val(rate, parent_rate, d->table,
+ d->width, d->flags);
+ if (div_val < 0)
+ return div_val;
+
+ spin_lock_irqsave(d->lock, flags);
+
+ /* Bypass the clock */
+ val = readl(ls1x_clk->reg);
+ if (d->bypass_inv)
+ val &= ~BIT(d->bypass_shift);
+ else
+ val |= BIT(d->bypass_shift);
+ writel(val, ls1x_clk->reg);
+
+ val = readl(ls1x_clk->reg);
+ val &= ~(clk_div_mask(d->width) << d->shift);
+ val |= (u32)div_val << d->shift;
+ writel(val, ls1x_clk->reg);
+
+ /* Restore the clock */
+ val = readl(ls1x_clk->reg);
+ if (d->bypass_inv)
+ val |= BIT(d->bypass_shift);
+ else
+ val &= ~BIT(d->bypass_shift);
+ writel(val, ls1x_clk->reg);
+
+ spin_unlock_irqrestore(d->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops ls1x_clk_divider_ops = {
+ .recalc_rate = ls1x_divider_recalc_rate,
+ .round_rate = ls1x_divider_round_rate,
+ .set_rate = ls1x_divider_set_rate,
+};
+
+#define LS1X_CLK_PLL(_name, _offset, _fixed, _shift, \
+ f_shift, f_width, i_shift, i_width) \
+struct ls1x_clk _name = { \
+ .offset = (_offset), \
+ .data = &(const struct ls1x_clk_pll_data) { \
+ .fixed = (_fixed), \
+ .shift = (_shift), \
+ .int_shift = (i_shift), \
+ .int_width = (i_width), \
+ .frac_shift = (f_shift), \
+ .frac_width = (f_width), \
+ }, \
+ .hw.init = &(const struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &ls1x_pll_clk_ops, \
+ .parent_data = &(const struct clk_parent_data) { \
+ .fw_name = "xtal", \
+ .name = "xtal", \
+ .index = -1, \
+ }, \
+ .num_parents = 1, \
+ }, \
+}
+
+#define LS1X_CLK_DIV(_name, _pname, _offset, _shift, _width, \
+ _table, _bypass_shift, _bypass_inv, _flags) \
+struct ls1x_clk _name = { \
+ .offset = (_offset), \
+ .data = &(const struct ls1x_clk_div_data){ \
+ .shift = (_shift), \
+ .width = (_width), \
+ .table = (_table), \
+ .flags = (_flags), \
+ .bypass_shift = (_bypass_shift), \
+ .bypass_inv = (_bypass_inv), \
+ .lock = &ls1x_clk_div_lock, \
+ }, \
+ .hw.init = &(const struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &ls1x_clk_divider_ops, \
+ .parent_hws = (const struct clk_hw *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_GET_RATE_NOCACHE, \
+ }, \
+}
+
+static LS1X_CLK_PLL(ls1b_clk_pll, CLK_PLL_FREQ, 12, 1, 0, 5, 0, 0);
+static LS1X_CLK_DIV(ls1b_clk_cpu, &ls1b_clk_pll.hw, CLK_PLL_DIV,
+ 20, 4, NULL, 8, 0,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
+static LS1X_CLK_DIV(ls1b_clk_dc, &ls1b_clk_pll.hw, CLK_PLL_DIV,
+ 26, 4, NULL, 12, 0, CLK_DIVIDER_ONE_BASED);
+static LS1X_CLK_DIV(ls1b_clk_ahb, &ls1b_clk_pll.hw, CLK_PLL_DIV,
+ 14, 4, NULL, 10, 0, CLK_DIVIDER_ONE_BASED);
+static CLK_FIXED_FACTOR(ls1b_clk_apb, "ls1b_clk_apb", "ls1b_clk_ahb", 2, 1,
+ CLK_SET_RATE_PARENT);
+
+static struct clk_hw_onecell_data ls1b_clk_hw_data = {
+ .hws = {
+ [LS1X_CLKID_PLL] = &ls1b_clk_pll.hw,
+ [LS1X_CLKID_CPU] = &ls1b_clk_cpu.hw,
+ [LS1X_CLKID_DC] = &ls1b_clk_dc.hw,
+ [LS1X_CLKID_AHB] = &ls1b_clk_ahb.hw,
+ [LS1X_CLKID_APB] = &ls1b_clk_apb.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static const struct clk_div_table ls1c_ahb_div_table[] = {
+ [0] = { .val = 0, .div = 2 },
+ [1] = { .val = 1, .div = 4 },
+ [2] = { .val = 2, .div = 3 },
+ [3] = { .val = 3, .div = 3 },
+ [4] = { /* sentinel */ }
+};
+
+static LS1X_CLK_PLL(ls1c_clk_pll, CLK_PLL_FREQ, 0, 2, 8, 8, 16, 8);
+static LS1X_CLK_DIV(ls1c_clk_cpu, &ls1c_clk_pll.hw, CLK_PLL_DIV,
+ 8, 7, NULL, 0, 1,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
+static LS1X_CLK_DIV(ls1c_clk_dc, &ls1c_clk_pll.hw, CLK_PLL_DIV,
+ 24, 7, NULL, 4, 1, CLK_DIVIDER_ONE_BASED);
+static LS1X_CLK_DIV(ls1c_clk_ahb, &ls1c_clk_cpu.hw, CLK_PLL_FREQ,
+ 0, 2, ls1c_ahb_div_table, 0, 0, CLK_DIVIDER_ALLOW_ZERO);
+static CLK_FIXED_FACTOR(ls1c_clk_apb, "ls1c_clk_apb", "ls1c_clk_ahb", 1, 1,
+ CLK_SET_RATE_PARENT);
+
+static struct clk_hw_onecell_data ls1c_clk_hw_data = {
+ .hws = {
+ [LS1X_CLKID_PLL] = &ls1c_clk_pll.hw,
+ [LS1X_CLKID_CPU] = &ls1c_clk_cpu.hw,
+ [LS1X_CLKID_DC] = &ls1c_clk_dc.hw,
+ [LS1X_CLKID_AHB] = &ls1c_clk_ahb.hw,
+ [LS1X_CLKID_APB] = &ls1c_clk_apb.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static void __init ls1x_clk_init(struct device_node *np,
+ struct clk_hw_onecell_data *hw_data)
+{
+ struct ls1x_clk *ls1x_clk;
+ void __iomem *reg;
+ int i, ret;
+
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ pr_err("Unable to map base for %pOF\n", np);
+ return;
+ }
+
+ for (i = 0; i < hw_data->num; i++) {
+ /* array might be sparse */
+ if (!hw_data->hws[i])
+ continue;
+
+ if (i != LS1X_CLKID_APB) {
+ ls1x_clk = to_ls1x_clk(hw_data->hws[i]);
+ ls1x_clk->reg = reg + ls1x_clk->offset;
+ }
+
+ ret = of_clk_hw_register(np, hw_data->hws[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
+ if (!ret)
+ return;
+
+err:
+ pr_err("Failed to register %pOF\n", np);
+
+ while (--i >= 0)
+ clk_hw_unregister(hw_data->hws[i]);
+
+ iounmap(reg);
+}
+
+static void __init ls1b_clk_init(struct device_node *np)
+{
+ return ls1x_clk_init(np, &ls1b_clk_hw_data);
+}
+
+static void __init ls1c_clk_init(struct device_node *np)
+{
+ return ls1x_clk_init(np, &ls1c_clk_hw_data);
+}
+
+CLK_OF_DECLARE(ls1b_clk, "loongson,ls1b-clk", ls1b_clk_init);
+CLK_OF_DECLARE(ls1c_clk, "loongson,ls1c-clk", ls1c_clk_init);
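For the LS1B PLL entry (fixed = 12, shift = 1, fractional field only), ls1x_pll_recalc_rate() reduces to a single multiply-and-shift. With hypothetical register contents, purely to illustrate the arithmetic:

unsigned long parent_rate = 33000000;	/* hypothetical external xtal */
u32 frac = 3;				/* hypothetical field from CLK_PLL_FREQ */
unsigned long rate = (12 + frac) * parent_rate >> 1;	/* 247500000 Hz */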
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
new file mode 100644
index 000000000000..70ae1dd2e474
--- /dev/null
+++ b/drivers/clk/clk-loongson2.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
+
+#define LOONGSON2_PLL_MULT_SHIFT 32
+#define LOONGSON2_PLL_MULT_WIDTH 10
+#define LOONGSON2_PLL_DIV_SHIFT 26
+#define LOONGSON2_PLL_DIV_WIDTH 6
+#define LOONGSON2_APB_FREQSCALE_SHIFT 20
+#define LOONGSON2_APB_FREQSCALE_WIDTH 3
+#define LOONGSON2_USB_FREQSCALE_SHIFT 16
+#define LOONGSON2_USB_FREQSCALE_WIDTH 3
+#define LOONGSON2_SATA_FREQSCALE_SHIFT 12
+#define LOONGSON2_SATA_FREQSCALE_WIDTH 3
+#define LOONGSON2_BOOT_FREQSCALE_SHIFT 8
+#define LOONGSON2_BOOT_FREQSCALE_WIDTH 3
+
+static void __iomem *loongson2_pll_base;
+
+static const struct clk_parent_data pdata[] = {
+ { .fw_name = "ref_100m",},
+};
+
+static struct clk_hw *loongson2_clk_register(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ const struct clk_ops *ops,
+ unsigned long flags)
+{
+ int ret;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = flags;
+ init.num_parents = 1;
+
+ if (!parent_name)
+ init.parent_data = pdata;
+ else
+ init.parent_names = &parent_name;
+
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ hw = ERR_PTR(ret);
+
+ return hw;
+}
+
+static unsigned long loongson2_calc_pll_rate(int offset, unsigned long rate)
+{
+ u64 val;
+ u32 mult, div;
+
+ val = readq(loongson2_pll_base + offset);
+
+ mult = (val >> LOONGSON2_PLL_MULT_SHIFT) &
+ clk_div_mask(LOONGSON2_PLL_MULT_WIDTH);
+ div = (val >> LOONGSON2_PLL_DIV_SHIFT) &
+ clk_div_mask(LOONGSON2_PLL_DIV_WIDTH);
+
+ return div_u64((u64)rate * mult, div);
+}
+
+static unsigned long loongson2_node_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_pll_rate(0x0, parent_rate);
+}
+
+static const struct clk_ops loongson2_node_clk_ops = {
+ .recalc_rate = loongson2_node_recalc_rate,
+};
+
+static unsigned long loongson2_ddr_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_pll_rate(0x10, parent_rate);
+}
+
+static const struct clk_ops loongson2_ddr_clk_ops = {
+ .recalc_rate = loongson2_ddr_recalc_rate,
+};
+
+static unsigned long loongson2_dc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_pll_rate(0x20, parent_rate);
+}
+
+static const struct clk_ops loongson2_dc_clk_ops = {
+ .recalc_rate = loongson2_dc_recalc_rate,
+};
+
+static unsigned long loongson2_pix0_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_pll_rate(0x30, parent_rate);
+}
+
+static const struct clk_ops loongson2_pix0_clk_ops = {
+ .recalc_rate = loongson2_pix0_recalc_rate,
+};
+
+static unsigned long loongson2_pix1_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_pll_rate(0x40, parent_rate);
+}
+
+static const struct clk_ops loongson2_pix1_clk_ops = {
+ .recalc_rate = loongson2_pix1_recalc_rate,
+};
+
+static unsigned long loongson2_calc_rate(unsigned long rate,
+ int shift, int width)
+{
+ u64 val;
+ u32 mult;
+
+ val = readq(loongson2_pll_base + 0x50);
+
+ mult = (val >> shift) & clk_div_mask(width);
+
+ return div_u64((u64)rate * (mult + 1), 8);
+}
+
+static unsigned long loongson2_boot_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_rate(parent_rate,
+ LOONGSON2_BOOT_FREQSCALE_SHIFT,
+ LOONGSON2_BOOT_FREQSCALE_WIDTH);
+}
+
+static const struct clk_ops loongson2_boot_clk_ops = {
+ .recalc_rate = loongson2_boot_recalc_rate,
+};
+
+static unsigned long loongson2_apb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_rate(parent_rate,
+ LOONGSON2_APB_FREQSCALE_SHIFT,
+ LOONGSON2_APB_FREQSCALE_WIDTH);
+}
+
+static const struct clk_ops loongson2_apb_clk_ops = {
+ .recalc_rate = loongson2_apb_recalc_rate,
+};
+
+static unsigned long loongson2_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_rate(parent_rate,
+ LOONGSON2_USB_FREQSCALE_SHIFT,
+ LOONGSON2_USB_FREQSCALE_WIDTH);
+}
+
+static const struct clk_ops loongson2_usb_clk_ops = {
+ .recalc_rate = loongson2_usb_recalc_rate,
+};
+
+static unsigned long loongson2_sata_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return loongson2_calc_rate(parent_rate,
+ LOONGSON2_SATA_FREQSCALE_SHIFT,
+ LOONGSON2_SATA_FREQSCALE_WIDTH);
+}
+
+static const struct clk_ops loongson2_sata_clk_ops = {
+ .recalc_rate = loongson2_sata_recalc_rate,
+};
+
+static inline int loongson2_check_clk_hws(struct clk_hw *clks[], unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("Loongson2 clk %u: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ return 0;
+}
+
+static int loongson2_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct clk_hw **hws;
+ struct clk_hw_onecell_data *clk_hw_data;
+ spinlock_t loongson2_clk_lock;
+ struct device *dev = &pdev->dev;
+
+ loongson2_pll_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(loongson2_pll_base))
+ return PTR_ERR(loongson2_pll_base);
+
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, LOONGSON2_CLK_END),
+ GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+ clk_hw_data->num = LOONGSON2_CLK_END;
+ hws = clk_hw_data->hws;
+
+ hws[LOONGSON2_NODE_PLL] = loongson2_clk_register(dev, "node_pll",
+ NULL,
+ &loongson2_node_clk_ops, 0);
+
+ hws[LOONGSON2_DDR_PLL] = loongson2_clk_register(dev, "ddr_pll",
+ NULL,
+ &loongson2_ddr_clk_ops, 0);
+
+ hws[LOONGSON2_DC_PLL] = loongson2_clk_register(dev, "dc_pll",
+ NULL,
+ &loongson2_dc_clk_ops, 0);
+
+ hws[LOONGSON2_PIX0_PLL] = loongson2_clk_register(dev, "pix0_pll",
+ NULL,
+ &loongson2_pix0_clk_ops, 0);
+
+ hws[LOONGSON2_PIX1_PLL] = loongson2_clk_register(dev, "pix1_pll",
+ NULL,
+ &loongson2_pix1_clk_ops, 0);
+
+ hws[LOONGSON2_BOOT_CLK] = loongson2_clk_register(dev, "boot",
+ NULL,
+ &loongson2_boot_clk_ops, 0);
+
+ hws[LOONGSON2_NODE_CLK] = devm_clk_hw_register_divider(dev, "node",
+ "node_pll", 0,
+ loongson2_pll_base + 0x8, 0,
+ 6, CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ /*
+ * The HDA clock divisor is in the upper 32 bits of its register, and the
+ * clk-provider layer code doesn't support 64-bit I/O operations, so a
+ * conversion is required: subtract 32 from the shift and add 4 bytes to
+ * the HDA register address.
+ */
+ hws[LOONGSON2_HDA_CLK] = devm_clk_hw_register_divider(dev, "hda",
+ "ddr_pll", 0,
+ loongson2_pll_base + 0x22, 12,
+ 7, CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ hws[LOONGSON2_GPU_CLK] = devm_clk_hw_register_divider(dev, "gpu",
+ "ddr_pll", 0,
+ loongson2_pll_base + 0x18, 22,
+ 6, CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ hws[LOONGSON2_DDR_CLK] = devm_clk_hw_register_divider(dev, "ddr",
+ "ddr_pll", 0,
+ loongson2_pll_base + 0x18, 0,
+ 6, CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ hws[LOONGSON2_GMAC_CLK] = devm_clk_hw_register_divider(dev, "gmac",
+ "dc_pll", 0,
+ loongson2_pll_base + 0x28, 22,
+ 6, CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ hws[LOONGSON2_DC_CLK] = devm_clk_hw_register_divider(dev, "dc",
+ "dc_pll", 0,
+ loongson2_pll_base + 0x28, 0,
+ 6, CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ hws[LOONGSON2_APB_CLK] = loongson2_clk_register(dev, "apb",
+ "gmac",
+ &loongson2_apb_clk_ops, 0);
+
+ hws[LOONGSON2_USB_CLK] = loongson2_clk_register(dev, "usb",
+ "gmac",
+ &loongson2_usb_clk_ops, 0);
+
+ hws[LOONGSON2_SATA_CLK] = loongson2_clk_register(dev, "sata",
+ "gmac",
+ &loongson2_sata_clk_ops, 0);
+
+ hws[LOONGSON2_PIX0_CLK] = clk_hw_register_divider(NULL, "pix0",
+ "pix0_pll", 0,
+ loongson2_pll_base + 0x38, 0, 6,
+ CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ hws[LOONGSON2_PIX1_CLK] = clk_hw_register_divider(NULL, "pix1",
+ "pix1_pll", 0,
+ loongson2_pll_base + 0x48, 0, 6,
+ CLK_DIVIDER_ONE_BASED,
+ &loongson2_clk_lock);
+
+ ret = loongson2_check_clk_hws(hws, LOONGSON2_CLK_END);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
+}
+
+static const struct of_device_id loongson2_clk_match_table[] = {
+ { .compatible = "loongson,ls2k-clk" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, loongson2_clk_match_table);
+
+static struct platform_driver loongson2_clk_driver = {
+ .probe = loongson2_clk_probe,
+ .driver = {
+ .name = "loongson2-clk",
+ .of_match_table = loongson2_clk_match_table,
+ },
+};
+module_platform_driver(loongson2_clk_driver);
+
+MODULE_DESCRIPTION("Loongson2 clock driver");
+MODULE_LICENSE("GPL");
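The conversion referred to in the HDA comment is the usual way of handing a field that sits in the upper half of a 64-bit register to the 32-bit clk-divider helper: add 4 to the byte offset and subtract 32 from the shift (assuming little-endian MMIO). With hypothetical numbers, not the actual Loongson-2 register layout:

/* Hypothetical: a divider field starting at bit 44 of a 64-bit register
 * located at offset 0x20. */
void __iomem *reg32 = loongson2_pll_base + 0x20 + 4;	/* access at 0x24 */
u8 shift32 = 44 - 32;					/* shift becomes 12 */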
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
index 80b9d78493bc..050fd4fb588f 100644
--- a/drivers/clk/clk-milbeaut.c
+++ b/drivers/clk/clk-milbeaut.c
@@ -560,14 +560,12 @@ static void m10v_reg_mux_pre(const struct m10v_clk_mux_factors *factors,
static int m10v_clk_probe(struct platform_device *pdev)
{
int id;
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
const char *parent_name;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index b8c3d0da1918..74a241b1e1f4 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -271,10 +271,9 @@ static int palmas_clks_probe(struct platform_device *pdev)
return ret;
}
-static int palmas_clks_remove(struct platform_device *pdev)
+static void palmas_clks_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
- return 0;
}
static struct platform_driver palmas_clks_driver = {
@@ -283,7 +282,7 @@ static struct platform_driver palmas_clks_driver = {
.of_match_table = palmas_clks_of_match,
},
.probe = palmas_clks_probe,
- .remove = palmas_clks_remove,
+ .remove_new = palmas_clks_remove,
};
module_platform_driver(palmas_clks_driver);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index da2c8eddfd9f..3dd2b83d0404 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -129,11 +129,9 @@ static int clk_pwm_probe(struct platform_device *pdev)
return of_clk_add_hw_provider(node, of_clk_hw_simple_get, &clk_pwm->hw);
}
-static int clk_pwm_remove(struct platform_device *pdev)
+static void clk_pwm_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
-
- return 0;
}
static const struct of_device_id clk_pwm_dt_ids[] = {
@@ -144,7 +142,7 @@ MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids);
static struct platform_driver clk_pwm_driver = {
.probe = clk_pwm_probe,
- .remove = clk_pwm_remove,
+ .remove_new = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
.of_match_table = clk_pwm_dt_ids,
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index e6247141d0c0..10d31c222a1c 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -6,6 +6,7 @@
* - 9FGV/9DBV/9DMV/9FGL/9DML/9QXL/9SQ
* Currently supported:
* - 9FGV0241
+ * - 9FGV0441
*
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*/
@@ -18,7 +19,6 @@
#include <linux/regmap.h>
#define RS9_REG_OE 0x0
-#define RS9_REG_OE_DIF_OE(n) BIT((n) + 1)
#define RS9_REG_SS 0x1
#define RS9_REG_SS_AMP_0V6 0x0
#define RS9_REG_SS_AMP_0V7 0x1
@@ -31,9 +31,6 @@
#define RS9_REG_SS_SSC_MASK (3 << 3)
#define RS9_REG_SS_SSC_LOCK BIT(5)
#define RS9_REG_SR 0x2
-#define RS9_REG_SR_2V0_DIF(n) 0
-#define RS9_REG_SR_3V0_DIF(n) BIT((n) + 1)
-#define RS9_REG_SR_DIF_MASK(n) BIT((n) + 1)
#define RS9_REG_REF 0x3
#define RS9_REG_REF_OE BIT(4)
#define RS9_REG_REF_OD BIT(5)
@@ -45,23 +42,31 @@
#define RS9_REG_DID 0x6
#define RS9_REG_BCP 0x7
+#define RS9_REG_VID_IDT 0x01
+
+#define RS9_REG_DID_TYPE_FGV (0x0 << RS9_REG_DID_TYPE_SHIFT)
+#define RS9_REG_DID_TYPE_DBV (0x1 << RS9_REG_DID_TYPE_SHIFT)
+#define RS9_REG_DID_TYPE_DMV (0x2 << RS9_REG_DID_TYPE_SHIFT)
+#define RS9_REG_DID_TYPE_SHIFT 0x6
+
/* Supported Renesas 9-series models. */
enum rs9_model {
RENESAS_9FGV0241,
+ RENESAS_9FGV0441,
};
/* Structure to describe features of a particular 9-series model */
struct rs9_chip_info {
const enum rs9_model model;
unsigned int num_clks;
+ u8 did;
};
struct rs9_driver_data {
struct i2c_client *client;
struct regmap *regmap;
const struct rs9_chip_info *chip_info;
- struct clk *pin_xin;
- struct clk_hw *clk_dif[2];
+ struct clk_hw *clk_dif[4];
u8 pll_amplitude;
u8 pll_ssc;
u8 clk_dif_sr;
@@ -144,25 +149,38 @@ static int rs9_regmap_i2c_read(void *context,
static const struct regmap_config rs9_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
+ .cache_type = REGCACHE_FLAT,
.max_register = RS9_REG_BCP,
+ .num_reg_defaults_raw = 0x8,
.rd_table = &rs9_readable_table,
.wr_table = &rs9_writeable_table,
.reg_write = rs9_regmap_i2c_write,
.reg_read = rs9_regmap_i2c_read,
};
+static u8 rs9_calc_dif(const struct rs9_driver_data *rs9, int idx)
+{
+ enum rs9_model model = rs9->chip_info->model;
+
+ if (model == RENESAS_9FGV0241)
+ return BIT(idx + 1);
+ else if (model == RENESAS_9FGV0441)
+ return BIT(idx);
+
+ return 0;
+}
+
static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
{
struct i2c_client *client = rs9->client;
+ u8 dif = rs9_calc_dif(rs9, idx);
unsigned char name[5] = "DIF0";
struct device_node *np;
int ret;
u32 sr;
/* Set defaults */
- rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
- rs9->clk_dif_sr |= RS9_REG_SR_3V0_DIF(idx);
+ rs9->clk_dif_sr |= dif;
snprintf(name, 5, "DIF%d", idx);
np = of_get_child_by_name(client->dev.of_node, name);
@@ -174,11 +192,9 @@ static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
of_node_put(np);
if (!ret) {
if (sr == 2000000) { /* 2V/ns */
- rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
- rs9->clk_dif_sr |= RS9_REG_SR_2V0_DIF(idx);
+ rs9->clk_dif_sr &= ~dif;
} else if (sr == 3000000) { /* 3V/ns (default) */
- rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
- rs9->clk_dif_sr |= RS9_REG_SR_3V0_DIF(idx);
+ rs9->clk_dif_sr |= dif;
} else
ret = dev_err_probe(&client->dev, -EINVAL,
"Invalid renesas,slew-rate value\n");
@@ -249,11 +265,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
}
for (i = 0; i < rs9->chip_info->num_clks; i++) {
- if (rs9->clk_dif_sr & RS9_REG_SR_3V0_DIF(i))
+ u8 dif = rs9_calc_dif(rs9, i);
+
+ if (rs9->clk_dif_sr & dif)
continue;
- regmap_update_bits(rs9->regmap, RS9_REG_SR, RS9_REG_SR_3V0_DIF(i),
- rs9->clk_dif_sr & RS9_REG_SR_3V0_DIF(i));
+ regmap_update_bits(rs9->regmap, RS9_REG_SR, dif,
+ rs9->clk_dif_sr & dif);
}
}
@@ -270,6 +288,7 @@ static int rs9_probe(struct i2c_client *client)
{
unsigned char name[5] = "DIF0";
struct rs9_driver_data *rs9;
+ unsigned int vid, did;
struct clk_hw *hw;
int i, ret;
@@ -306,6 +325,20 @@ static int rs9_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = regmap_read(rs9->regmap, RS9_REG_VID, &vid);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(rs9->regmap, RS9_REG_DID, &did);
+ if (ret < 0)
+ return ret;
+
+ if (vid != RS9_REG_VID_IDT || did != rs9->chip_info->did)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Incorrect VID/DID: %#02x, %#02x. Expected %#02x, %#02x\n",
+ vid, did, RS9_REG_VID_IDT,
+ rs9->chip_info->did);
+
/* Register clock */
for (i = 0; i < rs9->chip_info->num_clks; i++) {
snprintf(name, 5, "DIF%d", i);
@@ -349,16 +382,25 @@ static int __maybe_unused rs9_resume(struct device *dev)
static const struct rs9_chip_info renesas_9fgv0241_info = {
.model = RENESAS_9FGV0241,
.num_clks = 2,
+ .did = RS9_REG_DID_TYPE_FGV | 0x02,
+};
+
+static const struct rs9_chip_info renesas_9fgv0441_info = {
+ .model = RENESAS_9FGV0441,
+ .num_clks = 4,
+ .did = RS9_REG_DID_TYPE_FGV | 0x04,
};
static const struct i2c_device_id rs9_id[] = {
{ "9fgv0241", .driver_data = RENESAS_9FGV0241 },
+ { "9fgv0441", .driver_data = RENESAS_9FGV0441 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rs9_id);
static const struct of_device_id clk_rs9_of_match[] = {
{ .compatible = "renesas,9fgv0241", .data = &renesas_9fgv0241_info },
+ { .compatible = "renesas,9fgv0441", .data = &renesas_9fgv0441_info },
{ }
};
MODULE_DEVICE_TABLE(of, clk_rs9_of_match);
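rs9_calc_dif() collapses the per-model bit layouts that used to be separate macros into one helper; the resulting mapping from output index to the bit used in both the OE and SR registers is sketched below:

/* Per-output bit returned by rs9_calc_dif():
 *   RENESAS_9FGV0241: BIT(idx + 1) -> DIF0 = 0x02, DIF1 = 0x04
 *   RENESAS_9FGV0441: BIT(idx)     -> DIF0 = 0x01 ... DIF3 = 0x08
 */
u8 dif0_9fgv0241 = BIT(0 + 1);	/* 0x02 */
u8 dif3_9fgv0441 = BIT(3);	/* 0x08 */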
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index a3e883a9f406..38c456540d1b 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -202,7 +202,7 @@ err_reg:
return ret;
}
-static int s2mps11_clk_remove(struct platform_device *pdev)
+static void s2mps11_clk_remove(struct platform_device *pdev)
{
struct s2mps11_clk *s2mps11_clks = platform_get_drvdata(pdev);
int i;
@@ -217,8 +217,6 @@ static int s2mps11_clk_remove(struct platform_device *pdev)
continue;
clkdev_drop(s2mps11_clks[i].lookup);
}
-
- return 0;
}
static const struct platform_device_id s2mps11_clk_id[] = {
@@ -265,7 +263,7 @@ static struct platform_driver s2mps11_clk_driver = {
.name = "s2mps11-clk",
},
.probe = s2mps11_clk_probe,
- .remove = s2mps11_clk_remove,
+ .remove_new = s2mps11_clk_remove,
.id_table = s2mps11_clk_id,
};
module_platform_driver(s2mps11_clk_driver);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index a39af7616b13..3fb4003453ee 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -246,7 +246,7 @@ static int scpi_clk_add(struct device *dev, struct device_node *np,
return of_clk_add_hw_provider(np, scpi_of_clk_src_get, clk_data);
}
-static int scpi_clocks_remove(struct platform_device *pdev)
+static void scpi_clocks_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
@@ -258,7 +258,6 @@ static int scpi_clocks_remove(struct platform_device *pdev)
for_each_available_child_of_node(np, child)
of_clk_del_provider(np);
- return 0;
}
static int scpi_clocks_probe(struct platform_device *pdev)
@@ -305,7 +304,7 @@ static struct platform_driver scpi_clocks_driver = {
.of_match_table = scpi_clocks_ids,
},
.probe = scpi_clocks_probe,
- .remove = scpi_clocks_remove,
+ .remove_new = scpi_clocks_remove,
};
module_platform_driver(scpi_clocks_driver);
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index c028fa103bed..cabdd8e8f4db 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -360,8 +360,8 @@ static int si514_probe(struct i2c_client *client)
dev_err(&client->dev, "clock registration failed\n");
return err;
}
- err = of_clk_add_hw_provider(client->dev.of_node, of_clk_hw_simple_get,
- &data->hw);
+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
+ &data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
@@ -370,11 +370,6 @@ static int si514_probe(struct i2c_client *client)
return 0;
}
-static void si514_remove(struct i2c_client *client)
-{
- of_clk_del_provider(client->dev.of_node);
-}
-
static const struct i2c_device_id si514_id[] = {
{ "si514", 0 },
{ }
@@ -393,7 +388,6 @@ static struct i2c_driver si514_driver = {
.of_match_table = clk_si514_of_match,
},
.probe_new = si514_probe,
- .remove = si514_remove,
.id_table = si514_id,
};
module_i2c_driver(si514_driver);
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
new file mode 100644
index 000000000000..ac8d4c59cd3d
--- /dev/null
+++ b/drivers/clk/clk-si521xx.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Skyworks Si521xx PCIe clock generators
+ *
+ * The following series can be supported:
+ * - Si52144 - 4x DIFF
+ * - Si52146 - 6x DIFF
+ * - Si52147 - 9x DIFF
+ * Currently tested:
+ * - Si52144
+ *
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitrev.h>
+#include <linux/clk-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+/* OE1 and OE2 register */
+#define SI521XX_REG_OE(n) (((n) & 0x1) + 1)
+#define SI521XX_REG_ID 0x3
+#define SI521XX_REG_ID_PROG GENMASK(7, 4)
+#define SI521XX_REG_ID_VENDOR GENMASK(3, 0)
+#define SI521XX_REG_BC 0x4
+#define SI521XX_REG_DA 0x5
+#define SI521XX_REG_DA_AMP_SEL BIT(7)
+#define SI521XX_REG_DA_AMP_MASK GENMASK(6, 4)
+#define SI521XX_REG_DA_AMP_MIN 300000
+#define SI521XX_REG_DA_AMP_DEFAULT 800000
+#define SI521XX_REG_DA_AMP_MAX 1000000
+#define SI521XX_REG_DA_AMP_STEP 100000
+#define SI521XX_REG_DA_AMP(UV) \
+ FIELD_PREP(SI521XX_REG_DA_AMP_MASK, \
+ ((UV) - SI521XX_REG_DA_AMP_MIN) / SI521XX_REG_DA_AMP_STEP)
+#define SI521XX_REG_DA_UNKNOWN BIT(3) /* Always set */
+
+/* Count of populated OE bits in control register ref, 1 and 2 */
+#define SI521XX_OE_MAP(cr1, cr2) (((cr2) << 8) | (cr1))
+#define SI521XX_OE_MAP_GET_OE(oe, map) (((map) >> (((oe) - 1) * 8)) & 0xff)
+
+#define SI521XX_DIFF_MULT 4
+#define SI521XX_DIFF_DIV 1
+
+/* Supported Skyworks Si521xx models. */
+enum si521xx_model {
+ SI52144 = 0x44,
+ SI52146 = 0x46,
+ SI52147 = 0x47,
+};
+
+struct si521xx;
+
+struct si_clk {
+ struct clk_hw hw;
+ struct si521xx *si;
+ u8 reg;
+ u8 bit;
+};
+
+struct si521xx {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct si_clk clk_dif[9];
+ u16 chip_info;
+ u8 pll_amplitude;
+};
+
+/*
+ * Si521xx i2c regmap
+ */
+static const struct regmap_range si521xx_readable_ranges[] = {
+ regmap_reg_range(SI521XX_REG_OE(0), SI521XX_REG_DA),
+};
+
+static const struct regmap_access_table si521xx_readable_table = {
+ .yes_ranges = si521xx_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si521xx_readable_ranges),
+};
+
+static const struct regmap_range si521xx_writeable_ranges[] = {
+ regmap_reg_range(SI521XX_REG_OE(0), SI521XX_REG_OE(1)),
+ regmap_reg_range(SI521XX_REG_BC, SI521XX_REG_DA),
+};
+
+static const struct regmap_access_table si521xx_writeable_table = {
+ .yes_ranges = si521xx_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si521xx_writeable_ranges),
+};
+
+static int si521xx_regmap_i2c_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct i2c_client *i2c = context;
+ const u8 data[3] = { reg, 1, val };
+ const int count = ARRAY_SIZE(data);
+ int ret;
+
+ ret = i2c_master_send(i2c, data, count);
+ if (ret == count)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int si521xx_regmap_i2c_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct i2c_client *i2c = context;
+ struct i2c_msg xfer[2];
+ u8 txdata = reg;
+ u8 rxdata[2];
+ int ret;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = (void *)&txdata;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 2;
+ xfer[1].buf = (void *)rxdata;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ /*
+ * Byte 0 is the transfer length, which is always 1 because the BCP
+ * register is programmed to 1 in si521xx_probe(); ignore it and use
+ * the data from byte 1.
+ */
+ *val = rxdata[1];
+ return 0;
+}
+
+static const struct regmap_config si521xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+ .max_register = SI521XX_REG_DA,
+ .rd_table = &si521xx_readable_table,
+ .wr_table = &si521xx_writeable_table,
+ .reg_write = si521xx_regmap_i2c_write,
+ .reg_read = si521xx_regmap_i2c_read,
+};
+
+static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long long rate;
+
+ rate = (unsigned long long)parent_rate * SI521XX_DIFF_MULT;
+ do_div(rate, SI521XX_DIFF_DIV);
+ return (unsigned long)rate;
+}
+
+static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long best_parent;
+
+ best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+ return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+}
+
+static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We must report success but we can do so unconditionally because
+ * si521xx_diff_round_rate returns values that ensure this call is a
+ * nop.
+ */
+
+ return 0;
+}
+
+#define to_si521xx_clk(_hw) container_of(_hw, struct si_clk, hw)
+
+static int si521xx_diff_prepare(struct clk_hw *hw)
+{
+ struct si_clk *si_clk = to_si521xx_clk(hw);
+ struct si521xx *si = si_clk->si;
+
+ regmap_set_bits(si->regmap, SI521XX_REG_OE(si_clk->reg), si_clk->bit);
+
+ return 0;
+}
+
+static void si521xx_diff_unprepare(struct clk_hw *hw)
+{
+ struct si_clk *si_clk = to_si521xx_clk(hw);
+ struct si521xx *si = si_clk->si;
+
+ regmap_clear_bits(si->regmap, SI521XX_REG_OE(si_clk->reg), si_clk->bit);
+}
+
+static const struct clk_ops si521xx_diff_clk_ops = {
+ .round_rate = si521xx_diff_round_rate,
+ .set_rate = si521xx_diff_set_rate,
+ .recalc_rate = si521xx_diff_recalc_rate,
+ .prepare = si521xx_diff_prepare,
+ .unprepare = si521xx_diff_unprepare,
+};
+
+static int si521xx_get_common_config(struct si521xx *si)
+{
+ struct i2c_client *client = si->client;
+ struct device_node *np = client->dev.of_node;
+ unsigned int amp;
+ int ret;
+
+ /* Set defaults */
+ si->pll_amplitude = SI521XX_REG_DA_AMP(SI521XX_REG_DA_AMP_DEFAULT);
+
+ /* Output clock amplitude */
+ ret = of_property_read_u32(np, "skyworks,out-amplitude-microvolt",
+ &amp);
+ if (!ret) {
+ if (amp < SI521XX_REG_DA_AMP_MIN || amp > SI521XX_REG_DA_AMP_MAX ||
+ amp % SI521XX_REG_DA_AMP_STEP) {
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Invalid skyworks,out-amplitude-microvolt value\n");
+ }
+ si->pll_amplitude = SI521XX_REG_DA_AMP(amp);
+ }
+
+ return 0;
+}
+
+static void si521xx_update_config(struct si521xx *si)
+{
+ /* If amplitude is non-default, update it. */
+ if (si->pll_amplitude == SI521XX_REG_DA_AMP(SI521XX_REG_DA_AMP_DEFAULT))
+ return;
+
+ regmap_update_bits(si->regmap, SI521XX_REG_DA,
+ SI521XX_REG_DA_AMP_MASK, si->pll_amplitude);
+}
+
+static void si521xx_diff_idx_to_reg_bit(const u16 chip_info, const int idx,
+ struct si_clk *clk)
+{
+ unsigned long mask;
+ int oe, b, ctr = 0;
+
+ for (oe = 1; oe <= 2; oe++) {
+ mask = bitrev8(SI521XX_OE_MAP_GET_OE(oe, chip_info));
+ for_each_set_bit(b, &mask, 8) {
+ if (ctr++ != idx)
+ continue;
+ clk->reg = SI521XX_REG_OE(oe);
+ clk->bit = 7 - b;
+ return;
+ }
+ }
+}
+
+static struct clk_hw *
+si521xx_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct si521xx *si = data;
+ unsigned int idx = clkspec->args[0];
+
+ return &si->clk_dif[idx].hw;
+}
+
+static int si521xx_probe(struct i2c_client *client)
+{
+ const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
+ const struct clk_parent_data clk_parent_data = { .index = 0 };
+ struct si521xx *si;
+ unsigned char name[6] = "DIFF0";
+ struct clk_init_data init = {};
+ int i, ret;
+
+ if (!chip_info)
+ return -EINVAL;
+
+ si = devm_kzalloc(&client->dev, sizeof(*si), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, si);
+ si->client = client;
+
+ /* Fetch common configuration from DT (if specified) */
+ ret = si521xx_get_common_config(si);
+ if (ret)
+ return ret;
+
+ si->regmap = devm_regmap_init(&client->dev, NULL, client,
+ &si521xx_regmap_config);
+ if (IS_ERR(si->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(si->regmap),
+ "Failed to allocate register map\n");
+
+ /* Always read back 1 Byte via I2C */
+ ret = regmap_write(si->regmap, SI521XX_REG_BC, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Register clock */
+ for (i = 0; i < hweight16(chip_info); i++) {
+ memset(&init, 0, sizeof(init));
+ snprintf(name, 6, "DIFF%d", i);
+ init.name = name;
+ init.ops = &si521xx_diff_clk_ops;
+ init.parent_data = &clk_parent_data;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ si->clk_dif[i].hw.init = &init;
+ si->clk_dif[i].si = si;
+
+ si521xx_diff_idx_to_reg_bit(chip_info, i, &si->clk_dif[i]);
+
+ ret = devm_clk_hw_register(&client->dev, &si->clk_dif[i].hw);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&client->dev, si521xx_of_clk_get, si);
+ if (!ret)
+ si521xx_update_config(si);
+
+ return ret;
+}
+
+static int __maybe_unused si521xx_suspend(struct device *dev)
+{
+ struct si521xx *si = dev_get_drvdata(dev);
+
+ regcache_cache_only(si->regmap, true);
+ regcache_mark_dirty(si->regmap);
+
+ return 0;
+}
+
+static int __maybe_unused si521xx_resume(struct device *dev)
+{
+ struct si521xx *si = dev_get_drvdata(dev);
+ int ret;
+
+ regcache_cache_only(si->regmap, false);
+ ret = regcache_sync(si->regmap);
+ if (ret)
+ dev_err(dev, "Failed to restore register map: %d\n", ret);
+ return ret;
+}
+
+static const struct i2c_device_id si521xx_id[] = {
+ { "si52144", .driver_data = SI521XX_OE_MAP(0x5, 0xc0) },
+ { "si52146", .driver_data = SI521XX_OE_MAP(0x15, 0xe0) },
+ { "si52147", .driver_data = SI521XX_OE_MAP(0x17, 0xf8) },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si521xx_id);
+
+static const struct of_device_id clk_si521xx_of_match[] = {
+ { .compatible = "skyworks,si52144", .data = (void *)SI521XX_OE_MAP(0x5, 0xc0) },
+ { .compatible = "skyworks,si52146", .data = (void *)SI521XX_OE_MAP(0x15, 0xe0) },
+ { .compatible = "skyworks,si52147", .data = (void *)SI521XX_OE_MAP(0x15, 0xf8) },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_si521xx_of_match);
+
+static SIMPLE_DEV_PM_OPS(si521xx_pm_ops, si521xx_suspend, si521xx_resume);
+
+static struct i2c_driver si521xx_driver = {
+ .driver = {
+ .name = "clk-si521xx",
+ .pm = &si521xx_pm_ops,
+ .of_match_table = clk_si521xx_of_match,
+ },
+ .probe_new = si521xx_probe,
+ .id_table = si521xx_id,
+};
+module_i2c_driver(si521xx_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Skyworks Si521xx PCIe clock generator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 9e939c98a455..4fcf7056717e 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1641,8 +1641,8 @@ static int si5351_i2c_probe(struct i2c_client *client)
}
}
- ret = of_clk_add_hw_provider(client->dev.of_node, si53351_of_clk_get,
- drvdata);
+ ret = devm_of_clk_add_hw_provider(&client->dev, si53351_of_clk_get,
+ drvdata);
if (ret) {
dev_err(&client->dev, "unable to add clk provider\n");
return ret;
@@ -1651,18 +1651,12 @@ static int si5351_i2c_probe(struct i2c_client *client)
return 0;
}
-static void si5351_i2c_remove(struct i2c_client *client)
-{
- of_clk_del_provider(client->dev.of_node);
-}
-
static struct i2c_driver si5351_driver = {
.driver = {
.name = "si5351",
.of_match_table = of_match_ptr(si5351_dt_ids),
},
.probe_new = si5351_i2c_probe,
- .remove = si5351_i2c_remove,
.id_table = si5351_i2c_ids,
};
module_i2c_driver(si5351_driver);
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 0a6d70c49726..0b834e9efb4b 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -474,8 +474,8 @@ static int si570_probe(struct i2c_client *client)
dev_err(&client->dev, "clock registration failed\n");
return err;
}
- err = of_clk_add_hw_provider(client->dev.of_node, of_clk_hw_simple_get,
- &data->hw);
+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
+ &data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
@@ -485,10 +485,8 @@ static int si570_probe(struct i2c_client *client)
if (!of_property_read_u32(client->dev.of_node, "clock-frequency",
&initial_fout)) {
err = clk_set_rate(data->hw.clk, initial_fout);
- if (err) {
- of_clk_del_provider(client->dev.of_node);
+ if (err)
return err;
- }
}
/* Display a message indicating that we've successfully registered */
@@ -498,11 +496,6 @@ static int si570_probe(struct i2c_client *client)
return 0;
}
-static void si570_remove(struct i2c_client *client)
-{
- of_clk_del_provider(client->dev.of_node);
-}
-
static const struct of_device_id clk_si570_of_match[] = {
{ .compatible = "silabs,si570" },
{ .compatible = "silabs,si571" },
@@ -518,7 +511,6 @@ static struct i2c_driver si570_driver = {
.of_match_table = clk_si570_of_match,
},
.probe_new = si570_probe,
- .remove = si570_remove,
.id_table = si570_id,
};
module_i2c_driver(si570_driver);
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
new file mode 100644
index 000000000000..11d22043ddd7
--- /dev/null
+++ b/drivers/clk/clk-sp7021.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/sunplus,sp7021-clkc.h>
+
+/* special div_width values for PLLTV/PLLA */
+#define DIV_TV 33
+#define DIV_A 34
+
+/* PLLTV parameters */
+enum {
+ SEL_FRA,
+ SDM_MOD,
+ PH_SEL,
+ NFRA,
+ DIVR,
+ DIVN,
+ DIVM,
+ P_MAX
+};
+
+#define MASK_SEL_FRA GENMASK(1, 1)
+#define MASK_SDM_MOD GENMASK(2, 2)
+#define MASK_PH_SEL GENMASK(4, 4)
+#define MASK_NFRA GENMASK(12, 6)
+#define MASK_DIVR GENMASK(8, 7)
+#define MASK_DIVN GENMASK(7, 0)
+#define MASK_DIVM GENMASK(14, 8)
+
+/* HIWORD_MASK FIELD_PREP */
+#define HWM_FIELD_PREP(mask, value) \
+({ \
+ u64 _m = mask; \
+ (_m << 16) | FIELD_PREP(_m, value); \
+})
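+/*
+ * For example, HWM_FIELD_PREP(MASK_SDM_MOD, 1) with MASK_SDM_MOD = GENMASK(2, 2)
+ * evaluates to 0x00040004: the mask copied into the high halfword acts as the
+ * write-enable for the value prepared in the low halfword.
+ */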
+
+struct sp_pll {
+ struct clk_hw hw;
+ void __iomem *reg;
+ spinlock_t lock; /* lock for reg */
+ int div_shift;
+ int div_width;
+ int pd_bit; /* power down bit idx */
+ int bp_bit; /* bypass bit idx */
+ unsigned long brate; /* base rate, TODO: replace brate with muldiv */
+	u32 p[P_MAX]; /* holds PLLTV/PLLA parameters */
+};
+
+#define to_sp_pll(_hw) container_of(_hw, struct sp_pll, hw)
+
+struct sp_clk_gate_info {
+ u16 reg; /* reg_index_shift */
+ u16 ext_parent; /* parent is extclk */
+};
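+/*
+ * reg encodes the gate position: the high nibble selects the 32-bit
+ * clock-enable register (byte offset (reg >> 4) * 4) and the low nibble
+ * is the gate bit within it, e.g. 0x1b selects register 1, bit 11.
+ */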
+
+static const struct sp_clk_gate_info sp_clk_gates[] = {
+ { 0x02 },
+ { 0x05 },
+ { 0x06 },
+ { 0x07 },
+ { 0x09 },
+ { 0x0b, 1 },
+ { 0x0f, 1 },
+ { 0x14 },
+ { 0x15 },
+ { 0x16 },
+ { 0x17 },
+ { 0x18, 1 },
+ { 0x19, 1 },
+ { 0x1a, 1 },
+ { 0x1b, 1 },
+ { 0x1c, 1 },
+ { 0x1d, 1 },
+ { 0x1e },
+ { 0x1f, 1 },
+ { 0x20 },
+ { 0x21 },
+ { 0x22 },
+ { 0x23 },
+ { 0x24 },
+ { 0x25 },
+ { 0x26 },
+ { 0x2a },
+ { 0x2b },
+ { 0x2d },
+ { 0x2e },
+ { 0x30 },
+ { 0x31 },
+ { 0x32 },
+ { 0x33 },
+ { 0x3d },
+ { 0x3e },
+ { 0x3f },
+ { 0x42 },
+ { 0x44 },
+ { 0x4b },
+ { 0x4c },
+ { 0x4d },
+ { 0x4e },
+ { 0x4f },
+ { 0x50 },
+ { 0x55 },
+ { 0x60 },
+ { 0x61 },
+ { 0x6a },
+ { 0x73 },
+ { 0x86 },
+ { 0x8a },
+ { 0x8b },
+ { 0x8d },
+ { 0x8e },
+ { 0x8f },
+ { 0x90 },
+ { 0x92 },
+ { 0x93 },
+ { 0x95 },
+ { 0x96 },
+ { 0x97 },
+ { 0x98 },
+ { 0x99 },
+};
+
+#define _M 1000000UL
+#define F_27M (27 * _M)
+
+/*********************************** PLL_TV **********************************/
+
+/* TODO: set proper FVCO range */
+#define FVCO_MIN (100 * _M)
+#define FVCO_MAX (200 * _M)
+
+#define F_MIN (FVCO_MIN / 8)
+#define F_MAX (FVCO_MAX)
+
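+/*
+ * Worked example for the integer divider search below: freq = 74250000
+ * keeps DIVR = 0 (74.25 MHz <= FVCO_MAX), and the DIVM scan stops at
+ * m = 4 because 74.25 MHz * 4 = 297 MHz is an exact multiple of 27 MHz,
+ * giving DIVN = 11 (27 MHz * 11 / 4 = 74.25 MHz).
+ */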
+static long plltv_integer_div(struct sp_pll *clk, unsigned long freq)
+{
+ /* valid m values: 27M must be divisible by m */
+ static const u32 m_table[] = {
+ 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32
+ };
+ u32 m, n, r;
+ unsigned long fvco, nf;
+ long ret;
+
+ freq = clamp(freq, F_MIN, F_MAX);
+
+ /* DIVR 0~3 */
+ for (r = 0; r <= 3; r++) {
+ fvco = freq << r;
+ if (fvco <= FVCO_MAX)
+ break;
+ }
+
+ /* DIVM */
+ for (m = 0; m < ARRAY_SIZE(m_table); m++) {
+ nf = fvco * m_table[m];
+ n = nf / F_27M;
+ if ((n * F_27M) == nf)
+ break;
+ }
+ if (m >= ARRAY_SIZE(m_table)) {
+ ret = -EINVAL;
+ goto err_not_found;
+ }
+
+ /* save parameters */
+ clk->p[SEL_FRA] = 0;
+ clk->p[DIVR] = r;
+ clk->p[DIVN] = n;
+ clk->p[DIVM] = m_table[m];
+
+ return freq;
+
+err_not_found:
+ pr_err("%s: %s freq:%lu not found a valid setting\n",
+ __func__, clk_hw_get_name(&clk->hw), freq);
+
+ return ret;
+}
+
+/* parameters for PLLTV fractional divider */
+static const u32 pt[][5] = {
+ /* conventional fractional */
+ {
+ 1, /* factor */
+ 5, /* 5 * p0 (nint) */
+ 1, /* 1 * p0 */
+ F_27M, /* F_27M / p0 */
+ 1, /* p0 / p2 */
+ },
+ /* phase rotation */
+ {
+ 10, /* factor */
+ 54, /* 5.4 * p0 (nint) */
+ 2, /* 0.2 * p0 */
+ F_27M / 10, /* F_27M / p0 */
+ 5, /* p0 / p2 */
+ },
+};
+
+static const u32 sdm_mod_vals[] = { 91, 55 };
+
+static long plltv_fractional_div(struct sp_pll *clk, unsigned long freq)
+{
+ u32 m, r;
+ u32 nint, nfra;
+ u32 df_quotient_min = 210000000;
+ u32 df_remainder_min = 0;
+ unsigned long fvco, nf, f, fout = 0;
+ int sdm, ph;
+
+ freq = clamp(freq, F_MIN, F_MAX);
+
+ /* DIVR 0~3 */
+ for (r = 0; r <= 3; r++) {
+ fvco = freq << r;
+ if (fvco <= FVCO_MAX)
+ break;
+ }
+ f = F_27M >> r;
+
+ /* PH_SEL */
+ for (ph = ARRAY_SIZE(pt) - 1; ph >= 0; ph--) {
+ const u32 *pp = pt[ph];
+
+ /* SDM_MOD */
+ for (sdm = 0; sdm < ARRAY_SIZE(sdm_mod_vals); sdm++) {
+ u32 mod = sdm_mod_vals[sdm];
+
+ /* DIVM 1~32 */
+ for (m = 1; m <= 32; m++) {
+ u32 df; /* diff freq */
+ u32 df_quotient, df_remainder;
+
+ nf = fvco * m;
+ nint = nf / pp[3];
+
+ if (nint < pp[1])
+ continue;
+ if (nint > pp[1])
+ break;
+
+ nfra = (((nf % pp[3]) * mod * pp[4]) + (F_27M / 2)) / F_27M;
+ if (nfra) {
+ u32 df0 = f * (nint + pp[2]) / pp[0];
+ u32 df1 = f * (mod - nfra) / mod / pp[4];
+
+ df = df0 - df1;
+ } else {
+ df = f * (nint) / pp[0];
+ }
+
+ df_quotient = df / m;
+ df_remainder = ((df % m) * 1000) / m;
+
+ if (freq > df_quotient) {
+ df_quotient = freq - df_quotient - 1;
+ df_remainder = 1000 - df_remainder;
+ } else {
+ df_quotient = df_quotient - freq;
+ }
+
+ if (df_quotient_min > df_quotient ||
+ (df_quotient_min == df_quotient &&
+ df_remainder_min > df_remainder)) {
+ /* found a closer freq, save parameters */
+ clk->p[SEL_FRA] = 1;
+ clk->p[SDM_MOD] = sdm;
+ clk->p[PH_SEL] = ph;
+ clk->p[NFRA] = nfra;
+ clk->p[DIVR] = r;
+ clk->p[DIVM] = m;
+
+ fout = df / m;
+ df_quotient_min = df_quotient;
+ df_remainder_min = df_remainder;
+ }
+ }
+ }
+ }
+
+ if (!fout) {
+ pr_err("%s: %s freq:%lu not found a valid setting\n",
+ __func__, clk_hw_get_name(&clk->hw), freq);
+ return -EINVAL;
+ }
+
+ return fout;
+}
+
+static long plltv_div(struct sp_pll *clk, unsigned long freq)
+{
+ if (freq % 100)
+ return plltv_fractional_div(clk, freq);
+
+ return plltv_integer_div(clk, freq);
+}
+
+static int plltv_set_rate(struct sp_pll *clk)
+{
+ unsigned long flags;
+ u32 r0, r1, r2;
+
+ r0 = BIT(clk->bp_bit + 16);
+ r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+
+ r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+
+ r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+
+ spin_lock_irqsave(&clk->lock, flags);
+ writel(r0, clk->reg);
+ writel(r1, clk->reg + 4);
+ writel(r2, clk->reg + 8);
+ spin_unlock_irqrestore(&clk->lock, flags);
+
+ return 0;
+}
+
+/*********************************** PLL_A ***********************************/
+
+/* from Q628_PLLs_REG_setting.xlsx */
+static const struct {
+ u32 rate;
+ u32 regs[5];
+} pa[] = {
+ {
+ .rate = 135475200,
+ .regs = {
+ 0x4801,
+ 0x02df,
+ 0x248f,
+ 0x0211,
+ 0x33e9
+ }
+ },
+ {
+ .rate = 147456000,
+ .regs = {
+ 0x4801,
+ 0x1adf,
+ 0x2490,
+ 0x0349,
+ 0x33e9
+ }
+ },
+ {
+ .rate = 196608000,
+ .regs = {
+ 0x4801,
+ 0x42ef,
+ 0x2495,
+ 0x01c6,
+ 0x33e9
+ }
+ },
+};
+
+static int plla_set_rate(struct sp_pll *clk)
+{
+ const u32 *pp = pa[clk->p[0]].regs;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&clk->lock, flags);
+ for (i = 0; i < ARRAY_SIZE(pa->regs); i++)
+ writel(0xffff0000 | pp[i], clk->reg + (i * 4));
+ spin_unlock_irqrestore(&clk->lock, flags);
+
+ return 0;
+}
+
+static long plla_round_rate(struct sp_pll *clk, unsigned long rate)
+{
+ int i = ARRAY_SIZE(pa);
+
+ while (--i) {
+ if (rate >= pa[i].rate)
+ break;
+ }
+ clk->p[0] = i;
+
+ return pa[i].rate;
+}
+
+/********************************** SP_PLL ***********************************/
+
+static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
+{
+ u32 fbdiv;
+ u32 max = 1 << clk->div_width;
+
+ fbdiv = DIV_ROUND_CLOSEST(rate, clk->brate);
+ if (fbdiv > max)
+ fbdiv = max;
+
+ return fbdiv;
+}
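+/*
+ * Example: with brate = 13500000 and a 4-bit divider (pllsys), a request
+ * for 200 MHz yields fbdiv = DIV_ROUND_CLOSEST(200000000, 13500000) = 15,
+ * so sp_pll_round_rate() below returns 15 * 13.5 MHz = 202.5 MHz.
+ */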
+
+static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct sp_pll *clk = to_sp_pll(hw);
+ long ret;
+
+ if (rate == *prate) {
+ ret = *prate; /* bypass */
+ } else if (clk->div_width == DIV_A) {
+ ret = plla_round_rate(clk, rate);
+ } else if (clk->div_width == DIV_TV) {
+ ret = plltv_div(clk, rate);
+ if (ret < 0)
+ ret = *prate;
+ } else {
+ ret = sp_pll_calc_div(clk, rate) * clk->brate;
+ }
+
+ return ret;
+}
+
+static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct sp_pll *clk = to_sp_pll(hw);
+ u32 reg = readl(clk->reg);
+ unsigned long ret;
+
+ if (reg & BIT(clk->bp_bit)) {
+ ret = prate; /* bypass */
+ } else if (clk->div_width == DIV_A) {
+ ret = pa[clk->p[0]].rate;
+ } else if (clk->div_width == DIV_TV) {
+ u32 m, r, reg2;
+
+ r = FIELD_GET(MASK_DIVR, readl(clk->reg + 4));
+ reg2 = readl(clk->reg + 8);
+ m = FIELD_GET(MASK_DIVM, reg2) + 1;
+
+ if (reg & MASK_SEL_FRA) {
+ /* fractional divider */
+ u32 sdm = FIELD_GET(MASK_SDM_MOD, reg);
+ u32 ph = FIELD_GET(MASK_PH_SEL, reg);
+ u32 nfra = FIELD_GET(MASK_NFRA, reg);
+ const u32 *pp = pt[ph];
+ unsigned long r0, r1;
+
+ ret = prate >> r;
+ r0 = ret * (pp[1] + pp[2]) / pp[0];
+ r1 = ret * (sdm_mod_vals[sdm] - nfra) / sdm_mod_vals[sdm] / pp[4];
+ ret = (r0 - r1) / m;
+ } else {
+ /* integer divider */
+ u32 n = FIELD_GET(MASK_DIVN, reg2) + 1;
+
+ ret = (prate / m * n) >> r;
+ }
+ } else {
+ u32 fbdiv = ((reg >> clk->div_shift) & ((1 << clk->div_width) - 1)) + 1;
+
+ ret = clk->brate * fbdiv;
+ }
+
+ return ret;
+}
+
+static int sp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct sp_pll *clk = to_sp_pll(hw);
+ unsigned long flags;
+ u32 reg;
+
+ reg = BIT(clk->bp_bit + 16); /* HIWORD_MASK */
+
+ if (rate == prate) {
+ reg |= BIT(clk->bp_bit); /* bypass */
+ } else if (clk->div_width == DIV_A) {
+ return plla_set_rate(clk);
+ } else if (clk->div_width == DIV_TV) {
+ return plltv_set_rate(clk);
+ } else if (clk->div_width) {
+ u32 fbdiv = sp_pll_calc_div(clk, rate);
+ u32 mask = GENMASK(clk->div_shift + clk->div_width - 1, clk->div_shift);
+
+ reg |= mask << 16;
+ reg |= ((fbdiv - 1) << clk->div_shift) & mask;
+ }
+
+ spin_lock_irqsave(&clk->lock, flags);
+ writel(reg, clk->reg);
+ spin_unlock_irqrestore(&clk->lock, flags);
+
+ return 0;
+}
+
+static int sp_pll_enable(struct clk_hw *hw)
+{
+ struct sp_pll *clk = to_sp_pll(hw);
+
+ writel(BIT(clk->pd_bit + 16) | BIT(clk->pd_bit), clk->reg);
+
+ return 0;
+}
+
+static void sp_pll_disable(struct clk_hw *hw)
+{
+ struct sp_pll *clk = to_sp_pll(hw);
+
+ writel(BIT(clk->pd_bit + 16), clk->reg);
+}
+
+static int sp_pll_is_enabled(struct clk_hw *hw)
+{
+ struct sp_pll *clk = to_sp_pll(hw);
+
+ return readl(clk->reg) & BIT(clk->pd_bit);
+}
+
+static const struct clk_ops sp_pll_ops = {
+ .enable = sp_pll_enable,
+ .disable = sp_pll_disable,
+ .is_enabled = sp_pll_is_enabled,
+ .round_rate = sp_pll_round_rate,
+ .recalc_rate = sp_pll_recalc_rate,
+ .set_rate = sp_pll_set_rate
+};
+
+static const struct clk_ops sp_pll_sub_ops = {
+ .enable = sp_pll_enable,
+ .disable = sp_pll_disable,
+ .is_enabled = sp_pll_is_enabled,
+ .recalc_rate = sp_pll_recalc_rate,
+};
+
+static struct clk_hw *sp_pll_register(struct device *dev, const char *name,
+ const struct clk_parent_data *parent_data,
+ void __iomem *reg, int pd_bit, int bp_bit,
+ unsigned long brate, int shift, int width,
+ unsigned long flags)
+{
+ struct sp_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data initd = {
+ .name = name,
+ .parent_data = parent_data,
+ .ops = (bp_bit >= 0) ? &sp_pll_ops : &sp_pll_sub_ops,
+ .num_parents = 1,
+ .flags = flags,
+ };
+ int ret;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->hw.init = &initd;
+ pll->reg = reg;
+ pll->pd_bit = pd_bit;
+ pll->bp_bit = bp_bit;
+ pll->brate = brate;
+ pll->div_shift = shift;
+ pll->div_width = width;
+ spin_lock_init(&pll->lock);
+
+ hw = &pll->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return hw;
+}
+
+#define PLLA_CTL (pll_base + 0x1c)
+#define PLLE_CTL (pll_base + 0x30)
+#define PLLF_CTL (pll_base + 0x34)
+#define PLLTV_CTL (pll_base + 0x38)
+
+static int sp7021_clk_probe(struct platform_device *pdev)
+{
+ static const u32 sp_clken[] = {
+ 0x67ef, 0x03ff, 0xff03, 0xfff0, 0x0004, /* G0.1~5 */
+ 0x0000, 0x8000, 0xffff, 0x0040, 0x0000, /* G0.6~10 */
+ };
+ static struct clk_parent_data pd_ext, pd_sys, pd_e;
+ struct device *dev = &pdev->dev;
+ void __iomem *clk_base, *pll_base, *sys_base;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **hws;
+ int i;
+
+	clk_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(clk_base))
+		return PTR_ERR(clk_base);
+	pll_base = devm_platform_ioremap_resource(pdev, 1);
+	if (IS_ERR(pll_base))
+		return PTR_ERR(pll_base);
+	sys_base = devm_platform_ioremap_resource(pdev, 2);
+	if (IS_ERR(sys_base))
+		return PTR_ERR(sys_base);
+
+ /* enable default clks */
+ for (i = 0; i < ARRAY_SIZE(sp_clken); i++)
+ writel((sp_clken[i] << 16) | sp_clken[i], clk_base + i * 4);
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_MAX),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ hws = clk_data->hws;
+ pd_ext.index = 0;
+
+ /* PLLs */
+ hws[PLL_A] = sp_pll_register(dev, "plla", &pd_ext, PLLA_CTL,
+ 11, 12, 27000000, 0, DIV_A, 0);
+ if (IS_ERR(hws[PLL_A]))
+ return PTR_ERR(hws[PLL_A]);
+
+ hws[PLL_E] = sp_pll_register(dev, "plle", &pd_ext, PLLE_CTL,
+ 6, 2, 50000000, 0, 0, 0);
+ if (IS_ERR(hws[PLL_E]))
+ return PTR_ERR(hws[PLL_E]);
+ pd_e.hw = hws[PLL_E];
+ hws[PLL_E_2P5] = sp_pll_register(dev, "plle_2p5", &pd_e, PLLE_CTL,
+ 13, -1, 2500000, 0, 0, 0);
+ if (IS_ERR(hws[PLL_E_2P5]))
+ return PTR_ERR(hws[PLL_E_2P5]);
+ hws[PLL_E_25] = sp_pll_register(dev, "plle_25", &pd_e, PLLE_CTL,
+ 12, -1, 25000000, 0, 0, 0);
+ if (IS_ERR(hws[PLL_E_25]))
+ return PTR_ERR(hws[PLL_E_25]);
+ hws[PLL_E_112P5] = sp_pll_register(dev, "plle_112p5", &pd_e, PLLE_CTL,
+ 11, -1, 112500000, 0, 0, 0);
+ if (IS_ERR(hws[PLL_E_112P5]))
+ return PTR_ERR(hws[PLL_E_112P5]);
+
+ hws[PLL_F] = sp_pll_register(dev, "pllf", &pd_ext, PLLF_CTL,
+ 0, 10, 13500000, 1, 4, 0);
+ if (IS_ERR(hws[PLL_F]))
+ return PTR_ERR(hws[PLL_F]);
+
+ hws[PLL_TV] = sp_pll_register(dev, "plltv", &pd_ext, PLLTV_CTL,
+ 0, 15, 27000000, 0, DIV_TV, 0);
+ if (IS_ERR(hws[PLL_TV]))
+ return PTR_ERR(hws[PLL_TV]);
+ hws[PLL_TV_A] = devm_clk_hw_register_divider(dev, "plltv_a", "plltv", 0,
+ PLLTV_CTL + 4, 5, 1,
+ CLK_DIVIDER_POWER_OF_TWO,
+ &to_sp_pll(hws[PLL_TV])->lock);
+ if (IS_ERR(hws[PLL_TV_A]))
+ return PTR_ERR(hws[PLL_TV_A]);
+
+ /* system clock, should not be disabled */
+ hws[PLL_SYS] = sp_pll_register(dev, "pllsys", &pd_ext, sys_base,
+ 10, 9, 13500000, 0, 4, CLK_IS_CRITICAL);
+ if (IS_ERR(hws[PLL_SYS]))
+ return PTR_ERR(hws[PLL_SYS]);
+ pd_sys.hw = hws[PLL_SYS];
+
+ /* gates */
+ for (i = 0; i < ARRAY_SIZE(sp_clk_gates); i++) {
+ char name[10];
+ u32 j = sp_clk_gates[i].reg;
+ struct clk_parent_data *pd = sp_clk_gates[i].ext_parent ? &pd_ext : &pd_sys;
+
+ sprintf(name, "%02d_0x%02x", i, j);
+ hws[i] = devm_clk_hw_register_gate_parent_data(dev, name, pd, 0,
+ clk_base + (j >> 4) * 4,
+ j & 0x0f,
+ CLK_GATE_HIWORD_MASK,
+ NULL);
+ if (IS_ERR(hws[i]))
+ return PTR_ERR(hws[i]);
+ }
+
+ clk_data->num = CLK_MAX;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+static const struct of_device_id sp7021_clk_dt_ids[] = {
+ { .compatible = "sunplus,sp7021-clkc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sp7021_clk_dt_ids);
+
+static struct platform_driver sp7021_clk_driver = {
+ .probe = sp7021_clk_probe,
+ .driver = {
+ .name = "sp7021-clk",
+ .of_match_table = sp7021_clk_dt_ids,
+ },
+};
+module_platform_driver(sp7021_clk_driver);
+
+MODULE_AUTHOR("Sunplus Technology");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Clock driver for Sunplus SP7021 SoC");
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
index 1a701eada0c1..04c18a1d45d3 100644
--- a/drivers/clk/clk-stm32h7.c
+++ b/drivers/clk/clk-stm32h7.c
@@ -667,7 +667,6 @@ struct stm32_fractional_divider {
void __iomem *mreg;
u8 mshift;
u8 mwidth;
- u32 mmask;
void __iomem *nreg;
u8 nshift;
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 01e5a466897f..939779f66867 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -2434,15 +2434,13 @@ static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev)
return ret;
}
-static int stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
+static void stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev_of_node(dev);
for_each_available_child_of_node(np, child)
of_clk_del_provider(child);
-
- return 0;
}
static struct platform_driver stm32mp1_rcc_clocks_driver = {
@@ -2451,7 +2449,7 @@ static struct platform_driver stm32mp1_rcc_clocks_driver = {
.of_match_table = stm32mp1_match_data,
},
.probe = stm32mp1_rcc_clocks_probe,
- .remove = stm32mp1_rcc_clocks_remove,
+ .remove_new = stm32mp1_rcc_clocks_remove,
};
static int __init stm32mp1_clocks_init(void)
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index e9737969170e..fa71a57875ce 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -122,9 +122,8 @@
#define VC5_GLOBAL_REGISTER 0x76
#define VC5_GLOBAL_REGISTER_GLOBAL_RESET BIT(5)
-/* PLL/VCO runs between 2.5 GHz and 3.0 GHz */
+/* The minimum VCO frequency is 2.5 GHz. The maximum is variant specific. */
#define VC5_PLL_VCO_MIN 2500000000UL
-#define VC5_PLL_VCO_MAX 3000000000UL
/* VC5 Input mux settings */
#define VC5_MUX_IN_XIN BIT(0)
@@ -150,6 +149,7 @@ enum vc5_model {
IDT_VC5_5P49V5925,
IDT_VC5_5P49V5933,
IDT_VC5_5P49V5935,
+ IDT_VC6_5P49V60,
IDT_VC6_5P49V6901,
IDT_VC6_5P49V6965,
IDT_VC6_5P49V6975,
@@ -161,6 +161,7 @@ struct vc5_chip_info {
const unsigned int clk_fod_cnt;
const unsigned int clk_out_cnt;
const u32 flags;
+ const unsigned long vco_max;
};
struct vc5_driver_data;
@@ -446,13 +447,11 @@ static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
- if (rate < VC5_PLL_VCO_MIN)
- rate = VC5_PLL_VCO_MIN;
- if (rate > VC5_PLL_VCO_MAX)
- rate = VC5_PLL_VCO_MAX;
+ rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
div_int = rate / *parent_rate;
@@ -1212,6 +1211,7 @@ static const struct vc5_chip_info idt_5p49v5923_info = {
.clk_fod_cnt = 2,
.clk_out_cnt = 3,
.flags = 0,
+ .vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v5925_info = {
@@ -1219,6 +1219,7 @@ static const struct vc5_chip_info idt_5p49v5925_info = {
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = 0,
+ .vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v5933_info = {
@@ -1226,6 +1227,7 @@ static const struct vc5_chip_info idt_5p49v5933_info = {
.clk_fod_cnt = 2,
.clk_out_cnt = 3,
.flags = VC5_HAS_INTERNAL_XTAL,
+ .vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v5935_info = {
@@ -1233,6 +1235,15 @@ static const struct vc5_chip_info idt_5p49v5935_info = {
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_INTERNAL_XTAL,
+ .vco_max = 3000000000UL,
+};
+
+static const struct vc5_chip_info idt_5p49v60_info = {
+ .model = IDT_VC6_5P49V60,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
+ .vco_max = 2700000000UL,
};
static const struct vc5_chip_info idt_5p49v6901_info = {
@@ -1240,6 +1251,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = {
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
+ .vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v6965_info = {
@@ -1247,6 +1259,7 @@ static const struct vc5_chip_info idt_5p49v6965_info = {
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_BYPASS_SYNC_BIT,
+ .vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v6975_info = {
@@ -1254,6 +1267,7 @@ static const struct vc5_chip_info idt_5p49v6975_info = {
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_BYPASS_SYNC_BIT | VC5_HAS_INTERNAL_XTAL,
+ .vco_max = 3000000000UL,
};
static const struct i2c_device_id vc5_id[] = {
@@ -1261,6 +1275,7 @@ static const struct i2c_device_id vc5_id[] = {
{ "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
{ "5p49v5933", .driver_data = IDT_VC5_5P49V5933 },
{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
+ { "5p49v60", .driver_data = IDT_VC6_5P49V60 },
{ "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
{ "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
{ "5p49v6975", .driver_data = IDT_VC6_5P49V6975 },
@@ -1273,6 +1288,7 @@ static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5925", .data = &idt_5p49v5925_info },
{ .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info },
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
+ { .compatible = "idt,5p49v60", .data = &idt_5p49v60_info },
{ .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
{ .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info },
{ .compatible = "idt,5p49v6975", .data = &idt_5p49v6975_info },
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e62552a75f08..27c30a533759 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -244,6 +244,17 @@ static bool clk_core_is_enabled(struct clk_core *core)
}
}
+ /*
+ * This could be called with the enable lock held, or from atomic
+ * context. If the parent isn't enabled already, we can't do
+ * anything here. We can also assume this clock isn't enabled.
+ */
+ if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
+ if (!clk_core_is_enabled(core->parent)) {
+ ret = false;
+ goto done;
+ }
+
ret = core->ops->is_enabled(core->hw);
done:
if (core->rpm_enabled)
@@ -1055,12 +1066,12 @@ static void clk_core_disable(struct clk_core *core)
if (--core->enable_count > 0)
return;
- trace_clk_disable_rcuidle(core);
+ trace_clk_disable(core);
if (core->ops->disable)
core->ops->disable(core->hw);
- trace_clk_disable_complete_rcuidle(core);
+ trace_clk_disable_complete(core);
clk_core_disable(core->parent);
}
@@ -1114,12 +1125,12 @@ static int clk_core_enable(struct clk_core *core)
if (ret)
return ret;
- trace_clk_enable_rcuidle(core);
+ trace_clk_enable(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
- trace_clk_enable_complete_rcuidle(core);
+ trace_clk_enable_complete(core);
if (ret) {
clk_core_disable(core->parent);
@@ -1395,6 +1406,8 @@ static int __init clk_disable_unused(void)
return 0;
}
+ pr_info("clk: Disabling unused clocks\n");
+
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -3183,7 +3196,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
seq_puts(s, " enable prepare protect duty hardware\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
@@ -3242,7 +3255,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
seq_putc(s, '{');
clk_prepare_lock();
@@ -4869,8 +4882,8 @@ static struct device_node *get_clk_provider_node(struct device *dev)
np = dev->of_node;
parent_np = dev->parent ? dev->parent->of_node : NULL;
- if (!of_find_property(np, "#clock-cells", NULL))
- if (of_find_property(parent_np, "#clock-cells", NULL))
+ if (!of_property_present(np, "#clock-cells"))
+ if (of_property_present(parent_np, "#clock-cells"))
np = parent_np;
return np;
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index be6f55d37b49..5d0ae1ee72ec 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -6,12 +6,8 @@ obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
obj-y += pll.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
-obj-$(CONFIG_ARCH_DAVINCI_DM355) += pll-dm355.o
-obj-$(CONFIG_ARCH_DAVINCI_DM365) += pll-dm365.o
obj-y += psc.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
-obj-$(CONFIG_ARCH_DAVINCI_DM355) += psc-dm355.o
-obj-$(CONFIG_ARCH_DAVINCI_DM365) += psc-dm365.o
endif
diff --git a/drivers/clk/davinci/pll-dm355.c b/drivers/clk/davinci/pll-dm355.c
deleted file mode 100644
index 505aed80be9a..000000000000
--- a/drivers/clk/davinci/pll-dm355.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM355
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/clk/davinci.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info dm355_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(7, 0),
- .pllm_min = 92,
- .pllm_max = 184,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED |
- PLL_PREDIV_FIXED8 | PLL_HAS_POSTDIV |
- PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-
-int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm355_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm355-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm355-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm355-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm355-psc");
-
- clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
- clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm355_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(7, 0),
- .pllm_min = 92,
- .pllm_max = 184,
- .flags = PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED | PLL_HAS_POSTDIV |
- PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-
-int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- davinci_pll_clk_register(dev, &dm355_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll-dm365.c b/drivers/clk/davinci/pll-dm365.c
deleted file mode 100644
index 2d29712753a3..000000000000
--- a/drivers/clk/davinci/pll-dm365.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM365
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/clkdev.h>
-#include <linux/clk/davinci.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-#define OCSEL_OCSRC_ENABLE 0
-
-static const struct davinci_pll_clk_info dm365_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(9, 0),
- .pllm_min = 1,
- .pllm_max = 1023,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV |
- PLL_POSTDIV_ALWAYS_ENABLED | PLL_PLLM_2X,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(5, pll1_sysclk5, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(6, pll1_sysclk6, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(7, pll1_sysclk7, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(8, pll1_sysclk8, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(9, pll1_sysclk9, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-
-/*
- * This is a bit of a hack to make OCSEL[OCSRC] on DM365 look like OCSEL[OCSRC]
- * on DA850. On DM365, OCSEL[OCSRC] is just an enable/disable bit instead of a
- * multiplexer. By modeling it as a single parent mux clock, the clock code will
- * still do the right thing in this case.
- */
-static const char * const dm365_pll_obsclk_parent_names[] = {
- "oscin",
-};
-
-static u32 dm365_pll_obsclk_table[] = {
- OCSEL_OCSRC_ENABLE,
-};
-
-static const struct davinci_pll_obsclk_info dm365_pll1_obsclk_info = {
- .name = "pll1_obsclk",
- .parent_names = dm365_pll_obsclk_parent_names,
- .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
- .table = dm365_pll_obsclk_table,
- .ocsrc_mask = BIT(4),
-};
-
-int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm365_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
- clk_register_clkdev(clk, "pll1_sysclk5", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk7, base);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
- clk_register_clkdev(clk, "pll1_sysclk8", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
-
- clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
- clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- davinci_pll_obsclk_register(dev, &dm365_pll1_obsclk_info, base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm365_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(9, 0),
- .pllm_min = 1,
- .pllm_max = 1023,
- .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV | PLL_POSTDIV_ALWAYS_ENABLED |
- PLL_PLLM_2X,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll2_sysclk2, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll2_sysclk3, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll2_sysclk4, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(5, pll2_sysclk5, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-
-static const struct davinci_pll_obsclk_info dm365_pll2_obsclk_info = {
- .name = "pll2_obsclk",
- .parent_names = dm365_pll_obsclk_parent_names,
- .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
- .table = dm365_pll_obsclk_table,
- .ocsrc_mask = BIT(4),
-};
-
-int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm365_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- clk = davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk3, base);
-
- clk = davinci_pll_sysclk_register(dev, &pll2_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk5, base);
-
- davinci_pll_auxclk_register(dev, "pll2_auxclk", base);
-
- davinci_pll_obsclk_register(dev, &dm365_pll2_obsclk_info, base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index f862f5e2b3fc..87bdf8879045 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -882,14 +882,6 @@ static const struct platform_device_id davinci_pll_id_table[] = {
{ .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
{ .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
- { .name = "dm355-pll1", .driver_data = (kernel_ulong_t)dm355_pll1_init },
- { .name = "dm355-pll2", .driver_data = (kernel_ulong_t)dm355_pll2_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
- { .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
- { .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 1773277bc690..20bfcec2d3b5 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -122,13 +122,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
/* Platform-specific callbacks */
-#ifdef CONFIG_ARCH_DAVINCI_DA850
int da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
void of_da850_pll0_init(struct device_node *node);
int of_da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
-int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
#endif /* __CLK_DAVINCI_PLL_H___ */
diff --git a/drivers/clk/davinci/psc-dm355.c b/drivers/clk/davinci/psc-dm355.c
deleted file mode 100644
index ddd250107c4e..000000000000
--- a/drivers/clk/davinci/psc-dm355.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM355
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
-LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "dm6441-mmc.1");
-LPSC_CLKDEV1(mcbsp1_clkdev, NULL, "davinci-mcbsp.1");
-LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
-LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
-LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "dm6441-mmc.0");
-LPSC_CLKDEV1(mcbsp0_clkdev, NULL, "davinci-mcbsp.0");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
-LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
-
-static const struct davinci_lpsc_clk_info dm355_psc_info[] = {
- LPSC(0, 0, vpss_master, pll1_sysclk4, vpss_master_clkdev, 0),
- LPSC(1, 0, vpss_slave, pll1_sysclk4, vpss_slave_clkdev, 0),
- LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
- LPSC(6, 0, spi1, pll1_sysclk2, spi1_clkdev, 0),
- LPSC(7, 0, mmcsd1, pll1_sysclk2, mmcsd1_clkdev, 0),
- LPSC(8, 0, asp1, pll1_sysclk2, mcbsp1_clkdev, 0),
- LPSC(9, 0, usb, pll1_sysclk2, usb_clkdev, 0),
- LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
- LPSC(11, 0, spi2, pll1_sysclk2, spi2_clkdev, 0),
- LPSC(12, 0, rto, pll1_auxclk, NULL, 0),
- LPSC(14, 0, aemif, pll1_sysclk2, aemif_clkdev, 0),
- LPSC(15, 0, mmcsd0, pll1_sysclk2, mmcsd0_clkdev, 0),
- LPSC(17, 0, asp0, pll1_sysclk2, mcbsp0_clkdev, 0),
- LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
- LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
- LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
- LPSC(21, 0, uart2, pll1_sysclk2, uart2_clkdev, 0),
- LPSC(22, 0, spi0, pll1_sysclk2, spi0_clkdev, 0),
- LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
- LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
- LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
- LPSC(26, 0, gpio, pll1_sysclk2, gpio_clkdev, 0),
- LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
- /* REVISIT: why can't this be disabled? */
- LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, arm, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(40, 0, mjcp, pll1_sysclk1, NULL, 0),
- LPSC(41, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
- { }
-};
-
-int dm355_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm355_psc_info, 42, base);
-}
-
-static struct clk_bulk_data dm355_psc_parent_clks[] = {
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk2" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk4" },
- { .id = "pll1_auxclk" },
-};
-
-const struct davinci_psc_init_data dm355_psc_init_data = {
- .parent_clks = dm355_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm355_psc_parent_clks),
- .psc_init = &dm355_psc_init,
-};
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
deleted file mode 100644
index c75424f4ea3b..000000000000
--- a/drivers/clk/davinci/psc-dm365.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM365
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
-LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
-LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
-LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
-LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
- NULL, "ti-aemif");
-LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
-LPSC_CLKDEV1(spi3_clkdev, NULL, "spi_davinci.3");
-LPSC_CLKDEV1(spi4_clkdev, NULL, "spi_davinci.4");
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(voice_codec_clkdev, NULL, "davinci_voicecodec");
-LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
-LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
-
-static const struct davinci_lpsc_clk_info dm365_psc_info[] = {
- LPSC(1, 0, vpss_slave, pll1_sysclk5, vpss_slave_clkdev, 0),
- LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
- LPSC(6, 0, spi1, pll1_sysclk4, spi1_clkdev, 0),
- LPSC(7, 0, mmcsd1, pll1_sysclk4, mmcsd1_clkdev, 0),
- LPSC(8, 0, asp0, pll1_sysclk4, asp0_clkdev, 0),
- LPSC(9, 0, usb, pll1_auxclk, usb_clkdev, 0),
- LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
- LPSC(11, 0, spi2, pll1_sysclk4, spi2_clkdev, 0),
- LPSC(12, 0, rto, pll1_sysclk4, NULL, 0),
- LPSC(14, 0, aemif, pll1_sysclk4, aemif_clkdev, 0),
- LPSC(15, 0, mmcsd0, pll1_sysclk8, mmcsd0_clkdev, 0),
- LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
- LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
- LPSC(20, 0, uart1, pll1_sysclk4, uart1_clkdev, 0),
- LPSC(22, 0, spi0, pll1_sysclk4, spi0_clkdev, 0),
- LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
- LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
- LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
- LPSC(26, 0, gpio, pll1_sysclk4, gpio_clkdev, 0),
- LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
- /* REVISIT: why can't this be disabled? */
- LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, arm, pll2_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(38, 0, spi3, pll1_sysclk4, spi3_clkdev, 0),
- LPSC(39, 0, spi4, pll1_auxclk, spi4_clkdev, 0),
- LPSC(40, 0, emac, pll1_sysclk4, emac_clkdev, 0),
- /*
- * The TRM (ARM Subsystem User's Guide) shows two clocks input into
- * voice codec module (PLL2 SYSCLK4 with a DIV2 and PLL1 SYSCLK4). Its
- * not fully clear from documentation which clock should be considered
- * as parent for PSC. The clock chosen here is to maintain
- * compatibility with existing code in arch/arm/mach-davinci/dm365.c
- */
- LPSC(44, 0, voice_codec, pll2_sysclk4, voice_codec_clkdev, 0),
- /*
- * Its not fully clear from TRM (ARM Subsystem User's Guide) as to what
- * the parent of VPSS DAC LPSC should actually be. PLL1 SYSCLK3 feeds
- * into HDVICP and MJCP. The clock chosen here is to remain compatible
- * with code existing in arch/arm/mach-davinci/dm365.c
- */
- LPSC(46, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
- LPSC(47, 0, vpss_master, pll1_sysclk5, vpss_master_clkdev, 0),
- LPSC(50, 0, mjcp, pll1_sysclk3, NULL, 0),
- { }
-};
-
-int dm365_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm365_psc_info, 52, base);
-}
-
-static struct clk_bulk_data dm365_psc_parent_clks[] = {
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk4" },
- { .id = "pll1_sysclk5" },
- { .id = "pll1_sysclk8" },
- { .id = "pll2_sysclk2" },
- { .id = "pll2_sysclk4" },
- { .id = "pll1_auxclk" },
-};
-
-const struct davinci_psc_init_data dm365_psc_init_data = {
- .parent_clks = dm365_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm365_psc_parent_clks),
- .psc_init = &dm365_psc_init,
-};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 42a59dbd49c8..cd85d9f158b0 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -511,12 +511,6 @@ static const struct platform_device_id davinci_psc_id_table[] = {
{ .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
{ .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
- { .name = "dm355-psc", .driver_data = (kernel_ulong_t)&dm355_psc_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
- { .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 5e382b675518..bd23f6fd56df 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -104,11 +104,4 @@ extern const struct davinci_psc_init_data da850_psc1_init_data;
extern const struct davinci_psc_init_data of_da850_psc0_init_data;
extern const struct davinci_psc_init_data of_da850_psc1_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
-extern const struct davinci_psc_init_data dm355_psc_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
-extern const struct davinci_psc_init_data dm365_psc_init_data;
-#endif
-
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index ad0c7f350cf0..b871872d9960 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -162,13 +162,12 @@ static int hi3519_clk_probe(struct platform_device *pdev)
return 0;
}
-static int hi3519_clk_remove(struct platform_device *pdev)
+static void hi3519_clk_remove(struct platform_device *pdev)
{
struct hi3519_crg_data *crg = platform_get_drvdata(pdev);
hisi_reset_exit(crg->rstc);
hi3519_clk_unregister(pdev);
- return 0;
}
@@ -180,7 +179,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table);
static struct platform_driver hi3519_clk_driver = {
.probe = hi3519_clk_probe,
- .remove = hi3519_clk_remove,
+ .remove_new = hi3519_clk_remove,
.driver = {
.name = "hi3519-clk",
.of_match_table = hi3519_clk_match_table,
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index 9ea1a80acbe8..ce4028102bc2 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -810,18 +810,17 @@ static int hi3559av100_crg_probe(struct platform_device *pdev)
return 0;
}
-static int hi3559av100_crg_remove(struct platform_device *pdev)
+static void hi3559av100_crg_remove(struct platform_device *pdev)
{
struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
hisi_reset_exit(crg->rstc);
crg->funcs->unregister_clks(pdev);
- return 0;
}
static struct platform_driver hi3559av100_crg_driver = {
.probe = hi3559av100_crg_probe,
- .remove = hi3559av100_crg_remove,
+ .remove_new = hi3559av100_crg_remove,
.driver = {
.name = "hi3559av100-clock",
.of_match_table = hi3559av100_crg_match_table,
@@ -841,5 +840,4 @@ static void __exit hi3559av100_crg_exit(void)
module_exit(hi3559av100_crg_exit);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver");
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
index 5d4e61c7a429..fe1bd3e3f988 100644
--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -284,18 +284,17 @@ static int hi3516cv300_crg_probe(struct platform_device *pdev)
return 0;
}
-static int hi3516cv300_crg_remove(struct platform_device *pdev)
+static void hi3516cv300_crg_remove(struct platform_device *pdev)
{
struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
hisi_reset_exit(crg->rstc);
crg->funcs->unregister_clks(pdev);
- return 0;
}
static struct platform_driver hi3516cv300_crg_driver = {
.probe = hi3516cv300_crg_probe,
- .remove = hi3516cv300_crg_remove,
+ .remove_new = hi3516cv300_crg_remove,
.driver = {
.name = "hi3516cv300-crg",
.of_match_table = hi3516cv300_crg_match_table,
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index 08a19ba776e6..a0b16be1e25d 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -367,18 +367,17 @@ static int hi3798cv200_crg_probe(struct platform_device *pdev)
return 0;
}
-static int hi3798cv200_crg_remove(struct platform_device *pdev)
+static void hi3798cv200_crg_remove(struct platform_device *pdev)
{
struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
hisi_reset_exit(crg->rstc);
crg->funcs->unregister_clks(pdev);
- return 0;
}
static struct platform_driver hi3798cv200_crg_driver = {
.probe = hi3798cv200_crg_probe,
- .remove = hi3798cv200_crg_remove,
+ .remove_new = hi3798cv200_crg_remove,
.driver = {
.name = "hi3798cv200-crg",
.of_match_table = hi3798cv200_crg_match_table,
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 25785ec9c276..f6b82e0b9703 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -115,7 +115,7 @@ config CLK_IMX93
config CLK_IMXRT1050
tristate "IMXRT1050 CCM Clock Driver"
- depends on SOC_IMXRT
+ depends on SOC_IMXRT || COMPILE_TEST
select MXC_CLK
help
Build the driver for i.MXRT1050 CCM Clock Driver
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index e8aacb0ee6ac..ae9d84ef046b 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -22,11 +22,12 @@ mxc-clk-objs += clk-pllv3.o
mxc-clk-objs += clk-pllv4.o
mxc-clk-objs += clk-pll14xx.o
mxc-clk-objs += clk-sscg-pll.o
+mxc-clk-objs += clk-gpr-mux.o
obj-$(CONFIG_MXC_CLK) += mxc-clk.o
obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
-obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
+obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o clk-imx8mp-audiomix.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
obj-$(CONFIG_CLK_IMX93) += clk-imx93.o
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index 4eedd45dbaa8..e208ddc51133 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -19,10 +19,8 @@
#define PCG_CGC_SHIFT 30
#define PCG_FRAC_SHIFT 3
#define PCG_FRAC_WIDTH 1
-#define PCG_FRAC_MASK BIT(3)
#define PCG_PCD_SHIFT 0
#define PCG_PCD_WIDTH 3
-#define PCG_PCD_MASK 0x7
#define SW_RST BIT(28)
@@ -102,10 +100,8 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
fd->reg = reg;
fd->mshift = PCG_FRAC_SHIFT;
fd->mwidth = PCG_FRAC_WIDTH;
- fd->mmask = PCG_FRAC_MASK;
fd->nshift = PCG_PCD_SHIFT;
fd->nwidth = PCG_PCD_WIDTH;
- fd->nmask = PCG_PCD_MASK;
fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED;
if (has_swrst)
fd->lock = &imx_ccm_lock;
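Dropping PCG_FRAC_MASK and PCG_PCD_MASK loses no information: each mask is fully determined by the shift/width pair that is still programmed into the fractional divider, and the clk framework is assumed here to rebuild it as GENMASK(width - 1, 0) << shift. Checking that arithmetic against the values used in this file:

#include <linux/bits.h>

/* mask = GENMASK(shift + width - 1, shift) */
#define FRAC_MASK	GENMASK(3 + 1 - 1, 3)	/* 0x8 == BIT(3), the old PCG_FRAC_MASK */
#define PCD_MASK	GENMASK(0 + 3 - 1, 0)	/* 0x7, the old PCG_PCD_MASK */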
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 74a66b0203e4..81164bdcd6cc 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -222,7 +222,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ro_ops, div_hw,
&clk_divider_ro_ops, NULL, NULL, flags);
- } else {
+ } else if (!mcore_booted) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
goto fail;
@@ -238,6 +238,12 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
&imx93_clk_composite_divider_ops, gate_hw,
&imx93_clk_composite_gate_ops,
flags | CLK_SET_RATE_NO_REPARENT);
+ } else {
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux_hw, &imx93_clk_composite_mux_ops, div_hw,
+ &imx93_clk_composite_divider_ops, NULL,
+ &imx93_clk_composite_gate_ops,
+ flags | CLK_SET_RATE_NO_REPARENT);
}
if (IS_ERR(hw))
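With this change the helper has three registration shapes: read-only mux/divider ops for clocks marked read-only, the full mux/divider/gate trio in the normal case, and, when mcore_booted says the Cortex-M core set the clock tree up first, the same mux/divider ops but no gate hardware, so the Cortex-A side cannot switch off a clock the M-core still needs. A condensed restatement of that selection as a hypothetical helper, not part of the driver:

/* ro mirrors the read-only handling above; mcore_booted is the module
 * parameter declared in clk.h.
 */
static void imx93_pick_composite_parts(bool ro, bool mcore_booted,
				       const struct clk_ops **mux_ops,
				       const struct clk_ops **div_ops,
				       struct clk_hw **gate_hw)
{
	if (ro) {
		*mux_ops = &clk_mux_ro_ops;
		*div_ops = &clk_divider_ro_ops;
		*gate_hw = NULL;
	} else {
		*mux_ops = &imx93_clk_composite_mux_ops;
		*div_ops = &imx93_clk_composite_divider_ops;
		if (mcore_booted)
			*gate_hw = NULL;	/* leave gating to the M-core */
	}
}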
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index a2aaa14fc1ae..c54f9999da04 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -15,6 +15,7 @@
#include "clk.h"
#define PLL_CTRL 0x0
+#define HW_CTRL_SEL BIT(16)
#define CLKMUX_BYPASS BIT(2)
#define CLKMUX_EN BIT(1)
#define POWERUP_MASK BIT(0)
@@ -52,26 +53,40 @@
.odiv = (_odiv), \
}
+#define PLL_FRACN_GP_INTEGER(_rate, _mfi, _rdiv, _odiv) \
+ { \
+ .rate = (_rate), \
+ .mfi = (_mfi), \
+ .mfn = 0, \
+ .mfd = 0, \
+ .rdiv = (_rdiv), \
+ .odiv = (_odiv), \
+ }
+
struct clk_fracn_gppll {
struct clk_hw hw;
void __iomem *base;
const struct imx_fracn_gppll_rate_table *rate_table;
int rate_count;
+ u32 flags;
};
/*
- * Fvco = Fref * (MFI + MFN / MFD)
- * Fout = Fvco / (rdiv * odiv)
+ * Fvco = (Fref / rdiv) * (MFI + MFN / MFD)
+ * Fout = Fvco / odiv
+ * The (Fref / rdiv) should be in range 20MHz to 40MHz
+ * The Fvco should be in range 2.5Ghz to 5Ghz
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
- PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
+ PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
- PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
- PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
+ PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
+ PLL_FRACN_GP(498000000U, 166, 0, 1, 0, 8),
PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
- PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
- PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
+ PLL_FRACN_GP(400000000U, 200, 0, 1, 0, 12),
+ PLL_FRACN_GP(393216000U, 163, 84, 100, 0, 10),
+ PLL_FRACN_GP(300000000U, 150, 0, 1, 0, 12)
};
struct imx_fracn_gppll_clk imx_fracn_gppll = {
@@ -80,6 +95,24 @@ struct imx_fracn_gppll_clk imx_fracn_gppll = {
};
EXPORT_SYMBOL_GPL(imx_fracn_gppll);
+/*
+ * Fvco = (Fref / rdiv) * MFI
+ * Fout = Fvco / odiv
+ * The (Fref / rdiv) should be in range 20MHz to 40MHz
+ * The Fvco should be in range 2.5Ghz to 5Ghz
+ */
+static const struct imx_fracn_gppll_rate_table int_tbl[] = {
+ PLL_FRACN_GP_INTEGER(1700000000U, 141, 1, 2),
+ PLL_FRACN_GP_INTEGER(1400000000U, 175, 1, 3),
+ PLL_FRACN_GP_INTEGER(900000000U, 150, 1, 4),
+};
+
+struct imx_fracn_gppll_clk imx_fracn_gppll_integer = {
+ .rate_table = int_tbl,
+ .rate_count = ARRAY_SIZE(int_tbl),
+};
+EXPORT_SYMBOL_GPL(imx_fracn_gppll_integer);
+
static inline struct clk_fracn_gppll *to_clk_fracn_gppll(struct clk_hw *hw)
{
return container_of(hw, struct clk_fracn_gppll, hw);
@@ -166,9 +199,15 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
break;
}
- /* Fvco = Fref * (MFI + MFN / MFD) */
- fvco = fvco * mfi * mfd + fvco * mfn;
- do_div(fvco, mfd * rdiv * odiv);
+ if (pll->flags & CLK_FRACN_GPPLL_INTEGER) {
+ /* Fvco = (Fref / rdiv) * MFI */
+ fvco = fvco * mfi;
+ do_div(fvco, rdiv * odiv);
+ } else {
+ /* Fvco = (Fref / rdiv) * (MFI + MFN / MFD) */
+ fvco = fvco * mfi * mfd + fvco * mfn;
+ do_div(fvco, mfd * rdiv * odiv);
+ }
return (unsigned long)fvco;
}
@@ -191,6 +230,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
rate = imx_get_pll_settings(pll, drate);
+ /* Hardware control select disable. PLL is control by register */
+ tmp = readl_relaxed(pll->base + PLL_CTRL);
+ tmp &= ~HW_CTRL_SEL;
+ writel_relaxed(tmp, pll->base + PLL_CTRL);
+
/* Disable output */
tmp = readl_relaxed(pll->base + PLL_CTRL);
tmp &= ~CLKMUX_EN;
@@ -207,8 +251,10 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv |
FIELD_PREP(PLL_MFI_MASK, rate->mfi);
writel_relaxed(pll_div, pll->base + PLL_DIV);
- writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
- writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
+ if (pll->flags & CLK_FRACN_GPPLL_FRACN) {
+ writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
+ writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
+ }
/* Wait for 5us according to fracn mode pll doc */
udelay(5);
@@ -292,8 +338,10 @@ static const struct clk_ops clk_fracn_gppll_ops = {
.set_rate = clk_fracn_gppll_set_rate,
};
-struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, void __iomem *base,
- const struct imx_fracn_gppll_clk *pll_clk)
+static struct clk_hw *_imx_clk_fracn_gppll(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_fracn_gppll_clk *pll_clk,
+ u32 pll_flags)
{
struct clk_fracn_gppll *pll;
struct clk_hw *hw;
@@ -314,6 +362,7 @@ struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, vo
pll->hw.init = &init;
pll->rate_table = pll_clk->rate_table;
pll->rate_count = pll_clk->rate_count;
+ pll->flags = pll_flags;
hw = &pll->hw;
@@ -326,4 +375,18 @@ struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, vo
return hw;
}
+
+struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, void __iomem *base,
+ const struct imx_fracn_gppll_clk *pll_clk)
+{
+ return _imx_clk_fracn_gppll(name, parent_name, base, pll_clk, CLK_FRACN_GPPLL_FRACN);
+}
EXPORT_SYMBOL_GPL(imx_clk_fracn_gppll);
+
+struct clk_hw *imx_clk_fracn_gppll_integer(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_fracn_gppll_clk *pll_clk)
+{
+ return _imx_clk_fracn_gppll(name, parent_name, base, pll_clk, CLK_FRACN_GPPLL_INTEGER);
+}
+EXPORT_SYMBOL_GPL(imx_clk_fracn_gppll_integer);
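The reworked fractional table can be sanity-checked against the formula in the comment: Fvco = (Fref / rdiv) * (MFI + MFN / MFD) and Fout = Fvco / odiv. Assuming the usual 24 MHz reference and taking rdiv = 0 in the table as divide-by-one, the new 650 MHz entry (mfi 162, mfn 50, mfd 100, odiv 6) keeps Fref/rdiv at 24 MHz and Fvco at 3.9 GHz, both inside the documented windows. A standalone check of the same integer arithmetic the recalc path uses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fref = 24000000ULL;			/* assumed 24 MHz osc */
	uint64_t mfi = 162, mfn = 50, mfd = 100;
	uint64_t rdiv = 1, odiv = 6;			/* table rdiv 0 -> /1 */

	/* Fout = Fref * (MFI + MFN/MFD) / (rdiv * odiv), kept integral by
	 * scaling the numerator with MFD first, as the driver does.
	 */
	uint64_t fout = (fref * mfi * mfd + fref * mfn) / (mfd * rdiv * odiv);

	printf("%llu Hz\n", (unsigned long long)fout);	/* 650000000 Hz */
	return 0;
}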
diff --git a/drivers/clk/imx/clk-gpr-mux.c b/drivers/clk/imx/clk-gpr-mux.c
new file mode 100644
index 000000000000..0b5a97698b47
--- /dev/null
+++ b/drivers/clk/imx/clk-gpr-mux.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ */
+
+#define pr_fmt(fmt) "imx:clk-gpr-mux: " fmt
+
+#include <linux/module.h>
+
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk.h"
+
+struct imx_clk_gpr {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 mask;
+ u32 reg;
+ const u32 *mux_table;
+};
+
+static struct imx_clk_gpr *to_imx_clk_gpr(struct clk_hw *hw)
+{
+ return container_of(hw, struct imx_clk_gpr, hw);
+}
+
+static u8 imx_clk_gpr_mux_get_parent(struct clk_hw *hw)
+{
+ struct imx_clk_gpr *priv = to_imx_clk_gpr(hw);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, priv->reg, &val);
+ if (ret)
+ goto get_parent_err;
+
+ val &= priv->mask;
+
+ ret = clk_mux_val_to_index(hw, priv->mux_table, 0, val);
+ if (ret < 0)
+ goto get_parent_err;
+
+ return ret;
+
+get_parent_err:
+ pr_err("%s: failed to get parent (%pe)\n",
+ clk_hw_get_name(hw), ERR_PTR(ret));
+
+ /* return some realistic non negative value. Potentially we could
+ * give index to some dummy error parent.
+ */
+ return 0;
+}
+
+static int imx_clk_gpr_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct imx_clk_gpr *priv = to_imx_clk_gpr(hw);
+ unsigned int val = clk_mux_index_to_val(priv->mux_table, 0, index);
+
+ return regmap_update_bits(priv->regmap, priv->reg, priv->mask, val);
+}
+
+static int imx_clk_gpr_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static const struct clk_ops imx_clk_gpr_mux_ops = {
+ .get_parent = imx_clk_gpr_mux_get_parent,
+ .set_parent = imx_clk_gpr_mux_set_parent,
+ .determine_rate = imx_clk_gpr_mux_determine_rate,
+};
+
+struct clk_hw *imx_clk_gpr_mux(const char *name, const char *compatible,
+ u32 reg, const char **parent_names,
+ u8 num_parents, const u32 *mux_table, u32 mask)
+{
+ struct clk_init_data init = { };
+ struct imx_clk_gpr *priv;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_compatible(compatible);
+ if (IS_ERR(regmap)) {
+ pr_err("failed to find %s regmap\n", compatible);
+ return ERR_CAST(regmap);
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &imx_clk_gpr_mux_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ priv->hw.init = &init;
+ priv->regmap = regmap;
+ priv->mux_table = mux_table;
+ priv->reg = reg;
+ priv->mask = mask;
+
+ hw = &priv->hw;
+ ret = clk_hw_register(NULL, &priv->hw);
+ if (ret) {
+ kfree(priv);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
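The new helper exposes a bit-field of a syscon (here an IOMUXC GPR register) as an ordinary clock mux: mux_table maps each parent index to the register value for that field, set_parent writes it with regmap_update_bits(), and get_parent falls back to index 0 on a regmap error because the clk core expects a valid index back. Usage looks like the i.MX6Q hunk further down; a trimmed sketch, assuming the usual imx clk and GPR headers are included:

static const char *enet_ref_sels[] = { "enet_ref", "enet_ref_pad" };
static const u32 enet_ref_sels_table[] = {
	IMX6Q_GPR1_ENET_CLK_SEL_ANATOP,		/* parent index 0 */
	IMX6Q_GPR1_ENET_CLK_SEL_PAD,		/* parent index 1 */
};

static void example_register_enet_ref_sel(void)
{
	struct clk_hw *hw;

	hw = imx_clk_gpr_mux("enet_ref_sel", "fsl,imx6q-iomuxc-gpr",
			     IOMUXC_GPR1, enet_ref_sels,
			     ARRAY_SIZE(enet_ref_sels), enet_ref_sels_table,
			     IMX6Q_GPR1_ENET_CLK_SEL_ANATOP);
	if (IS_ERR(hw))
		pr_warn("enet_ref_sel registration failed\n");
}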
diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
index 66192fe0a898..cc013b343e62 100644
--- a/drivers/clk/imx/clk-imx25.c
+++ b/drivers/clk/imx/clk-imx25.c
@@ -218,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
*/
clk_set_parent(clk[cko_sel], clk[ipg]);
- imx_register_uart_clocks(6);
+ imx_register_uart_clocks();
return 0;
}
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index 56a5fc402b10..5d177125728d 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -165,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
- imx_register_uart_clocks(7);
+ imx_register_uart_clocks();
imx_print_silicon_rev("i.MX27", mx27_revision());
}
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index 0fe5ac210156..7dcbaea3fea3 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -235,7 +235,7 @@ static void __init _mx35_clocks_init(void)
*/
clk_prepare_enable(clk[scc_gate]);
- imx_register_uart_clocks(4);
+ imx_register_uart_clocks();
imx_print_silicon_rev("i.MX35", mx35_revision());
}
diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
index e4493846454d..b82044911603 100644
--- a/drivers/clk/imx/clk-imx5.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -358,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
- imx_register_uart_clocks(5);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
@@ -464,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
val |= 1 << 23;
writel(val, MXC_CCM_CLPCR);
- imx_register_uart_clocks(3);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
@@ -609,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
- imx_register_uart_clocks(5);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index de36f58d551c..bf4c1d9c9928 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -115,6 +116,10 @@ static struct clk_div_table video_div_table[] = {
{ /* sentinel */ }
};
+static const char * enet_ref_sels[] = { "enet_ref", "enet_ref_pad", };
+static const u32 enet_ref_sels_table[] = { IMX6Q_GPR1_ENET_CLK_SEL_ANATOP, IMX6Q_GPR1_ENET_CLK_SEL_PAD };
+static const u32 enet_ref_sels_table_mask = IMX6Q_GPR1_ENET_CLK_SEL_ANATOP;
+
static unsigned int share_count_esai;
static unsigned int share_count_asrc;
static unsigned int share_count_ssi1;
@@ -908,6 +913,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0)
hws[IMX6QDL_CLK_GPT_3M] = hws[IMX6QDL_CLK_GPT_IPG_PER];
+ hws[IMX6QDL_CLK_ENET_REF_PAD] = imx6q_obtain_fixed_clk_hw(ccm_node, "enet_ref_pad", 0);
+
+ hws[IMX6QDL_CLK_ENET_REF_SEL] = imx_clk_gpr_mux("enet_ref_sel", "fsl,imx6q-iomuxc-gpr",
+ IOMUXC_GPR1, enet_ref_sels, ARRAY_SIZE(enet_ref_sels),
+ enet_ref_sels_table, enet_ref_sels_table_mask);
+
imx_check_clk_hws(hws, IMX6QDL_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
@@ -974,6 +985,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
}
- imx_register_uart_clocks(2);
+ clk_set_parent(hws[IMX6QDL_CLK_ENET_REF_SEL]->clk, hws[IMX6QDL_CLK_ENET_REF]->clk);
+
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 277365970320..47b8667cfa3f 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -440,6 +440,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
hws[IMX6SL_CLK_PLL2_PFD2]->clk);
- imx_register_uart_clocks(2);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 1c9351649eab..2fa70bf35e45 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -340,7 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
- imx_register_uart_clocks(5);
+ imx_register_uart_clocks();
/* Lower the AHB clock rate before changing the clock source. */
clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index b378531240e6..7cf86707bc39 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -548,6 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
- imx_register_uart_clocks(2);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 67a7a77ca540..e3696a88b5a3 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -94,6 +95,19 @@ static const struct clk_div_table video_div_table[] = {
{ }
};
+static const char * enet1_ref_sels[] = { "enet1_ref_125m", "enet1_ref_pad", "dummy", "dummy"};
+static const u32 enet1_ref_sels_table[] = { IMX6UL_GPR1_ENET1_TX_CLK_DIR,
+ IMX6UL_GPR1_ENET1_CLK_SEL, 0,
+ IMX6UL_GPR1_ENET1_TX_CLK_DIR | IMX6UL_GPR1_ENET1_CLK_SEL };
+static const u32 enet1_ref_sels_table_mask = IMX6UL_GPR1_ENET1_TX_CLK_DIR |
+ IMX6UL_GPR1_ENET1_CLK_SEL;
+static const char * enet2_ref_sels[] = { "enet2_ref_125m", "enet2_ref_pad", "dummy", "dummy"};
+static const u32 enet2_ref_sels_table[] = { IMX6UL_GPR1_ENET2_TX_CLK_DIR,
+ IMX6UL_GPR1_ENET2_CLK_SEL, 0,
+ IMX6UL_GPR1_ENET2_TX_CLK_DIR | IMX6UL_GPR1_ENET2_CLK_SEL };
+static const u32 enet2_ref_sels_table_mask = IMX6UL_GPR1_ENET2_TX_CLK_DIR |
+ IMX6UL_GPR1_ENET2_CLK_SEL;
+
static u32 share_count_asrc;
static u32 share_count_audio;
static u32 share_count_sai1;
@@ -176,7 +190,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
hws[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
hws[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
- hws[IMX6UL_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
+ hws[IMX6UL_CLK_PLL6_ENET] = imx_clk_hw_fixed_factor("pll6_enet", "pll6_bypass", 1, 1);
hws[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
/*
@@ -205,12 +219,13 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
hws[IMX6UL_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
- hws[IMX6UL_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
+ hws[IMX6UL_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet1_ref", "pll6_enet", 0,
base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
hws[IMX6UL_CLK_ENET2_REF] = clk_hw_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0,
base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
- hws[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_hw_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20);
+ hws[IMX6UL_CLK_ENET1_REF_125M] = imx_clk_hw_gate("enet1_ref_125m", "enet1_ref", base + 0xe0, 13);
+ hws[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_hw_gate("enet2_ref_125m", "enet2_ref", base + 0xe0, 20);
hws[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_hw_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
hws[IMX6UL_CLK_ENET_PTP] = imx_clk_hw_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
@@ -471,6 +486,17 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* mask handshake of mmdc */
imx_mmdc_mask_handshake(base, 0);
+ hws[IMX6UL_CLK_ENET1_REF_PAD] = imx_obtain_fixed_of_clock(ccm_node, "enet1_ref_pad", 0);
+
+ hws[IMX6UL_CLK_ENET1_REF_SEL] = imx_clk_gpr_mux("enet1_ref_sel", "fsl,imx6ul-iomuxc-gpr",
+ IOMUXC_GPR1, enet1_ref_sels, ARRAY_SIZE(enet1_ref_sels),
+ enet1_ref_sels_table, enet1_ref_sels_table_mask);
+ hws[IMX6UL_CLK_ENET2_REF_PAD] = imx_obtain_fixed_of_clock(ccm_node, "enet2_ref_pad", 0);
+
+ hws[IMX6UL_CLK_ENET2_REF_SEL] = imx_clk_gpr_mux("enet2_ref_sel", "fsl,imx6ul-iomuxc-gpr",
+ IOMUXC_GPR1, enet2_ref_sels, ARRAY_SIZE(enet2_ref_sels),
+ enet2_ref_sels_table, enet2_ref_sels_table_mask);
+
imx_check_clk_hws(hws, IMX6UL_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
@@ -515,6 +541,9 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6ULL_CLK_EPDC_PRE_SEL]->clk, hws[IMX6UL_CLK_PLL3_PFD2]->clk);
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
+
+ clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
}
CLK_OF_DECLARE(imx6ul, "fsl,imx6ul-ccm", imx6ul_clocks_init);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index d681b6c4b29a..2b77d1fc7bb9 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -882,7 +882,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
- imx_register_uart_clocks(7);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 208a0ab80d5e..f4a48a42637f 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -176,7 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
- imx_register_uart_clocks(2);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
@@ -223,7 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
- imx_register_uart_clocks(7);
+ imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index cb44e8148e53..075f643e3f35 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -468,7 +468,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
hws[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
hws[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
- hws[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
+ hws[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite_flags("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500, CLK_SET_RATE_PARENT);
hws[IMX8MM_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
hws[IMX8MM_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
hws[IMX8MM_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
@@ -609,7 +609,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
goto unregister_hws;
}
- imx_register_uart_clocks(4);
+ imx_register_uart_clocks();
return 0;
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index af256ade554f..4b23a4648600 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -470,7 +470,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
hws[IMX8MN_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
- hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
+ hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite_flags("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500, CLK_SET_RATE_PARENT);
hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
hws[IMX8MN_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
hws[IMX8MN_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
@@ -602,7 +602,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
goto unregister_hws;
}
- imx_register_uart_clocks(4);
+ imx_register_uart_clocks();
return 0;
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
new file mode 100644
index 000000000000..e4300df88f1a
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for i.MX8M Plus Audio BLK_CTRL
+ *
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/imx8mp-clock.h>
+
+#include "clk.h"
+
+#define CLKEN0 0x000
+#define CLKEN1 0x004
+#define SAI_MCLK_SEL(n) (0x300 + 4 * (n)) /* n in 0..5 */
+#define PDM_SEL 0x318
+#define SAI_PLL_GNRL_CTL 0x400
+
+#define SAIn_MCLK1_PARENT(n) \
+static const struct clk_parent_data \
+clk_imx8mp_audiomix_sai##n##_mclk1_parents[] = { \
+ { \
+ .fw_name = "sai"__stringify(n), \
+ .name = "sai"__stringify(n) \
+ }, { \
+ .fw_name = "sai"__stringify(n)"_mclk", \
+ .name = "sai"__stringify(n)"_mclk" \
+ }, \
+}
+
+SAIn_MCLK1_PARENT(1);
+SAIn_MCLK1_PARENT(2);
+SAIn_MCLK1_PARENT(3);
+SAIn_MCLK1_PARENT(5);
+SAIn_MCLK1_PARENT(6);
+SAIn_MCLK1_PARENT(7);
+
+static const struct clk_parent_data clk_imx8mp_audiomix_sai_mclk2_parents[] = {
+ { .fw_name = "sai1", .name = "sai1" },
+ { .fw_name = "sai2", .name = "sai2" },
+ { .fw_name = "sai3", .name = "sai3" },
+ { .name = "dummy" },
+ { .fw_name = "sai5", .name = "sai5" },
+ { .fw_name = "sai6", .name = "sai6" },
+ { .fw_name = "sai7", .name = "sai7" },
+ { .fw_name = "sai1_mclk", .name = "sai1_mclk" },
+ { .fw_name = "sai2_mclk", .name = "sai2_mclk" },
+ { .fw_name = "sai3_mclk", .name = "sai3_mclk" },
+ { .name = "dummy" },
+ { .fw_name = "sai5_mclk", .name = "sai5_mclk" },
+ { .fw_name = "sai6_mclk", .name = "sai6_mclk" },
+ { .fw_name = "sai7_mclk", .name = "sai7_mclk" },
+ { .fw_name = "spdif_extclk", .name = "spdif_extclk" },
+ { .name = "dummy" },
+};
+
+static const struct clk_parent_data clk_imx8mp_audiomix_pdm_parents[] = {
+ { .fw_name = "pdm", .name = "pdm" },
+ { .name = "sai_pll_out_div2" },
+ { .fw_name = "sai1_mclk", .name = "sai1_mclk" },
+ { .name = "dummy" },
+};
+
+
+static const struct clk_parent_data clk_imx8mp_audiomix_pll_parents[] = {
+ { .fw_name = "osc_24m", .name = "osc_24m" },
+ { .name = "dummy" },
+ { .name = "dummy" },
+ { .name = "dummy" },
+};
+
+static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
+ { .fw_name = "sai_pll", .name = "sai_pll" },
+ { .fw_name = "sai_pll_ref_sel", .name = "sai_pll_ref_sel" },
+};
+
+#define CLK_GATE(gname, cname) \
+ { \
+ gname"_cg", \
+ IMX8MP_CLK_AUDIOMIX_##cname, \
+ { .fw_name = "ahb", .name = "ahb" }, NULL, 1, \
+ CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
+ 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
+ }
+
+#define CLK_SAIn(n) \
+ { \
+ "sai"__stringify(n)"_mclk1_sel", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1_SEL, {}, \
+ clk_imx8mp_audiomix_sai##n##_mclk1_parents, \
+ ARRAY_SIZE(clk_imx8mp_audiomix_sai##n##_mclk1_parents), \
+ SAI_MCLK_SEL(n), 1, 0 \
+ }, { \
+ "sai"__stringify(n)"_mclk2_sel", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2_SEL, {}, \
+ clk_imx8mp_audiomix_sai_mclk2_parents, \
+ ARRAY_SIZE(clk_imx8mp_audiomix_sai_mclk2_parents), \
+ SAI_MCLK_SEL(n), 4, 1 \
+ }, { \
+ "sai"__stringify(n)"_ipg_cg", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG, \
+ { .fw_name = "ahb", .name = "ahb" }, NULL, 1, \
+ CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG \
+ }, { \
+ "sai"__stringify(n)"_mclk1_cg", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1, \
+ { \
+ .fw_name = "sai"__stringify(n)"_mclk1_sel", \
+ .name = "sai"__stringify(n)"_mclk1_sel" \
+ }, NULL, 1, \
+ CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1 \
+ }, { \
+ "sai"__stringify(n)"_mclk2_cg", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2, \
+ { \
+ .fw_name = "sai"__stringify(n)"_mclk2_sel", \
+ .name = "sai"__stringify(n)"_mclk2_sel" \
+ }, NULL, 1, \
+ CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2 \
+ }, { \
+ "sai"__stringify(n)"_mclk3_cg", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK3, \
+ { \
+ .fw_name = "sai_pll_out_div2", \
+ .name = "sai_pll_out_div2" \
+ }, NULL, 1, \
+ CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK3 \
+ }
+
+#define CLK_PDM \
+ { \
+ "pdm_sel", IMX8MP_CLK_AUDIOMIX_PDM_SEL, {}, \
+ clk_imx8mp_audiomix_pdm_parents, \
+ ARRAY_SIZE(clk_imx8mp_audiomix_pdm_parents), \
+ PDM_SEL, 2, 0 \
+ }
+
+struct clk_imx8mp_audiomix_sel {
+ const char *name;
+ int clkid;
+ const struct clk_parent_data parent; /* For gate */
+ const struct clk_parent_data *parents; /* For mux */
+ int num_parents;
+ u16 reg;
+ u8 width;
+ u8 shift;
+};
+
+static struct clk_imx8mp_audiomix_sel sels[] = {
+ CLK_GATE("asrc", ASRC_IPG),
+ CLK_GATE("pdm", PDM_IPG),
+ CLK_GATE("earc", EARC_IPG),
+ CLK_GATE("ocrama", OCRAMA_IPG),
+ CLK_GATE("aud2htx", AUD2HTX_IPG),
+ CLK_GATE("earc_phy", EARC_PHY),
+ CLK_GATE("sdma2", SDMA2_ROOT),
+ CLK_GATE("sdma3", SDMA3_ROOT),
+ CLK_GATE("spba2", SPBA2_ROOT),
+ CLK_GATE("dsp", DSP_ROOT),
+ CLK_GATE("dspdbg", DSPDBG_ROOT),
+ CLK_GATE("edma", EDMA_ROOT),
+ CLK_GATE("audpll", AUDPLL_ROOT),
+ CLK_GATE("mu2", MU2_ROOT),
+ CLK_GATE("mu3", MU3_ROOT),
+ CLK_PDM,
+ CLK_SAIn(1),
+ CLK_SAIn(2),
+ CLK_SAIn(3),
+ CLK_SAIn(5),
+ CLK_SAIn(6),
+ CLK_SAIn(7)
+};
+
+static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *priv;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int i;
+
+ priv = devm_kzalloc(dev,
+ struct_size(priv, hws, IMX8MP_CLK_AUDIOMIX_END),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->num = IMX8MP_CLK_AUDIOMIX_END;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = 0; i < ARRAY_SIZE(sels); i++) {
+ if (sels[i].num_parents == 1) {
+ hw = devm_clk_hw_register_gate_parent_data(dev,
+ sels[i].name, &sels[i].parent, 0,
+ base + sels[i].reg, sels[i].shift, 0, NULL);
+ } else {
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ sels[i].name, sels[i].parents,
+ sels[i].num_parents, 0,
+ base + sels[i].reg,
+ sels[i].shift, sels[i].width,
+ 0, NULL, NULL);
+ }
+
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->hws[sels[i].clkid] = hw;
+ }
+
+ /* SAI PLL */
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ "sai_pll_ref_sel", clk_imx8mp_audiomix_pll_parents,
+ ARRAY_SIZE(clk_imx8mp_audiomix_pll_parents),
+ CLK_SET_RATE_NO_REPARENT, base + SAI_PLL_GNRL_CTL,
+ 0, 2, 0, NULL, NULL);
+ priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL] = hw;
+
+ hw = imx_dev_clk_hw_pll14xx(dev, "sai_pll", "sai_pll_ref_sel",
+ base + 0x400, &imx_1443x_pll);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL] = hw;
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ "sai_pll_bypass", clk_imx8mp_audiomix_pll_bypass_sels,
+ ARRAY_SIZE(clk_imx8mp_audiomix_pll_bypass_sels),
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ base + SAI_PLL_GNRL_CTL, 16, 1, 0, NULL, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
+
+ hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass",
+ 0, base + SAI_PLL_GNRL_CTL, 13,
+ 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2",
+ "sai_pll_out", 0, 1, 2);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ priv);
+}
+
+static const struct of_device_id clk_imx8mp_audiomix_of_match[] = {
+ { .compatible = "fsl,imx8mp-audio-blk-ctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match);
+
+static struct platform_driver clk_imx8mp_audiomix_driver = {
+ .probe = clk_imx8mp_audiomix_probe,
+ .driver = {
+ .name = "imx8mp-audio-blk-ctrl",
+ .of_match_table = clk_imx8mp_audiomix_of_match,
+ },
+};
+
+module_platform_driver(clk_imx8mp_audiomix_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale i.MX8MP Audio Block Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index a57d877d393d..f26ae8de4cc6 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -538,7 +538,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
- hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
+ hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite_bus("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite_bus("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
@@ -554,7 +554,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
- hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300);
+ hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
@@ -696,6 +696,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT] = imx_clk_hw_gate2_shared2("media_mipi_phy1_ref_root", "media_mipi_phy1_ref", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_LDB_ROOT] = imx_clk_hw_gate2_shared2("media_ldb_root_clk", "media_ldb", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
@@ -723,7 +724,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
- imx_register_uart_clocks(4);
+ imx_register_uart_clocks();
return 0;
}
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 2bcaec19a999..4bd65879fcd3 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -601,7 +601,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
goto unregister_hws;
}
- imx_register_uart_clocks(4);
+ imx_register_uart_clocks();
return 0;
diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
index 8eb1af2d6429..e308c88cb801 100644
--- a/drivers/clk/imx/clk-imx8ulp.c
+++ b/drivers/clk/imx/clk-imx8ulp.c
@@ -198,10 +198,10 @@ static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
clks[IMX8ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x34, 28, 2, nic_sels, ARRAY_SIZE(nic_sels));
clks[IMX8ULP_CLK_NIC_AD_DIVPLAT] = imx_clk_hw_divider_flags("nic_ad_divplat", "nic_sel", base + 0x34, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "nic_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "nic_ad_divplat", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT);
+ clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT);
+ clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "xbar_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT);
+ clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "xbar_divbus", base + 0x38, 0, 6, CLK_SET_RATE_PARENT);
clks[IMX8ULP_CLK_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("sosc_div1_gate", "sosc", base + 0x108, 7);
clks[IMX8ULP_CLK_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("sosc_div2_gate", "sosc", base + 0x108, 15);
@@ -255,9 +255,9 @@ static int imx8ulp_clk_cgc2_init(struct platform_device *pdev)
clks[IMX8ULP_CLK_HIFI_DIVCORE] = imx_clk_hw_divider("hifi_core_div", "hifi_sel", base + 0x14, 21, 6);
clks[IMX8ULP_CLK_HIFI_DIVPLAT] = imx_clk_hw_divider("hifi_plat_div", "hifi_core_div", base + 0x14, 14, 6);
- clks[IMX8ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x40, 28, 3, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_PARENT_GATE);
- clks[IMX8ULP_CLK_DDR_DIV] = imx_clk_hw_divider_flags("ddr_div", "ddr_sel", base + 0x40, 21, 6, CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_LPAV_AXI_SEL] = imx_clk_hw_mux("lpav_sel", base + 0x3c, 28, 2, lpav_sels, ARRAY_SIZE(lpav_sels));
+ clks[IMX8ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x40, 28, 3, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_GET_RATE_NOCACHE);
+ clks[IMX8ULP_CLK_DDR_DIV] = imx_clk_hw_divider_flags("ddr_div", "ddr_sel", base + 0x40, 21, 6, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ clks[IMX8ULP_CLK_LPAV_AXI_SEL] = imx_clk_hw_mux2("lpav_sel", base + 0x3c, 28, 2, lpav_sels, ARRAY_SIZE(lpav_sels));
clks[IMX8ULP_CLK_LPAV_AXI_DIV] = imx_clk_hw_divider_flags("lpav_axi_div", "lpav_sel", base + 0x3c, 21, 6, CLK_IS_CRITICAL);
clks[IMX8ULP_CLK_LPAV_AHB_DIV] = imx_clk_hw_divider_flags("lpav_ahb_div", "lpav_axi_div", base + 0x3c, 14, 6, CLK_IS_CRITICAL);
clks[IMX8ULP_CLK_LPAV_BUS_DIV] = imx_clk_hw_divider_flags("lpav_bus_div", "lpav_axi_div", base + 0x3c, 7, 6, CLK_IS_CRITICAL);
@@ -275,14 +275,14 @@ static int imx8ulp_clk_cgc2_init(struct platform_device *pdev)
clks[IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd2_div2_gate", "pll4_pfd2", base + 0x60c, 15);
clks[IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd3_div1_gate", "pll4_pfd3", base + 0x60c, 23);
clks[IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd3_div2_gate", "pll4_pfd3", base + 0x60c, 31);
- clks[IMX8ULP_CLK_PLL4_PFD0_DIV1] = imx_clk_hw_divider("pll4_pfd0_div1", "pll4_pfd0_div1_gate", base + 0x608, 0, 6);
- clks[IMX8ULP_CLK_PLL4_PFD0_DIV2] = imx_clk_hw_divider("pll4_pfd0_div2", "pll4_pfd0_div2_gate", base + 0x608, 8, 6);
- clks[IMX8ULP_CLK_PLL4_PFD1_DIV1] = imx_clk_hw_divider("pll4_pfd1_div1", "pll4_pfd1_div1_gate", base + 0x608, 16, 6);
- clks[IMX8ULP_CLK_PLL4_PFD1_DIV2] = imx_clk_hw_divider("pll4_pfd1_div2", "pll4_pfd1_div2_gate", base + 0x608, 24, 6);
- clks[IMX8ULP_CLK_PLL4_PFD2_DIV1] = imx_clk_hw_divider("pll4_pfd2_div1", "pll4_pfd2_div1_gate", base + 0x60c, 0, 6);
- clks[IMX8ULP_CLK_PLL4_PFD2_DIV2] = imx_clk_hw_divider("pll4_pfd2_div2", "pll4_pfd2_div2_gate", base + 0x60c, 8, 6);
- clks[IMX8ULP_CLK_PLL4_PFD3_DIV1] = imx_clk_hw_divider("pll4_pfd3_div1", "pll4_pfd3_div1_gate", base + 0x60c, 16, 6);
- clks[IMX8ULP_CLK_PLL4_PFD3_DIV2] = imx_clk_hw_divider("pll4_pfd3_div2", "pll4_pfd3_div2_gate", base + 0x60c, 24, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV1] = imx_clk_hw_divider_closest("pll4_pfd0_div1", "pll4_pfd0_div1_gate", base + 0x608, 0, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV2] = imx_clk_hw_divider_closest("pll4_pfd0_div2", "pll4_pfd0_div2_gate", base + 0x608, 8, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV1] = imx_clk_hw_divider_closest("pll4_pfd1_div1", "pll4_pfd1_div1_gate", base + 0x608, 16, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV2] = imx_clk_hw_divider_closest("pll4_pfd1_div2", "pll4_pfd1_div2_gate", base + 0x608, 24, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV1] = imx_clk_hw_divider_closest("pll4_pfd2_div1", "pll4_pfd2_div1_gate", base + 0x60c, 0, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV2] = imx_clk_hw_divider_closest("pll4_pfd2_div2", "pll4_pfd2_div2_gate", base + 0x60c, 8, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV1] = imx_clk_hw_divider_closest("pll4_pfd3_div1", "pll4_pfd3_div1_gate", base + 0x60c, 16, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV2] = imx_clk_hw_divider_closest("pll4_pfd3_div2", "pll4_pfd3_div2_gate", base + 0x60c, 24, 6);
clks[IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div1_gate", "sosc", base + 0x108, 7);
clks[IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div2_gate", "sosc", base + 0x108, 15);
@@ -333,7 +333,6 @@ static int imx8ulp_clk_pcc3_init(struct platform_device *pdev)
clks[IMX8ULP_CLK_WDOG4] = imx8ulp_clk_hw_composite("wdog4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xac, 1);
clks[IMX8ULP_CLK_LPIT1] = imx8ulp_clk_hw_composite("lpit1", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xc8, 1);
clks[IMX8ULP_CLK_TPM4] = imx8ulp_clk_hw_composite("tpm4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xcc, 1);
- clks[IMX8ULP_CLK_TPM5] = imx8ulp_clk_hw_composite("tpm5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd0, 1);
clks[IMX8ULP_CLK_FLEXIO1] = imx8ulp_clk_hw_composite("flexio1", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd4, 1);
clks[IMX8ULP_CLK_I3C2] = imx8ulp_clk_hw_composite("i3c2", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd8, 1);
clks[IMX8ULP_CLK_LPI2C4] = imx8ulp_clk_hw_composite("lpi2c4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xdc, 1);
@@ -376,8 +375,9 @@ static int imx8ulp_clk_pcc3_init(struct platform_device *pdev)
clks[IMX8ULP_CLK_DMA1_CH29] = imx_clk_hw_gate("pcc_dma1_ch29", "xbar_ad_divplat", base + 0x7c, 30);
clks[IMX8ULP_CLK_DMA1_CH30] = imx_clk_hw_gate("pcc_dma1_ch30", "xbar_ad_divplat", base + 0x80, 30);
clks[IMX8ULP_CLK_DMA1_CH31] = imx_clk_hw_gate("pcc_dma1_ch31", "xbar_ad_divplat", base + 0x84, 30);
- clks[IMX8ULP_CLK_MU0_B] = imx_clk_hw_gate("mu0_b", "xbar_ad_divplat", base + 0x88, 30);
+ clks[IMX8ULP_CLK_MU0_B] = imx_clk_hw_gate_flags("mu0_b", "xbar_ad_divplat", base + 0x88, 30, CLK_IS_CRITICAL);
clks[IMX8ULP_CLK_MU3_A] = imx_clk_hw_gate("mu3_a", "xbar_ad_divplat", base + 0x8c, 30);
+ clks[IMX8ULP_CLK_TPM5] = imx_clk_hw_gate_flags("tpm5", "sosc_div2", base + 0xd0, 30, CLK_IS_CRITICAL);
imx_check_clk_hws(clks, clk_data->num);
@@ -385,7 +385,7 @@ static int imx8ulp_clk_pcc3_init(struct platform_device *pdev)
if (ret)
return ret;
- imx_register_uart_clocks(1);
+ imx_register_uart_clocks();
/* register the pcc3 reset controller */
return imx8ulp_pcc_reset_init(pdev, base, pcc3_resets, ARRAY_SIZE(pcc3_resets));
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index e464d9e71fbc..07b4a043e449 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -33,6 +33,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_mub;
+static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
static const char *parent_names[MAX_SEL][4] = {
{"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "video_pll"},
{"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "sys_pll_pfd2_div2"},
@@ -55,7 +56,7 @@ static const struct imx93_clk_root {
/* a55/m33/bus critical clk for system run */
{ IMX93_CLK_A55_PERIPH, "a55_periph_root", 0x0000, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_A55_MTR_BUS, "a55_mtr_bus_root", 0x0080, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
- { IMX93_CLK_A55, "a55_root", 0x0100, FAST_SEL, CLK_IS_CRITICAL },
+ { IMX93_CLK_A55, "a55_alt_root", 0x0100, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_M33, "m33_root", 0x0180, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_BUS_WAKEUP, "bus_wakeup_root", 0x0280, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_BUS_AON, "bus_aon_root", 0x0300, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
@@ -117,6 +118,7 @@ static const struct imx93_clk_root {
{ IMX93_CLK_HSIO_USB_TEST_60M, "hsio_usb_test_60m_root", 0x1f00, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_HSIO_ACSCAN_80M, "hsio_acscan_80m_root", 0x1f80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_HSIO_ACSCAN_480M, "hsio_acscan_480m_root", 0x2000, MISC_SEL, },
+ { IMX93_CLK_NIC_AXI, "nic_axi_root", 0x2080, FAST_SEL, CLK_IS_CRITICAL, },
{ IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, },
{ IMX93_CLK_MEDIA_AXI, "media_axi_root", 0x2280, FAST_SEL, },
@@ -153,7 +155,7 @@ static const struct imx93_clk_ccgr {
unsigned long flags;
u32 *shared_count;
} ccgr_array[] = {
- { IMX93_CLK_A55_GATE, "a55", "a55_root", 0x8000, },
+ { IMX93_CLK_A55_GATE, "a55_alt", "a55_alt_root", 0x8000, },
/* M33 critical clk for system run */
{ IMX93_CLK_CM33_GATE, "cm33", "m33_root", 0x8040, CLK_IS_CRITICAL },
{ IMX93_CLK_ADC1_GATE, "adc1", "adc_root", 0x82c0, },
@@ -291,6 +293,9 @@ static int imx93_clocks_probe(struct platform_device *pdev)
if (WARN_ON(!anatop_base))
return -ENOMEM;
+ clks[IMX93_CLK_ARM_PLL] = imx_clk_fracn_gppll_integer("arm_pll", "osc_24m",
+ anatop_base + 0x1000,
+ &imx_fracn_gppll_integer);
clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", anatop_base + 0x1200,
&imx_fracn_gppll);
clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", anatop_base + 0x1400,
@@ -318,6 +323,14 @@ static int imx93_clocks_probe(struct platform_device *pdev)
ccgr->shared_count);
}
+ clks[IMX93_CLK_A55_SEL] = imx_clk_hw_mux2("a55_sel", base + 0x4820, 0, 1, a55_core_sels,
+ ARRAY_SIZE(a55_core_sels));
+ clks[IMX93_CLK_A55_CORE] = imx_clk_hw_cpu("a55_core", "a55_sel",
+ clks[IMX93_CLK_A55_SEL]->clk,
+ clks[IMX93_CLK_A55_SEL]->clk,
+ clks[IMX93_CLK_ARM_PLL]->clk,
+ clks[IMX93_CLK_A55_GATE]->clk);
+
imx_check_clk_hws(clks, IMX93_CLK_END);
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
@@ -326,6 +339,8 @@ static int imx93_clocks_probe(struct platform_device *pdev)
goto unregister_hws;
}
+ imx_register_uart_clocks();
+
return 0;
unregister_hws:
@@ -350,6 +365,8 @@ static struct platform_driver imx93_clk_driver = {
},
};
module_platform_driver(imx93_clk_driver);
+module_param(mcore_booted, bool, 0444);
+MODULE_PARM_DESC(mcore_booted, "See Cortex-M core is booted or not");
MODULE_DESCRIPTION("NXP i.MX93 clock driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
index e972abd299a8..fd5c51fc92c0 100644
--- a/drivers/clk/imx/clk-imxrt1050.c
+++ b/drivers/clk/imx/clk-imxrt1050.c
@@ -167,3 +167,7 @@ static struct platform_driver imxrt1050_clk_driver = {
},
};
module_platform_driver(imxrt1050_clk_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jesse Taube <Mr.Bossman075@gmail.com>");
+MODULE_AUTHOR("Giulio Benetti <giulio.benetti@benettiengineering.com>");
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 5d2a9a3be95e..5cf0149dfa15 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -153,3 +154,4 @@ struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_pfd);
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 828336873a98..7150c59bbfc9 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -256,7 +256,7 @@ static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
if (pll->type == PLL_1443X) {
pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
- kdiv = FIELD_GET(KDIV_MASK, pll_div_ctl1);
+ kdiv = (s16)FIELD_GET(KDIV_MASK, pll_div_ctl1);
} else {
kdiv = 0;
}
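KDIV on the 1443x PLLs is a signed 16-bit two's-complement field, but FIELD_GET() returns it zero-extended, so negative K values used to be read back as large positive numbers; the (s16) cast restores the sign before the value enters the rate calculation. A small illustration with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t div_ctl1 = 0xfffc;		/* KDIV field encodes -4 */
	uint32_t raw = div_ctl1 & 0xffff;	/* what FIELD_GET() yields: 65532 */
	int32_t kdiv = (int16_t)raw;		/* the fix: sign-extend to -4 */

	printf("raw=%u kdiv=%d\n", raw, kdiv);
	return 0;
}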
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index eea32f87c60a..11fb238ee8f0 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -486,3 +487,4 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_pllv3);
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index b636cc099d96..19cde59a20cb 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -110,6 +110,20 @@ struct clk_hw *imx_obtain_fixed_clock_hw(
return __clk_get_hw(clk);
}
+struct clk_hw *imx_obtain_fixed_of_clock(struct device_node *np,
+ const char *name, unsigned long rate)
+{
+ struct clk *clk = of_clk_get_by_name(np, name);
+ struct clk_hw *hw;
+
+ if (IS_ERR(clk))
+ hw = imx_obtain_fixed_clock_hw(name, rate);
+ else
+ hw = __clk_get_hw(clk);
+
+ return hw;
+}
+
struct clk_hw *imx_get_clk_hw_by_name(struct device_node *np, const char *name)
{
struct clk *clk;
@@ -165,8 +179,10 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
__setup_param("earlyprintk", imx_keep_uart_earlyprintk,
imx_keep_uart_clocks_param, 0);
-void imx_register_uart_clocks(unsigned int clk_count)
+void imx_register_uart_clocks(void)
{
+ unsigned int num __maybe_unused;
+
imx_enabled_uart_clocks = 0;
/* i.MX boards use device trees now. For build tests without CONFIG_OF, do nothing */
@@ -174,14 +190,18 @@ void imx_register_uart_clocks(unsigned int clk_count)
if (imx_keep_uart_clocks) {
int i;
- imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
- if (!imx_uart_clocks)
+ num = of_clk_get_parent_count(of_stdout);
+ if (!num)
return;
if (!of_stdout)
return;
- for (i = 0; i < clk_count; i++) {
+ imx_uart_clocks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
+ if (!imx_uart_clocks)
+ return;
+
+ for (i = 0; i < num; i++) {
imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
/* Stop if there are no more of_stdout references */
@@ -205,9 +225,10 @@ static int __init imx_clk_disable_uart(void)
clk_disable_unprepare(imx_uart_clocks[i]);
clk_put(imx_uart_clocks[i]);
}
- kfree(imx_uart_clocks);
}
+ kfree(imx_uart_clocks);
+
return 0;
}
late_initcall_sync(imx_clk_disable_uart);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 689b3ad927c0..1031468701d7 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -12,9 +12,9 @@ extern bool mcore_booted;
void imx_check_clocks(struct clk *clks[], unsigned int count);
void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
#ifndef MODULE
-void imx_register_uart_clocks(unsigned int clk_count);
+void imx_register_uart_clocks(void);
#else
-static inline void imx_register_uart_clocks(unsigned int clk_count)
+static inline void imx_register_uart_clocks(void)
{
}
#endif
@@ -73,6 +73,9 @@ extern struct imx_pll14xx_clk imx_1416x_pll;
extern struct imx_pll14xx_clk imx_1443x_pll;
extern struct imx_pll14xx_clk imx_1443x_dram_pll;
+#define CLK_FRACN_GPPLL_INTEGER BIT(0)
+#define CLK_FRACN_GPPLL_FRACN BIT(1)
+
/* NOTE: Rate table should be kept sorted in descending order. */
struct imx_fracn_gppll_rate_table {
unsigned int rate;
@@ -91,8 +94,12 @@ struct imx_fracn_gppll_clk {
struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, void __iomem *base,
const struct imx_fracn_gppll_clk *pll_clk);
+struct clk_hw *imx_clk_fracn_gppll_integer(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_fracn_gppll_clk *pll_clk);
extern struct imx_fracn_gppll_clk imx_fracn_gppll;
+extern struct imx_fracn_gppll_clk imx_fracn_gppll_integer;
#define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
@@ -153,9 +160,6 @@ extern struct imx_fracn_gppll_clk imx_fracn_gppll;
#define imx_clk_pllv2(name, parent, base) \
to_clk(imx_clk_hw_pllv2(name, parent, base))
-#define imx_clk_mux_flags(name, reg, shift, width, parents, num_parents, flags) \
- to_clk(imx_clk_hw_mux_flags(name, reg, shift, width, parents, num_parents, flags))
-
#define imx_clk_hw_gate(name, parent, reg, shift) \
imx_clk_hw_gate_flags(name, parent, reg, shift, 0)
@@ -288,6 +292,9 @@ struct clk * imx_obtain_fixed_clock(
struct clk_hw *imx_obtain_fixed_clock_hw(
const char *name, unsigned long rate);
+struct clk_hw *imx_obtain_fixed_of_clock(struct device_node *np,
+ const char *name, unsigned long rate);
+
struct clk_hw *imx_get_clk_hw_by_name(struct device_node *np, const char *name);
struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
@@ -346,6 +353,15 @@ static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
CLK_SET_RATE_PARENT, mult, div);
}
+static inline struct clk_hw *imx_clk_hw_divider_closest(const char *name,
+ const char *parent,
+ void __iomem *reg, u8 shift,
+ u8 width)
+{
+ return clk_hw_register_divider(NULL, name, parent, 0,
+ reg, shift, width, CLK_DIVIDER_ROUND_CLOSEST, &imx_ccm_lock);
+}
+
static inline struct clk_hw *__imx_clk_hw_divider(const char *name,
const char *parent,
void __iomem *reg, u8 shift,
@@ -414,6 +430,10 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
_imx8m_clk_hw_composite(name, parent_names, reg, \
0, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+#define imx8m_clk_hw_composite_flags(name, parent_names, reg, flags) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ 0, IMX_COMPOSITE_CLK_FLAGS_DEFAULT | flags)
+
#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
0, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
@@ -458,4 +478,9 @@ struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name
unsigned long flags, void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock);
+
+struct clk_hw *imx_clk_gpr_mux(const char *name, const char *compatible,
+ u32 reg, const char **parent_names,
+ u8 num_parents, const u32 *mux_table, u32 mask);
+
#endif
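imx_clk_hw_divider_closest() registers a plain divider with CLK_DIVIDER_ROUND_CLOSEST, so a rate request may be rounded up or down to whichever reachable rate is nearer, instead of the default behaviour of never exceeding the request. Rough arithmetic for a fixed 100 MHz parent and a 45 MHz request (an assumed example, not a rate taken from these drivers):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long parent = 100000000, want = 45000000;

	/* Default divider: round the divisor up so the result never
	 * exceeds the request -> div 3 -> 33.33 MHz.
	 */
	unsigned long div_up = (parent + want - 1) / want;

	/* CLK_DIVIDER_ROUND_CLOSEST: also consider the next smaller
	 * divisor and keep whichever rate is nearer to the request ->
	 * div 2 -> 50 MHz, which is closer to 45 MHz.
	 */
	unsigned long div_down = parent / want;
	unsigned long closest =
		labs((long)(parent / div_down - want)) <=
		labs((long)(parent / div_up - want)) ? div_down : div_up;

	printf("round-up div=%lu, closest div=%lu\n", div_up, closest);
	return 0;
}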
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index ecd395ac8a28..e407f00bd594 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
unsigned long rate, unsigned long parent_rate,
unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
- unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+ unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
/* The frequency after the N divider must be between 1 and 50 MHz. */
n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
/* The N divider must be >= 2. */
n = clamp_val(n, 2, 1 << pll_info->n_bits);
- for (;; n >>= 1) {
- od = (unsigned int)-1;
+ rate /= MHZ;
+ parent_rate /= MHZ;
- do {
- m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
- } while ((m > m_max || m & 1) && (od < 4));
-
- if (od < 4 && m >= 4 && m <= m_max)
- break;
+ for (m = m_max; m >= m_max && n >= 2; n--) {
+ m = rate * n / parent_rate;
+ od = m & 1;
+ m <<= od;
}
*pm = m;
- *pn = n;
+ *pn = n + 1;
*pod = 1 << od;
}
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index d4b4e74e22da..910ecd58c4ca 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -689,16 +689,14 @@ static int ti_sci_clk_probe(struct platform_device *pdev)
* via common clock framework. Any memory allocated for the device will
* be free'd silently via the devm framework. Returns 0 always.
*/
-static int ti_sci_clk_remove(struct platform_device *pdev)
+static void ti_sci_clk_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
-
- return 0;
}
static struct platform_driver ti_sci_clk_driver = {
.probe = ti_sci_clk_probe,
- .remove = ti_sci_clk_remove,
+ .remove_new = ti_sci_clk_remove,
.driver = {
.name = "ti-sci-clk",
.of_match_table = of_match_ptr(ti_sci_clk_of_match),
diff --git a/drivers/clk/loongson1/Makefile b/drivers/clk/loongson1/Makefile
deleted file mode 100644
index 251d0fe9dcd1..000000000000
--- a/drivers/clk/loongson1/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y += clk.o
-obj-$(CONFIG_LOONGSON1_LS1B) += clk-loongson1b.o
-obj-$(CONFIG_LOONGSON1_LS1C) += clk-loongson1c.o
diff --git a/drivers/clk/loongson1/clk-loongson1b.c b/drivers/clk/loongson1/clk-loongson1b.c
deleted file mode 100644
index 13a2ca23a159..000000000000
--- a/drivers/clk/loongson1/clk-loongson1b.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2012-2016 Zhang, Keguang <keguang.zhang@gmail.com>
- */
-
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include <loongson1.h>
-#include "clk.h"
-
-#define OSC (33 * 1000000)
-#define DIV_APB 2
-
-static DEFINE_SPINLOCK(_lock);
-
-static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u32 pll, rate;
-
- pll = __raw_readl(LS1X_CLK_PLL_FREQ);
- rate = 12 + (pll & GENMASK(5, 0));
- rate *= OSC;
- rate >>= 1;
-
- return rate;
-}
-
-static const struct clk_ops ls1x_pll_clk_ops = {
- .recalc_rate = ls1x_pll_recalc_rate,
-};
-
-static const char *const cpu_parents[] = { "cpu_clk_div", "osc_clk", };
-static const char *const ahb_parents[] = { "ahb_clk_div", "osc_clk", };
-static const char *const dc_parents[] = { "dc_clk_div", "osc_clk", };
-
-void __init ls1x_clk_init(void)
-{
- struct clk_hw *hw;
-
- hw = clk_hw_register_fixed_rate(NULL, "osc_clk", NULL, 0, OSC);
- clk_hw_register_clkdev(hw, "osc_clk", NULL);
-
- /* clock derived from 33 MHz OSC clk */
- hw = clk_hw_register_pll(NULL, "pll_clk", "osc_clk",
- &ls1x_pll_clk_ops, 0);
- clk_hw_register_clkdev(hw, "pll_clk", NULL);
-
- /* clock derived from PLL clk */
- /* _____
- * _______________________| |
- * OSC ___/ | MUX |___ CPU CLK
- * \___ PLL ___ CPU DIV ___| |
- * |_____|
- */
- hw = clk_hw_register_divider(NULL, "cpu_clk_div", "pll_clk",
- CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
- DIV_CPU_SHIFT, DIV_CPU_WIDTH,
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ROUND_CLOSEST, &_lock);
- clk_hw_register_clkdev(hw, "cpu_clk_div", NULL);
- hw = clk_hw_register_mux(NULL, "cpu_clk", cpu_parents,
- ARRAY_SIZE(cpu_parents),
- CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
- BYPASS_CPU_SHIFT, BYPASS_CPU_WIDTH, 0, &_lock);
- clk_hw_register_clkdev(hw, "cpu_clk", NULL);
-
- /* _____
- * _______________________| |
- * OSC ___/ | MUX |___ DC CLK
- * \___ PLL ___ DC DIV ___| |
- * |_____|
- */
- hw = clk_hw_register_divider(NULL, "dc_clk_div", "pll_clk",
- 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
- DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
- clk_hw_register_clkdev(hw, "dc_clk_div", NULL);
- hw = clk_hw_register_mux(NULL, "dc_clk", dc_parents,
- ARRAY_SIZE(dc_parents),
- CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
- BYPASS_DC_SHIFT, BYPASS_DC_WIDTH, 0, &_lock);
- clk_hw_register_clkdev(hw, "dc_clk", NULL);
-
- /* _____
- * _______________________| |
- * OSC ___/ | MUX |___ DDR CLK
- * \___ PLL ___ DDR DIV ___| |
- * |_____|
- */
- hw = clk_hw_register_divider(NULL, "ahb_clk_div", "pll_clk",
- 0, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT,
- DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED,
- &_lock);
- clk_hw_register_clkdev(hw, "ahb_clk_div", NULL);
- hw = clk_hw_register_mux(NULL, "ahb_clk", ahb_parents,
- ARRAY_SIZE(ahb_parents),
- CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
- BYPASS_DDR_SHIFT, BYPASS_DDR_WIDTH, 0, &_lock);
- clk_hw_register_clkdev(hw, "ahb_clk", NULL);
- clk_hw_register_clkdev(hw, "ls1x-dma", NULL);
- clk_hw_register_clkdev(hw, "stmmaceth", NULL);
-
- /* clock derived from AHB clk */
- /* APB clk is always half of the AHB clk */
- hw = clk_hw_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
- DIV_APB);
- clk_hw_register_clkdev(hw, "apb_clk", NULL);
- clk_hw_register_clkdev(hw, "ls1x-ac97", NULL);
- clk_hw_register_clkdev(hw, "ls1x-i2c", NULL);
- clk_hw_register_clkdev(hw, "ls1x-nand", NULL);
- clk_hw_register_clkdev(hw, "ls1x-pwmtimer", NULL);
- clk_hw_register_clkdev(hw, "ls1x-spi", NULL);
- clk_hw_register_clkdev(hw, "ls1x-wdt", NULL);
- clk_hw_register_clkdev(hw, "serial8250", NULL);
-}
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
deleted file mode 100644
index 1ebf740380ef..000000000000
--- a/drivers/clk/loongson1/clk-loongson1c.c
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
- */
-
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-
-#include <loongson1.h>
-#include "clk.h"
-
-#define OSC (24 * 1000000)
-#define DIV_APB 1
-
-static DEFINE_SPINLOCK(_lock);
-
-static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u32 pll, rate;
-
- pll = __raw_readl(LS1X_CLK_PLL_FREQ);
- rate = ((pll >> 8) & 0xff) + ((pll >> 16) & 0xff);
- rate *= OSC;
- rate >>= 2;
-
- return rate;
-}
-
-static const struct clk_ops ls1x_pll_clk_ops = {
- .recalc_rate = ls1x_pll_recalc_rate,
-};
-
-static const struct clk_div_table ahb_div_table[] = {
- [0] = { .val = 0, .div = 2 },
- [1] = { .val = 1, .div = 4 },
- [2] = { .val = 2, .div = 3 },
- [3] = { .val = 3, .div = 3 },
- [4] = { /* sentinel */ }
-};
-
-void __init ls1x_clk_init(void)
-{
- struct clk_hw *hw;
-
- hw = clk_hw_register_fixed_rate(NULL, "osc_clk", NULL, 0, OSC);
- clk_hw_register_clkdev(hw, "osc_clk", NULL);
-
- /* clock derived from 24 MHz OSC clk */
- hw = clk_hw_register_pll(NULL, "pll_clk", "osc_clk",
- &ls1x_pll_clk_ops, 0);
- clk_hw_register_clkdev(hw, "pll_clk", NULL);
-
- hw = clk_hw_register_divider(NULL, "cpu_clk_div", "pll_clk",
- CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
- DIV_CPU_SHIFT, DIV_CPU_WIDTH,
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ROUND_CLOSEST, &_lock);
- clk_hw_register_clkdev(hw, "cpu_clk_div", NULL);
- hw = clk_hw_register_fixed_factor(NULL, "cpu_clk", "cpu_clk_div",
- 0, 1, 1);
- clk_hw_register_clkdev(hw, "cpu_clk", NULL);
-
- hw = clk_hw_register_divider(NULL, "dc_clk_div", "pll_clk",
- 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
- DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
- clk_hw_register_clkdev(hw, "dc_clk_div", NULL);
- hw = clk_hw_register_fixed_factor(NULL, "dc_clk", "dc_clk_div",
- 0, 1, 1);
- clk_hw_register_clkdev(hw, "dc_clk", NULL);
-
- hw = clk_hw_register_divider_table(NULL, "ahb_clk_div", "cpu_clk_div",
- 0, LS1X_CLK_PLL_FREQ, DIV_DDR_SHIFT,
- DIV_DDR_WIDTH, CLK_DIVIDER_ALLOW_ZERO,
- ahb_div_table, &_lock);
- clk_hw_register_clkdev(hw, "ahb_clk_div", NULL);
- hw = clk_hw_register_fixed_factor(NULL, "ahb_clk", "ahb_clk_div",
- 0, 1, 1);
- clk_hw_register_clkdev(hw, "ahb_clk", NULL);
- clk_hw_register_clkdev(hw, "ls1x-dma", NULL);
- clk_hw_register_clkdev(hw, "stmmaceth", NULL);
-
- /* clock derived from AHB clk */
- hw = clk_hw_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
- DIV_APB);
- clk_hw_register_clkdev(hw, "apb_clk", NULL);
- clk_hw_register_clkdev(hw, "ls1x-ac97", NULL);
- clk_hw_register_clkdev(hw, "ls1x-i2c", NULL);
- clk_hw_register_clkdev(hw, "ls1x-nand", NULL);
- clk_hw_register_clkdev(hw, "ls1x-pwmtimer", NULL);
- clk_hw_register_clkdev(hw, "ls1x-spi", NULL);
- clk_hw_register_clkdev(hw, "ls1x-wdt", NULL);
- clk_hw_register_clkdev(hw, "serial8250", NULL);
-}
diff --git a/drivers/clk/loongson1/clk.c b/drivers/clk/loongson1/clk.c
deleted file mode 100644
index f336a3126d31..000000000000
--- a/drivers/clk/loongson1/clk.c
+++ /dev/null
@@ -1,41 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2012-2016 Zhang, Keguang <keguang.zhang@gmail.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/slab.h>
-
-#include "clk.h"
-
-struct clk_hw *__init clk_hw_register_pll(struct device *dev,
- const char *name,
- const char *parent_name,
- const struct clk_ops *ops,
- unsigned long flags)
-{
- int ret;
- struct clk_hw *hw;
- struct clk_init_data init;
-
- /* allocate the divider */
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = ops;
- init.flags = flags;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- hw->init = &init;
-
- /* register the clock */
- ret = clk_hw_register(dev, hw);
- if (ret) {
- kfree(hw);
- hw = ERR_PTR(ret);
- }
-
- return hw;
-}
diff --git a/drivers/clk/loongson1/clk.h b/drivers/clk/loongson1/clk.h
deleted file mode 100644
index 124642302b12..000000000000
--- a/drivers/clk/loongson1/clk.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2012-2016 Zhang, Keguang <keguang.zhang@gmail.com>
- */
-
-#ifndef __LOONGSON1_CLK_H
-#define __LOONGSON1_CLK_H
-
-struct clk_hw *clk_hw_register_pll(struct device *dev,
- const char *name,
- const char *parent_name,
- const struct clk_ops *ops,
- unsigned long flags);
-
-#endif /* __LOONGSON1_CLK_H */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 22e8e79475ee..99e67c07e638 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -75,7 +75,7 @@ config COMMON_CLK_MT2701_G3DSYS
This driver supports MediaTek MT2701 g3dsys clocks.
config COMMON_CLK_MT2712
- bool "Clock driver for MediaTek MT2712"
+ tristate "Clock driver for MediaTek MT2712"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM64
@@ -83,43 +83,43 @@ config COMMON_CLK_MT2712
This driver supports MediaTek MT2712 basic clocks.
config COMMON_CLK_MT2712_BDPSYS
- bool "Clock driver for MediaTek MT2712 bdpsys"
+ tristate "Clock driver for MediaTek MT2712 bdpsys"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 bdpsys clocks.
config COMMON_CLK_MT2712_IMGSYS
- bool "Clock driver for MediaTek MT2712 imgsys"
+ tristate "Clock driver for MediaTek MT2712 imgsys"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 imgsys clocks.
config COMMON_CLK_MT2712_JPGDECSYS
- bool "Clock driver for MediaTek MT2712 jpgdecsys"
+ tristate "Clock driver for MediaTek MT2712 jpgdecsys"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 jpgdecsys clocks.
config COMMON_CLK_MT2712_MFGCFG
- bool "Clock driver for MediaTek MT2712 mfgcfg"
+ tristate "Clock driver for MediaTek MT2712 mfgcfg"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 mfgcfg clocks.
config COMMON_CLK_MT2712_MMSYS
- bool "Clock driver for MediaTek MT2712 mmsys"
+ tristate "Clock driver for MediaTek MT2712 mmsys"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 mmsys clocks.
config COMMON_CLK_MT2712_VDECSYS
- bool "Clock driver for MediaTek MT2712 vdecsys"
+ tristate "Clock driver for MediaTek MT2712 vdecsys"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 vdecsys clocks.
config COMMON_CLK_MT2712_VENCSYS
- bool "Clock driver for MediaTek MT2712 vencsys"
+ tristate "Clock driver for MediaTek MT2712 vencsys"
depends on COMMON_CLK_MT2712
help
This driver supports MediaTek MT2712 vencsys clocks.
@@ -133,79 +133,79 @@ config COMMON_CLK_MT6765
This driver supports MediaTek MT6765 basic clocks.
config COMMON_CLK_MT6765_AUDIOSYS
- bool "Clock driver for MediaTek MT6765 audiosys"
+ tristate "Clock driver for MediaTek MT6765 audiosys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 audiosys clocks.
config COMMON_CLK_MT6765_CAMSYS
- bool "Clock driver for MediaTek MT6765 camsys"
+ tristate "Clock driver for MediaTek MT6765 camsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 camsys clocks.
config COMMON_CLK_MT6765_GCESYS
- bool "Clock driver for MediaTek MT6765 gcesys"
+ tristate "Clock driver for MediaTek MT6765 gcesys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 gcesys clocks.
config COMMON_CLK_MT6765_MMSYS
- bool "Clock driver for MediaTek MT6765 mmsys"
+ tristate "Clock driver for MediaTek MT6765 mmsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mmsys clocks.
config COMMON_CLK_MT6765_IMGSYS
- bool "Clock driver for MediaTek MT6765 imgsys"
+ tristate "Clock driver for MediaTek MT6765 imgsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 imgsys clocks.
config COMMON_CLK_MT6765_VCODECSYS
- bool "Clock driver for MediaTek MT6765 vcodecsys"
+ tristate "Clock driver for MediaTek MT6765 vcodecsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 vcodecsys clocks.
config COMMON_CLK_MT6765_MFGSYS
- bool "Clock driver for MediaTek MT6765 mfgsys"
+ tristate "Clock driver for MediaTek MT6765 mfgsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mfgsys clocks.
config COMMON_CLK_MT6765_MIPI0ASYS
- bool "Clock driver for MediaTek MT6765 mipi0asys"
+ tristate "Clock driver for MediaTek MT6765 mipi0asys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mipi0asys clocks.
config COMMON_CLK_MT6765_MIPI0BSYS
- bool "Clock driver for MediaTek MT6765 mipi0bsys"
+ tristate "Clock driver for MediaTek MT6765 mipi0bsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mipi0bsys clocks.
config COMMON_CLK_MT6765_MIPI1ASYS
- bool "Clock driver for MediaTek MT6765 mipi1asys"
+ tristate "Clock driver for MediaTek MT6765 mipi1asys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mipi1asys clocks.
config COMMON_CLK_MT6765_MIPI1BSYS
- bool "Clock driver for MediaTek MT6765 mipi1bsys"
+ tristate "Clock driver for MediaTek MT6765 mipi1bsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mipi1bsys clocks.
config COMMON_CLK_MT6765_MIPI2ASYS
- bool "Clock driver for MediaTek MT6765 mipi2asys"
+ tristate "Clock driver for MediaTek MT6765 mipi2asys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mipi2asys clocks.
config COMMON_CLK_MT6765_MIPI2BSYS
- bool "Clock driver for MediaTek MT6765 mipi2bsys"
+ tristate "Clock driver for MediaTek MT6765 mipi2bsys"
depends on COMMON_CLK_MT6765
help
This driver supports MediaTek MT6765 mipi2bsys clocks.
@@ -270,6 +270,7 @@ config COMMON_CLK_MT6795
tristate "Clock driver for MediaTek MT6795"
depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
+ select COMMON_CLK_MEDIATEK_FHCTL
default ARCH_MEDIATEK
help
This driver supports MediaTek MT6795 basic clocks and clocks
@@ -312,31 +313,31 @@ config COMMON_CLK_MT6797
This driver supports MediaTek MT6797 basic clocks.
config COMMON_CLK_MT6797_MMSYS
- bool "Clock driver for MediaTek MT6797 mmsys"
+ tristate "Clock driver for MediaTek MT6797 mmsys"
depends on COMMON_CLK_MT6797
help
This driver supports MediaTek MT6797 mmsys clocks.
config COMMON_CLK_MT6797_IMGSYS
- bool "Clock driver for MediaTek MT6797 imgsys"
+ tristate "Clock driver for MediaTek MT6797 imgsys"
depends on COMMON_CLK_MT6797
help
This driver supports MediaTek MT6797 imgsys clocks.
config COMMON_CLK_MT6797_VDECSYS
- bool "Clock driver for MediaTek MT6797 vdecsys"
+ tristate "Clock driver for MediaTek MT6797 vdecsys"
depends on COMMON_CLK_MT6797
help
This driver supports MediaTek MT6797 vdecsys clocks.
config COMMON_CLK_MT6797_VENCSYS
- bool "Clock driver for MediaTek MT6797 vencsys"
+ tristate "Clock driver for MediaTek MT6797 vencsys"
depends on COMMON_CLK_MT6797
help
This driver supports MediaTek MT6797 vencsys clocks.
config COMMON_CLK_MT7622
- bool "Clock driver for MediaTek MT7622"
+ tristate "Clock driver for MediaTek MT7622"
depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
@@ -345,21 +346,21 @@ config COMMON_CLK_MT7622
required for various peripherals found on MediaTek.
config COMMON_CLK_MT7622_ETHSYS
- bool "Clock driver for MediaTek MT7622 ETHSYS"
+ tristate "Clock driver for MediaTek MT7622 ETHSYS"
depends on COMMON_CLK_MT7622
help
This driver adds support for clocks for Ethernet and SGMII
required on MediaTek MT7622 SoC.
config COMMON_CLK_MT7622_HIFSYS
- bool "Clock driver for MediaTek MT7622 HIFSYS"
+ tristate "Clock driver for MediaTek MT7622 HIFSYS"
depends on COMMON_CLK_MT7622
help
This driver supports MediaTek MT7622 HIFSYS clocks provided
to PCI-E and USB.
config COMMON_CLK_MT7622_AUDSYS
- bool "Clock driver for MediaTek MT7622 AUDSYS"
+ tristate "Clock driver for MediaTek MT7622 AUDSYS"
depends on COMMON_CLK_MT7622
help
This driver supports MediaTek MT7622 AUDSYS clocks providing
@@ -388,8 +389,25 @@ config COMMON_CLK_MT7629_HIFSYS
This driver supports MediaTek MT7629 HIFSYS clocks providing
to PCI-E and USB.
+config COMMON_CLK_MT7981
+ bool "Clock driver for MediaTek MT7981"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT7981 basic clocks and clocks
+ required for various peripherals found on this SoC.
+
+config COMMON_CLK_MT7981_ETHSYS
+ tristate "Clock driver for MediaTek MT7981 ETHSYS"
+ depends on COMMON_CLK_MT7981
+ default COMMON_CLK_MT7981
+ help
+ This driver adds support for clocks for Ethernet and SGMII
+ required on MediaTek MT7981 SoC.
+
config COMMON_CLK_MT7986
- bool "Clock driver for MediaTek MT7986"
+ tristate "Clock driver for MediaTek MT7986"
depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
@@ -398,7 +416,7 @@ config COMMON_CLK_MT7986
required for various peripherals found on MediaTek.
config COMMON_CLK_MT7986_ETHSYS
- bool "Clock driver for MediaTek MT7986 ETHSYS"
+ tristate "Clock driver for MediaTek MT7986 ETHSYS"
depends on COMMON_CLK_MT7986
default COMMON_CLK_MT7986
help
@@ -406,7 +424,7 @@ config COMMON_CLK_MT7986_ETHSYS
required on MediaTek MT7986 SoC.
config COMMON_CLK_MT8135
- bool "Clock driver for MediaTek MT8135"
+ tristate "Clock driver for MediaTek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM
@@ -414,7 +432,7 @@ config COMMON_CLK_MT8135
This driver supports MediaTek MT8135 clocks.
config COMMON_CLK_MT8167
- bool "Clock driver for MediaTek MT8167"
+ tristate "Clock driver for MediaTek MT8167"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
@@ -422,57 +440,80 @@ config COMMON_CLK_MT8167
This driver supports MediaTek MT8167 basic clocks.
config COMMON_CLK_MT8167_AUDSYS
- bool "Clock driver for MediaTek MT8167 audsys"
+ tristate "Clock driver for MediaTek MT8167 audsys"
depends on COMMON_CLK_MT8167
default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 audsys clocks.
config COMMON_CLK_MT8167_IMGSYS
- bool "Clock driver for MediaTek MT8167 imgsys"
+ tristate "Clock driver for MediaTek MT8167 imgsys"
depends on COMMON_CLK_MT8167
default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 imgsys clocks.
config COMMON_CLK_MT8167_MFGCFG
- bool "Clock driver for MediaTek MT8167 mfgcfg"
+ tristate "Clock driver for MediaTek MT8167 mfgcfg"
depends on COMMON_CLK_MT8167
default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 mfgcfg clocks.
config COMMON_CLK_MT8167_MMSYS
- bool "Clock driver for MediaTek MT8167 mmsys"
+ tristate "Clock driver for MediaTek MT8167 mmsys"
depends on COMMON_CLK_MT8167
default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 mmsys clocks.
config COMMON_CLK_MT8167_VDECSYS
- bool "Clock driver for MediaTek MT8167 vdecsys"
+ tristate "Clock driver for MediaTek MT8167 vdecsys"
depends on COMMON_CLK_MT8167
default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 vdecsys clocks.
config COMMON_CLK_MT8173
- bool "Clock driver for MediaTek MT8173"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ tristate "Clock driver for MediaTek MT8173"
+ depends on ARM64 || COMPILE_TEST
select COMMON_CLK_MEDIATEK
+ select COMMON_CLK_MEDIATEK_FHCTL
default ARCH_MEDIATEK
help
- This driver supports MediaTek MT8173 clocks.
+ This driver supports MediaTek MT8173 basic clocks and clocks
+ required for various peripherals found on MediaTek.
+
+config COMMON_CLK_MT8173_IMGSYS
+ tristate "Clock driver for MediaTek MT8173 imgsys"
+ depends on COMMON_CLK_MT8173
+ default COMMON_CLK_MT8173
+ help
+ This driver supports MediaTek MT8173 imgsys clocks.
config COMMON_CLK_MT8173_MMSYS
- bool "Clock driver for MediaTek MT8173 mmsys"
+ tristate "Clock driver for MediaTek MT8173 mmsys"
+ depends on COMMON_CLK_MT8173
+ default COMMON_CLK_MT8173
+ help
+ This driver supports MediaTek MT8173 mmsys clocks.
+
+config COMMON_CLK_MT8173_VDECSYS
+ tristate "Clock driver for MediaTek MT8173 VDECSYS"
depends on COMMON_CLK_MT8173
default COMMON_CLK_MT8173
help
- This driver supports MediaTek MT8173 mmsys clocks.
+ This driver supports MediaTek MT8173 vdecsys clocks.
+
+config COMMON_CLK_MT8173_VENCSYS
+ tristate "Clock driver for MediaTek MT8173 VENCSYS"
+ depends on COMMON_CLK_MT8173
+ default COMMON_CLK_MT8173
+ help
+ This driver supports MediaTek MT8173 vencsys clocks.
config COMMON_CLK_MT8183
- bool "Clock driver for MediaTek MT8183"
+ tristate "Clock driver for MediaTek MT8183"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM64
@@ -480,84 +521,84 @@ config COMMON_CLK_MT8183
This driver supports MediaTek MT8183 basic clocks.
config COMMON_CLK_MT8183_AUDIOSYS
- bool "Clock driver for MediaTek MT8183 audiosys"
+ tristate "Clock driver for MediaTek MT8183 audiosys"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 audiosys clocks.
config COMMON_CLK_MT8183_CAMSYS
- bool "Clock driver for MediaTek MT8183 camsys"
+ tristate "Clock driver for MediaTek MT8183 camsys"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 camsys clocks.
config COMMON_CLK_MT8183_IMGSYS
- bool "Clock driver for MediaTek MT8183 imgsys"
+ tristate "Clock driver for MediaTek MT8183 imgsys"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 imgsys clocks.
config COMMON_CLK_MT8183_IPU_CORE0
- bool "Clock driver for MediaTek MT8183 ipu_core0"
+ tristate "Clock driver for MediaTek MT8183 ipu_core0"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_core0 clocks.
config COMMON_CLK_MT8183_IPU_CORE1
- bool "Clock driver for MediaTek MT8183 ipu_core1"
+ tristate "Clock driver for MediaTek MT8183 ipu_core1"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_core1 clocks.
config COMMON_CLK_MT8183_IPU_ADL
- bool "Clock driver for MediaTek MT8183 ipu_adl"
+ tristate "Clock driver for MediaTek MT8183 ipu_adl"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_adl clocks.
config COMMON_CLK_MT8183_IPU_CONN
- bool "Clock driver for MediaTek MT8183 ipu_conn"
+ tristate "Clock driver for MediaTek MT8183 ipu_conn"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_conn clocks.
config COMMON_CLK_MT8183_MFGCFG
- bool "Clock driver for MediaTek MT8183 mfgcfg"
+ tristate "Clock driver for MediaTek MT8183 mfgcfg"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 mfgcfg clocks.
config COMMON_CLK_MT8183_MMSYS
- bool "Clock driver for MediaTek MT8183 mmsys"
+ tristate "Clock driver for MediaTek MT8183 mmsys"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 mmsys clocks.
config COMMON_CLK_MT8183_VDECSYS
- bool "Clock driver for MediaTek MT8183 vdecsys"
+ tristate "Clock driver for MediaTek MT8183 vdecsys"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 vdecsys clocks.
config COMMON_CLK_MT8183_VENCSYS
- bool "Clock driver for MediaTek MT8183 vencsys"
+ tristate "Clock driver for MediaTek MT8183 vencsys"
depends on COMMON_CLK_MT8183
default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 vencsys clocks.
config COMMON_CLK_MT8186
- bool "Clock driver for MediaTek MT8186"
+ tristate "Clock driver for MediaTek MT8186"
depends on ARM64 || COMPILE_TEST
select COMMON_CLK_MEDIATEK
select COMMON_CLK_MEDIATEK_FHCTL
@@ -565,82 +606,246 @@ config COMMON_CLK_MT8186
help
This driver supports MediaTek MT8186 clocks.
+config COMMON_CLK_MT8186_CAMSYS
+ tristate "Clock driver for MediaTek MT8186 camsys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 camsys and camsys_raw clocks.
+
+config COMMON_CLK_MT8186_IMGSYS
+ tristate "Clock driver for MediaTek MT8186 imgsys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 imgsys and imgsys2 clocks.
+
+config COMMON_CLK_MT8186_IPESYS
+ tristate "Clock driver for MediaTek MT8186 ipesys"
+ depends on COMMON_CLK_MT8186_IMGSYS
+ default COMMON_CLK_MT8186_IMGSYS
+ help
+ This driver supports MediaTek MT8186 ipesys clocks.
+
+config COMMON_CLK_MT8186_WPESYS
+ tristate "Clock driver for MediaTek MT8186 wpesys"
+ depends on COMMON_CLK_MT8186_IMGSYS
+ default COMMON_CLK_MT8186_IMGSYS
+ help
+ This driver supports MediaTek MT8186 Warp Engine clocks.
+
+config COMMON_CLK_MT8186_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8186 imp_iic_wrap"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 imp_iic_wrap clocks.
+
+config COMMON_CLK_MT8186_MCUSYS
+ tristate "Clock driver for MediaTek MT8186 mcusys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 mcusys clocks.
+
+config COMMON_CLK_MT8186_MDPSYS
+ tristate "Clock driver for MediaTek MT8186 mdpsys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 mdpsys clocks.
+
+config COMMON_CLK_MT8186_MFGCFG
+ tristate "Clock driver for MediaTek MT8186 mfgcfg"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 mfgcfg clocks.
+
+config COMMON_CLK_MT8186_MMSYS
+ tristate "Clock driver for MediaTek MT8186 mmsys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 mmsys clocks.
+
+config COMMON_CLK_MT8186_VDECSYS
+ tristate "Clock driver for MediaTek MT8186 vdecsys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 vdecsys and vdecsys_soc clocks.
+
+config COMMON_CLK_MT8186_VENCSYS
+ tristate "Clock driver for MediaTek MT8186 vencsys"
+ depends on COMMON_CLK_MT8186
+ default COMMON_CLK_MT8186
+ help
+ This driver supports MediaTek MT8186 vencsys clocks.
+
+config COMMON_CLK_MT8188
+ tristate "Clock driver for MediaTek MT8188"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ select COMMON_CLK_MEDIATEK_FHCTL
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8188 clocks.
+
+config COMMON_CLK_MT8188_ADSP_AUDIO26M
+ tristate "Clock driver for MediaTek MT8188 adsp audio26m"
+ depends on COMMON_CLK_MT8188
+ default COMMON_CLK_MT8188
+ help
+ This driver supports MediaTek MT8188 adsp audio26m clocks.
+
+config COMMON_CLK_MT8188_CAMSYS
+ tristate "Clock driver for MediaTek MT8188 camsys"
+ depends on COMMON_CLK_MT8188_VPPSYS
+ default COMMON_CLK_MT8188_VPPSYS
+ help
+ This driver supports MediaTek MT8188 camsys and camsys_raw clocks.
+
+config COMMON_CLK_MT8188_IMGSYS
+ tristate "Clock driver for MediaTek MT8188 imgsys"
+ depends on COMMON_CLK_MT8188_VPPSYS
+ default COMMON_CLK_MT8188_VPPSYS
+ help
+ This driver supports MediaTek MT8188 imgsys and imgsys2 clocks.
+
+config COMMON_CLK_MT8188_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8188 imp_iic_wrap"
+ depends on COMMON_CLK_MT8188
+ default COMMON_CLK_MT8188
+ help
+ This driver supports MediaTek MT8188 I2C/I3C clocks.
+
+config COMMON_CLK_MT8188_IPESYS
+ tristate "Clock driver for MediaTek MT8188 ipesys"
+ depends on COMMON_CLK_MT8188_IMGSYS
+ default COMMON_CLK_MT8188_IMGSYS
+ help
+ This driver supports MediaTek MT8188 ipesys clocks.
+
+config COMMON_CLK_MT8188_MFGCFG
+ tristate "Clock driver for MediaTek MT8188 mfgcfg"
+ depends on COMMON_CLK_MT8188
+ default COMMON_CLK_MT8188
+ help
+ This driver supports MediaTek MT8188 mfgcfg clocks.
+
+config COMMON_CLK_MT8188_VDECSYS
+ tristate "Clock driver for MediaTek MT8188 vdecsys"
+ depends on COMMON_CLK_MT8188_VPPSYS
+ default COMMON_CLK_MT8188_VPPSYS
+ help
+ This driver supports MediaTek MT8188 vdecsys and vdecsys_soc clocks.
+
+config COMMON_CLK_MT8188_VDOSYS
+ tristate "Clock driver for MediaTek MT8188 vdosys"
+ depends on COMMON_CLK_MT8188
+ default COMMON_CLK_MT8188
+ help
+ This driver supports MediaTek MT8188 vdosys0/1 (multimedia) clocks.
+
+config COMMON_CLK_MT8188_VENCSYS
+ tristate "Clock driver for MediaTek MT8188 vencsys"
+ depends on COMMON_CLK_MT8188_VPPSYS
+ default COMMON_CLK_MT8188_VPPSYS
+ help
+ This driver supports MediaTek MT8188 vencsys clocks.
+
+config COMMON_CLK_MT8188_VPPSYS
+ tristate "Clock driver for MediaTek MT8188 vppsys"
+ depends on COMMON_CLK_MT8188
+ default COMMON_CLK_MT8188
+ help
+ This driver supports MediaTek MT8188 vppsys0/1 clocks.
+
+config COMMON_CLK_MT8188_WPESYS
+ tristate "Clock driver for MediaTek MT8188 wpesys"
+ depends on COMMON_CLK_MT8188_IMGSYS
+ default COMMON_CLK_MT8188_IMGSYS
+ help
+ This driver supports MediaTek MT8188 Warp Engine clocks.
+
config COMMON_CLK_MT8192
- bool "Clock driver for MediaTek MT8192"
+ tristate "Clock driver for MediaTek MT8192"
depends on ARM64 || COMPILE_TEST
select COMMON_CLK_MEDIATEK
+ select COMMON_CLK_MEDIATEK_FHCTL
default ARM64
help
This driver supports MediaTek MT8192 basic clocks.
config COMMON_CLK_MT8192_AUDSYS
- bool "Clock driver for MediaTek MT8192 audsys"
+ tristate "Clock driver for MediaTek MT8192 audsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 audsys clocks.
config COMMON_CLK_MT8192_CAMSYS
- bool "Clock driver for MediaTek MT8192 camsys"
+ tristate "Clock driver for MediaTek MT8192 camsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 camsys and camsys_raw clocks.
config COMMON_CLK_MT8192_IMGSYS
- bool "Clock driver for MediaTek MT8192 imgsys"
+ tristate "Clock driver for MediaTek MT8192 imgsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 imgsys and imgsys2 clocks.
config COMMON_CLK_MT8192_IMP_IIC_WRAP
- bool "Clock driver for MediaTek MT8192 imp_iic_wrap"
+ tristate "Clock driver for MediaTek MT8192 imp_iic_wrap"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 imp_iic_wrap clocks.
config COMMON_CLK_MT8192_IPESYS
- bool "Clock driver for MediaTek MT8192 ipesys"
+ tristate "Clock driver for MediaTek MT8192 ipesys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 ipesys clocks.
config COMMON_CLK_MT8192_MDPSYS
- bool "Clock driver for MediaTek MT8192 mdpsys"
+ tristate "Clock driver for MediaTek MT8192 mdpsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 mdpsys clocks.
config COMMON_CLK_MT8192_MFGCFG
- bool "Clock driver for MediaTek MT8192 mfgcfg"
+ tristate "Clock driver for MediaTek MT8192 mfgcfg"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 mfgcfg clocks.
config COMMON_CLK_MT8192_MMSYS
- bool "Clock driver for MediaTek MT8192 mmsys"
+ tristate "Clock driver for MediaTek MT8192 mmsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 mmsys clocks.
config COMMON_CLK_MT8192_MSDC
- bool "Clock driver for MediaTek MT8192 msdc"
+ tristate "Clock driver for MediaTek MT8192 msdc"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 msdc and msdc_top clocks.
config COMMON_CLK_MT8192_SCP_ADSP
- bool "Clock driver for MediaTek MT8192 scp_adsp"
+ tristate "Clock driver for MediaTek MT8192 scp_adsp"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 scp_adsp clocks.
config COMMON_CLK_MT8192_VDECSYS
- bool "Clock driver for MediaTek MT8192 vdecsys"
+ tristate "Clock driver for MediaTek MT8192 vdecsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 vdecsys and vdecsys_soc clocks.
config COMMON_CLK_MT8192_VENCSYS
- bool "Clock driver for MediaTek MT8192 vencsys"
+ tristate "Clock driver for MediaTek MT8192 vencsys"
depends on COMMON_CLK_MT8192
help
This driver supports MediaTek MT8192 vencsys clocks.
@@ -649,10 +854,111 @@ config COMMON_CLK_MT8195
bool "Clock driver for MediaTek MT8195"
depends on ARM64 || COMPILE_TEST
select COMMON_CLK_MEDIATEK
+ select COMMON_CLK_MEDIATEK_FHCTL
default ARCH_MEDIATEK
help
This driver supports MediaTek MT8195 clocks.
+config COMMON_CLK_MT8195_APUSYS
+ tristate "Clock driver for MediaTek MT8195 apusys"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 AI Processor Unit System clocks.
+
+config COMMON_CLK_MT8195_AUDSYS
+ tristate "Clock driver for MediaTek MT8195 audsys"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 audsys clocks.
+
+config COMMON_CLK_MT8195_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8195 imp_iic_wrap"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 I2C/I3C clocks.
+
+config COMMON_CLK_MT8195_MFGCFG
+ tristate "Clock driver for MediaTek MT8195 mfgcfg"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 mfgcfg clocks.
+
+config COMMON_CLK_MT8195_MSDC
+ tristate "Clock driver for MediaTek MT8195 msdc"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 MMC and SD Controller's
+ msdc and msdc_top clocks.
+
+config COMMON_CLK_MT8195_SCP_ADSP
+ tristate "Clock driver for MediaTek MT8195 scp_adsp"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 System Companion Processor
+ Audio DSP clocks.
+
+config COMMON_CLK_MT8195_VDOSYS
+ tristate "Clock driver for MediaTek MT8195 vdosys"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 vdosys0/1 (multimedia) clocks.
+
+config COMMON_CLK_MT8195_VPPSYS
+ tristate "Clock driver for MediaTek MT8195 vppsys"
+ depends on COMMON_CLK_MT8195
+ default COMMON_CLK_MT8195
+ help
+ This driver supports MediaTek MT8195 vppsys0/1 clocks.
+
+config COMMON_CLK_MT8195_CAMSYS
+ tristate "Clock driver for MediaTek MT8195 camsys"
+ depends on COMMON_CLK_MT8195_VPPSYS
+ default COMMON_CLK_MT8195_VPPSYS
+ help
+ This driver supports MediaTek MT8195 camsys and camsys_raw clocks.
+
+config COMMON_CLK_MT8195_IMGSYS
+ tristate "Clock driver for MediaTek MT8195 imgsys"
+ depends on COMMON_CLK_MT8195_VPPSYS
+ default COMMON_CLK_MT8195_VPPSYS
+ help
+ This driver supports MediaTek MT8195 imgsys and imgsys2 clocks.
+
+config COMMON_CLK_MT8195_IPESYS
+ tristate "Clock driver for MediaTek MT8195 ipesys"
+ depends on COMMON_CLK_MT8195_IMGSYS
+ default COMMON_CLK_MT8195_IMGSYS
+ help
+ This driver supports MediaTek MT8195 ipesys clocks.
+
+config COMMON_CLK_MT8195_WPESYS
+ tristate "Clock driver for MediaTek MT8195 wpesys"
+ depends on COMMON_CLK_MT8195_IMGSYS
+ default COMMON_CLK_MT8195_IMGSYS
+ help
+ This driver supports MediaTek MT8195 Warp Engine clocks.
+
+config COMMON_CLK_MT8195_VDECSYS
+ tristate "Clock driver for MediaTek MT8195 vdecsys"
+ depends on COMMON_CLK_MT8195_VPPSYS
+ default COMMON_CLK_MT8195_VPPSYS
+ help
+ This driver supports MediaTek MT8195 vdecsys and vdecsys_soc clocks.
+
+config COMMON_CLK_MT8195_VENCSYS
+ tristate "Clock driver for MediaTek MT8195 vencsys"
+ depends on COMMON_CLK_MT8195_VPPSYS
+ default COMMON_CLK_MT8195_VPPSYS
+ help
+ This driver supports MediaTek MT8195 vencsys clocks.
+
config COMMON_CLK_MT8365
tristate "Clock driver for MediaTek MT8365"
depends on ARCH_MEDIATEK || COMPILE_TEST
@@ -704,7 +1010,7 @@ config COMMON_CLK_MT8365_VENC
This driver supports MediaTek MT8365 venc clocks.
config COMMON_CLK_MT8516
- bool "Clock driver for MediaTek MT8516"
+ tristate "Clock driver for MediaTek MT8516"
depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
@@ -712,7 +1018,7 @@ config COMMON_CLK_MT8516
This driver supports MediaTek MT8516 clocks.
config COMMON_CLK_MT8516_AUDSYS
- bool "Clock driver for MediaTek MT8516 audsys"
+ tristate "Clock driver for MediaTek MT8516 audsys"
depends on COMMON_CLK_MT8516
help
This driver supports MediaTek MT8516 audsys clocks.
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index e24080fd6e7f..dbeaa5b41177 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
obj-$(CONFIG_COMMON_CLK_MT2701_VDECSYS) += clk-mt2701-vdec.o
-obj-$(CONFIG_COMMON_CLK_MT2712) += clk-mt2712.o
+obj-$(CONFIG_COMMON_CLK_MT2712) += clk-mt2712-apmixedsys.o clk-mt2712.o
obj-$(CONFIG_COMMON_CLK_MT2712_BDPSYS) += clk-mt2712-bdp.o
obj-$(CONFIG_COMMON_CLK_MT2712_IMGSYS) += clk-mt2712-img.o
obj-$(CONFIG_COMMON_CLK_MT2712_JPGDECSYS) += clk-mt2712-jpgdec.o
@@ -46,27 +46,36 @@ obj-$(CONFIG_COMMON_CLK_MT2712_MFGCFG) += clk-mt2712-mfg.o
obj-$(CONFIG_COMMON_CLK_MT2712_MMSYS) += clk-mt2712-mm.o
obj-$(CONFIG_COMMON_CLK_MT2712_VDECSYS) += clk-mt2712-vdec.o
obj-$(CONFIG_COMMON_CLK_MT2712_VENCSYS) += clk-mt2712-venc.o
-obj-$(CONFIG_COMMON_CLK_MT7622) += clk-mt7622.o
+obj-$(CONFIG_COMMON_CLK_MT7622) += clk-mt7622-apmixedsys.o clk-mt7622.o \
+ clk-mt7622-infracfg.o
obj-$(CONFIG_COMMON_CLK_MT7622_ETHSYS) += clk-mt7622-eth.o
obj-$(CONFIG_COMMON_CLK_MT7622_HIFSYS) += clk-mt7622-hif.o
obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
+obj-$(CONFIG_COMMON_CLK_MT7981) += clk-mt7981-apmixed.o
+obj-$(CONFIG_COMMON_CLK_MT7981) += clk-mt7981-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT7981) += clk-mt7981-infracfg.o
+obj-$(CONFIG_COMMON_CLK_MT7981_ETHSYS) += clk-mt7981-eth.o
obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-apmixed.o
obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-topckgen.o
obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-infracfg.o
obj-$(CONFIG_COMMON_CLK_MT7986_ETHSYS) += clk-mt7986-eth.o
-obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
-obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167.o
+obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135-apmixedsys.o clk-mt8135.o
+obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167-apmixedsys.o clk-mt8167.o
obj-$(CONFIG_COMMON_CLK_MT8167_AUDSYS) += clk-mt8167-aud.o
obj-$(CONFIG_COMMON_CLK_MT8167_IMGSYS) += clk-mt8167-img.o
obj-$(CONFIG_COMMON_CLK_MT8167_MFGCFG) += clk-mt8167-mfgcfg.o
obj-$(CONFIG_COMMON_CLK_MT8167_MMSYS) += clk-mt8167-mm.o
obj-$(CONFIG_COMMON_CLK_MT8167_VDECSYS) += clk-mt8167-vdec.o
-obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173-apmixedsys.o clk-mt8173-infracfg.o \
+ clk-mt8173-pericfg.o clk-mt8173-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT8173_IMGSYS) += clk-mt8173-img.o
obj-$(CONFIG_COMMON_CLK_MT8173_MMSYS) += clk-mt8173-mm.o
-obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
+obj-$(CONFIG_COMMON_CLK_MT8173_VDECSYS) += clk-mt8173-vdecsys.o
+obj-$(CONFIG_COMMON_CLK_MT8173_VENCSYS) += clk-mt8173-vencsys.o
+obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183-apmixedsys.o clk-mt8183.o
obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
obj-$(CONFIG_COMMON_CLK_MT8183_IMGSYS) += clk-mt8183-img.o
@@ -78,12 +87,33 @@ obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o
obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
-obj-$(CONFIG_COMMON_CLK_MT8186) += clk-mt8186-mcu.o clk-mt8186-topckgen.o clk-mt8186-infra_ao.o \
- clk-mt8186-apmixedsys.o clk-mt8186-imp_iic_wrap.o \
- clk-mt8186-mfg.o clk-mt8186-mm.o clk-mt8186-wpe.o \
- clk-mt8186-img.o clk-mt8186-vdec.o clk-mt8186-venc.o \
- clk-mt8186-cam.o clk-mt8186-mdp.o clk-mt8186-ipe.o
-obj-$(CONFIG_COMMON_CLK_MT8192) += clk-mt8192.o
+obj-$(CONFIG_COMMON_CLK_MT8186) += clk-mt8186-apmixedsys.o clk-mt8186-topckgen.o \
+ clk-mt8186-infra_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8186_CAMSYS) += clk-mt8186-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8186_IMGSYS) += clk-mt8186-img.o
+obj-$(CONFIG_COMMON_CLK_MT8186_IMP_IIC_WRAP) += clk-mt8186-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8186_IPESYS) += clk-mt8186-ipe.o
+obj-$(CONFIG_COMMON_CLK_MT8186_MCUSYS) += clk-mt8186-mcu.o
+obj-$(CONFIG_COMMON_CLK_MT8186_MDPSYS) += clk-mt8186-mdp.o
+obj-$(CONFIG_COMMON_CLK_MT8186_MFGCFG) += clk-mt8186-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8186_MMSYS) += clk-mt8186-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8186_VDECSYS) += clk-mt8186-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8186_VENCSYS) += clk-mt8186-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8186_WPESYS) += clk-mt8186-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8188) += clk-mt8188-apmixedsys.o clk-mt8188-topckgen.o \
+ clk-mt8188-peri_ao.o clk-mt8188-infra_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8188_ADSP_AUDIO26M) += clk-mt8188-adsp_audio26m.o
+obj-$(CONFIG_COMMON_CLK_MT8188_CAMSYS) += clk-mt8188-cam.o clk-mt8188-ccu.o
+obj-$(CONFIG_COMMON_CLK_MT8188_IMGSYS) += clk-mt8188-img.o
+obj-$(CONFIG_COMMON_CLK_MT8188_IMP_IIC_WRAP) += clk-mt8188-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8188_IPESYS) += clk-mt8188-ipe.o
+obj-$(CONFIG_COMMON_CLK_MT8188_MFGCFG) += clk-mt8188-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8188_VDECSYS) += clk-mt8188-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8188_VDOSYS) += clk-mt8188-vdo0.o clk-mt8188-vdo1.o
+obj-$(CONFIG_COMMON_CLK_MT8188_VENCSYS) += clk-mt8188-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8188_VPPSYS) += clk-mt8188-vpp0.o clk-mt8188-vpp1.o
+obj-$(CONFIG_COMMON_CLK_MT8188_WPESYS) += clk-mt8188-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8192) += clk-mt8192-apmixedsys.o clk-mt8192.o
obj-$(CONFIG_COMMON_CLK_MT8192_AUDSYS) += clk-mt8192-aud.o
obj-$(CONFIG_COMMON_CLK_MT8192_CAMSYS) += clk-mt8192-cam.o
obj-$(CONFIG_COMMON_CLK_MT8192_IMGSYS) += clk-mt8192-img.o
@@ -97,19 +127,25 @@ obj-$(CONFIG_COMMON_CLK_MT8192_SCP_ADSP) += clk-mt8192-scp_adsp.o
obj-$(CONFIG_COMMON_CLK_MT8192_VDECSYS) += clk-mt8192-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8192_VENCSYS) += clk-mt8192-venc.o
obj-$(CONFIG_COMMON_CLK_MT8195) += clk-mt8195-apmixedsys.o clk-mt8195-topckgen.o \
- clk-mt8195-peri_ao.o clk-mt8195-infra_ao.o \
- clk-mt8195-cam.o clk-mt8195-ccu.o clk-mt8195-img.o \
- clk-mt8195-ipe.o clk-mt8195-mfg.o clk-mt8195-scp_adsp.o \
- clk-mt8195-vdec.o clk-mt8195-vdo0.o clk-mt8195-vdo1.o \
- clk-mt8195-venc.o clk-mt8195-vpp0.o clk-mt8195-vpp1.o \
- clk-mt8195-wpe.o clk-mt8195-imp_iic_wrap.o \
- clk-mt8195-apusys_pll.o
-obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365.o
+ clk-mt8195-peri_ao.o clk-mt8195-infra_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8195_APUSYS) += clk-mt8195-apusys_pll.o
+obj-$(CONFIG_COMMON_CLK_MT8195_CAMSYS) += clk-mt8195-cam.o clk-mt8195-ccu.o
+obj-$(CONFIG_COMMON_CLK_MT8195_IMGSYS) += clk-mt8195-img.o
+obj-$(CONFIG_COMMON_CLK_MT8195_IMP_IIC_WRAP) += clk-mt8195-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8195_IPESYS) += clk-mt8195-ipe.o
+obj-$(CONFIG_COMMON_CLK_MT8195_MFGCFG) += clk-mt8195-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8195_SCP_ADSP) += clk-mt8195-scp_adsp.o
+obj-$(CONFIG_COMMON_CLK_MT8195_VDECSYS) += clk-mt8195-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8195_VDOSYS) += clk-mt8195-vdo0.o clk-mt8195-vdo1.o
+obj-$(CONFIG_COMMON_CLK_MT8195_VENCSYS) += clk-mt8195-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8195_VPPSYS) += clk-mt8195-vpp0.o clk-mt8195-vpp1.o
+obj-$(CONFIG_COMMON_CLK_MT8195_WPESYS) += clk-mt8195-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365-apmixedsys.o clk-mt8365.o
obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
obj-$(CONFIG_COMMON_CLK_MT8365_MFG) += clk-mt8365-mfg.o
obj-$(CONFIG_COMMON_CLK_MT8365_MMSYS) += clk-mt8365-mm.o
obj-$(CONFIG_COMMON_CLK_MT8365_VDEC) += clk-mt8365-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8365_VENC) += clk-mt8365-venc.o
-obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
+obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516-apmixedsys.o clk-mt8516.o
obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 25618eff6f2a..da05f06192c0 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -58,7 +58,7 @@ static const struct clk_ops clk_cpumux_ops = {
};
static struct clk_hw *
-mtk_clk_register_cpumux(const struct mtk_composite *mux,
+mtk_clk_register_cpumux(struct device *dev, const struct mtk_composite *mux,
struct regmap *regmap)
{
struct mtk_clk_cpumux *cpumux;
@@ -81,7 +81,7 @@ mtk_clk_register_cpumux(const struct mtk_composite *mux,
cpumux->regmap = regmap;
cpumux->hw.init = &init;
- ret = clk_hw_register(NULL, &cpumux->hw);
+ ret = clk_hw_register(dev, &cpumux->hw);
if (ret) {
kfree(cpumux);
return ERR_PTR(ret);
@@ -102,7 +102,7 @@ static void mtk_clk_unregister_cpumux(struct clk_hw *hw)
kfree(cpumux);
}
-int mtk_clk_register_cpumuxes(struct device_node *node,
+int mtk_clk_register_cpumuxes(struct device *dev, struct device_node *node,
const struct mtk_composite *clks, int num,
struct clk_hw_onecell_data *clk_data)
{
@@ -125,7 +125,7 @@ int mtk_clk_register_cpumuxes(struct device_node *node,
continue;
}
- hw = mtk_clk_register_cpumux(mux, regmap);
+ hw = mtk_clk_register_cpumux(dev, mux, regmap);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
hw);
diff --git a/drivers/clk/mediatek/clk-cpumux.h b/drivers/clk/mediatek/clk-cpumux.h
index 325adbef25d1..64e45c63b4a0 100644
--- a/drivers/clk/mediatek/clk-cpumux.h
+++ b/drivers/clk/mediatek/clk-cpumux.h
@@ -11,7 +11,7 @@ struct clk_hw_onecell_data;
struct device_node;
struct mtk_composite;
-int mtk_clk_register_cpumuxes(struct device_node *node,
+int mtk_clk_register_cpumuxes(struct device *dev, struct device_node *node,
const struct mtk_composite *clks, int num,
struct clk_hw_onecell_data *clk_data);
diff --git a/drivers/clk/mediatek/clk-fhctl.c b/drivers/clk/mediatek/clk-fhctl.c
index 4f271acef5fe..33b6ad8fdc2e 100644
--- a/drivers/clk/mediatek/clk-fhctl.c
+++ b/drivers/clk/mediatek/clk-fhctl.c
@@ -14,7 +14,20 @@
#define PERCENT_TO_DDSLMT(dds, percent_m10) \
((((dds) * (percent_m10)) >> 5) / 100)
-static const struct fhctl_offset fhctl_offset = {
+static const struct fhctl_offset fhctl_offset_v1 = {
+ .offset_hp_en = 0x0,
+ .offset_clk_con = 0x4,
+ .offset_rst_con = 0x8,
+ .offset_slope0 = 0xc,
+ .offset_slope1 = 0x10,
+ .offset_cfg = 0x0,
+ .offset_updnlmt = 0x4,
+ .offset_dds = 0x8,
+ .offset_dvfs = 0xc,
+ .offset_mon = 0x10,
+};
+
+static const struct fhctl_offset fhctl_offset_v2 = {
.offset_hp_en = 0x0,
.offset_clk_con = 0x8,
.offset_rst_con = 0xc,
@@ -27,9 +40,16 @@ static const struct fhctl_offset fhctl_offset = {
.offset_mon = 0x10,
};
-const struct fhctl_offset *fhctl_get_offset_table(void)
+const struct fhctl_offset *fhctl_get_offset_table(enum fhctl_variant v)
{
- return &fhctl_offset;
+ switch (v) {
+ case FHCTL_PLLFH_V1:
+ return &fhctl_offset_v1;
+ case FHCTL_PLLFH_V2:
+ return &fhctl_offset_v2;
+ default:
+ return ERR_PTR(-EINVAL);
+ };
}
static void dump_hw(struct mtk_clk_pll *pll, struct fh_pll_regs *regs,
diff --git a/drivers/clk/mediatek/clk-fhctl.h b/drivers/clk/mediatek/clk-fhctl.h
index 51275febf086..bfa6d281a3ee 100644
--- a/drivers/clk/mediatek/clk-fhctl.h
+++ b/drivers/clk/mediatek/clk-fhctl.h
@@ -7,6 +7,13 @@
#ifndef __CLK_FHCTL_H
#define __CLK_FHCTL_H
+#include "clk-pllfh.h"
+
+enum fhctl_variant {
+ FHCTL_PLLFH_V1,
+ FHCTL_PLLFH_V2,
+};
+
struct fhctl_offset {
u32 offset_hp_en;
u32 offset_clk_con;
@@ -19,7 +26,7 @@ struct fhctl_offset {
u32 offset_dvfs;
u32 offset_mon;
};
-const struct fhctl_offset *fhctl_get_offset_table(void);
+const struct fhctl_offset *fhctl_get_offset_table(enum fhctl_variant v);
const struct fh_operation *fhctl_get_ops(void);
void fhctl_hw_init(struct mtk_fh *fh);
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 0c867136e49d..67d9e741c5e7 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -152,12 +152,12 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(const char *name,
+static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
const char *parent_name,
struct regmap *regmap, int set_ofs,
int clr_ofs, int sta_ofs, u8 bit,
const struct clk_ops *ops,
- unsigned long flags, struct device *dev)
+ unsigned long flags)
{
struct mtk_clk_gate *cg;
int ret;
@@ -202,10 +202,9 @@ static void mtk_clk_unregister_gate(struct clk_hw *hw)
kfree(cg);
}
-int mtk_clk_register_gates_with_dev(struct device_node *node,
- const struct mtk_gate *clks, int num,
- struct clk_hw_onecell_data *clk_data,
- struct device *dev)
+int mtk_clk_register_gates(struct device *dev, struct device_node *node,
+ const struct mtk_gate *clks, int num,
+ struct clk_hw_onecell_data *clk_data)
{
int i;
struct clk_hw *hw;
@@ -229,13 +228,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(gate->name, gate->parent_name,
+ hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
regmap,
gate->regs->set_ofs,
gate->regs->clr_ofs,
gate->regs->sta_ofs,
gate->shift, gate->ops,
- gate->flags, dev);
+ gate->flags);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
@@ -261,14 +260,6 @@ err:
return PTR_ERR(hw);
}
-EXPORT_SYMBOL_GPL(mtk_clk_register_gates_with_dev);
-
-int mtk_clk_register_gates(struct device_node *node,
- const struct mtk_gate *clks, int num,
- struct clk_hw_onecell_data *clk_data)
-{
- return mtk_clk_register_gates_with_dev(node, clks, num, clk_data, NULL);
-}
EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index d9897ef53528..1a46b4c56fc5 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -50,15 +50,10 @@ struct mtk_gate {
#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops) \
GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
-int mtk_clk_register_gates(struct device_node *node,
+int mtk_clk_register_gates(struct device *dev, struct device_node *node,
const struct mtk_gate *clks, int num,
struct clk_hw_onecell_data *clk_data);
-int mtk_clk_register_gates_with_dev(struct device_node *node,
- const struct mtk_gate *clks, int num,
- struct clk_hw_onecell_data *clk_data,
- struct device *dev);
-
void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
struct clk_hw_onecell_data *clk_data);
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 6ba398eb7df9..5cd343b98685 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -15,41 +15,17 @@
#include <dt-bindings/clock/mt2701-clk.h>
-#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate_regs audio0_cg_regs = {
.set_ofs = 0x0,
@@ -76,6 +52,7 @@ static const struct mtk_gate_regs audio3_cg_regs = {
};
static const struct mtk_gate audio_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
/* AUDIO0 */
GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
@@ -138,29 +115,28 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO3(CLK_AUD_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
};
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
+};
+
static const struct of_device_id of_match_clk_mt2701_aud[] = {
- { .compatible = "mediatek,mt2701-audsys", },
- {}
+ { .compatible = "mediatek,mt2701-audsys", .data = &audio_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_aud);
static int clk_mt2701_aud_probe(struct platform_device *pdev)
{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
int r;
- clk_data = mtk_alloc_clk_data(CLK_AUD_NR);
-
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ r = mtk_clk_simple_probe(pdev);
if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
pdev->name, r);
- goto err_clk_provider;
+ return r;
}
r = devm_of_platform_populate(&pdev->dev);
@@ -170,17 +146,23 @@ static int clk_mt2701_aud_probe(struct platform_device *pdev)
return 0;
err_plat_populate:
- of_clk_del_provider(node);
-err_clk_provider:
+ mtk_clk_simple_remove(pdev);
return r;
}
+static int clk_mt2701_aud_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ return mtk_clk_simple_remove(pdev);
+}
+
static struct platform_driver clk_mt2701_aud_drv = {
.probe = clk_mt2701_aud_probe,
+ .remove = clk_mt2701_aud_remove,
.driver = {
.name = "clk-mt2701-aud",
.of_match_table = of_match_clk_mt2701_aud,
},
};
-
-builtin_platform_driver(clk_mt2701_aud_drv);
+module_platform_driver(clk_mt2701_aud_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 435ed4819d56..4c5b70d48df9 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
.sta_ofs = 0x0110,
};
-#define GATE_BDP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &bdp0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_BDP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &bdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
-#define GATE_BDP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &bdp1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_BDP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
@@ -107,6 +95,7 @@ static const struct of_device_id of_match_clk_mt2701_bdp[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_bdp);
static struct platform_driver clk_mt2701_bdp_drv = {
.probe = mtk_clk_simple_probe,
@@ -116,5 +105,5 @@ static struct platform_driver clk_mt2701_bdp_drv = {
.of_match_table = of_match_clk_mt2701_bdp,
},
};
-
-builtin_platform_driver(clk_mt2701_bdp_drv);
+module_platform_driver(clk_mt2701_bdp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index edf1e2ed2b59..9a1fb0c93964 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -16,16 +16,11 @@ static const struct mtk_gate_regs eth_cg_regs = {
.sta_ofs = 0x0030,
};
-#define GATE_ETH(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &eth_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate eth_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "eth_dummy"),
GATE_ETH(CLK_ETHSYS_HSDMA, "hsdma_clk", "ethif_sel", 5),
GATE_ETH(CLK_ETHSYS_ESW, "esw_clk", "ethpll_500m_ck", 6),
GATE_ETH(CLK_ETHSYS_GP2, "gp2_clk", "trgpll", 7),
@@ -44,39 +39,25 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(rst_ofs),
};
-static const struct of_device_id of_match_clk_mt2701_eth[] = {
- { .compatible = "mediatek,mt2701-ethsys", },
- {}
+static const struct mtk_clk_desc eth_desc = {
+ .clks = eth_clks,
+ .num_clks = ARRAY_SIZE(eth_clks),
+ .rst_desc = &clk_rst_desc,
};
-static int clk_mt2701_eth_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_ETHSYS_NR);
-
- mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_eth[] = {
+ { .compatible = "mediatek,mt2701-ethsys", .data = &eth_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_eth);
static struct platform_driver clk_mt2701_eth_drv = {
- .probe = clk_mt2701_eth_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-eth",
.of_match_table = of_match_clk_mt2701_eth,
},
};
-
-builtin_platform_driver(clk_mt2701_eth_drv);
+module_platform_driver(clk_mt2701_eth_drv);
+MODULE_LICENSE("GPL");
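The per-driver GATE_ETH/GATE_BDP/GATE_HIF-style wrappers all collapse onto the shared GATE_MTK() macro. Judging from the fields the removed open-coded initializers were setting, it amounts to something like the following (GATE_MTK_SKETCH is a hypothetical name used here for illustration; the real definition lives in clk-gate.h):

#define GATE_MTK_SKETCH(_id, _name, _parent, _regs, _shift, _ops) {	\
		.id = _id,						\
		.name = _name,						\
		.parent_name = _parent,					\
		.regs = _regs,						\
		.shift = _shift,					\
		.ops = _ops,						\
	}

The added GATE_DUMMY(CLK_DUMMY, ...) entries appear to occupy an otherwise-unpopulated slot in the one-cell clock table so that every index handed back by the provider resolves to a valid clk.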
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 1458109d99d9..c0006861a317 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -16,14 +16,8 @@
#include <dt-bindings/clock/mt2701-clk.h>
-#define GATE_G3D(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &g3d_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_G3D(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &g3d_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate_regs g3d_cg_regs = {
.sta_ofs = 0x0,
@@ -32,6 +26,7 @@ static const struct mtk_gate_regs g3d_cg_regs = {
};
static const struct mtk_gate g3d_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "g3d_dummy"),
GATE_G3D(CLK_G3DSYS_CORE, "g3d_core", "mfg_sel", 0),
};
@@ -43,61 +38,25 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(rst_ofs),
};
-static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_G3DSYS_NR);
-
- mtk_clk_register_gates(node, g3d_clks, ARRAY_SIZE(g3d_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return r;
-}
+static const struct mtk_clk_desc g3d_desc = {
+ .clks = g3d_clks,
+ .num_clks = ARRAY_SIZE(g3d_clks),
+ .rst_desc = &clk_rst_desc,
+};
static const struct of_device_id of_match_clk_mt2701_g3d[] = {
- {
- .compatible = "mediatek,mt2701-g3dsys",
- .data = clk_mt2701_g3dsys_init,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt2701-g3dsys", .data = &g3d_desc },
+ { /* sentinel */ }
};
-
-static int clk_mt2701_g3d_probe(struct platform_device *pdev)
-{
- int (*clk_init)(struct platform_device *);
- int r;
-
- clk_init = of_device_get_match_data(&pdev->dev);
- if (!clk_init)
- return -EINVAL;
-
- r = clk_init(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_g3d);
static struct platform_driver clk_mt2701_g3d_drv = {
- .probe = clk_mt2701_g3d_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-g3d",
.of_match_table = of_match_clk_mt2701_g3d,
},
};
-
-builtin_platform_driver(clk_mt2701_g3d_drv);
+module_platform_driver(clk_mt2701_g3d_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index 434cbbe8c037..ff7c0b3228e4 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -16,16 +16,11 @@ static const struct mtk_gate_regs hif_cg_regs = {
.sta_ofs = 0x0030,
};
-#define GATE_HIF(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &hif_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_HIF(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &hif_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate hif_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "hif_dummy"),
GATE_HIF(CLK_HIFSYS_USB0PHY, "usb0_phy_clk", "ethpll_500m_ck", 21),
GATE_HIF(CLK_HIFSYS_USB1PHY, "usb1_phy_clk", "ethpll_500m_ck", 22),
GATE_HIF(CLK_HIFSYS_PCIE0, "pcie0_clk", "ethpll_500m_ck", 24),
@@ -41,41 +36,25 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(rst_ofs),
};
-static const struct of_device_id of_match_clk_mt2701_hif[] = {
- { .compatible = "mediatek,mt2701-hifsys", },
- {}
+static const struct mtk_clk_desc hif_desc = {
+ .clks = hif_clks,
+ .num_clks = ARRAY_SIZE(hif_clks),
+ .rst_desc = &clk_rst_desc,
};
-static int clk_mt2701_hif_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_HIFSYS_NR);
-
- mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r) {
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
- return r;
- }
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return 0;
-}
+static const struct of_device_id of_match_clk_mt2701_hif[] = {
+ { .compatible = "mediatek,mt2701-hifsys", .data = &hif_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_hif);
static struct platform_driver clk_mt2701_hif_drv = {
- .probe = clk_mt2701_hif_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-hif",
.of_match_table = of_match_clk_mt2701_hif,
},
};
-
-builtin_platform_driver(clk_mt2701_hif_drv);
+module_platform_driver(clk_mt2701_hif_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 7e53deb7f990..baa1194eb01e 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0000,
};
-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
@@ -49,6 +43,7 @@ static const struct of_device_id of_match_clk_mt2701_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_img);
static struct platform_driver clk_mt2701_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -58,5 +53,5 @@ static struct platform_driver clk_mt2701_img_drv = {
.of_match_table = of_match_clk_mt2701_img,
},
};
-
-builtin_platform_driver(clk_mt2701_img_drv);
+module_platform_driver(clk_mt2701_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 9ea7abad99d2..c62c56fd2b7e 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs disp1_cg_regs = {
.sta_ofs = 0x0110,
};
-#define GATE_DISP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &disp0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_DISP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &disp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_DISP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &disp1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_DISP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
@@ -79,32 +67,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_DISP1(CLK_MM_TVE_FMM, "mm_tve_fmm", "mm_sel", 14),
};
-static int clk_mt2701_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return r;
-}
+static const struct platform_device_id clk_mt2701_mm_id_table[] = {
+ { .name = "clk-mt2701-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt2701_mm_id_table);
static struct platform_driver clk_mt2701_mm_drv = {
- .probe = clk_mt2701_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2701-mm",
},
+ .id_table = clk_mt2701_mm_id_table,
};
-
-builtin_platform_driver(clk_mt2701_mm_drv);
+module_platform_driver(clk_mt2701_mm_drv);
+MODULE_LICENSE("GPL");
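Unlike the OF-matched drivers above, the display (mm) clock drivers are bound to a child platform device created by the mmsys driver, so the descriptor travels through the platform_device_id table's driver_data instead of the OF match data. A minimal sketch, under that assumption, of how a probe routine like mtk_clk_pdev_probe() can recover it (illustrative only; the real helper lives in clk-mtk.c):

static int mtk_clk_pdev_probe_sketch(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	const struct mtk_clk_desc *mcd = (const struct mtk_clk_desc *)id->driver_data;
	/* The clock registers live in the parent (mmsys) node, as in the removed probe. */
	struct device_node *node = pdev->dev.parent->of_node;
	struct clk_hw_onecell_data *clk_data;
	int r;

	clk_data = mtk_alloc_clk_data(mcd->num_clks);
	if (!clk_data)
		return -ENOMEM;

	r = mtk_clk_register_gates(&pdev->dev, node, mcd->clks, mcd->num_clks,
				   clk_data);
	if (r)
		return r;

	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}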
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index d3089da0ab62..b7f97bc51c16 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x0008,
};
-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
@@ -60,6 +48,7 @@ static const struct of_device_id of_match_clk_mt2701_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_vdec);
static struct platform_driver clk_mt2701_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -69,5 +58,5 @@ static struct platform_driver clk_mt2701_vdec_drv = {
.of_match_table = of_match_clk_mt2701_vdec,
},
};
-
-builtin_platform_driver(clk_mt2701_vdec_drv);
+module_platform_driver(clk_mt2701_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 9b442af37e67..4a154da8a543 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -636,14 +636,8 @@ static const struct mtk_gate_regs top_aud_cg_regs = {
.sta_ofs = 0x012C,
};
-#define GATE_TOP_AUD(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top_aud_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP_AUD(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top_aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate top_clks[] = {
GATE_TOP_AUD(CLK_TOP_AUD_48K_TIMING, "a1sys_hp_ck", "aud_mux1_div",
@@ -683,14 +677,15 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
- base, &mt2701_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt2701_clk_lock, clk_data);
- mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ mtk_clk_register_dividers(&pdev->dev, top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt2701_clk_lock, clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -701,14 +696,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
.sta_ofs = 0x0048,
};
-#define GATE_ICG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate infra_clks[] = {
GATE_ICG(CLK_INFRA_DBG, "dbgclk", "axi_sel", 0),
@@ -769,7 +758,7 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
- mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
+ mtk_clk_register_cpumuxes(NULL, node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
infra_clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
@@ -795,8 +784,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
}
}
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- infra_clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), infra_clk_data);
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
@@ -822,23 +811,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
.sta_ofs = 0x001c,
};
-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate peri_clks[] = {
GATE_PERI0(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 31),
@@ -918,11 +895,12 @@ static int mtk_pericfg_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
- mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base,
- &mt2701_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, peri_muxs,
+ ARRAY_SIZE(peri_muxs), base,
+ &mt2701_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -1021,6 +999,7 @@ static const struct of_device_id of_match_clk_mt2701[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2701);
static int clk_mt2701_probe(struct platform_device *pdev)
{
@@ -1054,3 +1033,4 @@ static int __init clk_mt2701_init(void)
}
arch_initcall(clk_mt2701_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
new file mode 100644
index 000000000000..9d2fcda285fb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Weiyi Lu <weiyi.lu@mediatek.com>
+ * Copyright (c) 2023 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-pll.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+#define MT2712_PLL_FMAX (3000UL * MHZ)
+
+#define CON0_MT2712_RST_BAR BIT(24)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT2712_RST_BAR, \
+ .fmax = MT2712_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift, NULL)
+
+static const struct mtk_pll_div_table armca35pll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 1202500000 },
+ { .div = 2, .freq = 500500000 },
+ { .div = 3, .freq = 315250000 },
+ { .div = 4, .freq = 157625000 },
+ { /* sentinel */ }
+};
+
+static const struct mtk_pll_div_table armca72pll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 994500000 },
+ { .div = 2, .freq = 520000000 },
+ { .div = 3, .freq = 315250000 },
+ { .div = 4, .freq = 157625000 },
+ { /* sentinel */ }
+};
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 1001000000 },
+ { .div = 2, .freq = 601250000 },
+ { .div = 3, .freq = 250250000 },
+ { .div = 4, .freq = 125125000 },
+ { /* sentinel */ }
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000100,
+ HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000100,
+ HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000100,
+ 0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000100,
+ 0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000100,
+ 0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000100,
+ 0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000100,
+ 0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0),
+ PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000100,
+ 0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000100,
+ 0, 31, 0x0270, 4, 0, 0, 0, 0x0274, 0),
+ PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000100,
+ 0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000100,
+ 0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000100,
+ 0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0, mmpll_div_table),
+ PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000100,
+ HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0, armca35pll_div_table),
+ PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000100,
+ 0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0, armca72pll_div_table),
+ PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000100,
+ 0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
+};
+
+static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r) {
+ dev_err(&pdev->dev, "Cannot register clock provider: %d\n", r);
+ goto unregister_plls;
+ }
+
+	platform_set_drvdata(pdev, clk_data);
+
+	return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt2712_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id of_match_clk_mt2712_apmixed[] = {
+ { .compatible = "mediatek,mt2712-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_apmixed);
+
+static struct platform_driver clk_mt2712_apmixed_drv = {
+ .probe = clk_mt2712_apmixed_probe,
+ .remove = clk_mt2712_apmixed_remove,
+ .driver = {
+ .name = "clk-mt2712-apmixed",
+ .of_match_table = of_match_clk_mt2712_apmixed,
+ },
+};
+module_platform_driver(clk_mt2712_apmixed_drv);
+MODULE_LICENSE("GPL");
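The mtk_pll_div_table arrays added above pair a post-divider setting with the highest output frequency it is expected to serve. A toy selection loop under that assumption (illustrative only, not the clk-pll.c implementation; pick_post_div is a made-up helper name):

static unsigned int pick_post_div(const struct mtk_pll_div_table *table,
				  unsigned long freq)
{
	unsigned int div = 0;

	/*
	 * Entries run from FMAX downwards and end with a zero-freq sentinel;
	 * stop at the first threshold the requested rate exceeds and keep
	 * the previous divider.
	 */
	for (; table->freq; table++) {
		if (freq > table->freq)
			break;
		div = table->div;
	}

	return div;
}

For the armca35pll table, for example, a 600 MHz request would land on div = 1, whose threshold (1202.5 MHz) is the smallest one still above the target.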
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 684d03e9f6de..f78e01819316 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs bdp_cg_regs = {
.sta_ofs = 0x100,
};
-#define GATE_BDP(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &bdp_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_BDP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &bdp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate bdp_clks[] = {
GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
@@ -71,6 +65,7 @@ static const struct of_device_id of_match_clk_mt2712_bdp[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_bdp);
static struct platform_driver clk_mt2712_bdp_drv = {
.probe = mtk_clk_simple_probe,
@@ -80,5 +75,5 @@ static struct platform_driver clk_mt2712_bdp_drv = {
.of_match_table = of_match_clk_mt2712_bdp,
},
};
-
-builtin_platform_driver(clk_mt2712_bdp_drv);
+module_platform_driver(clk_mt2712_bdp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 335049cdc856..fbe7084886a0 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
@@ -49,6 +43,7 @@ static const struct of_device_id of_match_clk_mt2712_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_img);
static struct platform_driver clk_mt2712_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -58,5 +53,5 @@ static struct platform_driver clk_mt2712_img_drv = {
.of_match_table = of_match_clk_mt2712_img,
},
};
-
-builtin_platform_driver(clk_mt2712_img_drv);
+module_platform_driver(clk_mt2712_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 07ba7c5e80af..7e8c2ebcdee0 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs jpgdec_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_JPGDEC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &jpgdec_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_JPGDEC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &jpgdec_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate jpgdec_clks[] = {
GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
@@ -45,6 +39,7 @@ static const struct of_device_id of_match_clk_mt2712_jpgdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_jpgdec);
static struct platform_driver clk_mt2712_jpgdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -54,5 +49,5 @@ static struct platform_driver clk_mt2712_jpgdec_drv = {
.of_match_table = of_match_clk_mt2712_jpgdec,
},
};
-
-builtin_platform_driver(clk_mt2712_jpgdec_drv);
+module_platform_driver(clk_mt2712_jpgdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index 42f8cf3ecf4c..932ea449d299 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mfg_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
@@ -44,6 +38,7 @@ static const struct of_device_id of_match_clk_mt2712_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_mfg);
static struct platform_driver clk_mt2712_mfg_drv = {
.probe = mtk_clk_simple_probe,
@@ -53,5 +48,5 @@ static struct platform_driver clk_mt2712_mfg_drv = {
.of_match_table = of_match_clk_mt2712_mfg,
},
};
-
-builtin_platform_driver(clk_mt2712_mfg_drv);
+module_platform_driver(clk_mt2712_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 7d44b09b8a0a..204a3eae08dc 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -30,32 +30,14 @@ static const struct mtk_gate_regs mm2_cg_regs = {
.sta_ofs = 0x220,
};
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_MM2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
/* MM0 */
@@ -126,32 +108,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM2(CLK_MM_DSI3_DIGITAL, "mm_dsi3_digital", "dsi1_lntc", 6),
};
-static int clk_mt2712_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return r;
-}
+static const struct platform_device_id clk_mt2712_mm_id_table[] = {
+ { .name = "clk-mt2712-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt2712_mm_id_table);
static struct platform_driver clk_mt2712_mm_drv = {
- .probe = clk_mt2712_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2712-mm",
},
+ .id_table = clk_mt2712_mm_id_table,
};
-
-builtin_platform_driver(clk_mt2712_mm_drv);
+module_platform_driver(clk_mt2712_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index 6296ed5c5b55..2fc1f82ebf5d 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x8,
};
-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
/* VDEC0 */
@@ -63,6 +51,7 @@ static const struct of_device_id of_match_clk_mt2712_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_vdec);
static struct platform_driver clk_mt2712_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -72,5 +61,5 @@ static struct platform_driver clk_mt2712_vdec_drv = {
.of_match_table = of_match_clk_mt2712_vdec,
},
};
-
-builtin_platform_driver(clk_mt2712_vdec_drv);
+module_platform_driver(clk_mt2712_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index b9bfc35de629..6d053a00cf95 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &venc_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
@@ -46,6 +40,7 @@ static const struct of_device_id of_match_clk_mt2712_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_venc);
static struct platform_driver clk_mt2712_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -55,5 +50,5 @@ static struct platform_driver clk_mt2712_venc_drv = {
.of_match_table = of_match_clk_mt2712_venc,
},
};
-
-builtin_platform_driver(clk_mt2712_venc_drv);
+module_platform_driver(clk_mt2712_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 56980dd6c2ea..74c529f6163d 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include "clk-gate.h"
-#include "clk-pll.h"
#include "clk-mtk.h"
#include <dt-bindings/clock/mt2712-clk.h>
@@ -36,188 +35,96 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_CVBSPLL, "cvbspll", NULL, 108000000),
};
-static const struct mtk_fixed_factor top_early_divs[] = {
- FACTOR(CLK_TOP_SYS_26M, "sys_26m", "clk26m", 1,
- 1),
- FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "sys_26m", 1,
- 2),
-};
-
static const struct mtk_fixed_factor top_divs[] = {
- FACTOR(CLK_TOP_ARMCA35PLL, "armca35pll_ck", "armca35pll", 1,
- 1),
- FACTOR(CLK_TOP_ARMCA35PLL_600M, "armca35pll_600m", "armca35pll_ck", 1,
- 2),
- FACTOR(CLK_TOP_ARMCA35PLL_400M, "armca35pll_400m", "armca35pll_ck", 1,
- 3),
- FACTOR(CLK_TOP_ARMCA72PLL, "armca72pll_ck", "armca72pll", 1,
- 1),
- FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1,
- 1),
- FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
- 2),
- FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1,
- 2),
- FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1,
- 4),
- FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1,
- 8),
- FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1,
- 16),
- FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "syspll_ck", 1,
- 3),
- FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1,
- 2),
- FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1,
- 4),
- FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "syspll_ck", 1,
- 5),
- FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1,
- 2),
- FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1,
- 4),
- FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "syspll_ck", 1,
- 7),
- FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1,
- 2),
- FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1,
- 4),
- FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1,
- 1),
- FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_ck", 1,
- 7),
- FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_ck", 1,
- 26),
- FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll_ck", 1,
- 52),
- FACTOR(CLK_TOP_UNIVPLL_D104, "univpll_d104", "univpll_ck", 1,
- 104),
- FACTOR(CLK_TOP_UNIVPLL_D208, "univpll_d208", "univpll_ck", 1,
- 208),
- FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
- 2),
- FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1,
- 2),
- FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1,
- 4),
- FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1,
- 8),
- FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_ck", 1,
- 3),
- FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1,
- 2),
- FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1,
- 4),
- FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1,
- 8),
- FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_ck", 1,
- 5),
- FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1,
- 2),
- FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1,
- 4),
- FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1,
- 8),
- FACTOR(CLK_TOP_F_MP0_PLL1, "f_mp0_pll1_ck", "univpll_d2", 1,
- 1),
- FACTOR(CLK_TOP_F_MP0_PLL2, "f_mp0_pll2_ck", "univpll1_d2", 1,
- 1),
- FACTOR(CLK_TOP_F_BIG_PLL1, "f_big_pll1_ck", "univpll_d2", 1,
- 1),
- FACTOR(CLK_TOP_F_BIG_PLL2, "f_big_pll2_ck", "univpll1_d2", 1,
- 1),
- FACTOR(CLK_TOP_F_BUS_PLL1, "f_bus_pll1_ck", "univpll_d2", 1,
- 1),
- FACTOR(CLK_TOP_F_BUS_PLL2, "f_bus_pll2_ck", "univpll1_d2", 1,
- 1),
- FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1,
- 1),
- FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1,
- 2),
- FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1,
- 4),
- FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1,
- 8),
- FACTOR(CLK_TOP_APLL1_D16, "apll1_d16", "apll1_ck", 1,
- 16),
- FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1,
- 1),
- FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1,
- 2),
- FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1,
- 4),
- FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1,
- 8),
- FACTOR(CLK_TOP_APLL2_D16, "apll2_d16", "apll2_ck", 1,
- 16),
- FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1,
- 1),
- FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll_ck", 1,
- 2),
- FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll_ck", 1,
- 4),
- FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll_ck", 1,
- 8),
- FACTOR(CLK_TOP_LVDSPLL2, "lvdspll2_ck", "lvdspll2", 1,
- 1),
- FACTOR(CLK_TOP_LVDSPLL2_D2, "lvdspll2_d2", "lvdspll2_ck", 1,
- 2),
- FACTOR(CLK_TOP_LVDSPLL2_D4, "lvdspll2_d4", "lvdspll2_ck", 1,
- 4),
- FACTOR(CLK_TOP_LVDSPLL2_D8, "lvdspll2_d8", "lvdspll2_ck", 1,
- 8),
- FACTOR(CLK_TOP_ETHERPLL_125M, "etherpll_125m", "etherpll", 1,
- 1),
- FACTOR(CLK_TOP_ETHERPLL_50M, "etherpll_50m", "etherpll", 1,
- 1),
- FACTOR(CLK_TOP_CVBS, "cvbs", "cvbspll", 1,
- 1),
- FACTOR(CLK_TOP_CVBS_D2, "cvbs_d2", "cvbs", 1,
- 2),
- FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1,
- 1),
- FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1,
- 2),
- FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1,
- 1),
- FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll_ck", 1,
- 2),
- FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1,
- 1),
- FACTOR(CLK_TOP_VCODECPLL_D2, "vcodecpll_d2", "vcodecpll_ck", 1,
- 2),
- FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1,
- 1),
- FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
- 2),
- FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1,
- 4),
- FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1,
- 8),
- FACTOR(CLK_TOP_TVDPLL_429M, "tvdpll_429m", "tvdpll", 1,
- 1),
- FACTOR(CLK_TOP_TVDPLL_429M_D2, "tvdpll_429m_d2", "tvdpll_429m", 1,
- 2),
- FACTOR(CLK_TOP_TVDPLL_429M_D4, "tvdpll_429m_d4", "tvdpll_429m", 1,
- 4),
- FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1,
- 1),
- FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1,
- 2),
- FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll_ck", 1,
- 4),
- FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1,
- 1),
- FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2_ck", 1,
- 2),
- FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2_ck", 1,
- 4),
- FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1,
- 4),
- FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1,
- 3),
- FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2_ck", 1,
- 3),
+ FACTOR(CLK_TOP_SYS_26M, "sys_26m", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "sys_26m", 1, 2),
+ FACTOR(CLK_TOP_ARMCA35PLL, "armca35pll_ck", "armca35pll", 1, 1),
+ FACTOR(CLK_TOP_ARMCA35PLL_600M, "armca35pll_600m", "armca35pll_ck", 1, 2),
+ FACTOR(CLK_TOP_ARMCA35PLL_400M, "armca35pll_400m", "armca35pll_ck", 1, 3),
+ FACTOR(CLK_TOP_ARMCA72PLL, "armca72pll_ck", "armca72pll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "syspll_ck", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "syspll_ck", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "syspll_ck", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_ck", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_ck", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll_ck", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_D104, "univpll_d104", "univpll_ck", 1, 104),
+ FACTOR(CLK_TOP_UNIVPLL_D208, "univpll_d208", "univpll_ck", 1, 208),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_ck", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_ck", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1, 8),
+ FACTOR(CLK_TOP_F_MP0_PLL1, "f_mp0_pll1_ck", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_F_MP0_PLL2, "f_mp0_pll2_ck", "univpll1_d2", 1, 1),
+ FACTOR(CLK_TOP_F_BIG_PLL1, "f_big_pll1_ck", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_F_BIG_PLL2, "f_big_pll2_ck", "univpll1_d2", 1, 1),
+ FACTOR(CLK_TOP_F_BUS_PLL1, "f_bus_pll1_ck", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_F_BUS_PLL2, "f_bus_pll2_ck", "univpll1_d2", 1, 1),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1, 8),
+ FACTOR(CLK_TOP_APLL1_D16, "apll1_d16", "apll1_ck", 1, 16),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1, 8),
+ FACTOR(CLK_TOP_APLL2_D16, "apll2_d16", "apll2_ck", 1, 16),
+ FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1, 1),
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll_ck", 1, 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll_ck", 1, 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll_ck", 1, 8),
+ FACTOR(CLK_TOP_LVDSPLL2, "lvdspll2_ck", "lvdspll2", 1, 1),
+ FACTOR(CLK_TOP_LVDSPLL2_D2, "lvdspll2_d2", "lvdspll2_ck", 1, 2),
+ FACTOR(CLK_TOP_LVDSPLL2_D4, "lvdspll2_d4", "lvdspll2_ck", 1, 4),
+ FACTOR(CLK_TOP_LVDSPLL2_D8, "lvdspll2_d8", "lvdspll2_ck", 1, 8),
+ FACTOR(CLK_TOP_ETHERPLL_125M, "etherpll_125m", "etherpll", 1, 1),
+ FACTOR(CLK_TOP_ETHERPLL_50M, "etherpll_50m", "etherpll", 1, 1),
+ FACTOR(CLK_TOP_CVBS, "cvbs", "cvbspll", 1, 1),
+ FACTOR(CLK_TOP_CVBS_D2, "cvbs_d2", "cvbs", 1, 2),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1, 2),
+ FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1),
+ FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll_ck", 1, 2),
+ FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 1),
+ FACTOR(CLK_TOP_VCODECPLL_D2, "vcodecpll_d2", "vcodecpll_ck", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_429M, "tvdpll_429m", "tvdpll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_429M_D2, "tvdpll_429m_d2", "tvdpll_429m", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_429M_D4, "tvdpll_429m_d4", "tvdpll_429m", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll_ck", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2_ck", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2_ck", 1, 4),
+ FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1, 3),
+ FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2_ck", 1, 3),
};
static const char * const axi_parents[] = {
@@ -737,169 +644,118 @@ static const char * const audull_vtx_parents[] = {
static struct mtk_composite top_muxes[] = {
/* CLK_CFG_0 */
MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x040, 0, 3,
- 7, CLK_IS_CRITICAL),
+ 7, CLK_IS_CRITICAL),
MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x040, 8, 1,
- 15, CLK_IS_CRITICAL),
- MUX_GATE(CLK_TOP_MM_SEL, "mm_sel",
- mm_parents, 0x040, 24, 3, 31),
+ 15, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x040, 24, 3, 31),
/* CLK_CFG_1 */
- MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel",
- pwm_parents, 0x050, 0, 2, 7),
- MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel",
- vdec_parents, 0x050, 8, 4, 15),
- MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel",
- venc_parents, 0x050, 16, 4, 23),
- MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel",
- mfg_parents, 0x050, 24, 4, 31),
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x050, 8, 4, 15),
+ MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x050, 24, 4, 31),
/* CLK_CFG_2 */
- MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel",
- camtg_parents, 0x060, 0, 4, 7),
- MUX_GATE(CLK_TOP_UART_SEL, "uart_sel",
- uart_parents, 0x060, 8, 1, 15),
- MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel",
- spi_parents, 0x060, 16, 3, 23),
- MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel",
- usb20_parents, 0x060, 24, 2, 31),
+ MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x060, 0, 4, 7),
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x060, 8, 1, 15),
+ MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x060, 24, 2, 31),
/* CLK_CFG_3 */
- MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel",
- usb30_parents, 0x070, 0, 2, 7),
- MUX_GATE(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc50_0_h_sel",
- msdc50_0_h_parents, 0x070, 8, 3, 15),
- MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
- msdc50_0_parents, 0x070, 16, 4, 23),
- MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
- msdc30_1_parents, 0x070, 24, 3, 31),
+ MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x070, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc50_0_h_sel", msdc50_0_h_parents,
+ 0x070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents,
+ 0x070, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents,
+ 0x070, 24, 3, 31),
/* CLK_CFG_4 */
- MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
- msdc30_1_parents, 0x080, 0, 3, 7),
- MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel",
- msdc30_3_parents, 0x080, 8, 4, 15),
- MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel",
- audio_parents, 0x080, 16, 2, 23),
- MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
- aud_intbus_parents, 0x080, 24, 3, 31),
+ MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_1_parents,
+ 0x080, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents,
+ 0x080, 8, 4, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x080, 16, 2, 23),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x080, 24, 3, 31),
/* CLK_CFG_5 */
- MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel",
- pmicspi_parents, 0x090, 0, 3, 7),
- MUX_GATE(CLK_TOP_DPILVDS1_SEL, "dpilvds1_sel",
- dpilvds1_parents, 0x090, 8, 3, 15),
- MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel",
- atb_parents, 0x090, 16, 2, 23),
- MUX_GATE(CLK_TOP_NR_SEL, "nr_sel",
- nr_parents, 0x090, 24, 3, 31),
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x090, 0, 3, 7),
+ MUX_GATE(CLK_TOP_DPILVDS1_SEL, "dpilvds1_sel", dpilvds1_parents,
+ 0x090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x090, 16, 2, 23),
+ MUX_GATE(CLK_TOP_NR_SEL, "nr_sel", nr_parents, 0x090, 24, 3, 31),
/* CLK_CFG_6 */
- MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel",
- nfi2x_parents, 0x0a0, 0, 4, 7),
- MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel",
- irda_parents, 0x0a0, 8, 2, 15),
- MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel",
- cci400_parents, 0x0a0, 16, 3, 23),
- MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel",
- aud_1_parents, 0x0a0, 24, 2, 31),
+ MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel", nfi2x_parents, 0x0a0, 0, 4, 7),
+ MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x0a0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x0a0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x0a0, 24, 2, 31),
/* CLK_CFG_7 */
- MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel",
- aud_2_parents, 0x0b0, 0, 2, 7),
- MUX_GATE(CLK_TOP_MEM_MFG_IN_AS_SEL, "mem_mfg_sel",
- mem_mfg_parents, 0x0b0, 8, 2, 15),
- MUX_GATE(CLK_TOP_AXI_MFG_IN_AS_SEL, "axi_mfg_sel",
- axi_mfg_parents, 0x0b0, 16, 2, 23),
- MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel",
- scam_parents, 0x0b0, 24, 2, 31),
+ MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0x0b0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MEM_MFG_IN_AS_SEL, "mem_mfg_sel", mem_mfg_parents,
+ 0x0b0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_AXI_MFG_IN_AS_SEL, "axi_mfg_sel", axi_mfg_parents,
+ 0x0b0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0x0b0, 24, 2, 31),
/* CLK_CFG_8 */
- MUX_GATE(CLK_TOP_NFIECC_SEL, "nfiecc_sel",
- nfiecc_parents, 0x0c0, 0, 3, 7),
- MUX_GATE(CLK_TOP_PE2_MAC_P0_SEL, "pe2_mac_p0_sel",
- pe2_mac_p0_parents, 0x0c0, 8, 3, 15),
- MUX_GATE(CLK_TOP_PE2_MAC_P1_SEL, "pe2_mac_p1_sel",
- pe2_mac_p0_parents, 0x0c0, 16, 3, 23),
- MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel",
- dpilvds_parents, 0x0c0, 24, 3, 31),
+ MUX_GATE(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents, 0x0c0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_PE2_MAC_P0_SEL, "pe2_mac_p0_sel", pe2_mac_p0_parents,
+ 0x0c0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_PE2_MAC_P1_SEL, "pe2_mac_p1_sel", pe2_mac_p0_parents,
+ 0x0c0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents, 0x0c0, 24, 3, 31),
/* CLK_CFG_9 */
- MUX_GATE(CLK_TOP_MSDC50_3_HCLK_SEL, "msdc50_3_h_sel",
- msdc50_0_h_parents, 0x0d0, 0, 3, 7),
- MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel",
- hdcp_parents, 0x0d0, 8, 2, 15),
- MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel",
- hdcp_24m_parents, 0x0d0, 16, 2, 23),
- MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x0d0, 24, 2,
- 31, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_MSDC50_3_HCLK_SEL, "msdc50_3_h_sel", msdc50_0_h_parents,
+ 0x0d0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel", hdcp_parents, 0x0d0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel", hdcp_24m_parents,
+ 0x0d0, 16, 2, 23),
+ MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents,
+ 0x0d0, 24, 2, 31, CLK_IS_CRITICAL),
/* CLK_CFG_10 */
- MUX_GATE(CLK_TOP_SPINOR_SEL, "spinor_sel",
- spinor_parents, 0x500, 0, 4, 7),
- MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel",
- apll_parents, 0x500, 8, 4, 15),
- MUX_GATE(CLK_TOP_APLL2_SEL, "apll2_sel",
- apll_parents, 0x500, 16, 4, 23),
- MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel",
- a1sys_hp_parents, 0x500, 24, 3, 31),
+ MUX_GATE(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents, 0x500, 0, 4, 7),
+ MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x500, 8, 4, 15),
+ MUX_GATE(CLK_TOP_APLL2_SEL, "apll2_sel", apll_parents, 0x500, 16, 4, 23),
+ MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel", a1sys_hp_parents,
+ 0x500, 24, 3, 31),
/* CLK_CFG_11 */
- MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel",
- a2sys_hp_parents, 0x510, 0, 3, 7),
- MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel",
- asm_l_parents, 0x510, 8, 2, 15),
- MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel",
- asm_l_parents, 0x510, 16, 2, 23),
- MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel",
- asm_l_parents, 0x510, 24, 2, 31),
+ MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel", a2sys_hp_parents, 0x510, 0, 3, 7),
+ MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel", asm_l_parents, 0x510, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel", asm_l_parents, 0x510, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel", asm_l_parents, 0x510, 24, 2, 31),
/* CLK_CFG_12 */
- MUX_GATE(CLK_TOP_I2SO1_SEL, "i2so1_sel",
- i2so1_parents, 0x520, 0, 2, 7),
- MUX_GATE(CLK_TOP_I2SO2_SEL, "i2so2_sel",
- i2so1_parents, 0x520, 8, 2, 15),
- MUX_GATE(CLK_TOP_I2SO3_SEL, "i2so3_sel",
- i2so1_parents, 0x520, 16, 2, 23),
- MUX_GATE(CLK_TOP_TDMO0_SEL, "tdmo0_sel",
- i2so1_parents, 0x520, 24, 2, 31),
+ MUX_GATE(CLK_TOP_I2SO1_SEL, "i2so1_sel", i2so1_parents, 0x520, 0, 2, 7),
+ MUX_GATE(CLK_TOP_I2SO2_SEL, "i2so2_sel", i2so1_parents, 0x520, 8, 2, 15),
+ MUX_GATE(CLK_TOP_I2SO3_SEL, "i2so3_sel", i2so1_parents, 0x520, 16, 2, 23),
+ MUX_GATE(CLK_TOP_TDMO0_SEL, "tdmo0_sel", i2so1_parents, 0x520, 24, 2, 31),
/* CLK_CFG_13 */
- MUX_GATE(CLK_TOP_TDMO1_SEL, "tdmo1_sel",
- i2so1_parents, 0x530, 0, 2, 7),
- MUX_GATE(CLK_TOP_I2SI1_SEL, "i2si1_sel",
- i2so1_parents, 0x530, 8, 2, 15),
- MUX_GATE(CLK_TOP_I2SI2_SEL, "i2si2_sel",
- i2so1_parents, 0x530, 16, 2, 23),
- MUX_GATE(CLK_TOP_I2SI3_SEL, "i2si3_sel",
- i2so1_parents, 0x530, 24, 2, 31),
+ MUX_GATE(CLK_TOP_TDMO1_SEL, "tdmo1_sel", i2so1_parents, 0x530, 0, 2, 7),
+ MUX_GATE(CLK_TOP_I2SI1_SEL, "i2si1_sel", i2so1_parents, 0x530, 8, 2, 15),
+ MUX_GATE(CLK_TOP_I2SI2_SEL, "i2si2_sel", i2so1_parents, 0x530, 16, 2, 23),
+ MUX_GATE(CLK_TOP_I2SI3_SEL, "i2si3_sel", i2so1_parents, 0x530, 24, 2, 31),
/* CLK_CFG_14 */
- MUX_GATE(CLK_TOP_ETHER_125M_SEL, "ether_125m_sel",
- ether_125m_parents, 0x540, 0, 2, 7),
- MUX_GATE(CLK_TOP_ETHER_50M_SEL, "ether_50m_sel",
- ether_50m_parents, 0x540, 8, 2, 15),
- MUX_GATE(CLK_TOP_JPGDEC_SEL, "jpgdec_sel",
- jpgdec_parents, 0x540, 16, 4, 23),
- MUX_GATE(CLK_TOP_SPISLV_SEL, "spislv_sel",
- spislv_parents, 0x540, 24, 3, 31),
+ MUX_GATE(CLK_TOP_ETHER_125M_SEL, "ether_125m_sel", ether_125m_parents,
+ 0x540, 0, 2, 7),
+ MUX_GATE(CLK_TOP_ETHER_50M_SEL, "ether_50m_sel", ether_50m_parents,
+ 0x540, 8, 2, 15),
+ MUX_GATE(CLK_TOP_JPGDEC_SEL, "jpgdec_sel", jpgdec_parents, 0x540, 16, 4, 23),
+ MUX_GATE(CLK_TOP_SPISLV_SEL, "spislv_sel", spislv_parents, 0x540, 24, 3, 31),
/* CLK_CFG_15 */
- MUX_GATE(CLK_TOP_ETHER_50M_RMII_SEL, "ether_sel",
- ether_parents, 0x550, 0, 2, 7),
- MUX_GATE(CLK_TOP_CAM2TG_SEL, "cam2tg_sel",
- camtg_parents, 0x550, 8, 4, 15),
- MUX_GATE(CLK_TOP_DI_SEL, "di_sel",
- di_parents, 0x550, 16, 3, 23),
- MUX_GATE(CLK_TOP_TVD_SEL, "tvd_sel",
- tvd_parents, 0x550, 24, 2, 31),
+ MUX_GATE(CLK_TOP_ETHER_50M_RMII_SEL, "ether_sel", ether_parents, 0x550, 0, 2, 7),
+ MUX_GATE(CLK_TOP_CAM2TG_SEL, "cam2tg_sel", camtg_parents, 0x550, 8, 4, 15),
+ MUX_GATE(CLK_TOP_DI_SEL, "di_sel", di_parents, 0x550, 16, 3, 23),
+ MUX_GATE(CLK_TOP_TVD_SEL, "tvd_sel", tvd_parents, 0x550, 24, 2, 31),
/* CLK_CFG_16 */
- MUX_GATE(CLK_TOP_I2C_SEL, "i2c_sel",
- i2c_parents, 0x560, 0, 3, 7),
- MUX_GATE(CLK_TOP_PWM_INFRA_SEL, "pwm_infra_sel",
- pwm_parents, 0x560, 8, 2, 15),
- MUX_GATE(CLK_TOP_MSDC0P_AES_SEL, "msdc0p_aes_sel",
- msdc0p_aes_parents, 0x560, 16, 2, 23),
- MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel",
- cmsys_parents, 0x560, 24, 3, 31),
+ MUX_GATE(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, 0x560, 0, 3, 7),
+ MUX_GATE(CLK_TOP_PWM_INFRA_SEL, "pwm_infra_sel", pwm_parents, 0x560, 8, 2, 15),
+ MUX_GATE(CLK_TOP_MSDC0P_AES_SEL, "msdc0p_aes_sel", msdc0p_aes_parents,
+ 0x560, 16, 2, 23),
+ MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel", cmsys_parents, 0x560, 24, 3, 31),
/* CLK_CFG_17 */
- MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel",
- gcpu_parents, 0x570, 0, 3, 7),
+ MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x570, 0, 3, 7),
/* CLK_AUDDIV_4 */
- MUX(CLK_TOP_AUD_APLL1_SEL, "aud_apll1_sel",
- aud_apll1_parents, 0x134, 0, 1),
- MUX(CLK_TOP_AUD_APLL2_SEL, "aud_apll2_sel",
- aud_apll2_parents, 0x134, 1, 1),
- MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel",
- audull_vtx_parents, 0x134, 31, 1),
- MUX(CLK_TOP_APLL1_REF_SEL, "apll1_ref_sel",
- apll1_ref_parents, 0x134, 4, 3),
- MUX(CLK_TOP_APLL2_REF_SEL, "apll2_ref_sel",
- apll1_ref_parents, 0x134, 7, 3),
+ MUX(CLK_TOP_AUD_APLL1_SEL, "aud_apll1_sel", aud_apll1_parents, 0x134, 0, 1),
+ MUX(CLK_TOP_AUD_APLL2_SEL, "aud_apll2_sel", aud_apll2_parents, 0x134, 1, 1),
+ MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel", audull_vtx_parents,
+ 0x134, 31, 1),
+ MUX(CLK_TOP_APLL1_REF_SEL, "apll1_ref_sel", apll1_ref_parents, 0x134, 4, 3),
+ MUX(CLK_TOP_APLL2_REF_SEL, "apll2_ref_sel", apll1_ref_parents, 0x134, 7, 3),
};
static const char * const mcu_mp0_parents[] = {
@@ -926,13 +782,13 @@ static const char * const mcu_bus_parents[] = {
static struct mtk_composite mcu_muxes[] = {
/* mp0_pll_divider_cfg */
MUX_GATE_FLAGS(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0,
- 9, 2, -1, CLK_IS_CRITICAL),
+ 9, 2, -1, CLK_IS_CRITICAL),
/* mp2_pll_divider_cfg */
MUX_GATE_FLAGS(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8,
- 9, 2, -1, CLK_IS_CRITICAL),
+ 9, 2, -1, CLK_IS_CRITICAL),
/* bus_pll_divider_cfg */
MUX_GATE_FLAGS(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0,
- 9, 2, -1, CLK_IS_CRITICAL),
+ 9, 2, -1, CLK_IS_CRITICAL),
};
static const struct mtk_clk_divider top_adj_divs[] = {
@@ -958,23 +814,11 @@ static const struct mtk_gate_regs top1_cg_regs = {
.sta_ofs = 0x424,
};
-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
-
-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate top_clks[] = {
/* TOP0 */
@@ -998,14 +842,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
.sta_ofs = 0x48,
};
-#define GATE_INFRA(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate infra_clks[] = {
GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
@@ -1035,227 +873,65 @@ static const struct mtk_gate_regs peri2_cg_regs = {
.sta_ofs = 0x42c,
};
-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_PERI2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_PERI2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate peri_clks[] = {
/* PERI0 */
- GATE_PERI0(CLK_PERI_NFI, "per_nfi",
- "axi_sel", 0),
- GATE_PERI0(CLK_PERI_THERM, "per_therm",
- "axi_sel", 1),
- GATE_PERI0(CLK_PERI_PWM0, "per_pwm0",
- "pwm_sel", 2),
- GATE_PERI0(CLK_PERI_PWM1, "per_pwm1",
- "pwm_sel", 3),
- GATE_PERI0(CLK_PERI_PWM2, "per_pwm2",
- "pwm_sel", 4),
- GATE_PERI0(CLK_PERI_PWM3, "per_pwm3",
- "pwm_sel", 5),
- GATE_PERI0(CLK_PERI_PWM4, "per_pwm4",
- "pwm_sel", 6),
- GATE_PERI0(CLK_PERI_PWM5, "per_pwm5",
- "pwm_sel", 7),
- GATE_PERI0(CLK_PERI_PWM6, "per_pwm6",
- "pwm_sel", 8),
- GATE_PERI0(CLK_PERI_PWM7, "per_pwm7",
- "pwm_sel", 9),
- GATE_PERI0(CLK_PERI_PWM, "per_pwm",
- "pwm_sel", 10),
- GATE_PERI0(CLK_PERI_AP_DMA, "per_ap_dma",
- "axi_sel", 13),
- GATE_PERI0(CLK_PERI_MSDC30_0, "per_msdc30_0",
- "msdc50_0_sel", 14),
- GATE_PERI0(CLK_PERI_MSDC30_1, "per_msdc30_1",
- "msdc30_1_sel", 15),
- GATE_PERI0(CLK_PERI_MSDC30_2, "per_msdc30_2",
- "msdc30_2_sel", 16),
- GATE_PERI0(CLK_PERI_MSDC30_3, "per_msdc30_3",
- "msdc30_3_sel", 17),
- GATE_PERI0(CLK_PERI_UART0, "per_uart0",
- "uart_sel", 20),
- GATE_PERI0(CLK_PERI_UART1, "per_uart1",
- "uart_sel", 21),
- GATE_PERI0(CLK_PERI_UART2, "per_uart2",
- "uart_sel", 22),
- GATE_PERI0(CLK_PERI_UART3, "per_uart3",
- "uart_sel", 23),
- GATE_PERI0(CLK_PERI_I2C0, "per_i2c0",
- "axi_sel", 24),
- GATE_PERI0(CLK_PERI_I2C1, "per_i2c1",
- "axi_sel", 25),
- GATE_PERI0(CLK_PERI_I2C2, "per_i2c2",
- "axi_sel", 26),
- GATE_PERI0(CLK_PERI_I2C3, "per_i2c3",
- "axi_sel", 27),
- GATE_PERI0(CLK_PERI_I2C4, "per_i2c4",
- "axi_sel", 28),
- GATE_PERI0(CLK_PERI_AUXADC, "per_auxadc",
- "ltepll_fs26m", 29),
- GATE_PERI0(CLK_PERI_SPI0, "per_spi0",
- "spi_sel", 30),
+ GATE_PERI0(CLK_PERI_NFI, "per_nfi", "axi_sel", 0),
+ GATE_PERI0(CLK_PERI_THERM, "per_therm", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_PWM0, "per_pwm0", "pwm_sel", 2),
+ GATE_PERI0(CLK_PERI_PWM1, "per_pwm1", "pwm_sel", 3),
+ GATE_PERI0(CLK_PERI_PWM2, "per_pwm2", "pwm_sel", 4),
+ GATE_PERI0(CLK_PERI_PWM3, "per_pwm3", "pwm_sel", 5),
+ GATE_PERI0(CLK_PERI_PWM4, "per_pwm4", "pwm_sel", 6),
+ GATE_PERI0(CLK_PERI_PWM5, "per_pwm5", "pwm_sel", 7),
+ GATE_PERI0(CLK_PERI_PWM6, "per_pwm6", "pwm_sel", 8),
+ GATE_PERI0(CLK_PERI_PWM7, "per_pwm7", "pwm_sel", 9),
+ GATE_PERI0(CLK_PERI_PWM, "per_pwm", "pwm_sel", 10),
+ GATE_PERI0(CLK_PERI_AP_DMA, "per_ap_dma", "axi_sel", 13),
+ GATE_PERI0(CLK_PERI_MSDC30_0, "per_msdc30_0", "msdc50_0_sel", 14),
+ GATE_PERI0(CLK_PERI_MSDC30_1, "per_msdc30_1", "msdc30_1_sel", 15),
+ GATE_PERI0(CLK_PERI_MSDC30_2, "per_msdc30_2", "msdc30_2_sel", 16),
+ GATE_PERI0(CLK_PERI_MSDC30_3, "per_msdc30_3", "msdc30_3_sel", 17),
+ GATE_PERI0(CLK_PERI_UART0, "per_uart0", "uart_sel", 20),
+ GATE_PERI0(CLK_PERI_UART1, "per_uart1", "uart_sel", 21),
+ GATE_PERI0(CLK_PERI_UART2, "per_uart2", "uart_sel", 22),
+ GATE_PERI0(CLK_PERI_UART3, "per_uart3", "uart_sel", 23),
+ GATE_PERI0(CLK_PERI_I2C0, "per_i2c0", "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_I2C1, "per_i2c1", "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_I2C2, "per_i2c2", "axi_sel", 26),
+ GATE_PERI0(CLK_PERI_I2C3, "per_i2c3", "axi_sel", 27),
+ GATE_PERI0(CLK_PERI_I2C4, "per_i2c4", "axi_sel", 28),
+ GATE_PERI0(CLK_PERI_AUXADC, "per_auxadc", "ltepll_fs26m", 29),
+ GATE_PERI0(CLK_PERI_SPI0, "per_spi0", "spi_sel", 30),
/* PERI1 */
- GATE_PERI1(CLK_PERI_SPI, "per_spi",
- "spinor_sel", 1),
- GATE_PERI1(CLK_PERI_I2C5, "per_i2c5",
- "axi_sel", 3),
- GATE_PERI1(CLK_PERI_SPI2, "per_spi2",
- "spi_sel", 5),
- GATE_PERI1(CLK_PERI_SPI3, "per_spi3",
- "spi_sel", 6),
- GATE_PERI1(CLK_PERI_SPI5, "per_spi5",
- "spi_sel", 8),
- GATE_PERI1(CLK_PERI_UART4, "per_uart4",
- "uart_sel", 9),
- GATE_PERI1(CLK_PERI_SFLASH, "per_sflash",
- "uart_sel", 11),
- GATE_PERI1(CLK_PERI_GMAC, "per_gmac",
- "uart_sel", 12),
- GATE_PERI1(CLK_PERI_PCIE0, "per_pcie0",
- "uart_sel", 14),
- GATE_PERI1(CLK_PERI_PCIE1, "per_pcie1",
- "uart_sel", 15),
- GATE_PERI1(CLK_PERI_GMAC_PCLK, "per_gmac_pclk",
- "uart_sel", 16),
+ GATE_PERI1(CLK_PERI_SPI, "per_spi", "spinor_sel", 1),
+ GATE_PERI1(CLK_PERI_I2C5, "per_i2c5", "axi_sel", 3),
+ GATE_PERI1(CLK_PERI_SPI2, "per_spi2", "spi_sel", 5),
+ GATE_PERI1(CLK_PERI_SPI3, "per_spi3", "spi_sel", 6),
+ GATE_PERI1(CLK_PERI_SPI5, "per_spi5", "spi_sel", 8),
+ GATE_PERI1(CLK_PERI_UART4, "per_uart4", "uart_sel", 9),
+ GATE_PERI1(CLK_PERI_SFLASH, "per_sflash", "uart_sel", 11),
+ GATE_PERI1(CLK_PERI_GMAC, "per_gmac", "uart_sel", 12),
+ GATE_PERI1(CLK_PERI_PCIE0, "per_pcie0", "uart_sel", 14),
+ GATE_PERI1(CLK_PERI_PCIE1, "per_pcie1", "uart_sel", 15),
+ GATE_PERI1(CLK_PERI_GMAC_PCLK, "per_gmac_pclk", "uart_sel", 16),
/* PERI2 */
- GATE_PERI2(CLK_PERI_MSDC50_0_EN, "per_msdc50_0_en",
- "msdc50_0_sel", 0),
- GATE_PERI2(CLK_PERI_MSDC30_1_EN, "per_msdc30_1_en",
- "msdc30_1_sel", 1),
- GATE_PERI2(CLK_PERI_MSDC30_2_EN, "per_msdc30_2_en",
- "msdc30_2_sel", 2),
- GATE_PERI2(CLK_PERI_MSDC30_3_EN, "per_msdc30_3_en",
- "msdc30_3_sel", 3),
- GATE_PERI2(CLK_PERI_MSDC50_0_HCLK_EN, "per_msdc50_0_h",
- "msdc50_0_h_sel", 4),
- GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h",
- "msdc50_3_h_sel", 5),
- GATE_PERI2(CLK_PERI_MSDC30_0_QTR_EN, "per_msdc30_0_q",
- "axi_sel", 6),
- GATE_PERI2(CLK_PERI_MSDC30_3_QTR_EN, "per_msdc30_3_q",
- "mem_sel", 7),
-};
-
-#define MT2712_PLL_FMAX (3000UL * MHZ)
-
-#define CON0_MT2712_RST_BAR BIT(24)
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
- _tuner_en_bit, _pcw_reg, _pcw_shift, \
- _div_table) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = CON0_MT2712_RST_BAR, \
- .fmax = MT2712_PLL_FMAX, \
- .pcwbits = _pcwbits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .tuner_en_reg = _tuner_en_reg, \
- .tuner_en_bit = _tuner_en_bit, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .div_table = _div_table, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
- _tuner_en_bit, _pcw_reg, _pcw_shift) \
- PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _pcwbits, _pd_reg, _pd_shift, _tuner_reg, \
- _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
- _pcw_shift, NULL)
-
-static const struct mtk_pll_div_table armca35pll_div_table[] = {
- { .div = 0, .freq = MT2712_PLL_FMAX },
- { .div = 1, .freq = 1202500000 },
- { .div = 2, .freq = 500500000 },
- { .div = 3, .freq = 315250000 },
- { .div = 4, .freq = 157625000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_div_table armca72pll_div_table[] = {
- { .div = 0, .freq = MT2712_PLL_FMAX },
- { .div = 1, .freq = 994500000 },
- { .div = 2, .freq = 520000000 },
- { .div = 3, .freq = 315250000 },
- { .div = 4, .freq = 157625000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_div_table mmpll_div_table[] = {
- { .div = 0, .freq = MT2712_PLL_FMAX },
- { .div = 1, .freq = 1001000000 },
- { .div = 2, .freq = 601250000 },
- { .div = 3, .freq = 250250000 },
- { .div = 4, .freq = 125125000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000100,
- HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000100,
- HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0),
- PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000100,
- 0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0),
- PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000100,
- 0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000100,
- 0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000100,
- 0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000100,
- 0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0),
- PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000100,
- 0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000100,
- 0, 31, 0x0270, 4, 0, 0, 0, 0x0274, 0),
- PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000100,
- 0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000100,
- 0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000100,
- 0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0,
- mmpll_div_table),
- PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000100,
- HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0,
- armca35pll_div_table),
- PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000100,
- 0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0,
- armca72pll_div_table),
- PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000100,
- 0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
+ GATE_PERI2(CLK_PERI_MSDC50_0_EN, "per_msdc50_0_en", "msdc50_0_sel", 0),
+ GATE_PERI2(CLK_PERI_MSDC30_1_EN, "per_msdc30_1_en", "msdc30_1_sel", 1),
+ GATE_PERI2(CLK_PERI_MSDC30_2_EN, "per_msdc30_2_en", "msdc30_2_sel", 2),
+ GATE_PERI2(CLK_PERI_MSDC30_3_EN, "per_msdc30_3_en", "msdc30_3_sel", 3),
+ GATE_PERI2(CLK_PERI_MSDC50_0_HCLK_EN, "per_msdc50_0_h", "msdc50_0_h_sel", 4),
+ GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h", "msdc50_3_h_sel", 5),
+ GATE_PERI2(CLK_PERI_MSDC30_0_QTR_EN, "per_msdc30_0_q", "axi_sel", 6),
+ GATE_PERI2(CLK_PERI_MSDC30_3_QTR_EN, "per_msdc30_3_q", "mem_sel", 7),
};
static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
@@ -1276,213 +952,54 @@ static const struct mtk_clk_rst_desc clk_rst_desc[] = {
},
};
-static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
-
-static struct clk_hw_onecell_data *top_clk_data;
-
-static void clk_mt2712_top_init_early(struct device_node *node)
-{
- int r, i;
-
- if (!top_clk_data) {
- top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
- for (i = 0; i < CLK_TOP_NR_CLK; i++)
- top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
- }
-
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
- top_clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-
-CLK_OF_DECLARE_DRIVER(mt2712_topckgen, "mediatek,mt2712-topckgen",
- clk_mt2712_top_init_early);
-
-static int clk_mt2712_top_probe(struct platform_device *pdev)
-{
- int r, i;
- struct device_node *node = pdev->dev.of_node;
- void __iomem *base;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base)) {
- pr_err("%s(): ioremap failed\n", __func__);
- return PTR_ERR(base);
- }
-
- if (!top_clk_data) {
- top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
- } else {
- for (i = 0; i < CLK_TOP_NR_CLK; i++) {
- if (top_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
- top_clk_data->hws[i] = ERR_PTR(-ENOENT);
- }
- }
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- top_clk_data);
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
- top_clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt2712_clk_lock, top_clk_data);
- mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
- &mt2712_clk_lock, top_clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- top_clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
-
-static int clk_mt2712_infra_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
-
- return r;
-}
-
-static int clk_mt2712_peri_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
-
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
-
- return r;
-}
-
-static int clk_mt2712_mcu_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
- void __iomem *base;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base)) {
- pr_err("%s(): ioremap failed\n", __func__);
- return PTR_ERR(base);
- }
-
- clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
-
- mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
- &mt2712_clk_lock, clk_data);
+static const struct mtk_clk_desc topck_desc = {
+ .clks = top_clks,
+ .num_clks = ARRAY_SIZE(top_clks),
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .divider_clks = top_adj_divs,
+ .num_divider_clks = ARRAY_SIZE(top_adj_divs),
+ .clk_lock = &mt2712_clk_lock,
+};
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+static const struct mtk_clk_desc mcu_desc = {
+ .composite_clks = mcu_muxes,
+ .num_composite_clks = ARRAY_SIZE(mcu_muxes),
+ .clk_lock = &mt2712_clk_lock,
+};
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .rst_desc = &clk_rst_desc[0],
+};
- return r;
-}
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_clks,
+ .num_clks = ARRAY_SIZE(peri_clks),
+ .rst_desc = &clk_rst_desc[1],
+};
static const struct of_device_id of_match_clk_mt2712[] = {
- {
- .compatible = "mediatek,mt2712-apmixedsys",
- .data = clk_mt2712_apmixed_probe,
- }, {
- .compatible = "mediatek,mt2712-topckgen",
- .data = clk_mt2712_top_probe,
- }, {
- .compatible = "mediatek,mt2712-infracfg",
- .data = clk_mt2712_infra_probe,
- }, {
- .compatible = "mediatek,mt2712-pericfg",
- .data = clk_mt2712_peri_probe,
- }, {
- .compatible = "mediatek,mt2712-mcucfg",
- .data = clk_mt2712_mcu_probe,
- }, {
- /* sentinel */
- }
-};
-
-static int clk_mt2712_probe(struct platform_device *pdev)
-{
- int (*clk_probe)(struct platform_device *);
- int r;
-
- clk_probe = of_device_get_match_data(&pdev->dev);
- if (!clk_probe)
- return -EINVAL;
-
- r = clk_probe(pdev);
- if (r != 0)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+ { .compatible = "mediatek,mt2712-infracfg", .data = &infra_desc },
+ { .compatible = "mediatek,mt2712-mcucfg", .data = &mcu_desc },
+ { .compatible = "mediatek,mt2712-pericfg", .data = &peri_desc, },
+ { .compatible = "mediatek,mt2712-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt2712);
static struct platform_driver clk_mt2712_drv = {
- .probe = clk_mt2712_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712",
.of_match_table = of_match_clk_mt2712,
},
};
-
-static int __init clk_mt2712_init(void)
-{
- return platform_driver_register(&clk_mt2712_drv);
-}
-
-arch_initcall(clk_mt2712_init);
+module_platform_driver(clk_mt2712_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 0aa6c0d352ca..9e98d6997329 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs audio1_cg_regs = {
.sta_ofs = 0x4,
};
-#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate audio_clks[] = {
/* AUDIO0 */
@@ -77,6 +65,7 @@ static const struct of_device_id of_match_clk_mt6765_audio[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_audio);
static struct platform_driver clk_mt6765_audio_drv = {
.probe = mtk_clk_simple_probe,
@@ -86,5 +75,5 @@ static struct platform_driver clk_mt6765_audio_drv = {
.of_match_table = of_match_clk_mt6765_audio,
},
};
-
-builtin_platform_driver(clk_mt6765_audio_drv);
+module_platform_driver(clk_mt6765_audio_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index 25f2bef38126..6f6b29d8b29a 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_CAM(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &cam_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0),
@@ -52,6 +46,7 @@ static const struct of_device_id of_match_clk_mt6765_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_cam);
static struct platform_driver clk_mt6765_cam_drv = {
.probe = mtk_clk_simple_probe,
@@ -61,5 +56,5 @@ static struct platform_driver clk_mt6765_cam_drv = {
.of_match_table = of_match_clk_mt6765_cam,
},
};
-
-builtin_platform_driver(clk_mt6765_cam_drv);
+module_platform_driver(clk_mt6765_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index a62303ef4f41..984201077a20 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0),
@@ -48,6 +42,7 @@ static const struct of_device_id of_match_clk_mt6765_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_img);
static struct platform_driver clk_mt6765_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -57,5 +52,5 @@ static struct platform_driver clk_mt6765_img_drv = {
.of_match_table = of_match_clk_mt6765_img,
},
};
-
-builtin_platform_driver(clk_mt6765_img_drv);
+module_platform_driver(clk_mt6765_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index 25c829fc3866..a47937f4efe5 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mipi0a_cg_regs = {
.sta_ofs = 0x80,
};
-#define GATE_MIPI0A(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mipi0a_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_MIPI0A(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mipi0a_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate mipi0a_clks[] = {
GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A,
@@ -45,6 +39,7 @@ static const struct of_device_id of_match_clk_mt6765_mipi0a[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mipi0a);
static struct platform_driver clk_mt6765_mipi0a_drv = {
.probe = mtk_clk_simple_probe,
@@ -54,5 +49,5 @@ static struct platform_driver clk_mt6765_mipi0a_drv = {
.of_match_table = of_match_clk_mt6765_mipi0a,
},
};
-
-builtin_platform_driver(clk_mt6765_mipi0a_drv);
+module_platform_driver(clk_mt6765_mipi0a_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index bda774668a36..2b8fc052558e 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mm_cg_regs = {
.sta_ofs = 0x100,
};
-#define GATE_MM(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
/* MM */
@@ -74,6 +68,7 @@ static const struct of_device_id of_match_clk_mt6765_mm[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mm);
static struct platform_driver clk_mt6765_mm_drv = {
.probe = mtk_clk_simple_probe,
@@ -83,5 +78,5 @@ static struct platform_driver clk_mt6765_mm_drv = {
.of_match_table = of_match_clk_mt6765_mm,
},
};
-
-builtin_platform_driver(clk_mt6765_mm_drv);
+module_platform_driver(clk_mt6765_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index 2bc1fbde87da..36df9615b1be 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &venc_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0),
@@ -47,6 +41,7 @@ static const struct of_device_id of_match_clk_mt6765_vcodec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_vcodec);
static struct platform_driver clk_mt6765_vcodec_drv = {
.probe = mtk_clk_simple_probe,
@@ -56,5 +51,5 @@ static struct platform_driver clk_mt6765_vcodec_drv = {
.of_match_table = of_match_clk_mt6765_vcodec,
},
};
-
-builtin_platform_driver(clk_mt6765_vcodec_drv);
+module_platform_driver(clk_mt6765_vcodec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
index e9b9e6729733..fa7948ef1e68 100644
--- a/drivers/clk/mediatek/clk-mt6765.c
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -483,32 +483,14 @@ static const struct mtk_gate_regs top2_cg_regs = {
.sta_ofs = 0x320,
};
-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-#define GATE_TOP2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate top_clks[] = {
/* TOP0 */
@@ -559,41 +541,17 @@ static const struct mtk_gate_regs ifr5_cg_regs = {
.sta_ofs = 0xc8,
};
-#define GATE_IFR2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_IFR3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_IFR4(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr4_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR4(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_IFR5(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr5_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate ifr_clks[] = {
/* INFRA_TOPAXI */
@@ -674,14 +632,8 @@ static const struct mtk_gate_regs apmixed_cg_regs = {
.sta_ofs = 0x14,
};
-#define GATE_APMIXED(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &apmixed_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate apmixed_clks[] = {
/* AUDIO0 */
@@ -789,7 +741,7 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -826,10 +778,11 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
&mt6765_clk_lock, clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -862,8 +815,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
- mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
+ ARRAY_SIZE(ifr_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -887,6 +840,7 @@ static const struct of_device_id of_match_clk_mt6765[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6765);
static int clk_mt6765_probe(struct platform_device *pdev)
{
@@ -920,3 +874,4 @@ static int __init clk_mt6765_init(void)
}
arch_initcall(clk_mt6765_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 6e473ae1fd90..6e3280d3a2e6 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -102,6 +102,7 @@ static const struct of_device_id of_match_clk_mt6779_aud[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_aud);
static struct platform_driver clk_mt6779_aud_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 7be3db90fa4a..b4c4c7248672 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -51,6 +51,7 @@ static const struct of_device_id of_match_clk_mt6779_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_cam);
static struct platform_driver clk_mt6779_cam_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 9bc51fc82dbd..b760a8af3462 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -43,6 +43,7 @@ static const struct of_device_id of_match_clk_mt6779_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_img);
static struct platform_driver clk_mt6779_img_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index 92e9d1ade422..9285a792c59b 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -45,6 +45,7 @@ static const struct of_device_id of_match_clk_mt6779_ipe[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_ipe);
static struct platform_driver clk_mt6779_ipe_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index efc793a1969a..d20f32d4f827 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -40,6 +40,7 @@ static const struct of_device_id of_match_clk_mt6779_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_mfg);
static struct platform_driver clk_mt6779_mfg_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index eda8cbee3d23..c2f700ae6c2c 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -85,25 +85,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_DISP_OVL_FBDC, "mm_disp_ovl_fbdc", "mm_sel", 16),
};
-static int clk_mt6779_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct platform_device_id clk_mt6779_mm_id_table[] = {
+ { .name = "clk-mt6779-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt6779_mm_id_table);
static struct platform_driver clk_mt6779_mm_drv = {
- .probe = clk_mt6779_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6779-mm",
},
+ .id_table = clk_mt6779_mm_id_table,
};
module_platform_driver(clk_mt6779_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index 3209a6518d5b..e062ed5aa45f 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -52,6 +52,7 @@ static const struct of_device_id of_match_clk_mt6779_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_vdec);
static struct platform_driver clk_mt6779_vdec_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index c25035c0f334..0ae8ac28f838 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -43,6 +43,7 @@ static const struct of_device_id of_match_clk_mt6779_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_venc);
static struct platform_driver clk_mt6779_venc_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 0d0a90ee5eb2..1f5ea1508f61 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -880,6 +880,7 @@ static const struct mtk_gate_regs infra3_cg_regs = {
&mtk_clk_gate_ops_setclr)
static const struct mtk_gate infra_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "ifa_dummy"),
/* INFRA0 */
GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr",
"axi_sel", 0),
@@ -1221,7 +1222,7 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -1244,27 +1245,17 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt6779_clk_lock, clk_data);
+ mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
+ &mt6779_clk_lock, clk_data);
- mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt6779_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_aud_muxes,
+ ARRAY_SIZE(top_aud_muxes), base,
+ &mt6779_clk_lock, clk_data);
- mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt6779_clk_lock, clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
-
-static int clk_mt6779_infra_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_aud_divs,
+ ARRAY_SIZE(top_aud_divs), base,
+ &mt6779_clk_lock, clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -1277,9 +1268,6 @@ static const struct of_device_id of_match_clk_mt6779[] = {
.compatible = "mediatek,mt6779-topckgen",
.data = clk_mt6779_top_probe,
}, {
- .compatible = "mediatek,mt6779-infracfg_ao",
- .data = clk_mt6779_infra_probe,
- }, {
/* sentinel */
}
};
@@ -1302,6 +1290,26 @@ static int clk_mt6779_probe(struct platform_device *pdev)
return r;
}
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6779_infra[] = {
+ { .compatible = "mediatek,mt6779-infracfg_ao", .data = &infra_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_infra);
+
+static struct platform_driver clk_mt6779_infra_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6779-infra",
+ .of_match_table = of_match_clk_mt6779_infra,
+ },
+};
+
static struct platform_driver clk_mt6779_drv = {
.probe = clk_mt6779_probe,
.driver = {
@@ -1312,7 +1320,11 @@ static struct platform_driver clk_mt6779_drv = {
static int __init clk_mt6779_init(void)
{
- return platform_driver_register(&clk_mt6779_drv);
+ int ret = platform_driver_register(&clk_mt6779_drv);
+
+ if (ret)
+ return ret;
+ return platform_driver_register(&clk_mt6779_infra_drv);
}
arch_initcall(clk_mt6779_init);
diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
index 59761c72d3bc..8b30109f253c 100644
--- a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
@@ -7,8 +7,10 @@
#include <dt-bindings/clock/mediatek,mt6795-clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include "clk-fhctl.h"
#include "clk-mtk.h"
#include "clk-pll.h"
+#include "clk-pllfh.h"
#define REG_REF2USB 0x8
#define REG_AP_PLL_CON7 0x1c
@@ -58,6 +60,56 @@ static const struct mtk_pll_data plls[] = {
PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2bc, 0x2b8, 0),
};
+enum fh_pll_id {
+ FH_CA53PLL_LL,
+ FH_CA53PLL_BL,
+ FH_MAINPLL,
+ FH_MPLL,
+ FH_MSDCPLL,
+ FH_MMPLL,
+ FH_VENCPLL,
+ FH_TVDPLL,
+ FH_VCODECPLL,
+ FH_NR_FH,
+};
+
+#define _FH(_pllid, _fhid, _slope, _offset) { \
+ .data = { \
+ .pll_id = _pllid, \
+ .fh_id = _fhid, \
+ .fh_ver = FHCTL_PLLFH_V1, \
+ .fhx_offset = _offset, \
+ .dds_mask = GENMASK(21, 0), \
+ .slope0_value = _slope, \
+ .slope1_value = _slope, \
+ .sfstrx_en = BIT(2), \
+ .frddsx_en = BIT(1), \
+ .fhctlx_en = BIT(0), \
+ .tgl_org = BIT(31), \
+ .dvfs_tri = BIT(31), \
+ .pcwchg = BIT(31), \
+ .dt_val = 0x0, \
+ .df_val = 0x9, \
+ .updnlmt_shft = 16, \
+ .msk_frddsx_dys = GENMASK(23, 20), \
+ .msk_frddsx_dts = GENMASK(19, 16), \
+ }, \
+ }
+
+#define FH(_pllid, _fhid, _offset) _FH(_pllid, _fhid, 0x6003c97, _offset)
+#define FH_M(_pllid, _fhid, _offset) _FH(_pllid, _fhid, 0x6000140, _offset)
+
+static struct mtk_pllfh_data pllfhs[] = {
+ FH(CLK_APMIXED_ARMCA53PLL, FH_CA53PLL_BL, 0x38),
+ FH(CLK_APMIXED_MAINPLL, FH_MAINPLL, 0x60),
+ FH_M(CLK_APMIXED_MPLL, FH_MPLL, 0x74),
+ FH(CLK_APMIXED_MSDCPLL, FH_MSDCPLL, 0x88),
+ FH(CLK_APMIXED_MMPLL, FH_MMPLL, 0x9c),
+ FH(CLK_APMIXED_VENCPLL, FH_VENCPLL, 0xb0),
+ FH(CLK_APMIXED_TVDPLL, FH_TVDPLL, 0xc4),
+ FH(CLK_APMIXED_VCODECPLL, FH_VCODECPLL, 0xd8),
+};
+
static void clk_mt6795_apmixed_setup_md1(void __iomem *base)
{
void __iomem *reg = base + REG_AP_PLL_CON7;
@@ -79,12 +131,14 @@ static const struct of_device_id of_match_clk_mt6795_apmixed[] = {
{ .compatible = "mediatek,mt6795-apmixedsys" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_apmixed);
static int clk_mt6795_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
+ const u8 *fhctl_node = "mediatek,mt6795-fhctl";
void __iomem *base;
struct clk_hw *hw;
int ret;
@@ -97,7 +151,9 @@ static int clk_mt6795_apmixed_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ fhctl_parse_dt(fhctl_node, pllfhs, ARRAY_SIZE(pllfhs));
+ ret = mtk_clk_register_pllfhs(node, plls, ARRAY_SIZE(plls),
+ pllfhs, ARRAY_SIZE(pllfhs), clk_data);
if (ret)
goto free_clk_data;
@@ -124,7 +180,8 @@ static int clk_mt6795_apmixed_probe(struct platform_device *pdev)
unregister_ref2usb:
mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
unregister_plls:
- mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
free_clk_data:
mtk_free_clk_data(clk_data);
return ret;
@@ -137,7 +194,8 @@ static int clk_mt6795_apmixed_remove(struct platform_device *pdev)
of_clk_del_provider(node);
mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
- mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
mtk_free_clk_data(clk_data);
return 0;
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
index df7eed6e071e..086ea1438564 100644
--- a/drivers/clk/mediatek/clk-mt6795-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -81,6 +81,7 @@ static const struct of_device_id of_match_clk_mt6795_infracfg[] = {
{ .compatible = "mediatek,mt6795-infracfg" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_infracfg);
static int clk_mt6795_infracfg_probe(struct platform_device *pdev)
{
@@ -101,11 +102,13 @@ static int clk_mt6795_infracfg_probe(struct platform_device *pdev)
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_gates(node, infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
+ ARRAY_SIZE(infra_gates), clk_data);
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ ret = mtk_clk_register_cpumuxes(&pdev->dev, node, cpu_muxes,
+ ARRAY_SIZE(cpu_muxes), clk_data);
if (ret)
goto unregister_gates;
diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c
index ee7aab24eb24..1d658bb19e82 100644
--- a/drivers/clk/mediatek/clk-mt6795-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-mfg.c
@@ -35,6 +35,7 @@ static const struct of_device_id of_match_clk_mt6795_mfg[] = {
{ .compatible = "mediatek,mt6795-mfgcfg", .data = &mfg_desc },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_mfg);
static struct platform_driver clk_mt6795_mfg_drv = {
.driver = {
diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
index fd73f202f292..8acc9cad2875 100644
--- a/drivers/clk/mediatek/clk-mt6795-mm.c
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -76,55 +76,24 @@ static const struct mtk_gate mm_gates[] = {
GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
};
-static int clk_mt6795_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int ret;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- ret = mtk_clk_register_gates(node, mm_gates, ARRAY_SIZE(mm_gates), clk_data);
- if (ret)
- goto free_clk_data;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto unregister_gates;
-
- platform_set_drvdata(pdev, clk_data);
-
- return 0;
-
-unregister_gates:
- mtk_clk_unregister_gates(mm_gates, ARRAY_SIZE(mm_gates), clk_data);
-free_clk_data:
- mtk_free_clk_data(clk_data);
- return ret;
-}
-
-static int clk_mt6795_mm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
-
- of_clk_del_provider(node);
- mtk_clk_unregister_gates(mm_gates, ARRAY_SIZE(mm_gates), clk_data);
- mtk_free_clk_data(clk_data);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_gates,
+ .num_clks = ARRAY_SIZE(mm_gates),
+};
- return 0;
-}
+static const struct platform_device_id clk_mt6795_mm_id_table[] = {
+ { .name = "clk-mt6795-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt6795_mm_id_table);
static struct platform_driver clk_mt6795_mm_drv = {
.driver = {
.name = "clk-mt6795-mm",
},
- .probe = clk_mt6795_mm_probe,
- .remove = clk_mt6795_mm_remove,
+ .id_table = clk_mt6795_mm_id_table,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt6795_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
index cb28d35dad59..62cc19eee2c7 100644
--- a/drivers/clk/mediatek/clk-mt6795-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -89,6 +89,7 @@ static const struct of_device_id of_match_clk_mt6795_pericfg[] = {
{ .compatible = "mediatek,mt6795-pericfg" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_pericfg);
static int clk_mt6795_pericfg_probe(struct platform_device *pdev)
{
@@ -109,11 +110,13 @@ static int clk_mt6795_pericfg_probe(struct platform_device *pdev)
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, peri_gates,
+ ARRAY_SIZE(peri_gates), clk_data);
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+ ret = mtk_clk_register_composites(&pdev->dev, peri_clks,
+ ARRAY_SIZE(peri_clks), base,
&mt6795_peri_clk_lock, clk_data);
if (ret)
goto unregister_gates;
diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
index 8b8307635a35..9c6d63a80b19 100644
--- a/drivers/clk/mediatek/clk-mt6795-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -523,86 +523,31 @@ static struct mtk_composite top_aud_divs[] = {
DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4),
};
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs),
+ .clk_lock = &mt6795_top_clk_lock,
+};
static const struct of_device_id of_match_clk_mt6795_topckgen[] = {
- { .compatible = "mediatek,mt6795-topckgen" },
+ { .compatible = "mediatek,mt6795-topckgen", .data = &topck_desc },
{ /* sentinel */ }
};
-
-static int clk_mt6795_topckgen_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- void __iomem *base;
- int ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- ret = mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
- if (ret)
- goto free_clk_data;
-
- ret = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- if (ret)
- goto unregister_fixed_clks;
-
- ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
- &mt6795_top_clk_lock, clk_data);
- if (ret)
- goto unregister_factors;
-
- ret = mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), base,
- &mt6795_top_clk_lock, clk_data);
- if (ret)
- goto unregister_muxes;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto unregister_composites;
-
- return 0;
-
-unregister_composites:
- mtk_clk_unregister_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), clk_data);
-unregister_muxes:
- mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
-unregister_factors:
- mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
-unregister_fixed_clks:
- mtk_clk_unregister_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
-free_clk_data:
- mtk_free_clk_data(clk_data);
- return ret;
-}
-
-static int clk_mt6795_topckgen_remove(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
-
- of_clk_del_provider(node);
- mtk_clk_unregister_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), clk_data);
- mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
- mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_unregister_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
- mtk_free_clk_data(clk_data);
-
- return 0;
-}
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_topckgen);
static struct platform_driver clk_mt6795_topckgen_drv = {
.driver = {
.name = "clk-mt6795-topckgen",
.of_match_table = of_match_clk_mt6795_topckgen,
},
- .probe = clk_mt6795_topckgen_probe,
- .remove = clk_mt6795_topckgen_remove,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_topckgen_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
index d85d04e0d016..f2968f859dca 100644
--- a/drivers/clk/mediatek/clk-mt6795-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
@@ -40,6 +40,7 @@ static const struct of_device_id of_match_clk_mt6795_vdecsys[] = {
{ .compatible = "mediatek,mt6795-vdecsys", .data = &vdec_desc },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_vdecsys);
static struct platform_driver clk_mt6795_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c
index de40a982ca96..2f8d48da1a85 100644
--- a/drivers/clk/mediatek/clk-mt6795-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c
@@ -35,6 +35,7 @@ static const struct of_device_id of_match_clk_mt6795_vencsys[] = {
{ .compatible = "mediatek,mt6795-vencsys", .data = &venc_desc },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_vencsys);
static struct platform_driver clk_mt6795_vencsys_drv = {
.driver = {
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 7c6a53fbb8be..00fc0a03e646 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -16,14 +16,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0000,
};
-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_sel", 11),
@@ -45,6 +39,7 @@ static const struct of_device_id of_match_clk_mt6797_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_img);
static struct platform_driver clk_mt6797_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -54,5 +49,5 @@ static struct platform_driver clk_mt6797_img_drv = {
.of_match_table = of_match_clk_mt6797_img,
},
};
-
-builtin_platform_driver(clk_mt6797_img_drv);
+module_platform_driver(clk_mt6797_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 0846011fc894..caacfa40a5bc 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -23,23 +23,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
.sta_ofs = 0x0110,
};
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
-}
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
-}
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
@@ -92,32 +80,24 @@ static const struct mtk_gate mm_clks[] = {
"clk26m", 3),
};
-static int clk_mt6797_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return r;
-}
+static const struct platform_device_id clk_mt6797_mm_id_table[] = {
+ { .name = "clk-mt6797-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt6797_mm_id_table);
static struct platform_driver clk_mt6797_mm_drv = {
- .probe = clk_mt6797_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6797-mm",
},
+ .id_table = clk_mt6797_mm_id_table,
};
-
-builtin_platform_driver(clk_mt6797_mm_drv);
+module_platform_driver(clk_mt6797_mm_drv);
+MODULE_LICENSE("GPL");
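
[Editor's note] Unlike its siblings, the mm clock unit has no devicetree node of its own: the deleted probe took its OF node from dev->parent, so the device is presumably created by the parent mmsys driver. That is why this file matches by name through a platform_device_id table, with the mtk_clk_desc carried in driver_data. A hedged sketch of how a probe on that path can recover the descriptor (the real mtk_clk_pdev_probe() lives in clk-mtk.c and is only assumed to do something equivalent):

	#include <linux/platform_device.h>

	/* Illustrative only, not the actual mtk_clk_pdev_probe(). */
	static int example_clk_pdev_probe(struct platform_device *pdev)
	{
		const struct platform_device_id *id = platform_get_device_id(pdev);
		const struct mtk_clk_desc *desc;

		if (!id || !id->driver_data)
			return -EINVAL;

		desc = (const struct mtk_clk_desc *)id->driver_data;
		/*
		 * The clocks are then registered against the parent's OF node,
		 * which is why the deleted probe used dev->parent->of_node.
		 */
		return 0;
	}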
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index 6120fccc859f..447fe6fa8e15 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x0008,
};
-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
-}
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
-}
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "vdec_sel", 8),
@@ -62,6 +50,7 @@ static const struct of_device_id of_match_clk_mt6797_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_vdec);
static struct platform_driver clk_mt6797_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -71,5 +60,5 @@ static struct platform_driver clk_mt6797_vdec_drv = {
.of_match_table = of_match_clk_mt6797_vdec,
},
};
-
-builtin_platform_driver(clk_mt6797_vdec_drv);
+module_platform_driver(clk_mt6797_vdec_drv);
+MODULE_LICENSE("GPL");
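
[Editor's note] The builtin_platform_driver() to module_platform_driver() switch, together with MODULE_DEVICE_TABLE() and MODULE_LICENSE(), is what lets these drivers be built as loadable modules and autoloaded from their OF compatibles. Roughly, the builtin form registers the driver from an initcall and can never be unloaded, while the module form emits an init/exit pair. A simplified sketch from memory, not the exact <linux/platform_device.h> text:

	/* builtin_platform_driver(drv): built-in only, no exit path. */
	#define BUILTIN_PLATFORM_DRIVER_SKETCH(__drv)			\
	static int __init __drv##_init(void)				\
	{								\
		return platform_driver_register(&(__drv));		\
	}								\
	device_initcall(__drv##_init);

	/* module_platform_driver(drv): usable built-in or as a .ko, can unload. */
	#define MODULE_PLATFORM_DRIVER_SKETCH(__drv)			\
	static int __init __drv##_init(void)				\
	{								\
		return platform_driver_register(&(__drv));		\
	}								\
	module_init(__drv##_init);					\
	static void __exit __drv##_exit(void)				\
	{								\
		platform_driver_unregister(&(__drv));			\
	}								\
	module_exit(__drv##_exit);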
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index 834d3834d2bb..95b89ff8fd19 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
.sta_ofs = 0x0000,
};
-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &venc_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_0, "venc_0", "mm_sel", 0),
@@ -47,6 +41,7 @@ static const struct of_device_id of_match_clk_mt6797_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_venc);
static struct platform_driver clk_mt6797_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -56,5 +51,5 @@ static struct platform_driver clk_mt6797_venc_drv = {
.of_match_table = of_match_clk_mt6797_venc,
},
};
-
-builtin_platform_driver(clk_mt6797_venc_drv);
+module_platform_driver(clk_mt6797_venc_drv);
+MODULE_LICENSE("GPL");
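
[Editor's note] With the per-SoC init functions gone, each compatible simply points its .data at a const struct mtk_clk_desc, and the shared mtk_clk_simple_probe()/mtk_clk_simple_remove() pair handles allocation, gate registration and the OF provider. A minimal sketch of the retrieval step such a generic probe is assumed to perform; of_device_get_match_data() is the standard helper, and the real implementation in clk-mtk.c handles much more (fixed clocks, factors, composites, resets):

	#include <linux/of_device.h>
	#include <linux/platform_device.h>
	/* Assumes the struct mtk_clk_desc definition from clk-mtk.h. */

	/* Illustrative sketch, not the actual mtk_clk_simple_probe(). */
	static int example_simple_probe(struct platform_device *pdev)
	{
		const struct mtk_clk_desc *mcd = of_device_get_match_data(&pdev->dev);

		if (!mcd)
			return -EINVAL;

		/*
		 * ...allocate clk_hw_onecell_data for mcd->num_clks entries,
		 * register mcd->clks, then of_clk_add_hw_provider() on the node.
		 */
		return 0;
	}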
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index b89f325a4b9b..4c87c0348e5f 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -396,7 +396,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
&mt6797_clk_lock, clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -420,40 +421,22 @@ static const struct mtk_gate_regs infra2_cg_regs = {
.sta_ofs = 0x00b0,
};
-#define GATE_ICG0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
-}
+#define GATE_ICG0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_ICG1(_id, _name, _parent, _shift) \
- GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0)
+#define GATE_ICG1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- .flags = _flags, \
-}
+#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
-#define GATE_ICG2(_id, _name, _parent, _shift) \
- GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0)
+#define GATE_ICG2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- .flags = _flags, \
-}
+#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
/*
* Clock gates dramc and dramc_b are needed by the DRAM controller.
@@ -596,8 +579,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
}
}
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- infra_clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), infra_clk_data);
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
@@ -687,6 +670,7 @@ static const struct of_device_id of_match_clk_mt6797[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt6797);
static int clk_mt6797_probe(struct platform_device *pdev)
{
@@ -720,3 +704,4 @@ static int __init clk_mt6797_init(void)
}
arch_initcall(clk_mt6797_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
new file mode 100644
index 000000000000..a36808d074d6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Copyright (c) 2023 Collabora, Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt7622-clk.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MT7622_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7622_RST_BAR BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table, _parent_name) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT7622_RST_BAR, \
+ .fmax = MT7622_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL, "clkxtal")
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_APMIXED_AO(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv, CLK_IS_CRITICAL)
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0,
+ PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0,
+ HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0,
+ HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
+ PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0,
+ 0, 21, 0x0300, 1, 0, 0x0304, 0),
+ PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0,
+ 0, 21, 0x0314, 1, 0, 0x0318, 0),
+ PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0,
+ 0, 31, 0x0324, 1, 0, 0x0328, 0),
+ PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0,
+ 0, 31, 0x0334, 1, 0, 0x0338, 0),
+ PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0,
+ 0, 21, 0x0344, 1, 0, 0x0348, 0),
+ PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0,
+ 0, 21, 0x0358, 1, 0, 0x035C, 0),
+};
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED_AO(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+};
+
+static int clk_mt7622_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static int clk_mt7622_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id of_match_clk_mt7622_apmixed[] = {
+ { .compatible = "mediatek,mt7622-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_apmixed);
+
+static struct platform_driver clk_mt7622_apmixed_drv = {
+ .probe = clk_mt7622_apmixed_probe,
+ .remove = clk_mt7622_apmixed_remove,
+ .driver = {
+ .name = "clk-mt7622-apmixed",
+ .of_match_table = of_match_clk_mt7622_apmixed,
+ },
+};
+module_platform_driver(clk_mt7622_apmixed_drv)
+
+MODULE_DESCRIPTION("MediaTek MT7622 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 9f2e5aa7b5d9..dd1799dd8435 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -16,41 +16,17 @@
#include <dt-bindings/clock/mt7622-clk.h>
-#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
static const struct mtk_gate_regs audio0_cg_regs = {
.set_ofs = 0x0,
@@ -130,24 +106,22 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO3(CLK_AUDIO_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
};
-static int clk_mt7622_audiosys_init(struct platform_device *pdev)
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
+};
+
+static int clk_mt7622_aud_probe(struct platform_device *pdev)
{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
int r;
- clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
-
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ r = mtk_clk_simple_probe(pdev);
if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
pdev->name, r);
- goto err_clk_provider;
+ return r;
}
r = devm_of_platform_populate(&pdev->dev);
@@ -157,44 +131,29 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev)
return 0;
err_plat_populate:
- of_clk_del_provider(node);
-err_clk_provider:
+ mtk_clk_simple_remove(pdev);
return r;
}
-static const struct of_device_id of_match_clk_mt7622_aud[] = {
- {
- .compatible = "mediatek,mt7622-audsys",
- .data = clk_mt7622_audiosys_init,
- }, {
- /* sentinel */
- }
-};
-
-static int clk_mt7622_aud_probe(struct platform_device *pdev)
+static int clk_mt7622_aud_remove(struct platform_device *pdev)
{
- int (*clk_init)(struct platform_device *);
- int r;
-
- clk_init = of_device_get_match_data(&pdev->dev);
- if (!clk_init)
- return -EINVAL;
-
- r = clk_init(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
+ of_platform_depopulate(&pdev->dev);
+ return mtk_clk_simple_remove(pdev);
}
+static const struct of_device_id of_match_clk_mt7622_aud[] = {
+ { .compatible = "mediatek,mt7622-audsys", .data = &audio_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_aud);
+
static struct platform_driver clk_mt7622_aud_drv = {
.probe = clk_mt7622_aud_probe,
+ .remove = clk_mt7622_aud_remove,
.driver = {
.name = "clk-mt7622-aud",
.of_match_table = of_match_clk_mt7622_aud,
},
};
-
-builtin_platform_driver(clk_mt7622_aud_drv);
+module_platform_driver(clk_mt7622_aud_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index 43de0477d5d9..f96b36737029 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -16,14 +16,8 @@
#include <dt-bindings/clock/mt7622-clk.h>
-#define GATE_ETH(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &eth_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate_regs eth_cg_regs = {
.set_ofs = 0x30,
@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
.sta_ofs = 0xE4,
};
-#define GATE_SGMII(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &sgmii_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate sgmii_clks[] = {
GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
@@ -73,84 +61,31 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(rst_ofs),
};
-static int clk_mt7622_ethsys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
-
- mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return r;
-}
-
-static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
-
- mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
+static const struct mtk_clk_desc eth_desc = {
+ .clks = eth_clks,
+ .num_clks = ARRAY_SIZE(eth_clks),
+ .rst_desc = &clk_rst_desc,
+};
- return r;
-}
+static const struct mtk_clk_desc sgmii_desc = {
+ .clks = sgmii_clks,
+ .num_clks = ARRAY_SIZE(sgmii_clks),
+};
static const struct of_device_id of_match_clk_mt7622_eth[] = {
- {
- .compatible = "mediatek,mt7622-ethsys",
- .data = clk_mt7622_ethsys_init,
- }, {
- .compatible = "mediatek,mt7622-sgmiisys",
- .data = clk_mt7622_sgmiisys_init,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt7622-ethsys", .data = &eth_desc },
+ { .compatible = "mediatek,mt7622-sgmiisys", .data = &sgmii_desc },
+ { /* sentinel */ }
};
-
-static int clk_mt7622_eth_probe(struct platform_device *pdev)
-{
- int (*clk_init)(struct platform_device *);
- int r;
-
- clk_init = of_device_get_match_data(&pdev->dev);
- if (!clk_init)
- return -EINVAL;
-
- r = clk_init(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_eth);
static struct platform_driver clk_mt7622_eth_drv = {
- .probe = clk_mt7622_eth_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-eth",
.of_match_table = of_match_clk_mt7622_eth,
},
};
-
-builtin_platform_driver(clk_mt7622_eth_drv);
+module_platform_driver(clk_mt7622_eth_drv);
+MODULE_LICENSE("GPL");
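
[Editor's note] Besides the gate lists, the eth descriptor carries a .rst_desc, which replaces the explicit mtk_register_reset_controller_with_dev() call in the deleted clk_mt7622_ethsys_init(). The generic probe is assumed to perform the equivalent step whenever the field is set; a minimal sketch under the clk-mtk.h definitions (illustrative only):

	/* Hedged sketch of the rst_desc handling the generic probe presumably does. */
	static int example_register_resets(struct platform_device *pdev,
					   const struct mtk_clk_desc *mcd)
	{
		if (!mcd->rst_desc)
			return 0;

		return mtk_register_reset_controller_with_dev(&pdev->dev,
							      mcd->rst_desc);
	}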
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 67e96231dd25..f440943f0d46 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -16,23 +16,11 @@
#include <dt-bindings/clock/mt7622-clk.h>
-#define GATE_PCIE(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &pcie_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_PCIE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-#define GATE_SSUSB(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ssusb_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SSUSB(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate_regs pcie_cg_regs = {
.set_ofs = 0x30,
@@ -84,86 +72,32 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(rst_ofs),
};
-static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
-
- mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return r;
-}
-
-static int clk_mt7622_pciesys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
-
- mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+static const struct mtk_clk_desc ssusb_desc = {
+ .clks = ssusb_clks,
+ .num_clks = ARRAY_SIZE(ssusb_clks),
+ .rst_desc = &clk_rst_desc,
+};
- return r;
-}
+static const struct mtk_clk_desc pcie_desc = {
+ .clks = pcie_clks,
+ .num_clks = ARRAY_SIZE(pcie_clks),
+ .rst_desc = &clk_rst_desc,
+};
static const struct of_device_id of_match_clk_mt7622_hif[] = {
- {
- .compatible = "mediatek,mt7622-pciesys",
- .data = clk_mt7622_pciesys_init,
- }, {
- .compatible = "mediatek,mt7622-ssusbsys",
- .data = clk_mt7622_ssusbsys_init,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt7622-pciesys", .data = &pcie_desc },
+ { .compatible = "mediatek,mt7622-ssusbsys", .data = &ssusb_desc },
+ { /* sentinel */ }
};
-
-static int clk_mt7622_hif_probe(struct platform_device *pdev)
-{
- int (*clk_init)(struct platform_device *);
- int r;
-
- clk_init = of_device_get_match_data(&pdev->dev);
- if (!clk_init)
- return -EINVAL;
-
- r = clk_init(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_hif);
static struct platform_driver clk_mt7622_hif_drv = {
- .probe = clk_mt7622_hif_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-hif",
.of_match_table = of_match_clk_mt7622_hif,
},
};
-
-builtin_platform_driver(clk_mt7622_hif_drv);
+module_platform_driver(clk_mt7622_hif_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-infracfg.c b/drivers/clk/mediatek/clk-mt7622-infracfg.c
new file mode 100644
index 000000000000..9dc05526f287
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-infracfg.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Copyright (c) 2023 Collabora, Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt7622-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_INFRA(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const char * const infra_mux1_parents[] = {
+ "clkxtal",
+ "armpll",
+ "main_core_en",
+ "armpll"
+};
+
+static const struct mtk_composite cpu_muxes[] = {
+ MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents, 0x000, 2, 2),
+};
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_INFRA(CLK_INFRA_DBGCLK_PD, "infra_dbgclk_pd", "axi_sel", 0),
+ GATE_INFRA(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 2),
+ GATE_INFRA(CLK_INFRA_AUDIO_PD, "infra_audio_pd", "aud_intbus_sel", 5),
+ GATE_INFRA(CLK_INFRA_IRRX_PD, "infra_irrx_pd", "irrx_sel", 16),
+ GATE_INFRA(CLK_INFRA_APXGPT_PD, "infra_apxgpt_pd", "f10m_ref_sel", 18),
+ GATE_INFRA(CLK_INFRA_PMIC_PD, "infra_pmic_pd", "pmicspi_sel", 22),
+};
+
+static u16 infrasys_rst_ofs[] = { 0x30 };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+};
+
+static const struct of_device_id of_match_clk_mt7622_infracfg[] = {
+ { .compatible = "mediatek,mt7622-infracfg" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_infracfg);
+
+static int clk_mt7622_infracfg_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_cpumuxes(&pdev->dev, node, cpu_muxes,
+ ARRAY_SIZE(cpu_muxes), clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_cpumuxes;
+
+ return 0;
+
+unregister_cpumuxes:
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+unregister_gates:
+ mtk_clk_unregister_gates(infra_clks, ARRAY_SIZE(infra_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt7622_infracfg_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ mtk_clk_unregister_gates(infra_clks, ARRAY_SIZE(infra_clks), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt7622_infracfg_drv = {
+ .driver = {
+ .name = "clk-mt7622-infracfg",
+ .of_match_table = of_match_clk_mt7622_infracfg,
+ },
+ .probe = clk_mt7622_infracfg_probe,
+ .remove = clk_mt7622_infracfg_remove,
+};
+module_platform_driver(clk_mt7622_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7622 infracfg clocks driver");
+MODULE_LICENSE("GPL");
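
[Editor's note] The new infracfg driver also takes over the CPU mux (infra_mux1_sel) that the combined clk-mt7622.c used to register. Its parent list (clkxtal, armpll, main_core_en, armpll) exists so that whoever manages CPU frequency can park the CPU on a stable source while ARMPLL relocks; once the mux is exposed as an ordinary clk, that is plain clk_set_parent() on the consumer side. A hedged sketch with hypothetical handle names ("cpu_mux", "xtal", "pll" stand for clocks a cpufreq-style driver would have looked up beforehand), using only the standard clk API:

	#include <linux/clk.h>

	/* Hypothetical consumer-side sequence; error paths simplified. */
	static int example_switch_cpu_parent(struct clk *cpu_mux, struct clk *xtal,
					     struct clk *pll, unsigned long new_rate)
	{
		int ret;

		ret = clk_set_parent(cpu_mux, xtal);	/* park on the crystal */
		if (ret)
			return ret;

		ret = clk_set_rate(pll, new_rate);	/* retune the PLL safely */
		if (ret)
			return ret;

		return clk_set_parent(cpu_mux, pll);	/* and switch back */
	}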
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 3b55f8641fae..274895264427 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -14,104 +14,27 @@
#include "clk-cpumux.h"
#include "clk-gate.h"
#include "clk-mtk.h"
-#include "clk-pll.h"
#include <dt-bindings/clock/mt7622-clk.h>
#include <linux/clk.h> /* for consumer */
-#define MT7622_PLL_FMAX (2500UL * MHZ)
-#define CON0_MT7622_RST_BAR BIT(27)
-
-#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift, _div_table, _parent_name) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = CON0_MT7622_RST_BAR, \
- .fmax = MT7622_PLL_FMAX, \
- .pcwbits = _pcwbits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .div_table = _div_table, \
- .parent_name = _parent_name, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift) \
- PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
- NULL, "clkxtal")
-
-#define GATE_APMIXED(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &apmixed_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
-
-#define GATE_INFRA(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
-
-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
-
-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-static DEFINE_SPINLOCK(mt7622_clk_lock);
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-static const char * const infra_mux1_parents[] = {
- "clkxtal",
- "armpll",
- "main_core_en",
- "armpll"
-};
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_PERI0_AO(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &peri0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
+
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static DEFINE_SPINLOCK(mt7622_clk_lock);
static const char * const axi_parents[] = {
"clkxtal",
@@ -292,18 +215,6 @@ static const char * const peribus_ck_parents[] = {
"syspll1_d4"
};
-static const struct mtk_gate_regs apmixed_cg_regs = {
- .set_ofs = 0x8,
- .clr_ofs = 0x8,
- .sta_ofs = 0x8,
-};
-
-static const struct mtk_gate_regs infra_cg_regs = {
- .set_ofs = 0x40,
- .clr_ofs = 0x44,
- .sta_ofs = 0x48,
-};
-
static const struct mtk_gate_regs top0_cg_regs = {
.set_ofs = 0x120,
.clr_ofs = 0x120,
@@ -328,40 +239,6 @@ static const struct mtk_gate_regs peri1_cg_regs = {
.sta_ofs = 0x1C,
};
-static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0,
- PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0,
- HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0,
- HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
- PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0,
- 0, 21, 0x0300, 1, 0, 0x0304, 0),
- PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0,
- 0, 21, 0x0314, 1, 0, 0x0318, 0),
- PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0,
- 0, 31, 0x0324, 1, 0, 0x0328, 0),
- PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0,
- 0, 31, 0x0334, 1, 0, 0x0338, 0),
- PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0,
- 0, 21, 0x0344, 1, 0, 0x0348, 0),
- PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0,
- 0, 21, 0x0358, 1, 0, 0x035C, 0),
-};
-
-static const struct mtk_gate apmixed_clks[] = {
- GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
-};
-
-static const struct mtk_gate infra_clks[] = {
- GATE_INFRA(CLK_INFRA_DBGCLK_PD, "infra_dbgclk_pd", "axi_sel", 0),
- GATE_INFRA(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 2),
- GATE_INFRA(CLK_INFRA_AUDIO_PD, "infra_audio_pd", "aud_intbus_sel", 5),
- GATE_INFRA(CLK_INFRA_IRRX_PD, "infra_irrx_pd", "irrx_sel", 16),
- GATE_INFRA(CLK_INFRA_APXGPT_PD, "infra_apxgpt_pd", "f10m_ref_sel", 18),
- GATE_INFRA(CLK_INFRA_PMIC_PD, "infra_pmic_pd", "pmicspi_sel", 22),
-};
-
static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_TO_U2_PHY, "to_u2_phy", "clkxtal",
31250000),
@@ -485,7 +362,7 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
- GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+ GATE_PERI0_AO(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
@@ -506,19 +383,14 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI1(CLK_PERI_IRTX_PD, "peri_irtx_pd", "irtx_sel", 2),
};
-static struct mtk_composite infra_muxes[] = {
- MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents,
- 0x000, 2, 2),
-};
-
static struct mtk_composite top_muxes[] = {
/* CLK_CFG_0 */
- MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
- 0x040, 0, 3, 7),
- MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
- 0x040, 8, 1, 15),
- MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
- 0x040, 16, 1, 23),
+ MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0, 3, 7, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x040, 8, 1, 15, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x040, 16, 1, 23, CLK_IS_CRITICAL),
MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
0x040, 24, 3, 31),
@@ -610,180 +482,53 @@ static struct mtk_composite peri_muxes[] = {
MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
};
-static u16 infrasys_rst_ofs[] = { 0x30, };
static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
-static const struct mtk_clk_rst_desc clk_rst_desc[] = {
- /* infrasys */
- {
- .version = MTK_RST_SIMPLE,
- .rst_bank_ofs = infrasys_rst_ofs,
- .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
- },
- /* pericfg */
- {
- .version = MTK_RST_SIMPLE,
- .rst_bank_ofs = pericfg_rst_ofs,
- .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
- },
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
};
-static int mtk_topckgen_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- struct device_node *node = pdev->dev.of_node;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
-
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
- clk_data);
-
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
- base, &mt7622_clk_lock, clk_data);
-
- mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
- base, &mt7622_clk_lock, clk_data);
-
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
-
- clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
-
-static int mtk_infrasys_init(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
-
- mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
- clk_data);
- if (r)
- return r;
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
-
- return 0;
-}
-
-static int mtk_apmixedsys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
- clk_data);
-
- mtk_clk_register_gates(node, apmixed_clks,
- ARRAY_SIZE(apmixed_clks), clk_data);
-
- clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
-
-static int mtk_pericfg_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
-
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
-
- mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
- &mt7622_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- return r;
-
- clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
+static const struct mtk_clk_desc topck_desc = {
+ .clks = top_clks,
+ .num_clks = ARRAY_SIZE(top_clks),
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .divider_clks = top_adj_divs,
+ .num_divider_clks = ARRAY_SIZE(top_adj_divs),
+ .clk_lock = &mt7622_clk_lock,
+};
- return 0;
-}
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_clks,
+ .num_clks = ARRAY_SIZE(peri_clks),
+ .composite_clks = peri_muxes,
+ .num_composite_clks = ARRAY_SIZE(peri_muxes),
+ .rst_desc = &clk_rst_desc,
+ .clk_lock = &mt7622_clk_lock,
+};
static const struct of_device_id of_match_clk_mt7622[] = {
- {
- .compatible = "mediatek,mt7622-apmixedsys",
- .data = mtk_apmixedsys_init,
- }, {
- .compatible = "mediatek,mt7622-infracfg",
- .data = mtk_infrasys_init,
- }, {
- .compatible = "mediatek,mt7622-topckgen",
- .data = mtk_topckgen_init,
- }, {
- .compatible = "mediatek,mt7622-pericfg",
- .data = mtk_pericfg_init,
- }, {
- /* sentinel */
- }
-};
-
-static int clk_mt7622_probe(struct platform_device *pdev)
-{
- int (*clk_init)(struct platform_device *);
- int r;
-
- clk_init = of_device_get_match_data(&pdev->dev);
- if (!clk_init)
- return -EINVAL;
-
- r = clk_init(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+ { .compatible = "mediatek,mt7622-topckgen", .data = &topck_desc },
+ { .compatible = "mediatek,mt7622-pericfg", .data = &peri_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7622);
static struct platform_driver clk_mt7622_drv = {
- .probe = clk_mt7622_probe,
.driver = {
.name = "clk-mt7622",
.of_match_table = of_match_clk_mt7622,
},
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
};
+module_platform_driver(clk_mt7622_drv)
-static int clk_mt7622_init(void)
-{
- return platform_driver_register(&clk_mt7622_drv);
-}
-
-arch_initcall(clk_mt7622_init);
+MODULE_DESCRIPTION("MediaTek MT7622 clocks driver");
+MODULE_LICENSE("GPL");
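
[Editor's note] The MUX_GATE_FLAGS(..., CLK_IS_CRITICAL) annotations on axi_sel, mem_sel and ddrphycfg_sel, and GATE_PERI0_AO for the UART0 gate, replace the clk_prepare_enable() calls the deleted init functions made on those same clocks: the framework now keeps them running instead of the provider pinning them by hand, and every non-critical clock is enabled by its consumer as usual. For reference, the consumer side is the ordinary clk API; a minimal sketch against a hypothetical device:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	/* Hypothetical consumer: non-critical clocks get enabled here, not by
	 * the provider; CLK_IS_CRITICAL clocks stay on regardless. */
	static int example_consumer_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(&pdev->dev, NULL);	/* first entry in DT "clocks" */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* ... use the hardware ... */

		clk_disable_unprepare(clk);
		return 0;
	}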
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index 282dd6559465..1e1c77cc14ba 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -16,14 +16,8 @@
#include <dt-bindings/clock/mt7629-clk.h>
-#define GATE_ETH(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &eth_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate_regs eth_cg_regs = {
.set_ofs = 0x30,
@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
.sta_ofs = 0xE4,
};
-#define GATE_SGMII(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &sgmii_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate sgmii_clks[2][4] = {
{
@@ -92,7 +80,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
- mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ CLK_ETH_NR_CLK, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -114,8 +103,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
- mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
+ CLK_SGMII_NR_CLK, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -137,6 +126,7 @@ static const struct of_device_id of_match_clk_mt7629_eth[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7629_eth);
static int clk_mt7629_eth_probe(struct platform_device *pdev)
{
@@ -165,3 +155,4 @@ static struct platform_driver clk_mt7629_eth_drv = {
};
builtin_platform_driver(clk_mt7629_eth_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index 0c8b9e139789..c89036bee9a7 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -16,23 +16,11 @@
#include <dt-bindings/clock/mt7629-clk.h>
-#define GATE_PCIE(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &pcie_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_PCIE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-#define GATE_SSUSB(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ssusb_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SSUSB(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate_regs pcie_cg_regs = {
.set_ofs = 0x30,
@@ -79,86 +67,32 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(rst_ofs),
};
-static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
-
- mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return r;
-}
-
-static int clk_mt7629_pciesys_init(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
-
- mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+static const struct mtk_clk_desc ssusb_desc = {
+ .clks = ssusb_clks,
+ .num_clks = ARRAY_SIZE(ssusb_clks),
+ .rst_desc = &clk_rst_desc,
+};
- return r;
-}
+static const struct mtk_clk_desc pcie_desc = {
+ .clks = pcie_clks,
+ .num_clks = ARRAY_SIZE(pcie_clks),
+ .rst_desc = &clk_rst_desc,
+};
static const struct of_device_id of_match_clk_mt7629_hif[] = {
- {
- .compatible = "mediatek,mt7629-pciesys",
- .data = clk_mt7629_pciesys_init,
- }, {
- .compatible = "mediatek,mt7629-ssusbsys",
- .data = clk_mt7629_ssusbsys_init,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt7629-pciesys", .data = &pcie_desc },
+ { .compatible = "mediatek,mt7629-ssusbsys", .data = &ssusb_desc },
+ { /* sentinel */ }
};
-
-static int clk_mt7629_hif_probe(struct platform_device *pdev)
-{
- int (*clk_init)(struct platform_device *);
- int r;
-
- clk_init = of_device_get_match_data(&pdev->dev);
- if (!clk_init)
- return -EINVAL;
-
- r = clk_init(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7629_hif);
static struct platform_driver clk_mt7629_hif_drv = {
- .probe = clk_mt7629_hif_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7629-hif",
.of_match_table = of_match_clk_mt7629_hif,
},
};
-
-builtin_platform_driver(clk_mt7629_hif_drv);
+module_platform_driver(clk_mt7629_hif_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index e4a08c811adc..0893fbbb68cc 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -50,41 +50,17 @@
_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
NULL, "clk20m")
-#define GATE_APMIXED(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &apmixed_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-#define GATE_INFRA(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static DEFINE_SPINLOCK(mt7629_clk_lock);
@@ -588,8 +564,9 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
- base, &mt7629_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt7629_clk_lock, clk_data);
clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
@@ -605,11 +582,11 @@ static int mtk_infrasys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
- mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
- clk_data);
+ mtk_clk_register_cpumuxes(&pdev->dev, node, infra_muxes,
+ ARRAY_SIZE(infra_muxes), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
clk_data);
@@ -628,10 +605,11 @@ static int mtk_pericfg_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
- mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+ mtk_clk_register_composites(&pdev->dev, peri_muxes,
+ ARRAY_SIZE(peri_muxes), base,
&mt7629_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -655,7 +633,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
@@ -682,6 +660,7 @@ static const struct of_device_id of_match_clk_mt7629[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7629);
static int clk_mt7629_probe(struct platform_device *pdev)
{
@@ -715,3 +694,4 @@ static int clk_mt7629_init(void)
}
arch_initcall(clk_mt7629_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-apmixed.c b/drivers/clk/mediatek/clk-mt7981-apmixed.c
new file mode 100644
index 000000000000..875813d8b4a9
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7981-apmixed.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+#include <dt-bindings/clock/mediatek,mt7981-clk.h>
+#include <linux/clk.h>
+
+#define MT7981_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7981_RST_BAR BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ _div_table, _parent_name) \
+ { \
+ .id = _id, .name = _name, .reg = _reg, .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, .flags = _flags, \
+ .rst_bar_mask = CON0_MT7981_RST_BAR, .fmax = MT7981_PLL_FMAX, \
+ .pcwbits = _pcwbits, .pd_reg = _pd_reg, .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, \
+ _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) \
+ PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, NULL, \
+ "clkxtal")
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, PLL_AO,
+ 32, 0x0200, 4, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x00000001, 0, 32,
+ 0x0210, 4, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x00000001, 0, 32,
+ 0x0220, 4, 0, 0x0224, 0),
+ PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023C, 0x00000001, 0, 32,
+ 0x0230, 4, 0, 0x0234, 0),
+ PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024C, 0x00000001, 0, 32,
+ 0x0240, 4, 0, 0x0244, 0),
+ PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025C, 0x00000001, 0, 32,
+ 0x0250, 4, 0, 0x0254, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x00000001, 0, 32,
+ 0x0260, 4, 0, 0x0264, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x00000001, 0, 32,
+ 0x0278, 4, 0, 0x027C, 0),
+};
+
+static const struct of_device_id of_match_clk_mt7981_apmixed[] = {
+ { .compatible = "mediatek,mt7981-apmixedsys", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_apmixed);
+
+static int clk_mt7981_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(plls));
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_apmixed_data;
+ }
+ return r;
+
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt7981_apmixed_drv = {
+ .probe = clk_mt7981_apmixed_probe,
+ .driver = {
+ .name = "clk-mt7981-apmixed",
+ .of_match_table = of_match_clk_mt7981_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt7981_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-eth.c b/drivers/clk/mediatek/clk-mt7981-eth.c
new file mode 100644
index 000000000000..b1f256b5ed4e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7981-eth.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mediatek,mt7981-clk.h>
+
+static const struct mtk_gate_regs sgmii0_cg_regs = {
+ .set_ofs = 0xE4,
+ .clr_ofs = 0xE4,
+ .sta_ofs = 0xE4,
+};
+
+#define GATE_SGMII0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &sgmii0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii0_clks[] __initconst = {
+ GATE_SGMII0(CLK_SGM0_TX_EN, "sgm0_tx_en", "usb_tx250m", 2),
+ GATE_SGMII0(CLK_SGM0_RX_EN, "sgm0_rx_en", "usb_eq_rx250m", 3),
+ GATE_SGMII0(CLK_SGM0_CK0_EN, "sgm0_ck0_en", "usb_ln0", 4),
+ GATE_SGMII0(CLK_SGM0_CDR_CK0_EN, "sgm0_cdr_ck0_en", "usb_cdr", 5),
+};
+
+static const struct mtk_gate_regs sgmii1_cg_regs = {
+ .set_ofs = 0xE4,
+ .clr_ofs = 0xE4,
+ .sta_ofs = 0xE4,
+};
+
+#define GATE_SGMII1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &sgmii1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii1_clks[] __initconst = {
+ GATE_SGMII1(CLK_SGM1_TX_EN, "sgm1_tx_en", "usb_tx250m", 2),
+ GATE_SGMII1(CLK_SGM1_RX_EN, "sgm1_rx_en", "usb_eq_rx250m", 3),
+ GATE_SGMII1(CLK_SGM1_CK1_EN, "sgm1_ck1_en", "usb_ln0", 4),
+ GATE_SGMII1(CLK_SGM1_CDR_CK1_EN, "sgm1_cdr_ck1_en", "usb_cdr", 5),
+};
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+#define GATE_ETH(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &eth_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate eth_clks[] __initconst = {
+ GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x", 6),
+ GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "sgm_325m", 7),
+ GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "sgm_325m", 8),
+ GATE_ETH(CLK_ETH_WOCPU0_EN, "eth_wocpu0_en", "netsys_wed_mcu", 15),
+};
+
+static const struct mtk_clk_desc eth_desc = {
+ .clks = eth_clks,
+ .num_clks = ARRAY_SIZE(eth_clks),
+};
+
+static const struct mtk_clk_desc sgmii0_desc = {
+ .clks = sgmii0_clks,
+ .num_clks = ARRAY_SIZE(sgmii0_clks),
+};
+
+static const struct mtk_clk_desc sgmii1_desc = {
+ .clks = sgmii1_clks,
+ .num_clks = ARRAY_SIZE(sgmii1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt7981_eth[] = {
+ { .compatible = "mediatek,mt7981-ethsys", .data = &eth_desc },
+ { .compatible = "mediatek,mt7981-sgmiisys_0", .data = &sgmii0_desc },
+ { .compatible = "mediatek,mt7981-sgmiisys_1", .data = &sgmii1_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_eth);
+
+static struct platform_driver clk_mt7981_eth_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt7981-eth",
+ .of_match_table = of_match_clk_mt7981_eth,
+ },
+};
+module_platform_driver(clk_mt7981_eth_drv);
+MODULE_LICENSE("GPL");
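
[Editor's note] In the mt7981 eth and sgmii banks above, set_ofs, clr_ofs and sta_ofs all point at the same word (0x30 and 0xE4), which is why the no_setclr ops are used: there is no separate set/clear register pair, so enabling is a read-modify-write, and the _inv suffix marks the bit as an enable bit (set = running) rather than a gate bit. A rough, hedged sketch of what such an enable presumably boils down to; the real ops live in clk-gate.c and go through struct clk_hw and the gate's regmap:

	#include <linux/bits.h>
	#include <linux/regmap.h>

	/* Illustrative only: set the enable bit in place in the status word. */
	static int example_no_setclr_inv_enable(struct regmap *regmap, u32 sta_ofs,
						u8 shift)
	{
		return regmap_update_bits(regmap, sta_ofs, BIT(shift), BIT(shift));
	}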
diff --git a/drivers/clk/mediatek/clk-mt7981-infracfg.c b/drivers/clk/mediatek/clk-mt7981-infracfg.c
new file mode 100644
index 000000000000..293261ef71e6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7981-infracfg.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mediatek,mt7981-clk.h>
+#include <linux/clk.h>
+
+static DEFINE_SPINLOCK(mt7981_clk_lock);
+
+static const struct mtk_fixed_factor infra_divs[] = {
+ FACTOR(CLK_INFRA_66M_MCK, "infra_66m_mck", "sysaxi_sel", 1, 2),
+};
+
+static const char *const infra_uart_parent[] __initconst = { "csw_f26m_sel",
+ "uart_sel" };
+
+static const char *const infra_spi0_parents[] __initconst = { "i2c_sel",
+ "spi_sel" };
+
+static const char *const infra_spi1_parents[] __initconst = { "i2c_sel",
+ "spim_mst_sel" };
+
+static const char *const infra_pwm1_parents[] __initconst = { "pwm_sel" };
+
+static const char *const infra_pwm_bsel_parents[] __initconst = {
+ "cb_rtc_32p7k", "csw_f26m_sel", "infra_66m_mck", "pwm_sel"
+};
+
+static const char *const infra_pcie_parents[] __initconst = {
+ "cb_rtc_32p7k", "csw_f26m_sel", "cb_cksq_40m", "pextp_tl_ck_sel"
+};
+
+static const struct mtk_mux infra_muxes[] = {
+ /* MODULE_CLK_SEL_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART0_SEL, "infra_uart0_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 0, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART1_SEL, "infra_uart1_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 1, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART2_SEL, "infra_uart2_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 2, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI0_SEL, "infra_spi0_sel",
+ infra_spi0_parents, 0x0018, 0x0010, 0x0014, 4, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI1_SEL, "infra_spi1_sel",
+ infra_spi1_parents, 0x0018, 0x0010, 0x0014, 5, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI2_SEL, "infra_spi2_sel",
+ infra_spi0_parents, 0x0018, 0x0010, 0x0014, 6, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM1_SEL, "infra_pwm1_sel",
+ infra_pwm1_parents, 0x0018, 0x0010, 0x0014, 9, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM2_SEL, "infra_pwm2_sel",
+ infra_pwm1_parents, 0x0018, 0x0010, 0x0014, 11, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM3_SEL, "infra_pwm3_sel",
+ infra_pwm1_parents, 0x0018, 0x0010, 0x0014, 15, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_BSEL, "infra_pwm_bsel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 13,
+ 2, -1, -1, -1),
+ /* MODULE_CLK_SEL_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_SEL, "infra_pcie_sel",
+ infra_pcie_parents, 0x0028, 0x0020, 0x0024, 0, 2,
+ -1, -1, -1),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x50,
+ .clr_ofs = 0x54,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0x60,
+ .clr_ofs = 0x64,
+ .sta_ofs = 0x68,
+};
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra0_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra1_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra2_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_GPT_STA, "infra_gpt_sta", "infra_66m_mck", 0),
+ GATE_INFRA0(CLK_INFRA_PWM_HCK, "infra_pwm_hck", "infra_66m_mck", 1),
+ GATE_INFRA0(CLK_INFRA_PWM_STA, "infra_pwm_sta", "infra_pwm_bsel", 2),
+ GATE_INFRA0(CLK_INFRA_PWM1_CK, "infra_pwm1", "infra_pwm1_sel", 3),
+ GATE_INFRA0(CLK_INFRA_PWM2_CK, "infra_pwm2", "infra_pwm2_sel", 4),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_CK, "infra_cq_dma", "sysaxi", 6),
+
+ GATE_INFRA0(CLK_INFRA_AUD_BUS_CK, "infra_aud_bus", "sysaxi", 8),
+ GATE_INFRA0(CLK_INFRA_AUD_26M_CK, "infra_aud_26m", "csw_f26m_sel", 9),
+ GATE_INFRA0(CLK_INFRA_AUD_L_CK, "infra_aud_l", "aud_l", 10),
+ GATE_INFRA0(CLK_INFRA_AUD_AUD_CK, "infra_aud_aud", "a1sys", 11),
+ GATE_INFRA0(CLK_INFRA_AUD_EG2_CK, "infra_aud_eg2", "a_tuner", 13),
+ GATE_INFRA0(CLK_INFRA_DRAMC_26M_CK, "infra_dramc_26m", "csw_f26m_sel",
+ 14),
+ GATE_INFRA0(CLK_INFRA_DBG_CK, "infra_dbg", "infra_66m_mck", 15),
+ GATE_INFRA0(CLK_INFRA_AP_DMA_CK, "infra_ap_dma", "infra_66m_mck", 16),
+ GATE_INFRA0(CLK_INFRA_SEJ_CK, "infra_sej", "infra_66m_mck", 24),
+ GATE_INFRA0(CLK_INFRA_SEJ_13M_CK, "infra_sej_13m", "csw_f26m_sel", 25),
+ GATE_INFRA0(CLK_INFRA_PWM3_CK, "infra_pwm3", "infra_pwm3_sel", 27),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_THERM_CK, "infra_therm", "csw_f26m_sel", 0),
+ GATE_INFRA1(CLK_INFRA_I2C0_CK, "infra_i2c0", "i2c_bck", 1),
+ GATE_INFRA1(CLK_INFRA_UART0_CK, "infra_uart0", "infra_uart0_sel", 2),
+ GATE_INFRA1(CLK_INFRA_UART1_CK, "infra_uart1", "infra_uart1_sel", 3),
+ GATE_INFRA1(CLK_INFRA_UART2_CK, "infra_uart2", "infra_uart2_sel", 4),
+ GATE_INFRA1(CLK_INFRA_SPI2_CK, "infra_spi2", "infra_spi2_sel", 6),
+ GATE_INFRA1(CLK_INFRA_SPI2_HCK_CK, "infra_spi2_hck", "infra_66m_mck", 7),
+ GATE_INFRA1(CLK_INFRA_NFI1_CK, "infra_nfi1", "nfi1x", 8),
+ GATE_INFRA1(CLK_INFRA_SPINFI1_CK, "infra_spinfi1", "spinfi_bck", 9),
+ GATE_INFRA1(CLK_INFRA_NFI_HCK_CK, "infra_nfi_hck", "infra_66m_mck", 10),
+ GATE_INFRA1(CLK_INFRA_SPI0_CK, "infra_spi0", "infra_spi0_sel", 11),
+ GATE_INFRA1(CLK_INFRA_SPI1_CK, "infra_spi1", "infra_spi1_sel", 12),
+ GATE_INFRA1(CLK_INFRA_SPI0_HCK_CK, "infra_spi0_hck", "infra_66m_mck",
+ 13),
+ GATE_INFRA1(CLK_INFRA_SPI1_HCK_CK, "infra_spi1_hck", "infra_66m_mck",
+ 14),
+ GATE_INFRA1(CLK_INFRA_FRTC_CK, "infra_frtc", "cb_rtc_32k", 15),
+ GATE_INFRA1(CLK_INFRA_MSDC_CK, "infra_msdc", "emmc_400m", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC_HCK_CK, "infra_msdc_hck", "emmc_208m", 17),
+ GATE_INFRA1(CLK_INFRA_MSDC_133M_CK, "infra_msdc_133m", "sysaxi", 18),
+ GATE_INFRA1(CLK_INFRA_MSDC_66M_CK, "infra_msdc_66m", "sysaxi", 19),
+ GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "infra_adc_frc", 20),
+ GATE_INFRA1(CLK_INFRA_ADC_FRC_CK, "infra_adc_frc", "csw_f26m", 21),
+ GATE_INFRA1(CLK_INFRA_FBIST2FPC_CK, "infra_fbist2fpc", "nfi1x", 23),
+ GATE_INFRA1(CLK_INFRA_I2C_MCK_CK, "infra_i2c_mck", "sysaxi", 25),
+ GATE_INFRA1(CLK_INFRA_I2C_PCK_CK, "infra_i2c_pck", "infra_66m_mck", 26),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_IUSB_133_CK, "infra_iusb_133", "sysaxi", 0),
+ GATE_INFRA2(CLK_INFRA_IUSB_66M_CK, "infra_iusb_66m", "sysaxi", 1),
+ GATE_INFRA2(CLK_INFRA_IUSB_SYS_CK, "infra_iusb_sys", "u2u3_sys", 2),
+ GATE_INFRA2(CLK_INFRA_IUSB_CK, "infra_iusb", "u2u3_ref", 3),
+ GATE_INFRA2(CLK_INFRA_IPCIE_CK, "infra_ipcie", "pextp_tl", 12),
+ GATE_INFRA2(CLK_INFRA_IPCIE_PIPE_CK, "infra_ipcie_pipe", "cb_cksq_40m",
+ 13),
+ GATE_INFRA2(CLK_INFRA_IPCIER_CK, "infra_ipcier", "csw_f26m", 14),
+ GATE_INFRA2(CLK_INFRA_IPCIEB_CK, "infra_ipcieb", "sysaxi", 15),
+};
+
+static const struct mtk_clk_desc infracfg_desc = {
+ .factor_clks = infra_divs,
+ .num_factor_clks = ARRAY_SIZE(infra_divs),
+ .mux_clks = infra_muxes,
+ .num_mux_clks = ARRAY_SIZE(infra_muxes),
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .clk_lock = &mt7981_clk_lock,
+};
+
+static const struct of_device_id of_match_clk_mt7981_infracfg[] = {
+ { .compatible = "mediatek,mt7981-infracfg", .data = &infracfg_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_infracfg);
+
+static struct platform_driver clk_mt7981_infracfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt7981-infracfg",
+ .of_match_table = of_match_clk_mt7981_infracfg,
+ },
+};
+module_platform_driver(clk_mt7981_infracfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
new file mode 100644
index 000000000000..3aba1a9b9a36
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ */
+
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mediatek,mt7981-clk.h>
+#include <linux/clk.h>
+
+static DEFINE_SPINLOCK(mt7981_clk_lock);
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_CB_CKSQ_40M, "cb_cksq_40m", "clkxtal", 1, 1),
+ FACTOR(CLK_TOP_CB_M_416M, "cb_m_416m", "mpll", 1, 1),
+ FACTOR(CLK_TOP_CB_M_D2, "cb_m_d2", "mpll", 1, 2),
+ FACTOR(CLK_TOP_CB_M_D3, "cb_m_d3", "mpll", 1, 3),
+ FACTOR(CLK_TOP_M_D3_D2, "m_d3_d2", "mpll", 1, 2),
+ FACTOR(CLK_TOP_CB_M_D4, "cb_m_d4", "mpll", 1, 4),
+ FACTOR(CLK_TOP_CB_M_D8, "cb_m_d8", "mpll", 1, 8),
+ FACTOR(CLK_TOP_M_D8_D2, "m_d8_d2", "mpll", 1, 16),
+ FACTOR(CLK_TOP_CB_MM_720M, "cb_mm_720m", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_CB_MM_D2, "cb_mm_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_CB_MM_D3, "cb_mm_d3", "mmpll", 1, 3),
+ FACTOR(CLK_TOP_CB_MM_D3_D5, "cb_mm_d3_d5", "mmpll", 1, 15),
+ FACTOR(CLK_TOP_CB_MM_D4, "cb_mm_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_CB_MM_D6, "cb_mm_d6", "mmpll", 1, 6),
+ FACTOR(CLK_TOP_MM_D6_D2, "mm_d6_d2", "mmpll", 1, 12),
+ FACTOR(CLK_TOP_CB_MM_D8, "cb_mm_d8", "mmpll", 1, 8),
+ FACTOR(CLK_TOP_CB_APLL2_196M, "cb_apll2_196m", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_NET1_2500M, "net1_2500m", "net1pll", 1, 1),
+ FACTOR(CLK_TOP_CB_NET1_D4, "cb_net1_d4", "net1pll", 1, 4),
+ FACTOR(CLK_TOP_CB_NET1_D5, "cb_net1_d5", "net1pll", 1, 5),
+ FACTOR(CLK_TOP_NET1_D5_D2, "net1_d5_d2", "net1pll", 1, 10),
+ FACTOR(CLK_TOP_NET1_D5_D4, "net1_d5_d4", "net1pll", 1, 20),
+ FACTOR(CLK_TOP_CB_NET1_D8, "cb_net1_d8", "net1pll", 1, 8),
+ FACTOR(CLK_TOP_NET1_D8_D2, "net1_d8_d2", "net1pll", 1, 16),
+ FACTOR(CLK_TOP_NET1_D8_D4, "net1_d8_d4", "net1pll", 1, 32),
+ FACTOR(CLK_TOP_CB_NET2_800M, "cb_net2_800m", "net2pll", 1, 1),
+ FACTOR(CLK_TOP_CB_NET2_D2, "cb_net2_d2", "net2pll", 1, 2),
+ FACTOR(CLK_TOP_CB_NET2_D4, "cb_net2_d4", "net2pll", 1, 4),
+ FACTOR(CLK_TOP_NET2_D4_D2, "net2_d4_d2", "net2pll", 1, 8),
+ FACTOR(CLK_TOP_NET2_D4_D4, "net2_d4_d4", "net2pll", 1, 16),
+ FACTOR(CLK_TOP_CB_NET2_D6, "cb_net2_d6", "net2pll", 1, 6),
+ FACTOR(CLK_TOP_CB_WEDMCU_208M, "cb_wedmcu_208m", "wedmcupll", 1, 1),
+ FACTOR(CLK_TOP_CB_SGM_325M, "cb_sgm_325m", "sgmpll", 1, 1),
+ FACTOR(CLK_TOP_CKSQ_40M_D2, "cksq_40m_d2", "cb_cksq_40m", 1, 2),
+ FACTOR(CLK_TOP_CB_RTC_32K, "cb_rtc_32k", "cb_cksq_40m", 1, 1250),
+ FACTOR(CLK_TOP_CB_RTC_32P7K, "cb_rtc_32p7k", "cb_cksq_40m", 1, 1220),
+ FACTOR(CLK_TOP_USB_TX250M, "usb_tx250m", "cb_cksq_40m", 1, 1),
+ FACTOR(CLK_TOP_FAUD, "faud", "aud_sel", 1, 1),
+ FACTOR(CLK_TOP_NFI1X, "nfi1x", "nfi1x_sel", 1, 1),
+ FACTOR(CLK_TOP_USB_EQ_RX250M, "usb_eq_rx250m", "cb_cksq_40m", 1, 1),
+ FACTOR(CLK_TOP_USB_CDR_CK, "usb_cdr", "cb_cksq_40m", 1, 1),
+ FACTOR(CLK_TOP_USB_LN0_CK, "usb_ln0", "cb_cksq_40m", 1, 1),
+ FACTOR(CLK_TOP_SPINFI_BCK, "spinfi_bck", "spinfi_sel", 1, 1),
+ FACTOR(CLK_TOP_SPI, "spi", "spi_sel", 1, 1),
+ FACTOR(CLK_TOP_SPIM_MST, "spim_mst", "spim_mst_sel", 1, 1),
+ FACTOR(CLK_TOP_UART_BCK, "uart_bck", "uart_sel", 1, 1),
+ FACTOR(CLK_TOP_PWM_BCK, "pwm_bck", "pwm_sel", 1, 1),
+ FACTOR(CLK_TOP_I2C_BCK, "i2c_bck", "i2c_sel", 1, 1),
+ FACTOR(CLK_TOP_PEXTP_TL, "pextp_tl", "pextp_tl_ck_sel", 1, 1),
+ FACTOR(CLK_TOP_EMMC_208M, "emmc_208m", "emmc_208m_sel", 1, 1),
+ FACTOR(CLK_TOP_EMMC_400M, "emmc_400m", "emmc_400m_sel", 1, 1),
+ FACTOR(CLK_TOP_DRAMC_REF, "dramc_ref", "dramc_sel", 1, 1),
+ FACTOR(CLK_TOP_DRAMC_MD32, "dramc_md32", "dramc_md32_sel", 1, 1),
+ FACTOR(CLK_TOP_SYSAXI, "sysaxi", "sysaxi_sel", 1, 1),
+ FACTOR(CLK_TOP_SYSAPB, "sysapb", "sysapb_sel", 1, 1),
+ FACTOR(CLK_TOP_ARM_DB_MAIN, "arm_db_main", "arm_db_main_sel", 1, 1),
+ FACTOR(CLK_TOP_AP2CNN_HOST, "ap2cnn_host", "ap2cnn_host_sel", 1, 1),
+ FACTOR(CLK_TOP_NETSYS, "netsys", "netsys_sel", 1, 1),
+ FACTOR(CLK_TOP_NETSYS_500M, "netsys_500m", "netsys_500m_sel", 1, 1),
+ FACTOR(CLK_TOP_NETSYS_WED_MCU, "netsys_wed_mcu", "netsys_mcu_sel", 1, 1),
+ FACTOR(CLK_TOP_NETSYS_2X, "netsys_2x", "netsys_2x_sel", 1, 1),
+ FACTOR(CLK_TOP_SGM_325M, "sgm_325m", "sgm_325m_sel", 1, 1),
+ FACTOR(CLK_TOP_SGM_REG, "sgm_reg", "sgm_reg_sel", 1, 1),
+ FACTOR(CLK_TOP_F26M, "csw_f26m", "csw_f26m_sel", 1, 1),
+ FACTOR(CLK_TOP_EIP97B, "eip97b", "eip97b_sel", 1, 1),
+ FACTOR(CLK_TOP_USB3_PHY, "usb3_phy", "usb3_phy_sel", 1, 1),
+ FACTOR(CLK_TOP_AUD, "aud", "faud", 1, 1),
+ FACTOR(CLK_TOP_A1SYS, "a1sys", "a1sys_sel", 1, 1),
+ FACTOR(CLK_TOP_AUD_L, "aud_l", "aud_l_sel", 1, 1),
+ FACTOR(CLK_TOP_A_TUNER, "a_tuner", "a_tuner_sel", 1, 1),
+ FACTOR(CLK_TOP_U2U3_REF, "u2u3_ref", "u2u3_sel", 1, 1),
+ FACTOR(CLK_TOP_U2U3_SYS, "u2u3_sys", "u2u3_sys_sel", 1, 1),
+ FACTOR(CLK_TOP_U2U3_XHCI, "u2u3_xhci", "u2u3_xhci_sel", 1, 1),
+ FACTOR(CLK_TOP_USB_FRMCNT, "usb_frmcnt", "usb_frmcnt_sel", 1, 1),
+};
+
+static const char * const nfi1x_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_mm_d4",
+ "net1_d8_d2",
+ "cb_net2_d6",
+ "cb_m_d4",
+ "cb_mm_d8",
+ "net1_d8_d4",
+ "cb_m_d8"
+};
+
+static const char * const spinfi_parents[] __initconst = {
+ "cksq_40m_d2",
+ "cb_cksq_40m",
+ "net1_d5_d4",
+ "cb_m_d4",
+ "cb_mm_d8",
+ "net1_d8_d4",
+ "mm_d6_d2",
+ "cb_m_d8"
+};
+
+static const char * const spi_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_m_d2",
+ "cb_mm_d4",
+ "net1_d8_d2",
+ "cb_net2_d6",
+ "net1_d5_d4",
+ "cb_m_d4",
+ "net1_d8_d4"
+};
+
+static const char * const uart_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_m_d8",
+ "m_d8_d2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+ "cb_cksq_40m",
+ "net1_d8_d2",
+ "net1_d5_d4",
+ "cb_m_d4",
+ "m_d8_d2",
+ "cb_rtc_32k"
+};
+
+static const char * const i2c_parents[] __initconst = {
+ "cb_cksq_40m",
+ "net1_d5_d4",
+ "cb_m_d4",
+ "net1_d8_d4"
+};
+
+static const char * const pextp_tl_ck_parents[] __initconst = {
+ "cb_cksq_40m",
+ "net1_d5_d4",
+ "cb_m_d4",
+ "cb_rtc_32k"
+};
+
+static const char * const emmc_208m_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_m_d2",
+ "cb_net2_d4",
+ "cb_apll2_196m",
+ "cb_mm_d4",
+ "net1_d8_d2",
+ "cb_mm_d6"
+};
+
+static const char * const emmc_400m_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_net2_d2",
+ "cb_mm_d2",
+ "cb_net2_d2"
+};
+
+static const char * const csw_f26m_parents[] __initconst = {
+ "cksq_40m_d2",
+ "m_d8_d2"
+};
+
+static const char * const dramc_md32_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_m_d2",
+ "cb_wedmcu_208m"
+};
+
+static const char * const sysaxi_parents[] __initconst = {
+ "cb_cksq_40m",
+ "net1_d8_d2"
+};
+
+static const char * const sysapb_parents[] __initconst = {
+ "cb_cksq_40m",
+ "m_d3_d2"
+};
+
+static const char * const arm_db_main_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_net2_d6"
+};
+
+static const char * const ap2cnn_host_parents[] __initconst = {
+ "cb_cksq_40m",
+ "net1_d8_d4"
+};
+
+static const char * const netsys_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_mm_d2"
+};
+
+static const char * const netsys_500m_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_net1_d5"
+};
+
+static const char * const netsys_mcu_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_mm_720m",
+ "cb_net1_d4",
+ "cb_net1_d5",
+ "cb_m_416m"
+};
+
+static const char * const netsys_2x_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_net2_800m",
+ "cb_mm_720m"
+};
+
+static const char * const sgm_325m_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_sgm_325m"
+};
+
+static const char * const sgm_reg_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_net2_d4"
+};
+
+static const char * const eip97b_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_net1_d5",
+ "cb_m_416m",
+ "cb_mm_d2",
+ "net1_d5_d2"
+};
+
+static const char * const aud_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_apll2_196m"
+};
+
+static const char * const a1sys_parents[] __initconst = {
+ "cb_cksq_40m",
+ "apll2_d4"
+};
+
+static const char * const aud_l_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_apll2_196m",
+ "m_d8_d2"
+};
+
+static const char * const a_tuner_parents[] __initconst = {
+ "cb_cksq_40m",
+ "apll2_d4",
+ "m_d8_d2"
+};
+
+static const char * const u2u3_parents[] __initconst = {
+ "cb_cksq_40m",
+ "m_d8_d2"
+};
+
+static const char * const u2u3_sys_parents[] __initconst = {
+ "cb_cksq_40m",
+ "net1_d5_d4"
+};
+
+static const char * const usb_frmcnt_parents[] __initconst = {
+ "cb_cksq_40m",
+ "cb_mm_d3_d5"
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI1X_SEL, "nfi1x_sel", nfi1x_parents,
+ 0x000, 0x004, 0x008, 0, 3, 7, 0x1C0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINFI_SEL, "spinfi_sel", spinfi_parents,
+ 0x000, 0x004, 0x008, 8, 3, 15, 0x1C0, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents,
+ 0x000, 0x004, 0x008, 16, 3, 23, 0x1C0, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPIM_MST_SEL, "spim_mst_sel", spi_parents,
+ 0x000, 0x004, 0x008, 24, 3, 31, 0x1C0, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ 0x010, 0x014, 0x018, 0, 2, 7, 0x1C0, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x010, 0x014, 0x018, 8, 3, 15, 0x1C0, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents,
+ 0x010, 0x014, 0x018, 16, 2, 23, 0x1C0, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_SEL, "pextp_tl_ck_sel",
+ pextp_tl_ck_parents, 0x010, 0x014, 0x018, 24, 2, 31,
+ 0x1C0, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_208M_SEL, "emmc_208m_sel",
+ emmc_208m_parents, 0x020, 0x024, 0x028, 0, 3, 7,
+ 0x1C0, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_400M_SEL, "emmc_400m_sel",
+ emmc_400m_parents, 0x020, 0x024, 0x028, 8, 2, 15,
+ 0x1C0, 9),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_F26M_SEL, "csw_f26m_sel",
+ csw_f26m_parents, 0x020, 0x024, 0x028, 16, 1, 23,
+ 0x1C0, 10,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DRAMC_SEL, "dramc_sel",
+ csw_f26m_parents, 0x020, 0x024, 0x028, 24, 1,
+ 31, 0x1C0, 11,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DRAMC_MD32_SEL, "dramc_md32_sel",
+ dramc_md32_parents, 0x030, 0x034, 0x038, 0, 2,
+ 7, 0x1C0, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SYSAXI_SEL, "sysaxi_sel",
+ sysaxi_parents, 0x030, 0x034, 0x038, 8, 1, 15,
+ 0x1C0, 13,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SYSAPB_SEL, "sysapb_sel",
+ sysapb_parents, 0x030, 0x034, 0x038, 16, 1,
+ 23, 0x1C0, 14,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ARM_DB_MAIN_SEL, "arm_db_main_sel",
+ arm_db_main_parents, 0x030, 0x034, 0x038, 24, 1, 31,
+ 0x1C0, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AP2CNN_HOST_SEL, "ap2cnn_host_sel",
+ ap2cnn_host_parents, 0x040, 0x044, 0x048, 0, 1, 7,
+ 0x1C0, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_SEL, "netsys_sel", netsys_parents,
+ 0x040, 0x044, 0x048, 8, 1, 15, 0x1C0, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_500M_SEL, "netsys_500m_sel",
+ netsys_500m_parents, 0x040, 0x044, 0x048, 16, 1, 23,
+ 0x1C0, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_MCU_SEL, "netsys_mcu_sel",
+ netsys_mcu_parents, 0x040, 0x044, 0x048, 24, 3, 31,
+ 0x1C0, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_2X_SEL, "netsys_2x_sel",
+ netsys_2x_parents, 0x050, 0x054, 0x058, 0, 2, 7,
+ 0x1C0, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
+ sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
+ 0x1C0, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
+ 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP97B_SEL, "eip97b_sel", eip97b_parents,
+ 0x050, 0x054, 0x058, 24, 3, 31, 0x1C0, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB3_PHY_SEL, "usb3_phy_sel",
+ csw_f26m_parents, 0x060, 0x064, 0x068, 0, 1,
+ 7, 0x1C0, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_SEL, "aud_sel", aud_parents, 0x060,
+ 0x064, 0x068, 8, 1, 15, 0x1C0, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_SEL, "a1sys_sel", a1sys_parents,
+ 0x060, 0x064, 0x068, 16, 1, 23, 0x1C0, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_L_SEL, "aud_l_sel", aud_l_parents,
+ 0x060, 0x064, 0x068, 24, 2, 31, 0x1C0, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A_TUNER_SEL, "a_tuner_sel",
+ a_tuner_parents, 0x070, 0x074, 0x078, 0, 2, 7,
+ 0x1C0, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_SEL, "u2u3_sel", u2u3_parents, 0x070,
+ 0x074, 0x078, 8, 1, 15, 0x1C0, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_SYS_SEL, "u2u3_sys_sel",
+ u2u3_sys_parents, 0x070, 0x074, 0x078, 16, 1, 23,
+ 0x1C0, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_XHCI_SEL, "u2u3_xhci_sel",
+ u2u3_sys_parents, 0x070, 0x074, 0x078, 24, 1, 31,
+ 0x1C4, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_FRMCNT_SEL, "usb_frmcnt_sel",
+ usb_frmcnt_parents, 0x080, 0x084, 0x088, 0, 1, 7,
+ 0x1C4, 1),
+};
+
+static struct mtk_composite top_aud_divs[] = {
+ DIV_GATE(CLK_TOP_AUD_I2S_M, "aud_i2s_m", "aud",
+ 0x0420, 0, 0x0420, 8, 8),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs),
+ .clk_lock = &mt7981_clk_lock,
+};
+
+static const struct of_device_id of_match_clk_mt7981_topckgen[] = {
+ { .compatible = "mediatek,mt7981-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_topckgen);
+
+static struct platform_driver clk_mt7981_topckgen_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt7981-topckgen",
+ .of_match_table = of_match_clk_mt7981_topckgen,
+ },
+};
+module_platform_driver(clk_mt7981_topckgen_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c
index 62080ee4dbe3..6767e9c43886 100644
--- a/drivers/clk/mediatek/clk-mt7986-apmixed.c
+++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c
@@ -42,7 +42,7 @@
"clkxtal")
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x0, 0, 32,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x0, PLL_AO, 32,
0x0200, 4, 0, 0x0204, 0),
PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x0, 0, 32,
0x0210, 4, 0, 0x0214, 0),
@@ -62,8 +62,9 @@ static const struct mtk_pll_data plls[] = {
static const struct of_device_id of_match_clk_mt7986_apmixed[] = {
{ .compatible = "mediatek,mt7986-apmixedsys", },
- {}
+ { }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_apmixed);
static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
{
@@ -77,8 +78,6 @@ static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
-
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
pr_err("%s(): could not register clock provider: %d\n",
@@ -100,3 +99,4 @@ static struct platform_driver clk_mt7986_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt7986_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index 7868c0728e96..0681988960cc 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -22,14 +22,10 @@ static const struct mtk_gate_regs sgmii0_cg_regs = {
.sta_ofs = 0xe4,
};
-#define GATE_SGMII0(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &sgmii0_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
-
-static const struct mtk_gate sgmii0_clks[] __initconst = {
+#define GATE_SGMII0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate sgmii0_clks[] = {
GATE_SGMII0(CLK_SGMII0_TX250M_EN, "sgmii0_tx250m_en", "top_xtal", 2),
GATE_SGMII0(CLK_SGMII0_RX250M_EN, "sgmii0_rx250m_en", "top_xtal", 3),
GATE_SGMII0(CLK_SGMII0_CDR_REF, "sgmii0_cdr_ref", "top_xtal", 4),
@@ -42,14 +38,10 @@ static const struct mtk_gate_regs sgmii1_cg_regs = {
.sta_ofs = 0xe4,
};
-#define GATE_SGMII1(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &sgmii1_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-static const struct mtk_gate sgmii1_clks[] __initconst = {
+static const struct mtk_gate sgmii1_clks[] = {
GATE_SGMII1(CLK_SGMII1_TX250M_EN, "sgmii1_tx250m_en", "top_xtal", 2),
GATE_SGMII1(CLK_SGMII1_RX250M_EN, "sgmii1_rx250m_en", "top_xtal", 3),
GATE_SGMII1(CLK_SGMII1_CDR_REF, "sgmii1_cdr_ref", "top_xtal", 4),
@@ -62,14 +54,10 @@ static const struct mtk_gate_regs eth_cg_regs = {
.sta_ofs = 0x30,
};
-#define GATE_ETH(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &eth_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-static const struct mtk_gate eth_clks[] __initconst = {
+static const struct mtk_gate eth_clks[] = {
GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x_sel", 6),
GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "sgm_325m_sel", 7),
GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "sgm_325m_sel", 8),
@@ -77,56 +65,38 @@ static const struct mtk_gate eth_clks[] __initconst = {
GATE_ETH(CLK_ETH_WOCPU0_EN, "eth_wocpu0_en", "netsys_mcu_sel", 15),
};
-static void __init mtk_sgmiisys_0_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks));
-
- mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_sgmiisys_0, "mediatek,mt7986-sgmiisys_0",
- mtk_sgmiisys_0_init);
-
-static void __init mtk_sgmiisys_1_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks));
-
- mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_sgmiisys_1, "mediatek,mt7986-sgmiisys_1",
- mtk_sgmiisys_1_init);
-
-static void __init mtk_ethsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
+static const struct mtk_clk_desc eth_desc = {
+ .clks = eth_clks,
+ .num_clks = ARRAY_SIZE(eth_clks),
+};
- clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks));
+static const struct mtk_clk_desc sgmii0_desc = {
+ .clks = sgmii0_clks,
+ .num_clks = ARRAY_SIZE(sgmii0_clks),
+};
- mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
+static const struct mtk_clk_desc sgmii1_desc = {
+ .clks = sgmii1_clks,
+ .num_clks = ARRAY_SIZE(sgmii1_clks),
+};
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+static const struct of_device_id of_match_clk_mt7986_eth[] = {
+ { .compatible = "mediatek,mt7986-ethsys", .data = &eth_desc },
+ { .compatible = "mediatek,mt7986-sgmiisys_0", .data = &sgmii0_desc },
+ { .compatible = "mediatek,mt7986-sgmiisys_1", .data = &sgmii1_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_eth);
+
+static struct platform_driver clk_mt7986_eth_drv = {
+ .driver = {
+ .name = "clk-mt7986-eth",
+ .of_match_table = of_match_clk_mt7986_eth,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt7986_eth_drv);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys", mtk_ethsys_init);
+MODULE_DESCRIPTION("MediaTek MT7986 Ethernet clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index 49666047bf0e..b7efa70c2d6c 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -87,26 +87,14 @@ static const struct mtk_gate_regs infra2_cg_regs = {
.sta_ofs = 0x68,
};
-#define GATE_INFRA0(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &infra0_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_INFRA1(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &infra1_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_INFRA2(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &infra2_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate infra_clks[] = {
/* INFRA0 */
@@ -169,56 +157,31 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA2(CLK_INFRA_IPCIEB_CK, "infra_ipcieb", "sysaxi_sel", 15),
};
-static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
- void __iomem *base;
- int nr = ARRAY_SIZE(infra_divs) + ARRAY_SIZE(infra_muxes) +
- ARRAY_SIZE(infra_clks);
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return -ENOMEM;
- }
-
- clk_data = mtk_alloc_clk_data(nr);
-
- if (!clk_data)
- return -ENOMEM;
-
- mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
- mtk_clk_register_muxes(infra_muxes, ARRAY_SIZE(infra_muxes), node,
- &mt7986_clk_lock, clk_data);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r) {
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
- goto free_infracfg_data;
- }
- return r;
-
-free_infracfg_data:
- mtk_free_clk_data(clk_data);
- return r;
-
-}
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .factor_clks = infra_divs,
+ .num_factor_clks = ARRAY_SIZE(infra_divs),
+ .mux_clks = infra_muxes,
+ .num_mux_clks = ARRAY_SIZE(infra_muxes),
+ .clk_lock = &mt7986_clk_lock,
+};
static const struct of_device_id of_match_clk_mt7986_infracfg[] = {
- { .compatible = "mediatek,mt7986-infracfg", },
- {}
+ { .compatible = "mediatek,mt7986-infracfg", .data = &infra_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_infracfg);
static struct platform_driver clk_mt7986_infracfg_drv = {
- .probe = clk_mt7986_infracfg_probe,
.driver = {
.name = "clk-mt7986-infracfg",
.of_match_table = of_match_clk_mt7986_infracfg,
},
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
};
-builtin_platform_driver(clk_mt7986_infracfg_drv);
+module_platform_driver(clk_mt7986_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7986 infracfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
index de5121cf2877..fbca3feded8f 100644
--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -202,16 +202,23 @@ static const struct mtk_mux top_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_F_26M_ADC_SEL, "f_26m_adc_sel",
f_26m_adc_parents, 0x020, 0x024, 0x028, 16, 1, 23,
0x1C0, 10),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_DRAMC_SEL, "dramc_sel", f_26m_adc_parents,
- 0x020, 0x024, 0x028, 24, 1, 31, 0x1C0, 11),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DRAMC_SEL, "dramc_sel",
+ f_26m_adc_parents, 0x020, 0x024, 0x028,
+ 24, 1, 31, 0x1C0, 11,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
/* CLK_CFG_3 */
- MUX_GATE_CLR_SET_UPD(CLK_TOP_DRAMC_MD32_SEL, "dramc_md32_sel",
- dramc_md32_parents, 0x030, 0x034, 0x038, 0, 1, 7,
- 0x1C0, 12),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_SYSAXI_SEL, "sysaxi_sel", sysaxi_parents,
- 0x030, 0x034, 0x038, 8, 2, 15, 0x1C0, 13),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_SYSAPB_SEL, "sysapb_sel", sysapb_parents,
- 0x030, 0x034, 0x038, 16, 2, 23, 0x1C0, 14),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DRAMC_MD32_SEL, "dramc_md32_sel",
+ dramc_md32_parents, 0x030, 0x034, 0x038,
+ 0, 1, 7, 0x1C0, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SYSAXI_SEL, "sysaxi_sel",
+ sysaxi_parents, 0x030, 0x034, 0x038,
+ 8, 2, 15, 0x1C0, 13,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SYSAPB_SEL, "sysapb_sel",
+ sysapb_parents, 0x030, 0x034, 0x038,
+ 16, 2, 23, 0x1C0, 14,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX_GATE_CLR_SET_UPD(CLK_TOP_ARM_DB_MAIN_SEL, "arm_db_main_sel",
arm_db_main_parents, 0x030, 0x034, 0x038, 24, 1,
31, 0x1C0, 15),
@@ -234,9 +241,10 @@ static const struct mtk_mux top_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
0x1C0, 21),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel",
- sgm_reg_parents, 0x050, 0x054, 0x058, 16, 1, 23,
- 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel",
+ sgm_reg_parents, 0x050, 0x054, 0x058,
+ 16, 1, 23, 0x1C0, 22,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_SEL, "a1sys_sel", a1sys_parents,
0x050, 0x054, 0x058, 24, 1, 31, 0x1C0, 23),
/* CLK_CFG_6 */
@@ -252,9 +260,10 @@ static const struct mtk_mux top_muxes[] = {
f_26m_adc_parents, 0x060, 0x064, 0x068, 24, 1, 31,
0x1C0, 27),
/* CLK_CFG_7 */
- MUX_GATE_CLR_SET_UPD(CLK_TOP_F26M_SEL, "csw_f26m_sel",
- f_26m_adc_parents, 0x070, 0x074, 0x078, 0, 1, 7,
- 0x1C0, 28),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_F26M_SEL, "csw_f26m_sel",
+ f_26m_adc_parents, 0x070, 0x074, 0x078,
+ 0, 1, 7, 0x1C0, 28,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_L_SEL, "aud_l_sel", aud_l_parents,
0x070, 0x074, 0x078, 8, 2, 15, 0x1C0, 29),
MUX_GATE_CLR_SET_UPD(CLK_TOP_A_TUNER_SEL, "a_tuner_sel",
@@ -281,62 +290,29 @@ static const struct mtk_mux top_muxes[] = {
0x1C4, 5),
};
-static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
- void __iomem *base;
- int nr = ARRAY_SIZE(top_fixed_clks) + ARRAY_SIZE(top_divs) +
- ARRAY_SIZE(top_muxes);
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return -ENOMEM;
- }
-
- clk_data = mtk_alloc_clk_data(nr);
- if (!clk_data)
- return -ENOMEM;
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
- &mt7986_clk_lock, clk_data);
-
- clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAXI_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAPB_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_DRAMC_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_DRAMC_MD32_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_F26M_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_SGM_REG_SEL]->clk);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r) {
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
- goto free_topckgen_data;
- }
- return r;
-
-free_topckgen_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .clk_lock = &mt7986_clk_lock,
+};
static const struct of_device_id of_match_clk_mt7986_topckgen[] = {
- { .compatible = "mediatek,mt7986-topckgen", },
- {}
+ { .compatible = "mediatek,mt7986-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_topckgen);
static struct platform_driver clk_mt7986_topckgen_drv = {
- .probe = clk_mt7986_topckgen_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7986-topckgen",
.of_match_table = of_match_clk_mt7986_topckgen,
},
};
-builtin_platform_driver(clk_mt7986_topckgen_drv);
+module_platform_driver(clk_mt7986_topckgen_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
new file mode 100644
index 000000000000..744aae092281
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * James Liao <jamesjj.liao@mediatek.com>
+ * Copyright (c) 2023 Collabora, Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8135-clk.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MT8135_PLL_FMAX (2000 * MHZ)
+#define CON0_MT8135_RST_BAR BIT(27)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8135_RST_BAR, \
+ .fmax = MT8135_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000000, 0, 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000000, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000000, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000000, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000000, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000000, 0, 21, 0x278, 6, 0x0, 0x27c, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000000, 0, 31, 0x294, 6, 0x0, 0x298, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0),
+ PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000000, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0),
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x308, 0),
+};
+
+static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static int clk_mt8135_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id of_match_clk_mt8135_apmixed[] = {
+ { .compatible = "mediatek,mt8135-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8135_apmixed);
+
+static struct platform_driver clk_mt8135_apmixed_drv = {
+ .probe = clk_mt8135_apmixed_probe,
+ .remove = clk_mt8135_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8135-apmixed",
+ .of_match_table = of_match_clk_mt8135_apmixed,
+ },
+};
+module_platform_driver(clk_mt8135_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8135 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index b68888a034c4..084e48a554c2 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -5,8 +5,10 @@
*/
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/clock/mt8135-clk.h>
@@ -17,14 +19,13 @@
static DEFINE_SPINLOCK(mt8135_clk_lock);
-static const struct mtk_fixed_factor root_clk_alias[] __initconst = {
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_DUMMY, "top_divs_dummy", "clk_null", 1, 1),
FACTOR(CLK_TOP_DSI0_LNTC_DSICLK, "dsi0_lntc_dsiclk", "clk_null", 1, 1),
FACTOR(CLK_TOP_HDMITX_CLKDIG_CTS, "hdmitx_clkdig_cts", "clk_null", 1, 1),
FACTOR(CLK_TOP_CLKPH_MCK, "clkph_mck", "clk_null", 1, 1),
FACTOR(CLK_TOP_CPUM_TCK_IN, "cpum_tck_in", "clk_null", 1, 1),
-};
-static const struct mtk_fixed_factor top_divs[] __initconst = {
FACTOR(CLK_TOP_MAINPLL_806M, "mainpll_806m", "mainpll", 1, 2),
FACTOR(CLK_TOP_MAINPLL_537P3M, "mainpll_537p3m", "mainpll", 1, 3),
FACTOR(CLK_TOP_MAINPLL_322P4M, "mainpll_322p4m", "mainpll", 1, 5),
@@ -100,7 +101,7 @@ static const struct mtk_fixed_factor top_divs[] __initconst = {
FACTOR(CLK_TOP_MEMPLL_MCK_D4, "mempll_mck_d4", "clkph_mck", 1, 4),
};
-static const char * const axi_parents[] __initconst = {
+static const char * const axi_parents[] = {
"clk26m",
"syspll_d3",
"syspll_d4",
@@ -110,7 +111,7 @@ static const char * const axi_parents[] __initconst = {
"syspll_d3p5"
};
-static const char * const smi_parents[] __initconst = {
+static const char * const smi_parents[] = {
"clk26m",
"clkph_mck",
"syspll_d2p5",
@@ -128,7 +129,7 @@ static const char * const smi_parents[] __initconst = {
"lvdspll"
};
-static const char * const mfg_parents[] __initconst = {
+static const char * const mfg_parents[] = {
"clk26m",
"univpll1_d4",
"syspll_d2",
@@ -144,13 +145,13 @@ static const char * const mfg_parents[] __initconst = {
"mmpll_d7"
};
-static const char * const irda_parents[] __initconst = {
+static const char * const irda_parents[] = {
"clk26m",
"univpll2_d8",
"univpll1_d6"
};
-static const char * const cam_parents[] __initconst = {
+static const char * const cam_parents[] = {
"clk26m",
"syspll_d3",
"syspll_d3p5",
@@ -161,13 +162,13 @@ static const char * const cam_parents[] __initconst = {
"univpll1_d4"
};
-static const char * const aud_intbus_parents[] __initconst = {
+static const char * const aud_intbus_parents[] = {
"clk26m",
"syspll_d6",
"univpll_d10"
};
-static const char * const jpg_parents[] __initconst = {
+static const char * const jpg_parents[] = {
"clk26m",
"syspll_d5",
"syspll_d4",
@@ -177,7 +178,7 @@ static const char * const jpg_parents[] __initconst = {
"univpll_d5"
};
-static const char * const disp_parents[] __initconst = {
+static const char * const disp_parents[] = {
"clk26m",
"syspll_d3p5",
"syspll_d3",
@@ -188,7 +189,7 @@ static const char * const disp_parents[] __initconst = {
"vdecpll"
};
-static const char * const msdc30_parents[] __initconst = {
+static const char * const msdc30_parents[] = {
"clk26m",
"syspll_d6",
"syspll_d5",
@@ -197,13 +198,13 @@ static const char * const msdc30_parents[] __initconst = {
"msdcpll"
};
-static const char * const usb20_parents[] __initconst = {
+static const char * const usb20_parents[] = {
"clk26m",
"univpll2_d6",
"univpll1_d10"
};
-static const char * const venc_parents[] __initconst = {
+static const char * const venc_parents[] = {
"clk26m",
"syspll_d3",
"syspll_d8",
@@ -214,7 +215,7 @@ static const char * const venc_parents[] __initconst = {
"mmpll_d6"
};
-static const char * const spi_parents[] __initconst = {
+static const char * const spi_parents[] = {
"clk26m",
"syspll_d6",
"syspll_d8",
@@ -223,17 +224,17 @@ static const char * const spi_parents[] __initconst = {
"univpll1_d8"
};
-static const char * const uart_parents[] __initconst = {
+static const char * const uart_parents[] = {
"clk26m",
"univpll2_d8"
};
-static const char * const mem_parents[] __initconst = {
+static const char * const mem_parents[] = {
"clk26m",
"clkph_mck"
};
-static const char * const camtg_parents[] __initconst = {
+static const char * const camtg_parents[] = {
"clk26m",
"univpll_d26",
"univpll1_d6",
@@ -241,12 +242,12 @@ static const char * const camtg_parents[] __initconst = {
"syspll_d8"
};
-static const char * const audio_parents[] __initconst = {
+static const char * const audio_parents[] = {
"clk26m",
"syspll_d24"
};
-static const char * const fix_parents[] __initconst = {
+static const char * const fix_parents[] = {
"rtc32k",
"clk26m",
"univpll_d5",
@@ -257,7 +258,7 @@ static const char * const fix_parents[] __initconst = {
"univpll1_d8"
};
-static const char * const vdec_parents[] __initconst = {
+static const char * const vdec_parents[] = {
"clk26m",
"vdecpll",
"clkph_mck",
@@ -276,13 +277,13 @@ static const char * const vdec_parents[] __initconst = {
"lvdspll"
};
-static const char * const ddrphycfg_parents[] __initconst = {
+static const char * const ddrphycfg_parents[] = {
"clk26m",
"axi_sel",
"syspll_d12"
};
-static const char * const dpilvds_parents[] __initconst = {
+static const char * const dpilvds_parents[] = {
"clk26m",
"lvdspll",
"lvdspll_d2",
@@ -290,7 +291,7 @@ static const char * const dpilvds_parents[] __initconst = {
"lvdspll_d8"
};
-static const char * const pmicspi_parents[] __initconst = {
+static const char * const pmicspi_parents[] = {
"clk26m",
"univpll2_d6",
"syspll_d8",
@@ -301,14 +302,14 @@ static const char * const pmicspi_parents[] __initconst = {
"syspll_d24"
};
-static const char * const smi_mfg_as_parents[] __initconst = {
+static const char * const smi_mfg_as_parents[] = {
"clk26m",
"smi_sel",
"mfg_sel",
"mem_sel"
};
-static const char * const gcpu_parents[] __initconst = {
+static const char * const gcpu_parents[] = {
"clk26m",
"syspll_d4",
"univpll_d7",
@@ -316,14 +317,14 @@ static const char * const gcpu_parents[] __initconst = {
"syspll_d6"
};
-static const char * const dpi1_parents[] __initconst = {
+static const char * const dpi1_parents[] = {
"clk26m",
"tvhdmi_h_ck",
"tvhdmi_d2",
"tvhdmi_d4"
};
-static const char * const cci_parents[] __initconst = {
+static const char * const cci_parents[] = {
"clk26m",
"mainpll_537p3m",
"univpll_d3",
@@ -332,7 +333,7 @@ static const char * const cci_parents[] __initconst = {
"syspll_d5"
};
-static const char * const apll_parents[] __initconst = {
+static const char * const apll_parents[] = {
"clk26m",
"apll_ck",
"apll_d4",
@@ -341,14 +342,14 @@ static const char * const apll_parents[] __initconst = {
"apll_d24"
};
-static const char * const hdmipll_parents[] __initconst = {
+static const char * const hdmipll_parents[] = {
"clk26m",
"hdmitx_clkdig_cts",
"hdmitx_clkdig_d2",
"hdmitx_clkdig_d3"
};
-static const struct mtk_composite top_muxes[] __initconst = {
+static const struct mtk_composite top_muxes[] = {
/* CLK_CFG_0 */
MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
0x0140, 0, 3, INVALID_MUX_GATE_BIT),
@@ -390,7 +391,7 @@ static const struct mtk_composite top_muxes[] __initconst = {
MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0164, 24, 3, 31),
/* CLK_CFG_9 */
MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, 0x0168, 0, 2, 7),
- MUX_GATE(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15),
+ MUX_GATE_FLAGS(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15, CLK_IS_CRITICAL),
MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x0168, 16, 3, 23),
MUX_GATE(CLK_TOP_HDMIPLL_SEL, "hdmipll_sel", hdmipll_parents, 0x0168, 24, 2, 31),
};
@@ -401,23 +402,22 @@ static const struct mtk_gate_regs infra_cg_regs = {
.sta_ofs = 0x0048,
};
-#define GATE_ICG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_ICG_AO(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
-static const struct mtk_gate infra_clks[] __initconst = {
+static const struct mtk_gate infra_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "infra_dummy"),
GATE_ICG(CLK_INFRA_PMIC_WRAP, "pmic_wrap_ck", "axi_sel", 23),
GATE_ICG(CLK_INFRA_PMICSPI, "pmicspi_ck", "pmicspi_sel", 22),
GATE_ICG(CLK_INFRA_CCIF1_AP_CTRL, "ccif1_ap_ctrl", "axi_sel", 21),
GATE_ICG(CLK_INFRA_CCIF0_AP_CTRL, "ccif0_ap_ctrl", "axi_sel", 20),
GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "cpum_tck_in", 15),
- GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+ GATE_ICG_AO(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
GATE_ICG(CLK_INFRA_MFGAXI, "mfgaxi_ck", "axi_sel", 7),
GATE_ICG(CLK_INFRA_DEVAPC, "devapc_ck", "axi_sel", 6),
GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "aud_intbus_sel", 5),
@@ -438,25 +438,14 @@ static const struct mtk_gate_regs peri1_cg_regs = {
.sta_ofs = 0x001c,
};
-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-static const struct mtk_gate peri_gates[] __initconst = {
+static const struct mtk_gate peri_gates[] = {
+ GATE_DUMMY(CLK_DUMMY, "peri_dummy"),
/* PERI0 */
GATE_PERI0(CLK_PERI_I2C5, "i2c5_ck", "axi_sel", 31),
GATE_PERI0(CLK_PERI_I2C4, "i2c4_ck", "axi_sel", 30),
@@ -502,12 +491,12 @@ static const struct mtk_gate peri_gates[] __initconst = {
GATE_PERI1(CLK_PERI_I2C6, "i2c6_ck", "axi_sel", 0),
};
-static const char * const uart_ck_sel_parents[] __initconst = {
+static const char * const uart_ck_sel_parents[] = {
"clk26m",
"uart_sel",
};
-static const struct mtk_composite peri_clks[] __initconst = {
+static const struct mtk_composite peri_clks[] = {
MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
@@ -532,125 +521,46 @@ static const struct mtk_clk_rst_desc clk_rst_desc[] = {
}
};
-static void __init mtk_topckgen_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- int r;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
- mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8135_clk_lock, clk_data);
-
- clk_prepare_enable(clk_data->hws[CLK_TOP_CCI_SEL]->clk);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8135-topckgen", mtk_topckgen_init);
-
-static void __init mtk_infrasys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
-
- clk_prepare_enable(clk_data->hws[CLK_INFRA_M4U]->clk);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .rst_desc = &clk_rst_desc[0],
+};
- mtk_register_reset_controller(node, &clk_rst_desc[0]);
-}
-CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init);
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_gates,
+ .num_clks = ARRAY_SIZE(peri_gates),
+ .composite_clks = peri_clks,
+ .num_composite_clks = ARRAY_SIZE(peri_clks),
+ .clk_lock = &mt8135_clk_lock,
+ .rst_desc = &clk_rst_desc[1],
+};
-static void __init mtk_pericfg_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- void __iomem *base;
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .clk_lock = &mt8135_clk_lock,
+};
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
+static const struct of_device_id of_match_clk_mt8135[] = {
+ { .compatible = "mediatek,mt8135-infracfg", .data = &infra_desc },
+ { .compatible = "mediatek,mt8135-pericfg", .data = &peri_desc },
+ { .compatible = "mediatek,mt8135-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8135);
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
-
- mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
- clk_data);
- mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
- &mt8135_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_register_reset_controller(node, &clk_rst_desc[1]);
-}
-CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init);
-
-#define MT8135_PLL_FMAX (2000 * MHZ)
-#define CON0_MT8135_RST_BAR BIT(27)
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = CON0_MT8135_RST_BAR, \
- .fmax = MT8135_PLL_FMAX, \
- .pcwbits = _pcwbits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- }
+static struct platform_driver clk_mt8135_drv = {
+ .driver = {
+ .name = "clk-mt8135",
+ .of_match_table = of_match_clk_mt8135,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt8135_drv);
-static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000000, 0, 21, 0x204, 24, 0x0, 0x204, 0),
- PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000000, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000000, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000000, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000000, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000000, 0, 21, 0x278, 6, 0x0, 0x27c, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000000, 0, 31, 0x294, 6, 0x0, 0x298, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0),
- PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000000, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0),
- PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x308, 0),
-};
-
-static void __init mtk_apmixedsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
- if (!clk_data)
- return;
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-}
-CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8135-apmixedsys",
- mtk_apmixedsys_init);
+MODULE_DESCRIPTION("MediaTek MT8135 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-apmixedsys.c b/drivers/clk/mediatek/clk-mt8167-apmixedsys.c
new file mode 100644
index 000000000000..fca41f50d6ba
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167-apmixedsys.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Copyright (c) 2023 Collabora, Ltd.
+ */
+
+#include <dt-bindings/clock/mt8167-clk.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-pll.h"
+#include "clk-mtk.h"
+
+static DEFINE_SPINLOCK(mt8167_apmixed_clk_lock);
+
+#define MT8167_PLL_FMAX (2500UL * MHZ)
+
+#define CON0_MT8167_RST_BAR BIT(27)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8167_RST_BAR, \
+ .fmax = MT8167_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8167_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 604500000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { /* sentinel */ }
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0,
+ 21, 0x0104, 24, 0, 0x0104, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0,
+ HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000,
+ HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0,
+ 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0,
+ 31, 0x0180, 1, 0x0194, 0x0184, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0,
+ 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0, 0,
+ 21, 0x01C4, 24, 0, 0x01C4, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0, 0,
+ 21, 0x01E4, 24, 0, 0x01E4, 0),
+};
+
+#define DIV_ADJ_FLAG(_id, _name, _parent, _reg, _shift, _width, _flag) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .clk_divider_flags = _flag, \
+}
+
+static const struct mtk_clk_divider adj_divs[] = {
+ DIV_ADJ_FLAG(CLK_APMIXED_HDMI_REF, "hdmi_ref", "tvdpll",
+ 0x1c4, 24, 3, CLK_DIVIDER_POWER_OF_TWO),
+};
+
+static int clk_mt8167_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, MT8167_CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = mtk_clk_register_dividers(dev, adj_divs, ARRAY_SIZE(adj_divs), base,
+ &mt8167_apmixed_clk_lock, clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_dividers;
+
+ return 0;
+
+unregister_dividers:
+ mtk_clk_unregister_dividers(adj_divs, ARRAY_SIZE(adj_divs), clk_data);
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static const struct of_device_id of_match_clk_mt8167_apmixed[] = {
+ { .compatible = "mediatek,mt8167-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_apmixed);
+
+static struct platform_driver clk_mt8167_apmixed_drv = {
+ .probe = clk_mt8167_apmixed_probe,
+ .driver = {
+ .name = "clk-mt8167-apmixed",
+ .of_match_table = of_match_clk_mt8167_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8167_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index ce1ae8d243c3..86125635c8a6 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -23,16 +23,11 @@ static const struct mtk_gate_regs aud_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_AUD(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &aud_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUD(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-static const struct mtk_gate aud_clks[] __initconst = {
+
+static const struct mtk_gate aud_clks[] = {
GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
GATE_AUD(CLK_AUD_I2S, "aud_i2s", "i2s_infra_bck", 6),
GATE_AUD(CLK_AUD_22M, "aud_22m", "rg_aud_engen1", 8),
@@ -48,19 +43,24 @@ static const struct mtk_gate aud_clks[] __initconst = {
GATE_AUD(CLK_AUD_TML, "aud_tml", "aud_afe", 27),
};
-static void __init mtk_audsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
-
- mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+static const struct mtk_clk_desc aud_desc = {
+ .clks = aud_clks,
+ .num_clks = ARRAY_SIZE(aud_clks),
+};
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct of_device_id of_match_clk_mt8167_audsys[] = {
+ { .compatible = "mediatek,mt8167-audsys", .data = &aud_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_audsys);
-}
-CLK_OF_DECLARE(mtk_audsys, "mediatek,mt8167-audsys", mtk_audsys_init);
+static struct platform_driver clk_mt8167_audsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8167-audsys",
+ .of_match_table = of_match_clk_mt8167_audsys,
+ },
+};
+module_platform_driver(clk_mt8167_audsys_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index e359e563d2b7..315b7f64bad6 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -23,16 +23,10 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate img_clks[] __initconst = {
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_LARB1_SMI, "img_larb1_smi", "smi_mm", 0),
GATE_IMG(CLK_IMG_CAM_SMI, "img_cam_smi", "smi_mm", 5),
GATE_IMG(CLK_IMG_CAM_CAM, "img_cam_cam", "smi_mm", 6),
@@ -41,20 +35,24 @@ static const struct mtk_gate img_clks[] __initconst = {
GATE_IMG(CLK_IMG_VENC, "img_venc", "smi_mm", 9),
};
-static void __init mtk_imgsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
-}
-CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8167-imgsys", mtk_imgsys_init);
+static const struct of_device_id of_match_clk_mt8167_imgsys[] = {
+ { .compatible = "mediatek,mt8167-imgsys", .data = &img_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_imgsys);
+
+static struct platform_driver clk_mt8167_imgsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8167-imgsys",
+ .of_match_table = of_match_clk_mt8167_imgsys,
+ },
+};
+module_platform_driver(clk_mt8167_imgsys_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index 4fd82fe87d6e..4851f5bf3a90 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -23,36 +23,34 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mfg_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate mfg_clks[] __initconst = {
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "ahb_infra_sel", 0),
GATE_MFG(CLK_MFG_BMEM, "mfg_bmem", "gfmux_emi1x_sel", 1),
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_mm", 2),
GATE_MFG(CLK_MFG_B26M, "mfg_b26m", "clk26m_ck", 3),
};
-static void __init mtk_mfgcfg_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
-
- mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
-}
-CLK_OF_DECLARE(mtk_mfgcfg, "mediatek,mt8167-mfgcfg", mtk_mfgcfg_init);
+static const struct of_device_id of_match_clk_mt8167_mfgcfg[] = {
+ { .compatible = "mediatek,mt8167-mfgcfg", .data = &mfg_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_mfgcfg);
+
+static struct platform_driver clk_mt8167_mfgcfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8167-mfgcfg",
+ .of_match_table = of_match_clk_mt8167_mfgcfg,
+ },
+};
+module_platform_driver(clk_mt8167_mfgcfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index 73910060577f..4e053c61315d 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -29,23 +29,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
.sta_ofs = 0x110,
};
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
/* MM0 */
@@ -86,47 +74,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_HDMI_PLL, "mm_hdmi_pll", "hdmtx_dig_cts", 21),
};
-struct clk_mt8167_mm_driver_data {
- const struct mtk_gate *gates_clk;
- int gates_num;
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
};
-static const struct clk_mt8167_mm_driver_data mt8167_mmsys_driver_data = {
- .gates_clk = mm_clks,
- .gates_num = ARRAY_SIZE(mm_clks),
+static const struct platform_device_id clk_mt8167_mm_id_table[] = {
+ { .name = "clk-mt8167-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, clk_mt8167_mm_id_table);
-static int clk_mt8167_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- const struct clk_mt8167_mm_driver_data *data;
- struct clk_hw_onecell_data *clk_data;
- int ret;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- data = &mt8167_mmsys_driver_data;
-
- ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
- clk_data);
- if (ret)
- return ret;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static struct platform_driver clk_mt8173_mm_drv = {
+static struct platform_driver clk_mt8167_mm_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8167-mm",
},
- .probe = clk_mt8167_mm_probe,
+ .id_table = clk_mt8167_mm_id_table,
};
-
-builtin_platform_driver(clk_mt8173_mm_drv);
+module_platform_driver(clk_mt8167_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index ee4fffb6859d..76900f393d31 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -29,45 +29,37 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x8,
};
-#define GATE_VDEC0_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC0_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
-#define GATE_VDEC1_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC1_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
-static const struct mtk_gate vdec_clks[] __initconst = {
+static const struct mtk_gate vdec_clks[] = {
/* VDEC0 */
GATE_VDEC0_I(CLK_VDEC_CKEN, "vdec_cken", "rg_vdec", 0),
/* VDEC1 */
GATE_VDEC1_I(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "smi_mm", 0),
};
-static void __init mtk_vdecsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct of_device_id of_match_clk_mt8167_vdec[] = {
+ { .compatible = "mediatek,mt8167-vdecsys", .data = &vdec_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_vdec);
-}
-CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8167-vdecsys", mtk_vdecsys_init);
+static struct platform_driver clk_mt8167_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8167-vdecsys",
+ .of_match_table = of_match_clk_mt8167_vdec,
+ },
+};
+module_platform_driver(clk_mt8167_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
index f900ac4bf7b8..b9041f79cbbd 100644
--- a/drivers/clk/mediatek/clk-mt8167.c
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -11,16 +11,16 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include "clk-gate.h"
#include "clk-mtk.h"
-#include "clk-pll.h"
#include <dt-bindings/clock/mt8167-clk.h>
static DEFINE_SPINLOCK(mt8167_clk_lock);
-static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+static const struct mtk_fixed_clk fixed_clks[] = {
FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000),
FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000),
@@ -29,7 +29,7 @@ static const struct mtk_fixed_clk fixed_clks[] __initconst = {
FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx_dig_cts", "clk26m", 52500000),
};
-static const struct mtk_fixed_factor top_divs[] __initconst = {
+static const struct mtk_fixed_factor top_divs[] = {
FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
@@ -85,22 +85,22 @@ static const struct mtk_fixed_factor top_divs[] __initconst = {
FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2),
};
-static const char * const uart0_parents[] __initconst = {
+static const char * const uart0_parents[] = {
"clk26m_ck",
"univpll_d24"
};
-static const char * const gfmux_emi1x_parents[] __initconst = {
+static const char * const gfmux_emi1x_parents[] = {
"clk26m_ck",
"dmpll_ck"
};
-static const char * const emi_ddrphy_parents[] __initconst = {
+static const char * const emi_ddrphy_parents[] = {
"gfmux_emi1x_sel",
"gfmux_emi1x_sel"
};
-static const char * const ahb_infra_parents[] __initconst = {
+static const char * const ahb_infra_parents[] = {
"clk_null",
"clk26m_ck",
"mainpll_d11",
@@ -116,7 +116,7 @@ static const char * const ahb_infra_parents[] __initconst = {
"mainpll_d10"
};
-static const char * const csw_mux_mfg_parents[] __initconst = {
+static const char * const csw_mux_mfg_parents[] = {
"clk_null",
"clk_null",
"univpll_d3",
@@ -127,7 +127,7 @@ static const char * const csw_mux_mfg_parents[] __initconst = {
"mmpll380m"
};
-static const char * const msdc0_parents[] __initconst = {
+static const char * const msdc0_parents[] = {
"clk26m_ck",
"univpll_d6",
"mainpll_d8",
@@ -138,7 +138,7 @@ static const char * const msdc0_parents[] __initconst = {
"mmpll_d2"
};
-static const char * const camtg_mm_parents[] __initconst = {
+static const char * const camtg_mm_parents[] = {
"clk_null",
"clk26m_ck",
"usb_phy48m_ck",
@@ -146,17 +146,17 @@ static const char * const camtg_mm_parents[] __initconst = {
"univpll_d6"
};
-static const char * const pwm_mm_parents[] __initconst = {
+static const char * const pwm_mm_parents[] = {
"clk26m_ck",
"univpll_d12"
};
-static const char * const uart1_parents[] __initconst = {
+static const char * const uart1_parents[] = {
"clk26m_ck",
"univpll_d24"
};
-static const char * const msdc1_parents[] __initconst = {
+static const char * const msdc1_parents[] = {
"clk26m_ck",
"univpll_d6",
"mainpll_d8",
@@ -167,24 +167,24 @@ static const char * const msdc1_parents[] __initconst = {
"mmpll_d2"
};
-static const char * const spm_52m_parents[] __initconst = {
+static const char * const spm_52m_parents[] = {
"clk26m_ck",
"univpll_d24"
};
-static const char * const pmicspi_parents[] __initconst = {
+static const char * const pmicspi_parents[] = {
"univpll_d20",
"usb_phy48m_ck",
"univpll_d16",
"clk26m_ck"
};
-static const char * const qaxi_aud26m_parents[] __initconst = {
+static const char * const qaxi_aud26m_parents[] = {
"clk26m_ck",
"ahb_infra_sel"
};
-static const char * const aud_intbus_parents[] __initconst = {
+static const char * const aud_intbus_parents[] = {
"clk_null",
"clk26m_ck",
"mainpll_d22",
@@ -192,7 +192,7 @@ static const char * const aud_intbus_parents[] __initconst = {
"mainpll_d11"
};
-static const char * const nfi2x_pad_parents[] __initconst = {
+static const char * const nfi2x_pad_parents[] = {
"clk_null",
"clk_null",
"clk_null",
@@ -280,12 +280,12 @@ static const char * const nfi2x_pad_parents[] __initconst = {
"mainpll_d5"
};
-static const char * const nfi1x_pad_parents[] __initconst = {
+static const char * const nfi1x_pad_parents[] = {
"ahb_infra_sel",
"nfi1x_ck"
};
-static const char * const mfg_mm_parents[] __initconst = {
+static const char * const mfg_mm_parents[] = {
"clk_null",
"clk_null",
"clk_null",
@@ -325,12 +325,12 @@ static const char * const mfg_mm_parents[] __initconst = {
"mainpll_d14"
};
-static const char * const ddrphycfg_parents[] __initconst = {
+static const char * const ddrphycfg_parents[] = {
"clk26m_ck",
"mainpll_d16"
};
-static const char * const smi_mm_parents[] __initconst = {
+static const char * const smi_mm_parents[] = {
"clk26m_ck",
"clk_null",
"clk_null",
@@ -346,7 +346,7 @@ static const char * const smi_mm_parents[] __initconst = {
"mainpll_d14"
};
-static const char * const usb_78m_parents[] __initconst = {
+static const char * const usb_78m_parents[] = {
"clk_null",
"clk26m_ck",
"univpll_d16",
@@ -354,7 +354,7 @@ static const char * const usb_78m_parents[] __initconst = {
"mainpll_d20"
};
-static const char * const scam_mm_parents[] __initconst = {
+static const char * const scam_mm_parents[] = {
"clk_null",
"clk26m_ck",
"mainpll_d14",
@@ -362,7 +362,7 @@ static const char * const scam_mm_parents[] __initconst = {
"mainpll_d12"
};
-static const char * const spinor_parents[] __initconst = {
+static const char * const spinor_parents[] = {
"clk26m_d2",
"clk26m_ck",
"mainpll_d40",
@@ -373,7 +373,7 @@ static const char * const spinor_parents[] __initconst = {
"univpll_d12"
};
-static const char * const msdc2_parents[] __initconst = {
+static const char * const msdc2_parents[] = {
"clk26m_ck",
"univpll_d6",
"mainpll_d8",
@@ -384,7 +384,7 @@ static const char * const msdc2_parents[] __initconst = {
"mmpll_d2"
};
-static const char * const eth_parents[] __initconst = {
+static const char * const eth_parents[] = {
"clk26m_ck",
"mainpll_d40",
"univpll_d24",
@@ -392,7 +392,7 @@ static const char * const eth_parents[] __initconst = {
"mainpll_d20"
};
-static const char * const vdec_mm_parents[] __initconst = {
+static const char * const vdec_mm_parents[] = {
"clk26m_ck",
"univpll_d4",
"mainpll_d4",
@@ -401,7 +401,7 @@ static const char * const vdec_mm_parents[] __initconst = {
"mainpll_d6"
};
-static const char * const dpi0_mm_parents[] __initconst = {
+static const char * const dpi0_mm_parents[] = {
"clk26m_ck",
"lvdspll_ck",
"lvdspll_d2",
@@ -409,7 +409,7 @@ static const char * const dpi0_mm_parents[] __initconst = {
"lvdspll_d8"
};
-static const char * const dpi1_mm_parents[] __initconst = {
+static const char * const dpi1_mm_parents[] = {
"clk26m_ck",
"tvdpll_d2",
"tvdpll_d4",
@@ -417,85 +417,85 @@ static const char * const dpi1_mm_parents[] __initconst = {
"tvdpll_d16"
};
-static const char * const axi_mfg_in_parents[] __initconst = {
+static const char * const axi_mfg_in_parents[] = {
"clk26m_ck",
"mainpll_d11",
"univpll_d24",
"mmpll380m"
};
-static const char * const slow_mfg_parents[] __initconst = {
+static const char * const slow_mfg_parents[] = {
"clk26m_ck",
"univpll_d12",
"univpll_d24"
};
-static const char * const aud1_parents[] __initconst = {
+static const char * const aud1_parents[] = {
"clk26m_ck",
"apll1_ck"
};
-static const char * const aud2_parents[] __initconst = {
+static const char * const aud2_parents[] = {
"clk26m_ck",
"apll2_ck"
};
-static const char * const aud_engen1_parents[] __initconst = {
+static const char * const aud_engen1_parents[] = {
"clk26m_ck",
"rg_apll1_d2_en",
"rg_apll1_d4_en",
"rg_apll1_d8_en"
};
-static const char * const aud_engen2_parents[] __initconst = {
+static const char * const aud_engen2_parents[] = {
"clk26m_ck",
"rg_apll2_d2_en",
"rg_apll2_d4_en",
"rg_apll2_d8_en"
};
-static const char * const i2c_parents[] __initconst = {
+static const char * const i2c_parents[] = {
"clk26m_ck",
"univpll_d20",
"univpll_d16",
"univpll_d12"
};
-static const char * const aud_i2s0_m_parents[] __initconst = {
+static const char * const aud_i2s0_m_parents[] = {
"rg_aud1",
"rg_aud2"
};
-static const char * const pwm_parents[] __initconst = {
+static const char * const pwm_parents[] = {
"clk26m_ck",
"univpll_d12"
};
-static const char * const spi_parents[] __initconst = {
+static const char * const spi_parents[] = {
"clk26m_ck",
"univpll_d12",
"univpll_d8",
"univpll_d6"
};
-static const char * const aud_spdifin_parents[] __initconst = {
+static const char * const aud_spdifin_parents[] = {
"clk26m_ck",
"univpll_d2"
};
-static const char * const uart2_parents[] __initconst = {
+static const char * const uart2_parents[] = {
"clk26m_ck",
"univpll_d24"
};
-static const char * const bsi_parents[] __initconst = {
+static const char * const bsi_parents[] = {
"clk26m_ck",
"mainpll_d10",
"mainpll_d12",
"mainpll_d20"
};
-static const char * const dbg_atclk_parents[] __initconst = {
+static const char * const dbg_atclk_parents[] = {
"clk_null",
"clk26m_ck",
"mainpll_d5",
@@ -503,7 +503,7 @@ static const char * const dbg_atclk_parents[] __initconst = {
"univpll_d5"
};
-static const char * const csw_nfiecc_parents[] __initconst = {
+static const char * const csw_nfiecc_parents[] = {
"clk_null",
"mainpll_d7",
"mainpll_d6",
@@ -511,7 +511,7 @@ static const char * const csw_nfiecc_parents[] __initconst = {
"mainpll_d5"
};
-static const char * const nfiecc_parents[] __initconst = {
+static const char * const nfiecc_parents[] = {
"clk_null",
"nfi2x_pad_sel",
"mainpll_d4",
@@ -625,24 +625,24 @@ static struct mtk_composite top_muxes[] __initdata = {
0x07c, 13, 3),
};
-static const char * const ifr_mux1_parents[] __initconst = {
+static const char * const ifr_mux1_parents[] = {
"clk26m_ck",
"armpll",
"univpll",
"mainpll_d2"
};
-static const char * const ifr_eth_25m_parents[] __initconst = {
+static const char * const ifr_eth_25m_parents[] = {
"eth_d2_ck",
"rg_eth"
};
-static const char * const ifr_i2c0_parents[] __initconst = {
+static const char * const ifr_i2c0_parents[] = {
"ahb_infra_d2",
"rg_i2c"
};
-static const struct mtk_composite ifr_muxes[] __initconst = {
+static const struct mtk_composite ifr_muxes[] = {
MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000,
2, 2),
MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080,
@@ -685,21 +685,6 @@ static const struct mtk_clk_divider top_adj_divs[] = {
0x0078, 0, 8),
};
-#define DIV_ADJ_FLAG(_id, _name, _parent, _reg, _shift, _width, _flag) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .div_reg = _reg, \
- .div_shift = _shift, \
- .div_width = _width, \
- .clk_divider_flags = _flag, \
-}
-
-static const struct mtk_clk_divider apmixed_adj_divs[] = {
- DIV_ADJ_FLAG(CLK_APMIXED_HDMI_REF, "hdmi_ref", "tvdpll",
- 0x1c4, 24, 3, CLK_DIVIDER_POWER_OF_TWO),
-};
-
static const struct mtk_gate_regs top0_cg_regs = {
.set_ofs = 0x50,
.clr_ofs = 0x80,
@@ -736,79 +721,31 @@ static const struct mtk_gate_regs top5_cg_regs = {
.sta_ofs = 0x44,
};
-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP0_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP2_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-#define GATE_TOP3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP4_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top4_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-#define GATE_TOP5(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top5_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
-
-static const struct mtk_gate top_clks[] __initconst = {
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP0_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_TOP3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top4_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_TOP5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top5_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate top_clks[] = {
/* TOP0 */
GATE_TOP0(CLK_TOP_PWM_MM, "pwm_mm", "pwm_mm_sel", 0),
GATE_TOP0(CLK_TOP_CAM_MM, "cam_mm", "camtg_mm_sel", 1),
@@ -921,143 +858,40 @@ static const struct mtk_gate top_clks[] __initconst = {
GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8),
};
-static void __init mtk_topckgen_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- void __iomem *base;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(MT8167_CLK_TOP_NR_CLK);
-
- mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
- clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
-
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8167_clk_lock, clk_data);
- mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
- base, &mt8167_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8167-topckgen", mtk_topckgen_init);
-
-static void __init mtk_infracfg_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- void __iomem *base;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
-
- mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
- &mt8167_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8167-infracfg", mtk_infracfg_init);
-
-#define MT8167_PLL_FMAX (2500UL * MHZ)
-
-#define CON0_MT8167_RST_BAR BIT(27)
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift, _div_table) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = CON0_MT8167_RST_BAR, \
- .fmax = MT8167_PLL_FMAX, \
- .pcwbits = _pcwbits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .div_table = _div_table, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift) \
- PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
- NULL)
-
-static const struct mtk_pll_div_table mmpll_div_table[] = {
- { .div = 0, .freq = MT8167_PLL_FMAX },
- { .div = 1, .freq = 1000000000 },
- { .div = 2, .freq = 604500000 },
- { .div = 3, .freq = 253500000 },
- { .div = 4, .freq = 126750000 },
- { } /* sentinel */
+static const struct mtk_clk_desc topck_desc = {
+ .clks = top_clks,
+ .num_clks = ARRAY_SIZE(top_clks),
+ .fixed_clks = fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .divider_clks = top_adj_divs,
+ .num_divider_clks = ARRAY_SIZE(top_adj_divs),
+ .clk_lock = &mt8167_clk_lock,
};
-static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0,
- 21, 0x0104, 24, 0, 0x0104, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0,
- HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000,
- HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0,
- 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0,
- 31, 0x0180, 1, 0x0194, 0x0184, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0,
- 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0, 0,
- 21, 0x01C4, 24, 0, 0x01C4, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0, 0,
- 21, 0x01E4, 24, 0, 0x01E4, 0),
+static const struct mtk_clk_desc infra_desc = {
+ .composite_clks = ifr_muxes,
+ .num_composite_clks = ARRAY_SIZE(ifr_muxes),
+ .clk_lock = &mt8167_clk_lock,
};
-static void __init mtk_apmixedsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- int r;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(MT8167_CLK_APMIXED_NR_CLK);
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- mtk_clk_register_dividers(apmixed_adj_divs, ARRAY_SIZE(apmixed_adj_divs),
- base, &mt8167_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static const struct of_device_id of_match_clk_mt8167[] = {
+ { .compatible = "mediatek,mt8167-topckgen", .data = &topck_desc },
+ { .compatible = "mediatek,mt8167-infracfg", .data = &infra_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8167);
-}
-CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8167-apmixedsys",
- mtk_apmixedsys_init);
+static struct platform_driver clk_mt8167_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8167",
+ .of_match_table = of_match_clk_mt8167,
+ },
+};
+module_platform_driver(clk_mt8167_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
new file mode 100644
index 000000000000..8c2aa8b0f39e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-fhctl.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
+#include "clk-pllfh.h"
+
+#define REGOFF_REF2USB 0x8
+#define REGOFF_HDMI_REF 0x40
+
+#define MT8173_PLL_FMAX (3000UL * MHZ)
+
+#define CON0_MT8173_RST_BAR BIT(24)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8173_RST_BAR, \
+ .fmax = MT8173_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8173_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 702000000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0, PLL_AO,
+ 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0, PLL_AO,
+ 21, 0x214, 24, 0x0, 0x214, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000100, HAVE_RST_BAR, 21,
+ 0x220, 4, 0x0, 0x224, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000000, HAVE_RST_BAR, 7,
+ 0x230, 4, 0x0, 0x234, 14),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0, 0, 21, 0x244, 24, 0x0,
+ 0x244, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0, 0, 21, 0x250, 4, 0x0, 0x254, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0, 0, 21, 0x260, 4, 0x0, 0x264, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0, 0, 21, 0x270, 4, 0x0, 0x274, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0, 0, 21, 0x280, 4, 0x0, 0x284, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0, 0, 21, 0x290, 4, 0x0, 0x294, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0),
+ PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0),
+};
+
+enum fh_pll_id {
+ FH_ARMCA7PLL,
+ FH_ARMCA15PLL,
+ FH_MAINPLL,
+ FH_MPLL,
+ FH_MSDCPLL,
+ FH_MMPLL,
+ FH_VENCPLL,
+ FH_TVDPLL,
+ FH_VCODECPLL,
+ FH_LVDSPLL,
+ FH_MSDC2PLL,
+ FH_NR_FH,
+};
+
+#define FH(_pllid, _fhid, _offset) { \
+ .data = { \
+ .pll_id = _pllid, \
+ .fh_id = _fhid, \
+ .fh_ver = FHCTL_PLLFH_V1, \
+ .fhx_offset = _offset, \
+ .dds_mask = GENMASK(21, 0), \
+ .slope0_value = 0x6003c97, \
+ .slope1_value = 0x6003c97, \
+ .sfstrx_en = BIT(2), \
+ .frddsx_en = BIT(1), \
+ .fhctlx_en = BIT(0), \
+ .tgl_org = BIT(31), \
+ .dvfs_tri = BIT(31), \
+ .pcwchg = BIT(31), \
+ .dt_val = 0x0, \
+ .df_val = 0x9, \
+ .updnlmt_shft = 16, \
+ .msk_frddsx_dys = GENMASK(23, 20), \
+ .msk_frddsx_dts = GENMASK(19, 16), \
+ }, \
+ }
+
+static struct mtk_pllfh_data pllfhs[] = {
+ FH(CLK_APMIXED_ARMCA7PLL, FH_ARMCA7PLL, 0x38),
+ FH(CLK_APMIXED_ARMCA15PLL, FH_ARMCA15PLL, 0x4c),
+ FH(CLK_APMIXED_MAINPLL, FH_MAINPLL, 0x60),
+ FH(CLK_APMIXED_MPLL, FH_MPLL, 0x74),
+ FH(CLK_APMIXED_MSDCPLL, FH_MSDCPLL, 0x88),
+ FH(CLK_APMIXED_MMPLL, FH_MMPLL, 0x9c),
+ FH(CLK_APMIXED_VENCPLL, FH_VENCPLL, 0xb0),
+ FH(CLK_APMIXED_TVDPLL, FH_TVDPLL, 0xc4),
+ FH(CLK_APMIXED_VCODECPLL, FH_VCODECPLL, 0xd8),
+ FH(CLK_APMIXED_LVDSPLL, FH_LVDSPLL, 0xec),
+ FH(CLK_APMIXED_MSDCPLL2, FH_MSDC2PLL, 0x100),
+};
+
+static const struct of_device_id of_match_clk_mt8173_apmixed[] = {
+ { .compatible = "mediatek,mt8173-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_apmixed);
+
+static int clk_mt8173_apmixed_probe(struct platform_device *pdev)
+{
+ const u8 *fhctl_node = "mediatek,mt8173-fhctl";
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int r;
+
+ base = of_iomap(node, 0);
+ if (!base)
+ return -ENOMEM;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (IS_ERR_OR_NULL(clk_data))
+ return -ENOMEM;
+
+ fhctl_parse_dt(fhctl_node, pllfhs, ARRAY_SIZE(pllfhs));
+ r = mtk_clk_register_pllfhs(node, plls, ARRAY_SIZE(plls),
+ pllfhs, ARRAY_SIZE(pllfhs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ hw = mtk_clk_register_ref2usb_tx("ref2usb_tx", "clk26m", base + REGOFF_REF2USB);
+ if (IS_ERR(hw)) {
+ r = PTR_ERR(hw);
+ dev_err(&pdev->dev, "Failed to register ref2usb_tx: %d\n", r);
+ goto unregister_plls;
+ }
+ clk_data->hws[CLK_APMIXED_REF2USB_TX] = hw;
+
+ hw = devm_clk_hw_register_divider(&pdev->dev, "hdmi_ref", "tvdpll_594m", 0,
+ base + REGOFF_HDMI_REF, 16, 3,
+ CLK_DIVIDER_POWER_OF_TWO, NULL);
+ clk_data->hws[CLK_APMIXED_HDMI_REF] = hw;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_ref2usb;
+
+ return 0;
+
+unregister_ref2usb:
+ mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
+unregister_plls:
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8173_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8173_apmixed_drv = {
+ .probe = clk_mt8173_apmixed_probe,
+ .remove = clk_mt8173_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8173-apmixed",
+ .of_match_table = of_match_clk_mt8173_apmixed,
+ },
+};
+module_platform_driver(clk_mt8173_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 apmixed clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-img.c b/drivers/clk/mediatek/clk-mt8173-img.c
new file mode 100644
index 000000000000..6db2b9ab2bc9
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-img.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x0004,
+ .clr_ofs = 0x0008,
+ .sta_ofs = 0x0000,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
+ GATE_IMG(CLK_IMG_LARB2_SMI, "img_larb2_smi", "mm_sel", 0),
+ GATE_IMG(CLK_IMG_CAM_SMI, "img_cam_smi", "mm_sel", 5),
+ GATE_IMG(CLK_IMG_CAM_CAM, "img_cam_cam", "mm_sel", 6),
+ GATE_IMG(CLK_IMG_SEN_TG, "img_sen_tg", "camtg_sel", 7),
+ GATE_IMG(CLK_IMG_SEN_CAM, "img_sen_cam", "mm_sel", 8),
+ GATE_IMG(CLK_IMG_CAM_SV, "img_cam_sv", "mm_sel", 9),
+ GATE_IMG(CLK_IMG_FD, "img_fd", "mm_sel", 11),
+};
+
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8173_imgsys[] = {
+ { .compatible = "mediatek,mt8173-imgsys", .data = &img_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_imgsys);
+
+static struct platform_driver clk_mt8173_vdecsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8173-imgsys",
+ .of_match_table = of_match_clk_mt8173_imgsys,
+ },
+};
+module_platform_driver(clk_mt8173_vdecsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 vdecsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-infracfg.c b/drivers/clk/mediatek/clk-mt8173-infracfg.c
new file mode 100644
index 000000000000..4ed5043076ec
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-infracfg.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr)
+
+static struct clk_hw_onecell_data *infra_clk_data;
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x0040,
+ .clr_ofs = 0x0044,
+ .sta_ofs = 0x0048,
+};
+
+static const char * const ca53_parents[] __initconst = {
+ "clk26m",
+ "armca7pll",
+ "mainpll",
+ "univpll"
+};
+
+static const char * const ca72_parents[] __initconst = {
+ "clk26m",
+ "armca15pll",
+ "mainpll",
+ "univpll"
+};
+
+static const struct mtk_composite cpu_muxes[] = {
+ MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2),
+ MUX(CLK_INFRA_CA72SEL, "infra_ca72_sel", ca72_parents, 0x0000, 2, 2),
+};
+
+static const struct mtk_fixed_factor infra_early_divs[] = {
+ FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
+};
+
+static const struct mtk_gate infra_gates[] = {
+ GATE_ICG(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+ GATE_ICG(CLK_INFRA_SMI, "infra_smi", "mm_sel", 1),
+ GATE_ICG(CLK_INFRA_AUDIO, "infra_audio", "aud_intbus_sel", 5),
+ GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+ GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7),
+ GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+ GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "cpum_ck", 15),
+ GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+ GATE_ICG(CLK_INFRA_CEC, "infra_cec", "clk26m", 18),
+ GATE_ICG(CLK_INFRA_PMICSPI, "infra_pmicspi", "pmicspi_sel", 22),
+ GATE_ICG(CLK_INFRA_PMICWRAP, "infra_pmicwrap", "axi_sel", 23),
+};
+
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34 };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+};
+
+static const struct of_device_id of_match_clk_mt8173_infracfg[] = {
+ { .compatible = "mediatek,mt8173-infracfg" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_infracfg);
+
+static void clk_mt8173_infra_init_early(struct device_node *node)
+{
+ int i;
+
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!infra_clk_data)
+ return;
+
+ for (i = 0; i < CLK_INFRA_NR_CLK; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ mtk_clk_register_factors(infra_early_divs,
+ ARRAY_SIZE(infra_early_divs), infra_clk_data);
+
+ of_clk_add_hw_provider(node, of_clk_hw_onecell_get, infra_clk_data);
+}
+CLK_OF_DECLARE_DRIVER(mtk_infrasys, "mediatek,mt8173-infracfg",
+ clk_mt8173_infra_init_early);
+
+static int clk_mt8173_infracfg_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ r = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
+ ARRAY_SIZE(infra_gates), infra_clk_data);
+ if (r)
+ return r;
+
+ r = mtk_clk_register_cpumuxes(&pdev->dev, node, cpu_muxes,
+ ARRAY_SIZE(cpu_muxes), infra_clk_data);
+ if (r)
+ goto unregister_gates;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, infra_clk_data);
+ if (r)
+ goto unregister_cpumuxes;
+
+ r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (r)
+ goto unregister_clk_hw;
+
+ return 0;
+
+unregister_clk_hw:
+ of_clk_del_provider(node);
+unregister_cpumuxes:
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), infra_clk_data);
+unregister_gates:
+ mtk_clk_unregister_gates(infra_gates, ARRAY_SIZE(infra_gates), infra_clk_data);
+ return r;
+}
+
+static int clk_mt8173_infracfg_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ mtk_clk_unregister_gates(infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8173_infracfg_drv = {
+ .driver = {
+ .name = "clk-mt8173-infracfg",
+ .of_match_table = of_match_clk_mt8173_infracfg,
+ },
+ .probe = clk_mt8173_infracfg_probe,
+ .remove = clk_mt8173_infracfg_remove,
+};
+module_platform_driver(clk_mt8173_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 infracfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index 8abf42c2030c..18e466dbf610 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -25,25 +25,14 @@ static const struct mtk_gate_regs mm1_cg_regs = {
.sta_ofs = 0x0110,
};
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mt8173_mm_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
/* MM0 */
GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
@@ -100,47 +89,26 @@ static const struct mtk_gate mt8173_mm_clks[] = {
GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
};
-struct clk_mt8173_mm_driver_data {
- const struct mtk_gate *gates_clk;
- int gates_num;
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mt8173_mm_clks,
+ .num_clks = ARRAY_SIZE(mt8173_mm_clks),
};
-static const struct clk_mt8173_mm_driver_data mt8173_mmsys_driver_data = {
- .gates_clk = mt8173_mm_clks,
- .gates_num = ARRAY_SIZE(mt8173_mm_clks),
+static const struct platform_device_id clk_mt8173_mm_id_table[] = {
+ { .name = "clk-mt8173-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
};
-
-static int clk_mt8173_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- const struct clk_mt8173_mm_driver_data *data;
- struct clk_hw_onecell_data *clk_data;
- int ret;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- data = &mt8173_mmsys_driver_data;
-
- ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
- clk_data);
- if (ret)
- return ret;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- return ret;
-
- return 0;
-}
+MODULE_DEVICE_TABLE(platform, clk_mt8173_mm_id_table);
static struct platform_driver clk_mt8173_mm_drv = {
.driver = {
.name = "clk-mt8173-mm",
},
- .probe = clk_mt8173_mm_probe,
+ .id_table = clk_mt8173_mm_id_table,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
};
+module_platform_driver(clk_mt8173_mm_drv);
-builtin_platform_driver(clk_mt8173_mm_drv);
+MODULE_DESCRIPTION("MediaTek MT8173 MultiMedia clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-pericfg.c b/drivers/clk/mediatek/clk-mt8173-pericfg.c
new file mode 100644
index 000000000000..bebda74d0f43
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-pericfg.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr)
+
+static DEFINE_SPINLOCK(mt8173_clk_lock);
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x0010,
+ .sta_ofs = 0x0018,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0x000c,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x001c,
+};
+
+static const char * const uart_ck_sel_parents[] = {
+ "clk26m",
+ "uart_sel",
+};
+
+static const struct mtk_composite peri_clks[] = {
+ MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
+ MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
+ MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
+ MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
+};
+
+static const struct mtk_gate peri_gates[] = {
+ GATE_DUMMY(CLK_DUMMY, "peri_gate_dummy"),
+ /* PERI0 */
+ GATE_PERI0(CLK_PERI_NFI, "peri_nfi", "axi_sel", 0),
+ GATE_PERI0(CLK_PERI_THERM, "peri_therm", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_PWM1, "peri_pwm1", "axi_sel", 2),
+ GATE_PERI0(CLK_PERI_PWM2, "peri_pwm2", "axi_sel", 3),
+ GATE_PERI0(CLK_PERI_PWM3, "peri_pwm3", "axi_sel", 4),
+ GATE_PERI0(CLK_PERI_PWM4, "peri_pwm4", "axi_sel", 5),
+ GATE_PERI0(CLK_PERI_PWM5, "peri_pwm5", "axi_sel", 6),
+ GATE_PERI0(CLK_PERI_PWM6, "peri_pwm6", "axi_sel", 7),
+ GATE_PERI0(CLK_PERI_PWM7, "peri_pwm7", "axi_sel", 8),
+ GATE_PERI0(CLK_PERI_PWM, "peri_pwm", "axi_sel", 9),
+ GATE_PERI0(CLK_PERI_USB0, "peri_usb0", "usb20_sel", 10),
+ GATE_PERI0(CLK_PERI_USB1, "peri_usb1", "usb20_sel", 11),
+ GATE_PERI0(CLK_PERI_AP_DMA, "peri_ap_dma", "axi_sel", 12),
+ GATE_PERI0(CLK_PERI_MSDC30_0, "peri_msdc30_0", "msdc50_0_sel", 13),
+ GATE_PERI0(CLK_PERI_MSDC30_1, "peri_msdc30_1", "msdc30_1_sel", 14),
+ GATE_PERI0(CLK_PERI_MSDC30_2, "peri_msdc30_2", "msdc30_2_sel", 15),
+ GATE_PERI0(CLK_PERI_MSDC30_3, "peri_msdc30_3", "msdc30_3_sel", 16),
+ GATE_PERI0(CLK_PERI_NLI_ARB, "peri_nli_arb", "axi_sel", 17),
+ GATE_PERI0(CLK_PERI_IRDA, "peri_irda", "irda_sel", 18),
+ GATE_PERI0(CLK_PERI_UART0, "peri_uart0", "axi_sel", 19),
+ GATE_PERI0(CLK_PERI_UART1, "peri_uart1", "axi_sel", 20),
+ GATE_PERI0(CLK_PERI_UART2, "peri_uart2", "axi_sel", 21),
+ GATE_PERI0(CLK_PERI_UART3, "peri_uart3", "axi_sel", 22),
+ GATE_PERI0(CLK_PERI_I2C0, "peri_i2c0", "axi_sel", 23),
+ GATE_PERI0(CLK_PERI_I2C1, "peri_i2c1", "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_I2C2, "peri_i2c2", "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_I2C3, "peri_i2c3", "axi_sel", 26),
+ GATE_PERI0(CLK_PERI_I2C4, "peri_i2c4", "axi_sel", 27),
+ GATE_PERI0(CLK_PERI_AUXADC, "peri_auxadc", "clk26m", 28),
+ GATE_PERI0(CLK_PERI_SPI0, "peri_spi0", "spi_sel", 29),
+ GATE_PERI0(CLK_PERI_I2C5, "peri_i2c5", "axi_sel", 30),
+ GATE_PERI0(CLK_PERI_NFIECC, "peri_nfiecc", "axi_sel", 31),
+ /* PERI1 */
+ GATE_PERI1(CLK_PERI_SPI, "peri_spi", "spi_sel", 0),
+ GATE_PERI1(CLK_PERI_IRRX, "peri_irrx", "spi_sel", 1),
+ GATE_PERI1(CLK_PERI_I2C6, "peri_i2c6", "axi_sel", 2),
+};
+
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4 };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+};
+
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_gates,
+ .num_clks = ARRAY_SIZE(peri_gates),
+ .composite_clks = peri_clks,
+ .num_composite_clks = ARRAY_SIZE(peri_clks),
+ .clk_lock = &mt8173_clk_lock,
+ .rst_desc = &clk_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8173_pericfg[] = {
+ { .compatible = "mediatek,mt8173-pericfg", .data = &peri_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_pericfg);
+
+static struct platform_driver clk_mt8173_pericfg_drv = {
+ .driver = {
+ .name = "clk-mt8173-pericfg",
+ .of_match_table = of_match_clk_mt8173_pericfg,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt8173_pericfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 pericfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-topckgen.c b/drivers/clk/mediatek/clk-mt8173-topckgen.c
new file mode 100644
index 000000000000..baa8fd6cb312
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-topckgen.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/*
+ * For some clocks, we don't care what their actual rates are. And these
+ * clocks may change their rate on different products or different scenarios.
+ * So we model these clocks' rate as 0, to denote it's not an actual rate.
+ */
+#define DUMMY_RATE 0
+
+#define TOP_MUX_GATE_NOSR(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) \
+ MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _reg, \
+ (_reg + 0x4), (_reg + 0x8), _shift, _width, \
+ _gate, 0, -1, _flags)
+
+#define TOP_MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) \
+ TOP_MUX_GATE_NOSR(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, CLK_SET_RATE_PARENT | _flags)
+
+static DEFINE_SPINLOCK(mt8173_top_clk_lock);
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_d2",
+ "main_h364m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_d2"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "univpll_d3",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "mmpll_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "univpll_d3",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_ck",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll2_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "univpll1_d4"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const usb30_parents[] = {
+ "clk26m",
+ "univpll3_d2",
+ "usb_syspll_125m",
+ "univpll2_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "msdcpll_d4",
+ "vencpll_d4",
+ "tvdpll_ck",
+ "univpll_d2",
+ "univpll1_d2",
+ "mmpll_ck",
+ "msdcpll2_ck",
+ "msdcpll2_d2",
+ "msdcpll2_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d4"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] = {
+ "clk26m",
+ "msdcpll2_ck",
+ "msdcpll2_d2",
+ "univpll2_d2",
+ "msdcpll2_d4",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d4",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "msdcpll_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d8",
+ "dmpll_d4",
+ "dmpll_d8"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d8",
+ "dmpll_d16"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll_d5",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll_d5",
+ "dmpll_d2"
+};
+
+static const char * const venc_lt_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "univpll2_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "vcodecpll_370p5",
+ "dmpll_ck"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "clk26m",
+ "clk26m",
+ "tvdpll_d8",
+ "tvdpll_d16"
+};
+
+static const char * const irda_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll2_d4"
+};
+
+static const char * const cci400_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "armca7pll_754m",
+ "armca7pll_502m",
+ "univpll_d2",
+ "syspll_d2",
+ "msdcpll_ck",
+ "dmpll_ck"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const mem_mfg_in_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_ck",
+ "clk26m"
+};
+
+static const char * const axi_mfg_in_parents[] = {
+ "clk26m",
+ "axi_sel",
+ "dmpll_d2"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "dmpll_d4"
+};
+
+static const char * const spinfi_ifr_parents[] = {
+ "clk26m",
+ "univpll2_d8",
+ "univpll3_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll3_d2",
+ "syspll1_d4",
+ "univpll1_d4"
+};
+
+static const char * const hdmi_parents[] = {
+ "clk26m",
+ "hdmitx_dig_cts",
+ "hdmitxpll_d2",
+ "hdmitxpll_d3"
+};
+
+static const char * const dpilvds_parents[] = {
+ "clk26m",
+ "lvdspll",
+ "lvdspll_d2",
+ "lvdspll_d4",
+ "lvdspll_d8",
+ "fpc_ck"
+};
+
+static const char * const msdc50_2_h_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const hdcp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "syspll3_d4",
+ "univpll2_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll_d52",
+ "univpll2_d8"
+};
+
+static const char * const rtc_parents[] = {
+ "clkrtc_int",
+ "clkrtc_ext",
+ "clk26m",
+ "univpll3_d8"
+};
+
+static const char * const i2s0_m_ck_parents[] = {
+ "apll1_div1",
+ "apll2_div1"
+};
+
+static const char * const i2s1_m_ck_parents[] = {
+ "apll1_div2",
+ "apll2_div2"
+};
+
+static const char * const i2s2_m_ck_parents[] = {
+ "apll1_div3",
+ "apll2_div3"
+};
+
+static const char * const i2s3_m_ck_parents[] = {
+ "apll1_div4",
+ "apll2_div4"
+};
+
+static const char * const i2s3_b_ck_parents[] = {
+ "apll1_div5",
+ "apll2_div5"
+};
+
+static const struct mtk_fixed_clk fixed_clks[] = {
+ FIXED_CLK(CLK_DUMMY, "topck_dummy", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk26m", 125 * MHZ),
+ FIXED_CLK(CLK_TOP_DSI0_DIG, "dsi0_dig", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_DSI1_DIG, "dsi1_dig", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_LVDS_PXL, "lvds_pxl", "lvdspll", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_LVDS_CTS, "lvds_cts", "lvdspll", DUMMY_RATE),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_ARMCA7PLL_754M, "armca7pll_754m", "armca7pll", 1, 2),
+ FACTOR(CLK_TOP_ARMCA7PLL_502M, "armca7pll_502m", "armca7pll", 1, 3),
+
+ FACTOR_FLAGS(CLK_TOP_MAIN_H546M, "main_h546m", "mainpll", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_MAIN_H364M, "main_h364m", "mainpll", 1, 3, 0),
+ FACTOR_FLAGS(CLK_TOP_MAIN_H218P4M, "main_h218p4m", "mainpll", 1, 5, 0),
+ FACTOR_FLAGS(CLK_TOP_MAIN_H156M, "main_h156m", "mainpll", 1, 7, 0),
+
+ FACTOR(CLK_TOP_TVDPLL_445P5M, "tvdpll_445p5m", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_594M, "tvdpll_594m", "tvdpll", 1, 3),
+
+ FACTOR_FLAGS(CLK_TOP_UNIV_624M, "univ_624m", "univpll", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIV_416M, "univ_416m", "univpll", 1, 3, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIV_249P6M, "univ_249p6m", "univpll", 1, 5, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIV_178P3M, "univ_178p3m", "univpll", 1, 7, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIV_48M, "univ_48m", "univpll", 1, 26, 0),
+
+ FACTOR(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", "clk32k", 1, 1),
+ FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
+ FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
+
+ FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
+ FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
+
+ FACTOR(CLK_TOP_ARMCA7PLL_D2, "armca7pll_d2", "armca7pll_754m", 1, 1),
+ FACTOR(CLK_TOP_ARMCA7PLL_D3, "armca7pll_d3", "armca7pll_502m", 1, 1),
+
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "clkph_mck_o", 1, 1),
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+ FACTOR(CLK_TOP_DMPLL_D16, "dmpll_d16", "clkph_mck_o", 1, 16),
+
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2", 1, 4),
+
+ FACTOR_FLAGS(CLK_TOP_SYSPLL_D2, "syspll_d2", "main_h546m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "main_h546m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "main_h546m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "main_h546m", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "main_h546m", 1, 16, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL_D3, "syspll_d3", "main_h364m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "main_h364m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "main_h364m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL_D5, "syspll_d5", "main_h218p4m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "main_h218p4m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "main_h218p4m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL_D7, "syspll_d7", "main_h156m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "main_h156m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "main_h156m", 1, 4, 0),
+
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll_594m", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_594m", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_594m", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_594m", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_594m", 1, 16),
+
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univ_624m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univ_624m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univ_624m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univ_624m", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univ_416m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univ_416m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univ_416m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univ_416m", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univ_249p6m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univ_249p6m", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univ_249p6m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univ_249p6m", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univ_178p3m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univ_48m", 1, 1, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univ_48m", 1, 2, 0),
+
+ FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 3),
+ FACTOR(CLK_TOP_VCODECPLL_370P5, "vcodecpll_370p5", "vcodecpll", 1, 4),
+
+ FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1),
+ FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll", 1, 2),
+ FACTOR(CLK_TOP_VENCPLL_D4, "vencpll_d4", "vencpll", 1, 4),
+};
+
+static const struct mtk_composite top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x0040, 0, 3),
+ MUX_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x0040, 8, 1,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel",
+ ddrphycfg_parents, 0x0040, 16, 1, 23,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x0040, 24, 4, 31),
+ /* CLK_CFG_1 */
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x0050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x0050, 8, 4, 15),
+ MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x0050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x0050, 24, 4, 31),
+ /* CLK_CFG_2 */
+ MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x0060, 0, 3, 7),
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x0060, 8, 1, 15),
+ MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x0060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x0060, 24, 2, 31),
+ /* CLK_CFG_3 */
+ MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x0070, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel", msdc50_0_h_parents,
+ 0x0070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents,
+ 0x0070, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents,
+ 0x0070, 24, 3, 31),
+ /* CLK_CFG_4 */
+ MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_2_parents,
+ 0x0080, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents,
+ 0x0080, 8, 4, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x0080, 16, 2, 23),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x0080, 24, 3, 31),
+ /* CLK_CFG_5 */
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x0090, 0, 3, 7 /* 7:5 */),
+ MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x0090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23),
+ MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents,
+ 0x0090, 24, 4, 31),
+ /* CLK_CFG_6 */
+ /*
+ * The dpi0_sel clock should not propagate rate changes to its parent
+ * clock so the dpi driver can have full control over PLL and divider.
+ */
+ MUX_GATE_FLAGS(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents,
+ 0x00a0, 0, 3, 7, 0),
+ MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
+ MUX_GATE_FLAGS(CLK_TOP_CCI400_SEL, "cci400_sel",
+ cci400_parents, 0x00a0, 16, 3, 23,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31),
+ /* CLK_CFG_7 */
+ MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0x00b0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MEM_MFG_IN_SEL, "mem_mfg_in_sel", mem_mfg_in_parents,
+ 0x00b0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents,
+ 0x00b0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0x00b0, 24, 2, 31),
+ /* CLK_CFG_12 */
+ MUX_GATE(CLK_TOP_SPINFI_IFR_SEL, "spinfi_ifr_sel", spinfi_ifr_parents,
+ 0x00c0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDMI_SEL, "hdmi_sel", hdmi_parents, 0x00c0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents,
+ 0x00c0, 24, 3, 31),
+ /* CLK_CFG_13 */
+ MUX_GATE(CLK_TOP_MSDC50_2_H_SEL, "msdc50_2_h_sel", msdc50_2_h_parents,
+ 0x00d0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel", hdcp_parents, 0x00d0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel", hdcp_24m_parents,
+ 0x00d0, 16, 2, 23),
+ MUX_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x00d0, 24, 2,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+
+ DIV_GATE(CLK_TOP_APLL1_DIV0, "apll1_div0", "aud_1_sel", 0x12c, 8, 0x120, 4, 24),
+ DIV_GATE(CLK_TOP_APLL1_DIV1, "apll1_div1", "aud_1_sel", 0x12c, 9, 0x124, 8, 0),
+ DIV_GATE(CLK_TOP_APLL1_DIV2, "apll1_div2", "aud_1_sel", 0x12c, 10, 0x124, 8, 8),
+ DIV_GATE(CLK_TOP_APLL1_DIV3, "apll1_div3", "aud_1_sel", 0x12c, 11, 0x124, 8, 16),
+ DIV_GATE(CLK_TOP_APLL1_DIV4, "apll1_div4", "aud_1_sel", 0x12c, 12, 0x124, 8, 24),
+ DIV_GATE(CLK_TOP_APLL1_DIV5, "apll1_div5", "apll1_div4", 0x12c, 13, 0x12c, 4, 0),
+
+ DIV_GATE(CLK_TOP_APLL2_DIV0, "apll2_div0", "aud_2_sel", 0x12c, 16, 0x120, 4, 28),
+ DIV_GATE(CLK_TOP_APLL2_DIV1, "apll2_div1", "aud_2_sel", 0x12c, 17, 0x128, 8, 0),
+ DIV_GATE(CLK_TOP_APLL2_DIV2, "apll2_div2", "aud_2_sel", 0x12c, 18, 0x128, 8, 8),
+ DIV_GATE(CLK_TOP_APLL2_DIV3, "apll2_div3", "aud_2_sel", 0x12c, 19, 0x128, 8, 16),
+ DIV_GATE(CLK_TOP_APLL2_DIV4, "apll2_div4", "aud_2_sel", 0x12c, 20, 0x128, 8, 24),
+ DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4),
+
+ MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, 0x120, 4, 1),
+ MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, 0x120, 5, 1),
+ MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", i2s2_m_ck_parents, 0x120, 6, 1),
+ MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, 0x120, 7, 1),
+ MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .clk_lock = &mt8173_top_clk_lock,
+};
+
+static const struct of_device_id of_match_clk_mt8173_topckgen[] = {
+ { .compatible = "mediatek,mt8173-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_topckgen);
+
+static struct platform_driver clk_mt8173_topckgen_drv = {
+ .driver = {
+ .name = "clk-mt8173-topckgen",
+ .of_match_table = of_match_clk_mt8173_topckgen,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt8173_topckgen_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 topckgen clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-vdecsys.c b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
new file mode 100644
index 000000000000..625ca0b09cc2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define GATE_VDEC(_id, _name, _parent, _regs) \
+ GATE_MTK(_id, _name, _parent, _regs, 0, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x0000,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x0008,
+};
+
+static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
+ GATE_VDEC(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", &vdec0_cg_regs),
+ GATE_VDEC(CLK_VDEC_LARB_CKEN, "vdec_larb_cken", "mm_sel", &vdec1_cg_regs),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8173_vdecsys[] = {
+ { .compatible = "mediatek,mt8173-vdecsys", .data = &vdec_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_vdecsys);
+
+static struct platform_driver clk_mt8173_vdecsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8173-vdecsys",
+ .of_match_table = of_match_clk_mt8173_vdecsys,
+ },
+};
+module_platform_driver(clk_mt8173_vdecsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 vdecsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-vencsys.c b/drivers/clk/mediatek/clk-mt8173-vencsys.c
new file mode 100644
index 000000000000..87755dd1a337
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-vencsys.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8173-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "venc_dummy"),
+ GATE_VENC(CLK_VENC_CKE0, "venc_cke0", "mm_sel", 0),
+ GATE_VENC(CLK_VENC_CKE1, "venc_cke1", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_CKE2, "venc_cke2", "venc_sel", 8),
+ GATE_VENC(CLK_VENC_CKE3, "venc_cke3", "venc_sel", 12),
+};
+
+static const struct mtk_gate venclt_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "venclt_dummy"),
+ GATE_VENC(CLK_VENCLT_CKE0, "venclt_cke0", "mm_sel", 0),
+ GATE_VENC(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct mtk_clk_desc venc_lt_desc = {
+ .clks = venclt_clks,
+ .num_clks = ARRAY_SIZE(venclt_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8173_vencsys[] = {
+ { .compatible = "mediatek,mt8173-vencsys", .data = &venc_desc },
+ { .compatible = "mediatek,mt8173-vencltsys", .data = &venc_lt_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_vencsys);
+
+static struct platform_driver clk_mt8173_vencsys_drv = {
+ .driver = {
+ .name = "clk-mt8173-vencsys",
+ .of_match_table = of_match_clk_mt8173_vencsys,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt8173_vencsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8173 vencsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
deleted file mode 100644
index b57e33cda7a5..000000000000
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ /dev/null
@@ -1,1125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: James Liao <jamesjj.liao@mediatek.com>
- */
-
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "clk-cpumux.h"
-#include "clk-gate.h"
-#include "clk-mtk.h"
-#include "clk-pll.h"
-
-#include <dt-bindings/clock/mt8173-clk.h>
-
-/*
- * For some clocks, we don't care what their actual rates are. And these
- * clocks may change their rate on different products or different scenarios.
- * So we model these clocks' rate as 0, to denote it's not an actual rate.
- */
-#define DUMMY_RATE 0
-
-static DEFINE_SPINLOCK(mt8173_clk_lock);
-
-static const struct mtk_fixed_clk fixed_clks[] __initconst = {
- FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk26m", DUMMY_RATE),
- FIXED_CLK(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk26m", 125 * MHZ),
- FIXED_CLK(CLK_TOP_DSI0_DIG, "dsi0_dig", "clk26m", DUMMY_RATE),
- FIXED_CLK(CLK_TOP_DSI1_DIG, "dsi1_dig", "clk26m", DUMMY_RATE),
- FIXED_CLK(CLK_TOP_LVDS_PXL, "lvds_pxl", "lvdspll", DUMMY_RATE),
- FIXED_CLK(CLK_TOP_LVDS_CTS, "lvds_cts", "lvdspll", DUMMY_RATE),
-};
-
-static const struct mtk_fixed_factor top_divs[] __initconst = {
- FACTOR(CLK_TOP_ARMCA7PLL_754M, "armca7pll_754m", "armca7pll", 1, 2),
- FACTOR(CLK_TOP_ARMCA7PLL_502M, "armca7pll_502m", "armca7pll", 1, 3),
-
- FACTOR_FLAGS(CLK_TOP_MAIN_H546M, "main_h546m", "mainpll", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_MAIN_H364M, "main_h364m", "mainpll", 1, 3, 0),
- FACTOR_FLAGS(CLK_TOP_MAIN_H218P4M, "main_h218p4m", "mainpll", 1, 5, 0),
- FACTOR_FLAGS(CLK_TOP_MAIN_H156M, "main_h156m", "mainpll", 1, 7, 0),
-
- FACTOR(CLK_TOP_TVDPLL_445P5M, "tvdpll_445p5m", "tvdpll", 1, 4),
- FACTOR(CLK_TOP_TVDPLL_594M, "tvdpll_594m", "tvdpll", 1, 3),
-
- FACTOR_FLAGS(CLK_TOP_UNIV_624M, "univ_624m", "univpll", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_UNIV_416M, "univ_416m", "univpll", 1, 3, 0),
- FACTOR_FLAGS(CLK_TOP_UNIV_249P6M, "univ_249p6m", "univpll", 1, 5, 0),
- FACTOR_FLAGS(CLK_TOP_UNIV_178P3M, "univ_178p3m", "univpll", 1, 7, 0),
- FACTOR_FLAGS(CLK_TOP_UNIV_48M, "univ_48m", "univpll", 1, 26, 0),
-
- FACTOR(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", "clk32k", 1, 1),
- FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
- FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
-
- FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
- FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
-
- FACTOR(CLK_TOP_ARMCA7PLL_D2, "armca7pll_d2", "armca7pll_754m", 1, 1),
- FACTOR(CLK_TOP_ARMCA7PLL_D3, "armca7pll_d3", "armca7pll_502m", 1, 1),
-
- FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
- FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
-
- FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "clkph_mck_o", 1, 1),
- FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
- FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
- FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
- FACTOR(CLK_TOP_DMPLL_D16, "dmpll_d16", "clkph_mck_o", 1, 16),
-
- FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
- FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
- FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
-
- FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
- FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
-
- FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
- FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
- FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
- FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1),
- FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2", 1, 2),
- FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2", 1, 4),
-
- FACTOR_FLAGS(CLK_TOP_SYSPLL_D2, "syspll_d2", "main_h546m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "main_h546m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "main_h546m", 1, 4, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "main_h546m", 1, 8, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "main_h546m", 1, 16, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL_D3, "syspll_d3", "main_h364m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "main_h364m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "main_h364m", 1, 4, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL_D5, "syspll_d5", "main_h218p4m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "main_h218p4m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "main_h218p4m", 1, 4, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL_D7, "syspll_d7", "main_h156m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "main_h156m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "main_h156m", 1, 4, 0),
-
- FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll_594m", 1, 1),
- FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_594m", 1, 2),
- FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_594m", 1, 4),
- FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_594m", 1, 8),
- FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_594m", 1, 16),
-
- FACTOR_FLAGS(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univ_624m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univ_624m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univ_624m", 1, 4, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univ_624m", 1, 8, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univ_416m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univ_416m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univ_416m", 1, 4, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univ_416m", 1, 8, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univ_249p6m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univ_249p6m", 1, 2, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univ_249p6m", 1, 4, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univ_249p6m", 1, 8, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univ_178p3m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univ_48m", 1, 1, 0),
- FACTOR_FLAGS(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univ_48m", 1, 2, 0),
-
- FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 3),
- FACTOR(CLK_TOP_VCODECPLL_370P5, "vcodecpll_370p5", "vcodecpll", 1, 4),
-
- FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1),
- FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll", 1, 2),
- FACTOR(CLK_TOP_VENCPLL_D4, "vencpll_d4", "vencpll", 1, 4),
-};
-
-static const char * const axi_parents[] __initconst = {
- "clk26m",
- "syspll1_d2",
- "syspll_d5",
- "syspll1_d4",
- "univpll_d5",
- "univpll2_d2",
- "dmpll_d2",
- "dmpll_d4"
-};
-
-static const char * const mem_parents[] __initconst = {
- "clk26m",
- "dmpll_ck"
-};
-
-static const char * const ddrphycfg_parents[] __initconst = {
- "clk26m",
- "syspll1_d8"
-};
-
-static const char * const mm_parents[] __initconst = {
- "clk26m",
- "vencpll_d2",
- "main_h364m",
- "syspll1_d2",
- "syspll_d5",
- "syspll1_d4",
- "univpll1_d2",
- "univpll2_d2",
- "dmpll_d2"
-};
-
-static const char * const pwm_parents[] __initconst = {
- "clk26m",
- "univpll2_d4",
- "univpll3_d2",
- "univpll1_d4"
-};
-
-static const char * const vdec_parents[] __initconst = {
- "clk26m",
- "vcodecpll_ck",
- "tvdpll_445p5m",
- "univpll_d3",
- "vencpll_d2",
- "syspll_d3",
- "univpll1_d2",
- "mmpll_d2",
- "dmpll_d2",
- "dmpll_d4"
-};
-
-static const char * const venc_parents[] __initconst = {
- "clk26m",
- "vcodecpll_ck",
- "tvdpll_445p5m",
- "univpll_d3",
- "vencpll_d2",
- "syspll_d3",
- "univpll1_d2",
- "univpll2_d2",
- "dmpll_d2",
- "dmpll_d4"
-};
-
-static const char * const mfg_parents[] __initconst = {
- "clk26m",
- "mmpll_ck",
- "dmpll_ck",
- "clk26m",
- "clk26m",
- "clk26m",
- "clk26m",
- "clk26m",
- "clk26m",
- "syspll_d3",
- "syspll1_d2",
- "syspll_d5",
- "univpll_d3",
- "univpll1_d2",
- "univpll_d5",
- "univpll2_d2"
-};
-
-static const char * const camtg_parents[] __initconst = {
- "clk26m",
- "univpll_d26",
- "univpll2_d2",
- "syspll3_d2",
- "syspll3_d4",
- "univpll1_d4"
-};
-
-static const char * const uart_parents[] __initconst = {
- "clk26m",
- "univpll2_d8"
-};
-
-static const char * const spi_parents[] __initconst = {
- "clk26m",
- "syspll3_d2",
- "syspll1_d4",
- "syspll4_d2",
- "univpll3_d2",
- "univpll2_d4",
- "univpll1_d8"
-};
-
-static const char * const usb20_parents[] __initconst = {
- "clk26m",
- "univpll1_d8",
- "univpll3_d4"
-};
-
-static const char * const usb30_parents[] __initconst = {
- "clk26m",
- "univpll3_d2",
- "usb_syspll_125m",
- "univpll2_d4"
-};
-
-static const char * const msdc50_0_h_parents[] __initconst = {
- "clk26m",
- "syspll1_d2",
- "syspll2_d2",
- "syspll4_d2",
- "univpll_d5",
- "univpll1_d4"
-};
-
-static const char * const msdc50_0_parents[] __initconst = {
- "clk26m",
- "msdcpll_ck",
- "msdcpll_d2",
- "univpll1_d4",
- "syspll2_d2",
- "syspll_d7",
- "msdcpll_d4",
- "vencpll_d4",
- "tvdpll_ck",
- "univpll_d2",
- "univpll1_d2",
- "mmpll_ck",
- "msdcpll2_ck",
- "msdcpll2_d2",
- "msdcpll2_d4"
-};
-
-static const char * const msdc30_1_parents[] __initconst = {
- "clk26m",
- "univpll2_d2",
- "msdcpll_d4",
- "univpll1_d4",
- "syspll2_d2",
- "syspll_d7",
- "univpll_d7",
- "vencpll_d4"
-};
-
-static const char * const msdc30_2_parents[] __initconst = {
- "clk26m",
- "univpll2_d2",
- "msdcpll_d4",
- "univpll1_d4",
- "syspll2_d2",
- "syspll_d7",
- "univpll_d7",
- "vencpll_d2"
-};
-
-static const char * const msdc30_3_parents[] __initconst = {
- "clk26m",
- "msdcpll2_ck",
- "msdcpll2_d2",
- "univpll2_d2",
- "msdcpll2_d4",
- "msdcpll_d4",
- "univpll1_d4",
- "syspll2_d2",
- "syspll_d7",
- "univpll_d7",
- "vencpll_d4",
- "msdcpll_ck",
- "msdcpll_d2",
- "msdcpll_d4"
-};
-
-static const char * const audio_parents[] __initconst = {
- "clk26m",
- "syspll3_d4",
- "syspll4_d4",
- "syspll1_d16"
-};
-
-static const char * const aud_intbus_parents[] __initconst = {
- "clk26m",
- "syspll1_d4",
- "syspll4_d2",
- "univpll3_d2",
- "univpll2_d8",
- "dmpll_d4",
- "dmpll_d8"
-};
-
-static const char * const pmicspi_parents[] __initconst = {
- "clk26m",
- "syspll1_d8",
- "syspll3_d4",
- "syspll1_d16",
- "univpll3_d4",
- "univpll_d26",
- "dmpll_d8",
- "dmpll_d16"
-};
-
-static const char * const scp_parents[] __initconst = {
- "clk26m",
- "syspll1_d2",
- "univpll_d5",
- "syspll_d5",
- "dmpll_d2",
- "dmpll_d4"
-};
-
-static const char * const atb_parents[] __initconst = {
- "clk26m",
- "syspll1_d2",
- "univpll_d5",
- "dmpll_d2"
-};
-
-static const char * const venc_lt_parents[] __initconst = {
- "clk26m",
- "univpll_d3",
- "vcodecpll_ck",
- "tvdpll_445p5m",
- "vencpll_d2",
- "syspll_d3",
- "univpll1_d2",
- "univpll2_d2",
- "syspll1_d2",
- "univpll_d5",
- "vcodecpll_370p5",
- "dmpll_ck"
-};
-
-static const char * const dpi0_parents[] __initconst = {
- "clk26m",
- "tvdpll_d2",
- "tvdpll_d4",
- "clk26m",
- "clk26m",
- "tvdpll_d8",
- "tvdpll_d16"
-};
-
-static const char * const irda_parents[] __initconst = {
- "clk26m",
- "univpll2_d4",
- "syspll2_d4"
-};
-
-static const char * const cci400_parents[] __initconst = {
- "clk26m",
- "vencpll_ck",
- "armca7pll_754m",
- "armca7pll_502m",
- "univpll_d2",
- "syspll_d2",
- "msdcpll_ck",
- "dmpll_ck"
-};
-
-static const char * const aud_1_parents[] __initconst = {
- "clk26m",
- "apll1_ck",
- "univpll2_d4",
- "univpll2_d8"
-};
-
-static const char * const aud_2_parents[] __initconst = {
- "clk26m",
- "apll2_ck",
- "univpll2_d4",
- "univpll2_d8"
-};
-
-static const char * const mem_mfg_in_parents[] __initconst = {
- "clk26m",
- "mmpll_ck",
- "dmpll_ck",
- "clk26m"
-};
-
-static const char * const axi_mfg_in_parents[] __initconst = {
- "clk26m",
- "axi_sel",
- "dmpll_d2"
-};
-
-static const char * const scam_parents[] __initconst = {
- "clk26m",
- "syspll3_d2",
- "univpll2_d4",
- "dmpll_d4"
-};
-
-static const char * const spinfi_ifr_parents[] __initconst = {
- "clk26m",
- "univpll2_d8",
- "univpll3_d4",
- "syspll4_d2",
- "univpll2_d4",
- "univpll3_d2",
- "syspll1_d4",
- "univpll1_d4"
-};
-
-static const char * const hdmi_parents[] __initconst = {
- "clk26m",
- "hdmitx_dig_cts",
- "hdmitxpll_d2",
- "hdmitxpll_d3"
-};
-
-static const char * const dpilvds_parents[] __initconst = {
- "clk26m",
- "lvdspll",
- "lvdspll_d2",
- "lvdspll_d4",
- "lvdspll_d8",
- "fpc_ck"
-};
-
-static const char * const msdc50_2_h_parents[] __initconst = {
- "clk26m",
- "syspll1_d2",
- "syspll2_d2",
- "syspll4_d2",
- "univpll_d5",
- "univpll1_d4"
-};
-
-static const char * const hdcp_parents[] __initconst = {
- "clk26m",
- "syspll4_d2",
- "syspll3_d4",
- "univpll2_d4"
-};
-
-static const char * const hdcp_24m_parents[] __initconst = {
- "clk26m",
- "univpll_d26",
- "univpll_d52",
- "univpll2_d8"
-};
-
-static const char * const rtc_parents[] __initconst = {
- "clkrtc_int",
- "clkrtc_ext",
- "clk26m",
- "univpll3_d8"
-};
-
-static const char * const i2s0_m_ck_parents[] __initconst = {
- "apll1_div1",
- "apll2_div1"
-};
-
-static const char * const i2s1_m_ck_parents[] __initconst = {
- "apll1_div2",
- "apll2_div2"
-};
-
-static const char * const i2s2_m_ck_parents[] __initconst = {
- "apll1_div3",
- "apll2_div3"
-};
-
-static const char * const i2s3_m_ck_parents[] __initconst = {
- "apll1_div4",
- "apll2_div4"
-};
-
-static const char * const i2s3_b_ck_parents[] __initconst = {
- "apll1_div5",
- "apll2_div5"
-};
-
-static const char * const ca53_parents[] __initconst = {
- "clk26m",
- "armca7pll",
- "mainpll",
- "univpll"
-};
-
-static const char * const ca72_parents[] __initconst = {
- "clk26m",
- "armca15pll",
- "mainpll",
- "univpll"
-};
-
-static const struct mtk_composite cpu_muxes[] __initconst = {
- MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2),
- MUX(CLK_INFRA_CA72SEL, "infra_ca72_sel", ca72_parents, 0x0000, 2, 2),
-};
-
-static const struct mtk_composite top_muxes[] __initconst = {
- /* CLK_CFG_0 */
- MUX(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x0040, 0, 3),
- MUX(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x0040, 8, 1),
- MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents, 0x0040, 16, 1, 23),
- MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x0040, 24, 4, 31),
- /* CLK_CFG_1 */
- MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x0050, 0, 2, 7),
- MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x0050, 8, 4, 15),
- MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x0050, 16, 4, 23),
- MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x0050, 24, 4, 31),
- /* CLK_CFG_2 */
- MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x0060, 0, 3, 7),
- MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x0060, 8, 1, 15),
- MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x0060, 16, 3, 23),
- MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x0060, 24, 2, 31),
- /* CLK_CFG_3 */
- MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x0070, 0, 2, 7),
- MUX_GATE(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel", msdc50_0_h_parents, 0x0070, 8, 3, 15),
- MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents, 0x0070, 16, 4, 23),
- MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents, 0x0070, 24, 3, 31),
- /* CLK_CFG_4 */
- MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_2_parents, 0x0080, 0, 3, 7),
- MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents, 0x0080, 8, 4, 15),
- MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x0080, 16, 2, 23),
- MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents, 0x0080, 24, 3, 31),
- /* CLK_CFG_5 */
- MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x0090, 0, 3, 7 /* 7:5 */),
- MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x0090, 8, 3, 15),
- MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23),
- MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents, 0x0090, 24, 4, 31),
- /* CLK_CFG_6 */
- /*
- * The dpi0_sel clock should not propagate rate changes to its parent
- * clock so the dpi driver can have full control over PLL and divider.
- */
- MUX_GATE_FLAGS(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7, 0),
- MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
- MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x00a0, 16, 3, 23),
- MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31),
- /* CLK_CFG_7 */
- MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0x00b0, 0, 2, 7),
- MUX_GATE(CLK_TOP_MEM_MFG_IN_SEL, "mem_mfg_in_sel", mem_mfg_in_parents, 0x00b0, 8, 2, 15),
- MUX_GATE(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents, 0x00b0, 16, 2, 23),
- MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0x00b0, 24, 2, 31),
- /* CLK_CFG_12 */
- MUX_GATE(CLK_TOP_SPINFI_IFR_SEL, "spinfi_ifr_sel", spinfi_ifr_parents, 0x00c0, 0, 3, 7),
- MUX_GATE(CLK_TOP_HDMI_SEL, "hdmi_sel", hdmi_parents, 0x00c0, 8, 2, 15),
- MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents, 0x00c0, 24, 3, 31),
- /* CLK_CFG_13 */
- MUX_GATE(CLK_TOP_MSDC50_2_H_SEL, "msdc50_2_h_sel", msdc50_2_h_parents, 0x00d0, 0, 3, 7),
- MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel", hdcp_parents, 0x00d0, 8, 2, 15),
- MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel", hdcp_24m_parents, 0x00d0, 16, 2, 23),
- MUX(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x00d0, 24, 2),
-
- DIV_GATE(CLK_TOP_APLL1_DIV0, "apll1_div0", "aud_1_sel", 0x12c, 8, 0x120, 4, 24),
- DIV_GATE(CLK_TOP_APLL1_DIV1, "apll1_div1", "aud_1_sel", 0x12c, 9, 0x124, 8, 0),
- DIV_GATE(CLK_TOP_APLL1_DIV2, "apll1_div2", "aud_1_sel", 0x12c, 10, 0x124, 8, 8),
- DIV_GATE(CLK_TOP_APLL1_DIV3, "apll1_div3", "aud_1_sel", 0x12c, 11, 0x124, 8, 16),
- DIV_GATE(CLK_TOP_APLL1_DIV4, "apll1_div4", "aud_1_sel", 0x12c, 12, 0x124, 8, 24),
- DIV_GATE(CLK_TOP_APLL1_DIV5, "apll1_div5", "apll1_div4", 0x12c, 13, 0x12c, 4, 0),
-
- DIV_GATE(CLK_TOP_APLL2_DIV0, "apll2_div0", "aud_2_sel", 0x12c, 16, 0x120, 4, 28),
- DIV_GATE(CLK_TOP_APLL2_DIV1, "apll2_div1", "aud_2_sel", 0x12c, 17, 0x128, 8, 0),
- DIV_GATE(CLK_TOP_APLL2_DIV2, "apll2_div2", "aud_2_sel", 0x12c, 18, 0x128, 8, 8),
- DIV_GATE(CLK_TOP_APLL2_DIV3, "apll2_div3", "aud_2_sel", 0x12c, 19, 0x128, 8, 16),
- DIV_GATE(CLK_TOP_APLL2_DIV4, "apll2_div4", "aud_2_sel", 0x12c, 20, 0x128, 8, 24),
- DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4),
-
- MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, 0x120, 4, 1),
- MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, 0x120, 5, 1),
- MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", i2s2_m_ck_parents, 0x120, 6, 1),
- MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, 0x120, 7, 1),
- MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1),
-};
-
-static const struct mtk_gate_regs infra_cg_regs __initconst = {
- .set_ofs = 0x0040,
- .clr_ofs = 0x0044,
- .sta_ofs = 0x0048,
-};
-
-#define GATE_ICG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate infra_clks[] __initconst = {
- GATE_ICG(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
- GATE_ICG(CLK_INFRA_SMI, "infra_smi", "mm_sel", 1),
- GATE_ICG(CLK_INFRA_AUDIO, "infra_audio", "aud_intbus_sel", 5),
- GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
- GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7),
- GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
- GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "cpum_ck", 15),
- GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
- GATE_ICG(CLK_INFRA_CEC, "infra_cec", "clk26m", 18),
- GATE_ICG(CLK_INFRA_PMICSPI, "infra_pmicspi", "pmicspi_sel", 22),
- GATE_ICG(CLK_INFRA_PMICWRAP, "infra_pmicwrap", "axi_sel", 23),
-};
-
-static const struct mtk_fixed_factor infra_divs[] __initconst = {
- FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
-};
-
-static const struct mtk_gate_regs peri0_cg_regs __initconst = {
- .set_ofs = 0x0008,
- .clr_ofs = 0x0010,
- .sta_ofs = 0x0018,
-};
-
-static const struct mtk_gate_regs peri1_cg_regs __initconst = {
- .set_ofs = 0x000c,
- .clr_ofs = 0x0014,
- .sta_ofs = 0x001c,
-};
-
-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate peri_gates[] __initconst = {
- /* PERI0 */
- GATE_PERI0(CLK_PERI_NFI, "peri_nfi", "axi_sel", 0),
- GATE_PERI0(CLK_PERI_THERM, "peri_therm", "axi_sel", 1),
- GATE_PERI0(CLK_PERI_PWM1, "peri_pwm1", "axi_sel", 2),
- GATE_PERI0(CLK_PERI_PWM2, "peri_pwm2", "axi_sel", 3),
- GATE_PERI0(CLK_PERI_PWM3, "peri_pwm3", "axi_sel", 4),
- GATE_PERI0(CLK_PERI_PWM4, "peri_pwm4", "axi_sel", 5),
- GATE_PERI0(CLK_PERI_PWM5, "peri_pwm5", "axi_sel", 6),
- GATE_PERI0(CLK_PERI_PWM6, "peri_pwm6", "axi_sel", 7),
- GATE_PERI0(CLK_PERI_PWM7, "peri_pwm7", "axi_sel", 8),
- GATE_PERI0(CLK_PERI_PWM, "peri_pwm", "axi_sel", 9),
- GATE_PERI0(CLK_PERI_USB0, "peri_usb0", "usb20_sel", 10),
- GATE_PERI0(CLK_PERI_USB1, "peri_usb1", "usb20_sel", 11),
- GATE_PERI0(CLK_PERI_AP_DMA, "peri_ap_dma", "axi_sel", 12),
- GATE_PERI0(CLK_PERI_MSDC30_0, "peri_msdc30_0", "msdc50_0_sel", 13),
- GATE_PERI0(CLK_PERI_MSDC30_1, "peri_msdc30_1", "msdc30_1_sel", 14),
- GATE_PERI0(CLK_PERI_MSDC30_2, "peri_msdc30_2", "msdc30_2_sel", 15),
- GATE_PERI0(CLK_PERI_MSDC30_3, "peri_msdc30_3", "msdc30_3_sel", 16),
- GATE_PERI0(CLK_PERI_NLI_ARB, "peri_nli_arb", "axi_sel", 17),
- GATE_PERI0(CLK_PERI_IRDA, "peri_irda", "irda_sel", 18),
- GATE_PERI0(CLK_PERI_UART0, "peri_uart0", "axi_sel", 19),
- GATE_PERI0(CLK_PERI_UART1, "peri_uart1", "axi_sel", 20),
- GATE_PERI0(CLK_PERI_UART2, "peri_uart2", "axi_sel", 21),
- GATE_PERI0(CLK_PERI_UART3, "peri_uart3", "axi_sel", 22),
- GATE_PERI0(CLK_PERI_I2C0, "peri_i2c0", "axi_sel", 23),
- GATE_PERI0(CLK_PERI_I2C1, "peri_i2c1", "axi_sel", 24),
- GATE_PERI0(CLK_PERI_I2C2, "peri_i2c2", "axi_sel", 25),
- GATE_PERI0(CLK_PERI_I2C3, "peri_i2c3", "axi_sel", 26),
- GATE_PERI0(CLK_PERI_I2C4, "peri_i2c4", "axi_sel", 27),
- GATE_PERI0(CLK_PERI_AUXADC, "peri_auxadc", "clk26m", 28),
- GATE_PERI0(CLK_PERI_SPI0, "peri_spi0", "spi_sel", 29),
- GATE_PERI0(CLK_PERI_I2C5, "peri_i2c5", "axi_sel", 30),
- GATE_PERI0(CLK_PERI_NFIECC, "peri_nfiecc", "axi_sel", 31),
- /* PERI1 */
- GATE_PERI1(CLK_PERI_SPI, "peri_spi", "spi_sel", 0),
- GATE_PERI1(CLK_PERI_IRRX, "peri_irrx", "spi_sel", 1),
- GATE_PERI1(CLK_PERI_I2C6, "peri_i2c6", "axi_sel", 2),
-};
-
-static const char * const uart_ck_sel_parents[] __initconst = {
- "clk26m",
- "uart_sel",
-};
-
-static const struct mtk_composite peri_clks[] __initconst = {
- MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
- MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
- MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
- MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
-};
-
-static const struct mtk_gate_regs cg_regs_4_8_0 __initconst = {
- .set_ofs = 0x0004,
- .clr_ofs = 0x0008,
- .sta_ofs = 0x0000,
-};
-
-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &cg_regs_4_8_0, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate img_clks[] __initconst = {
- GATE_IMG(CLK_IMG_LARB2_SMI, "img_larb2_smi", "mm_sel", 0),
- GATE_IMG(CLK_IMG_CAM_SMI, "img_cam_smi", "mm_sel", 5),
- GATE_IMG(CLK_IMG_CAM_CAM, "img_cam_cam", "mm_sel", 6),
- GATE_IMG(CLK_IMG_SEN_TG, "img_sen_tg", "camtg_sel", 7),
- GATE_IMG(CLK_IMG_SEN_CAM, "img_sen_cam", "mm_sel", 8),
- GATE_IMG(CLK_IMG_CAM_SV, "img_cam_sv", "mm_sel", 9),
- GATE_IMG(CLK_IMG_FD, "img_fd", "mm_sel", 11),
-};
-
-static const struct mtk_gate_regs vdec0_cg_regs __initconst = {
- .set_ofs = 0x0000,
- .clr_ofs = 0x0004,
- .sta_ofs = 0x0000,
-};
-
-static const struct mtk_gate_regs vdec1_cg_regs __initconst = {
- .set_ofs = 0x0008,
- .clr_ofs = 0x000c,
- .sta_ofs = 0x0008,
-};
-
-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-static const struct mtk_gate vdec_clks[] __initconst = {
- GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", 0),
- GATE_VDEC1(CLK_VDEC_LARB_CKEN, "vdec_larb_cken", "mm_sel", 0),
-};
-
-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &cg_regs_4_8_0, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-static const struct mtk_gate venc_clks[] __initconst = {
- GATE_VENC(CLK_VENC_CKE0, "venc_cke0", "mm_sel", 0),
- GATE_VENC(CLK_VENC_CKE1, "venc_cke1", "venc_sel", 4),
- GATE_VENC(CLK_VENC_CKE2, "venc_cke2", "venc_sel", 8),
- GATE_VENC(CLK_VENC_CKE3, "venc_cke3", "venc_sel", 12),
-};
-
-#define GATE_VENCLT(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &cg_regs_4_8_0, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-static const struct mtk_gate venclt_clks[] __initconst = {
- GATE_VENCLT(CLK_VENCLT_CKE0, "venclt_cke0", "mm_sel", 0),
- GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
-};
-
-static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
-static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
-
-static const struct mtk_clk_rst_desc clk_rst_desc[] = {
- /* infrasys */
- {
- .version = MTK_RST_SIMPLE,
- .rst_bank_ofs = infrasys_rst_ofs,
- .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
- },
- /* pericfg */
- {
- .version = MTK_RST_SIMPLE,
- .rst_bank_ofs = pericfg_rst_ofs,
- .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
- }
-};
-
-static struct clk_hw_onecell_data *mt8173_top_clk_data __initdata;
-static struct clk_hw_onecell_data *mt8173_pll_clk_data __initdata;
-
-static void __init mtk_clk_enable_critical(void)
-{
- if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
- return;
-
- clk_prepare_enable(mt8173_pll_clk_data->hws[CLK_APMIXED_ARMCA15PLL]->clk);
- clk_prepare_enable(mt8173_pll_clk_data->hws[CLK_APMIXED_ARMCA7PLL]->clk);
- clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_MEM_SEL]->clk);
- clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
- clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_CCI400_SEL]->clk);
- clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_RTC_SEL]->clk);
-}
-
-static void __init mtk_topckgen_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- int r;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
- mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8173_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_clk_enable_critical();
-}
-CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
-
-static void __init mtk_infrasys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
- mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
-
- mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_register_reset_controller(node, &clk_rst_desc[0]);
-}
-CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init);
-
-static void __init mtk_pericfg_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- void __iomem *base;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
-
- mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
- clk_data);
- mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
- &mt8173_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_register_reset_controller(node, &clk_rst_desc[1]);
-}
-CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
-
-struct mtk_clk_usb {
- int id;
- const char *name;
- const char *parent;
- u32 reg_ofs;
-};
-
-#define APMIXED_USB(_id, _name, _parent, _reg_ofs) { \
- .id = _id, \
- .name = _name, \
- .parent = _parent, \
- .reg_ofs = _reg_ofs, \
- }
-
-static const struct mtk_clk_usb apmixed_usb[] __initconst = {
- APMIXED_USB(CLK_APMIXED_REF2USB_TX, "ref2usb_tx", "clk26m", 0x8),
-};
-
-#define MT8173_PLL_FMAX (3000UL * MHZ)
-
-#define CON0_MT8173_RST_BAR BIT(24)
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift, _div_table) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = CON0_MT8173_RST_BAR, \
- .fmax = MT8173_PLL_FMAX, \
- .pcwbits = _pcwbits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .div_table = _div_table, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift) \
- PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
- NULL)
-
-static const struct mtk_pll_div_table mmpll_div_table[] = {
- { .div = 0, .freq = MT8173_PLL_FMAX },
- { .div = 1, .freq = 1000000000 },
- { .div = 2, .freq = 702000000 },
- { .div = 3, .freq = 253500000 },
- { .div = 4, .freq = 126750000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0, 0, 21, 0x204, 24, 0x0, 0x204, 0),
- PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0, 0, 21, 0x214, 24, 0x0, 0x214, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000100, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000000, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0, 0, 21, 0x250, 4, 0x0, 0x254, 0),
- PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0, 0, 21, 0x260, 4, 0x0, 0x264, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0, 0, 21, 0x270, 4, 0x0, 0x274, 0),
- PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0, 0, 21, 0x280, 4, 0x0, 0x284, 0),
- PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0, 0, 21, 0x290, 4, 0x0, 0x294, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0),
- PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0),
-};
-
-static void __init mtk_apmixedsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- struct clk_hw *hw;
- int r, i;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
- if (!clk_data) {
- iounmap(base);
- return;
- }
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-
- for (i = 0; i < ARRAY_SIZE(apmixed_usb); i++) {
- const struct mtk_clk_usb *cku = &apmixed_usb[i];
-
- hw = mtk_clk_register_ref2usb_tx(cku->name, cku->parent, base + cku->reg_ofs);
- if (IS_ERR(hw)) {
- pr_err("Failed to register clk %s: %ld\n", cku->name, PTR_ERR(hw));
- continue;
- }
-
- clk_data->hws[cku->id] = hw;
- }
-
- hw = clk_hw_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
- base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO,
- NULL);
- clk_data->hws[CLK_APMIXED_HDMI_REF] = hw;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- mtk_clk_enable_critical();
-}
-CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
- mtk_apmixedsys_init);
-
-static void __init mtk_imgsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init);
-
-static void __init mtk_vdecsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8173-vdecsys", mtk_vdecsys_init);
-
-static void __init mtk_vencsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_vencsys, "mediatek,mt8173-vencsys", mtk_vencsys_init);
-
-static void __init mtk_vencltsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_VENCLT_NR_CLK);
-
- mtk_clk_register_gates(node, venclt_clks, ARRAY_SIZE(venclt_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_vencltsys, "mediatek,mt8173-vencltsys", mtk_vencltsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8183-apmixedsys.c b/drivers/clk/mediatek/clk-mt8183-apmixedsys.c
new file mode 100644
index 000000000000..2b261c0e2b61
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-apmixedsys.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Weiyi Lu <weiyi.lu@mediatek.com>
+ * Copyright (c) 2023 Collabora, Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8183-clk.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x20,
+ .clr_ofs = 0x20,
+ .sta_ofs = 0x20,
+};
+
+#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr_inv, _flags)
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, 0)
+
+/*
+ * CRITICAL CLOCK:
+ * apmixed_appll26m is the topmost clock gate of all PLLs.
+ */
+static const struct mtk_gate apmixed_clks[] = {
+ /* AUDIO0 */
+ GATE_APMIXED(CLK_APMIXED_SSUSB_26M, "apmixed_ssusb26m", "f_f26m_ck", 4),
+ GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL_26M, "apmixed_appll26m",
+ "f_f26m_ck", 5, CLK_IS_CRITICAL),
+ GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m", "f_f26m_ck", 6),
+ GATE_APMIXED(CLK_APMIXED_MDPLLGP_26M, "apmixed_mdpll26m", "f_f26m_ck", 7),
+ GATE_APMIXED(CLK_APMIXED_MMSYS_26M, "apmixed_mmsys26m", "f_f26m_ck", 8),
+ GATE_APMIXED(CLK_APMIXED_UFS_26M, "apmixed_ufs26m", "f_f26m_ck", 9),
+ GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m", "f_f26m_ck", 11),
+ GATE_APMIXED(CLK_APMIXED_MEMPLL_26M, "apmixed_mempll26m", "f_f26m_ck", 13),
+ GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m", "f_f26m_ck", 14),
+ GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m", "f_f26m_ck", 16),
+ GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m", "f_f26m_ck", 17),
+};
+
+#define MT8183_PLL_FMAX (3800UL * MHZ)
+#define MT8183_PLL_FMIN (1500UL * MHZ)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8183_PLL_FMAX, \
+ .fmin = MT8183_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = _pcwibits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg, NULL)
+
+static const struct mtk_pll_div_table armpll_div_table[] = {
+ { .div = 0, .freq = MT8183_PLL_FMAX },
+ { .div = 1, .freq = 1500 * MHZ },
+ { .div = 2, .freq = 750 * MHZ },
+ { .div = 3, .freq = 375 * MHZ },
+ { .div = 4, .freq = 187500000 },
+ { /* sentinel */ }
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+ { .div = 0, .freq = MT8183_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 800 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { /* sentinel */ }
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0,
+ 0x0204, 0, 0, armpll_div_table),
+ PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0,
+ 0x0214, 0, 0, armpll_div_table),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0,
+ 0x0294, 0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0,
+ HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0,
+ 0x0224, 0, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0,
+ HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0,
+ 0x0234, 0, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0,
+ 0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0,
+ mfgpll_div_table),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0,
+ 0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0,
+ 0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0,
+ HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0,
+ 0x0274, 0, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0,
+ 0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0,
+ 0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
+};
+
+static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static const struct of_device_id of_match_clk_mt8183_apmixed[] = {
+ { .compatible = "mediatek,mt8183-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_apmixed);
+
+static struct platform_driver clk_mt8183_apmixed_drv = {
+ .probe = clk_mt8183_apmixed_probe,
+ .driver = {
+ .name = "clk-mt8183-apmixed",
+ .of_match_table = of_match_clk_mt8183_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8183_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
index b2d7746eddbe..9938c6466e76 100644
--- a/drivers/clk/mediatek/clk-mt8183-audio.c
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -67,39 +67,45 @@ static const struct mtk_gate audio_clks[] = {
20),
};
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
+};
+
static int clk_mt8183_audio_probe(struct platform_device *pdev)
{
- struct clk_hw_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
-
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ r = mtk_clk_simple_probe(pdev);
if (r)
return r;
r = devm_of_platform_populate(&pdev->dev);
if (r)
- of_clk_del_provider(node);
+ mtk_clk_simple_remove(pdev);
return r;
}
+static int clk_mt8183_audio_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ return mtk_clk_simple_remove(pdev);
+}
+
static const struct of_device_id of_match_clk_mt8183_audio[] = {
- { .compatible = "mediatek,mt8183-audiosys", },
- {}
+ { .compatible = "mediatek,mt8183-audiosys", .data = &audio_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_audio);
static struct platform_driver clk_mt8183_audio_drv = {
.probe = clk_mt8183_audio_probe,
+ .remove = clk_mt8183_audio_remove,
.driver = {
.name = "clk-mt8183-audio",
.of_match_table = of_match_clk_mt8183_audio,
},
};
-
-builtin_platform_driver(clk_mt8183_audio_drv);
+module_platform_driver(clk_mt8183_audio_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index 6907b1a6a824..c0719624004f 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -47,6 +47,7 @@ static const struct of_device_id of_match_clk_mt8183_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_cam);
static struct platform_driver clk_mt8183_cam_drv = {
.probe = mtk_clk_simple_probe,
@@ -56,5 +57,5 @@ static struct platform_driver clk_mt8183_cam_drv = {
.of_match_table = of_match_clk_mt8183_cam,
},
};
-
-builtin_platform_driver(clk_mt8183_cam_drv);
+module_platform_driver(clk_mt8183_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index 8d884425d79f..55fc80615724 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -47,6 +47,7 @@ static const struct of_device_id of_match_clk_mt8183_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_img);
static struct platform_driver clk_mt8183_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -56,5 +57,5 @@ static struct platform_driver clk_mt8183_img_drv = {
.of_match_table = of_match_clk_mt8183_img,
},
};
-
-builtin_platform_driver(clk_mt8183_img_drv);
+module_platform_driver(clk_mt8183_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index 953a8a33d048..59255eab6fe2 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -40,6 +40,7 @@ static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core0);
static struct platform_driver clk_mt8183_ipu_core0_drv = {
.probe = mtk_clk_simple_probe,
@@ -49,5 +50,5 @@ static struct platform_driver clk_mt8183_ipu_core0_drv = {
.of_match_table = of_match_clk_mt8183_ipu_core0,
},
};
-
-builtin_platform_driver(clk_mt8183_ipu_core0_drv);
+module_platform_driver(clk_mt8183_ipu_core0_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index 221d12265974..c4baa052c809 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -40,6 +40,7 @@ static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core1);
static struct platform_driver clk_mt8183_ipu_core1_drv = {
.probe = mtk_clk_simple_probe,
@@ -49,5 +50,5 @@ static struct platform_driver clk_mt8183_ipu_core1_drv = {
.of_match_table = of_match_clk_mt8183_ipu_core1,
},
};
-
-builtin_platform_driver(clk_mt8183_ipu_core1_drv);
+module_platform_driver(clk_mt8183_ipu_core1_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 8c4fd96df821..74866e9c50d7 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -38,6 +38,7 @@ static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_adl);
static struct platform_driver clk_mt8183_ipu_adl_drv = {
.probe = mtk_clk_simple_probe,
@@ -47,5 +48,5 @@ static struct platform_driver clk_mt8183_ipu_adl_drv = {
.of_match_table = of_match_clk_mt8183_ipu_adl,
},
};
-
-builtin_platform_driver(clk_mt8183_ipu_adl_drv);
+module_platform_driver(clk_mt8183_ipu_adl_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index 14a4c3ff82a1..bd7303105357 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -107,6 +107,7 @@ static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_conn);
static struct platform_driver clk_mt8183_ipu_conn_drv = {
.probe = mtk_clk_simple_probe,
@@ -116,5 +117,5 @@ static struct platform_driver clk_mt8183_ipu_conn_drv = {
.of_match_table = of_match_clk_mt8183_ipu_conn,
},
};
-
-builtin_platform_driver(clk_mt8183_ipu_conn_drv);
+module_platform_driver(clk_mt8183_ipu_conn_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index 730c9ae5ea12..816ecf1191ee 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8183_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_mfg);
static struct platform_driver clk_mt8183_mfg_drv = {
.probe = mtk_clk_simple_probe,
@@ -48,5 +49,5 @@ static struct platform_driver clk_mt8183_mfg_drv = {
.of_match_table = of_match_clk_mt8183_mfg,
},
};
-
-builtin_platform_driver(clk_mt8183_mfg_drv);
+module_platform_driver(clk_mt8183_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 11ecc6fb0065..2f99828bff1b 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -82,25 +82,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_DBI_IF, "mm_dbi_if", "dpi0_sel", 13),
};
-static int clk_mt8183_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct platform_device_id clk_mt8183_mm_id_table[] = {
+ { .name = "clk-mt8183-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8183_mm_id_table);
static struct platform_driver clk_mt8183_mm_drv = {
- .probe = clk_mt8183_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8183-mm",
},
+ .id_table = clk_mt8183_mm_id_table,
};
-
-builtin_platform_driver(clk_mt8183_mm_drv);
+module_platform_driver(clk_mt8183_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index c294e50b96b7..513b7956cbea 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -51,6 +51,7 @@ static const struct of_device_id of_match_clk_mt8183_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_vdec);
static struct platform_driver clk_mt8183_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -60,5 +61,5 @@ static struct platform_driver clk_mt8183_vdec_drv = {
.of_match_table = of_match_clk_mt8183_vdec,
},
};
-
-builtin_platform_driver(clk_mt8183_vdec_drv);
+module_platform_driver(clk_mt8183_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index 0051c5d92fc5..532f6e12a561 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -43,6 +43,7 @@ static const struct of_device_id of_match_clk_mt8183_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_venc);
static struct platform_driver clk_mt8183_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -52,5 +53,5 @@ static struct platform_driver clk_mt8183_venc_drv = {
.of_match_table = of_match_clk_mt8183_venc,
},
};
-
-builtin_platform_driver(clk_mt8183_venc_drv);
+module_platform_driver(clk_mt8183_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 23afc9584638..2336a1b69c09 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -14,7 +14,6 @@
#include "clk-gate.h"
#include "clk-mtk.h"
#include "clk-mux.h"
-#include "clk-pll.h"
#include <dt-bindings/clock/mt8183-clk.h>
@@ -26,11 +25,14 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
};
-static const struct mtk_fixed_factor top_early_divs[] = {
- FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
-};
-
+/*
+ * To retain compatibility with older devicetrees, we keep CLK_TOP_CLK13M
+ * valid but rename it from "clk13m" (now provided as a fixed clock by new
+ * devicetrees) to "clk26m_d2", so the older clock assignments still resolve.
+ * This means that on new devicetrees "clk26m_d2" is unused.
+ */
static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_CLK13M, "clk26m_d2", "clk26m", 1, 2),
FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, 2),
FACTOR_FLAGS(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, 1, 0),
FACTOR_FLAGS(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1, 2, 0),
@@ -449,138 +451,97 @@ static const char * const aud_2_parents[] = {
static const struct mtk_mux top_muxes[] = {
/* CLK_CFG_0 */
MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_AXI, "axi_sel",
- axi_parents, 0x40,
- 0x44, 0x48, 0, 2, 7, 0x004, 0, CLK_IS_CRITICAL),
+ axi_parents, 0x40, 0x44, 0x48, 0, 2, 7, 0x004, 0, CLK_IS_CRITICAL),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MM, "mm_sel",
- mm_parents, 0x40,
- 0x44, 0x48, 8, 3, 15, 0x004, 1),
+ mm_parents, 0x40, 0x44, 0x48, 8, 3, 15, 0x004, 1),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IMG, "img_sel",
- img_parents, 0x40,
- 0x44, 0x48, 16, 3, 23, 0x004, 2),
+ img_parents, 0x40, 0x44, 0x48, 16, 3, 23, 0x004, 2),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAM, "cam_sel",
- cam_parents, 0x40,
- 0x44, 0x48, 24, 4, 31, 0x004, 3),
+ cam_parents, 0x40, 0x44, 0x48, 24, 4, 31, 0x004, 3),
/* CLK_CFG_1 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP, "dsp_sel",
- dsp_parents, 0x50,
- 0x54, 0x58, 0, 4, 7, 0x004, 4),
+ dsp_parents, 0x50, 0x54, 0x58, 0, 4, 7, 0x004, 4),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP1, "dsp1_sel",
- dsp1_parents, 0x50,
- 0x54, 0x58, 8, 4, 15, 0x004, 5),
+ dsp1_parents, 0x50, 0x54, 0x58, 8, 4, 15, 0x004, 5),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP2, "dsp2_sel",
- dsp2_parents, 0x50,
- 0x54, 0x58, 16, 4, 23, 0x004, 6),
+ dsp2_parents, 0x50, 0x54, 0x58, 16, 4, 23, 0x004, 6),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IPU_IF, "ipu_if_sel",
- ipu_if_parents, 0x50,
- 0x54, 0x58, 24, 4, 31, 0x004, 7),
+ ipu_if_parents, 0x50, 0x54, 0x58, 24, 4, 31, 0x004, 7),
/* CLK_CFG_2 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MFG, "mfg_sel",
- mfg_parents, 0x60,
- 0x64, 0x68, 0, 2, 7, 0x004, 8),
+ mfg_parents, 0x60, 0x64, 0x68, 0, 2, 7, 0x004, 8),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_F52M_MFG, "f52m_mfg_sel",
- f52m_mfg_parents, 0x60,
- 0x64, 0x68, 8, 2, 15, 0x004, 9),
+ f52m_mfg_parents, 0x60, 0x64, 0x68, 8, 2, 15, 0x004, 9),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG, "camtg_sel",
- camtg_parents, 0x60,
- 0x64, 0x68, 16, 3, 23, 0x004, 10),
+ camtg_parents, 0x60, 0x64, 0x68, 16, 3, 23, 0x004, 10),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG2, "camtg2_sel",
- camtg2_parents, 0x60,
- 0x64, 0x68, 24, 3, 31, 0x004, 11),
+ camtg2_parents, 0x60, 0x64, 0x68, 24, 3, 31, 0x004, 11),
/* CLK_CFG_3 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG3, "camtg3_sel",
- camtg3_parents, 0x70,
- 0x74, 0x78, 0, 3, 7, 0x004, 12),
+ camtg3_parents, 0x70, 0x74, 0x78, 0, 3, 7, 0x004, 12),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG4, "camtg4_sel",
- camtg4_parents, 0x70,
- 0x74, 0x78, 8, 3, 15, 0x004, 13),
+ camtg4_parents, 0x70, 0x74, 0x78, 8, 3, 15, 0x004, 13),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_UART, "uart_sel",
- uart_parents, 0x70,
- 0x74, 0x78, 16, 1, 23, 0x004, 14),
+ uart_parents, 0x70, 0x74, 0x78, 16, 1, 23, 0x004, 14),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SPI, "spi_sel",
- spi_parents, 0x70,
- 0x74, 0x78, 24, 2, 31, 0x004, 15),
+ spi_parents, 0x70, 0x74, 0x78, 24, 2, 31, 0x004, 15),
/* CLK_CFG_4 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0_HCLK, "msdc50_hclk_sel",
- msdc50_hclk_parents, 0x80,
- 0x84, 0x88, 0, 2, 7, 0x004, 16),
+ msdc50_hclk_parents, 0x80, 0x84, 0x88, 0, 2, 7, 0x004, 16),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0, "msdc50_0_sel",
- msdc50_0_parents, 0x80,
- 0x84, 0x88, 8, 3, 15, 0x004, 17),
+ msdc50_0_parents, 0x80, 0x84, 0x88, 8, 3, 15, 0x004, 17),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_1, "msdc30_1_sel",
- msdc30_1_parents, 0x80,
- 0x84, 0x88, 16, 3, 23, 0x004, 18),
+ msdc30_1_parents, 0x80, 0x84, 0x88, 16, 3, 23, 0x004, 18),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_2, "msdc30_2_sel",
- msdc30_2_parents, 0x80,
- 0x84, 0x88, 24, 3, 31, 0x004, 19),
+ msdc30_2_parents, 0x80, 0x84, 0x88, 24, 3, 31, 0x004, 19),
/* CLK_CFG_5 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUDIO, "audio_sel",
- audio_parents, 0x90,
- 0x94, 0x98, 0, 2, 7, 0x004, 20),
+ audio_parents, 0x90, 0x94, 0x98, 0, 2, 7, 0x004, 20),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_INTBUS, "aud_intbus_sel",
- aud_intbus_parents, 0x90,
- 0x94, 0x98, 8, 2, 15, 0x004, 21),
+ aud_intbus_parents, 0x90, 0x94, 0x98, 8, 2, 15, 0x004, 21),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_PMICSPI, "pmicspi_sel",
- pmicspi_parents, 0x90,
- 0x94, 0x98, 16, 2, 23, 0x004, 22),
+ pmicspi_parents, 0x90, 0x94, 0x98, 16, 2, 23, 0x004, 22),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FPWRAP_ULPOSC, "fpwrap_ulposc_sel",
- fpwrap_ulposc_parents, 0x90,
- 0x94, 0x98, 24, 2, 31, 0x004, 23),
+ fpwrap_ulposc_parents, 0x90, 0x94, 0x98, 24, 2, 31, 0x004, 23),
/* CLK_CFG_6 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel",
- atb_parents, 0xa0,
- 0xa4, 0xa8, 0, 2, 7, 0x004, 24),
+ atb_parents, 0xa0, 0xa4, 0xa8, 0, 2, 7, 0x004, 24),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel",
- dpi0_parents, 0xa0,
- 0xa4, 0xa8, 16, 4, 23, 0x004, 26),
+ dpi0_parents, 0xa0, 0xa4, 0xa8, 16, 4, 23, 0x004, 26),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel",
- scam_parents, 0xa0,
- 0xa4, 0xa8, 24, 1, 31, 0x004, 27),
+ scam_parents, 0xa0, 0xa4, 0xa8, 24, 1, 31, 0x004, 27),
/* CLK_CFG_7 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DISP_PWM, "disppwm_sel",
- disppwm_parents, 0xb0,
- 0xb4, 0xb8, 0, 3, 7, 0x004, 28),
+ disppwm_parents, 0xb0, 0xb4, 0xb8, 0, 3, 7, 0x004, 28),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_USB_TOP, "usb_top_sel",
- usb_top_parents, 0xb0,
- 0xb4, 0xb8, 8, 2, 15, 0x004, 29),
+ usb_top_parents, 0xb0, 0xb4, 0xb8, 8, 2, 15, 0x004, 29),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSUSB_TOP_XHCI, "ssusb_top_xhci_sel",
- ssusb_top_xhci_parents, 0xb0,
- 0xb4, 0xb8, 16, 2, 23, 0x004, 30),
+ ssusb_top_xhci_parents, 0xb0, 0xb4, 0xb8, 16, 2, 23, 0x004, 30),
MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SPM, "spm_sel",
- spm_parents, 0xb0,
- 0xb4, 0xb8, 24, 1, 31, 0x008, 0, CLK_IS_CRITICAL),
+ spm_parents, 0xb0, 0xb4, 0xb8, 24, 1, 31, 0x008, 0, CLK_IS_CRITICAL),
/* CLK_CFG_8 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_I2C, "i2c_sel",
- i2c_parents, 0xc0,
- 0xc4, 0xc8, 0, 2, 7, 0x008, 1),
+ i2c_parents, 0xc0, 0xc4, 0xc8, 0, 2, 7, 0x008, 1),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCP, "scp_sel",
- scp_parents, 0xc0,
- 0xc4, 0xc8, 8, 3, 15, 0x008, 2),
+ scp_parents, 0xc0, 0xc4, 0xc8, 8, 3, 15, 0x008, 2),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SENINF, "seninf_sel",
- seninf_parents, 0xc0,
- 0xc4, 0xc8, 16, 2, 23, 0x008, 3),
+ seninf_parents, 0xc0, 0xc4, 0xc8, 16, 2, 23, 0x008, 3),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DXCC, "dxcc_sel",
- dxcc_parents, 0xc0,
- 0xc4, 0xc8, 24, 2, 31, 0x008, 4),
+ dxcc_parents, 0xc0, 0xc4, 0xc8, 24, 2, 31, 0x008, 4),
/* CLK_CFG_9 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG1, "aud_eng1_sel",
- aud_engen1_parents, 0xd0,
- 0xd4, 0xd8, 0, 2, 7, 0x008, 5),
+ aud_engen1_parents, 0xd0, 0xd4, 0xd8, 0, 2, 7, 0x008, 5),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG2, "aud_eng2_sel",
- aud_engen2_parents, 0xd0,
- 0xd4, 0xd8, 8, 2, 15, 0x008, 6),
+ aud_engen2_parents, 0xd0, 0xd4, 0xd8, 8, 2, 15, 0x008, 6),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FAES_UFSFDE, "faes_ufsfde_sel",
- faes_ufsfde_parents, 0xd0,
- 0xd4, 0xd8, 16, 3, 23, 0x008, 7),
+ faes_ufsfde_parents, 0xd0, 0xd4, 0xd8, 16, 3, 23, 0x008, 7),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FUFS, "fufs_sel",
- fufs_parents, 0xd0,
- 0xd4, 0xd8, 24, 2, 31, 0x008, 8),
+ fufs_parents, 0xd0, 0xd4, 0xd8, 24, 2, 31, 0x008, 8),
/* CLK_CFG_10 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_1, "aud_1_sel",
- aud_1_parents, 0xe0,
- 0xe4, 0xe8, 0, 1, 7, 0x008, 9),
+ aud_1_parents, 0xe0, 0xe4, 0xe8, 0, 1, 7, 0x008, 9),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_2, "aud_2_sel",
- aud_2_parents, 0xe0,
- 0xe4, 0xe8, 8, 1, 15, 0x008, 10),
+ aud_2_parents, 0xe0, 0xe4, 0xe8, 8, 1, 15, 0x008, 10),
};
static const char * const apll_i2s0_parents[] = {
@@ -613,21 +574,6 @@ static const char * const apll_i2s5_parents[] = {
"aud_2_sel"
};
-static struct mtk_composite top_aud_muxes[] = {
- MUX(CLK_TOP_MUX_APLL_I2S0, "apll_i2s0_sel", apll_i2s0_parents,
- 0x320, 8, 1),
- MUX(CLK_TOP_MUX_APLL_I2S1, "apll_i2s1_sel", apll_i2s1_parents,
- 0x320, 9, 1),
- MUX(CLK_TOP_MUX_APLL_I2S2, "apll_i2s2_sel", apll_i2s2_parents,
- 0x320, 10, 1),
- MUX(CLK_TOP_MUX_APLL_I2S3, "apll_i2s3_sel", apll_i2s3_parents,
- 0x320, 11, 1),
- MUX(CLK_TOP_MUX_APLL_I2S4, "apll_i2s4_sel", apll_i2s4_parents,
- 0x320, 12, 1),
- MUX(CLK_TOP_MUX_APLL_I2S5, "apll_i2s5_sel", apll_i2s5_parents,
- 0x328, 20, 1),
-};
-
static const char * const mcu_mp0_parents[] = {
"clk26m",
"armpll_ll",
@@ -658,19 +604,19 @@ static struct mtk_composite mcu_muxes[] = {
MUX(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0, 9, 2),
};
-static struct mtk_composite top_aud_divs[] = {
- DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_sel",
- 0x320, 2, 0x324, 8, 0),
- DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_sel",
- 0x320, 3, 0x324, 8, 8),
- DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_sel",
- 0x320, 4, 0x324, 8, 16),
- DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_sel",
- 0x320, 5, 0x324, 8, 24),
- DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_sel",
- 0x320, 6, 0x328, 8, 0),
- DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4",
- 0x320, 7, 0x328, 8, 8),
+static struct mtk_composite top_aud_comp[] = {
+ MUX(CLK_TOP_MUX_APLL_I2S0, "apll_i2s0_sel", apll_i2s0_parents, 0x320, 8, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S1, "apll_i2s1_sel", apll_i2s1_parents, 0x320, 9, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S2, "apll_i2s2_sel", apll_i2s2_parents, 0x320, 10, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S3, "apll_i2s3_sel", apll_i2s3_parents, 0x320, 11, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S4, "apll_i2s4_sel", apll_i2s4_parents, 0x320, 12, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S5, "apll_i2s5_sel", apll_i2s5_parents, 0x328, 20, 1),
+ DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_sel", 0x320, 2, 0x324, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_sel", 0x320, 3, 0x324, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_sel", 0x320, 4, 0x324, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_sel", 0x320, 5, 0x324, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_sel", 0x320, 6, 0x328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4", 0x320, 7, 0x328, 8, 8),
};
static const struct mtk_gate_regs top_cg_regs = {
@@ -731,203 +677,106 @@ static const struct mtk_gate_regs infra3_cg_regs = {
static const struct mtk_gate infra_clks[] = {
/* INFRA0 */
- GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr",
- "axi_sel", 0),
- GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap",
- "axi_sel", 1),
- GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md",
- "axi_sel", 2),
- GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn",
- "axi_sel", 3),
- GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp",
- "scp_sel", 4),
- GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej",
- "f_f26m_ck", 5),
- GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt",
- "axi_sel", 6),
- GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb",
- "axi_sel", 8),
- GATE_INFRA0(CLK_INFRA_GCE, "infra_gce",
- "axi_sel", 9),
- GATE_INFRA0(CLK_INFRA_THERM, "infra_therm",
- "axi_sel", 10),
- GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0",
- "i2c_sel", 11),
- GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1",
- "i2c_sel", 12),
- GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2",
- "i2c_sel", 13),
- GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3",
- "i2c_sel", 14),
- GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk",
- "axi_sel", 15),
- GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1",
- "i2c_sel", 16),
- GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2",
- "i2c_sel", 17),
- GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3",
- "i2c_sel", 18),
- GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4",
- "i2c_sel", 19),
- GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
- "i2c_sel", 21),
- GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
- "uart_sel", 22),
- GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
- "uart_sel", 23),
- GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
- "uart_sel", 24),
- GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3",
- "uart_sel", 25),
- GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m",
- "axi_sel", 27),
- GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc",
- "axi_sel", 28),
- GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif",
- "axi_sel", 31),
+ GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "axi_sel", 0),
+ GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "axi_sel", 1),
+ GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md", "axi_sel", 2),
+ GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn", "axi_sel", 3),
+ GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp", "scp_sel", 4),
+ GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej", "f_f26m_ck", 5),
+ GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt", "axi_sel", 6),
+ GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb", "axi_sel", 8),
+ GATE_INFRA0(CLK_INFRA_GCE, "infra_gce", "axi_sel", 9),
+ GATE_INFRA0(CLK_INFRA_THERM, "infra_therm", "axi_sel", 10),
+ GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0", "i2c_sel", 11),
+ GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1", "i2c_sel", 12),
+ GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2", "i2c_sel", 13),
+ GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3", "i2c_sel", 14),
+ GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk", "axi_sel", 15),
+ GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1", "i2c_sel", 16),
+ GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2", "i2c_sel", 17),
+ GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3", "i2c_sel", 18),
+ GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4", "i2c_sel", 19),
+ GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm", "i2c_sel", 21),
+ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0", "uart_sel", 22),
+ GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1", "uart_sel", 23),
+ GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2", "uart_sel", 24),
+ GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3", "uart_sel", 25),
+ GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m", "axi_sel", 27),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc", "axi_sel", 28),
+ GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif", "axi_sel", 31),
/* INFRA1 */
- GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0",
- "spi_sel", 1),
- GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0",
- "msdc50_hclk_sel", 2),
- GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1",
- "axi_sel", 4),
- GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2",
- "axi_sel", 5),
- GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck",
- "msdc50_0_sel", 6),
- GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc",
- "f_f26m_ck", 7),
- GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu",
- "axi_sel", 8),
- GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng",
- "axi_sel", 9),
- GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc",
- "f_f26m_ck", 10),
- GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum",
- "axi_sel", 11),
- GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap",
- "axi_sel", 12),
- GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md",
- "axi_sel", 13),
- GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md",
- "f_f26m_ck", 14),
- GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck",
- "msdc30_1_sel", 16),
- GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck",
- "msdc30_2_sel", 17),
- GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma",
- "axi_sel", 18),
- GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu",
- "axi_sel", 19),
- GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc",
- "axi_sel", 20),
- GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap",
- "axi_sel", 23),
- GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys",
- "axi_sel", 24),
- GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio",
- "axi_sel", 25),
- GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md",
- "axi_sel", 26),
- GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core",
- "dxcc_sel", 27),
- GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao",
- "dxcc_sel", 28),
- GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk",
- "axi_sel", 30),
- GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m",
- "f_f26m_ck", 31),
+ GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0", "spi_sel", 1),
+ GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0", "msdc50_hclk_sel", 2),
+ GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1", "axi_sel", 4),
+ GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2", "axi_sel", 5),
+ GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck", "msdc50_0_sel", 6),
+ GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc", "f_f26m_ck", 7),
+ GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu", "axi_sel", 8),
+ GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng", "axi_sel", 9),
+ GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc", "f_f26m_ck", 10),
+ GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum", "axi_sel", 11),
+ GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap", "axi_sel", 12),
+ GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md", "axi_sel", 13),
+ GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md", "f_f26m_ck", 14),
+ GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck", "msdc30_1_sel", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck", "msdc30_2_sel", 17),
+ GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma", "axi_sel", 18),
+ GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu", "axi_sel", 19),
+ GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc", "axi_sel", 20),
+ GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23),
+ GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys", "axi_sel", 24),
+ GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25),
+ GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26),
+ GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core", "dxcc_sel", 27),
+ GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao", "dxcc_sel", 28),
+ GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk", "axi_sel", 30),
+ GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "f_f26m_ck", 31),
/* INFRA2 */
- GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx",
- "f_f26m_ck", 0),
- GATE_INFRA2(CLK_INFRA_USB, "infra_usb",
- "usb_top_sel", 1),
- GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm",
- "axi_sel", 2),
- GATE_INFRA2(CLK_INFRA_CLDMA_BCLK, "infra_cldma_bclk",
- "axi_sel", 3),
- GATE_INFRA2(CLK_INFRA_AUDIO_26M_BCLK, "infra_audio_26m_bclk",
- "f_f26m_ck", 4),
- GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1",
- "spi_sel", 6),
- GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4",
- "i2c_sel", 7),
- GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share",
- "f_f26m_ck", 8),
- GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2",
- "spi_sel", 9),
- GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3",
- "spi_sel", 10),
- GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck",
- "ssusb_top_xhci_sel", 11),
- GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick",
- "fufs_sel", 12),
- GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck",
- "fufs_sel", 13),
- GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk",
- "axi_sel", 14),
- GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist",
- "axi_sel", 16),
- GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5",
- "i2c_sel", 18),
- GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter",
- "i2c_sel", 19),
- GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm",
- "i2c_sel", 20),
- GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter",
- "i2c_sel", 21),
- GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm",
- "i2c_sel", 22),
- GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter",
- "i2c_sel", 23),
- GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm",
- "i2c_sel", 24),
- GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4",
- "spi_sel", 25),
- GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5",
- "spi_sel", 26),
- GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma",
- "axi_sel", 27),
- GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs",
- "fufs_sel", 28),
- GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde",
- "faes_ufsfde_sel", 29),
- GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick",
- "fufs_sel", 30),
+ GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx", "f_f26m_ck", 0),
+ GATE_INFRA2(CLK_INFRA_USB, "infra_usb", "usb_top_sel", 1),
+ GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm", "axi_sel", 2),
+ GATE_INFRA2(CLK_INFRA_CLDMA_BCLK, "infra_cldma_bclk", "axi_sel", 3),
+ GATE_INFRA2(CLK_INFRA_AUDIO_26M_BCLK, "infra_audio_26m_bclk", "f_f26m_ck", 4),
+ GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 6),
+ GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4", "i2c_sel", 7),
+ GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share", "f_f26m_ck", 8),
+ GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2", "spi_sel", 9),
+ GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3", "spi_sel", 10),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck", "ssusb_top_xhci_sel", 11),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", "fufs_sel", 12),
+ GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", "fufs_sel", 13),
+ GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", "axi_sel", 14),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16),
+ GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18),
+ GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", "i2c_sel", 19),
+ GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", "i2c_sel", 20),
+ GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter", "i2c_sel", 21),
+ GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm", "i2c_sel", 22),
+ GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter", "i2c_sel", 23),
+ GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm", "i2c_sel", 24),
+ GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4", "spi_sel", 25),
+ GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5", "spi_sel", 26),
+ GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma", "axi_sel", 27),
+ GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs", "fufs_sel", 28),
+ GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde", "faes_ufsfde_sel", 29),
+ GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick", "fufs_sel", 30),
/* INFRA3 */
- GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self",
- "msdc50_0_sel", 0),
- GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self",
- "msdc50_0_sel", 1),
- GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self",
- "msdc50_0_sel", 2),
- GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi",
- "axi_sel", 5),
- GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6",
- "i2c_sel", 6),
- GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0",
- "msdc50_hclk_sel", 7),
- GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0",
- "msdc50_hclk_sel", 8),
- GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap",
- "axi_sel", 16),
- GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md",
- "axi_sel", 17),
- GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap",
- "axi_sel", 18),
- GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md",
- "axi_sel", 19),
- GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m",
- "f_f26m_ck", 20),
- GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk",
- "axi_sel", 21),
- GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7",
- "i2c_sel", 22),
- GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8",
- "i2c_sel", 23),
- GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc",
- "msdc50_0_sel", 24),
+ GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", "msdc50_0_sel", 0),
+ GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", "msdc50_0_sel", 1),
+ GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2),
+ GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
+ GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
+ GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7),
+ GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0", "msdc50_hclk_sel", 8),
+ GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap", "axi_sel", 16),
+ GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md", "axi_sel", 17),
+ GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap", "axi_sel", 18),
+ GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md", "axi_sel", 19),
+ GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m", "f_f26m_ck", 20),
+ GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk", "axi_sel", 21),
+ GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7", "i2c_sel", 22),
+ GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8", "i2c_sel", 23),
+ GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc", "msdc50_0_sel", 24),
};
static const struct mtk_gate_regs peri_cg_regs = {
@@ -944,140 +793,6 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI(CLK_PERI_AXI, "peri_axi", "axi_sel", 31),
};
-static const struct mtk_gate_regs apmixed_cg_regs = {
- .set_ofs = 0x20,
- .clr_ofs = 0x20,
- .sta_ofs = 0x20,
-};
-
-#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags) \
- GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, \
- _shift, &mtk_clk_gate_ops_no_setclr_inv, _flags)
-
-#define GATE_APMIXED(_id, _name, _parent, _shift) \
- GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, 0)
-
-/*
- * CRITICAL CLOCK:
- * apmixed_appll26m is the toppest clock gate of all PLLs.
- */
-static const struct mtk_gate apmixed_clks[] = {
- /* AUDIO0 */
- GATE_APMIXED(CLK_APMIXED_SSUSB_26M, "apmixed_ssusb26m",
- "f_f26m_ck", 4),
- GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL_26M, "apmixed_appll26m",
- "f_f26m_ck", 5, CLK_IS_CRITICAL),
- GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m",
- "f_f26m_ck", 6),
- GATE_APMIXED(CLK_APMIXED_MDPLLGP_26M, "apmixed_mdpll26m",
- "f_f26m_ck", 7),
- GATE_APMIXED(CLK_APMIXED_MMSYS_26M, "apmixed_mmsys26m",
- "f_f26m_ck", 8),
- GATE_APMIXED(CLK_APMIXED_UFS_26M, "apmixed_ufs26m",
- "f_f26m_ck", 9),
- GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m",
- "f_f26m_ck", 11),
- GATE_APMIXED(CLK_APMIXED_MEMPLL_26M, "apmixed_mempll26m",
- "f_f26m_ck", 13),
- GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m",
- "f_f26m_ck", 14),
- GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m",
- "f_f26m_ck", 16),
- GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m",
- "f_f26m_ck", 17),
-};
-
-#define MT8183_PLL_FMAX (3800UL * MHZ)
-#define MT8183_PLL_FMIN (1500UL * MHZ)
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
- _pd_shift, _tuner_reg, _tuner_en_reg, \
- _tuner_en_bit, _pcw_reg, _pcw_shift, \
- _pcw_chg_reg, _div_table) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = _rst_bar_mask, \
- .fmax = MT8183_PLL_FMAX, \
- .fmin = MT8183_PLL_FMIN, \
- .pcwbits = _pcwbits, \
- .pcwibits = _pcwibits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .tuner_en_reg = _tuner_en_reg, \
- .tuner_en_bit = _tuner_en_bit, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .pcw_chg_reg = _pcw_chg_reg, \
- .div_table = _div_table, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
- _pd_shift, _tuner_reg, _tuner_en_reg, \
- _tuner_en_bit, _pcw_reg, _pcw_shift, \
- _pcw_chg_reg) \
- PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
- _pd_shift, _tuner_reg, _tuner_en_reg, \
- _tuner_en_bit, _pcw_reg, _pcw_shift, \
- _pcw_chg_reg, NULL)
-
-static const struct mtk_pll_div_table armpll_div_table[] = {
- { .div = 0, .freq = MT8183_PLL_FMAX },
- { .div = 1, .freq = 1500 * MHZ },
- { .div = 2, .freq = 750 * MHZ },
- { .div = 3, .freq = 375 * MHZ },
- { .div = 4, .freq = 187500000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_div_table mfgpll_div_table[] = {
- { .div = 0, .freq = MT8183_PLL_FMAX },
- { .div = 1, .freq = 1600 * MHZ },
- { .div = 2, .freq = 800 * MHZ },
- { .div = 3, .freq = 400 * MHZ },
- { .div = 4, .freq = 200 * MHZ },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_data plls[] = {
- PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0,
- HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0,
- 0x0204, 0, 0, armpll_div_table),
- PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0,
- HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0,
- 0x0214, 0, 0, armpll_div_table),
- PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0,
- HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0,
- 0x0294, 0, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0,
- HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0,
- 0x0224, 0, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0,
- HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0,
- 0x0234, 0, 0),
- PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0,
- 0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0,
- mfgpll_div_table),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0,
- 0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0,
- 0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0,
- HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0,
- 0x0274, 0, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0,
- 0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0,
- 0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
-};
-
static u16 infra_rst_ofs[] = {
INFRA_RST0_SET_OFFSET,
INFRA_RST1_SET_OFFSET,
@@ -1091,41 +806,6 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_bank_nr = ARRAY_SIZE(infra_rst_ofs),
};
-static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-
- mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
-
-static struct clk_hw_onecell_data *top_clk_data;
-
-static void clk_mt8183_top_init_early(struct device_node *node)
-{
- int i;
-
- top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
- for (i = 0; i < CLK_TOP_NR_CLK; i++)
- top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
-
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
- top_clk_data);
-
- of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
-}
-
-CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
- clk_mt8183_top_init_early);
-
/* Register mux notifier for MFG mux */
static int clk_mt8183_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
{
@@ -1148,150 +828,55 @@ static int clk_mt8183_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
}
-static int clk_mt8183_top_probe(struct platform_device *pdev)
-{
- void __iomem *base;
- struct device_node *node = pdev->dev.of_node;
- int ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- top_clk_data);
-
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
- top_clk_data);
-
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
-
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt8183_clk_lock, top_clk_data);
-
- mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt8183_clk_lock, top_clk_data);
-
- mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt8183_clk_lock, top_clk_data);
-
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- top_clk_data);
-
- ret = clk_mt8183_reg_mfg_mux_notifier(&pdev->dev,
- top_clk_data->hws[CLK_TOP_MUX_MFG]->clk);
- if (ret)
- return ret;
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
- top_clk_data);
-}
-
-static int clk_mt8183_infra_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r) {
- dev_err(&pdev->dev,
- "%s(): could not register clock provider: %d\n",
- __func__, r);
- return r;
- }
-
- mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
-
- return r;
-}
-
-static int clk_mt8183_peri_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
-
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
-
-static int clk_mt8183_mcu_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- void __iomem *base;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .rst_desc = &clk_rst_desc,
+};
- clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+static const struct mtk_clk_desc mcu_desc = {
+ .composite_clks = mcu_muxes,
+ .num_composite_clks = ARRAY_SIZE(mcu_muxes),
+ .clk_lock = &mt8183_clk_lock,
+};
- mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
- &mt8183_clk_lock, clk_data);
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_clks,
+ .num_clks = ARRAY_SIZE(peri_clks),
+};
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_comp,
+ .num_composite_clks = ARRAY_SIZE(top_aud_comp),
+ .clks = top_clks,
+ .num_clks = ARRAY_SIZE(top_clks),
+ .clk_lock = &mt8183_clk_lock,
+ .clk_notifier_func = clk_mt8183_reg_mfg_mux_notifier,
+ .mfg_clk_idx = CLK_TOP_MUX_MFG,
+};
static const struct of_device_id of_match_clk_mt8183[] = {
- {
- .compatible = "mediatek,mt8183-apmixedsys",
- .data = clk_mt8183_apmixed_probe,
- }, {
- .compatible = "mediatek,mt8183-topckgen",
- .data = clk_mt8183_top_probe,
- }, {
- .compatible = "mediatek,mt8183-infracfg",
- .data = clk_mt8183_infra_probe,
- }, {
- .compatible = "mediatek,mt8183-pericfg",
- .data = clk_mt8183_peri_probe,
- }, {
- .compatible = "mediatek,mt8183-mcucfg",
- .data = clk_mt8183_mcu_probe,
- }, {
- /* sentinel */
- }
-};
-
-static int clk_mt8183_probe(struct platform_device *pdev)
-{
- int (*clk_probe)(struct platform_device *pdev);
- int r;
-
- clk_probe = of_device_get_match_data(&pdev->dev);
- if (!clk_probe)
- return -EINVAL;
-
- r = clk_probe(pdev);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+ { .compatible = "mediatek,mt8183-infracfg", .data = &infra_desc },
+ { .compatible = "mediatek,mt8183-mcucfg", .data = &mcu_desc },
+	{ .compatible = "mediatek,mt8183-pericfg", .data = &peri_desc },
+ { .compatible = "mediatek,mt8183-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8183);
static struct platform_driver clk_mt8183_drv = {
- .probe = clk_mt8183_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183",
.of_match_table = of_match_clk_mt8183,
},
};
-
-static int __init clk_mt8183_init(void)
-{
- return platform_driver_register(&clk_mt8183_drv);
-}
-
-arch_initcall(clk_mt8183_init);
+module_platform_driver(clk_mt8183_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
index 1d673c6278a9..da7950d51c64 100644
--- a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
@@ -7,6 +7,7 @@
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt8186-clk.h>
+#include "clk-fhctl.h"
#include "clk-mtk.h"
#include "clk-pll.h"
#include "clk-pllfh.h"
@@ -98,6 +99,7 @@ enum fh_pll_id {
.data = { \
.pll_id = _pllid, \
.fh_id = _fhid, \
+ .fh_ver = FHCTL_PLLFH_V2, \
.fhx_offset = _offset, \
.dds_mask = GENMASK(21, 0), \
.slope0_value = 0x6003c97, \
@@ -134,6 +136,7 @@ static const struct of_device_id of_match_clk_mt8186_apmixed[] = {
{ .compatible = "mediatek,mt8186-apmixedsys", },
{}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_apmixed);
static int clk_mt8186_apmixed_probe(struct platform_device *pdev)
{
@@ -190,4 +193,5 @@ static struct platform_driver clk_mt8186_apmixed_drv = {
.of_match_table = of_match_clk_mt8186_apmixed,
},
};
-builtin_platform_driver(clk_mt8186_apmixed_drv);
+module_platform_driver(clk_mt8186_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c
index 9ec345a2ce66..656d9e6f3ee2 100644
--- a/drivers/clk/mediatek/clk-mt8186-cam.c
+++ b/drivers/clk/mediatek/clk-mt8186-cam.c
@@ -78,6 +78,7 @@ static const struct of_device_id of_match_clk_mt8186_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_cam);
static struct platform_driver clk_mt8186_cam_drv = {
.probe = mtk_clk_simple_probe,
@@ -87,4 +88,5 @@ static struct platform_driver clk_mt8186_cam_drv = {
.of_match_table = of_match_clk_mt8186_cam,
},
};
-builtin_platform_driver(clk_mt8186_cam_drv);
+module_platform_driver(clk_mt8186_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c
index 08a625475aee..754b27f03817 100644
--- a/drivers/clk/mediatek/clk-mt8186-img.c
+++ b/drivers/clk/mediatek/clk-mt8186-img.c
@@ -56,6 +56,7 @@ static const struct of_device_id of_match_clk_mt8186_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_img);
static struct platform_driver clk_mt8186_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -65,4 +66,5 @@ static struct platform_driver clk_mt8186_img_drv = {
.of_match_table = of_match_clk_mt8186_img,
},
};
-builtin_platform_driver(clk_mt8186_img_drv);
+module_platform_driver(clk_mt8186_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
index 47f2e480a05e..7619c357b150 100644
--- a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
@@ -55,6 +55,7 @@ static const struct of_device_id of_match_clk_mt8186_imp_iic_wrap[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_imp_iic_wrap);
static struct platform_driver clk_mt8186_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
@@ -64,4 +65,5 @@ static struct platform_driver clk_mt8186_imp_iic_wrap_drv = {
.of_match_table = of_match_clk_mt8186_imp_iic_wrap,
},
};
-builtin_platform_driver(clk_mt8186_imp_iic_wrap_drv);
+module_platform_driver(clk_mt8186_imp_iic_wrap_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index df2a6bd1aefa..a907a5def5b8 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -227,6 +227,7 @@ static const struct of_device_id of_match_clk_mt8186_infra_ao[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_infra_ao);
static struct platform_driver clk_mt8186_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
@@ -236,4 +237,5 @@ static struct platform_driver clk_mt8186_infra_ao_drv = {
.of_match_table = of_match_clk_mt8186_infra_ao,
},
};
-builtin_platform_driver(clk_mt8186_infra_ao_drv);
+module_platform_driver(clk_mt8186_infra_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c
index 8fca148effa6..50e340035aa7 100644
--- a/drivers/clk/mediatek/clk-mt8186-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8186-ipe.c
@@ -43,6 +43,7 @@ static const struct of_device_id of_match_clk_mt8186_ipe[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_ipe);
static struct platform_driver clk_mt8186_ipe_drv = {
.probe = mtk_clk_simple_probe,
@@ -52,4 +53,5 @@ static struct platform_driver clk_mt8186_ipe_drv = {
.of_match_table = of_match_clk_mt8186_ipe,
},
};
-builtin_platform_driver(clk_mt8186_ipe_drv);
+module_platform_driver(clk_mt8186_ipe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
index dfc305c1fc5d..d1640e4dc2ad 100644
--- a/drivers/clk/mediatek/clk-mt8186-mcu.c
+++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
@@ -43,66 +43,26 @@ static struct mtk_composite mcu_muxes[] = {
MUX(CLK_MCU_ARMPLL_BUS_SEL, "mcu_armpll_bus_sel", mcu_armpll_bus_parents, 0x2E0, 9, 2),
};
-static const struct of_device_id of_match_clk_mt8186_mcu[] = {
- { .compatible = "mediatek,mt8186-mcusys", },
- {}
+static const struct mtk_clk_desc mcu_desc = {
+ .composite_clks = mcu_muxes,
+ .num_composite_clks = ARRAY_SIZE(mcu_muxes),
};
-static int clk_mt8186_mcu_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
- void __iomem *base;
-
- clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base)) {
- r = PTR_ERR(base);
- goto free_mcu_data;
- }
-
- r = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
- NULL, clk_data);
- if (r)
- goto free_mcu_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto unregister_composite_muxes;
-
- platform_set_drvdata(pdev, clk_data);
-
- return r;
-
-unregister_composite_muxes:
- mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), clk_data);
-free_mcu_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
-
-static int clk_mt8186_mcu_remove(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
-
- of_clk_del_provider(node);
- mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), clk_data);
- mtk_free_clk_data(clk_data);
-
- return 0;
-}
+static const struct of_device_id of_match_clk_mt8186_mcu[] = {
+ { .compatible = "mediatek,mt8186-mcusys", .data = &mcu_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mcu);
static struct platform_driver clk_mt8186_mcu_drv = {
- .probe = clk_mt8186_mcu_probe,
- .remove = clk_mt8186_mcu_remove,
.driver = {
.name = "clk-mt8186-mcu",
.of_match_table = of_match_clk_mt8186_mcu,
},
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
};
-builtin_platform_driver(clk_mt8186_mcu_drv);
+module_platform_driver(clk_mt8186_mcu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 mcusys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c
index 05174088ef20..e1d19007e375 100644
--- a/drivers/clk/mediatek/clk-mt8186-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8186-mdp.c
@@ -68,6 +68,7 @@ static const struct of_device_id of_match_clk_mt8186_mdp[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mdp);
static struct platform_driver clk_mt8186_mdp_drv = {
.probe = mtk_clk_simple_probe,
@@ -77,4 +78,5 @@ static struct platform_driver clk_mt8186_mdp_drv = {
.of_match_table = of_match_clk_mt8186_mdp,
},
};
-builtin_platform_driver(clk_mt8186_mdp_drv);
+module_platform_driver(clk_mt8186_mdp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c
index 0142d741053a..aeb098b54585 100644
--- a/drivers/clk/mediatek/clk-mt8186-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8186-mfg.c
@@ -37,6 +37,7 @@ static const struct of_device_id of_match_clk_mt8186_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mfg);
static struct platform_driver clk_mt8186_mfg_drv = {
.probe = mtk_clk_simple_probe,
@@ -46,4 +47,5 @@ static struct platform_driver clk_mt8186_mfg_drv = {
.of_match_table = of_match_clk_mt8186_mfg,
},
};
-builtin_platform_driver(clk_mt8186_mfg_drv);
+module_platform_driver(clk_mt8186_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
index 1d33be407947..fc3bb6d1f714 100644
--- a/drivers/clk/mediatek/clk-mt8186-mm.c
+++ b/drivers/clk/mediatek/clk-mt8186-mm.c
@@ -58,54 +58,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_DISP_26M, "mm_disp_26m_ck", "top_disp", 10),
};
-static int clk_mt8186_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
- if (r)
- goto free_mm_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto unregister_gates;
-
- platform_set_drvdata(pdev, clk_data);
-
- return r;
-
-unregister_gates:
- mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
-free_mm_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
-
-static int clk_mt8186_mm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
-
- of_clk_del_provider(node);
- mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
- mtk_free_clk_data(clk_data);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return 0;
-}
+static const struct platform_device_id clk_mt8186_mm_id_table[] = {
+ { .name = "clk-mt8186-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8186_mm_id_table);
static struct platform_driver clk_mt8186_mm_drv = {
- .probe = clk_mt8186_mm_probe,
- .remove = clk_mt8186_mm_remove,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8186-mm",
},
+ .id_table = clk_mt8186_mm_id_table,
};
-builtin_platform_driver(clk_mt8186_mm_drv);
+module_platform_driver(clk_mt8186_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
index c2beda7ef976..1a0340a20beb 100644
--- a/drivers/clk/mediatek/clk-mt8186-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
@@ -669,9 +669,6 @@ static struct mtk_composite top_muxes[] = {
MUX(CLK_TOP_APLL_I2S4_MCK_SEL, "apll_i2s4_mck_sel", apll_mck_parents, 0x0320, 19, 1),
MUX(CLK_TOP_APLL_TDMOUT_MCK_SEL, "apll_tdmout_mck_sel", apll_mck_parents,
0x0320, 20, 1),
-};
-
-static const struct mtk_composite top_adj_divs[] = {
DIV_GATE(CLK_TOP_APLL12_CK_DIV0, "apll12_div0", "apll_i2s0_mck_sel",
0x0320, 0, 0x0328, 8, 0),
DIV_GATE(CLK_TOP_APLL12_CK_DIV1, "apll12_div1", "apll_i2s1_mck_sel",
@@ -684,11 +681,6 @@ static const struct mtk_composite top_adj_divs[] = {
0x0320, 4, 0x0334, 8, 0),
};
-static const struct of_device_id of_match_clk_mt8186_topck[] = {
- { .compatible = "mediatek,mt8186-topckgen", },
- {}
-};
-
/* Register mux notifier for MFG mux */
static int clk_mt8186_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
{
@@ -711,97 +703,33 @@ static int clk_mt8186_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
}
-static int clk_mt8186_topck_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
- void __iomem *base;
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base)) {
- r = PTR_ERR(base);
- goto free_top_data;
- }
-
- r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
- if (r)
- goto free_top_data;
-
- r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- if (r)
- goto unregister_fixed_clks;
-
- r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
- &mt8186_clk_lock, clk_data);
- if (r)
- goto unregister_factors;
-
- r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8186_clk_lock, clk_data);
- if (r)
- goto unregister_muxes;
-
- r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
- &mt8186_clk_lock, clk_data);
- if (r)
- goto unregister_composite_muxes;
-
- r = clk_mt8186_reg_mfg_mux_notifier(&pdev->dev,
- clk_data->hws[CLK_TOP_MFG]->clk);
- if (r)
- goto unregister_composite_divs;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto unregister_composite_divs;
-
- platform_set_drvdata(pdev, clk_data);
-
- return r;
-
-unregister_composite_divs:
- mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), clk_data);
-unregister_composite_muxes:
- mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
-unregister_muxes:
- mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), clk_data);
-unregister_factors:
- mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
-unregister_fixed_clks:
- mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data);
-free_top_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_mtk_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_mtk_muxes),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .clk_lock = &mt8186_clk_lock,
+ .clk_notifier_func = clk_mt8186_reg_mfg_mux_notifier,
+ .mfg_clk_idx = CLK_TOP_MFG,
+};
-static int clk_mt8186_topck_remove(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
-
- of_clk_del_provider(node);
- mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), clk_data);
- mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
- mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), clk_data);
- mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data);
- mtk_free_clk_data(clk_data);
-
- return 0;
-}
+static const struct of_device_id of_match_clk_mt8186_topck[] = {
+ { .compatible = "mediatek,mt8186-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_topck);
static struct platform_driver clk_mt8186_topck_drv = {
- .probe = clk_mt8186_topck_probe,
- .remove = clk_mt8186_topck_remove,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-topck",
.of_match_table = of_match_clk_mt8186_topck,
},
};
-builtin_platform_driver(clk_mt8186_topck_drv);
+module_platform_driver(clk_mt8186_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c
index 5ad7e1ae0bac..9bf3b8632870 100644
--- a/drivers/clk/mediatek/clk-mt8186-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8186-vdec.c
@@ -76,6 +76,7 @@ static const struct of_device_id of_match_clk_mt8186_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_vdec);
static struct platform_driver clk_mt8186_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -85,4 +86,5 @@ static struct platform_driver clk_mt8186_vdec_drv = {
.of_match_table = of_match_clk_mt8186_vdec,
},
};
-builtin_platform_driver(clk_mt8186_vdec_drv);
+module_platform_driver(clk_mt8186_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c
index f5519f794c45..0c1bc94e84cf 100644
--- a/drivers/clk/mediatek/clk-mt8186-venc.c
+++ b/drivers/clk/mediatek/clk-mt8186-venc.c
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8186_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_venc);
static struct platform_driver clk_mt8186_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -48,4 +49,5 @@ static struct platform_driver clk_mt8186_venc_drv = {
.of_match_table = of_match_clk_mt8186_venc,
},
};
-builtin_platform_driver(clk_mt8186_venc_drv);
+module_platform_driver(clk_mt8186_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c
index 8db3e9178a1e..c4727b1cb64d 100644
--- a/drivers/clk/mediatek/clk-mt8186-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8186-wpe.c
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8186_wpe[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_wpe);
static struct platform_driver clk_mt8186_wpe_drv = {
.probe = mtk_clk_simple_probe,
@@ -48,4 +49,5 @@ static struct platform_driver clk_mt8186_wpe_drv = {
.of_match_table = of_match_clk_mt8186_wpe,
},
};
-builtin_platform_driver(clk_mt8186_wpe_drv);
+module_platform_driver(clk_mt8186_wpe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
new file mode 100644
index 000000000000..808f2ad3b7ee
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs adsp_audio26m_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x80,
+ .sta_ofs = 0x80,
+};
+
+#define GATE_ADSP_FLAGS(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &adsp_audio26m_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate adsp_audio26m_clks[] = {
+ GATE_ADSP_FLAGS(CLK_AUDIODSP_AUDIO26M, "audiodsp_audio26m", "clk26m", 3),
+};
+
+static const struct mtk_clk_desc adsp_audio26m_desc = {
+ .clks = adsp_audio26m_clks,
+ .num_clks = ARRAY_SIZE(adsp_audio26m_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_adsp_audio26m[] = {
+ { .compatible = "mediatek,mt8188-adsp-audio26m", .data = &adsp_audio26m_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_adsp_audio26m);
+
+static struct platform_driver clk_mt8188_adsp_audio26m_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-adsp_audio26m",
+ .of_match_table = of_match_clk_mt8188_adsp_audio26m,
+ },
+};
+module_platform_driver(clk_mt8188_adsp_audio26m_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
new file mode 100644
index 000000000000..9d21da2d9aa7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_PLL_SSUSB26M_EN, "pll_ssusb26m_en", "clk26m", 1),
+};
+
+#define MT8188_PLL_FMAX (3800UL * MHZ)
+#define MT8188_PLL_FMIN (1500UL * MHZ)
+#define MT8188_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcw_chg_reg, \
+ _en_reg, _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8188_PLL_FMAX, \
+ .fmin = MT8188_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8188_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .en_reg = _en_reg, \
+ .pll_en_bit = _pll_en_bit, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ETHPLL, "ethpll", 0x044C, 0x0458, 0,
+ 0, 0, 22, 0x0450, 24, 0, 0, 0, 0x0450, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0514, 0x0520, 0,
+ 0, 0, 22, 0x0518, 24, 0, 0, 0, 0x0518, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_TVDPLL1, "tvdpll1", 0x0524, 0x0530, 0,
+ 0, 0, 22, 0x0528, 24, 0, 0, 0, 0x0528, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_TVDPLL2, "tvdpll2", 0x0534, 0x0540, 0,
+ 0, 0, 22, 0x0538, 24, 0, 0, 0, 0x0538, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0544, 0x0550, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0548, 24, 0, 0, 0, 0x0548, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x045C, 0x0468, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0460, 24, 0, 0, 0, 0x0460, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0554, 0x0560, 0,
+ 0, 0, 22, 0x0558, 24, 0, 0, 0, 0x0558, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0504, 0x0510, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0508, 24, 0, 0, 0, 0x0508, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x042C, 0x0438, 0,
+ 0, 0, 22, 0x0430, 24, 0, 0, 0, 0x0430, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0304, 0x0314, 0,
+ 0, 0, 32, 0x0308, 24, 0x0034, 0x0000, 12, 0x030C, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0318, 0x0328, 0,
+ 0, 0, 32, 0x031C, 24, 0x0038, 0x0000, 13, 0x0320, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_APLL3, "apll3", 0x032C, 0x033C, 0,
+ 0, 0, 32, 0x0330, 24, 0x003C, 0x0000, 14, 0x0334, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_APLL4, "apll4", 0x0404, 0x0414, 0,
+ 0, 0, 32, 0x0408, 24, 0x0040, 0x0000, 15, 0x040C, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_APLL5, "apll5", 0x0418, 0x0428, 0,
+ 0, 0, 32, 0x041C, 24, 0x0044, 0x0000, 16, 0x0420, 0, 0, 0, 9),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0340, 0x034C, 0,
+ 0, 0, 22, 0x0344, 24, 0, 0, 0, 0x0344, 0, 0, 0, 9),
+};
+
+static const struct of_device_id of_match_clk_mt8188_apmixed[] = {
+ { .compatible = "mediatek,mt8188-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_apmixed);
+
+static int clk_mt8188_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+ if (r)
+ goto unregister_plls;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_gates;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8188_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8188_apmixed_drv = {
+ .probe = clk_mt8188_apmixed_probe,
+ .remove = clk_mt8188_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8188-apmixed",
+ .of_match_table = of_match_clk_mt8188_apmixed,
+ },
+};
+module_platform_driver(clk_mt8188_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c
new file mode 100644
index 000000000000..c5a3856bd223
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-cam.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_main_clks[] = {
+ GATE_CAM(CLK_CAM_MAIN_LARB13, "cam_main_larb13", "top_cam", 0),
+ GATE_CAM(CLK_CAM_MAIN_LARB14, "cam_main_larb14", "top_cam", 1),
+ GATE_CAM(CLK_CAM_MAIN_CAM, "cam_main_cam", "top_cam", 2),
+ GATE_CAM(CLK_CAM_MAIN_CAM_SUBA, "cam_main_cam_suba", "top_cam", 3),
+ GATE_CAM(CLK_CAM_MAIN_CAM_SUBB, "cam_main_cam_subb", "top_cam", 4),
+ GATE_CAM(CLK_CAM_MAIN_CAMTG, "cam_main_camtg", "top_cam", 7),
+ GATE_CAM(CLK_CAM_MAIN_SENINF, "cam_main_seninf", "top_cam", 8),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVA, "cam_main_gcamsva", "top_cam", 9),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVB, "cam_main_gcamsvb", "top_cam", 10),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVC, "cam_main_gcamsvc", "top_cam", 11),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVD, "cam_main_gcamsvd", "top_cam", 12),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVE, "cam_main_gcamsve", "top_cam", 13),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVF, "cam_main_gcamsvf", "top_cam", 14),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVG, "cam_main_gcamsvg", "top_cam", 15),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVH, "cam_main_gcamsvh", "top_cam", 16),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVI, "cam_main_gcamsvi", "top_cam", 17),
+ GATE_CAM(CLK_CAM_MAIN_GCAMSVJ, "cam_main_gcamsvj", "top_cam", 18),
+ GATE_CAM(CLK_CAM_MAIN_CAMSV_TOP, "cam_main_camsv", "top_cam", 19),
+ GATE_CAM(CLK_CAM_MAIN_CAMSV_CQ_A, "cam_main_camsv_cq_a", "top_cam", 20),
+ GATE_CAM(CLK_CAM_MAIN_CAMSV_CQ_B, "cam_main_camsv_cq_b", "top_cam", 21),
+ GATE_CAM(CLK_CAM_MAIN_CAMSV_CQ_C, "cam_main_camsv_cq_c", "top_cam", 22),
+ GATE_CAM(CLK_CAM_MAIN_FAKE_ENG, "cam_main_fake_eng", "top_cam", 28),
+ GATE_CAM(CLK_CAM_MAIN_CAM2MM0_GALS, "cam_main_cam2mm0_gals", "top_cam", 29),
+ GATE_CAM(CLK_CAM_MAIN_CAM2MM1_GALS, "cam_main_cam2mm1_gals", "top_cam", 30),
+ GATE_CAM(CLK_CAM_MAIN_CAM2SYS_GALS, "cam_main_cam2sys_gals", "top_cam", 31),
+};
+
+static const struct mtk_gate cam_rawa_clks[] = {
+ GATE_CAM(CLK_CAM_RAWA_LARBX, "cam_rawa_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWA_CAM, "cam_rawa_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWA_CAMTG, "cam_rawa_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_rawb_clks[] = {
+ GATE_CAM(CLK_CAM_RAWB_LARBX, "cam_rawb_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWB_CAM, "cam_rawb_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWB_CAMTG, "cam_rawb_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_yuva_clks[] = {
+ GATE_CAM(CLK_CAM_YUVA_LARBX, "cam_yuva_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_YUVA_CAM, "cam_yuva_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_YUVA_CAMTG, "cam_yuva_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_yuvb_clks[] = {
+ GATE_CAM(CLK_CAM_YUVB_LARBX, "cam_yuvb_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_YUVB_CAM, "cam_yuvb_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_YUVB_CAMTG, "cam_yuvb_camtg", "top_cam", 2),
+};
+
+static const struct mtk_clk_desc cam_main_desc = {
+ .clks = cam_main_clks,
+ .num_clks = ARRAY_SIZE(cam_main_clks),
+};
+
+static const struct mtk_clk_desc cam_rawa_desc = {
+ .clks = cam_rawa_clks,
+ .num_clks = ARRAY_SIZE(cam_rawa_clks),
+};
+
+static const struct mtk_clk_desc cam_rawb_desc = {
+ .clks = cam_rawb_clks,
+ .num_clks = ARRAY_SIZE(cam_rawb_clks),
+};
+
+static const struct mtk_clk_desc cam_yuva_desc = {
+ .clks = cam_yuva_clks,
+ .num_clks = ARRAY_SIZE(cam_yuva_clks),
+};
+
+static const struct mtk_clk_desc cam_yuvb_desc = {
+ .clks = cam_yuvb_clks,
+ .num_clks = ARRAY_SIZE(cam_yuvb_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_cam[] = {
+ { .compatible = "mediatek,mt8188-camsys", .data = &cam_main_desc },
+ { .compatible = "mediatek,mt8188-camsys-rawa", .data = &cam_rawa_desc },
+ { .compatible = "mediatek,mt8188-camsys-rawb", .data = &cam_rawb_desc },
+ { .compatible = "mediatek,mt8188-camsys-yuva", .data = &cam_yuva_desc },
+ { .compatible = "mediatek,mt8188-camsys-yuvb", .data = &cam_yuvb_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_cam);
+
+static struct platform_driver clk_mt8188_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-cam",
+ .of_match_table = of_match_clk_mt8188_cam,
+ },
+};
+
+module_platform_driver(clk_mt8188_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-ccu.c b/drivers/clk/mediatek/clk-mt8188-ccu.c
new file mode 100644
index 000000000000..ebc0d3aeee11
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-ccu.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ccu_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CCU(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ccu_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ccu_clks[] = {
+ GATE_CCU(CLK_CCU_LARB27, "ccu_larb27", "top_ccu", 0),
+ GATE_CCU(CLK_CCU_AHB, "ccu_ahb", "top_ccu", 1),
+ GATE_CCU(CLK_CCU_CCU0, "ccu_ccu0", "top_ccu", 2),
+};
+
+static const struct mtk_clk_desc ccu_desc = {
+ .clks = ccu_clks,
+ .num_clks = ARRAY_SIZE(ccu_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_ccu[] = {
+ { .compatible = "mediatek,mt8188-ccusys", .data = &ccu_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ccu);
+
+static struct platform_driver clk_mt8188_ccu_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-ccu",
+ .of_match_table = of_match_clk_mt8188_ccu,
+ },
+};
+
+module_platform_driver(clk_mt8188_ccu_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c
new file mode 100644
index 000000000000..b4622875e14c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-img.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imgsys_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMGSYS(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &imgsys_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate imgsys_main_clks[] = {
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_LARB9, "imgsys_main_larb9", "top_img", 0),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_TRAW0, "imgsys_main_traw0", "top_img", 1),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_TRAW1, "imgsys_main_traw1", "top_img", 2),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_VCORE_GALS, "imgsys_main_vcore_gals", "top_img", 3),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_DIP0, "imgsys_main_dip0", "top_img", 8),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_WPE0, "imgsys_main_wpe0", "top_img", 9),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_IPE, "imgsys_main_ipe", "top_img", 10),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_WPE1, "imgsys_main_wpe1", "top_img", 12),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_WPE2, "imgsys_main_wpe2", "top_img", 13),
+ GATE_IMGSYS(CLK_IMGSYS_MAIN_GALS, "imgsys_main_gals", "top_img", 31),
+};
+
+static const struct mtk_gate imgsys_wpe1_clks[] = {
+ GATE_IMGSYS(CLK_IMGSYS_WPE1_LARB11, "imgsys_wpe1_larb11", "top_img", 0),
+ GATE_IMGSYS(CLK_IMGSYS_WPE1, "imgsys_wpe1", "top_img", 1),
+};
+
+static const struct mtk_gate imgsys_wpe2_clks[] = {
+ GATE_IMGSYS(CLK_IMGSYS_WPE2_LARB11, "imgsys_wpe2_larb11", "top_img", 0),
+ GATE_IMGSYS(CLK_IMGSYS_WPE2, "imgsys_wpe2", "top_img", 1),
+};
+
+static const struct mtk_gate imgsys_wpe3_clks[] = {
+ GATE_IMGSYS(CLK_IMGSYS_WPE3_LARB11, "imgsys_wpe3_larb11", "top_img", 0),
+ GATE_IMGSYS(CLK_IMGSYS_WPE3, "imgsys_wpe3", "top_img", 1),
+};
+
+static const struct mtk_gate imgsys1_dip_top_clks[] = {
+ GATE_IMGSYS(CLK_IMGSYS1_DIP_TOP_LARB10, "imgsys1_dip_larb10", "top_img", 0),
+ GATE_IMGSYS(CLK_IMGSYS1_DIP_TOP_DIP_TOP, "imgsys1_dip_dip_top", "top_img", 1),
+};
+
+static const struct mtk_gate imgsys1_dip_nr_clks[] = {
+ GATE_IMGSYS(CLK_IMGSYS1_DIP_NR_LARB15, "imgsys1_dip_nr_larb15", "top_img", 0),
+ GATE_IMGSYS(CLK_IMGSYS1_DIP_NR_DIP_NR, "imgsys1_dip_nr_dip_nr", "top_img", 1),
+};
+
+static const struct mtk_clk_desc imgsys_main_desc = {
+ .clks = imgsys_main_clks,
+ .num_clks = ARRAY_SIZE(imgsys_main_clks),
+};
+
+static const struct mtk_clk_desc imgsys_wpe1_desc = {
+ .clks = imgsys_wpe1_clks,
+ .num_clks = ARRAY_SIZE(imgsys_wpe1_clks),
+};
+
+static const struct mtk_clk_desc imgsys_wpe2_desc = {
+ .clks = imgsys_wpe2_clks,
+ .num_clks = ARRAY_SIZE(imgsys_wpe2_clks),
+};
+
+static const struct mtk_clk_desc imgsys_wpe3_desc = {
+ .clks = imgsys_wpe3_clks,
+ .num_clks = ARRAY_SIZE(imgsys_wpe3_clks),
+};
+
+static const struct mtk_clk_desc imgsys1_dip_top_desc = {
+ .clks = imgsys1_dip_top_clks,
+ .num_clks = ARRAY_SIZE(imgsys1_dip_top_clks),
+};
+
+static const struct mtk_clk_desc imgsys1_dip_nr_desc = {
+ .clks = imgsys1_dip_nr_clks,
+ .num_clks = ARRAY_SIZE(imgsys1_dip_nr_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_imgsys_main[] = {
+ { .compatible = "mediatek,mt8188-imgsys", .data = &imgsys_main_desc },
+ { .compatible = "mediatek,mt8188-imgsys-wpe1", .data = &imgsys_wpe1_desc },
+ { .compatible = "mediatek,mt8188-imgsys-wpe2", .data = &imgsys_wpe2_desc },
+ { .compatible = "mediatek,mt8188-imgsys-wpe3", .data = &imgsys_wpe3_desc },
+ { .compatible = "mediatek,mt8188-imgsys1-dip-top", .data = &imgsys1_dip_top_desc },
+ { .compatible = "mediatek,mt8188-imgsys1-dip-nr", .data = &imgsys1_dip_nr_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imgsys_main);
+
+static struct platform_driver clk_mt8188_imgsys_main_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-imgsys_main",
+ .of_match_table = of_match_clk_mt8188_imgsys_main,
+ },
+};
+
+module_platform_driver(clk_mt8188_imgsys_main_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
new file mode 100644
index 000000000000..da41a3c59919
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_iic_wrap_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP_IIC_WRAP(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &imp_iic_wrap_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_OPS_PARENT_ENABLE)
+
+static const struct mtk_gate imp_iic_wrap_c_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C0,
+ "imp_iic_wrap_c_ap_clock_i2c0", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C2,
+ "imp_iic_wrap_c_ap_clock_i2c2", "top_i2c", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C3,
+ "imp_iic_wrap_c_ap_clock_i2c3", "top_i2c", 2),
+};
+
+static const struct mtk_gate imp_iic_wrap_w_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C1,
+ "imp_iic_wrap_w_ap_clock_i2c1", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C4,
+ "imp_iic_wrap_w_ap_clock_i2c4", "top_i2c", 1),
+};
+
+static const struct mtk_gate imp_iic_wrap_en_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C5,
+ "imp_iic_wrap_en_ap_clock_i2c5", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C6,
+ "imp_iic_wrap_en_ap_clock_i2c6", "top_i2c", 1),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_c_desc = {
+ .clks = imp_iic_wrap_c_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_c_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_w_desc = {
+ .clks = imp_iic_wrap_w_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_w_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_en_desc = {
+ .clks = imp_iic_wrap_en_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_en_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_imp_iic_wrap[] = {
+ { .compatible = "mediatek,mt8188-imp-iic-wrap-c", .data = &imp_iic_wrap_c_desc },
+ { .compatible = "mediatek,mt8188-imp-iic-wrap-w", .data = &imp_iic_wrap_w_desc },
+ { .compatible = "mediatek,mt8188-imp-iic-wrap-en", .data = &imp_iic_wrap_en_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imp_iic_wrap);
+
+static struct platform_driver clk_mt8188_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8188_imp_iic_wrap,
+ },
+};
+
+module_platform_driver(clk_mt8188_imp_iic_wrap_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-infra_ao.c b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
new file mode 100644
index 000000000000..91c35db40b4e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs infra_ao0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra_ao1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra_ao2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra_ao3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+static const struct mtk_gate_regs infra_ao4_cg_regs = {
+ .set_ofs = 0xe0,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe8,
+};
+
+#define GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO0(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_ao2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO3(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO4_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao4_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO4(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO4_FLAGS(_id, _name, _parent, _shift, 0)
+
+static const struct mtk_gate infra_ao_clks[] = {
+ /* INFRA_AO0 */
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_TMR, "infra_ao_pmic_tmr", "top_pwrap_ulposc", 0),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_AP, "infra_ao_pmic_ap", "top_pwrap_ulposc", 1),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_MD, "infra_ao_pmic_md", "top_pwrap_ulposc", 2),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_CONN, "infra_ao_pmic_conn", "top_pwrap_ulposc", 3),
+ /* infra_ao_sej is the main clock for the secure engine with JTAG support */
+ GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SEJ, "infra_ao_sej", "top_axi", 5, CLK_IS_CRITICAL),
+ GATE_INFRA_AO0(CLK_INFRA_AO_APXGPT, "infra_ao_apxgpt", "top_axi", 6),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE, "infra_ao_gce", "top_axi", 8),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE2, "infra_ao_gce2", "top_axi", 9),
+ GATE_INFRA_AO0(CLK_INFRA_AO_THERM, "infra_ao_therm", "top_axi", 10),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM_HCLK, "infra_ao_pwm_h", "top_axi", 15),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM1, "infra_ao_pwm1", "top_pwm", 16),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM2, "infra_ao_pwm2", "top_pwm", 17),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM3, "infra_ao_pwm3", "top_pwm", 18),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM4, "infra_ao_pwm4", "top_pwm", 19),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM, "infra_ao_pwm", "top_pwm", 21),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART0, "infra_ao_uart0", "top_uart", 22),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART1, "infra_ao_uart1", "top_uart", 23),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART2, "infra_ao_uart2", "top_uart", 24),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART3, "infra_ao_uart3", "top_uart", 25),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART4, "infra_ao_uart4", "top_uart", 26),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE_26M, "infra_ao_gce_26m", "clk26m", 27),
+ GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_dma", "pad_fpc_ck", 28),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
+ /* INFRA_AO1 */
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc5hclk", 2),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC2, "infra_ao_msdc2", "top_axi", 5),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0_SRC, "infra_ao_msdc0_clk", "top_msdc50_0", 6),
+ /* infra_ao_dvfsrc is for internal DVFS usage and should not be handled by Linux */
+ GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DVFSRC, "infra_ao_dvfsrc",
+ "clk26m", 7, CLK_IS_CRITICAL),
+ GATE_INFRA_AO1(CLK_INFRA_AO_TRNG, "infra_ao_trng", "top_axi", 9),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC, "infra_ao_auxadc", "clk26m", 10),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CPUM, "infra_ao_cpum", "top_axi", 11),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_32K, "infra_ao_hdmi_32k", "clk32k", 12),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CEC_66M_HCLK, "infra_ao_cec_66m_hclk", "top_axi", 13),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_26M, "infra_ao_pcie_tl_26m", "clk26m", 15),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1_SRC, "infra_ao_msdc1_clk", "top_msdc30_1", 16),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CEC_66M_BCLK, "infra_ao_cec_66m_bclk", "top_axi", 17),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_96M, "infra_ao_pcie_tl_96m", "top_tl", 18),
+ /* infra_ao_dapc is for the device access permission control module */
+ GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DEVICE_APC, "infra_ao_dapc",
+ "top_axi", 20, CLK_IS_CRITICAL),
+ GATE_INFRA_AO1(CLK_INFRA_AO_ECC_66M_HCLK, "infra_ao_ecc_66m_hclk", "top_axi", 23),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DEBUGSYS, "infra_ao_debugsys", "top_axi", 24),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUDIO, "infra_ao_audio", "top_axi", 25),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_32K, "infra_ao_pcie_tl_32k", "clk32k", 26),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DBG_TRACE, "infra_ao_dbg_trace", "top_axi", 29),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DRAMC_F26M, "infra_ao_dramc26", "clk26m", 31),
+ /* INFRA_AO2 */
+ GATE_INFRA_AO2(CLK_INFRA_AO_IRTX, "infra_ao_irtx", "top_axi", 0),
+ GATE_INFRA_AO2(CLK_INFRA_AO_DISP_PWM, "infra_ao_disp_pwm", "top_disp_pwm0", 2),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CLDMA_BCLK, "infra_ao_cldmabclk", "top_axi", 3),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AUDIO_26M_BCLK, "infra_ao_audio26m", "clk26m", 4),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "top_spi", 6),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI2, "infra_ao_spi2", "top_spi", 9),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI3, "infra_ao_spi3", "top_spi", 10),
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_FSSPM, "infra_ao_fsspm",
+ "top_sspm", 15, CLK_IS_CRITICAL),
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_SSPM_BUS_HCLK, "infra_ao_sspm_hclk",
+ "top_axi", 17, CLK_IS_CRITICAL),
+ GATE_INFRA_AO2(CLK_INFRA_AO_APDMA_BCLK, "infra_ao_apdma_bclk", "top_axi", 18),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CQ_DMA, "infra_ao_cq_dma", "top_axi", 27),
+ /* INFRA_AO3 */
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SELF, "infra_ao_msdc0sf", "top_msdc50_0", 0),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC1_SELF, "infra_ao_msdc1sf", "top_msdc50_0", 1),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC2_SELF, "infra_ao_msdc2sf", "top_msdc50_0", 2),
+ GATE_INFRA_AO3(CLK_INFRA_AO_I2S_DMA, "infra_ao_i2s_dma", "top_axi", 5),
+ GATE_INFRA_AO3(CLK_INFRA_AO_AP_MSDC0, "infra_ao_ap_msdc0", "top_msdc50_0", 7),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MD_MSDC0, "infra_ao_md_msdc0", "top_msdc50_0", 8),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC30_2, "infra_ao_msdc30_2", "top_msdc30_2", 9),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU, "infra_ao_gcpu", "top_gcpu", 10),
+ GATE_INFRA_AO3(CLK_INFRA_AO_PCIE_PERI_26M, "infra_ao_pcie_peri_26m", "clk26m", 15),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU_66M_BCLK, "infra_ao_gcpu_66m_bclk", "top_axi", 16),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU_133M_BCLK, "infra_ao_gcpu_133m_bclk", "top_axi", 17),
+ GATE_INFRA_AO3(CLK_INFRA_AO_DISP_PWM1, "infra_ao_disp_pwm1", "top_disp_pwm1", 20),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FBIST2FPC, "infra_ao_fbist2fpc", "top_msdc50_0", 24),
+ /* infra_ao_dapc_sync is for the device access permission control module */
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_DEVICE_APC_SYNC, "infra_ao_dapc_sync",
+ "top_axi", 25, CLK_IS_CRITICAL),
+ GATE_INFRA_AO3(CLK_INFRA_AO_PCIE_P1_PERI_26M, "infra_ao_pcie_p1_peri_26m", "clk26m", 26),
+ /* INFRA_AO4 */
+ /* infra_ao_133m_mclk_set/infra_ao_66m_mclk_set are the main clocks of the peripherals */
+ GATE_INFRA_AO4_FLAGS(CLK_INFRA_AO_133M_MCLK_CK, "infra_ao_133m_mclk_set",
+ "top_axi", 0, CLK_IS_CRITICAL),
+ GATE_INFRA_AO4_FLAGS(CLK_INFRA_AO_66M_MCLK_CK, "infra_ao_66m_mclk_set",
+ "top_axi", 1, CLK_IS_CRITICAL),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_PL_P_250M_P0, "infra_ao_pcie_pl_p_250m_p0",
+ "pextp_pipe", 7),
+ GATE_INFRA_AO4(CLK_INFRA_AO_RG_AES_MSDCFDE_CK_0P,
+ "infra_ao_aes_msdcfde_0p", "top_aes_msdcfde", 18),
+};
+
+static const struct mtk_clk_desc infra_ao_desc = {
+ .clks = infra_ao_clks,
+ .num_clks = ARRAY_SIZE(infra_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_infra_ao[] = {
+ { .compatible = "mediatek,mt8188-infracfg-ao", .data = &infra_ao_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_infra_ao);
+
+static struct platform_driver clk_mt8188_infra_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-infra_ao",
+ .of_match_table = of_match_clk_mt8188_infra_ao,
+ },
+};
+module_platform_driver(clk_mt8188_infra_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c
new file mode 100644
index 000000000000..c07afbd1429e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-ipe.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ipe_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipe_clks[] = {
+ GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 0),
+ GATE_IPE(CLK_IPE_FDVT, "ipe_fdvt", "top_ipe", 1),
+ GATE_IPE(CLK_IPE_ME, "ipe_me", "top_ipe", 2),
+ GATE_IPE(CLK_IPESYS_TOP, "ipesys_top", "top_ipe", 3),
+ GATE_IPE(CLK_IPE_SMI_LARB12, "ipe_smi_larb12", "top_ipe", 4),
+};
+
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_ipe[] = {
+ { .compatible = "mediatek,mt8188-ipesys", .data = &ipe_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ipe);
+
+static struct platform_driver clk_mt8188_ipe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-ipe",
+ .of_match_table = of_match_clk_mt8188_ipe,
+ },
+};
+
+module_platform_driver(clk_mt8188_ipe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-mfg.c b/drivers/clk/mediatek/clk-mt8188-mfg.c
new file mode 100644
index 000000000000..e5a6eaf84672
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-mfg.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mfgcfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfgcfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT)
+
+static const struct mtk_gate mfgcfg_clks[] = {
+ GATE_MFG(CLK_MFGCFG_BG3D, "mfgcfg_bg3d", "mfg_ck_fast_ref", 0),
+};
+
+static const struct mtk_clk_desc mfgcfg_desc = {
+ .clks = mfgcfg_clks,
+ .num_clks = ARRAY_SIZE(mfgcfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_mfgcfg[] = {
+ { .compatible = "mediatek,mt8188-mfgcfg", .data = &mfgcfg_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_mfgcfg);
+
+static struct platform_driver clk_mt8188_mfgcfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-mfgcfg",
+ .of_match_table = of_match_clk_mt8188_mfgcfg,
+ },
+};
+
+module_platform_driver(clk_mt8188_mfgcfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-peri_ao.c b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
new file mode 100644
index 000000000000..b00e1ae8bd26
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs peri_ao_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri_ao_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate peri_ao_clks[] = {
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET, "peri_ao_ethernet", "top_axi", 0),
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET_BUS, "peri_ao_ethernet_bus", "top_axi", 1),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIF_BUS, "peri_ao_flashif_bus", "top_axi", 3),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIF_26M, "peri_ao_flashif_26m", "clk26m", 4),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIFLASHCK, "peri_ao_flashiflashck", "top_spinor", 5),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_2P_BUS, "peri_ao_ssusb_2p_bus", "top_usb_top_2p", 9),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_2P_XHCI, "peri_ao_ssusb_2p_xhci", "top_ssusb_xhci_2p", 10),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_3P_BUS, "peri_ao_ssusb_3p_bus", "top_usb_top_3p", 11),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_3P_XHCI, "peri_ao_ssusb_3p_xhci", "top_ssusb_xhci_3p", 12),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_BUS, "peri_ao_ssusb_bus", "top_usb_top", 13),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_XHCI, "peri_ao_ssusb_xhci", "top_ssusb_xhci", 14),
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET_MAC, "peri_ao_ethernet_mac_clk", "top_snps_eth_250m", 16),
+ GATE_PERI_AO(CLK_PERI_AO_PCIE_P0_FMEM, "peri_ao_pcie_p0_fmem", "hd_466m_fmem_ck", 24),
+};
+
+static const struct mtk_clk_desc peri_ao_desc = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_peri_ao[] = {
+ { .compatible = "mediatek,mt8188-pericfg-ao", .data = &peri_ao_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_peri_ao);
+
+static struct platform_driver clk_mt8188_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-peri_ao",
+ .of_match_table = of_match_clk_mt8188_peri_ao,
+ },
+};
+module_platform_driver(clk_mt8188_peri_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
new file mode 100644
index 000000000000..c56ec42cb15f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+static DEFINE_SPINLOCK(mt8188_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_ULPOSC1, "ulposc_ck1", NULL, 260000000),
+ FIXED_CLK(CLK_TOP_MPHONE_SLAVE_BCK, "mphone_slave_bck", NULL, 49152000),
+ FIXED_CLK(CLK_TOP_PAD_FPC, "pad_fpc_ck", NULL, 50000000),
+ FIXED_CLK(CLK_TOP_466M_FMEM, "hd_466m_fmem_ck", NULL, 533000000),
+ FIXED_CLK(CLK_TOP_PEXTP_PIPE, "pextp_pipe", NULL, 250000000),
+ FIXED_CLK(CLK_TOP_DSI_PHY, "dsi_phy", NULL, 500000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll_d4", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll_d4", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll_d5", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D6_D4, "mainpll_d6_d4", "mainpll_d6", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D6_D8, "mainpll_d6_d8", "mainpll_d6", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll_d7", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll_d4", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll_d4", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll_d4", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll_d6", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll_d6", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll_d6", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D10, "univpll_192m_d10", "univpll_192m", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32),
+ FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1", 1, 3),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2", 1, 3),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_APLL3_D4, "apll3_d4", "apll3", 1, 4),
+ FACTOR(CLK_TOP_APLL4_D4, "apll4_d4", "apll4", 1, 4),
+ FACTOR(CLK_TOP_APLL5_D4, "apll5_d4", "apll5", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5),
+ FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1, 6),
+ FACTOR(CLK_TOP_MMPLL_D6_D2, "mmpll_d6_d2", "mmpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7),
+ FACTOR(CLK_TOP_MMPLL_D9, "mmpll_d9", "mmpll", 1, 9),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 1, 16),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16),
+ FACTOR(CLK_TOP_ETHPLL_D2, "ethpll_d2", "ethpll", 1, 2),
+ FACTOR(CLK_TOP_ETHPLL_D4, "ethpll_d4", "ethpll", 1, 4),
+ FACTOR(CLK_TOP_ETHPLL_D8, "ethpll_d8", "ethpll", 1, 8),
+ FACTOR(CLK_TOP_ETHPLL_D10, "ethpll_d10", "ethpll", 1, 10),
+ FACTOR(CLK_TOP_ADSPPLL_D2, "adsppll_d2", "adsppll", 1, 2),
+ FACTOR(CLK_TOP_ADSPPLL_D4, "adsppll_d4", "adsppll", 1, 4),
+ FACTOR(CLK_TOP_ADSPPLL_D8, "adsppll_d8", "adsppll", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc_ck1", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc_ck1", 1, 4),
+ FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc_ck1", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D7, "ulposc1_d7", "ulposc_ck1", 1, 7),
+ FACTOR(CLK_TOP_ULPOSC1_D10, "ulposc1_d10", "ulposc_ck1", 1, 10),
+ FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc_ck1", 1, 16),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "ulposc1_d4"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "mainpll_d7_d4",
+ "clk32k"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "univpll_d4",
+ "mainpll_d6",
+ "univpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d3",
+ "mainpll_d3"
+};
+
+static const char * const bus_aximem_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6"
+};
+
+static const char * const vpp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5",
+ "tvdpll1",
+ "tvdpll2",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const ethdr_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5_d4",
+ "tvdpll1",
+ "tvdpll2",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "imgpll",
+ "mainpll_d4",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d7"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "tvdpll1",
+ "mainpll_d4",
+ "mmpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "imgpll"
+};
+
+static const char * const ccu_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d5",
+ "mainpll_d6",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "univpll_d7"
+};
+
+static const char * const ccu_ahb_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d5",
+ "mainpll_d6",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "univpll_d7"
+};
+
+static const char * const img_parents[] = {
+ "clk26m",
+ "imgpll",
+ "univpll_d4",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d6_d4"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d5",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp1_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d5",
+ "mmpll_d5",
+ "univpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp2_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d5",
+ "mmpll_d5",
+ "univpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp3_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d5",
+ "mmpll_d5",
+ "univpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp4_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp5_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp6_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp7_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d5",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const mfg_core_tmp_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "univpll_d6",
+ "univpll_d7"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_192m_d10",
+ "clk13m",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg2_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_192m_d10",
+ "clk13m",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg3_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_192m_d10",
+ "clk13m",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4",
+ "mainpll_d6_d4",
+ "univpll_d6_d4",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d4_d4",
+ "univpll_d5_d4"
+};
+
+static const char * const msdc5hclk_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "msdcpll_d2",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7_d2",
+ "msdcpll_d2"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7_d2",
+ "msdcpll_d2"
+};
+
+static const char * const intdir_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d4"
+};
+
+static const char * const audio_h_parents[] = {
+ "clk26m",
+ "univpll_d7",
+ "apll1",
+ "apll2"
+};
+
+static const char * const pwrap_ulposc_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "ulposc1_d7",
+ "ulposc1_d8",
+ "ulposc1_d16",
+ "mainpll_d4_d8",
+ "univpll_d5_d8",
+ "tvdpll1_d16"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const sspm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2",
+ "mainpll_d9",
+ "mainpll_d4_d2"
+};
+
+static const char * const dp_parents[] = {
+ "clk26m",
+ "tvdpll1_d2",
+ "tvdpll2_d2",
+ "tvdpll1_d4",
+ "tvdpll2_d4",
+ "tvdpll1_d8",
+ "tvdpll2_d8",
+ "tvdpll1_d16",
+ "tvdpll2_d16"
+};
+
+static const char * const edp_parents[] = {
+ "clk26m",
+ "tvdpll1_d2",
+ "tvdpll2_d2",
+ "tvdpll1_d4",
+ "tvdpll2_d4",
+ "tvdpll1_d8",
+ "tvdpll2_d8",
+ "tvdpll1_d16",
+ "tvdpll2_d16"
+};
+
+static const char * const dpi_parents[] = {
+ "clk26m",
+ "tvdpll1_d2",
+ "tvdpll2_d2",
+ "tvdpll1_d4",
+ "tvdpll2_d4",
+ "tvdpll1_d8",
+ "tvdpll2_d8",
+ "tvdpll1_d16",
+ "tvdpll2_d16"
+};
+
+static const char * const disp_pwm0_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "ulposc1_d2",
+ "ulposc1_d4",
+ "ulposc1_d16",
+ "ethpll_d4"
+};
+
+static const char * const disp_pwm1_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "ulposc1_d2",
+ "ulposc1_d4",
+ "ulposc1_d16"
+};
+
+static const char * const usb_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const ssusb_xhci_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const usb_2p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const ssusb_xhci_2p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const usb_3p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const ssusb_xhci_3p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const seninf1_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mmpll_d5_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mmpll_d6",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "mmpll_d9",
+ "univpll_d4_d4",
+ "mainpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d5_d2",
+ "mainpll_d5"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mmpll_d6",
+ "mainpll_d4",
+ "tvdpll2",
+ "univpll_d4",
+ "imgpll",
+ "univpll_d6_d2",
+ "mmpll_d9"
+};
+
+static const char * const pwm_parents[] = {
+ "clk32k",
+ "clk26m",
+ "univpll_d4_d8",
+ "univpll_d6_d4"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d6_d2",
+ "mainpll_d7_d4"
+};
+
+static const char * const spmi_p_mst_parents[] = {
+ "clk26m",
+ "clk13m",
+ "ulposc1_d8",
+ "ulposc1_d10",
+ "ulposc1_d16",
+ "ulposc1_d7",
+ "clk32k",
+ "mainpll_d7_d8",
+ "mainpll_d6_d8",
+ "mainpll_d5_d8"
+};
+
+static const char * const spmi_m_mst_parents[] = {
+ "clk26m",
+ "clk13m",
+ "ulposc1_d8",
+ "ulposc1_d10",
+ "ulposc1_d16",
+ "ulposc1_d7",
+ "clk32k",
+ "mainpll_d7_d8",
+ "mainpll_d6_d8",
+ "mainpll_d5_d8"
+};
+
+static const char * const dvfsrc_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "univpll_d6_d8",
+ "msdcpll_d16"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const aes_msdcfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4_d4",
+ "univpll_d4_d2",
+ "univpll_d6"
+};
+
+static const char * const dsi_occ_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const wpe_vpp_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "tvdpll1",
+ "univpll_d4"
+};
+
+static const char * const hdcp_parents[] = {
+ "clk26m",
+ "univpll_d4_d8",
+ "mainpll_d5_d8",
+ "univpll_d6_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+ "clk26m",
+ "univpll_192m_d4",
+ "univpll_192m_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const hdmi_apb_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "msdcpll_d2"
+};
+
+static const char * const snps_eth_250m_parents[] = {
+ "clk26m",
+ "ethpll_d2"
+};
+
+static const char * const snps_eth_62p4m_ptp_parents[] = {
+ "apll2_d3",
+ "apll1_d3",
+ "clk26m",
+ "ethpll_d8"
+};
+
+static const char * const snps_eth_50m_rmii_parents[] = {
+ "clk26m",
+ "ethpll_d10"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "clk13m",
+ "mainpll_d6",
+ "mainpll_d5_d2",
+ "univpll_d4_d4",
+ "univpll_d4",
+ "ulposc1_d2",
+ "ulposc1_ck1",
+ "adsppll",
+ "adsppll_d2",
+ "adsppll_d4",
+ "adsppll_d8"
+};
+
+static const char * const audio_local_bus_parents[] = {
+ "clk26m",
+ "clk13m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d4",
+ "univpll_d6",
+ "ulposc1_ck1",
+ "ulposc1_d4",
+ "ulposc1_d2"
+};
+
+static const char * const asm_h_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const asm_l_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const apll1_parents[] = {
+ "clk26m",
+ "apll1_d4"
+};
+
+static const char * const apll2_parents[] = {
+ "clk26m",
+ "apll2_d4"
+};
+
+static const char * const apll3_parents[] = {
+ "clk26m",
+ "apll3_d4"
+};
+
+static const char * const apll4_parents[] = {
+ "clk26m",
+ "apll4_d4"
+};
+
+static const char * const apll5_parents[] = {
+ "clk26m",
+ "apll5_d4"
+};
+
+static const char * const i2so1_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5"
+};
+
+static const char * const i2so2_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5"
+};
+
+static const char * const i2si1_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5"
+};
+
+static const char * const i2si2_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5"
+};
+
+static const char * const dptx_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5"
+};
+
+static const char * const aud_iec_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clk26m",
+ "apll1_d4"
+};
+
+static const char * const a2sys_parents[] = {
+ "clk26m",
+ "apll2_d4"
+};
+
+static const char * const a3sys_parents[] = {
+ "clk26m",
+ "apll3_d4",
+ "apll4_d4",
+ "apll5_d4"
+};
+
+static const char * const a4sys_parents[] = {
+ "clk26m",
+ "apll3_d4",
+ "apll4_d4",
+ "apll5_d4"
+};
+
+static const char * const ecc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6"
+};
+
+static const char * const spinor_parents[] = {
+ "clk26m",
+ "clk13m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const ulposc_parents[] = {
+ "ulposc_ck1",
+ "ethpll_d2",
+ "mainpll_d4_d2",
+ "ethpll_d10"
+};
+
+static const char * const srck_parents[] = {
+ "ulposc1_d10",
+ "clk26m"
+};
+
+static const char * const mfg_fast_ref_parents[] = {
+ "top_mfg_core_tmp",
+ "mfgpll"
+};
+
+static const struct mtk_mux top_mtk_muxes[] = {
+ /*
+ * CLK_CFG_0
+ * axi_sel and bus_aximem_sel are bus clocks and must not be disabled by Linux.
+ * spm_sel and scp_sel are the main clocks of the always-on co-processor.
+ */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI, "top_axi", axi_parents,
+ 0x020, 0x024, 0x028, 0, 4, 7, 0x04, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM, "top_spm", spm_parents,
+ 0x020, 0x024, 0x028, 8, 4, 15, 0x04, 1, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SCP, "top_scp", scp_parents,
+ 0x020, 0x024, 0x028, 16, 4, 23, 0x04, 2, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_BUS_AXIMEM, "top_bus_aximem", bus_aximem_parents,
+ 0x020, 0x024, 0x028, 24, 4, 31, 0x04, 3, CLK_IS_CRITICAL),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VPP, "top_vpp",
+ vpp_parents, 0x02C, 0x030, 0x034, 0, 4, 7, 0x04, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETHDR, "top_ethdr",
+ ethdr_parents, 0x02C, 0x030, 0x034, 8, 4, 15, 0x04, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE, "top_ipe",
+ ipe_parents, 0x02C, 0x030, 0x034, 16, 4, 23, 0x04, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM, "top_cam",
+ cam_parents, 0x02C, 0x030, 0x034, 24, 4, 31, 0x04, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CCU, "top_ccu",
+ ccu_parents, 0x038, 0x03C, 0x040, 0, 4, 7, 0x04, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CCU_AHB, "top_ccu_ahb",
+ ccu_ahb_parents, 0x038, 0x03C, 0x040, 8, 4, 15, 0x04, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG, "top_img",
+ img_parents, 0x038, 0x03C, 0x040, 16, 4, 23, 0x04, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM, "top_camtm",
+ camtm_parents, 0x038, 0x03C, 0x040, 24, 4, 31, 0x04, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP, "top_dsp",
+ dsp_parents, 0x044, 0x048, 0x04C, 0, 4, 7, 0x04, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP1, "top_dsp1",
+ dsp1_parents, 0x044, 0x048, 0x04C, 8, 4, 15, 0x04, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP2, "top_dsp2",
+ dsp2_parents, 0x044, 0x048, 0x04C, 16, 4, 23, 0x04, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP3, "top_dsp3",
+ dsp3_parents, 0x044, 0x048, 0x04C, 24, 4, 31, 0x04, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP4, "top_dsp4",
+ dsp4_parents, 0x050, 0x054, 0x058, 0, 4, 7, 0x04, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP5, "top_dsp5",
+ dsp5_parents, 0x050, 0x054, 0x058, 8, 4, 15, 0x04, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP6, "top_dsp6",
+ dsp6_parents, 0x050, 0x054, 0x058, 16, 4, 23, 0x04, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP7, "top_dsp7",
+ dsp7_parents, 0x050, 0x054, 0x058, 24, 4, 31, 0x04, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_CORE_TMP, "top_mfg_core_tmp",
+ mfg_core_tmp_parents, 0x05C, 0x060, 0x064, 0, 4, 7, 0x04, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG, "top_camtg",
+ camtg_parents, 0x05C, 0x060, 0x064, 8, 4, 15, 0x04, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2, "top_camtg2",
+ camtg2_parents, 0x05C, 0x060, 0x064, 16, 4, 23, 0x04, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3, "top_camtg3",
+ camtg3_parents, 0x05C, 0x060, 0x064, 24, 4, 31, 0x04, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART, "top_uart",
+ uart_parents, 0x068, 0x06C, 0x070, 0, 4, 7, 0x04, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI, "top_spi",
+ spi_parents, 0x068, 0x06C, 0x070, 8, 4, 15, 0x04, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK, "top_msdc5hclk",
+ msdc5hclk_parents, 0x068, 0x06C, 0x070, 16, 4, 23, 0x04, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0, "top_msdc50_0",
+ msdc50_0_parents, 0x068, 0x06C, 0x070, 24, 4, 31, 0x04, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1, "top_msdc30_1",
+ msdc30_1_parents, 0x074, 0x078, 0x07C, 0, 4, 7, 0x04, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2, "top_msdc30_2",
+ msdc30_2_parents, 0x074, 0x078, 0x07C, 8, 4, 15, 0x04, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_INTDIR, "top_intdir",
+ intdir_parents, 0x074, 0x078, 0x07C, 16, 4, 23, 0x04, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS, "top_aud_intbus",
+ aud_intbus_parents, 0x074, 0x078, 0x07C, 24, 4, 31, 0x04, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_H, "top_audio_h",
+ audio_h_parents, 0x080, 0x084, 0x088, 0, 4, 7, 0x08, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWRAP_ULPOSC, "top_pwrap_ulposc",
+ pwrap_ulposc_parents, 0x080, 0x084, 0x088, 8, 4, 15, 0x08, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB, "top_atb",
+ atb_parents, 0x080, 0x084, 0x088, 16, 4, 23, 0x08, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSPM, "top_sspm",
+ sspm_parents, 0x080, 0x084, 0x088, 24, 4, 31, 0x08, 3),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DP, "top_dp",
+ dp_parents, 0x08C, 0x090, 0x094, 0, 4, 7, 0x08, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EDP, "top_edp",
+ edp_parents, 0x08C, 0x090, 0x094, 8, 4, 15, 0x08, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi",
+ dpi_parents, 0x08C, 0x090, 0x094, 16, 4, 23, 0x08, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM0, "top_disp_pwm0",
+ disp_pwm0_parents, 0x08C, 0x090, 0x094, 24, 4, 31, 0x08, 7),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM1, "top_disp_pwm1",
+ disp_pwm1_parents, 0x098, 0x09C, 0x0A0, 0, 4, 7, 0x08, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP, "top_usb_top",
+ usb_parents, 0x098, 0x09C, 0x0A0, 8, 4, 15, 0x08, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI, "top_ssusb_xhci",
+ ssusb_xhci_parents, 0x098, 0x09C, 0x0A0, 16, 4, 23, 0x08, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_2P, "top_usb_top_2p",
+ usb_2p_parents, 0x098, 0x09C, 0x0A0, 24, 4, 31, 0x08, 11),
+ /* CLK_CFG_11 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_2P, "top_ssusb_xhci_2p",
+ ssusb_xhci_2p_parents, 0x0A4, 0x0A8, 0x0AC, 0, 4, 7, 0x08, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_3P, "top_usb_top_3p",
+ usb_3p_parents, 0x0A4, 0x0A8, 0x0AC, 8, 4, 15, 0x08, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_3P, "top_ssusb_xhci_3p",
+ ssusb_xhci_3p_parents, 0x0A4, 0x0A8, 0x0AC, 16, 4, 23, 0x08, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C, "top_i2c",
+ i2c_parents, 0x0A4, 0x0A8, 0x0AC, 24, 4, 31, 0x08, 15),
+ /* CLK_CFG_12 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF, "top_seninf",
+ seninf_parents, 0x0B0, 0x0B4, 0x0B8, 0, 4, 7, 0x08, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1, "top_seninf1",
+ seninf1_parents, 0x0B0, 0x0B4, 0x0B8, 8, 4, 15, 0x08, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU, "top_gcpu",
+ gcpu_parents, 0x0B0, 0x0B4, 0x0B8, 16, 4, 23, 0x08, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC, "top_venc",
+ venc_parents, 0x0B0, 0x0B4, 0x0B8, 24, 4, 31, 0x08, 19),
+ /*
+ * CLK_CFG_13
+ * top_mcupm is the main clock of the co-processor and must not be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC, "top_vdec",
+ vdec_parents, 0x0BC, 0x0C0, 0x0C4, 0, 4, 7, 0x08, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM, "top_pwm",
+ pwm_parents, 0x0BC, 0x0C0, 0x0C4, 8, 4, 15, 0x08, 21),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MCUPM, "top_mcupm", mcupm_parents,
+ 0x0BC, 0x0C0, 0x0C4, 16, 4, 23, 0x08, 22, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_P_MST, "top_spmi_p_mst",
+ spmi_p_mst_parents, 0x0BC, 0x0C0, 0x0C4, 24, 4, 31, 0x08, 23),
+ /*
+ * CLK_CFG_14
+ * dvfsrc_sel is used internally by DVFS and must not be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_M_MST, "top_spmi_m_mst",
+ spmi_m_mst_parents, 0x0C8, 0x0CC, 0x0D0, 0, 4, 7, 0x08, 24),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DVFSRC, "top_dvfsrc", dvfsrc_parents,
+ 0x0C8, 0x0CC, 0x0D0, 8, 4, 15, 0x08, 25, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TL, "top_tl",
+ tl_parents, 0x0C8, 0x0CC, 0x0D0, 16, 4, 23, 0x08, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_MSDCFDE, "top_aes_msdcfde",
+ aes_msdcfde_parents, 0x0C8, 0x0CC, 0x0D0, 24, 4, 31, 0x08, 27),
+ /* CLK_CFG_15 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSI_OCC, "top_dsi_occ",
+ dsi_occ_parents, 0x0D4, 0x0D8, 0x0DC, 0, 4, 7, 0x08, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_WPE_VPP, "top_wpe_vpp",
+ wpe_vpp_parents, 0x0D4, 0x0D8, 0x0DC, 8, 4, 15, 0x08, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDCP, "top_hdcp",
+ hdcp_parents, 0x0D4, 0x0D8, 0x0DC, 16, 4, 23, 0x08, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDCP_24M, "top_hdcp_24m",
+ hdcp_24m_parents, 0x0D4, 0x0D8, 0x0DC, 24, 4, 31, 0x08, 31),
+ /* CLK_CFG_16 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDMI_APB, "top_hdmi_apb",
+ hdmi_apb_parents, 0x0E0, 0x0E4, 0x0E8, 0, 4, 7, 0x0C, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_250M, "top_snps_eth_250m",
+ snps_eth_250m_parents, 0x0E0, 0x0E4, 0x0E8, 8, 4, 15, 0x0C, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_62P4M_PTP, "top_snps_eth_62p4m_ptp",
+ snps_eth_62p4m_ptp_parents, 0x0E0, 0x0E4, 0x0E8, 16, 4, 23, 0x0C, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_50M_RMII, "snps_eth_50m_rmii",
+ snps_eth_50m_rmii_parents, 0x0E0, 0x0E4, 0x0E8, 24, 4, 31, 0x0C, 3),
+ /* CLK_CFG_17 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP, "top_adsp",
+ adsp_parents, 0x0EC, 0x0F0, 0x0F4, 0, 4, 7, 0x0C, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_LOCAL_BUS, "top_audio_local_bus",
+ audio_local_bus_parents, 0x0EC, 0x0F0, 0x0F4, 8, 4, 15, 0x0C, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_H, "top_asm_h",
+ asm_h_parents, 0x0EC, 0x0F0, 0x0F4, 16, 4, 23, 0x0C, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_L, "top_asm_l",
+ asm_l_parents, 0x0EC, 0x0F0, 0x0F4, 24, 4, 31, 0x0C, 7),
+ /* CLK_CFG_18 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL1, "top_apll1",
+ apll1_parents, 0x0F8, 0x0FC, 0x100, 0, 4, 7, 0x0C, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL2, "top_apll2",
+ apll2_parents, 0x0F8, 0x0FC, 0x100, 8, 4, 15, 0x0C, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL3, "top_apll3",
+ apll3_parents, 0x0F8, 0x0FC, 0x100, 16, 4, 23, 0x0C, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL4, "top_apll4",
+ apll4_parents, 0x0F8, 0x0FC, 0x100, 24, 4, 31, 0x0C, 11),
+ /* CLK_CFG_19 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL5, "top_apll5",
+ apll5_parents, 0x0104, 0x0108, 0x010C, 0, 4, 7, 0x0C, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SO1, "top_i2so1",
+ i2so1_parents, 0x0104, 0x0108, 0x010C, 8, 4, 15, 0x0C, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SO2, "top_i2so2",
+ i2so2_parents, 0x0104, 0x0108, 0x010C, 16, 4, 23, 0x0C, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SI1, "top_i2si1",
+ i2si1_parents, 0x0104, 0x0108, 0x010C, 24, 4, 31, 0x0C, 15),
+ /* CLK_CFG_20 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SI2, "top_i2si2",
+ i2si2_parents, 0x0110, 0x0114, 0x0118, 0, 4, 7, 0x0C, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPTX, "top_dptx",
+ dptx_parents, 0x0110, 0x0114, 0x0118, 8, 4, 15, 0x0C, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_IEC, "top_aud_iec",
+ aud_iec_parents, 0x0110, 0x0114, 0x0118, 16, 4, 23, 0x0C, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_HP, "top_a1sys_hp",
+ a1sys_hp_parents, 0x0110, 0x0114, 0x0118, 24, 4, 31, 0x0C, 19),
+ /* CLK_CFG_21 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A2SYS, "top_a2sys",
+ a2sys_parents, 0x011C, 0x0120, 0x0124, 0, 4, 7, 0x0C, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A3SYS, "top_a3sys",
+ a3sys_parents, 0x011C, 0x0120, 0x0124, 8, 4, 15, 0x0C, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A4SYS, "top_a4sys",
+ a4sys_parents, 0x011C, 0x0120, 0x0124, 16, 4, 23, 0x0C, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ECC, "top_ecc",
+ ecc_parents, 0x011C, 0x0120, 0x0124, 24, 4, 31, 0x0C, 23),
+ /*
+ * CLK_CFG_22
+ * top_ulposc and top_srck are clock sources of the always-on co-processor
+ * and must not be disabled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINOR, "top_spinor",
+ spinor_parents, 0x0128, 0x012C, 0x0130, 0, 4, 7, 0x0C, 24),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_ULPOSC, "top_ulposc", ulposc_parents,
+ 0x0128, 0x012C, 0x0130, 8, 4, 15, 0x0C, 25, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SRCK, "top_srck", srck_parents,
+ 0x0128, 0x012C, 0x0130, 16, 4, 23, 0x0C, 26, CLK_IS_CRITICAL),
+};
+
+static const struct mtk_composite top_adj_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV0, "apll12_div0", "top_i2si1", 0x0320, 0, 0x0328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV1, "apll12_div1", "top_i2si2", 0x0320, 1, 0x0328, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV2, "apll12_div2", "top_i2so1", 0x0320, 2, 0x0328, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV3, "apll12_div3", "top_i2so2", 0x0320, 3, 0x0328, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV4, "apll12_div4", "top_aud_iec", 0x0320, 4, 0x0334, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV9, "apll12_div9", "top_dptx", 0x0320, 9, 0x0338, 8, 8),
+};
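+
+/*
+ * TOP0/TOP1 clock gates: the set, clear and status offsets all point at the
+ * same register, i.e. these are plain read/write enable registers rather
+ * than set/clear pairs, which is why the "no_setclr" flavour of the gate
+ * ops is used for them below.
+ */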
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x238,
+ .clr_ofs = 0x238,
+ .sta_ofs = 0x238,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x250,
+ .clr_ofs = 0x250,
+ .sta_ofs = 0x250,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_CFGREG_CLOCK_EN_VPP0, "cfgreg_clock_vpp0", "top_vpp", 0),
+ GATE_TOP0(CLK_TOP_CFGREG_CLOCK_EN_VPP1, "cfgreg_clock_vpp1", "top_vpp", 1),
+ GATE_TOP0(CLK_TOP_CFGREG_CLOCK_EN_VDO0, "cfgreg_clock_vdo0", "top_vpp", 2),
+ GATE_TOP0(CLK_TOP_CFGREG_CLOCK_EN_VDO1, "cfgreg_clock_vdo1", "top_vpp", 3),
+ GATE_TOP0(CLK_TOP_CFGREG_CLOCK_ISP_AXI_GALS, "cfgreg_clock_isp_axi_gals", "top_vpp", 4),
+ GATE_TOP0(CLK_TOP_CFGREG_F26M_VPP0, "cfgreg_f26m_vpp0", "clk26m", 5),
+ GATE_TOP0(CLK_TOP_CFGREG_F26M_VPP1, "cfgreg_f26m_vpp1", "clk26m", 6),
+ GATE_TOP0(CLK_TOP_CFGREG_F26M_VDO0, "cfgreg_f26m_vdo0", "clk26m", 7),
+ GATE_TOP0(CLK_TOP_CFGREG_F26M_VDO1, "cfgreg_f26m_vdo1", "clk26m", 8),
+ GATE_TOP0(CLK_TOP_CFGREG_AUD_F26M_AUD, "cfgreg_aud_f26m_aud", "clk26m", 9),
+ GATE_TOP0(CLK_TOP_CFGREG_UNIPLL_SES, "cfgreg_unipll_ses", "univpll_d2", 15),
+ GATE_TOP0(CLK_TOP_CFGREG_F_PCIE_PHY_REF, "cfgreg_f_pcie_phy_ref", "clk26m", 18),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_SSUSB_TOP_REF, "ssusb_ref", "clk26m", 0),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 1),
+ GATE_TOP1(CLK_TOP_SSUSB_TOP_P1_REF, "ssusb_p1_ref", "clk26m", 2),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P1_REF, "ssusb_phy_p1_ref", "clk26m", 3),
+ GATE_TOP1(CLK_TOP_SSUSB_TOP_P2_REF, "ssusb_p2_ref", "clk26m", 4),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P2_REF, "ssusb_phy_p2_ref", "clk26m", 5),
+ GATE_TOP1(CLK_TOP_SSUSB_TOP_P3_REF, "ssusb_p3_ref", "clk26m", 6),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P3_REF, "ssusb_phy_p3_ref", "clk26m", 7),
+};
+
+static const struct of_device_id of_match_clk_mt8188_topck[] = {
+ { .compatible = "mediatek,mt8188-topckgen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_topck);
+
+/* Register mux notifier for MFG mux */
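+/*
+ * The notifier lets the clock framework park the MFG mux on its bypass
+ * parent (index 0, i.e. "top_mfg_core_tmp") while the GPU PLL changes rate,
+ * the usual MediaTek pattern for keeping the GPU on a safe clock during
+ * reparenting; the sequencing itself is handled by the common MediaTek mux
+ * notifier code.
+ */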
+static int clk_mt8188_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ mfg_mux_nb->ops = &clk_mux_ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to TOP_MFG_CORE_TMP */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
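+/*
+ * Probe registers the clock hierarchy in dependency order: fixed clocks,
+ * fixed factors, the top-level muxes, the MFG fast-reference mux, the
+ * audio divider composites and finally the gates, then exposes everything
+ * through a onecell provider. Each step is unwound in reverse order on
+ * failure.
+ */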
+static int clk_mt8188_topck_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *top_clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw *hw;
+ int r;
+ void __iomem *base;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!top_clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ r = PTR_ERR(base);
+ goto free_top_data;
+ }
+
+ r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ top_clk_data);
+ if (r)
+ goto free_top_data;
+
+ r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ if (r)
+ goto unregister_fixed_clks;
+
+ r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
+ ARRAY_SIZE(top_mtk_muxes), node,
+ &mt8188_clk_lock, top_clk_data);
+ if (r)
+ goto unregister_factors;
+
+ hw = devm_clk_hw_register_mux(&pdev->dev, "mfg_ck_fast_ref", mfg_fast_ref_parents,
+ ARRAY_SIZE(mfg_fast_ref_parents), CLK_SET_RATE_PARENT,
+ (base + 0x250), 8, 1, 0, &mt8188_clk_lock);
+ if (IS_ERR(hw)) {
+ r = PTR_ERR(hw);
+ goto unregister_muxes;
+ }
+ top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF] = hw;
+
+ r = clk_mt8188_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF]->clk);
+ if (r)
+ goto unregister_muxes;
+
+ r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
+ ARRAY_SIZE(top_adj_divs), base,
+ &mt8188_clk_lock, top_clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), top_clk_data);
+ if (r)
+ goto unregister_composite_divs;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
+ if (r)
+ goto unregister_gates;
+
+ platform_set_drvdata(pdev, top_clk_data);
+
+ return r;
+
+unregister_gates:
+ mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+unregister_composite_divs:
+ mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
+free_top_data:
+ mtk_free_clk_data(top_clk_data);
+ return r;
+}
+
+static int clk_mt8188_topck_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *top_clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
+ mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
+ mtk_free_clk_data(top_clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8188_topck_drv = {
+ .probe = clk_mt8188_topck_probe,
+ .remove = clk_mt8188_topck_remove,
+ .driver = {
+ .name = "clk-mt8188-topck",
+ .of_match_table = of_match_clk_mt8188_topck,
+ },
+};
+module_platform_driver(clk_mt8188_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vdec.c b/drivers/clk/mediatek/clk-mt8188-vdec.c
new file mode 100644
index 000000000000..8c3d76531753
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-vdec.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vdec2_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
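+/*
+ * The VDEC gates use set/clear register pairs with inverted polarity
+ * (setclr_inv ops): a set bit means the clock is running.
+ */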
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec1_clks[] = {
+ /* VDEC1_0 */
+ GATE_VDEC0(CLK_VDEC1_SOC_VDEC, "vdec1_soc_vdec", "top_vdec", 0),
+ GATE_VDEC0(CLK_VDEC1_SOC_VDEC_ACTIVE, "vdec1_soc_vdec_active", "top_vdec", 4),
+ GATE_VDEC0(CLK_VDEC1_SOC_VDEC_ENG, "vdec1_soc_vdec_eng", "top_vdec", 8),
+ /* VDEC1_1 */
+ GATE_VDEC1(CLK_VDEC1_SOC_LAT, "vdec1_soc_lat", "top_vdec", 0),
+ GATE_VDEC1(CLK_VDEC1_SOC_LAT_ACTIVE, "vdec1_soc_lat_active", "top_vdec", 4),
+ GATE_VDEC1(CLK_VDEC1_SOC_LAT_ENG, "vdec1_soc_lat_eng", "top_vdec", 8),
+ /* VDEC1_2 */
+ GATE_VDEC2(CLK_VDEC1_SOC_LARB1, "vdec1_soc_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_gate vdec2_clks[] = {
+ /* VDEC2_0 */
+ GATE_VDEC0(CLK_VDEC2_VDEC, "vdec2_vdec", "top_vdec", 0),
+ GATE_VDEC0(CLK_VDEC2_VDEC_ACTIVE, "vdec2_vdec_active", "top_vdec", 4),
+ GATE_VDEC0(CLK_VDEC2_VDEC_ENG, "vdec2_vdec_eng", "top_vdec", 8),
+ /* VDEC2_1 */
+ GATE_VDEC1(CLK_VDEC2_LAT, "vdec2_lat", "top_vdec", 0),
+ /* VDEC2_2 */
+ GATE_VDEC2(CLK_VDEC2_LARB1, "vdec2_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_clk_desc vdec1_desc = {
+ .clks = vdec1_clks,
+ .num_clks = ARRAY_SIZE(vdec1_clks),
+};
+
+static const struct mtk_clk_desc vdec2_desc = {
+ .clks = vdec2_clks,
+ .num_clks = ARRAY_SIZE(vdec2_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_vdec[] = {
+ { .compatible = "mediatek,mt8188-vdecsys-soc", .data = &vdec1_desc },
+ { .compatible = "mediatek,mt8188-vdecsys", .data = &vdec2_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_vdec);
+
+static struct platform_driver clk_mt8188_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-vdec",
+ .of_match_table = of_match_clk_mt8188_vdec,
+ },
+};
+
+module_platform_driver(clk_mt8188_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo0.c b/drivers/clk/mediatek/clk-mt8188-vdo0.c
new file mode 100644
index 000000000000..d2be44c2f3f5
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-vdo0.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vdo0_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vdo0_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs vdo0_2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_VDO0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &vdo0_2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
+
+static const struct mtk_gate vdo0_clks[] = {
+ /* VDO0_0 */
+ GATE_VDO0_0(CLK_VDO0_DISP_OVL0, "vdo0_disp_ovl0", "top_vpp", 0),
+ GATE_VDO0_0(CLK_VDO0_FAKE_ENG0, "vdo0_fake_eng0", "top_vpp", 2),
+ GATE_VDO0_0(CLK_VDO0_DISP_CCORR0, "vdo0_disp_ccorr0", "top_vpp", 4),
+ GATE_VDO0_0(CLK_VDO0_DISP_MUTEX0, "vdo0_disp_mutex0", "top_vpp", 6),
+ GATE_VDO0_0(CLK_VDO0_DISP_GAMMA0, "vdo0_disp_gamma0", "top_vpp", 8),
+ GATE_VDO0_0(CLK_VDO0_DISP_DITHER0, "vdo0_disp_dither0", "top_vpp", 10),
+ GATE_VDO0_0(CLK_VDO0_DISP_WDMA0, "vdo0_disp_wdma0", "top_vpp", 17),
+ GATE_VDO0_0(CLK_VDO0_DISP_RDMA0, "vdo0_disp_rdma0", "top_vpp", 19),
+ GATE_VDO0_0(CLK_VDO0_DSI0, "vdo0_dsi0", "top_vpp", 21),
+ GATE_VDO0_0(CLK_VDO0_DSI1, "vdo0_dsi1", "top_vpp", 22),
+ GATE_VDO0_0(CLK_VDO0_DSC_WRAP0, "vdo0_dsc_wrap0", "top_vpp", 23),
+ GATE_VDO0_0(CLK_VDO0_VPP_MERGE0, "vdo0_vpp_merge0", "top_vpp", 24),
+ GATE_VDO0_0(CLK_VDO0_DP_INTF0, "vdo0_dp_intf0", "top_vpp", 25),
+ GATE_VDO0_0(CLK_VDO0_DISP_AAL0, "vdo0_disp_aal0", "top_vpp", 26),
+ GATE_VDO0_0(CLK_VDO0_INLINEROT0, "vdo0_inlinerot0", "top_vpp", 27),
+ GATE_VDO0_0(CLK_VDO0_APB_BUS, "vdo0_apb_bus", "top_vpp", 28),
+ GATE_VDO0_0(CLK_VDO0_DISP_COLOR0, "vdo0_disp_color0", "top_vpp", 29),
+ GATE_VDO0_0(CLK_VDO0_MDP_WROT0, "vdo0_mdp_wrot0", "top_vpp", 30),
+ GATE_VDO0_0(CLK_VDO0_DISP_RSZ0, "vdo0_disp_rsz0", "top_vpp", 31),
+ /* VDO0_1 */
+ GATE_VDO0_1(CLK_VDO0_DISP_POSTMASK0, "vdo0_disp_postmask0", "top_vpp", 0),
+ GATE_VDO0_1(CLK_VDO0_FAKE_ENG1, "vdo0_fake_eng1", "top_vpp", 1),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC2, "vdo0_dl_async2", "top_vpp", 5),
+ GATE_VDO0_1(CLK_VDO0_DL_RELAY3, "vdo0_dl_relay3", "top_vpp", 6),
+ GATE_VDO0_1(CLK_VDO0_DL_RELAY4, "vdo0_dl_relay4", "top_vpp", 7),
+ GATE_VDO0_1(CLK_VDO0_SMI_GALS, "vdo0_smi_gals", "top_vpp", 10),
+ GATE_VDO0_1(CLK_VDO0_SMI_COMMON, "vdo0_smi_common", "top_vpp", 11),
+ GATE_VDO0_1(CLK_VDO0_SMI_EMI, "vdo0_smi_emi", "top_vpp", 12),
+ GATE_VDO0_1(CLK_VDO0_SMI_IOMMU, "vdo0_smi_iommu", "top_vpp", 13),
+ GATE_VDO0_1(CLK_VDO0_SMI_LARB, "vdo0_smi_larb", "top_vpp", 14),
+ GATE_VDO0_1(CLK_VDO0_SMI_RSI, "vdo0_smi_rsi", "top_vpp", 15),
+ /* VDO0_2 */
+ GATE_VDO0_2(CLK_VDO0_DSI0_DSI, "vdo0_dsi0_dsi", "top_dsi_occ", 0),
+ GATE_VDO0_2(CLK_VDO0_DSI1_DSI, "vdo0_dsi1_dsi", "top_dsi_occ", 8),
+ GATE_VDO0_2_FLAGS(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf",
+ "top_edp", 16, CLK_SET_RATE_PARENT),
+};
+
+static const struct mtk_clk_desc vdo0_desc = {
+ .clks = vdo0_clks,
+ .num_clks = ARRAY_SIZE(vdo0_clks),
+};
+
+static const struct platform_device_id clk_mt8188_vdo0_id_table[] = {
+ { .name = "clk-mt8188-vdo0", .driver_data = (kernel_ulong_t)&vdo0_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo0_id_table);
+
+static struct platform_driver clk_mt8188_vdo0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8188-vdo0",
+ },
+ .id_table = clk_mt8188_vdo0_id_table,
+};
+module_platform_driver(clk_mt8188_vdo0_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c
new file mode 100644
index 000000000000..2ef8cae2e16e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vdo1_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vdo1_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs vdo1_2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+static const struct mtk_gate_regs vdo1_3_cg_regs = {
+ .set_ofs = 0x134,
+ .clr_ofs = 0x138,
+ .sta_ofs = 0x130,
+};
+
+static const struct mtk_gate_regs vdo1_4_cg_regs = {
+ .set_ofs = 0x144,
+ .clr_ofs = 0x148,
+ .sta_ofs = 0x140,
+};
+
+#define GATE_VDO1_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_3_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &vdo1_3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
+
+#define GATE_VDO1_4(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vdo1_clks[] = {
+ /* VDO1_0 */
+ GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
+ GATE_VDO1_0(CLK_VDO1_SMI_LARB3, "vdo1_smi_larb3", "top_vpp", 1),
+ GATE_VDO1_0(CLK_VDO1_GALS, "vdo1_gals", "top_vpp", 2),
+ GATE_VDO1_0(CLK_VDO1_FAKE_ENG0, "vdo1_fake_eng0", "top_vpp", 3),
+ GATE_VDO1_0(CLK_VDO1_FAKE_ENG1, "vdo1_fake_eng1", "top_vpp", 4),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA0, "vdo1_mdp_rdma0", "top_vpp", 5),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA1, "vdo1_mdp_rdma1", "top_vpp", 6),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA2, "vdo1_mdp_rdma2", "top_vpp", 7),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA3, "vdo1_mdp_rdma3", "top_vpp", 8),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE0, "vdo1_vpp_merge0", "top_vpp", 9),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE1, "vdo1_vpp_merge1", "top_vpp", 10),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE2, "vdo1_vpp_merge2", "top_vpp", 11),
+ /* VDO1_1 */
+ GATE_VDO1_1(CLK_VDO1_VPP_MERGE3, "vdo1_vpp_merge3", "top_vpp", 0),
+ GATE_VDO1_1(CLK_VDO1_VPP_MERGE4, "vdo1_vpp_merge4", "top_vpp", 1),
+ GATE_VDO1_1(CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC, "vdo1_vpp2_to_vdo1_dl_async", "top_vpp", 2),
+ GATE_VDO1_1(CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC, "vdo1_vpp3_to_vdo1_dl_async", "top_vpp", 3),
+ GATE_VDO1_1(CLK_VDO1_DISP_MUTEX, "vdo1_disp_mutex", "top_vpp", 4),
+ GATE_VDO1_1(CLK_VDO1_MDP_RDMA4, "vdo1_mdp_rdma4", "top_vpp", 5),
+ GATE_VDO1_1(CLK_VDO1_MDP_RDMA5, "vdo1_mdp_rdma5", "top_vpp", 6),
+ GATE_VDO1_1(CLK_VDO1_MDP_RDMA6, "vdo1_mdp_rdma6", "top_vpp", 7),
+ GATE_VDO1_1(CLK_VDO1_MDP_RDMA7, "vdo1_mdp_rdma7", "top_vpp", 8),
+ GATE_VDO1_1(CLK_VDO1_DP_INTF0_MMCK, "vdo1_dp_intf0_mmck", "top_vpp", 9),
+ GATE_VDO1_1(CLK_VDO1_DPI0_MM, "vdo1_dpi0_mm_ck", "top_vpp", 10),
+ GATE_VDO1_1(CLK_VDO1_DPI1_MM, "vdo1_dpi1_mm_ck", "top_vpp", 11),
+ GATE_VDO1_1(CLK_VDO1_MERGE0_DL_ASYNC, "vdo1_merge0_dl_async", "top_vpp", 13),
+ GATE_VDO1_1(CLK_VDO1_MERGE1_DL_ASYNC, "vdo1_merge1_dl_async", "top_vpp", 14),
+ GATE_VDO1_1(CLK_VDO1_MERGE2_DL_ASYNC, "vdo1_merge2_dl_async", "top_vpp", 15),
+ GATE_VDO1_1(CLK_VDO1_MERGE3_DL_ASYNC, "vdo1_merge3_dl_async", "top_vpp", 16),
+ GATE_VDO1_1(CLK_VDO1_MERGE4_DL_ASYNC, "vdo1_merge4_dl_async", "top_vpp", 17),
+ GATE_VDO1_1(CLK_VDO1_DSC_VDO1_DL_ASYNC, "vdo1_dsc_vdo1_dl_async", "top_vpp", 18),
+ GATE_VDO1_1(CLK_VDO1_MERGE_VDO1_DL_ASYNC, "vdo1_merge_vdo1_dl_async", "top_vpp", 19),
+ GATE_VDO1_1(CLK_VDO1_PADDING0, "vdo1_padding0", "top_vpp", 20),
+ GATE_VDO1_1(CLK_VDO1_PADDING1, "vdo1_padding1", "top_vpp", 21),
+ GATE_VDO1_1(CLK_VDO1_PADDING2, "vdo1_padding2", "top_vpp", 22),
+ GATE_VDO1_1(CLK_VDO1_PADDING3, "vdo1_padding3", "top_vpp", 23),
+ GATE_VDO1_1(CLK_VDO1_PADDING4, "vdo1_padding4", "top_vpp", 24),
+ GATE_VDO1_1(CLK_VDO1_PADDING5, "vdo1_padding5", "top_vpp", 25),
+ GATE_VDO1_1(CLK_VDO1_PADDING6, "vdo1_padding6", "top_vpp", 26),
+ GATE_VDO1_1(CLK_VDO1_PADDING7, "vdo1_padding7", "top_vpp", 27),
+ GATE_VDO1_1(CLK_VDO1_DISP_RSZ0, "vdo1_disp_rsz0", "top_vpp", 28),
+ GATE_VDO1_1(CLK_VDO1_DISP_RSZ1, "vdo1_disp_rsz1", "top_vpp", 29),
+ GATE_VDO1_1(CLK_VDO1_DISP_RSZ2, "vdo1_disp_rsz2", "top_vpp", 30),
+ GATE_VDO1_1(CLK_VDO1_DISP_RSZ3, "vdo1_disp_rsz3", "top_vpp", 31),
+ /* VDO1_2 */
+ GATE_VDO1_2(CLK_VDO1_HDR_VDO_FE0, "vdo1_hdr_vdo_fe0", "top_vpp", 0),
+ GATE_VDO1_2(CLK_VDO1_HDR_GFX_FE0, "vdo1_hdr_gfx_fe0", "top_vpp", 1),
+ GATE_VDO1_2(CLK_VDO1_HDR_VDO_BE, "vdo1_hdr_vdo_be", "top_vpp", 2),
+ GATE_VDO1_2(CLK_VDO1_HDR_VDO_FE1, "vdo1_hdr_vdo_fe1", "top_vpp", 16),
+ GATE_VDO1_2(CLK_VDO1_HDR_GFX_FE1, "vdo1_hdr_gfx_fe1", "top_vpp", 17),
+ GATE_VDO1_2(CLK_VDO1_DISP_MIXER, "vdo1_disp_mixer", "top_vpp", 18),
+ GATE_VDO1_2(CLK_VDO1_HDR_VDO_FE0_DL_ASYNC, "vdo1_hdr_vdo_fe0_dl_async", "top_vpp", 19),
+ GATE_VDO1_2(CLK_VDO1_HDR_VDO_FE1_DL_ASYNC, "vdo1_hdr_vdo_fe1_dl_async", "top_vpp", 20),
+ GATE_VDO1_2(CLK_VDO1_HDR_GFX_FE0_DL_ASYNC, "vdo1_hdr_gfx_fe0_dl_async", "top_vpp", 21),
+ GATE_VDO1_2(CLK_VDO1_HDR_GFX_FE1_DL_ASYNC, "vdo1_hdr_gfx_fe1_dl_async", "top_vpp", 22),
+ GATE_VDO1_2(CLK_VDO1_HDR_VDO_BE_DL_ASYNC, "vdo1_hdr_vdo_be_dl_async", "top_vpp", 23),
+ /* VDO1_3 */
+ GATE_VDO1_3(CLK_VDO1_DPI0, "vdo1_dpi0_ck", "top_vpp", 0),
+ GATE_VDO1_3(CLK_VDO1_DISP_MONITOR_DPI0, "vdo1_disp_monitor_dpi0_ck", "top_vpp", 1),
+ GATE_VDO1_3(CLK_VDO1_DPI1, "vdo1_dpi1_ck", "top_vpp", 8),
+ GATE_VDO1_3(CLK_VDO1_DISP_MONITOR_DPI1, "vdo1_disp_monitor_dpi1_ck", "top_vpp", 9),
+ GATE_VDO1_3_FLAGS(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_dp", 16, CLK_SET_RATE_PARENT),
+ GATE_VDO1_3(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf_ck", "top_vpp", 17),
+ /* VDO1_4 */
+ GATE_VDO1_4(CLK_VDO1_26M_SLOW, "vdo1_26m_slow_ck", "clk26m", 8),
+};
+
+static const struct mtk_clk_desc vdo1_desc = {
+ .clks = vdo1_clks,
+ .num_clks = ARRAY_SIZE(vdo1_clks),
+};
+
+static const struct platform_device_id clk_mt8188_vdo1_id_table[] = {
+ { .name = "clk-mt8188-vdo1", .driver_data = (kernel_ulong_t)&vdo1_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo1_id_table);
+
+static struct platform_driver clk_mt8188_vdo1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8188-vdo1",
+ },
+ .id_table = clk_mt8188_vdo1_id_table,
+};
+module_platform_driver(clk_mt8188_vdo1_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-venc.c b/drivers/clk/mediatek/clk-mt8188-venc.c
new file mode 100644
index 000000000000..245367f33fa5
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-venc.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc1_clks[] = {
+ GATE_VENC1(CLK_VENC1_LARB, "venc1_larb", "top_venc", 0),
+ GATE_VENC1(CLK_VENC1_VENC, "venc1_venc", "top_venc", 4),
+ GATE_VENC1(CLK_VENC1_JPGENC, "venc1_jpgenc", "top_venc", 8),
+ GATE_VENC1(CLK_VENC1_JPGDEC, "venc1_jpgdec", "top_venc", 12),
+ GATE_VENC1(CLK_VENC1_JPGDEC_C1, "venc1_jpgdec_c1", "top_venc", 16),
+ GATE_VENC1(CLK_VENC1_GALS, "venc1_gals", "top_venc", 28),
+ GATE_VENC1(CLK_VENC1_GALS_SRAM, "venc1_gals_sram", "top_venc", 31),
+};
+
+static const struct mtk_clk_desc venc1_desc = {
+ .clks = venc1_clks,
+ .num_clks = ARRAY_SIZE(venc1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_venc1[] = {
+ { .compatible = "mediatek,mt8188-vencsys", .data = &venc1_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_venc1);
+
+static struct platform_driver clk_mt8188_venc1_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-venc1",
+ .of_match_table = of_match_clk_mt8188_venc1,
+ },
+};
+
+module_platform_driver(clk_mt8188_venc1_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp0.c b/drivers/clk/mediatek/clk-mt8188-vpp0.c
new file mode 100644
index 000000000000..07bdedf6a21a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-vpp0.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vpp0_0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x20,
+};
+
+static const struct mtk_gate_regs vpp0_1_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x34,
+ .sta_ofs = 0x2c,
+};
+
+static const struct mtk_gate_regs vpp0_2_cg_regs = {
+ .set_ofs = 0x3c,
+ .clr_ofs = 0x40,
+ .sta_ofs = 0x38,
+};
+
+#define GATE_VPP0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP0_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vpp0_clks[] = {
+ /* VPP0_0 */
+ GATE_VPP0_0(CLK_VPP0_MDP_FG, "vpp0_mdp_fg", "top_vpp", 1),
+ GATE_VPP0_0(CLK_VPP0_STITCH, "vpp0_stitch", "top_vpp", 2),
+ GATE_VPP0_0(CLK_VPP0_PADDING, "vpp0_padding", "top_vpp", 7),
+ GATE_VPP0_0(CLK_VPP0_MDP_TCC, "vpp0_mdp_tcc", "top_vpp", 8),
+ GATE_VPP0_0(CLK_VPP0_WARP0_ASYNC_TX, "vpp0_warp0_async_tx", "top_vpp", 10),
+ GATE_VPP0_0(CLK_VPP0_WARP1_ASYNC_TX, "vpp0_warp1_async_tx", "top_vpp", 11),
+ GATE_VPP0_0(CLK_VPP0_MUTEX, "vpp0_mutex", "top_vpp", 13),
+ GATE_VPP0_0(CLK_VPP02VPP1_RELAY, "vpp02vpp1_relay", "top_vpp", 14),
+ GATE_VPP0_0(CLK_VPP0_VPP12VPP0_ASYNC, "vpp0_vpp12vpp0_async", "top_vpp", 15),
+ GATE_VPP0_0(CLK_VPP0_MMSYSRAM_TOP, "vpp0_mmsysram_top", "top_vpp", 16),
+ GATE_VPP0_0(CLK_VPP0_MDP_AAL, "vpp0_mdp_aal", "top_vpp", 17),
+ GATE_VPP0_0(CLK_VPP0_MDP_RSZ, "vpp0_mdp_rsz", "top_vpp", 18),
+ /* VPP0_1 */
+ GATE_VPP0_1(CLK_VPP0_SMI_COMMON_MMSRAM, "vpp0_smi_common_mmsram", "top_vpp", 0),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_LARB0_MMSRAM, "vpp0_gals_vdo0_larb0_mmsram", "top_vpp", 1),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_LARB1_MMSRAM, "vpp0_gals_vdo0_larb1_mmsram", "top_vpp", 2),
+ GATE_VPP0_1(CLK_VPP0_GALS_VENCSYS_MMSRAM, "vpp0_gals_vencsys_mmsram", "top_vpp", 3),
+ GATE_VPP0_1(CLK_VPP0_GALS_VENCSYS_CORE1_MMSRAM,
+ "vpp0_gals_vencsys_core1_mmsram", "top_vpp", 4),
+ GATE_VPP0_1(CLK_VPP0_GALS_INFRA_MMSRAM, "vpp0_gals_infra_mmsram", "top_vpp", 5),
+ GATE_VPP0_1(CLK_VPP0_GALS_CAMSYS_MMSRAM, "vpp0_gals_camsys_mmsram", "top_vpp", 6),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_LARB5_MMSRAM, "vpp0_gals_vpp1_larb5_mmsram", "top_vpp", 7),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_LARB6_MMSRAM, "vpp0_gals_vpp1_larb6_mmsram", "top_vpp", 8),
+ GATE_VPP0_1(CLK_VPP0_SMI_REORDER_MMSRAM, "vpp0_smi_reorder_mmsram", "top_vpp", 9),
+ GATE_VPP0_1(CLK_VPP0_SMI_IOMMU, "vpp0_smi_iommu", "top_vpp", 10),
+ GATE_VPP0_1(CLK_VPP0_GALS_IMGSYS_CAMSYS, "vpp0_gals_imgsys_camsys", "top_vpp", 11),
+ GATE_VPP0_1(CLK_VPP0_MDP_RDMA, "vpp0_mdp_rdma", "top_vpp", 12),
+ GATE_VPP0_1(CLK_VPP0_MDP_WROT, "vpp0_mdp_wrot", "top_vpp", 13),
+ GATE_VPP0_1(CLK_VPP0_GALS_EMI0_EMI1, "vpp0_gals_emi0_emi1", "top_vpp", 16),
+ GATE_VPP0_1(CLK_VPP0_SMI_SUB_COMMON_REORDER, "vpp0_smi_sub_common_reorder", "top_vpp", 17),
+ GATE_VPP0_1(CLK_VPP0_SMI_RSI, "vpp0_smi_rsi", "top_vpp", 18),
+ GATE_VPP0_1(CLK_VPP0_SMI_COMMON_LARB4, "vpp0_smi_common_larb4", "top_vpp", 19),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDEC_VDEC_CORE1, "vpp0_gals_vdec_vdec_core1", "top_vpp", 20),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_WPESYS, "vpp0_gals_vpp1_wpesys", "top_vpp", 21),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1,
+ "vpp0_gals_vdo0_vdo1_vencsys_core1", "top_vpp", 22),
+ GATE_VPP0_1(CLK_VPP0_FAKE_ENG, "vpp0_fake_eng", "top_vpp", 23),
+ GATE_VPP0_1(CLK_VPP0_MDP_HDR, "vpp0_mdp_hdr", "top_vpp", 24),
+ GATE_VPP0_1(CLK_VPP0_MDP_TDSHP, "vpp0_mdp_tdshp", "top_vpp", 25),
+ GATE_VPP0_1(CLK_VPP0_MDP_COLOR, "vpp0_mdp_color", "top_vpp", 26),
+ GATE_VPP0_1(CLK_VPP0_MDP_OVL, "vpp0_mdp_ovl", "top_vpp", 27),
+ GATE_VPP0_1(CLK_VPP0_DSIP_RDMA, "vpp0_dsip_rdma", "top_vpp", 28),
+ GATE_VPP0_1(CLK_VPP0_DISP_WDMA, "vpp0_disp_wdma", "top_vpp", 29),
+ GATE_VPP0_1(CLK_VPP0_MDP_HMS, "vpp0_mdp_hms", "top_vpp", 30),
+ /* VPP0_2 */
+ GATE_VPP0_2(CLK_VPP0_WARP0_RELAY, "vpp0_warp0_relay", "top_wpe_vpp", 0),
+ GATE_VPP0_2(CLK_VPP0_WARP0_ASYNC, "vpp0_warp0_async", "top_wpe_vpp", 1),
+ GATE_VPP0_2(CLK_VPP0_WARP1_RELAY, "vpp0_warp1_relay", "top_wpe_vpp", 2),
+ GATE_VPP0_2(CLK_VPP0_WARP1_ASYNC, "vpp0_warp1_async", "top_wpe_vpp", 3),
+};
+
+static const struct mtk_clk_desc vpp0_desc = {
+ .clks = vpp0_clks,
+ .num_clks = ARRAY_SIZE(vpp0_clks),
+};
+
+static const struct platform_device_id clk_mt8188_vpp0_id_table[] = {
+ { .name = "clk-mt8188-vpp0", .driver_data = (kernel_ulong_t)&vpp0_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp0_id_table);
+
+static struct platform_driver clk_mt8188_vpp0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8188-vpp0",
+ },
+ .id_table = clk_mt8188_vpp0_id_table,
+};
+module_platform_driver(clk_mt8188_vpp0_drv);
+MODULE_LICENSE("GPL");
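
For context on the macros above: each mtk_gate_regs bank describes a MediaTek clock-gate register trio in which a set bit means "clock gated"; STA is the status register and the SET/CLR offsets are write-1-to-set / write-1-to-clear mirrors of it. The mtk_clk_gate_ops_setclr ops used by GATE_VPP0_0/GATE_VPP0_1 therefore enable a clock by writing its bit to the CLR offset and disable it by writing to the SET offset. A minimal sketch of that behaviour follows; the function names and the explicit regmap/bit parameters are illustrative stand-ins, not part of this series.

#include <linux/bits.h>
#include <linux/regmap.h>

#include "clk-gate.h"		/* struct mtk_gate_regs */

/* Illustrative only: what enabling/disabling one of the gates above
 * amounts to with the set/clr ops. CG bit set == clock gated.
 */
static void example_cg_enable(struct regmap *regmap,
			      const struct mtk_gate_regs *regs, u8 bit)
{
	regmap_write(regmap, regs->clr_ofs, BIT(bit));	/* ungate */
}

static void example_cg_disable(struct regmap *regmap,
			       const struct mtk_gate_regs *regs, u8 bit)
{
	regmap_write(regmap, regs->set_ofs, BIT(bit));	/* gate */
}
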
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp1.c b/drivers/clk/mediatek/clk-mt8188-vpp1.c
new file mode 100644
index 000000000000..d4e66b240573
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-vpp1.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vpp1_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vpp1_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_VPP1_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP1_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp1_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vpp1_clks[] = {
+ /* VPP1_0 */
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_OVL, "vpp1_svpp1_mdp_ovl", "top_vpp", 0),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_TCC, "vpp1_svpp1_mdp_tcc", "top_vpp", 1),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_WROT, "vpp1_svpp1_mdp_wrot", "top_vpp", 2),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_VPP_PAD, "vpp1_svpp1_vpp_pad", "top_vpp", 3),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_WROT, "vpp1_svpp2_mdp_wrot", "top_vpp", 4),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VPP_PAD, "vpp1_svpp2_vpp_pad", "top_vpp", 5),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_WROT, "vpp1_svpp3_mdp_wrot", "top_vpp", 6),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VPP_PAD, "vpp1_svpp3_vpp_pad", "top_vpp", 7),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_RDMA, "vpp1_svpp1_mdp_rdma", "top_vpp", 8),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_FG, "vpp1_svpp1_mdp_fg", "top_vpp", 9),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_RDMA, "vpp1_svpp2_mdp_rdma", "top_vpp", 10),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_FG, "vpp1_svpp2_mdp_fg", "top_vpp", 11),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_RDMA, "vpp1_svpp3_mdp_rdma", "top_vpp", 12),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_FG, "vpp1_svpp3_mdp_fg", "top_vpp", 13),
+ GATE_VPP1_0(CLK_VPP1_VPP_SPLIT, "vpp1_vpp_split", "top_vpp", 14),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VDO0_DL_RELAY, "vpp1_svpp2_vdo0_dl_relay", "top_vpp", 15),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_RSZ, "vpp1_svpp1_mdp_rsz", "top_vpp", 16),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_TDSHP, "vpp1_svpp1_mdp_tdshp", "top_vpp", 17),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_COLOR, "vpp1_svpp1_mdp_color", "top_vpp", 18),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VDO1_DL_RELAY, "vpp1_svpp3_vdo1_dl_relay", "top_vpp", 19),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_RSZ, "vpp1_svpp2_mdp_rsz", "top_vpp", 20),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VPP_MERGE, "vpp1_svpp2_vpp_merge", "top_vpp", 21),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_TDSHP, "vpp1_svpp2_mdp_tdshp", "top_vpp", 22),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_COLOR, "vpp1_svpp2_mdp_color", "top_vpp", 23),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_RSZ, "vpp1_svpp3_mdp_rsz", "top_vpp", 24),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VPP_MERGE, "vpp1_svpp3_vpp_merge", "top_vpp", 25),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_TDSHP, "vpp1_svpp3_mdp_tdshp", "top_vpp", 26),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_COLOR, "vpp1_svpp3_mdp_color", "top_vpp", 27),
+ GATE_VPP1_0(CLK_VPP1_GALS5, "vpp1_gals5", "top_vpp", 28),
+ GATE_VPP1_0(CLK_VPP1_GALS6, "vpp1_gals6", "top_vpp", 29),
+ GATE_VPP1_0(CLK_VPP1_LARB5, "vpp1_larb5", "top_vpp", 30),
+ GATE_VPP1_0(CLK_VPP1_LARB6, "vpp1_larb6", "top_vpp", 31),
+ /* VPP1_1 */
+ GATE_VPP1_1(CLK_VPP1_SVPP1_MDP_HDR, "vpp1_svpp1_mdp_hdr", "top_vpp", 0),
+ GATE_VPP1_1(CLK_VPP1_SVPP1_MDP_AAL, "vpp1_svpp1_mdp_aal", "top_vpp", 1),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_MDP_HDR, "vpp1_svpp2_mdp_hdr", "top_vpp", 2),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_MDP_AAL, "vpp1_svpp2_mdp_aal", "top_vpp", 3),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_HDR, "vpp1_svpp3_mdp_hdr", "top_vpp", 4),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_AAL, "vpp1_svpp3_mdp_aal", "top_vpp", 5),
+ GATE_VPP1_1(CLK_VPP1_DISP_MUTEX, "vpp1_disp_mutex", "top_vpp", 7),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_VDO1_DL_RELAY, "vpp1_svpp2_vdo1_dl_relay", "top_vpp", 8),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_VDO0_DL_RELAY, "vpp1_svpp3_vdo0_dl_relay", "top_vpp", 9),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL_ASYNC, "vpp1_vpp0_dl_async", "top_vpp", 10),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL1_RELAY, "vpp1_vpp0_dl1_relay", "top_vpp", 11),
+ GATE_VPP1_1(CLK_VPP1_LARB5_FAKE_ENG, "vpp1_larb5_fake_eng", "top_vpp", 12),
+ GATE_VPP1_1(CLK_VPP1_LARB6_FAKE_ENG, "vpp1_larb6_fake_eng", "top_vpp", 13),
+ GATE_VPP1_1(CLK_VPP1_HDMI_META, "vpp1_hdmi_meta", "top_vpp", 16),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_HDMI, "vpp1_vpp_split_hdmi", "top_vpp", 17),
+ GATE_VPP1_1(CLK_VPP1_DGI_IN, "vpp1_dgi_in", "top_vpp", 18),
+ GATE_VPP1_1(CLK_VPP1_DGI_OUT, "vpp1_dgi_out", "top_vpp", 19),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_DGI, "vpp1_vpp_split_dgi", "top_vpp", 20),
+ GATE_VPP1_1(CLK_VPP1_DL_CON_OCC, "vpp1_dl_con_occ", "top_vpp", 21),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_26M, "vpp1_vpp_split_26m", "top_vpp", 26),
+};
+
+static const struct mtk_clk_desc vpp1_desc = {
+ .clks = vpp1_clks,
+ .num_clks = ARRAY_SIZE(vpp1_clks),
+};
+
+static const struct platform_device_id clk_mt8188_vpp1_id_table[] = {
+ { .name = "clk-mt8188-vpp1", .driver_data = (kernel_ulong_t)&vpp1_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp1_id_table);
+
+static struct platform_driver clk_mt8188_vpp1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8188-vpp1",
+ },
+ .id_table = clk_mt8188_vpp1_id_table,
+};
+module_platform_driver(clk_mt8188_vpp1_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-wpe.c b/drivers/clk/mediatek/clk-mt8188-wpe.c
new file mode 100644
index 000000000000..393ac38a2172
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8188-wpe.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/mediatek,mt8188-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs wpe_top_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs wpe_vpp0_0_cg_regs = {
+ .set_ofs = 0x58,
+ .clr_ofs = 0x58,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs wpe_vpp0_1_cg_regs = {
+ .set_ofs = 0x5c,
+ .clr_ofs = 0x5c,
+ .sta_ofs = 0x5c,
+};
+
+#define GATE_WPE_TOP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_top_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_WPE_VPP0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_vpp0_0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_WPE_VPP0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_vpp0_1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate wpe_top_clks[] = {
+ GATE_WPE_TOP(CLK_WPE_TOP_WPE_VPP0, "wpe_wpe_vpp0", "top_wpe_vpp", 16),
+ GATE_WPE_TOP(CLK_WPE_TOP_SMI_LARB7, "wpe_smi_larb7", "top_wpe_vpp", 18),
+ GATE_WPE_TOP(CLK_WPE_TOP_WPESYS_EVENT_TX, "wpe_wpesys_event_tx", "top_wpe_vpp", 20),
+ GATE_WPE_TOP(CLK_WPE_TOP_SMI_LARB7_PCLK_EN, "wpe_smi_larb7_p_en", "top_wpe_vpp", 24),
+};
+
+static const struct mtk_gate wpe_vpp0_clks[] = {
+ /* WPE_VPP0_0 */
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_VGEN, "wpe_vpp0_vgen", "top_img", 0),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_EXT, "wpe_vpp0_ext", "top_img", 1),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_VFC, "wpe_vpp0_vfc", "top_img", 2),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH0_TOP, "wpe_vpp0_cach0_top", "top_img", 3),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH0_DMA, "wpe_vpp0_cach0_dma", "top_img", 4),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH1_TOP, "wpe_vpp0_cach1_top", "top_img", 5),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH1_DMA, "wpe_vpp0_cach1_dma", "top_img", 6),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH2_TOP, "wpe_vpp0_cach2_top", "top_img", 7),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH2_DMA, "wpe_vpp0_cach2_dma", "top_img", 8),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH3_TOP, "wpe_vpp0_cach3_top", "top_img", 9),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_CACH3_DMA, "wpe_vpp0_cach3_dma", "top_img", 10),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_PSP, "wpe_vpp0_psp", "top_img", 11),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_PSP2, "wpe_vpp0_psp2", "top_img", 12),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_SYNC, "wpe_vpp0_sync", "top_img", 13),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_C24, "wpe_vpp0_c24", "top_img", 14),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_MDP_CROP, "wpe_vpp0_mdp_crop", "top_img", 15),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_ISP_CROP, "wpe_vpp0_isp_crop", "top_img", 16),
+ GATE_WPE_VPP0_0(CLK_WPE_VPP0_TOP, "wpe_vpp0_top", "top_img", 17),
+ /* WPE_VPP0_1 */
+ GATE_WPE_VPP0_1(CLK_WPE_VPP0_VECI, "wpe_vpp0_veci", "top_img", 0),
+ GATE_WPE_VPP0_1(CLK_WPE_VPP0_VEC2I, "wpe_vpp0_vec2i", "top_img", 1),
+ GATE_WPE_VPP0_1(CLK_WPE_VPP0_VEC3I, "wpe_vpp0_vec3i", "top_img", 2),
+ GATE_WPE_VPP0_1(CLK_WPE_VPP0_WPEO, "wpe_vpp0_wpeo", "top_img", 3),
+ GATE_WPE_VPP0_1(CLK_WPE_VPP0_MSKO, "wpe_vpp0_msko", "top_img", 4),
+};
+
+static const struct mtk_clk_desc wpe_top_desc = {
+ .clks = wpe_top_clks,
+ .num_clks = ARRAY_SIZE(wpe_top_clks),
+};
+
+static const struct mtk_clk_desc wpe_vpp0_desc = {
+ .clks = wpe_vpp0_clks,
+ .num_clks = ARRAY_SIZE(wpe_vpp0_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8188_wpe[] = {
+ { .compatible = "mediatek,mt8188-wpesys", .data = &wpe_top_desc },
+ { .compatible = "mediatek,mt8188-wpesys-vpp0", .data = &wpe_vpp0_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_wpe);
+
+static struct platform_driver clk_mt8188_wpe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8188-wpe",
+ .of_match_table = of_match_clk_mt8188_wpe,
+ },
+};
+
+module_platform_driver(clk_mt8188_wpe_drv);
+MODULE_LICENSE("GPL");
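
All of the gates registered by these drivers are exposed through the common clock framework, so peripheral drivers consume them with the ordinary clk API rather than anything MediaTek-specific. A hedged consumer sketch (the "wpe" clock-names entry and the probe function are hypothetical, not taken from this series):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical consumer of one of the gate clocks above. */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* "wpe" is an assumed clock-names entry in the consumer's DT node. */
	clk = devm_clk_get(&pdev->dev, "wpe");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... program the hardware; clk_disable_unprepare(clk) on teardown ... */
	return 0;
}
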
diff --git a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
new file mode 100644
index 000000000000..eafd34297b9a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ * Copyright (c) 2023 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8192-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-fhctl.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
+#include "clk-pllfh.h"
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_MIPID26M, "mipid26m", "clk26m", 16),
+};
+
+#define MT8192_PLL_FMAX (3800UL * MHZ)
+#define MT8192_PLL_FMIN (1500UL * MHZ)
+#define MT8192_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcw_chg_reg, \
+ _en_reg, _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8192_PLL_FMAX, \
+ .fmin = MT8192_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8192_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .en_reg = _en_reg, \
+ .pll_en_bit = _pll_en_bit, \
+ }
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift) \
+ PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, 0, 0, 0)
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_MAINPLL, "mainpll", 0x0340, 0x034c, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0344, 24, 0, 0, 0, 0x0344, 0),
+ PLL_B(CLK_APMIXED_UNIVPLL, "univpll", 0x0308, 0x0314, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x030c, 24, 0, 0, 0, 0x030c, 0),
+ PLL(CLK_APMIXED_USBPLL, "usbpll", 0x03c4, 0x03cc, 0x00000000,
+ 0, 0, 22, 0x03c4, 24, 0, 0, 0, 0x03c4, 0, 0x03c4, 0x03cc, 2),
+ PLL_B(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035c, 0x00000000,
+ 0, 0, 22, 0x0354, 24, 0, 0, 0, 0x0354, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0360, 0x036c, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0364, 24, 0, 0, 0, 0x0364, 0),
+ PLL_B(CLK_APMIXED_ADSPPLL, "adsppll", 0x0370, 0x037c, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0374, 24, 0, 0, 0, 0x0374, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0268, 0x0274, 0x00000000,
+ 0, 0, 22, 0x026c, 24, 0, 0, 0, 0x026c, 0),
+ PLL_B(CLK_APMIXED_TVDPLL, "tvdpll", 0x0380, 0x038c, 0x00000000,
+ 0, 0, 22, 0x0384, 24, 0, 0, 0, 0x0384, 0),
+ PLL_B(CLK_APMIXED_APLL1, "apll1", 0x0318, 0x0328, 0x00000000,
+ 0, 0, 32, 0x031c, 24, 0x0040, 0x000c, 0, 0x0320, 0),
+ PLL_B(CLK_APMIXED_APLL2, "apll2", 0x032c, 0x033c, 0x00000000,
+ 0, 0, 32, 0x0330, 24, 0, 0, 0, 0x0334, 0),
+};
+
+enum fh_pll_id {
+ FH_ARMPLL_LL,
+ FH_ARMPLL_BL0,
+ FH_ARMPLL_BL1,
+ FH_ARMPLL_BL2,
+ FH_ARMPLL_BL3,
+ FH_CCIPLL,
+ FH_MFGPLL,
+ FH_MEMPLL,
+ FH_MPLL,
+ FH_MMPLL,
+ FH_MAINPLL,
+ FH_MSDCPLL,
+ FH_ADSPPLL,
+ FH_APUPLL,
+ FH_TVDPLL,
+ FH_NR_FH,
+};
+
+#define FH(_pllid, _fhid, _offset) { \
+ .data = { \
+ .pll_id = _pllid, \
+ .fh_id = _fhid, \
+ .fh_ver = FHCTL_PLLFH_V2, \
+ .fhx_offset = _offset, \
+ .dds_mask = GENMASK(21, 0), \
+ .slope0_value = 0x6003c97, \
+ .slope1_value = 0x6003c97, \
+ .sfstrx_en = BIT(2), \
+ .frddsx_en = BIT(1), \
+ .fhctlx_en = BIT(0), \
+ .tgl_org = BIT(31), \
+ .dvfs_tri = BIT(31), \
+ .pcwchg = BIT(31), \
+ .dt_val = 0x0, \
+ .df_val = 0x9, \
+ .updnlmt_shft = 16, \
+ .msk_frddsx_dys = GENMASK(23, 20), \
+ .msk_frddsx_dts = GENMASK(19, 16), \
+ }, \
+ }
+
+static struct mtk_pllfh_data pllfhs[] = {
+ FH(CLK_APMIXED_MFGPLL, FH_MFGPLL, 0xb4),
+ FH(CLK_APMIXED_MMPLL, FH_MMPLL, 0xf0),
+ FH(CLK_APMIXED_MAINPLL, FH_MAINPLL, 0x104),
+ FH(CLK_APMIXED_MSDCPLL, FH_MSDCPLL, 0x118),
+ FH(CLK_APMIXED_ADSPPLL, FH_ADSPPLL, 0x12c),
+ FH(CLK_APMIXED_TVDPLL, FH_TVDPLL, 0x154),
+};
+
+static const struct of_device_id of_match_clk_mt8192_apmixed[] = {
+ { .compatible = "mediatek,mt8192-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_apmixed);
+
+static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const u8 *fhctl_node = "mediatek,mt8192-fhctl";
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ fhctl_parse_dt(fhctl_node, pllfhs, ARRAY_SIZE(pllfhs));
+
+ r = mtk_clk_register_pllfhs(node, plls, ARRAY_SIZE(plls),
+ pllfhs, ARRAY_SIZE(pllfhs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+ if (r)
+ goto unregister_plls;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_gates;
+
+ return r;
+
+unregister_gates:
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+unregister_plls:
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8192_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8192_apmixed_drv = {
+ .driver = {
+ .name = "clk-mt8192-apmixed",
+ .of_match_table = of_match_clk_mt8192_apmixed,
+ },
+ .probe = clk_mt8192_apmixed_probe,
+ .remove = clk_mt8192_apmixed_remove,
+};
+module_platform_driver(clk_mt8192_apmixed_drv);
+MODULE_DESCRIPTION("MediaTek MT8192 apmixed clocks driver");
+MODULE_LICENSE("GPL");
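
The PLL() entries above encode each PLL's feedback-word (PCW) layout: 22 or 32 pcwbits, of which MT8192_INTEGER_BITS are integer bits and the rest fractional. As a rough illustration of how those fields relate to the output rate (a simplification of the common MediaTek PLL recalc logic; rounding and the post-divider/enable-bit decoding are ignored, and the helper name is made up):

#include <linux/math64.h>
#include <linux/types.h>

/* Sketch only: rate implied by a programmed PCW value.
 * fin is the 26 MHz reference, postdiv the decoded post-divider (1, 2, 4, ...).
 */
static unsigned long example_pll_rate(u32 fin, u64 pcw, int pcwbits,
				      int pcwibits, unsigned int postdiv)
{
	int frac_bits = pcwbits - pcwibits;	/* fractional PCW bits */
	u64 vco = ((u64)fin * pcw) >> frac_bits;

	return div_u64(vco, postdiv);
}
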
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
index 8c989bffd8c7..ee251492d4f1 100644
--- a/drivers/clk/mediatek/clk-mt8192-aud.c
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -77,42 +77,45 @@ static const struct mtk_gate aud_clks[] = {
GATE_AUD2(CLK_AUD_I2S9_B, "aud_i2s9_b", "audio_sel", 4),
};
+static const struct mtk_clk_desc aud_desc = {
+ .clks = aud_clks,
+ .num_clks = ARRAY_SIZE(aud_clks),
+};
+
static int clk_mt8192_aud_probe(struct platform_device *pdev)
{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
int r;
- clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
- if (r)
- return r;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ r = mtk_clk_simple_probe(pdev);
if (r)
return r;
r = devm_of_platform_populate(&pdev->dev);
if (r)
- of_clk_del_provider(node);
+ mtk_clk_simple_remove(pdev);
return r;
}
+static int clk_mt8192_aud_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ return mtk_clk_simple_remove(pdev);
+}
+
static const struct of_device_id of_match_clk_mt8192_aud[] = {
- { .compatible = "mediatek,mt8192-audsys", },
- {}
+ { .compatible = "mediatek,mt8192-audsys", .data = &aud_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_aud);
static struct platform_driver clk_mt8192_aud_drv = {
.probe = clk_mt8192_aud_probe,
+ .remove = clk_mt8192_aud_remove,
.driver = {
.name = "clk-mt8192-aud",
.of_match_table = of_match_clk_mt8192_aud,
},
};
-
-builtin_platform_driver(clk_mt8192_aud_drv);
+module_platform_driver(clk_mt8192_aud_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
index 90b57d46eef7..7befd6ee8c79 100644
--- a/drivers/clk/mediatek/clk-mt8192-cam.c
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -95,6 +95,7 @@ static const struct of_device_id of_match_clk_mt8192_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_cam);
static struct platform_driver clk_mt8192_cam_drv = {
.probe = mtk_clk_simple_probe,
@@ -104,5 +105,5 @@ static struct platform_driver clk_mt8192_cam_drv = {
.of_match_table = of_match_clk_mt8192_cam,
},
};
-
-builtin_platform_driver(clk_mt8192_cam_drv);
+module_platform_driver(clk_mt8192_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
index da82d65a7650..a7505150a9d0 100644
--- a/drivers/clk/mediatek/clk-mt8192-img.c
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -58,6 +58,7 @@ static const struct of_device_id of_match_clk_mt8192_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_img);
static struct platform_driver clk_mt8192_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -67,5 +68,5 @@ static struct platform_driver clk_mt8192_img_drv = {
.of_match_table = of_match_clk_mt8192_img,
},
};
-
-builtin_platform_driver(clk_mt8192_img_drv);
+module_platform_driver(clk_mt8192_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
index ff8e20bb44bb..cd5d00a7c54b 100644
--- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -107,6 +107,7 @@ static const struct of_device_id of_match_clk_mt8192_imp_iic_wrap[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_imp_iic_wrap);
static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
@@ -116,5 +117,5 @@ static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.of_match_table = of_match_clk_mt8192_imp_iic_wrap,
},
};
-
-builtin_platform_driver(clk_mt8192_imp_iic_wrap_drv);
+module_platform_driver(clk_mt8192_imp_iic_wrap_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
index 0225abe4170a..dee671ae38e6 100644
--- a/drivers/clk/mediatek/clk-mt8192-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -45,6 +45,7 @@ static const struct of_device_id of_match_clk_mt8192_ipe[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_ipe);
static struct platform_driver clk_mt8192_ipe_drv = {
.probe = mtk_clk_simple_probe,
@@ -54,5 +55,5 @@ static struct platform_driver clk_mt8192_ipe_drv = {
.of_match_table = of_match_clk_mt8192_ipe,
},
};
-
-builtin_platform_driver(clk_mt8192_ipe_drv);
+module_platform_driver(clk_mt8192_ipe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
index 4675788d7816..f7b27264e378 100644
--- a/drivers/clk/mediatek/clk-mt8192-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -70,6 +70,7 @@ static const struct of_device_id of_match_clk_mt8192_mdp[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mdp);
static struct platform_driver clk_mt8192_mdp_drv = {
.probe = mtk_clk_simple_probe,
@@ -79,5 +80,5 @@ static struct platform_driver clk_mt8192_mdp_drv = {
.of_match_table = of_match_clk_mt8192_mdp,
},
};
-
-builtin_platform_driver(clk_mt8192_mdp_drv);
+module_platform_driver(clk_mt8192_mdp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
index ec5b44ffa458..85f76a2bbac4 100644
--- a/drivers/clk/mediatek/clk-mt8192-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -40,6 +40,7 @@ static const struct of_device_id of_match_clk_mt8192_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mfg);
static struct platform_driver clk_mt8192_mfg_drv = {
.probe = mtk_clk_simple_probe,
@@ -49,5 +50,5 @@ static struct platform_driver clk_mt8192_mfg_drv = {
.of_match_table = of_match_clk_mt8192_mfg,
},
};
-
-builtin_platform_driver(clk_mt8192_mfg_drv);
+module_platform_driver(clk_mt8192_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
index 1be3ff4d407d..47335d517714 100644
--- a/drivers/clk/mediatek/clk-mt8192-mm.c
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -80,29 +80,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM2(CLK_MM_32KHZ, "mm_32khz", "clk32k", 25),
};
-static int clk_mt8192_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
- if (r)
- return r;
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct platform_device_id clk_mt8192_mm_id_table[] = {
+ { .name = "clk-mt8192-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8192_mm_id_table);
static struct platform_driver clk_mt8192_mm_drv = {
- .probe = clk_mt8192_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8192-mm",
},
+ .id_table = clk_mt8192_mm_id_table,
};
-
-builtin_platform_driver(clk_mt8192_mm_drv);
+module_platform_driver(clk_mt8192_mm_drv);
+MODULE_LICENSE("GPL");
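
Unlike the other subsystems in this series, the multimedia clock controller has no devicetree node of its own: the mtk-mmsys driver spawns a "clk-mt8192-mm" child platform device, which is why the driver now matches on a platform_device_id table and relies on mtk_clk_pdev_probe() to register the gates against the parent's of_node. A hedged sketch of how such a child is typically created on the mmsys side (function and variable names are illustrative):

#include <linux/platform_device.h>

/* Illustrative only: how an mmsys driver can hand its clock gates off
 * to the "clk-mt8192-mm" driver above via an unnamed child device.
 */
static struct platform_device *
example_spawn_mm_clk_child(struct platform_device *mmsys_pdev)
{
	return platform_device_register_data(&mmsys_pdev->dev, "clk-mt8192-mm",
					     PLATFORM_DEVID_AUTO, NULL, 0);
}
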
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index a72e1b73fce8..60d65f96d39a 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -52,6 +52,7 @@ static const struct of_device_id of_match_clk_mt8192_msdc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_msdc);
static struct platform_driver clk_mt8192_msdc_drv = {
.probe = mtk_clk_simple_probe,
@@ -61,5 +62,5 @@ static struct platform_driver clk_mt8192_msdc_drv = {
.of_match_table = of_match_clk_mt8192_msdc,
},
};
-
-builtin_platform_driver(clk_mt8192_msdc_drv);
+module_platform_driver(clk_mt8192_msdc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
index 18a8679108b8..6aad57797c39 100644
--- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -38,6 +38,7 @@ static const struct of_device_id of_match_clk_mt8192_scp_adsp[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_scp_adsp);
static struct platform_driver clk_mt8192_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
@@ -47,5 +48,5 @@ static struct platform_driver clk_mt8192_scp_adsp_drv = {
.of_match_table = of_match_clk_mt8192_scp_adsp,
},
};
-
-builtin_platform_driver(clk_mt8192_scp_adsp_drv);
+module_platform_driver(clk_mt8192_scp_adsp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
index e149962dbbf9..473afd58495c 100644
--- a/drivers/clk/mediatek/clk-mt8192-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -82,6 +82,7 @@ static const struct of_device_id of_match_clk_mt8192_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_vdec);
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -91,5 +92,5 @@ static struct platform_driver clk_mt8192_vdec_drv = {
.of_match_table = of_match_clk_mt8192_vdec,
},
};
-
-builtin_platform_driver(clk_mt8192_vdec_drv);
+module_platform_driver(clk_mt8192_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
index 80b8bb170996..57b1b16e2310 100644
--- a/drivers/clk/mediatek/clk-mt8192-venc.c
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -41,6 +41,7 @@ static const struct of_device_id of_match_clk_mt8192_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_venc);
static struct platform_driver clk_mt8192_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -50,5 +51,5 @@ static struct platform_driver clk_mt8192_venc_drv = {
.of_match_table = of_match_clk_mt8192_venc,
},
};
-
-builtin_platform_driver(clk_mt8192_venc_drv);
+module_platform_driver(clk_mt8192_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index 0e88588b2c49..aa11291463f7 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -15,7 +15,6 @@
#include "clk-gate.h"
#include "clk-mtk.h"
#include "clk-mux.h"
-#include "clk-pll.h"
#include <dt-bindings/clock/mt8192-clk.h>
#include <dt-bindings/reset/mt8192-resets.h>
@@ -26,10 +25,6 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_ULPOSC, "ulposc", NULL, 260000000),
};
-static const struct mtk_fixed_factor top_early_divs[] = {
- FACTOR(CLK_TOP_CSW_F26M_D2, "csw_f26m_d2", "clk26m", 1, 2),
-};
-
static const struct mtk_fixed_factor top_divs[] = {
FACTOR_FLAGS(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3, 0),
FACTOR_FLAGS(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4, 0),
@@ -95,6 +90,7 @@ static const struct mtk_fixed_factor top_divs[] = {
FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
FACTOR(CLK_TOP_OSC_D16, "osc_d16", "ulposc", 1, 16),
FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_CSW_F26M_D2, "csw_f26m_d2", "clk26m", 1, 2),
FACTOR(CLK_TOP_ADSPPLL, "adsppll_ck", "adsppll", 1, 1),
FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13, 0),
FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M_D2, "univpll_192m_d2", "univpll_192m", 1, 2, 0),
@@ -701,9 +697,7 @@ static struct mtk_composite top_muxes[] = {
MUX(CLK_TOP_APLL_I2S7_M_SEL, "apll_i2s7_m_sel", apll_i2s_m_parents, 0x320, 23, 1),
MUX(CLK_TOP_APLL_I2S8_M_SEL, "apll_i2s8_m_sel", apll_i2s_m_parents, 0x320, 24, 1),
MUX(CLK_TOP_APLL_I2S9_M_SEL, "apll_i2s9_m_sel", apll_i2s_m_parents, 0x320, 25, 1),
-};
-
-static const struct mtk_composite top_adj_divs[] = {
+ /* APLL_DIV */
DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_m_sel", 0x320, 0, 0x328, 8, 0),
DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_m_sel", 0x320, 1, 0x328, 8, 8),
DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_m_sel", 0x320, 2, 0x328, 8, 16),
@@ -717,19 +711,6 @@ static const struct mtk_composite top_adj_divs[] = {
DIV_GATE(CLK_TOP_APLL12_DIV9, "apll12_div9", "apll_i2s9_m_sel", 0x320, 10, 0x338, 8, 16),
};
-static const struct mtk_gate_regs apmixed_cg_regs = {
- .set_ofs = 0x14,
- .clr_ofs = 0x14,
- .sta_ofs = 0x14,
-};
-
-#define GATE_APMIXED(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-
-static const struct mtk_gate apmixed_clks[] = {
- GATE_APMIXED(CLK_APMIXED_MIPID26M, "mipid26m", "clk26m", 16),
-};
-
static const struct mtk_gate_regs infra0_cg_regs = {
.set_ofs = 0x80,
.clr_ofs = 0x84,
@@ -983,91 +964,6 @@ static const struct mtk_clk_rst_desc clk_rst_desc = {
.rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
};
-#define MT8192_PLL_FMAX (3800UL * MHZ)
-#define MT8192_PLL_FMIN (1500UL * MHZ)
-#define MT8192_INTEGER_BITS 8
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
- _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
- _pcw_reg, _pcw_shift, _pcw_chg_reg, \
- _en_reg, _pll_en_bit) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = _rst_bar_mask, \
- .fmax = MT8192_PLL_FMAX, \
- .fmin = MT8192_PLL_FMIN, \
- .pcwbits = _pcwbits, \
- .pcwibits = MT8192_INTEGER_BITS, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .tuner_en_reg = _tuner_en_reg, \
- .tuner_en_bit = _tuner_en_bit, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .pcw_chg_reg = _pcw_chg_reg, \
- .en_reg = _en_reg, \
- .pll_en_bit = _pll_en_bit, \
- }
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
- _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
- _pcw_reg, _pcw_shift) \
- PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
- _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
- _pcw_reg, _pcw_shift, 0, 0, 0)
-
-static const struct mtk_pll_data plls[] = {
- PLL_B(CLK_APMIXED_MAINPLL, "mainpll", 0x0340, 0x034c, 0xff000000,
- HAVE_RST_BAR, BIT(23), 22, 0x0344, 24, 0, 0, 0, 0x0344, 0),
- PLL_B(CLK_APMIXED_UNIVPLL, "univpll", 0x0308, 0x0314, 0xff000000,
- HAVE_RST_BAR, BIT(23), 22, 0x030c, 24, 0, 0, 0, 0x030c, 0),
- PLL(CLK_APMIXED_USBPLL, "usbpll", 0x03c4, 0x03cc, 0x00000000,
- 0, 0, 22, 0x03c4, 24, 0, 0, 0, 0x03c4, 0, 0x03c4, 0x03cc, 2),
- PLL_B(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035c, 0x00000000,
- 0, 0, 22, 0x0354, 24, 0, 0, 0, 0x0354, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0360, 0x036c, 0xff000000,
- HAVE_RST_BAR, BIT(23), 22, 0x0364, 24, 0, 0, 0, 0x0364, 0),
- PLL_B(CLK_APMIXED_ADSPPLL, "adsppll", 0x0370, 0x037c, 0xff000000,
- HAVE_RST_BAR, BIT(23), 22, 0x0374, 24, 0, 0, 0, 0x0374, 0),
- PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0268, 0x0274, 0x00000000,
- 0, 0, 22, 0x026c, 24, 0, 0, 0, 0x026c, 0),
- PLL_B(CLK_APMIXED_TVDPLL, "tvdpll", 0x0380, 0x038c, 0x00000000,
- 0, 0, 22, 0x0384, 24, 0, 0, 0, 0x0384, 0),
- PLL_B(CLK_APMIXED_APLL1, "apll1", 0x0318, 0x0328, 0x00000000,
- 0, 0, 32, 0x031c, 24, 0x0040, 0x000c, 0, 0x0320, 0),
- PLL_B(CLK_APMIXED_APLL2, "apll2", 0x032c, 0x033c, 0x00000000,
- 0, 0, 32, 0x0330, 24, 0, 0, 0, 0x0334, 0),
-};
-
-static struct clk_hw_onecell_data *top_clk_data;
-
-static void clk_mt8192_top_init_early(struct device_node *node)
-{
- int i;
-
- top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
- if (!top_clk_data)
- return;
-
- for (i = 0; i < CLK_TOP_NR_CLK; i++)
- top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
-
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
-
- of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
-}
-
-CLK_OF_DECLARE_DRIVER(mt8192_topckgen, "mediatek,mt8192-topckgen",
- clk_mt8192_top_init_early);
-
/* Register mux notifier for MFG mux */
static int clk_mt8192_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
{
@@ -1090,164 +986,48 @@ static int clk_mt8192_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
}
-static int clk_mt8192_top_probe(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- int r;
- void __iomem *base;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
- mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node, &mt8192_clk_lock,
- top_clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8192_clk_lock,
- top_clk_data);
- mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, &mt8192_clk_lock,
- top_clk_data);
- r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
- if (r)
- return r;
-
- r = clk_mt8192_reg_mfg_mux_notifier(&pdev->dev,
- top_clk_data->hws[CLK_TOP_MFG_PLL_SEL]->clk);
- if (r)
- return r;
-
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
- top_clk_data);
-}
-
-static int clk_mt8192_infra_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data);
- if (r)
- goto free_clk_data;
-
- r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
- if (r)
- goto free_clk_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto free_clk_data;
-
- return r;
-
-free_clk_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
-
-static int clk_mt8192_peri_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data);
- if (r)
- goto free_clk_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto free_clk_data;
-
- return r;
-
-free_clk_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
-
-static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
- if (r)
- goto free_clk_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto free_clk_data;
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .rst_desc = &clk_rst_desc,
+};
- return r;
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_clks,
+ .num_clks = ARRAY_SIZE(peri_clks),
+};
-free_clk_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_mtk_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_mtk_muxes),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .clks = top_clks,
+ .num_clks = ARRAY_SIZE(top_clks),
+ .clk_lock = &mt8192_clk_lock,
+ .clk_notifier_func = clk_mt8192_reg_mfg_mux_notifier,
+ .mfg_clk_idx = CLK_TOP_MFG_PLL_SEL,
+};
static const struct of_device_id of_match_clk_mt8192[] = {
- {
- .compatible = "mediatek,mt8192-apmixedsys",
- .data = clk_mt8192_apmixed_probe,
- }, {
- .compatible = "mediatek,mt8192-topckgen",
- .data = clk_mt8192_top_probe,
- }, {
- .compatible = "mediatek,mt8192-infracfg",
- .data = clk_mt8192_infra_probe,
- }, {
- .compatible = "mediatek,mt8192-pericfg",
- .data = clk_mt8192_peri_probe,
- }, {
- /* sentinel */
- }
-};
-
-static int clk_mt8192_probe(struct platform_device *pdev)
-{
- int (*clk_probe)(struct platform_device *pdev);
- int r;
-
- clk_probe = of_device_get_match_data(&pdev->dev);
- if (!clk_probe)
- return -EINVAL;
-
- r = clk_probe(pdev);
- if (r)
- dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", pdev->name, r);
-
- return r;
-}
+ { .compatible = "mediatek,mt8192-infracfg", .data = &infra_desc },
+ { .compatible = "mediatek,mt8192-pericfg", .data = &peri_desc },
+ { .compatible = "mediatek,mt8192-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8192);
static struct platform_driver clk_mt8192_drv = {
- .probe = clk_mt8192_probe,
.driver = {
.name = "clk-mt8192",
.of_match_table = of_match_clk_mt8192,
},
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
};
-
-static int __init clk_mt8192_init(void)
-{
- return platform_driver_register(&clk_mt8192_drv);
-}
-
-arch_initcall(clk_mt8192_init);
+module_platform_driver(clk_mt8192_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
index 0dfed6ec4d15..8b9b5d820286 100644
--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -3,9 +3,11 @@
// Copyright (c) 2021 MediaTek Inc.
// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+#include "clk-fhctl.h"
#include "clk-gate.h"
#include "clk-mtk.h"
#include "clk-pll.h"
+#include "clk-pllfh.h"
#include <dt-bindings/clock/mt8195-clk.h>
#include <linux/of_device.h>
@@ -105,26 +107,87 @@ static const struct mtk_pll_data plls[] = {
0, 0, 22, 0x0158, 24, 0, 0, 0, 0x0158, 0, 0x0158, 0, 9),
};
+enum fh_pll_id {
+ FH_ARMPLL_LL,
+ FH_ARMPLL_BL,
+ FH_MEMPLL,
+ FH_ADSPPLL,
+ FH_NNAPLL,
+ FH_CCIPLL,
+ FH_MFGPLL,
+ FH_TVDPLL2,
+ FH_MPLL,
+ FH_MMPLL,
+ FH_MAINPLL,
+ FH_MSDCPLL,
+ FH_IMGPLL,
+ FH_VDECPLL,
+ FH_TVDPLL1,
+ FH_NR_FH,
+};
+
+#define FH(_pllid, _fhid, _offset) { \
+ .data = { \
+ .pll_id = _pllid, \
+ .fh_id = _fhid, \
+ .fh_ver = FHCTL_PLLFH_V2, \
+ .fhx_offset = _offset, \
+ .dds_mask = GENMASK(21, 0), \
+ .slope0_value = 0x6003c97, \
+ .slope1_value = 0x6003c97, \
+ .sfstrx_en = BIT(2), \
+ .frddsx_en = BIT(1), \
+ .fhctlx_en = BIT(0), \
+ .tgl_org = BIT(31), \
+ .dvfs_tri = BIT(31), \
+ .pcwchg = BIT(31), \
+ .dt_val = 0x0, \
+ .df_val = 0x9, \
+ .updnlmt_shft = 16, \
+ .msk_frddsx_dys = GENMASK(23, 20), \
+ .msk_frddsx_dts = GENMASK(19, 16), \
+ }, \
+ }
+
+static struct mtk_pllfh_data pllfhs[] = {
+ FH(CLK_APMIXED_ADSPPLL, FH_ADSPPLL, 0x78),
+ FH(CLK_APMIXED_NNAPLL, FH_NNAPLL, 0x8c),
+ FH(CLK_APMIXED_MFGPLL, FH_MFGPLL, 0xb4),
+ FH(CLK_APMIXED_TVDPLL2, FH_TVDPLL2, 0xc8),
+ FH(CLK_APMIXED_MMPLL, FH_MMPLL, 0xf0),
+ FH(CLK_APMIXED_MAINPLL, FH_MAINPLL, 0x104),
+ FH(CLK_APMIXED_MSDCPLL, FH_MSDCPLL, 0x118),
+ FH(CLK_APMIXED_IMGPLL, FH_IMGPLL, 0x12c),
+ FH(CLK_APMIXED_VDECPLL, FH_VDECPLL, 0x140),
+ FH(CLK_APMIXED_TVDPLL1, FH_TVDPLL1, 0x154),
+};
+
static const struct of_device_id of_match_clk_mt8195_apmixed[] = {
{ .compatible = "mediatek,mt8195-apmixedsys", },
{}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_apmixed);
static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
+ const u8 *fhctl_node = "mediatek,mt8195-fhctl";
int r;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ fhctl_parse_dt(fhctl_node, pllfhs, ARRAY_SIZE(pllfhs));
+
+ r = mtk_clk_register_pllfhs(node, plls, ARRAY_SIZE(plls),
+ pllfhs, ARRAY_SIZE(pllfhs), clk_data);
if (r)
goto free_apmixed_data;
- r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
if (r)
goto unregister_plls;
@@ -139,7 +202,8 @@ static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
unregister_gates:
mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
unregister_plls:
- mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
free_apmixed_data:
mtk_free_clk_data(clk_data);
return r;
@@ -152,7 +216,8 @@ static int clk_mt8195_apmixed_remove(struct platform_device *pdev)
of_clk_del_provider(node);
mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
- mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_clk_unregister_pllfhs(plls, ARRAY_SIZE(plls), pllfhs,
+ ARRAY_SIZE(pllfhs), clk_data);
mtk_free_clk_data(clk_data);
return 0;
@@ -166,4 +231,5 @@ static struct platform_driver clk_mt8195_apmixed_drv = {
.of_match_table = of_match_clk_mt8195_apmixed,
},
};
-builtin_platform_driver(clk_mt8195_apmixed_drv);
+module_platform_driver(clk_mt8195_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
index 0b52f6a009c4..de04c087c8c3 100644
--- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -101,6 +101,7 @@ static const struct of_device_id of_match_clk_mt8195_apusys_pll[] = {
{ .compatible = "mediatek,mt8195-apusys_pll", },
{}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_apusys_pll);
static struct platform_driver clk_mt8195_apusys_pll_drv = {
.probe = clk_mt8195_apusys_pll_probe,
@@ -110,4 +111,5 @@ static struct platform_driver clk_mt8195_apusys_pll_drv = {
.of_match_table = of_match_clk_mt8195_apusys_pll,
},
};
-builtin_platform_driver(clk_mt8195_apusys_pll_drv);
+module_platform_driver(clk_mt8195_apusys_pll_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c
index e4d00fe6e757..77e608be579a 100644
--- a/drivers/clk/mediatek/clk-mt8195-cam.c
+++ b/drivers/clk/mediatek/clk-mt8195-cam.c
@@ -131,6 +131,7 @@ static const struct of_device_id of_match_clk_mt8195_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_cam);
static struct platform_driver clk_mt8195_cam_drv = {
.probe = mtk_clk_simple_probe,
@@ -140,4 +141,5 @@ static struct platform_driver clk_mt8195_cam_drv = {
.of_match_table = of_match_clk_mt8195_cam,
},
};
-builtin_platform_driver(clk_mt8195_cam_drv);
+module_platform_driver(clk_mt8195_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c
index 4e326b6301ba..bdc2e6f3e9ce 100644
--- a/drivers/clk/mediatek/clk-mt8195-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8195-ccu.c
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8195_ccu[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ccu);
static struct platform_driver clk_mt8195_ccu_drv = {
.probe = mtk_clk_simple_probe,
@@ -48,4 +49,5 @@ static struct platform_driver clk_mt8195_ccu_drv = {
.of_match_table = of_match_clk_mt8195_ccu,
},
};
-builtin_platform_driver(clk_mt8195_ccu_drv);
+module_platform_driver(clk_mt8195_ccu_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c
index 12f5c436d075..d853e0e63d87 100644
--- a/drivers/clk/mediatek/clk-mt8195-img.c
+++ b/drivers/clk/mediatek/clk-mt8195-img.c
@@ -85,6 +85,7 @@ static const struct of_device_id of_match_clk_mt8195_img[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_img);
static struct platform_driver clk_mt8195_img_drv = {
.probe = mtk_clk_simple_probe,
@@ -94,4 +95,5 @@ static struct platform_driver clk_mt8195_img_drv = {
.of_match_table = of_match_clk_mt8195_img,
},
};
-builtin_platform_driver(clk_mt8195_img_drv);
+module_platform_driver(clk_mt8195_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
index fbc809d05072..1d808876f5c5 100644
--- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -55,6 +55,7 @@ static const struct of_device_id of_match_clk_mt8195_imp_iic_wrap[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_imp_iic_wrap);
static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
@@ -64,4 +65,5 @@ static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
.of_match_table = of_match_clk_mt8195_imp_iic_wrap,
},
};
-builtin_platform_driver(clk_mt8195_imp_iic_wrap_drv);
+module_platform_driver(clk_mt8195_imp_iic_wrap_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index fcd410461d3b..f3ee4390707d 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -229,6 +229,7 @@ static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_infra_ao);
static struct platform_driver clk_mt8195_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
@@ -238,4 +239,5 @@ static struct platform_driver clk_mt8195_infra_ao_drv = {
.of_match_table = of_match_clk_mt8195_infra_ao,
},
};
-builtin_platform_driver(clk_mt8195_infra_ao_drv);
+module_platform_driver(clk_mt8195_infra_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c
index b0d745cf7752..4c47f6521275 100644
--- a/drivers/clk/mediatek/clk-mt8195-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8195-ipe.c
@@ -40,6 +40,7 @@ static const struct of_device_id of_match_clk_mt8195_ipe[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ipe);
static struct platform_driver clk_mt8195_ipe_drv = {
.probe = mtk_clk_simple_probe,
@@ -49,4 +50,5 @@ static struct platform_driver clk_mt8195_ipe_drv = {
.of_match_table = of_match_clk_mt8195_ipe,
},
};
-builtin_platform_driver(clk_mt8195_ipe_drv);
+module_platform_driver(clk_mt8195_ipe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
index c94cb71bd9b9..038acf0b1167 100644
--- a/drivers/clk/mediatek/clk-mt8195-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -38,6 +38,7 @@ static const struct of_device_id of_match_clk_mt8195_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_mfg);
static struct platform_driver clk_mt8195_mfg_drv = {
.probe = mtk_clk_simple_probe,
@@ -47,4 +48,5 @@ static struct platform_driver clk_mt8195_mfg_drv = {
.of_match_table = of_match_clk_mt8195_mfg,
},
};
-builtin_platform_driver(clk_mt8195_mfg_drv);
+module_platform_driver(clk_mt8195_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
index 2f6b3bb657db..0de162593c01 100644
--- a/drivers/clk/mediatek/clk-mt8195-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
@@ -51,6 +51,7 @@ static const struct of_device_id of_match_clk_mt8195_peri_ao[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_peri_ao);
static struct platform_driver clk_mt8195_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
@@ -60,4 +61,5 @@ static struct platform_driver clk_mt8195_peri_ao_drv = {
.of_match_table = of_match_clk_mt8195_peri_ao,
},
};
-builtin_platform_driver(clk_mt8195_peri_ao_drv);
+module_platform_driver(clk_mt8195_peri_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
index e16c383f631b..d0d3e3b09780 100644
--- a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
@@ -36,6 +36,7 @@ static const struct of_device_id of_match_clk_mt8195_scp_adsp[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_scp_adsp);
static struct platform_driver clk_mt8195_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
@@ -45,4 +46,5 @@ static struct platform_driver clk_mt8195_scp_adsp_drv = {
.of_match_table = of_match_clk_mt8195_scp_adsp,
},
};
-builtin_platform_driver(clk_mt8195_scp_adsp_drv);
+module_platform_driver(clk_mt8195_scp_adsp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index 93e96419da66..3c2174c3e742 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -1219,6 +1219,7 @@ static const struct of_device_id of_match_clk_mt8195_topck[] = {
{ .compatible = "mediatek,mt8195-topckgen", },
{}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_topck);
/* Register mux notifier for MFG mux */
static int clk_mt8195_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
@@ -1262,7 +1263,8 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_fixed_clks;
- r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+ r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
+ ARRAY_SIZE(top_mtk_muxes), node,
&mt8195_clk_lock, top_clk_data);
if (r)
goto unregister_factors;
@@ -1281,12 +1283,14 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_muxes;
- r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
+ ARRAY_SIZE(top_adj_divs), base,
&mt8195_clk_lock, top_clk_data);
if (r)
goto unregister_muxes;
- r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), top_clk_data);
if (r)
goto unregister_composite_divs;
@@ -1337,4 +1341,5 @@ static struct platform_driver clk_mt8195_topck_drv = {
.of_match_table = of_match_clk_mt8195_topck,
},
};
-builtin_platform_driver(clk_mt8195_topck_drv);
+module_platform_driver(clk_mt8195_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c
index a1446b666385..2bcbceb10326 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdec.c
@@ -93,6 +93,7 @@ static const struct of_device_id of_match_clk_mt8195_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_vdec);
static struct platform_driver clk_mt8195_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -102,4 +103,5 @@ static struct platform_driver clk_mt8195_vdec_drv = {
.of_match_table = of_match_clk_mt8195_vdec,
},
};
-builtin_platform_driver(clk_mt8195_vdec_drv);
+module_platform_driver(clk_mt8195_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 07b46bfd5040..509780750e43 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -93,54 +93,24 @@ static const struct mtk_gate vdo0_clks[] = {
"top_edp", 16, CLK_SET_RATE_PARENT),
};
-static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_VDO0_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
- if (r)
- goto free_vdo0_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto unregister_gates;
-
- platform_set_drvdata(pdev, clk_data);
-
- return r;
-
-unregister_gates:
- mtk_clk_unregister_gates(vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
-free_vdo0_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
-
-static int clk_mt8195_vdo0_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
-
- of_clk_del_provider(node);
- mtk_clk_unregister_gates(vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
- mtk_free_clk_data(clk_data);
+static const struct mtk_clk_desc vdo0_desc = {
+ .clks = vdo0_clks,
+ .num_clks = ARRAY_SIZE(vdo0_clks),
+};
- return 0;
-}
+static const struct platform_device_id clk_mt8195_vdo0_id_table[] = {
+ { .name = "clk-mt8195-vdo0", .driver_data = (kernel_ulong_t)&vdo0_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo0_id_table);
static struct platform_driver clk_mt8195_vdo0_drv = {
- .probe = clk_mt8195_vdo0_probe,
- .remove = clk_mt8195_vdo0_remove,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo0",
},
+ .id_table = clk_mt8195_vdo0_id_table,
};
-builtin_platform_driver(clk_mt8195_vdo0_drv);
+module_platform_driver(clk_mt8195_vdo0_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 835335b9d87b..0a5214a1ed25 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -120,54 +120,24 @@ static const struct mtk_gate vdo1_clks[] = {
GATE_VDO1_4(CLK_VDO1_DPI1_HDMI, "vdo1_dpi1_hdmi", "hdmi_txpll", 0),
};
-static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_VDO1_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- r = mtk_clk_register_gates(node, vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
- if (r)
- goto free_vdo1_data;
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- goto unregister_gates;
-
- platform_set_drvdata(pdev, clk_data);
-
- return r;
-
-unregister_gates:
- mtk_clk_unregister_gates(vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
-free_vdo1_data:
- mtk_free_clk_data(clk_data);
- return r;
-}
-
-static int clk_mt8195_vdo1_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
-
- of_clk_del_provider(node);
- mtk_clk_unregister_gates(vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
- mtk_free_clk_data(clk_data);
+static const struct mtk_clk_desc vdo1_desc = {
+ .clks = vdo1_clks,
+ .num_clks = ARRAY_SIZE(vdo1_clks),
+};
- return 0;
-}
+static const struct platform_device_id clk_mt8195_vdo1_id_table[] = {
+ { .name = "clk-mt8195-vdo1", .driver_data = (kernel_ulong_t)&vdo1_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo1_id_table);
static struct platform_driver clk_mt8195_vdo1_drv = {
- .probe = clk_mt8195_vdo1_probe,
- .remove = clk_mt8195_vdo1_remove,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo1",
},
+ .id_table = clk_mt8195_vdo1_id_table,
};
-builtin_platform_driver(clk_mt8195_vdo1_drv);
+module_platform_driver(clk_mt8195_vdo1_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c
index 622f57804f96..0991a6968765 100644
--- a/drivers/clk/mediatek/clk-mt8195-venc.c
+++ b/drivers/clk/mediatek/clk-mt8195-venc.c
@@ -58,6 +58,7 @@ static const struct of_device_id of_match_clk_mt8195_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_venc);
static struct platform_driver clk_mt8195_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -67,4 +68,5 @@ static struct platform_driver clk_mt8195_venc_drv = {
.of_match_table = of_match_clk_mt8195_venc,
},
};
-builtin_platform_driver(clk_mt8195_venc_drv);
+module_platform_driver(clk_mt8195_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c
index bf2939c3a023..1a98fb9a25e8 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c
@@ -91,21 +91,19 @@ static const struct mtk_clk_desc vpp0_desc = {
.num_clks = ARRAY_SIZE(vpp0_clks),
};
-static const struct of_device_id of_match_clk_mt8195_vpp0[] = {
- {
- .compatible = "mediatek,mt8195-vppsys0",
- .data = &vpp0_desc,
- }, {
- /* sentinel */
- }
+static const struct platform_device_id clk_mt8195_vpp0_id_table[] = {
+ { .name = "clk-mt8195-vpp0", .driver_data = (kernel_ulong_t)&vpp0_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp0_id_table);
static struct platform_driver clk_mt8195_vpp0_drv = {
- .probe = mtk_clk_simple_probe,
- .remove = mtk_clk_simple_remove,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp0",
- .of_match_table = of_match_clk_mt8195_vpp0,
},
+ .id_table = clk_mt8195_vpp0_id_table,
};
-builtin_platform_driver(clk_mt8195_vpp0_drv);
+module_platform_driver(clk_mt8195_vpp0_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c
index ffd52c762890..c2d5b582f53a 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c
@@ -89,21 +89,19 @@ static const struct mtk_clk_desc vpp1_desc = {
.num_clks = ARRAY_SIZE(vpp1_clks),
};
-static const struct of_device_id of_match_clk_mt8195_vpp1[] = {
- {
- .compatible = "mediatek,mt8195-vppsys1",
- .data = &vpp1_desc,
- }, {
- /* sentinel */
- }
+static const struct platform_device_id clk_mt8195_vpp1_id_table[] = {
+ { .name = "clk-mt8195-vpp1", .driver_data = (kernel_ulong_t)&vpp1_desc },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp1_id_table);
static struct platform_driver clk_mt8195_vpp1_drv = {
- .probe = mtk_clk_simple_probe,
- .remove = mtk_clk_simple_remove,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp1",
- .of_match_table = of_match_clk_mt8195_vpp1,
},
+ .id_table = clk_mt8195_vpp1_id_table,
};
-builtin_platform_driver(clk_mt8195_vpp1_drv);
+module_platform_driver(clk_mt8195_vpp1_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c
index b483fab10e18..289896cb2f6c 100644
--- a/drivers/clk/mediatek/clk-mt8195-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8195-wpe.c
@@ -132,6 +132,7 @@ static const struct of_device_id of_match_clk_mt8195_wpe[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_wpe);
static struct platform_driver clk_mt8195_wpe_drv = {
.probe = mtk_clk_simple_probe,
@@ -141,4 +142,5 @@ static struct platform_driver clk_mt8195_wpe_drv = {
.of_match_table = of_match_clk_mt8195_wpe,
},
};
-builtin_platform_driver(clk_mt8195_wpe_drv);
+module_platform_driver(clk_mt8195_wpe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-apmixedsys.c b/drivers/clk/mediatek/clk-mt8365-apmixedsys.c
new file mode 100644
index 000000000000..9b0bc5daeac0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-apmixedsys.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Copyright (c) 2023 Collabora Ltd.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-pll.h"
+#include "clk-mtk.h"
+
+#define MT8365_PLL_FMAX (3800UL * MHZ)
+#define MT8365_PLL_FMIN (1500UL * MHZ)
+#define CON0_MT8365_RST_BAR BIT(23)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table, \
+ _rst_bar_mask, _pcw_chg_reg) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8365_PLL_FMAX, \
+ .fmin = MT8365_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = 8, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift, _rst_bar_mask, _pcw_chg_reg) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, NULL, _rst_bar_mask, \
+ _pcw_chg_reg) \
+
+static const struct mtk_pll_div_table armpll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1500 * MHZ },
+ { .div = 2, .freq = 750 * MHZ },
+ { .div = 3, .freq = 375 * MHZ },
+ { .div = 4, .freq = 182500000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 800 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table dsppll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 600 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_ARMPLL, "armpll", 0x030C, 0x0318, 0x00000001, PLL_AO,
+ 22, 0x0310, 24, 0, 0, 0, 0x0310, 0, armpll_div_table, 0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0228, 0x0234, 0xFF000001,
+ HAVE_RST_BAR, 22, 0x022C, 24, 0, 0, 0, 0x022C, 0, CON0_MT8365_RST_BAR, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll2", 0x0208, 0x0214, 0xFF000001,
+ HAVE_RST_BAR, 22, 0x020C, 24, 0, 0, 0, 0x020C, 0, CON0_MT8365_RST_BAR, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0218, 0x0224, 0x00000001, 0, 22,
+ 0x021C, 24, 0, 0, 0, 0x021C, 0, mfgpll_div_table, 0, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035C, 0x00000001, 0, 22,
+ 0x0354, 24, 0, 0, 0, 0x0354, 0, 0, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0330, 0x033C, 0x00000001, 0, 22,
+ 0x0334, 24, 0, 0, 0, 0x0334, 0, 0, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x031C, 0x032C, 0x00000001, 0, 32,
+ 0x0320, 24, 0x0040, 0x000C, 0, 0x0324, 0, 0, 0x0320),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0360, 0x0370, 0x00000001, 0, 32,
+ 0x0364, 24, 0x004C, 0x000C, 5, 0x0368, 0, 0, 0x0364),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0374, 0x0380, 0x00000001, 0, 22,
+ 0x0378, 24, 0, 0, 0, 0x0378, 0, 0, 0),
+ PLL_B(CLK_APMIXED_DSPPLL, "dsppll", 0x0390, 0x039C, 0x00000001, 0, 22,
+ 0x0394, 24, 0, 0, 0, 0x0394, 0, dsppll_div_table, 0, 0),
+ PLL(CLK_APMIXED_APUPLL, "apupll", 0x03A0, 0x03AC, 0x00000001, 0, 22,
+ 0x03A4, 24, 0, 0, 0, 0x03A4, 0, 0, 0),
+};
+
+static int clk_mt8365_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *hw;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ hw = devm_clk_hw_register_gate(dev, "univ_en", "univpll2", 0,
+ base + 0x204, 0, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[CLK_APMIXED_UNIV_EN] = hw;
+
+ hw = devm_clk_hw_register_gate(dev, "usb20_en", "univ_en", 0,
+ base + 0x204, 1, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[CLK_APMIXED_USB20_EN] = hw;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static const struct of_device_id of_match_clk_mt8365_apmixed[] = {
+ { .compatible = "mediatek,mt8365-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_apmixed);
+
+static struct platform_driver clk_mt8365_apmixed_drv = {
+ .probe = clk_mt8365_apmixed_probe,
+ .driver = {
+ .name = "clk-mt8365-apmixed",
+ .of_match_table = of_match_clk_mt8365_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8365_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c
index 91ffe89d9721..74f7fb22c87f 100644
--- a/drivers/clk/mediatek/clk-mt8365-apu.c
+++ b/drivers/clk/mediatek/clk-mt8365-apu.c
@@ -42,6 +42,7 @@ static const struct of_device_id of_match_clk_mt8365_apu[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_apu);
static struct platform_driver clk_mt8365_apu_drv = {
.probe = mtk_clk_simple_probe,
@@ -51,5 +52,5 @@ static struct platform_driver clk_mt8365_apu_drv = {
.of_match_table = of_match_clk_mt8365_apu,
},
};
-builtin_platform_driver(clk_mt8365_apu_drv);
+module_platform_driver(clk_mt8365_apu_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c
index 31d5b5cd6de1..61516e19acd1 100644
--- a/drivers/clk/mediatek/clk-mt8365-cam.c
+++ b/drivers/clk/mediatek/clk-mt8365-cam.c
@@ -44,6 +44,7 @@ static const struct of_device_id of_match_clk_mt8365_cam[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_cam);
static struct platform_driver clk_mt8365_cam_drv = {
.probe = mtk_clk_simple_probe,
@@ -53,5 +54,5 @@ static struct platform_driver clk_mt8365_cam_drv = {
.of_match_table = of_match_clk_mt8365_cam,
},
};
-builtin_platform_driver(clk_mt8365_cam_drv);
+module_platform_driver(clk_mt8365_cam_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c
index 587b49128b03..4c836c69db4f 100644
--- a/drivers/clk/mediatek/clk-mt8365-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8365-mfg.c
@@ -50,6 +50,7 @@ static const struct of_device_id of_match_clk_mt8365_mfg[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_mfg);
static struct platform_driver clk_mt8365_mfg_drv = {
.probe = mtk_clk_simple_probe,
@@ -59,5 +60,5 @@ static struct platform_driver clk_mt8365_mfg_drv = {
.of_match_table = of_match_clk_mt8365_mfg,
},
};
-builtin_platform_driver(clk_mt8365_mfg_drv);
+module_platform_driver(clk_mt8365_mfg_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 5c8bf18ab1f1..44427120846f 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -72,41 +72,24 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_LVDSTX_CTS, "mm_flvdstx_cts", "lvdstx_dig_cts", 3),
};
-static int clk_mt8365_mm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
- struct clk_hw_onecell_data *clk_data;
- int ret;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- ret = mtk_clk_register_gates_with_dev(node, mm_clks,
- ARRAY_SIZE(mm_clks), clk_data,
- dev);
- if (ret)
- goto err_free_clk_data;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto err_unregister_gates;
-
- return 0;
-
-err_unregister_gates:
- mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
-
-err_free_clk_data:
- mtk_free_clk_data(clk_data);
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
- return ret;
-}
+static const struct platform_device_id clk_mt8365_mm_id_table[] = {
+ { .name = "clk-mt8365-mm", .driver_data = (kernel_ulong_t)&mm_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8365_mm_id_table);
static struct platform_driver clk_mt8365_mm_drv = {
- .probe = clk_mt8365_mm_probe,
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8365-mm",
},
+ .id_table = clk_mt8365_mm_id_table,
};
-builtin_platform_driver(clk_mt8365_mm_drv);
+module_platform_driver(clk_mt8365_mm_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c
index cdc678e8941c..b51571e9da00 100644
--- a/drivers/clk/mediatek/clk-mt8365-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8365-vdec.c
@@ -50,6 +50,7 @@ static const struct of_device_id of_match_clk_mt8365_vdec[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_vdec);
static struct platform_driver clk_mt8365_vdec_drv = {
.probe = mtk_clk_simple_probe,
@@ -59,5 +60,5 @@ static struct platform_driver clk_mt8365_vdec_drv = {
.of_match_table = of_match_clk_mt8365_vdec,
},
};
-builtin_platform_driver(clk_mt8365_vdec_drv);
+module_platform_driver(clk_mt8365_vdec_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c
index 0e080c22119d..572344645c86 100644
--- a/drivers/clk/mediatek/clk-mt8365-venc.c
+++ b/drivers/clk/mediatek/clk-mt8365-venc.c
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8365_venc[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_venc);
static struct platform_driver clk_mt8365_venc_drv = {
.probe = mtk_clk_simple_probe,
@@ -48,5 +49,5 @@ static struct platform_driver clk_mt8365_venc_drv = {
.of_match_table = of_match_clk_mt8365_venc,
},
};
-builtin_platform_driver(clk_mt8365_venc_drv);
+module_platform_driver(clk_mt8365_venc_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
index adfecb618f10..6b4e193f648d 100644
--- a/drivers/clk/mediatek/clk-mt8365.c
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 MediaTek Inc.
+ * Copyright (C) 2023 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
*/
#include <dt-bindings/clock/mediatek,mt8365-clk.h>
@@ -17,7 +19,6 @@
#include "clk-gate.h"
#include "clk-mtk.h"
#include "clk-mux.h"
-#include "clk-pll.h"
static DEFINE_SPINLOCK(mt8365_clk_lock);
@@ -383,31 +384,23 @@ static const char * const mbist_diag_parents[] = {
"univpll2_d8"
};
-static const char * const apll_i2s0_parents[] = {
+static const char * const apll_i2s_parents[] = {
"aud_1_sel",
"aud_2_sel"
};
-static struct mtk_composite top_misc_mux_gates[] = {
+static struct mtk_composite top_misc_muxes[] = {
/* CLK_CFG_11 */
MUX_GATE(CLK_TOP_MBIST_DIAG_SEL, "mbist_diag_sel", mbist_diag_parents,
0x0ec, 0, 2, 7),
-};
-
-struct mt8365_clk_audio_mux {
- int id;
- const char *name;
- u8 shift;
-};
-
-static struct mt8365_clk_audio_mux top_misc_muxes[] = {
- { CLK_TOP_APLL_I2S0_SEL, "apll_i2s0_sel", 11},
- { CLK_TOP_APLL_I2S1_SEL, "apll_i2s1_sel", 12},
- { CLK_TOP_APLL_I2S2_SEL, "apll_i2s2_sel", 13},
- { CLK_TOP_APLL_I2S3_SEL, "apll_i2s3_sel", 14},
- { CLK_TOP_APLL_TDMOUT_SEL, "apll_tdmout_sel", 15},
- { CLK_TOP_APLL_TDMIN_SEL, "apll_tdmin_sel", 16},
- { CLK_TOP_APLL_SPDIF_SEL, "apll_spdif_sel", 17},
+ /* Audio MUX */
+ MUX(CLK_TOP_APLL_I2S0_SEL, "apll_i2s0_sel", apll_i2s_parents, 0x0320, 11, 1),
+ MUX(CLK_TOP_APLL_I2S1_SEL, "apll_i2s1_sel", apll_i2s_parents, 0x0320, 12, 1),
+ MUX(CLK_TOP_APLL_I2S2_SEL, "apll_i2s2_sel", apll_i2s_parents, 0x0320, 13, 1),
+ MUX(CLK_TOP_APLL_I2S3_SEL, "apll_i2s3_sel", apll_i2s_parents, 0x0320, 14, 1),
+ MUX(CLK_TOP_APLL_TDMOUT_SEL, "apll_tdmout_sel", apll_i2s_parents, 0x0320, 15, 1),
+ MUX(CLK_TOP_APLL_TDMIN_SEL, "apll_tdmin_sel", apll_i2s_parents, 0x0320, 16, 1),
+ MUX(CLK_TOP_APLL_SPDIF_SEL, "apll_spdif_sel", apll_i2s_parents, 0x0320, 17, 1),
};
#define CLK_CFG_UPDATE 0x004
@@ -570,35 +563,56 @@ static const struct mtk_clk_divider top_adj_divs[] = {
0x32c, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
};
-struct mtk_simple_gate {
- int id;
- const char *name;
- const char *parent;
- u32 reg;
- u8 shift;
- unsigned long gate_flags;
-};
-
-static const struct mtk_simple_gate top_clk_gates[] = {
- { CLK_TOP_CONN_32K, "conn_32k", "clk32k", 0x0, 10, CLK_GATE_SET_TO_DISABLE },
- { CLK_TOP_CONN_26M, "conn_26m", "clk26m", 0x0, 11, CLK_GATE_SET_TO_DISABLE },
- { CLK_TOP_DSP_32K, "dsp_32k", "clk32k", 0x0, 16, CLK_GATE_SET_TO_DISABLE },
- { CLK_TOP_DSP_26M, "dsp_26m", "clk26m", 0x0, 17, CLK_GATE_SET_TO_DISABLE },
- { CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_192m_d4", 0x104, 8, 0 },
- { CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "usb20_192m_d4", 0x104, 9, 0 },
- { CLK_TOP_LVDSTX_CLKDIG_EN, "lvdstx_dig_en", "lvdstx_dig_cts", 0x104, 20, 0 },
- { CLK_TOP_VPLL_DPIX_EN, "vpll_dpix_en", "vpll_dpix", 0x104, 21, 0 },
- { CLK_TOP_SSUSB_TOP_CK_EN, "ssusb_top_ck_en", NULL, 0x104, 22, 0 },
- { CLK_TOP_SSUSB_PHY_CK_EN, "ssusb_phy_ck_en", NULL, 0x104, 23, 0 },
- { CLK_TOP_AUD_I2S0_M, "aud_i2s0_m_ck", "apll12_ck_div0", 0x320, 0, 0 },
- { CLK_TOP_AUD_I2S1_M, "aud_i2s1_m_ck", "apll12_ck_div1", 0x320, 1, 0 },
- { CLK_TOP_AUD_I2S2_M, "aud_i2s2_m_ck", "apll12_ck_div2", 0x320, 2, 0 },
- { CLK_TOP_AUD_I2S3_M, "aud_i2s3_m_ck", "apll12_ck_div3", 0x320, 3, 0 },
- { CLK_TOP_AUD_TDMOUT_M, "aud_tdmout_m_ck", "apll12_ck_div4", 0x320, 4, 0 },
- { CLK_TOP_AUD_TDMOUT_B, "aud_tdmout_b_ck", "apll12_ck_div4b", 0x320, 5, 0 },
- { CLK_TOP_AUD_TDMIN_M, "aud_tdmin_m_ck", "apll12_ck_div5", 0x320, 6, 0 },
- { CLK_TOP_AUD_TDMIN_B, "aud_tdmin_b_ck", "apll12_ck_div5b", 0x320, 7, 0 },
- { CLK_TOP_AUD_SPDIF_M, "aud_spdif_m_ck", "apll12_ck_div6", 0x320, 8, 0 },
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0,
+ .clr_ofs = 0,
+ .sta_ofs = 0,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x104,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+ .set_ofs = 0x320,
+ .clr_ofs = 0x320,
+ .sta_ofs = 0x320,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_TOP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate top_clk_gates[] = {
+ GATE_TOP0(CLK_TOP_CONN_32K, "conn_32k", "clk32k", 10),
+ GATE_TOP0(CLK_TOP_CONN_26M, "conn_26m", "clk26m", 11),
+ GATE_TOP0(CLK_TOP_DSP_32K, "dsp_32k", "clk32k", 16),
+ GATE_TOP0(CLK_TOP_DSP_26M, "dsp_26m", "clk26m", 17),
+ GATE_TOP1(CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_192m_d4", 8),
+ GATE_TOP1(CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "usb20_192m_d4", 9),
+ GATE_TOP1(CLK_TOP_LVDSTX_CLKDIG_EN, "lvdstx_dig_en", "lvdstx_dig_cts", 20),
+ GATE_TOP1(CLK_TOP_VPLL_DPIX_EN, "vpll_dpix_en", "vpll_dpix", 21),
+ GATE_TOP1(CLK_TOP_SSUSB_TOP_CK_EN, "ssusb_top_ck_en", NULL, 22),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_CK_EN, "ssusb_phy_ck_en", NULL, 23),
+ GATE_TOP2(CLK_TOP_AUD_I2S0_M, "aud_i2s0_m_ck", "apll12_ck_div0", 0),
+ GATE_TOP2(CLK_TOP_AUD_I2S1_M, "aud_i2s1_m_ck", "apll12_ck_div1", 1),
+ GATE_TOP2(CLK_TOP_AUD_I2S2_M, "aud_i2s2_m_ck", "apll12_ck_div2", 2),
+ GATE_TOP2(CLK_TOP_AUD_I2S3_M, "aud_i2s3_m_ck", "apll12_ck_div3", 3),
+ GATE_TOP2(CLK_TOP_AUD_TDMOUT_M, "aud_tdmout_m_ck", "apll12_ck_div4", 4),
+ GATE_TOP2(CLK_TOP_AUD_TDMOUT_B, "aud_tdmout_b_ck", "apll12_ck_div4b", 5),
+ GATE_TOP2(CLK_TOP_AUD_TDMIN_M, "aud_tdmin_m_ck", "apll12_ck_div5", 6),
+ GATE_TOP2(CLK_TOP_AUD_TDMIN_B, "aud_tdmin_b_ck", "apll12_ck_div5b", 7),
+ GATE_TOP2(CLK_TOP_AUD_SPDIF_M, "aud_spdif_m_ck", "apll12_ck_div6", 8),
};
static const struct mtk_gate_regs ifr2_cg_regs = {
@@ -631,50 +645,24 @@ static const struct mtk_gate_regs ifr6_cg_regs = {
.sta_ofs = 0xd8,
};
-#define GATE_IFR2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_IFR3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_IFR4(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr4_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_IFR5(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr5_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_IFR6(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr6_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFRX(_id, _name, _parent, _shift, _regs) \
+ GATE_MTK(_id, _name, _parent, _regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_IFR2(_id, _name, _parent, _shift) \
+ GATE_IFRX(_id, _name, _parent, _shift, &ifr2_cg_regs)
+
+#define GATE_IFR3(_id, _name, _parent, _shift) \
+ GATE_IFRX(_id, _name, _parent, _shift, &ifr3_cg_regs)
+
+#define GATE_IFR4(_id, _name, _parent, _shift) \
+ GATE_IFRX(_id, _name, _parent, _shift, &ifr4_cg_regs)
+
+#define GATE_IFR5(_id, _name, _parent, _shift) \
+ GATE_IFRX(_id, _name, _parent, _shift, &ifr5_cg_regs)
+
+#define GATE_IFR6(_id, _name, _parent, _shift) \
+ GATE_IFRX(_id, _name, _parent, _shift, &ifr6_cg_regs)
static const struct mtk_gate ifr_clks[] = {
/* IFR2 */
@@ -753,403 +741,65 @@ static const struct mtk_gate ifr_clks[] = {
GATE_IFR6(CLK_IFR_SSUSB_XHCI, "ifr_ssusb_xhci", "ssusb_xhci_sel", 11),
};
-static const struct mtk_simple_gate peri_clks[] = {
- { CLK_PERIAXI, "periaxi", "axi_sel", 0x20c, 31, 0 },
-};
-
-#define MT8365_PLL_FMAX (3800UL * MHZ)
-#define MT8365_PLL_FMIN (1500UL * MHZ)
-#define CON0_MT8365_RST_BAR BIT(23)
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
- _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table, \
- _rst_bar_mask, _pcw_chg_reg) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = _rst_bar_mask, \
- .fmax = MT8365_PLL_FMAX, \
- .fmin = MT8365_PLL_FMIN, \
- .pcwbits = _pcwbits, \
- .pcwibits = 8, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .tuner_en_reg = _tuner_en_reg, \
- .tuner_en_bit = _tuner_en_bit, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .pcw_chg_reg = _pcw_chg_reg, \
- .div_table = _div_table, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, \
- _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
- _pcw_shift, _rst_bar_mask, _pcw_chg_reg) \
- PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
- _pcwbits, _pd_reg, _pd_shift, \
- _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
- _pcw_reg, _pcw_shift, NULL, _rst_bar_mask, \
- _pcw_chg_reg) \
-
-static const struct mtk_pll_div_table armpll_div_table[] = {
- { .div = 0, .freq = MT8365_PLL_FMAX },
- { .div = 1, .freq = 1500 * MHZ },
- { .div = 2, .freq = 750 * MHZ },
- { .div = 3, .freq = 375 * MHZ },
- { .div = 4, .freq = 182500000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_div_table mfgpll_div_table[] = {
- { .div = 0, .freq = MT8365_PLL_FMAX },
- { .div = 1, .freq = 1600 * MHZ },
- { .div = 2, .freq = 800 * MHZ },
- { .div = 3, .freq = 400 * MHZ },
- { .div = 4, .freq = 200 * MHZ },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_div_table dsppll_div_table[] = {
- { .div = 0, .freq = MT8365_PLL_FMAX },
- { .div = 1, .freq = 1600 * MHZ },
- { .div = 2, .freq = 600 * MHZ },
- { .div = 3, .freq = 400 * MHZ },
- { .div = 4, .freq = 200 * MHZ },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_data plls[] = {
- PLL_B(CLK_APMIXED_ARMPLL, "armpll", 0x030C, 0x0318, 0x00000001, PLL_AO,
- 22, 0x0310, 24, 0, 0, 0, 0x0310, 0, armpll_div_table, 0, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0228, 0x0234, 0xFF000001,
- HAVE_RST_BAR, 22, 0x022C, 24, 0, 0, 0, 0x022C, 0,
- CON0_MT8365_RST_BAR, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll2", 0x0208, 0x0214, 0xFF000001,
- HAVE_RST_BAR, 22, 0x020C, 24, 0, 0, 0, 0x020C, 0,
- CON0_MT8365_RST_BAR, 0),
- PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0218, 0x0224, 0x00000001, 0, 22,
- 0x021C, 24, 0, 0, 0, 0x021C, 0, mfgpll_div_table, 0, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035C, 0x00000001, 0, 22,
- 0x0354, 24, 0, 0, 0, 0x0354, 0, 0, 0),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0330, 0x033C, 0x00000001, 0, 22,
- 0x0334, 24, 0, 0, 0, 0x0334, 0, 0, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x031C, 0x032C, 0x00000001, 0, 32,
- 0x0320, 24, 0x0040, 0x000C, 0, 0x0324, 0, 0, 0x0320),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x0360, 0x0370, 0x00000001, 0, 32,
- 0x0364, 24, 0x004C, 0x000C, 5, 0x0368, 0, 0, 0x0364),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0374, 0x0380, 0x00000001, 0, 22,
- 0x0378, 24, 0, 0, 0, 0x0378, 0, 0, 0),
- PLL_B(CLK_APMIXED_DSPPLL, "dsppll", 0x0390, 0x039C, 0x00000001, 0, 22,
- 0x0394, 24, 0, 0, 0, 0x0394, 0, dsppll_div_table, 0, 0),
- PLL(CLK_APMIXED_APUPLL, "apupll", 0x03A0, 0x03AC, 0x00000001, 0, 22,
- 0x03A4, 24, 0, 0, 0, 0x03A4, 0, 0, 0),
-};
-
-static int clk_mt8365_apmixed_probe(struct platform_device *pdev)
-{
- void __iomem *base;
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct clk_hw *hw;
- int ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- hw = devm_clk_hw_register_gate(dev, "univ_en", "univpll2", 0,
- base + 0x204, 0, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
- clk_data->hws[CLK_APMIXED_UNIV_EN] = hw;
-
- hw = devm_clk_hw_register_gate(dev, "usb20_en", "univ_en", 0,
- base + 0x204, 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
- clk_data->hws[CLK_APMIXED_USB20_EN] = hw;
-
- ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- if (ret)
- return ret;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto unregister_plls;
-
- return 0;
-
-unregister_plls:
- mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
-
- return ret;
-}
-
-static int
-clk_mt8365_register_mtk_simple_gates(struct device *dev, void __iomem *base,
- struct clk_hw_onecell_data *clk_data,
- const struct mtk_simple_gate *gates,
- unsigned int num_gates)
-{
- unsigned int i;
-
- for (i = 0; i != num_gates; ++i) {
- const struct mtk_simple_gate *gate = &gates[i];
- struct clk_hw *hw;
-
- hw = devm_clk_hw_register_gate(dev, gate->name, gate->parent, 0,
- base + gate->reg, gate->shift,
- gate->gate_flags, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- clk_data->hws[gate->id] = hw;
- }
-
- return 0;
-}
-
-static int clk_mt8365_top_probe(struct platform_device *pdev)
-{
- void __iomem *base;
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- int ret;
- int i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- ret = mtk_clk_register_fixed_clks(top_fixed_clks,
- ARRAY_SIZE(top_fixed_clks), clk_data);
- if (ret)
- goto free_clk_data;
-
- ret = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
- clk_data);
- if (ret)
- goto unregister_fixed_clks;
-
- ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
- &mt8365_clk_lock, clk_data);
- if (ret)
- goto unregister_factors;
-
- ret = mtk_clk_register_composites(top_misc_mux_gates,
- ARRAY_SIZE(top_misc_mux_gates), base,
- &mt8365_clk_lock, clk_data);
- if (ret)
- goto unregister_muxes;
-
- for (i = 0; i != ARRAY_SIZE(top_misc_muxes); ++i) {
- struct mt8365_clk_audio_mux *mux = &top_misc_muxes[i];
- struct clk_hw *hw;
-
- hw = devm_clk_hw_register_mux(dev, mux->name, apll_i2s0_parents,
- ARRAY_SIZE(apll_i2s0_parents),
- CLK_SET_RATE_PARENT, base + 0x320,
- mux->shift, 1, 0, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
- goto unregister_composites;
- }
-
- clk_data->hws[mux->id] = hw;
- }
-
- ret = mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
- base, &mt8365_clk_lock, clk_data);
- if (ret)
- goto unregister_composites;
-
- ret = clk_mt8365_register_mtk_simple_gates(dev, base, clk_data,
- top_clk_gates,
- ARRAY_SIZE(top_clk_gates));
- if (ret)
- goto unregister_dividers;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto unregister_dividers;
-
- return 0;
-unregister_dividers:
- mtk_clk_unregister_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
- clk_data);
-unregister_composites:
- mtk_clk_unregister_composites(top_misc_mux_gates,
- ARRAY_SIZE(top_misc_mux_gates), clk_data);
-unregister_muxes:
- mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
-unregister_factors:
- mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
-unregister_fixed_clks:
- mtk_clk_unregister_fixed_clks(top_fixed_clks,
- ARRAY_SIZE(top_fixed_clks), clk_data);
-free_clk_data:
- mtk_free_clk_data(clk_data);
-
- return ret;
-}
-
-static int clk_mt8365_infra_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int ret;
-
- clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- ret = mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
- clk_data);
- if (ret)
- goto free_clk_data;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto unregister_gates;
-
- return 0;
-
-unregister_gates:
- mtk_clk_unregister_gates(ifr_clks, ARRAY_SIZE(ifr_clks), clk_data);
-free_clk_data:
- mtk_free_clk_data(clk_data);
-
- return ret;
-}
-
-static int clk_mt8365_peri_probe(struct platform_device *pdev)
-{
- void __iomem *base;
- struct clk_hw_onecell_data *clk_data;
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- int ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_devm_alloc_clk_data(dev, CLK_PERI_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
-
- ret = clk_mt8365_register_mtk_simple_gates(dev, base, clk_data,
- peri_clks,
- ARRAY_SIZE(peri_clks));
- if (ret)
- return ret;
-
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- return ret;
-}
-
-static int clk_mt8365_mcu_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- void __iomem *base;
- int ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
- if (!clk_data)
- return -ENOMEM;
+static const struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = 0x20c,
+ .clr_ofs = 0x20c,
+ .sta_ofs = 0x20c,
+};
- ret = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
- base, &mt8365_clk_lock, clk_data);
- if (ret)
- goto free_clk_data;
+static const struct mtk_gate peri_clks[] = {
+ GATE_MTK(CLK_PERIAXI, "periaxi", "axi_sel", &peri_cg_regs, 31,
+ &mtk_clk_gate_ops_no_setclr),
+};
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (ret)
- goto unregister_composites;
+static const struct mtk_clk_desc topck_desc = {
+ .clks = top_clk_gates,
+ .num_clks = ARRAY_SIZE(top_clk_gates),
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_misc_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_misc_muxes),
+ .divider_clks = top_adj_divs,
+ .num_divider_clks = ARRAY_SIZE(top_adj_divs),
+ .clk_lock = &mt8365_clk_lock,
+};
- return 0;
+static const struct mtk_clk_desc infra_desc = {
+ .clks = ifr_clks,
+ .num_clks = ARRAY_SIZE(ifr_clks),
+};
-unregister_composites:
- mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
- clk_data);
-free_clk_data:
- mtk_free_clk_data(clk_data);
+static const struct mtk_clk_desc peri_desc = {
+ .clks = peri_clks,
+ .num_clks = ARRAY_SIZE(peri_clks),
+};
- return ret;
-}
+static const struct mtk_clk_desc mcu_desc = {
+ .composite_clks = mcu_muxes,
+ .num_composite_clks = ARRAY_SIZE(mcu_muxes),
+ .clk_lock = &mt8365_clk_lock,
+};
static const struct of_device_id of_match_clk_mt8365[] = {
- {
- .compatible = "mediatek,mt8365-apmixedsys",
- .data = clk_mt8365_apmixed_probe,
- }, {
- .compatible = "mediatek,mt8365-topckgen",
- .data = clk_mt8365_top_probe,
- }, {
- .compatible = "mediatek,mt8365-infracfg",
- .data = clk_mt8365_infra_probe,
- }, {
- .compatible = "mediatek,mt8365-pericfg",
- .data = clk_mt8365_peri_probe,
- }, {
- .compatible = "mediatek,mt8365-mcucfg",
- .data = clk_mt8365_mcu_probe,
- }, {
- /* sentinel */
- }
-};
-
-static int clk_mt8365_probe(struct platform_device *pdev)
-{
- int (*clk_probe)(struct platform_device *pdev);
- int ret;
-
- clk_probe = of_device_get_match_data(&pdev->dev);
- if (!clk_probe)
- return -EINVAL;
-
- ret = clk_probe(pdev);
- if (ret)
- dev_err(&pdev->dev,
- "%s: could not register clock provider: %d\n",
- pdev->name, ret);
-
- return ret;
-}
+ { .compatible = "mediatek,mt8365-topckgen", .data = &topck_desc },
+ { .compatible = "mediatek,mt8365-infracfg", .data = &infra_desc },
+ { .compatible = "mediatek,mt8365-pericfg", .data = &peri_desc },
+ { .compatible = "mediatek,mt8365-mcucfg", .data = &mcu_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8365);
static struct platform_driver clk_mt8365_drv = {
- .probe = clk_mt8365_probe,
.driver = {
.name = "clk-mt8365",
.of_match_table = of_match_clk_mt8365,
},
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
};
-
-static int __init clk_mt8365_init(void)
-{
- return platform_driver_register(&clk_mt8365_drv);
-}
-arch_initcall(clk_mt8365_init);
+module_platform_driver(clk_mt8365_drv);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8516-apmixedsys.c b/drivers/clk/mediatek/clk-mt8516-apmixedsys.c
new file mode 100644
index 000000000000..edd9174d2f2f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8516-apmixedsys.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ *
+ * Copyright (c) 2023 Collabora, Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mt8516-clk.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MT8516_PLL_FMAX (1502UL * MHZ)
+
+#define CON0_MT8516_RST_BAR BIT(27)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8516_RST_BAR, \
+ .fmax = MT8516_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8516_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 604500000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0,
+ 21, 0x0104, 24, 0, 0x0104, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0,
+ HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000,
+ HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0,
+ 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0,
+ 31, 0x0180, 1, 0x0194, 0x0184, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0,
+ 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+};
+
+static int clk_mt8516_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static const struct of_device_id of_match_clk_mt8516_apmixed[] = {
+ { .compatible = "mediatek,mt8516-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8516_apmixed);
+
+static struct platform_driver clk_mt8516_apmixed_drv = {
+ .probe = clk_mt8516_apmixed_probe,
+ .driver = {
+ .name = "clk-mt8516-apmixed",
+ .of_match_table = of_match_clk_mt8516_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8516_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8516 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index 90f48068a8de..48340fc7430d 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019 MediaTek Inc.
* Author: James Liao <jamesjj.liao@mediatek.com>
* Fabien Parent <fparent@baylibre.com>
+ * Copyright (c) 2023 Collabora Ltd.
*/
#include <linux/clk-provider.h>
@@ -22,16 +23,10 @@ static const struct mtk_gate_regs aud_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_AUD(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &aud_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUD(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
-static const struct mtk_gate aud_clks[] __initconst = {
+static const struct mtk_gate aud_clks[] = {
GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
GATE_AUD(CLK_AUD_I2S, "aud_i2s", "i2s_infra_bck", 6),
GATE_AUD(CLK_AUD_22M, "aud_22m", "rg_aud_engen1", 8),
@@ -47,19 +42,26 @@ static const struct mtk_gate aud_clks[] __initconst = {
GATE_AUD(CLK_AUD_TML, "aud_tml", "aud_afe", 27),
};
-static void __init mtk_audsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+static const struct mtk_clk_desc aud_desc = {
+ .clks = aud_clks,
+ .num_clks = ARRAY_SIZE(aud_clks),
+};
- mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+static const struct of_device_id of_match_clk_mt8516_aud[] = {
+ { .compatible = "mediatek,mt8516-audsys", .data = &aud_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8516_aud);
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static struct platform_driver clk_mt8516_aud_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8516-aud",
+ .of_match_table = of_match_clk_mt8516_aud,
+ },
+};
+module_platform_driver(clk_mt8516_aud_drv);
-}
-CLK_OF_DECLARE(mtk_audsys, "mediatek,mt8516-audsys", mtk_audsys_init);
+MODULE_DESCRIPTION("MediaTek MT8516 audiosys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index b96db88893e2..21eb052b0a53 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019 MediaTek Inc.
* Author: James Liao <jamesjj.liao@mediatek.com>
* Fabien Parent <fparent@baylibre.com>
+ * Copyright (c) 2023 Collabora Ltd.
*/
#include <linux/delay.h>
@@ -10,10 +11,10 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include "clk-gate.h"
#include "clk-mtk.h"
-#include "clk-pll.h"
#include <dt-bindings/clock/mt8516-clk.h>
@@ -525,59 +526,23 @@ static const struct mtk_gate_regs top5_cg_regs = {
.sta_ofs = 0x44,
};
-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP2_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-#define GATE_TOP3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_TOP4_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top4_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
-
-#define GATE_TOP5(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top5_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_TOP3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top4_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_TOP5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate top_clks[] __initconst = {
/* TOP1 */
@@ -675,137 +640,42 @@ static const struct mtk_gate top_clks[] __initconst = {
GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8),
};
-static void __init mtk_topckgen_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- void __iomem *base;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
- mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
- clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
-
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8516_clk_lock, clk_data);
- mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
- base, &mt8516_clk_lock, clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init);
-
-static void __init mtk_infracfg_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- void __iomem *base;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
+static const struct mtk_clk_desc topck_desc = {
+ .clks = top_clks,
+ .num_clks = ARRAY_SIZE(top_clks),
+ .fixed_clks = fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .composite_clks = top_muxes,
+ .num_composite_clks = ARRAY_SIZE(top_muxes),
+ .divider_clks = top_adj_divs,
+ .num_divider_clks = ARRAY_SIZE(top_adj_divs),
+ .clk_lock = &mt8516_clk_lock,
+};
- clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+static const struct mtk_clk_desc infra_desc = {
+ .composite_clks = ifr_muxes,
+ .num_composite_clks = ARRAY_SIZE(ifr_muxes),
+ .clk_lock = &mt8516_clk_lock,
+};
- mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
- &mt8516_clk_lock, clk_data);
+static const struct of_device_id of_match_clk_mt8516[] = {
+ { .compatible = "mediatek,mt8516-topckgen", .data = &topck_desc },
+ { .compatible = "mediatek,mt8516-infracfg", .data = &infra_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8516);
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8516-infracfg", mtk_infracfg_init);
-
-#define MT8516_PLL_FMAX (1502UL * MHZ)
-
-#define CON0_MT8516_RST_BAR BIT(27)
-
-#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift, _div_table) { \
- .id = _id, \
- .name = _name, \
- .reg = _reg, \
- .pwr_reg = _pwr_reg, \
- .en_mask = _en_mask, \
- .flags = _flags, \
- .rst_bar_mask = CON0_MT8516_RST_BAR, \
- .fmax = MT8516_PLL_FMAX, \
- .pcwbits = _pcwbits, \
- .pd_reg = _pd_reg, \
- .pd_shift = _pd_shift, \
- .tuner_reg = _tuner_reg, \
- .pcw_reg = _pcw_reg, \
- .pcw_shift = _pcw_shift, \
- .div_table = _div_table, \
- }
-
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
- _pcw_shift) \
- PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
- _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
- NULL)
-
-static const struct mtk_pll_div_table mmpll_div_table[] = {
- { .div = 0, .freq = MT8516_PLL_FMAX },
- { .div = 1, .freq = 1000000000 },
- { .div = 2, .freq = 604500000 },
- { .div = 3, .freq = 253500000 },
- { .div = 4, .freq = 126750000 },
- { } /* sentinel */
-};
-
-static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0,
- 21, 0x0104, 24, 0, 0x0104, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0,
- HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000,
- HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0,
- 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0,
- 31, 0x0180, 1, 0x0194, 0x0184, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0,
- 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
-};
-
-static void __init mtk_apmixedsys_init(struct device_node *node)
-{
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base;
- int r;
-
- base = of_iomap(node, 0);
- if (!base) {
- pr_err("%s(): ioremap failed\n", __func__);
- return;
- }
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+static struct platform_driver clk_mt8516_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8516",
+ .of_match_table = of_match_clk_mt8516,
+ },
+};
+module_platform_driver(clk_mt8516_drv);
-}
-CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8516-apmixedsys",
- mtk_apmixedsys_init);
+MODULE_DESCRIPTION("MediaTek MT8516 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 3c1ac8d3010f..fd2214c3242f 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -11,12 +11,29 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "clk-mtk.h"
#include "clk-gate.h"
+#include "clk-mux.h"
+
+const struct mtk_gate_regs cg_regs_dummy = { 0, 0, 0 };
+EXPORT_SYMBOL_GPL(cg_regs_dummy);
+
+static int mtk_clk_dummy_enable(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void mtk_clk_dummy_disable(struct clk_hw *hw) { }
+
+const struct clk_ops mtk_clk_dummy_ops = {
+ .enable = mtk_clk_dummy_enable,
+ .disable = mtk_clk_dummy_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_dummy_ops);
static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data,
unsigned int clk_num)
@@ -197,8 +214,8 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_factors);
-static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
- void __iomem *base, spinlock_t *lock)
+static struct clk_hw *mtk_clk_register_composite(struct device *dev,
+ const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock)
{
struct clk_hw *hw;
struct clk_mux *mux = NULL;
@@ -264,7 +281,7 @@ static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
div_ops = &clk_divider_ops;
}
- hw = clk_hw_register_composite(NULL, mc->name, parent_names, num_parents,
+ hw = clk_hw_register_composite(dev, mc->name, parent_names, num_parents,
mux_hw, mux_ops,
div_hw, div_ops,
gate_hw, gate_ops,
@@ -308,7 +325,8 @@ static void mtk_clk_unregister_composite(struct clk_hw *hw)
kfree(mux);
}
-int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+int mtk_clk_register_composites(struct device *dev,
+ const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
struct clk_hw_onecell_data *clk_data)
{
@@ -327,7 +345,7 @@ int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
continue;
}
- hw = mtk_clk_register_composite(mc, base, lock);
+ hw = mtk_clk_register_composite(dev, mc, base, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mc->name,
@@ -375,7 +393,8 @@ void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_composites);
-int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
+int mtk_clk_register_dividers(struct device *dev,
+ const struct mtk_clk_divider *mcds, int num,
void __iomem *base, spinlock_t *lock,
struct clk_hw_onecell_data *clk_data)
{
@@ -394,7 +413,7 @@ int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
continue;
}
- hw = clk_hw_register_divider(NULL, mcd->name, mcd->parent_name,
+ hw = clk_hw_register_divider(dev, mcd->name, mcd->parent_name,
mcd->flags, base + mcd->div_reg, mcd->div_shift,
mcd->div_width, mcd->clk_divider_flags, lock);
@@ -444,25 +463,101 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_dividers);
-int mtk_clk_simple_probe(struct platform_device *pdev)
+static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ struct device_node *node)
{
+ const struct platform_device_id *id;
const struct mtk_clk_desc *mcd;
struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
- int r;
+ void __iomem *base;
+ int num_clks, r;
+
+ mcd = device_get_match_data(&pdev->dev);
+ if (!mcd) {
+ /* Clock driver wasn't registered from devicetree */
+ id = platform_get_device_id(pdev);
+ if (id)
+ mcd = (const struct mtk_clk_desc *)id->driver_data;
+
+ if (!mcd)
+ return -EINVAL;
+ }
- mcd = of_device_get_match_data(&pdev->dev);
- if (!mcd)
- return -EINVAL;
+	/* Composite clocks need us to pass the iomem pointer */
+ if (mcd->composite_clks) {
+ if (!mcd->shared_io)
+ base = devm_platform_ioremap_resource(pdev, 0);
+ else
+ base = of_iomap(node, 0);
+
+ if (IS_ERR_OR_NULL(base))
+ return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
+ }
- clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ /* Calculate how many clk_hw_onecell_data entries to allocate */
+ num_clks = mcd->num_clks + mcd->num_composite_clks;
+ num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
+ num_clks += mcd->num_mux_clks + mcd->num_divider_clks;
+
+ clk_data = mtk_alloc_clk_data(num_clks);
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates_with_dev(node, mcd->clks, mcd->num_clks,
- clk_data, &pdev->dev);
- if (r)
- goto free_data;
+ if (mcd->fixed_clks) {
+ r = mtk_clk_register_fixed_clks(mcd->fixed_clks,
+ mcd->num_fixed_clks, clk_data);
+ if (r)
+ goto free_data;
+ }
+
+ if (mcd->factor_clks) {
+ r = mtk_clk_register_factors(mcd->factor_clks,
+ mcd->num_factor_clks, clk_data);
+ if (r)
+ goto unregister_fixed_clks;
+ }
+
+ if (mcd->mux_clks) {
+ r = mtk_clk_register_muxes(&pdev->dev, mcd->mux_clks,
+ mcd->num_mux_clks, node,
+ mcd->clk_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+ }
+
+ if (mcd->composite_clks) {
+		/* We don't check clk_lock because it's optional */
+ r = mtk_clk_register_composites(&pdev->dev,
+ mcd->composite_clks,
+ mcd->num_composite_clks,
+ base, mcd->clk_lock, clk_data);
+ if (r)
+ goto unregister_muxes;
+ }
+
+ if (mcd->divider_clks) {
+ r = mtk_clk_register_dividers(&pdev->dev,
+ mcd->divider_clks,
+ mcd->num_divider_clks,
+ base, mcd->clk_lock, clk_data);
+ if (r)
+ goto unregister_composites;
+ }
+
+ if (mcd->clks) {
+ r = mtk_clk_register_gates(&pdev->dev, node, mcd->clks,
+ mcd->num_clks, clk_data);
+ if (r)
+ goto unregister_dividers;
+ }
+
+ if (mcd->clk_notifier_func) {
+ struct clk *mfg_mux = clk_data->hws[mcd->mfg_clk_idx]->clk;
+
+ r = mcd->clk_notifier_func(&pdev->dev, mfg_mux);
+ if (r)
+ goto unregister_clks;
+ }
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -480,25 +575,94 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
return r;
unregister_clks:
- mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+ if (mcd->clks)
+ mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+unregister_dividers:
+ if (mcd->divider_clks)
+ mtk_clk_unregister_dividers(mcd->divider_clks,
+ mcd->num_divider_clks, clk_data);
+unregister_composites:
+ if (mcd->composite_clks)
+ mtk_clk_unregister_composites(mcd->composite_clks,
+ mcd->num_composite_clks, clk_data);
+unregister_muxes:
+ if (mcd->mux_clks)
+ mtk_clk_unregister_muxes(mcd->mux_clks,
+ mcd->num_mux_clks, clk_data);
+unregister_factors:
+ if (mcd->factor_clks)
+ mtk_clk_unregister_factors(mcd->factor_clks,
+ mcd->num_factor_clks, clk_data);
+unregister_fixed_clks:
+ if (mcd->fixed_clks)
+ mtk_clk_unregister_fixed_clks(mcd->fixed_clks,
+ mcd->num_fixed_clks, clk_data);
free_data:
mtk_free_clk_data(clk_data);
+ if (mcd->shared_io && base)
+ iounmap(base);
return r;
}
-EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
-int mtk_clk_simple_remove(struct platform_device *pdev)
+static int __mtk_clk_simple_remove(struct platform_device *pdev,
+ struct device_node *node)
{
- const struct mtk_clk_desc *mcd = of_device_get_match_data(&pdev->dev);
struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
+ const struct mtk_clk_desc *mcd = device_get_match_data(&pdev->dev);
of_clk_del_provider(node);
- mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+ if (mcd->clks)
+ mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+ if (mcd->divider_clks)
+ mtk_clk_unregister_dividers(mcd->divider_clks,
+ mcd->num_divider_clks, clk_data);
+ if (mcd->composite_clks)
+ mtk_clk_unregister_composites(mcd->composite_clks,
+ mcd->num_composite_clks, clk_data);
+ if (mcd->mux_clks)
+ mtk_clk_unregister_muxes(mcd->mux_clks,
+ mcd->num_mux_clks, clk_data);
+ if (mcd->factor_clks)
+ mtk_clk_unregister_factors(mcd->factor_clks,
+ mcd->num_factor_clks, clk_data);
+ if (mcd->fixed_clks)
+ mtk_clk_unregister_fixed_clks(mcd->fixed_clks,
+ mcd->num_fixed_clks, clk_data);
mtk_free_clk_data(clk_data);
return 0;
}
+
+int mtk_clk_pdev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+
+ return __mtk_clk_simple_probe(pdev, node);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_pdev_probe);
+
+int mtk_clk_simple_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+
+ return __mtk_clk_simple_probe(pdev, node);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
+
+int mtk_clk_pdev_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+
+ return __mtk_clk_simple_remove(pdev, node);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_pdev_remove);
+
+int mtk_clk_simple_remove(struct platform_device *pdev)
+{
+ return __mtk_clk_simple_remove(pdev, pdev->dev.of_node);
+}
EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index f2db6b57d5b5..b7a751861fce 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -22,6 +22,25 @@
struct platform_device;
+/*
+ * We need the clock IDs to start from zero, but to maintain devicetree
+ * backwards compatibility we can't change the bindings to start from zero.
+ * Only a few platforms are affected, so we solve the issues raised by the
+ * commonized MTK clock probe function(s) by adding a dummy clock at
+ * the beginning where needed.
+ */
+#define CLK_DUMMY 0
+
+extern const struct clk_ops mtk_clk_dummy_ops;
+extern const struct mtk_gate_regs cg_regs_dummy;
+
+#define GATE_DUMMY(_id, _name) { \
+ .id = _id, \
+ .name = _name, \
+ .regs = &cg_regs_dummy, \
+ .ops = &mtk_clk_dummy_ops, \
+ }
+
struct mtk_fixed_clk {
int id;
const char *name;
@@ -154,7 +173,8 @@ struct mtk_composite {
.flags = 0, \
}
-int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+int mtk_clk_register_composites(struct device *dev,
+ const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
@@ -182,7 +202,8 @@ struct mtk_clk_divider {
.div_width = _width, \
}
-int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
+int mtk_clk_register_dividers(struct device *dev,
+ const struct mtk_clk_divider *mcds, int num,
void __iomem *base, spinlock_t *lock,
struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
@@ -200,9 +221,26 @@ void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw);
struct mtk_clk_desc {
const struct mtk_gate *clks;
size_t num_clks;
+ const struct mtk_composite *composite_clks;
+ size_t num_composite_clks;
+ const struct mtk_clk_divider *divider_clks;
+ size_t num_divider_clks;
+ const struct mtk_fixed_clk *fixed_clks;
+ size_t num_fixed_clks;
+ const struct mtk_fixed_factor *factor_clks;
+ size_t num_factor_clks;
+ const struct mtk_mux *mux_clks;
+ size_t num_mux_clks;
const struct mtk_clk_rst_desc *rst_desc;
+ spinlock_t *clk_lock;
+ bool shared_io;
+
+ int (*clk_notifier_func)(struct device *dev, struct clk *clk);
+ unsigned int mfg_clk_idx;
};
+int mtk_clk_pdev_probe(struct platform_device *pdev);
+int mtk_clk_pdev_remove(struct platform_device *pdev);
int mtk_clk_simple_probe(struct platform_device *pdev);
int mtk_clk_simple_remove(struct platform_device *pdev);
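For reference only, a minimal sketch (not part of this series) of how a SoC clock driver could fill in the extended mtk_clk_desc and reuse the common probe/remove helpers declared above; every mt_example_* identifier is hypothetical:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "clk-gate.h"
#include "clk-mtk.h"

static const struct mtk_gate mt_example_gates[] = {
	/* Keep index 0 occupied when the bindings start numbering at 1 */
	GATE_DUMMY(CLK_DUMMY, "example_dummy"),
};

static const struct mtk_clk_desc mt_example_desc = {
	.clks = mt_example_gates,
	.num_clks = ARRAY_SIZE(mt_example_gates),
};

static const struct of_device_id of_match_clk_mt_example[] = {
	{ .compatible = "mediatek,mt-example-sys", .data = &mt_example_desc },
	{ /* sentinel */ }
};

static struct platform_driver clk_mt_example_drv = {
	.probe = mtk_clk_simple_probe,
	.remove = mtk_clk_simple_remove,
	.driver = {
		.name = "clk-mt-example",
		.of_match_table = of_match_clk_mt_example,
	},
};
module_platform_driver(clk_mt_example_drv);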
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index ba1720b9e231..c8593554239d 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -154,9 +154,10 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
-static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
- struct regmap *regmap,
- spinlock_t *lock)
+static struct clk_hw *mtk_clk_register_mux(struct device *dev,
+ const struct mtk_mux *mux,
+ struct regmap *regmap,
+ spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
struct clk_init_data init = {};
@@ -177,7 +178,7 @@ static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
clk_mux->lock = lock;
clk_mux->hw.init = &init;
- ret = clk_hw_register(NULL, &clk_mux->hw);
+ ret = clk_hw_register(dev, &clk_mux->hw);
if (ret) {
kfree(clk_mux);
return ERR_PTR(ret);
@@ -198,7 +199,8 @@ static void mtk_clk_unregister_mux(struct clk_hw *hw)
kfree(mux);
}
-int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+int mtk_clk_register_muxes(struct device *dev,
+ const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
struct clk_hw_onecell_data *clk_data)
@@ -222,7 +224,7 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
continue;
}
- hw = mtk_clk_register_mux(mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 83ff420f4ebe..7ecb963b0ec6 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -83,7 +83,8 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
-int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+int mtk_clk_register_muxes(struct device *dev,
+ const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
struct clk_hw_onecell_data *clk_data);
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index f48780bec507..3a2b3f90be25 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -75,13 +75,13 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
base = of_iomap(node, 0);
if (!base) {
pr_err("%s(): ioremap failed\n", __func__);
- return;
+ goto out_node_put;
}
num_clocks = of_clk_get_parent_count(node);
if (!num_clocks) {
pr_err("%s(): failed to get clocks property\n", __func__);
- return;
+ goto err;
}
for (i = 0; i < num_clocks; i++) {
@@ -102,16 +102,26 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
pllfh->state.ssc_rate = ssc_rate;
pllfh->state.base = base;
}
+
+out_node_put:
+ of_node_put(node);
+ return;
+err:
+ iounmap(base);
+ goto out_node_put;
}
+EXPORT_SYMBOL_GPL(fhctl_parse_dt);
-static void pllfh_init(struct mtk_fh *fh, struct mtk_pllfh_data *pllfh_data)
+static int pllfh_init(struct mtk_fh *fh, struct mtk_pllfh_data *pllfh_data)
{
struct fh_pll_regs *regs = &fh->regs;
const struct fhctl_offset *offset;
void __iomem *base = pllfh_data->state.base;
void __iomem *fhx_base = base + pllfh_data->data.fhx_offset;
- offset = fhctl_get_offset_table();
+ offset = fhctl_get_offset_table(pllfh_data->data.fh_ver);
+ if (IS_ERR(offset))
+ return PTR_ERR(offset);
regs->reg_hp_en = base + offset->offset_hp_en;
regs->reg_clk_con = base + offset->offset_clk_con;
@@ -129,6 +139,8 @@ static void pllfh_init(struct mtk_fh *fh, struct mtk_pllfh_data *pllfh_data)
fh->lock = &pllfh_lock;
fh->ops = fhctl_get_ops();
+
+ return 0;
}
static bool fhctl_is_supported_and_enabled(const struct mtk_pllfh_data *pllfh)
@@ -142,20 +154,29 @@ mtk_clk_register_pllfh(const struct mtk_pll_data *pll_data,
{
struct clk_hw *hw;
struct mtk_fh *fh;
+ int ret;
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (!fh)
return ERR_PTR(-ENOMEM);
- pllfh_init(fh, pllfh_data);
+ ret = pllfh_init(fh, pllfh_data);
+ if (ret) {
+ hw = ERR_PTR(ret);
+ goto out;
+ }
hw = mtk_clk_register_pll_ops(&fh->clk_pll, pll_data, base,
&mtk_pllfh_ops);
if (IS_ERR(hw))
+ goto out;
+
+ fhctl_hw_init(fh);
+
+out:
+ if (IS_ERR(hw))
kfree(fh);
- else
- fhctl_hw_init(fh);
return hw;
}
@@ -234,6 +255,7 @@ err:
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_pllfhs);
void mtk_clk_unregister_pllfhs(const struct mtk_pll_data *plls, int num_plls,
struct mtk_pllfh_data *pllfhs, int num_fhs,
@@ -273,3 +295,4 @@ void mtk_clk_unregister_pllfhs(const struct mtk_pll_data *plls, int num_plls,
iounmap(base);
}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_pllfhs);
diff --git a/drivers/clk/mediatek/clk-pllfh.h b/drivers/clk/mediatek/clk-pllfh.h
index c0a6e1537034..5f419c2ec01f 100644
--- a/drivers/clk/mediatek/clk-pllfh.h
+++ b/drivers/clk/mediatek/clk-pllfh.h
@@ -18,6 +18,7 @@ struct fh_pll_state {
struct fh_pll_data {
int pll_id;
int fh_id;
+ int fh_ver;
u32 fhx_offset;
u32 dds_mask;
u32 slope0_value;
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index 36976927fe82..8778c149d26a 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -27,14 +27,13 @@ static unsigned long meson_clk_cpu_dyndiv_recalc_rate(struct clk_hw *hw,
NULL, 0, data->div.width);
}
-static long meson_clk_cpu_dyndiv_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int meson_clk_cpu_dyndiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_cpu_dyndiv_data *data = meson_clk_cpu_dyndiv_data(clk);
- return divider_round_rate(hw, rate, prate, NULL, data->div.width, 0);
+ return divider_determine_rate(hw, req, NULL, data->div.width, 0);
}
static int meson_clk_cpu_dyndiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +62,7 @@ static int meson_clk_cpu_dyndiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops meson_clk_cpu_dyndiv_ops = {
.recalc_rate = meson_clk_cpu_dyndiv_recalc_rate,
- .round_rate = meson_clk_cpu_dyndiv_round_rate,
+ .determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
};
EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops);
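The Meson round_rate to determine_rate conversions (above and in the following files) share the same shape: drop .round_rate, write the achievable rate back into the clk_rate_request, and return a status code. A minimal sketch of that pattern follows, with purely hypothetical divider math; the example_* names are not from the patch:

#include <linux/clk-provider.h>
#include <linux/math.h>

static int example_div_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	unsigned long div;

	if (!req->rate)
		req->rate = 1;

	/* Choose a divider for the requested rate against the parent rate */
	div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
	if (!div)
		div = 1;

	/* Report the rate the hardware would actually produce */
	req->rate = DIV_ROUND_UP(req->best_parent_rate, div);

	return 0;
}

static const struct clk_ops example_div_ops = {
	.determine_rate = example_div_determine_rate,	/* was .round_rate */
};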
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index c5ca23a5e3e8..feae49a8f6dc 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -86,18 +86,23 @@ __dualdiv_get_setting(unsigned long rate, unsigned long parent_rate,
return (struct meson_clk_dualdiv_param *)&table[best_i];
}
-static long meson_clk_dualdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int meson_clk_dualdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
- const struct meson_clk_dualdiv_param *setting =
- __dualdiv_get_setting(rate, *parent_rate, dualdiv);
+ const struct meson_clk_dualdiv_param *setting;
- if (!setting)
- return meson_clk_dualdiv_recalc_rate(hw, *parent_rate);
+ setting = __dualdiv_get_setting(req->rate, req->best_parent_rate,
+ dualdiv);
+ if (setting)
+ req->rate = __dualdiv_param_to_rate(req->best_parent_rate,
+ setting);
+ else
+ req->rate = meson_clk_dualdiv_recalc_rate(hw,
+ req->best_parent_rate);
- return __dualdiv_param_to_rate(*parent_rate, setting);
+ return 0;
}
static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -122,7 +127,7 @@ static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops meson_clk_dualdiv_ops = {
.recalc_rate = meson_clk_dualdiv_recalc_rate,
- .round_rate = meson_clk_dualdiv_round_rate,
+ .determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
};
EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index fc9df4860872..20255e129b37 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -87,16 +87,22 @@ static unsigned long mpll_recalc_rate(struct clk_hw *hw,
return rate < 0 ? 0 : rate;
}
-static long mpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int mpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
unsigned int sdm, n2;
+ long rate;
+
+ params_from_rate(req->rate, req->best_parent_rate, &sdm, &n2,
+ mpll->flags);
- params_from_rate(rate, *parent_rate, &sdm, &n2, mpll->flags);
- return rate_from_params(*parent_rate, sdm, n2);
+ rate = rate_from_params(req->best_parent_rate, sdm, n2);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+ return 0;
}
static int mpll_set_rate(struct clk_hw *hw,
@@ -157,13 +163,13 @@ static int mpll_init(struct clk_hw *hw)
const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
- .round_rate = mpll_round_rate,
+ .determine_rate = mpll_determine_rate,
};
EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
- .round_rate = mpll_round_rate,
+ .determine_rate = mpll_determine_rate,
.set_rate = mpll_set_rate,
.init = mpll_init,
};
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 76d31c0a3342..d12c45c4c261 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -96,16 +96,17 @@ static int sclk_div_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long sclk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sclk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
int div;
- div = sclk_div_bestdiv(hw, rate, prate, sclk);
+ div = sclk_div_bestdiv(hw, req->rate, &req->best_parent_rate, sclk);
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return 0;
}
static void sclk_apply_ratio(struct clk_regmap *clk,
@@ -237,7 +238,7 @@ static int sclk_div_init(struct clk_hw *hw)
const struct clk_ops meson_sclk_div_ops = {
.recalc_rate = sclk_div_recalc_rate,
- .round_rate = sclk_div_round_rate,
+ .determine_rate = sclk_div_determine_rate,
.set_rate = sclk_div_set_rate,
.enable = sclk_div_enable,
.disable = sclk_div_disable,
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index 32aae880a14f..bce61c45e967 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
- char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+ char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
if (!name)
return -ENOMEM;
- snprintf(name, 23, "%s_out%u", parent->name, i);
out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
- char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
- if (!name)
+ pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+ strchrnul(dev->of_node->full_name, '@'), i);
+ if (!pll_hw->name)
return -ENOMEM;
pll_hw->base = data->pll_base[i];
- snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
- pll_hw->name = (const char *)name;
pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
pll_hw->parents,
&mpfs_ccc_pll_ops, 0);
@@ -293,4 +291,3 @@ module_exit(clk_ccc_exit);
MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 4f0a19db7ed7..c8ffa755b58d 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -374,14 +374,13 @@ static void mpfs_reset_unregister_adev(void *_adev)
struct auxiliary_device *adev = _adev;
auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
static void mpfs_reset_adev_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
- auxiliary_device_uninit(adev);
-
kfree(adev);
}
@@ -513,4 +512,3 @@ MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index 7aa7f4a9564f..6fb1aa9487b5 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -384,12 +384,10 @@ disable_pm_runtime:
return ret;
}
-static int mmp2_audio_clk_remove(struct platform_device *pdev)
+static void mmp2_audio_clk_remove(struct platform_device *pdev)
{
pm_clk_destroy(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -436,7 +434,7 @@ static struct platform_driver mmp2_audio_clk_driver = {
.pm = &mmp2_audio_clk_pm_ops,
},
.probe = mmp2_audio_clk_probe,
- .remove = mmp2_audio_clk_remove,
+ .remove_new = mmp2_audio_clk_remove,
};
module_platform_driver(mmp2_audio_clk_driver);
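Several drivers below move from .remove to .remove_new, whose callback returns void because the driver core largely ignored the old return value. A minimal, hypothetical sketch of the converted form (example_* names are illustrative only):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_clk_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_clk_remove(struct platform_device *pdev)
{
	/* Undo what probe did; there is no meaningful error to return */
}

static struct platform_driver example_clk_driver = {
	.probe = example_clk_probe,
	.remove_new = example_clk_remove,	/* was .remove returning int */
	.driver = {
		.name = "example-clk",
	},
};
module_platform_driver(example_clk_driver);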
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index e3777ca65912..3ae6078f6ff7 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -781,7 +781,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
return 0;
}
-static int armada_3700_periph_clock_remove(struct platform_device *pdev)
+static void armada_3700_periph_clock_remove(struct platform_device *pdev)
{
struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
struct clk_hw_onecell_data *hw_data = data->hw_data;
@@ -791,13 +791,11 @@ static int armada_3700_periph_clock_remove(struct platform_device *pdev)
for (i = 0; i < hw_data->num; i++)
clk_hw_unregister(hw_data->hws[i]);
-
- return 0;
}
static struct platform_driver armada_3700_periph_clock_driver = {
.probe = armada_3700_periph_clock_probe,
- .remove = armada_3700_periph_clock_remove,
+ .remove_new = armada_3700_periph_clock_remove,
.driver = {
.name = "marvell-armada-3700-periph-clock",
.of_match_table = armada_3700_periph_clock_of_match,
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index fc403ad735ad..eccc1aeefbaf 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -126,7 +126,7 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data);
}
-static int armada_3700_tbg_clock_remove(struct platform_device *pdev)
+static void armada_3700_tbg_clock_remove(struct platform_device *pdev)
{
int i;
struct clk_hw_onecell_data *hw_tbg_data = platform_get_drvdata(pdev);
@@ -134,8 +134,6 @@ static int armada_3700_tbg_clock_remove(struct platform_device *pdev)
of_clk_del_provider(pdev->dev.of_node);
for (i = 0; i < hw_tbg_data->num; i++)
clk_hw_unregister_fixed_factor(hw_tbg_data->hws[i]);
-
- return 0;
}
static const struct of_device_id armada_3700_tbg_clock_of_match[] = {
@@ -145,7 +143,7 @@ static const struct of_device_id armada_3700_tbg_clock_of_match[] = {
static struct platform_driver armada_3700_tbg_clock_driver = {
.probe = armada_3700_tbg_clock_probe,
- .remove = armada_3700_tbg_clock_remove,
+ .remove_new = armada_3700_tbg_clock_remove,
.driver = {
.name = "marvell-armada-3700-tbg-clock",
.of_match_table = armada_3700_tbg_clock_of_match,
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index 41271351cf1f..0e2e7d00ae11 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -65,11 +65,9 @@ static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
return ret;
}
-static int armada_3700_xtal_clock_remove(struct platform_device *pdev)
+static void armada_3700_xtal_clock_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
-
- return 0;
}
static const struct of_device_id armada_3700_xtal_clock_of_match[] = {
@@ -79,7 +77,7 @@ static const struct of_device_id armada_3700_xtal_clock_of_match[] = {
static struct platform_driver armada_3700_xtal_clock_driver = {
.probe = armada_3700_xtal_clock_probe,
- .remove = armada_3700_xtal_clock_remove,
+ .remove_new = armada_3700_xtal_clock_remove,
.driver = {
.name = "marvell-armada-3700-xtal-clock",
.of_match_table = armada_3700_xtal_clock_of_match,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 70d43f0a8919..12be3e2371b3 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -141,6 +141,14 @@ config IPQ_GCC_4019
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config IPQ_GCC_5332
+ tristate "IPQ5332 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for the global clock controller on ipq5332 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
config IPQ_GCC_6018
tristate "IPQ6018 Global Clock Controller"
help
@@ -173,6 +181,14 @@ config IPQ_GCC_8074
i2c, USB, SD/eMMC, etc. Select this for the root clock
of ipq8074.
+config IPQ_GCC_9574
+ tristate "IPQ9574 Global Clock Controller"
+ help
+ Support for the global clock controller on ipq9574 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq9574.
+
config MSM_GCC_8660
tristate "MSM8660 Global Clock Controller"
help
@@ -196,6 +212,16 @@ config MSM_GCC_8916
Say Y if you want to use devices such as UART, SPI i2c, USB,
SD/eMMC, display, graphics, camera etc.
+config MSM_GCC_8917
+ tristate "MSM8917/QM215 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8917 and qm215
+ devices.
+ Say Y if you want to use devices such as UART, SPI i2c, USB,
+ SD/eMMC, display, graphics, camera etc.
+
config MSM_GCC_8939
tristate "MSM8939 Global Clock Controller"
select QCOM_GDSC
@@ -410,6 +436,24 @@ config SC_DISPCC_8280XP
Say Y if you want to support display devices and functionality such as
splash screen.
+config SA_GCC_8775P
+ tristate "SA8775 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SA8775 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
+config SA_GPUCC_8775P
+ tristate "SA8775P Graphics clock controller"
+ select QCOM_GDSC
+ select SA_GCC_8775P
+ help
+ Support for the graphics clock controller on SA8775P devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SC_GCC_7180
tristate "SC7180 Global Clock Controller"
select QCOM_GDSC
@@ -569,6 +613,14 @@ config QCS_Q6SSTOP_404
Say Y if you want to use the Q6SSTOP branch clocks of the WCSS clock
controller to reset the Q6SSTOP subsystem.
+config QDU_GCC_1000
+ tristate "QDU1000/QRU1000 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QDU1000 and
+ QRU1000 devices. Say Y if you want to use peripheral
+ devices such as UART, SPI, I2C, USB, SD, PCIe, etc.
+
config SDM_GCC_845
tristate "SDM845/SDM670 Global Clock Controller"
select QCOM_GDSC
@@ -627,6 +679,13 @@ config SDX_GCC_65
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_CAMCC_6350
+ tristate "SM6350 Camera Clock Controller"
+ select SM_GCC_6350
+ help
+ Support for the camera clock controller on SM6350 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_CAMCC_8250
tristate "SM8250 Camera Clock Controller"
select SM_GCC_8250
@@ -695,6 +754,15 @@ config SM_DISPCC_8450
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_8550
+ tristate "SM8550 Display Clock Controller"
+ depends on SM_GCC_8550
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM8550 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_6115
tristate "SM6115 and SM4250 Global Clock Controller"
select QCOM_GDSC
@@ -726,6 +794,14 @@ config SM_GCC_6375
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS etc.
+config SM_GCC_7150
+ tristate "SM7150 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM7150 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
help
@@ -765,6 +841,33 @@ config SM_GCC_8550
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GPUCC_6115
+ tristate "SM6115 Graphics Clock Controller"
+ select SM_GCC_6115
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for the graphics clock controller on SM6115 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config SM_GPUCC_6125
+ tristate "SM6125 Graphics Clock Controller"
+ select SM_GCC_6125
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for the graphics clock controller on SM6125 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config SM_GPUCC_6375
+ tristate "SM6375 Graphics Clock Controller"
+ select SM_GCC_6375
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for the graphics clock controller on SM6375 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_GPUCC_6350
tristate "SM6350 Graphics Clock Controller"
select SM_GCC_6350
@@ -797,6 +900,13 @@ config SM_GPUCC_8350
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_TCSRCC_8550
+ tristate "SM8550 TCSR Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on SM8550 devices.
+ Say Y if you want to use peripheral devices such as SD/UFS.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index f18c446a97ea..9ff4c373ad95 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -24,9 +24,11 @@ obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8909) += gcc-msm8909.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8917) += gcc-msm8917.o
obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
@@ -52,7 +55,7 @@ obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_A7PLL) += a7-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
-obj-$(CONFIG_QCOM_CLK_APCC_MSM8996) += clk-cpu-8996.o
+obj-$(CONFIG_QCOM_CLK_APCC_MSM8996) += apcs-msm8996.o clk-cpu-8996.o clk-cbf-8996.o
obj-$(CONFIG_QCOM_CLK_APCS_SDX55) += apcs-sdx55.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
@@ -62,11 +65,14 @@ obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_QDU_GCC_1000) += gcc-qdu1000.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_8280XP) += dispcc-sc8280xp.o
+obj-$(CONFIG_SA_GCC_8775P) += gcc-sa8775p.o
+obj-$(CONFIG_SA_GPUCC_8775P) += gpucc-sa8775p.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
@@ -91,6 +97,7 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
+obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
@@ -99,19 +106,25 @@ obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
obj-$(CONFIG_SM_DISPCC_6375) += dispcc-sm6375.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
+obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
obj-$(CONFIG_SM_GCC_6375) += gcc-sm6375.o
+obj-$(CONFIG_SM_GCC_7150) += gcc-sm7150.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
+obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
+obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
+obj-$(CONFIG_SM_GPUCC_6375) += gpucc-sm6375.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
+obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index 89e0730810ac..ce57b333ec99 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -119,18 +119,16 @@ err:
return ret;
}
-static int qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
+static void qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
{
struct clk_regmap_mux_div *a53cc = platform_get_drvdata(pdev);
clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb);
-
- return 0;
}
static struct platform_driver qcom_apcs_msm8916_clk_driver = {
.probe = qcom_apcs_msm8916_clk_probe,
- .remove = qcom_apcs_msm8916_clk_remove,
+ .remove_new = qcom_apcs_msm8916_clk_remove,
.driver = {
.name = "qcom-apcs-msm8916-clk",
},
diff --git a/drivers/clk/qcom/apcs-msm8996.c b/drivers/clk/qcom/apcs-msm8996.c
new file mode 100644
index 000000000000..3e91e9e6da74
--- /dev/null
+++ b/drivers/clk/qcom/apcs-msm8996.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm APCS clock controller driver
+ *
+ * Copyright (c) 2022, Linaro Limited
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define APCS_AUX_OFFSET 0x50
+
+#define APCS_AUX_DIV_MASK GENMASK(17, 16)
+#define APCS_AUX_DIV_2 0x1
+
+static int qcom_apcs_msm8996_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ unsigned int val;
+ int ret = -ENODEV;
+
+ regmap = dev_get_regmap(parent, NULL);
+ if (!regmap) {
+ dev_err(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+
+ regmap_read(regmap, APCS_AUX_OFFSET, &val);
+ regmap_update_bits(regmap, APCS_AUX_OFFSET, APCS_AUX_DIV_MASK,
+ FIELD_PREP(APCS_AUX_DIV_MASK, APCS_AUX_DIV_2));
+
+ /*
+ * This clock is used during CPU cluster setup while setting up CPU PLLs.
+ * Add the hardware-mandated delay to make sure that the sys_apcs_aux clock
+ * is stable (after setting the divider) before continuing
+ * bootstrapping to keep CPUs from ending up in a weird state.
+ */
+ udelay(5);
+
+ /*
+ * As this clock is a parent of the CPU cluster clocks and is actually
+ * used as a parent during CPU clock setup, we want it to register
+ * as early as possible, without letting fw_devlink delay probing of
+ * either of the drivers.
+ *
+ * The sys_apcs_aux is a child (divider) of gpll0, but we register it
+ * as a fixed-rate clock instead to ease the bootstrapping procedure. By
+ * doing this we make sure that the CPU cluster clocks can be set up
+ * early during the boot process (as recommended by Qualcomm).
+ */
+ hw = devm_clk_hw_register_fixed_rate(dev, "sys_apcs_aux", NULL, 0, 300000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static struct platform_driver qcom_apcs_msm8996_clk_driver = {
+ .probe = qcom_apcs_msm8996_clk_probe,
+ .driver = {
+ .name = "qcom-apcs-msm8996-clk",
+ },
+};
+
+/* Register early enough so the clock is available to the other CPU clock drivers */
+static int __init qcom_apcs_msm8996_clk_init(void)
+{
+ return platform_driver_register(&qcom_apcs_msm8996_clk_driver);
+}
+postcore_initcall(qcom_apcs_msm8996_clk_init);
+
+static void __exit qcom_apcs_msm8996_clk_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_msm8996_clk_driver);
+}
+module_exit(qcom_apcs_msm8996_clk_exit);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm MSM8996 APCS clock driver");
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index e599f862ec44..d644e6e1f8b7 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -120,20 +120,18 @@ err:
return ret;
}
-static int qcom_apcs_sdx55_clk_remove(struct platform_device *pdev)
+static void qcom_apcs_sdx55_clk_remove(struct platform_device *pdev)
{
struct device *cpu_dev = get_cpu_device(0);
struct clk_regmap_mux_div *a7cc = platform_get_drvdata(pdev);
clk_notifier_unregister(a7cc->pclk, &a7cc->clk_nb);
dev_pm_domain_detach(cpu_dev, true);
-
- return 0;
}
static struct platform_driver qcom_apcs_sdx55_clk_driver = {
.probe = qcom_apcs_sdx55_clk_probe,
- .remove = qcom_apcs_sdx55_clk_remove,
+ .remove_new = qcom_apcs_sdx55_clk_remove,
.driver = {
.name = "qcom-sdx55-acps-clk",
},
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index a5aea27eb867..cf4f0d340cbf 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -8,20 +8,38 @@
#include "clk-alpha-pll.h"
-static const u8 ipq_pll_offsets[] = {
- [PLL_OFF_L_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL] = 0x10,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_CONFIG_CTL_U] = 0x24,
- [PLL_OFF_STATUS] = 0x28,
- [PLL_OFF_TEST_CTL] = 0x30,
- [PLL_OFF_TEST_CTL_U] = 0x34,
+/*
+ * Even though the APSS PLL is of an existing type (like Huayra), its offsets
+ * differ from the ones defined in clk-alpha-pll.c, since the PLL is
+ * specific to APSS, so define them here.
+ */
+static const u8 ipq_pll_offsets[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_STATUS] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x30,
+ [PLL_OFF_TEST_CTL_U] = 0x34,
+ },
+ [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
+ [PLL_OFF_L_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_STATUS] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x30,
+ [PLL_OFF_TEST_CTL_U] = 0x34,
+ },
};
-static struct clk_alpha_pll ipq_pll = {
+static struct clk_alpha_pll ipq_pll_huayra = {
.offset = 0x0,
- .regs = ipq_pll_offsets,
+ .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_HUAYRA],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x0,
@@ -37,6 +55,38 @@ static struct clk_alpha_pll ipq_pll = {
},
};
+static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ .offset = 0x0,
+ .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "a53pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_stromer_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config ipq5332_pll_config = {
+ .l = 0x3e,
+ .config_ctl_val = 0x4001075b,
+ .config_ctl_hi_val = 0x304,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .early_output_mask = BIT(3),
+ .alpha_en_mask = BIT(24),
+ .status_val = 0x3,
+ .status_mask = GENMASK(10, 8),
+ .lock_det = BIT(2),
+ .test_ctl_hi_val = 0x00400003,
+};
+
static const struct alpha_pll_config ipq6018_pll_config = {
.l = 0x37,
.config_ctl_val = 0x240d4828,
@@ -61,6 +111,30 @@ static const struct alpha_pll_config ipq8074_pll_config = {
.test_ctl_hi_val = 0x4000,
};
+struct apss_pll_data {
+ int pll_type;
+ struct clk_alpha_pll *pll;
+ const struct alpha_pll_config *pll_config;
+};
+
+static struct apss_pll_data ipq5332_pll_data = {
+ .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
+ .pll = &ipq_pll_stromer_plus,
+ .pll_config = &ipq5332_pll_config,
+};
+
+static struct apss_pll_data ipq8074_pll_data = {
+ .pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
+ .pll = &ipq_pll_huayra,
+ .pll_config = &ipq8074_pll_config,
+};
+
+static struct apss_pll_data ipq6018_pll_data = {
+ .pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
+ .pll = &ipq_pll_huayra,
+ .pll_config = &ipq6018_pll_config,
+};
+
static const struct regmap_config ipq_pll_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -71,7 +145,7 @@ static const struct regmap_config ipq_pll_regmap_config = {
static int apss_ipq_pll_probe(struct platform_device *pdev)
{
- const struct alpha_pll_config *ipq_pll_config;
+ const struct apss_pll_data *data;
struct device *dev = &pdev->dev;
struct regmap *regmap;
void __iomem *base;
@@ -85,23 +159,27 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ipq_pll_config = of_device_get_match_data(&pdev->dev);
- if (!ipq_pll_config)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
- clk_alpha_pll_configure(&ipq_pll, regmap, ipq_pll_config);
+ if (data->pll_type == CLK_ALPHA_PLL_TYPE_HUAYRA)
+ clk_alpha_pll_configure(data->pll, regmap, data->pll_config);
+ else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
+ clk_stromer_pll_configure(data->pll, regmap, data->pll_config);
- ret = devm_clk_register_regmap(dev, &ipq_pll.clkr);
+ ret = devm_clk_register_regmap(dev, &data->pll->clkr);
if (ret)
return ret;
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
- &ipq_pll.clkr.hw);
+ &data->pll->clkr.hw);
}
static const struct of_device_id apss_ipq_pll_match_table[] = {
- { .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_config },
- { .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_config },
+ { .compatible = "qcom,ipq5332-a53pll", .data = &ipq5332_pll_data },
+ { .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_data },
+ { .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_data },
{ }
};
MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
index ec163ea769f5..4396fddba7a6 100644
--- a/drivers/clk/qcom/camcc-sc7280.c
+++ b/drivers/clk/qcom/camcc-sc7280.c
@@ -88,8 +88,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll0_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
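The camcc-sc7280 hunks in this file replace single-entry parent_data descriptions with parent_hws arrays; both describe the same single clk_hw parent, the second form simply names it directly. A side-by-side sketch with a hypothetical parent (example_* identifiers are not in the driver):

#include <linux/clk-provider.h>

extern struct clk_hw example_parent_hw;	/* hypothetical parent clock */

/* Before: the hw pointer wrapped in a clk_parent_data compound literal */
static const struct clk_init_data example_init_old = {
	.name = "example_clk",
	.parent_data = &(const struct clk_parent_data){
		.hw = &example_parent_hw,
	},
	.num_parents = 1,
};

/* After: the clk_hw array referenced directly via parent_hws */
static const struct clk_init_data example_init_new = {
	.name = "example_clk",
	.parent_hws = (const struct clk_hw*[]) {
		&example_parent_hw,
	},
	.num_parents = 1,
};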
@@ -111,8 +111,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll0_out_odd",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -163,8 +163,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll1_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll1.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -213,8 +213,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll2_out_aux",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll2.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -236,8 +236,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux2 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll2_out_aux2",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll2.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -288,8 +288,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll3_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll3.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -340,8 +340,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll4_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll4.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -392,8 +392,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll5_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll5.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -444,8 +444,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll6_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll6.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -467,8 +467,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll6_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_pll6_out_odd",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_pll6.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1227,8 +1227,8 @@ static struct clk_branch cam_cc_bps_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_bps_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1245,8 +1245,8 @@ static struct clk_branch cam_cc_bps_areg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_bps_areg_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1263,8 +1263,8 @@ static struct clk_branch cam_cc_bps_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_bps_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1281,8 +1281,8 @@ static struct clk_branch cam_cc_bps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_bps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_bps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1299,8 +1299,8 @@ static struct clk_branch cam_cc_camnoc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_camnoc_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1317,8 +1317,8 @@ static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_camnoc_dcd_xo_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1335,8 +1335,8 @@ static struct clk_branch cam_cc_cci_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_cci_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1353,8 +1353,8 @@ static struct clk_branch cam_cc_cci_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_cci_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1371,8 +1371,8 @@ static struct clk_branch cam_cc_core_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_core_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1389,8 +1389,8 @@ static struct clk_branch cam_cc_cpas_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_cpas_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1407,8 +1407,8 @@ static struct clk_branch cam_cc_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csi0phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1425,8 +1425,8 @@ static struct clk_branch cam_cc_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csi1phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1443,8 +1443,8 @@ static struct clk_branch cam_cc_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csi2phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1461,8 +1461,8 @@ static struct clk_branch cam_cc_csi3phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csi3phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1479,8 +1479,8 @@ static struct clk_branch cam_cc_csi4phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csi4phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi4phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1497,8 +1497,8 @@ static struct clk_branch cam_cc_csiphy0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csiphy0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1515,8 +1515,8 @@ static struct clk_branch cam_cc_csiphy1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csiphy1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1533,8 +1533,8 @@ static struct clk_branch cam_cc_csiphy2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csiphy2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1551,8 +1551,8 @@ static struct clk_branch cam_cc_csiphy3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csiphy3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1569,8 +1569,8 @@ static struct clk_branch cam_cc_csiphy4_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_csiphy4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1587,8 +1587,8 @@ static struct clk_branch cam_cc_gdsc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_gdsc_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
@@ -1605,8 +1605,8 @@ static struct clk_branch cam_cc_icp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_icp_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1623,8 +1623,8 @@ static struct clk_branch cam_cc_icp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_icp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_icp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1641,8 +1641,8 @@ static struct clk_branch cam_cc_ife_0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_0_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1659,8 +1659,8 @@ static struct clk_branch cam_cc_ife_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1677,8 +1677,8 @@ static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_0_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1695,8 +1695,8 @@ static struct clk_branch cam_cc_ife_0_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_0_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1713,8 +1713,8 @@ static struct clk_branch cam_cc_ife_0_dsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_0_dsp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1731,8 +1731,8 @@ static struct clk_branch cam_cc_ife_1_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_1_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1749,8 +1749,8 @@ static struct clk_branch cam_cc_ife_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1767,8 +1767,8 @@ static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_1_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1785,8 +1785,8 @@ static struct clk_branch cam_cc_ife_1_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_1_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1803,8 +1803,8 @@ static struct clk_branch cam_cc_ife_1_dsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_1_dsp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1821,8 +1821,8 @@ static struct clk_branch cam_cc_ife_2_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_2_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1839,8 +1839,8 @@ static struct clk_branch cam_cc_ife_2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1857,8 +1857,8 @@ static struct clk_branch cam_cc_ife_2_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_2_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1875,8 +1875,8 @@ static struct clk_branch cam_cc_ife_2_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_2_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_2_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1893,8 +1893,8 @@ static struct clk_branch cam_cc_ife_2_dsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_2_dsp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1911,8 +1911,8 @@ static struct clk_branch cam_cc_ife_lite_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_lite_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1929,8 +1929,8 @@ static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_lite_0_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1947,8 +1947,8 @@ static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_lite_0_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1965,8 +1965,8 @@ static struct clk_branch cam_cc_ife_lite_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_lite_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1983,8 +1983,8 @@ static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_lite_1_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2001,8 +2001,8 @@ static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ife_lite_1_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2019,8 +2019,8 @@ static struct clk_branch cam_cc_ipe_0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ipe_0_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2037,8 +2037,8 @@ static struct clk_branch cam_cc_ipe_0_areg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ipe_0_areg_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2055,8 +2055,8 @@ static struct clk_branch cam_cc_ipe_0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ipe_0_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2073,8 +2073,8 @@ static struct clk_branch cam_cc_ipe_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_ipe_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_ipe_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2091,8 +2091,8 @@ static struct clk_branch cam_cc_jpeg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_jpeg_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2109,8 +2109,8 @@ static struct clk_branch cam_cc_lrme_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_lrme_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_lrme_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2127,8 +2127,8 @@ static struct clk_branch cam_cc_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_mclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2145,8 +2145,8 @@ static struct clk_branch cam_cc_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_mclk1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2163,8 +2163,8 @@ static struct clk_branch cam_cc_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_mclk2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2181,8 +2181,8 @@ static struct clk_branch cam_cc_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_mclk3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2199,8 +2199,8 @@ static struct clk_branch cam_cc_mclk4_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_mclk4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2217,8 +2217,8 @@ static struct clk_branch cam_cc_mclk5_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_mclk5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2235,8 +2235,8 @@ static struct clk_branch cam_cc_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "cam_cc_sleep_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sleep_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
new file mode 100644
index 000000000000..acba9f99d960
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -0,0 +1,1906 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6350-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAMCC_PLL0_OUT_EVEN,
+ P_CAMCC_PLL0_OUT_MAIN,
+ P_CAMCC_PLL1_OUT_EVEN,
+ P_CAMCC_PLL1_OUT_MAIN,
+ P_CAMCC_PLL2_OUT_EARLY,
+ P_CAMCC_PLL2_OUT_MAIN,
+ P_CAMCC_PLL3_OUT_MAIN,
+};
+
+static struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 600MHz configuration */
+static const struct alpha_pll_config camcc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000002,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll camcc_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+/* 808MHz configuration */
+static const struct alpha_pll_config camcc_pll1_config = {
+ .l = 0x2a,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000000,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll camcc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+/* 1920MHz configuration */
+static const struct alpha_pll_config camcc_pll2_config = {
+ .l = 0x64,
+ .alpha = 0x0,
+ .post_div_val = 0x3 << 8,
+ .post_div_mask = 0x3 << 8,
+ .aux_output_mask = BIT(1),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .config_ctl_val = 0x20000800,
+ .config_ctl_hi_val = 0x400003d2,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+};
+
+static struct clk_alpha_pll camcc_pll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll2",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_agera_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor camcc_pll2_out_early = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll2_out_early",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll2_out_main = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll2_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll2_out_main",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 384MHz configuration */
+static const struct alpha_pll_config camcc_pll3_config = {
+ .l = 0x14,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000002,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00014805,
+};
+
+static struct clk_alpha_pll camcc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll3",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map camcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data camcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL1_OUT_EVEN, 3 },
+ { P_CAMCC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data camcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll1_out_even.clkr.hw },
+ { .hw = &camcc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL3_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data camcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll3.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL2_OUT_EARLY, 3 },
+};
+
+static const struct clk_parent_data camcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll2_out_early.hw },
+};
+
+static const struct parent_map camcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL1_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data camcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL1_OUT_EVEN, 3 },
+ { P_CAMCC_PLL3_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data camcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll1_out_even.clkr.hw },
+ { .hw = &camcc_pll3.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data camcc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL1_OUT_MAIN, 2 },
+ { P_CAMCC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data camcc_parent_data_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll1.clkr.hw },
+ { .hw = &camcc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL1_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data camcc_parent_data_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll1.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data camcc_parent_data_9[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &camcc_pll2_out_main.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_camcc_bps_clk_src[] = {
+ F(200000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(320000000, P_CAMCC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_bps_clk_src = {
+ .cmd_rcgr = 0x6010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_cci_0_clk_src[] = {
+ F(37500000, P_CAMCC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(50000000, P_CAMCC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_CAMCC_PLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_cci_0_clk_src = {
+ .cmd_rcgr = 0xf004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_0_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_cci_1_clk_src = {
+ .cmd_rcgr = 0x10004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_1_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_cphy_rx_clk_src[] = {
+ F(150000000, P_CAMCC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_CAMCC_PLL3_OUT_MAIN, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cphy_rx_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_csi0phytimer_clk_src[] = {
+ F(300000000, P_CAMCC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi0phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi1phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi2phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x5070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi3phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAMCC_PLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_4,
+ .freq_tbl = ftbl_camcc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_fast_ahb_clk_src",
+ .parent_data = camcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_icp_clk_src[] = {
+ F(240000000, P_CAMCC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(384000000, P_CAMCC_PLL3_OUT_MAIN, 1, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_icp_clk_src = {
+ .cmd_rcgr = 0xe014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_5,
+ .freq_tbl = ftbl_camcc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_icp_clk_src",
+ .parent_data = camcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_0_clk_src[] = {
+ F(240000000, P_CAMCC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(320000000, P_CAMCC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_0_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_csid_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_csid_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_2_clk_src = {
+ .cmd_rcgr = 0xb00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xb030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_csid_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_lite_clk_src[] = {
+ F(320000000, P_CAMCC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_MAIN, 1.5, 0, 0),
+ F(480000000, P_CAMCC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_6,
+ .freq_tbl = ftbl_camcc_ife_lite_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_clk_src",
+ .parent_data = camcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xc024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_csid_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ipe_0_clk_src[] = {
+ F(240000000, P_CAMCC_PLL2_OUT_MAIN, 2, 0, 0),
+ F(320000000, P_CAMCC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(538666667, P_CAMCC_PLL1_OUT_MAIN, 1.5, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_7,
+ .freq_tbl = ftbl_camcc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_clk_src",
+ .parent_data = camcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_jpeg_clk_src[] = {
+ F(66666667, P_CAMCC_PLL0_OUT_MAIN, 9, 0, 0),
+ F(133333333, P_CAMCC_PLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_jpeg_clk_src = {
+ .cmd_rcgr = 0xd004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_jpeg_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_jpeg_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_lrme_clk_src[] = {
+ F(200000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(269333333, P_CAMCC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(323200000, P_CAMCC_PLL1_OUT_MAIN, 2.5, 0, 0),
+ F(404000000, P_CAMCC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_lrme_clk_src = {
+ .cmd_rcgr = 0x11004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_8,
+ .freq_tbl = ftbl_camcc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_lrme_clk_src",
+ .parent_data = camcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_mclk0_clk_src[] = {
+ F(19200000, P_CAMCC_PLL2_OUT_EARLY, 1, 1, 50),
+ F(24000000, P_CAMCC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(64000000, P_CAMCC_PLL2_OUT_EARLY, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_3,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk0_clk_src",
+ .parent_data = camcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_3,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk1_clk_src",
+ .parent_data = camcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_3,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk2_clk_src",
+ .parent_data = camcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_3,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk3_clk_src",
+ .parent_data = camcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk4_clk_src = {
+ .cmd_rcgr = 0x4084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_3,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk4_clk_src",
+ .parent_data = camcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAMCC_PLL2_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_9,
+ .freq_tbl = ftbl_camcc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_slow_ahb_clk_src",
+ .parent_data = camcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch camcc_bps_ahb_clk = {
+ .halt_reg = 0x6070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_bps_areg_clk = {
+ .halt_reg = 0x6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_fast_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_bps_axi_clk = {
+ .halt_reg = 0x6038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_bps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_bps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_camnoc_axi_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cci_0_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cci_0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cci_1_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cci_1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_core_ahb_clk = {
+ .halt_reg = 0x14010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x14010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cpas_ahb_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi0phytimer_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi1phytimer_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi2phytimer_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi3phytimer_clk = {
+ .halt_reg = 0x5088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi3phytimer_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy3_clk = {
+ .halt_reg = 0x508c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_icp_clk = {
+ .halt_reg = 0xe02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_icp_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_icp_ts_clk = {
+ .halt_reg = 0xe00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_axi_clk = {
+ .halt_reg = 0x9080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_csid_clk = {
+ .halt_reg = 0x9054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_0_csid_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_dsp_clk = {
+ .halt_reg = 0x9038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_axi_clk = {
+ .halt_reg = 0xa058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_csid_clk = {
+ .halt_reg = 0xa04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_1_csid_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_dsp_clk = {
+ .halt_reg = 0xa030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_axi_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xb050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_csid_clk = {
+ .halt_reg = 0xb048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_2_csid_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_dsp_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xc044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_csid_clk = {
+ .halt_reg = 0xc03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_csid_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_ahb_clk = {
+ .halt_reg = 0x7040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_areg_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_fast_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ipe_0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_jpeg_clk = {
+ .halt_reg = 0xd01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_jpeg_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_lrme_clk = {
+ .halt_reg = 0x1101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_lrme_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk4_clk = {
+ .halt_reg = 0x409c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x409c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_soc_ahb_clk = {
+ .halt_reg = 0x1400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_sys_tmr_clk = {
+ .halt_reg = 0xe034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0xb004,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0x14004,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_hw *camcc_sm6350_hws[] = {
+ [CAMCC_PLL2_OUT_EARLY] = &camcc_pll2_out_early.hw,
+};
+
+static struct clk_regmap *camcc_sm6350_clocks[] = {
+ [CAMCC_BPS_AHB_CLK] = &camcc_bps_ahb_clk.clkr,
+ [CAMCC_BPS_AREG_CLK] = &camcc_bps_areg_clk.clkr,
+ [CAMCC_BPS_AXI_CLK] = &camcc_bps_axi_clk.clkr,
+ [CAMCC_BPS_CLK] = &camcc_bps_clk.clkr,
+ [CAMCC_BPS_CLK_SRC] = &camcc_bps_clk_src.clkr,
+ [CAMCC_CAMNOC_AXI_CLK] = &camcc_camnoc_axi_clk.clkr,
+ [CAMCC_CCI_0_CLK] = &camcc_cci_0_clk.clkr,
+ [CAMCC_CCI_0_CLK_SRC] = &camcc_cci_0_clk_src.clkr,
+ [CAMCC_CCI_1_CLK] = &camcc_cci_1_clk.clkr,
+ [CAMCC_CCI_1_CLK_SRC] = &camcc_cci_1_clk_src.clkr,
+ [CAMCC_CORE_AHB_CLK] = &camcc_core_ahb_clk.clkr,
+ [CAMCC_CPAS_AHB_CLK] = &camcc_cpas_ahb_clk.clkr,
+ [CAMCC_CPHY_RX_CLK_SRC] = &camcc_cphy_rx_clk_src.clkr,
+ [CAMCC_CSI0PHYTIMER_CLK] = &camcc_csi0phytimer_clk.clkr,
+ [CAMCC_CSI0PHYTIMER_CLK_SRC] = &camcc_csi0phytimer_clk_src.clkr,
+ [CAMCC_CSI1PHYTIMER_CLK] = &camcc_csi1phytimer_clk.clkr,
+ [CAMCC_CSI1PHYTIMER_CLK_SRC] = &camcc_csi1phytimer_clk_src.clkr,
+ [CAMCC_CSI2PHYTIMER_CLK] = &camcc_csi2phytimer_clk.clkr,
+ [CAMCC_CSI2PHYTIMER_CLK_SRC] = &camcc_csi2phytimer_clk_src.clkr,
+ [CAMCC_CSI3PHYTIMER_CLK] = &camcc_csi3phytimer_clk.clkr,
+ [CAMCC_CSI3PHYTIMER_CLK_SRC] = &camcc_csi3phytimer_clk_src.clkr,
+ [CAMCC_CSIPHY0_CLK] = &camcc_csiphy0_clk.clkr,
+ [CAMCC_CSIPHY1_CLK] = &camcc_csiphy1_clk.clkr,
+ [CAMCC_CSIPHY2_CLK] = &camcc_csiphy2_clk.clkr,
+ [CAMCC_CSIPHY3_CLK] = &camcc_csiphy3_clk.clkr,
+ [CAMCC_FAST_AHB_CLK_SRC] = &camcc_fast_ahb_clk_src.clkr,
+ [CAMCC_ICP_CLK] = &camcc_icp_clk.clkr,
+ [CAMCC_ICP_CLK_SRC] = &camcc_icp_clk_src.clkr,
+ [CAMCC_ICP_TS_CLK] = &camcc_icp_ts_clk.clkr,
+ [CAMCC_IFE_0_AXI_CLK] = &camcc_ife_0_axi_clk.clkr,
+ [CAMCC_IFE_0_CLK] = &camcc_ife_0_clk.clkr,
+ [CAMCC_IFE_0_CLK_SRC] = &camcc_ife_0_clk_src.clkr,
+ [CAMCC_IFE_0_CPHY_RX_CLK] = &camcc_ife_0_cphy_rx_clk.clkr,
+ [CAMCC_IFE_0_CSID_CLK] = &camcc_ife_0_csid_clk.clkr,
+ [CAMCC_IFE_0_CSID_CLK_SRC] = &camcc_ife_0_csid_clk_src.clkr,
+ [CAMCC_IFE_0_DSP_CLK] = &camcc_ife_0_dsp_clk.clkr,
+ [CAMCC_IFE_1_AXI_CLK] = &camcc_ife_1_axi_clk.clkr,
+ [CAMCC_IFE_1_CLK] = &camcc_ife_1_clk.clkr,
+ [CAMCC_IFE_1_CLK_SRC] = &camcc_ife_1_clk_src.clkr,
+ [CAMCC_IFE_1_CPHY_RX_CLK] = &camcc_ife_1_cphy_rx_clk.clkr,
+ [CAMCC_IFE_1_CSID_CLK] = &camcc_ife_1_csid_clk.clkr,
+ [CAMCC_IFE_1_CSID_CLK_SRC] = &camcc_ife_1_csid_clk_src.clkr,
+ [CAMCC_IFE_1_DSP_CLK] = &camcc_ife_1_dsp_clk.clkr,
+ [CAMCC_IFE_2_AXI_CLK] = &camcc_ife_2_axi_clk.clkr,
+ [CAMCC_IFE_2_CLK] = &camcc_ife_2_clk.clkr,
+ [CAMCC_IFE_2_CLK_SRC] = &camcc_ife_2_clk_src.clkr,
+ [CAMCC_IFE_2_CPHY_RX_CLK] = &camcc_ife_2_cphy_rx_clk.clkr,
+ [CAMCC_IFE_2_CSID_CLK] = &camcc_ife_2_csid_clk.clkr,
+ [CAMCC_IFE_2_CSID_CLK_SRC] = &camcc_ife_2_csid_clk_src.clkr,
+ [CAMCC_IFE_2_DSP_CLK] = &camcc_ife_2_dsp_clk.clkr,
+ [CAMCC_IFE_LITE_CLK] = &camcc_ife_lite_clk.clkr,
+ [CAMCC_IFE_LITE_CLK_SRC] = &camcc_ife_lite_clk_src.clkr,
+ [CAMCC_IFE_LITE_CPHY_RX_CLK] = &camcc_ife_lite_cphy_rx_clk.clkr,
+ [CAMCC_IFE_LITE_CSID_CLK] = &camcc_ife_lite_csid_clk.clkr,
+ [CAMCC_IFE_LITE_CSID_CLK_SRC] = &camcc_ife_lite_csid_clk_src.clkr,
+ [CAMCC_IPE_0_AHB_CLK] = &camcc_ipe_0_ahb_clk.clkr,
+ [CAMCC_IPE_0_AREG_CLK] = &camcc_ipe_0_areg_clk.clkr,
+ [CAMCC_IPE_0_AXI_CLK] = &camcc_ipe_0_axi_clk.clkr,
+ [CAMCC_IPE_0_CLK] = &camcc_ipe_0_clk.clkr,
+ [CAMCC_IPE_0_CLK_SRC] = &camcc_ipe_0_clk_src.clkr,
+ [CAMCC_JPEG_CLK] = &camcc_jpeg_clk.clkr,
+ [CAMCC_JPEG_CLK_SRC] = &camcc_jpeg_clk_src.clkr,
+ [CAMCC_LRME_CLK] = &camcc_lrme_clk.clkr,
+ [CAMCC_LRME_CLK_SRC] = &camcc_lrme_clk_src.clkr,
+ [CAMCC_MCLK0_CLK] = &camcc_mclk0_clk.clkr,
+ [CAMCC_MCLK0_CLK_SRC] = &camcc_mclk0_clk_src.clkr,
+ [CAMCC_MCLK1_CLK] = &camcc_mclk1_clk.clkr,
+ [CAMCC_MCLK1_CLK_SRC] = &camcc_mclk1_clk_src.clkr,
+ [CAMCC_MCLK2_CLK] = &camcc_mclk2_clk.clkr,
+ [CAMCC_MCLK2_CLK_SRC] = &camcc_mclk2_clk_src.clkr,
+ [CAMCC_MCLK3_CLK] = &camcc_mclk3_clk.clkr,
+ [CAMCC_MCLK3_CLK_SRC] = &camcc_mclk3_clk_src.clkr,
+ [CAMCC_MCLK4_CLK] = &camcc_mclk4_clk.clkr,
+ [CAMCC_MCLK4_CLK_SRC] = &camcc_mclk4_clk_src.clkr,
+ [CAMCC_PLL0] = &camcc_pll0.clkr,
+ [CAMCC_PLL0_OUT_EVEN] = &camcc_pll0_out_even.clkr,
+ [CAMCC_PLL1] = &camcc_pll1.clkr,
+ [CAMCC_PLL1_OUT_EVEN] = &camcc_pll1_out_even.clkr,
+ [CAMCC_PLL2] = &camcc_pll2.clkr,
+ [CAMCC_PLL2_OUT_MAIN] = &camcc_pll2_out_main.clkr,
+ [CAMCC_PLL3] = &camcc_pll3.clkr,
+ [CAMCC_SLOW_AHB_CLK_SRC] = &camcc_slow_ahb_clk_src.clkr,
+ [CAMCC_SOC_AHB_CLK] = &camcc_soc_ahb_clk.clkr,
+ [CAMCC_SYS_TMR_CLK] = &camcc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *camcc_sm6350_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct regmap_config camcc_sm6350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc camcc_sm6350_desc = {
+ .config = &camcc_sm6350_regmap_config,
+ .clk_hws = camcc_sm6350_hws,
+ .num_clk_hws = ARRAY_SIZE(camcc_sm6350_hws),
+ .clks = camcc_sm6350_clocks,
+ .num_clks = ARRAY_SIZE(camcc_sm6350_clocks),
+ .gdscs = camcc_sm6350_gdscs,
+ .num_gdscs = ARRAY_SIZE(camcc_sm6350_gdscs),
+};
+
+static const struct of_device_id camcc_sm6350_match_table[] = {
+ { .compatible = "qcom,sm6350-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, camcc_sm6350_match_table);
+
+static int camcc_sm6350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &camcc_sm6350_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
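+ /* PLL0, PLL1 and PLL3 are Fabia PLLs, PLL2 is an Agera PLL */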
+ clk_fabia_pll_configure(&camcc_pll0, regmap, &camcc_pll0_config);
+ clk_fabia_pll_configure(&camcc_pll1, regmap, &camcc_pll1_config);
+ clk_agera_pll_configure(&camcc_pll2, regmap, &camcc_pll2_config);
+ clk_fabia_pll_configure(&camcc_pll3, regmap, &camcc_pll3_config);
+
+ return qcom_cc_really_probe(pdev, &camcc_sm6350_desc, regmap);
+}
+
+static struct platform_driver camcc_sm6350_driver = {
+ .probe = camcc_sm6350_probe,
+ .driver = {
+ .name = "sm6350-camcc",
+ .of_match_table = camcc_sm6350_match_table,
+ },
+};
+
+static int __init camcc_sm6350_init(void)
+{
+ return platform_driver_register(&camcc_sm6350_driver);
+}
+subsys_initcall(camcc_sm6350_init);
+
+static void __exit camcc_sm6350_exit(void)
+{
+ platform_driver_unregister(&camcc_sm6350_driver);
+}
+module_exit(camcc_sm6350_exit);
+
+MODULE_DESCRIPTION("QTI CAMCC SM6350 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
index e3c09471dadf..51338a2884d2 100644
--- a/drivers/clk/qcom/camcc-sm8450.c
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -95,8 +95,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll0_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -118,8 +118,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll0_out_odd",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -166,8 +166,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll1_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll1.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -237,8 +237,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll3_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll3.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -285,8 +285,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll4_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll4.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -333,8 +333,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll5_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll5.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -381,8 +381,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll6_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll6.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -429,8 +429,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll7_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll7.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll7.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -477,8 +477,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_pll8_out_even",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_pll8.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll8.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1268,8 +1268,8 @@ static struct clk_branch cam_cc_gdsc_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_gdsc_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
@@ -1286,8 +1286,8 @@ static struct clk_branch cam_cc_bps_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_bps_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1304,8 +1304,8 @@ static struct clk_branch cam_cc_bps_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_bps_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_bps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1322,8 +1322,8 @@ static struct clk_branch cam_cc_bps_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_bps_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1340,8 +1340,8 @@ static struct clk_branch cam_cc_camnoc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_camnoc_axi_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1358,8 +1358,8 @@ static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_camnoc_dcd_xo_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1376,8 +1376,8 @@ static struct clk_branch cam_cc_cci_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cci_0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1394,8 +1394,8 @@ static struct clk_branch cam_cc_cci_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cci_1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1412,8 +1412,8 @@ static struct clk_branch cam_cc_core_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_core_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1430,8 +1430,8 @@ static struct clk_branch cam_cc_cpas_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1448,8 +1448,8 @@ static struct clk_branch cam_cc_cpas_bps_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_bps_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_bps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1466,8 +1466,8 @@ static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1484,8 +1484,8 @@ static struct clk_branch cam_cc_cpas_ife_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_ife_0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1502,8 +1502,8 @@ static struct clk_branch cam_cc_cpas_ife_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_ife_1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1520,8 +1520,8 @@ static struct clk_branch cam_cc_cpas_ife_2_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_ife_2_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1538,8 +1538,8 @@ static struct clk_branch cam_cc_cpas_ife_lite_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_ife_lite_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1556,8 +1556,8 @@ static struct clk_branch cam_cc_cpas_ipe_nps_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_ipe_nps_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1574,8 +1574,8 @@ static struct clk_branch cam_cc_cpas_sbi_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_sbi_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1592,8 +1592,8 @@ static struct clk_branch cam_cc_cpas_sfe_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_sfe_0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1610,8 +1610,8 @@ static struct clk_branch cam_cc_cpas_sfe_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_cpas_sfe_1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1628,8 +1628,8 @@ static struct clk_branch cam_cc_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csi0phytimer_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1646,8 +1646,8 @@ static struct clk_branch cam_cc_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csi1phytimer_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1664,8 +1664,8 @@ static struct clk_branch cam_cc_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csi2phytimer_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1682,8 +1682,8 @@ static struct clk_branch cam_cc_csi3phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csi3phytimer_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1700,8 +1700,8 @@ static struct clk_branch cam_cc_csi4phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csi4phytimer_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi4phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1718,8 +1718,8 @@ static struct clk_branch cam_cc_csi5phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csi5phytimer_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csi5phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi5phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1736,8 +1736,8 @@ static struct clk_branch cam_cc_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csid_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1754,8 +1754,8 @@ static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csid_csiphy_rx_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1772,8 +1772,8 @@ static struct clk_branch cam_cc_csiphy0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csiphy0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1790,8 +1790,8 @@ static struct clk_branch cam_cc_csiphy1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csiphy1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1808,8 +1808,8 @@ static struct clk_branch cam_cc_csiphy2_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csiphy2_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1826,8 +1826,8 @@ static struct clk_branch cam_cc_csiphy3_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csiphy3_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1844,8 +1844,8 @@ static struct clk_branch cam_cc_csiphy4_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csiphy4_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1862,8 +1862,8 @@ static struct clk_branch cam_cc_csiphy5_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_csiphy5_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1880,8 +1880,8 @@ static struct clk_branch cam_cc_icp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_icp_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1898,8 +1898,8 @@ static struct clk_branch cam_cc_icp_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_icp_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_icp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1916,8 +1916,8 @@ static struct clk_branch cam_cc_ife_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1934,8 +1934,8 @@ static struct clk_branch cam_cc_ife_0_dsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_0_dsp_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1952,8 +1952,8 @@ static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_0_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1970,8 +1970,8 @@ static struct clk_branch cam_cc_ife_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1988,8 +1988,8 @@ static struct clk_branch cam_cc_ife_1_dsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_1_dsp_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2006,8 +2006,8 @@ static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_1_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2024,8 +2024,8 @@ static struct clk_branch cam_cc_ife_2_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_2_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2042,8 +2042,8 @@ static struct clk_branch cam_cc_ife_2_dsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_2_dsp_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2060,8 +2060,8 @@ static struct clk_branch cam_cc_ife_2_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_2_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2078,8 +2078,8 @@ static struct clk_branch cam_cc_ife_lite_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_lite_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2096,8 +2096,8 @@ static struct clk_branch cam_cc_ife_lite_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_lite_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2114,8 +2114,8 @@ static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_lite_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2132,8 +2132,8 @@ static struct clk_branch cam_cc_ife_lite_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ife_lite_csid_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2150,8 +2150,8 @@ static struct clk_branch cam_cc_ipe_nps_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ipe_nps_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2168,8 +2168,8 @@ static struct clk_branch cam_cc_ipe_nps_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ipe_nps_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2186,8 +2186,8 @@ static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ipe_nps_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2204,8 +2204,8 @@ static struct clk_branch cam_cc_ipe_pps_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ipe_pps_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2222,8 +2222,8 @@ static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_ipe_pps_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2240,8 +2240,8 @@ static struct clk_branch cam_cc_jpeg_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_jpeg_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2258,8 +2258,8 @@ static struct clk_branch cam_cc_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2276,8 +2276,8 @@ static struct clk_branch cam_cc_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2294,8 +2294,8 @@ static struct clk_branch cam_cc_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk2_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2312,8 +2312,8 @@ static struct clk_branch cam_cc_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk3_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2330,8 +2330,8 @@ static struct clk_branch cam_cc_mclk4_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk4_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2348,8 +2348,8 @@ static struct clk_branch cam_cc_mclk5_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk5_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2366,8 +2366,8 @@ static struct clk_branch cam_cc_mclk6_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk6_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2384,8 +2384,8 @@ static struct clk_branch cam_cc_mclk7_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_mclk7_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_mclk7_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk7_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2402,8 +2402,8 @@ static struct clk_branch cam_cc_qdss_debug_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_qdss_debug_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_qdss_debug_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_qdss_debug_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2420,8 +2420,8 @@ static struct clk_branch cam_cc_qdss_debug_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_qdss_debug_xo_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2438,8 +2438,8 @@ static struct clk_branch cam_cc_sbi_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sbi_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2456,8 +2456,8 @@ static struct clk_branch cam_cc_sbi_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sbi_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2474,8 +2474,8 @@ static struct clk_branch cam_cc_sfe_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sfe_0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2492,8 +2492,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sfe_0_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2510,8 +2510,8 @@ static struct clk_branch cam_cc_sfe_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sfe_1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2528,8 +2528,8 @@ static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sfe_1_fast_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2546,8 +2546,8 @@ static struct clk_branch cam_cc_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "cam_cc_sleep_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sleep_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index f9e4cfd7261c..b9f6535a7ba7 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -204,6 +204,29 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_CONFIG_CTL] = 0x1C,
[PLL_OFF_STATUS] = 0x20,
},
+ [CLK_ALPHA_PLL_TYPE_STROMER] = {
+ [PLL_OFF_L_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0xff,
+ [PLL_OFF_TEST_CTL] = 0x30,
+ [PLL_OFF_TEST_CTL_U] = 0x34,
+ [PLL_OFF_STATUS] = 0x28,
+ },
+ [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_USER_CTL] = 0x08,
+ [PLL_OFF_USER_CTL_U] = 0x0c,
+ [PLL_OFF_CONFIG_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL] = 0x14,
+ [PLL_OFF_TEST_CTL_U] = 0x18,
+ [PLL_OFF_STATUS] = 0x1c,
+ [PLL_OFF_ALPHA_VAL] = 0x24,
+ [PLL_OFF_ALPHA_VAL_U] = 0x28,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -215,6 +238,8 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define ALPHA_BITWIDTH 32U
#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
+#define ALPHA_PLL_STATUS_REG_SHIFT 8
+
#define PLL_HUAYRA_M_WIDTH 8
#define PLL_HUAYRA_M_SHIFT 8
#define PLL_HUAYRA_M_MASK 0xff
@@ -358,6 +383,11 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+
if (pll->flags & SUPPORTS_FSM_MODE)
qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
}
@@ -2324,3 +2354,115 @@ const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
.round_rate = clk_rivian_evo_pll_round_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
+
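+/*
+ * One-time configuration of a Stromer PLL: program the L/ALPHA and CONFIG_CTL
+ * registers, assemble USER_CTL from the output enables, dividers and VCO
+ * settings, enable the lock detector via USER_CTL_U and, when supported, hand
+ * the PLL over to FSM (voting) mode.
+ */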
+void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, val_u, mask, mask_u;
+
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+
+ if (pll_has_64bit_config(pll))
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (pll_alpha_width(pll) > 32)
+ regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+ val |= config->alpha_en_mask;
+ val |= config->alpha_mode_mask;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+ mask |= config->alpha_en_mask;
+ mask |= config->alpha_mode_mask;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+
+ /* Stromer APSS PLL does not enable LOCK_DET by default, so enable it */
+ val_u = config->status_val << ALPHA_PLL_STATUS_REG_SHIFT;
+ val_u |= config->lock_det;
+
+ mask_u = config->status_mask;
+ mask_u |= config->lock_det;
+
+ regmap_update_bits(regmap, PLL_USER_CTL_U(pll), mask_u, val_u);
+ regmap_write(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ regmap_write(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
+}
+EXPORT_SYMBOL_GPL(clk_stromer_pll_configure);
+
+static int clk_alpha_pll_stromer_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ u32 l;
+ u64 a;
+
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate,
+ &l, &a, ALPHA_REG_BITWIDTH);
+
+ return 0;
+}
+
+static int clk_alpha_pll_stromer_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ int ret;
+ u32 l;
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_REG_BITWIDTH);
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ a >> ALPHA_BITWIDTH);
+
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, PLL_ALPHA_EN);
+
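+ /* Skip the dynamic update sequence if the PLL is not currently running */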
+ if (!clk_hw_is_enabled(hw))
+ return 0;
+
+ /*
+ * The Stromer PLL supports dynamic programming: the output frequency can
+ * be changed on the fly, without first executing a shutdown procedure
+ * followed by a bring-up procedure.
+ */
+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
+ PLL_UPDATE);
+
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+
+ return wait_for_pll_enable_lock(pll);
+}
+
+const struct clk_ops clk_alpha_pll_stromer_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .determine_rate = clk_alpha_pll_stromer_determine_rate,
+ .set_rate = clk_alpha_pll_stromer_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 2bdae362c827..d07b17186b90 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,5 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2015, 2018, 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
#ifndef __QCOM_CLK_ALPHA_PLL_H__
#define __QCOM_CLK_ALPHA_PLL_H__
@@ -22,6 +26,8 @@ enum {
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
+ CLK_ALPHA_PLL_TYPE_STROMER,
+ CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -131,6 +137,9 @@ struct alpha_pll_config {
u32 post_div_mask;
u32 vco_val;
u32 vco_mask;
+ u32 status_val;
+ u32 status_mask;
+ u32 lock_det;
};
extern const struct clk_ops clk_alpha_pll_ops;
@@ -139,6 +148,7 @@ extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_alpha_pll_huayra_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
+extern const struct clk_ops clk_alpha_pll_stromer_ops;
extern const struct clk_ops clk_alpha_pll_fabia_ops;
extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
@@ -162,6 +172,7 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
+#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
@@ -187,5 +198,7 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index f869fc6aaed6..ca896ebf7e1b 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -39,27 +39,22 @@ static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
return !!val == !enabling;
}
-#define BRANCH_CLK_OFF BIT(31)
-#define BRANCH_NOC_FSM_STATUS_SHIFT 28
-#define BRANCH_NOC_FSM_STATUS_MASK 0x7
-#define BRANCH_NOC_FSM_STATUS_ON (0x2 << BRANCH_NOC_FSM_STATUS_SHIFT)
-
static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
{
u32 val;
u32 mask;
- mask = BRANCH_NOC_FSM_STATUS_MASK << BRANCH_NOC_FSM_STATUS_SHIFT;
- mask |= BRANCH_CLK_OFF;
+ mask = CBCR_NOC_FSM_STATUS;
+ mask |= CBCR_CLK_OFF;
regmap_read(br->clkr.regmap, br->halt_reg, &val);
if (enabling) {
val &= mask;
- return (val & BRANCH_CLK_OFF) == 0 ||
- val == BRANCH_NOC_FSM_STATUS_ON;
+ return (val & CBCR_CLK_OFF) == 0 ||
+ FIELD_GET(CBCR_NOC_FSM_STATUS, val) == FSM_STATUS_ON;
} else {
- return val & BRANCH_CLK_OFF;
+ return val & CBCR_CLK_OFF;
}
}
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 17a58119165e..0cf800b9d08d 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -4,6 +4,7 @@
#ifndef __QCOM_CLK_BRANCH_H__
#define __QCOM_CLK_BRANCH_H__
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include "clk-regmap.h"
@@ -37,6 +38,49 @@ struct clk_branch {
struct clk_regmap clkr;
};
+/* Branch clock common bits for HLOS-owned clocks */
+#define CBCR_CLK_OFF BIT(31)
+#define CBCR_NOC_FSM_STATUS GENMASK(30, 28)
+ #define FSM_STATUS_ON BIT(1)
+#define CBCR_FORCE_MEM_CORE_ON BIT(14)
+#define CBCR_FORCE_MEM_PERIPH_ON BIT(13)
+#define CBCR_FORCE_MEM_PERIPH_OFF BIT(12)
+#define CBCR_WAKEUP GENMASK(11, 8)
+#define CBCR_SLEEP GENMASK(7, 4)
+
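+/* Helpers for poking the memory retention, wakeup and sleep fields of a branch CBCR */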
+static inline void qcom_branch_set_force_mem_core(struct regmap *regmap,
+ struct clk_branch clk, bool on)
+{
+ regmap_update_bits(regmap, clk.halt_reg, CBCR_FORCE_MEM_CORE_ON,
+ on ? CBCR_FORCE_MEM_CORE_ON : 0);
+}
+
+static inline void qcom_branch_set_force_periph_on(struct regmap *regmap,
+ struct clk_branch clk, bool on)
+{
+ regmap_update_bits(regmap, clk.halt_reg, CBCR_FORCE_MEM_PERIPH_ON,
+ on ? CBCR_FORCE_MEM_PERIPH_ON : 0);
+}
+
+static inline void qcom_branch_set_force_periph_off(struct regmap *regmap,
+ struct clk_branch clk, bool on)
+{
+ regmap_update_bits(regmap, clk.halt_reg, CBCR_FORCE_MEM_PERIPH_OFF,
+ on ? CBCR_FORCE_MEM_PERIPH_OFF : 0);
+}
+
+static inline void qcom_branch_set_wakeup(struct regmap *regmap, struct clk_branch clk, u32 val)
+{
+ regmap_update_bits(regmap, clk.halt_reg, CBCR_WAKEUP,
+ FIELD_PREP(CBCR_WAKEUP, val));
+}
+
+static inline void qcom_branch_set_sleep(struct regmap *regmap, struct clk_branch clk, u32 val)
+{
+ regmap_update_bits(regmap, clk.halt_reg, CBCR_SLEEP,
+ FIELD_PREP(CBCR_SLEEP, val));
+}
+
extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_branch_simple_ops;
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
new file mode 100644
index 000000000000..cfd567636f4e
--- /dev/null
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, 2023 Linaro Ltd.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-regmap.h"
+
+/* Must match the order of the clocks in the DT binding */
+enum {
+ DT_XO,
+ DT_APCS_AUX,
+};
+
+enum {
+ CBF_XO_INDEX,
+ CBF_PLL_INDEX,
+ CBF_DIV_INDEX,
+ CBF_APCS_AUX_INDEX,
+};
+
+#define DIV_THRESHOLD 600000000
+
+#define CBF_MUX_OFFSET 0x18
+#define CBF_MUX_PARENT_MASK GENMASK(1, 0)
+#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
+#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
+ FIELD_PREP(CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
+#define CBF_MUX_AUTO_CLK_SEL_BIT BIT(6)
+
+#define CBF_PLL_OFFSET 0xf000
+
+static const u8 cbf_pll_regs[PLL_OFF_MAX_REGS] = {
+ [PLL_OFF_L_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_TEST_CTL] = 0x30,
+ [PLL_OFF_TEST_CTL_U] = 0x34,
+ [PLL_OFF_STATUS] = 0x28,
+};
+
+static const struct alpha_pll_config cbfpll_config = {
+ .l = 72,
+ .config_ctl_val = 0x200d4828,
+ .config_ctl_hi_val = 0x006,
+ .test_ctl_val = 0x1c000000,
+ .test_ctl_hi_val = 0x00004000,
+ .pre_div_mask = BIT(12),
+ .post_div_mask = 0x3 << 8,
+ .post_div_val = 0x1 << 8,
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+};
+
+static struct clk_alpha_pll cbf_pll = {
+ .offset = CBF_PLL_OFFSET,
+ .regs = cbf_pll_regs,
+ .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cbf_pll",
+ .parent_data = (const struct clk_parent_data[]) {
+ { .index = DT_XO, },
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_hwfsm_ops,
+ },
+};
+
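+/* Fixed /2 post-divider feeding the mux leg used for rates below DIV_THRESHOLD */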
+static struct clk_fixed_factor cbf_pll_postdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "cbf_pll_postdiv",
+ .parent_hws = (const struct clk_hw*[]){
+ &cbf_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct clk_parent_data cbf_mux_parent_data[] = {
+ { .index = DT_XO },
+ { .hw = &cbf_pll.clkr.hw },
+ { .hw = &cbf_pll_postdiv.hw },
+ { .index = DT_APCS_AUX },
+};
+
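+/*
+ * CBF mux with a clk notifier: before a rate change that drops below
+ * DIV_THRESHOLD the notifier parks the mux on the PLL/2 leg, so the fabric
+ * is never clocked at twice the requested rate (see cbf_clk_notifier_cb).
+ */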
+struct clk_cbf_8996_mux {
+ u32 reg;
+ struct notifier_block nb;
+ struct clk_regmap clkr;
+};
+
+static struct clk_cbf_8996_mux *to_clk_cbf_8996_mux(struct clk_regmap *clkr)
+{
+ return container_of(clkr, struct clk_cbf_8996_mux, clkr);
+}
+
+static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
+ void *data);
+
+static u8 clk_cbf_8996_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
+ u32 val;
+
+ regmap_read(clkr->regmap, mux->reg, &val);
+
+ return FIELD_GET(CBF_MUX_PARENT_MASK, val);
+}
+
+static int clk_cbf_8996_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
+ u32 val;
+
+ val = FIELD_PREP(CBF_MUX_PARENT_MASK, index);
+
+ return regmap_update_bits(clkr->regmap, mux->reg, CBF_MUX_PARENT_MASK, val);
+}
+
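+/*
+ * Requests below DIV_THRESHOLD are served from the PLL/2 post-divider leg,
+ * requests at or above it from the PLL itself; anything below half the
+ * threshold cannot be reached from either leg and is rejected.
+ */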
+static int clk_cbf_8996_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent;
+
+ if (req->rate < (DIV_THRESHOLD / 2))
+ return -EINVAL;
+
+ if (req->rate < DIV_THRESHOLD)
+ parent = clk_hw_get_parent_by_index(hw, CBF_DIV_INDEX);
+ else
+ parent = clk_hw_get_parent_by_index(hw, CBF_PLL_INDEX);
+
+ if (!parent)
+ return -EINVAL;
+
+ req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
+ req->best_parent_hw = parent;
+
+ return 0;
+}
+
+static const struct clk_ops clk_cbf_8996_mux_ops = {
+ .set_parent = clk_cbf_8996_mux_set_parent,
+ .get_parent = clk_cbf_8996_mux_get_parent,
+ .determine_rate = clk_cbf_8996_mux_determine_rate,
+};
+
+static struct clk_cbf_8996_mux cbf_mux = {
+ .reg = CBF_MUX_OFFSET,
+ .nb.notifier_call = cbf_clk_notifier_cb,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cbf_mux",
+ .parent_data = cbf_mux_parent_data,
+ .num_parents = ARRAY_SIZE(cbf_mux_parent_data),
+ .ops = &clk_cbf_8996_mux_ops,
+ /* CPU clock is critical and should never be gated */
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ },
+};
+
+static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct clk_notifier_data *cnd = data;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ /*
+ * Avoid overvolting. clk_core_set_rate_nolock() walks from top
+ * to bottom, so it will change the rate of the PLL before
+ * changing the parent of PMUX. This can result in PMUX getting
+ * clocked at twice the expected rate.
+ *
+ * Manually switch to PLL/2 here.
+ */
+ if (cnd->old_rate > DIV_THRESHOLD &&
+ cnd->new_rate < DIV_THRESHOLD)
+ clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_DIV_INDEX);
+ break;
+ case ABORT_RATE_CHANGE:
+ /* Revert manual change */
+ if (cnd->new_rate < DIV_THRESHOLD &&
+ cnd->old_rate > DIV_THRESHOLD)
+ clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_PLL_INDEX);
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(0);
+};
+
+static struct clk_hw *cbf_msm8996_hw_clks[] = {
+ &cbf_pll_postdiv.hw,
+};
+
+static struct clk_regmap *cbf_msm8996_clks[] = {
+ &cbf_pll.clkr,
+ &cbf_mux.clkr,
+};
+
+static const struct regmap_config cbf_msm8996_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct regmap *regmap;
+ struct device *dev = &pdev->dev;
+ int i, ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &cbf_msm8996_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Select GPLL0 for 300MHz for the CBF clock */
+ regmap_write(regmap, CBF_MUX_OFFSET, 0x3);
+
+ /* Ensure write goes through before PLLs are reconfigured */
+ udelay(5);
+
+ /* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
+ regmap_update_bits(regmap, CBF_MUX_OFFSET,
+ CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
+ CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
+
+ clk_alpha_pll_configure(&cbf_pll, regmap, &cbfpll_config);
+
+ /* Wait for PLL(s) to lock */
+ udelay(50);
+
+ /* Enable auto clock selection for CBF */
+ regmap_update_bits(regmap, CBF_MUX_OFFSET,
+ CBF_MUX_AUTO_CLK_SEL_BIT,
+ CBF_MUX_AUTO_CLK_SEL_BIT);
+
+ /* Ensure write goes through before muxes are switched */
+ udelay(5);
+
+ /* Switch CBF to use the primary PLL */
+ regmap_update_bits(regmap, CBF_MUX_OFFSET, CBF_MUX_PARENT_MASK, 0x1);
+
+ for (i = 0; i < ARRAY_SIZE(cbf_msm8996_hw_clks); i++) {
+ ret = devm_clk_hw_register(dev, cbf_msm8996_hw_clks[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cbf_msm8996_clks); i++) {
+ ret = devm_clk_register_regmap(dev, cbf_msm8996_clks[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_clk_notifier_register(dev, cbf_mux.clkr.hw.clk, &cbf_mux.nb);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
+}
+
+static const struct of_device_id qcom_msm8996_cbf_match_table[] = {
+ { .compatible = "qcom,msm8996-cbf" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
+
+static struct platform_driver qcom_msm8996_cbf_driver = {
+ .probe = qcom_msm8996_cbf_probe,
+ .driver = {
+ .name = "qcom-msm8996-cbf",
+ .of_match_table = qcom_msm8996_cbf_match_table,
+ },
+};
+
+/* Register early enough so the CBF clock is set up before the other CPU cores need it */
+static int __init qcom_msm8996_cbf_init(void)
+{
+ return platform_driver_register(&qcom_msm8996_cbf_driver);
+}
+postcore_initcall(qcom_msm8996_cbf_init);
+
+static void __exit qcom_msm8996_cbf_exit(void)
+{
+ platform_driver_unregister(&qcom_msm8996_cbf_driver);
+}
+module_exit(qcom_msm8996_cbf_exit);
+
+MODULE_DESCRIPTION("QCOM MSM8996 CPU Bus Fabric Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index ee76ef958d31..592c7c3cdeb7 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -12,6 +12,8 @@
* +-------+
* XO | |
* +------------------>0 |
+ * SYS_APCS_AUX | |
+ * +------------------>3 |
* | |
* PLL/2 | SMUX +----+
* +------->1 | |
@@ -58,6 +60,8 @@
#include <linux/regmap.h>
#include <soc/qcom/kryo-l2-accessors.h>
+#include <asm/cputype.h>
+
#include "clk-alpha-pll.h"
#include "clk-regmap.h"
#include "clk-regmap-mux.h"
@@ -74,10 +78,16 @@ enum _pmux_input {
#define PWRCL_REG_OFFSET 0x0
#define PERFCL_REG_OFFSET 0x80000
#define MUX_OFFSET 0x40
+#define CLK_CTL_OFFSET 0x44
+#define CLK_CTL_AUTO_CLK_SEL BIT(8)
#define ALT_PLL_OFFSET 0x100
#define SSSCTL_OFFSET 0x160
+#define PSCTL_OFFSET 0x164
#define PMUX_MASK 0x3
+#define MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
+#define MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
+ FIELD_PREP(MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
[PLL_OFF_L_VAL] = 0x04,
@@ -93,21 +103,20 @@ static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
[PLL_OFF_USER_CTL] = 0x10,
- [PLL_OFF_USER_CTL_U] = 0x14,
[PLL_OFF_CONFIG_CTL] = 0x18,
[PLL_OFF_TEST_CTL] = 0x20,
- [PLL_OFF_TEST_CTL_U] = 0x24,
[PLL_OFF_STATUS] = 0x28,
};
/* PLLs */
static const struct alpha_pll_config hfpll_config = {
- .l = 60,
- .config_ctl_val = 0x200d4aa8,
+ .l = 54,
+ .config_ctl_val = 0x200d4828,
.config_ctl_hi_val = 0x006,
+ .test_ctl_val = 0x1c000000,
+ .test_ctl_hi_val = 0x00004000,
.pre_div_mask = BIT(12),
.post_div_mask = 0x3 << 8,
.post_div_val = 0x1 << 8,
@@ -127,7 +136,7 @@ static struct clk_alpha_pll pwrcl_pll = {
.name = "pwrcl_pll",
.parent_data = pll_parent,
.num_parents = ARRAY_SIZE(pll_parent),
- .ops = &clk_alpha_pll_huayra_ops,
+ .ops = &clk_alpha_pll_hwfsm_ops,
},
};
@@ -139,7 +148,7 @@ static struct clk_alpha_pll perfcl_pll = {
.name = "perfcl_pll",
.parent_data = pll_parent,
.num_parents = ARRAY_SIZE(pll_parent),
- .ops = &clk_alpha_pll_huayra_ops,
+ .ops = &clk_alpha_pll_hwfsm_ops,
},
};
@@ -311,20 +320,29 @@ static const struct clk_ops clk_cpu_8996_pmux_ops = {
.determine_rate = clk_cpu_8996_pmux_determine_rate,
};
+static const struct parent_map smux_parent_map[] = {
+ { .cfg = 0, }, /* xo */
+ { .cfg = 1, }, /* pll */
+ { .cfg = 3, }, /* sys_apcs_aux */
+};
+
static const struct clk_parent_data pwrcl_smux_parents[] = {
{ .fw_name = "xo" },
{ .hw = &pwrcl_pll_postdiv.hw },
+ { .fw_name = "sys_apcs_aux" },
};
static const struct clk_parent_data perfcl_smux_parents[] = {
{ .fw_name = "xo" },
{ .hw = &perfcl_pll_postdiv.hw },
+ { .fw_name = "sys_apcs_aux" },
};
static struct clk_regmap_mux pwrcl_smux = {
.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
.shift = 2,
.width = 2,
+ .parent_map = smux_parent_map,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pwrcl_smux",
.parent_data = pwrcl_smux_parents,
@@ -338,6 +356,7 @@ static struct clk_regmap_mux perfcl_smux = {
.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
.shift = 2,
.width = 2,
+ .parent_map = smux_parent_map,
.clkr.hw.init = &(struct clk_init_data) {
.name = "perfcl_smux",
.parent_data = perfcl_smux_parents,
@@ -414,11 +433,55 @@ static struct clk_regmap *cpu_msm8996_clks[] = {
&perfcl_pmux.clkr,
};
+static void qcom_cpu_clk_msm8996_acd_init(struct regmap *regmap);
+
static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
struct regmap *regmap)
{
int i, ret;
+ /* Select GPLL0 for 300MHz for both clusters */
+ regmap_write(regmap, PERFCL_REG_OFFSET + MUX_OFFSET, 0xc);
+ regmap_write(regmap, PWRCL_REG_OFFSET + MUX_OFFSET, 0xc);
+
+ /* Ensure write goes through before PLLs are reconfigured */
+ udelay(5);
+
+ /* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
+ regmap_update_bits(regmap, PWRCL_REG_OFFSET + MUX_OFFSET,
+ MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
+ MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
+ regmap_update_bits(regmap, PERFCL_REG_OFFSET + MUX_OFFSET,
+ MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
+ MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
+
+ clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
+ clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
+ clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
+ clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
+
+ /* Wait for PLL(s) to lock */
+ udelay(50);
+
+ /* Enable auto clock selection for both clusters */
+ regmap_update_bits(regmap, PWRCL_REG_OFFSET + CLK_CTL_OFFSET,
+ CLK_CTL_AUTO_CLK_SEL, CLK_CTL_AUTO_CLK_SEL);
+ regmap_update_bits(regmap, PERFCL_REG_OFFSET + CLK_CTL_OFFSET,
+ CLK_CTL_AUTO_CLK_SEL, CLK_CTL_AUTO_CLK_SEL);
+
+ /* Ensure write goes through before muxes are switched */
+ udelay(5);
+
+ qcom_cpu_clk_msm8996_acd_init(regmap);
+
+ /* Pulse swallower and soft-start settings */
+ regmap_write(regmap, PWRCL_REG_OFFSET + PSCTL_OFFSET, 0x00030005);
+ regmap_write(regmap, PERFCL_REG_OFFSET + PSCTL_OFFSET, 0x00030005);
+
+ /* Switch clusters to use the ACD leg */
+ regmap_write(regmap, PWRCL_REG_OFFSET + MUX_OFFSET, 0x32);
+ regmap_write(regmap, PERFCL_REG_OFFSET + MUX_OFFSET, 0x32);
+
for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
if (ret)
@@ -431,11 +494,6 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
return ret;
}
- clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
- clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
- clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
- clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
-
/* Enable alt PLLs */
clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);
@@ -446,9 +504,9 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
return ret;
}
-#define CPU_AFINITY_MASK 0xFFF
-#define PWRCL_CPU_REG_MASK 0x3
-#define PERFCL_CPU_REG_MASK 0x103
+#define CPU_CLUSTER_AFFINITY_MASK 0xf00
+#define PWRCL_AFFINITY_MASK 0x000
+#define PERFCL_AFFINITY_MASK 0x100
#define L2ACDCR_REG 0x580ULL
#define L2ACDTD_REG 0x581ULL
@@ -456,31 +514,32 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
#define L2ACDSSCR_REG 0x589ULL
static DEFINE_SPINLOCK(qcom_clk_acd_lock);
-static void __iomem *base;
-static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
+static void qcom_cpu_clk_msm8996_acd_init(struct regmap *regmap)
{
u64 hwid;
+ u32 val;
unsigned long flags;
spin_lock_irqsave(&qcom_clk_acd_lock, flags);
- hwid = read_cpuid_mpidr() & CPU_AFINITY_MASK;
+ val = kryo_l2_get_indirect_reg(L2ACDTD_REG);
+ if (val == 0x00006a11)
+ goto out;
kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11);
kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f);
kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601);
- if (PWRCL_CPU_REG_MASK == (hwid | PWRCL_CPU_REG_MASK)) {
- writel(0xf, base + PWRCL_REG_OFFSET + SSSCTL_OFFSET);
- kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
- }
+ kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
- if (PERFCL_CPU_REG_MASK == (hwid | PERFCL_CPU_REG_MASK)) {
- kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
- writel(0xf, base + PERFCL_REG_OFFSET + SSSCTL_OFFSET);
- }
+ hwid = read_cpuid_mpidr();
+ if ((hwid & CPU_CLUSTER_AFFINITY_MASK) == PWRCL_AFFINITY_MASK)
+ regmap_write(regmap, PWRCL_REG_OFFSET + SSSCTL_OFFSET, 0xf);
+ else
+ regmap_write(regmap, PERFCL_REG_OFFSET + SSSCTL_OFFSET, 0xf);
+out:
spin_unlock_irqrestore(&qcom_clk_acd_lock, flags);
}
@@ -489,31 +548,40 @@ static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
{
struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
struct clk_notifier_data *cnd = data;
- int ret;
switch (event) {
case PRE_RATE_CHANGE:
- ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
- qcom_cpu_clk_msm8996_acd_init(base);
+ qcom_cpu_clk_msm8996_acd_init(cpuclk->clkr.regmap);
+
+ /*
+ * Avoid overvolting. clk_core_set_rate_nolock() walks from top
+ * to bottom, so it will change the rate of the PLL before
+ * changing the parent of PMUX. This can result in the pmux getting
+ * clocked at twice the expected rate.
+ *
+ * Manually switch to PLL/2 here.
+ */
+ if (cnd->new_rate < DIV_2_THRESHOLD &&
+ cnd->old_rate > DIV_2_THRESHOLD)
+ clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, SMUX_INDEX);
+
break;
- case POST_RATE_CHANGE:
- if (cnd->new_rate < DIV_2_THRESHOLD)
- ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
- SMUX_INDEX);
- else
- ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
- ACD_INDEX);
+ case ABORT_RATE_CHANGE:
+ /* Revert manual change */
+ if (cnd->new_rate < DIV_2_THRESHOLD &&
+ cnd->old_rate > DIV_2_THRESHOLD)
+ clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ACD_INDEX);
break;
default:
- ret = 0;
break;
}
- return notifier_from_errno(ret);
+ return NOTIFY_OK;
};
static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
{
+ static void __iomem *base;
struct regmap *regmap;
struct clk_hw_onecell_data *data;
struct device *dev = &pdev->dev;
@@ -535,8 +603,6 @@ static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
- qcom_cpu_clk_msm8996_acd_init(base);
-
data->hws[0] = &pwrcl_pmux.clkr.hw;
data->hws[1] = &perfcl_pmux.clkr.hw;
data->num = 2;
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
index 7dd17c184b69..86f728dc69e5 100644
--- a/drivers/clk/qcom/clk-hfpll.c
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -128,20 +128,20 @@ static void clk_hfpll_disable(struct clk_hw *hw)
spin_unlock_irqrestore(&h->lock, flags);
}
-static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_hfpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_hfpll *h = to_clk_hfpll(hw);
struct hfpll_data const *hd = h->d;
unsigned long rrate;
- rate = clamp(rate, hd->min_rate, hd->max_rate);
+ req->rate = clamp(req->rate, hd->min_rate, hd->max_rate);
- rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate;
+ rrate = DIV_ROUND_UP(req->rate, req->best_parent_rate) * req->best_parent_rate;
if (rrate > hd->max_rate)
- rrate -= *parent_rate;
+ rrate -= req->best_parent_rate;
- return rrate;
+ req->rate = rrate;
+ return 0;
}
/*
@@ -241,7 +241,7 @@ const struct clk_ops clk_ops_hfpll = {
.enable = clk_hfpll_enable,
.disable = clk_hfpll_disable,
.is_enabled = hfpll_is_enabled,
- .round_rate = clk_hfpll_round_rate,
+ .determine_rate = clk_hfpll_determine_rate,
.set_rate = clk_hfpll_set_rate,
.recalc_rate = clk_hfpll_recalc_rate,
.init = clk_hfpll_init,
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
index 293a9dfa7151..f5ce403e1e27 100644
--- a/drivers/clk/qcom/clk-krait.c
+++ b/drivers/clk/qcom/clk-krait.c
@@ -97,11 +97,11 @@ const struct clk_ops krait_mux_clk_ops = {
EXPORT_SYMBOL_GPL(krait_mux_clk_ops);
/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
-static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int krait_div2_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
- *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);
- return DIV_ROUND_UP(*parent_rate, 2);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), req->rate * 2);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, 2);
+ return 0;
}
static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -142,7 +142,7 @@ krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
}
const struct clk_ops krait_div2_clk_ops = {
- .round_rate = krait_div2_round_rate,
+ .determine_rate = krait_div2_determine_rate,
.set_rate = krait_div2_set_rate,
.recalc_rate = krait_div2_recalc_rate,
};
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index b1be5b664bf3..cac623e27b0e 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -580,8 +580,8 @@ static int rpm_clk_probe(struct platform_device *pdev)
goto err;
}
- ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
- rcc);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_rpm_clk_hw_get,
+ rcc);
if (ret)
goto err;
@@ -591,19 +591,12 @@ err:
return ret;
}
-static int rpm_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- return 0;
-}
-
static struct platform_driver rpm_clk_driver = {
.driver = {
.name = "qcom-clk-rpm",
.of_match_table = rpm_clk_match_table,
},
.probe = rpm_clk_probe,
- .remove = rpm_clk_remove,
};
static int __init rpm_clk_init(void)
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 586a810c682c..45ee370f3307 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -366,6 +366,16 @@ DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1);
+DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1);
+DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1);
+DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1);
+DEFINE_CLK_RPMH_VRM(clk4, _a1, "clka4", 1);
+DEFINE_CLK_RPMH_VRM(clk5, _a1, "clka5", 1);
+
+DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2);
+DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2);
+DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2);
+
DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);
DEFINE_CLK_RPMH_BCM(ce, "CE0");
@@ -396,6 +406,22 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
+static struct clk_hw *sa8775p_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a4_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+ [RPMH_PKA_CLK] = &clk_rpmh_pka.hw,
+ [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sa8775p = {
+ .clks = sa8775p_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sa8775p_rpmh_clocks),
+};
+
static struct clk_hw *sdm670_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -445,6 +471,7 @@ static struct clk_hw *sm8150_rpmh_clocks[] = {
[RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
[RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw,
[RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sm8150 = {
@@ -484,6 +511,7 @@ static struct clk_hw *sc8180x_rpmh_clocks[] = {
[RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_d_ao.hw,
[RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_d.hw,
[RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_d_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
@@ -504,6 +532,7 @@ static struct clk_hw *sm8250_rpmh_clocks[] = {
[RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
[RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw,
[RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sm8250 = {
@@ -576,6 +605,31 @@ static const struct clk_rpmh_desc clk_rpmh_sm8450 = {
.num_clks = ARRAY_SIZE(sm8450_rpmh_clocks),
};
+static struct clk_hw *sm8550_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a1.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a1_ao.hw,
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a1.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a1_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
+ .clks = sm8550_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
+};
+
static struct clk_hw *sc7280_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw,
@@ -730,6 +784,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
+ { .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
@@ -742,6 +797,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
+ { .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550},
{ .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index fea505876855..887b945a6fb7 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -25,107 +25,133 @@
#define QCOM_RPM_SMD_KEY_STATE 0x54415453
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
-#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \
- key) \
- static struct clk_smd_rpm _platform##_##_active; \
- static struct clk_smd_rpm _platform##_##_name = { \
+#define __DEFINE_CLK_SMD_RPM_PREFIX(_prefix, _name, _active, \
+ type, r_id, key) \
+ static struct clk_smd_rpm clk_smd_rpm_##_prefix##_active; \
+ static struct clk_smd_rpm clk_smd_rpm_##_prefix##_name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
- .rpm_status_id = (stat_id), \
.rpm_key = (key), \
- .peer = &_platform##_##_active, \
+ .peer = &clk_smd_rpm_##_prefix##_active, \
.rate = INT_MAX, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
- .fw_name = "xo", \
- .name = "xo_board", \
- }, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = "xo", \
+ .name = "xo_board", \
+ }, \
.num_parents = 1, \
}, \
}; \
- static struct clk_smd_rpm _platform##_##_active = { \
+ static struct clk_smd_rpm clk_smd_rpm_##_prefix##_active = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
- .rpm_status_id = (stat_id), \
.active_only = true, \
.rpm_key = (key), \
- .peer = &_platform##_##_name, \
+ .peer = &clk_smd_rpm_##_prefix##_name, \
.rate = INT_MAX, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
- .fw_name = "xo", \
- .name = "xo_board", \
- }, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = "xo", \
+ .name = "xo_board", \
+ }, \
.num_parents = 1, \
}, \
}
-#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \
- stat_id, r, key) \
- static struct clk_smd_rpm _platform##_##_active; \
- static struct clk_smd_rpm _platform##_##_name = { \
+#define __DEFINE_CLK_SMD_RPM(_name, _active, type, r_id, key) \
+ __DEFINE_CLK_SMD_RPM_PREFIX(/* empty */, _name, _active, \
+ type, r_id, key)
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(_prefix, _name, _active,\
+ type, r_id, r, key) \
+ static struct clk_smd_rpm clk_smd_rpm_##_prefix##_active; \
+ static struct clk_smd_rpm clk_smd_rpm_##_prefix##_name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
- .rpm_status_id = (stat_id), \
.rpm_key = (key), \
.branch = true, \
- .peer = &_platform##_##_active, \
+ .peer = &clk_smd_rpm_##_prefix##_active, \
.rate = (r), \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
- .fw_name = "xo", \
- .name = "xo_board", \
- }, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = "xo", \
+ .name = "xo_board", \
+ }, \
.num_parents = 1, \
}, \
}; \
- static struct clk_smd_rpm _platform##_##_active = { \
+ static struct clk_smd_rpm clk_smd_rpm_##_prefix##_active = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
- .rpm_status_id = (stat_id), \
.active_only = true, \
.rpm_key = (key), \
.branch = true, \
- .peer = &_platform##_##_name, \
+ .peer = &clk_smd_rpm_##_prefix##_name, \
.rate = (r), \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
- .fw_name = "xo", \
- .name = "xo_board", \
- }, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = "xo", \
+ .name = "xo_board", \
+ }, \
.num_parents = 1, \
}, \
}
-#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \
- __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
- 0, QCOM_RPM_SMD_KEY_RATE)
-
-#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \
- __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \
- r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE)
-
-#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \
- __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
- 0, QCOM_RPM_SMD_KEY_STATE)
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_name, _active, type, r_id, r, key) \
+ __DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(/* empty */, \
+ _name, _active, type, r_id, r, key)
+
+#define DEFINE_CLK_SMD_RPM(_name, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_name##_clk, _name##_a_clk, \
+ type, r_id, QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BUS(_name, r_id) \
+ __DEFINE_CLK_SMD_RPM_PREFIX(bus_##r_id##_, \
+ _name##_clk, _name##_a_clk, QCOM_SMD_RPM_BUS_CLK, r_id, \
+ QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_CLK_SRC(_name, type, r_id) \
+ __DEFINE_CLK_SMD_RPM( \
+ _name##_clk_src, _name##_a_clk_src, \
+ type, r_id, QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_name, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(branch_, \
+ _name##_clk, _name##_a_clk, \
+ type, r_id, r, QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH_A(_name, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(branch_, \
+ _name, _name##_a, type, \
+ r_id, r, QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_name, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_name##_clk, _name##_a_clk, \
+ type, r_id, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_name, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, _name##_a, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, r, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
-#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id, r) \
- __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
- QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, r, \
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PREFIX(_prefix, _name, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(_prefix, \
+ _name, _name##_a, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, r, \
QCOM_RPM_KEY_SOFTWARE_ENABLE)
-#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, \
- r_id, r) \
- __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
- QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, r, \
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_name, r_id, r) \
+ DEFINE_CLK_SMD_RPM_XO_BUFFER(_name, r_id, r); \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name##_pin, _name##_a##_pin, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, r, \
QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
@@ -134,7 +160,6 @@ struct clk_smd_rpm {
const int rpm_res_type;
const int rpm_key;
const int rpm_clk_id;
- const int rpm_status_id;
const bool active_only;
bool enabled;
bool branch;
@@ -413,48 +438,102 @@ static const struct clk_ops clk_smd_rpm_branch_ops = {
.recalc_rate = clk_smd_rpm_recalc_rate,
};
-DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
-DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
-DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
-DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
-DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5, 19200000);
+DEFINE_CLK_SMD_RPM_BRANCH_A(bi_tcxo, QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
+DEFINE_CLK_SMD_RPM_BRANCH(qdss, QCOM_SMD_RPM_MISC_CLK, 1, 19200000);
+DEFINE_CLK_SMD_RPM_QDSS(qdss, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_BRANCH_A(bimc_freq_log, QCOM_SMD_RPM_MISC_CLK, 4, 1);
+
+DEFINE_CLK_SMD_RPM_BRANCH(mss_cfg_ahb, QCOM_SMD_RPM_MCFG_CLK, 0, 19200000);
+
+DEFINE_CLK_SMD_RPM_BRANCH(aggre1_noc, QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
+DEFINE_CLK_SMD_RPM_BRANCH(aggre2_noc, QCOM_SMD_RPM_AGGR_CLK, 2, 1000);
+DEFINE_CLK_SMD_RPM(aggre1_noc, QCOM_SMD_RPM_AGGR_CLK, 1);
+DEFINE_CLK_SMD_RPM(aggre2_noc, QCOM_SMD_RPM_AGGR_CLK, 2);
+
+DEFINE_CLK_SMD_RPM_BUS(pcnoc, 0);
+DEFINE_CLK_SMD_RPM_BUS(snoc, 1);
+DEFINE_CLK_SMD_RPM_BUS(sysmmnoc, 2);
+DEFINE_CLK_SMD_RPM_BUS(cnoc, 2);
+DEFINE_CLK_SMD_RPM_BUS(mmssnoc_ahb, 3);
+DEFINE_CLK_SMD_RPM_BUS(snoc_periph, 0);
+DEFINE_CLK_SMD_RPM_BUS(cnoc, 1);
+DEFINE_CLK_SMD_RPM_BUS(snoc, 2);
+DEFINE_CLK_SMD_RPM_BUS(snoc_lpass, 5);
+
+DEFINE_CLK_SMD_RPM(bimc, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(cpuss_gnoc, QCOM_SMD_RPM_MEM_CLK, 1);
+DEFINE_CLK_SMD_RPM_CLK_SRC(gfx3d, QCOM_SMD_RPM_MEM_CLK, 1);
+DEFINE_CLK_SMD_RPM(ocmemgx, QCOM_SMD_RPM_MEM_CLK, 2);
+DEFINE_CLK_SMD_RPM(bimc_gpu, QCOM_SMD_RPM_MEM_CLK, 2);
+
+DEFINE_CLK_SMD_RPM(ce1, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM(ce2, QCOM_SMD_RPM_CE_CLK, 1);
+DEFINE_CLK_SMD_RPM(ce3, QCOM_SMD_RPM_CE_CLK, 2);
+
+DEFINE_CLK_SMD_RPM(ipa, QCOM_SMD_RPM_IPA_CLK, 0);
+
+DEFINE_CLK_SMD_RPM(hwkm, QCOM_SMD_RPM_HWKM_CLK, 0);
+
+DEFINE_CLK_SMD_RPM(mmssnoc_axi_rpm, QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(mmnrt, QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(mmrt, QCOM_SMD_RPM_MMAXI_CLK, 1);
+
+DEFINE_CLK_SMD_RPM(pka, QCOM_SMD_RPM_PKA_CLK, 0);
+
+DEFINE_CLK_SMD_RPM(qpic, QCOM_SMD_RPM_QPIC_CLK, 0);
+
+DEFINE_CLK_SMD_RPM(qup, QCOM_SMD_RPM_QUP_CLK, 0);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk1, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk2, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk1, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk2, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk3, 3, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk1, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk2, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk3, 6, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk, 8, 19200000);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PREFIX(38m4_, rf_clk3, 6, 38400000);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(cxo_d0, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(cxo_d1, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(cxo_a0, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(cxo_a1, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(cxo_a2, 6, 19200000);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER(diff_clk, 7, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(div_clk1, 11, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(div_clk2, 12, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(div_clk3, 13, 19200000);
static struct clk_smd_rpm *msm8909_clks[] = {
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
- [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QPIC_CLK] = &clk_smd_rpm_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &clk_smd_rpm_qpic_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8909 = {
@@ -463,30 +542,30 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8909 = {
};
static struct clk_smd_rpm *msm8916_clks[] = {
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
@@ -494,35 +573,69 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
.num_clks = ARRAY_SIZE(msm8916_clks),
};
-DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+static struct clk_smd_rpm *msm8917_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_BIMC_GPU_CLK] = &clk_smd_rpm_bimc_gpu_clk,
+ [RPM_SMD_BIMC_GPU_A_CLK] = &clk_smd_rpm_bimc_gpu_a_clk,
+ [RPM_SMD_SYSMMNOC_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_clk,
+ [RPM_SMD_SYSMMNOC_A_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8917 = {
+ .clks = msm8917_clks,
+ .num_clks = ARRAY_SIZE(msm8917_clks),
+};
static struct clk_smd_rpm *msm8936_clks[] = {
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk,
- [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_SYSMMNOC_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_clk,
+ [RPM_SMD_SYSMMNOC_A_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8936 = {
@@ -530,67 +643,51 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8936 = {
.num_clks = ARRAY_SIZE(msm8936_clks),
};
-DEFINE_CLK_SMD_RPM(msm8974, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
-DEFINE_CLK_SMD_RPM(msm8974, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, QCOM_SMD_RPM_BUS_CLK, 3);
-DEFINE_CLK_SMD_RPM(msm8974, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1);
-DEFINE_CLK_SMD_RPM(msm8974, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6, 19200000);
-
static struct clk_smd_rpm *msm8974_clks[] = {
- [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk,
- [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk,
- [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_GFX3D_CLK_SRC] = &msm8974_gfx3d_clk_src,
- [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8974_gfx3d_a_clk_src,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk,
- [RPM_SMD_OCMEMGX_A_CLK] = &msm8974_ocmemgx_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_CXO_D0] = &msm8974_cxo_d0,
- [RPM_SMD_CXO_D0_A] = &msm8974_cxo_d0_a,
- [RPM_SMD_CXO_D1] = &msm8974_cxo_d1,
- [RPM_SMD_CXO_D1_A] = &msm8974_cxo_d1_a,
- [RPM_SMD_CXO_A0] = &msm8974_cxo_a0,
- [RPM_SMD_CXO_A0_A] = &msm8974_cxo_a0_a,
- [RPM_SMD_CXO_A1] = &msm8974_cxo_a1,
- [RPM_SMD_CXO_A1_A] = &msm8974_cxo_a1_a,
- [RPM_SMD_CXO_A2] = &msm8974_cxo_a2,
- [RPM_SMD_CXO_A2_A] = &msm8974_cxo_a2_a,
- [RPM_SMD_DIFF_CLK] = &msm8974_diff_clk,
- [RPM_SMD_DIFF_A_CLK] = &msm8974_diff_a_clk,
- [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
- [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_CXO_D0_PIN] = &msm8974_cxo_d0_pin,
- [RPM_SMD_CXO_D0_A_PIN] = &msm8974_cxo_d0_a_pin,
- [RPM_SMD_CXO_D1_PIN] = &msm8974_cxo_d1_pin,
- [RPM_SMD_CXO_D1_A_PIN] = &msm8974_cxo_d1_a_pin,
- [RPM_SMD_CXO_A0_PIN] = &msm8974_cxo_a0_pin,
- [RPM_SMD_CXO_A0_A_PIN] = &msm8974_cxo_a0_a_pin,
- [RPM_SMD_CXO_A1_PIN] = &msm8974_cxo_a1_pin,
- [RPM_SMD_CXO_A1_A_PIN] = &msm8974_cxo_a1_a_pin,
- [RPM_SMD_CXO_A2_PIN] = &msm8974_cxo_a2_pin,
- [RPM_SMD_CXO_A2_A_PIN] = &msm8974_cxo_a2_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_2_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_2_cnoc_a_clk,
+ [RPM_SMD_MMSSNOC_AHB_CLK] = &clk_smd_rpm_bus_3_mmssnoc_ahb_clk,
+ [RPM_SMD_MMSSNOC_AHB_A_CLK] = &clk_smd_rpm_bus_3_mmssnoc_ahb_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_GFX3D_CLK_SRC] = &clk_smd_rpm_gfx3d_clk_src,
+ [RPM_SMD_GFX3D_A_CLK_SRC] = &clk_smd_rpm_gfx3d_a_clk_src,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_OCMEMGX_CLK] = &clk_smd_rpm_ocmemgx_clk,
+ [RPM_SMD_OCMEMGX_A_CLK] = &clk_smd_rpm_ocmemgx_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_CXO_D0] = &clk_smd_rpm_cxo_d0,
+ [RPM_SMD_CXO_D0_A] = &clk_smd_rpm_cxo_d0_a,
+ [RPM_SMD_CXO_D1] = &clk_smd_rpm_cxo_d1,
+ [RPM_SMD_CXO_D1_A] = &clk_smd_rpm_cxo_d1_a,
+ [RPM_SMD_CXO_A0] = &clk_smd_rpm_cxo_a0,
+ [RPM_SMD_CXO_A0_A] = &clk_smd_rpm_cxo_a0_a,
+ [RPM_SMD_CXO_A1] = &clk_smd_rpm_cxo_a1,
+ [RPM_SMD_CXO_A1_A] = &clk_smd_rpm_cxo_a1_a,
+ [RPM_SMD_CXO_A2] = &clk_smd_rpm_cxo_a2,
+ [RPM_SMD_CXO_A2_A] = &clk_smd_rpm_cxo_a2_a,
+ [RPM_SMD_DIFF_CLK] = &clk_smd_rpm_diff_clk,
+ [RPM_SMD_DIFF_A_CLK] = &clk_smd_rpm_diff_clk_a,
+ [RPM_SMD_DIV_CLK1] = &clk_smd_rpm_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &clk_smd_rpm_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_CXO_D0_PIN] = &clk_smd_rpm_cxo_d0_pin,
+ [RPM_SMD_CXO_D0_A_PIN] = &clk_smd_rpm_cxo_d0_a_pin,
+ [RPM_SMD_CXO_D1_PIN] = &clk_smd_rpm_cxo_d1_pin,
+ [RPM_SMD_CXO_D1_A_PIN] = &clk_smd_rpm_cxo_d1_a_pin,
+ [RPM_SMD_CXO_A0_PIN] = &clk_smd_rpm_cxo_a0_pin,
+ [RPM_SMD_CXO_A0_A_PIN] = &clk_smd_rpm_cxo_a0_a_pin,
+ [RPM_SMD_CXO_A1_PIN] = &clk_smd_rpm_cxo_a1_pin,
+ [RPM_SMD_CXO_A1_A_PIN] = &clk_smd_rpm_cxo_a1_a_pin,
+ [RPM_SMD_CXO_A2_PIN] = &clk_smd_rpm_cxo_a2_pin,
+ [RPM_SMD_CXO_A2_A_PIN] = &clk_smd_rpm_cxo_a2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
@@ -598,35 +695,33 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.num_clks = ARRAY_SIZE(msm8974_clks),
};
-DEFINE_CLK_SMD_RPM(msm8976, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
-
static struct clk_smd_rpm *msm8976_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk,
- [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_SYSMMNOC_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_clk,
+ [RPM_SMD_SYSMMNOC_A_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
@@ -634,65 +729,57 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
.num_clks = ARRAY_SIZE(msm8976_clks),
};
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk3, div_clk3_a, 13, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8, 19200000);
-
-DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
-DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1);
-
-DEFINE_CLK_SMD_RPM_BRANCH(msm8992, mss_cfg_ahb_clk, mss_cfg_ahb_a_clk,
- QCOM_SMD_RPM_MCFG_CLK, 0, 19200000);
static struct clk_smd_rpm *msm8992_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk,
- [RPM_SMD_OCMEMGX_A_CLK] = &msm8974_ocmemgx_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk,
- [RPM_SMD_GFX3D_CLK_SRC] = &msm8974_gfx3d_clk_src,
- [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8974_gfx3d_a_clk_src,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
- [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3,
- [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_LN_BB_CLK] = &msm8992_ln_bb_clk,
- [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk,
- [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk,
- [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk,
- [RPM_SMD_MSS_CFG_AHB_CLK] = &msm8992_mss_cfg_ahb_clk,
- [RPM_SMD_MSS_CFG_AHB_A_CLK] = &msm8992_mss_cfg_ahb_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_CE2_CLK] = &msm8992_ce2_clk,
- [RPM_SMD_CE2_A_CLK] = &msm8992_ce2_a_clk,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_OCMEMGX_CLK] = &clk_smd_rpm_ocmemgx_clk,
+ [RPM_SMD_OCMEMGX_A_CLK] = &clk_smd_rpm_ocmemgx_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_2_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_2_cnoc_a_clk,
+ [RPM_SMD_GFX3D_CLK_SRC] = &clk_smd_rpm_gfx3d_clk_src,
+ [RPM_SMD_GFX3D_A_CLK_SRC] = &clk_smd_rpm_gfx3d_a_clk_src,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_DIV_CLK1] = &clk_smd_rpm_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &clk_smd_rpm_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_DIV_CLK3] = &clk_smd_rpm_div_clk3,
+ [RPM_SMD_DIV_A_CLK3] = &clk_smd_rpm_div_clk3_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_LN_BB_CLK] = &clk_smd_rpm_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &clk_smd_rpm_ln_bb_clk_a,
+ [RPM_SMD_MMSSNOC_AHB_CLK] = &clk_smd_rpm_bus_3_mmssnoc_ahb_clk,
+ [RPM_SMD_MMSSNOC_AHB_A_CLK] = &clk_smd_rpm_bus_3_mmssnoc_ahb_a_clk,
+ [RPM_SMD_MSS_CFG_AHB_CLK] = &clk_smd_rpm_branch_mss_cfg_ahb_clk,
+ [RPM_SMD_MSS_CFG_AHB_A_CLK] = &clk_smd_rpm_branch_mss_cfg_ahb_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_CE2_CLK] = &clk_smd_rpm_ce2_clk,
+ [RPM_SMD_CE2_A_CLK] = &clk_smd_rpm_ce2_a_clk,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8992 = {
@@ -700,61 +787,59 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8992 = {
.num_clks = ARRAY_SIZE(msm8992_clks),
};
-DEFINE_CLK_SMD_RPM(msm8994, ce3_clk, ce3_a_clk, QCOM_SMD_RPM_CE_CLK, 2);
-
static struct clk_smd_rpm *msm8994_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk,
- [RPM_SMD_OCMEMGX_A_CLK] = &msm8974_ocmemgx_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk,
- [RPM_SMD_GFX3D_CLK_SRC] = &msm8974_gfx3d_clk_src,
- [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8974_gfx3d_a_clk_src,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
- [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3,
- [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_LN_BB_CLK] = &msm8992_ln_bb_clk,
- [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk,
- [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk,
- [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk,
- [RPM_SMD_MSS_CFG_AHB_CLK] = &msm8992_mss_cfg_ahb_clk,
- [RPM_SMD_MSS_CFG_AHB_A_CLK] = &msm8992_mss_cfg_ahb_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_CE2_CLK] = &msm8992_ce2_clk,
- [RPM_SMD_CE2_A_CLK] = &msm8992_ce2_a_clk,
- [RPM_SMD_CE3_CLK] = &msm8994_ce3_clk,
- [RPM_SMD_CE3_A_CLK] = &msm8994_ce3_a_clk,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_OCMEMGX_CLK] = &clk_smd_rpm_ocmemgx_clk,
+ [RPM_SMD_OCMEMGX_A_CLK] = &clk_smd_rpm_ocmemgx_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_2_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_2_cnoc_a_clk,
+ [RPM_SMD_GFX3D_CLK_SRC] = &clk_smd_rpm_gfx3d_clk_src,
+ [RPM_SMD_GFX3D_A_CLK_SRC] = &clk_smd_rpm_gfx3d_a_clk_src,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_DIV_CLK1] = &clk_smd_rpm_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &clk_smd_rpm_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_DIV_CLK3] = &clk_smd_rpm_div_clk3,
+ [RPM_SMD_DIV_A_CLK3] = &clk_smd_rpm_div_clk3_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_LN_BB_CLK] = &clk_smd_rpm_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &clk_smd_rpm_ln_bb_clk_a,
+ [RPM_SMD_MMSSNOC_AHB_CLK] = &clk_smd_rpm_bus_3_mmssnoc_ahb_clk,
+ [RPM_SMD_MMSSNOC_AHB_A_CLK] = &clk_smd_rpm_bus_3_mmssnoc_ahb_a_clk,
+ [RPM_SMD_MSS_CFG_AHB_CLK] = &clk_smd_rpm_branch_mss_cfg_ahb_clk,
+ [RPM_SMD_MSS_CFG_AHB_A_CLK] = &clk_smd_rpm_branch_mss_cfg_ahb_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_CE2_CLK] = &clk_smd_rpm_ce2_clk,
+ [RPM_SMD_CE2_A_CLK] = &clk_smd_rpm_ce2_a_clk,
+ [RPM_SMD_CE3_CLK] = &clk_smd_rpm_ce3_clk,
+ [RPM_SMD_CE3_A_CLK] = &clk_smd_rpm_ce3_a_clk,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8994 = {
@@ -762,58 +847,53 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8994 = {
.num_clks = ARRAY_SIZE(msm8994_clks),
};
-DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
- QCOM_SMD_RPM_MMAXI_CLK, 0);
-DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk,
- QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
-DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk,
- QCOM_SMD_RPM_AGGR_CLK, 2, 1000);
-
static struct clk_smd_rpm *msm8996_clks[] = {
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
- [RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
- [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
- [RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
- [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_LN_BB_CLK] = &msm8992_ln_bb_clk,
- [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk,
- [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
- [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3,
- [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_2_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_2_cnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_MMAXI_CLK] = &clk_smd_rpm_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMAXI_A_CLK] = &clk_smd_rpm_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_AGGR1_NOC_CLK] = &clk_smd_rpm_branch_aggre1_noc_clk,
+ [RPM_SMD_AGGR1_NOC_A_CLK] = &clk_smd_rpm_branch_aggre1_noc_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &clk_smd_rpm_branch_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &clk_smd_rpm_branch_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_LN_BB_CLK] = &clk_smd_rpm_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &clk_smd_rpm_ln_bb_clk_a,
+ [RPM_SMD_DIV_CLK1] = &clk_smd_rpm_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &clk_smd_rpm_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_DIV_CLK3] = &clk_smd_rpm_div_clk3,
+ [RPM_SMD_DIV_A_CLK3] = &clk_smd_rpm_div_clk3_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
@@ -821,28 +901,27 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
.num_clks = ARRAY_SIZE(msm8996_clks),
};
-DEFINE_CLK_SMD_RPM(qcs404, bimc_gpu_clk, bimc_gpu_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8, 19200000);
-
static struct clk_smd_rpm *qcs404_clks[] = {
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_BIMC_GPU_CLK] = &qcs404_bimc_gpu_clk,
- [RPM_SMD_BIMC_GPU_A_CLK] = &qcs404_bimc_gpu_a_clk,
- [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
- [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_LN_BB_CLK] = &msm8992_ln_bb_clk,
- [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_PNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_BIMC_GPU_CLK] = &clk_smd_rpm_bimc_gpu_clk,
+ [RPM_SMD_BIMC_GPU_A_CLK] = &clk_smd_rpm_bimc_gpu_a_clk,
+ [RPM_SMD_QPIC_CLK] = &clk_smd_rpm_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &clk_smd_rpm_qpic_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_LN_BB_CLK] = &clk_smd_rpm_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &clk_smd_rpm_ln_bb_clk_a,
+ [RPM_SMD_LN_BB_CLK_PIN] = &clk_smd_rpm_ln_bb_clk_pin,
+ [RPM_SMD_LN_BB_A_CLK_PIN] = &clk_smd_rpm_ln_bb_clk_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
@@ -850,68 +929,59 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
.num_clks = ARRAY_SIZE(qcs404_clks),
};
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk3, ln_bb_clk3_a, 3, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin, 3, 19200000);
-DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
- QCOM_SMD_RPM_AGGR_CLK, 1);
-DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
- QCOM_SMD_RPM_AGGR_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6, 19200000);
-
static struct clk_smd_rpm *msm8998_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
- [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3,
- [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_LN_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_LN_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_LN_BB_CLK3] = &msm8998_ln_bb_clk3,
- [RPM_SMD_LN_BB_CLK3_A] = &msm8998_ln_bb_clk3_a,
- [RPM_SMD_LN_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_LN_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_LN_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_LN_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
- [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
- [RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
- [RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
- [RPM_SMD_AGGR1_NOC_CLK] = &msm8998_aggre1_noc_clk,
- [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8998_aggre1_noc_a_clk,
- [RPM_SMD_AGGR2_NOC_CLK] = &msm8998_aggre2_noc_clk,
- [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8998_aggre2_noc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_RF_CLK3] = &msm8998_rf_clk3,
- [RPM_SMD_RF_CLK3_A] = &msm8998_rf_clk3_a,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
- [RPM_SMD_RF_CLK3_PIN] = &msm8998_rf_clk3_pin,
- [RPM_SMD_RF_CLK3_A_PIN] = &msm8998_rf_clk3_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_2_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_2_cnoc_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_DIV_CLK1] = &clk_smd_rpm_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &clk_smd_rpm_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_DIV_CLK3] = &clk_smd_rpm_div_clk3,
+ [RPM_SMD_DIV_A_CLK3] = &clk_smd_rpm_div_clk3_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_LN_BB_CLK1] = &clk_smd_rpm_ln_bb_clk1,
+ [RPM_SMD_LN_BB_CLK1_A] = &clk_smd_rpm_ln_bb_clk1_a,
+ [RPM_SMD_LN_BB_CLK2] = &clk_smd_rpm_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &clk_smd_rpm_ln_bb_clk2_a,
+ [RPM_SMD_LN_BB_CLK3] = &clk_smd_rpm_ln_bb_clk3,
+ [RPM_SMD_LN_BB_CLK3_A] = &clk_smd_rpm_ln_bb_clk3_a,
+ [RPM_SMD_LN_BB_CLK1_PIN] = &clk_smd_rpm_ln_bb_clk1_pin,
+ [RPM_SMD_LN_BB_CLK1_A_PIN] = &clk_smd_rpm_ln_bb_clk1_a_pin,
+ [RPM_SMD_LN_BB_CLK2_PIN] = &clk_smd_rpm_ln_bb_clk2_pin,
+ [RPM_SMD_LN_BB_CLK2_A_PIN] = &clk_smd_rpm_ln_bb_clk2_a_pin,
+ [RPM_SMD_LN_BB_CLK3_PIN] = &clk_smd_rpm_ln_bb_clk3_pin,
+ [RPM_SMD_LN_BB_CLK3_A_PIN] = &clk_smd_rpm_ln_bb_clk3_a_pin,
+ [RPM_SMD_MMAXI_CLK] = &clk_smd_rpm_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMAXI_A_CLK] = &clk_smd_rpm_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_AGGR1_NOC_CLK] = &clk_smd_rpm_aggre1_noc_clk,
+ [RPM_SMD_AGGR1_NOC_A_CLK] = &clk_smd_rpm_aggre1_noc_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &clk_smd_rpm_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &clk_smd_rpm_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_RF_CLK3] = &clk_smd_rpm_rf_clk3,
+ [RPM_SMD_RF_CLK3_A] = &clk_smd_rpm_rf_clk3_a,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
+ [RPM_SMD_RF_CLK3_PIN] = &clk_smd_rpm_rf_clk3_pin,
+ [RPM_SMD_RF_CLK3_A_PIN] = &clk_smd_rpm_rf_clk3_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
@@ -920,44 +990,44 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
};
static struct clk_smd_rpm *sdm660_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk,
- [RPM_SMD_CNOC_PERIPH_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_CNOC_PERIPH_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_MMSSNOC_AXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
- [RPM_SMD_MMSSNOC_AXI_CLK_A] = &msm8996_mmssnoc_axi_rpm_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_AGGR2_NOC_CLK] = &msm8998_aggre2_noc_clk,
- [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8998_aggre2_noc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
- [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
- [RPM_SMD_LN_BB_CLK] = &msm8916_bb_clk1,
- [RPM_SMD_LN_BB_A_CLK] = &msm8916_bb_clk1_a,
- [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_LN_BB_CLK3] = &msm8998_ln_bb_clk3,
- [RPM_SMD_LN_BB_CLK3_A] = &msm8998_ln_bb_clk3_a,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_LN_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_LN_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_LN_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_LN_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
- [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_2_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_2_cnoc_a_clk,
+ [RPM_SMD_CNOC_PERIPH_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_CNOC_PERIPH_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_MMSSNOC_AXI_CLK] = &clk_smd_rpm_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMSSNOC_AXI_CLK_A] = &clk_smd_rpm_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &clk_smd_rpm_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &clk_smd_rpm_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_DIV_CLK1] = &clk_smd_rpm_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &clk_smd_rpm_div_clk1_a,
+ [RPM_SMD_LN_BB_CLK] = &clk_smd_rpm_ln_bb_clk1,
+ [RPM_SMD_LN_BB_A_CLK] = &clk_smd_rpm_ln_bb_clk1_a,
+ [RPM_SMD_LN_BB_CLK2] = &clk_smd_rpm_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &clk_smd_rpm_ln_bb_clk2_a,
+ [RPM_SMD_LN_BB_CLK3] = &clk_smd_rpm_ln_bb_clk3,
+ [RPM_SMD_LN_BB_CLK3_A] = &clk_smd_rpm_ln_bb_clk3_a,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_LN_BB_CLK1_PIN] = &clk_smd_rpm_ln_bb_clk1_pin,
+ [RPM_SMD_LN_BB_CLK1_A_PIN] = &clk_smd_rpm_ln_bb_clk1_a_pin,
+ [RPM_SMD_LN_BB_CLK2_PIN] = &clk_smd_rpm_ln_bb_clk2_pin,
+ [RPM_SMD_LN_BB_CLK2_A_PIN] = &clk_smd_rpm_ln_bb_clk2_a_pin,
+ [RPM_SMD_LN_BB_CLK3_PIN] = &clk_smd_rpm_ln_bb_clk3_pin,
+ [RPM_SMD_LN_BB_CLK3_A_PIN] = &clk_smd_rpm_ln_bb_clk3_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
@@ -966,20 +1036,20 @@ static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
};
static struct clk_smd_rpm *mdm9607_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
- [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QPIC_CLK] = &clk_smd_rpm_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &clk_smd_rpm_qpic_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_mdm9607 = {
@@ -988,34 +1058,34 @@ static const struct rpm_smd_clk_desc rpm_clk_mdm9607 = {
};
static struct clk_smd_rpm *msm8953_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk,
- [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_RF_CLK3] = &msm8992_ln_bb_clk,
- [RPM_SMD_RF_CLK3_A] = &msm8992_ln_bb_a_clk,
- [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
- [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &clk_smd_rpm_bus_0_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &clk_smd_rpm_bus_0_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_1_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_1_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_SYSMMNOC_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_clk,
+ [RPM_SMD_SYSMMNOC_A_CLK] = &clk_smd_rpm_bus_2_sysmmnoc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_RF_CLK3] = &clk_smd_rpm_ln_bb_clk,
+ [RPM_SMD_RF_CLK3_A] = &clk_smd_rpm_ln_bb_clk_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8953 = {
@@ -1023,54 +1093,41 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8953 = {
.num_clks = ARRAY_SIZE(msm8953_clks),
};
-/* SM6125 */
-DEFINE_CLK_SMD_RPM(sm6125, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
-DEFINE_CLK_SMD_RPM(sm6125, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
-DEFINE_CLK_SMD_RPM_BRANCH(sm6125, qdss_clk, qdss_a_clk,
- QCOM_SMD_RPM_MISC_CLK, 1, 19200000);
-DEFINE_CLK_SMD_RPM(sm6125, qup_clk, qup_a_clk, QCOM_SMD_RPM_QUP_CLK, 0);
-DEFINE_CLK_SMD_RPM(sm6125, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMAXI_CLK, 0);
-DEFINE_CLK_SMD_RPM(sm6125, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMAXI_CLK, 1);
-DEFINE_CLK_SMD_RPM(sm6125, snoc_periph_clk, snoc_periph_a_clk,
- QCOM_SMD_RPM_BUS_CLK, 0);
-DEFINE_CLK_SMD_RPM(sm6125, snoc_lpass_clk, snoc_lpass_a_clk,
- QCOM_SMD_RPM_BUS_CLK, 5);
-
static struct clk_smd_rpm *sm6125_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_LN_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_LN_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_LN_BB_CLK3] = &msm8998_ln_bb_clk3,
- [RPM_SMD_LN_BB_CLK3_A] = &msm8998_ln_bb_clk3_a,
- [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
- [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
- [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
- [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
- [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
- [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
- [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
- [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
- [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
- [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_2_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_2_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_branch_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_branch_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_1_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_1_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_LN_BB_CLK1] = &clk_smd_rpm_ln_bb_clk1,
+ [RPM_SMD_LN_BB_CLK1_A] = &clk_smd_rpm_ln_bb_clk1_a,
+ [RPM_SMD_LN_BB_CLK2] = &clk_smd_rpm_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &clk_smd_rpm_ln_bb_clk2_a,
+ [RPM_SMD_LN_BB_CLK3] = &clk_smd_rpm_ln_bb_clk3,
+ [RPM_SMD_LN_BB_CLK3_A] = &clk_smd_rpm_ln_bb_clk3_a,
+ [RPM_SMD_QUP_CLK] = &clk_smd_rpm_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &clk_smd_rpm_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &clk_smd_rpm_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &clk_smd_rpm_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &clk_smd_rpm_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &clk_smd_rpm_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &clk_smd_rpm_bus_0_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &clk_smd_rpm_bus_0_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_a_clk,
};
static const struct rpm_smd_clk_desc rpm_clk_sm6125 = {
@@ -1080,38 +1137,38 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6125 = {
/* SM6115 */
static struct clk_smd_rpm *sm6115_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
- [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
- [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
- [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
- [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
- [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
- [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
- [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
- [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
- [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_2_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_2_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_branch_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_branch_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &clk_smd_rpm_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &clk_smd_rpm_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_1_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_1_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_QUP_CLK] = &clk_smd_rpm_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &clk_smd_rpm_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &clk_smd_rpm_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &clk_smd_rpm_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &clk_smd_rpm_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &clk_smd_rpm_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &clk_smd_rpm_bus_0_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &clk_smd_rpm_bus_0_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_a_clk,
+ [RPM_SMD_RF_CLK1_PIN] = &clk_smd_rpm_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &clk_smd_rpm_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &clk_smd_rpm_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &clk_smd_rpm_rf_clk2_a_pin,
};
static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
@@ -1119,42 +1176,36 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
.num_clks = ARRAY_SIZE(sm6115_clks),
};
-/* SM6375 */
-DEFINE_CLK_SMD_RPM(sm6375, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 0);
-DEFINE_CLK_SMD_RPM(sm6375, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 1);
-DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
-DEFINE_CLK_SMD_RPM_BRANCH(sm6375, bimc_freq_log, bimc_freq_log_a, QCOM_SMD_RPM_MISC_CLK, 4, 1);
static struct clk_smd_rpm *sm6375_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
- [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
- [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
- [RPM_SMD_MMRT_CLK] = &sm6375_mmrt_clk,
- [RPM_SMD_MMRT_A_CLK] = &sm6375_mmrt_a_clk,
- [RPM_SMD_MMNRT_CLK] = &sm6375_mmnrt_clk,
- [RPM_SMD_MMNRT_A_CLK] = &sm6375_mmnrt_a_clk,
- [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
- [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
- [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
- [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_HWKM_CLK] = &qcm2290_hwkm_clk,
- [RPM_SMD_HWKM_A_CLK] = &qcm2290_hwkm_a_clk,
- [RPM_SMD_PKA_CLK] = &qcm2290_pka_clk,
- [RPM_SMD_PKA_A_CLK] = &qcm2290_pka_a_clk,
- [RPM_SMD_BIMC_FREQ_LOG] = &sm6375_bimc_freq_log,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_2_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_2_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_branch_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_branch_qdss_a_clk,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_1_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_1_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_QUP_CLK] = &clk_smd_rpm_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &clk_smd_rpm_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &clk_smd_rpm_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &clk_smd_rpm_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &clk_smd_rpm_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &clk_smd_rpm_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &clk_smd_rpm_bus_0_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &clk_smd_rpm_bus_0_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_HWKM_CLK] = &clk_smd_rpm_hwkm_clk,
+ [RPM_SMD_HWKM_A_CLK] = &clk_smd_rpm_hwkm_a_clk,
+ [RPM_SMD_PKA_CLK] = &clk_smd_rpm_pka_clk,
+ [RPM_SMD_PKA_A_CLK] = &clk_smd_rpm_pka_a_clk,
+ [RPM_SMD_BIMC_FREQ_LOG] = &clk_smd_rpm_branch_bimc_freq_log,
};
static const struct rpm_smd_clk_desc rpm_clk_sm6375 = {
@@ -1162,55 +1213,45 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6375 = {
.num_clks = ARRAY_SIZE(sm6375_clks),
};
-/* QCM2290 */
-DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, ln_bb_clk2, ln_bb_clk2_a, 0x2, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, rf_clk3, rf_clk3_a, 6, 38400000);
-
-DEFINE_CLK_SMD_RPM(qcm2290, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcm2290, cpuss_gnoc_clk, cpuss_gnoc_a_clk,
- QCOM_SMD_RPM_MEM_CLK, 1);
-DEFINE_CLK_SMD_RPM(qcm2290, bimc_gpu_clk, bimc_gpu_a_clk,
- QCOM_SMD_RPM_MEM_CLK, 2);
-
static struct clk_smd_rpm *qcm2290_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
- [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
- [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
- [RPM_SMD_LN_BB_CLK2] = &qcm2290_ln_bb_clk2,
- [RPM_SMD_LN_BB_CLK2_A] = &qcm2290_ln_bb_clk2_a,
- [RPM_SMD_RF_CLK3] = &qcm2290_rf_clk3,
- [RPM_SMD_RF_CLK3_A] = &qcm2290_rf_clk3_a,
- [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
- [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
- [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
- [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
- [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
- [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
- [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
- [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
- [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
- [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
- [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
- [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
- [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
- [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
- [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
- [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
- [RPM_SMD_QPIC_CLK] = &qcm2290_qpic_clk,
- [RPM_SMD_QPIC_CLK_A] = &qcm2290_qpic_a_clk,
- [RPM_SMD_HWKM_CLK] = &qcm2290_hwkm_clk,
- [RPM_SMD_HWKM_A_CLK] = &qcm2290_hwkm_a_clk,
- [RPM_SMD_PKA_CLK] = &qcm2290_pka_clk,
- [RPM_SMD_PKA_A_CLK] = &qcm2290_pka_a_clk,
- [RPM_SMD_BIMC_GPU_CLK] = &qcm2290_bimc_gpu_clk,
- [RPM_SMD_BIMC_GPU_A_CLK] = &qcm2290_bimc_gpu_a_clk,
- [RPM_SMD_CPUSS_GNOC_CLK] = &qcm2290_cpuss_gnoc_clk,
- [RPM_SMD_CPUSS_GNOC_A_CLK] = &qcm2290_cpuss_gnoc_a_clk,
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &clk_smd_rpm_bus_2_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &clk_smd_rpm_bus_2_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &clk_smd_rpm_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &clk_smd_rpm_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_branch_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_branch_qdss_a_clk,
+ [RPM_SMD_LN_BB_CLK2] = &clk_smd_rpm_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &clk_smd_rpm_ln_bb_clk2_a,
+ [RPM_SMD_RF_CLK3] = &clk_smd_rpm_38m4_rf_clk3,
+ [RPM_SMD_RF_CLK3_A] = &clk_smd_rpm_38m4_rf_clk3_a,
+ [RPM_SMD_CNOC_CLK] = &clk_smd_rpm_bus_1_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &clk_smd_rpm_bus_1_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_QUP_CLK] = &clk_smd_rpm_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &clk_smd_rpm_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &clk_smd_rpm_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &clk_smd_rpm_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &clk_smd_rpm_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &clk_smd_rpm_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &clk_smd_rpm_bus_0_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &clk_smd_rpm_bus_0_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &clk_smd_rpm_bus_5_snoc_lpass_a_clk,
+ [RPM_SMD_CE1_CLK] = &clk_smd_rpm_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &clk_smd_rpm_ce1_a_clk,
+ [RPM_SMD_QPIC_CLK] = &clk_smd_rpm_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &clk_smd_rpm_qpic_a_clk,
+ [RPM_SMD_HWKM_CLK] = &clk_smd_rpm_hwkm_clk,
+ [RPM_SMD_HWKM_A_CLK] = &clk_smd_rpm_hwkm_a_clk,
+ [RPM_SMD_PKA_CLK] = &clk_smd_rpm_pka_clk,
+ [RPM_SMD_PKA_A_CLK] = &clk_smd_rpm_pka_a_clk,
+ [RPM_SMD_BIMC_GPU_CLK] = &clk_smd_rpm_bimc_gpu_clk,
+ [RPM_SMD_BIMC_GPU_A_CLK] = &clk_smd_rpm_bimc_gpu_a_clk,
+ [RPM_SMD_CPUSS_GNOC_CLK] = &clk_smd_rpm_cpuss_gnoc_clk,
+ [RPM_SMD_CPUSS_GNOC_A_CLK] = &clk_smd_rpm_cpuss_gnoc_a_clk,
};
static const struct rpm_smd_clk_desc rpm_clk_qcm2290 = {
@@ -1223,6 +1264,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8226", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8909", .data = &rpm_clk_msm8909 },
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
+ { .compatible = "qcom,rpmcc-msm8917", .data = &rpm_clk_msm8917 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index f2cf55cee2fd..f846be285f51 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -204,7 +204,7 @@ static int spmi_pmic_clkdiv_probe(struct platform_device *pdev)
struct regmap *regmap;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
- const char *parent_name;
+ struct clk_parent_data parent_data = { .index = 0, };
int nclks, i, ret, cxo_hz;
char name[20];
u32 start;
@@ -246,14 +246,8 @@ static int spmi_pmic_clkdiv_probe(struct platform_device *pdev)
cxo_hz = clk_get_rate(cxo);
clk_put(cxo);
- parent_name = of_clk_get_parent_name(of_node, 0);
- if (!parent_name) {
- dev_err(dev, "missing parent clock\n");
- return -ENODEV;
- }
-
init.name = name;
- init.parent_names = &parent_name;
+ init.parent_data = &parent_data;
init.num_parents = 1;
init.ops = &clk_spmi_pmic_div_ops;
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 96b149365912..e9cfe41c0442 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -20,13 +20,13 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
P_BI_TCXO,
P_DISP_CC_PLL0_OUT_MAIN,
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
- P_DSI1_PHY_PLL_OUT_DSICLK,
P_GPLL0_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -71,7 +71,6 @@ static const struct parent_map disp_cc_parent_map_0[] = {
static const struct clk_parent_data disp_cc_parent_data_0[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "dsi0_phy_pll_out_byteclk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map disp_cc_parent_map_1[] = {
@@ -80,7 +79,6 @@ static const struct parent_map disp_cc_parent_map_1[] = {
static const struct clk_parent_data disp_cc_parent_data_1[] = {
{ .fw_name = "bi_tcxo" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map disp_cc_parent_map_2[] = {
@@ -91,7 +89,6 @@ static const struct parent_map disp_cc_parent_map_2[] = {
static const struct clk_parent_data disp_cc_parent_data_2[] = {
{ .fw_name = "bi_tcxo_ao" },
{ .fw_name = "gcc_disp_gpll0_div_clk_src" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map disp_cc_parent_map_3[] = {
@@ -104,20 +101,16 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &disp_cc_pll0.clkr.hw },
{ .fw_name = "gcc_disp_gpll0_clk_src" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map disp_cc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
- { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
};
static const struct clk_parent_data disp_cc_parent_data_4[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "dsi0_phy_pll_out_dsiclk" },
- { .fw_name = "dsi1_phy_pll_out_dsiclk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map disp_cc_parent_map_5[] = {
@@ -126,7 +119,6 @@ static const struct parent_map disp_cc_parent_map_5[] = {
static const struct clk_parent_data disp_cc_parent_data_5[] = {
{ .fw_name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
@@ -451,6 +443,10 @@ static struct clk_branch disp_cc_sleep_clk = {
},
};
+static const struct qcom_reset_map disp_cc_qcm2290_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
+};
+
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
.pd = {
@@ -500,6 +496,8 @@ static const struct qcom_cc_desc disp_cc_qcm2290_desc = {
.num_clks = ARRAY_SIZE(disp_cc_qcm2290_clocks),
.gdscs = disp_cc_qcm2290_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_qcm2290_gdscs),
+ .resets = disp_cc_qcm2290_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_qcm2290_resets),
};
static const struct of_device_id disp_cc_qcm2290_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 5d2ae297e741..9536bfc72a43 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -351,8 +351,8 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte0_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
@@ -365,8 +365,8 @@ static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dp_link_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 818bb8f4637c..1937edf23f21 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -466,8 +466,8 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_rot_clk",
- .parent_names = (const char *[]){
- "disp_cc_mdss_rot_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/dispcc-sm6125.c b/drivers/clk/qcom/dispcc-sm6125.c
index b921456a2e0d..87b27053ddb6 100644
--- a/drivers/clk/qcom/dispcc-sm6125.c
+++ b/drivers/clk/qcom/dispcc-sm6125.c
@@ -667,7 +667,7 @@ static const struct qcom_cc_desc disp_cc_sm6125_desc = {
};
static const struct of_device_id disp_cc_sm6125_match_table[] = {
- { .compatible = "qcom,dispcc-sm6125" },
+ { .compatible = "qcom,sm6125-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm6125_match_table);
diff --git a/drivers/clk/qcom/dispcc-sm6375.c b/drivers/clk/qcom/dispcc-sm6375.c
index 5ce9198ad611..caa1b90a5ff2 100644
--- a/drivers/clk/qcom/dispcc-sm6375.c
+++ b/drivers/clk/qcom/dispcc-sm6375.c
@@ -252,8 +252,8 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte0_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 382dbd8ba250..e17bb8b543b5 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -1251,19 +1251,12 @@ static const struct of_device_id disp_cc_sm8250_match_table[] = {
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
-static void disp_cc_sm8250_pm_runtime_disable(void *data)
-{
- pm_runtime_disable(data);
-}
-
static int disp_cc_sm8250_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
- pm_runtime_enable(&pdev->dev);
-
- ret = devm_add_action_or_reset(&pdev->dev, disp_cc_sm8250_pm_runtime_disable, &pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
return ret;
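
The dispcc-sm8250.c hunk above folds the open-coded pm_runtime_enable() plus devm_add_action_or_reset() cleanup into devm_pm_runtime_enable(), which queues the matching pm_runtime_disable() automatically when the device is unbound. A minimal probe sketch, assuming a generic platform driver (the function name is hypothetical):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Enables runtime PM and registers pm_runtime_disable() for unbind. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/* ... register clocks, resets and GDSCs as usual ... */
	return 0;
}
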
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index 0cd7ebe90301..adbfd30bfc96 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -694,8 +694,8 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte0_div_clk_src",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
@@ -708,8 +708,8 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte1_div_clk_src",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
@@ -722,8 +722,8 @@ static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_div_clk_src",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -737,8 +737,8 @@ static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_div_clk_src",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -752,8 +752,8 @@ static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_div_clk_src",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -767,8 +767,8 @@ static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_div_clk_src",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -784,8 +784,8 @@ static struct clk_branch disp_cc_mdss_ahb1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_ahb1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -802,8 +802,8 @@ static struct clk_branch disp_cc_mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -820,8 +820,8 @@ static struct clk_branch disp_cc_mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -838,8 +838,8 @@ static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte0_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -856,8 +856,8 @@ static struct clk_branch disp_cc_mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -874,8 +874,8 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_byte1_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -892,8 +892,8 @@ static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_aux_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -910,8 +910,8 @@ static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_crypto_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -928,8 +928,8 @@ static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -946,8 +946,8 @@ static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -964,8 +964,8 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -982,8 +982,8 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1000,8 +1000,8 @@ static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1018,8 +1018,8 @@ static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_aux_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1036,8 +1036,8 @@ static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_crypto_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1054,8 +1054,8 @@ static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1072,8 +1072,8 @@ static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1090,8 +1090,8 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1108,8 +1108,8 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1126,8 +1126,8 @@ static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1144,8 +1144,8 @@ static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_aux_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1162,8 +1162,8 @@ static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_crypto_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1180,8 +1180,8 @@ static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1198,8 +1198,8 @@ static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1216,8 +1216,8 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1234,8 +1234,8 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1252,8 +1252,8 @@ static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_aux_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1270,8 +1270,8 @@ static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_crypto_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1288,8 +1288,8 @@ static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1306,8 +1306,8 @@ static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_intf_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1324,8 +1324,8 @@ static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_pixel0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1342,8 +1342,8 @@ static struct clk_branch disp_cc_mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1360,8 +1360,8 @@ static struct clk_branch disp_cc_mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1378,8 +1378,8 @@ static struct clk_branch disp_cc_mdss_mdp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_mdp1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1396,8 +1396,8 @@ static struct clk_branch disp_cc_mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1414,8 +1414,8 @@ static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1432,8 +1432,8 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1450,8 +1450,8 @@ static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_non_gdsc_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1468,8 +1468,8 @@ static struct clk_branch disp_cc_mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1486,8 +1486,8 @@ static struct clk_branch disp_cc_mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1504,8 +1504,8 @@ static struct clk_branch disp_cc_mdss_rot1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_rot1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1522,8 +1522,8 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_rot_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1540,8 +1540,8 @@ static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_rscc_ahb_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1558,8 +1558,8 @@ static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_rscc_vsync_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1576,8 +1576,8 @@ static struct clk_branch disp_cc_mdss_vsync1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_vsync1_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1594,8 +1594,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1612,8 +1612,8 @@ static struct clk_branch disp_cc_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "disp_cc_sleep_clk",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &disp_cc_sleep_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_sleep_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1762,19 +1762,12 @@ static const struct of_device_id disp_cc_sm8450_match_table[] = {
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8450_match_table);
-static void disp_cc_sm8450_pm_runtime_disable(void *data)
-{
- pm_runtime_disable(data);
-}
-
static int disp_cc_sm8450_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
- pm_runtime_enable(&pdev->dev);
-
- ret = devm_add_action_or_reset(&pdev->dev, disp_cc_sm8450_pm_runtime_disable, &pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
new file mode 100644
index 000000000000..1e5a11081860
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -0,0 +1,1807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8550-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
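+/* Programmed in disp_cc_sm8550_probe() to enable MDP clock gating */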
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
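+/*
+ * Both the PLL1 MAIN and EVEN selections point at disp_cc_pll1 itself; the
+ * even output is not registered as a separate clock in this driver.
+ */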
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
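+/*
+ * 19.2 MHz comes straight from the XO; 37.5 MHz and 75 MHz are disp_cc_pll1
+ * (~600 MHz with the l/alpha programming above, assuming a 19.2 MHz
+ * reference) divided by 16 and 8.
+ */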
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x82e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
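+/*
+ * The link RCG simply follows the DP PHY PLL link clock (divider 1); the
+ * four rates presumably correspond to RBR/HBR/HBR2/HBR3.
+ */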
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+ F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x818c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8220,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81ec,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x8284,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x8238,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x8254,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x82d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x82b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x829c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x8158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(172000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(375000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x80a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8120,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x813c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8188,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x821c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x8250,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x82cc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_clk = {
+ .halt_reg = 0xe058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_accu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0xe074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm8550_clocks[] = {
+ [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8550_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8550_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8550_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8550_desc = {
+ .config = &disp_cc_sm8550_regmap_config,
+ .clks = disp_cc_sm8550_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8550_clocks),
+ .resets = disp_cc_sm8550_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8550_resets),
+ .gdscs = disp_cc_sm8550_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8550_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sm8550-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8550_match_table);
+
+static int disp_cc_sm8550_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8550_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /*
+ * Keep clocks always enabled:
+ * disp_cc_xo_clk
+ */
+ regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm8550_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8550_driver = {
+ .probe = disp_cc_sm8550_probe,
+ .driver = {
+ .name = "disp_cc-sm8550",
+ .of_match_table = disp_cc_sm8550_match_table,
+ },
+};
+
+static int __init disp_cc_sm8550_init(void)
+{
+ return platform_driver_register(&disp_cc_sm8550_driver);
+}
+subsys_initcall(disp_cc_sm8550_init);
+
+static void __exit disp_cc_sm8550_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sm8550_driver);
+}
+module_exit(disp_cc_sm8550_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8550 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index ab088d702d7c..7085d2ccae49 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -36,68 +36,6 @@ enum {
P_SLEEP_CLK,
};

-static const struct parent_map gcc_xo_gpll0_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 }
-};
-
-static const char * const gcc_xo_gpll0[] = {
- "xo",
- "gpll0_vote",
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL4, 5 }
-};
-
-static const char * const gcc_xo_gpll0_gpll4[] = {
- "xo",
- "gpll0_vote",
- "gpll4_vote",
-};
-
-static const struct parent_map gcc_xo_sata_asic0_map[] = {
- { P_XO, 0 },
- { P_SATA_ASIC0_CLK, 2 }
-};
-
-static const char * const gcc_xo_sata_asic0[] = {
- "xo",
- "sata_asic0_clk",
-};
-
-static const struct parent_map gcc_xo_sata_rx_map[] = {
- { P_XO, 0 },
- { P_SATA_RX_CLK, 2}
-};
-
-static const char * const gcc_xo_sata_rx[] = {
- "xo",
- "sata_rx_clk",
-};
-
-static const struct parent_map gcc_xo_pcie_map[] = {
- { P_XO, 0 },
- { P_PCIE_0_1_PIPE_CLK, 2 }
-};
-
-static const char * const gcc_xo_pcie[] = {
- "xo",
- "pcie_pipe",
-};
-
-static const struct parent_map gcc_xo_pcie_sleep_map[] = {
- { P_XO, 0 },
- { P_SLEEP_CLK, 6 }
-};
-
-static const char * const gcc_xo_pcie_sleep[] = {
- "xo",
- "sleep_clk_src",
-};
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
@@ -108,7 +46,9 @@ static struct clk_pll gpll0 = {
.status_bit = 17,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -119,48 +59,14 @@ static struct clk_regmap gpll0_vote = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0_vote",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
};

-static struct clk_rcg2 config_noc_clk_src = {
- .cmd_rcgr = 0x0150,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "config_noc_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 periph_noc_clk_src = {
- .cmd_rcgr = 0x0190,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "periph_noc_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 system_noc_clk_src = {
- .cmd_rcgr = 0x0120,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "system_noc_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct clk_pll gpll1 = {
.l_reg = 0x0044,
.m_reg = 0x0048,
@@ -171,7 +77,9 @@ static struct clk_pll gpll1 = {
.status_bit = 17,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -182,7 +90,9 @@ static struct clk_regmap gpll1_vote = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gpll1_vote",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -198,7 +108,9 @@ static struct clk_pll gpll4 = {
.status_bit = 17,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -209,12 +121,112 @@ static struct clk_regmap gpll4_vote = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gpll4_vote",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
};

+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_xo_sata_asic0_map[] = {
+ { P_XO, 0 },
+ { P_SATA_ASIC0_CLK, 2 }
+};
+
+static const struct clk_parent_data gcc_xo_sata_asic0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "sata_asic0_clk", .name = "sata_asic0_clk" },
+};
+
+static const struct parent_map gcc_xo_sata_rx_map[] = {
+ { P_XO, 0 },
+ { P_SATA_RX_CLK, 2}
+};
+
+static const struct clk_parent_data gcc_xo_sata_rx[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "sata_rx_clk", .name = "sata_rx_clk" },
+};
+
+static const struct parent_map gcc_xo_pcie_map[] = {
+ { P_XO, 0 },
+ { P_PCIE_0_1_PIPE_CLK, 2 }
+};
+
+static const struct clk_parent_data gcc_xo_pcie[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "pcie_pipe", .name = "pcie_pipe" },
+};
+
+static const struct parent_map gcc_xo_pcie_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_pcie_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+ .cmd_rcgr = 0x0150,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "config_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+ .cmd_rcgr = 0x0190,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "periph_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+ .cmd_rcgr = 0x0120,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_ufs_axi_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -230,8 +242,8 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.freq_tbl = ftbl_gcc_ufs_axi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -249,8 +261,8 @@ static struct clk_rcg2 usb30_master_clk_src = {
.freq_tbl = ftbl_gcc_usb30_master_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -268,8 +280,26 @@ static struct clk_rcg2 usb30_sec_master_clk_src = {
.freq_tbl = ftbl_gcc_usb30_sec_master_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_sec_master_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_mock_utmi_clk[] = {
+ F(125000000, P_GPLL0, 1, 5, 24),
+ { }
+};
+
+static struct clk_rcg2 usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1be8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb30_sec_mock_utmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -281,8 +311,8 @@ static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_sec_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_sec_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -298,8 +328,8 @@ static struct clk_branch gcc_usb30_sec_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -321,8 +351,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -346,8 +376,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -359,8 +389,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -373,8 +403,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -386,8 +416,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -400,8 +430,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -413,8 +443,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -427,8 +457,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -440,8 +470,8 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -454,8 +484,8 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -467,8 +497,8 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -481,8 +511,8 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -514,8 +544,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -528,8 +558,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -542,8 +572,8 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -556,8 +586,8 @@ static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -570,8 +600,8 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart5_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -584,8 +614,8 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart6_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -597,8 +627,8 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -611,8 +641,8 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -624,8 +654,8 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -638,8 +668,8 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -651,8 +681,8 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -665,8 +695,8 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -678,8 +708,8 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -692,8 +722,8 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -705,8 +735,8 @@ static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -719,8 +749,8 @@ static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -732,8 +762,8 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -746,8 +776,8 @@ static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -760,8 +790,8 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -774,8 +804,8 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -788,8 +818,8 @@ static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -802,8 +832,8 @@ static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -816,8 +846,8 @@ static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart5_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -830,8 +860,8 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart6_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -851,8 +881,8 @@ static struct clk_rcg2 ce1_clk_src = {
.freq_tbl = ftbl_gcc_ce1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ce1_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -872,8 +902,8 @@ static struct clk_rcg2 ce2_clk_src = {
.freq_tbl = ftbl_gcc_ce2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ce2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -893,8 +923,8 @@ static struct clk_rcg2 ce3_clk_src = {
.freq_tbl = ftbl_gcc_ce3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ce3_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -914,8 +944,8 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gcc_gp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -928,8 +958,8 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gcc_gp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -942,8 +972,8 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gcc_gp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -961,8 +991,8 @@ static struct clk_rcg2 pcie_0_aux_clk_src = {
.freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_aux_clk_src",
- .parent_names = gcc_xo_pcie_sleep,
- .num_parents = 2,
+ .parent_data = gcc_xo_pcie_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_pcie_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -975,8 +1005,8 @@ static struct clk_rcg2 pcie_1_aux_clk_src = {
.freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_1_aux_clk_src",
- .parent_names = gcc_xo_pcie_sleep,
- .num_parents = 2,
+ .parent_data = gcc_xo_pcie_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_pcie_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -994,8 +1024,8 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = {
.freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_pipe_clk_src",
- .parent_names = gcc_xo_pcie,
- .num_parents = 2,
+ .parent_data = gcc_xo_pcie,
+ .num_parents = ARRAY_SIZE(gcc_xo_pcie),
.ops = &clk_rcg2_ops,
},
};
@@ -1007,8 +1037,8 @@ static struct clk_rcg2 pcie_1_pipe_clk_src = {
.freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_1_pipe_clk_src",
- .parent_names = gcc_xo_pcie,
- .num_parents = 2,
+ .parent_data = gcc_xo_pcie,
+ .num_parents = ARRAY_SIZE(gcc_xo_pcie),
.ops = &clk_rcg2_ops,
},
};
@@ -1025,8 +1055,8 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_gcc_pdm2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1045,8 +1075,8 @@ static struct clk_rcg2 sata_asic0_clk_src = {
.freq_tbl = ftbl_gcc_sata_asic0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sata_asic0_clk_src",
- .parent_names = gcc_xo_sata_asic0,
- .num_parents = 2,
+ .parent_data = gcc_xo_sata_asic0,
+ .num_parents = ARRAY_SIZE(gcc_xo_sata_asic0),
.ops = &clk_rcg2_ops,
},
};
@@ -1065,8 +1095,8 @@ static struct clk_rcg2 sata_pmalive_clk_src = {
.freq_tbl = ftbl_gcc_sata_pmalive_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sata_pmalive_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1085,8 +1115,8 @@ static struct clk_rcg2 sata_rx_clk_src = {
.freq_tbl = ftbl_gcc_sata_rx_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sata_rx_clk_src",
- .parent_names = gcc_xo_sata_rx,
- .num_parents = 2,
+ .parent_data = gcc_xo_sata_rx,
+ .num_parents = ARRAY_SIZE(gcc_xo_sata_rx),
.ops = &clk_rcg2_ops,
},
};
@@ -1103,8 +1133,8 @@ static struct clk_rcg2 sata_rx_oob_clk_src = {
.freq_tbl = ftbl_gcc_sata_rx_oob_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sata_rx_oob_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1130,8 +1160,8 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_xo_gpll0_gpll4,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1144,8 +1174,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1158,8 +1188,8 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1172,8 +1202,8 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1191,8 +1221,8 @@ static struct clk_rcg2 tsif_ref_clk_src = {
.freq_tbl = ftbl_gcc_tsif_ref_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1209,26 +1239,8 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_gcc_usb30_sec_mock_utmi_clk[] = {
- F(125000000, P_GPLL0, 1, 5, 24),
- { }
-};
-
-static struct clk_rcg2 usb30_sec_mock_utmi_clk_src = {
- .cmd_rcgr = 0x1be8,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_gcc_usb30_sec_mock_utmi_clk,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_sec_mock_utmi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1245,8 +1257,8 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.freq_tbl = ftbl_gcc_usb_hs_system_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1268,9 +1280,9 @@ static struct clk_rcg2 usb_hsic_clk_src = {
.freq_tbl = ftbl_gcc_usb_hsic_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hsic_clk_src",
- .parent_names = (const char *[]){
- "xo",
- "gpll1_vote",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
},
.num_parents = 2,
.ops = &clk_rcg2_ops,
@@ -1290,9 +1302,9 @@ static struct clk_rcg2 usb_hsic_ahb_clk_src = {
.freq_tbl = ftbl_gcc_usb_hsic_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hsic_ahb_clk_src",
- .parent_names = (const char *[]){
- "xo",
- "gpll1_vote",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
},
.num_parents = 2,
.ops = &clk_rcg2_ops,
@@ -1311,29 +1323,12 @@ static struct clk_rcg2 usb_hsic_io_cal_clk_src = {
.freq_tbl = ftbl_gcc_usb_hsic_io_cal_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hsic_io_cal_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 1,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};

-static struct clk_branch gcc_usb_hsic_mock_utmi_clk = {
- .halt_reg = 0x1f14,
- .clkr = {
- .enable_reg = 0x1f14,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb_hsic_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb_hsic_mock_utmi_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_gcc_usb_hsic_mock_utmi_clk[] = {
F(60000000, P_GPLL0, 10, 0, 0),
{ }
@@ -1346,12 +1341,29 @@ static struct clk_rcg2 usb_hsic_mock_utmi_clk_src = {
.freq_tbl = ftbl_gcc_usb_hsic_mock_utmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hsic_mock_utmi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 1,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};

+static struct clk_branch gcc_usb_hsic_mock_utmi_clk = {
+ .halt_reg = 0x1f14,
+ .clkr = {
+ .enable_reg = 0x1f14,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static const struct freq_tbl ftbl_gcc_usb_hsic_system_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
{ }
@@ -1364,12 +1376,25 @@ static struct clk_rcg2 usb_hsic_system_clk_src = {
.freq_tbl = ftbl_gcc_usb_hsic_system_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hsic_system_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};

+static struct clk_regmap gcc_mmss_gpll0_clk_src = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_gpll0_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ },
+};
+
static struct clk_branch gcc_bam_dma_ahb_clk = {
.halt_reg = 0x0d44,
.halt_check = BRANCH_HALT_VOTED,
@@ -1378,8 +1403,8 @@ static struct clk_branch gcc_bam_dma_ahb_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_bam_dma_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1395,8 +1420,8 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1411,8 +1436,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1428,8 +1453,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1445,8 +1470,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1462,8 +1487,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1479,8 +1504,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1496,8 +1521,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1513,8 +1538,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1530,8 +1555,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1547,8 +1572,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1564,8 +1589,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1581,8 +1606,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1598,8 +1623,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1615,8 +1640,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1632,8 +1657,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1649,8 +1674,8 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1666,8 +1691,8 @@ static struct clk_branch gcc_blsp1_uart4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart4_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart4_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1683,8 +1708,8 @@ static struct clk_branch gcc_blsp1_uart5_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart5_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart5_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart5_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1700,8 +1725,8 @@ static struct clk_branch gcc_blsp1_uart6_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart6_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart6_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart6_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1718,8 +1743,8 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1734,8 +1759,8 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1751,8 +1776,8 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1768,8 +1793,8 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1785,8 +1810,8 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1802,8 +1827,8 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1819,8 +1844,8 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1836,8 +1861,8 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1853,8 +1878,8 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1870,8 +1895,8 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1887,8 +1912,8 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1904,8 +1929,8 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1921,8 +1946,8 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1938,8 +1963,8 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1955,8 +1980,8 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1972,8 +1997,8 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1989,8 +2014,8 @@ static struct clk_branch gcc_blsp2_uart4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart4_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart4_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2006,8 +2031,8 @@ static struct clk_branch gcc_blsp2_uart5_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart5_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart5_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart5_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2023,8 +2048,8 @@ static struct clk_branch gcc_blsp2_uart6_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart6_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart6_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart6_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2041,8 +2066,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2058,8 +2083,8 @@ static struct clk_branch gcc_ce1_ahb_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce1_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2075,8 +2100,8 @@ static struct clk_branch gcc_ce1_axi_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce1_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2092,8 +2117,8 @@ static struct clk_branch gcc_ce1_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce1_clk",
- .parent_names = (const char *[]){
- "ce1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ce1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2110,8 +2135,8 @@ static struct clk_branch gcc_ce2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce2_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2127,8 +2152,8 @@ static struct clk_branch gcc_ce2_axi_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce2_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2144,8 +2169,8 @@ static struct clk_branch gcc_ce2_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce2_clk",
- .parent_names = (const char *[]){
- "ce2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ce2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2162,8 +2187,8 @@ static struct clk_branch gcc_ce3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce3_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2179,8 +2204,8 @@ static struct clk_branch gcc_ce3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce3_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2196,8 +2221,8 @@ static struct clk_branch gcc_ce3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ce3_clk",
- .parent_names = (const char *[]){
- "ce3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ce3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2213,8 +2238,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2230,8 +2255,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2247,8 +2272,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2264,8 +2289,8 @@ static struct clk_branch gcc_ocmem_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ocmem_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2280,8 +2305,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_names = (const char *[]){
- "pcie_0_aux_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcie_0_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2297,8 +2322,8 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2314,8 +2339,8 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2331,8 +2356,8 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_names = (const char *[]){
- "pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcie_0_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2348,8 +2373,8 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2365,8 +2390,8 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk",
- .parent_names = (const char *[]){
- "pcie_1_aux_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcie_1_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2382,8 +2407,8 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2399,8 +2424,8 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2416,8 +2441,8 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .parent_names = (const char *[]){
- "pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcie_1_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2433,8 +2458,8 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2450,8 +2475,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2467,8 +2492,8 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2483,8 +2508,8 @@ static struct clk_branch gcc_periph_noc_usb_hsic_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_periph_noc_usb_hsic_ahb_clk",
- .parent_names = (const char *[]){
- "usb_hsic_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2501,8 +2526,8 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2517,8 +2542,8 @@ static struct clk_branch gcc_sata_asic0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sata_asic0_clk",
- .parent_names = (const char *[]){
- "sata_asic0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_asic0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2534,8 +2559,8 @@ static struct clk_branch gcc_sata_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sata_axi_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2551,8 +2576,8 @@ static struct clk_branch gcc_sata_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sata_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2568,8 +2593,8 @@ static struct clk_branch gcc_sata_pmalive_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sata_pmalive_clk",
- .parent_names = (const char *[]){
- "sata_pmalive_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_pmalive_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2585,8 +2610,8 @@ static struct clk_branch gcc_sata_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sata_rx_clk",
- .parent_names = (const char *[]){
- "sata_rx_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2602,8 +2627,8 @@ static struct clk_branch gcc_sata_rx_oob_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sata_rx_oob_clk",
- .parent_names = (const char *[]){
- "sata_rx_oob_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_rx_oob_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2619,8 +2644,8 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2635,8 +2660,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]){
- "sdcc1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2652,8 +2677,8 @@ static struct clk_branch gcc_sdcc1_cdccal_ff_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_cdccal_ff_clk",
- .parent_names = (const char *[]){
- "xo"
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" }
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2668,8 +2693,8 @@ static struct clk_branch gcc_sdcc1_cdccal_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_cdccal_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src"
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "sleep_clk", .name = "sleep_clk" }
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2684,8 +2709,8 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2700,8 +2725,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2717,8 +2742,8 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2733,8 +2758,8 @@ static struct clk_branch gcc_sdcc3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_apps_clk",
- .parent_names = (const char *[]){
- "sdcc3_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2750,8 +2775,8 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2766,8 +2791,8 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_names = (const char *[]){
- "sdcc4_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2783,8 +2808,8 @@ static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2800,8 +2825,8 @@ static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2817,8 +2842,8 @@ static struct clk_branch gcc_sys_noc_usb3_sec_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_usb3_sec_axi_clk",
- .parent_names = (const char *[]){
- "usb30_sec_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_sec_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2834,8 +2859,8 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2850,8 +2875,8 @@ static struct clk_branch gcc_tsif_inactivity_timers_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_inactivity_timers_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2867,8 +2892,8 @@ static struct clk_branch gcc_tsif_ref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk",
- .parent_names = (const char *[]){
- "tsif_ref_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &tsif_ref_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2884,8 +2909,8 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
- .parent_names = (const char *[]){
- "config_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &config_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2901,8 +2926,8 @@ static struct clk_branch gcc_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2918,8 +2943,8 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_cfg_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2935,8 +2960,8 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
- .parent_names = (const char *[]){
- "ufs_rx_symbol_0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "ufs_rx_symbol_0_clk_src", .name = "ufs_rx_symbol_0_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2952,8 +2977,8 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
- .parent_names = (const char *[]){
- "ufs_rx_symbol_1_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "ufs_rx_symbol_1_clk_src", .name = "ufs_rx_symbol_1_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2969,8 +2994,8 @@ static struct clk_branch gcc_ufs_tx_cfg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_cfg_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2986,8 +3011,8 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
- .parent_names = (const char *[]){
- "ufs_tx_symbol_0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "ufs_tx_symbol_0_clk_src", .name = "ufs_tx_symbol_0_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3003,8 +3028,8 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_1_clk",
- .parent_names = (const char *[]){
- "ufs_tx_symbol_1_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "ufs_tx_symbol_1_clk_src", .name = "ufs_tx_symbol_1_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3020,8 +3045,8 @@ static struct clk_branch gcc_usb2a_phy_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2a_phy_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3036,8 +3061,8 @@ static struct clk_branch gcc_usb2b_phy_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2b_phy_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3052,8 +3077,8 @@ static struct clk_branch gcc_usb30_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3069,8 +3094,8 @@ static struct clk_branch gcc_usb30_sec_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_master_clk",
- .parent_names = (const char *[]){
- "usb30_sec_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_sec_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3086,8 +3111,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3103,8 +3128,8 @@ static struct clk_branch gcc_usb30_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3119,8 +3144,8 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3135,8 +3160,8 @@ static struct clk_branch gcc_usb_hs_inactivity_timers_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_inactivity_timers_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3152,8 +3177,8 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_names = (const char *[]){
- "usb_hs_system_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3169,8 +3194,8 @@ static struct clk_branch gcc_usb_hsic_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hsic_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &periph_noc_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3185,8 +3210,8 @@ static struct clk_branch gcc_usb_hsic_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hsic_clk",
- .parent_names = (const char *[]){
- "usb_hsic_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3202,8 +3227,8 @@ static struct clk_branch gcc_usb_hsic_io_cal_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hsic_io_cal_clk",
- .parent_names = (const char *[]){
- "usb_hsic_io_cal_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_io_cal_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3219,8 +3244,8 @@ static struct clk_branch gcc_usb_hsic_io_cal_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hsic_io_cal_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3235,8 +3260,8 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hsic_system_clk",
- .parent_names = (const char *[]){
- "usb_hsic_system_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3468,6 +3493,7 @@ static struct clk_regmap *gcc_apq8084_clocks[] = {
[GCC_USB_HSIC_IO_CAL_SLEEP_CLK] = &gcc_usb_hsic_io_cal_sleep_clk.clkr,
[GCC_USB_HSIC_MOCK_UTMI_CLK] = &gcc_usb_hsic_mock_utmi_clk.clkr,
[GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK_SRC] = &gcc_mmss_gpll0_clk_src,
};
static struct gdsc *gcc_apq8084_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 5675c60525a7..5657e29464ad 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -77,98 +77,397 @@ struct clk_fepll {
const struct freq_tbl *freq_tbl;
};
-static struct parent_map gcc_xo_200_500_map[] = {
- { P_XO, 0 },
- { P_FEPLL200, 1 },
- { P_FEPLL500, 2 },
+/*
+ * Contains the index of the safe clock used during an APSS frequency change.
+ * fepll500 is used as the safe clock, so initialize this with its index
+ * in the gcc_xo_ddr_500_200 parent list.
+ */
+static const int gcc_ipq4019_cpu_safe_parent = 2;
+
+/* Calculates the VCO rate for FEPLL. */
+static u64 clk_fepll_vco_calc_rate(struct clk_fepll *pll_div,
+ unsigned long parent_rate)
+{
+ const struct clk_fepll_vco *pll_vco = pll_div->pll_vco;
+ u32 fdbkdiv, refclkdiv, cdiv;
+ u64 vco;
+
+ regmap_read(pll_div->cdiv.clkr.regmap, pll_vco->reg, &cdiv);
+ refclkdiv = (cdiv >> pll_vco->refclkdiv_shift) &
+ (BIT(pll_vco->refclkdiv_width) - 1);
+ fdbkdiv = (cdiv >> pll_vco->fdbkdiv_shift) &
+ (BIT(pll_vco->fdbkdiv_width) - 1);
+
+ vco = parent_rate / refclkdiv;
+ vco *= 2;
+ vco *= fdbkdiv;
+
+ return vco;
+}
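/*
 * Illustrative example of the VCO arithmetic above (values assumed, not
 * taken from this patch): with a 48 MHz XO reference and hypothetical
 * register fields REFCLKDIV = 3 and FDBKDIV = 125:
 *
 *	vco = (48000000 / 3) * 2 * 125 = 4000000000 Hz (4 GHz)
 *
 * The real field values are read from the FEPLL register at runtime.
 */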
+
+static const struct clk_fepll_vco gcc_apss_ddrpll_vco = {
+ .fdbkdiv_shift = 16,
+ .fdbkdiv_width = 8,
+ .refclkdiv_shift = 24,
+ .refclkdiv_width = 5,
+ .reg = 0x2e020,
};
-static const char * const gcc_xo_200_500[] = {
- "xo",
- "fepll200",
- "fepll500",
+static const struct clk_fepll_vco gcc_fepll_vco = {
+ .fdbkdiv_shift = 16,
+ .fdbkdiv_width = 8,
+ .refclkdiv_shift = 24,
+ .refclkdiv_width = 5,
+ .reg = 0x2f020,
};
-static struct parent_map gcc_xo_200_map[] = {
- { P_XO, 0 },
- { P_FEPLL200, 1 },
+/*
+ * Round rate function for APSS CPU PLL Clock divider.
+ * It looks up the frequency table and returns the next higher frequency
+ * supported in hardware.
+ */
+static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate)
+{
+ struct clk_fepll *pll = to_clk_fepll(hw);
+ struct clk_hw *p_hw;
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(pll->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ p_hw = clk_hw_get_parent_by_index(hw, f->src);
+ *p_rate = clk_hw_get_rate(p_hw);
+
+ return f->freq;
};
-static const char * const gcc_xo_200[] = {
- "xo",
- "fepll200",
+/*
+ * Clock set rate function for APSS CPU PLL Clock divider.
+ * It looks up the frequency table and updates the PLL divider to the
+ * corresponding divider value.
+ */
+static int clk_cpu_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_fepll *pll = to_clk_fepll(hw);
+ const struct freq_tbl *f;
+ u32 mask;
+
+ f = qcom_find_freq(pll->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ mask = (BIT(pll->cdiv.width) - 1) << pll->cdiv.shift;
+ regmap_update_bits(pll->cdiv.clkr.regmap,
+ pll->cdiv.reg, mask,
+ f->pre_div << pll->cdiv.shift);
+ /*
+	 * There is no status bit that can be checked to confirm a successful
+	 * CPU divider update, so a short delay is used instead.
+ */
+ udelay(1);
+
+ return 0;
};
-static struct parent_map gcc_xo_200_spi_map[] = {
- { P_XO, 0 },
- { P_FEPLL200, 2 },
+/*
+ * Clock frequency calculation function for APSS CPU PLL Clock divider.
+ * This clock divider is nonlinear, so this function calculates the actual
+ * divider and returns the output frequency by dividing the VCO frequency
+ * by this actual divider value.
+ */
+static unsigned long
+clk_cpu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_fepll *pll = to_clk_fepll(hw);
+ u32 cdiv, pre_div;
+ u64 rate;
+
+ regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv);
+ cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1);
+
+ /*
+	 * Some divider values include a 0.5 fraction, so multiply both the VCO
+	 * frequency (parent_rate) and pre_div by 2 to keep the calculation
+	 * in integers.
+ */
+ if (cdiv > 10)
+ pre_div = (cdiv + 1) * 2;
+ else
+ pre_div = cdiv + 12;
+
+ rate = clk_fepll_vco_calc_rate(pll, parent_rate) * 2;
+ do_div(rate, pre_div);
+
+ return rate;
};
-static const char * const gcc_xo_200_spi[] = {
- "xo",
- "fepll200",
+static const struct clk_ops clk_regmap_cpu_div_ops = {
+ .round_rate = clk_cpu_div_round_rate,
+ .set_rate = clk_cpu_div_set_rate,
+ .recalc_rate = clk_cpu_div_recalc_rate,
};
-static struct parent_map gcc_xo_sdcc1_500_map[] = {
- { P_XO, 0 },
- { P_DDRPLL, 1 },
- { P_FEPLL500, 2 },
+static const struct freq_tbl ftbl_apss_ddr_pll[] = {
+ { 384000000, P_XO, 0xd, 0, 0 },
+ { 413000000, P_XO, 0xc, 0, 0 },
+ { 448000000, P_XO, 0xb, 0, 0 },
+ { 488000000, P_XO, 0xa, 0, 0 },
+ { 512000000, P_XO, 0x9, 0, 0 },
+ { 537000000, P_XO, 0x8, 0, 0 },
+ { 565000000, P_XO, 0x7, 0, 0 },
+ { 597000000, P_XO, 0x6, 0, 0 },
+ { 632000000, P_XO, 0x5, 0, 0 },
+ { 672000000, P_XO, 0x4, 0, 0 },
+ { 716000000, P_XO, 0x3, 0, 0 },
+ { 768000000, P_XO, 0x2, 0, 0 },
+ { 823000000, P_XO, 0x1, 0, 0 },
+ { 896000000, P_XO, 0x0, 0, 0 },
+ { }
};
-static const char * const gcc_xo_sdcc1_500[] = {
- "xo",
- "ddrpllsdcc",
- "fepll500",
+static struct clk_fepll gcc_apss_cpu_plldiv_clk = {
+ .cdiv.reg = 0x2e020,
+ .cdiv.shift = 4,
+ .cdiv.width = 4,
+ .cdiv.clkr = {
+ .enable_reg = 0x2e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ddrpllapss",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_cpu_div_ops,
+ },
+ },
+ .freq_tbl = ftbl_apss_ddr_pll,
+ .pll_vco = &gcc_apss_ddrpll_vco,
};
-static struct parent_map gcc_xo_wcss2g_map[] = {
- { P_XO, 0 },
- { P_FEPLLWCSS2G, 1 },
+/* Calculates the rate for the PLL divider.
+ * If the divider value is not fixed then it gets the actual divider value
+ * from the divider table. Then, it calculates the clock rate by dividing the
+ * parent rate by the actual divider value.
+ */
+static unsigned long
+clk_regmap_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_fepll *pll = to_clk_fepll(hw);
+ u32 cdiv, pre_div = 1;
+ u64 rate;
+ const struct clk_div_table *clkt;
+
+ if (pll->fixed_div) {
+ pre_div = pll->fixed_div;
+ } else {
+ regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv);
+ cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1);
+
+ for (clkt = pll->div_table; clkt->div; clkt++) {
+ if (clkt->val == cdiv)
+ pre_div = clkt->div;
+ }
+ }
+
+ rate = clk_fepll_vco_calc_rate(pll, parent_rate);
+ do_div(rate, pre_div);
+
+ return rate;
};
-static const char * const gcc_xo_wcss2g[] = {
- "xo",
- "fepllwcss2g",
+static const struct clk_ops clk_fepll_div_ops = {
+ .recalc_rate = clk_regmap_clk_div_recalc_rate,
};
-static struct parent_map gcc_xo_wcss5g_map[] = {
- { P_XO, 0 },
- { P_FEPLLWCSS5G, 1 },
+static struct clk_fepll gcc_apss_sdcc_clk = {
+ .fixed_div = 28,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "ddrpllsdcc",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .pll_vco = &gcc_apss_ddrpll_vco,
};
-static const char * const gcc_xo_wcss5g[] = {
- "xo",
- "fepllwcss5g",
+static struct clk_fepll gcc_fepll125_clk = {
+ .fixed_div = 32,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "fepll125",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .pll_vco = &gcc_fepll_vco,
};
-static struct parent_map gcc_xo_125_dly_map[] = {
- { P_XO, 0 },
- { P_FEPLL125DLY, 1 },
+static struct clk_fepll gcc_fepll125dly_clk = {
+ .fixed_div = 32,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "fepll125dly",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .pll_vco = &gcc_fepll_vco,
};
-static const char * const gcc_xo_125_dly[] = {
- "xo",
- "fepll125dly",
+static struct clk_fepll gcc_fepll200_clk = {
+ .fixed_div = 20,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "fepll200",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .pll_vco = &gcc_fepll_vco,
};
-static struct parent_map gcc_xo_ddr_500_200_map[] = {
+static struct clk_fepll gcc_fepll500_clk = {
+ .fixed_div = 8,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "fepll500",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .pll_vco = &gcc_fepll_vco,
+};
+
+static const struct clk_div_table fepllwcss_clk_div_table[] = {
+ { 0, 15 },
+ { 1, 16 },
+ { 2, 18 },
+ { 3, 20 },
+ { },
+};
+
+static struct clk_fepll gcc_fepllwcss2g_clk = {
+ .cdiv.reg = 0x2f020,
+ .cdiv.shift = 8,
+ .cdiv.width = 2,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "fepllwcss2g",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .div_table = fepllwcss_clk_div_table,
+ .pll_vco = &gcc_fepll_vco,
+};
+
+static struct clk_fepll gcc_fepllwcss5g_clk = {
+ .cdiv.reg = 0x2f020,
+ .cdiv.shift = 12,
+ .cdiv.width = 2,
+ .cdiv.clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "fepllwcss5g",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_fepll_div_ops,
+ },
+ },
+ .div_table = fepllwcss_clk_div_table,
+ .pll_vco = &gcc_fepll_vco,
+};
+
+static struct parent_map gcc_xo_200_500_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 1 },
+ { P_FEPLL500, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_200_500[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepll200_clk.cdiv.clkr.hw },
+ { .hw = &gcc_fepll500_clk.cdiv.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_pcnoc_ahb_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(100000000, P_FEPLL200, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcnoc_ahb_clk_src = {
+ .cmd_rcgr = 0x21024,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_500_map,
+ .freq_tbl = ftbl_gcc_pcnoc_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcnoc_ahb_clk_src",
+ .parent_data = gcc_xo_200_500,
+ .num_parents = ARRAY_SIZE(gcc_xo_200_500),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch pcnoc_clk_src = {
+ .halt_reg = 0x21030,
+ .clkr = {
+ .enable_reg = 0x21030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcnoc_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcnoc_ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT |
+ CLK_IS_CRITICAL,
+ },
+ },
+};
+
+static struct parent_map gcc_xo_200_map[] = {
{ P_XO, 0 },
- { P_FEPLL200, 3 },
- { P_FEPLL500, 2 },
- { P_DDRPLLAPSS, 1 },
+ { P_FEPLL200, 1 },
};
-/*
- * Contains index for safe clock during APSS freq change.
- * fepll500 is being used as safe clock so initialize it
- * with its index in parents list gcc_xo_ddr_500_200.
- */
-static const int gcc_ipq4019_cpu_safe_parent = 2;
-static const char * const gcc_xo_ddr_500_200[] = {
- "xo",
- "fepll200",
- "fepll500",
- "ddrpllapss",
+static const struct clk_parent_data gcc_xo_200[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepll200_clk.cdiv.clkr.hw },
};
static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
@@ -184,8 +483,8 @@ static struct clk_rcg2 audio_clk_src = {
.freq_tbl = ftbl_gcc_audio_pwm_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "audio_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
@@ -198,9 +497,8 @@ static struct clk_branch gcc_audio_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_audio_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -215,9 +513,8 @@ static struct clk_branch gcc_audio_pwm_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_audio_pwm_clk",
- .parent_names = (const char *[]){
- "audio_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &audio_clk_src.clkr.hw },
.flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -237,8 +534,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_2_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
};
@@ -250,9 +547,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -267,8 +563,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_2_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
};
@@ -280,9 +576,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -290,6 +585,16 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
},
};
+static struct parent_map gcc_xo_200_spi_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_200_spi[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepll200_clk.cdiv.clkr.hw },
+};
+
static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_spi_apps_clk[] = {
F(960000, P_XO, 12, 1, 4),
F(4800000, P_XO, 1, 1, 10),
@@ -309,8 +614,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_2_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_200_spi,
- .num_parents = 2,
+ .parent_data = gcc_xo_200_spi,
+ .num_parents = ARRAY_SIZE(gcc_xo_200_spi),
.ops = &clk_rcg2_ops,
},
};
@@ -322,9 +627,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -340,8 +644,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.parent_map = gcc_xo_200_spi_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_200_spi,
- .num_parents = 2,
+ .parent_data = gcc_xo_200_spi,
+ .num_parents = ARRAY_SIZE(gcc_xo_200_spi),
.ops = &clk_rcg2_ops,
},
};
@@ -353,9 +657,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -385,8 +688,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.parent_map = gcc_xo_200_spi_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_xo_200_spi,
- .num_parents = 2,
+ .parent_data = gcc_xo_200_spi,
+ .num_parents = ARRAY_SIZE(gcc_xo_200_spi),
.ops = &clk_rcg2_ops,
},
};
@@ -398,9 +701,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw },
.flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -416,8 +718,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.parent_map = gcc_xo_200_spi_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_xo_200_spi,
- .num_parents = 2,
+ .parent_data = gcc_xo_200_spi,
+ .num_parents = ARRAY_SIZE(gcc_xo_200_spi),
.ops = &clk_rcg2_ops,
},
};
@@ -429,9 +731,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -454,8 +755,8 @@ static struct clk_rcg2 gp1_clk_src = {
.parent_map = gcc_xo_200_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
};
@@ -467,9 +768,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gp1_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -485,8 +785,8 @@ static struct clk_rcg2 gp2_clk_src = {
.parent_map = gcc_xo_200_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
};
@@ -498,9 +798,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gp2_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -516,8 +815,8 @@ static struct clk_rcg2 gp3_clk_src = {
.parent_map = gcc_xo_200_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
};
@@ -529,9 +828,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gp3_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -539,6 +837,18 @@ static struct clk_branch gcc_gp3_clk = {
},
};
+static struct parent_map gcc_xo_sdcc1_500_map[] = {
+ { P_XO, 0 },
+ { P_DDRPLL, 1 },
+ { P_FEPLL500, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_sdcc1_500[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_apss_sdcc_clk.cdiv.clkr.hw },
+ { .hw = &gcc_fepll500_clk.cdiv.clkr.hw },
+};
+
static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
F(144000, P_XO, 1, 3, 240),
F(400000, P_XO, 1, 1, 0),
@@ -557,8 +867,8 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.parent_map = gcc_xo_sdcc1_500_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_xo_sdcc1_500,
- .num_parents = 3,
+ .parent_data = gcc_xo_sdcc1_500,
+ .num_parents = ARRAY_SIZE(gcc_xo_sdcc1_500),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -582,6 +892,20 @@ static const struct freq_tbl ftbl_gcc_apps_clk[] = {
{ }
};
+static struct parent_map gcc_xo_ddr_500_200_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 3 },
+ { P_FEPLL500, 2 },
+ { P_DDRPLLAPSS, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_ddr_500_200[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepll200_clk.cdiv.clkr.hw },
+ { .hw = &gcc_fepll500_clk.cdiv.clkr.hw },
+ { .hw = &gcc_apss_cpu_plldiv_clk.cdiv.clkr.hw },
+};
+
static struct clk_rcg2 apps_clk_src = {
.cmd_rcgr = 0x1900c,
.hid_width = 5,
@@ -589,8 +913,8 @@ static struct clk_rcg2 apps_clk_src = {
.parent_map = gcc_xo_ddr_500_200_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "apps_clk_src",
- .parent_names = gcc_xo_ddr_500_200,
- .num_parents = 4,
+ .parent_data = gcc_xo_ddr_500_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_ddr_500_200),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -609,8 +933,8 @@ static struct clk_rcg2 apps_ahb_clk_src = {
.freq_tbl = ftbl_gcc_apps_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "apps_ahb_clk_src",
- .parent_names = gcc_xo_200_500,
- .num_parents = 3,
+ .parent_data = gcc_xo_200_500,
+ .num_parents = ARRAY_SIZE(gcc_xo_200_500),
.ops = &clk_rcg2_ops,
},
};
@@ -623,9 +947,8 @@ static struct clk_branch gcc_apss_ahb_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_apss_ahb_clk",
- .parent_names = (const char *[]){
- "apps_ahb_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &apps_ahb_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -641,9 +964,8 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -657,8 +979,9 @@ static struct clk_branch gcc_dcd_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_dcd_xo_clk",
- .parent_names = (const char *[]){
- "xo",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -673,9 +996,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -691,9 +1013,8 @@ static struct clk_branch gcc_crypto_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -708,9 +1029,8 @@ static struct clk_branch gcc_crypto_axi_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_axi_clk",
- .parent_names = (const char *[]){
- "fepll125",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_fepll125_clk.cdiv.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -725,15 +1045,42 @@ static struct clk_branch gcc_crypto_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_clk",
- .parent_names = (const char *[]){
- "fepll125",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_fepll125_clk.cdiv.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
},
};
+static struct parent_map gcc_xo_125_dly_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL125DLY, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_125_dly[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepll125dly_clk.cdiv.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
+ F(125000000, P_FEPLL125DLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 fephy_125m_dly_clk_src = {
+ .cmd_rcgr = 0x12000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_125_dly_map,
+ .freq_tbl = ftbl_gcc_fephy_dly_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "fephy_125m_dly_clk_src",
+ .parent_data = gcc_xo_125_dly,
+ .num_parents = ARRAY_SIZE(gcc_xo_125_dly),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_ess_clk = {
.halt_reg = 0x12010,
.clkr = {
@@ -741,9 +1088,8 @@ static struct clk_branch gcc_ess_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ess_clk",
- .parent_names = (const char *[]){
- "fephy_125m_dly_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &fephy_125m_dly_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -759,9 +1105,8 @@ static struct clk_branch gcc_imem_axi_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_imem_axi_clk",
- .parent_names = (const char *[]){
- "fepll200",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_fepll200_clk.cdiv.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -775,9 +1120,8 @@ static struct clk_branch gcc_imem_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_imem_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -791,9 +1135,8 @@ static struct clk_branch gcc_pcie_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -807,9 +1150,8 @@ static struct clk_branch gcc_pcie_axi_m_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_axi_m_clk",
- .parent_names = (const char *[]){
- "fepll200",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_fepll200_clk.cdiv.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -823,9 +1165,8 @@ static struct clk_branch gcc_pcie_axi_s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_axi_s_clk",
- .parent_names = (const char *[]){
- "fepll200",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_fepll200_clk.cdiv.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -840,9 +1181,8 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -856,9 +1196,8 @@ static struct clk_branch gcc_qpic_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_qpic_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -872,9 +1211,8 @@ static struct clk_branch gcc_qpic_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_qpic_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -888,9 +1226,8 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -904,9 +1241,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]){
- "sdcc1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &sdcc1_apps_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -922,9 +1258,8 @@ static struct clk_branch gcc_tlmm_ahb_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_tlmm_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -938,9 +1273,8 @@ static struct clk_branch gcc_usb2_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2_master_clk",
- .parent_names = (const char *[]){
- "pcnoc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -954,28 +1288,12 @@ static struct clk_branch gcc_usb2_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2_sleep_clk",
- .parent_names = (const char *[]){
- "gcc_sleep_clk_src",
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_usb2_mock_utmi_clk = {
- .halt_reg = 0x1e014,
- .clkr = {
- .enable_reg = 0x1e014,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb2_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk",
+ .name = "gcc_sleep_clk_src",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
- .flags = CLK_SET_RATE_PARENT,
},
},
};
@@ -992,12 +1310,28 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_xo_200,
- .num_parents = 2,
+ .parent_data = gcc_xo_200,
+ .num_parents = ARRAY_SIZE(gcc_xo_200),
.ops = &clk_rcg2_ops,
},
};
+static struct clk_branch gcc_usb2_mock_utmi_clk = {
+ .halt_reg = 0x1e014,
+ .clkr = {
+ .enable_reg = 0x1e014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb30_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_master_clk = {
.halt_reg = 0x1e028,
.clkr = {
@@ -1005,9 +1339,8 @@ static struct clk_branch gcc_usb3_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_master_clk",
- .parent_names = (const char *[]){
- "fepll125",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_fepll125_clk.cdiv.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -1021,8 +1354,9 @@ static struct clk_branch gcc_usb3_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_sleep_clk",
- .parent_names = (const char *[]){
- "gcc_sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk",
+ .name = "gcc_sleep_clk_src",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1037,9 +1371,8 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &usb30_mock_utmi_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1047,25 +1380,16 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
},
};
-static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
- F(125000000, P_FEPLL125DLY, 1, 0, 0),
- { }
+static struct parent_map gcc_xo_wcss2g_map[] = {
+ { P_XO, 0 },
+ { P_FEPLLWCSS2G, 1 },
};
-static struct clk_rcg2 fephy_125m_dly_clk_src = {
- .cmd_rcgr = 0x12000,
- .hid_width = 5,
- .parent_map = gcc_xo_125_dly_map,
- .freq_tbl = ftbl_gcc_fephy_dly_clk,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "fephy_125m_dly_clk_src",
- .parent_names = gcc_xo_125_dly,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
+static const struct clk_parent_data gcc_xo_wcss2g[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepllwcss2g_clk.cdiv.clkr.hw },
};
-
static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
F(48000000, P_XO, 1, 0, 0),
F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
@@ -1079,8 +1403,8 @@ static struct clk_rcg2 wcss2g_clk_src = {
.parent_map = gcc_xo_wcss2g_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "wcss2g_clk_src",
- .parent_names = gcc_xo_wcss2g,
- .num_parents = 2,
+ .parent_data = gcc_xo_wcss2g,
+ .num_parents = ARRAY_SIZE(gcc_xo_wcss2g),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1093,9 +1417,8 @@ static struct clk_branch gcc_wcss2g_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_wcss2g_clk",
- .parent_names = (const char *[]){
- "wcss2g_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &wcss2g_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1110,8 +1433,9 @@ static struct clk_branch gcc_wcss2g_ref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_wcss2g_ref_clk",
- .parent_names = (const char *[]){
- "xo",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1127,8 +1451,9 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_wcss2g_rtc_clk",
- .parent_names = (const char *[]){
- "gcc_sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk",
+ .name = "gcc_sleep_clk_src",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1136,6 +1461,16 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
},
};
+static struct parent_map gcc_xo_wcss5g_map[] = {
+ { P_XO, 0 },
+ { P_FEPLLWCSS5G, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_wcss5g[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &gcc_fepllwcss5g_clk.cdiv.clkr.hw },
+};
+
static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
F(48000000, P_XO, 1, 0, 0),
F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
@@ -1149,8 +1484,8 @@ static struct clk_rcg2 wcss5g_clk_src = {
.freq_tbl = ftbl_gcc_wcss5g_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "wcss5g_clk_src",
- .parent_names = gcc_xo_wcss5g,
- .num_parents = 2,
+ .parent_data = gcc_xo_wcss5g,
+ .num_parents = ARRAY_SIZE(gcc_xo_wcss5g),
.ops = &clk_rcg2_ops,
},
};
@@ -1162,9 +1497,8 @@ static struct clk_branch gcc_wcss5g_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_wcss5g_clk",
- .parent_names = (const char *[]){
- "wcss5g_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){
+ &wcss5g_clk_src.clkr.hw },
.num_parents = 1,
.ops = &clk_branch2_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1179,8 +1513,9 @@ static struct clk_branch gcc_wcss5g_ref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_wcss5g_ref_clk",
- .parent_names = (const char *[]){
- "xo",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1196,8 +1531,9 @@ static struct clk_branch gcc_wcss5g_rtc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_wcss5g_rtc_clk",
- .parent_names = (const char *[]){
- "gcc_sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk",
+ .name = "gcc_sleep_clk_src",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1206,363 +1542,6 @@ static struct clk_branch gcc_wcss5g_rtc_clk = {
},
};
-/* Calculates the VCO rate for FEPLL. */
-static u64 clk_fepll_vco_calc_rate(struct clk_fepll *pll_div,
- unsigned long parent_rate)
-{
- const struct clk_fepll_vco *pll_vco = pll_div->pll_vco;
- u32 fdbkdiv, refclkdiv, cdiv;
- u64 vco;
-
- regmap_read(pll_div->cdiv.clkr.regmap, pll_vco->reg, &cdiv);
- refclkdiv = (cdiv >> pll_vco->refclkdiv_shift) &
- (BIT(pll_vco->refclkdiv_width) - 1);
- fdbkdiv = (cdiv >> pll_vco->fdbkdiv_shift) &
- (BIT(pll_vco->fdbkdiv_width) - 1);
-
- vco = parent_rate / refclkdiv;
- vco *= 2;
- vco *= fdbkdiv;
-
- return vco;
-}
-
-static const struct clk_fepll_vco gcc_apss_ddrpll_vco = {
- .fdbkdiv_shift = 16,
- .fdbkdiv_width = 8,
- .refclkdiv_shift = 24,
- .refclkdiv_width = 5,
- .reg = 0x2e020,
-};
-
-static const struct clk_fepll_vco gcc_fepll_vco = {
- .fdbkdiv_shift = 16,
- .fdbkdiv_width = 8,
- .refclkdiv_shift = 24,
- .refclkdiv_width = 5,
- .reg = 0x2f020,
-};
-
-/*
- * Round rate function for APSS CPU PLL Clock divider.
- * It looks up the frequency table and returns the next higher frequency
- * supported in hardware.
- */
-static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate)
-{
- struct clk_fepll *pll = to_clk_fepll(hw);
- struct clk_hw *p_hw;
- const struct freq_tbl *f;
-
- f = qcom_find_freq(pll->freq_tbl, rate);
- if (!f)
- return -EINVAL;
-
- p_hw = clk_hw_get_parent_by_index(hw, f->src);
- *p_rate = clk_hw_get_rate(p_hw);
-
- return f->freq;
-};
-
-/*
- * Clock set rate function for APSS CPU PLL Clock divider.
- * It looks up the frequency table and updates the PLL divider to corresponding
- * divider value.
- */
-static int clk_cpu_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_fepll *pll = to_clk_fepll(hw);
- const struct freq_tbl *f;
- u32 mask;
-
- f = qcom_find_freq(pll->freq_tbl, rate);
- if (!f)
- return -EINVAL;
-
- mask = (BIT(pll->cdiv.width) - 1) << pll->cdiv.shift;
- regmap_update_bits(pll->cdiv.clkr.regmap,
- pll->cdiv.reg, mask,
- f->pre_div << pll->cdiv.shift);
- /*
- * There is no status bit which can be checked for successful CPU
- * divider update operation so using delay for the same.
- */
- udelay(1);
-
- return 0;
-};
-
-/*
- * Clock frequency calculation function for APSS CPU PLL Clock divider.
- * This clock divider is nonlinear so this function calculates the actual
- * divider and returns the output frequency by dividing VCO Frequency
- * with this actual divider value.
- */
-static unsigned long
-clk_cpu_div_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_fepll *pll = to_clk_fepll(hw);
- u32 cdiv, pre_div;
- u64 rate;
-
- regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv);
- cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1);
-
- /*
- * Some dividers have value in 0.5 fraction so multiply both VCO
- * frequency(parent_rate) and pre_div with 2 to make integer
- * calculation.
- */
- if (cdiv > 10)
- pre_div = (cdiv + 1) * 2;
- else
- pre_div = cdiv + 12;
-
- rate = clk_fepll_vco_calc_rate(pll, parent_rate) * 2;
- do_div(rate, pre_div);
-
- return rate;
-};
-
-static const struct clk_ops clk_regmap_cpu_div_ops = {
- .round_rate = clk_cpu_div_round_rate,
- .set_rate = clk_cpu_div_set_rate,
- .recalc_rate = clk_cpu_div_recalc_rate,
-};
-
-static const struct freq_tbl ftbl_apss_ddr_pll[] = {
- { 384000000, P_XO, 0xd, 0, 0 },
- { 413000000, P_XO, 0xc, 0, 0 },
- { 448000000, P_XO, 0xb, 0, 0 },
- { 488000000, P_XO, 0xa, 0, 0 },
- { 512000000, P_XO, 0x9, 0, 0 },
- { 537000000, P_XO, 0x8, 0, 0 },
- { 565000000, P_XO, 0x7, 0, 0 },
- { 597000000, P_XO, 0x6, 0, 0 },
- { 632000000, P_XO, 0x5, 0, 0 },
- { 672000000, P_XO, 0x4, 0, 0 },
- { 716000000, P_XO, 0x3, 0, 0 },
- { 768000000, P_XO, 0x2, 0, 0 },
- { 823000000, P_XO, 0x1, 0, 0 },
- { 896000000, P_XO, 0x0, 0, 0 },
- { }
-};
-
-static struct clk_fepll gcc_apss_cpu_plldiv_clk = {
- .cdiv.reg = 0x2e020,
- .cdiv.shift = 4,
- .cdiv.width = 4,
- .cdiv.clkr = {
- .enable_reg = 0x2e000,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "ddrpllapss",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_regmap_cpu_div_ops,
- },
- },
- .freq_tbl = ftbl_apss_ddr_pll,
- .pll_vco = &gcc_apss_ddrpll_vco,
-};
-
-/* Calculates the rate for the PLL divider.
- * If the divider value is not fixed, it looks up the actual divider value
- * in the divider table. It then calculates the clock rate by dividing the
- * parent rate by that divider value.
- */
-static unsigned long
-clk_regmap_clk_div_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_fepll *pll = to_clk_fepll(hw);
- u32 cdiv, pre_div = 1;
- u64 rate;
- const struct clk_div_table *clkt;
-
- if (pll->fixed_div) {
- pre_div = pll->fixed_div;
- } else {
- regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv);
- cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1);
-
- for (clkt = pll->div_table; clkt->div; clkt++) {
- if (clkt->val == cdiv)
- pre_div = clkt->div;
- }
- }
-
- rate = clk_fepll_vco_calc_rate(pll, parent_rate);
- do_div(rate, pre_div);
-
- return rate;
-};
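
The comment above describes a two-way choice: use fixed_div when it is set, otherwise translate the register field through the {val, div} table. A minimal standalone sketch of that selection, using the WCSS divider table below as sample data:

/* Standalone sketch of the fixed-div-or-table divider selection above. */
#include <stdio.h>

struct div_entry {
	unsigned int val;	/* register field value */
	unsigned int div;	/* resulting divider; 0 terminates the table */
};

static unsigned int pick_div(unsigned int fixed_div, unsigned int cdiv,
			     const struct div_entry *table)
{
	const struct div_entry *t;
	unsigned int pre_div = 1;

	if (fixed_div)
		return fixed_div;

	for (t = table; t->div; t++)
		if (t->val == cdiv)
			pre_div = t->div;

	return pre_div;
}

int main(void)
{
	static const struct div_entry wcss_tbl[] = {
		{ 0, 15 }, { 1, 16 }, { 2, 18 }, { 3, 20 }, { }
	};

	/* fepll125-style fixed divider, then a WCSS table lookup */
	printf("%u %u\n", pick_div(32, 0, NULL), pick_div(0, 2, wcss_tbl));
	return 0;
}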
-
-static const struct clk_ops clk_fepll_div_ops = {
- .recalc_rate = clk_regmap_clk_div_recalc_rate,
-};
-
-static struct clk_fepll gcc_apss_sdcc_clk = {
- .fixed_div = 28,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "ddrpllsdcc",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .pll_vco = &gcc_apss_ddrpll_vco,
-};
-
-static struct clk_fepll gcc_fepll125_clk = {
- .fixed_div = 32,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "fepll125",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .pll_vco = &gcc_fepll_vco,
-};
-
-static struct clk_fepll gcc_fepll125dly_clk = {
- .fixed_div = 32,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "fepll125dly",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .pll_vco = &gcc_fepll_vco,
-};
-
-static struct clk_fepll gcc_fepll200_clk = {
- .fixed_div = 20,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "fepll200",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .pll_vco = &gcc_fepll_vco,
-};
-
-static struct clk_fepll gcc_fepll500_clk = {
- .fixed_div = 8,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "fepll500",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .pll_vco = &gcc_fepll_vco,
-};
-
-static const struct clk_div_table fepllwcss_clk_div_table[] = {
- { 0, 15 },
- { 1, 16 },
- { 2, 18 },
- { 3, 20 },
- { },
-};
-
-static struct clk_fepll gcc_fepllwcss2g_clk = {
- .cdiv.reg = 0x2f020,
- .cdiv.shift = 8,
- .cdiv.width = 2,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "fepllwcss2g",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .div_table = fepllwcss_clk_div_table,
- .pll_vco = &gcc_fepll_vco,
-};
-
-static struct clk_fepll gcc_fepllwcss5g_clk = {
- .cdiv.reg = 0x2f020,
- .cdiv.shift = 12,
- .cdiv.width = 2,
- .cdiv.clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "fepllwcss5g",
- .parent_names = (const char *[]){
- "xo",
- },
- .num_parents = 1,
- .ops = &clk_fepll_div_ops,
- },
- },
- .div_table = fepllwcss_clk_div_table,
- .pll_vco = &gcc_fepll_vco,
-};
-
-static const struct freq_tbl ftbl_gcc_pcnoc_ahb_clk[] = {
- F(48000000, P_XO, 1, 0, 0),
- F(100000000, P_FEPLL200, 2, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_pcnoc_ahb_clk_src = {
- .cmd_rcgr = 0x21024,
- .hid_width = 5,
- .parent_map = gcc_xo_200_500_map,
- .freq_tbl = ftbl_gcc_pcnoc_ahb_clk,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_pcnoc_ahb_clk_src",
- .parent_names = gcc_xo_200_500,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_branch pcnoc_clk_src = {
- .halt_reg = 0x21030,
- .clkr = {
- .enable_reg = 0x21030,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "pcnoc_clk_src",
- .parent_names = (const char *[]){
- "gcc_pcnoc_ahb_clk_src",
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
- .flags = CLK_SET_RATE_PARENT |
- CLK_IS_CRITICAL,
- },
- },
-};
-
static struct clk_regmap *gcc_ipq4019_clocks[] = {
[AUDIO_CLK_SRC] = &audio_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
new file mode 100644
index 000000000000..bdb4a0a11d07
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -0,0 +1,3824 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "reset.h"
+
+enum {
+ DT_SLEEP_CLK,
+ DT_XO,
+ DT_PCIE_2LANE_PHY_PIPE_CLK,
+ DT_PCIE_2LANE_PHY_PIPE_CLK_X1,
+ DT_USB_PCIE_WRAPPER_PIPE_CLK,
+};
+
+enum {
+ P_PCIE3X2_PIPE,
+ P_PCIE3X1_0_PIPE,
+ P_PCIE3X1_1_PIPE,
+ P_USB3PHY_0_PIPE,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC,
+ P_GPLL0_OUT_AUX,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL2_OUT_AUX,
+ P_GPLL2_OUT_MAIN,
+ P_GPLL4_OUT_AUX,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+ P_XO,
+};
+
+static const struct clk_parent_data gcc_parent_data_xo = { .index = DT_XO };
+
+static struct clk_alpha_pll gpll0_main = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_main",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_stromer_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_div2",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll gpll2_main = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll2",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_stromer_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll2_main",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll gpll4_main = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4_main",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_stromer_ops,
+			/*
+			 * There are no consumers for this GPLL in the kernel
+			 * yet (they will be added soon), so the clock
+			 * framework would disable this source. However, some
+			 * of the clocks initialized by the boot loader use
+			 * this source, so it must be kept on. Add the
+			 * CLK_IGNORE_UNUSED flag so the clock is not
+			 * disabled. Once the in-kernel consumer is added,
+			 * this flag can be removed.
+			 */
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll4",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct parent_map gcc_parent_map_xo[] = {
+ { P_XO, 0 },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
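
Each gcc_parent_map_N array pairs positionally with its gcc_parent_data_N array: entry i supplies the mux field value programmed into the RCG and the parent clock reported for that position. A small standalone sketch of the pairing, with illustrative names in place of the real clk_hw pointers:

/*
 * Standalone sketch of the positional parent_map/parent_data pairing used
 * by the RCGs below; the cfg values mirror gcc_parent_map_0, the names are
 * illustrative only.
 */
#include <stdio.h>

struct parent_map { int src; int cfg; };	/* P_* id, mux field value */

static const struct parent_map map0[] = {
	{ 0 /* P_XO */, 0 },
	{ 1 /* P_GPLL0_OUT_MAIN */, 1 },
	{ 2 /* P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC */, 4 },
};

static const char * const data0[] = { "xo", "gpll0", "gpll0_div2" };

int main(void)
{
	unsigned int i;

	/* position i pairs data0[i] with the mux value map0[i].cfg */
	for (i = 0; i < sizeof(map0) / sizeof(map0[0]); i++)
		printf("parent %-11s -> mux cfg %d\n", data0[i], map0[i].cfg);
	return 0;
}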
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_AUX, 2 },
+ { P_GPLL4_OUT_AUX, 3 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+ { P_GPLL0_OUT_AUX, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_AUX, 2 },
+ { P_GPLL4_OUT_AUX, 3 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_AUX, 1 },
+ { P_GPLL0_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_adss_pwm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apss_axi_clk_src[] = {
+ F(480000000, P_GPLL4_OUT_MAIN, 2.5, 0, 0),
+ F(533333333, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_apss_axi_clk_src = {
+ .cmd_rcgr = 0x24004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_apss_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_apss_axi_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 1, 1, 25),
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2.5, 0, 0),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
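
Each F() row encodes (rate, source, pre-divider, M, N); for rows with a non-zero M/N pair the target rate is src_rate / pre_div * M / N. A standalone check of two rows from the table above; the 800 MHz GPLL0 rate is an assumption, since the PLL's configured rate is not visible in this file:

/*
 * Standalone check of the F() arithmetic for the table above:
 * rate = src / pre_div * m / n when M/N are used. The 800 MHz GPLL0
 * rate is an assumed value, not read from this file.
 */
#include <stdio.h>

static unsigned long f_rate(unsigned long src, unsigned int pre_div,
			    unsigned int m, unsigned int n)
{
	unsigned long rate = src / pre_div;

	if (!n)
		return rate;

	return (unsigned long)((unsigned long long)rate * m / n);
}

int main(void)
{
	/* F(960000, P_XO, 1, 1, 25): 24 MHz / 1 * 1/25 = 960 kHz */
	printf("%lu\n", f_rate(24000000, 1, 1, 25));
	/* F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0): 800 MHz / 16 = 50 MHz */
	printf("%lu\n", f_rate(800000000, 16, 0, 0));
	return 0;
}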
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 144, 15625),
+ F(7372800, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 288, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 576, 15625),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
+ F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
+ F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x202c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x302c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x402c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_lpass_sway_clk_src[] = {
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_lpass_sway_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_lpass_sway_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_sway_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_nss_ts_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_nss_ts_clk_src = {
+ .cmd_rcgr = 0x17088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nss_ts_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie3x1_0_axi_clk_src[] = {
+ F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie3x1_0_axi_clk_src = {
+ .cmd_rcgr = 0x29018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie3x1_0_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3x1_0_rchg_clk_src = {
+ .cmd_rcgr = 0x2907c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_rchg_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_rchg_clk = {
+ .halt_reg = 0x2907c,
+ .clkr = {
+ .enable_reg = 0x2907c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_rchg_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3x1_0_rchg_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3x1_1_axi_clk_src = {
+ .cmd_rcgr = 0x2a004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie3x1_0_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3x1_1_rchg_clk_src = {
+ .cmd_rcgr = 0x2a078,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_rchg_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_rchg_clk = {
+ .halt_reg = 0x2a078,
+ .clkr = {
+ .enable_reg = 0x2a078,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_rchg_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3x1_1_rchg_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie3x2_axi_m_clk_src[] = {
+ F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie3x2_axi_m_clk_src = {
+ .cmd_rcgr = 0x28018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie3x2_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3x2_axi_s_clk_src = {
+ .cmd_rcgr = 0x28084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie3x1_0_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3x2_rchg_clk_src = {
+ .cmd_rcgr = 0x28078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_rchg_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_rchg_clk = {
+ .halt_reg = 0x28078,
+ .clkr = {
+ .enable_reg = 0x28078,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3x2_rchg_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3x2_rchg_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_clk_src[] = {
+ F(2000000, P_XO, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_clk_src = {
+ .cmd_rcgr = 0x28004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_pcie_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie3x2_pipe_clk_src = {
+ .reg = 0x28064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3x2_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_2LANE_PHY_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie3x1_0_pipe_clk_src = {
+ .reg = 0x29064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_USB_PCIE_WRAPPER_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie3x1_1_pipe_clk_src = {
+ .reg = 0x2a064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_2LANE_PHY_PIPE_CLK_X1,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcnoc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x31004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcnoc_bfdcd_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_q6_axim_clk_src = {
+ .cmd_rcgr = 0x25004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_apss_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6_axim_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qdss_at_clk_src[] = {
+ F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qdss_at_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qdss_at_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_at_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qdss_tsctr_clk_src[] = {
+ F(600000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x2d01c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qdss_tsctr_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_tsctr_div2_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_tsctr_div3_clk_src = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div3_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_tsctr_div4_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div4_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_tsctr_div8_clk_src = {
+ .mult = 1,
+ .div = 8,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div8_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_tsctr_div16_clk_src = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div16_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qpic_io_macro_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qpic_io_macro_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_io_macro_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(143713, P_XO, 1, 1, 167),
+ F(400000, P_XO, 1, 1, 60),
+ F(24000000, P_XO, 1, 0, 0),
+ F(48000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
+ F(96000000, P_GPLL2_OUT_MAIN, 12, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(192000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x33004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sleep_clk_src = {
+ .cmd_rcgr = 0x3400c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sleep_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_system_noc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2e004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_system_noc_bfdcd_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_system_noc_bfdcd_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_system_noc_bfdcd_div2_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_system_noc_bfdcd_div2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 gcc_uniphy_sys_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy_sys_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb0_aux_clk_src = {
+ .cmd_rcgr = 0x2c018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_pcie_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb0_lfps_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb0_lfps_clk_src = {
+ .cmd_rcgr = 0x2c07c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_usb0_lfps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_lfps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb0_master_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb0_mock_utmi_clk_src[] = {
+ F(60000000, P_GPLL4_OUT_AUX, 10, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb0_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2c02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb0_pipe_clk_src = {
+ .reg = 0x2c074,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_USB_PCIE_WRAPPER_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 gcc_wcss_ahb_clk_src = {
+ .cmd_rcgr = 0x25030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_lpass_sway_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_ahb_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_xo_clk_src = {
+ .cmd_rcgr = 0x34004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_xo_div4_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap_div gcc_qdss_dap_div_clk_src = {
+ .reg = 0x2d028,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb0_mock_utmi_div_clk_src = {
+ .reg = 0x2c040,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_adss_pwm_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb_clk = {
+ .halt_reg = 0x34024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x34024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x3024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x3020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x4024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x4020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x1010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x3040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x4054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart3_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce_ahb_clk = {
+ .halt_reg = 0x25074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_div2_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce_axi_clk = {
+ .halt_reg = 0x25068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce_pcnoc_ahb_clk = {
+ .halt_reg = 0x25070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce_pcnoc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+ .halt_reg = 0x3a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_apu_clk = {
+ .halt_reg = 0x3a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_apu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+ .halt_reg = 0x3a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x8018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x9018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_core_axim_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x27018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_core_axim_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_sway_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_sway_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mdio_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdio_slave_ahb_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mdio_slave_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_q6_axi_clk = {
+ .halt_reg = 0x19010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mem_noc_q6_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_q6_axim_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_ts_clk = {
+ .halt_reg = 0x19028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x19028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mem_noc_ts_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_tsctr_div8_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ts_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nss_ts_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_nss_ts_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscc_clk = {
+ .halt_reg = 0x17034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nsscc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscfg_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nsscfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_atb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_nsscc_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_nsscc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_qosgen_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_div4_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_1_clk = {
+ .halt_reg = 0x1707c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_timeout_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_div4_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_xo_dcd_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_xo_dcd_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_ahb_clk = {
+ .halt_reg = 0x29030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_aux_clk = {
+ .halt_reg = 0x29070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_axi_m_clk = {
+ .halt_reg = 0x29038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_0_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_axi_s_bridge_clk = {
+ .halt_reg = 0x29048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_0_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_axi_s_clk = {
+ .halt_reg = 0x29040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_0_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_0_pipe_clk = {
+ .halt_reg = 0x29068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x29068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_ahb_clk = {
+ .halt_reg = 0x2a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_aux_clk = {
+ .halt_reg = 0x2a070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_axi_m_clk = {
+ .halt_reg = 0x2a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_1_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_axi_s_bridge_clk = {
+ .halt_reg = 0x2a024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_1_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_axi_s_clk = {
+ .halt_reg = 0x2a01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_1_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_1_pipe_clk = {
+ .halt_reg = 0x2a068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2a068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x1_phy_ahb_clk = {
+ .halt_reg = 0x29078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x1_phy_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_ahb_clk = {
+ .halt_reg = 0x28030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_aux_clk = {
+ .halt_reg = 0x28070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_axi_m_clk = {
+ .halt_reg = 0x28038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x2_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_axi_s_bridge_clk = {
+ .halt_reg = 0x28048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_axi_s_clk = {
+ .halt_reg = 0x28040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_phy_ahb_clk = {
+ .halt_reg = 0x28080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_phy_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3x2_pipe_clk = {
+ .halt_reg = 0x28068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x28068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3x2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x2_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcnoc_at_clk = {
+ .halt_reg = 0x31024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcnoc_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcnoc_lpass_clk = {
+ .halt_reg = 0x31020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcnoc_lpass_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6_ahb_clk = {
+ .halt_reg = 0x25014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_wcss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6_ahb_s_clk = {
+ .halt_reg = 0x25018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6_ahb_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_wcss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6_axim_clk = {
+ .halt_reg = 0x2500c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6_axim_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_q6_axim_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6_axis_clk = {
+ .halt_reg = 0x25010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6_axis_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6_tsctr_1to2_clk = {
+ .halt_reg = 0x25020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6_tsctr_1to2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_tsctr_div2_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6ss_atbm_clk = {
+ .halt_reg = 0x2501c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6ss_atbm_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6ss_pclkdbg_clk = {
+ .halt_reg = 0x25024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6ss_pclkdbg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_dap_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6ss_trig_clk = {
+ .halt_reg = 0x250a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x250a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_q6ss_trig_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_dap_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x2d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2d038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_cfg_ahb_clk = {
+ .halt_reg = 0x2d06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2d06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_ahb_clk = {
+ .halt_reg = 0x2d068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2d068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x2d05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_dap_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_etr_usb_clk = {
+ .halt_reg = 0x2d064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2d064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_etr_usb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_eud_at_div_clk_src = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_eud_at_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_at_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_eud_at_clk = {
+ .halt_reg = 0x2d070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2d070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_eud_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_eud_at_div_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_io_macro_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_io_macro_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qpic_io_macro_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_sleep_clk = {
+ .halt_reg = 0x3201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x33034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x3302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_lpass_cfg_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_lpass_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_nssnoc_1_clk = {
+ .halt_reg = 0x17090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_nssnoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_nssnoc_clk = {
+ .halt_reg = 0x17084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_nssnoc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_1lane_1_m_clk = {
+ .halt_reg = 0x2e050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_1lane_1_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_1_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_1lane_1_s_clk = {
+ .halt_reg = 0x2e0ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e0ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_1lane_1_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_1_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_1lane_m_clk = {
+ .halt_reg = 0x2e080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_1lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_0_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_1lane_s_clk = {
+ .halt_reg = 0x2e04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_1lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x1_0_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_2lane_m_clk = {
+ .halt_reg = 0x2e07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_2lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x2_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_2lane_s_clk = {
+ .halt_reg = 0x2e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_2lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3x2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_usb_clk = {
+ .halt_reg = 0x2e058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_usb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_at_clk = {
+ .halt_reg = 0x2e038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2e038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
+ .halt_reg = 0x2e030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_wcss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_wcss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+ .halt_reg = 0x16010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+ .halt_reg = 0x1600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+ .halt_reg = 0x16018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+ .halt_reg = 0x2c050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_eud_at_clk = {
+ .halt_reg = 0x30004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_eud_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_eud_at_div_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_lfps_clk = {
+ .halt_reg = 0x2c090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_lfps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_lfps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+ .halt_reg = 0x2c048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+ .halt_reg = 0x2c054,
+ .clkr = {
+ .enable_reg = 0x2c054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_mock_utmi_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+ .halt_reg = 0x2c05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_pipe_clk = {
+ .halt_reg = 0x2c078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2c078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+ .halt_reg = 0x2c058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_axim_clk = {
+ .halt_reg = 0x2505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_axim_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_axis_clk = {
+ .halt_reg = 0x25060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_axis_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_apb_bdg_clk = {
+ .halt_reg = 0x25048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_apb_bdg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_dap_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
+ .halt_reg = 0x25038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_apb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_dap_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_atb_bdg_clk = {
+ .halt_reg = 0x2504c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2504c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_atb_bdg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
+ .halt_reg = 0x2503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_nts_bdg_clk = {
+ .halt_reg = 0x25050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_nts_bdg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_tsctr_div2_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
+ .halt_reg = 0x25040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_nts_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_tsctr_div2_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_ecahb_clk = {
+ .halt_reg = 0x25058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_ecahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_wcss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_mst_async_bdg_clk = {
+ .halt_reg = 0x2e0b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_mst_async_bdg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_slv_async_bdg_clk = {
+ .halt_reg = 0x2e0b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e0b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_slv_async_bdg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x34018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_div4_clk = {
+ .halt_reg = 0x3401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_div4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_div4_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_im_sleep_clk = {
+ .halt_reg = 0x34020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_im_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_pcnoc_1_clk = {
+ .halt_reg = 0x17080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_pcnoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_ahb_clk = {
+ .halt_reg = 0x1900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mem_noc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_apss_axi_clk = {
+ .halt_reg = 0x1901c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mem_noc_apss_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_apss_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
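+/* Read-only divider on XO that feeds the mem-noc QoS generator reference clock */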
+static struct clk_regmap_div gcc_snoc_qosgen_extref_div_clk_src = {
+ .reg = 0x2e010,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_qosgen_extref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_mem_noc_qosgen_extref_clk = {
+ .halt_reg = 0x19024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mem_noc_qosgen_extref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_snoc_qosgen_extref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
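+/* Regmap-backed clocks exported by this driver, indexed by the clock IDs from the DT binding */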
+static struct clk_regmap *gcc_ipq5332_clocks[] = {
+ [GPLL0_MAIN] = &gpll0_main.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL2_MAIN] = &gpll2_main.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL4_MAIN] = &gpll4_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+ [GCC_ADSS_PWM_CLK_SRC] = &gcc_adss_pwm_clk_src.clkr,
+ [GCC_AHB_CLK] = &gcc_ahb_clk.clkr,
+ [GCC_APSS_AXI_CLK_SRC] = &gcc_apss_axi_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+ [GCC_CE_AHB_CLK] = &gcc_ce_ahb_clk.clkr,
+ [GCC_CE_AXI_CLK] = &gcc_ce_axi_clk.clkr,
+ [GCC_CE_PCNOC_AHB_CLK] = &gcc_ce_pcnoc_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_APU_CLK] = &gcc_cmn_12gpll_apu_clk.clkr,
+ [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+ [GCC_LPASS_SWAY_CLK] = &gcc_lpass_sway_clk.clkr,
+ [GCC_LPASS_SWAY_CLK_SRC] = &gcc_lpass_sway_clk_src.clkr,
+ [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+ [GCC_MDIO_SLAVE_AHB_CLK] = &gcc_mdio_slave_ahb_clk.clkr,
+ [GCC_MEM_NOC_Q6_AXI_CLK] = &gcc_mem_noc_q6_axi_clk.clkr,
+ [GCC_MEM_NOC_TS_CLK] = &gcc_mem_noc_ts_clk.clkr,
+ [GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr,
+ [GCC_NSS_TS_CLK_SRC] = &gcc_nss_ts_clk_src.clkr,
+ [GCC_NSSCC_CLK] = &gcc_nsscc_clk.clkr,
+ [GCC_NSSCFG_CLK] = &gcc_nsscfg_clk.clkr,
+ [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr,
+ [GCC_NSSNOC_NSSCC_CLK] = &gcc_nssnoc_nsscc_clk.clkr,
+ [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+ [GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr,
+ [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+ [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+ [GCC_NSSNOC_XO_DCD_CLK] = &gcc_nssnoc_xo_dcd_clk.clkr,
+ [GCC_PCIE3X1_0_AHB_CLK] = &gcc_pcie3x1_0_ahb_clk.clkr,
+ [GCC_PCIE3X1_0_AUX_CLK] = &gcc_pcie3x1_0_aux_clk.clkr,
+ [GCC_PCIE3X1_0_AXI_CLK_SRC] = &gcc_pcie3x1_0_axi_clk_src.clkr,
+ [GCC_PCIE3X1_0_AXI_M_CLK] = &gcc_pcie3x1_0_axi_m_clk.clkr,
+ [GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK] = &gcc_pcie3x1_0_axi_s_bridge_clk.clkr,
+ [GCC_PCIE3X1_0_AXI_S_CLK] = &gcc_pcie3x1_0_axi_s_clk.clkr,
+ [GCC_PCIE3X1_0_PIPE_CLK] = &gcc_pcie3x1_0_pipe_clk.clkr,
+ [GCC_PCIE3X1_0_RCHG_CLK] = &gcc_pcie3x1_0_rchg_clk.clkr,
+ [GCC_PCIE3X1_0_RCHG_CLK_SRC] = &gcc_pcie3x1_0_rchg_clk_src.clkr,
+ [GCC_PCIE3X1_1_AHB_CLK] = &gcc_pcie3x1_1_ahb_clk.clkr,
+ [GCC_PCIE3X1_1_AUX_CLK] = &gcc_pcie3x1_1_aux_clk.clkr,
+ [GCC_PCIE3X1_1_AXI_CLK_SRC] = &gcc_pcie3x1_1_axi_clk_src.clkr,
+ [GCC_PCIE3X1_1_AXI_M_CLK] = &gcc_pcie3x1_1_axi_m_clk.clkr,
+ [GCC_PCIE3X1_1_AXI_S_BRIDGE_CLK] = &gcc_pcie3x1_1_axi_s_bridge_clk.clkr,
+ [GCC_PCIE3X1_1_AXI_S_CLK] = &gcc_pcie3x1_1_axi_s_clk.clkr,
+ [GCC_PCIE3X1_1_PIPE_CLK] = &gcc_pcie3x1_1_pipe_clk.clkr,
+ [GCC_PCIE3X1_1_RCHG_CLK] = &gcc_pcie3x1_1_rchg_clk.clkr,
+ [GCC_PCIE3X1_1_RCHG_CLK_SRC] = &gcc_pcie3x1_1_rchg_clk_src.clkr,
+ [GCC_PCIE3X1_PHY_AHB_CLK] = &gcc_pcie3x1_phy_ahb_clk.clkr,
+ [GCC_PCIE3X2_AHB_CLK] = &gcc_pcie3x2_ahb_clk.clkr,
+ [GCC_PCIE3X2_AUX_CLK] = &gcc_pcie3x2_aux_clk.clkr,
+ [GCC_PCIE3X2_AXI_M_CLK] = &gcc_pcie3x2_axi_m_clk.clkr,
+ [GCC_PCIE3X2_AXI_M_CLK_SRC] = &gcc_pcie3x2_axi_m_clk_src.clkr,
+ [GCC_PCIE3X2_AXI_S_BRIDGE_CLK] = &gcc_pcie3x2_axi_s_bridge_clk.clkr,
+ [GCC_PCIE3X2_AXI_S_CLK] = &gcc_pcie3x2_axi_s_clk.clkr,
+ [GCC_PCIE3X2_AXI_S_CLK_SRC] = &gcc_pcie3x2_axi_s_clk_src.clkr,
+ [GCC_PCIE3X2_PHY_AHB_CLK] = &gcc_pcie3x2_phy_ahb_clk.clkr,
+ [GCC_PCIE3X2_PIPE_CLK] = &gcc_pcie3x2_pipe_clk.clkr,
+ [GCC_PCIE3X2_RCHG_CLK] = &gcc_pcie3x2_rchg_clk.clkr,
+ [GCC_PCIE3X2_RCHG_CLK_SRC] = &gcc_pcie3x2_rchg_clk_src.clkr,
+ [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr,
+ [GCC_PCNOC_AT_CLK] = &gcc_pcnoc_at_clk.clkr,
+ [GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr,
+ [GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
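+/* Fixed /6 divider on the QDSS AT clock, used by the EUD AT branch clocks */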
+ [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr,
+ [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr,
+ [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr,
+ [GCC_Q6_AXIM_CLK_SRC] = &gcc_q6_axim_clk_src.clkr,
+ [GCC_Q6_AXIS_CLK] = &gcc_q6_axis_clk.clkr,
+ [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr,
+ [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr,
+ [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr,
+ [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
+ [GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr,
+ [GCC_QDSS_CFG_AHB_CLK] = &gcc_qdss_cfg_ahb_clk.clkr,
+ [GCC_QDSS_DAP_AHB_CLK] = &gcc_qdss_dap_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr,
+ [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
+ [GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
+ [GCC_QPIC_IO_MACRO_CLK_SRC] = &gcc_qpic_io_macro_clk_src.clkr,
+ [GCC_QPIC_SLEEP_CLK] = &gcc_qpic_sleep_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [GCC_SNOC_LPASS_CFG_CLK] = &gcc_snoc_lpass_cfg_clk.clkr,
+ [GCC_SNOC_NSSNOC_1_CLK] = &gcc_snoc_nssnoc_1_clk.clkr,
+ [GCC_SNOC_NSSNOC_CLK] = &gcc_snoc_nssnoc_clk.clkr,
+ [GCC_SNOC_PCIE3_1LANE_1_M_CLK] = &gcc_snoc_pcie3_1lane_1_m_clk.clkr,
+ [GCC_SNOC_PCIE3_1LANE_1_S_CLK] = &gcc_snoc_pcie3_1lane_1_s_clk.clkr,
+ [GCC_SNOC_PCIE3_1LANE_M_CLK] = &gcc_snoc_pcie3_1lane_m_clk.clkr,
+ [GCC_SNOC_PCIE3_1LANE_S_CLK] = &gcc_snoc_pcie3_1lane_s_clk.clkr,
+ [GCC_SNOC_PCIE3_2LANE_M_CLK] = &gcc_snoc_pcie3_2lane_m_clk.clkr,
+ [GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr,
+ [GCC_SNOC_USB_CLK] = &gcc_snoc_usb_clk.clkr,
+ [GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr,
+ [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr,
+ [GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr,
+ [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+ [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+ [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+ [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+ [GCC_UNIPHY_SYS_CLK_SRC] = &gcc_uniphy_sys_clk_src.clkr,
+ [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+ [GCC_USB0_AUX_CLK_SRC] = &gcc_usb0_aux_clk_src.clkr,
+ [GCC_USB0_EUD_AT_CLK] = &gcc_usb0_eud_at_clk.clkr,
+ [GCC_USB0_LFPS_CLK] = &gcc_usb0_lfps_clk.clkr,
+ [GCC_USB0_LFPS_CLK_SRC] = &gcc_usb0_lfps_clk_src.clkr,
+ [GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+ [GCC_USB0_MASTER_CLK_SRC] = &gcc_usb0_master_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK_SRC] = &gcc_usb0_mock_utmi_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb0_mock_utmi_div_clk_src.clkr,
+ [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+ [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+ [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
+ [GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr,
+ [GCC_WCSS_AXIM_CLK] = &gcc_wcss_axim_clk.clkr,
+ [GCC_WCSS_AXIS_CLK] = &gcc_wcss_axis_clk.clkr,
+ [GCC_WCSS_DBG_IFC_APB_BDG_CLK] = &gcc_wcss_dbg_ifc_apb_bdg_clk.clkr,
+ [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr,
+ [GCC_WCSS_DBG_IFC_ATB_BDG_CLK] = &gcc_wcss_dbg_ifc_atb_bdg_clk.clkr,
+ [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr,
+ [GCC_WCSS_DBG_IFC_NTS_BDG_CLK] = &gcc_wcss_dbg_ifc_nts_bdg_clk.clkr,
+ [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr,
+ [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr,
+ [GCC_WCSS_MST_ASYNC_BDG_CLK] = &gcc_wcss_mst_async_bdg_clk.clkr,
+ [GCC_WCSS_SLV_ASYNC_BDG_CLK] = &gcc_wcss_slv_async_bdg_clk.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
+ [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr,
+ [GCC_IM_SLEEP_CLK] = &gcc_im_sleep_clk.clkr,
+ [GCC_NSSNOC_PCNOC_1_CLK] = &gcc_nssnoc_pcnoc_1_clk.clkr,
+ [GCC_MEM_NOC_AHB_CLK] = &gcc_mem_noc_ahb_clk.clkr,
+ [GCC_MEM_NOC_APSS_AXI_CLK] = &gcc_mem_noc_apss_axi_clk.clkr,
+ [GCC_SNOC_QOSGEN_EXTREF_DIV_CLK_SRC] = &gcc_snoc_qosgen_extref_div_clk_src.clkr,
+ [GCC_MEM_NOC_QOSGEN_EXTREF_CLK] = &gcc_mem_noc_qosgen_extref_clk.clkr,
+ [GCC_PCIE3X2_PIPE_CLK_SRC] = &gcc_pcie3x2_pipe_clk_src.clkr,
+ [GCC_PCIE3X1_0_PIPE_CLK_SRC] = &gcc_pcie3x1_0_pipe_clk_src.clkr,
+ [GCC_PCIE3X1_1_PIPE_CLK_SRC] = &gcc_pcie3x1_1_pipe_clk_src.clkr,
+ [GCC_USB0_PIPE_CLK_SRC] = &gcc_usb0_pipe_clk_src.clkr,
+};
+
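+/* Reset map: each entry gives a register offset plus an optional bit position within it */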
+static const struct qcom_reset_map gcc_ipq5332_resets[] = {
+ [GCC_ADSS_BCR] = { 0x1c000 },
+ [GCC_ADSS_PWM_CLK_ARES] = { 0x1c00c, 2 },
+ [GCC_AHB_CLK_ARES] = { 0x34024, 2 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x38000 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_GPLL0_CLK_ARES] = { 0x3800c, 2 },
+ [GCC_APSS_AHB_CLK_ARES] = { 0x24018, 2 },
+ [GCC_APSS_AXI_CLK_ARES] = { 0x2401c, 2 },
+ [GCC_BLSP1_AHB_CLK_ARES] = { 0x1008, 2 },
+ [GCC_BLSP1_BCR] = { 0x1000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x2000 },
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_ARES] = { 0x2024, 2 },
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_ARES] = { 0x2020, 2 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x3000 },
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_ARES] = { 0x3024, 2 },
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_ARES] = { 0x3020, 2 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x4000 },
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_ARES] = { 0x4024, 2 },
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_ARES] = { 0x4020, 2 },
+ [GCC_BLSP1_SLEEP_CLK_ARES] = { 0x1010, 2 },
+ [GCC_BLSP1_UART1_APPS_CLK_ARES] = { 0x2040, 2 },
+ [GCC_BLSP1_UART1_BCR] = { 0x2028 },
+ [GCC_BLSP1_UART2_APPS_CLK_ARES] = { 0x3040, 2 },
+ [GCC_BLSP1_UART2_BCR] = { 0x3028 },
+ [GCC_BLSP1_UART3_APPS_CLK_ARES] = { 0x4054, 2 },
+ [GCC_BLSP1_UART3_BCR] = { 0x4028 },
+ [GCC_CE_BCR] = { 0x18008 },
+ [GCC_CMN_BLK_BCR] = { 0x3a000 },
+ [GCC_CMN_LDO0_BCR] = { 0x1d000 },
+ [GCC_CMN_LDO1_BCR] = { 0x1d008 },
+ [GCC_DCC_BCR] = { 0x35000 },
+ [GCC_GP1_CLK_ARES] = { 0x8018, 2 },
+ [GCC_GP2_CLK_ARES] = { 0x9018, 2 },
+ [GCC_LPASS_BCR] = { 0x27000 },
+ [GCC_LPASS_CORE_AXIM_CLK_ARES] = { 0x27018, 2 },
+ [GCC_LPASS_SWAY_CLK_ARES] = { 0x27014, 2 },
+ [GCC_MDIOM_BCR] = { 0x12000 },
+ [GCC_MDIOS_BCR] = { 0x12008 },
+ [GCC_NSS_BCR] = { 0x17000 },
+ [GCC_NSS_TS_CLK_ARES] = { 0x17018, 2 },
+ [GCC_NSSCC_CLK_ARES] = { 0x17034, 2 },
+ [GCC_NSSCFG_CLK_ARES] = { 0x1702c, 2 },
+ [GCC_NSSNOC_ATB_CLK_ARES] = { 0x17014, 2 },
+ [GCC_NSSNOC_NSSCC_CLK_ARES] = { 0x17030, 2 },
+ [GCC_NSSNOC_QOSGEN_REF_CLK_ARES] = { 0x1701c, 2 },
+ [GCC_NSSNOC_SNOC_1_CLK_ARES] = { 0x1707c, 2 },
+ [GCC_NSSNOC_SNOC_CLK_ARES] = { 0x17028, 2 },
+ [GCC_NSSNOC_TIMEOUT_REF_CLK_ARES] = { 0x17020, 2 },
+ [GCC_NSSNOC_XO_DCD_CLK_ARES] = { 0x17074, 2 },
+ [GCC_PCIE3X1_0_AHB_CLK_ARES] = { 0x29030, 2 },
+ [GCC_PCIE3X1_0_AUX_CLK_ARES] = { 0x29070, 2 },
+ [GCC_PCIE3X1_0_AXI_M_CLK_ARES] = { 0x29038, 2 },
+ [GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK_ARES] = { 0x29048, 2 },
+ [GCC_PCIE3X1_0_AXI_S_CLK_ARES] = { 0x29040, 2 },
+ [GCC_PCIE3X1_0_BCR] = { 0x29000 },
+ [GCC_PCIE3X1_0_LINK_DOWN_BCR] = { 0x29054 },
+ [GCC_PCIE3X1_0_PHY_BCR] = { 0x29060 },
+ [GCC_PCIE3X1_0_PHY_PHY_BCR] = { 0x2905c },
+ [GCC_PCIE3X1_1_AHB_CLK_ARES] = { 0x2a00c, 2 },
+ [GCC_PCIE3X1_1_AUX_CLK_ARES] = { 0x2a070, 2 },
+ [GCC_PCIE3X1_1_AXI_M_CLK_ARES] = { 0x2a014, 2 },
+ [GCC_PCIE3X1_1_AXI_S_BRIDGE_CLK_ARES] = { 0x2a024, 2 },
+ [GCC_PCIE3X1_1_AXI_S_CLK_ARES] = { 0x2a01c, 2 },
+ [GCC_PCIE3X1_1_BCR] = { 0x2a000 },
+ [GCC_PCIE3X1_1_LINK_DOWN_BCR] = { 0x2a028 },
+ [GCC_PCIE3X1_1_PHY_BCR] = { 0x2a030 },
+ [GCC_PCIE3X1_1_PHY_PHY_BCR] = { 0x2a02c },
+ [GCC_PCIE3X1_PHY_AHB_CLK_ARES] = { 0x29078, 2 },
+ [GCC_PCIE3X2_AHB_CLK_ARES] = { 0x28030, 2 },
+ [GCC_PCIE3X2_AUX_CLK_ARES] = { 0x28070, 2 },
+ [GCC_PCIE3X2_AXI_M_CLK_ARES] = { 0x28038, 2 },
+ [GCC_PCIE3X2_AXI_S_BRIDGE_CLK_ARES] = { 0x28048, 2 },
+ [GCC_PCIE3X2_AXI_S_CLK_ARES] = { 0x28040, 2 },
+ [GCC_PCIE3X2_BCR] = { 0x28000 },
+ [GCC_PCIE3X2_LINK_DOWN_BCR] = { 0x28054 },
+ [GCC_PCIE3X2_PHY_AHB_CLK_ARES] = { 0x28080, 2 },
+ [GCC_PCIE3X2_PHY_BCR] = { 0x28060 },
+ [GCC_PCIE3X2PHY_PHY_BCR] = { 0x2805c },
+ [GCC_PCNOC_BCR] = { 0x31000 },
+ [GCC_PCNOC_LPASS_CLK_ARES] = { 0x31020, 2 },
+ [GCC_PRNG_AHB_CLK_ARES] = { 0x13024, 2 },
+ [GCC_PRNG_BCR] = { 0x13020 },
+ [GCC_Q6_AHB_CLK_ARES] = { 0x25014, 2 },
+ [GCC_Q6_AHB_S_CLK_ARES] = { 0x25018, 2 },
+ [GCC_Q6_AXIM_CLK_ARES] = { 0x2500c, 2 },
+ [GCC_Q6_AXIS_CLK_ARES] = { 0x25010, 2 },
+ [GCC_Q6_TSCTR_1TO2_CLK_ARES] = { 0x25020, 2 },
+ [GCC_Q6SS_ATBM_CLK_ARES] = { 0x2501c, 2 },
+ [GCC_Q6SS_PCLKDBG_CLK_ARES] = { 0x25024, 2 },
+ [GCC_Q6SS_TRIG_CLK_ARES] = { 0x250a0, 2 },
+ [GCC_QDSS_APB2JTAG_CLK_ARES] = { 0x2d060, 2 },
+ [GCC_QDSS_AT_CLK_ARES] = { 0x2d038, 2 },
+ [GCC_QDSS_BCR] = { 0x2d000 },
+ [GCC_QDSS_CFG_AHB_CLK_ARES] = { 0x2d06c, 2 },
+ [GCC_QDSS_DAP_AHB_CLK_ARES] = { 0x2d068, 2 },
+ [GCC_QDSS_DAP_CLK_ARES] = { 0x2d05c, 2 },
+ [GCC_QDSS_ETR_USB_CLK_ARES] = { 0x2d064, 2 },
+ [GCC_QDSS_EUD_AT_CLK_ARES] = { 0x2d070, 2 },
+ [GCC_QDSS_STM_CLK_ARES] = { 0x2d040, 2 },
+ [GCC_QDSS_TRACECLKIN_CLK_ARES] = { 0x2d044, 2 },
+ [GCC_QDSS_TS_CLK_ARES] = { 0x2d078, 2 },
+ [GCC_QDSS_TSCTR_DIV16_CLK_ARES] = { 0x2d058, 2 },
+ [GCC_QDSS_TSCTR_DIV2_CLK_ARES] = { 0x2d048, 2 },
+ [GCC_QDSS_TSCTR_DIV3_CLK_ARES] = { 0x2d04c, 2 },
+ [GCC_QDSS_TSCTR_DIV4_CLK_ARES] = { 0x2d050, 2 },
+ [GCC_QDSS_TSCTR_DIV8_CLK_ARES] = { 0x2d054, 2 },
+ [GCC_QPIC_AHB_CLK_ARES] = { 0x32010, 2 },
+ [GCC_QPIC_CLK_ARES] = { 0x32014, 2 },
+ [GCC_QPIC_BCR] = { 0x32000 },
+ [GCC_QPIC_IO_MACRO_CLK_ARES] = { 0x3200c, 2 },
+ [GCC_QPIC_SLEEP_CLK_ARES] = { 0x3201c, 2 },
+ [GCC_QUSB2_0_PHY_BCR] = { 0x2c068 },
+ [GCC_SDCC1_AHB_CLK_ARES] = { 0x33034, 2 },
+ [GCC_SDCC1_APPS_CLK_ARES] = { 0x3302c, 2 },
+ [GCC_SDCC_BCR] = { 0x33000 },
+ [GCC_SNOC_BCR] = { 0x2e000 },
+ [GCC_SNOC_LPASS_CFG_CLK_ARES] = { 0x2e028, 2 },
+ [GCC_SNOC_NSSNOC_1_CLK_ARES] = { 0x17090, 2 },
+ [GCC_SNOC_NSSNOC_CLK_ARES] = { 0x17084, 2 },
+ [GCC_SYS_NOC_QDSS_STM_AXI_CLK_ARES] = { 0x2e034, 2 },
+ [GCC_SYS_NOC_WCSS_AHB_CLK_ARES] = { 0x2e030, 2 },
+ [GCC_UNIPHY0_AHB_CLK_ARES] = { 0x16010, 2 },
+ [GCC_UNIPHY0_BCR] = { 0x16000 },
+ [GCC_UNIPHY0_SYS_CLK_ARES] = { 0x1600c, 2 },
+ [GCC_UNIPHY1_AHB_CLK_ARES] = { 0x1601c, 2 },
+ [GCC_UNIPHY1_BCR] = { 0x16014 },
+ [GCC_UNIPHY1_SYS_CLK_ARES] = { 0x16018, 2 },
+ [GCC_USB0_AUX_CLK_ARES] = { 0x2c050, 2 },
+ [GCC_USB0_EUD_AT_CLK_ARES] = { 0x30004, 2 },
+ [GCC_USB0_LFPS_CLK_ARES] = { 0x2c090, 2 },
+ [GCC_USB0_MASTER_CLK_ARES] = { 0x2c048, 2 },
+ [GCC_USB0_MOCK_UTMI_CLK_ARES] = { 0x2c054, 2 },
+ [GCC_USB0_PHY_BCR] = { 0x2c06c },
+ [GCC_USB0_PHY_CFG_AHB_CLK_ARES] = { 0x2c05c, 2 },
+ [GCC_USB0_SLEEP_CLK_ARES] = { 0x2c058, 2 },
+ [GCC_USB3PHY_0_PHY_BCR] = { 0x2c070 },
+ [GCC_USB_BCR] = { 0x2c000 },
+ [GCC_WCSS_AXIM_CLK_ARES] = { 0x2505c, 2 },
+ [GCC_WCSS_AXIS_CLK_ARES] = { 0x25060, 2 },
+ [GCC_WCSS_BCR] = { 0x18004 },
+ [GCC_WCSS_DBG_IFC_APB_BDG_CLK_ARES] = { 0x25048, 2 },
+ [GCC_WCSS_DBG_IFC_APB_CLK_ARES] = { 0x25038, 2 },
+ [GCC_WCSS_DBG_IFC_ATB_BDG_CLK_ARES] = { 0x2504c, 2 },
+ [GCC_WCSS_DBG_IFC_ATB_CLK_ARES] = { 0x2503c, 2 },
+ [GCC_WCSS_DBG_IFC_NTS_BDG_CLK_ARES] = { 0x25050, 2 },
+ [GCC_WCSS_DBG_IFC_NTS_CLK_ARES] = { 0x25040, 2 },
+ [GCC_WCSS_ECAHB_CLK_ARES] = { 0x25058, 2 },
+ [GCC_WCSS_MST_ASYNC_BDG_CLK_ARES] = { 0x2e0b0, 2 },
+ [GCC_WCSS_Q6_BCR] = { 0x18000 },
+ [GCC_WCSS_SLV_ASYNC_BDG_CLK_ARES] = { 0x2e0b4, 2 },
+ [GCC_XO_CLK_ARES] = { 0x34018, 2 },
+ [GCC_XO_DIV4_CLK_ARES] = { 0x3401c, 2 },
+ [GCC_Q6SS_DBG_ARES] = { 0x25094 },
+ [GCC_WCSS_DBG_BDG_ARES] = { 0x25098, 0 },
+ [GCC_WCSS_DBG_ARES] = { 0x25098, 1 },
+ [GCC_WCSS_AXI_S_ARES] = { 0x25098, 2 },
+ [GCC_WCSS_AXI_M_ARES] = { 0x25098, 3 },
+ [GCC_WCSSAON_ARES] = { 0x2509C },
+ [GCC_PCIE3X2_PIPE_ARES] = { 0x28058, 0 },
+ [GCC_PCIE3X2_CORE_STICKY_ARES] = { 0x28058, 1 },
+ [GCC_PCIE3X2_AXI_S_STICKY_ARES] = { 0x28058, 2 },
+ [GCC_PCIE3X2_AXI_M_STICKY_ARES] = { 0x28058, 3 },
+ [GCC_PCIE3X1_0_PIPE_ARES] = { 0x29058, 0 },
+ [GCC_PCIE3X1_0_CORE_STICKY_ARES] = { 0x29058, 1 },
+ [GCC_PCIE3X1_0_AXI_S_STICKY_ARES] = { 0x29058, 2 },
+ [GCC_PCIE3X1_0_AXI_M_STICKY_ARES] = { 0x29058, 3 },
+ [GCC_PCIE3X1_1_PIPE_ARES] = { 0x2a058, 0 },
+ [GCC_PCIE3X1_1_CORE_STICKY_ARES] = { 0x2a058, 1 },
+ [GCC_PCIE3X1_1_AXI_S_STICKY_ARES] = { 0x2a058, 2 },
+ [GCC_PCIE3X1_1_AXI_M_STICKY_ARES] = { 0x2a058, 3 },
+ [GCC_IM_SLEEP_CLK_ARES] = { 0x34020, 2 },
+ [GCC_NSSNOC_PCNOC_1_CLK_ARES] = { 0x17080, 2 },
+ [GCC_UNIPHY0_XPCS_ARES] = { 0x16050 },
+ [GCC_UNIPHY1_XPCS_ARES] = { 0x16060 },
+};
+
+static const struct regmap_config gcc_ipq5332_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3f024,
+ .fast_io = true,
+};
+
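+/* Fixed-factor clocks with no register control, registered as plain clk_hw */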
+static struct clk_hw *gcc_ipq5332_hws[] = {
+ &gpll0_div2.hw,
+ &gcc_xo_div4_clk_src.hw,
+ &gcc_system_noc_bfdcd_div2_clk_src.hw,
+ &gcc_qdss_tsctr_div2_clk_src.hw,
+ &gcc_qdss_tsctr_div3_clk_src.hw,
+ &gcc_qdss_tsctr_div4_clk_src.hw,
+ &gcc_qdss_tsctr_div8_clk_src.hw,
+ &gcc_qdss_tsctr_div16_clk_src.hw,
+ &gcc_eud_at_div_clk_src.hw,
+};
+
+static const struct qcom_cc_desc gcc_ipq5332_desc = {
+ .config = &gcc_ipq5332_regmap_config,
+ .clks = gcc_ipq5332_clocks,
+ .num_clks = ARRAY_SIZE(gcc_ipq5332_clocks),
+ .resets = gcc_ipq5332_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq5332_resets),
+ .clk_hws = gcc_ipq5332_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq5332_hws),
+};
+
+static int gcc_ipq5332_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_ipq5332_desc);
+}
+
+static const struct of_device_id gcc_ipq5332_match_table[] = {
+ { .compatible = "qcom,ipq5332-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq5332_match_table);
+
+static struct platform_driver gcc_ipq5332_driver = {
+ .probe = gcc_ipq5332_probe,
+ .driver = {
+ .name = "gcc-ipq5332",
+ .of_match_table = gcc_ipq5332_match_table,
+ },
+};
+
+static int __init gcc_ipq5332_init(void)
+{
+ return platform_driver_register(&gcc_ipq5332_driver);
+}
+core_initcall(gcc_ipq5332_init);
+
+static void __exit gcc_ipq5332_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq5332_driver);
+}
+module_exit(gcc_ipq5332_exit);
+
+MODULE_DESCRIPTION("QTI GCC IPQ5332 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 14cc4ad02221..6541d98c0348 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -934,7 +934,7 @@ static struct clk_rcg2 usb0_mock_utmi_clk_src = {
};
static const struct clk_parent_data gcc_usb3phy_0_cc_pipe_clk_xo[] = {
- { .name = "usb3phy_0_cc_pipe_clk" },
+ { .fw_name = "usb3phy_0_cc_pipe_clk", .name = "usb3phy_0_cc_pipe_clk" },
{ .fw_name = "xo", .name = "xo" },
};
@@ -1002,7 +1002,7 @@ static struct clk_rcg2 usb1_mock_utmi_clk_src = {
};
static const struct clk_parent_data gcc_usb3phy_1_cc_pipe_clk_xo[] = {
- { .name = "usb3phy_1_cc_pipe_clk" },
+ { .fw_name = "usb3phy_1_cc_pipe_clk", .name = "usb3phy_1_cc_pipe_clk" },
{ .fw_name = "xo", .name = "xo" },
};
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
new file mode 100644
index 000000000000..b2a2d618a5ec
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -0,0 +1,4248 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2023 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+#include <dt-bindings/reset/qcom,ipq9574-gcc.h>
+
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_BIAS_PLL_UBI_NC_CLK,
+ DT_PCIE30_PHY0_PIPE_CLK,
+ DT_PCIE30_PHY1_PIPE_CLK,
+ DT_PCIE30_PHY2_PIPE_CLK,
+ DT_PCIE30_PHY3_PIPE_CLK,
+ DT_USB3PHY_0_CC_PIPE_CLK,
+};
+
+enum {
+ P_XO,
+ P_PCIE30_PHY0_PIPE,
+ P_PCIE30_PHY1_PIPE,
+ P_PCIE30_PHY2_PIPE,
+ P_PCIE30_PHY3_PIPE,
+ P_USB3PHY_0_PIPE,
+ P_GPLL0,
+ P_GPLL0_DIV2,
+ P_GPLL0_OUT_AUX,
+ P_GPLL2,
+ P_GPLL4,
+ P_PI_SLEEP,
+ P_BIAS_PLL_UBI_NC_CLK,
+};
+
+static const struct parent_map gcc_xo_map[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_xo_data[] = {
+ { .index = DT_XO },
+};
+
+static const struct clk_parent_data gcc_sleep_clk_data[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
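+/* Alpha PLLs (GPLL0/2/4) clocked by XO; each is followed by a read-only post-divider */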
+static struct clk_alpha_pll gpll0_main = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_main",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_out_main_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_out_main_div2",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4_main = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll4_main",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll4",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll4_main.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2_main = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll2_main",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll2",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll2_main.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_sleep_clk_src = {
+ .halt_reg = 0x3400c,
+ .clkr = {
+ .enable_reg = 0x3400c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sleep_clk_src",
+ .parent_data = gcc_sleep_clk_data,
+ .num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_out_main_div2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_div2_gpll0[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_div2_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+ { P_GPLL0, 5 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_sleep_clk[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_sleep_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+ { P_PI_SLEEP, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_xo_gpll0_core_pi_sleep_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 2 },
+ { P_PI_SLEEP, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .index = DT_BIAS_PLL_UBI_NC_CLK },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+ { P_BIAS_PLL_UBI_NC_CLK, 3 },
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map
+ gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_PI_SLEEP, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_out_main_div2_gpll0[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_out_main_div2_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll4_gpll0_gpll0_out_main_div2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 1 },
+ { P_GPLL0, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_usb3phy_0_cc_pipe_clk_xo[] = {
+ { .index = DT_USB3PHY_0_CC_PIPE_CLK },
+ { .index = DT_XO },
+};
+
+static const struct parent_map gcc_usb3phy_0_cc_pipe_clk_xo_map[] = {
+ { P_USB3PHY_0_PIPE, 0 },
+ { P_XO, 2 },
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll0_gpll2_gpll0_out_main_div2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_div2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll0_div2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_pi_sleep[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_pi_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL4, 3 },
+ { P_PI_SLEEP, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_aux_gpll2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_aux_gpll2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_GPLL2, 3 },
+};
+
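+/*
+ * Frequency table entries are F(rate, source, pre-divider, m, n).  The
+ * half-integer dividers (1.5, 3.5, 12.5, ...) are representable because
+ * the divider field is programmed as 2 * div - 1, and m = n = 0 means the
+ * MND counter is bypassed.
+ */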
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x2400c,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_axi_clk_src[] = {
+ F(533000000, P_GPLL0, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_axi_clk_src = {
+ .cmd_rcgr = 0x24004,
+ .freq_tbl = ftbl_apss_axi_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_div2_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_axi_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_div2_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_div2_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_i2c_apps_clk_src[] = {
+ F(9600000, P_XO, 2.5, 0, 0),
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x02018,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 2, 5),
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2, 4, 5),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02004,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03018,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03004,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04018,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04004,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05018,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05004,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06018,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06004,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07018,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07004,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0_DIV2, 1, 144, 15625),
+ F(7372800, P_GPLL0_DIV2, 1, 288, 15625),
+ F(14745600, P_GPLL0_DIV2, 1, 576, 15625),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x0202c,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0302c,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x0402c,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x0502c,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x0602c,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x0702c,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
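+/*
+ * Branches using BRANCH_HALT_VOTED are enabled through a shared vote
+ * register rather than their own CBCR; when the local vote is dropped the
+ * halt bit is not polled, as other masters may still keep the clock on.
+ */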
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x24018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &apss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+ .halt_reg = 0x2401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &apss_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02024,
+ .clkr = {
+ .enable_reg = 0x02024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02020,
+ .clkr = {
+ .enable_reg = 0x02020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03024,
+ .clkr = {
+ .enable_reg = 0x03024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x03020,
+ .clkr = {
+ .enable_reg = 0x03020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04024,
+ .clkr = {
+ .enable_reg = 0x04024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x04020,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05024,
+ .clkr = {
+ .enable_reg = 0x05024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x05020,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06024,
+ .clkr = {
+ .enable_reg = 0x06024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x06020,
+ .clkr = {
+ .enable_reg = 0x06020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07024,
+ .clkr = {
+ .enable_reg = 0x07024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x07020,
+ .clkr = {
+ .enable_reg = 0x07020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x02040,
+ .clkr = {
+ .enable_reg = 0x02040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x03040,
+ .clkr = {
+ .enable_reg = 0x03040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x04054,
+ .clkr = {
+ .enable_reg = 0x04054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart3_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x05040,
+ .clkr = {
+ .enable_reg = 0x05040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart4_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x06040,
+ .clkr = {
+ .enable_reg = 0x06040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart5_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x07040,
+ .clkr = {
+ .enable_reg = 0x07040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart6_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_pcie0_axi_m_clk_src[] = {
+ F(240000000, P_GPLL4, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie0_axi_m_clk_src = {
+ .cmd_rcgr = 0x28018,
+ .freq_tbl = ftbl_pcie0_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie0_axi_m_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_m_clk = {
+ .halt_reg = 0x28038,
+ .clkr = {
+ .enable_reg = 0x28038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie0_axi_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie0_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie0_1lane_m_clk = {
+ .halt_reg = 0x2e07c,
+ .clkr = {
+ .enable_reg = 0x2e07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_anoc_pcie0_1lane_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie0_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie1_axi_m_clk_src = {
+ .cmd_rcgr = 0x29018,
+ .freq_tbl = ftbl_pcie0_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie1_axi_m_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_m_clk = {
+ .halt_reg = 0x29038,
+ .clkr = {
+ .enable_reg = 0x29038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie1_axi_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie1_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie1_1lane_m_clk = {
+ .halt_reg = 0x2e08c,
+ .clkr = {
+ .enable_reg = 0x2e08c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_anoc_pcie1_1lane_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie1_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_pcie2_axi_m_clk_src[] = {
+ F(342857143, P_GPLL4, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie2_axi_m_clk_src = {
+ .cmd_rcgr = 0x2a018,
+ .freq_tbl = ftbl_pcie2_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie2_axi_m_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_m_clk = {
+ .halt_reg = 0x2a038,
+ .clkr = {
+ .enable_reg = 0x2a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie2_axi_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie2_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie2_2lane_m_clk = {
+ .halt_reg = 0x2e080,
+ .clkr = {
+ .enable_reg = 0x2e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_anoc_pcie2_2lane_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie2_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie3_axi_m_clk_src = {
+ .cmd_rcgr = 0x2b018,
+ .freq_tbl = ftbl_pcie2_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie3_axi_m_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_m_clk = {
+ .halt_reg = 0x2b038,
+ .clkr = {
+ .enable_reg = 0x2b038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3_axi_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie3_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie3_2lane_m_clk = {
+ .halt_reg = 0x2e090,
+ .clkr = {
+ .enable_reg = 0x2e090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_anoc_pcie3_2lane_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie3_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie0_axi_s_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .freq_tbl = ftbl_pcie0_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie0_axi_s_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_clk = {
+ .halt_reg = 0x2803c,
+ .clkr = {
+ .enable_reg = 0x2803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie0_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
+ .halt_reg = 0x28040,
+ .clkr = {
+ .enable_reg = 0x28040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie0_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie0_1lane_s_clk = {
+ .halt_reg = 0x2e048,
+ .clkr = {
+ .enable_reg = 0x2e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_snoc_pcie0_1lane_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie0_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie1_axi_s_clk_src = {
+ .cmd_rcgr = 0x29020,
+ .freq_tbl = ftbl_pcie0_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie1_axi_s_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_s_clk = {
+ .halt_reg = 0x2903c,
+ .clkr = {
+ .enable_reg = 0x2903c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie1_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_s_bridge_clk = {
+ .halt_reg = 0x29040,
+ .clkr = {
+ .enable_reg = 0x29040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie1_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie1_1lane_s_clk = {
+ .halt_reg = 0x2e04c,
+ .clkr = {
+ .enable_reg = 0x2e04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_snoc_pcie1_1lane_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie1_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie2_axi_s_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .freq_tbl = ftbl_pcie0_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie2_axi_s_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_s_clk = {
+ .halt_reg = 0x2a03c,
+ .clkr = {
+ .enable_reg = 0x2a03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie2_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_s_bridge_clk = {
+ .halt_reg = 0x2a040,
+ .clkr = {
+ .enable_reg = 0x2a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie2_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie2_2lane_s_clk = {
+ .halt_reg = 0x2e050,
+ .clkr = {
+ .enable_reg = 0x2e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_snoc_pcie2_2lane_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie2_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie3_axi_s_clk_src = {
+ .cmd_rcgr = 0x2b020,
+ .freq_tbl = ftbl_pcie0_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie3_axi_s_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_s_clk = {
+ .halt_reg = 0x2b03c,
+ .clkr = {
+ .enable_reg = 0x2b03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie3_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_s_bridge_clk = {
+ .halt_reg = 0x2b040,
+ .clkr = {
+ .enable_reg = 0x2b040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie3_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie3_2lane_s_clk = {
+ .halt_reg = 0x2e054,
+ .clkr = {
+ .enable_reg = 0x2e054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_snoc_pcie3_2lane_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie3_axi_s_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
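+/*
+ * The PCIe pipe clocks are sourced from the PCIe PHYs.  clk_regmap_phy_mux
+ * models the GCC mux that switches between the PHY pipe output and the
+ * internal reference, so the pipe clock can be parked safely while the
+ * PHY is powered down.
+ */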
+static struct clk_regmap_phy_mux pcie0_pipe_clk_src = {
+ .reg = 0x28064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
+ .reg = 0x29064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
+ .reg = 0x2a064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie2_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY2_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux pcie3_pipe_clk_src = {
+ .reg = 0x2b064,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie3_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY3_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie0_rchng_clk_src = {
+ .cmd_rcgr = 0x28028,
+ .freq_tbl = ftbl_pcie_rchng_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie0_rchng_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie0_rchng_clk = {
+ .halt_reg = 0x28028,
+ .clkr = {
+ .enable_reg = 0x28028,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie0_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie0_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie1_rchng_clk_src = {
+ .cmd_rcgr = 0x29028,
+ .freq_tbl = ftbl_pcie_rchng_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie1_rchng_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie1_rchng_clk = {
+ .halt_reg = 0x29028,
+ .clkr = {
+ .enable_reg = 0x29028,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie1_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie1_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie2_rchng_clk_src = {
+ .cmd_rcgr = 0x2a028,
+ .freq_tbl = ftbl_pcie_rchng_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie2_rchng_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie2_rchng_clk = {
+ .halt_reg = 0x2a028,
+ .clkr = {
+ .enable_reg = 0x2a028,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie2_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie2_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 pcie3_rchng_clk_src = {
+ .cmd_rcgr = 0x2b028,
+ .freq_tbl = ftbl_pcie_rchng_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie3_rchng_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie3_rchng_clk = {
+ .halt_reg = 0x2b028,
+ .clkr = {
+ .enable_reg = 0x2b028,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie3_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(20000000, P_GPLL0, 10, 1, 4),
+ { }
+};
+
+static struct clk_rcg2 pcie_aux_clk_src = {
+ .cmd_rcgr = 0x28004,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcie_aux_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie0_aux_clk = {
+ .halt_reg = 0x28034,
+ .clkr = {
+ .enable_reg = 0x28034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie0_aux_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_aux_clk = {
+ .halt_reg = 0x29034,
+ .clkr = {
+ .enable_reg = 0x29034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie1_aux_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_aux_clk = {
+ .halt_reg = 0x2a034,
+ .clkr = {
+ .enable_reg = 0x2a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie2_aux_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_aux_clk = {
+ .halt_reg = 0x2b034,
+ .clkr = {
+ .enable_reg = 0x2b034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3_aux_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_usb_aux_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb0_aux_clk_src = {
+ .cmd_rcgr = 0x2c018,
+ .freq_tbl = ftbl_usb_aux_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb0_aux_clk_src",
+ .parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_core_pi_sleep_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+ .halt_reg = 0x2c048,
+ .clkr = {
+ .enable_reg = 0x2c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb0_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_usb0_master_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb0_master_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .freq_tbl = ftbl_usb0_master_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb0_master_clk_src",
+ .parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_out_main_div2_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+ .halt_reg = 0x2c044,
+ .clkr = {
+ .enable_reg = 0x2c044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb0_master_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb0_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_usb_clk = {
+ .halt_reg = 0x2e058,
+ .clkr = {
+ .enable_reg = 0x2e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_snoc_usb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb0_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_usb_axi_clk = {
+ .halt_reg = 0x2e084,
+ .clkr = {
+ .enable_reg = 0x2e084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_anoc_usb_axi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb0_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_usb0_mock_utmi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL4, 10, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 usb0_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2c02c,
+ .freq_tbl = ftbl_usb0_mock_utmi_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb0_mock_utmi_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
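+/*
+ * The UTMI divider uses the read-only regmap divider ops, which report
+ * the current division factor without ever reprogramming it.
+ */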
+static struct clk_regmap_div usb0_mock_utmi_div_clk_src = {
+ .reg = 0x2c040,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb0_mock_utmi_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &usb0_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+ .halt_reg = 0x2c04c,
+ .clkr = {
+ .enable_reg = 0x2c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb0_mock_utmi_div_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux usb0_pipe_clk_src = {
+ .reg = 0x2c074,
+ .shift = 8,
+ .width = 2,
+ .parent_map = gcc_usb3phy_0_cc_pipe_clk_xo_map,
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "usb0_pipe_clk_src",
+ .parent_data = gcc_usb3phy_0_cc_pipe_clk_xo,
+ .num_parents = ARRAY_SIZE(gcc_usb3phy_0_cc_pipe_clk_xo),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc_apps_clk_src[] = {
+ F(144000, P_XO, 16, 12, 125),
+ F(400000, P_XO, 12, 1, 5),
+ F(24000000, P_GPLL2, 12, 1, 4),
+ F(48000000, P_GPLL2, 12, 1, 2),
+ F(96000000, P_GPLL2, 12, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ F(192000000, P_GPLL2, 6, 0, 0),
+ F(384000000, P_GPLL2, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
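+/*
+ * The SDCC apps clock uses the floor ops so a requested rate is rounded
+ * down to the nearest table entry and the card clock never exceeds the
+ * rate asked for by the MMC core.
+ */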
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x33004,
+ .freq_tbl = ftbl_sdcc_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll0_out_main_div2),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x3302c,
+ .clkr = {
+ .enable_reg = 0x3302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc1_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ F(150000000, P_GPLL4, 8, 0, 0),
+ F(300000000, P_GPLL4, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x33018,
+ .freq_tbl = ftbl_sdcc_ice_core_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4_gpll0_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_gpll0_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x33030,
+ .clkr = {
+ .enable_reg = 0x33030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc1_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_pcnoc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
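+/*
+ * The PCNOC and system NoC root clocks are marked CLK_IS_CRITICAL so the
+ * clock framework keeps them running even when no registered consumer
+ * holds a reference.
+ */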
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x31004,
+ .freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_nsscfg_clk = {
+ .halt_reg = 0x1702c,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nsscfg_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_nsscc_clk = {
+ .halt_reg = 0x17030,
+ .clkr = {
+ .enable_reg = 0x17030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_nsscc_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscc_clk = {
+ .halt_reg = 0x17034,
+ .clkr = {
+ .enable_reg = 0x17034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nsscc_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_pcnoc_1_clk = {
+ .halt_reg = 0x17080,
+ .clkr = {
+ .enable_reg = 0x17080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_pcnoc_1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_ahb_clk = {
+ .halt_reg = 0x2d064,
+ .clkr = {
+ .enable_reg = 0x2d064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_dap_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_cfg_ahb_clk = {
+ .halt_reg = 0x2d068,
+ .clkr = {
+ .enable_reg = 0x2d068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x32010,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qpic_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x32014,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qpic_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+ .halt_reg = 0x17040,
+ .clkr = {
+ .enable_reg = 0x17040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdio_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+ .halt_reg = 0x1704c,
+ .clkr = {
+ .enable_reg = 0x1704c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_uniphy0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+ .halt_reg = 0x1705c,
+ .clkr = {
+ .enable_reg = 0x1705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_uniphy1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy2_ahb_clk = {
+ .halt_reg = 0x1706c,
+ .clkr = {
+ .enable_reg = 0x1706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_uniphy2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+ .halt_reg = 0x3a004,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_apu_clk = {
+ .halt_reg = 0x3a00c,
+ .clkr = {
+ .enable_reg = 0x3a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_apu_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_ahb_clk = {
+ .halt_reg = 0x28030,
+ .clkr = {
+ .enable_reg = 0x28030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_ahb_clk = {
+ .halt_reg = 0x29030,
+ .clkr = {
+ .enable_reg = 0x29030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_ahb_clk = {
+ .halt_reg = 0x2a030,
+ .clkr = {
+ .enable_reg = 0x2a030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_ahb_clk = {
+ .halt_reg = 0x2b030,
+ .clkr = {
+ .enable_reg = 0x2b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcie3_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+ .halt_reg = 0x2c05c,
+ .clkr = {
+ .enable_reg = 0x2c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb0_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x33034,
+ .clkr = {
+ .enable_reg = 0x33034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_system_noc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(342850000, P_GPLL4, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2e004,
+ .freq_tbl = ftbl_system_noc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_q6ss_boot_clk = {
+ .halt_reg = 0x25080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x25080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6ss_boot_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+ .halt_reg = 0x17028,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_1_clk = {
+ .halt_reg = 0x1707c,
+ .clkr = {
+ .enable_reg = 0x1707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_etr_usb_clk = {
+ .halt_reg = 0x2d060,
+ .clkr = {
+ .enable_reg = 0x2d060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_etr_usb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_wcss_ahb_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss_ahb_clk_src = {
+ .cmd_rcgr = 0x25030,
+ .freq_tbl = ftbl_wcss_ahb_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "wcss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_q6_ahb_clk = {
+ .halt_reg = 0x25014,
+ .clkr = {
+ .enable_reg = 0x25014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &wcss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6_ahb_s_clk = {
+ .halt_reg = 0x25018,
+ .clkr = {
+ .enable_reg = 0x25018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6_ahb_s_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &wcss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_ecahb_clk = {
+ .halt_reg = 0x25058,
+ .clkr = {
+ .enable_reg = 0x25058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_ecahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &wcss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_acmt_clk = {
+ .halt_reg = 0x2505c,
+ .clkr = {
+ .enable_reg = 0x2505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_acmt_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &wcss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
+ .halt_reg = 0x2e030,
+ .clkr = {
+ .enable_reg = 0x2e030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sys_noc_wcss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &wcss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_wcss_axi_m_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss_axi_m_clk_src = {
+ .cmd_rcgr = 0x25078,
+ .freq_tbl = ftbl_wcss_axi_m_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "wcss_axi_m_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_anoc_wcss_axi_m_clk = {
+ .halt_reg = 0x2e0a8,
+ .clkr = {
+ .enable_reg = 0x2e0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_anoc_wcss_axi_m_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &wcss_axi_m_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_at_clk_src[] = {
+ F(240000000, P_GPLL4, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qdss_at_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .freq_tbl = ftbl_qdss_at_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "qdss_at_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_q6ss_atbm_clk = {
+ .halt_reg = 0x2501c,
+ .clkr = {
+ .enable_reg = 0x2501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6ss_atbm_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
+ .halt_reg = 0x2503c,
+ .clkr = {
+ .enable_reg = 0x2503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_atb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_atb_clk = {
+ .halt_reg = 0x17014,
+ .clkr = {
+ .enable_reg = 0x17014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_atb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x2d038,
+ .clkr = {
+ .enable_reg = 0x2d038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_at_clk = {
+ .halt_reg = 0x2e038,
+ .clkr = {
+ .enable_reg = 0x2e038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sys_noc_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcnoc_at_clk = {
+ .halt_reg = 0x31024,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcnoc_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_eud_at_div_clk_src = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_eud_at_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_usb0_eud_at_clk = {
+ .halt_reg = 0x30004,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb0_eud_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_eud_at_div_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_eud_at_clk = {
+ .halt_reg = 0x2d06c,
+ .clkr = {
+ .enable_reg = 0x2d06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_eud_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_eud_at_div_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_stm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qdss_stm_clk_src = {
+ .cmd_rcgr = 0x2d00c,
+ .freq_tbl = ftbl_qdss_stm_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "qdss_stm_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_stm_clk = {
+ .halt_reg = 0x2d03c,
+ .clkr = {
+ .enable_reg = 0x2d03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_stm_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_stm_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_qdss_stm_axi_clk = {
+ .halt_reg = 0x2e034,
+ .clkr = {
+ .enable_reg = 0x2e034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sys_noc_qdss_stm_axi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_stm_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_traceclkin_clk_src[] = {
+ F(300000000, P_GPLL4, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qdss_traceclkin_clk_src = {
+ .cmd_rcgr = 0x2d014,
+ .freq_tbl = ftbl_qdss_traceclkin_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "qdss_traceclkin_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_traceclkin_clk = {
+ .halt_reg = 0x2d040,
+ .clkr = {
+ .enable_reg = 0x2d040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_traceclkin_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_traceclkin_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_tsctr_clk_src[] = {
+ F(600000000, P_GPLL4, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x2d01c,
+ .freq_tbl = ftbl_qdss_tsctr_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "qdss_tsctr_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "qdss_tsctr_div2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_q6_tsctr_1to2_clk = {
+ .halt_reg = 0x25020,
+ .clkr = {
+ .enable_reg = 0x25020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6_tsctr_1to2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_div2_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
+ .halt_reg = 0x25040,
+ .clkr = {
+ .enable_reg = 0x25040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_nts_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_div2_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_tsctr_div2_clk = {
+ .halt_reg = 0x2d044,
+ .clkr = {
+ .enable_reg = 0x2d044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_div2_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_uniphy_sys_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 uniphy_sys_clk_src = {
+ .cmd_rcgr = 0x17090,
+ .freq_tbl = ftbl_uniphy_sys_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "uniphy_sys_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_ts_clk_src = {
+ .cmd_rcgr = 0x17088,
+ .freq_tbl = ftbl_uniphy_sys_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "nss_ts_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_ts_clk = {
+ .halt_reg = 0x2d078,
+ .clkr = {
+ .enable_reg = 0x2d078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_ts_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &nss_ts_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor qdss_dap_sync_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data) {
+ .name = "qdss_dap_sync_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_tsctr_div4_clk = {
+ .halt_reg = 0x2d04c,
+ .clkr = {
+ .enable_reg = 0x2d04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div4_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor qdss_tsctr_div8_clk_src = {
+ .mult = 1,
+ .div = 8,
+ .hw.init = &(struct clk_init_data) {
+ .name = "qdss_tsctr_div8_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_nss_ts_clk = {
+ .halt_reg = 0x17018,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nss_ts_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &nss_ts_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_tsctr_div8_clk = {
+ .halt_reg = 0x2d050,
+ .clkr = {
+ .enable_reg = 0x2d050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div8_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_div8_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor qdss_tsctr_div16_clk_src = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data) {
+ .name = "qdss_tsctr_div16_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_tsctr_div16_clk = {
+ .halt_reg = 0x2d054,
+ .clkr = {
+ .enable_reg = 0x2d054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div16_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_div16_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6ss_pclkdbg_clk = {
+ .halt_reg = 0x25024,
+ .clkr = {
+ .enable_reg = 0x25024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6ss_pclkdbg_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_q6ss_trig_clk = {
+ .halt_reg = 0x25068,
+ .clkr = {
+ .enable_reg = 0x25068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6ss_trig_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
+ .halt_reg = 0x25038,
+ .clkr = {
+ .enable_reg = 0x25038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_apb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_dbg_ifc_dapbus_clk = {
+ .halt_reg = 0x25044,
+ .clkr = {
+ .enable_reg = 0x25044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_dbg_ifc_dapbus_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x2d058,
+ .clkr = {
+ .enable_reg = 0x2d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_dap_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_apb2jtag_clk = {
+ .halt_reg = 0x2d05c,
+ .clkr = {
+ .enable_reg = 0x2d05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_apb2jtag_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor qdss_tsctr_div3_clk_src = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data) {
+ .name = "qdss_tsctr_div3_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_qdss_tsctr_div3_clk = {
+ .halt_reg = 0x2d048,
+ .clkr = {
+ .enable_reg = 0x2d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &qdss_tsctr_div3_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qpic_io_macro_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .freq_tbl = ftbl_qpic_io_macro_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "qpic_io_macro_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_qpic_io_macro_clk = {
+ .halt_reg = 0x3200c,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_io_macro_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qpic_io_macro_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_q6_axi_clk_src[] = {
+ F(533333333, P_GPLL0, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 q6_axi_clk_src = {
+ .cmd_rcgr = 0x25004,
+ .freq_tbl = ftbl_q6_axi_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll4_pi_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "q6_axi_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_gpll4_pi_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll4_pi_sleep),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_q6_axim_clk = {
+ .halt_reg = 0x2500c,
+ .clkr = {
+ .enable_reg = 0x2500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_q6_axim_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &q6_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss_q6_tbu_clk = {
+ .halt_reg = 0x12050,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_wcss_q6_tbu_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &q6_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_q6_axi_clk = {
+ .halt_reg = 0x19010,
+ .clkr = {
+ .enable_reg = 0x19010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mem_noc_q6_axi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &q6_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_q6_axim2_clk_src[] = {
+ F(342857143, P_GPLL4, 3.5, 0, 0),
+ { }
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_bias_pll_ubinc_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+ { P_BIAS_PLL_UBI_NC_CLK, 4 },
+};
+
+static struct clk_rcg2 q6_axim2_clk_src = {
+ .cmd_rcgr = 0x25028,
+ .freq_tbl = ftbl_q6_axim2_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubinc_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "q6_axim2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nssnoc_memnoc_bfdcd_clk_src[] = {
+ F(533333333, P_GPLL0, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nssnoc_memnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x17004,
+ .freq_tbl = ftbl_nssnoc_memnoc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_aux_gpll2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "nssnoc_memnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_aux_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_aux_gpll2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_nssnoc_memnoc_clk = {
+ .halt_reg = 0x17024,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_memnoc_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &nssnoc_memnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_mem_noc_1_clk = {
+ .halt_reg = 0x17084,
+ .clkr = {
+ .enable_reg = 0x17084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_mem_noc_1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &nssnoc_memnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_tbu_clk = {
+ .halt_reg = 0x12040,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nss_tbu_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &nssnoc_memnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_nssnoc_clk = {
+ .halt_reg = 0x19014,
+ .clkr = {
+ .enable_reg = 0x19014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mem_noc_nssnoc_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &nssnoc_memnoc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_axim_clk_src[] = {
+ F(133333333, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_axim_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .freq_tbl = ftbl_lpass_axim_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "lpass_axim_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 lpass_sway_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .freq_tbl = ftbl_lpass_axim_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "lpass_sway_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_adss_pwm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .freq_tbl = ftbl_adss_pwm_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "adss_pwm_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c00c,
+ .clkr = {
+ .enable_reg = 0x1c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &adss_pwm_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_xo_clk_src = {
+ .halt_reg = 0x34004,
+ .clkr = {
+ .enable_reg = 0x34004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_xo_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_xo_dcd_clk = {
+ .halt_reg = 0x17074,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_xo_dcd_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x34018,
+ .clkr = {
+ .enable_reg = 0x34018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+ .halt_reg = 0x17048,
+ .clkr = {
+ .enable_reg = 0x17048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_uniphy0_sys_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &uniphy_sys_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+ .halt_reg = 0x17058,
+ .clkr = {
+ .enable_reg = 0x17058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_uniphy1_sys_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &uniphy_sys_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy2_sys_clk = {
+ .halt_reg = 0x17068,
+ .clkr = {
+ .enable_reg = 0x17068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_uniphy2_sys_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &uniphy_sys_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+ .halt_reg = 0x3a008,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_sys_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &uniphy_sys_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_xo_div4_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+ .halt_reg = 0x1701c,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_qosgen_ref_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_div4_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+ .halt_reg = 0x17020,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_nssnoc_timeout_ref_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_div4_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_div4_clk = {
+ .halt_reg = 0x3401c,
+ .clkr = {
+ .enable_reg = 0x3401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_xo_div4_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_div4_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
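+/*
+ * Plain (non-regmap) clk_hw providers -- the fixed-factor dividers defined
+ * above -- registered by qcom_cc_probe() alongside the regmap-backed clocks
+ * listed below.
+ */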
+static struct clk_hw *gcc_ipq9574_hws[] = {
+ &gpll0_out_main_div2.hw,
+ &gcc_xo_div4_clk_src.hw,
+ &qdss_dap_sync_clk_src.hw,
+ &qdss_tsctr_div2_clk_src.hw,
+ &qdss_tsctr_div8_clk_src.hw,
+ &qdss_tsctr_div16_clk_src.hw,
+ &qdss_tsctr_div3_clk_src.hw,
+ &gcc_eud_at_div_clk_src.hw,
+};
+
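+/* Regmap-backed clocks, indexed by the clock IDs from the GCC DT binding header. */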
+static struct clk_regmap *gcc_ipq9574_clks[] = {
+ [GPLL0_MAIN] = &gpll0_main.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_MAIN] = &gpll4_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL2_MAIN] = &gpll2_main.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [APSS_AXI_CLK_SRC] = &apss_axi_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [PCIE0_AXI_M_CLK_SRC] = &pcie0_axi_m_clk_src.clkr,
+ [GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
+ [PCIE1_AXI_M_CLK_SRC] = &pcie1_axi_m_clk_src.clkr,
+ [GCC_PCIE1_AXI_M_CLK] = &gcc_pcie1_axi_m_clk.clkr,
+ [PCIE2_AXI_M_CLK_SRC] = &pcie2_axi_m_clk_src.clkr,
+ [GCC_PCIE2_AXI_M_CLK] = &gcc_pcie2_axi_m_clk.clkr,
+ [PCIE3_AXI_M_CLK_SRC] = &pcie3_axi_m_clk_src.clkr,
+ [GCC_PCIE3_AXI_M_CLK] = &gcc_pcie3_axi_m_clk.clkr,
+ [PCIE0_AXI_S_CLK_SRC] = &pcie0_axi_s_clk_src.clkr,
+ [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr,
+ [PCIE1_AXI_S_CLK_SRC] = &pcie1_axi_s_clk_src.clkr,
+ [GCC_PCIE1_AXI_S_BRIDGE_CLK] = &gcc_pcie1_axi_s_bridge_clk.clkr,
+ [GCC_PCIE1_AXI_S_CLK] = &gcc_pcie1_axi_s_clk.clkr,
+ [PCIE2_AXI_S_CLK_SRC] = &pcie2_axi_s_clk_src.clkr,
+ [GCC_PCIE2_AXI_S_BRIDGE_CLK] = &gcc_pcie2_axi_s_bridge_clk.clkr,
+ [GCC_PCIE2_AXI_S_CLK] = &gcc_pcie2_axi_s_clk.clkr,
+ [PCIE3_AXI_S_CLK_SRC] = &pcie3_axi_s_clk_src.clkr,
+ [GCC_PCIE3_AXI_S_BRIDGE_CLK] = &gcc_pcie3_axi_s_bridge_clk.clkr,
+ [GCC_PCIE3_AXI_S_CLK] = &gcc_pcie3_axi_s_clk.clkr,
+ [PCIE0_PIPE_CLK_SRC] = &pcie0_pipe_clk_src.clkr,
+ [PCIE1_PIPE_CLK_SRC] = &pcie1_pipe_clk_src.clkr,
+ [PCIE2_PIPE_CLK_SRC] = &pcie2_pipe_clk_src.clkr,
+ [PCIE3_PIPE_CLK_SRC] = &pcie3_pipe_clk_src.clkr,
+ [PCIE_AUX_CLK_SRC] = &pcie_aux_clk_src.clkr,
+ [GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
+ [GCC_PCIE1_AUX_CLK] = &gcc_pcie1_aux_clk.clkr,
+ [GCC_PCIE2_AUX_CLK] = &gcc_pcie2_aux_clk.clkr,
+ [GCC_PCIE3_AUX_CLK] = &gcc_pcie3_aux_clk.clkr,
+ [PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
+ [GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
+ [PCIE1_RCHNG_CLK_SRC] = &pcie1_rchng_clk_src.clkr,
+ [GCC_PCIE1_RCHNG_CLK] = &gcc_pcie1_rchng_clk.clkr,
+ [PCIE2_RCHNG_CLK_SRC] = &pcie2_rchng_clk_src.clkr,
+ [GCC_PCIE2_RCHNG_CLK] = &gcc_pcie2_rchng_clk.clkr,
+ [PCIE3_RCHNG_CLK_SRC] = &pcie3_rchng_clk_src.clkr,
+ [GCC_PCIE3_RCHNG_CLK] = &gcc_pcie3_rchng_clk.clkr,
+ [GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr,
+ [GCC_PCIE1_AHB_CLK] = &gcc_pcie1_ahb_clk.clkr,
+ [GCC_PCIE2_AHB_CLK] = &gcc_pcie2_ahb_clk.clkr,
+ [GCC_PCIE3_AHB_CLK] = &gcc_pcie3_ahb_clk.clkr,
+ [USB0_AUX_CLK_SRC] = &usb0_aux_clk_src.clkr,
+ [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+ [USB0_MASTER_CLK_SRC] = &usb0_master_clk_src.clkr,
+ [GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+ [GCC_SNOC_USB_CLK] = &gcc_snoc_usb_clk.clkr,
+ [GCC_ANOC_USB_AXI_CLK] = &gcc_anoc_usb_axi_clk.clkr,
+ [USB0_MOCK_UTMI_CLK_SRC] = &usb0_mock_utmi_clk_src.clkr,
+ [USB0_MOCK_UTMI_DIV_CLK_SRC] = &usb0_mock_utmi_div_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+ [USB0_PIPE_CLK_SRC] = &usb0_pipe_clk_src.clkr,
+ [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [GCC_NSSCFG_CLK] = &gcc_nsscfg_clk.clkr,
+ [GCC_NSSNOC_NSSCC_CLK] = &gcc_nssnoc_nsscc_clk.clkr,
+ [GCC_NSSCC_CLK] = &gcc_nsscc_clk.clkr,
+ [GCC_NSSNOC_PCNOC_1_CLK] = &gcc_nssnoc_pcnoc_1_clk.clkr,
+ [GCC_QDSS_DAP_AHB_CLK] = &gcc_qdss_dap_ahb_clk.clkr,
+ [GCC_QDSS_CFG_AHB_CLK] = &gcc_qdss_cfg_ahb_clk.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+ [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+ [GCC_UNIPHY2_AHB_CLK] = &gcc_uniphy2_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_APU_CLK] = &gcc_cmn_12gpll_apu_clk.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+ [GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr,
+ [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
+ [WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr,
+ [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr,
+ [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr,
+ [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr,
+ [GCC_WCSS_ACMT_CLK] = &gcc_wcss_acmt_clk.clkr,
+ [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr,
+ [WCSS_AXI_M_CLK_SRC] = &wcss_axi_m_clk_src.clkr,
+ [GCC_ANOC_WCSS_AXI_M_CLK] = &gcc_anoc_wcss_axi_m_clk.clkr,
+ [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
+ [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr,
+ [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr,
+ [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
+ [GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr,
+ [GCC_PCNOC_AT_CLK] = &gcc_pcnoc_at_clk.clkr,
+ [GCC_USB0_EUD_AT_CLK] = &gcc_usb0_eud_at_clk.clkr,
+ [GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr,
+ [QDSS_STM_CLK_SRC] = &qdss_stm_clk_src.clkr,
+ [GCC_QDSS_STM_CLK] = &gcc_qdss_stm_clk.clkr,
+ [GCC_SYS_NOC_QDSS_STM_AXI_CLK] = &gcc_sys_noc_qdss_stm_axi_clk.clkr,
+ [QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr,
+ [GCC_QDSS_TRACECLKIN_CLK] = &gcc_qdss_traceclkin_clk.clkr,
+ [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
+ [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr,
+ [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr,
+ [GCC_QDSS_TSCTR_DIV2_CLK] = &gcc_qdss_tsctr_div2_clk.clkr,
+ [GCC_QDSS_TS_CLK] = &gcc_qdss_ts_clk.clkr,
+ [GCC_QDSS_TSCTR_DIV4_CLK] = &gcc_qdss_tsctr_div4_clk.clkr,
+ [GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr,
+ [GCC_QDSS_TSCTR_DIV8_CLK] = &gcc_qdss_tsctr_div8_clk.clkr,
+ [GCC_QDSS_TSCTR_DIV16_CLK] = &gcc_qdss_tsctr_div16_clk.clkr,
+ [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr,
+ [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr,
+ [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr,
+ [GCC_WCSS_DBG_IFC_DAPBUS_CLK] = &gcc_wcss_dbg_ifc_dapbus_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_QDSS_APB2JTAG_CLK] = &gcc_qdss_apb2jtag_clk.clkr,
+ [GCC_QDSS_TSCTR_DIV3_CLK] = &gcc_qdss_tsctr_div3_clk.clkr,
+ [QPIC_IO_MACRO_CLK_SRC] = &qpic_io_macro_clk_src.clkr,
+ [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
+ [Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr,
+ [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr,
+ [GCC_WCSS_Q6_TBU_CLK] = &gcc_wcss_q6_tbu_clk.clkr,
+ [GCC_MEM_NOC_Q6_AXI_CLK] = &gcc_mem_noc_q6_axi_clk.clkr,
+ [Q6_AXIM2_CLK_SRC] = &q6_axim2_clk_src.clkr,
+ [NSSNOC_MEMNOC_BFDCD_CLK_SRC] = &nssnoc_memnoc_bfdcd_clk_src.clkr,
+ [GCC_NSSNOC_MEMNOC_CLK] = &gcc_nssnoc_memnoc_clk.clkr,
+ [GCC_NSSNOC_MEM_NOC_1_CLK] = &gcc_nssnoc_mem_noc_1_clk.clkr,
+ [GCC_NSS_TBU_CLK] = &gcc_nss_tbu_clk.clkr,
+ [GCC_MEM_NOC_NSSNOC_CLK] = &gcc_mem_noc_nssnoc_clk.clkr,
+ [LPASS_AXIM_CLK_SRC] = &lpass_axim_clk_src.clkr,
+ [LPASS_SWAY_CLK_SRC] = &lpass_sway_clk_src.clkr,
+ [ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [GCC_NSSNOC_XO_DCD_CLK] = &gcc_nssnoc_xo_dcd_clk.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
+ [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+ [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+ [GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr,
+ [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+ [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+ [GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr,
+ [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+ [GCC_Q6SS_BOOT_CLK] = &gcc_q6ss_boot_clk.clkr,
+ [UNIPHY_SYS_CLK_SRC] = &uniphy_sys_clk_src.clkr,
+ [NSS_TS_CLK_SRC] = &nss_ts_clk_src.clkr,
+ [GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr,
+ [GCC_ANOC_PCIE1_1LANE_M_CLK] = &gcc_anoc_pcie1_1lane_m_clk.clkr,
+ [GCC_ANOC_PCIE2_2LANE_M_CLK] = &gcc_anoc_pcie2_2lane_m_clk.clkr,
+ [GCC_ANOC_PCIE3_2LANE_M_CLK] = &gcc_anoc_pcie3_2lane_m_clk.clkr,
+ [GCC_SNOC_PCIE0_1LANE_S_CLK] = &gcc_snoc_pcie0_1lane_s_clk.clkr,
+ [GCC_SNOC_PCIE1_1LANE_S_CLK] = &gcc_snoc_pcie1_1lane_s_clk.clkr,
+ [GCC_SNOC_PCIE2_2LANE_S_CLK] = &gcc_snoc_pcie2_2lane_s_clk.clkr,
+ [GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr,
+};
+
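+/* Reset map: each entry gives the { register offset, bit } of one GCC reset control. */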
+static const struct qcom_reset_map gcc_ipq9574_resets[] = {
+ [GCC_ADSS_BCR] = { 0x1c000, 0 },
+ [GCC_ANOC0_TBU_BCR] = { 0x1203c, 0 },
+ [GCC_ANOC1_TBU_BCR] = { 0x1204c, 0 },
+ [GCC_ANOC_BCR] = { 0x2e074, 0 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x38000, 0 },
+ [GCC_APSS_TCU_BCR] = { 0x12014, 0 },
+ [GCC_BLSP1_BCR] = { 0x01000, 0 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000, 0 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03000, 0 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04000, 0 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05000, 0 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06000, 0 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07000, 0 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02028, 0 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028, 0 },
+ [GCC_BLSP1_UART3_BCR] = { 0x04028, 0 },
+ [GCC_BLSP1_UART4_BCR] = { 0x05028, 0 },
+ [GCC_BLSP1_UART5_BCR] = { 0x06028, 0 },
+ [GCC_BLSP1_UART6_BCR] = { 0x07028, 0 },
+ [GCC_BOOT_ROM_BCR] = { 0x13028, 0 },
+ [GCC_CMN_BLK_BCR] = { 0x3a000, 0 },
+ [GCC_CMN_BLK_AHB_ARES] = { 0x3a010, 0 },
+ [GCC_CMN_BLK_SYS_ARES] = { 0x3a010, 1 },
+ [GCC_CMN_BLK_APU_ARES] = { 0x3a010, 2 },
+ [GCC_DCC_BCR] = { 0x35000, 0 },
+ [GCC_DDRSS_BCR] = { 0x11000, 0 },
+ [GCC_IMEM_BCR] = { 0x0e000, 0 },
+ [GCC_LPASS_BCR] = { 0x27000, 0 },
+ [GCC_MDIO_BCR] = { 0x1703c, 0 },
+ [GCC_MPM_BCR] = { 0x37000, 0 },
+ [GCC_MSG_RAM_BCR] = { 0x26000, 0 },
+ [GCC_NSS_BCR] = { 0x17000, 0 },
+ [GCC_NSS_TBU_BCR] = { 0x12044, 0 },
+ [GCC_NSSNOC_MEMNOC_1_ARES] = { 0x17038, 13 },
+ [GCC_NSSNOC_PCNOC_1_ARES] = { 0x17038, 12 },
+ [GCC_NSSNOC_SNOC_1_ARES] = { 0x17038, 11 },
+ [GCC_NSSNOC_XO_DCD_ARES] = { 0x17038, 10 },
+ [GCC_NSSNOC_TS_ARES] = { 0x17038, 9 },
+ [GCC_NSSCC_ARES] = { 0x17038, 8 },
+ [GCC_NSSNOC_NSSCC_ARES] = { 0x17038, 7 },
+ [GCC_NSSNOC_ATB_ARES] = { 0x17038, 6 },
+ [GCC_NSSNOC_MEMNOC_ARES] = { 0x17038, 5 },
+ [GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x17038, 4 },
+ [GCC_NSSNOC_SNOC_ARES] = { 0x17038, 3 },
+ [GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x17038, 2 },
+ [GCC_NSS_CFG_ARES] = { 0x17038, 1 },
+ [GCC_UBI0_DBG_ARES] = { 0x17038, 0 },
+ [GCC_PCIE0PHY_PHY_BCR] = { 0x2805c, 0 },
+ [GCC_PCIE0_AHB_ARES] = { 0x28058, 7 },
+ [GCC_PCIE0_AUX_ARES] = { 0x28058, 6 },
+ [GCC_PCIE0_AXI_M_ARES] = { 0x28058, 5 },
+ [GCC_PCIE0_AXI_M_STICKY_ARES] = { 0x28058, 4 },
+ [GCC_PCIE0_AXI_S_ARES] = { 0x28058, 3 },
+ [GCC_PCIE0_AXI_S_STICKY_ARES] = { 0x28058, 2 },
+ [GCC_PCIE0_CORE_STICKY_ARES] = { 0x28058, 1 },
+ [GCC_PCIE0_PIPE_ARES] = { 0x28058, 0 },
+ [GCC_PCIE1_AHB_ARES] = { 0x29058, 7 },
+ [GCC_PCIE1_AUX_ARES] = { 0x29058, 6 },
+ [GCC_PCIE1_AXI_M_ARES] = { 0x29058, 5 },
+ [GCC_PCIE1_AXI_M_STICKY_ARES] = { 0x29058, 4 },
+ [GCC_PCIE1_AXI_S_ARES] = { 0x29058, 3 },
+ [GCC_PCIE1_AXI_S_STICKY_ARES] = { 0x29058, 2 },
+ [GCC_PCIE1_CORE_STICKY_ARES] = { 0x29058, 1 },
+ [GCC_PCIE1_PIPE_ARES] = { 0x29058, 0 },
+ [GCC_PCIE2_AHB_ARES] = { 0x2a058, 7 },
+ [GCC_PCIE2_AUX_ARES] = { 0x2a058, 6 },
+ [GCC_PCIE2_AXI_M_ARES] = { 0x2a058, 5 },
+ [GCC_PCIE2_AXI_M_STICKY_ARES] = { 0x2a058, 4 },
+ [GCC_PCIE2_AXI_S_ARES] = { 0x2a058, 3 },
+ [GCC_PCIE2_AXI_S_STICKY_ARES] = { 0x2a058, 2 },
+ [GCC_PCIE2_CORE_STICKY_ARES] = { 0x2a058, 1 },
+ [GCC_PCIE2_PIPE_ARES] = { 0x2a058, 0 },
+ [GCC_PCIE3_AHB_ARES] = { 0x2b058, 7 },
+ [GCC_PCIE3_AUX_ARES] = { 0x2b058, 6 },
+ [GCC_PCIE3_AXI_M_ARES] = { 0x2b058, 5 },
+ [GCC_PCIE3_AXI_M_STICKY_ARES] = { 0x2b058, 4 },
+ [GCC_PCIE3_AXI_S_ARES] = { 0x2b058, 3 },
+ [GCC_PCIE3_AXI_S_STICKY_ARES] = { 0x2b058, 2 },
+ [GCC_PCIE3_CORE_STICKY_ARES] = { 0x2b058, 1 },
+ [GCC_PCIE3_PIPE_ARES] = { 0x2b058, 0 },
+ [GCC_PCIE0_BCR] = { 0x28000, 0 },
+ [GCC_PCIE0_LINK_DOWN_BCR] = { 0x28054, 0 },
+ [GCC_PCIE0_PHY_BCR] = { 0x28060, 0 },
+ [GCC_PCIE1_BCR] = { 0x29000, 0 },
+ [GCC_PCIE1_LINK_DOWN_BCR] = { 0x29054, 0 },
+ [GCC_PCIE1_PHY_BCR] = { 0x29060, 0 },
+ [GCC_PCIE1PHY_PHY_BCR] = { 0x2905c, 0 },
+ [GCC_PCIE2_BCR] = { 0x2a000, 0 },
+ [GCC_PCIE2_LINK_DOWN_BCR] = { 0x2a054, 0 },
+ [GCC_PCIE2_PHY_BCR] = { 0x2a060, 0 },
+ [GCC_PCIE2PHY_PHY_BCR] = { 0x2a05c, 0 },
+ [GCC_PCIE3_BCR] = { 0x2b000, 0 },
+ [GCC_PCIE3_LINK_DOWN_BCR] = { 0x2b054, 0 },
+ [GCC_PCIE3PHY_PHY_BCR] = { 0x2b05c, 0 },
+ [GCC_PCIE3_PHY_BCR] = { 0x2b060, 0 },
+ [GCC_PCNOC_BCR] = { 0x31000, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x31030, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x31038, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x31040, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x31048, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x31050, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x31058, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x31060, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x31068, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x31070, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x31078, 0 },
+ [GCC_PCNOC_TBU_BCR] = { 0x12034, 0 },
+ [GCC_PRNG_BCR] = { 0x13020, 0 },
+ [GCC_Q6SS_DBG_ARES] = { 0x2506c, 4 },
+ [GCC_Q6_AHB_ARES] = { 0x2506c, 3 },
+ [GCC_Q6_AHB_S_ARES] = { 0x2506c, 2 },
+ [GCC_Q6_AXIM2_ARES] = { 0x2506c, 1 },
+ [GCC_Q6_AXIM_ARES] = { 0x2506c, 0 },
+ [GCC_QDSS_BCR] = { 0x2d000, 0 },
+ [GCC_QPIC_BCR] = { 0x32000, 0 },
+ [GCC_QPIC_AHB_ARES] = { 0x3201c, 1 },
+ [GCC_QPIC_ARES] = { 0x3201c, 0 },
+ [GCC_QUSB2_0_PHY_BCR] = { 0x2c068, 0 },
+ [GCC_RBCPR_BCR] = { 0x39000, 0 },
+ [GCC_RBCPR_MX_BCR] = { 0x39014, 0 },
+ [GCC_SDCC_BCR] = { 0x33000, 0 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+ [GCC_SMMU_CFG_BCR] = { 0x1202c, 0 },
+ [GCC_SNOC_BCR] = { 0x2e000, 0 },
+ [GCC_SPDM_BCR] = { 0x36000, 0 },
+ [GCC_TCSR_BCR] = { 0x3d000, 0 },
+ [GCC_TLMM_BCR] = { 0x3e000, 0 },
+ [GCC_TME_BCR] = { 0x10000, 0 },
+ [GCC_UNIPHY0_BCR] = { 0x17044, 0 },
+ [GCC_UNIPHY0_SYS_RESET] = { 0x17050, 0 },
+ [GCC_UNIPHY0_AHB_RESET] = { 0x17050, 1 },
+ [GCC_UNIPHY0_XPCS_RESET] = { 0x17050, 2 },
+ [GCC_UNIPHY1_SYS_RESET] = { 0x17060, 0 },
+ [GCC_UNIPHY1_AHB_RESET] = { 0x17060, 1 },
+ [GCC_UNIPHY1_XPCS_RESET] = { 0x17060, 2 },
+ [GCC_UNIPHY2_SYS_RESET] = { 0x17070, 0 },
+ [GCC_UNIPHY2_AHB_RESET] = { 0x17070, 1 },
+ [GCC_UNIPHY2_XPCS_RESET] = { 0x17070, 2 },
+ [GCC_UNIPHY1_BCR] = { 0x17054, 0 },
+ [GCC_UNIPHY2_BCR] = { 0x17064, 0 },
+ [GCC_USB0_PHY_BCR] = { 0x2c06c, 0 },
+ [GCC_USB3PHY_0_PHY_BCR] = { 0x2c070, 0 },
+ [GCC_USB_BCR] = { 0x2c000, 0 },
+ [GCC_USB_MISC_RESET] = { 0x2c064, 0 },
+ [GCC_WCSSAON_RESET] = { 0x25074, 0 },
+ [GCC_WCSS_ACMT_ARES] = { 0x25070, 5 },
+ [GCC_WCSS_AHB_S_ARES] = { 0x25070, 4 },
+ [GCC_WCSS_AXI_M_ARES] = { 0x25070, 3 },
+ [GCC_WCSS_BCR] = { 0x18004, 0 },
+ [GCC_WCSS_DBG_ARES] = { 0x25070, 2 },
+ [GCC_WCSS_DBG_BDG_ARES] = { 0x25070, 1 },
+ [GCC_WCSS_ECAHB_ARES] = { 0x25070, 0 },
+ [GCC_WCSS_Q6_BCR] = { 0x18000, 0 },
+ [GCC_WCSS_Q6_TBU_BCR] = { 0x12054, 0 },
+};
+
+static const struct of_device_id gcc_ipq9574_match_table[] = {
+ { .compatible = "qcom,ipq9574-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq9574_match_table);
+
+static const struct regmap_config gcc_ipq9574_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fffc,
+ .fast_io = true,
+};
+
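+/*
+ * Top-level descriptor handed to qcom_cc_probe(): the regmap config plus the
+ * clock, reset and plain clk_hw tables above.
+ */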
+static const struct qcom_cc_desc gcc_ipq9574_desc = {
+ .config = &gcc_ipq9574_regmap_config,
+ .clks = gcc_ipq9574_clks,
+ .num_clks = ARRAY_SIZE(gcc_ipq9574_clks),
+ .resets = gcc_ipq9574_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq9574_resets),
+ .clk_hws = gcc_ipq9574_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq9574_hws),
+};
+
+static int gcc_ipq9574_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_ipq9574_desc);
+}
+
+static struct platform_driver gcc_ipq9574_driver = {
+ .probe = gcc_ipq9574_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq9574",
+ .of_match_table = gcc_ipq9574_match_table,
+ },
+};
+
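+/*
+ * Registered at core_initcall level so the GCC clocks are available early to
+ * dependent drivers.
+ */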
+static int __init gcc_ipq9574_init(void)
+{
+ return platform_driver_register(&gcc_ipq9574_driver);
+}
+core_initcall(gcc_ipq9574_init);
+
+static void __exit gcc_ipq9574_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq9574_driver);
+}
+module_exit(gcc_ipq9574_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GCC IPQ9574 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
new file mode 100644
index 000000000000..a4c33a2ce61c
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8917.c
@@ -0,0 +1,3303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Otto Pflüger
+ *
+ * Based on gcc-msm8953.c:
+ * Copyright 2021, The Linux Foundation. All rights reserved.
+ * with parts taken from gcc-qcs404.c:
+ * Copyright 2018, The Linux Foundation. All rights reserved.
+ * and gcc-msm8939.c:
+ * Copyright 2020 Linaro Limited
+ * adapted with data from clock-gcc-8952.c in Qualcomm's msm-4.9 release:
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8917.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_DSI0PLL,
+ DT_DSI0PLL_BYTE,
+};
+
+enum {
+ P_XO,
+ P_SLEEP_CLK,
+ P_GPLL0,
+ P_GPLL3,
+ P_GPLL4,
+ P_GPLL6,
+ P_DSI0PLL,
+ P_DSI0PLL_BYTE,
+};
+
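+/*
+ * enable_is_inverted: the clk framework clears BIT(23) in the vote register
+ * to enable this clock and sets it to disable.
+ */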
+static struct clk_alpha_pll gpll0_sleep_clk_src = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45008,
+ .enable_mask = BIT(23),
+ .enable_is_inverted = true,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_early",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct pll_vco gpll3_p_vco[] = {
+ { 700000000, 1400000000, 0 },
+};
+
+static const struct alpha_pll_config gpll3_early_config = {
+ .l = 63,
+ .config_ctl_val = 0x4001055b,
+ .early_output_mask = 0,
+ .post_div_mask = GENMASK(11, 8),
+ .post_div_val = BIT(8),
+};
+
+static struct clk_alpha_pll gpll3_early = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpll3_p_vco,
+ .num_vco = ARRAY_SIZE(gpll3_p_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3 = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll3_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll gpll4_early = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_pll gpll6_early = {
+ .l_reg = 0x37004,
+ .m_reg = 0x37008,
+ .n_reg = 0x3700c,
+ .config_reg = 0x37014,
+ .mode_reg = 0x37000,
+ .status_reg = 0x3701c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll6 = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll6_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct parent_map gcc_xo_gpll0_out_aux_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_sleep_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL4, 3 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll4_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0c00c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0d000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0f000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0c024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0d014,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0f024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 72, 15625),
+ F(7372800, P_GPLL0, 1, 144, 15625),
+ F(14745600, P_GPLL0, 1, 288, 15625),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ F(64000000, P_GPLL0, 1, 2, 25),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x0c044,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0d034,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_byte0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_byte_data[] = {
+ { .index = DT_XO },
+ { .index = DT_DSI0PLL_BYTE },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_byte0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "byte0_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(61540000, P_GPLL0, 13, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_camss_top_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_top_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 1, 3, 64),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_cci_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cci_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_cpp_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 3 },
+};
+
+static const struct clk_parent_data gcc_cpp_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.hw },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .parent_map = gcc_cpp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cpp_clk_src",
+ .parent_data = gcc_cpp_data,
+ .num_parents = ARRAY_SIZE(gcc_cpp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3c020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi2_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_esc0_1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "esc0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_gfx3d_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL3, 2 },
+ { P_GPLL6, 3 },
+};
+
+static const struct parent_map gcc_gfx3d_map_qm215[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL3, 2 },
+ { P_GPLL6, 6 },
+};
+
+static const struct clk_parent_data gcc_gfx3d_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+ { .hw = &gpll6.hw },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(270000000, P_GPLL6, 4, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL3, 1, 0, 0),
+ F(484800000, P_GPLL3, 1, 0, 0),
+ F(500000000, P_GPLL3, 1, 0, 0),
+ F(523200000, P_GPLL3, 1, 0, 0),
+ F(550000000, P_GPLL3, 1, 0, 0),
+ F(598000000, P_GPLL3, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .parent_map = gcc_gfx3d_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_gfx3d_data,
+ .num_parents = ARRAY_SIZE(gcc_gfx3d_data),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ }
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "jpeg0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL6, 1, 1, 45),
+ F(66667000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x5c000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_sleep_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_sleep_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_sleep_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(145450000, P_GPLL0, 5.5, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mdp_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_pclk_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_pclk_data[] = {
+ { .index = DT_XO },
+ { .index = DT_DSI0PLL },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x5d000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_sdcc1_apps_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+};
+
+static const struct clk_parent_data gcc_sdcc1_apss_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(192000000, P_GPLL4, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(384000000, P_GPLL4, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .parent_map = gcc_sdcc1_apps_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_sdcc1_apss_data,
+ .num_parents = ARRAY_SIZE(gcc_sdcc1_apss_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb_hs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(180000000, P_GPLL6, 6, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(329140000, P_GPLL4, 3.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll4_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_gpll4_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_vfe_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(329140000, P_GPLL4, 3.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll4_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_gpll4_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x58054,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vfe1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll4_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6_gpll4_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vsync_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_tcu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x59030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gpu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x0b008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x0d010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0f020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0f020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x0c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x0d00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x0f01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x0c03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x0d02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0d02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x56004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2_ahb_clk = {
+ .halt_reg = 0x3c040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2_clk = {
+ .halt_reg = 0x3c03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2phy_clk = {
+ .halt_reg = 0x3c048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2pix_clk = {
+ .halt_reg = 0x3c058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2rdi_clk = {
+ .halt_reg = 0x3c050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi_vfe1_clk = {
+ .halt_reg = 0x58074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x5c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_ahb_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_axi_clk = {
+ .halt_reg = 0x58048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe1_ahb_clk = {
+ .halt_reg = 0x58060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe1_axi_clk = {
+ .halt_reg = 0x58068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe1_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_cpp_tbu_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_dcc_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_jpeg_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x29084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_dap_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x5d014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_smmu_cfg_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = {
+ .halt_reg = 0x41030,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_phy_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_vfe1_tbu_clk = {
+ .halt_reg = 0x12090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_vfe1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_vfe_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .cxcs = (unsigned int []){ 0x4c024, 0x4c01c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .cxcs = (unsigned int []){ 0x4c02c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .cxcs = (unsigned int []){ 0x4d080, 0x4d088 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .cxcs = (unsigned int []){ 0x57020, 0x57028 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "jpeg_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe0_gdsc = {
+ .gdscr = 0x58034,
+ .cxcs = (unsigned int []){ 0x58038, 0x58048, 0x5600c, 0x58050 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x5806c,
+ .cxcs = (unsigned int []){ 0x5805c, 0x58068, 0x5600c, 0x58074 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gx_gdsc = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000, 0x59020 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x58078,
+ .cxcs = (unsigned int []){ 0x5803c, 0x58064 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "cpp_gdsc",
+ },
+ .flags = ALWAYS_ON,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *gcc_msm8917_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_EARLY] = &gpll3_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL6] = &gpll6,
+ [GPLL6_EARLY] = &gpll6_early.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AHB_CLK] = &gcc_camss_vfe0_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AXI_CLK] = &gcc_camss_vfe0_axi_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_msm8917_resets[] = {
+ [GCC_CAMSS_MICRO_BCR] = { 0x56008 },
+ [GCC_MSS_BCR] = { 0x71000 },
+ [GCC_QUSB2_PHY_BCR] = { 0x4103c },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
+};
+
+static const struct regmap_config gcc_msm8917_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static struct gdsc *gcc_msm8917_gdscs[] = {
+ [CPP_GDSC] = &cpp_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+};
+
+static const struct qcom_cc_desc gcc_msm8917_desc = {
+ .config = &gcc_msm8917_regmap_config,
+ .clks = gcc_msm8917_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8917_clocks),
+ .resets = gcc_msm8917_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8917_resets),
+ .gdscs = gcc_msm8917_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8917_gdscs),
+};
+
+static const struct qcom_cc_desc gcc_qm215_desc = {
+ .config = &gcc_msm8917_regmap_config,
+ .clks = gcc_msm8917_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8917_clocks),
+ .resets = gcc_msm8917_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8917_resets),
+ .gdscs = gcc_msm8917_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8917_gdscs),
+};
+
+static int gcc_msm8917_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ const struct qcom_cc_desc *gcc_desc;
+
+ gcc_desc = of_device_get_match_data(&pdev->dev);
+
+ if (gcc_desc == &gcc_qm215_desc)
+ gfx3d_clk_src.parent_map = gcc_gfx3d_map_qm215;
+
+ regmap = qcom_cc_map(pdev, gcc_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&gpll3_early, regmap, &gpll3_early_config);
+
+ return qcom_cc_really_probe(pdev, gcc_desc, regmap);
+}
+
+static const struct of_device_id gcc_msm8917_match_table[] = {
+ { .compatible = "qcom,gcc-msm8917", .data = &gcc_msm8917_desc },
+ { .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc },
+ {},
+};
+
+static struct platform_driver gcc_msm8917_driver = {
+ .probe = gcc_msm8917_probe,
+ .driver = {
+ .name = "gcc-msm8917",
+ .of_match_table = gcc_msm8917_match_table,
+ },
+};
+
+static int __init gcc_msm8917_init(void)
+{
+ return platform_driver_register(&gcc_msm8917_driver);
+}
+core_initcall(gcc_msm8917_init);
+
+static void __exit gcc_msm8917_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8917_driver);
+}
+module_exit(gcc_msm8917_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8917 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 712073f9dc69..7f8969a77974 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -73,8 +73,8 @@ static struct clk_regmap gpll0_vote = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -104,8 +104,8 @@ static struct clk_regmap gpll1_vote = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gpll1_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll1.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -135,8 +135,8 @@ static struct clk_regmap gpll2_vote = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gpll2_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll2.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -166,8 +166,8 @@ static struct clk_regmap bimc_pll_vote = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "bimc_pll_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &bimc_pll.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_pll.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -197,8 +197,8 @@ static struct clk_regmap gpll3_vote = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gpll3_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll3.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -244,8 +244,8 @@ static struct clk_regmap gpll4_vote = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gpll4_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll4.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -290,8 +290,8 @@ static struct clk_regmap gpll5_vote = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "gpll5_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll5.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll5.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
@@ -321,8 +321,8 @@ static struct clk_regmap gpll6_vote = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gpll6_vote",
- .parent_data = &(const struct clk_parent_data) {
- .hw = &gpll6.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll6.clkr.hw,
},
.num_parents = 1,
.ops = &clk_pll_vote_ops,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 9dd4e7ffa1f8..dbc7093ab9cc 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3754,19 +3754,17 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
return 0;
}
-static int gcc_msm8960_remove(struct platform_device *pdev)
+static void gcc_msm8960_remove(struct platform_device *pdev)
{
struct platform_device *tsens = platform_get_drvdata(pdev);
if (tsens)
platform_device_unregister(tsens);
-
- return 0;
}
static struct platform_driver gcc_msm8960_driver = {
.probe = gcc_msm8960_probe,
- .remove = gcc_msm8960_remove,
+ .remove_new = gcc_msm8960_remove,
.driver = {
.name = "gcc-msm8960",
.of_match_table = gcc_msm8960_match_table,
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 025cc9a20dbb..0231c1efd286 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2110,7 +2110,7 @@ static struct clk_branch gcc_sdcc1_cdccal_sleep_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_cdccal_sleep_clk",
.parent_data = (const struct clk_parent_data[]){
- { .fw_name = "sleep_clk", .name = "sleep_clk_src" }
+ { .fw_name = "sleep_clk", .name = "sleep_clk" }
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2275,7 +2275,7 @@ static struct clk_branch gcc_usb2a_phy_sleep_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2a_phy_sleep_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "sleep_clk", .name = "sleep_clk_src",
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2291,7 +2291,7 @@ static struct clk_branch gcc_usb2b_phy_sleep_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2b_phy_sleep_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "sleep_clk", .name = "sleep_clk_src",
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2341,7 +2341,7 @@ static struct clk_branch gcc_usb30_sleep_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sleep_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "sleep_clk", .name = "sleep_clk_src",
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2440,7 +2440,7 @@ static struct clk_branch gcc_usb_hsic_io_cal_sleep_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hsic_io_cal_sleep_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "sleep_clk", .name = "sleep_clk_src",
+ .fw_name = "sleep_clk", .name = "sleep_clk",
},
.num_parents = 1,
.ops = &clk_branch2_ops,
diff --git a/drivers/clk/qcom/gcc-msm8976.c b/drivers/clk/qcom/gcc-msm8976.c
index 6b112984694c..8beb923c0e19 100644
--- a/drivers/clk/qcom/gcc-msm8976.c
+++ b/drivers/clk/qcom/gcc-msm8976.c
@@ -334,9 +334,9 @@ static const struct parent_map gcc_parent_map_7[] = {
{ P_GPLL6_OUT, 3 },
};
-static const struct clk_parent_data gcc_parent_data_7[] = {
- { .hw = &gpll0_vote.hw },
- { .hw = &gpll6_vote.hw },
+static const struct clk_hw * gcc_parent_hws_7[] = {
+ &gpll0_vote.hw,
+ &gpll6_vote.hw,
};
static const struct parent_map gcc_parent_map_8[] = {
@@ -363,8 +363,8 @@ static const struct parent_map gcc_parent_map_8_gp[] = {
{ P_GPLL0_OUT_MAIN, 1 },
};
-static const struct clk_parent_data gcc_parent_data_8_gp[] = {
- { .hw = &gpll0_vote.hw },
+static const struct clk_hw *gcc_parent_hws_8_gp[] = {
+ &gpll0_vote.hw,
};
static const struct parent_map gcc_parent_map_9[] = {
@@ -952,8 +952,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_data = gcc_parent_data_8_gp,
- .num_parents = ARRAY_SIZE(gcc_parent_data_8_gp),
+ .parent_hws = gcc_parent_hws_8_gp,
+ .num_parents = ARRAY_SIZE(gcc_parent_hws_8_gp),
.ops = &clk_rcg2_ops,
},
};
@@ -973,8 +973,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_camss_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_data = gcc_parent_data_8_gp,
- .num_parents = ARRAY_SIZE(gcc_parent_data_8_gp),
+ .parent_hws = gcc_parent_hws_8_gp,
+ .num_parents = ARRAY_SIZE(gcc_parent_hws_8_gp),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,8 +1015,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_mclk_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_data = gcc_parent_data_7,
- .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .parent_hws = gcc_parent_hws_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_hws_7),
.ops = &clk_rcg2_ops,
},
};
@@ -1029,8 +1029,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_mclk_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_data = gcc_parent_data_7,
- .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .parent_hws = gcc_parent_hws_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_hws_7),
.ops = &clk_rcg2_ops,
},
};
@@ -1043,8 +1043,8 @@ static struct clk_rcg2 mclk2_clk_src = {
.freq_tbl = ftbl_mclk_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
- .parent_data = gcc_parent_data_7,
- .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .parent_hws = gcc_parent_hws_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_hws_7),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index e16163706735..5e44d1bcca9e 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -3455,7 +3455,8 @@ static struct gdsc usb30_gdsc = {
.pd = {
.name = "usb30",
},
- .pwrsts = PWRSTS_OFF_ON,
+ /* TODO: Change to OFF_ON when USB drivers get proper suspend support */
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie0_gdsc = {
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 33473c52eb90..be024f8093c5 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -387,7 +387,6 @@ static struct clk_alpha_pll_postdiv gpll4_out_test = {
enum {
P_AUD_REF_CLK,
- P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_MAIN,
P_GPLL4_OUT_MAIN,
P_PLL0_EARLY_DIV_CLK_SRC,
@@ -399,26 +398,22 @@ static const struct parent_map gcc_parent_map_0[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_PLL0_EARLY_DIV_CLK_SRC, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_0[] = {
{ .fw_name = "xo" },
{ .hw = &gpll0_out_main.clkr.hw },
{ .hw = &gpll0_out_main.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_1[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_1[] = {
{ .fw_name = "xo" },
{ .hw = &gpll0_out_main.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_2[] = {
@@ -426,7 +421,6 @@ static const struct parent_map gcc_parent_map_2[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_SLEEP_CLK, 5 },
{ P_PLL0_EARLY_DIV_CLK_SRC, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_2[] = {
@@ -434,47 +428,40 @@ static const struct clk_parent_data gcc_parent_data_2[] = {
{ .hw = &gpll0_out_main.clkr.hw },
{ .fw_name = "sleep_clk" },
{ .hw = &gpll0_out_main.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_3[] = {
{ P_XO, 0 },
{ P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "xo" },
{ .fw_name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_4[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL4_OUT_MAIN, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_4[] = {
{ .fw_name = "xo" },
{ .hw = &gpll0_out_main.clkr.hw },
{ .hw = &gpll4_out_main.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_5[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_AUD_REF_CLK, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_5[] = {
{ .fw_name = "xo" },
{ .hw = &gpll0_out_main.clkr.hw },
{ .fw_name = "aud_ref_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
@@ -2911,7 +2898,8 @@ static struct gdsc usb_30_gdsc = {
.pd = {
.name = "usb_30_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ /* TODO: Change to OFF_ON when USB drivers get proper suspend support */
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index 7792b8f23704..096deff2ba25 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -1243,7 +1243,8 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parents_12,
.num_parents = ARRAY_SIZE(gcc_parents_12),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
+ .flags = CLK_OPS_PARENT_ENABLE,
},
};
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 46d314d69250..a39c4990b29d 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -19,17 +19,24 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "common.h"
+#include "gdsc.h"
#include "reset.h"
enum {
- P_CORE_BI_PLL_TEST_SE,
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_HDMI_PHY_PLL_CLK,
+};
+
+enum {
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
- P_GPLL0_OUT_AUX,
P_GPLL0_OUT_MAIN,
P_GPLL1_OUT_MAIN,
P_GPLL3_OUT_MAIN,
- P_GPLL4_OUT_AUX,
P_GPLL4_OUT_MAIN,
P_GPLL6_OUT_AUX,
P_HDMI_PHY_PLL_CLK,
@@ -38,240 +45,12 @@ enum {
P_XO,
};
-static const struct parent_map gcc_parent_map_0[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_0[] = {
- "cxo",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const char * const gcc_parent_names_ao_0[] = {
- "cxo",
- "gpll0_ao_out_main",
- "core_bi_pll_test_se",
-};
-
static const struct parent_map gcc_parent_map_1[] = {
{ P_XO, 0 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_1[] = {
- "cxo",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_2[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL6_OUT_AUX, 2 },
- { P_SLEEP_CLK, 6 },
-};
-
-static const char * const gcc_parent_names_2[] = {
- "cxo",
- "gpll0_out_main",
- "gpll6_out_aux",
- "sleep_clk",
-};
-
-static const struct parent_map gcc_parent_map_3[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL6_OUT_AUX, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_3[] = {
- "cxo",
- "gpll0_out_main",
- "gpll6_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_4[] = {
- { P_XO, 0 },
- { P_GPLL1_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_4[] = {
- "cxo",
- "gpll1_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_5[] = {
- { P_XO, 0 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
- { P_GPLL0_OUT_AUX, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_5[] = {
- "cxo",
- "dsi0pll_byteclk_src",
- "gpll0_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_6[] = {
- { P_XO, 0 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
- { P_GPLL0_OUT_AUX, 3 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_6[] = {
- "cxo",
- "dsi0_phy_pll_out_byteclk",
- "gpll0_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_7[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL3_OUT_MAIN, 2 },
- { P_GPLL6_OUT_AUX, 3 },
- { P_GPLL4_OUT_AUX, 4 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_7[] = {
- "cxo",
- "gpll0_out_main",
- "gpll3_out_main",
- "gpll6_out_aux",
- "gpll4_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_8[] = {
- { P_XO, 0 },
- { P_HDMI_PHY_PLL_CLK, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_8[] = {
- "cxo",
- "hdmi_phy_pll_clk",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_9[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_DSI0_PHY_PLL_OUT_DSICLK, 2 },
- { P_GPLL6_OUT_AUX, 3 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_9[] = {
- "cxo",
- "gpll0_out_main",
- "dsi0_phy_pll_out_dsiclk",
- "gpll6_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_10[] = {
- { P_XO, 0 },
- { P_SLEEP_CLK, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_10[] = {
- "cxo",
- "sleep_clk",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_11[] = {
- { P_XO, 0 },
- { P_PCIE_0_PIPE_CLK, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_11[] = {
- "cxo",
- "pcie_0_pipe_clk",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_12[] = {
- { P_XO, 0 },
- { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
- { P_GPLL0_OUT_AUX, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_12[] = {
- "cxo",
- "dsi0pll_pclk_src",
- "gpll0_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_13[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_MAIN, 2 },
- { P_GPLL6_OUT_AUX, 3 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_13[] = {
- "cxo",
- "gpll0_out_main",
- "gpll4_out_main",
- "gpll6_out_aux",
- "core_bi_pll_test_se",
};
-static const struct parent_map gcc_parent_map_14[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_AUX, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_14[] = {
- "cxo",
- "gpll0_out_main",
- "gpll4_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_15[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_AUX, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_15[] = {
- "cxo",
- "gpll0_out_aux",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_16[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL0_OUT_AUX, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_16[] = {
- "cxo",
- "gpll0_out_main",
- "gpll0_out_aux",
- "core_bi_pll_test_se",
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_XO, .name = "xo-board" },
};
static struct clk_fixed_factor cxo = {
@@ -279,8 +58,8 @@ static struct clk_fixed_factor cxo = {
.div = 1,
.hw.init = &(struct clk_init_data){
.name = "cxo",
- .parent_names = (const char *[]){ "xo-board" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_fixed_factor_ops,
},
};
@@ -294,8 +73,8 @@ static struct clk_alpha_pll gpll0_sleep_clk_src = {
.enable_is_inverted = true,
.hw.init = &(struct clk_init_data){
.name = "gpll0_sleep_clk_src",
- .parent_names = (const char *[]){ "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_alpha_pll_ops,
},
},
@@ -310,9 +89,8 @@ static struct clk_alpha_pll gpll0_out_main = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
- .parent_names = (const char *[])
- { "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_alpha_pll_ops,
},
},
@@ -327,8 +105,8 @@ static struct clk_alpha_pll gpll0_ao_out_main = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0_ao_out_main",
- .parent_names = (const char *[]){ "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.flags = CLK_IS_CRITICAL,
.ops = &clk_alpha_pll_fixed_ops,
},
@@ -343,8 +121,8 @@ static struct clk_alpha_pll gpll1_out_main = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
- .parent_names = (const char *[]){ "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_alpha_pll_ops,
},
},
@@ -374,8 +152,8 @@ static struct clk_alpha_pll gpll3_out_main = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
- .parent_names = (const char *[]){ "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_alpha_pll_ops,
},
},
@@ -389,8 +167,8 @@ static struct clk_alpha_pll gpll4_out_main = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
- .parent_names = (const char *[]){ "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_alpha_pll_ops,
},
},
@@ -406,8 +184,8 @@ static struct clk_pll gpll6 = {
.status_bit = 17,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll6",
- .parent_names = (const char *[]){ "cxo" },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_pll_ops,
},
};
@@ -417,12 +195,195 @@ static struct clk_regmap gpll6_out_aux = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gpll6_out_aux",
- .parent_names = (const char *[]){ "gpll6" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll6.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
};
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_ao_0[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_ao_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll6_out_aux.hw },
+ { .index = DT_SLEEP_CLK, .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll6_out_aux.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll1_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK, .name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK, .name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN, 2 },
+ { P_GPLL6_OUT_AUX, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+ { .hw = &gpll6_out_aux.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_HDMI_PHY_PLL_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .index = DT_HDMI_PHY_PLL_CLK, .name = "hdmi_pll" },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 2 },
+ { P_GPLL6_OUT_AUX, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK, .name = "dsi0pll" },
+ { .hw = &gpll6_out_aux.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .index = DT_SLEEP_CLK, .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_XO, 0 },
+ { P_PCIE_0_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .fw_name = "pcie_0_pipe_clk", .name = "pcie_0_pipe_clk" },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK, .name = "dsi0pll" },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+ { P_GPLL6_OUT_AUX, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll4_out_main.clkr.hw },
+ { .hw = &gpll6_out_aux.hw },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_XO, .name = "xo-board" },
+};
+
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_XO, .name = "xo-board" },
+ { .hw = &gpll0_out_main.clkr.hw },
+};
+
static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
@@ -439,8 +400,8 @@ static struct clk_rcg2 apss_ahb_clk_src = {
.freq_tbl = ftbl_apss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_ahb_clk_src",
- .parent_names = gcc_parent_names_ao_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_ao_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_ao_0),
.flags = CLK_IS_CRITICAL,
.ops = &clk_rcg2_ops,
},
@@ -460,8 +421,8 @@ static struct clk_rcg2 blsp1_qup0_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup0_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -485,8 +446,8 @@ static struct clk_rcg2 blsp1_qup0_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup0_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -499,8 +460,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -524,8 +485,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -538,8 +499,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -564,8 +525,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup2_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -578,8 +539,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -592,8 +553,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -606,8 +567,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -620,8 +581,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -654,8 +615,8 @@ static struct clk_rcg2 blsp1_uart0_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart0_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -668,8 +629,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -682,8 +643,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -697,8 +658,8 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart3_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -711,8 +672,8 @@ static struct clk_rcg2 blsp2_qup0_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup0_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -725,8 +686,8 @@ static struct clk_rcg2 blsp2_qup0_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup0_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -739,8 +700,8 @@ static struct clk_rcg2 blsp2_uart0_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart0_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -752,8 +713,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = gcc_parent_map_5,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = gcc_parent_names_5,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_byte2_ops,
},
@@ -775,8 +736,8 @@ static struct clk_rcg2 emac_clk_src = {
.freq_tbl = ftbl_emac_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "emac_clk_src",
- .parent_names = gcc_parent_names_4,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
.ops = &clk_rcg2_ops,
},
};
@@ -796,8 +757,8 @@ static struct clk_rcg2 emac_ptp_clk_src = {
.freq_tbl = ftbl_emac_ptp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "emac_ptp_clk_src",
- .parent_names = gcc_parent_names_4,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
.ops = &clk_rcg2_ops,
},
};
@@ -815,8 +776,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_esc0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = gcc_parent_names_6,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
.ops = &clk_rcg2_ops,
},
};
@@ -849,8 +810,8 @@ static struct clk_rcg2 gfx3d_clk_src = {
.freq_tbl = ftbl_gfx3d_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = gcc_parent_names_7,
- .num_parents = 6,
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
.ops = &clk_rcg2_ops,
},
};
@@ -870,8 +831,8 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -884,8 +845,8 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -898,8 +859,8 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -912,8 +873,8 @@ static struct clk_rcg2 hdmi_app_clk_src = {
.freq_tbl = ftbl_esc0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_app_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 2,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -926,8 +887,8 @@ static struct clk_rcg2 hdmi_pclk_clk_src = {
.freq_tbl = ftbl_esc0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_pclk_clk_src",
- .parent_names = gcc_parent_names_8,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
.ops = &clk_rcg2_ops,
},
};
@@ -953,8 +914,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_mdp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = gcc_parent_names_9,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
.ops = &clk_rcg2_ops,
},
};
@@ -972,8 +933,8 @@ static struct clk_rcg2 pcie_0_aux_clk_src = {
.freq_tbl = ftbl_pcie_0_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_aux_clk_src",
- .parent_names = gcc_parent_names_10,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
.ops = &clk_rcg2_ops,
},
};
@@ -993,8 +954,8 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = {
.freq_tbl = ftbl_pcie_0_pipe_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_pipe_clk_src",
- .parent_names = gcc_parent_names_11,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
.ops = &clk_rcg2_ops,
},
};
@@ -1006,8 +967,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = gcc_parent_map_12,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = gcc_parent_names_12,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_pixel_ops,
},
@@ -1027,8 +988,8 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_pdm2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1055,8 +1016,8 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.freq_tbl = ftbl_sdcc1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_parent_names_13,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1075,8 +1036,8 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.freq_tbl = ftbl_sdcc1_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
- .parent_names = gcc_parent_names_3,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -1101,8 +1062,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_sdcc2_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_14,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1115,8 +1076,8 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
.freq_tbl = ftbl_esc0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_mock_utmi_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 2,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -1137,8 +1098,8 @@ static struct clk_rcg2 usb30_master_clk_src = {
.freq_tbl = ftbl_usb30_master_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1151,8 +1112,8 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.freq_tbl = ftbl_esc0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 2,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -1165,8 +1126,8 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.freq_tbl = ftbl_pcie_0_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 2,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -1188,8 +1149,8 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.freq_tbl = ftbl_usb_hs_system_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
- .parent_names = gcc_parent_names_3,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -1202,8 +1163,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_esc0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = gcc_parent_names_15,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
.ops = &clk_rcg2_ops,
},
};
@@ -1224,8 +1185,8 @@ static struct clk_rcg2 cdsp_bimc_clk_src = {
.freq_tbl = ftbl_cdsp_bimc_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "cdsp_bimc_clk_src",
- .parent_names = gcc_parent_names_16,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
.ops = &clk_rcg2_ops,
},
};
@@ -1238,8 +1199,8 @@ static struct clk_branch gcc_apss_ahb_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_apss_ahb_clk",
- .parent_names = (const char *[]){
- "apss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &apss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1270,8 +1231,8 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
.ops = &clk_branch2_ops,
- .parent_names = (const char *[]){
- "gcc_apss_tcu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_apss_tcu_clk.clkr.hw,
},
},
@@ -1299,8 +1260,8 @@ static struct clk_branch gcc_bimc_cdsp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "gcc_bimc_cdsp_clk",
- .parent_names = (const char *[]) {
- "cdsp_bimc_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cdsp_bimc_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1369,8 +1330,8 @@ static struct clk_branch gcc_blsp1_qup0_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup0_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup0_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup0_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1387,8 +1348,8 @@ static struct clk_branch gcc_blsp1_qup0_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup0_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup0_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup0_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1405,8 +1366,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1423,8 +1384,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1441,8 +1402,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1459,8 +1420,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1477,8 +1438,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1495,8 +1456,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1513,8 +1474,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1531,8 +1492,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1549,8 +1510,8 @@ static struct clk_branch gcc_blsp1_uart0_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart0_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart0_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart0_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1567,8 +1528,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1585,8 +1546,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1603,8 +1564,8 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1634,8 +1595,8 @@ static struct clk_branch gcc_blsp2_qup0_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup0_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup0_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup0_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1652,8 +1613,8 @@ static struct clk_branch gcc_blsp2_qup0_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup0_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup0_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup0_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1670,8 +1631,8 @@ static struct clk_branch gcc_blsp2_uart0_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart0_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart0_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_uart0_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1753,8 +1714,8 @@ static struct clk_branch gcc_eth_ptp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_eth_ptp_clk",
- .parent_names = (const char *[]){
- "emac_ptp_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &emac_ptp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1771,8 +1732,8 @@ static struct clk_branch gcc_eth_rgmii_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_eth_rgmii_clk",
- .parent_names = (const char *[]){
- "emac_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &emac_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1854,8 +1815,8 @@ static struct clk_branch gcc_cdsp_tbu_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data) {
.name = "gcc_cdsp_tbu_clk",
- .parent_names = (const char *[]) {
- "cdsp_bimc_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cdsp_bimc_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1872,8 +1833,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1890,8 +1851,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1908,8 +1869,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1978,8 +1939,8 @@ static struct clk_branch gcc_mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte0_clk",
- .parent_names = (const char *[]){
- "byte0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1996,8 +1957,8 @@ static struct clk_branch gcc_mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc0_clk",
- .parent_names = (const char *[]){
- "esc0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2014,8 +1975,8 @@ static struct clk_branch gcc_mdss_hdmi_app_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_hdmi_app_clk",
- .parent_names = (const char *[]){
- "hdmi_app_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &hdmi_app_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2032,8 +1993,8 @@ static struct clk_branch gcc_mdss_hdmi_pclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_hdmi_pclk_clk",
- .parent_names = (const char *[]){
- "hdmi_pclk_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &hdmi_pclk_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2050,8 +2011,8 @@ static struct clk_branch gcc_mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_mdp_clk",
- .parent_names = (const char *[]){
- "mdp_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2068,8 +2029,8 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk0_clk",
- .parent_names = (const char *[]){
- "pclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2086,8 +2047,8 @@ static struct clk_branch gcc_mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_vsync_clk",
- .parent_names = (const char *[]){
- "vsync_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2117,8 +2078,8 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gfx3d_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2135,8 +2096,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.enable_mask = BIT(27),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_names = (const char *[]){
- "pcie_0_aux_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcie_0_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2179,8 +2140,8 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(28),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_names = (const char *[]){
- "pcie_0_pipe_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcie_0_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2238,8 +2199,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2348,8 +2309,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]){
- "sdcc1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2366,8 +2327,8 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ice_core_clk",
- .parent_names = (const char *[]){
- "sdcc1_ice_core_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc1_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2410,8 +2371,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2441,8 +2402,8 @@ static struct clk_branch gcc_sys_noc_usb3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_usb3_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2471,8 +2432,8 @@ static struct clk_branch gcc_usb20_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb20_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb20_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb20_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2502,8 +2463,8 @@ static struct clk_branch gcc_usb30_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2520,8 +2481,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2551,8 +2512,8 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_names = (const char *[]){
- "usb3_phy_aux_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb3_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2594,8 +2555,8 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_names = (const char *[]){
- "usb_hs_system_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb_hs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2630,6 +2591,22 @@ static struct clk_branch gcc_wdsp_q6ss_axim_clk = {
},
};
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .pd = {
+ .name = "oxili",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_hw *gcc_qcs404_hws[] = {
&cxo.hw,
};
@@ -2780,6 +2757,11 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
};
+static struct gdsc *gcc_qcs404_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+};
+
static const struct qcom_reset_map gcc_qcs404_resets[] = {
[GCC_GENI_IR_BCR] = { 0x0F000 },
[GCC_CDSP_RESTART] = { 0x18000 },
@@ -2822,6 +2804,8 @@ static const struct qcom_cc_desc gcc_qcs404_desc = {
.num_resets = ARRAY_SIZE(gcc_qcs404_resets),
.clk_hws = gcc_qcs404_hws,
.num_clk_hws = ARRAY_SIZE(gcc_qcs404_hws),
+ .gdscs = gcc_qcs404_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs404_gdscs),
};
static const struct of_device_id gcc_qcs404_match_table[] = {
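The gcc-qcs404.c hunks above all apply one conversion: global-string lookups through .parent_names with hand-counted .num_parents become .parent_data (or .parent_hws for a single fixed parent) sized with ARRAY_SIZE(), so a clock's parent table and its length can no longer drift apart, and parents are resolved by clk_hw pointer or DT index instead of by name. Below is a minimal sketch of the resulting shape, not code from the patch: example_parent_data, example_clk_src, gpll0 and gcc_parent_map_0 are invented placeholders for whatever a real driver defines, and the usual <linux/clk-provider.h> plus the qcom clk-rcg.h helpers are assumed.

#include <linux/clk-provider.h>

#include "clk-rcg.h"

/* Parents described by clk_hw pointer or DT reference instead of global names. */
static const struct clk_parent_data example_parent_data[] = {
	{ .fw_name = "xo" },		/* board clock resolved from the DT */
	{ .hw = &gpll0.clkr.hw },	/* PLL registered by this same driver */
};

static struct clk_rcg2 example_clk_src = {
	.cmd_rcgr = 0x1000,
	.hid_width = 5,
	.parent_map = gcc_parent_map_0,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_clk_src",
		.parent_data = example_parent_data,
		/* count derived from the table, never hard-coded */
		.num_parents = ARRAY_SIZE(example_parent_data),
		.ops = &clk_rcg2_ops,
	},
};
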
diff --git a/drivers/clk/qcom/gcc-qdu1000.c b/drivers/clk/qcom/gcc-qdu1000.c
new file mode 100644
index 000000000000..5051769ad90c
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qdu1000.c
@@ -0,0 +1,2653 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qdu1000-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL2_OUT_MAIN,
+ P_GCC_GPLL3_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL6_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_PCIE_0_PHY_AUX_CLK,
+ P_PCIE_0_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+enum {
+ DT_TCXO_IDX,
+ DT_SLEEP_CLK_IDX,
+ DT_PCIE_0_PIPE_CLK_IDX,
+ DT_PCIE_0_PHY_AUX_CLK_IDX,
+ DT_USB3_PHY_WRAPPER_PIPE_CLK_IDX,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll2_out_even = {
+ .offset = 0x2000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll2_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_TCXO_IDX,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK_IDX },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_TCXO_IDX },
+ { .index = DT_SLEEP_CLK_IDX },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_GPLL6_OUT_MAIN, 3 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_0_PHY_AUX_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_PCIE_0_PHY_AUX_CLK_IDX },
+ { .index = DT_TCXO_IDX },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_PCIE_0_PIPE_CLK_IDX },
+ { .index = DT_TCXO_IDX },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL7_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_TCXO_IDX },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_USB3_PHY_WRAPPER_PIPE_CLK_IDX },
+ { .index = DT_TCXO_IDX },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_phy_aux_clk_src = {
+ .reg = 0x9d080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x9d064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x4906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_aggre_noc_ecpri_dma_clk_src[] = {
+ F(466500000, P_GCC_GPLL5_OUT_MAIN, 2, 0, 0),
+ F(500000000, P_GCC_GPLL2_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_aggre_noc_ecpri_dma_clk_src = {
+ .cmd_rcgr = 0x92020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_aggre_noc_ecpri_dma_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_ecpri_dma_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_aggre_noc_ecpri_gsi_clk_src[] = {
+ F(250000000, P_GCC_GPLL2_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_aggre_noc_ecpri_gsi_clk_src = {
+ .cmd_rcgr = 0x92038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_aggre_noc_ecpri_gsi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_ecpri_gsi_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x74004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x75004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x76004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x9d068,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9d04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x43010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x27154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x27288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x273bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x274f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x27624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s5_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x27758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s5_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x2788c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x279c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x28154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x28288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x283bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x284f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x28624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x28758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x2888c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x289c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc5_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GCC_GPLL0_OUT_MAIN, 10, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GCC_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(192000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc5_apps_clk_src = {
+ .cmd_rcgr = 0x3b034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc5_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc5_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc5_ice_core_clk_src[] = {
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc5_ice_core_clk_src = {
+ .cmd_rcgr = 0x3b01c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_sdcc5_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc5_ice_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_sm_bus_xo_clk_src = {
+ .cmd_rcgr = 0x5b00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sm_bus_xo_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsc_clk_src[] = {
+ F(500000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsc_clk_src = {
+ .cmd_rcgr = 0x57010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_tsc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_tsc_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x49028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x49044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x49070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x4905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_ecpri_dma_clk = {
+ .halt_reg = 0x92008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x92008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x92008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_ecpri_dma_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_aggre_noc_ecpri_dma_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_ecpri_gsi_clk = {
+ .halt_reg = 0x9201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9201c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_ecpri_gsi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_aggre_noc_ecpri_gsi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_ecpri_cc_ahb_clk = {
+ .halt_reg = 0x3e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_ecpri_cc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x8401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8401c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_ecpri_dma_clk = {
+ .halt_reg = 0x54030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x54030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x54030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_ecpri_dma_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_aggre_noc_ecpri_dma_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_ahb_clk = {
+ .halt_reg = 0x3a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_cc_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_cc_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_cc_gpll1_even_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_cc_gpll1_even_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll1_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_cc_gpll2_even_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_cc_gpll2_even_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll2_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_cc_gpll3_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_cc_gpll3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_cc_gpll4_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_cc_gpll4_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_cc_gpll5_even_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_cc_gpll5_even_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll5_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ecpri_xo_clk = {
+ .halt_reg = 0x3a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ecpri_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_100g_c2c_hm_apb_clk = {
+ .halt_reg = 0x39010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eth_100g_c2c_hm_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_100g_fh_hm_apb_0_clk = {
+ .halt_reg = 0x39004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eth_100g_fh_hm_apb_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_100g_fh_hm_apb_1_clk = {
+ .halt_reg = 0x39008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eth_100g_fh_hm_apb_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_100g_fh_hm_apb_2_clk = {
+ .halt_reg = 0x3900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eth_100g_fh_hm_apb_2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_dbg_c2c_hm_apb_clk = {
+ .halt_reg = 0x39014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eth_dbg_c2c_hm_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_dbg_snoc_axi_clk = {
+ .halt_reg = 0x3901c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3901c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eth_dbg_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gemnoc_pcie_qx_clk = {
+ .halt_reg = 0x5402c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x5402c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gemnoc_pcie_qx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x74000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x74000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x75000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x76000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x9d030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x9d02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x9c004,
+ .halt_bit = 31,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x9c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x9d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_aux_clk = {
+ .halt_reg = 0x9d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x9d048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x9d040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x43004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x43004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x43008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_anoc_pcie_clk = {
+ .halt_reg = 0x84044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x84044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x84044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_anoc_pcie_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_ecpri_dma0_clk = {
+ .halt_reg = 0x84038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x84038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x84038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_ecpri_dma0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_ecpri_dma1_clk = {
+ .halt_reg = 0x8403c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8403c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_ecpri_dma1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_ecpri_gsi_clk = {
+ .halt_reg = 0x84040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x84040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x84040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_ecpri_gsi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2714c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x27280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x273b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x274e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x2761c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x27750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x27884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x279b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x28018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2814c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x28280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x283b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x284e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x2861c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x28750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x28884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x289b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc5_ahb_clk = {
+ .halt_reg = 0x3b00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3b00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc5_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc5_apps_clk = {
+ .halt_reg = 0x3b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc5_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc5_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc5_ice_core_clk = {
+ .halt_reg = 0x3b010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc5_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc5_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sm_bus_ahb_clk = {
+ .halt_reg = 0x5b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sm_bus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sm_bus_xo_clk = {
+ .halt_reg = 0x5b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sm_bus_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sm_bus_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_cnoc_gemnoc_pcie_qx_clk = {
+ .halt_reg = 0x9200c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_cnoc_gemnoc_pcie_qx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_cnoc_gemnoc_pcie_south_qx_clk = {
+ .halt_reg = 0x92010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x92010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_cnoc_gemnoc_pcie_south_qx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_cnoc_pcie_qx_clk = {
+ .halt_reg = 0x84030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x84030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_cnoc_pcie_qx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie_sf_center_qx_clk = {
+ .halt_reg = 0x92014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x92014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie_sf_center_qx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_pcie_sf_south_qx_clk = {
+ .halt_reg = 0x92018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x92018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_pcie_sf_south_qx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsc_cfg_ahb_clk = {
+ .halt_reg = 0x5700c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_tsc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsc_cntr_clk = {
+ .halt_reg = 0x57004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_tsc_cntr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_tsc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsc_etu_clk = {
+ .halt_reg = 0x57008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_tsc_etu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_tsc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_clkref_en = {
+ .halt_reg = 0x9c008,
+ .halt_bit = 31,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x9c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_clkref_en",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x49018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x49024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x49020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x49060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x49064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x49068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x49068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_qdu1000_clocks[] = {
+ [GCC_AGGRE_NOC_ECPRI_DMA_CLK] = &gcc_aggre_noc_ecpri_dma_clk.clkr,
+ [GCC_AGGRE_NOC_ECPRI_DMA_CLK_SRC] = &gcc_aggre_noc_ecpri_dma_clk_src.clkr,
+ [GCC_AGGRE_NOC_ECPRI_GSI_CLK_SRC] = &gcc_aggre_noc_ecpri_gsi_clk_src.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CFG_NOC_ECPRI_CC_AHB_CLK] = &gcc_cfg_noc_ecpri_cc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_ECPRI_DMA_CLK] = &gcc_ddrss_ecpri_dma_clk.clkr,
+ [GCC_ECPRI_AHB_CLK] = &gcc_ecpri_ahb_clk.clkr,
+ [GCC_ECPRI_CC_GPLL0_CLK_SRC] = &gcc_ecpri_cc_gpll0_clk_src.clkr,
+ [GCC_ECPRI_CC_GPLL1_EVEN_CLK_SRC] = &gcc_ecpri_cc_gpll1_even_clk_src.clkr,
+ [GCC_ECPRI_CC_GPLL2_EVEN_CLK_SRC] = &gcc_ecpri_cc_gpll2_even_clk_src.clkr,
+ [GCC_ECPRI_CC_GPLL3_CLK_SRC] = &gcc_ecpri_cc_gpll3_clk_src.clkr,
+ [GCC_ECPRI_CC_GPLL4_CLK_SRC] = &gcc_ecpri_cc_gpll4_clk_src.clkr,
+ [GCC_ECPRI_CC_GPLL5_EVEN_CLK_SRC] = &gcc_ecpri_cc_gpll5_even_clk_src.clkr,
+ [GCC_ECPRI_XO_CLK] = &gcc_ecpri_xo_clk.clkr,
+ [GCC_ETH_DBG_SNOC_AXI_CLK] = &gcc_eth_dbg_snoc_axi_clk.clkr,
+ [GCC_GEMNOC_PCIE_QX_CLK] = &gcc_gemnoc_pcie_qx_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL2] = &gcc_gpll2.clkr,
+ [GCC_GPLL2_OUT_EVEN] = &gcc_gpll2_out_even.clkr,
+ [GCC_GPLL3] = &gcc_gpll3.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL5_OUT_EVEN] = &gcc_gpll5_out_even.clkr,
+ [GCC_GPLL6] = &gcc_gpll6.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK] = &gcc_pcie_0_phy_aux_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_ANOC_PCIE_CLK] = &gcc_qmip_anoc_pcie_clk.clkr,
+ [GCC_QMIP_ECPRI_DMA0_CLK] = &gcc_qmip_ecpri_dma0_clk.clkr,
+ [GCC_QMIP_ECPRI_DMA1_CLK] = &gcc_qmip_ecpri_dma1_clk.clkr,
+ [GCC_QMIP_ECPRI_GSI_CLK] = &gcc_qmip_ecpri_gsi_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC5_AHB_CLK] = &gcc_sdcc5_ahb_clk.clkr,
+ [GCC_SDCC5_APPS_CLK] = &gcc_sdcc5_apps_clk.clkr,
+ [GCC_SDCC5_APPS_CLK_SRC] = &gcc_sdcc5_apps_clk_src.clkr,
+ [GCC_SDCC5_ICE_CORE_CLK] = &gcc_sdcc5_ice_core_clk.clkr,
+ [GCC_SDCC5_ICE_CORE_CLK_SRC] = &gcc_sdcc5_ice_core_clk_src.clkr,
+ [GCC_SM_BUS_AHB_CLK] = &gcc_sm_bus_ahb_clk.clkr,
+ [GCC_SM_BUS_XO_CLK] = &gcc_sm_bus_xo_clk.clkr,
+ [GCC_SM_BUS_XO_CLK_SRC] = &gcc_sm_bus_xo_clk_src.clkr,
+ [GCC_SNOC_CNOC_GEMNOC_PCIE_QX_CLK] = &gcc_snoc_cnoc_gemnoc_pcie_qx_clk.clkr,
+ [GCC_SNOC_CNOC_GEMNOC_PCIE_SOUTH_QX_CLK] = &gcc_snoc_cnoc_gemnoc_pcie_south_qx_clk.clkr,
+ [GCC_SNOC_CNOC_PCIE_QX_CLK] = &gcc_snoc_cnoc_pcie_qx_clk.clkr,
+ [GCC_SNOC_PCIE_SF_CENTER_QX_CLK] = &gcc_snoc_pcie_sf_center_qx_clk.clkr,
+ [GCC_SNOC_PCIE_SF_SOUTH_QX_CLK] = &gcc_snoc_pcie_sf_south_qx_clk.clkr,
+ [GCC_TSC_CFG_AHB_CLK] = &gcc_tsc_cfg_ahb_clk.clkr,
+ [GCC_TSC_CLK_SRC] = &gcc_tsc_clk_src.clkr,
+ [GCC_TSC_CNTR_CLK] = &gcc_tsc_cntr_clk.clkr,
+ [GCC_TSC_ETU_CLK] = &gcc_tsc_etu_clk.clkr,
+ [GCC_USB2_CLKREF_EN] = &gcc_usb2_clkref_en.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_ETH_100G_C2C_HM_APB_CLK] = &gcc_eth_100g_c2c_hm_apb_clk.clkr,
+ [GCC_ETH_100G_FH_HM_APB_0_CLK] = &gcc_eth_100g_fh_hm_apb_0_clk.clkr,
+ [GCC_ETH_100G_FH_HM_APB_1_CLK] = &gcc_eth_100g_fh_hm_apb_1_clk.clkr,
+ [GCC_ETH_100G_FH_HM_APB_2_CLK] = &gcc_eth_100g_fh_hm_apb_2_clk.clkr,
+ [GCC_ETH_DBG_C2C_HM_APB_CLK] = &gcc_eth_dbg_c2c_hm_apb_clk.clkr,
+ [GCC_AGGRE_NOC_ECPRI_GSI_CLK] = &gcc_aggre_noc_ecpri_gsi_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK_SRC] = &gcc_pcie_0_phy_aux_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+};
+
+static const struct qcom_reset_map gcc_qdu1000_resets[] = {
+ [GCC_ECPRI_CC_BCR] = { 0x3e000 },
+ [GCC_ECPRI_SS_BCR] = { 0x3a000 },
+ [GCC_ETH_WRAPPER_BCR] = { 0x39000 },
+ [GCC_PCIE_0_BCR] = { 0x9d000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x9e014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x9e020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x7c000 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x9e000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PDM_BCR] = { 0x43000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x27000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x28000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 },
+ [GCC_SDCC5_BCR] = { 0x3b000 },
+ [GCC_TSC_BCR] = { 0x57000 },
+ [GCC_USB30_PRIM_BCR] = { 0x49000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x6000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x7a000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+};
+
+static const struct regmap_config gcc_qdu1000_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qdu1000_desc = {
+ .config = &gcc_qdu1000_regmap_config,
+ .clks = gcc_qdu1000_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qdu1000_clocks),
+ .resets = gcc_qdu1000_resets,
+ .num_resets = ARRAY_SIZE(gcc_qdu1000_resets),
+};
+
+static const struct of_device_id gcc_qdu1000_match_table[] = {
+ { .compatible = "qcom,qdu1000-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qdu1000_match_table);
+
+static int gcc_qdu1000_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qdu1000_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Update FORCE_MEM_CORE_ON for gcc_pcie_0_mstr_axi_clk */
+ regmap_update_bits(regmap, 0x9d024, BIT(14), BIT(14));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ ret = qcom_cc_really_probe(pdev, &gcc_qdu1000_desc, regmap);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register GCC clocks\n");
+
+ return ret;
+}
+
+static struct platform_driver gcc_qdu1000_driver = {
+ .probe = gcc_qdu1000_probe,
+ .driver = {
+ .name = "gcc-qdu1000",
+ .of_match_table = gcc_qdu1000_match_table,
+ },
+};
+
+static int __init gcc_qdu1000_init(void)
+{
+ return platform_driver_register(&gcc_qdu1000_driver);
+}
+subsys_initcall(gcc_qdu1000_init);
+
+static void __exit gcc_qdu1000_exit(void)
+{
+ platform_driver_unregister(&gcc_qdu1000_driver);
+}
+module_exit(gcc_qdu1000_exit);
+
+MODULE_DESCRIPTION("QTI GCC QDU1000 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sa8775p.c b/drivers/clk/qcom/gcc-sa8775p.c
new file mode 100644
index 000000000000..bb94ff367abd
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sa8775p.c
@@ -0,0 +1,4785 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_UFS_CARD_RX_SYMBOL_0_CLK,
+ DT_UFS_CARD_RX_SYMBOL_1_CLK,
+ DT_UFS_CARD_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_SEC_PIPE_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_PCIE_1_PIPE_CLK,
+ DT_PCIE_PHY_AUX_CLK,
+ DT_RXC0_REF_CLK,
+ DT_RXC1_REF_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_PCIE_PHY_AUX_CLK,
+ P_RXC0_REF_CLK,
+ P_RXC1_REF_CLK,
+ P_SLEEP_CLK,
+ P_UFS_CARD_RX_SYMBOL_0_CLK,
+ P_UFS_CARD_RX_SYMBOL_1_CLK,
+ P_UFS_CARD_TX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_SEC_PIPE_CLK,
+};
+
+static const struct clk_parent_data gcc_parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll1",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll5",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll7",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_RXC0_REF_CLK, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_RXC0_REF_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_RXC1_REF_CLK, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_RXC1_REF_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_PCIE_PHY_AUX_CLK, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_PCIE_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_PCIE_PHY_AUX_CLK, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_PCIE_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_UFS_CARD_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_UFS_CARD_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_UFS_CARD_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_UFS_CARD_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_UFS_CARD_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_UFS_CARD_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_20[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_21[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_22[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_23[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_SEC_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_SEC_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_phy_aux_clk_src = {
+ .reg = 0xa9074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0xa906c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
+ .reg = 0x77074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x7706c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_0_clk_src = {
+ .reg = 0x81060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_16,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_1_clk_src = {
+ .reg = 0x810d0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_17,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_tx_symbol_0_clk_src = {
+ .reg = 0x81050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x83060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x830d0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_20,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x83050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_21,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x1b068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_22,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0x2f068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_23,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = {
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(230400000, P_GCC_GPLL4_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_ptp_clk_src = {
+ .cmd_rcgr = 0xb6060,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_ptp_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = {
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_rgmii_clk_src = {
+ .cmd_rcgr = 0xb6048,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_rgmii_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb4028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_ptp_clk_src = {
+ .cmd_rcgr = 0xb4060,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_ptp_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_rgmii_clk_src = {
+ .cmd_rcgr = 0xb4048,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_rgmii_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x70004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x71004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x62004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp4_clk_src = {
+ .cmd_rcgr = 0x1e004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp4_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp5_clk_src = {
+ .cmd_rcgr = 0x1f004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp5_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xa9078,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa9054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x77078,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x77054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x23154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x23288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x233bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x234f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x23624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x23758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x2388c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x24154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x24288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x243bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x244f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x24624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x24758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x2488c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x2a154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x2a288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x2a3bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x2a4f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x2a624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x2a758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x2a88c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap3_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap3_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap3_s0_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap3_s0_clk_src = {
+ .cmd_rcgr = 0xc4154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap3_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap3_s0_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ F(384000000, P_GCC_GPLL9_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x20014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x2002c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tscss_cntr_clk_src[] = {
+ F(15625000, P_GCC_GPLL7_OUT_MAIN, 16, 1, 4),
+ { }
+};
+
+static struct clk_rcg2 gcc_tscss_cntr_clk_src = {
+ .cmd_rcgr = 0x21008,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_15,
+ .freq_tbl = ftbl_gcc_tscss_cntr_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_tscss_cntr_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x8102c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x81074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x810a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x8108c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x8302c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x83074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x830a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x8308c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0x1c028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1c040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1b028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1b040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x2f028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2f040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1b06c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x2f06c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_0_pipe_div_clk_src = {
+ .reg = 0xa9070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div_clk_src = {
+ .reg = 0x77070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap3_s0_div_clk_src = {
+ .reg = 0xc4284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1c058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1b058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x2f058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_qupv3_axi_clk = {
+ .halt_reg = 0x8e200,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8e200,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_noc_qupv3_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x810d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x810d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x810d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x830d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x830d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830d4,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0x1c05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x1b084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1b084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x2f088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2f088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2f088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy0_clk = {
+ .halt_reg = 0x76004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x76004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x76004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ahb2phy0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy2_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x76008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ahb2phy2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy3_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ahb2phy3_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x44004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_xo_clk = {
+ .halt_reg = 0x32024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_camera_throttle_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0x1c060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1c060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1b088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1b088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x2f084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2f084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2f084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7d164,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d164,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp1_hf_axi_clk = {
+ .halt_reg = 0xc7010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc7010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xc7010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_disp1_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x33010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_edp_ref_clkref_en = {
+ .halt_reg = 0x97448,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97448,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_edp_ref_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_axi_clk = {
+ .halt_reg = 0xb6018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb6018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_phy_aux_clk = {
+ .halt_reg = 0xb6024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_ptp_clk = {
+ .halt_reg = 0xb6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac0_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rgmii_clk = {
+ .halt_reg = 0xb6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac0_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_slv_ahb_clk = {
+ .halt_reg = 0xb6020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb6020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac0_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_axi_clk = {
+ .halt_reg = 0xb4018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb4018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb4018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_phy_aux_clk = {
+ .halt_reg = 0xb4024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb4024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_ptp_clk = {
+ .halt_reg = 0xb4040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb4040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac1_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_rgmii_clk = {
+ .halt_reg = 0xb4044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb4044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac1_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_slv_ahb_clk = {
+ .halt_reg = 0xb4020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb4020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_emac1_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x70000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x70000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x71000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x62000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp4_clk = {
+ .halt_reg = 0x1e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp5_clk = {
+ .halt_reg = 0x1f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gp5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x7d01c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_ahb_clk = {
+ .halt_reg = 0x7d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpu_tcu_throttle_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_gpu_tcu_throttle_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xa9038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xa902c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa902c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xa9024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_aux_clk = {
+ .halt_reg = 0xa9030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xa9050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xa9040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipediv2_clk = {
+ .halt_reg = 0xa9048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xa901c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xa9018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7702c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_aux_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x77050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipediv2_clk = {
+ .halt_reg = 0x77048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_clkref_en = {
+ .halt_reg = 0x9746c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x9746c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_throttle_cfg_clk = {
+ .halt_reg = 0xb2034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pcie_throttle_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp1_ahb_clk = {
+ .halt_reg = 0xc7008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc7008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xc7008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_disp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp1_rot_ahb_clk = {
+ .halt_reg = 0xc700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_disp1_rot_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_rot_ahb_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_disp_rot_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x34008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3400c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcpu_ahb_clk = {
+ .halt_reg = 0x34010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qmip_video_vcpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x2300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2314c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x23280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x233b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x234e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x2361c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x23750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x23884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x24018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x24280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x243b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x244e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x2461c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x24750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x24884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x2a018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x2a00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x2a14c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x2a280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x2a3b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x2a4e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x2a61c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x2a750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x2a884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_2x_clk = {
+ .halt_reg = 0xc4018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap3_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_clk = {
+ .halt_reg = 0xc400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap3_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_qspi_clk = {
+ .halt_reg = 0xc4280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap3_qspi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_s0_clk = {
+ .halt_reg = 0xc414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap3_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap3_s0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x2a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_m_ahb_clk = {
+ .halt_reg = 0xc4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc4004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_3_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_s_ahb_clk = {
+ .halt_reg = 0xc4008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc4008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_qupv3_wrap_3_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x20044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sgmi_clkref_en = {
+ .halt_reg = 0x9c034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x9c034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_sgmi_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tscss_ahb_clk = {
+ .halt_reg = 0x21024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_tscss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tscss_etu_clk = {
+ .halt_reg = 0x21020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_tscss_etu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tscss_global_cntr_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x21004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_tscss_global_cntr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_tscss_cntr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x81020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x81018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x8106c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8106c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x810a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x810a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x810a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x81028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x81028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x810c0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x810c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x81024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x81024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x81064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x83020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x83018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x83018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x8306c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8306c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8306c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x8306c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8306c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8306c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x830a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x830a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830a4,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x83028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x83028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x830c0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x830c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x83024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x83024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x83064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x83064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83064,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x1c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x1c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x1c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1b018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1b020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x2f018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x2f024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x2f020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x1b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1b060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1b064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x1b064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x2f05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x2f060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x2f064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2f064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_clkref_en = {
+ .halt_reg = 0x97468,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97468,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_usb_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x34014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x3401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3401c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0xa9004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x81004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x83004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb20_prim_gdsc = {
+ .gdscr = 0x1c004,
+ .pd = {
+ .name = "usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x1b004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x2f004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc emac0_gdsc = {
+ .gdscr = 0xb6004,
+ .pd = {
+ .name = "emac0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc emac1_gdsc = {
+ .gdscr = 0xb4004,
+ .pd = {
+ .name = "emac1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *gcc_sa8775p_clocks[] = {
+ [GCC_AGGRE_NOC_QUPV3_AXI_CLK] = &gcc_aggre_noc_qupv3_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AHB2PHY0_CLK] = &gcc_ahb2phy0_clk.clkr,
+ [GCC_AHB2PHY2_CLK] = &gcc_ahb2phy2_clk.clkr,
+ [GCC_AHB2PHY3_CLK] = &gcc_ahb2phy3_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_XO_CLK] = &gcc_camera_throttle_xo_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP1_HF_AXI_CLK] = &gcc_disp1_hf_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EDP_REF_CLKREF_EN] = &gcc_edp_ref_clkref_en.clkr,
+ [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK] = &gcc_emac0_phy_aux_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK_SRC] = &gcc_emac0_phy_aux_clk_src.clkr,
+ [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr,
+ [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr,
+ [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr,
+ [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr,
+ [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr,
+ [GCC_EMAC1_AXI_CLK] = &gcc_emac1_axi_clk.clkr,
+ [GCC_EMAC1_PHY_AUX_CLK] = &gcc_emac1_phy_aux_clk.clkr,
+ [GCC_EMAC1_PHY_AUX_CLK_SRC] = &gcc_emac1_phy_aux_clk_src.clkr,
+ [GCC_EMAC1_PTP_CLK] = &gcc_emac1_ptp_clk.clkr,
+ [GCC_EMAC1_PTP_CLK_SRC] = &gcc_emac1_ptp_clk_src.clkr,
+ [GCC_EMAC1_RGMII_CLK] = &gcc_emac1_rgmii_clk.clkr,
+ [GCC_EMAC1_RGMII_CLK_SRC] = &gcc_emac1_rgmii_clk_src.clkr,
+ [GCC_EMAC1_SLV_AHB_CLK] = &gcc_emac1_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GP4_CLK] = &gcc_gp4_clk.clkr,
+ [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr,
+ [GCC_GP5_CLK] = &gcc_gp5_clk.clkr,
+ [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_AHB_CLK] = &gcc_gpu_tcu_throttle_ahb_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_CLK] = &gcc_gpu_tcu_throttle_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK] = &gcc_pcie_0_phy_aux_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK_SRC] = &gcc_pcie_0_phy_aux_clk_src.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_DIV_CLK_SRC] = &gcc_pcie_0_pipe_div_clk_src.clkr,
+ [GCC_PCIE_0_PIPEDIV2_CLK] = &gcc_pcie_0_pipediv2_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV_CLK_SRC] = &gcc_pcie_1_pipe_div_clk_src.clkr,
+ [GCC_PCIE_1_PIPEDIV2_CLK] = &gcc_pcie_1_pipediv2_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_CLKREF_EN] = &gcc_pcie_clkref_en.clkr,
+ [GCC_PCIE_THROTTLE_CFG_CLK] = &gcc_pcie_throttle_cfg_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP1_AHB_CLK] = &gcc_qmip_disp1_ahb_clk.clkr,
+ [GCC_QMIP_DISP1_ROT_AHB_CLK] = &gcc_qmip_disp1_rot_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_DISP_ROT_AHB_CLK] = &gcc_qmip_disp_rot_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCPU_AHB_CLK] = &gcc_qmip_video_vcpu_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_CORE_2X_CLK] = &gcc_qupv3_wrap3_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP3_CORE_CLK] = &gcc_qupv3_wrap3_core_clk.clkr,
+ [GCC_QUPV3_WRAP3_QSPI_CLK] = &gcc_qupv3_wrap3_qspi_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK] = &gcc_qupv3_wrap3_s0_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK_SRC] = &gcc_qupv3_wrap3_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC] = &gcc_qupv3_wrap3_s0_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_M_AHB_CLK] = &gcc_qupv3_wrap_3_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_S_AHB_CLK] = &gcc_qupv3_wrap_3_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SGMI_CLKREF_EN] = &gcc_sgmi_clkref_en.clkr,
+ [GCC_TSCSS_AHB_CLK] = &gcc_tscss_ahb_clk.clkr,
+ [GCC_TSCSS_CNTR_CLK_SRC] = &gcc_tscss_cntr_clk_src.clkr,
+ [GCC_TSCSS_ETU_CLK] = &gcc_tscss_etu_clk.clkr,
+ [GCC_TSCSS_GLOBAL_CNTR_CLK] = &gcc_tscss_global_cntr_clk.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_card_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB_CLKREF_EN] = &gcc_usb_clkref_en.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_sa8775p_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x32000 },
+ [GCC_DISPLAY1_BCR] = { 0xc7000 },
+ [GCC_DISPLAY_BCR] = { 0x33000 },
+ [GCC_EMAC0_BCR] = { 0xb6000 },
+ [GCC_EMAC1_BCR] = { 0xb4000 },
+ [GCC_GPU_BCR] = { 0x7d000 },
+ [GCC_MMSS_BCR] = { 0x17000 },
+ [GCC_PCIE_0_BCR] = { 0xa9000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbf000 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbf008 },
+ [GCC_PCIE_0_PHY_BCR] = { 0xad144 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbf00c },
+ [GCC_PCIE_1_BCR] = { 0x77000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0xae084 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0xae090 },
+ [GCC_PCIE_1_PHY_BCR] = { 0xae08c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0xae094 },
+ [GCC_PDM_BCR] = { 0x3f000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x23000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x24000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x2a000 },
+ [GCC_QUPV3_WRAPPER_3_BCR] = { 0xc4000 },
+ [GCC_SDCC1_BCR] = { 0x20000 },
+ [GCC_TSCSS_BCR] = { 0x21000 },
+ [GCC_UFS_CARD_BCR] = { 0x81000 },
+ [GCC_UFS_PHY_BCR] = { 0x83000 },
+ [GCC_USB20_PRIM_BCR] = { 0x1c000 },
+ [GCC_USB2_PHY_PRIM_BCR] = { 0x5c028 },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x5c02c },
+ [GCC_USB30_PRIM_BCR] = { 0x1b000 },
+ [GCC_USB30_SEC_BCR] = { 0x2f000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x5c008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x5c014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x5c000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5c00c },
+ [GCC_USB3_PHY_TERT_BCR] = { 0x5c030 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x5c018 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x5c01c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x5c004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5c010 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5c020 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x5c024 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x76000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x34014, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x3401c, 2 },
+ [GCC_VIDEO_BCR] = { 0x34000 },
+};
+
+static struct gdsc *gcc_sa8775p_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB20_PRIM_GDSC] = &usb20_prim_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [EMAC0_GDSC] = &emac0_gdsc,
+ [EMAC1_GDSC] = &emac1_gdsc,
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap3_s0_clk_src),
+};
+
+static const struct regmap_config gcc_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7018,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sa8775p_desc = {
+ .config = &gcc_sa8775p_regmap_config,
+ .clks = gcc_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sa8775p_clocks),
+ .resets = gcc_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(gcc_sa8775p_resets),
+ .gdscs = gcc_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sa8775p_gdscs),
+};
+
+static const struct of_device_id gcc_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sa8775p_match_table);
+
+static int gcc_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sa8775p_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep the clocks always-ON
+ * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP1_AHB_CLK,
+ * GCC_DISP1_XO_CLK, GCC_DISP_AHB_CLK, GCC_DISP_XO_CLK,
+ * GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK, GCC_VIDEO_XO_CLK.
+ */
+ regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x32020, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xc7004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xc7018, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x33004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x33018, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x7d004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x34004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x34024, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gcc_sa8775p_desc, regmap);
+}
+
+static struct platform_driver gcc_sa8775p_driver = {
+ .probe = gcc_sa8775p_probe,
+ .driver = {
+ .name = "sa8775p-gcc",
+ .of_match_table = gcc_sa8775p_match_table,
+ },
+};
+
+static int __init gcc_sa8775p_init(void)
+{
+ return platform_driver_register(&gcc_sa8775p_driver);
+}
+core_initcall(gcc_sa8775p_init);
+
+static void __exit gcc_sa8775p_exit(void)
+{
+ platform_driver_unregister(&gcc_sa8775p_driver);
+}
+module_exit(gcc_sa8775p_exit);
+
+MODULE_DESCRIPTION("Qualcomm SA8775P GCC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 2d3980251e78..cef3c77564cf 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -23,7 +23,6 @@
enum {
P_BI_TCXO,
- P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
P_GPLL1_OUT_MAIN,
@@ -162,21 +161,18 @@ static const struct parent_map gcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_0[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct clk_parent_data gcc_parent_data_0_ao[] = {
{ .fw_name = "bi_tcxo_ao", .name = "bi_tcxo_ao" },
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_1[] = {
@@ -184,7 +180,6 @@ static const struct parent_map gcc_parent_map_1[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL6_OUT_MAIN, 2 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_1[] = {
@@ -192,7 +187,6 @@ static const struct clk_parent_data gcc_parent_data_1[] = {
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll6.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_2[] = {
@@ -201,7 +195,6 @@ static const struct parent_map gcc_parent_map_2[] = {
{ P_GPLL1_OUT_MAIN, 4 },
{ P_GPLL4_OUT_MAIN, 5 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_2[] = {
@@ -210,19 +203,16 @@ static const struct clk_parent_data gcc_parent_data_2[] = {
{ .hw = &gpll1.clkr.hw },
{ .hw = &gpll4.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_3[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
{ .hw = &gpll0.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_4[] = {
@@ -230,7 +220,6 @@ static const struct parent_map gcc_parent_map_4[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_SLEEP_CLK, 5 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_4[] = {
@@ -238,7 +227,6 @@ static const struct clk_parent_data gcc_parent_data_4[] = {
{ .hw = &gpll0.clkr.hw },
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_5[] = {
@@ -246,7 +234,6 @@ static const struct parent_map gcc_parent_map_5[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL7_OUT_MAIN, 3 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_5[] = {
@@ -254,21 +241,18 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll7.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_6[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_6[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
{ .hw = &gpll0.clkr.hw },
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
};
static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
@@ -1987,8 +1971,7 @@ static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw =
+ .parent_hws = (const struct clk_hw*[]) {
&gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
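The hunks above and in the files that follow apply one mechanical conversion: when a clock's single parent is another clk_hw registered by the same driver, struct clk_init_data can describe it with a parent_hws array instead of a one-element clk_parent_data literal, so the framework links parent and child by pointer rather than by name lookup. A minimal sketch of the two forms, with placeholder names:

/* Sketch only: "example_parent_hw" and the child clock name are placeholders. */
static struct clk_hw example_parent_hw;

/* Older form: a single clk_parent_data carrying a local clk_hw pointer. */
static const struct clk_init_data example_old_init = {
	.name = "example_child_clk",
	.parent_data = &(const struct clk_parent_data){
		.hw = &example_parent_hw,
	},
	.num_parents = 1,
};

/* Form used after this series: an array of clk_hw pointers. */
static const struct clk_init_data example_new_init = {
	.name = "example_child_clk",
	.parent_hws = (const struct clk_hw *[]) {
		&example_parent_hw,
	},
	.num_parents = 1,
};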
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 46d41ebce2b0..1dc804154031 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -2760,9 +2760,8 @@ static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw =
- &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2810,9 +2809,8 @@ static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw =
- &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index b3198784e1c3..04a99dbaa57e 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -6873,6 +6873,22 @@ static struct gdsc usb30_sec_gdsc = {
.pwrsts = PWRSTS_RET_ON,
};
+static struct gdsc emac_0_gdsc = {
+ .gdscr = 0xaa004,
+ .pd = {
+ .name = "emac_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc emac_1_gdsc = {
+ .gdscr = 0xba004,
+ .pd = {
+ .name = "emac_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_sc8280xp_clocks[] = {
[GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK] = &gcc_aggre_noc_pcie0_tunnel_axi_clk.clkr,
[GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK] = &gcc_aggre_noc_pcie1_tunnel_axi_clk.clkr,
@@ -7351,6 +7367,8 @@ static struct gdsc *gcc_sc8280xp_gdscs[] = {
[USB30_MP_GDSC] = &usb30_mp_gdsc,
[USB30_PRIM_GDSC] = &usb30_prim_gdsc,
[USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [EMAC_0_GDSC] = &emac_0_gdsc,
+ [EMAC_1_GDSC] = &emac_1_gdsc,
};
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
diff --git a/drivers/clk/qcom/gcc-sdx55.c b/drivers/clk/qcom/gcc-sdx55.c
index 4fca19006a77..d5e17122698c 100644
--- a/drivers/clk/qcom/gcc-sdx55.c
+++ b/drivers/clk/qcom/gcc-sdx55.c
@@ -22,7 +22,6 @@
enum {
P_BI_TCXO,
- P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
P_GPLL4_OUT_EVEN,
@@ -137,21 +136,18 @@ static const struct parent_map gcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_0[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct clk_parent_data gcc_parents_0_ao[] = {
{ .fw_name = "bi_tcxo_ao" },
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_2[] = {
@@ -160,7 +156,6 @@ static const struct parent_map gcc_parent_map_2[] = {
{ P_GPLL4_OUT_EVEN, 2 },
{ P_GPLL5_OUT_MAIN, 5 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_2[] = {
@@ -169,7 +164,6 @@ static const struct clk_parent_data gcc_parents_2[] = {
{ .hw = &gpll4_out_even.clkr.hw },
{ .hw = &gpll5.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_3[] = {
@@ -177,7 +171,6 @@ static const struct parent_map gcc_parent_map_3[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_SLEEP_CLK, 5 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_3[] = {
@@ -185,19 +178,16 @@ static const struct clk_parent_data gcc_parents_3[] = {
{ .hw = &gpll0.clkr.hw },
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_4[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_5[] = {
@@ -205,7 +195,6 @@ static const struct parent_map gcc_parent_map_5[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL4_OUT_EVEN, 2 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_5[] = {
@@ -213,7 +202,6 @@ static const struct clk_parent_data gcc_parents_5[] = {
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll4_out_even.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
@@ -232,7 +220,7 @@ static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -258,7 +246,7 @@ static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -272,7 +260,7 @@ static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -286,7 +274,7 @@ static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -300,7 +288,7 @@ static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -314,7 +302,7 @@ static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -328,7 +316,7 @@ static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -342,7 +330,7 @@ static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -386,7 +374,7 @@ static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -400,7 +388,7 @@ static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -414,7 +402,7 @@ static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -428,7 +416,7 @@ static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart4_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -450,7 +438,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
.parent_data = gcc_parents_0_ao,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0_ao),
.ops = &clk_rcg2_ops,
},
};
@@ -469,7 +457,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_rbcpr_clk_src",
.parent_data = gcc_parents_0_ao,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0_ao),
.ops = &clk_rcg2_ops,
},
};
@@ -493,7 +481,7 @@ static struct clk_rcg2 gcc_emac_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_emac_clk_src",
.parent_data = gcc_parents_5,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
.ops = &clk_rcg2_ops,
},
};
@@ -514,7 +502,7 @@ static struct clk_rcg2 gcc_emac_ptp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_emac_ptp_clk_src",
.parent_data = gcc_parents_2,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
.ops = &clk_rcg2_ops,
},
};
@@ -537,7 +525,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parents_3,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
.ops = &clk_rcg2_ops,
},
};
@@ -551,7 +539,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parents_3,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
.ops = &clk_rcg2_ops,
},
};
@@ -565,7 +553,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parents_3,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
.ops = &clk_rcg2_ops,
},
};
@@ -579,7 +567,7 @@ static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_aux_phy_clk_src",
.parent_data = gcc_parents_4,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
.ops = &clk_rcg2_ops,
},
};
@@ -598,7 +586,7 @@ static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_rchng_phy_clk_src",
.parent_data = gcc_parents_3,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
.ops = &clk_rcg2_ops,
},
};
@@ -619,7 +607,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -633,7 +621,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -652,7 +640,7 @@ static struct clk_rcg2 gcc_usb30_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -671,7 +659,7 @@ static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.ops = &clk_rcg2_ops,
},
};
@@ -691,7 +679,7 @@ static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk_src",
.parent_data = gcc_parents_4,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sdx65.c b/drivers/clk/qcom/gcc-sdx65.c
index 748ac15b5ed8..b0c17043551d 100644
--- a/drivers/clk/qcom/gcc-sdx65.c
+++ b/drivers/clk/qcom/gcc-sdx65.c
@@ -634,8 +634,8 @@ static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "gcc_cpuss_ahb_postdiv_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -649,8 +649,8 @@ static struct clk_regmap_div gcc_usb30_mock_utmi_postdiv_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "gcc_usb30_mock_utmi_postdiv_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_mock_utmi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -692,8 +692,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -710,8 +710,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -728,8 +728,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -746,8 +746,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -764,8 +764,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -782,8 +782,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -800,8 +800,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -818,8 +818,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -849,8 +849,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_uart1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -867,8 +867,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_uart2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -885,8 +885,8 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_uart3_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -903,8 +903,8 @@ static struct clk_branch gcc_blsp1_uart4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart4_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_blsp1_uart4_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_blsp1_uart4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -936,8 +936,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -954,8 +954,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -972,8 +972,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1017,8 +1017,8 @@ static struct clk_branch gcc_pcie_aux_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1073,8 +1073,8 @@ static struct clk_branch gcc_pcie_pipe_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1093,8 +1093,8 @@ static struct clk_branch gcc_pcie_rchng_phy_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_rchng_phy_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_rchng_phy_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_rchng_phy_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1113,8 +1113,8 @@ static struct clk_branch gcc_pcie_sleep_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_sleep_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_aux_phy_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_phy_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1161,8 +1161,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pdm2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1233,8 +1233,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1251,8 +1251,8 @@ static struct clk_branch gcc_usb30_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1269,9 +1269,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw =
- &gcc_usb30_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1327,8 +1326,8 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1369,8 +1368,8 @@ static struct clk_branch gcc_usb3_phy_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_phy_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
index 565f9912039f..5f09aefa7fb9 100644
--- a/drivers/clk/qcom/gcc-sm6115.c
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -694,7 +694,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = {
.parent_data = gcc_parents_7,
.num_parents = ARRAY_SIZE(gcc_parents_7),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -715,7 +715,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = {
.parent_data = gcc_parents_9,
.num_parents = ARRAY_SIZE(gcc_parents_9),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -738,7 +738,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -753,7 +753,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -768,7 +768,7 @@ static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -790,7 +790,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -805,7 +805,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -820,7 +820,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -835,7 +835,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -857,7 +857,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
.parent_data = gcc_parents_8,
.num_parents = ARRAY_SIZE(gcc_parents_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -881,7 +881,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
.parent_data = gcc_parents_8,
.num_parents = ARRAY_SIZE(gcc_parents_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -916,7 +916,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -941,7 +941,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -956,7 +956,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -971,7 +971,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -986,7 +986,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1001,7 +1001,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1024,7 +1024,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
.parent_data = gcc_parents_10,
.num_parents = ARRAY_SIZE(gcc_parents_10),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1046,7 +1046,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
.parent_data = gcc_parents_7,
.num_parents = ARRAY_SIZE(gcc_parents_7),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1116,7 +1116,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.name = "gcc_pdm2_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1258,7 +1258,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1305,7 +1305,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parents_11,
.num_parents = ARRAY_SIZE(gcc_parents_11),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
.flags = CLK_OPS_PARENT_ENABLE,
},
};
@@ -1329,7 +1329,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
.name = "gcc_ufs_phy_axi_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1351,7 +1351,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
.name = "gcc_ufs_phy_ice_core_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1392,7 +1392,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.name = "gcc_ufs_phy_unipro_core_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1414,7 +1414,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.name = "gcc_usb30_prim_master_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1483,7 +1483,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
.parent_data = gcc_parents_13,
.num_parents = ARRAY_SIZE(gcc_parents_13),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
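The SM6115 hunks above change only the ops attached to each RCG: clk_rcg2_shared_ops parks a source on its safe parent while the clock is disabled so its cached configuration survives, and clk_rcg2_floor_ops makes the SDCC sources round rate requests downwards so the card clock never exceeds what was asked for. A minimal sketch of the SDCC case, in which every name, offset and frequency is a placeholder:

/* Sketch only: all names, offsets and frequencies below are placeholders. */
static const struct freq_tbl ftbl_example_sdcc_apps_clk_src[] = {
	F(400000, P_BI_TCXO, 12, 1, 4),
	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
	{ }
};

static struct clk_rcg2 example_sdcc_apps_clk_src = {
	.cmd_rcgr = 0x1000,		/* placeholder register offset */
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = gcc_parent_map_0,
	.freq_tbl = ftbl_example_sdcc_apps_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_sdcc_apps_clk_src",
		.parent_data = gcc_parents_0,
		.num_parents = ARRAY_SIZE(gcc_parents_0),
		/* floor ops: never round the card clock above the request */
		.ops = &clk_rcg2_floor_ops,
	},
};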
diff --git a/drivers/clk/qcom/gcc-sm6375.c b/drivers/clk/qcom/gcc-sm6375.c
index 89a1cc90b145..417a0fd242ec 100644
--- a/drivers/clk/qcom/gcc-sm6375.c
+++ b/drivers/clk/qcom/gcc-sm6375.c
@@ -1766,8 +1766,8 @@ static struct clk_branch gcc_camss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1784,8 +1784,8 @@ static struct clk_branch gcc_camss_cci_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_cci_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_cci_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1802,8 +1802,8 @@ static struct clk_branch gcc_camss_cci_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_cci_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_cci_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1820,8 +1820,8 @@ static struct clk_branch gcc_camss_cphy_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cphy_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1838,8 +1838,8 @@ static struct clk_branch gcc_camss_cphy_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cphy_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1856,8 +1856,8 @@ static struct clk_branch gcc_camss_cphy_2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cphy_2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1874,8 +1874,8 @@ static struct clk_branch gcc_camss_cphy_3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cphy_3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1892,8 +1892,8 @@ static struct clk_branch gcc_camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_csi0phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1910,8 +1910,8 @@ static struct clk_branch gcc_camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_csi1phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1928,8 +1928,8 @@ static struct clk_branch gcc_camss_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi2phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_csi2phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_csi2phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1946,8 +1946,8 @@ static struct clk_branch gcc_camss_csi3phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi3phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_csi3phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_csi3phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1964,8 +1964,8 @@ static struct clk_branch gcc_camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_mclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1982,8 +1982,8 @@ static struct clk_branch gcc_camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_mclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2000,8 +2000,8 @@ static struct clk_branch gcc_camss_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_mclk2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_mclk2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2018,8 +2018,8 @@ static struct clk_branch gcc_camss_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_mclk3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_mclk3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2036,8 +2036,8 @@ static struct clk_branch gcc_camss_mclk4_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_mclk4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_mclk4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2067,8 +2067,8 @@ static struct clk_branch gcc_camss_ope_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ope_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_ope_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_ope_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2085,8 +2085,8 @@ static struct clk_branch gcc_camss_ope_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ope_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_ope_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_ope_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2116,8 +2116,8 @@ static struct clk_branch gcc_camss_tfe_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2134,8 +2134,8 @@ static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_0_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2152,8 +2152,8 @@ static struct clk_branch gcc_camss_tfe_0_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_0_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_0_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_0_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2170,8 +2170,8 @@ static struct clk_branch gcc_camss_tfe_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2188,8 +2188,8 @@ static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_1_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2206,8 +2206,8 @@ static struct clk_branch gcc_camss_tfe_1_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_1_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_1_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_1_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2224,8 +2224,8 @@ static struct clk_branch gcc_camss_tfe_2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2242,8 +2242,8 @@ static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_2_cphy_rx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2260,8 +2260,8 @@ static struct clk_branch gcc_camss_tfe_2_csid_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_tfe_2_csid_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_tfe_2_csid_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_tfe_2_csid_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2278,8 +2278,8 @@ static struct clk_branch gcc_camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_top_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_camss_top_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_camss_top_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2298,8 +2298,8 @@ static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb3_prim_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2330,8 +2330,9 @@ static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "gcc_disp_gpll0_clk_src",
- .parent_names =
- (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_regmap_div_ops,
},
@@ -2344,8 +2345,8 @@ static struct clk_branch gcc_disp_gpll0_div_clk_src = {
.enable_mask = BIT(20),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_gpll0_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_disp_gpll0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_disp_gpll0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2407,8 +2408,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2425,8 +2426,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2443,8 +2444,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2476,8 +2477,8 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gpll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2493,8 +2494,8 @@ static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gpll0_out_even.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_even.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2554,8 +2555,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pdm2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2716,8 +2717,8 @@ static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2734,8 +2735,8 @@ static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2752,8 +2753,8 @@ static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2770,8 +2771,8 @@ static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2788,8 +2789,8 @@ static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2806,8 +2807,8 @@ static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2850,8 +2851,8 @@ static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
.enable_mask = BIT(21),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2868,8 +2869,8 @@ static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
.enable_mask = BIT(22),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2886,8 +2887,8 @@ static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
.enable_mask = BIT(23),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2904,8 +2905,8 @@ static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
.enable_mask = BIT(24),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2922,8 +2923,8 @@ static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
.enable_mask = BIT(25),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2940,8 +2941,8 @@ static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
.enable_mask = BIT(26),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3031,8 +3032,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3051,8 +3052,8 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ice_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3082,8 +3083,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3102,8 +3103,8 @@ static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_cpuss_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
@@ -3120,8 +3121,8 @@ static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_ufs_phy_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3140,8 +3141,8 @@ static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_usb3_prim_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3175,8 +3176,8 @@ static struct clk_branch gcc_ufs_phy_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3195,8 +3196,8 @@ static struct clk_branch gcc_ufs_phy_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3215,8 +3216,8 @@ static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3261,8 +3262,8 @@ static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3279,8 +3280,8 @@ static struct clk_branch gcc_usb30_prim_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_master_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3297,8 +3298,8 @@ static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3367,8 +3368,8 @@ static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_com_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3487,8 +3488,8 @@ static struct clk_branch gcc_video_vcodec0_sys_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_vcodec0_sys_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_video_venus_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_video_venus_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3505,8 +3506,8 @@ static struct clk_branch gcc_video_venus_ctl_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_venus_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_video_venus_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_video_venus_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3533,7 +3534,8 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ /* TODO: Change to OFF_ON when USB drivers get proper suspend support */
+ .pwrsts = PWRSTS_RET_ON,
};

static struct gdsc ufs_phy_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm7150.c b/drivers/clk/qcom/gcc-sm7150.c
new file mode 100644
index 000000000000..6b628178f62c
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm7150.c
@@ -0,0 +1,3048 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Danila Tikhonov <danila@jiaxyga.com>
+ * Copyright (c) 2023, David Wronek <davidwronek@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm7150-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_pll0_main_div_cdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pll0_main_div_cdiv",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x27000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_data = gcc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17754,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17884,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(208000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_prim_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb3_prim_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb3_prim_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_aggre_ufs_phy_axi_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_vsensor_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0xb06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_rbcpr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x4452c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4452c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0xb070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_vsensor_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17880,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_tsif_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77038,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77094,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x77094,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77094,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_vsensor_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_vsensor_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_vsensor_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_vs_ctrl_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
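+/* GDSCs (globally distributed switch controllers) exposed as power domains */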
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d030,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d03c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
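+/* Plain clk_hw clocks registered alongside the clk_regmap clocks below */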
+static struct clk_hw *gcc_sm7150_hws[] = {
+ [GCC_GPLL0_MAIN_DIV_CDIV] = &gcc_pll0_main_div_cdiv.hw,
+};
+
+static struct clk_regmap *gcc_sm7150_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] =
+ &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] =
+ &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] =
+ &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL7] = &gpll7.clkr,
+};
+
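+/* Block control resets (BCRs) exposed via the reset controller framework */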
+static const struct qcom_reset_map gcc_sm7150_resets[] = {
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x26000 },
+ [GCC_VIDEO_AXI_CLK_BCR] = { 0xb01c, 2 },
+};
+
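+/*
+ * QUPv3 serial engine clock sources support DFS (Dynamic Frequency
+ * Switching); qcom_cc_register_rcg_dfs() takes care of them at probe time.
+ */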
+static const struct clk_rcg_dfs_data gcc_sm7150_dfs_desc[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+};
+
+static struct gdsc *gcc_sm7150_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static const struct regmap_config gcc_sm7150_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1820b0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm7150_desc = {
+ .config = &gcc_sm7150_regmap_config,
+ .clk_hws = gcc_sm7150_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_sm7150_hws),
+ .clks = gcc_sm7150_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm7150_clocks),
+ .resets = gcc_sm7150_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm7150_resets),
+ .gdscs = gcc_sm7150_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm7150_gdscs),
+};
+
+static const struct of_device_id gcc_sm7150_match_table[] = {
+ { .compatible = "qcom,sm7150-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm7150_match_table);
+
+static int gcc_sm7150_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm7150_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Disable the GPLL0 active input to MM blocks, NPU
+ * and GPU via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /*
+ * Keep the critical clocks always-ON
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_VIDEO_XO_CLK,
+ * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_sm7150_dfs_desc,
+ ARRAY_SIZE(gcc_sm7150_dfs_desc));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sm7150_desc, regmap);
+}
+
+static struct platform_driver gcc_sm7150_driver = {
+ .probe = gcc_sm7150_probe,
+ .driver = {
+ .name = "gcc-sm7150",
+ .of_match_table = gcc_sm7150_match_table,
+ },
+};
+
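+/*
+ * Register at subsys_initcall time so the clock controller is available
+ * before its consumers probe.
+ */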
+static int __init gcc_sm7150_init(void)
+{
+ return platform_driver_register(&gcc_sm7150_driver);
+}
+subsys_initcall(gcc_sm7150_init);
+
+static void __exit gcc_sm7150_exit(void)
+{
+ platform_driver_unregister(&gcc_sm7150_driver);
+}
+module_exit(gcc_sm7150_exit);
+
+MODULE_DESCRIPTION("Qualcomm SM7150 Global Clock Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 09cf827addab..70b067f3618c 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -26,7 +26,6 @@
enum {
P_BI_TCXO,
P_AUD_REF_CLK,
- P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
P_GPLL7_OUT_MAIN,
@@ -117,14 +116,12 @@ static const struct parent_map gcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_0[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_1[] = {
@@ -132,7 +129,6 @@ static const struct parent_map gcc_parent_map_1[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_SLEEP_CLK, 5 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_1[] = {
@@ -140,41 +136,34 @@ static const struct clk_parent_data gcc_parents_1[] = {
{ .hw = &gpll0.clkr.hw },
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_2[] = {
{ P_BI_TCXO, 0 },
{ P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_2[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_3[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_3[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
{ .hw = &gpll0.clkr.hw },
- { .fw_name = "core_bi_pll_test_se"},
};
static const struct parent_map gcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_4[] = {
{ .fw_name = "bi_tcxo", .name = "bi_tcxo" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_5[] = {
@@ -182,7 +171,6 @@ static const struct parent_map gcc_parent_map_5[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL7_OUT_MAIN, 3 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_5[] = {
@@ -190,7 +178,6 @@ static const struct clk_parent_data gcc_parents_5[] = {
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll7.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_6[] = {
@@ -198,7 +185,6 @@ static const struct parent_map gcc_parent_map_6[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL9_OUT_MAIN, 2 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_6[] = {
@@ -206,7 +192,6 @@ static const struct clk_parent_data gcc_parents_6[] = {
{ .hw = &gpll0.clkr.hw },
{ .hw = &gpll9.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_7[] = {
@@ -214,7 +199,6 @@ static const struct parent_map gcc_parent_map_7[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_AUD_REF_CLK, 2 },
{ P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parents_7[] = {
@@ -222,7 +206,6 @@ static const struct clk_parent_data gcc_parents_7[] = {
{ .hw = &gpll0.clkr.hw },
{ .fw_name = "aud_ref_clk", .name = "aud_ref_clk" },
{ .hw = &gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index a0ba37656b07..b6cf4bc88d4d 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -2998,9 +2998,8 @@ static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw =
- &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3048,9 +3047,8 @@ static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw =
- &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
index c3731f96c8e6..1385a98eb3bb 100644
--- a/drivers/clk/qcom/gcc-sm8350.c
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -17,12 +17,12 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "gdsc.h"
#include "reset.h"
enum {
P_BI_TCXO,
- P_CORE_BI_PLL_TEST_SE,
P_GCC_GPLL0_OUT_EVEN,
P_GCC_GPLL0_OUT_MAIN,
P_GCC_GPLL4_OUT_MAIN,
@@ -119,14 +119,12 @@ static const struct parent_map gcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
{ P_GCC_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_0[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &gcc_gpll0.clkr.hw },
{ .hw = &gcc_gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_1[] = {
@@ -134,7 +132,6 @@ static const struct parent_map gcc_parent_map_1[] = {
{ P_GCC_GPLL0_OUT_MAIN, 1 },
{ P_SLEEP_CLK, 5 },
{ P_GCC_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_1[] = {
@@ -142,49 +139,24 @@ static const struct clk_parent_data gcc_parent_data_1[] = {
{ .hw = &gcc_gpll0.clkr.hw },
{ .fw_name = "sleep_clk" },
{ .hw = &gcc_gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_2[] = {
{ P_BI_TCXO, 0 },
{ P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_2[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_3[] = {
{ P_BI_TCXO, 0 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
- { .fw_name = "core_bi_pll_test_se" },
-};
-
-static const struct parent_map gcc_parent_map_4[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_4[] = {
- { .fw_name = "pcie_0_pipe_clk", },
- { .fw_name = "bi_tcxo" },
-};
-
-static const struct parent_map gcc_parent_map_5[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_5[] = {
- { .fw_name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
};
static const struct parent_map gcc_parent_map_6[] = {
@@ -193,7 +165,6 @@ static const struct parent_map gcc_parent_map_6[] = {
{ P_GCC_GPLL9_OUT_MAIN, 2 },
{ P_GCC_GPLL4_OUT_MAIN, 5 },
{ P_GCC_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
};
static const struct clk_parent_data gcc_parent_data_6[] = {
@@ -202,7 +173,6 @@ static const struct clk_parent_data gcc_parent_data_6[] = {
{ .hw = &gcc_gpll9.clkr.hw },
{ .hw = &gcc_gpll4.clkr.hw },
{ .hw = &gcc_gpll0_out_even.clkr.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map gcc_parent_map_7[] = {
@@ -267,54 +237,48 @@ static const struct clk_parent_data gcc_parent_data_12[] = {
static const struct parent_map gcc_parent_map_13[] = {
{ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
- { P_CORE_BI_PLL_TEST_SE, 1 },
{ P_BI_TCXO, 2 },
};
static const struct clk_parent_data gcc_parent_data_13[] = {
{ .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
- { .fw_name = "core_bi_pll_test_se" },
{ .fw_name = "bi_tcxo" },
};
static const struct parent_map gcc_parent_map_14[] = {
{ P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, 0 },
- { P_CORE_BI_PLL_TEST_SE, 1 },
{ P_BI_TCXO, 2 },
};
static const struct clk_parent_data gcc_parent_data_14[] = {
{ .fw_name = "usb3_uni_phy_sec_gcc_usb30_pipe_clk" },
- { .fw_name = "core_bi_pll_test_se" },
{ .fw_name = "bi_tcxo" },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x6b054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_4,
- .num_parents = ARRAY_SIZE(gcc_parent_data_4),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x8d054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_5,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_5,
- .num_parents = ARRAY_SIZE(gcc_parent_data_5),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 666efa5ff978..84764cc3db4f 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -66,8 +66,8 @@ static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gpll0_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gpll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
@@ -1070,8 +1070,8 @@ static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1119,8 +1119,8 @@ static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre_ufs_phy_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1139,8 +1139,8 @@ static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1159,8 +1159,8 @@ static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre_usb3_prim_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1239,8 +1239,8 @@ static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb3_prim_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1330,8 +1330,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1348,8 +1348,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1366,8 +1366,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1383,8 +1383,8 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gpll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1400,8 +1400,8 @@ static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gpll0_out_even.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1446,8 +1446,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1505,8 +1505,8 @@ static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
.enable_mask = BIT(22),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_phy_rchng_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1523,8 +1523,8 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1569,8 +1569,8 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
.enable_mask = BIT(29),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1628,8 +1628,8 @@ static struct clk_branch gcc_pcie_1_phy_aux_clk = {
.enable_mask = BIT(24),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1646,8 +1646,8 @@ static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
.enable_mask = BIT(23),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_phy_rchng_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1664,8 +1664,8 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(30),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1710,8 +1710,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pdm2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1917,8 +1917,8 @@ static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1935,8 +1935,8 @@ static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1953,8 +1953,8 @@ static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1971,8 +1971,8 @@ static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1989,8 +1989,8 @@ static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2007,8 +2007,8 @@ static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2025,8 +2025,8 @@ static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s6_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2043,8 +2043,8 @@ static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s7_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2087,8 +2087,8 @@ static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
.enable_mask = BIT(22),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2105,8 +2105,8 @@ static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
.enable_mask = BIT(23),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2123,8 +2123,8 @@ static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
.enable_mask = BIT(24),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2141,8 +2141,8 @@ static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
.enable_mask = BIT(25),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2159,8 +2159,8 @@ static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
.enable_mask = BIT(26),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2177,8 +2177,8 @@ static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
.enable_mask = BIT(27),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2195,8 +2195,8 @@ static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
.enable_mask = BIT(28),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s6_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2239,8 +2239,8 @@ static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2257,8 +2257,8 @@ static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2275,8 +2275,8 @@ static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2293,8 +2293,8 @@ static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2311,8 +2311,8 @@ static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2329,8 +2329,8 @@ static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2347,8 +2347,8 @@ static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s6_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2468,8 +2468,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2514,8 +2514,8 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2577,8 +2577,8 @@ static struct clk_branch gcc_ufs_phy_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2597,8 +2597,8 @@ static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2617,8 +2617,8 @@ static struct clk_branch gcc_ufs_phy_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2637,8 +2637,8 @@ static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2657,8 +2657,8 @@ static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2677,8 +2677,8 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2695,8 +2695,8 @@ static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_rx_symbol_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2713,8 +2713,8 @@ static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_rx_symbol_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2731,8 +2731,8 @@ static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_tx_symbol_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2751,8 +2751,8 @@ static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2771,8 +2771,8 @@ static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2789,8 +2789,8 @@ static struct clk_branch gcc_usb30_prim_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_master_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2807,8 +2807,8 @@ static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2851,8 +2851,8 @@ static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2869,8 +2869,8 @@ static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_com_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2889,8 +2889,8 @@ static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 81d630c666d1..277cd4f020ff 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -82,8 +82,8 @@ static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gpll0_out_even",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gpll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
@@ -1198,8 +1198,8 @@ static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1232,8 +1232,8 @@ static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre_ufs_phy_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1252,8 +1252,8 @@ static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1272,8 +1272,8 @@ static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre_usb3_prim_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1352,8 +1352,8 @@ static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb3_prim_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1430,8 +1430,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1448,8 +1448,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1466,8 +1466,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gp3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1483,8 +1483,8 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gpll0.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1500,8 +1500,8 @@ static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_div_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_gpll0_out_even.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1546,8 +1546,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1594,8 +1594,8 @@ static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
.enable_mask = BIT(22),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_phy_rchng_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1612,8 +1612,8 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1658,8 +1658,8 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
.enable_mask = BIT(29),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1706,8 +1706,8 @@ static struct clk_branch gcc_pcie_1_phy_aux_clk = {
.enable_mask = BIT(24),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1724,8 +1724,8 @@ static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
.enable_mask = BIT(23),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_phy_rchng_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1742,8 +1742,8 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(30),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1788,8 +1788,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_pdm2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1982,8 +1982,8 @@ static struct clk_branch gcc_qupv3_i2c_s0_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2000,8 +2000,8 @@ static struct clk_branch gcc_qupv3_i2c_s1_clk = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2018,8 +2018,8 @@ static struct clk_branch gcc_qupv3_i2c_s2_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2036,8 +2036,8 @@ static struct clk_branch gcc_qupv3_i2c_s3_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2054,8 +2054,8 @@ static struct clk_branch gcc_qupv3_i2c_s4_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2072,8 +2072,8 @@ static struct clk_branch gcc_qupv3_i2c_s5_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2090,8 +2090,8 @@ static struct clk_branch gcc_qupv3_i2c_s6_clk = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s6_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2108,8 +2108,8 @@ static struct clk_branch gcc_qupv3_i2c_s7_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s7_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2126,8 +2126,8 @@ static struct clk_branch gcc_qupv3_i2c_s8_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s8_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2144,8 +2144,8 @@ static struct clk_branch gcc_qupv3_i2c_s9_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_i2c_s9_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2203,8 +2203,8 @@ static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
.enable_mask = BIT(22),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2221,8 +2221,8 @@ static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
.enable_mask = BIT(23),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2239,8 +2239,8 @@ static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
.enable_mask = BIT(24),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2257,8 +2257,8 @@ static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
.enable_mask = BIT(25),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2275,8 +2275,8 @@ static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
.enable_mask = BIT(26),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2293,8 +2293,8 @@ static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
.enable_mask = BIT(27),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2311,8 +2311,8 @@ static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
.enable_mask = BIT(28),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s6_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2329,8 +2329,8 @@ static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s7_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2373,8 +2373,8 @@ static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2391,8 +2391,8 @@ static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2409,8 +2409,8 @@ static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2427,8 +2427,8 @@ static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2445,8 +2445,8 @@ static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s4_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2463,8 +2463,8 @@ static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s5_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2481,8 +2481,8 @@ static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s6_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2499,8 +2499,8 @@ static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s7_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2590,8 +2590,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2621,8 +2621,8 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2656,8 +2656,8 @@ static struct clk_branch gcc_ufs_phy_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2676,8 +2676,8 @@ static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2696,8 +2696,8 @@ static struct clk_branch gcc_ufs_phy_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2716,8 +2716,8 @@ static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2736,8 +2736,8 @@ static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2756,8 +2756,8 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2774,8 +2774,8 @@ static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_rx_symbol_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2792,8 +2792,8 @@ static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_rx_symbol_1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2810,8 +2810,8 @@ static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_tx_symbol_0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2830,8 +2830,8 @@ static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2850,8 +2850,8 @@ static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2868,8 +2868,8 @@ static struct clk_branch gcc_usb30_prim_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_master_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2886,8 +2886,8 @@ static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2917,8 +2917,8 @@ static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2935,8 +2935,8 @@ static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_com_aux_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2955,8 +2955,8 @@ static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_pipe_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 9e4d6ce891aa..5358e28122ab 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -136,7 +136,8 @@ static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
return 0;
}
-static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
+static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status,
+ bool wait)
{
int ret;
@@ -149,7 +150,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);
/* If disabling votable gdscs, don't poll on status */
- if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
+ if ((sc->flags & VOTABLE) && status == GDSC_OFF && !wait) {
/*
* Add a short delay here to ensure that an enable
* right after it was disabled does not put it in an
@@ -275,7 +276,7 @@ static int gdsc_enable(struct generic_pm_domain *domain)
gdsc_deassert_clamp_io(sc);
}
- ret = gdsc_toggle_logic(sc, GDSC_ON);
+ ret = gdsc_toggle_logic(sc, GDSC_ON, false);
if (ret)
return ret;
@@ -352,7 +353,7 @@ static int gdsc_disable(struct generic_pm_domain *domain)
if (sc->pwrsts == PWRSTS_RET_ON)
return 0;
- ret = gdsc_toggle_logic(sc, GDSC_OFF);
+ ret = gdsc_toggle_logic(sc, GDSC_OFF, domain->synced_poweroff);
if (ret)
return ret;
@@ -392,7 +393,7 @@ static int gdsc_init(struct gdsc *sc)
/* Force gdsc ON if only ON state is supported */
if (sc->pwrsts == PWRSTS_ON) {
- ret = gdsc_toggle_logic(sc, GDSC_ON);
+ ret = gdsc_toggle_logic(sc, GDSC_ON, false);
if (ret)
return ret;
}
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index a925ac90018d..f929e0f2333f 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -106,9 +106,9 @@ static const struct parent_map gpu_xo_gpupll0_map[] = {
{ P_GPUPLL0_OUT_EVEN, 1 },
};
-static const struct clk_parent_data gpu_xo_gpupll0[] = {
- { .hw = &gpucc_cxo_clk.clkr.hw },
- { .hw = &gpupll0_out_even.clkr.hw },
+static const struct clk_hw *gpu_xo_gpupll0[] = {
+ &gpucc_cxo_clk.clkr.hw,
+ &gpupll0_out_even.clkr.hw,
};
static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
@@ -142,7 +142,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
.freq_tbl = ftbl_gfx3d_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_data = gpu_xo_gpupll0,
+ .parent_hws = gpu_xo_gpupll0,
.num_parents = ARRAY_SIZE(gpu_xo_gpupll0),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
new file mode 100644
index 000000000000..18d23be8d435
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_GCC_GPU_GPLL0_CLK_SRC,
+ DT_GCC_GPU_GPLL0_DIV_CLK_SRC,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct clk_parent_data parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+/* 810MHz configuration */
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x2a,
+ .alpha = 0x3000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 1000MHz configuration */
+static struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x34,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_xo_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_3,
+ .freq_tbl = ftbl_gpu_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_xo_clk_src",
+ .parent_data = gpu_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_demet_div_clk_src = {
+ .reg = 0x9054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_demet_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x9430,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cb_clk = {
+ .halt_reg = 0x93a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x9130,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9130,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_demet_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_demet_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_demet_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gpu_cc_sa8775p_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CB_CLK] = &gpu_cc_cb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_CLK] = &gpu_cc_demet_clk.clkr,
+ [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+};
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x905c,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = AON_RESET | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc *gpu_cc_sa8775p_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_sa8775p_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x9104 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPUCC_GPU_CC_FF_BCR] = { 0x9470 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static const struct regmap_config gpu_cc_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sa8775p_desc = {
+ .config = &gpu_cc_sa8775p_regmap_config,
+ .clks = gpu_cc_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sa8775p_clocks),
+ .resets = gpu_cc_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sa8775p_resets),
+ .gdscs = gpu_cc_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sa8775p_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sa8775p_match_table);
+
+static int gpu_cc_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sa8775p_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sa8775p_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sa8775p_driver = {
+ .probe = gpu_cc_sa8775p_probe,
+ .driver = {
+ .name = "gpu_cc-sa8775p",
+ .of_match_table = gpu_cc_sa8775p_match_table,
+ },
+};
+
+static int __init gpu_cc_sa8775p_init(void)
+{
+ return platform_driver_register(&gpu_cc_sa8775p_driver);
+}
+subsys_initcall(gpu_cc_sa8775p_init);
+
+static void __exit gpu_cc_sa8775p_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sa8775p_driver);
+}
+module_exit(gpu_cc_sa8775p_exit);
+
+MODULE_DESCRIPTION("SA8775P GPUCC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index d738251cba17..3f92f0b43be6 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -21,8 +21,6 @@
#define CX_GMU_CBCR_SLEEP_SHIFT 4
#define CX_GMU_CBCR_WAKE_MASK 0xF
#define CX_GMU_CBCR_WAKE_SHIFT 8
-#define CLK_DIS_WAIT_SHIFT 12
-#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT)
enum {
P_BI_TCXO,
@@ -108,8 +106,8 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cx_gmu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -160,6 +158,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
static struct gdsc cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .clk_dis_wait_val = 8,
.pd = {
.name = "cx_gdsc",
},
@@ -242,10 +241,6 @@ static int gpu_cc_sc7180_probe(struct platform_device *pdev)
value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
regmap_update_bits(regmap, 0x1098, mask, value);
- /* Configure clk_dis_wait for gpu_cx_gdsc */
- regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
- 8 << CLK_DIS_WAIT_SHIFT);
-
return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap);
}
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 110b54401bc6..970d7414bdf0 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -22,8 +22,6 @@
#define CX_GMU_CBCR_SLEEP_SHIFT 4
#define CX_GMU_CBCR_WAKE_MASK 0xf
#define CX_GMU_CBCR_WAKE_SHIFT 8
-#define CLK_DIS_WAIT_SHIFT 12
-#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT)
enum {
P_BI_TCXO,
@@ -121,6 +119,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -193,10 +192,6 @@ static int gpu_cc_sdm845_probe(struct platform_device *pdev)
value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
regmap_update_bits(regmap, 0x1098, mask, value);
- /* Configure clk_dis_wait for gpu_cx_gdsc */
- regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
- 8 << CLK_DIS_WAIT_SHIFT);
-
return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
}
diff --git a/drivers/clk/qcom/gpucc-sm6115.c b/drivers/clk/qcom/gpucc-sm6115.c
new file mode 100644
index 000000000000..c84727e8352d
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm6115.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GCC_GPU_GPLL0_CLK_SRC,
+ DT_GCC_GPU_GPLL0_DIV_CLK_SRC,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_AUX2,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_AUX,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco default_vco[] = {
+ { 1000000000, 2000000000, 0 },
+};
+
+static struct pll_vco pll1_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0,
+ .alpha_hi = 0x80,
+ .vco_val = 0x0 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .alpha_en_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .aux2_output_mask = BIT(2),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi1_val = 0x1,
+};
+
+/* 1200MHz configuration */
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_aux2[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpu_cc_pll0_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_aux2),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0_out_aux2",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 640MHz configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x21,
+ .alpha = 0x55555555,
+ .alpha_hi = 0x55,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = pll1_vco,
+ .num_vco = ARRAY_SIZE(pll1_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll1_out_aux[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_aux = {
+ .offset = 0x100,
+ .post_div_shift = 15,
+ .post_div_table = post_div_table_gpu_cc_pll1_out_aux,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll1_out_aux),
+ .width = 3,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1_out_aux",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = P_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_AUX2, 2 },
+ { P_GPU_CC_PLL1_OUT_AUX, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = P_BI_TCXO },
+ { .hw = &gpu_cc_pll0_out_aux2.clkr.hw },
+ { .hw = &gpu_cc_pll1_out_aux.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(320000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(465000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(600000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(745000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(820000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(900000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(950000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(980000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+ .halt_reg = 0x1060,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_cxo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .resets = (unsigned int []){ GPU_GX_BCR },
+ .reset_count = 1,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ },
+ .parent = &gpu_cx_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | SW_RESET | VOTABLE,
+};
+
+static struct clk_regmap *gpu_cc_sm6115_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL0_OUT_AUX2] = &gpu_cc_pll0_out_aux2.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_PLL1_OUT_AUX] = &gpu_cc_pll1_out_aux.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm6115_resets[] = {
+ [GPU_GX_BCR] = { 0x1008 },
+};
+
+static struct gdsc *gpu_cc_sm6115_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm6115_desc = {
+ .config = &gpu_cc_sm6115_regmap_config,
+ .clks = gpu_cc_sm6115_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm6115_clocks),
+ .resets = gpu_cc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm6115_resets),
+ .gdscs = gpu_cc_sm6115_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm6115_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm6115_match_table[] = {
+ { .compatible = "qcom,sm6115-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm6115_match_table);
+
+static int gpu_cc_sm6115_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm6115_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_alpha_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Set recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ qcom_branch_set_wakeup(regmap, gpu_cc_cx_gmu_clk, 0xf);
+ qcom_branch_set_sleep(regmap, gpu_cc_cx_gmu_clk, 0xf);
+
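+	/* Keep the GX gfx3d memory core and peripheral logic powered even while the branch clock is gated */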
+ qcom_branch_set_force_mem_core(regmap, gpu_cc_gx_gfx3d_clk, true);
+ qcom_branch_set_force_periph_on(regmap, gpu_cc_gx_gfx3d_clk, true);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm6115_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm6115_driver = {
+ .probe = gpu_cc_sm6115_probe,
+ .driver = {
+ .name = "sm6115-gpucc",
+ .of_match_table = gpu_cc_sm6115_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sm6115_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM6115 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sm6125.c b/drivers/clk/qcom/gpucc-sm6125.c
new file mode 100644
index 000000000000..d4f1296a48ef
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm6125.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6125-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GCC_GPU_GPLL0_CLK_SRC,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_2X_CLK,
+ P_GPU_CC_PLL0_OUT_AUX2,
+ P_GPU_CC_PLL1_OUT_AUX,
+ P_GPU_CC_PLL1_OUT_AUX2,
+};
+
+static struct pll_vco gpu_cc_pll_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 500000000, 1000000000, 2 },
+};
+
+/* 1020MHz configuration */
+static const struct alpha_pll_config gpu_pll0_config = {
+ .l = 0x35,
+ .config_ctl_val = 0x4001055b,
+ .alpha_hi = 0x20,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0 << 20,
+ .vco_mask = 0x3 << 20,
+ .aux2_output_mask = BIT(2),
+};
+
+/* 930MHz configuration */
+static const struct alpha_pll_config gpu_pll1_config = {
+ .l = 0x30,
+ .config_ctl_val = 0x4001055b,
+ .alpha_hi = 0x70,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll0_out_aux2 = {
+ .offset = 0x0,
+ .vco_table = gpu_cc_pll_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0_out_aux2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpu_cc_pll1_out_aux2 = {
+ .offset = 0x100,
+ .vco_table = gpu_cc_pll_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1_out_aux2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_AUX2, 2 },
+ { P_GPU_CC_PLL1_OUT_AUX2, 4 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0_out_aux2.clkr.hw },
+ { .hw = &gpu_cc_pll1_out_aux2.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(320000000, P_GPU_CC_PLL1_OUT_AUX2, 2, 0, 0),
+ F(465000000, P_GPU_CC_PLL1_OUT_AUX2, 2, 0, 0),
+ F(600000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(745000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(820000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(900000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(950000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gpu_cc_sm6125_clocks[] = {
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_PLL0_OUT_AUX2] = &gpu_cc_pll0_out_aux2.clkr,
+ [GPU_CC_PLL1_OUT_AUX2] = &gpu_cc_pll1_out_aux2.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static struct gdsc *gpucc_sm6125_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm6125_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm6125_desc = {
+ .config = &gpu_cc_sm6125_regmap_config,
+ .clks = gpu_cc_sm6125_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm6125_clocks),
+ .gdscs = gpucc_sm6125_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpucc_sm6125_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm6125_match_table[] = {
+ { .compatible = "qcom,sm6125-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm6125_match_table);
+
+static int gpu_cc_sm6125_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm6125_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&gpu_cc_pll0_out_aux2, regmap, &gpu_pll0_config);
+ clk_alpha_pll_configure(&gpu_cc_pll1_out_aux2, regmap, &gpu_pll1_config);
+
+ /* Set recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ qcom_branch_set_wakeup(regmap, gpu_cc_cx_gmu_clk, 0xf);
+ qcom_branch_set_sleep(regmap, gpu_cc_cx_gmu_clk, 0xf);
+
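+	/* Force the gfx3d branch's memory core and peripheral logic to remain on */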
+ qcom_branch_set_force_mem_core(regmap, gpu_cc_gx_gfx3d_clk, true);
+ qcom_branch_set_force_periph_on(regmap, gpu_cc_gx_gfx3d_clk, true);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm6125_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm6125_driver = {
+ .probe = gpu_cc_sm6125_probe,
+ .driver = {
+ .name = "gpucc-sm6125",
+ .of_match_table = gpu_cc_sm6125_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sm6125_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC SM6125 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sm6375.c b/drivers/clk/qcom/gpucc-sm6375.c
new file mode 100644
index 000000000000..d3620344a009
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm6375.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6375-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GCC_GPU_GPLL0_CLK_SRC,
+ DT_GCC_GPU_GPLL0_DIV_CLK_SRC,
+ DT_GCC_GPU_SNOC_DVM_GFX_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPU_GPLL0_CLK_SRC,
+ P_GCC_GPU_GPLL0_DIV_CLK_SRC,
+ P_GPU_CC_PLL0_OUT_EVEN,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_OUT_ODD,
+ P_GPU_CC_PLL1_OUT_EVEN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 532MHz Configuration */
+static const struct alpha_pll_config gpucc_pll0_config = {
+ .l = 0x1b,
+ .alpha = 0xb555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpucc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+				.index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+/* 514MHz Configuration */
+static const struct alpha_pll_config gpucc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xc555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpucc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+				.index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gpucc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpucc_parent_data_0[] = {
+	{ .index = DT_BI_TCXO },
+ { .hw = &gpucc_pll0.clkr.hw },
+ { .hw = &gpucc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpucc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_EVEN, 1 },
+ { P_GPU_CC_PLL0_OUT_ODD, 2 },
+ { P_GPU_CC_PLL1_OUT_EVEN, 3 },
+ { P_GPU_CC_PLL1_OUT_ODD, 4 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+};
+
+static const struct clk_parent_data gpucc_parent_data_1[] = {
+	{ .index = DT_BI_TCXO },
+ { .hw = &gpucc_pll0.clkr.hw },
+ { .hw = &gpucc_pll0.clkr.hw },
+ { .hw = &gpucc_pll1.clkr.hw },
+ { .hw = &gpucc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+};
+
+static const struct freq_tbl ftbl_gpucc_gmu_clk_src[] = {
+ F(200000000, P_GCC_GPU_GPLL0_DIV_CLK_SRC, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpucc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_gpucc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpucc_gmu_clk_src",
+ .parent_data = gpucc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpucc_gx_gfx3d_clk_src[] = {
+ F(266000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(390000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(490000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(650000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(770000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(840000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(900000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpucc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_1,
+ .freq_tbl = ftbl_gpucc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpucc_gx_gfx3d_clk_src",
+ .parent_data = gpucc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpucc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpucc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_cx_gfx3d_slv_clk = {
+ .halt_reg = 0x10a8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cx_gfx3d_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpucc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpucc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cx_snoc_dvm_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_GCC_GPU_SNOC_DVM_GFX_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_gx_cxo_clk = {
+ .halt_reg = 0x1060,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_gx_cxo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpucc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpucc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .resets = (unsigned int []){ GPU_GX_BCR, GPU_ACD_BCR, GPU_GX_ACD_MISC_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | SW_RESET | AON_RESET,
+};
+
+static struct clk_regmap *gpucc_sm6375_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpucc_ahb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpucc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GFX3D_SLV_CLK] = &gpucc_cx_gfx3d_slv_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpucc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpucc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpucc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpucc_gmu_clk_src.clkr,
+ [GPU_CC_GX_CXO_CLK] = &gpucc_gx_cxo_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpucc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpucc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpucc_gx_gmu_clk.clkr,
+ [GPU_CC_PLL0] = &gpucc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpucc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpucc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpucc_sm6375_resets[] = {
+ [GPU_GX_BCR] = { 0x1008 },
+ [GPU_ACD_BCR] = { 0x1160 },
+ [GPU_GX_ACD_MISC_BCR] = { 0x8004 },
+};
+
+static struct gdsc *gpucc_sm6375_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpucc_sm6375_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_sm6375_desc = {
+ .config = &gpucc_sm6375_regmap_config,
+ .clks = gpucc_sm6375_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_sm6375_clocks),
+ .resets = gpucc_sm6375_resets,
+ .num_resets = ARRAY_SIZE(gpucc_sm6375_resets),
+ .gdscs = gpucc_sm6375_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpucc_sm6375_gdscs),
+};
+
+static const struct of_device_id gpucc_sm6375_match_table[] = {
+ { .compatible = "qcom,sm6375-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_sm6375_match_table);
+
+static int gpucc_sm6375_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpucc_sm6375_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&gpucc_pll0, regmap, &gpucc_pll0_config);
+ clk_lucid_pll_configure(&gpucc_pll1, regmap, &gpucc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpucc_sm6375_desc, regmap);
+}
+
+static struct platform_driver gpucc_sm6375_driver = {
+ .probe = gpucc_sm6375_probe,
+ .driver = {
+ .name = "gpucc-sm6375",
+ .of_match_table = gpucc_sm6375_match_table,
+ },
+};
+module_platform_driver(gpucc_sm6375_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC SM6375 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
index 2f9287c263ec..410ae8390f1c 100644
--- a/drivers/clk/qcom/krait-cc.c
+++ b/drivers/clk/qcom/krait-cc.c
@@ -376,8 +376,8 @@ static int krait_cc_probe(struct platform_device *pdev)
for_each_possible_cpu(cpu) {
mux = krait_add_clks(dev, cpu, id->data);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
clks[cpu] = mux->clk;
}
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 1339f9211a14..134eb1529ede 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -696,6 +696,8 @@ static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
.config = &lpass_audio_cc_sc7280_regmap_config,
.clks = lpass_cc_sc7280_clocks,
.num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
+ .gdscs = lpass_aon_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(lpass_aon_cc_sc7280_gdscs),
};
static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index 5c1e17bd0d76..0df2b29e95e3 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -107,10 +107,13 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
const struct qcom_cc_desc *desc;
int ret;
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
ret = pm_clk_create(&pdev->dev);
if (ret)
- goto disable_pm_runtime;
+ return ret;
ret = pm_clk_add(&pdev->dev, "iface");
if (ret < 0) {
@@ -118,14 +121,18 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
goto destroy_pm_clk;
}
- lpass_regmap_config.name = "qdsp6ss";
- desc = &lpass_qdsp6ss_sc7280_desc;
+ if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ lpass_regmap_config.name = "qdsp6ss";
+ lpass_regmap_config.max_register = 0x3f;
+ desc = &lpass_qdsp6ss_sc7280_desc;
- ret = qcom_cc_probe_by_index(pdev, 0, desc);
- if (ret)
- goto destroy_pm_clk;
+ ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ if (ret)
+ goto destroy_pm_clk;
+ }
lpass_regmap_config.name = "top_cc";
+ lpass_regmap_config.max_register = 0x4;
desc = &lpass_cc_top_sc7280_desc;
ret = qcom_cc_probe_by_index(pdev, 1, desc);
@@ -137,9 +144,6 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
destroy_pm_clk:
pm_clk_destroy(&pdev->dev);
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 33ed91c67e1c..010867dcc2ef 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -93,8 +93,8 @@ static struct clk_alpha_pll_postdiv lpass_lpaaudio_dig_pll_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "lpass_lpaaudio_dig_pll_out_odd",
- .parent_data = &(const struct clk_parent_data){
- .hw = &lpass_lpaaudio_dig_pll.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &lpass_lpaaudio_dig_pll.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -210,8 +210,8 @@ static struct clk_branch lpass_audio_core_ext_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "lpass_audio_core_ext_mclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ext_mclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &ext_mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -230,8 +230,8 @@ static struct clk_branch lpass_audio_core_lpaif_pri_ibit_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "lpass_audio_core_lpaif_pri_ibit_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &lpaif_pri_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &lpaif_pri_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -250,8 +250,8 @@ static struct clk_branch lpass_audio_core_lpaif_sec_ibit_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "lpass_audio_core_lpaif_sec_ibit_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &lpaif_sec_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &lpaif_sec_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -270,8 +270,8 @@ static struct clk_branch lpass_audio_core_sysnoc_mport_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "lpass_audio_core_sysnoc_mport_core_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &core_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]) {
+ &core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index e9f971359155..02fc21208dd1 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -40,6 +40,120 @@ enum {
P_MMSLEEP,
};
+static struct clk_pll mmpll0 = {
+ .l_reg = 0x0004,
+ .m_reg = 0x0008,
+ .n_reg = 0x000c,
+ .config_reg = 0x0014,
+ .mode_reg = 0x0000,
+ .status_reg = 0x001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap mmpll0_vote = {
+ .enable_reg = 0x0100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll0.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll mmpll1 = {
+ .l_reg = 0x0044,
+ .m_reg = 0x0048,
+ .n_reg = 0x004c,
+ .config_reg = 0x0050,
+ .mode_reg = 0x0040,
+ .status_reg = 0x005c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap mmpll1_vote = {
+ .enable_reg = 0x0100,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll1_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll1.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll mmpll2 = {
+ .l_reg = 0x4104,
+ .m_reg = 0x4108,
+ .n_reg = 0x410c,
+ .config_reg = 0x4110,
+ .mode_reg = 0x4100,
+ .status_reg = 0x411c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll2",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_pll mmpll3 = {
+ .l_reg = 0x0084,
+ .m_reg = 0x0088,
+ .n_reg = 0x008c,
+ .config_reg = 0x0090,
+ .mode_reg = 0x0080,
+ .status_reg = 0x009c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_pll mmpll4 = {
+ .l_reg = 0x00a4,
+ .m_reg = 0x00a8,
+ .n_reg = 0x00ac,
+ .config_reg = 0x00b0,
+ .mode_reg = 0x0080,
+ .status_reg = 0x00bc,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
{ P_XO, 0 },
{ P_MMPLL0, 1 },
@@ -47,11 +161,11 @@ static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
{ P_GPLL0, 5 }
};
-static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = {
- "xo",
- "mmpll0_vote",
- "mmpll1_vote",
- "mmss_gpll0_vote",
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0_vote.hw },
+ { .hw = &mmpll1_vote.hw },
+ { .fw_name = "mmss_gpll0_vote", .name = "mmss_gpll0_vote" },
};
static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
@@ -63,13 +177,13 @@ static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
{ P_DSI1PLL, 3 }
};
-static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
- "xo",
- "mmpll0_vote",
- "hdmipll",
- "mmss_gpll0_vote",
- "dsi0pll",
- "dsi1pll",
+static const struct clk_parent_data mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0_vote.hw },
+ { .fw_name = "hdmipll", .name = "hdmipll" },
+ { .fw_name = "mmss_gpll0_vote", .name = "mmss_gpll0_vote" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
};
static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
@@ -80,12 +194,12 @@ static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
{ P_MMPLL2, 3 }
};
-static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
- "xo",
- "mmpll0_vote",
- "mmpll1_vote",
- "mmss_gpll0_vote",
- "mmpll2",
+static const struct clk_parent_data mmcc_xo_mmpll0_1_2_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0_vote.hw },
+ { .hw = &mmpll1_vote.hw },
+ { .fw_name = "mmss_gpll0_vote", .name = "mmss_gpll0_vote" },
+ { .hw = &mmpll2.clkr.hw },
};
static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
@@ -96,12 +210,12 @@ static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
{ P_MMPLL3, 3 }
};
-static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = {
- "xo",
- "mmpll0_vote",
- "mmpll1_vote",
- "mmss_gpll0_vote",
- "mmpll3",
+static const struct clk_parent_data mmcc_xo_mmpll0_1_3_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0_vote.hw },
+ { .hw = &mmpll1_vote.hw },
+ { .fw_name = "mmss_gpll0_vote", .name = "mmss_gpll0_vote" },
+ { .hw = &mmpll3.clkr.hw },
};
static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
@@ -113,13 +227,13 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
{ P_DSI1PLL, 2 }
};
-static const char * const mmcc_xo_dsi_hdmi_edp[] = {
- "xo",
- "edp_link_clk",
- "hdmipll",
- "edp_vco_div",
- "dsi0pll",
- "dsi1pll",
+static const struct clk_parent_data mmcc_xo_dsi_hdmi_edp[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "edp_link_clk", .name = "edp_link_clk" },
+ { .fw_name = "hdmipll", .name = "hdmipll" },
+ { .fw_name = "edp_vco_div", .name = "edp_vco_div" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
};
static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
@@ -131,13 +245,13 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
{ P_DSI1PLL, 2 }
};
-static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = {
- "xo",
- "edp_link_clk",
- "hdmipll",
- "gpll0_vote",
- "dsi0pll",
- "dsi1pll",
+static const struct clk_parent_data mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "edp_link_clk", .name = "edp_link_clk" },
+ { .fw_name = "hdmipll", .name = "hdmipll" },
+ { .fw_name = "gpll0_vote", .name = "gpll0_vote" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
};
static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
@@ -149,13 +263,13 @@ static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
{ P_DSI1PLL_BYTE, 2 }
};
-static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
- "xo",
- "edp_link_clk",
- "hdmipll",
- "gpll0_vote",
- "dsi0pllbyte",
- "dsi1pllbyte",
+static const struct clk_parent_data mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "edp_link_clk", .name = "edp_link_clk" },
+ { .fw_name = "hdmipll", .name = "hdmipll" },
+ { .fw_name = "gpll0_vote", .name = "gpll0_vote" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" },
};
static const struct parent_map mmcc_xo_mmpll0_1_4_gpll0_map[] = {
@@ -166,12 +280,12 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll0_map[] = {
{ P_MMPLL4, 3 }
};
-static const char * const mmcc_xo_mmpll0_1_4_gpll0[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "mmpll4",
- "gpll0",
+static const struct clk_parent_data mmcc_xo_mmpll0_1_4_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
};
static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
@@ -183,13 +297,13 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
{ P_GPLL1, 4 }
};
-static const char * const mmcc_xo_mmpll0_1_4_gpll1_0[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "mmpll4",
- "gpll1",
- "gpll0",
+static const struct clk_parent_data mmcc_xo_mmpll0_1_4_gpll1_0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .fw_name = "gpll1", .name = "gpll1" },
+ { .fw_name = "gpll0", .name = "gpll0" },
};
static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
@@ -202,114 +316,14 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
{ P_MMSLEEP, 6 }
};
-static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "mmpll4",
- "gpll1",
- "gpll0",
- "sleep_clk_src",
-};
-
-static struct clk_pll mmpll0 = {
- .l_reg = 0x0004,
- .m_reg = 0x0008,
- .n_reg = 0x000c,
- .config_reg = 0x0014,
- .mode_reg = 0x0000,
- .status_reg = 0x001c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "mmpll0",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap mmpll0_vote = {
- .enable_reg = 0x0100,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmpll0_vote",
- .parent_names = (const char *[]){ "mmpll0" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll mmpll1 = {
- .l_reg = 0x0044,
- .m_reg = 0x0048,
- .n_reg = 0x004c,
- .config_reg = 0x0050,
- .mode_reg = 0x0040,
- .status_reg = 0x005c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "mmpll1",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap mmpll1_vote = {
- .enable_reg = 0x0100,
- .enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data){
- .name = "mmpll1_vote",
- .parent_names = (const char *[]){ "mmpll1" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll mmpll2 = {
- .l_reg = 0x4104,
- .m_reg = 0x4108,
- .n_reg = 0x410c,
- .config_reg = 0x4110,
- .mode_reg = 0x4100,
- .status_reg = 0x411c,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "mmpll2",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_pll mmpll3 = {
- .l_reg = 0x0084,
- .m_reg = 0x0088,
- .n_reg = 0x008c,
- .config_reg = 0x0090,
- .mode_reg = 0x0080,
- .status_reg = 0x009c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "mmpll3",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_pll mmpll4 = {
- .l_reg = 0x00a4,
- .m_reg = 0x00a8,
- .n_reg = 0x00ac,
- .config_reg = 0x00b0,
- .mode_reg = 0x0080,
- .status_reg = 0x00bc,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "mmpll4",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
+static const struct clk_parent_data mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .fw_name = "gpll1", .name = "gpll1" },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static struct clk_rcg2 mmss_ahb_clk_src = {
@@ -318,8 +332,8 @@ static struct clk_rcg2 mmss_ahb_clk_src = {
.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmss_ahb_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -343,8 +357,8 @@ static struct clk_rcg2 mmss_axi_clk_src = {
.freq_tbl = ftbl_mmss_axi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmss_axi_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -367,8 +381,8 @@ static struct clk_rcg2 ocmemnoc_clk_src = {
.freq_tbl = ftbl_ocmemnoc_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ocmemnoc_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -386,8 +400,8 @@ static struct clk_rcg2 csi0_clk_src = {
.freq_tbl = ftbl_camss_csi0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -399,8 +413,8 @@ static struct clk_rcg2 csi1_clk_src = {
.freq_tbl = ftbl_camss_csi0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -412,8 +426,8 @@ static struct clk_rcg2 csi2_clk_src = {
.freq_tbl = ftbl_camss_csi0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -425,8 +439,8 @@ static struct clk_rcg2 csi3_clk_src = {
.freq_tbl = ftbl_camss_csi0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -455,8 +469,8 @@ static struct clk_rcg2 vfe0_clk_src = {
.freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -468,8 +482,8 @@ static struct clk_rcg2 vfe1_clk_src = {
.freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -496,8 +510,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_mdss_mdp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_dsi_hdmi_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -508,8 +522,8 @@ static struct clk_rcg2 gfx3d_clk_src = {
.parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_2_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_2_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -531,8 +545,8 @@ static struct clk_rcg2 jpeg0_clk_src = {
.freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -544,8 +558,8 @@ static struct clk_rcg2 jpeg1_clk_src = {
.freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg1_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -557,8 +571,8 @@ static struct clk_rcg2 jpeg2_clk_src = {
.freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg2_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -570,8 +584,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi_hdmi_edp_gpll0),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -584,8 +598,8 @@ static struct clk_rcg2 pclk1_clk_src = {
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
- .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi_hdmi_edp_gpll0),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -609,8 +623,8 @@ static struct clk_rcg2 vcodec0_clk_src = {
.freq_tbl = ftbl_venus0_vcodec0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vcodec0_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_3_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_3_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_3_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -628,8 +642,8 @@ static struct clk_rcg2 vp_clk_src = {
.freq_tbl = ftbl_avsync_vp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vp_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -647,8 +661,8 @@ static struct clk_rcg2 cci_clk_src = {
.freq_tbl = ftbl_camss_cci_cci_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0),
.ops = &clk_rcg2_ops,
},
};
@@ -671,8 +685,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_camss_gp0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
- .num_parents = 7,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -685,8 +699,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_camss_gp0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
- .num_parents = 7,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -713,8 +727,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_camss_mclk0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0),
.ops = &clk_rcg2_ops,
},
};
@@ -727,8 +741,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_camss_mclk0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0),
.ops = &clk_rcg2_ops,
},
};
@@ -741,8 +755,8 @@ static struct clk_rcg2 mclk2_clk_src = {
.freq_tbl = ftbl_camss_mclk0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0),
.ops = &clk_rcg2_ops,
},
};
@@ -755,8 +769,8 @@ static struct clk_rcg2 mclk3_clk_src = {
.freq_tbl = ftbl_camss_mclk0_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll1_0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll1_0),
.ops = &clk_rcg2_ops,
},
};
@@ -774,8 +788,8 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -787,8 +801,8 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -800,8 +814,8 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -823,8 +837,8 @@ static struct clk_rcg2 cpp_clk_src = {
.freq_tbl = ftbl_camss_vfe_cpp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
- .num_parents = 5,
+ .parent_data = mmcc_xo_mmpll0_1_4_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_1_4_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -835,8 +849,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte_hdmi_edp_gpll0),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -848,8 +862,8 @@ static struct clk_rcg2 byte1_clk_src = {
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
- .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte_hdmi_edp_gpll0),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -867,8 +881,8 @@ static struct clk_rcg2 edpaux_clk_src = {
.freq_tbl = ftbl_mdss_edpaux_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "edpaux_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -886,8 +900,8 @@ static struct clk_rcg2 edplink_clk_src = {
.freq_tbl = ftbl_mdss_edplink_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "edplink_clk_src",
- .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi_hdmi_edp_gpll0),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -906,8 +920,8 @@ static struct clk_rcg2 edppixel_clk_src = {
.freq_tbl = edp_pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "edppixel_clk_src",
- .parent_names = mmcc_xo_dsi_hdmi_edp,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsi_hdmi_edp,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi_hdmi_edp),
.ops = &clk_edp_pixel_ops,
},
};
@@ -924,8 +938,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte_hdmi_edp_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -937,8 +951,8 @@ static struct clk_rcg2 esc1_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
- .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte_hdmi_edp_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -955,8 +969,8 @@ static struct clk_rcg2 extpclk_clk_src = {
.freq_tbl = extpclk_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "extpclk_clk_src",
- .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
- .num_parents = 6,
+ .parent_data = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi_hdmi_edp_gpll0),
.ops = &clk_byte_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -974,8 +988,8 @@ static struct clk_rcg2 hdmi_clk_src = {
.freq_tbl = ftbl_mdss_hdmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -992,8 +1006,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_mdss_vsync_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1010,8 +1024,8 @@ static struct clk_rcg2 rbcpr_clk_src = {
.freq_tbl = ftbl_mmss_rbcpr_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1028,8 +1042,8 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.freq_tbl = ftbl_oxili_rbbmtimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1051,8 +1065,8 @@ static struct clk_rcg2 maple_clk_src = {
.freq_tbl = ftbl_vpu_maple_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "maple_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1073,8 +1087,8 @@ static struct clk_rcg2 vdp_clk_src = {
.freq_tbl = ftbl_vpu_vdp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vdp_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1092,8 +1106,8 @@ static struct clk_rcg2 vpu_bus_clk_src = {
.freq_tbl = ftbl_vpu_bus_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vpu_bus_clk_src",
- .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
- .num_parents = 4,
+ .parent_data = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll1_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1105,7 +1119,9 @@ static struct clk_branch mmss_cxo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_cxo_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1120,8 +1136,8 @@ static struct clk_branch mmss_sleepclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_sleepclk_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1137,8 +1153,8 @@ static struct clk_branch avsync_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "avsync_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1154,8 +1170,8 @@ static struct clk_branch avsync_edppixel_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "avsync_edppixel_clk",
- .parent_names = (const char *[]){
- "edppixel_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &edppixel_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1171,8 +1187,8 @@ static struct clk_branch avsync_extpclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "avsync_extpclk_clk",
- .parent_names = (const char *[]){
- "extpclk_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &extpclk_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1188,8 +1204,8 @@ static struct clk_branch avsync_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "avsync_pclk0_clk",
- .parent_names = (const char *[]){
- "pclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1205,8 +1221,8 @@ static struct clk_branch avsync_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "avsync_pclk1_clk",
- .parent_names = (const char *[]){
- "pclk1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1222,8 +1238,8 @@ static struct clk_branch avsync_vp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "avsync_vp_clk",
- .parent_names = (const char *[]){
- "vp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vp_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1239,8 +1255,8 @@ static struct clk_branch camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1256,8 +1272,8 @@ static struct clk_branch camss_cci_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_cci_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1272,8 +1288,8 @@ static struct clk_branch camss_cci_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_cci_clk",
- .parent_names = (const char *[]){
- "cci_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1289,8 +1305,8 @@ static struct clk_branch camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1305,8 +1321,8 @@ static struct clk_branch camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1322,8 +1338,8 @@ static struct clk_branch camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phy_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1339,8 +1355,8 @@ static struct clk_branch camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0pix_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1356,8 +1372,8 @@ static struct clk_branch camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0rdi_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1373,8 +1389,8 @@ static struct clk_branch camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1390,8 +1406,8 @@ static struct clk_branch camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1407,8 +1423,8 @@ static struct clk_branch camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phy_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1424,8 +1440,8 @@ static struct clk_branch camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1pix_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1441,8 +1457,8 @@ static struct clk_branch camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1rdi_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1458,8 +1474,8 @@ static struct clk_branch camss_csi2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1474,8 +1490,8 @@ static struct clk_branch camss_csi2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_clk",
- .parent_names = (const char *[]){
- "csi2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1491,8 +1507,8 @@ static struct clk_branch camss_csi2phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phy_clk",
- .parent_names = (const char *[]){
- "csi2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1508,8 +1524,8 @@ static struct clk_branch camss_csi2pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2pix_clk",
- .parent_names = (const char *[]){
- "csi2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1525,8 +1541,8 @@ static struct clk_branch camss_csi2rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2rdi_clk",
- .parent_names = (const char *[]){
- "csi2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1542,8 +1558,8 @@ static struct clk_branch camss_csi3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1558,8 +1574,8 @@ static struct clk_branch camss_csi3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_clk",
- .parent_names = (const char *[]){
- "csi3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1575,8 +1591,8 @@ static struct clk_branch camss_csi3phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3phy_clk",
- .parent_names = (const char *[]){
- "csi3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1592,8 +1608,8 @@ static struct clk_branch camss_csi3pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3pix_clk",
- .parent_names = (const char *[]){
- "csi3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1609,8 +1625,8 @@ static struct clk_branch camss_csi3rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3rdi_clk",
- .parent_names = (const char *[]){
- "csi3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1626,8 +1642,8 @@ static struct clk_branch camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe0_clk",
- .parent_names = (const char *[]){
- "vfe0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1643,8 +1659,8 @@ static struct clk_branch camss_csi_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe1_clk",
- .parent_names = (const char *[]){
- "vfe1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1660,8 +1676,8 @@ static struct clk_branch camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk",
- .parent_names = (const char *[]){
- "camss_gp0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1677,8 +1693,8 @@ static struct clk_branch camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk",
- .parent_names = (const char *[]){
- "camss_gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1694,8 +1710,8 @@ static struct clk_branch camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ispif_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1711,8 +1727,8 @@ static struct clk_branch camss_jpeg_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_jpeg0_clk",
- .parent_names = (const char *[]){
- "jpeg0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1728,8 +1744,8 @@ static struct clk_branch camss_jpeg_jpeg1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_jpeg1_clk",
- .parent_names = (const char *[]){
- "jpeg1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1745,8 +1761,8 @@ static struct clk_branch camss_jpeg_jpeg2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_jpeg2_clk",
- .parent_names = (const char *[]){
- "jpeg2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg2_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1762,8 +1778,8 @@ static struct clk_branch camss_jpeg_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_jpeg_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1778,8 +1794,8 @@ static struct clk_branch camss_jpeg_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_jpeg_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1794,8 +1810,8 @@ static struct clk_branch camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk0_clk",
- .parent_names = (const char *[]){
- "mclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1811,8 +1827,8 @@ static struct clk_branch camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk1_clk",
- .parent_names = (const char *[]){
- "mclk1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1828,8 +1844,8 @@ static struct clk_branch camss_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk2_clk",
- .parent_names = (const char *[]){
- "mclk2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk2_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1845,8 +1861,8 @@ static struct clk_branch camss_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk3_clk",
- .parent_names = (const char *[]){
- "mclk3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk3_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1862,8 +1878,8 @@ static struct clk_branch camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_micro_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1878,8 +1894,8 @@ static struct clk_branch camss_phy0_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_phy0_csi0phytimer_clk",
- .parent_names = (const char *[]){
- "csi0phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1895,8 +1911,8 @@ static struct clk_branch camss_phy1_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_phy1_csi1phytimer_clk",
- .parent_names = (const char *[]){
- "csi1phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1912,8 +1928,8 @@ static struct clk_branch camss_phy2_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_phy2_csi2phytimer_clk",
- .parent_names = (const char *[]){
- "csi2phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2phytimer_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1929,8 +1945,8 @@ static struct clk_branch camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_top_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1946,8 +1962,8 @@ static struct clk_branch camss_vfe_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_cpp_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1963,8 +1979,8 @@ static struct clk_branch camss_vfe_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_cpp_clk",
- .parent_names = (const char *[]){
- "cpp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1980,8 +1996,8 @@ static struct clk_branch camss_vfe_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_vfe0_clk",
- .parent_names = (const char *[]){
- "vfe0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1997,8 +2013,8 @@ static struct clk_branch camss_vfe_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_vfe1_clk",
- .parent_names = (const char *[]){
- "vfe1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2014,8 +2030,8 @@ static struct clk_branch camss_vfe_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_vfe_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2031,8 +2047,8 @@ static struct clk_branch camss_vfe_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_vfe_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2048,8 +2064,8 @@ static struct clk_branch mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2065,8 +2081,8 @@ static struct clk_branch mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2082,8 +2098,8 @@ static struct clk_branch mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte0_clk",
- .parent_names = (const char *[]){
- "byte0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2099,8 +2115,8 @@ static struct clk_branch mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte1_clk",
- .parent_names = (const char *[]){
- "byte1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2116,8 +2132,8 @@ static struct clk_branch mdss_edpaux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_edpaux_clk",
- .parent_names = (const char *[]){
- "edpaux_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &edpaux_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2133,8 +2149,8 @@ static struct clk_branch mdss_edplink_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_edplink_clk",
- .parent_names = (const char *[]){
- "edplink_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &edplink_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2150,8 +2166,8 @@ static struct clk_branch mdss_edppixel_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_edppixel_clk",
- .parent_names = (const char *[]){
- "edppixel_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &edppixel_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2167,8 +2183,8 @@ static struct clk_branch mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc0_clk",
- .parent_names = (const char *[]){
- "esc0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2184,8 +2200,8 @@ static struct clk_branch mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc1_clk",
- .parent_names = (const char *[]){
- "esc1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2201,8 +2217,8 @@ static struct clk_branch mdss_extpclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_extpclk_clk",
- .parent_names = (const char *[]){
- "extpclk_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &extpclk_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2218,8 +2234,8 @@ static struct clk_branch mdss_hdmi_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2235,8 +2251,8 @@ static struct clk_branch mdss_hdmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_clk",
- .parent_names = (const char *[]){
- "hdmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &hdmi_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2252,8 +2268,8 @@ static struct clk_branch mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_mdp_clk",
- .parent_names = (const char *[]){
- "mdp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2269,8 +2285,8 @@ static struct clk_branch mdss_mdp_lut_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_mdp_lut_clk",
- .parent_names = (const char *[]){
- "mdp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2286,8 +2302,8 @@ static struct clk_branch mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk0_clk",
- .parent_names = (const char *[]){
- "pclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2303,8 +2319,8 @@ static struct clk_branch mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk1_clk",
- .parent_names = (const char *[]){
- "pclk1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2320,8 +2336,8 @@ static struct clk_branch mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_vsync_clk",
- .parent_names = (const char *[]){
- "vsync_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2337,8 +2353,8 @@ static struct clk_branch mmss_rbcpr_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2354,127 +2370,8 @@ static struct clk_branch mmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_clk",
- .parent_names = (const char *[]){
- "rbcpr_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_ahb_clk = {
- .halt_reg = 0x0230,
- .clkr = {
- .enable_reg = 0x0230,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_ahb_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_axi_clk = {
- .halt_reg = 0x0210,
- .clkr = {
- .enable_reg = 0x0210,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_axi_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_axi_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_csi0_clk = {
- .halt_reg = 0x023c,
- .clkr = {
- .enable_reg = 0x023c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_csi0_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_csi0_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_gfx3d_clk = {
- .halt_reg = 0x022c,
- .clkr = {
- .enable_reg = 0x022c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_gfx3d_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_gfx3d_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_jpeg0_clk = {
- .halt_reg = 0x0204,
- .clkr = {
- .enable_reg = 0x0204,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_jpeg0_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_jpeg0_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_jpeg1_clk = {
- .halt_reg = 0x0208,
- .clkr = {
- .enable_reg = 0x0208,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_jpeg1_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_jpeg1_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_jpeg2_clk = {
- .halt_reg = 0x0224,
- .clkr = {
- .enable_reg = 0x0224,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_jpeg2_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_jpeg2_div_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &rbcpr_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2483,143 +2380,6 @@ static struct clk_branch mmss_spdm_jpeg2_clk = {
},
};
-static struct clk_branch mmss_spdm_mdp_clk = {
- .halt_reg = 0x020c,
- .clkr = {
- .enable_reg = 0x020c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_mdp_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_mdp_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_pclk0_clk = {
- .halt_reg = 0x0234,
- .clkr = {
- .enable_reg = 0x0234,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_pclk0_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_pclk0_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_pclk1_clk = {
- .halt_reg = 0x0228,
- .clkr = {
- .enable_reg = 0x0228,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_pclk1_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_pclk1_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_vcodec0_clk = {
- .halt_reg = 0x0214,
- .clkr = {
- .enable_reg = 0x0214,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_vcodec0_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_vcodec0_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_vfe0_clk = {
- .halt_reg = 0x0218,
- .clkr = {
- .enable_reg = 0x0218,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_vfe0_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_vfe0_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_vfe1_clk = {
- .halt_reg = 0x021c,
- .clkr = {
- .enable_reg = 0x021c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_vfe1_clk",
- .parent_names = (const char *[]){
- "mmss_spdm_vfe1_div_clk",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_rm_axi_clk = {
- .halt_reg = 0x0304,
- .clkr = {
- .enable_reg = 0x0304,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_rm_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mmss_spdm_rm_ocmemnoc_clk = {
- .halt_reg = 0x0308,
- .clkr = {
- .enable_reg = 0x0308,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_spdm_rm_ocmemnoc_clk",
- .parent_names = (const char *[]){
- "ocmemnoc_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-
static struct clk_branch mmss_misc_ahb_clk = {
.halt_reg = 0x502c,
.clkr = {
@@ -2627,8 +2387,8 @@ static struct clk_branch mmss_misc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2644,8 +2404,8 @@ static struct clk_branch mmss_mmssnoc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmssnoc_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2661,8 +2421,8 @@ static struct clk_branch mmss_mmssnoc_bto_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmssnoc_bto_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2678,8 +2438,8 @@ static struct clk_branch mmss_mmssnoc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmssnoc_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
@@ -2695,8 +2455,8 @@ static struct clk_branch mmss_s0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_s0_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2712,8 +2472,8 @@ static struct clk_branch ocmemcx_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "ocmemcx_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2729,8 +2489,8 @@ static struct clk_branch ocmemcx_ocmemnoc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "ocmemcx_ocmemnoc_clk",
- .parent_names = (const char *[]){
- "ocmemnoc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ocmemnoc_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2746,8 +2506,8 @@ static struct clk_branch oxili_ocmemgx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "oxili_ocmemgx_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2763,8 +2523,8 @@ static struct clk_branch oxili_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "oxili_gfx3d_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2780,8 +2540,8 @@ static struct clk_branch oxili_rbbmtimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "oxili_rbbmtimer_clk",
- .parent_names = (const char *[]){
- "rbbmtimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &rbbmtimer_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2797,8 +2557,8 @@ static struct clk_branch oxilicx_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "oxilicx_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2814,8 +2574,8 @@ static struct clk_branch venus0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "venus0_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2831,8 +2591,8 @@ static struct clk_branch venus0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "venus0_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2848,8 +2608,8 @@ static struct clk_branch venus0_core0_vcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "venus0_core0_vcodec_clk",
- .parent_names = (const char *[]){
- "vcodec0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2865,8 +2625,8 @@ static struct clk_branch venus0_core1_vcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "venus0_core1_vcodec_clk",
- .parent_names = (const char *[]){
- "vcodec0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2882,8 +2642,8 @@ static struct clk_branch venus0_ocmemnoc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "venus0_ocmemnoc_clk",
- .parent_names = (const char *[]){
- "ocmemnoc_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ocmemnoc_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2899,8 +2659,8 @@ static struct clk_branch venus0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "venus0_vcodec0_clk",
- .parent_names = (const char *[]){
- "vcodec0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2916,8 +2676,8 @@ static struct clk_branch vpu_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_ahb_clk",
- .parent_names = (const char *[]){
- "mmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_ahb_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2933,8 +2693,8 @@ static struct clk_branch vpu_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_axi_clk",
- .parent_names = (const char *[]){
- "mmss_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mmss_axi_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2950,8 +2710,8 @@ static struct clk_branch vpu_bus_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_bus_clk",
- .parent_names = (const char *[]){
- "vpu_bus_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vpu_bus_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2967,7 +2727,9 @@ static struct clk_branch vpu_cxo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_cxo_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2982,8 +2744,8 @@ static struct clk_branch vpu_maple_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_maple_clk",
- .parent_names = (const char *[]){
- "maple_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &maple_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2999,8 +2761,8 @@ static struct clk_branch vpu_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3016,8 +2778,8 @@ static struct clk_branch vpu_vdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpu_vdp_clk",
- .parent_names = (const char *[]){
- "vdp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vdp_clk_src.clkr.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3252,21 +3014,6 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
[MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
[MMSS_RBCPR_AHB_CLK] = &mmss_rbcpr_ahb_clk.clkr,
[MMSS_RBCPR_CLK] = &mmss_rbcpr_clk.clkr,
- [MMSS_SPDM_AHB_CLK] = &mmss_spdm_ahb_clk.clkr,
- [MMSS_SPDM_AXI_CLK] = &mmss_spdm_axi_clk.clkr,
- [MMSS_SPDM_CSI0_CLK] = &mmss_spdm_csi0_clk.clkr,
- [MMSS_SPDM_GFX3D_CLK] = &mmss_spdm_gfx3d_clk.clkr,
- [MMSS_SPDM_JPEG0_CLK] = &mmss_spdm_jpeg0_clk.clkr,
- [MMSS_SPDM_JPEG1_CLK] = &mmss_spdm_jpeg1_clk.clkr,
- [MMSS_SPDM_JPEG2_CLK] = &mmss_spdm_jpeg2_clk.clkr,
- [MMSS_SPDM_MDP_CLK] = &mmss_spdm_mdp_clk.clkr,
- [MMSS_SPDM_PCLK0_CLK] = &mmss_spdm_pclk0_clk.clkr,
- [MMSS_SPDM_PCLK1_CLK] = &mmss_spdm_pclk1_clk.clkr,
- [MMSS_SPDM_VCODEC0_CLK] = &mmss_spdm_vcodec0_clk.clkr,
- [MMSS_SPDM_VFE0_CLK] = &mmss_spdm_vfe0_clk.clkr,
- [MMSS_SPDM_VFE1_CLK] = &mmss_spdm_vfe1_clk.clkr,
- [MMSS_SPDM_RM_AXI_CLK] = &mmss_spdm_rm_axi_clk.clkr,
- [MMSS_SPDM_RM_OCMEMNOC_CLK] = &mmss_spdm_rm_ocmemnoc_clk.clkr,
[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
[MMSS_MMSSNOC_AHB_CLK] = &mmss_mmssnoc_ahb_clk.clkr,
[MMSS_MMSSNOC_BTO_AHB_CLK] = &mmss_mmssnoc_bto_ahb_clk.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index c421b1291651..4490594bde69 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -44,7 +44,6 @@ enum {
P_HDMIPLL,
P_DPVCO,
P_DPLINK,
- P_CORE_BI_PLL_TEST_SE,
};
static struct clk_fixed_factor gpll0_div = {
@@ -303,69 +302,59 @@ static struct clk_alpha_pll_postdiv mmpll10_out_even = {
static const struct parent_map mmss_xo_hdmi_map[] = {
{ P_XO, 0 },
{ P_HDMIPLL, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_hdmi[] = {
{ .fw_name = "xo" },
{ .fw_name = "hdmipll" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL, 1 },
{ P_DSI1PLL, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
{ .fw_name = "xo" },
{ .fw_name = "dsi0dsi" },
{ .fw_name = "dsi1dsi" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dsibyte_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL_BYTE, 1 },
{ P_DSI1PLL_BYTE, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_dsibyte[] = {
{ .fw_name = "xo" },
{ .fw_name = "dsi0byte" },
{ .fw_name = "dsi1byte" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dp_map[] = {
{ P_XO, 0 },
{ P_DPLINK, 1 },
{ P_DPVCO, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_dp[] = {
{ .fw_name = "xo" },
{ .fw_name = "dplink" },
{ .fw_name = "dpvco" },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
{ .fw_name = "xo" },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
@@ -373,7 +362,6 @@ static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
{ P_MMPLL0_OUT_EVEN, 1 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
@@ -381,7 +369,6 @@ static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
{ .hw = &mmpll0_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
@@ -390,7 +377,6 @@ static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
{ P_MMPLL1_OUT_EVEN, 2 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
@@ -399,7 +385,6 @@ static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
{ .hw = &mmpll1_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
@@ -408,7 +393,6 @@ static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
{ P_MMPLL5_OUT_EVEN, 2 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
@@ -417,7 +401,6 @@ static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
{ .hw = &mmpll5_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[] = {
@@ -427,7 +410,6 @@ static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[
{ P_MMPLL6_OUT_EVEN, 4 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div[] = {
@@ -437,7 +419,6 @@ static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div
{ .hw = &mmpll6_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -447,7 +428,6 @@ static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map
{ P_MMPLL10_OUT_EVEN, 3 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
@@ -457,7 +437,6 @@ static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_di
{ .hw = &mmpll10_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -467,7 +446,6 @@ static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map
{ P_MMPLL10_OUT_EVEN, 3 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div[] = {
@@ -477,7 +455,6 @@ static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_di
{ .hw = &mmpll10_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -488,7 +465,6 @@ static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_
{ P_MMPLL10_OUT_EVEN, 4 },
{ P_GPLL0, 5 },
{ P_GPLL0_DIV, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 }
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
@@ -499,7 +475,6 @@ static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_g
{ .hw = &mmpll10_out_even.clkr.hw },
{ .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se" },
};
static struct clk_rcg2 byte0_clk_src = {
diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c
new file mode 100644
index 000000000000..2c67ee71c196
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-sm8550.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8550-tcsr.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_pcie_0_clkref_en = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_1_clkref_en = {
+ .halt_reg = 0x15114,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x15114,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_clkref_en = {
+ .halt_reg = 0x15110,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x15110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_ufs_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_pad_clkref_en = {
+ .halt_reg = 0x15104,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x15104,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_ufs_pad_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_clkref_en = {
+ .halt_reg = 0x15118,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x15118,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_clkref_en = {
+ .halt_reg = 0x15108,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x15108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_sm8550_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_UFS_CLKREF_EN] = &tcsr_ufs_clkref_en.clkr,
+ [TCSR_UFS_PAD_CLKREF_EN] = &tcsr_ufs_pad_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_sm8550_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x2f000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
+ .config = &tcsr_cc_sm8550_regmap_config,
+ .clks = tcsr_cc_sm8550_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sm8550_clocks),
+};
+
+static const struct of_device_id tcsr_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sm8550-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table);
+
+static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return qcom_cc_really_probe(pdev, &tcsr_cc_sm8550_desc, regmap);
+}
+
+static struct platform_driver tcsr_cc_sm8550_driver = {
+ .probe = tcsr_cc_sm8550_probe,
+ .driver = {
+ .name = "tcsr_cc-sm8550",
+ .of_match_table = tcsr_cc_sm8550_match_table,
+ },
+};
+
+static int __init tcsr_cc_sm8550_init(void)
+{
+ return platform_driver_register(&tcsr_cc_sm8550_driver);
+}
+subsys_initcall(tcsr_cc_sm8550_init);
+
+static void __exit tcsr_cc_sm8550_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_sm8550_driver);
+}
+module_exit(tcsr_cc_sm8550_exit);
+
+MODULE_DESCRIPTION("QTI TCSRCC SM8550 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index f28f2cb051d7..ad46c4014a40 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -361,19 +361,12 @@ static const struct of_device_id video_cc_sm8250_match_table[] = {
};
MODULE_DEVICE_TABLE(of, video_cc_sm8250_match_table);
-static void video_cc_sm8250_pm_runtime_disable(void *data)
-{
- pm_runtime_disable(data);
-}
-
static int video_cc_sm8250_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
- pm_runtime_enable(&pdev->dev);
-
- ret = devm_add_action_or_reset(&pdev->dev, video_cc_sm8250_pm_runtime_disable, &pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
return ret;
diff --git a/drivers/clk/ralink/clk-mt7621.c b/drivers/clk/ralink/clk-mt7621.c
index 99256659dd96..d95a33293b0a 100644
--- a/drivers/clk/ralink/clk-mt7621.c
+++ b/drivers/clk/ralink/clk-mt7621.c
@@ -121,7 +121,7 @@ static int mt7621_gate_is_enabled(struct clk_hw *hw)
if (regmap_read(sysc, SYSC_REG_CLKCFG1, &val))
return 0;
- return val & BIT(clk_gate->bit_idx);
+ return val & clk_gate->bit_idx;
}
static const struct clk_ops mt7621_gate_ops = {
@@ -133,8 +133,14 @@ static const struct clk_ops mt7621_gate_ops = {
static int mt7621_gate_ops_init(struct device *dev,
struct mt7621_gate *sclk)
{
+	/*
+	 * There are drivers for this SoC that are older than
+	 * this clock driver and are not prepared to manage
+	 * their clocks. We don't want the kernel to disable
+	 * anything, so we add the CLK_IS_CRITICAL flag here.
+	 */
struct clk_init_data init = {
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.num_parents = 1,
.parent_names = &sclk->parent_name,
.ops = &mt7621_gate_ops,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index cacaf9b87d26..37632a0659d8 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -22,7 +22,7 @@ config CLK_RENESAS
select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
- select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
+ select CLK_R8A7795 if ARCH_R8A77951
select CLK_R8A77960 if ARCH_R8A77960
select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 301475c74f50..7a585a777d38 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -128,7 +128,6 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
};
static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
- DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fdp1-1", 118, R8A7795_CLK_S0D1),
DEF_MOD("fdp1-0", 119, R8A7795_CLK_S0D1),
DEF_MOD("tmu4", 121, R8A7795_CLK_S0D6),
@@ -162,7 +161,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac30", 326, R8A7795_CLK_S3D1),
- DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), /* ES1.x */
DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac31", 329, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
@@ -187,28 +185,21 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
DEF_MOD("thermal", 522, R8A7795_CLK_CP),
DEF_MOD("pwm", 523, R8A7795_CLK_S0D12),
- DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpvd2", 601, R8A7795_CLK_S0D2),
DEF_MOD("fcpvd1", 602, R8A7795_CLK_S0D2),
DEF_MOD("fcpvd0", 603, R8A7795_CLK_S0D2),
DEF_MOD("fcpvb1", 606, R8A7795_CLK_S0D1),
DEF_MOD("fcpvb0", 607, R8A7795_CLK_S0D1),
- DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpvi1", 610, R8A7795_CLK_S0D1),
DEF_MOD("fcpvi0", 611, R8A7795_CLK_S0D1),
- DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpf1", 614, R8A7795_CLK_S0D1),
DEF_MOD("fcpf0", 615, R8A7795_CLK_S0D1),
- DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1), /* ES1.x */
- DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpcs", 619, R8A7795_CLK_S0D1),
- DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspd2", 621, R8A7795_CLK_S0D2),
DEF_MOD("vspd1", 622, R8A7795_CLK_S0D2),
DEF_MOD("vspd0", 623, R8A7795_CLK_S0D2),
DEF_MOD("vspbc", 624, R8A7795_CLK_S0D1),
DEF_MOD("vspbd", 626, R8A7795_CLK_S0D1),
- DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1),
DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2),
@@ -221,7 +212,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("cmm2", 709, R8A7795_CLK_S2D1),
DEF_MOD("cmm1", 710, R8A7795_CLK_S2D1),
DEF_MOD("cmm0", 711, R8A7795_CLK_S2D1),
- DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */
DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
DEF_MOD("csi40", 716, R8A7795_CLK_CSI0),
@@ -350,103 +340,26 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
{ 2, 192, 1, 192, 1, 32, },
};
-static const struct soc_device_attribute r8a7795es1[] __initconst = {
+static const struct soc_device_attribute r8a7795_denylist[] __initconst = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
};
-
- /*
- * Fixups for R-Car H3 ES1.x
- */
-
-static const unsigned int r8a7795es1_mod_nullify[] __initconst = {
- MOD_CLK_ID(326), /* USB-DMAC3-0 */
- MOD_CLK_ID(329), /* USB-DMAC3-1 */
- MOD_CLK_ID(700), /* EHCI/OHCI3 */
- MOD_CLK_ID(705), /* HS-USB-IF3 */
-
-};
-
-static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
- { MOD_CLK_ID(118), R8A7795_CLK_S2D1 }, /* FDP1-1 */
- { MOD_CLK_ID(119), R8A7795_CLK_S2D1 }, /* FDP1-0 */
- { MOD_CLK_ID(121), R8A7795_CLK_S3D2 }, /* TMU4 */
- { MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */
- { MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */
- { MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */
- { MOD_CLK_ID(408), R8A7795_CLK_S3D1 }, /* INTC-AP */
- { MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */
- { MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */
- { MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */
- { MOD_CLK_ID(601), R8A7795_CLK_S2D1 }, /* FCPVD2 */
- { MOD_CLK_ID(602), R8A7795_CLK_S2D1 }, /* FCPVD1 */
- { MOD_CLK_ID(603), R8A7795_CLK_S2D1 }, /* FCPVD0 */
- { MOD_CLK_ID(606), R8A7795_CLK_S2D1 }, /* FCPVB1 */
- { MOD_CLK_ID(607), R8A7795_CLK_S2D1 }, /* FCPVB0 */
- { MOD_CLK_ID(610), R8A7795_CLK_S2D1 }, /* FCPVI1 */
- { MOD_CLK_ID(611), R8A7795_CLK_S2D1 }, /* FCPVI0 */
- { MOD_CLK_ID(614), R8A7795_CLK_S2D1 }, /* FCPF1 */
- { MOD_CLK_ID(615), R8A7795_CLK_S2D1 }, /* FCPF0 */
- { MOD_CLK_ID(619), R8A7795_CLK_S2D1 }, /* FCPCS */
- { MOD_CLK_ID(621), R8A7795_CLK_S2D1 }, /* VSPD2 */
- { MOD_CLK_ID(622), R8A7795_CLK_S2D1 }, /* VSPD1 */
- { MOD_CLK_ID(623), R8A7795_CLK_S2D1 }, /* VSPD0 */
- { MOD_CLK_ID(624), R8A7795_CLK_S2D1 }, /* VSPBC */
- { MOD_CLK_ID(626), R8A7795_CLK_S2D1 }, /* VSPBD */
- { MOD_CLK_ID(630), R8A7795_CLK_S2D1 }, /* VSPI1 */
- { MOD_CLK_ID(631), R8A7795_CLK_S2D1 }, /* VSPI0 */
- { MOD_CLK_ID(804), R8A7795_CLK_S2D1 }, /* VIN7 */
- { MOD_CLK_ID(805), R8A7795_CLK_S2D1 }, /* VIN6 */
- { MOD_CLK_ID(806), R8A7795_CLK_S2D1 }, /* VIN5 */
- { MOD_CLK_ID(807), R8A7795_CLK_S2D1 }, /* VIN4 */
- { MOD_CLK_ID(808), R8A7795_CLK_S2D1 }, /* VIN3 */
- { MOD_CLK_ID(809), R8A7795_CLK_S2D1 }, /* VIN2 */
- { MOD_CLK_ID(810), R8A7795_CLK_S2D1 }, /* VIN1 */
- { MOD_CLK_ID(811), R8A7795_CLK_S2D1 }, /* VIN0 */
- { MOD_CLK_ID(812), R8A7795_CLK_S3D2 }, /* EAVB-IF */
- { MOD_CLK_ID(820), R8A7795_CLK_S2D1 }, /* IMR3 */
- { MOD_CLK_ID(821), R8A7795_CLK_S2D1 }, /* IMR2 */
- { MOD_CLK_ID(822), R8A7795_CLK_S2D1 }, /* IMR1 */
- { MOD_CLK_ID(823), R8A7795_CLK_S2D1 }, /* IMR0 */
- { MOD_CLK_ID(905), R8A7795_CLK_CP }, /* GPIO7 */
- { MOD_CLK_ID(906), R8A7795_CLK_CP }, /* GPIO6 */
- { MOD_CLK_ID(907), R8A7795_CLK_CP }, /* GPIO5 */
- { MOD_CLK_ID(908), R8A7795_CLK_CP }, /* GPIO4 */
- { MOD_CLK_ID(909), R8A7795_CLK_CP }, /* GPIO3 */
- { MOD_CLK_ID(910), R8A7795_CLK_CP }, /* GPIO2 */
- { MOD_CLK_ID(911), R8A7795_CLK_CP }, /* GPIO1 */
- { MOD_CLK_ID(912), R8A7795_CLK_CP }, /* GPIO0 */
- { MOD_CLK_ID(918), R8A7795_CLK_S3D2 }, /* I2C6 */
- { MOD_CLK_ID(919), R8A7795_CLK_S3D2 }, /* I2C5 */
- { MOD_CLK_ID(927), R8A7795_CLK_S3D2 }, /* I2C4 */
- { MOD_CLK_ID(928), R8A7795_CLK_S3D2 }, /* I2C3 */
-};
-
-
- /*
- * Fixups for R-Car H3 ES2.x
- */
-
-static const unsigned int r8a7795es2_mod_nullify[] __initconst = {
- MOD_CLK_ID(117), /* FDP1-2 */
- MOD_CLK_ID(327), /* USB3-IF1 */
- MOD_CLK_ID(600), /* FCPVD3 */
- MOD_CLK_ID(609), /* FCPVI2 */
- MOD_CLK_ID(613), /* FCPF2 */
- MOD_CLK_ID(616), /* FCPCI1 */
- MOD_CLK_ID(617), /* FCPCI0 */
- MOD_CLK_ID(620), /* VSPD3 */
- MOD_CLK_ID(629), /* VSPI2 */
- MOD_CLK_ID(713), /* CSI21 */
-};
-
static int __init r8a7795_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
u32 cpg_mode;
int error;
+	/*
+	 * We panic here to ensure removed SoCs and clk updates are always
+	 * kept in sync, avoiding overclocking damage. The panic can only be
+	 * seen with the command-line arguments 'earlycon keep_bootcon', but
+	 * these SoCs were for developers only anyhow.
+	 */
+ if (soc_device_match(r8a7795_denylist))
+ panic("SoC not supported anymore!\n");
+
error = rcar_rst_read_mode_pins(&cpg_mode);
if (error)
return error;
@@ -457,25 +370,6 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev)
return -EINVAL;
}
- if (soc_device_match(r8a7795es1)) {
- cpg_core_nullify_range(r8a7795_core_clks,
- ARRAY_SIZE(r8a7795_core_clks),
- R8A7795_CLK_S0D2, R8A7795_CLK_S0D12);
- mssr_mod_nullify(r8a7795_mod_clks,
- ARRAY_SIZE(r8a7795_mod_clks),
- r8a7795es1_mod_nullify,
- ARRAY_SIZE(r8a7795es1_mod_nullify));
- mssr_mod_reparent(r8a7795_mod_clks,
- ARRAY_SIZE(r8a7795_mod_clks),
- r8a7795es1_mod_reparent,
- ARRAY_SIZE(r8a7795es1_mod_reparent));
- } else {
- mssr_mod_nullify(r8a7795_mod_clks,
- ARRAY_SIZE(r8a7795_mod_clks),
- r8a7795es2_mod_nullify,
- ARRAY_SIZE(r8a7795es2_mod_nullify));
- }
-
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}
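
[Editor's note] The r8a7795 changes above drop all ES1.x handling: the ES1.x-only module clocks and the runtime nullify/reparent fixups disappear, and any ES1.x part now matches the denylist and panics early. For reference, a hedged sketch of how such a revision glob is typically consumed (names here are illustrative only):

#include <linux/sys_soc.h>

/* Illustrative only: "ES1.*" is a glob, so it matches ES1.0, ES1.1, ... */
static const struct soc_device_attribute sketch_denylist[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*" },
	{ /* sentinel */ }
};

static bool sketch_soc_is_denylisted(void)
{
	return soc_device_match(sketch_denylist) != NULL;
}
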
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
index 0f59c84229a8..7e90e94c4b68 100644
--- a/drivers/clk/renesas/r8a77970-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -76,6 +76,7 @@ static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
+ DEF_FIXED("z2", R8A77970_CLK_Z2, CLK_PLL1_DIV4, 1, 1),
DEF_FIXED("ztr", R8A77970_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A77970_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A77970_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 06f925aff407..aaa685ec35df 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -72,6 +72,7 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
+ DEF_FIXED("z2", R8A77980_CLK_Z2, CLK_PLL2, 4, 1),
DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A77980_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -150,11 +151,27 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("imp-ocv3", 529, R8A77980_CLK_S1D1),
DEF_MOD("imp-ocv2", 531, R8A77980_CLK_S1D1),
DEF_MOD("fcpvd0", 603, R8A77980_CLK_S3D1),
+ DEF_MOD("vin15", 604, R8A77980_CLK_S2D1),
+ DEF_MOD("vin14", 605, R8A77980_CLK_S2D1),
+ DEF_MOD("vin13", 608, R8A77980_CLK_S2D1),
+ DEF_MOD("vin12", 612, R8A77980_CLK_S2D1),
+ DEF_MOD("vin11", 618, R8A77980_CLK_S2D1),
DEF_MOD("vspd0", 623, R8A77980_CLK_S3D1),
+ DEF_MOD("vin10", 625, R8A77980_CLK_S2D1),
+ DEF_MOD("vin9", 627, R8A77980_CLK_S2D1),
+ DEF_MOD("vin8", 628, R8A77980_CLK_S2D1),
DEF_MOD("csi41", 715, R8A77980_CLK_CSI0),
DEF_MOD("csi40", 716, R8A77980_CLK_CSI0),
DEF_MOD("du0", 724, R8A77980_CLK_S2D1),
DEF_MOD("lvds", 727, R8A77980_CLK_S2D1),
+ DEF_MOD("vin7", 804, R8A77980_CLK_S2D1),
+ DEF_MOD("vin6", 805, R8A77980_CLK_S2D1),
+ DEF_MOD("vin5", 806, R8A77980_CLK_S2D1),
+ DEF_MOD("vin4", 807, R8A77980_CLK_S2D1),
+ DEF_MOD("vin3", 808, R8A77980_CLK_S2D1),
+ DEF_MOD("vin2", 809, R8A77980_CLK_S2D1),
+ DEF_MOD("vin1", 810, R8A77980_CLK_S2D1),
+ DEF_MOD("vin0", 811, R8A77980_CLK_S2D1),
DEF_MOD("etheravb", 812, R8A77980_CLK_S3D2),
DEF_MOD("gether", 813, R8A77980_CLK_S3D2),
DEF_MOD("imp3", 824, R8A77980_CLK_S1D1),
@@ -173,6 +190,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
DEF_MOD("rpc-if", 917, R8A77980_CLK_RPCD2),
+ DEF_MOD("i2c5", 919, R8A77980_CLK_S0D6),
DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 24ba9093a72f..3a73f6f911dd 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -167,7 +167,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("du0", 724, R8A77995_CLK_S1D1),
DEF_MOD("lvds", 727, R8A77995_CLK_S2D1),
DEF_MOD("mlp", 802, R8A77995_CLK_S2D1),
- DEF_MOD("vin4", 807, R8A77995_CLK_S1D2),
+ DEF_MOD("vin4", 807, R8A77995_CLK_S3D1),
DEF_MOD("etheravb", 812, R8A77995_CLK_S3D2),
DEF_MOD("imr0", 823, R8A77995_CLK_S1D2),
DEF_MOD("gpio6", 906, R8A77995_CLK_S3D4),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index e02542ca24a0..fcc8279647a6 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -176,8 +176,8 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
DEF_MOD("scif4", 705, R8A779A0_CLK_S1D8),
DEF_MOD("sdhi0", 706, R8A779A0_CLK_SD0),
- DEF_MOD("sydm1", 709, R8A779A0_CLK_S1D2),
- DEF_MOD("sydm2", 710, R8A779A0_CLK_S1D2),
+ DEF_MOD("sys-dmac1", 709, R8A779A0_CLK_S1D2),
+ DEF_MOD("sys-dmac2", 710, R8A779A0_CLK_S1D2),
DEF_MOD("tmu0", 713, R8A779A0_CLK_CL16MCK),
DEF_MOD("tmu1", 714, R8A779A0_CLK_S1D4),
DEF_MOD("tmu2", 715, R8A779A0_CLK_S1D4),
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index c6337a408e5e..7cc580d67362 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -66,13 +66,13 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2_VAR, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -145,6 +145,10 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1),
+ DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
+ DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, 0x880),
+ DEF_FIXED("dsiref", R8A779G0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1),
+ DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884),
DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, 0x870),
DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, 0x870),
@@ -161,6 +165,14 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC),
+ DEF_MOD("canfd0", 328, R8A779G0_CLK_SASYNCPERD2),
+ DEF_MOD("csi40", 331, R8A779G0_CLK_CSI),
+ DEF_MOD("csi41", 400, R8A779G0_CLK_CSI),
+ DEF_MOD("dis0", 411, R8A779G0_CLK_VIOBUSD2),
+ DEF_MOD("dsitxlink0", 415, R8A779G0_CLK_VIOBUSD2),
+ DEF_MOD("dsitxlink1", 416, R8A779G0_CLK_VIOBUSD2),
+ DEF_MOD("fcpvd0", 508, R8A779G0_CLK_VIOBUSD2),
+ DEF_MOD("fcpvd1", 509, R8A779G0_CLK_VIOBUSD2),
DEF_MOD("hscif0", 514, R8A779G0_CLK_SASYNCPERD1),
DEF_MOD("hscif1", 515, R8A779G0_CLK_SASYNCPERD1),
DEF_MOD("hscif2", 516, R8A779G0_CLK_SASYNCPERD1),
@@ -172,6 +184,8 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("i2c4", 522, R8A779G0_CLK_S0D6_PER),
DEF_MOD("i2c5", 523, R8A779G0_CLK_S0D6_PER),
DEF_MOD("irqc", 611, R8A779G0_CLK_CL16M),
+ DEF_MOD("ispcs0", 612, R8A779G0_CLK_S0D2_VIO),
+ DEF_MOD("ispcs1", 613, R8A779G0_CLK_S0D2_VIO),
DEF_MOD("msi0", 618, R8A779G0_CLK_MSO),
DEF_MOD("msi1", 619, R8A779G0_CLK_MSO),
DEF_MOD("msi2", 620, R8A779G0_CLK_MSO),
@@ -185,14 +199,32 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("scif3", 704, R8A779G0_CLK_SASYNCPERD4),
DEF_MOD("scif4", 705, R8A779G0_CLK_SASYNCPERD4),
DEF_MOD("sdhi", 706, R8A779G0_CLK_SD0),
- DEF_MOD("sydm0", 709, R8A779G0_CLK_S0D6_PER),
- DEF_MOD("sydm1", 710, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("sys-dmac0", 709, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("sys-dmac1", 710, R8A779G0_CLK_S0D6_PER),
DEF_MOD("tmu0", 713, R8A779G0_CLK_SASYNCRT),
DEF_MOD("tmu1", 714, R8A779G0_CLK_SASYNCPERD2),
DEF_MOD("tmu2", 715, R8A779G0_CLK_SASYNCPERD2),
DEF_MOD("tmu3", 716, R8A779G0_CLK_SASYNCPERD2),
DEF_MOD("tmu4", 717, R8A779G0_CLK_SASYNCPERD2),
DEF_MOD("tpu0", 718, R8A779G0_CLK_SASYNCPERD4),
+ DEF_MOD("vin00", 730, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin01", 731, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin02", 800, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin03", 801, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin04", 802, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin05", 803, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin06", 804, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin07", 805, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin10", 806, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin11", 807, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin12", 808, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin13", 809, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin14", 810, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin15", 811, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin16", 812, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vin17", 813, R8A779G0_CLK_S0D4_VIO),
+ DEF_MOD("vspd0", 830, R8A779G0_CLK_VIOBUSD2),
+ DEF_MOD("vspd1", 831, R8A779G0_CLK_VIOBUSD2),
DEF_MOD("wdt1:wdt0", 907, R8A779G0_CLK_R),
DEF_MOD("cmt0", 910, R8A779G0_CLK_R),
DEF_MOD("cmt1", 911, R8A779G0_CLK_R),
@@ -202,6 +234,9 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
+ DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
};
/*
@@ -211,20 +246,20 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
* MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC
* 14 13 (MHz)
* ------------------------------------------------------------------------
- * 0 0 16.66 / 1 x192 x204 x192 x144 x192 x168 /15
+ * 0 0 16.66 / 1 x192 x204 x192 x144 x192 x168 /16
* 0 1 20 / 1 x160 x170 x160 x120 x160 x140 /19
* 1 0 Prohibited setting
- * 1 1 33.33 / 2 x192 x204 x192 x144 x192 x168 /38
+ * 1 1 33.33 / 2 x192 x204 x192 x144 x192 x168 /32
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
/* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 15, },
+ { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 16, },
{ 1, 160, 1, 170, 1, 160, 1, 120, 1, 160, 1, 140, 1, 19, },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 38, },
+ { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 32, },
};
static int __init r8a779g0_cpg_mssr_init(struct device *dev)
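
[Editor's note] In the r8a779g0 hunks above, PLL2 becomes a variable-rate PLL type, DSI/CSI/CAN-FD/VIN/VSP and other module clocks are added, and the OSC predividers in the mode-pin table are corrected to /16 and /32. The CPG_PLL_CONFIG_INDEX() macro packs mode pins MD14 and MD13 into a two-bit table index; a small worked example (hypothetical function name, same arithmetic as the macro):

#include <linux/bits.h>

/* MD14 contributes bit 1 of the index, MD13 contributes bit 0. */
static unsigned int sketch_pll_config_index(u32 md)
{
	return ((md & BIT(14)) >> 13) | ((md & BIT(13)) >> 13);
}

/*
 * sketch_pll_config_index(0)                 == 0 -> 16.66 MHz row, OSC /16
 * sketch_pll_config_index(BIT(13))           == 1 -> 20 MHz row,    OSC /19
 * sketch_pll_config_index(BIT(14))           == 2 -> prohibited setting
 * sketch_pll_config_index(BIT(14) | BIT(13)) == 3 -> 33.33 MHz row, OSC /32
 */
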
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 983faa5707b9..40828616f723 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -25,71 +25,193 @@
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+#define R9A06G032_SYSCTRL_USB 0x00
+#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+/**
+ * struct regbit - describe one bit in a register
+ * @reg: offset of register relative to base address,
+ * expressed in units of 32-bit words (not bytes),
+ *       expressed in units of 32-bit words (not bytes)
+ *
+ * This structure is used to compactly encode the location
+ * of a single bit in a register. Five bits are needed to
+ * encode the bit number. With a 16-bit data type, this
+ * leaves 11 bits to encode a register offset up to 2047.
+ *
+ * Since registers are aligned on 32-bit boundaries, the
+ * offset will be specified in 32-bit words rather than bytes.
+ * This allows encoding an offset up to 0x1FFC (8188) bytes.
+ *
+ * Helper macro RB() takes care of converting the register
+ * offset from bytes to 32-bit words.
+ */
+struct regbit {
+ u16 bit:5;
+ u16 reg:11;
+};
+
+#define RB(_reg, _bit) ((struct regbit) { \
+ .reg = (_reg) / 4, \
+ .bit = (_bit) \
+})
+
+/**
+ * struct r9a06g032_gate - clock-related control bits
+ * @gate: clock enable/disable
+ * @reset: clock module reset (active low)
+ * @ready: enables NoC forwarding of read/write requests to device,
+ *         (e.g. the device is ready to handle read/write requests)
+ * @midle: request to idle the NoC interconnect
+ *
+ * Each of these fields describes a single bit in a register,
+ * which controls some aspect of clock gating. The @gate field
+ * is mandatory, this one enables/disables the clock. The
+ * other fields are optional, with zero indicating "not used".
+ *
+ * In most cases there is a @reset bit which needs to be
+ * de-asserted to bring the module out of reset.
+ *
+ * Modules may also need to signal when they are @ready to
+ * handle requests (read/writes) from the NoC interconnect.
+ *
+ * Similarly, the @midle bit is used to idle the master.
+ */
struct r9a06g032_gate {
- u16 gate, reset, ready, midle,
- scon, mirack, mistat;
+ struct regbit gate, reset, ready, midle;
+ /* Unused fields omitted to save space */
+	/* struct regbit scon, mirack, mistat; */
};
-/* This is used to describe a clock for instantiation */
+enum gate_type {
+	K_GATE = 0, /* gate which enables/disables the clock */
+ K_FFC, /* fixed factor clock */
+ K_DIV, /* divisor */
+ K_BITSEL, /* special for UARTs */
+ K_DUALGATE /* special for UARTs */
+};
+
+/**
+ * struct r9a06g032_clkdesc - describe a single clock
+ * @name: string describing this clock
+ * @managed: boolean indicating if this clock should be
+ * started/stopped as part of power management
+ * @type: see enum @gate_type
+ * @index: the ID of this clock element
+ * @source: the ID+1 of the parent clock element.
+ * Root clock uses ID of ~0 (PARENT_ID);
+ * @gate: clock enable/disable
+ * @div_min: smallest permitted clock divider
+ * @div_max: largest permitted clock divider
+ * @reg: clock divider register offset, in 32-bit words
+ * @div_table: optional list of fixed clock divider values;
+ * must be in ascending order, zero for unused
+ * @div: divisor for fixed-factor clock
+ * @mul: multiplier for fixed-factor clock
+ * @group: UART group, 0=UART0/1/2, 1=UART3/4/5/6/7
+ * @sel: select either g1/r1 or g2/r2 as clock source
+ * @g1: 1st source gate (clock enable/disable)
+ * @r1: 1st source reset (module reset)
+ * @g2: 2nd source gate (clock enable/disable)
+ * @r2: 2nd source reset (module reset)
+ *
+ * Describes a single element in the clock tree hierarchy.
+ * As there are quite a large number of clock elements, this
+ * structure is packed tightly to conserve space.
+ */
struct r9a06g032_clkdesc {
const char *name;
- uint32_t managed: 1;
- uint32_t type: 3;
- uint32_t index: 8;
- uint32_t source : 8; /* source index + 1 (0 == none) */
- /* these are used to populate the bitsel struct */
+ uint32_t managed:1;
+ enum gate_type type:3;
+ uint32_t index:8;
+ uint32_t source:8; /* source index + 1 (0 == none) */
union {
+ /* type = K_GATE */
struct r9a06g032_gate gate;
- /* for dividers */
+ /* type = K_DIV */
struct {
- unsigned int div_min : 10, div_max : 10, reg: 10;
+ unsigned int div_min:10, div_max:10, reg:10;
u16 div_table[4];
};
- /* For fixed-factor ones */
+ /* type = K_FFC */
struct {
u16 div, mul;
};
- /* for dual gate */
+ /* type = K_DUALGATE */
struct {
- uint16_t group : 1;
- u16 sel, g1, r1, g2, r2;
+ uint16_t group:1;
+ struct regbit sel, g1, r1, g2, r2;
} dual;
};
};
-#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
- { .gate = _clk, .reset = _rst, \
- .ready = _rdy, .midle = _midle, \
- .scon = _scon, .mirack = _mirack, .mistat = _mistat }
-#define D_GATE(_idx, _n, _src, ...) \
- { .type = K_GATE, .index = R9A06G032_##_idx, \
- .source = 1 + R9A06G032_##_src, .name = _n, \
- .gate = I_GATE(__VA_ARGS__) }
-#define D_MODULE(_idx, _n, _src, ...) \
- { .type = K_GATE, .index = R9A06G032_##_idx, \
- .source = 1 + R9A06G032_##_src, .name = _n, \
- .managed = 1, .gate = I_GATE(__VA_ARGS__) }
-#define D_ROOT(_idx, _n, _mul, _div) \
- { .type = K_FFC, .index = R9A06G032_##_idx, .name = _n, \
- .div = _div, .mul = _mul }
-#define D_FFC(_idx, _n, _src, _div) \
- { .type = K_FFC, .index = R9A06G032_##_idx, \
- .source = 1 + R9A06G032_##_src, .name = _n, \
- .div = _div, .mul = 1}
-#define D_DIV(_idx, _n, _src, _reg, _min, _max, ...) \
- { .type = K_DIV, .index = R9A06G032_##_idx, \
- .source = 1 + R9A06G032_##_src, .name = _n, \
- .reg = _reg, .div_min = _min, .div_max = _max, \
- .div_table = { __VA_ARGS__ } }
-#define D_UGATE(_idx, _n, _src, _g, _g1, _r1, _g2, _r2) \
- { .type = K_DUALGATE, .index = R9A06G032_##_idx, \
- .source = 1 + R9A06G032_##_src, .name = _n, \
- .dual = { .group = _g, \
- .g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
-
-enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
+/*
+ * The last three arguments are not currently used,
+ * but are kept in the r9a06g032_clocks table below.
+ */
+#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) { \
+ .gate = _clk, \
+ .reset = _rst, \
+ .ready = _rdy, \
+ .midle = _midle, \
+ /* .scon = _scon, */ \
+ /* .mirack = _mirack, */ \
+ /* .mistat = _mistat */ \
+}
+#define D_GATE(_idx, _n, _src, ...) { \
+ .type = K_GATE, \
+ .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, \
+ .name = _n, \
+ .gate = I_GATE(__VA_ARGS__) \
+}
+#define D_MODULE(_idx, _n, _src, ...) { \
+ .type = K_GATE, \
+ .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, \
+ .name = _n, \
+ .managed = 1, \
+ .gate = I_GATE(__VA_ARGS__) \
+}
+#define D_ROOT(_idx, _n, _mul, _div) { \
+ .type = K_FFC, \
+ .index = R9A06G032_##_idx, \
+ .name = _n, \
+ .div = _div, \
+ .mul = _mul \
+}
+#define D_FFC(_idx, _n, _src, _div) { \
+ .type = K_FFC, \
+ .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, \
+ .name = _n, \
+ .div = _div, \
+ .mul = 1 \
+}
+#define D_DIV(_idx, _n, _src, _reg, _min, _max, ...) { \
+ .type = K_DIV, \
+ .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, \
+ .name = _n, \
+ .reg = _reg, \
+ .div_min = _min, \
+ .div_max = _max, \
+ .div_table = { __VA_ARGS__ } \
+}
+#define D_UGATE(_idx, _n, _src, _g, _g1, _r1, _g2, _r2) { \
+ .type = K_DUALGATE, \
+ .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, \
+ .name = _n, \
+ .dual = { \
+ .group = _g, \
+ .g1 = _g1, \
+ .r1 = _r1, \
+ .g2 = _g2, \
+ .r2 = _r2 \
+ }, \
+}
/* Internal clock IDs */
#define R9A06G032_CLKOUT 0
@@ -158,58 +280,160 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
D_DIV(DIV_SDIO1, "div_sdio1", CLKOUT, 75, 20, 128),
D_DIV(DIV_SWITCH, "div_switch", CLKOUT, 37, 5, 40),
D_DIV(DIV_UART, "div_uart", CLKOUT, 79, 12, 128),
- D_GATE(CLK_25_PG4, "clk_25_pg4", CLKOUT_D40, 0x749, 0x74a, 0x74b, 0, 0xae3, 0, 0),
- D_GATE(CLK_25_PG5, "clk_25_pg5", CLKOUT_D40, 0x74c, 0x74d, 0x74e, 0, 0xae4, 0, 0),
- D_GATE(CLK_25_PG6, "clk_25_pg6", CLKOUT_D40, 0x74f, 0x750, 0x751, 0, 0xae5, 0, 0),
- D_GATE(CLK_25_PG7, "clk_25_pg7", CLKOUT_D40, 0x752, 0x753, 0x754, 0, 0xae6, 0, 0),
- D_GATE(CLK_25_PG8, "clk_25_pg8", CLKOUT_D40, 0x755, 0x756, 0x757, 0, 0xae7, 0, 0),
- D_GATE(CLK_ADC, "clk_adc", DIV_ADC, 0x1ea, 0x1eb, 0, 0, 0, 0, 0),
- D_GATE(CLK_ECAT100, "clk_ecat100", CLKOUT_D10, 0x405, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_HSR100, "clk_hsr100", CLKOUT_D10, 0x483, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_I2C0, "clk_i2c0", DIV_I2C, 0x1e6, 0x1e7, 0, 0, 0, 0, 0),
- D_GATE(CLK_I2C1, "clk_i2c1", DIV_I2C, 0x1e8, 0x1e9, 0, 0, 0, 0, 0),
- D_GATE(CLK_MII_REF, "clk_mii_ref", CLKOUT_D40, 0x342, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_NAND, "clk_nand", DIV_NAND, 0x284, 0x285, 0, 0, 0, 0, 0),
- D_GATE(CLK_NOUSBP2_PG6, "clk_nousbp2_pg6", DIV_P2_PG, 0x774, 0x775, 0, 0, 0, 0, 0),
- D_GATE(CLK_P1_PG2, "clk_p1_pg2", DIV_P1_PG, 0x862, 0x863, 0, 0, 0, 0, 0),
- D_GATE(CLK_P1_PG3, "clk_p1_pg3", DIV_P1_PG, 0x864, 0x865, 0, 0, 0, 0, 0),
- D_GATE(CLK_P1_PG4, "clk_p1_pg4", DIV_P1_PG, 0x866, 0x867, 0, 0, 0, 0, 0),
- D_GATE(CLK_P4_PG3, "clk_p4_pg3", DIV_P4_PG, 0x824, 0x825, 0, 0, 0, 0, 0),
- D_GATE(CLK_P4_PG4, "clk_p4_pg4", DIV_P4_PG, 0x826, 0x827, 0, 0, 0, 0, 0),
- D_GATE(CLK_P6_PG1, "clk_p6_pg1", DIV_P6_PG, 0x8a0, 0x8a1, 0x8a2, 0, 0xb60, 0, 0),
- D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0),
- D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0),
- D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0),
- D_MODULE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, 0xe6, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0),
- D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0),
- D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_RMII_REF, "clk_rmii_ref", CLKOUT_D20, 0x341, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_SDIO0, "clk_sdio0", DIV_SDIO0, 0x64, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_SDIO1, "clk_sdio1", DIV_SDIO1, 0x644, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_SERCOS100, "clk_sercos100", CLKOUT_D10, 0x425, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_SLCD, "clk_slcd", DIV_P1_PG, 0x860, 0x861, 0, 0, 0, 0, 0),
- D_GATE(CLK_SPI0, "clk_spi0", DIV_P3_PG, 0x7e0, 0x7e1, 0, 0, 0, 0, 0),
- D_GATE(CLK_SPI1, "clk_spi1", DIV_P3_PG, 0x7e2, 0x7e3, 0, 0, 0, 0, 0),
- D_GATE(CLK_SPI2, "clk_spi2", DIV_P3_PG, 0x7e4, 0x7e5, 0, 0, 0, 0, 0),
- D_GATE(CLK_SPI3, "clk_spi3", DIV_P3_PG, 0x7e6, 0x7e7, 0, 0, 0, 0, 0),
- D_GATE(CLK_SPI4, "clk_spi4", DIV_P4_PG, 0x820, 0x821, 0, 0, 0, 0, 0),
- D_GATE(CLK_SPI5, "clk_spi5", DIV_P4_PG, 0x822, 0x823, 0, 0, 0, 0, 0),
- D_GATE(CLK_SWITCH, "clk_switch", DIV_SWITCH, 0x982, 0x983, 0, 0, 0, 0, 0),
+ D_GATE(CLK_25_PG4, "clk_25_pg4", CLKOUT_D40, RB(0xe8, 9),
+ RB(0xe8, 10), RB(0xe8, 11), RB(0x00, 0),
+ RB(0x15c, 3), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_25_PG5, "clk_25_pg5", CLKOUT_D40, RB(0xe8, 12),
+ RB(0xe8, 13), RB(0xe8, 14), RB(0x00, 0),
+ RB(0x15c, 4), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_25_PG6, "clk_25_pg6", CLKOUT_D40, RB(0xe8, 15),
+ RB(0xe8, 16), RB(0xe8, 17), RB(0x00, 0),
+ RB(0x15c, 5), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_25_PG7, "clk_25_pg7", CLKOUT_D40, RB(0xe8, 18),
+ RB(0xe8, 19), RB(0xe8, 20), RB(0x00, 0),
+ RB(0x15c, 6), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_25_PG8, "clk_25_pg8", CLKOUT_D40, RB(0xe8, 21),
+ RB(0xe8, 22), RB(0xe8, 23), RB(0x00, 0),
+ RB(0x15c, 7), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_ADC, "clk_adc", DIV_ADC, RB(0x3c, 10),
+ RB(0x3c, 11), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_ECAT100, "clk_ecat100", CLKOUT_D10, RB(0x80, 5),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_HSR100, "clk_hsr100", CLKOUT_D10, RB(0x90, 3),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_I2C0, "clk_i2c0", DIV_I2C, RB(0x3c, 6),
+ RB(0x3c, 7), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_I2C1, "clk_i2c1", DIV_I2C, RB(0x3c, 8),
+ RB(0x3c, 9), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_MII_REF, "clk_mii_ref", CLKOUT_D40, RB(0x68, 2),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_NAND, "clk_nand", DIV_NAND, RB(0x50, 4),
+ RB(0x50, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_NOUSBP2_PG6, "clk_nousbp2_pg6", DIV_P2_PG, RB(0xec, 20),
+ RB(0xec, 21), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P1_PG2, "clk_p1_pg2", DIV_P1_PG, RB(0x10c, 2),
+ RB(0x10c, 3), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P1_PG3, "clk_p1_pg3", DIV_P1_PG, RB(0x10c, 4),
+ RB(0x10c, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P1_PG4, "clk_p1_pg4", DIV_P1_PG, RB(0x10c, 6),
+ RB(0x10c, 7), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P4_PG3, "clk_p4_pg3", DIV_P4_PG, RB(0x104, 4),
+ RB(0x104, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P4_PG4, "clk_p4_pg4", DIV_P4_PG, RB(0x104, 6),
+ RB(0x104, 7), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P6_PG1, "clk_p6_pg1", DIV_P6_PG, RB(0x114, 0),
+ RB(0x114, 1), RB(0x114, 2), RB(0x00, 0),
+ RB(0x16c, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, RB(0x114, 3),
+ RB(0x114, 4), RB(0x114, 5), RB(0x00, 0),
+ RB(0x16c, 1), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, RB(0x114, 6),
+ RB(0x114, 7), RB(0x114, 8), RB(0x00, 0),
+ RB(0x16c, 2), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, RB(0x114, 9),
+ RB(0x114, 10), RB(0x114, 11), RB(0x00, 0),
+ RB(0x16c, 3), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, RB(0x1c, 6),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, RB(0x54, 4),
+ RB(0x54, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, RB(0x90, 4),
+ RB(0x90, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, RB(0x68, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_RMII_REF, "clk_rmii_ref", CLKOUT_D20, RB(0x68, 1),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SDIO0, "clk_sdio0", DIV_SDIO0, RB(0x0c, 4),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SDIO1, "clk_sdio1", DIV_SDIO1, RB(0xc8, 4),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SERCOS100, "clk_sercos100", CLKOUT_D10, RB(0x84, 5),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SLCD, "clk_slcd", DIV_P1_PG, RB(0x10c, 0),
+ RB(0x10c, 1), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SPI0, "clk_spi0", DIV_P3_PG, RB(0xfc, 0),
+ RB(0xfc, 1), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SPI1, "clk_spi1", DIV_P3_PG, RB(0xfc, 2),
+ RB(0xfc, 3), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SPI2, "clk_spi2", DIV_P3_PG, RB(0xfc, 4),
+ RB(0xfc, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SPI3, "clk_spi3", DIV_P3_PG, RB(0xfc, 6),
+ RB(0xfc, 7), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SPI4, "clk_spi4", DIV_P4_PG, RB(0x104, 0),
+ RB(0x104, 1), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SPI5, "clk_spi5", DIV_P4_PG, RB(0x104, 2),
+ RB(0x104, 3), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SWITCH, "clk_switch", DIV_SWITCH, RB(0x130, 2),
+ RB(0x130, 3), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
D_DIV(DIV_MOTOR, "div_motor", CLKOUT_D5, 84, 2, 8),
- D_MODULE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, 0x400, 0x401, 0, 0x402, 0, 0x440, 0x441),
- D_MODULE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, 0x740, 0x741, 0x742, 0, 0xae0, 0, 0),
- D_MODULE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, 0x420, 0x422, 0, 0x421, 0, 0x460, 0x461),
- D_MODULE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, 0x8c3, 0x8c4, 0x8c5, 0, 0xb41, 0, 0),
- D_MODULE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, 0x8c6, 0x8c7, 0x8c8, 0, 0xb42, 0, 0),
- D_MODULE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, 0x8c9, 0x8ca, 0x8cb, 0, 0xb43, 0, 0),
- D_MODULE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, 0x743, 0x744, 0x745, 0, 0xae1, 0, 0),
- D_MODULE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, 0x746, 0x747, 0x748, 0, 0xae2, 0, 0),
- D_MODULE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, 0xe3, 0, 0, 0xe4, 0, 0x102, 0x103),
- D_MODULE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, 0xe0, 0xe1, 0, 0xe2, 0, 0x100, 0x101),
- D_MODULE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, 0xe5, 0, 0, 0, 0, 0, 0),
- D_GATE(CLK_48_PG_F, "clk_48_pg_f", CLK_48, 0x78c, 0x78d, 0, 0x78e, 0, 0xb04, 0xb05),
- D_GATE(CLK_48_PG4, "clk_48_pg4", CLK_48, 0x789, 0x78a, 0x78b, 0, 0xb03, 0, 0),
+ D_MODULE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, RB(0x80, 0),
+ RB(0x80, 1), RB(0x00, 0), RB(0x80, 2),
+ RB(0x00, 0), RB(0x88, 0), RB(0x88, 1)),
+ D_MODULE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, RB(0xe8, 0),
+ RB(0xe8, 1), RB(0xe8, 2), RB(0x00, 0),
+ RB(0x15c, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, RB(0x84, 0),
+ RB(0x84, 2), RB(0x00, 0), RB(0x84, 1),
+ RB(0x00, 0), RB(0x8c, 0), RB(0x8c, 1)),
+ D_MODULE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, RB(0x118, 3),
+ RB(0x118, 4), RB(0x118, 5), RB(0x00, 0),
+ RB(0x168, 1), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, RB(0x118, 6),
+ RB(0x118, 7), RB(0x118, 8), RB(0x00, 0),
+ RB(0x168, 2), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, RB(0x118, 9),
+ RB(0x118, 10), RB(0x118, 11), RB(0x00, 0),
+ RB(0x168, 3), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, RB(0xe8, 3),
+ RB(0xe8, 4), RB(0xe8, 5), RB(0x00, 0),
+ RB(0x15c, 1), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, RB(0xe8, 6),
+ RB(0xe8, 7), RB(0xe8, 8), RB(0x00, 0),
+ RB(0x15c, 2), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, RB(0x1c, 3),
+ RB(0x00, 0), RB(0x00, 0), RB(0x1c, 4),
+ RB(0x00, 0), RB(0x20, 2), RB(0x20, 3)),
+ D_MODULE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, RB(0x1c, 0),
+ RB(0x1c, 1), RB(0x00, 0), RB(0x1c, 2),
+ RB(0x00, 0), RB(0x20, 0), RB(0x20, 1)),
+ D_MODULE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, RB(0x1c, 5),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_48_PG_F, "clk_48_pg_f", CLK_48, RB(0xf0, 12),
+ RB(0xf0, 13), RB(0x00, 0), RB(0xf0, 14),
+ RB(0x00, 0), RB(0x160, 4), RB(0x160, 5)),
+ D_GATE(CLK_48_PG4, "clk_48_pg4", CLK_48, RB(0xf0, 9),
+ RB(0xf0, 10), RB(0xf0, 11), RB(0x00, 0),
+ RB(0x160, 3), RB(0x00, 0), RB(0x00, 0)),
D_FFC(CLK_DDRPHY_PLLCLK_D4, "clk_ddrphy_pllclk_d4", CLK_DDRPHY_PLLCLK, 4),
D_FFC(CLK_ECAT100_D4, "clk_ecat100_d4", CLK_ECAT100, 4),
D_FFC(CLK_HSR100_D2, "clk_hsr100_d2", CLK_HSR100, 2),
@@ -217,67 +441,187 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
D_FFC(CLK_REF_SYNC_D8, "clk_ref_sync_d8", CLK_REF_SYNC, 8),
D_FFC(CLK_SERCOS100_D2, "clk_sercos100_d2", CLK_SERCOS100, 2),
D_DIV(DIV_CA7, "div_ca7", CLK_REF_SYNC, 57, 1, 4, 1, 2, 4),
- D_MODULE(HCLK_CAN0, "hclk_can0", CLK_48, 0x783, 0x784, 0x785, 0, 0xb01, 0, 0),
- D_MODULE(HCLK_CAN1, "hclk_can1", CLK_48, 0x786, 0x787, 0x788, 0, 0xb02, 0, 0),
- D_MODULE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, 0x1ef, 0x1f0, 0x1f1, 0, 0, 0, 0),
- D_MODULE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, 0x1ec, 0x1ed, 0x1ee, 0, 0, 0, 0),
- D_MODULE(HCLK_RSV, "hclk_rsv", CLK_48, 0x780, 0x781, 0x782, 0, 0xb00, 0, 0),
- D_MODULE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, 0x1e0, 0x1e1, 0x1e2, 0, 0, 0, 0),
- D_MODULE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, 0x1e3, 0x1e4, 0x1e5, 0, 0, 0, 0),
+ D_MODULE(HCLK_CAN0, "hclk_can0", CLK_48, RB(0xf0, 3),
+ RB(0xf0, 4), RB(0xf0, 5), RB(0x00, 0),
+ RB(0x160, 1), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_CAN1, "hclk_can1", CLK_48, RB(0xf0, 6),
+ RB(0xf0, 7), RB(0xf0, 8), RB(0x00, 0),
+ RB(0x160, 2), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, RB(0x3c, 15),
+ RB(0x3c, 16), RB(0x3c, 17), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, RB(0x3c, 12),
+ RB(0x3c, 13), RB(0x3c, 14), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_RSV, "hclk_rsv", CLK_48, RB(0xf0, 0),
+ RB(0xf0, 1), RB(0xf0, 2), RB(0x00, 0),
+ RB(0x160, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, RB(0x3c, 0),
+ RB(0x3c, 1), RB(0x3c, 2), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, RB(0x3c, 3),
+ RB(0x3c, 4), RB(0x3c, 5), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
D_DIV(RTOS_MDC, "rtos_mdc", CLK_REF_SYNC, 100, 80, 640, 80, 160, 320, 640),
- D_GATE(CLK_CM3, "clk_cm3", CLK_REF_SYNC_D4, 0xba0, 0xba1, 0, 0xba2, 0, 0xbc0, 0xbc1),
- D_GATE(CLK_DDRC, "clk_ddrc", CLK_DDRPHY_PLLCLK_D4, 0x323, 0x324, 0, 0, 0, 0, 0),
- D_GATE(CLK_ECAT25, "clk_ecat25", CLK_ECAT100_D4, 0x403, 0x404, 0, 0, 0, 0, 0),
- D_GATE(CLK_HSR50, "clk_hsr50", CLK_HSR100_D2, 0x484, 0x485, 0, 0, 0, 0, 0),
- D_GATE(CLK_HW_RTOS, "clk_hw_rtos", CLK_REF_SYNC_D4, 0xc60, 0xc61, 0, 0, 0, 0, 0),
- D_GATE(CLK_SERCOS50, "clk_sercos50", CLK_SERCOS100_D2, 0x424, 0x423, 0, 0, 0, 0, 0),
- D_MODULE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, 0x1af, 0x1b0, 0x1b1, 0, 0, 0, 0),
- D_MODULE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, 0xc20, 0xc21, 0xc22, 0, 0, 0, 0),
- D_MODULE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, 0x123, 0x124, 0x125, 0, 0x142, 0, 0),
- D_MODULE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, 0x120, 0x121, 0, 0x122, 0, 0x140, 0x141),
- D_MODULE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, 0x320, 0x322, 0, 0x321, 0, 0x3a0, 0x3a1),
- D_MODULE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, 0x260, 0x261, 0x262, 0x263, 0x2c0, 0x2c1, 0x2c2),
- D_MODULE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, 0x264, 0x265, 0x266, 0x267, 0x2c3, 0x2c4, 0x2c5),
- D_MODULE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, 0x360, 0x361, 0x362, 0x363, 0x3c0, 0x3c1, 0x3c2),
- D_MODULE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, 0x380, 0x381, 0x382, 0x383, 0x3e0, 0x3e1, 0x3e2),
- D_MODULE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, 0x212, 0x213, 0x214, 0, 0, 0, 0),
- D_MODULE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, 0x215, 0x216, 0x217, 0, 0, 0, 0),
- D_MODULE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, 0x229, 0x22a, 0x22b, 0, 0, 0, 0),
- D_MODULE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, 0x480, 0x482, 0, 0x481, 0, 0x4c0, 0x4c1),
- D_MODULE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, 0x1a9, 0x1aa, 0x1ab, 0, 0, 0, 0),
- D_MODULE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, 0x1ac, 0x1ad, 0x1ae, 0, 0, 0, 0),
- D_MODULE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, 0x7a0, 0x7a1, 0x7a2, 0, 0xb20, 0, 0),
- D_MODULE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, 0x164, 0x165, 0x166, 0, 0x183, 0, 0),
- D_MODULE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, 0x160, 0x161, 0x162, 0x163, 0x180, 0x181, 0x182),
- D_MODULE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, 0x280, 0x281, 0x282, 0x283, 0x2e0, 0x2e1, 0x2e2),
- D_MODULE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, 0x7ac, 0x7ad, 0, 0x7ae, 0, 0xb24, 0xb25),
- D_MODULE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, 0x22c, 0x22d, 0x22e, 0, 0, 0, 0),
- D_MODULE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, 0x22f, 0x230, 0x231, 0, 0, 0, 0),
- D_MODULE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, 0x7a6, 0x7a7, 0x7a8, 0, 0xb22, 0, 0),
- D_MODULE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, 0x7a9, 0x7aa, 0x7ab, 0, 0xb23, 0, 0),
- D_MODULE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302),
- D_MODULE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2),
- D_MODULE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0),
- D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0xa03, 0, 0xa02, 0, 0, 0),
- D_MODULE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82),
- D_MODULE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662),
- D_MODULE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0),
- D_MODULE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, 0x200, 0x201, 0x202, 0, 0, 0, 0),
- D_MODULE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, 0x203, 0x204, 0x205, 0, 0, 0, 0),
- D_MODULE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, 0x206, 0x207, 0x208, 0, 0, 0, 0),
- D_MODULE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, 0x209, 0x20a, 0x20b, 0, 0, 0, 0),
- D_MODULE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, 0x20c, 0x20d, 0x20e, 0, 0, 0, 0),
- D_MODULE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, 0x20f, 0x210, 0x211, 0, 0, 0, 0),
- D_MODULE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, 0x980, 0, 0x981, 0, 0, 0, 0),
- D_MODULE(HCLK_SWITCH_RG, "hclk_switch_rg", CLK_REF_SYNC_D4, 0xc40, 0xc41, 0xc42, 0, 0, 0, 0),
- D_MODULE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, 0x1a0, 0x1a1, 0x1a2, 0, 0, 0, 0),
- D_MODULE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, 0x1a3, 0x1a4, 0x1a5, 0, 0, 0, 0),
- D_MODULE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, 0x1a6, 0x1a7, 0x1a8, 0, 0, 0, 0),
- D_MODULE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, 0x218, 0x219, 0x21a, 0, 0, 0, 0),
- D_MODULE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, 0x21b, 0x21c, 0x21d, 0, 0, 0, 0),
- D_MODULE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, 0x220, 0x221, 0x222, 0, 0, 0, 0),
- D_MODULE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, 0x223, 0x224, 0x225, 0, 0, 0, 0),
- D_MODULE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, 0x226, 0x227, 0x228, 0, 0, 0, 0),
+ D_GATE(CLK_CM3, "clk_cm3", CLK_REF_SYNC_D4, RB(0x174, 0),
+ RB(0x174, 1), RB(0x00, 0), RB(0x174, 2),
+ RB(0x00, 0), RB(0x178, 0), RB(0x178, 1)),
+ D_GATE(CLK_DDRC, "clk_ddrc", CLK_DDRPHY_PLLCLK_D4, RB(0x64, 3),
+ RB(0x64, 4), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_ECAT25, "clk_ecat25", CLK_ECAT100_D4, RB(0x80, 3),
+ RB(0x80, 4), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_HSR50, "clk_hsr50", CLK_HSR100_D2, RB(0x90, 4),
+ RB(0x90, 5), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_HW_RTOS, "clk_hw_rtos", CLK_REF_SYNC_D4, RB(0x18c, 0),
+ RB(0x18c, 1), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_GATE(CLK_SERCOS50, "clk_sercos50", CLK_SERCOS100_D2, RB(0x84, 4),
+ RB(0x84, 3), RB(0x00, 0), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, RB(0x34, 15),
+ RB(0x34, 16), RB(0x34, 17), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, RB(0x184, 0),
+ RB(0x184, 1), RB(0x184, 2), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, RB(0x24, 3),
+ RB(0x24, 4), RB(0x24, 5), RB(0x00, 0),
+ RB(0x28, 2), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, RB(0x24, 0),
+ RB(0x24, 1), RB(0x00, 0), RB(0x24, 2),
+ RB(0x00, 0), RB(0x28, 0), RB(0x28, 1)),
+ D_MODULE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, RB(0x64, 0),
+ RB(0x64, 2), RB(0x00, 0), RB(0x64, 1),
+ RB(0x00, 0), RB(0x74, 0), RB(0x74, 1)),
+ D_MODULE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, RB(0x4c, 0),
+ RB(0x4c, 1), RB(0x4c, 2), RB(0x4c, 3),
+ RB(0x58, 0), RB(0x58, 1), RB(0x58, 2)),
+ D_MODULE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, RB(0x4c, 4),
+ RB(0x4c, 5), RB(0x4c, 6), RB(0x4c, 7),
+ RB(0x58, 3), RB(0x58, 4), RB(0x58, 5)),
+ D_MODULE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, RB(0x6c, 0),
+ RB(0x6c, 1), RB(0x6c, 2), RB(0x6c, 3),
+ RB(0x78, 0), RB(0x78, 1), RB(0x78, 2)),
+ D_MODULE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, RB(0x70, 0),
+ RB(0x70, 1), RB(0x70, 2), RB(0x70, 3),
+ RB(0x7c, 0), RB(0x7c, 1), RB(0x7c, 2)),
+ D_MODULE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, RB(0x40, 18),
+ RB(0x40, 19), RB(0x40, 20), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, RB(0x40, 21),
+ RB(0x40, 22), RB(0x40, 23), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, RB(0x44, 9),
+ RB(0x44, 10), RB(0x44, 11), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, RB(0x90, 0),
+ RB(0x90, 2), RB(0x00, 0), RB(0x90, 1),
+ RB(0x00, 0), RB(0x98, 0), RB(0x98, 1)),
+ D_MODULE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, RB(0x34, 9),
+ RB(0x34, 10), RB(0x34, 11), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, RB(0x34, 12),
+ RB(0x34, 13), RB(0x34, 14), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, RB(0xf4, 0),
+ RB(0xf4, 1), RB(0xf4, 2), RB(0x00, 0),
+ RB(0x164, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, RB(0x2c, 4),
+ RB(0x2c, 5), RB(0x2c, 6), RB(0x00, 0),
+ RB(0x30, 3), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, RB(0x2c, 0),
+ RB(0x2c, 1), RB(0x2c, 2), RB(0x2c, 3),
+ RB(0x30, 0), RB(0x30, 1), RB(0x30, 2)),
+ D_MODULE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, RB(0x50, 0),
+ RB(0x50, 1), RB(0x50, 2), RB(0x50, 3),
+ RB(0x5c, 0), RB(0x5c, 1), RB(0x5c, 2)),
+ D_MODULE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, RB(0xf4, 12),
+ RB(0xf4, 13), RB(0x00, 0), RB(0xf4, 14),
+ RB(0x00, 0), RB(0x164, 4), RB(0x164, 5)),
+ D_MODULE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, RB(0x44, 12),
+ RB(0x44, 13), RB(0x44, 14), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, RB(0x44, 15),
+ RB(0x44, 16), RB(0x44, 17), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, RB(0xf4, 6),
+ RB(0xf4, 7), RB(0xf4, 8), RB(0x00, 0),
+ RB(0x164, 2), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, RB(0xf4, 9),
+ RB(0xf4, 10), RB(0xf4, 11), RB(0x00, 0),
+ RB(0x164, 3), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, RB(0x54, 0),
+ RB(0x54, 1), RB(0x54, 2), RB(0x54, 3),
+ RB(0x60, 0), RB(0x60, 1), RB(0x60, 2)),
+ D_MODULE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, RB(0x90, 0),
+ RB(0x90, 1), RB(0x90, 2), RB(0x90, 3),
+ RB(0x98, 0), RB(0x98, 1), RB(0x98, 2)),
+ D_MODULE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, RB(0x154, 0),
+ RB(0x154, 1), RB(0x154, 2), RB(0x00, 0),
+ RB(0x170, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, RB(0x140, 0),
+ RB(0x140, 3), RB(0x00, 0), RB(0x140, 2),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, RB(0x0c, 0),
+ RB(0x0c, 1), RB(0x0c, 2), RB(0x0c, 3),
+ RB(0x10, 0), RB(0x10, 1), RB(0x10, 2)),
+ D_MODULE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, RB(0xc8, 0),
+ RB(0xc8, 1), RB(0xc8, 2), RB(0xc8, 3),
+ RB(0xcc, 0), RB(0xcc, 1), RB(0xcc, 2)),
+ D_MODULE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, RB(0xf4, 3),
+ RB(0xf4, 4), RB(0xf4, 5), RB(0x00, 0),
+ RB(0x164, 1), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, RB(0x40, 0),
+ RB(0x40, 1), RB(0x40, 2), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, RB(0x40, 3),
+ RB(0x40, 4), RB(0x40, 5), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, RB(0x40, 6),
+ RB(0x40, 7), RB(0x40, 8), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, RB(0x40, 9),
+ RB(0x40, 10), RB(0x40, 11), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, RB(0x40, 12),
+ RB(0x40, 13), RB(0x40, 14), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, RB(0x40, 15),
+ RB(0x40, 16), RB(0x40, 17), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, RB(0x130, 0),
+ RB(0x00, 0), RB(0x130, 1), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_SWITCH_RG, "hclk_switch_rg", CLK_REF_SYNC_D4, RB(0x188, 0),
+ RB(0x188, 1), RB(0x188, 2), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, RB(0x34, 0),
+ RB(0x34, 1), RB(0x34, 2), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, RB(0x34, 3),
+ RB(0x34, 4), RB(0x34, 5), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, RB(0x34, 6),
+ RB(0x34, 7), RB(0x34, 8), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, RB(0x40, 24),
+ RB(0x40, 25), RB(0x40, 26), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, RB(0x40, 27),
+ RB(0x40, 28), RB(0x40, 29), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, RB(0x44, 0),
+ RB(0x44, 1), RB(0x44, 2), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, RB(0x44, 3),
+ RB(0x44, 4), RB(0x44, 5), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
+ D_MODULE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, RB(0x44, 6),
+ RB(0x44, 7), RB(0x44, 8), RB(0x00, 0),
+ RB(0x00, 0), RB(0x00, 0), RB(0x00, 0)),
/*
* These are not hardware clocks, but are needed to handle the special
* case where we have a 'selector bit' that doesn't just change the
@@ -289,7 +633,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_UART,
/* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
- .dual.sel = ((0x34 / 4) << 5) | 30,
+ .dual.sel = RB(0x34, 30),
.dual.group = 0,
},
{
@@ -298,17 +642,25 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_P2_PG,
/* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
- .dual.sel = ((0xec / 4) << 5) | 24,
+ .dual.sel = RB(0xec, 24),
.dual.group = 1,
},
- D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
- D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
- D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
- D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0x760, 0x761, 0x762, 0x763),
- D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 0x764, 0x765, 0x766, 0x767),
- D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 0x768, 0x769, 0x76a, 0x76b),
- D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 0x76c, 0x76d, 0x76e, 0x76f),
- D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 0x770, 0x771, 0x772, 0x773),
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0,
+ RB(0x34, 18), RB(0x34, 19), RB(0x34, 20), RB(0x34, 21)),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0,
+ RB(0x34, 22), RB(0x34, 23), RB(0x34, 24), RB(0x34, 25)),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0,
+ RB(0x34, 26), RB(0x34, 27), RB(0x34, 28), RB(0x34, 29)),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1,
+ RB(0xec, 0), RB(0xec, 1), RB(0xec, 2), RB(0xec, 3)),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1,
+ RB(0xec, 4), RB(0xec, 5), RB(0xec, 6), RB(0xec, 7)),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1,
+ RB(0xec, 8), RB(0xec, 9), RB(0xec, 10), RB(0xec, 11)),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1,
+ RB(0xec, 12), RB(0xec, 13), RB(0xec, 14), RB(0xec, 15)),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1,
+ RB(0xec, 16), RB(0xec, 17), RB(0xec, 18), RB(0xec, 19)),
};
struct r9a06g032_priv {
@@ -341,26 +693,26 @@ int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val)
}
EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux);
-/* register/bit pairs are encoded as an uint16_t */
-static void
-clk_rdesc_set(struct r9a06g032_priv *clocks,
- u16 one, unsigned int on)
+static void clk_rdesc_set(struct r9a06g032_priv *clocks,
+ struct regbit rb, unsigned int on)
{
- u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
- u32 val = readl(reg);
+ u32 __iomem *reg = clocks->reg + (rb.reg * 4);
+ u32 val;
+
+ if (!rb.reg && !rb.bit)
+ return;
- val = (val & ~(1U << (one & 0x1f))) | ((!!on) << (one & 0x1f));
+ val = readl(reg);
+ val = (val & ~BIT(rb.bit)) | ((!!on) << rb.bit);
writel(val, reg);
}
-static int
-clk_rdesc_get(struct r9a06g032_priv *clocks,
- uint16_t one)
+static int clk_rdesc_get(struct r9a06g032_priv *clocks, struct regbit rb)
{
- u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 __iomem *reg = clocks->reg + (rb.reg * 4);
u32 val = readl(reg);
- return !!(val & (1U << (one & 0x1f)));
+ return !!(val & BIT(rb.bit));
}
/*
@@ -462,13 +814,12 @@ r9a06g032_clk_gate_set(struct r9a06g032_priv *clocks,
{
unsigned long flags;
- WARN_ON(!g->gate);
+ WARN_ON(!g->gate.reg && !g->gate.bit);
spin_lock_irqsave(&clocks->lock, flags);
clk_rdesc_set(clocks, g->gate, on);
/* De-assert reset */
- if (g->reset)
- clk_rdesc_set(clocks, g->reset, 1);
+ clk_rdesc_set(clocks, g->reset, 1);
spin_unlock_irqrestore(&clocks->lock, flags);
/* Hardware manual recommends 5us delay after enabling clock & reset */
@@ -478,15 +829,12 @@ r9a06g032_clk_gate_set(struct r9a06g032_priv *clocks,
* associated SLVRDY bit in the System Controller that needs to be set
* so that the FlexWAY bus fabric passes on the read/write requests.
*/
- if (g->ready || g->midle) {
- spin_lock_irqsave(&clocks->lock, flags);
- if (g->ready)
- clk_rdesc_set(clocks, g->ready, on);
- /* Clear 'Master Idle Request' bit */
- if (g->midle)
- clk_rdesc_set(clocks, g->midle, !on);
- spin_unlock_irqrestore(&clocks->lock, flags);
- }
+ spin_lock_irqsave(&clocks->lock, flags);
+ clk_rdesc_set(clocks, g->ready, on);
+ /* Clear 'Master Idle Request' bit */
+ clk_rdesc_set(clocks, g->midle, !on);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+
/* Note: We don't wait for FlexWAY Socket Connection signal */
}
@@ -510,7 +858,7 @@ static int r9a06g032_clk_gate_is_enabled(struct clk_hw *hw)
struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
/* if clock is in reset, the gate might be on, and still not 'be' on */
- if (g->gate.reset && !clk_rdesc_get(g->clocks, g->gate.reset))
+ if (g->gate.reset.reg && !clk_rdesc_get(g->clocks, g->gate.reset))
return 0;
return clk_rdesc_get(g->clocks, g->gate.gate);
@@ -749,7 +1097,7 @@ struct r9a06g032_clk_bitsel {
struct clk_hw hw;
struct r9a06g032_priv *clocks;
u16 index;
- u16 selector; /* selector register + bit */
+ struct regbit selector; /* selector register + bit */
};
#define to_clk_bitselect(_hw) \
@@ -818,7 +1166,7 @@ struct r9a06g032_clk_dualgate {
struct clk_hw hw;
struct r9a06g032_priv *clocks;
u16 index;
- u16 selector; /* selector register + bit */
+ struct regbit selector; /* selector register + bit */
struct r9a06g032_gate gate[2];
};
@@ -871,7 +1219,7 @@ static struct clk *
r9a06g032_register_dualgate(struct r9a06g032_priv *clocks,
const char *parent_name,
const struct r9a06g032_clkdesc *desc,
- uint16_t sel)
+ struct regbit sel)
{
struct r9a06g032_clk_dualgate *g;
struct clk *clk;
@@ -918,6 +1266,29 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
+{
+ struct device_node *usbf_np = NULL;
+ u32 usb;
+
+ while ((usbf_np = of_find_compatible_node(usbf_np, NULL,
+ "renesas,rzn1-usbf"))) {
+ if (of_device_is_available(usbf_np))
+ break;
+ }
+
+ usb = readl(clocks->reg + R9A06G032_SYSCTRL_USB);
+ if (usbf_np) {
+ /* 1 host and 1 device mode */
+ usb &= ~R9A06G032_SYSCTRL_USB_H2MODE;
+ of_node_put(usbf_np);
+ } else {
+ /* 2 hosts mode */
+ usb |= R9A06G032_SYSCTRL_USB_H2MODE;
+ }
+ writel(usb, clocks->reg + R9A06G032_SYSCTRL_USB);
+}
+
static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -926,7 +1297,7 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
struct clk **clks;
struct clk *mclk;
unsigned int i;
- u16 uart_group_sel[2];
+ struct regbit uart_group_sel[2];
int error;
clocks = devm_kzalloc(dev, sizeof(*clocks), GFP_KERNEL);
@@ -947,6 +1318,9 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
clocks->reg = of_iomap(np, 0);
if (WARN_ON(!clocks->reg))
return -ENOMEM;
+
+ r9a06g032_init_h2mode(clocks);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index f5550fccb029..c597414a94d8 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -182,7 +182,7 @@ static const struct {
};
static const struct {
- struct rzg2l_mod_clk common[75];
+ struct rzg2l_mod_clk common[79];
#ifdef CONFIG_CLK_R9A07G054
struct rzg2l_mod_clk drp[0];
#endif
@@ -250,6 +250,14 @@ static const struct {
0x558, 1),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
0x558, 2),
+ DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0),
+ DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
+ 0x564, 1),
+ DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
+ 0x564, 2),
+ DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
+ 0x564, 3),
DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
0x568, 0),
DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
@@ -368,6 +376,9 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_GPU_RESETN, 0x858, 0),
DEF_RST(R9A07G044_GPU_AXI_RESETN, 0x858, 1),
DEF_RST(R9A07G044_GPU_ACE_RESETN, 0x858, 2),
+ DEF_RST(R9A07G044_CRU_CMN_RSTB, 0x864, 0),
+ DEF_RST(R9A07G044_CRU_PRESETN, 0x864, 1),
+ DEF_RST(R9A07G044_CRU_ARESETN, 0x864, 2),
DEF_RST(R9A07G044_MIPI_DSI_CMN_RSTB, 0x868, 0),
DEF_RST(R9A07G044_MIPI_DSI_ARESET_N, 0x868, 1),
DEF_RST(R9A07G044_MIPI_DSI_PRESET_N, 0x868, 2),
@@ -412,6 +423,11 @@ static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
};
+static const unsigned int r9a07g044_no_pm_mod_clks[] = {
+ MOD_CLK_BASE + R9A07G044_CRU_SYSCLK,
+ MOD_CLK_BASE + R9A07G044_CRU_VCLK,
+};
+
#ifdef CONFIG_CLK_R9A07G044
const struct rzg2l_cpg_info r9a07g044_cpg_info = {
/* Core Clocks */
@@ -429,6 +445,10 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
.num_mod_clks = ARRAY_SIZE(mod_clks.common),
.num_hw_mod_clks = R9A07G044_TSU_PCLK + 1,
+ /* No PM Module Clocks */
+ .no_pm_mod_clks = r9a07g044_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a07g044_no_pm_mod_clks),
+
/* Resets */
.resets = r9a07g044_resets,
.num_resets = R9A07G044_TSU_PRESETN + 1, /* Last reset ID + 1 */
@@ -454,6 +474,10 @@ const struct rzg2l_cpg_info r9a07g054_cpg_info = {
.num_mod_clks = ARRAY_SIZE(mod_clks.common) + ARRAY_SIZE(mod_clks.drp),
.num_hw_mod_clks = R9A07G054_STPAI_ACLK_DRP + 1,
+ /* No PM Module Clocks */
+ .no_pm_mod_clks = r9a07g044_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a07g044_no_pm_mod_clks),
+
/* Resets */
.resets = r9a07g044_resets,
.num_resets = R9A07G054_STPAI_ARESETN + 1, /* Last reset ID + 1 */
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index fbef1b35d254..3d06baf5061d 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -23,11 +23,14 @@
#define DIV_A DDIV_PACK(0x200, 0, 3)
#define DIV_B DDIV_PACK(0x204, 0, 2)
+#define DIV_D DDIV_PACK(0x204, 4, 2)
#define DIV_E DDIV_PACK(0x204, 8, 1)
#define DIV_W DDIV_PACK(0x328, 0, 3)
#define SEL_B SEL_PLL_PACK(0x214, 0, 1)
+#define SEL_D SEL_PLL_PACK(0x214, 1, 1)
#define SEL_E SEL_PLL_PACK(0x214, 2, 1)
+#define SEL_SDI SEL_PLL_PACK(0x300, 0, 1)
#define SEL_W0 SEL_PLL_PACK(0x32C, 0, 1)
enum clk_ids {
@@ -50,11 +53,14 @@ enum clk_ids {
CLK_PLL4,
CLK_DIV_A,
CLK_DIV_B,
+ CLK_DIV_D,
CLK_DIV_E,
CLK_DIV_W,
CLK_SEL_B,
CLK_SEL_B_D2,
+ CLK_SEL_D,
CLK_SEL_E,
+ CLK_SEL_SDI,
CLK_SEL_W0,
/* Module Clocks */
@@ -81,6 +87,14 @@ static const struct clk_div_table dtable_divb[] = {
{0, 0},
};
+static const struct clk_div_table dtable_divd[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_divw[] = {
{0, 6},
{1, 7},
@@ -94,8 +108,10 @@ static const struct clk_div_table dtable_divw[] = {
/* Mux clock tables */
static const char * const sel_b[] = { ".main", ".divb" };
+static const char * const sel_d[] = { ".main", ".divd" };
static const char * const sel_e[] = { ".main", ".dive" };
static const char * const sel_w[] = { ".main", ".divw" };
+static const char * const sel_sdi[] = { ".main", ".pll2_200" };
static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
/* External Clock Inputs */
@@ -115,11 +131,14 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
DEF_DIV_RO(".diva", CLK_DIV_A, CLK_PLL1, DIV_A, dtable_diva),
DEF_DIV_RO(".divb", CLK_DIV_B, CLK_PLL2_400, DIV_B, dtable_divb),
+ DEF_DIV_RO(".divd", CLK_DIV_D, CLK_PLL2_200, DIV_D, dtable_divd),
DEF_DIV_RO(".dive", CLK_DIV_E, CLK_PLL2_100, DIV_E, NULL),
DEF_DIV_RO(".divw", CLK_DIV_W, CLK_PLL4, DIV_W, dtable_divw),
DEF_MUX_RO(".selb", CLK_SEL_B, SEL_B, sel_b),
+ DEF_MUX_RO(".seld", CLK_SEL_D, SEL_D, sel_d),
DEF_MUX_RO(".sele", CLK_SEL_E, SEL_E, sel_e),
+ DEF_MUX(".selsdi", CLK_SEL_SDI, SEL_SDI, sel_sdi),
DEF_MUX(".selw0", CLK_SEL_W0, SEL_W0, sel_w),
DEF_FIXED(".selb_d2", CLK_SEL_B_D2, CLK_SEL_B, 1, 2),
@@ -128,14 +147,55 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
+ DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0),
+ DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1),
+ DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2),
+ DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3),
+ DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4),
+ DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5),
+ DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6),
+ DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7),
+ DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8),
+ DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9),
+ DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10),
+ DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11),
DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
+ DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4),
+ DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5),
+ DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6),
DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12),
+ DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0),
+ DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4),
+ DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5),
+ DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6),
+ DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7),
+ DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8),
+ DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9),
+ DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10),
+ DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11),
DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12),
+ DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0),
+ DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4),
+ DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5),
+ DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6),
+ DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7),
+ DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8),
+ DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9),
+ DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10),
+ DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11),
DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
+ DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0),
+ DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4),
+ DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5),
+ DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6),
+ DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7),
+ DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8),
+ DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9),
+ DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10),
DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
@@ -143,8 +203,18 @@ static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
static const struct rzg2l_reset r9a09g011_resets[] = {
DEF_RST(R9A09G011_PFC_PRESETN, 0x600, 2),
+ DEF_RST_MON(R9A09G011_SDI0_IXRST, 0x608, 0, 6),
+ DEF_RST_MON(R9A09G011_SDI1_IXRST, 0x608, 1, 7),
+ DEF_RST_MON(R9A09G011_EMM_IXRST, 0x608, 2, 8),
+ DEF_RST(R9A09G011_USB_PRESET_N, 0x608, 7),
+ DEF_RST(R9A09G011_USB_DRD_RESET, 0x608, 8),
+ DEF_RST(R9A09G011_USB_ARESETN_P, 0x608, 9),
+ DEF_RST(R9A09G011_USB_ARESETN_H, 0x608, 10),
DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+ DEF_RST(R9A09G011_TIM_GPB_PRESETN, 0x614, 1),
+ DEF_RST(R9A09G011_TIM_GPC_PRESETN, 0x614, 2),
+ DEF_RST_MON(R9A09G011_PWM_GPF_PRESETN, 0x614, 5, 23),
DEF_RST(R9A09G011_IIC_GPA_PRESETN, 0x614, 8),
DEF_RST(R9A09G011_IIC_GPB_PRESETN, 0x614, 9),
DEF_RST_MON(R9A09G011_WDT0_PRESETN, 0x614, 12, 19),
@@ -152,6 +222,9 @@ static const struct rzg2l_reset r9a09g011_resets[] = {
static const unsigned int r9a09g011_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A09G011_CA53_CLK,
+ MOD_CLK_BASE + R9A09G011_CPERI_GRPB_PCLK,
+ MOD_CLK_BASE + R9A09G011_CPERI_GRPC_PCLK,
+ MOD_CLK_BASE + R9A09G011_CPERI_GRPF_PCLK,
MOD_CLK_BASE + R9A09G011_GIC_CLK,
MOD_CLK_BASE + R9A09G011_SYC_CNT_CLK,
MOD_CLK_BASE + R9A09G011_URT_PCLK,
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index e668f23c75e7..b3ef62fa612e 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -310,20 +310,11 @@ static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
static u32 cpg_quirks __initdata;
-#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
- .soc_id = "r8a7795", .revision = "ES1.0",
- .data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
- },
- {
- .soc_id = "r8a7795", .revision = "ES1.*",
- .data = (void *)(RCKCR_CKSEL),
- },
- {
.soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)(RCKCR_CKSEL),
},
@@ -355,9 +346,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
* multiplier when cpufreq changes between normal and boost
* modes.
*/
- mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
return cpg_pll_clk_register(core->name, __clk_get_name(parent),
- base, mult, CPG_PLL0CR, 0);
+ base, 2, CPG_PLL0CR, 0);
case CLK_TYPE_GEN3_PLL1:
mult = cpg_pll_config->pll1_mult;
@@ -370,9 +360,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
* multiplier when cpufreq changes between normal and boost
* modes.
*/
- mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
return cpg_pll_clk_register(core->name, __clk_get_name(parent),
- base, mult, CPG_PLL2CR, 2);
+ base, 2, CPG_PLL2CR, 2);
case CLK_TYPE_GEN3_PLL3:
mult = cpg_pll_config->pll3_mult;
@@ -388,8 +377,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
*/
value = readl(base + CPG_PLL4CR);
mult = (((value >> 24) & 0x7f) + 1) * 2;
- if (cpg_quirks & PLL_ERRATA)
- mult *= 2;
break;
case CLK_TYPE_GEN3_SDH:
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index e27832e5114f..c68d8b987054 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include "renesas-cpg-mssr.h"
@@ -27,6 +28,152 @@ static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
+#define CPG_PLLECR 0x0820 /* PLL Enable Control Register */
+
+#define CPG_PLLECR_PLLST(n) BIT(8 + ((n) < 3 ? (n) - 1 : \
+ (n) > 3 ? (n) + 1 : n)) /* PLLn Circuit Status */
+
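+/* Worked out from the macro above, for illustration only (not part of the
+ * patch), the status bit selected for each PLL index is:
+ *   CPG_PLLECR_PLLST(1) -> BIT(8)    CPG_PLLECR_PLLST(2) -> BIT(9)
+ *   CPG_PLLECR_PLLST(3) -> BIT(11)   CPG_PLLECR_PLLST(4) -> BIT(13)
+ *   CPG_PLLECR_PLLST(6) -> BIT(15)
+ */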
+#define CPG_PLL1CR0 0x830 /* PLLn Control Registers */
+#define CPG_PLL1CR1 0x8b0
+#define CPG_PLL2CR0 0x834
+#define CPG_PLL2CR1 0x8b8
+#define CPG_PLL3CR0 0x83c
+#define CPG_PLL3CR1 0x8c0
+#define CPG_PLL4CR0 0x844
+#define CPG_PLL4CR1 0x8c8
+#define CPG_PLL6CR0 0x84c
+#define CPG_PLL6CR1 0x8d8
+
+#define CPG_PLLxCR0_KICK BIT(31)
+#define CPG_PLLxCR0_NI GENMASK(27, 20) /* Integer mult. factor */
+#define CPG_PLLxCR0_SSMODE GENMASK(18, 16) /* PLL mode */
+#define CPG_PLLxCR0_SSMODE_FM BIT(18) /* Fractional Multiplication */
+#define CPG_PLLxCR0_SSMODE_DITH BIT(17) /* Frequency Dithering */
+#define CPG_PLLxCR0_SSMODE_CENT BIT(16) /* Center (vs. Down) Spread Dithering */
+#define CPG_PLLxCR0_SSFREQ GENMASK(14, 8) /* SSCG Modulation Frequency */
+#define CPG_PLLxCR0_SSDEPT GENMASK(6, 0) /* SSCG Modulation Depth */
+
+#define SSMODE_FM BIT(2) /* Fractional Multiplication */
+#define SSMODE_DITHER BIT(1) /* Frequency Dithering */
+#define SSMODE_CENTER BIT(0) /* Center (vs. Down) Spread Dithering */
+
+/* PLL Clocks */
+struct cpg_pll_clk {
+ struct clk_hw hw;
+ void __iomem *pllcr0_reg;
+ void __iomem *pllecr_reg;
+ u32 pllecr_pllst_mask;
+};
+
+#define to_pll_clk(_hw) container_of(_hw, struct cpg_pll_clk, hw)
+
+static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ unsigned int mult;
+
+ mult = FIELD_GET(CPG_PLLxCR0_NI, readl(pll_clk->pllcr0_reg)) + 1;
+
+ return parent_rate * mult * 2;
+}
+
+static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned int min_mult, max_mult, mult;
+ unsigned long prate;
+
+ prate = req->best_parent_rate * 2;
+ min_mult = max(div64_ul(req->min_rate, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate, prate), 256ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
+ mult = clamp(mult, min_mult, max_mult);
+
+ req->rate = prate * mult;
+ return 0;
+}
+
+static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ mult = DIV_ROUND_CLOSEST_ULL(rate, parent_rate * 2);
+ mult = clamp(mult, 1U, 256U);
+
+ if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK)
+ return -EBUSY;
+
+ cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI,
+ FIELD_PREP(CPG_PLLxCR0_NI, mult - 1));
+
+ /*
+ * Set KICK bit in PLLxCR0 to update hardware setting and wait for
+ * clock change completion.
+ */
+ cpg_reg_modify(pll_clk->pllcr0_reg, 0, CPG_PLLxCR0_KICK);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~45 µs are needed, independently of the CPU rate.
+ * Since this value might be dependent on external xtal rate, pll
+ * rate or even the other emulation clocks rate, use 1000 as a
+ * "super" safe value.
+ */
+ return readl_poll_timeout(pll_clk->pllecr_reg, val,
+ val & pll_clk->pllecr_pllst_mask, 0, 1000);
+}
+
+static const struct clk_ops cpg_pll_clk_ops = {
+ .recalc_rate = cpg_pll_clk_recalc_rate,
+ .determine_rate = cpg_pll_clk_determine_rate,
+ .set_rate = cpg_pll_clk_set_rate,
+};
+
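+/* A worked example of the NI arithmetic above, with hypothetical rates,
+ * for illustration only (not part of the patch):
+ *   parent_rate = 20 MHz  ->  prate = parent_rate * 2 = 40 MHz
+ *   req->rate   = 3 GHz   ->  mult  = DIV_ROUND_CLOSEST_ULL(3 GHz, 40 MHz) = 75
+ *   clamp(75, 1, 256)     ->  rate  = 40 MHz * 75 = 3 GHz
+ * set_rate() then programs NI = mult - 1 = 74, sets KICK and polls PLLECR
+ * until the PLLST bit for this PLL is set again.
+ */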
+static struct clk * __init cpg_pll_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base,
+ unsigned int cr0_offset,
+ unsigned int cr1_offset,
+ unsigned int index)
+
+{
+ struct cpg_pll_clk *pll_clk;
+ struct clk_init_data init = {};
+ struct clk *clk;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_pll_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll_clk->hw.init = &init;
+ pll_clk->pllcr0_reg = base + cr0_offset;
+ pll_clk->pllecr_reg = base + CPG_PLLECR;
+ pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index);
+
+ /* Disable Fractional Multiplication and Frequency Dithering */
+ writel(0, base + cr1_offset);
+ cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_SSMODE, 0);
+
+ clk = clk_register(NULL, &pll_clk->hw);
+ if (IS_ERR(clk))
+ kfree(pll_clk);
+
+ return clk;
+}
/*
* Z0 Clock & Z1 Clock
*/
@@ -205,6 +352,15 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll1_div;
break;
+ case CLK_TYPE_GEN4_PLL2_VAR:
+ /*
+ * PLL2 is implemented as a custom clock, to change the
+ * multiplier when cpufreq changes between normal and boost
+ * modes.
+ */
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, CPG_PLL2CR0, CPG_PLL2CR1, 2);
+
case CLK_TYPE_GEN4_PLL2:
mult = cpg_pll_config->pll2_mult;
div = cpg_pll_config->pll2_div;
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index 0b15dcfdca7b..006537e29e4e 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -13,10 +13,11 @@ enum rcar_gen4_clk_types {
CLK_TYPE_GEN4_MAIN = CLK_TYPE_CUSTOM,
CLK_TYPE_GEN4_PLL1,
CLK_TYPE_GEN4_PLL2,
+ CLK_TYPE_GEN4_PLL2_VAR,
CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */
CLK_TYPE_GEN4_PLL3,
- CLK_TYPE_GEN4_PLL5,
CLK_TYPE_GEN4_PLL4,
+ CLK_TYPE_GEN4_PLL5,
CLK_TYPE_GEN4_PLL6,
CLK_TYPE_GEN4_SDSRC,
CLK_TYPE_GEN4_SDH,
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index 684d8937965e..17c110978e33 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -125,15 +125,13 @@ static int rcar_usb2_clock_sel_resume(struct device *dev)
return 0;
}
-static int rcar_usb2_clock_sel_remove(struct platform_device *pdev)
+static void rcar_usb2_clock_sel_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
of_clk_del_provider(dev->of_node);
pm_runtime_put(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
@@ -215,7 +213,7 @@ static struct platform_driver rcar_usb2_clock_sel_driver = {
.pm = &rcar_usb2_clock_sel_pm_ops,
},
.probe = rcar_usb2_clock_sel_probe,
- .remove = rcar_usb2_clock_sel_remove,
+ .remove_new = rcar_usb2_clock_sel_remove,
};
builtin_platform_driver(rcar_usb2_clock_sel_driver);
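For context: .remove_new is the void-returning variant of the platform-driver remove callback, which is why the conversion above drops the unconditional "return 0;". A minimal sketch with hypothetical names, for illustration only:

	static int foo_clk_probe(struct platform_device *pdev)
	{
		return 0;	/* registration would happen here */
	}

	static void foo_clk_remove(struct platform_device *pdev)
	{
		/* undo what probe did; errors can no longer be returned here */
	}

	static struct platform_driver foo_clk_driver = {
		.driver		= { .name = "foo-clk" },
		.probe		= foo_clk_probe,
		.remove_new	= foo_clk_remove,
	};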
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1a0cdf001b2f..e9c0e341380e 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -61,7 +61,7 @@ static const u16 mstpsr_for_gen4[] = {
0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
- 0x2E60, 0x2E64, 0x2E68, 0x2E6C,
+ 0x2E60, 0x2E64, 0x2E68, 0x2E6C, 0x2E70, 0x2E74,
};
/*
@@ -77,7 +77,7 @@ static const u16 mstpcr_for_gen4[] = {
0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
- 0x2D60, 0x2D64, 0x2D68, 0x2D6C,
+ 0x2D60, 0x2D64, 0x2D68, 0x2D6C, 0x2D70, 0x2D74,
};
/*
@@ -103,7 +103,7 @@ static const u16 srcr_for_gen4[] = {
0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
- 0x2C60, 0x2C64, 0x2C68, 0x2C6C,
+ 0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
};
/*
@@ -119,7 +119,7 @@ static const u16 srstclr_for_gen4[] = {
0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
- 0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC,
+ 0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC, 0x2CF0, 0x2CF4,
};
/**
@@ -943,9 +943,8 @@ static int cpg_mssr_resume_noirq(struct device *dev)
}
if (!i)
- dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
- priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
- "STB" : "SMSTP", reg, oldval & mask);
+ dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
+ oldval & mask);
}
return 0;
@@ -989,7 +988,6 @@ static int __init cpg_mssr_common_init(struct device *dev,
goto out_err;
}
- cpg_mssr_priv = priv;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
@@ -1019,6 +1017,8 @@ static int __init cpg_mssr_common_init(struct device *dev,
if (error)
goto out_err;
+ cpg_mssr_priv = priv;
+
return 0;
out_err:
@@ -1113,19 +1113,6 @@ static int __init cpg_mssr_init(void)
subsys_initcall(cpg_mssr_init);
-void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
- unsigned int num_core_clks,
- unsigned int first_clk,
- unsigned int last_clk)
-{
- unsigned int i;
-
- for (i = 0; i < num_core_clks; i++)
- if (core_clks[i].id >= first_clk &&
- core_clks[i].id <= last_clk)
- core_clks[i].name = NULL;
-}
-
void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
unsigned int num_mod_clks,
const unsigned int *clks, unsigned int n)
@@ -1139,19 +1126,4 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
}
}
-void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
- unsigned int num_mod_clks,
- const struct mssr_mod_reparent *clks,
- unsigned int n)
-{
- unsigned int i, j;
-
- for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
- if (mod_clks[i].id == clks[j].clk) {
- mod_clks[i].parent = clks[j].parent;
- j++;
- }
-}
-
MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 1c3c057d17f5..80c5b462924a 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -187,21 +187,7 @@ void __init cpg_mssr_early_init(struct device_node *np,
/*
* Helpers for fixing up clock tables depending on SoC revision
*/
-
-struct mssr_mod_reparent {
- unsigned int clk, parent;
-};
-
-
-extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks,
- unsigned int num_core_clks,
- unsigned int first_clk,
- unsigned int last_clk);
extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
unsigned int num_mod_clks,
const unsigned int *clks, unsigned int n);
-extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
- unsigned int num_mod_clks,
- const struct mssr_mod_reparent *clks,
- unsigned int n);
#endif
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 4bf40f6ccd1d..93b02cdc98c2 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1440,4 +1440,3 @@ static int __init rzg2l_cpg_init(void)
subsys_initcall(rzg2l_cpg_init);
MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 306910a3a0d3..9ebd6c451b3d 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1263,7 +1263,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
RK3399_CLKGATE_CON(10), 7, GFLAGS),
- COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
+ COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),
/* gic */
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index b7ce3fbd6fa6..6994165e0395 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -13,15 +13,25 @@
#include "clk.h"
/*
- * GATE with additional linked clock. Downstream enables the linked clock
- * (via runtime PM) whenever the gate is enabled. The downstream implementation
- * does this via separate clock nodes for each of the linked gate clocks,
- * which leaks parts of the clock tree into DT. It is unclear why this is
- * actually needed and things work without it for simple use cases. Thus
- * the linked clock is ignored for now.
+ * Recent Rockchip SoCs have a new hardware block called Native Interface
+ * Unit (NIU), which gates clocks to devices behind them. These effectively
+ * need two parent clocks.
+ *
+ * Downstream enables the linked clock via runtime PM whenever the gate is
+ * enabled. This implementation uses separate clock nodes for each of the
+ * linked gate clocks, which leaks parts of the clock tree into DT.
+ *
+ * The GATE_LINK macro instead takes the second parent via 'linkname', but
+ * ignores the information. Once the clock framework is ready to handle it, the
+ * information should be passed on here. But since these clocks are required to
+ * access multiple relevant IP blocks, such as PCIe or USB, we mark all linked
+ * clocks critical until a better solution is available. This will waste some
+ * power, but avoids leaking implementation details into DT or hanging the
+ * system.
*/
#define GATE_LINK(_id, cname, pname, linkname, f, o, b, gf) \
GATE(_id, cname, pname, f, o, b, gf)
+#define RK3588_LINKED_CLK CLK_IS_CRITICAL
#define RK3588_GRF_SOC_STATUS0 0x600
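For illustration only (not part of the patch), an entry from the branch table below expands under this macro with the link parent dropped; where RK3588_LINKED_CLK is passed as the flag, the clock simply becomes critical:

	/* as written in the table: */
	GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", "aclk_nvm_root",
		  RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS)
	/* after preprocessing: */
	GATE(HCLK_NVM, "hclk_nvm", "hclk_nvm_root",
	     CLK_IS_CRITICAL, RK3588_CLKGATE_CON(31), 2, GFLAGS)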
@@ -1446,7 +1456,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(77), 0, 2, MFLAGS,
RK3588_CLKGATE_CON(31), 0, GFLAGS),
- COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, 0,
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(77), 7, 1, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(31), 1, GFLAGS),
GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
@@ -1675,13 +1685,13 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(42), 9, GFLAGS),
/* vdpu */
- COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, 0,
+ COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(98), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(44), 0, GFLAGS),
COMPOSITE_NODIV(ACLK_VDPU_LOW_ROOT, "aclk_vdpu_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(98), 7, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, 0,
+ COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(98), 9, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 2, GFLAGS),
COMPOSITE(ACLK_JPEG_DECODER_ROOT, "aclk_jpeg_decoder_root", gpll_cpll_aupll_spll_p, 0,
@@ -1732,9 +1742,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_RKVENC0_ROOT, "aclk_rkvenc0_root", gpll_cpll_npll_p, 0,
RK3588_CLKSEL_CON(102), 7, 2, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(47), 1, GFLAGS),
- GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", 0,
+ GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", RK3588_LINKED_CLK,
RK3588_CLKGATE_CON(47), 4, GFLAGS),
- GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", 0,
+ GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", RK3588_LINKED_CLK,
RK3588_CLKGATE_CON(47), 5, GFLAGS),
COMPOSITE(CLK_RKVENC0_CORE, "clk_rkvenc0_core", gpll_cpll_aupll_npll_p, 0,
RK3588_CLKSEL_CON(102), 14, 2, MFLAGS, 9, 5, DFLAGS,
@@ -1744,10 +1754,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(48), 6, GFLAGS),
/* vi */
- COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, 0,
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(106), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(49), 0, GFLAGS),
- COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, 0,
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(106), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(49), 1, GFLAGS),
COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
@@ -1919,10 +1929,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_dmyaupll_npll_spll_p, 0,
RK3588_CLKSEL_CON(110), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(52), 0, GFLAGS),
- COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, 0,
+ COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(110), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, 0,
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
RK3588_CLKSEL_CON(110), 10, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 2, GFLAGS),
COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
@@ -2425,7 +2435,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", "aclk_vi_root", 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", "hclk_vi_root", 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
- GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", "aclk_nvm_root", 0, RK3588_CLKGATE_CON(31), 2, GFLAGS),
+ GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", "aclk_nvm_root", RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", "aclk_vo1usb_top_root", 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", "hclk_vo1usb_top_root", 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", "aclk_vdpu_root", 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index a8646794575a..4059d9365ae6 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -244,10 +244,8 @@ static struct clk *rockchip_clk_register_frac_branch(
div->reg = base + muxdiv_offset;
div->mshift = 16;
div->mwidth = 16;
- div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
div->nshift = 0;
div->nwidth = 16;
- div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
div->lock = lock;
div->approximation = rockchip_fractional_approximation;
div_ops = &clk_fractional_divider_ops;
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 8e8245ab3fd1..c07bb50513bf 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -94,38 +94,6 @@ config EXYNOS_CLKOUT
status of certain clocks from the SoC, but it could also be tied to
other devices as an input clock.
-# For S3C24XX platforms, select following symbols:
-config S3C2410_COMMON_CLK
- bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
- select COMMON_CLK_SAMSUNG
- help
- Support for the clock controller present on the Samsung
- S3C2410/S3C2440/S3C2442 SoCs. Choose Y here only if you build for
- this SoC.
-
-config S3C2410_COMMON_DCLK
- bool
- select COMMON_CLK_SAMSUNG
- select REGMAP_MMIO
- help
- Support for the dclk clock controller present on the Samsung
- S3C2410/S3C2412/S3C2440/S3C2443 SoCs. Choose Y here only if you build
- for this SoC.
-
-config S3C2412_COMMON_CLK
- bool "Samsung S3C2412 clock controller support" if COMPILE_TEST
- select COMMON_CLK_SAMSUNG
- help
- Support for the clock controller present on the Samsung S3C2412 SoCs.
- Choose Y here only if you build for this SoC.
-
-config S3C2443_COMMON_CLK
- bool "Samsung S3C2443 clock controller support" if COMPILE_TEST
- select COMMON_CLK_SAMSUNG
- help
- Support for the clock controller present on the Samsung
- S3C2416/S3C2443 SoCs. Choose Y here only if you build for this SoC.
-
config TESLA_FSD_COMMON_CLK
bool "Tesla FSD clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 239d9eead77f..ebbeacabe88f 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,10 +21,6 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
-obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
-obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
-obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
-obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o
diff --git a/drivers/clk/samsung/clk-exynos-arm64.c b/drivers/clk/samsung/clk-exynos-arm64.c
index b921b9a1134a..7d8937caf22a 100644
--- a/drivers/clk/samsung/clk-exynos-arm64.c
+++ b/drivers/clk/samsung/clk-exynos-arm64.c
@@ -10,6 +10,9 @@
*/
#include <linux/clk.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include "clk-exynos-arm64.h"
@@ -21,6 +24,19 @@
#define GATE_OFF_START 0x2000
#define GATE_OFF_END 0x2fff
+struct exynos_arm64_cmu_data {
+ struct samsung_clk_reg_dump *clk_save;
+ unsigned int nr_clk_save;
+ const struct samsung_clk_reg_dump *clk_suspend;
+ unsigned int nr_clk_suspend;
+
+ struct clk *clk;
+ struct clk **pclks;
+ int nr_pclks;
+
+ struct samsung_clk_provider *ctx;
+};
+
/**
* exynos_arm64_init_clocks - Set clocks initial configuration
* @np: CMU device tree node with "reg" property (CMU addr)
@@ -57,6 +73,83 @@ static void __init exynos_arm64_init_clocks(struct device_node *np,
}
/**
+ * exynos_arm64_enable_bus_clk - Enable parent clock of specified CMU
+ *
+ * @dev: Device object; may be NULL if this function is not being
+ * called from platform driver probe function
+ * @np: CMU device tree node
+ * @cmu: CMU data
+ *
+ * Keep CMU parent clock running (needed for CMU registers access).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int __init exynos_arm64_enable_bus_clk(struct device *dev,
+ struct device_node *np, const struct samsung_cmu_info *cmu)
+{
+ struct clk *parent_clk;
+
+ if (!cmu->clk_name)
+ return 0;
+
+ if (dev) {
+ struct exynos_arm64_cmu_data *data;
+
+ parent_clk = clk_get(dev, cmu->clk_name);
+ data = dev_get_drvdata(dev);
+ if (data)
+ data->clk = parent_clk;
+ } else {
+ parent_clk = of_clk_get_by_name(np, cmu->clk_name);
+ }
+
+ if (IS_ERR(parent_clk))
+ return PTR_ERR(parent_clk);
+
+ return clk_prepare_enable(parent_clk);
+}
+
+static int __init exynos_arm64_cmu_prepare_pm(struct device *dev,
+ const struct samsung_cmu_info *cmu)
+{
+ struct exynos_arm64_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ data->clk_save = samsung_clk_alloc_reg_dump(cmu->clk_regs,
+ cmu->nr_clk_regs);
+ if (!data->clk_save)
+ return -ENOMEM;
+
+ data->nr_clk_save = cmu->nr_clk_regs;
+ data->clk_suspend = cmu->suspend_regs;
+ data->nr_clk_suspend = cmu->nr_suspend_regs;
+ data->nr_pclks = of_clk_get_parent_count(dev->of_node);
+ if (!data->nr_pclks)
+ return 0;
+
+ data->pclks = devm_kcalloc(dev, sizeof(struct clk *), data->nr_pclks,
+ GFP_KERNEL);
+ if (!data->pclks) {
+ kfree(data->clk_save);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < data->nr_pclks; i++) {
+ struct clk *clk = of_clk_get(dev->of_node, i);
+
+ if (IS_ERR(clk)) {
+ kfree(data->clk_save);
+ while (--i >= 0)
+ clk_put(data->pclks[i]);
+ return PTR_ERR(clk);
+ }
+ data->pclks[i] = clk;
+ }
+
+ return 0;
+}
+
+/**
* exynos_arm64_register_cmu - Register specified Exynos CMU domain
* @dev: Device object; may be NULL if this function is not being
* called from platform driver probe function
@@ -72,23 +165,127 @@ static void __init exynos_arm64_init_clocks(struct device_node *np,
void __init exynos_arm64_register_cmu(struct device *dev,
struct device_node *np, const struct samsung_cmu_info *cmu)
{
- /* Keep CMU parent clock running (needed for CMU registers access) */
- if (cmu->clk_name) {
- struct clk *parent_clk;
-
- if (dev)
- parent_clk = clk_get(dev, cmu->clk_name);
- else
- parent_clk = of_clk_get_by_name(np, cmu->clk_name);
-
- if (IS_ERR(parent_clk)) {
- pr_err("%s: could not find bus clock %s; err = %ld\n",
- __func__, cmu->clk_name, PTR_ERR(parent_clk));
- } else {
- clk_prepare_enable(parent_clk);
- }
- }
+ int err;
+
+ /*
+ * Try to boot even if the parent clock enablement fails, as it might
+ * already be enabled by the bootloader.
+ */
+ err = exynos_arm64_enable_bus_clk(dev, np, cmu);
+ if (err)
+ pr_err("%s: could not enable bus clock %s; err = %d\n",
+ __func__, cmu->clk_name, err);
exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
samsung_cmu_register_one(np, cmu);
}
+
+/**
+ * exynos_arm64_register_cmu_pm - Register Exynos CMU domain with PM support
+ *
+ * @pdev: Platform device object
+ * @set_manual: If true, set gate clocks to manual mode
+ *
+ * It's a version of exynos_arm64_register_cmu() with PM support. Should be
+ * called from the probe function of the platform driver.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int __init exynos_arm64_register_cmu_pm(struct platform_device *pdev,
+ bool set_manual)
+{
+ const struct samsung_cmu_info *cmu;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos_arm64_cmu_data *data;
+ void __iomem *reg_base;
+ int ret;
+
+ cmu = of_device_get_match_data(dev);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = exynos_arm64_cmu_prepare_pm(dev, cmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Try to boot even if the parent clock enablement fails, as it might
+ * already be enabled by the bootloader.
+ */
+ ret = exynos_arm64_enable_bus_clk(dev, NULL, cmu);
+ if (ret)
+ dev_err(dev, "%s: could not enable bus clock %s; err = %d\n",
+ __func__, cmu->clk_name, ret);
+
+ if (set_manual)
+ exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ data->ctx = samsung_clk_init(dev, reg_base, cmu->nr_clk_ids);
+
+ /*
+ * Enable runtime PM here to allow the clock core to use runtime PM
+ * for the registered clocks. Additionally, we increase the runtime
+ * PM usage count before registering the clocks, to prevent the
+ * clock core from runtime suspending the device.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ samsung_cmu_register_clocks(data->ctx, cmu);
+ samsung_clk_of_add_provider(dev->of_node, data->ctx);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int exynos_arm64_cmu_suspend(struct device *dev)
+{
+ struct exynos_arm64_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ samsung_clk_save(data->ctx->reg_base, data->clk_save,
+ data->nr_clk_save);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_prepare_enable(data->pclks[i]);
+
+ /* For suspend some registers have to be set to certain values */
+ samsung_clk_restore(data->ctx->reg_base, data->clk_suspend,
+ data->nr_clk_suspend);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_disable_unprepare(data->pclks[i]);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+int exynos_arm64_cmu_resume(struct device *dev)
+{
+ struct exynos_arm64_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ clk_prepare_enable(data->clk);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_prepare_enable(data->pclks[i]);
+
+ samsung_clk_restore(data->ctx->reg_base, data->clk_save,
+ data->nr_clk_save);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_disable_unprepare(data->pclks[i]);
+
+ return 0;
+}
diff --git a/drivers/clk/samsung/clk-exynos-arm64.h b/drivers/clk/samsung/clk-exynos-arm64.h
index 0dd174693935..969979e714bc 100644
--- a/drivers/clk/samsung/clk-exynos-arm64.h
+++ b/drivers/clk/samsung/clk-exynos-arm64.h
@@ -16,5 +16,8 @@
void exynos_arm64_register_cmu(struct device *dev,
struct device_node *np, const struct samsung_cmu_info *cmu);
+int exynos_arm64_register_cmu_pm(struct platform_device *pdev, bool set_manual);
+int exynos_arm64_cmu_suspend(struct device *dev);
+int exynos_arm64_cmu_resume(struct device *dev);
#endif /* __CLK_EXYNOS_ARM64_H */
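A sketch of how an SoC CMU driver is expected to wire these helpers up (the exynos5433 conversion later in this diff follows this shape; the foo_* names here are illustrative only):

	static int __init foo_cmu_probe(struct platform_device *pdev)
	{
		/* pass true instead to also set gate clocks to manual mode */
		return exynos_arm64_register_cmu_pm(pdev, false);
	}

	static const struct dev_pm_ops foo_cmu_pm_ops = {
		SET_RUNTIME_PM_OPS(exynos_arm64_cmu_suspend,
				   exynos_arm64_cmu_resume, NULL)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					      pm_runtime_force_resume)
	};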
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 9cc127a162ad..7626dff41f6f 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -268,7 +268,7 @@ unregister:
return ret;
}
-static int exynos_audss_clk_remove(struct platform_device *pdev)
+static void exynos_audss_clk_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
@@ -277,8 +277,6 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
if (!IS_ERR(epll))
clk_disable_unprepare(epll);
-
- return 0;
}
static const struct dev_pm_ops exynos_audss_clk_pm_ops = {
@@ -295,7 +293,7 @@ static struct platform_driver exynos_audss_clk_driver = {
.pm = &exynos_audss_clk_pm_ops,
},
.probe = exynos_audss_clk_probe,
- .remove = exynos_audss_clk_remove,
+ .remove_new = exynos_audss_clk_remove,
};
module_platform_driver(exynos_audss_clk_driver);
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index e6d6cbf8c4e6..0cff1c94c35e 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -196,15 +196,13 @@ clks_put:
return ret;
}
-static int exynos_clkout_remove(struct platform_device *pdev)
+static void exynos_clkout_remove(struct platform_device *pdev)
{
struct exynos_clkout *clkout = platform_get_drvdata(pdev);
of_clk_del_provider(clkout->np);
clk_hw_unregister(clkout->data.hws[0]);
iounmap(clkout->reg);
-
- return 0;
}
static int __maybe_unused exynos_clkout_suspend(struct device *dev)
@@ -235,7 +233,7 @@ static struct platform_driver exynos_clkout_driver = {
.pm = &exynos_clkout_pm_ops,
},
.probe = exynos_clkout_probe,
- .remove = exynos_clkout_remove,
+ .remove_new = exynos_clkout_remove,
};
module_platform_driver(exynos_clkout_driver);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 22009cb53428..d7dbb3858347 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1251,7 +1251,7 @@ static void __init exynos4_clk_init(struct device_node *np,
if (!reg_base)
panic("%s: failed to map registers\n", __func__);
- ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = samsung_clk_init(NULL, reg_base, CLK_NR_CLKS);
hws = ctx->clk_data.hws;
samsung_clk_of_register_fixed_ext(ctx, exynos4_fixed_rate_ext_clks,
@@ -1276,7 +1276,7 @@ static void __init exynos4_clk_init(struct device_node *np,
exynos4210_vpll_rates;
samsung_clk_register_pll(ctx, exynos4210_plls,
- ARRAY_SIZE(exynos4210_plls), reg_base);
+ ARRAY_SIZE(exynos4210_plls));
} else {
if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24000000) {
exynos4x12_plls[apll].rate_table =
@@ -1288,7 +1288,7 @@ static void __init exynos4_clk_init(struct device_node *np,
}
samsung_clk_register_pll(ctx, exynos4x12_plls,
- ARRAY_SIZE(exynos4x12_plls), reg_base);
+ ARRAY_SIZE(exynos4x12_plls));
}
samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index 471a6fb82670..1470c15e95da 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -121,8 +121,7 @@ static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
if (!exynos4x12_save_isp)
return -ENOMEM;
- ctx = samsung_clk_init(np, reg_base, CLK_NR_ISP_CLKS);
- ctx->dev = dev;
+ ctx = samsung_clk_init(dev, reg_base, CLK_NR_ISP_CLKS);
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 113df773ee44..92fb09922f28 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -797,7 +797,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = samsung_clk_init(NULL, reg_base, CLK_NR_CLKS);
hws = ctx->clk_data.hws;
samsung_clk_of_register_fixed_ext(ctx, exynos5250_fixed_rate_ext_clks,
@@ -815,8 +815,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
samsung_clk_register_pll(ctx, exynos5250_plls,
- ARRAY_SIZE(exynos5250_plls),
- reg_base);
+ ARRAY_SIZE(exynos5250_plls));
samsung_clk_register_fixed_rate(ctx, exynos5250_fixed_rate_clks,
ARRAY_SIZE(exynos5250_fixed_rate_clks));
samsung_clk_register_fixed_factor(ctx, exynos5250_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index caad74dee297..1e0cbf762408 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1587,7 +1587,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_soc = soc;
- ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = samsung_clk_init(NULL, reg_base, CLK_NR_CLKS);
hws = ctx->clk_data.hws;
samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks,
@@ -1606,8 +1606,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
else
exynos5x_plls[bpll].rate_table = exynos5422_bpll_rate_table;
- samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls),
- reg_base);
+ samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls));
samsung_clk_register_fixed_rate(ctx, exynos5x_fixed_rate_clks,
ARRAY_SIZE(exynos5x_fixed_rate_clks));
samsung_clk_register_fixed_factor(ctx, exynos5x_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index f9daae20f393..ed43233649ae 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -10,7 +10,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -19,6 +18,7 @@
#include "clk.h"
#include "clk-cpu.h"
+#include "clk-exynos-arm64.h"
#include "clk-pll.h"
/*
@@ -5478,160 +5478,9 @@ static const struct samsung_cmu_info imem_cmu_info __initconst = {
.clk_name = "aclk_imem_200",
};
-struct exynos5433_cmu_data {
- struct samsung_clk_reg_dump *clk_save;
- unsigned int nr_clk_save;
- const struct samsung_clk_reg_dump *clk_suspend;
- unsigned int nr_clk_suspend;
-
- struct clk *clk;
- struct clk **pclks;
- int nr_pclks;
-
- /* must be the last entry */
- struct samsung_clk_provider ctx;
-};
-
-static int __maybe_unused exynos5433_cmu_suspend(struct device *dev)
-{
- struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
- int i;
-
- samsung_clk_save(data->ctx.reg_base, data->clk_save,
- data->nr_clk_save);
-
- for (i = 0; i < data->nr_pclks; i++)
- clk_prepare_enable(data->pclks[i]);
-
- /* for suspend some registers have to be set to certain values */
- samsung_clk_restore(data->ctx.reg_base, data->clk_suspend,
- data->nr_clk_suspend);
-
- for (i = 0; i < data->nr_pclks; i++)
- clk_disable_unprepare(data->pclks[i]);
-
- clk_disable_unprepare(data->clk);
-
- return 0;
-}
-
-static int __maybe_unused exynos5433_cmu_resume(struct device *dev)
-{
- struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
- int i;
-
- clk_prepare_enable(data->clk);
-
- for (i = 0; i < data->nr_pclks; i++)
- clk_prepare_enable(data->pclks[i]);
-
- samsung_clk_restore(data->ctx.reg_base, data->clk_save,
- data->nr_clk_save);
-
- for (i = 0; i < data->nr_pclks; i++)
- clk_disable_unprepare(data->pclks[i]);
-
- return 0;
-}
-
static int __init exynos5433_cmu_probe(struct platform_device *pdev)
{
- const struct samsung_cmu_info *info;
- struct exynos5433_cmu_data *data;
- struct samsung_clk_provider *ctx;
- struct device *dev = &pdev->dev;
- void __iomem *reg_base;
- int i;
-
- info = of_device_get_match_data(dev);
-
- data = devm_kzalloc(dev,
- struct_size(data, ctx.clk_data.hws, info->nr_clk_ids),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- ctx = &data->ctx;
-
- reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(reg_base))
- return PTR_ERR(reg_base);
-
- for (i = 0; i < info->nr_clk_ids; ++i)
- ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
-
- ctx->clk_data.num = info->nr_clk_ids;
- ctx->reg_base = reg_base;
- ctx->dev = dev;
- spin_lock_init(&ctx->lock);
-
- data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
- info->nr_clk_regs);
- if (!data->clk_save)
- return -ENOMEM;
- data->nr_clk_save = info->nr_clk_regs;
- data->clk_suspend = info->suspend_regs;
- data->nr_clk_suspend = info->nr_suspend_regs;
- data->nr_pclks = of_clk_get_parent_count(dev->of_node);
-
- if (data->nr_pclks > 0) {
- data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
- data->nr_pclks, GFP_KERNEL);
- if (!data->pclks) {
- kfree(data->clk_save);
- return -ENOMEM;
- }
- for (i = 0; i < data->nr_pclks; i++) {
- struct clk *clk = of_clk_get(dev->of_node, i);
-
- if (IS_ERR(clk)) {
- kfree(data->clk_save);
- while (--i >= 0)
- clk_put(data->pclks[i]);
- return PTR_ERR(clk);
- }
- data->pclks[i] = clk;
- }
- }
-
- if (info->clk_name)
- data->clk = clk_get(dev, info->clk_name);
- clk_prepare_enable(data->clk);
-
- platform_set_drvdata(pdev, data);
-
- /*
- * Enable runtime PM here to allow the clock core using runtime PM
- * for the registered clocks. Additionally, we increase the runtime
- * PM usage count before registering the clocks, to prevent the
- * clock core from runtime suspending the device.
- */
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- if (info->pll_clks)
- samsung_clk_register_pll(ctx, info->pll_clks, info->nr_pll_clks,
- reg_base);
- if (info->mux_clks)
- samsung_clk_register_mux(ctx, info->mux_clks,
- info->nr_mux_clks);
- if (info->div_clks)
- samsung_clk_register_div(ctx, info->div_clks,
- info->nr_div_clks);
- if (info->gate_clks)
- samsung_clk_register_gate(ctx, info->gate_clks,
- info->nr_gate_clks);
- if (info->fixed_clks)
- samsung_clk_register_fixed_rate(ctx, info->fixed_clks,
- info->nr_fixed_clks);
- if (info->fixed_factor_clks)
- samsung_clk_register_fixed_factor(ctx, info->fixed_factor_clks,
- info->nr_fixed_factor_clks);
-
- samsung_clk_of_add_provider(dev->of_node, ctx);
- pm_runtime_put_sync(dev);
-
- return 0;
+ return exynos_arm64_register_cmu_pm(pdev, false);
}
static const struct of_device_id exynos5433_cmu_of_match[] = {
@@ -5679,7 +5528,7 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
};
static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
+ SET_RUNTIME_PM_OPS(exynos_arm64_cmu_suspend, exynos_arm64_cmu_resume,
NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index 541761e96aeb..98b23af7324d 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -36,6 +36,7 @@
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
#define CLK_CON_MUX_MUX_CLKCMU_CORE_SSS 0x1020
#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1038
#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
#define CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD 0x1040
#define CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD 0x1044
@@ -57,6 +58,7 @@
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
#define CLK_CON_DIV_CLKCMU_CORE_SSS 0x182c
#define CLK_CON_DIV_CLKCMU_DPU 0x1840
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1844
#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
#define CLK_CON_DIV_CLKCMU_HSI_MMC_CARD 0x184c
#define CLK_CON_DIV_CLKCMU_HSI_USB20DRD 0x1850
@@ -84,6 +86,7 @@
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
#define CLK_CON_GAT_GATE_CLKCMU_CORE_SSS 0x2028
#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2040
#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
#define CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD 0x2048
#define CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD 0x204c
@@ -116,6 +119,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
CLK_CON_MUX_MUX_CLKCMU_CORE_SSS,
CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD,
@@ -137,6 +141,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
CLK_CON_DIV_CLKCMU_CORE_SSS,
CLK_CON_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
CLK_CON_DIV_CLKCMU_HSI_BUS,
CLK_CON_DIV_CLKCMU_HSI_MMC_CARD,
CLK_CON_DIV_CLKCMU_HSI_USB20DRD,
@@ -164,6 +169,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
CLK_CON_GAT_GATE_CLKCMU_CORE_SSS,
CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD,
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD,
@@ -216,6 +222,9 @@ PNAME(mout_core_mmc_embd_p) = { "oscclk", "dout_shared0_div2",
"oscclk", "oscclk" };
PNAME(mout_core_sss_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_G3D */
+PNAME(mout_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_HSI */
PNAME(mout_hsi_bus_p) = { "dout_shared0_div2", "dout_shared1_div2" };
PNAME(mout_hsi_mmc_card_p) = { "oscclk", "dout_shared0_div2",
@@ -283,6 +292,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_DPU, "mout_dpu", mout_dpu_p,
CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 2),
+ /* G3D */
+ MUX(CLK_MOUT_G3D_SWITCH, "mout_g3d_switch", mout_g3d_switch_p,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, 0, 2),
+
/* HSI */
MUX(CLK_MOUT_HSI_BUS, "mout_hsi_bus", mout_hsi_bus_p,
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS, 0, 1),
@@ -357,6 +370,10 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_DPU, "dout_dpu", "gout_dpu",
CLK_CON_DIV_CLKCMU_DPU, 0, 4),
+ /* G3D */
+ DIV(CLK_DOUT_G3D_SWITCH, "dout_g3d_switch", "gout_g3d_switch",
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+
/* HSI */
DIV(CLK_DOUT_HSI_BUS, "dout_hsi_bus", "gout_hsi_bus",
CLK_CON_DIV_CLKCMU_HSI_BUS, 0, 4),
@@ -417,6 +434,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+ /* G3D */
+ GATE(CLK_GOUT_G3D_SWITCH, "gout_g3d_switch", "mout_g3d_switch",
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, 21, 0, 0),
+
/* HSI */
GATE(CLK_GOUT_HSI_BUS, "gout_hsi_bus", "mout_hsi_bus",
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS, 21, 0, 0),
@@ -591,7 +612,7 @@ static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK, 21, CLK_IGNORE_UNUSED,
0),
GATE(CLK_GOUT_PMU_ALIVE_PCLK, "gout_pmu_alive_pclk", "dout_apm_bus",
- CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_SYSREG_APM_PCLK, "gout_sysreg_apm_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK, 21, 0, 0),
};
@@ -653,6 +674,7 @@ static const struct samsung_cmu_info apm_cmu_info __initconst = {
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4 0x2014
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5 0x2018
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6 0x201c
+#define CLK_CON_GAT_CLK_AUD_CMU_AUD_PCLK 0x2020
#define CLK_CON_GAT_GOUT_AUD_ABOX_ACLK 0x2048
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY 0x204c
#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB 0x2050
@@ -708,6 +730,7 @@ static const unsigned long aud_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6,
+ CLK_CON_GAT_CLK_AUD_CMU_AUD_PCLK,
CLK_CON_GAT_GOUT_AUD_ABOX_ACLK,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY,
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB,
@@ -827,6 +850,9 @@ static const struct samsung_div_clock aud_div_clks[] __initconst = {
};
static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_AUD_CMU_AUD_PCLK, "gout_aud_cmu_aud_pclk",
+ "dout_aud_busd",
+ CLK_CON_GAT_CLK_AUD_CMU_AUD_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_AUD_CA32_CCLK, "gout_aud_ca32_cclk", "mout_aud_cpu_hch",
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32, 21, 0, 0),
GATE(CLK_GOUT_AUD_ASB_CCLK, "gout_aud_asb_cclk", "dout_aud_cpu_aclk",
@@ -992,6 +1018,102 @@ static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
.clk_name = "gout_clkcmu_cmgp_bus",
};
+/* ---- CMU_G3D ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_G3D (0x11400000) */
+#define PLL_LOCKTIME_PLL_G3D 0x0000
+#define PLL_CON0_PLL_G3D 0x0100
+#define PLL_CON3_PLL_G3D 0x010c
+#define PLL_CON0_MUX_CLKCMU_G3D_SWITCH_USER 0x0600
+#define CLK_CON_MUX_MUX_CLK_G3D_BUSD 0x1000
+#define CLK_CON_DIV_DIV_CLK_G3D_BUSP 0x1804
+#define CLK_CON_GAT_CLK_G3D_CMU_G3D_PCLK 0x2000
+#define CLK_CON_GAT_CLK_G3D_GPU_CLK 0x2004
+#define CLK_CON_GAT_GOUT_G3D_TZPC_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_G3D_GRAY2BIN_CLK 0x2010
+#define CLK_CON_GAT_GOUT_G3D_BUSD_CLK 0x2024
+#define CLK_CON_GAT_GOUT_G3D_BUSP_CLK 0x2028
+#define CLK_CON_GAT_GOUT_G3D_SYSREG_PCLK 0x202c
+
+static const unsigned long g3d_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_G3D,
+ PLL_CON0_PLL_G3D,
+ PLL_CON3_PLL_G3D,
+ PLL_CON0_MUX_CLKCMU_G3D_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_G3D_BUSD,
+ CLK_CON_DIV_DIV_CLK_G3D_BUSP,
+ CLK_CON_GAT_CLK_G3D_CMU_G3D_PCLK,
+ CLK_CON_GAT_CLK_G3D_GPU_CLK,
+ CLK_CON_GAT_GOUT_G3D_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_G3D_GRAY2BIN_CLK,
+ CLK_CON_GAT_GOUT_G3D_BUSD_CLK,
+ CLK_CON_GAT_GOUT_G3D_BUSP_CLK,
+ CLK_CON_GAT_GOUT_G3D_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_G3D */
+PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_g3d_switch_user_p) = { "oscclk", "dout_g3d_switch" };
+PNAME(mout_g3d_busd_p) = { "mout_g3d_pll", "mout_g3d_switch_user" };
+
+/*
+ * Do not provide PLL table to PLL_G3D, as MANUAL_PLL_CTRL bit is not set
+ * for that PLL by default, so set_rate operation would fail.
+ */
+static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
+ PLL(pll_0818x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
+};
+
+static const struct samsung_mux_clock g3d_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
+ PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_G3D_SWITCH_USER, "mout_g3d_switch_user",
+ mout_g3d_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_G3D_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_G3D_BUSD, "mout_g3d_busd", mout_g3d_busd_p,
+ CLK_CON_MUX_MUX_CLK_G3D_BUSD, 0, 1),
+};
+
+static const struct samsung_div_clock g3d_div_clks[] __initconst = {
+ DIV(CLK_DOUT_G3D_BUSP, "dout_g3d_busp", "mout_g3d_busd",
+ CLK_CON_DIV_DIV_CLK_G3D_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock g3d_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_G3D_CMU_G3D_PCLK, "gout_g3d_cmu_g3d_pclk",
+ "dout_g3d_busp",
+ CLK_CON_GAT_CLK_G3D_CMU_G3D_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_G3D_GPU_CLK, "gout_g3d_gpu_clk", "mout_g3d_busd",
+ CLK_CON_GAT_CLK_G3D_GPU_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_G3D_TZPC_PCLK, "gout_g3d_tzpc_pclk", "dout_g3d_busp",
+ CLK_CON_GAT_GOUT_G3D_TZPC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_G3D_GRAY2BIN_CLK, "gout_g3d_gray2bin_clk",
+ "mout_g3d_busd",
+ CLK_CON_GAT_GOUT_G3D_GRAY2BIN_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_G3D_BUSD_CLK, "gout_g3d_busd_clk", "mout_g3d_busd",
+ CLK_CON_GAT_GOUT_G3D_BUSD_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_G3D_BUSP_CLK, "gout_g3d_busp_clk", "dout_g3d_busp",
+ CLK_CON_GAT_GOUT_G3D_BUSP_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_G3D_SYSREG_PCLK, "gout_g3d_sysreg_pclk", "dout_g3d_busp",
+ CLK_CON_GAT_GOUT_G3D_SYSREG_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info g3d_cmu_info __initconst = {
+ .pll_clks = g3d_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
+ .mux_clks = g3d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g3d_mux_clks),
+ .div_clks = g3d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g3d_div_clks),
+ .gate_clks = g3d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g3d_gate_clks),
+ .nr_clk_ids = G3D_NR_CLK,
+ .clk_regs = g3d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+ .clk_name = "dout_g3d_switch",
+};
+
/* ---- CMU_HSI ------------------------------------------------------------- */
/* Register Offset definitions for CMU_HSI (0x13400000) */
@@ -999,12 +1121,15 @@ static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
#define PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER 0x0610
#define PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER 0x0620
#define CLK_CON_MUX_MUX_CLK_HSI_RTC 0x1000
+#define CLK_CON_GAT_CLK_HSI_CMU_HSI_PCLK 0x2000
#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV 0x2008
#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50 0x200c
#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26 0x2010
#define CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK 0x2018
#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK 0x2024
#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN 0x2028
+#define CLK_CON_GAT_GOUT_HSI_PPMU_ACLK 0x202c
+#define CLK_CON_GAT_GOUT_HSI_PPMU_PCLK 0x2030
#define CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK 0x2038
#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20 0x203c
#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY 0x2040
@@ -1014,12 +1139,15 @@ static const unsigned long hsi_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
CLK_CON_MUX_MUX_CLK_HSI_RTC,
+ CLK_CON_GAT_CLK_HSI_CMU_HSI_PCLK,
CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV,
CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50,
CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26,
CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK,
CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK,
CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN,
+ CLK_CON_GAT_GOUT_HSI_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_HSI_PPMU_PCLK,
CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK,
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20,
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY,
@@ -1045,6 +1173,10 @@ static const struct samsung_mux_clock hsi_mux_clks[] __initconst = {
};
static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in corresponding driver */
+ GATE(CLK_GOUT_HSI_CMU_HSI_PCLK, "gout_hsi_cmu_hsi_pclk",
+ "mout_hsi_bus_user",
+ CLK_CON_GAT_CLK_HSI_CMU_HSI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_USB_RTC_CLK, "gout_usb_rtc", "mout_hsi_rtc",
CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV, 21, 0, 0),
GATE(CLK_GOUT_USB_REF_CLK, "gout_usb_ref", "mout_hsi_usb20drd_user",
@@ -1059,6 +1191,10 @@ static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
"mout_hsi_mmc_card_user",
CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_HSI_PPMU_ACLK, "gout_hsi_ppmu_aclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_PPMU_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI_PPMU_PCLK, "gout_hsi_ppmu_pclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_PPMU_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SYSREG_HSI_PCLK, "gout_sysreg_hsi_pclk",
"mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK, 21, 0, 0),
@@ -1701,6 +1837,9 @@ static const struct of_device_id exynos850_cmu_of_match[] = {
.compatible = "samsung,exynos850-cmu-cmgp",
.data = &cmgp_cmu_info,
}, {
+ .compatible = "samsung,exynos850-cmu-g3d",
+ .data = &g3d_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-hsi",
.data = &hsi_cmu_info,
}, {
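The new "samsung,exynos850-cmu-g3d" match entry above only makes the G3D clocks available through the common clock framework; a consumer driver still has to claim and ungate them. A minimal consumer sketch, assuming a hypothetical GPU platform device whose device tree maps the gate registered above to the clock name "gpu" (both the device and the clock name are assumptions, not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative only: the "gpu" clock name is an assumed DT binding. */
static int example_g3d_enable_clock(struct platform_device *pdev)
{
	struct clk *gpu_clk;

	gpu_clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(gpu_clk))
		return PTR_ERR(gpu_clk);

	/* Ungates CLK_CON_GAT_CLK_G3D_GPU_CLK (parent: mout_g3d_busd). */
	return clk_prepare_enable(gpu_clk);
}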
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 0ff28938943f..74934c6182ce 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -941,169 +941,6 @@ static const struct clk_ops samsung_pll6553_clk_ops = {
};
/*
- * PLL Clock Type of S3C24XX before S3C2443
- */
-
-#define PLLS3C2410_MDIV_MASK (0xff)
-#define PLLS3C2410_PDIV_MASK (0x1f)
-#define PLLS3C2410_SDIV_MASK (0x3)
-#define PLLS3C2410_MDIV_SHIFT (12)
-#define PLLS3C2410_PDIV_SHIFT (4)
-#define PLLS3C2410_SDIV_SHIFT (0)
-
-#define PLLS3C2410_ENABLE_REG_OFFSET 0x10
-
-static unsigned long samsung_s3c2410_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_con, mdiv, pdiv, sdiv;
- u64 fvco = parent_rate;
-
- pll_con = readl_relaxed(pll->con_reg);
- mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
- pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
- sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
-
- fvco *= (mdiv + 8);
- do_div(fvco, (pdiv + 2) << sdiv);
-
- return (unsigned int)fvco;
-}
-
-static unsigned long samsung_s3c2440_mpll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_con, mdiv, pdiv, sdiv;
- u64 fvco = parent_rate;
-
- pll_con = readl_relaxed(pll->con_reg);
- mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
- pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
- sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
-
- fvco *= (2 * (mdiv + 8));
- do_div(fvco, (pdiv + 2) << sdiv);
-
- return (unsigned int)fvco;
-}
-
-static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long prate)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- const struct samsung_pll_rate_table *rate;
- u32 tmp;
-
- /* Get required rate settings from table */
- rate = samsung_get_pll_settings(pll, drate);
- if (!rate) {
- pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, clk_hw_get_name(hw));
- return -EINVAL;
- }
-
- tmp = readl_relaxed(pll->con_reg);
-
- /* Change PLL PMS values */
- tmp &= ~((PLLS3C2410_MDIV_MASK << PLLS3C2410_MDIV_SHIFT) |
- (PLLS3C2410_PDIV_MASK << PLLS3C2410_PDIV_SHIFT) |
- (PLLS3C2410_SDIV_MASK << PLLS3C2410_SDIV_SHIFT));
- tmp |= (rate->mdiv << PLLS3C2410_MDIV_SHIFT) |
- (rate->pdiv << PLLS3C2410_PDIV_SHIFT) |
- (rate->sdiv << PLLS3C2410_SDIV_SHIFT);
- writel_relaxed(tmp, pll->con_reg);
-
- /* Time to settle according to the manual */
- udelay(300);
-
- return 0;
-}
-
-static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_en = readl_relaxed(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
- u32 pll_en_orig = pll_en;
-
- if (enable)
- pll_en &= ~BIT(bit);
- else
- pll_en |= BIT(bit);
-
- writel_relaxed(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
-
- /* if we started the UPLL, then allow to settle */
- if (enable && (pll_en_orig & BIT(bit)))
- udelay(300);
-
- return 0;
-}
-
-static int samsung_s3c2410_mpll_enable(struct clk_hw *hw)
-{
- return samsung_s3c2410_pll_enable(hw, 5, true);
-}
-
-static void samsung_s3c2410_mpll_disable(struct clk_hw *hw)
-{
- samsung_s3c2410_pll_enable(hw, 5, false);
-}
-
-static int samsung_s3c2410_upll_enable(struct clk_hw *hw)
-{
- return samsung_s3c2410_pll_enable(hw, 7, true);
-}
-
-static void samsung_s3c2410_upll_disable(struct clk_hw *hw)
-{
- samsung_s3c2410_pll_enable(hw, 7, false);
-}
-
-static const struct clk_ops samsung_s3c2410_mpll_clk_min_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
-};
-
-static const struct clk_ops samsung_s3c2410_upll_clk_min_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_upll_enable,
- .disable = samsung_s3c2410_upll_disable,
-};
-
-static const struct clk_ops samsung_s3c2440_mpll_clk_min_ops = {
- .recalc_rate = samsung_s3c2440_mpll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
-};
-
-static const struct clk_ops samsung_s3c2410_mpll_clk_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
- .round_rate = samsung_pll_round_rate,
- .set_rate = samsung_s3c2410_pll_set_rate,
-};
-
-static const struct clk_ops samsung_s3c2410_upll_clk_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_upll_enable,
- .disable = samsung_s3c2410_upll_disable,
- .round_rate = samsung_pll_round_rate,
- .set_rate = samsung_s3c2410_pll_set_rate,
-};
-
-static const struct clk_ops samsung_s3c2440_mpll_clk_ops = {
- .recalc_rate = samsung_s3c2440_mpll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
- .round_rate = samsung_pll_round_rate,
- .set_rate = samsung_s3c2410_pll_set_rate,
-};
-
-/*
* PLL2550x Clock Type
*/
@@ -1422,8 +1259,7 @@ static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
};
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
- const struct samsung_pll_clock *pll_clk,
- void __iomem *base)
+ const struct samsung_pll_clock *pll_clk)
{
struct samsung_clk_pll *pll;
struct clk_init_data init;
@@ -1478,6 +1314,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
init.ops = &samsung_pll35xx_clk_ops;
break;
case pll_1417x:
+ case pll_0818x:
case pll_0822x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
@@ -1530,24 +1367,6 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll46xx_clk_ops;
break;
- case pll_s3c2410_mpll:
- if (!pll->rate_table)
- init.ops = &samsung_s3c2410_mpll_clk_min_ops;
- else
- init.ops = &samsung_s3c2410_mpll_clk_ops;
- break;
- case pll_s3c2410_upll:
- if (!pll->rate_table)
- init.ops = &samsung_s3c2410_upll_clk_min_ops;
- else
- init.ops = &samsung_s3c2410_upll_clk_ops;
- break;
- case pll_s3c2440_mpll:
- if (!pll->rate_table)
- init.ops = &samsung_s3c2440_mpll_clk_min_ops;
- else
- init.ops = &samsung_s3c2440_mpll_clk_ops;
- break;
case pll_2550x:
init.ops = &samsung_pll2550x_clk_ops;
break;
@@ -1576,8 +1395,8 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
pll->hw.init = &init;
pll->type = pll_clk->type;
- pll->lock_reg = base + pll_clk->lock_offset;
- pll->con_reg = base + pll_clk->con_offset;
+ pll->lock_reg = ctx->reg_base + pll_clk->lock_offset;
+ pll->con_reg = ctx->reg_base + pll_clk->con_offset;
ret = clk_hw_register(ctx->dev, &pll->hw);
if (ret) {
@@ -1593,10 +1412,10 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_list,
- unsigned int nr_pll, void __iomem *base)
+ unsigned int nr_pll)
{
int cnt;
for (cnt = 0; cnt < nr_pll; cnt++)
- _samsung_clk_register_pll(ctx, &pll_list[cnt], base);
+ _samsung_clk_register_pll(ctx, &pll_list[cnt]);
}
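With the register base now taken from ctx->reg_base, every caller of samsung_clk_register_pll() drops its trailing void __iomem *base argument. A rough sketch of an updated call site, using a placeholder clock ID, offsets and names rather than data from any real SoC:

/* Placeholder PLL description; the ID, offsets and names are illustrative. */
static const struct samsung_pll_clock example_plls[] __initconst = {
	PLL(pll_0822x, 1, "fout_example_pll", "oscclk", 0x0000, 0x0100, NULL),
};

static void __init example_register_plls(struct samsung_clk_provider *ctx)
{
	/* The MMIO base is no longer passed; it comes from ctx->reg_base. */
	samsung_clk_register_pll(ctx, example_plls, ARRAY_SIZE(example_plls));
}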
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index a9892c2d1f57..0725d485c6ee 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -25,9 +25,6 @@ enum samsung_pll_type {
pll_6552,
pll_6552_s3c2416,
pll_6553,
- pll_s3c2410_mpll,
- pll_s3c2410_upll,
- pll_s3c2440_mpll,
pll_2550x,
pll_2550xx,
pll_2650x,
@@ -37,6 +34,7 @@ enum samsung_pll_type {
pll_1451x,
pll_1452x,
pll_1460x,
+ pll_0818x,
pll_0822x,
pll_0831x,
pll_142xx,
@@ -56,24 +54,6 @@ enum samsung_pll_type {
.sdiv = (_s), \
}
-#define PLL_S3C2410_MPLL_RATE(_fin, _rate, _m, _p, _s) \
- { \
- .rate = PLL_VALID_RATE(_fin, _rate, \
- _m + 8, _p + 2, _s, 0, 16), \
- .mdiv = (_m), \
- .pdiv = (_p), \
- .sdiv = (_s), \
- }
-
-#define PLL_S3C2440_MPLL_RATE(_fin, _rate, _m, _p, _s) \
- { \
- .rate = PLL_VALID_RATE(_fin, _rate, \
- 2 * (_m + 8), _p + 2, _s, 0, 16), \
- .mdiv = (_m), \
- .pdiv = (_p), \
- .sdiv = (_s), \
- }
-
#define PLL_36XX_RATE(_fin, _rate, _m, _p, _s, _k) \
{ \
.rate = PLL_VALID_RATE(_fin, _rate, \
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
deleted file mode 100644
index f5e0a6ba2d12..000000000000
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for s3c24xx external clock output.
- */
-
-#include <linux/clkdev.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/clk-s3c2410.h>
-#include <linux/module.h>
-#include "clk.h"
-
-#define MUX_DCLK0 0
-#define MUX_DCLK1 1
-#define DIV_DCLK0 2
-#define DIV_DCLK1 3
-#define GATE_DCLK0 4
-#define GATE_DCLK1 5
-#define MUX_CLKOUT0 6
-#define MUX_CLKOUT1 7
-#define DCLK_MAX_CLKS (MUX_CLKOUT1 + 1)
-
-enum supported_socs {
- S3C2410,
- S3C2412,
- S3C2440,
- S3C2443,
-};
-
-struct s3c24xx_dclk_drv_data {
- const char **clkout0_parent_names;
- int clkout0_num_parents;
- const char **clkout1_parent_names;
- int clkout1_num_parents;
- const char **mux_parent_names;
- int mux_num_parents;
-};
-
-/*
- * Clock for output-parent selection in misccr
- */
-
-struct s3c24xx_clkout {
- struct clk_hw hw;
- u32 mask;
- u8 shift;
- unsigned int (*modify_misccr)(unsigned int clr, unsigned int chg);
-};
-
-#define to_s3c24xx_clkout(_hw) container_of(_hw, struct s3c24xx_clkout, hw)
-
-static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
-{
- struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
- int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
-
- val = clkout->modify_misccr(0, 0) >> clkout->shift;
- val >>= clkout->shift;
- val &= clkout->mask;
-
- if (val >= num_parents)
- return -EINVAL;
-
- return val;
-}
-
-static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index)
-{
- struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
-
- clkout->modify_misccr((clkout->mask << clkout->shift),
- (index << clkout->shift));
-
- return 0;
-}
-
-static const struct clk_ops s3c24xx_clkout_ops = {
- .get_parent = s3c24xx_clkout_get_parent,
- .set_parent = s3c24xx_clkout_set_parent,
- .determine_rate = __clk_mux_determine_rate,
-};
-
-static struct clk_hw *s3c24xx_register_clkout(struct device *dev,
- const char *name, const char **parent_names, u8 num_parents,
- u8 shift, u32 mask)
-{
- struct s3c2410_clk_platform_data *pdata = dev_get_platdata(dev);
- struct s3c24xx_clkout *clkout;
- struct clk_init_data init;
- int ret;
-
- if (!pdata)
- return ERR_PTR(-EINVAL);
-
- /* allocate the clkout */
- clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
- if (!clkout)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &s3c24xx_clkout_ops;
- init.flags = 0;
- init.parent_names = parent_names;
- init.num_parents = num_parents;
-
- clkout->shift = shift;
- clkout->mask = mask;
- clkout->hw.init = &init;
- clkout->modify_misccr = pdata->modify_misccr;
-
- ret = clk_hw_register(dev, &clkout->hw);
- if (ret)
- return ERR_PTR(ret);
-
- return &clkout->hw;
-}
-
-/*
- * dclk and clkout init
- */
-
-struct s3c24xx_dclk {
- struct device *dev;
- void __iomem *base;
- struct notifier_block dclk0_div_change_nb;
- struct notifier_block dclk1_div_change_nb;
- spinlock_t dclk_lock;
- unsigned long reg_save;
- /* clk_data must be the last entry in the structure */
- struct clk_hw_onecell_data clk_data;
-};
-
-#define to_s3c24xx_dclk0(x) \
- container_of(x, struct s3c24xx_dclk, dclk0_div_change_nb)
-
-#define to_s3c24xx_dclk1(x) \
- container_of(x, struct s3c24xx_dclk, dclk1_div_change_nb)
-
-static const char *dclk_s3c2410_p[] = { "pclk", "uclk" };
-static const char *clkout0_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk",
- "gate_dclk0" };
-static const char *clkout1_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk",
- "gate_dclk1" };
-
-static const char *clkout0_s3c2412_p[] = { "mpll", "upll", "rtc_clkout",
- "hclk", "pclk", "gate_dclk0" };
-static const char *clkout1_s3c2412_p[] = { "xti", "upll", "fclk", "hclk", "pclk",
- "gate_dclk1" };
-
-static const char *clkout0_s3c2440_p[] = { "xti", "upll", "fclk", "hclk", "pclk",
- "gate_dclk0" };
-static const char *clkout1_s3c2440_p[] = { "mpll", "upll", "rtc_clkout",
- "hclk", "pclk", "gate_dclk1" };
-
-static const char *dclk_s3c2443_p[] = { "pclk", "epll" };
-static const char *clkout0_s3c2443_p[] = { "xti", "epll", "armclk", "hclk", "pclk",
- "gate_dclk0" };
-static const char *clkout1_s3c2443_p[] = { "dummy", "epll", "rtc_clkout",
- "hclk", "pclk", "gate_dclk1" };
-
-#define DCLKCON_DCLK_DIV_MASK 0xf
-#define DCLKCON_DCLK0_DIV_SHIFT 4
-#define DCLKCON_DCLK0_CMP_SHIFT 8
-#define DCLKCON_DCLK1_DIV_SHIFT 20
-#define DCLKCON_DCLK1_CMP_SHIFT 24
-
-static void s3c24xx_dclk_update_cmp(struct s3c24xx_dclk *s3c24xx_dclk,
- int div_shift, int cmp_shift)
-{
- unsigned long flags = 0;
- u32 dclk_con, div, cmp;
-
- spin_lock_irqsave(&s3c24xx_dclk->dclk_lock, flags);
-
- dclk_con = readl_relaxed(s3c24xx_dclk->base);
-
- div = ((dclk_con >> div_shift) & DCLKCON_DCLK_DIV_MASK) + 1;
- cmp = ((div + 1) / 2) - 1;
-
- dclk_con &= ~(DCLKCON_DCLK_DIV_MASK << cmp_shift);
- dclk_con |= (cmp << cmp_shift);
-
- writel_relaxed(dclk_con, s3c24xx_dclk->base);
-
- spin_unlock_irqrestore(&s3c24xx_dclk->dclk_lock, flags);
-}
-
-static int s3c24xx_dclk0_div_notify(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk0(nb);
-
- if (event == POST_RATE_CHANGE) {
- s3c24xx_dclk_update_cmp(s3c24xx_dclk,
- DCLKCON_DCLK0_DIV_SHIFT, DCLKCON_DCLK0_CMP_SHIFT);
- }
-
- return NOTIFY_DONE;
-}
-
-static int s3c24xx_dclk1_div_notify(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk1(nb);
-
- if (event == POST_RATE_CHANGE) {
- s3c24xx_dclk_update_cmp(s3c24xx_dclk,
- DCLKCON_DCLK1_DIV_SHIFT, DCLKCON_DCLK1_CMP_SHIFT);
- }
-
- return NOTIFY_DONE;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int s3c24xx_dclk_suspend(struct device *dev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = dev_get_drvdata(dev);
-
- s3c24xx_dclk->reg_save = readl_relaxed(s3c24xx_dclk->base);
- return 0;
-}
-
-static int s3c24xx_dclk_resume(struct device *dev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = dev_get_drvdata(dev);
-
- writel_relaxed(s3c24xx_dclk->reg_save, s3c24xx_dclk->base);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(s3c24xx_dclk_pm_ops,
- s3c24xx_dclk_suspend, s3c24xx_dclk_resume);
-
-static int s3c24xx_dclk_probe(struct platform_device *pdev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk;
- struct s3c24xx_dclk_drv_data *dclk_variant;
- struct clk_hw **clk_table;
- int ret, i;
-
- s3c24xx_dclk = devm_kzalloc(&pdev->dev,
- struct_size(s3c24xx_dclk, clk_data.hws,
- DCLK_MAX_CLKS),
- GFP_KERNEL);
- if (!s3c24xx_dclk)
- return -ENOMEM;
-
- clk_table = s3c24xx_dclk->clk_data.hws;
-
- s3c24xx_dclk->dev = &pdev->dev;
- s3c24xx_dclk->clk_data.num = DCLK_MAX_CLKS;
- platform_set_drvdata(pdev, s3c24xx_dclk);
- spin_lock_init(&s3c24xx_dclk->dclk_lock);
-
- s3c24xx_dclk->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(s3c24xx_dclk->base))
- return PTR_ERR(s3c24xx_dclk->base);
-
- dclk_variant = (struct s3c24xx_dclk_drv_data *)
- platform_get_device_id(pdev)->driver_data;
-
-
- clk_table[MUX_DCLK0] = clk_hw_register_mux(&pdev->dev, "mux_dclk0",
- dclk_variant->mux_parent_names,
- dclk_variant->mux_num_parents, 0,
- s3c24xx_dclk->base, 1, 1, 0,
- &s3c24xx_dclk->dclk_lock);
- clk_table[MUX_DCLK1] = clk_hw_register_mux(&pdev->dev, "mux_dclk1",
- dclk_variant->mux_parent_names,
- dclk_variant->mux_num_parents, 0,
- s3c24xx_dclk->base, 17, 1, 0,
- &s3c24xx_dclk->dclk_lock);
-
- clk_table[DIV_DCLK0] = clk_hw_register_divider(&pdev->dev, "div_dclk0",
- "mux_dclk0", 0, s3c24xx_dclk->base,
- 4, 4, 0, &s3c24xx_dclk->dclk_lock);
- clk_table[DIV_DCLK1] = clk_hw_register_divider(&pdev->dev, "div_dclk1",
- "mux_dclk1", 0, s3c24xx_dclk->base,
- 20, 4, 0, &s3c24xx_dclk->dclk_lock);
-
- clk_table[GATE_DCLK0] = clk_hw_register_gate(&pdev->dev, "gate_dclk0",
- "div_dclk0", CLK_SET_RATE_PARENT,
- s3c24xx_dclk->base, 0, 0,
- &s3c24xx_dclk->dclk_lock);
- clk_table[GATE_DCLK1] = clk_hw_register_gate(&pdev->dev, "gate_dclk1",
- "div_dclk1", CLK_SET_RATE_PARENT,
- s3c24xx_dclk->base, 16, 0,
- &s3c24xx_dclk->dclk_lock);
-
- clk_table[MUX_CLKOUT0] = s3c24xx_register_clkout(&pdev->dev,
- "clkout0", dclk_variant->clkout0_parent_names,
- dclk_variant->clkout0_num_parents, 4, 7);
- clk_table[MUX_CLKOUT1] = s3c24xx_register_clkout(&pdev->dev,
- "clkout1", dclk_variant->clkout1_parent_names,
- dclk_variant->clkout1_num_parents, 8, 7);
-
- for (i = 0; i < DCLK_MAX_CLKS; i++)
- if (IS_ERR(clk_table[i])) {
- dev_err(&pdev->dev, "clock %d failed to register\n", i);
- ret = PTR_ERR(clk_table[i]);
- goto err_clk_register;
- }
-
- ret = clk_hw_register_clkdev(clk_table[MUX_DCLK0], "dclk0", NULL);
- if (!ret)
- ret = clk_hw_register_clkdev(clk_table[MUX_DCLK1], "dclk1",
- NULL);
- if (!ret)
- ret = clk_hw_register_clkdev(clk_table[MUX_CLKOUT0],
- "clkout0", NULL);
- if (!ret)
- ret = clk_hw_register_clkdev(clk_table[MUX_CLKOUT1],
- "clkout1", NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to register aliases, %d\n", ret);
- goto err_clk_register;
- }
-
- s3c24xx_dclk->dclk0_div_change_nb.notifier_call =
- s3c24xx_dclk0_div_notify;
-
- s3c24xx_dclk->dclk1_div_change_nb.notifier_call =
- s3c24xx_dclk1_div_notify;
-
- ret = clk_notifier_register(clk_table[DIV_DCLK0]->clk,
- &s3c24xx_dclk->dclk0_div_change_nb);
- if (ret)
- goto err_clk_register;
-
- ret = clk_notifier_register(clk_table[DIV_DCLK1]->clk,
- &s3c24xx_dclk->dclk1_div_change_nb);
- if (ret)
- goto err_dclk_notify;
-
- return 0;
-
-err_dclk_notify:
- clk_notifier_unregister(clk_table[DIV_DCLK0]->clk,
- &s3c24xx_dclk->dclk0_div_change_nb);
-err_clk_register:
- for (i = 0; i < DCLK_MAX_CLKS; i++)
- if (clk_table[i] && !IS_ERR(clk_table[i]))
- clk_hw_unregister(clk_table[i]);
-
- return ret;
-}
-
-static int s3c24xx_dclk_remove(struct platform_device *pdev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
- struct clk_hw **clk_table = s3c24xx_dclk->clk_data.hws;
- int i;
-
- clk_notifier_unregister(clk_table[DIV_DCLK1]->clk,
- &s3c24xx_dclk->dclk1_div_change_nb);
- clk_notifier_unregister(clk_table[DIV_DCLK0]->clk,
- &s3c24xx_dclk->dclk0_div_change_nb);
-
- for (i = 0; i < DCLK_MAX_CLKS; i++)
- clk_hw_unregister(clk_table[i]);
-
- return 0;
-}
-
-static struct s3c24xx_dclk_drv_data dclk_variants[] = {
- [S3C2410] = {
- .clkout0_parent_names = clkout0_s3c2410_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2410_p),
- .clkout1_parent_names = clkout1_s3c2410_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2410_p),
- .mux_parent_names = dclk_s3c2410_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
- },
- [S3C2412] = {
- .clkout0_parent_names = clkout0_s3c2412_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2412_p),
- .clkout1_parent_names = clkout1_s3c2412_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2412_p),
- .mux_parent_names = dclk_s3c2410_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
- },
- [S3C2440] = {
- .clkout0_parent_names = clkout0_s3c2440_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2440_p),
- .clkout1_parent_names = clkout1_s3c2440_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2440_p),
- .mux_parent_names = dclk_s3c2410_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
- },
- [S3C2443] = {
- .clkout0_parent_names = clkout0_s3c2443_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2443_p),
- .clkout1_parent_names = clkout1_s3c2443_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2443_p),
- .mux_parent_names = dclk_s3c2443_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2443_p),
- },
-};
-
-static const struct platform_device_id s3c24xx_dclk_driver_ids[] = {
- {
- .name = "s3c2410-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2410],
- }, {
- .name = "s3c2412-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2412],
- }, {
- .name = "s3c2440-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2440],
- }, {
- .name = "s3c2443-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2443],
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, s3c24xx_dclk_driver_ids);
-
-static struct platform_driver s3c24xx_dclk_driver = {
- .driver = {
- .name = "s3c24xx-dclk",
- .pm = &s3c24xx_dclk_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = s3c24xx_dclk_probe,
- .remove = s3c24xx_dclk_remove,
- .id_table = s3c24xx_dclk_driver_ids,
-};
-module_platform_driver(s3c24xx_dclk_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
-MODULE_DESCRIPTION("Driver for the S3C24XX external clock outputs");
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
deleted file mode 100644
index 3d152a46169b..000000000000
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for S3C2410 and following SoCs.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/samsung.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <dt-bindings/clock/s3c2410.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-#define LOCKTIME 0x00
-#define MPLLCON 0x04
-#define UPLLCON 0x08
-#define CLKCON 0x0c
-#define CLKSLOW 0x10
-#define CLKDIVN 0x14
-#define CAMDIVN 0x18
-
-/* the soc types */
-enum supported_socs {
- S3C2410,
- S3C2440,
- S3C2442,
-};
-
-/* list of PLLs to be registered */
-enum s3c2410_plls {
- mpll, upll,
-};
-
-static void __iomem *reg_base;
-
-/*
- * list of controller registers to be saved and restored during a
- * suspend/resume cycle.
- */
-static unsigned long s3c2410_clk_regs[] __initdata = {
- LOCKTIME,
- MPLLCON,
- UPLLCON,
- CLKCON,
- CLKSLOW,
- CLKDIVN,
- CAMDIVN,
-};
-
-PNAME(fclk_p) = { "mpll", "div_slow" };
-
-static struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
- MUX(FCLK, "fclk", fclk_p, CLKSLOW, 4, 1),
-};
-
-static struct clk_div_table divslow_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
- DIV_T(0, "div_slow", "xti", CLKSLOW, 0, 3, divslow_d),
- DIV(PCLK, "pclk", "hclk", CLKDIVN, 0, 1),
-};
-
-static struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
- GATE(PCLK_SPI, "spi", "pclk", CLKCON, 18, 0, 0),
- GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 17, 0, 0),
- GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 16, 0, 0),
- GATE(PCLK_ADC, "adc", "pclk", CLKCON, 15, 0, 0),
- GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 14, 0, 0),
- GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 13, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 12, 0, 0),
- GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 11, 0, 0),
- GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 10, 0, 0),
- GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 9, 0, 0),
- GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 8, 0, 0),
- GATE(HCLK_USBD, "usb-device", "hclk", CLKCON, 7, 0, 0),
- GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0),
- GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0),
- GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0),
-};
-
-/* should be added _after_ the soc-specific clocks are created */
-static struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
- ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
- ALIAS(PCLK_ADC, NULL, "adc"),
- ALIAS(PCLK_RTC, NULL, "rtc"),
- ALIAS(PCLK_PWM, NULL, "timers"),
- ALIAS(HCLK_LCD, NULL, "lcd"),
- ALIAS(HCLK_USBD, NULL, "usb-device"),
- ALIAS(HCLK_USBH, NULL, "usb-host"),
- ALIAS(UCLK, NULL, "usb-bus-host"),
- ALIAS(UCLK, NULL, "usb-bus-gadget"),
- ALIAS(ARMCLK, NULL, "armclk"),
- ALIAS(UCLK, NULL, "uclk"),
- ALIAS(HCLK, NULL, "hclk"),
- ALIAS(MPLL, NULL, "mpll"),
- ALIAS(FCLK, NULL, "fclk"),
- ALIAS(PCLK, NULL, "watchdog"),
- ALIAS(PCLK_SDI, NULL, "sdi"),
- ALIAS(HCLK_NAND, NULL, "nand"),
- ALIAS(PCLK_I2S, NULL, "iis"),
- ALIAS(PCLK_I2C, NULL, "i2c"),
-};
-
-/* S3C2410 specific clocks */
-
-static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
- /* sorted in descending order */
- /* 2410A extras */
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 270000000, 127, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 268000000, 126, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 266000000, 125, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 226000000, 105, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 210000000, 132, 2, 1),
- /* 2410 common */
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 202800000, 161, 3, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 192000000, 88, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 186000000, 85, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 180000000, 82, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 170000000, 77, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 158000000, 71, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 152000000, 68, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 147000000, 90, 2, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 135000000, 82, 2, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 124000000, 116, 1, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 118500000, 150, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 113000000, 105, 1, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 101250000, 127, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 90000000, 112, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 84750000, 105, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 79000000, 71, 1, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 67500000, 82, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 56250000, 142, 2, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 48000000, 120, 2, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 50700000, 161, 3, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 45000000, 82, 1, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 33750000, 82, 2, 3),
- { /* sentinel */ },
-};
-
-static struct samsung_pll_clock s3c2410_plls[] __initdata = {
- [mpll] = PLL(pll_s3c2410_mpll, MPLL, "mpll", "xti",
- LOCKTIME, MPLLCON, NULL),
- [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
- LOCKTIME, UPLLCON, NULL),
-};
-
-static struct samsung_div_clock s3c2410_dividers[] __initdata = {
- DIV(HCLK, "hclk", "mpll", CLKDIVN, 1, 1),
-};
-
-static struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
- /*
- * armclk is directly supplied by the fclk, without
- * switching possibility like on the s3c244x below.
- */
- FFACTOR(ARMCLK, "armclk", "fclk", 1, 1, 0),
-
- /* uclk is fed from the unmodified upll */
- FFACTOR(UCLK, "uclk", "upll", 1, 1, 0),
-};
-
-static struct samsung_clock_alias s3c2410_aliases[] __initdata = {
- ALIAS(PCLK_UART0, "s3c2410-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2410-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2410-uart.2", "uart"),
- ALIAS(PCLK_UART0, "s3c2410-uart.0", "clk_uart_baud0"),
- ALIAS(PCLK_UART1, "s3c2410-uart.1", "clk_uart_baud0"),
- ALIAS(PCLK_UART2, "s3c2410-uart.2", "clk_uart_baud0"),
- ALIAS(UCLK, NULL, "clk_uart_baud1"),
-};
-
-/* S3C244x specific clocks */
-
-static struct samsung_pll_rate_table pll_s3c244x_12mhz_tbl[] __initdata = {
- /* sorted in descending order */
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 400000000, 0x5c, 1, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 390000000, 0x7a, 2, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 380000000, 0x57, 1, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 370000000, 0xb1, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 360000000, 0x70, 2, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 350000000, 0xa7, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 340000000, 0x4d, 1, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 330000000, 0x66, 2, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 320000000, 0x98, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 310000000, 0x93, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 300000000, 0x75, 3, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 240000000, 0x70, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 230000000, 0x6b, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 220000000, 0x66, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 210000000, 0x84, 2, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 200000000, 0x5c, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 190000000, 0x57, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 180000000, 0x70, 2, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 170000000, 0x4d, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 160000000, 0x98, 4, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 150000000, 0x75, 3, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 120000000, 0x70, 1, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 110000000, 0x66, 1, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 100000000, 0x5c, 1, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 90000000, 0x70, 2, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 80000000, 0x98, 4, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 75000000, 0x75, 3, 3),
- { /* sentinel */ },
-};
-
-static struct samsung_pll_clock s3c244x_common_plls[] __initdata = {
- [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
- LOCKTIME, MPLLCON, NULL),
- [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
- LOCKTIME, UPLLCON, NULL),
-};
-
-PNAME(hclk_p) = { "fclk", "div_hclk_2", "div_hclk_4", "div_hclk_3" };
-PNAME(armclk_p) = { "fclk", "hclk" };
-
-static struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
- MUX(HCLK, "hclk", hclk_p, CLKDIVN, 1, 2),
- MUX(ARMCLK, "armclk", armclk_p, CAMDIVN, 12, 1),
-};
-
-static struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
- FFACTOR(0, "div_hclk_2", "fclk", 1, 2, 0),
- FFACTOR(0, "ff_cam", "div_cam", 2, 1, CLK_SET_RATE_PARENT),
-};
-
-static struct clk_div_table div_hclk_4_d[] = {
- { .val = 0, .div = 4 },
- { .val = 1, .div = 8 },
- { /* sentinel */ },
-};
-
-static struct clk_div_table div_hclk_3_d[] = {
- { .val = 0, .div = 3 },
- { .val = 1, .div = 6 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
- DIV(UCLK, "uclk", "upll", CLKDIVN, 3, 1),
- DIV(0, "div_hclk", "fclk", CLKDIVN, 1, 1),
- DIV_T(0, "div_hclk_4", "fclk", CAMDIVN, 9, 1, div_hclk_4_d),
- DIV_T(0, "div_hclk_3", "fclk", CAMDIVN, 8, 1, div_hclk_3_d),
- DIV(0, "div_cam", "upll", CAMDIVN, 0, 3),
-};
-
-static struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
- GATE(HCLK_CAM, "cam", "hclk", CLKCON, 19, 0, 0),
-};
-
-static struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"),
- ALIAS(HCLK_CAM, NULL, "camif"),
- ALIAS(CAMIF, NULL, "camif-upll"),
-};
-
-/* S3C2440 specific clocks */
-
-PNAME(s3c2440_camif_p) = { "upll", "ff_cam" };
-
-static struct samsung_mux_clock s3c2440_muxes[] __initdata = {
- MUX(CAMIF, "camif", s3c2440_camif_p, CAMDIVN, 4, 1),
-};
-
-static struct samsung_gate_clock s3c2440_gates[] __initdata = {
- GATE(PCLK_AC97, "ac97", "pclk", CLKCON, 20, 0, 0),
-};
-
-/* S3C2442 specific clocks */
-
-static struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
- FFACTOR(0, "upll_3", "upll", 1, 3, 0),
-};
-
-PNAME(s3c2442_camif_p) = { "upll", "ff_cam", "upll", "upll_3" };
-
-static struct samsung_mux_clock s3c2442_muxes[] __initdata = {
- MUX(CAMIF, "camif", s3c2442_camif_p, CAMDIVN, 4, 2),
-};
-
-/*
- * fixed rate clocks generated outside the soc
- * Only necessary until the devicetree-move is complete
- */
-#define XTI 1
-static struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, 0, 0),
-};
-
-static void __init s3c2410_common_clk_register_fixed_ext(
- struct samsung_clk_provider *ctx,
- unsigned long xti_f)
-{
- struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal");
-
- s3c2410_common_frate_clks[0].fixed_rate = xti_f;
- samsung_clk_register_fixed_rate(ctx, s3c2410_common_frate_clks,
- ARRAY_SIZE(s3c2410_common_frate_clks));
-
- samsung_clk_register_alias(ctx, &xti_alias, 1);
-}
-
-void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
- int current_soc,
- void __iomem *base)
-{
- struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
- reg_base = base;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
-
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- hws = ctx->clk_data.hws;
-
- /* Register external clocks only in non-dt cases */
- if (!np)
- s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
-
- if (current_soc == S3C2410) {
- if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
- s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
- s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
- }
-
- /* Register PLLs. */
- samsung_clk_register_pll(ctx, s3c2410_plls,
- ARRAY_SIZE(s3c2410_plls), reg_base);
-
- } else { /* S3C2440, S3C2442 */
- if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
- /*
- * plls follow different calculation schemes, with the
- * upll following the same scheme as the s3c2410 plls
- */
- s3c244x_common_plls[mpll].rate_table =
- pll_s3c244x_12mhz_tbl;
- s3c244x_common_plls[upll].rate_table =
- pll_s3c2410_12mhz_tbl;
- }
-
- /* Register PLLs. */
- samsung_clk_register_pll(ctx, s3c244x_common_plls,
- ARRAY_SIZE(s3c244x_common_plls), reg_base);
- }
-
- /* Register common internal clocks. */
- samsung_clk_register_mux(ctx, s3c2410_common_muxes,
- ARRAY_SIZE(s3c2410_common_muxes));
- samsung_clk_register_div(ctx, s3c2410_common_dividers,
- ARRAY_SIZE(s3c2410_common_dividers));
- samsung_clk_register_gate(ctx, s3c2410_common_gates,
- ARRAY_SIZE(s3c2410_common_gates));
-
- if (current_soc == S3C2440 || current_soc == S3C2442) {
- samsung_clk_register_div(ctx, s3c244x_common_dividers,
- ARRAY_SIZE(s3c244x_common_dividers));
- samsung_clk_register_gate(ctx, s3c244x_common_gates,
- ARRAY_SIZE(s3c244x_common_gates));
- samsung_clk_register_mux(ctx, s3c244x_common_muxes,
- ARRAY_SIZE(s3c244x_common_muxes));
- samsung_clk_register_fixed_factor(ctx, s3c244x_common_ffactor,
- ARRAY_SIZE(s3c244x_common_ffactor));
- }
-
- /* Register SoC-specific clocks. */
- switch (current_soc) {
- case S3C2410:
- samsung_clk_register_div(ctx, s3c2410_dividers,
- ARRAY_SIZE(s3c2410_dividers));
- samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
- ARRAY_SIZE(s3c2410_ffactor));
- samsung_clk_register_alias(ctx, s3c2410_aliases,
- ARRAY_SIZE(s3c2410_aliases));
- break;
- case S3C2440:
- samsung_clk_register_mux(ctx, s3c2440_muxes,
- ARRAY_SIZE(s3c2440_muxes));
- samsung_clk_register_gate(ctx, s3c2440_gates,
- ARRAY_SIZE(s3c2440_gates));
- break;
- case S3C2442:
- samsung_clk_register_mux(ctx, s3c2442_muxes,
- ARRAY_SIZE(s3c2442_muxes));
- samsung_clk_register_fixed_factor(ctx, s3c2442_ffactor,
- ARRAY_SIZE(s3c2442_ffactor));
- break;
- }
-
- /*
- * Register common aliases at the end, as some of the aliased clocks
- * are SoC specific.
- */
- samsung_clk_register_alias(ctx, s3c2410_common_aliases,
- ARRAY_SIZE(s3c2410_common_aliases));
-
- if (current_soc == S3C2440 || current_soc == S3C2442) {
- samsung_clk_register_alias(ctx, s3c244x_common_aliases,
- ARRAY_SIZE(s3c244x_common_aliases));
- }
-
- samsung_clk_sleep_init(reg_base, s3c2410_clk_regs,
- ARRAY_SIZE(s3c2410_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
-}
-
-static void __init s3c2410_clk_init(struct device_node *np)
-{
- s3c2410_common_clk_init(np, 0, S3C2410, NULL);
-}
-CLK_OF_DECLARE(s3c2410_clk, "samsung,s3c2410-clock", s3c2410_clk_init);
-
-static void __init s3c2440_clk_init(struct device_node *np)
-{
- s3c2410_common_clk_init(np, 0, S3C2440, NULL);
-}
-CLK_OF_DECLARE(s3c2440_clk, "samsung,s3c2440-clock", s3c2440_clk_init);
-
-static void __init s3c2442_clk_init(struct device_node *np)
-{
- s3c2410_common_clk_init(np, 0, S3C2442, NULL);
-}
-CLK_OF_DECLARE(s3c2442_clk, "samsung,s3c2442-clock", s3c2442_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
deleted file mode 100644
index 724ef642f048..000000000000
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for S3C2412 and S3C2413.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/samsung.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/reboot.h>
-
-#include <dt-bindings/clock/s3c2412.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-#define LOCKTIME 0x00
-#define MPLLCON 0x04
-#define UPLLCON 0x08
-#define CLKCON 0x0c
-#define CLKDIVN 0x14
-#define CLKSRC 0x1c
-#define SWRST 0x30
-
-static void __iomem *reg_base;
-
-/*
- * list of controller registers to be saved and restored during a
- * suspend/resume cycle.
- */
-static unsigned long s3c2412_clk_regs[] __initdata = {
- LOCKTIME,
- MPLLCON,
- UPLLCON,
- CLKCON,
- CLKDIVN,
- CLKSRC,
-};
-
-static struct clk_div_table divxti_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2412_dividers[] __initdata = {
- DIV_T(0, "div_xti", "xti", CLKSRC, 0, 3, divxti_d),
- DIV(0, "div_cam", "mux_cam", CLKDIVN, 16, 4),
- DIV(0, "div_i2s", "mux_i2s", CLKDIVN, 12, 4),
- DIV(0, "div_uart", "mux_uart", CLKDIVN, 8, 4),
- DIV(0, "div_usb", "mux_usb", CLKDIVN, 6, 1),
- DIV(0, "div_hclk_half", "hclk", CLKDIVN, 5, 1),
- DIV(ARMDIV, "armdiv", "msysclk", CLKDIVN, 3, 1),
- DIV(PCLK, "pclk", "hclk", CLKDIVN, 2, 1),
- DIV(HCLK, "hclk", "armdiv", CLKDIVN, 0, 2),
-};
-
-static struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
- FFACTOR(0, "ff_hclk", "hclk", 2, 1, CLK_SET_RATE_PARENT),
-};
-
-/*
- * The first two use the OM[4] setting, which is not readable from
- * software, so assume it is set to xti.
- */
-PNAME(erefclk_p) = { "xti", "xti", "xti", "ext" };
-PNAME(urefclk_p) = { "xti", "xti", "xti", "ext" };
-
-PNAME(camclk_p) = { "usysclk", "hclk" };
-PNAME(usbclk_p) = { "usysclk", "hclk" };
-PNAME(i2sclk_p) = { "erefclk", "mpll" };
-PNAME(uartclk_p) = { "erefclk", "mpll" };
-PNAME(usysclk_p) = { "urefclk", "upll" };
-PNAME(msysclk_p) = { "mdivclk", "mpll" };
-PNAME(mdivclk_p) = { "xti", "div_xti" };
-PNAME(armclk_p) = { "armdiv", "hclk" };
-
-static struct samsung_mux_clock s3c2412_muxes[] __initdata = {
- MUX(0, "erefclk", erefclk_p, CLKSRC, 14, 2),
- MUX(0, "urefclk", urefclk_p, CLKSRC, 12, 2),
- MUX(0, "mux_cam", camclk_p, CLKSRC, 11, 1),
- MUX(0, "mux_usb", usbclk_p, CLKSRC, 10, 1),
- MUX(0, "mux_i2s", i2sclk_p, CLKSRC, 9, 1),
- MUX(0, "mux_uart", uartclk_p, CLKSRC, 8, 1),
- MUX(USYSCLK, "usysclk", usysclk_p, CLKSRC, 5, 1),
- MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
- MUX(MDIVCLK, "mdivclk", mdivclk_p, CLKSRC, 3, 1),
- MUX(ARMCLK, "armclk", armclk_p, CLKDIVN, 4, 1),
-};
-
-static struct samsung_pll_clock s3c2412_plls[] __initdata = {
- PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti", LOCKTIME, MPLLCON, NULL),
- PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk", LOCKTIME, UPLLCON, NULL),
-};
-
-static struct samsung_gate_clock s3c2412_gates[] __initdata = {
- GATE(PCLK_WDT, "wdt", "pclk", CLKCON, 28, 0, 0),
- GATE(PCLK_SPI, "spi", "pclk", CLKCON, 27, 0, 0),
- GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 26, 0, 0),
- GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 25, 0, 0),
- GATE(PCLK_ADC, "adc", "pclk", CLKCON, 24, 0, 0),
- GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 23, 0, 0),
- GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 22, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 21, 0, 0),
- GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 20, 0, 0),
- GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 19, 0, 0),
- GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 18, 0, 0),
- GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 17, 0, 0),
- GATE(PCLK_USBD, "usb-device", "pclk", CLKCON, 16, 0, 0),
- GATE(SCLK_CAM, "sclk_cam", "div_cam", CLKCON, 15, 0, 0),
- GATE(SCLK_UART, "sclk_uart", "div_uart", CLKCON, 14, 0, 0),
- GATE(SCLK_I2S, "sclk_i2s", "div_i2s", CLKCON, 13, 0, 0),
- GATE(SCLK_USBH, "sclk_usbh", "div_usb", CLKCON, 12, 0, 0),
- GATE(SCLK_USBD, "sclk_usbd", "div_usb", CLKCON, 11, 0, 0),
- GATE(HCLK_HALF, "hclk_half", "div_hclk_half", CLKCON, 10, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_X2, "hclkx2", "ff_hclk", CLKCON, 9, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_SDRAM, "sdram", "hclk", CLKCON, 8, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0),
- GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0),
- GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0),
- GATE(HCLK_DMA3, "dma3", "hclk", CLKCON, 3, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA2, "dma2", "hclk", CLKCON, 2, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA1, "dma1", "hclk", CLKCON, 1, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA0, "dma0", "hclk", CLKCON, 0, CLK_IGNORE_UNUSED, 0),
-};
-
-static struct samsung_clock_alias s3c2412_aliases[] __initdata = {
- ALIAS(PCLK_UART0, "s3c2412-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2412-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2412-uart.2", "uart"),
- ALIAS(PCLK_UART0, "s3c2412-uart.0", "clk_uart_baud2"),
- ALIAS(PCLK_UART1, "s3c2412-uart.1", "clk_uart_baud2"),
- ALIAS(PCLK_UART2, "s3c2412-uart.2", "clk_uart_baud2"),
- ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
- ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
- ALIAS(PCLK_ADC, NULL, "adc"),
- ALIAS(PCLK_RTC, NULL, "rtc"),
- ALIAS(PCLK_PWM, NULL, "timers"),
- ALIAS(HCLK_LCD, NULL, "lcd"),
- ALIAS(PCLK_USBD, NULL, "usb-device"),
- ALIAS(SCLK_USBD, NULL, "usb-bus-gadget"),
- ALIAS(HCLK_USBH, NULL, "usb-host"),
- ALIAS(SCLK_USBH, NULL, "usb-bus-host"),
- ALIAS(ARMCLK, NULL, "armclk"),
- ALIAS(HCLK, NULL, "hclk"),
- ALIAS(MPLL, NULL, "mpll"),
- ALIAS(MSYSCLK, NULL, "fclk"),
-};
-
-static int s3c2412_restart(struct notifier_block *this,
- unsigned long mode, void *cmd)
-{
- /* errata "Watch-dog/Software Reset Problem" specifies that
- * this reset must be done with the SYSCLK sourced from
- * EXTCLK instead of FOUT to avoid a glitch in the reset
- * mechanism.
- *
- * See the watchdog section of the S3C2412 manual for more
- * information on this fix.
- */
-
- __raw_writel(0x00, reg_base + CLKSRC);
- __raw_writel(0x533C2412, reg_base + SWRST);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2412_restart_handler = {
- .notifier_call = s3c2412_restart,
- .priority = 129,
-};
-
-/*
- * fixed rate clocks generated outside the soc
- * Only necessary until the devicetree-move is complete
- */
-#define XTI 1
-static struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, 0, 0),
- FRATE(0, "ext", NULL, 0, 0),
-};
-
-static void __init s3c2412_common_clk_register_fixed_ext(
- struct samsung_clk_provider *ctx,
- unsigned long xti_f, unsigned long ext_f)
-{
- /* xtal alias is necessary for the current cpufreq driver */
- struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal");
-
- s3c2412_common_frate_clks[0].fixed_rate = xti_f;
- s3c2412_common_frate_clks[1].fixed_rate = ext_f;
- samsung_clk_register_fixed_rate(ctx, s3c2412_common_frate_clks,
- ARRAY_SIZE(s3c2412_common_frate_clks));
-
- samsung_clk_register_alias(ctx, &xti_alias, 1);
-}
-
-void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
- unsigned long ext_f, void __iomem *base)
-{
- struct samsung_clk_provider *ctx;
- int ret;
- reg_base = base;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
-
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
-
- /* Register external clocks only in non-dt cases */
- if (!np)
- s3c2412_common_clk_register_fixed_ext(ctx, xti_f, ext_f);
-
- /* Register PLLs. */
- samsung_clk_register_pll(ctx, s3c2412_plls, ARRAY_SIZE(s3c2412_plls),
- reg_base);
-
- /* Register common internal clocks. */
- samsung_clk_register_mux(ctx, s3c2412_muxes, ARRAY_SIZE(s3c2412_muxes));
- samsung_clk_register_div(ctx, s3c2412_dividers,
- ARRAY_SIZE(s3c2412_dividers));
- samsung_clk_register_gate(ctx, s3c2412_gates,
- ARRAY_SIZE(s3c2412_gates));
- samsung_clk_register_fixed_factor(ctx, s3c2412_ffactor,
- ARRAY_SIZE(s3c2412_ffactor));
- samsung_clk_register_alias(ctx, s3c2412_aliases,
- ARRAY_SIZE(s3c2412_aliases));
-
- samsung_clk_sleep_init(reg_base, s3c2412_clk_regs,
- ARRAY_SIZE(s3c2412_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
-
- ret = register_restart_handler(&s3c2412_restart_handler);
- if (ret)
- pr_warn("cannot register restart handler, %d\n", ret);
-}
-
-static void __init s3c2412_clk_init(struct device_node *np)
-{
- s3c2412_common_clk_init(np, 0, 0, NULL);
-}
-CLK_OF_DECLARE(s3c2412_clk, "samsung,s3c2412-clock", s3c2412_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
deleted file mode 100644
index a827d63766d1..000000000000
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for S3C2443 and following SoCs.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/samsung.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/reboot.h>
-
-#include <dt-bindings/clock/s3c2443.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-/* S3C2416 clock controller register offsets */
-#define LOCKCON0 0x00
-#define LOCKCON1 0x04
-#define MPLLCON 0x10
-#define EPLLCON 0x18
-#define EPLLCON_K 0x1C
-#define CLKSRC 0x20
-#define CLKDIV0 0x24
-#define CLKDIV1 0x28
-#define CLKDIV2 0x2C
-#define HCLKCON 0x30
-#define PCLKCON 0x34
-#define SCLKCON 0x38
-#define SWRST 0x44
-
-/* the soc types */
-enum supported_socs {
- S3C2416,
- S3C2443,
- S3C2450,
-};
-
-static void __iomem *reg_base;
-
-/*
- * list of controller registers to be saved and restored during a
- * suspend/resume cycle.
- */
-static unsigned long s3c2443_clk_regs[] __initdata = {
- LOCKCON0,
- LOCKCON1,
- MPLLCON,
- EPLLCON,
- EPLLCON_K,
- CLKSRC,
- CLKDIV0,
- CLKDIV1,
- CLKDIV2,
- PCLKCON,
- HCLKCON,
- SCLKCON,
-};
-
-PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" };
-PNAME(esysclk_p) = { "epllref", "epll" };
-PNAME(mpllref_p) = { "xti", "mdivclk" };
-PNAME(msysclk_p) = { "mpllref", "mpll" };
-PNAME(armclk_p) = { "armdiv" , "hclk" };
-PNAME(i2s0_p) = { "div_i2s0", "ext_i2s", "epllref", "epllref" };
-
-static struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
- MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
- MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
- MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
- MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
- MUX(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1),
- MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2),
-};
-
-static struct clk_div_table hclk_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 3, .div = 4 },
- { /* sentinel */ },
-};
-
-static struct clk_div_table mdivclk_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 3 },
- { .val = 2, .div = 5 },
- { .val = 3, .div = 7 },
- { .val = 4, .div = 9 },
- { .val = 5, .div = 11 },
- { .val = 6, .div = 13 },
- { .val = 7, .div = 15 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
- DIV_T(0, "mdivclk", "xti", CLKDIV0, 6, 3, mdivclk_d),
- DIV(0, "prediv", "msysclk", CLKDIV0, 4, 2),
- DIV_T(HCLK, "hclk", "prediv", CLKDIV0, 0, 2, hclk_d),
- DIV(PCLK, "pclk", "hclk", CLKDIV0, 2, 1),
- DIV(0, "div_hsspi0_epll", "esysclk", CLKDIV1, 24, 2),
- DIV(0, "div_fimd", "esysclk", CLKDIV1, 16, 8),
- DIV(0, "div_i2s0", "esysclk", CLKDIV1, 12, 4),
- DIV(0, "div_uart", "esysclk", CLKDIV1, 8, 4),
- DIV(0, "div_hsmmc1", "esysclk", CLKDIV1, 6, 2),
- DIV(0, "div_usbhost", "esysclk", CLKDIV1, 4, 2),
-};
-
-static struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
- GATE(SCLK_HSMMC_EXT, "sclk_hsmmcext", "ext", SCLKCON, 13, 0, 0),
- GATE(SCLK_HSMMC1, "sclk_hsmmc1", "div_hsmmc1", SCLKCON, 12, 0, 0),
- GATE(SCLK_FIMD, "sclk_fimd", "div_fimd", SCLKCON, 10, 0, 0),
- GATE(SCLK_I2S0, "sclk_i2s0", "mux_i2s0", SCLKCON, 9, 0, 0),
- GATE(SCLK_UART, "sclk_uart", "div_uart", SCLKCON, 8, 0, 0),
- GATE(SCLK_USBH, "sclk_usbhost", "div_usbhost", SCLKCON, 1, 0, 0),
- GATE(HCLK_DRAM, "dram", "hclk", HCLKCON, 19, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_SSMC, "ssmc", "hclk", HCLKCON, 18, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_HSMMC1, "hsmmc1", "hclk", HCLKCON, 16, 0, 0),
- GATE(HCLK_USBD, "usb-device", "hclk", HCLKCON, 12, 0, 0),
- GATE(HCLK_USBH, "usb-host", "hclk", HCLKCON, 11, 0, 0),
- GATE(HCLK_LCD, "lcd", "hclk", HCLKCON, 9, 0, 0),
- GATE(HCLK_DMA5, "dma5", "hclk", HCLKCON, 5, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA4, "dma4", "hclk", HCLKCON, 4, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA3, "dma3", "hclk", HCLKCON, 3, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA2, "dma2", "hclk", HCLKCON, 2, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA1, "dma1", "hclk", HCLKCON, 1, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA0, "dma0", "hclk", HCLKCON, 0, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_GPIO, "gpio", "pclk", PCLKCON, 13, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_RTC, "rtc", "pclk", PCLKCON, 12, 0, 0),
- GATE(PCLK_WDT, "wdt", "pclk", PCLKCON, 11, 0, 0),
- GATE(PCLK_PWM, "pwm", "pclk", PCLKCON, 10, 0, 0),
- GATE(PCLK_I2S0, "i2s0", "pclk", PCLKCON, 9, 0, 0),
- GATE(PCLK_AC97, "ac97", "pclk", PCLKCON, 8, 0, 0),
- GATE(PCLK_ADC, "adc", "pclk", PCLKCON, 7, 0, 0),
- GATE(PCLK_SPI0, "spi0", "pclk", PCLKCON, 6, 0, 0),
- GATE(PCLK_I2C0, "i2c0", "pclk", PCLKCON, 4, 0, 0),
- GATE(PCLK_UART3, "uart3", "pclk", PCLKCON, 3, 0, 0),
- GATE(PCLK_UART2, "uart2", "pclk", PCLKCON, 2, 0, 0),
- GATE(PCLK_UART1, "uart1", "pclk", PCLKCON, 1, 0, 0),
- GATE(PCLK_UART0, "uart0", "pclk", PCLKCON, 0, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
- ALIAS(MSYSCLK, NULL, "msysclk"),
- ALIAS(ARMCLK, NULL, "armclk"),
- ALIAS(MPLL, NULL, "mpll"),
- ALIAS(EPLL, NULL, "epll"),
- ALIAS(HCLK, NULL, "hclk"),
- ALIAS(HCLK_SSMC, NULL, "nand"),
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
- ALIAS(PCLK_UART3, "s3c2440-uart.3", "uart"),
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"),
- ALIAS(PCLK_UART3, "s3c2440-uart.3", "clk_uart_baud2"),
- ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
- ALIAS(PCLK_PWM, NULL, "timers"),
- ALIAS(PCLK_RTC, NULL, "rtc"),
- ALIAS(PCLK_WDT, NULL, "watchdog"),
- ALIAS(PCLK_ADC, NULL, "adc"),
- ALIAS(PCLK_I2C0, "s3c2410-i2c.0", "i2c"),
- ALIAS(HCLK_USBD, NULL, "usb-device"),
- ALIAS(HCLK_USBH, NULL, "usb-host"),
- ALIAS(SCLK_USBH, NULL, "usb-bus-host"),
- ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi"),
- ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi_busclk0"),
- ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"),
- ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
- ALIAS(PCLK_I2S0, "samsung-i2s.0", "iis"),
- ALIAS(SCLK_I2S0, NULL, "i2s-if"),
- ALIAS(HCLK_LCD, NULL, "lcd"),
- ALIAS(SCLK_FIMD, NULL, "sclk_fimd"),
-};
-
-/* S3C2416 specific clocks */
-
-static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
- PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
- PLL(pll_6553, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
-};
-
-PNAME(s3c2416_hsmmc0_p) = { "sclk_hsmmc0", "sclk_hsmmcext" };
-PNAME(s3c2416_hsmmc1_p) = { "sclk_hsmmc1", "sclk_hsmmcext" };
-PNAME(s3c2416_hsspi0_p) = { "hsspi0_epll", "hsspi0_mpll" };
-
-static struct clk_div_table armdiv_s3c2416_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 3 },
- { .val = 3, .div = 4 },
- { .val = 5, .div = 6 },
- { .val = 7, .div = 8 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2416_dividers[] __initdata = {
- DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 3, armdiv_s3c2416_d),
- DIV(0, "div_hsspi0_mpll", "msysclk", CLKDIV2, 0, 4),
- DIV(0, "div_hsmmc0", "esysclk", CLKDIV2, 6, 2),
-};
-
-static struct samsung_mux_clock s3c2416_muxes[] __initdata = {
- MUX(MUX_HSMMC0, "mux_hsmmc0", s3c2416_hsmmc0_p, CLKSRC, 16, 1),
- MUX(MUX_HSMMC1, "mux_hsmmc1", s3c2416_hsmmc1_p, CLKSRC, 17, 1),
- MUX(MUX_HSSPI0, "mux_hsspi0", s3c2416_hsspi0_p, CLKSRC, 18, 1),
-};
-
-static struct samsung_gate_clock s3c2416_gates[] __initdata = {
- GATE(0, "hsspi0_mpll", "div_hsspi0_mpll", SCLKCON, 19, 0, 0),
- GATE(0, "hsspi0_epll", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
- GATE(0, "sclk_hsmmc0", "div_hsmmc0", SCLKCON, 6, 0, 0),
- GATE(HCLK_2D, "2d", "hclk", HCLKCON, 20, 0, 0),
- GATE(HCLK_HSMMC0, "hsmmc0", "hclk", HCLKCON, 15, 0, 0),
- GATE(HCLK_IROM, "irom", "hclk", HCLKCON, 13, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_PCM, "pcm", "pclk", PCLKCON, 19, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2416_aliases[] __initdata = {
- ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
- ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
- ALIAS(MUX_HSMMC0, "s3c-sdhci.0", "mmc_busclk.2"),
- ALIAS(MUX_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
- ALIAS(MUX_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
- ALIAS(ARMDIV, NULL, "armdiv"),
-};
-
-/* S3C2443 specific clocks */
-
-static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
- PLL(pll_3000, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
- PLL(pll_2126, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
-};
-
-static struct clk_div_table armdiv_s3c2443_d[] = {
- { .val = 0, .div = 1 },
- { .val = 8, .div = 2 },
- { .val = 2, .div = 3 },
- { .val = 9, .div = 4 },
- { .val = 10, .div = 6 },
- { .val = 11, .div = 8 },
- { .val = 13, .div = 12 },
- { .val = 15, .div = 16 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2443_dividers[] __initdata = {
- DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 4, armdiv_s3c2443_d),
- DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
-};
-
-static struct samsung_gate_clock s3c2443_gates[] __initdata = {
- GATE(SCLK_HSSPI0, "sclk_hsspi0", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
- GATE(SCLK_CAM, "sclk_cam", "div_cam", SCLKCON, 11, 0, 0),
- GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
- GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 15, 0, 0),
- GATE(PCLK_SDI, "sdi", "pclk", PCLKCON, 5, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2443_aliases[] __initdata = {
- ALIAS(SCLK_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
- ALIAS(SCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
- ALIAS(SCLK_CAM, NULL, "camif-upll"),
- ALIAS(PCLK_SPI1, "s3c2410-spi.0", "spi"),
- ALIAS(PCLK_SDI, NULL, "sdi"),
- ALIAS(HCLK_CFC, NULL, "cfc"),
- ALIAS(ARMDIV, NULL, "armdiv"),
-};
-
-/* S3C2450 specific clocks */
-
-PNAME(s3c2450_cam_p) = { "div_cam", "hclk" };
-PNAME(s3c2450_hsspi1_p) = { "hsspi1_epll", "hsspi1_mpll" };
-PNAME(i2s1_p) = { "div_i2s1", "ext_i2s", "epllref", "epllref" };
-
-static struct samsung_div_clock s3c2450_dividers[] __initdata = {
- DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
- DIV(0, "div_hsspi1_epll", "esysclk", CLKDIV2, 24, 2),
- DIV(0, "div_hsspi1_mpll", "msysclk", CLKDIV2, 16, 4),
- DIV(0, "div_i2s1", "esysclk", CLKDIV2, 12, 4),
-};
-
-static struct samsung_mux_clock s3c2450_muxes[] __initdata = {
- MUX(0, "mux_cam", s3c2450_cam_p, CLKSRC, 20, 1),
- MUX(MUX_HSSPI1, "mux_hsspi1", s3c2450_hsspi1_p, CLKSRC, 19, 1),
- MUX(0, "mux_i2s1", i2s1_p, CLKSRC, 12, 2),
-};
-
-static struct samsung_gate_clock s3c2450_gates[] __initdata = {
- GATE(SCLK_I2S1, "sclk_i2s1", "div_i2s1", SCLKCON, 5, 0, 0),
- GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, 0, 0),
- GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
- GATE(HCLK_DMA7, "dma7", "hclk", HCLKCON, 7, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA6, "dma6", "hclk", HCLKCON, 6, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_I2S1, "i2s1", "pclk", PCLKCON, 17, 0, 0),
- GATE(PCLK_I2C1, "i2c1", "pclk", PCLKCON, 16, 0, 0),
- GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 14, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2450_aliases[] __initdata = {
- ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi"),
- ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi_busclk0"),
- ALIAS(MUX_HSSPI1, "s3c2443-spi.1", "spi_busclk2"),
- ALIAS(PCLK_I2C1, "s3c2410-i2c.1", "i2c"),
-};
-
-static int s3c2443_restart(struct notifier_block *this,
- unsigned long mode, void *cmd)
-{
- __raw_writel(0x533c2443, reg_base + SWRST);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2443_restart_handler = {
- .notifier_call = s3c2443_restart,
- .priority = 129,
-};
-
-/*
- * fixed rate clocks generated outside the soc
- * Only necessary until the devicetree-move is complete
- */
-static struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
- FRATE(0, "xti", NULL, 0, 0),
- FRATE(0, "ext", NULL, 0, 0),
- FRATE(0, "ext_i2s", NULL, 0, 0),
- FRATE(0, "ext_uart", NULL, 0, 0),
-};
-
-static void __init s3c2443_common_clk_register_fixed_ext(
- struct samsung_clk_provider *ctx, unsigned long xti_f)
-{
- s3c2443_common_frate_clks[0].fixed_rate = xti_f;
- samsung_clk_register_fixed_rate(ctx, s3c2443_common_frate_clks,
- ARRAY_SIZE(s3c2443_common_frate_clks));
-}
-
-void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
- int current_soc,
- void __iomem *base)
-{
- struct samsung_clk_provider *ctx;
- int ret;
- reg_base = base;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
-
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
-
- /* Register external clocks only in non-dt cases */
- if (!np)
- s3c2443_common_clk_register_fixed_ext(ctx, xti_f);
-
- /* Register PLLs. */
- if (current_soc == S3C2416 || current_soc == S3C2450)
- samsung_clk_register_pll(ctx, s3c2416_pll_clks,
- ARRAY_SIZE(s3c2416_pll_clks), reg_base);
- else
- samsung_clk_register_pll(ctx, s3c2443_pll_clks,
- ARRAY_SIZE(s3c2443_pll_clks), reg_base);
-
- /* Register common internal clocks. */
- samsung_clk_register_mux(ctx, s3c2443_common_muxes,
- ARRAY_SIZE(s3c2443_common_muxes));
- samsung_clk_register_div(ctx, s3c2443_common_dividers,
- ARRAY_SIZE(s3c2443_common_dividers));
- samsung_clk_register_gate(ctx, s3c2443_common_gates,
- ARRAY_SIZE(s3c2443_common_gates));
- samsung_clk_register_alias(ctx, s3c2443_common_aliases,
- ARRAY_SIZE(s3c2443_common_aliases));
-
- /* Register SoC-specific clocks. */
- switch (current_soc) {
- case S3C2450:
- samsung_clk_register_div(ctx, s3c2450_dividers,
- ARRAY_SIZE(s3c2450_dividers));
- samsung_clk_register_mux(ctx, s3c2450_muxes,
- ARRAY_SIZE(s3c2450_muxes));
- samsung_clk_register_gate(ctx, s3c2450_gates,
- ARRAY_SIZE(s3c2450_gates));
- samsung_clk_register_alias(ctx, s3c2450_aliases,
- ARRAY_SIZE(s3c2450_aliases));
- fallthrough; /* as s3c2450 extends the s3c2416 clocks */
- case S3C2416:
- samsung_clk_register_div(ctx, s3c2416_dividers,
- ARRAY_SIZE(s3c2416_dividers));
- samsung_clk_register_mux(ctx, s3c2416_muxes,
- ARRAY_SIZE(s3c2416_muxes));
- samsung_clk_register_gate(ctx, s3c2416_gates,
- ARRAY_SIZE(s3c2416_gates));
- samsung_clk_register_alias(ctx, s3c2416_aliases,
- ARRAY_SIZE(s3c2416_aliases));
- break;
- case S3C2443:
- samsung_clk_register_div(ctx, s3c2443_dividers,
- ARRAY_SIZE(s3c2443_dividers));
- samsung_clk_register_gate(ctx, s3c2443_gates,
- ARRAY_SIZE(s3c2443_gates));
- samsung_clk_register_alias(ctx, s3c2443_aliases,
- ARRAY_SIZE(s3c2443_aliases));
- break;
- }
-
- samsung_clk_sleep_init(reg_base, s3c2443_clk_regs,
- ARRAY_SIZE(s3c2443_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
-
- ret = register_restart_handler(&s3c2443_restart_handler);
- if (ret)
- pr_warn("cannot register restart handler, %d\n", ret);
-}
-
-static void __init s3c2416_clk_init(struct device_node *np)
-{
- s3c2443_common_clk_init(np, 0, S3C2416, NULL);
-}
-CLK_OF_DECLARE(s3c2416_clk, "samsung,s3c2416-clock", s3c2416_clk_init);
-
-static void __init s3c2443_clk_init(struct device_node *np)
-{
- s3c2443_common_clk_init(np, 0, S3C2443, NULL);
-}
-CLK_OF_DECLARE(s3c2443_clk, "samsung,s3c2443-clock", s3c2443_clk_init);
-
-static void __init s3c2450_clk_init(struct device_node *np)
-{
- s3c2443_common_clk_init(np, 0, S3C2450, NULL);
-}
-CLK_OF_DECLARE(s3c2450_clk, "samsung,s3c2450-clock", s3c2450_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index d6b432a26d63..d27a1f73f077 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -405,7 +405,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
panic("%s: failed to map registers\n", __func__);
}
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ ctx = samsung_clk_init(NULL, reg_base, NR_CLKS);
hws = ctx->clk_data.hws;
/* Register external clocks. */
@@ -414,7 +414,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
/* Register PLLs. */
samsung_clk_register_pll(ctx, s3c64xx_pll_clks,
- ARRAY_SIZE(s3c64xx_pll_clks), reg_base);
+ ARRAY_SIZE(s3c64xx_pll_clks));
/* Register common internal clocks. */
samsung_clk_register_fixed_rate(ctx, s3c64xx_fixed_rate_clks,
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index 4425186bdcab..cd85342e4ddb 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -743,7 +743,7 @@ static void __init __s5pv210_clk_init(struct device_node *np,
struct samsung_clk_provider *ctx;
struct clk_hw **hws;
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ ctx = samsung_clk_init(NULL, reg_base, NR_CLKS);
hws = ctx->clk_data.hws;
samsung_clk_register_mux(ctx, early_mux_clks,
@@ -753,7 +753,7 @@ static void __init __s5pv210_clk_init(struct device_node *np,
samsung_clk_register_fixed_rate(ctx, s5p6442_frate_clks,
ARRAY_SIZE(s5p6442_frate_clks));
samsung_clk_register_pll(ctx, s5p6442_pll_clks,
- ARRAY_SIZE(s5p6442_pll_clks), reg_base);
+ ARRAY_SIZE(s5p6442_pll_clks));
samsung_clk_register_mux(ctx, s5p6442_mux_clks,
ARRAY_SIZE(s5p6442_mux_clks));
samsung_clk_register_div(ctx, s5p6442_div_clks,
@@ -764,7 +764,7 @@ static void __init __s5pv210_clk_init(struct device_node *np,
samsung_clk_register_fixed_rate(ctx, s5pv210_frate_clks,
ARRAY_SIZE(s5pv210_frate_clks));
samsung_clk_register_pll(ctx, s5pv210_pll_clks,
- ARRAY_SIZE(s5pv210_pll_clks), reg_base);
+ ARRAY_SIZE(s5pv210_pll_clks));
samsung_clk_register_mux(ctx, s5pv210_mux_clks,
ARRAY_SIZE(s5pv210_mux_clks));
samsung_clk_register_div(ctx, s5pv210_div_clks,
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index bca4731b14ea..b6701905f254 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -53,8 +53,18 @@ struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
return rd;
}
-/* setup the essentials required to support clock lookup using ccf */
-struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
+/**
+ * samsung_clk_init() - Create and initialize a clock provider object
+ * @dev: CMU device to enable runtime PM, or NULL if RPM is not needed
+ * @base: Start address (mapped) of CMU registers
+ * @nr_clks: Total clock count to allocate in clock provider object
+ *
+ * Setup the essentials required to support clock lookup using Common Clock
+ * Framework.
+ *
+ * Return: Allocated and initialized clock provider object.
+ */
+struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
void __iomem *base, unsigned long nr_clks)
{
struct samsung_clk_provider *ctx;
@@ -67,6 +77,7 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
for (i = 0; i < nr_clks; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+ ctx->dev = dev;
ctx->reg_base = base;
ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
@@ -324,6 +335,33 @@ void samsung_clk_extended_sleep_init(void __iomem *reg_base,
}
#endif
+/**
+ * samsung_cmu_register_clocks() - Register all clocks provided in CMU object
+ * @ctx: Clock provider object
+ * @cmu: CMU object with clocks to register
+ */
+void __init samsung_cmu_register_clocks(struct samsung_clk_provider *ctx,
+ const struct samsung_cmu_info *cmu)
+{
+ if (cmu->pll_clks)
+ samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks);
+ if (cmu->mux_clks)
+ samsung_clk_register_mux(ctx, cmu->mux_clks, cmu->nr_mux_clks);
+ if (cmu->div_clks)
+ samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
+ if (cmu->gate_clks)
+ samsung_clk_register_gate(ctx, cmu->gate_clks,
+ cmu->nr_gate_clks);
+ if (cmu->fixed_clks)
+ samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
+ cmu->nr_fixed_clks);
+ if (cmu->fixed_factor_clks)
+ samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
+ cmu->nr_fixed_factor_clks);
+ if (cmu->cpu_clks)
+ samsung_clk_register_cpu(ctx, cmu->cpu_clks, cmu->nr_cpu_clks);
+}
+
/*
* Common function which registers plls, muxes, dividers and gates
* for each CMU. It also add CMU register list to register cache.
@@ -341,31 +379,13 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
return NULL;
}
- ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
+ ctx = samsung_clk_init(NULL, reg_base, cmu->nr_clk_ids);
+ samsung_cmu_register_clocks(ctx, cmu);
- if (cmu->pll_clks)
- samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
- reg_base);
- if (cmu->mux_clks)
- samsung_clk_register_mux(ctx, cmu->mux_clks,
- cmu->nr_mux_clks);
- if (cmu->div_clks)
- samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
- if (cmu->gate_clks)
- samsung_clk_register_gate(ctx, cmu->gate_clks,
- cmu->nr_gate_clks);
- if (cmu->fixed_clks)
- samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
- cmu->nr_fixed_clks);
- if (cmu->fixed_factor_clks)
- samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
- cmu->nr_fixed_factor_clks);
if (cmu->clk_regs)
samsung_clk_extended_sleep_init(reg_base,
cmu->clk_regs, cmu->nr_clk_regs,
cmu->suspend_regs, cmu->nr_suspend_regs);
- if (cmu->cpu_clks)
- samsung_clk_register_cpu(ctx, cmu->cpu_clks, cmu->nr_cpu_clks);
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index b46e83a2581f..ab9c3d7a25b3 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -16,6 +16,7 @@
/**
* struct samsung_clk_provider: information about clock provider
* @reg_base: virtual address for the register base.
+ * @dev: clock provider device needed for runtime PM.
* @lock: maintains exclusion between callbacks for a given clock-provider.
* @clk_data: holds clock related data like clk_hw* and number of clocks.
*/
@@ -337,9 +338,8 @@ struct samsung_cmu_info {
const char *clk_name;
};
-struct samsung_clk_provider * samsung_clk_init(
- struct device_node *np, void __iomem *base,
- unsigned long nr_clks);
+struct samsung_clk_provider *samsung_clk_init(struct device *dev,
+ void __iomem *base, unsigned long nr_clks);
void samsung_clk_of_add_provider(struct device_node *np,
struct samsung_clk_provider *ctx);
void samsung_clk_of_register_fixed_ext(
@@ -373,10 +373,12 @@ void samsung_clk_register_gate(struct samsung_clk_provider *ctx,
unsigned int nr_clk);
void samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_list,
- unsigned int nr_clk, void __iomem *base);
+ unsigned int nr_clk);
void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
const struct samsung_cpu_clock *list, unsigned int nr_clk);
+void samsung_cmu_register_clocks(struct samsung_clk_provider *ctx,
+ const struct samsung_cmu_info *cmu);
struct samsung_clk_provider *samsung_cmu_register_one(
struct device_node *,
const struct samsung_cmu_info *);
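
A minimal sketch of how a caller might use the reworked API above; it is not part of the patch, and the "foo" names are hypothetical. It follows the new prototypes only: samsung_clk_init() now takes a struct device (NULL when there is no device yet, i.e. no runtime PM) instead of a device_node, samsung_clk_register_pll() no longer takes a reg_base, and samsung_cmu_register_clocks() registers every clock table in one call, mirroring what samsung_cmu_register_one() does after this change.

	/* Hypothetical CMU description; fill in the usual clock tables. */
	static const struct samsung_cmu_info foo_cmu_info __initconst = {
		/* .pll_clks/.nr_pll_clks, .mux_clks/.nr_mux_clks,
		 * .div_clks/.nr_div_clks, .gate_clks/.nr_gate_clks,
		 * .nr_clk_ids, ...
		 */
	};

	static void __init foo_cmu_init(struct device_node *np)
	{
		void __iomem *reg_base = of_iomap(np, 0);
		struct samsung_clk_provider *ctx;

		if (!reg_base)
			panic("%s: failed to map registers\n", __func__);

		/* Early OF init has no struct device, so pass NULL (no runtime PM). */
		ctx = samsung_clk_init(NULL, reg_base, foo_cmu_info.nr_clk_ids);

		/* Registers PLLs, muxes, dividers, gates, fixed and CPU clocks. */
		samsung_cmu_register_clocks(ctx, &foo_cmu_info);

		samsung_clk_of_add_provider(np, ctx);
	}
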
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
index b7fde0aadfcb..2322f634a910 100644
--- a/drivers/clk/sifive/Kconfig
+++ b/drivers/clk/sifive/Kconfig
@@ -2,8 +2,8 @@
menuconfig CLK_SIFIVE
bool "SiFive SoC driver support"
- depends on SOC_SIFIVE || COMPILE_TEST
- default SOC_SIFIVE
+ depends on ARCH_SIFIVE || COMPILE_TEST
+ default ARCH_SIFIVE
help
SoC drivers for SiFive Linux-capable SoCs.
@@ -11,7 +11,7 @@ if CLK_SIFIVE
config CLK_SIFIVE_PRCI
bool "PRCI driver for SiFive SoCs"
- default SOC_SIFIVE
+ default ARCH_SIFIVE
select RESET_CONTROLLER
select RESET_SIMPLE
select CLK_ANALOGBITS_WRPLL_CLN28HPC
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index 7cdf2f07c79b..06f129c160bc 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -40,7 +40,7 @@ static struct clk_ops gateclk_ops = {
};
static void __init __socfpga_gate_init(struct device_node *node,
- const struct clk_ops *ops)
+ const struct clk_ops *ops)
{
u32 clk_gate[2];
u32 div_reg[3];
@@ -94,13 +94,25 @@ static void __init __socfpga_gate_init(struct device_node *node,
socfpga_clk->hw.hw.init = &init;
hw_clk = &socfpga_clk->hw.hw;
- if (clk_hw_register(NULL, hw_clk)) {
- kfree(socfpga_clk);
- return;
+ rc = clk_hw_register(NULL, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock:%s\n", clk_name);
+ goto err_clk_hw_register;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
- if (WARN_ON(rc))
- return;
+
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock provider for node:%s\n",
+ clk_name);
+ goto err_of_clk_add_hw_provider;
+ }
+
+ return;
+
+err_of_clk_add_hw_provider:
+ clk_hw_unregister(hw_clk);
+err_clk_hw_register:
+ kfree(socfpga_clk);
}
void __init socfpga_a10_gate_init(struct device_node *node)
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 0c18c55edf8c..32ccda960f28 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -126,17 +126,14 @@ void __init socfpga_gate_init(struct device_node *node)
struct clk_init_data init;
struct clk_ops *ops;
int rc;
- int err;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
return;
ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
- if (WARN_ON(!ops)) {
- kfree(socfpga_clk);
- return;
- }
+ if (WARN_ON(!ops))
+ goto err_kmemdup;
rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
if (rc)
@@ -182,13 +179,25 @@ void __init socfpga_gate_init(struct device_node *node)
hw_clk = &socfpga_clk->hw.hw;
- err = clk_hw_register(NULL, hw_clk);
- if (err) {
- kfree(ops);
- kfree(socfpga_clk);
- return;
+ rc = clk_hw_register(NULL, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock:%s\n", clk_name);
+ goto err_clk_hw_register;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
- if (WARN_ON(rc))
- return;
+
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock provider for node:%s\n",
+ clk_name);
+ goto err_of_clk_add_hw_provider;
+ }
+
+ return;
+
+err_of_clk_add_hw_provider:
+ clk_hw_unregister(hw_clk);
+err_clk_hw_register:
+ kfree(ops);
+err_kmemdup:
+ kfree(socfpga_clk);
}
diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c
index b9cdde4b8441..64cc70b970b7 100644
--- a/drivers/clk/socfpga/clk-periph-a10.c
+++ b/drivers/clk/socfpga/clk-periph-a10.c
@@ -57,8 +57,8 @@ static const struct clk_ops periclk_ops = {
.get_parent = clk_periclk_get_parent,
};
-static __init void __socfpga_periph_init(struct device_node *node,
- const struct clk_ops *ops)
+static void __init __socfpga_periph_init(struct device_node *node,
+ const struct clk_ops *ops)
{
u32 reg;
struct clk_hw *hw_clk;
@@ -106,21 +106,25 @@ static __init void __socfpga_periph_init(struct device_node *node,
hw_clk = &periph_clk->hw.hw;
- if (clk_hw_register(NULL, hw_clk)) {
- kfree(periph_clk);
- return;
+ rc = clk_hw_register(NULL, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock:%s\n", clk_name);
+ goto err_clk_hw_register;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
- if (rc < 0) {
+
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
+ if (rc) {
pr_err("Could not register clock provider for node:%s\n",
clk_name);
- goto err_clk;
+ goto err_of_clk_add_hw_provider;
}
return;
-err_clk:
+err_of_clk_add_hw_provider:
clk_hw_unregister(hw_clk);
+err_clk_hw_register:
+ kfree(periph_clk);
}
void __init socfpga_a10_periph_init(struct device_node *node)
diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
index 43707e2d7248..6a4075147b9c 100644
--- a/drivers/clk/socfpga/clk-periph.c
+++ b/drivers/clk/socfpga/clk-periph.c
@@ -47,8 +47,8 @@ static const struct clk_ops periclk_ops = {
.get_parent = clk_periclk_get_parent,
};
-static __init void __socfpga_periph_init(struct device_node *node,
- const struct clk_ops *ops)
+static void __init __socfpga_periph_init(struct device_node *node,
+ const struct clk_ops *ops)
{
u32 reg;
struct clk_hw *hw_clk;
@@ -96,11 +96,25 @@ static __init void __socfpga_periph_init(struct device_node *node,
periph_clk->hw.hw.init = &init;
hw_clk = &periph_clk->hw.hw;
- if (clk_hw_register(NULL, hw_clk)) {
- kfree(periph_clk);
- return;
+ rc = clk_hw_register(NULL, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock:%s\n", clk_name);
+ goto err_clk_hw_register;
+ }
+
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock provider for node:%s\n",
+ clk_name);
+ goto err_of_clk_add_hw_provider;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
+
+ return;
+
+err_of_clk_add_hw_provider:
+ clk_hw_unregister(hw_clk);
+err_clk_hw_register:
+ kfree(periph_clk);
}
void __init socfpga_periph_init(struct device_node *node)
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index bee0f7da5b6e..b028f25c658a 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -63,8 +63,8 @@ static const struct clk_ops clk_pll_ops = {
.get_parent = clk_pll_get_parent,
};
-static struct clk_hw * __init __socfpga_pll_init(struct device_node *node,
- const struct clk_ops *ops)
+static void __init __socfpga_pll_init(struct device_node *node,
+ const struct clk_ops *ops)
{
u32 reg;
struct clk_hw *hw_clk;
@@ -73,13 +73,14 @@ static struct clk_hw * __init __socfpga_pll_init(struct device_node *node,
const char *parent_name[SOCFGPA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
+ int rc;
int i = 0;
of_property_read_u32(node, "reg", &reg);
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
- return NULL;
+ return;
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
@@ -103,12 +104,25 @@ static struct clk_hw * __init __socfpga_pll_init(struct device_node *node,
pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
hw_clk = &pll_clk->hw.hw;
- if (clk_hw_register(NULL, hw_clk)) {
- kfree(pll_clk);
- return NULL;
+ rc = clk_hw_register(NULL, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock:%s\n", clk_name);
+ goto err_clk_hw_register;
}
- of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
- return hw_clk;
+
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock provider for node:%s\n",
+ clk_name);
+ goto err_of_clk_add_hw_provider;
+ }
+
+ return;
+
+err_of_clk_add_hw_provider:
+ clk_hw_unregister(hw_clk);
+err_clk_hw_register:
+ kfree(pll_clk);
}
void __init socfpga_a10_pll_init(struct device_node *node)
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 127cc849c5ee..9dcc1b2d2cc0 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -70,8 +70,8 @@ static const struct clk_ops clk_pll_ops = {
.get_parent = clk_pll_get_parent,
};
-static __init struct clk_hw *__socfpga_pll_init(struct device_node *node,
- const struct clk_ops *ops)
+static void __init __socfpga_pll_init(struct device_node *node,
+ const struct clk_ops *ops)
{
u32 reg;
struct clk_hw *hw_clk;
@@ -80,13 +80,13 @@ static __init struct clk_hw *__socfpga_pll_init(struct device_node *node,
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
- int err;
+ int rc;
of_property_read_u32(node, "reg", &reg);
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
- return NULL;
+ return;
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
@@ -108,13 +108,25 @@ static __init struct clk_hw *__socfpga_pll_init(struct device_node *node,
hw_clk = &pll_clk->hw.hw;
- err = clk_hw_register(NULL, hw_clk);
- if (err) {
- kfree(pll_clk);
- return ERR_PTR(err);
+ rc = clk_hw_register(NULL, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock:%s\n", clk_name);
+ goto err_clk_hw_register;
}
- of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
- return hw_clk;
+
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
+ if (rc) {
+ pr_err("Could not register clock provider for node:%s\n",
+ clk_name);
+ goto err_of_clk_add_hw_provider;
+ }
+
+ return;
+
+err_of_clk_add_hw_provider:
+ clk_hw_unregister(hw_clk);
+err_clk_hw_register:
+ kfree(pll_clk);
}
void __init socfpga_pll_init(struct device_node *node)
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index c744bd9d2f96..2f19c8d58ff2 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -24,6 +24,8 @@ config SPRD_SC9863A_CLK
config SPRD_UMS512_CLK
tristate "Support for the Spreadtrum UMS512 clocks"
+ depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST
+ default ARM64 && ARCH_SPRD
help
Support for the global clock controller on ums512 devices.
Say Y if you want to use peripheral devices on ums512 SoC.
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index ce81e4087a8f..16142fbb7a47 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xffff,
.fast_io = true,
};
@@ -43,8 +42,10 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
+ struct resource *res;
+ struct regmap_config reg_config = sprdclk_regmap_config;
- if (of_find_property(node, "sprd,syscon", NULL)) {
+ if (of_property_present(node, "sprd,syscon")) {
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
if (IS_ERR(regmap)) {
pr_err("%s: failed to get syscon regmap\n", __func__);
@@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
return PTR_ERR(regmap);
}
} else {
- base = devm_platform_ioremap_resource(pdev, 0);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
+ reg_config.max_register = resource_size(res) - reg_config.reg_stride;
+
regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &sprdclk_regmap_config);
+ &reg_config);
if (IS_ERR(regmap)) {
pr_err("failed to init regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/starfive/Kconfig b/drivers/clk/starfive/Kconfig
index 003bd2d56ce7..5d2333106f13 100644
--- a/drivers/clk/starfive/Kconfig
+++ b/drivers/clk/starfive/Kconfig
@@ -1,9 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+config CLK_STARFIVE_JH71X0
+ bool
+
config CLK_STARFIVE_JH7100
bool "StarFive JH7100 clock support"
- depends on SOC_STARFIVE || COMPILE_TEST
- default SOC_STARFIVE
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select CLK_STARFIVE_JH71X0
+ default ARCH_STARFIVE
help
Say yes here to support the clock controller on the StarFive JH7100
SoC.
@@ -11,7 +15,27 @@ config CLK_STARFIVE_JH7100
config CLK_STARFIVE_JH7100_AUDIO
tristate "StarFive JH7100 audio clock support"
depends on CLK_STARFIVE_JH7100
- default m if SOC_STARFIVE
+ select CLK_STARFIVE_JH71X0
+ default m if ARCH_STARFIVE
help
Say Y or M here to support the audio clocks on the StarFive JH7100
SoC.
+
+config CLK_STARFIVE_JH7110_SYS
+ bool "StarFive JH7110 system clock support"
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select AUXILIARY_BUS
+ select CLK_STARFIVE_JH71X0
+ select RESET_STARFIVE_JH7110 if RESET_CONTROLLER
+ default ARCH_STARFIVE
+ help
+ Say yes here to support the system clock controller on the
+ StarFive JH7110 SoC.
+
+config CLK_STARFIVE_JH7110_AON
+ tristate "StarFive JH7110 always-on clock support"
+ depends on CLK_STARFIVE_JH7110_SYS
+ default m if ARCH_STARFIVE
+ help
+ Say yes here to support the always-on clock controller on the
+ StarFive JH7110 SoC.
diff --git a/drivers/clk/starfive/Makefile b/drivers/clk/starfive/Makefile
index 0fa8ecb9ec1c..f3df7d957b1e 100644
--- a/drivers/clk/starfive/Makefile
+++ b/drivers/clk/starfive/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-# StarFive Clock
+obj-$(CONFIG_CLK_STARFIVE_JH71X0) += clk-starfive-jh71x0.o
+
obj-$(CONFIG_CLK_STARFIVE_JH7100) += clk-starfive-jh7100.o
obj-$(CONFIG_CLK_STARFIVE_JH7100_AUDIO) += clk-starfive-jh7100-audio.o
+
+obj-$(CONFIG_CLK_STARFIVE_JH7110_SYS) += clk-starfive-jh7110-sys.o
+obj-$(CONFIG_CLK_STARFIVE_JH7110_AON) += clk-starfive-jh7110-aon.o
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
index 8473a65e219b..02aefb7264f8 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100-audio.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
@@ -16,7 +16,7 @@
#include <dt-bindings/clock/starfive-jh7100-audio.h>
-#include "clk-starfive-jh7100.h"
+#include "clk-starfive-jh71x0.h"
/* external clocks */
#define JH7100_AUDCLK_AUDIO_SRC (JH7100_AUDCLK_END + 0)
@@ -28,66 +28,66 @@
#define JH7100_AUDCLK_I2SDAC_LRCLK_IOPAD (JH7100_AUDCLK_END + 6)
#define JH7100_AUDCLK_VAD_INTMEM (JH7100_AUDCLK_END + 7)
-static const struct jh7100_clk_data jh7100_audclk_data[] = {
- JH7100__GMD(JH7100_AUDCLK_ADC_MCLK, "adc_mclk", 0, 15, 2,
+static const struct jh71x0_clk_data jh7100_audclk_data[] = {
+ JH71X0__GMD(JH7100_AUDCLK_ADC_MCLK, "adc_mclk", 0, 15, 2,
JH7100_AUDCLK_AUDIO_SRC,
JH7100_AUDCLK_AUDIO_12288),
- JH7100__GMD(JH7100_AUDCLK_I2S1_MCLK, "i2s1_mclk", 0, 15, 2,
+ JH71X0__GMD(JH7100_AUDCLK_I2S1_MCLK, "i2s1_mclk", 0, 15, 2,
JH7100_AUDCLK_AUDIO_SRC,
JH7100_AUDCLK_AUDIO_12288),
- JH7100_GATE(JH7100_AUDCLK_I2SADC_APB, "i2sadc_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100_MDIV(JH7100_AUDCLK_I2SADC_BCLK, "i2sadc_bclk", 31, 2,
+ JH71X0_GATE(JH7100_AUDCLK_I2SADC_APB, "i2sadc_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0_MDIV(JH7100_AUDCLK_I2SADC_BCLK, "i2sadc_bclk", 31, 2,
JH7100_AUDCLK_ADC_MCLK,
JH7100_AUDCLK_I2SADC_BCLK_IOPAD),
- JH7100__INV(JH7100_AUDCLK_I2SADC_BCLK_N, "i2sadc_bclk_n", JH7100_AUDCLK_I2SADC_BCLK),
- JH7100_MDIV(JH7100_AUDCLK_I2SADC_LRCLK, "i2sadc_lrclk", 63, 3,
+ JH71X0__INV(JH7100_AUDCLK_I2SADC_BCLK_N, "i2sadc_bclk_n", JH7100_AUDCLK_I2SADC_BCLK),
+ JH71X0_MDIV(JH7100_AUDCLK_I2SADC_LRCLK, "i2sadc_lrclk", 63, 3,
JH7100_AUDCLK_I2SADC_BCLK_N,
JH7100_AUDCLK_I2SADC_LRCLK_IOPAD,
JH7100_AUDCLK_I2SADC_BCLK),
- JH7100_GATE(JH7100_AUDCLK_PDM_APB, "pdm_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100__GMD(JH7100_AUDCLK_PDM_MCLK, "pdm_mclk", 0, 15, 2,
+ JH71X0_GATE(JH7100_AUDCLK_PDM_APB, "pdm_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0__GMD(JH7100_AUDCLK_PDM_MCLK, "pdm_mclk", 0, 15, 2,
JH7100_AUDCLK_AUDIO_SRC,
JH7100_AUDCLK_AUDIO_12288),
- JH7100_GATE(JH7100_AUDCLK_I2SVAD_APB, "i2svad_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100__GMD(JH7100_AUDCLK_SPDIF, "spdif", 0, 15, 2,
+ JH71X0_GATE(JH7100_AUDCLK_I2SVAD_APB, "i2svad_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0__GMD(JH7100_AUDCLK_SPDIF, "spdif", 0, 15, 2,
JH7100_AUDCLK_AUDIO_SRC,
JH7100_AUDCLK_AUDIO_12288),
- JH7100_GATE(JH7100_AUDCLK_SPDIF_APB, "spdif_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100_GATE(JH7100_AUDCLK_PWMDAC_APB, "pwmdac_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100__GMD(JH7100_AUDCLK_DAC_MCLK, "dac_mclk", 0, 15, 2,
+ JH71X0_GATE(JH7100_AUDCLK_SPDIF_APB, "spdif_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0_GATE(JH7100_AUDCLK_PWMDAC_APB, "pwmdac_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0__GMD(JH7100_AUDCLK_DAC_MCLK, "dac_mclk", 0, 15, 2,
JH7100_AUDCLK_AUDIO_SRC,
JH7100_AUDCLK_AUDIO_12288),
- JH7100_GATE(JH7100_AUDCLK_I2SDAC_APB, "i2sdac_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100_MDIV(JH7100_AUDCLK_I2SDAC_BCLK, "i2sdac_bclk", 31, 2,
+ JH71X0_GATE(JH7100_AUDCLK_I2SDAC_APB, "i2sdac_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0_MDIV(JH7100_AUDCLK_I2SDAC_BCLK, "i2sdac_bclk", 31, 2,
JH7100_AUDCLK_DAC_MCLK,
JH7100_AUDCLK_I2SDAC_BCLK_IOPAD),
- JH7100__INV(JH7100_AUDCLK_I2SDAC_BCLK_N, "i2sdac_bclk_n", JH7100_AUDCLK_I2SDAC_BCLK),
- JH7100_MDIV(JH7100_AUDCLK_I2SDAC_LRCLK, "i2sdac_lrclk", 31, 2,
+ JH71X0__INV(JH7100_AUDCLK_I2SDAC_BCLK_N, "i2sdac_bclk_n", JH7100_AUDCLK_I2SDAC_BCLK),
+ JH71X0_MDIV(JH7100_AUDCLK_I2SDAC_LRCLK, "i2sdac_lrclk", 31, 2,
JH7100_AUDCLK_I2S1_MCLK,
JH7100_AUDCLK_I2SDAC_BCLK_IOPAD),
- JH7100_GATE(JH7100_AUDCLK_I2S1_APB, "i2s1_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100_MDIV(JH7100_AUDCLK_I2S1_BCLK, "i2s1_bclk", 31, 2,
+ JH71X0_GATE(JH7100_AUDCLK_I2S1_APB, "i2s1_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0_MDIV(JH7100_AUDCLK_I2S1_BCLK, "i2s1_bclk", 31, 2,
JH7100_AUDCLK_I2S1_MCLK,
JH7100_AUDCLK_I2SDAC_BCLK_IOPAD),
- JH7100__INV(JH7100_AUDCLK_I2S1_BCLK_N, "i2s1_bclk_n", JH7100_AUDCLK_I2S1_BCLK),
- JH7100_MDIV(JH7100_AUDCLK_I2S1_LRCLK, "i2s1_lrclk", 63, 3,
+ JH71X0__INV(JH7100_AUDCLK_I2S1_BCLK_N, "i2s1_bclk_n", JH7100_AUDCLK_I2S1_BCLK),
+ JH71X0_MDIV(JH7100_AUDCLK_I2S1_LRCLK, "i2s1_lrclk", 63, 3,
JH7100_AUDCLK_I2S1_BCLK_N,
JH7100_AUDCLK_I2SDAC_LRCLK_IOPAD),
- JH7100_GATE(JH7100_AUDCLK_I2SDAC16K_APB, "i2s1dac16k_apb", 0, JH7100_AUDCLK_APB0_BUS),
- JH7100__DIV(JH7100_AUDCLK_APB0_BUS, "apb0_bus", 8, JH7100_AUDCLK_DOM7AHB_BUS),
- JH7100_GATE(JH7100_AUDCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7100_AUDCLK_DOM7AHB_BUS),
- JH7100_GATE(JH7100_AUDCLK_USB_APB, "usb_apb", CLK_IGNORE_UNUSED, JH7100_AUDCLK_APB_EN),
- JH7100_GDIV(JH7100_AUDCLK_USB_LPM, "usb_lpm", CLK_IGNORE_UNUSED, 4, JH7100_AUDCLK_USB_APB),
- JH7100_GDIV(JH7100_AUDCLK_USB_STB, "usb_stb", CLK_IGNORE_UNUSED, 3, JH7100_AUDCLK_USB_APB),
- JH7100__DIV(JH7100_AUDCLK_APB_EN, "apb_en", 8, JH7100_AUDCLK_DOM7AHB_BUS),
- JH7100__MUX(JH7100_AUDCLK_VAD_MEM, "vad_mem", 2,
+ JH71X0_GATE(JH7100_AUDCLK_I2SDAC16K_APB, "i2s1dac16k_apb", 0, JH7100_AUDCLK_APB0_BUS),
+ JH71X0__DIV(JH7100_AUDCLK_APB0_BUS, "apb0_bus", 8, JH7100_AUDCLK_DOM7AHB_BUS),
+ JH71X0_GATE(JH7100_AUDCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7100_AUDCLK_DOM7AHB_BUS),
+ JH71X0_GATE(JH7100_AUDCLK_USB_APB, "usb_apb", CLK_IGNORE_UNUSED, JH7100_AUDCLK_APB_EN),
+ JH71X0_GDIV(JH7100_AUDCLK_USB_LPM, "usb_lpm", CLK_IGNORE_UNUSED, 4, JH7100_AUDCLK_USB_APB),
+ JH71X0_GDIV(JH7100_AUDCLK_USB_STB, "usb_stb", CLK_IGNORE_UNUSED, 3, JH7100_AUDCLK_USB_APB),
+ JH71X0__DIV(JH7100_AUDCLK_APB_EN, "apb_en", 8, JH7100_AUDCLK_DOM7AHB_BUS),
+ JH71X0__MUX(JH7100_AUDCLK_VAD_MEM, "vad_mem", 2,
JH7100_AUDCLK_VAD_INTMEM,
JH7100_AUDCLK_AUDIO_12288),
};
static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *data)
{
- struct jh7100_clk_priv *priv = data;
+ struct jh71x0_clk_priv *priv = data;
unsigned int idx = clkspec->args[0];
if (idx < JH7100_AUDCLK_END)
@@ -98,7 +98,7 @@ static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *d
static int jh7100_audclk_probe(struct platform_device *pdev)
{
- struct jh7100_clk_priv *priv;
+ struct jh71x0_clk_priv *priv;
unsigned int idx;
int ret;
@@ -117,12 +117,12 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
struct clk_parent_data parents[4] = {};
struct clk_init_data init = {
.name = jh7100_audclk_data[idx].name,
- .ops = starfive_jh7100_clk_ops(max),
+ .ops = starfive_jh71x0_clk_ops(max),
.parent_data = parents,
- .num_parents = ((max & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT) + 1,
+ .num_parents = ((max & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT) + 1,
.flags = jh7100_audclk_data[idx].flags,
};
- struct jh7100_clk *clk = &priv->reg[idx];
+ struct jh71x0_clk *clk = &priv->reg[idx];
unsigned int i;
for (i = 0; i < init.num_parents; i++) {
@@ -140,7 +140,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
clk->hw.init = &init;
clk->idx = idx;
- clk->max_div = max & JH7100_CLK_DIV_MASK;
+ clk->max_div = max & JH71X0_CLK_DIV_MASK;
ret = devm_clk_hw_register(priv->dev, &clk->hw);
if (ret)
diff --git a/drivers/clk/starfive/clk-starfive-jh7100.c b/drivers/clk/starfive/clk-starfive-jh7100.c
index 691aeebc7092..69cc11ea7e33 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100.c
@@ -7,20 +7,15 @@
* Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
*/
-#include <linux/bits.h>
#include <linux/clk-provider.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/starfive-jh7100.h>
-#include "clk-starfive-jh7100.h"
+#include "clk-starfive-jh71x0.h"
/* external clocks */
#define JH7100_CLK_OSC_SYS (JH7100_CLK_END + 0)
@@ -28,570 +23,253 @@
#define JH7100_CLK_GMAC_RMII_REF (JH7100_CLK_END + 2)
#define JH7100_CLK_GMAC_GR_MII_RX (JH7100_CLK_END + 3)
-static const struct jh7100_clk_data jh7100_clk_data[] __initconst = {
- JH7100__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 4,
+static const struct jh71x0_clk_data jh7100_clk_data[] __initconst = {
+ JH71X0__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 4,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_DLA_ROOT, "dla_root", 3,
+ JH71X0__MUX(JH7100_CLK_DLA_ROOT, "dla_root", 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_DSP_ROOT, "dsp_root", 4,
+ JH71X0__MUX(JH7100_CLK_DSP_ROOT, "dsp_root", 4,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_GMACUSB_ROOT, "gmacusb_root", 3,
+ JH71X0__MUX(JH7100_CLK_GMACUSB_ROOT, "gmacusb_root", 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_PERH0_ROOT, "perh0_root", 2,
+ JH71X0__MUX(JH7100_CLK_PERH0_ROOT, "perh0_root", 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT),
- JH7100__MUX(JH7100_CLK_PERH1_ROOT, "perh1_root", 2,
+ JH71X0__MUX(JH7100_CLK_PERH1_ROOT, "perh1_root", 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_VIN_ROOT, "vin_root", 3,
+ JH71X0__MUX(JH7100_CLK_VIN_ROOT, "vin_root", 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_VOUT_ROOT, "vout_root", 3,
+ JH71X0__MUX(JH7100_CLK_VOUT_ROOT, "vout_root", 3,
JH7100_CLK_OSC_AUD,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100_GDIV(JH7100_CLK_AUDIO_ROOT, "audio_root", 0, 8, JH7100_CLK_PLL0_OUT),
- JH7100__MUX(JH7100_CLK_CDECHIFI4_ROOT, "cdechifi4_root", 3,
+ JH71X0_GDIV(JH7100_CLK_AUDIO_ROOT, "audio_root", 0, 8, JH7100_CLK_PLL0_OUT),
+ JH71X0__MUX(JH7100_CLK_CDECHIFI4_ROOT, "cdechifi4_root", 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__MUX(JH7100_CLK_CDEC_ROOT, "cdec_root", 3,
+ JH71X0__MUX(JH7100_CLK_CDEC_ROOT, "cdec_root", 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL1_OUT),
- JH7100__MUX(JH7100_CLK_VOUTBUS_ROOT, "voutbus_root", 3,
+ JH71X0__MUX(JH7100_CLK_VOUTBUS_ROOT, "voutbus_root", 3,
JH7100_CLK_OSC_AUD,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL2_OUT),
- JH7100__DIV(JH7100_CLK_CPUNBUS_ROOT_DIV, "cpunbus_root_div", 2, JH7100_CLK_CPUNDBUS_ROOT),
- JH7100__DIV(JH7100_CLK_DSP_ROOT_DIV, "dsp_root_div", 4, JH7100_CLK_DSP_ROOT),
- JH7100__DIV(JH7100_CLK_PERH0_SRC, "perh0_src", 4, JH7100_CLK_PERH0_ROOT),
- JH7100__DIV(JH7100_CLK_PERH1_SRC, "perh1_src", 4, JH7100_CLK_PERH1_ROOT),
- JH7100_GDIV(JH7100_CLK_PLL0_TESTOUT, "pll0_testout", 0, 31, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_PLL1_TESTOUT, "pll1_testout", 0, 31, JH7100_CLK_DLA_ROOT),
- JH7100_GDIV(JH7100_CLK_PLL2_TESTOUT, "pll2_testout", 0, 31, JH7100_CLK_PERH1_SRC),
- JH7100__MUX(JH7100_CLK_PLL2_REF, "pll2_refclk", 2,
+ JH71X0__DIV(JH7100_CLK_CPUNBUS_ROOT_DIV, "cpunbus_root_div", 2, JH7100_CLK_CPUNDBUS_ROOT),
+ JH71X0__DIV(JH7100_CLK_DSP_ROOT_DIV, "dsp_root_div", 4, JH7100_CLK_DSP_ROOT),
+ JH71X0__DIV(JH7100_CLK_PERH0_SRC, "perh0_src", 4, JH7100_CLK_PERH0_ROOT),
+ JH71X0__DIV(JH7100_CLK_PERH1_SRC, "perh1_src", 4, JH7100_CLK_PERH1_ROOT),
+ JH71X0_GDIV(JH7100_CLK_PLL0_TESTOUT, "pll0_testout", 0, 31, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_PLL1_TESTOUT, "pll1_testout", 0, 31, JH7100_CLK_DLA_ROOT),
+ JH71X0_GDIV(JH7100_CLK_PLL2_TESTOUT, "pll2_testout", 0, 31, JH7100_CLK_PERH1_SRC),
+ JH71X0__MUX(JH7100_CLK_PLL2_REF, "pll2_refclk", 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_OSC_AUD),
- JH7100__DIV(JH7100_CLK_CPU_CORE, "cpu_core", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100__DIV(JH7100_CLK_CPU_AXI, "cpu_axi", 8, JH7100_CLK_CPU_CORE),
- JH7100__DIV(JH7100_CLK_AHB_BUS, "ahb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100__DIV(JH7100_CLK_APB1_BUS, "apb1_bus", 8, JH7100_CLK_AHB_BUS),
- JH7100__DIV(JH7100_CLK_APB2_BUS, "apb2_bus", 8, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_DOM3AHB_BUS, "dom3ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_DOM7AHB_BUS, "dom7ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_U74_CORE0, "u74_core0", CLK_IS_CRITICAL, JH7100_CLK_CPU_CORE),
- JH7100_GDIV(JH7100_CLK_U74_CORE1, "u74_core1", CLK_IS_CRITICAL, 8, JH7100_CLK_CPU_CORE),
- JH7100_GATE(JH7100_CLK_U74_AXI, "u74_axi", CLK_IS_CRITICAL, JH7100_CLK_CPU_AXI),
- JH7100_GATE(JH7100_CLK_U74RTC_TOGGLE, "u74rtc_toggle", CLK_IS_CRITICAL, JH7100_CLK_OSC_SYS),
- JH7100_GATE(JH7100_CLK_SGDMA2P_AXI, "sgdma2p_axi", 0, JH7100_CLK_CPU_AXI),
- JH7100_GATE(JH7100_CLK_DMA2PNOC_AXI, "dma2pnoc_axi", 0, JH7100_CLK_CPU_AXI),
- JH7100_GATE(JH7100_CLK_SGDMA2P_AHB, "sgdma2p_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100__DIV(JH7100_CLK_DLA_BUS, "dla_bus", 4, JH7100_CLK_DLA_ROOT),
- JH7100_GATE(JH7100_CLK_DLA_AXI, "dla_axi", 0, JH7100_CLK_DLA_BUS),
- JH7100_GATE(JH7100_CLK_DLANOC_AXI, "dlanoc_axi", 0, JH7100_CLK_DLA_BUS),
- JH7100_GATE(JH7100_CLK_DLA_APB, "dla_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_VP6_CORE, "vp6_core", 0, 4, JH7100_CLK_DSP_ROOT_DIV),
- JH7100__DIV(JH7100_CLK_VP6BUS_SRC, "vp6bus_src", 4, JH7100_CLK_DSP_ROOT),
- JH7100_GDIV(JH7100_CLK_VP6_AXI, "vp6_axi", 0, 4, JH7100_CLK_VP6BUS_SRC),
- JH7100__DIV(JH7100_CLK_VCDECBUS_SRC, "vcdecbus_src", 4, JH7100_CLK_CDECHIFI4_ROOT),
- JH7100__DIV(JH7100_CLK_VDEC_BUS, "vdec_bus", 8, JH7100_CLK_VCDECBUS_SRC),
- JH7100_GATE(JH7100_CLK_VDEC_AXI, "vdec_axi", 0, JH7100_CLK_VDEC_BUS),
- JH7100_GATE(JH7100_CLK_VDECBRG_MAIN, "vdecbrg_mainclk", 0, JH7100_CLK_VDEC_BUS),
- JH7100_GDIV(JH7100_CLK_VDEC_BCLK, "vdec_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC),
- JH7100_GDIV(JH7100_CLK_VDEC_CCLK, "vdec_cclk", 0, 8, JH7100_CLK_CDEC_ROOT),
- JH7100_GATE(JH7100_CLK_VDEC_APB, "vdec_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_JPEG_AXI, "jpeg_axi", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100_GDIV(JH7100_CLK_JPEG_CCLK, "jpeg_cclk", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100_GATE(JH7100_CLK_JPEG_APB, "jpeg_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_GC300_2X, "gc300_2x", 0, 8, JH7100_CLK_CDECHIFI4_ROOT),
- JH7100_GATE(JH7100_CLK_GC300_AHB, "gc300_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100__DIV(JH7100_CLK_JPCGC300_AXIBUS, "jpcgc300_axibus", 8, JH7100_CLK_VCDECBUS_SRC),
- JH7100_GATE(JH7100_CLK_GC300_AXI, "gc300_axi", 0, JH7100_CLK_JPCGC300_AXIBUS),
- JH7100_GATE(JH7100_CLK_JPCGC300_MAIN, "jpcgc300_mainclk", 0, JH7100_CLK_JPCGC300_AXIBUS),
- JH7100__DIV(JH7100_CLK_VENC_BUS, "venc_bus", 8, JH7100_CLK_VCDECBUS_SRC),
- JH7100_GATE(JH7100_CLK_VENC_AXI, "venc_axi", 0, JH7100_CLK_VENC_BUS),
- JH7100_GATE(JH7100_CLK_VENCBRG_MAIN, "vencbrg_mainclk", 0, JH7100_CLK_VENC_BUS),
- JH7100_GDIV(JH7100_CLK_VENC_BCLK, "venc_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC),
- JH7100_GDIV(JH7100_CLK_VENC_CCLK, "venc_cclk", 0, 8, JH7100_CLK_CDEC_ROOT),
- JH7100_GATE(JH7100_CLK_VENC_APB, "venc_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_DDRPLL_DIV2, "ddrpll_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_PLL1_OUT),
- JH7100_GDIV(JH7100_CLK_DDRPLL_DIV4, "ddrpll_div4", CLK_IS_CRITICAL, 2, JH7100_CLK_DDRPLL_DIV2),
- JH7100_GDIV(JH7100_CLK_DDRPLL_DIV8, "ddrpll_div8", CLK_IS_CRITICAL, 2, JH7100_CLK_DDRPLL_DIV4),
- JH7100_GDIV(JH7100_CLK_DDROSC_DIV2, "ddrosc_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_OSC_SYS),
- JH7100_GMUX(JH7100_CLK_DDRC0, "ddrc0", CLK_IS_CRITICAL, 4,
+ JH71X0__DIV(JH7100_CLK_CPU_CORE, "cpu_core", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0__DIV(JH7100_CLK_CPU_AXI, "cpu_axi", 8, JH7100_CLK_CPU_CORE),
+ JH71X0__DIV(JH7100_CLK_AHB_BUS, "ahb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0__DIV(JH7100_CLK_APB1_BUS, "apb1_bus", 8, JH7100_CLK_AHB_BUS),
+ JH71X0__DIV(JH7100_CLK_APB2_BUS, "apb2_bus", 8, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_DOM3AHB_BUS, "dom3ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_DOM7AHB_BUS, "dom7ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_U74_CORE0, "u74_core0", CLK_IS_CRITICAL, JH7100_CLK_CPU_CORE),
+ JH71X0_GDIV(JH7100_CLK_U74_CORE1, "u74_core1", CLK_IS_CRITICAL, 8, JH7100_CLK_CPU_CORE),
+ JH71X0_GATE(JH7100_CLK_U74_AXI, "u74_axi", CLK_IS_CRITICAL, JH7100_CLK_CPU_AXI),
+ JH71X0_GATE(JH7100_CLK_U74RTC_TOGGLE, "u74rtc_toggle", CLK_IS_CRITICAL, JH7100_CLK_OSC_SYS),
+ JH71X0_GATE(JH7100_CLK_SGDMA2P_AXI, "sgdma2p_axi", 0, JH7100_CLK_CPU_AXI),
+ JH71X0_GATE(JH7100_CLK_DMA2PNOC_AXI, "dma2pnoc_axi", 0, JH7100_CLK_CPU_AXI),
+ JH71X0_GATE(JH7100_CLK_SGDMA2P_AHB, "sgdma2p_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0__DIV(JH7100_CLK_DLA_BUS, "dla_bus", 4, JH7100_CLK_DLA_ROOT),
+ JH71X0_GATE(JH7100_CLK_DLA_AXI, "dla_axi", 0, JH7100_CLK_DLA_BUS),
+ JH71X0_GATE(JH7100_CLK_DLANOC_AXI, "dlanoc_axi", 0, JH7100_CLK_DLA_BUS),
+ JH71X0_GATE(JH7100_CLK_DLA_APB, "dla_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_VP6_CORE, "vp6_core", 0, 4, JH7100_CLK_DSP_ROOT_DIV),
+ JH71X0__DIV(JH7100_CLK_VP6BUS_SRC, "vp6bus_src", 4, JH7100_CLK_DSP_ROOT),
+ JH71X0_GDIV(JH7100_CLK_VP6_AXI, "vp6_axi", 0, 4, JH7100_CLK_VP6BUS_SRC),
+ JH71X0__DIV(JH7100_CLK_VCDECBUS_SRC, "vcdecbus_src", 4, JH7100_CLK_CDECHIFI4_ROOT),
+ JH71X0__DIV(JH7100_CLK_VDEC_BUS, "vdec_bus", 8, JH7100_CLK_VCDECBUS_SRC),
+ JH71X0_GATE(JH7100_CLK_VDEC_AXI, "vdec_axi", 0, JH7100_CLK_VDEC_BUS),
+ JH71X0_GATE(JH7100_CLK_VDECBRG_MAIN, "vdecbrg_mainclk", 0, JH7100_CLK_VDEC_BUS),
+ JH71X0_GDIV(JH7100_CLK_VDEC_BCLK, "vdec_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC),
+ JH71X0_GDIV(JH7100_CLK_VDEC_CCLK, "vdec_cclk", 0, 8, JH7100_CLK_CDEC_ROOT),
+ JH71X0_GATE(JH7100_CLK_VDEC_APB, "vdec_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_JPEG_AXI, "jpeg_axi", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0_GDIV(JH7100_CLK_JPEG_CCLK, "jpeg_cclk", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0_GATE(JH7100_CLK_JPEG_APB, "jpeg_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_GC300_2X, "gc300_2x", 0, 8, JH7100_CLK_CDECHIFI4_ROOT),
+ JH71X0_GATE(JH7100_CLK_GC300_AHB, "gc300_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0__DIV(JH7100_CLK_JPCGC300_AXIBUS, "jpcgc300_axibus", 8, JH7100_CLK_VCDECBUS_SRC),
+ JH71X0_GATE(JH7100_CLK_GC300_AXI, "gc300_axi", 0, JH7100_CLK_JPCGC300_AXIBUS),
+ JH71X0_GATE(JH7100_CLK_JPCGC300_MAIN, "jpcgc300_mainclk", 0, JH7100_CLK_JPCGC300_AXIBUS),
+ JH71X0__DIV(JH7100_CLK_VENC_BUS, "venc_bus", 8, JH7100_CLK_VCDECBUS_SRC),
+ JH71X0_GATE(JH7100_CLK_VENC_AXI, "venc_axi", 0, JH7100_CLK_VENC_BUS),
+ JH71X0_GATE(JH7100_CLK_VENCBRG_MAIN, "vencbrg_mainclk", 0, JH7100_CLK_VENC_BUS),
+ JH71X0_GDIV(JH7100_CLK_VENC_BCLK, "venc_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC),
+ JH71X0_GDIV(JH7100_CLK_VENC_CCLK, "venc_cclk", 0, 8, JH7100_CLK_CDEC_ROOT),
+ JH71X0_GATE(JH7100_CLK_VENC_APB, "venc_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_DDRPLL_DIV2, "ddrpll_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_PLL1_OUT),
+ JH71X0_GDIV(JH7100_CLK_DDRPLL_DIV4, "ddrpll_div4", CLK_IS_CRITICAL, 2,
+ JH7100_CLK_DDRPLL_DIV2),
+ JH71X0_GDIV(JH7100_CLK_DDRPLL_DIV8, "ddrpll_div8", CLK_IS_CRITICAL, 2,
+ JH7100_CLK_DDRPLL_DIV4),
+ JH71X0_GDIV(JH7100_CLK_DDROSC_DIV2, "ddrosc_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_OSC_SYS),
+ JH71X0_GMUX(JH7100_CLK_DDRC0, "ddrc0", CLK_IS_CRITICAL, 4,
JH7100_CLK_DDROSC_DIV2,
JH7100_CLK_DDRPLL_DIV2,
JH7100_CLK_DDRPLL_DIV4,
JH7100_CLK_DDRPLL_DIV8),
- JH7100_GMUX(JH7100_CLK_DDRC1, "ddrc1", CLK_IS_CRITICAL, 4,
+ JH71X0_GMUX(JH7100_CLK_DDRC1, "ddrc1", CLK_IS_CRITICAL, 4,
JH7100_CLK_DDROSC_DIV2,
JH7100_CLK_DDRPLL_DIV2,
JH7100_CLK_DDRPLL_DIV4,
JH7100_CLK_DDRPLL_DIV8),
- JH7100_GATE(JH7100_CLK_DDRPHY_APB, "ddrphy_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100__DIV(JH7100_CLK_NOC_ROB, "noc_rob", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100__DIV(JH7100_CLK_NOC_COG, "noc_cog", 8, JH7100_CLK_DLA_ROOT),
- JH7100_GATE(JH7100_CLK_NNE_AHB, "nne_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100__DIV(JH7100_CLK_NNEBUS_SRC1, "nnebus_src1", 4, JH7100_CLK_DSP_ROOT),
- JH7100__MUX(JH7100_CLK_NNE_BUS, "nne_bus", 2,
+ JH71X0_GATE(JH7100_CLK_DDRPHY_APB, "ddrphy_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0__DIV(JH7100_CLK_NOC_ROB, "noc_rob", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0__DIV(JH7100_CLK_NOC_COG, "noc_cog", 8, JH7100_CLK_DLA_ROOT),
+ JH71X0_GATE(JH7100_CLK_NNE_AHB, "nne_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0__DIV(JH7100_CLK_NNEBUS_SRC1, "nnebus_src1", 4, JH7100_CLK_DSP_ROOT),
+ JH71X0__MUX(JH7100_CLK_NNE_BUS, "nne_bus", 2,
JH7100_CLK_CPU_AXI,
JH7100_CLK_NNEBUS_SRC1),
- JH7100_GATE(JH7100_CLK_NNE_AXI, "nne_axi", 0, JH7100_CLK_NNE_BUS),
- JH7100_GATE(JH7100_CLK_NNENOC_AXI, "nnenoc_axi", 0, JH7100_CLK_NNE_BUS),
- JH7100_GATE(JH7100_CLK_DLASLV_AXI, "dlaslv_axi", 0, JH7100_CLK_NNE_BUS),
- JH7100_GATE(JH7100_CLK_DSPX2C_AXI, "dspx2c_axi", CLK_IS_CRITICAL, JH7100_CLK_NNE_BUS),
- JH7100__DIV(JH7100_CLK_HIFI4_SRC, "hifi4_src", 4, JH7100_CLK_CDECHIFI4_ROOT),
- JH7100__DIV(JH7100_CLK_HIFI4_COREFREE, "hifi4_corefree", 8, JH7100_CLK_HIFI4_SRC),
- JH7100_GATE(JH7100_CLK_HIFI4_CORE, "hifi4_core", 0, JH7100_CLK_HIFI4_COREFREE),
- JH7100__DIV(JH7100_CLK_HIFI4_BUS, "hifi4_bus", 8, JH7100_CLK_HIFI4_COREFREE),
- JH7100_GATE(JH7100_CLK_HIFI4_AXI, "hifi4_axi", 0, JH7100_CLK_HIFI4_BUS),
- JH7100_GATE(JH7100_CLK_HIFI4NOC_AXI, "hifi4noc_axi", 0, JH7100_CLK_HIFI4_BUS),
- JH7100__DIV(JH7100_CLK_SGDMA1P_BUS, "sgdma1p_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100_GATE(JH7100_CLK_SGDMA1P_AXI, "sgdma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS),
- JH7100_GATE(JH7100_CLK_DMA1P_AXI, "dma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS),
- JH7100_GDIV(JH7100_CLK_X2C_AXI, "x2c_axi", CLK_IS_CRITICAL, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100__DIV(JH7100_CLK_USB_BUS, "usb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
- JH7100_GATE(JH7100_CLK_USB_AXI, "usb_axi", 0, JH7100_CLK_USB_BUS),
- JH7100_GATE(JH7100_CLK_USBNOC_AXI, "usbnoc_axi", 0, JH7100_CLK_USB_BUS),
- JH7100__DIV(JH7100_CLK_USBPHY_ROOTDIV, "usbphy_rootdiv", 4, JH7100_CLK_GMACUSB_ROOT),
- JH7100_GDIV(JH7100_CLK_USBPHY_125M, "usbphy_125m", 0, 8, JH7100_CLK_USBPHY_ROOTDIV),
- JH7100_GDIV(JH7100_CLK_USBPHY_PLLDIV25M, "usbphy_plldiv25m", 0, 32, JH7100_CLK_USBPHY_ROOTDIV),
- JH7100__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2,
+ JH71X0_GATE(JH7100_CLK_NNE_AXI, "nne_axi", 0, JH7100_CLK_NNE_BUS),
+ JH71X0_GATE(JH7100_CLK_NNENOC_AXI, "nnenoc_axi", 0, JH7100_CLK_NNE_BUS),
+ JH71X0_GATE(JH7100_CLK_DLASLV_AXI, "dlaslv_axi", 0, JH7100_CLK_NNE_BUS),
+ JH71X0_GATE(JH7100_CLK_DSPX2C_AXI, "dspx2c_axi", CLK_IS_CRITICAL, JH7100_CLK_NNE_BUS),
+ JH71X0__DIV(JH7100_CLK_HIFI4_SRC, "hifi4_src", 4, JH7100_CLK_CDECHIFI4_ROOT),
+ JH71X0__DIV(JH7100_CLK_HIFI4_COREFREE, "hifi4_corefree", 8, JH7100_CLK_HIFI4_SRC),
+ JH71X0_GATE(JH7100_CLK_HIFI4_CORE, "hifi4_core", 0, JH7100_CLK_HIFI4_COREFREE),
+ JH71X0__DIV(JH7100_CLK_HIFI4_BUS, "hifi4_bus", 8, JH7100_CLK_HIFI4_COREFREE),
+ JH71X0_GATE(JH7100_CLK_HIFI4_AXI, "hifi4_axi", 0, JH7100_CLK_HIFI4_BUS),
+ JH71X0_GATE(JH7100_CLK_HIFI4NOC_AXI, "hifi4noc_axi", 0, JH7100_CLK_HIFI4_BUS),
+ JH71X0__DIV(JH7100_CLK_SGDMA1P_BUS, "sgdma1p_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0_GATE(JH7100_CLK_SGDMA1P_AXI, "sgdma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS),
+ JH71X0_GATE(JH7100_CLK_DMA1P_AXI, "dma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS),
+ JH71X0_GDIV(JH7100_CLK_X2C_AXI, "x2c_axi", CLK_IS_CRITICAL, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0__DIV(JH7100_CLK_USB_BUS, "usb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH71X0_GATE(JH7100_CLK_USB_AXI, "usb_axi", 0, JH7100_CLK_USB_BUS),
+ JH71X0_GATE(JH7100_CLK_USBNOC_AXI, "usbnoc_axi", 0, JH7100_CLK_USB_BUS),
+ JH71X0__DIV(JH7100_CLK_USBPHY_ROOTDIV, "usbphy_rootdiv", 4, JH7100_CLK_GMACUSB_ROOT),
+ JH71X0_GDIV(JH7100_CLK_USBPHY_125M, "usbphy_125m", 0, 8, JH7100_CLK_USBPHY_ROOTDIV),
+ JH71X0_GDIV(JH7100_CLK_USBPHY_PLLDIV25M, "usbphy_plldiv25m", 0, 32,
+ JH7100_CLK_USBPHY_ROOTDIV),
+ JH71X0__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_USBPHY_PLLDIV25M),
- JH7100_FDIV(JH7100_CLK_AUDIO_DIV, "audio_div", JH7100_CLK_AUDIO_ROOT),
- JH7100_GATE(JH7100_CLK_AUDIO_SRC, "audio_src", 0, JH7100_CLK_AUDIO_DIV),
- JH7100_GATE(JH7100_CLK_AUDIO_12288, "audio_12288", 0, JH7100_CLK_OSC_AUD),
- JH7100_GDIV(JH7100_CLK_VIN_SRC, "vin_src", 0, 4, JH7100_CLK_VIN_ROOT),
- JH7100__DIV(JH7100_CLK_ISP0_BUS, "isp0_bus", 8, JH7100_CLK_VIN_SRC),
- JH7100_GATE(JH7100_CLK_ISP0_AXI, "isp0_axi", 0, JH7100_CLK_ISP0_BUS),
- JH7100_GATE(JH7100_CLK_ISP0NOC_AXI, "isp0noc_axi", 0, JH7100_CLK_ISP0_BUS),
- JH7100_GATE(JH7100_CLK_ISPSLV_AXI, "ispslv_axi", 0, JH7100_CLK_ISP0_BUS),
- JH7100__DIV(JH7100_CLK_ISP1_BUS, "isp1_bus", 8, JH7100_CLK_VIN_SRC),
- JH7100_GATE(JH7100_CLK_ISP1_AXI, "isp1_axi", 0, JH7100_CLK_ISP1_BUS),
- JH7100_GATE(JH7100_CLK_ISP1NOC_AXI, "isp1noc_axi", 0, JH7100_CLK_ISP1_BUS),
- JH7100__DIV(JH7100_CLK_VIN_BUS, "vin_bus", 8, JH7100_CLK_VIN_SRC),
- JH7100_GATE(JH7100_CLK_VIN_AXI, "vin_axi", 0, JH7100_CLK_VIN_BUS),
- JH7100_GATE(JH7100_CLK_VINNOC_AXI, "vinnoc_axi", 0, JH7100_CLK_VIN_BUS),
- JH7100_GDIV(JH7100_CLK_VOUT_SRC, "vout_src", 0, 4, JH7100_CLK_VOUT_ROOT),
- JH7100__DIV(JH7100_CLK_DISPBUS_SRC, "dispbus_src", 4, JH7100_CLK_VOUTBUS_ROOT),
- JH7100__DIV(JH7100_CLK_DISP_BUS, "disp_bus", 4, JH7100_CLK_DISPBUS_SRC),
- JH7100_GATE(JH7100_CLK_DISP_AXI, "disp_axi", 0, JH7100_CLK_DISP_BUS),
- JH7100_GATE(JH7100_CLK_DISPNOC_AXI, "dispnoc_axi", 0, JH7100_CLK_DISP_BUS),
- JH7100_GATE(JH7100_CLK_SDIO0_AHB, "sdio0_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GDIV(JH7100_CLK_SDIO0_CCLKINT, "sdio0_cclkint", 0, 24, JH7100_CLK_PERH0_SRC),
- JH7100__INV(JH7100_CLK_SDIO0_CCLKINT_INV, "sdio0_cclkint_inv", JH7100_CLK_SDIO0_CCLKINT),
- JH7100_GATE(JH7100_CLK_SDIO1_AHB, "sdio1_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GDIV(JH7100_CLK_SDIO1_CCLKINT, "sdio1_cclkint", 0, 24, JH7100_CLK_PERH1_SRC),
- JH7100__INV(JH7100_CLK_SDIO1_CCLKINT_INV, "sdio1_cclkint_inv", JH7100_CLK_SDIO1_CCLKINT),
- JH7100_GATE(JH7100_CLK_GMAC_AHB, "gmac_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100__DIV(JH7100_CLK_GMAC_ROOT_DIV, "gmac_root_div", 8, JH7100_CLK_GMACUSB_ROOT),
- JH7100_GDIV(JH7100_CLK_GMAC_PTP_REF, "gmac_ptp_refclk", 0, 31, JH7100_CLK_GMAC_ROOT_DIV),
- JH7100_GDIV(JH7100_CLK_GMAC_GTX, "gmac_gtxclk", 0, 255, JH7100_CLK_GMAC_ROOT_DIV),
- JH7100_GDIV(JH7100_CLK_GMAC_RMII_TX, "gmac_rmii_txclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
- JH7100_GDIV(JH7100_CLK_GMAC_RMII_RX, "gmac_rmii_rxclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
- JH7100__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", 3,
+ JH71X0_FDIV(JH7100_CLK_AUDIO_DIV, "audio_div", JH7100_CLK_AUDIO_ROOT),
+ JH71X0_GATE(JH7100_CLK_AUDIO_SRC, "audio_src", 0, JH7100_CLK_AUDIO_DIV),
+ JH71X0_GATE(JH7100_CLK_AUDIO_12288, "audio_12288", 0, JH7100_CLK_OSC_AUD),
+ JH71X0_GDIV(JH7100_CLK_VIN_SRC, "vin_src", 0, 4, JH7100_CLK_VIN_ROOT),
+ JH71X0__DIV(JH7100_CLK_ISP0_BUS, "isp0_bus", 8, JH7100_CLK_VIN_SRC),
+ JH71X0_GATE(JH7100_CLK_ISP0_AXI, "isp0_axi", 0, JH7100_CLK_ISP0_BUS),
+ JH71X0_GATE(JH7100_CLK_ISP0NOC_AXI, "isp0noc_axi", 0, JH7100_CLK_ISP0_BUS),
+ JH71X0_GATE(JH7100_CLK_ISPSLV_AXI, "ispslv_axi", 0, JH7100_CLK_ISP0_BUS),
+ JH71X0__DIV(JH7100_CLK_ISP1_BUS, "isp1_bus", 8, JH7100_CLK_VIN_SRC),
+ JH71X0_GATE(JH7100_CLK_ISP1_AXI, "isp1_axi", 0, JH7100_CLK_ISP1_BUS),
+ JH71X0_GATE(JH7100_CLK_ISP1NOC_AXI, "isp1noc_axi", 0, JH7100_CLK_ISP1_BUS),
+ JH71X0__DIV(JH7100_CLK_VIN_BUS, "vin_bus", 8, JH7100_CLK_VIN_SRC),
+ JH71X0_GATE(JH7100_CLK_VIN_AXI, "vin_axi", 0, JH7100_CLK_VIN_BUS),
+ JH71X0_GATE(JH7100_CLK_VINNOC_AXI, "vinnoc_axi", 0, JH7100_CLK_VIN_BUS),
+ JH71X0_GDIV(JH7100_CLK_VOUT_SRC, "vout_src", 0, 4, JH7100_CLK_VOUT_ROOT),
+ JH71X0__DIV(JH7100_CLK_DISPBUS_SRC, "dispbus_src", 4, JH7100_CLK_VOUTBUS_ROOT),
+ JH71X0__DIV(JH7100_CLK_DISP_BUS, "disp_bus", 4, JH7100_CLK_DISPBUS_SRC),
+ JH71X0_GATE(JH7100_CLK_DISP_AXI, "disp_axi", 0, JH7100_CLK_DISP_BUS),
+ JH71X0_GATE(JH7100_CLK_DISPNOC_AXI, "dispnoc_axi", 0, JH7100_CLK_DISP_BUS),
+ JH71X0_GATE(JH7100_CLK_SDIO0_AHB, "sdio0_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GDIV(JH7100_CLK_SDIO0_CCLKINT, "sdio0_cclkint", 0, 24, JH7100_CLK_PERH0_SRC),
+ JH71X0__INV(JH7100_CLK_SDIO0_CCLKINT_INV, "sdio0_cclkint_inv", JH7100_CLK_SDIO0_CCLKINT),
+ JH71X0_GATE(JH7100_CLK_SDIO1_AHB, "sdio1_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GDIV(JH7100_CLK_SDIO1_CCLKINT, "sdio1_cclkint", 0, 24, JH7100_CLK_PERH1_SRC),
+ JH71X0__INV(JH7100_CLK_SDIO1_CCLKINT_INV, "sdio1_cclkint_inv", JH7100_CLK_SDIO1_CCLKINT),
+ JH71X0_GATE(JH7100_CLK_GMAC_AHB, "gmac_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0__DIV(JH7100_CLK_GMAC_ROOT_DIV, "gmac_root_div", 8, JH7100_CLK_GMACUSB_ROOT),
+ JH71X0_GDIV(JH7100_CLK_GMAC_PTP_REF, "gmac_ptp_refclk", 0, 31, JH7100_CLK_GMAC_ROOT_DIV),
+ JH71X0_GDIV(JH7100_CLK_GMAC_GTX, "gmac_gtxclk", 0, 255, JH7100_CLK_GMAC_ROOT_DIV),
+ JH71X0_GDIV(JH7100_CLK_GMAC_RMII_TX, "gmac_rmii_txclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
+ JH71X0_GDIV(JH7100_CLK_GMAC_RMII_RX, "gmac_rmii_rxclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
+ JH71X0__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", 3,
JH7100_CLK_GMAC_GTX,
JH7100_CLK_GMAC_TX_INV,
JH7100_CLK_GMAC_RMII_TX),
- JH7100__INV(JH7100_CLK_GMAC_TX_INV, "gmac_tx_inv", JH7100_CLK_GMAC_TX),
- JH7100__MUX(JH7100_CLK_GMAC_RX_PRE, "gmac_rx_pre", 2,
+ JH71X0__INV(JH7100_CLK_GMAC_TX_INV, "gmac_tx_inv", JH7100_CLK_GMAC_TX),
+ JH71X0__MUX(JH7100_CLK_GMAC_RX_PRE, "gmac_rx_pre", 2,
JH7100_CLK_GMAC_GR_MII_RX,
JH7100_CLK_GMAC_RMII_RX),
- JH7100__INV(JH7100_CLK_GMAC_RX_INV, "gmac_rx_inv", JH7100_CLK_GMAC_RX_PRE),
- JH7100_GATE(JH7100_CLK_GMAC_RMII, "gmac_rmii", 0, JH7100_CLK_GMAC_RMII_REF),
- JH7100_GDIV(JH7100_CLK_GMAC_TOPHYREF, "gmac_tophyref", 0, 127, JH7100_CLK_GMAC_ROOT_DIV),
- JH7100_GATE(JH7100_CLK_SPI2AHB_AHB, "spi2ahb_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GDIV(JH7100_CLK_SPI2AHB_CORE, "spi2ahb_core", 0, 31, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_EZMASTER_AHB, "ezmaster_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_E24_AHB, "e24_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_E24RTC_TOGGLE, "e24rtc_toggle", 0, JH7100_CLK_OSC_SYS),
- JH7100_GATE(JH7100_CLK_QSPI_AHB, "qspi_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_QSPI_APB, "qspi_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_QSPI_REF, "qspi_refclk", 0, 31, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_SEC_AHB, "sec_ahb", 0, JH7100_CLK_AHB_BUS),
- JH7100_GATE(JH7100_CLK_AES, "aes_clk", 0, JH7100_CLK_SEC_AHB),
- JH7100_GATE(JH7100_CLK_SHA, "sha_clk", 0, JH7100_CLK_SEC_AHB),
- JH7100_GATE(JH7100_CLK_PKA, "pka_clk", 0, JH7100_CLK_SEC_AHB),
- JH7100_GATE(JH7100_CLK_TRNG_APB, "trng_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GATE(JH7100_CLK_OTP_APB, "otp_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GATE(JH7100_CLK_UART0_APB, "uart0_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_UART0_CORE, "uart0_core", 0, 63, JH7100_CLK_PERH1_SRC),
- JH7100_GATE(JH7100_CLK_UART1_APB, "uart1_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_UART1_CORE, "uart1_core", 0, 63, JH7100_CLK_PERH1_SRC),
- JH7100_GATE(JH7100_CLK_SPI0_APB, "spi0_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_SPI0_CORE, "spi0_core", 0, 63, JH7100_CLK_PERH1_SRC),
- JH7100_GATE(JH7100_CLK_SPI1_APB, "spi1_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_SPI1_CORE, "spi1_core", 0, 63, JH7100_CLK_PERH1_SRC),
- JH7100_GATE(JH7100_CLK_I2C0_APB, "i2c0_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_I2C0_CORE, "i2c0_core", 0, 63, JH7100_CLK_PERH1_SRC),
- JH7100_GATE(JH7100_CLK_I2C1_APB, "i2c1_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GDIV(JH7100_CLK_I2C1_CORE, "i2c1_core", 0, 63, JH7100_CLK_PERH1_SRC),
- JH7100_GATE(JH7100_CLK_GPIO_APB, "gpio_apb", 0, JH7100_CLK_APB1_BUS),
- JH7100_GATE(JH7100_CLK_UART2_APB, "uart2_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_UART2_CORE, "uart2_core", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_UART3_APB, "uart3_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_UART3_CORE, "uart3_core", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_SPI2_APB, "spi2_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_SPI2_CORE, "spi2_core", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_SPI3_APB, "spi3_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_SPI3_CORE, "spi3_core", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_I2C2_APB, "i2c2_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_I2C2_CORE, "i2c2_core", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_I2C3_APB, "i2c3_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_I2C3_CORE, "i2c3_core", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_WDTIMER_APB, "wdtimer_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_WDT_CORE, "wdt_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER0_CORE, "timer0_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER1_CORE, "timer1_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER2_CORE, "timer2_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER3_CORE, "timer3_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER4_CORE, "timer4_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER5_CORE, "timer5_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GDIV(JH7100_CLK_TIMER6_CORE, "timer6_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
- JH7100_GATE(JH7100_CLK_VP6INTC_APB, "vp6intc_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GATE(JH7100_CLK_PWM_APB, "pwm_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GATE(JH7100_CLK_MSI_APB, "msi_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GATE(JH7100_CLK_TEMP_APB, "temp_apb", 0, JH7100_CLK_APB2_BUS),
- JH7100_GDIV(JH7100_CLK_TEMP_SENSE, "temp_sense", 0, 31, JH7100_CLK_OSC_SYS),
- JH7100_GATE(JH7100_CLK_SYSERR_APB, "syserr_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0__INV(JH7100_CLK_GMAC_RX_INV, "gmac_rx_inv", JH7100_CLK_GMAC_RX_PRE),
+ JH71X0_GATE(JH7100_CLK_GMAC_RMII, "gmac_rmii", 0, JH7100_CLK_GMAC_RMII_REF),
+ JH71X0_GDIV(JH7100_CLK_GMAC_TOPHYREF, "gmac_tophyref", 0, 127, JH7100_CLK_GMAC_ROOT_DIV),
+ JH71X0_GATE(JH7100_CLK_SPI2AHB_AHB, "spi2ahb_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GDIV(JH7100_CLK_SPI2AHB_CORE, "spi2ahb_core", 0, 31, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_EZMASTER_AHB, "ezmaster_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_E24_AHB, "e24_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_E24RTC_TOGGLE, "e24rtc_toggle", 0, JH7100_CLK_OSC_SYS),
+ JH71X0_GATE(JH7100_CLK_QSPI_AHB, "qspi_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_QSPI_APB, "qspi_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_QSPI_REF, "qspi_refclk", 0, 31, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_SEC_AHB, "sec_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH71X0_GATE(JH7100_CLK_AES, "aes_clk", 0, JH7100_CLK_SEC_AHB),
+ JH71X0_GATE(JH7100_CLK_SHA, "sha_clk", 0, JH7100_CLK_SEC_AHB),
+ JH71X0_GATE(JH7100_CLK_PKA, "pka_clk", 0, JH7100_CLK_SEC_AHB),
+ JH71X0_GATE(JH7100_CLK_TRNG_APB, "trng_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GATE(JH7100_CLK_OTP_APB, "otp_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GATE(JH7100_CLK_UART0_APB, "uart0_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_UART0_CORE, "uart0_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH71X0_GATE(JH7100_CLK_UART1_APB, "uart1_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_UART1_CORE, "uart1_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH71X0_GATE(JH7100_CLK_SPI0_APB, "spi0_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_SPI0_CORE, "spi0_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH71X0_GATE(JH7100_CLK_SPI1_APB, "spi1_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_SPI1_CORE, "spi1_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH71X0_GATE(JH7100_CLK_I2C0_APB, "i2c0_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_I2C0_CORE, "i2c0_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH71X0_GATE(JH7100_CLK_I2C1_APB, "i2c1_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GDIV(JH7100_CLK_I2C1_CORE, "i2c1_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH71X0_GATE(JH7100_CLK_GPIO_APB, "gpio_apb", 0, JH7100_CLK_APB1_BUS),
+ JH71X0_GATE(JH7100_CLK_UART2_APB, "uart2_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_UART2_CORE, "uart2_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_UART3_APB, "uart3_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_UART3_CORE, "uart3_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_SPI2_APB, "spi2_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_SPI2_CORE, "spi2_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_SPI3_APB, "spi3_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_SPI3_CORE, "spi3_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_I2C2_APB, "i2c2_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_I2C2_CORE, "i2c2_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_I2C3_APB, "i2c3_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_I2C3_CORE, "i2c3_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_WDTIMER_APB, "wdtimer_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_WDT_CORE, "wdt_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER0_CORE, "timer0_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER1_CORE, "timer1_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER2_CORE, "timer2_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER3_CORE, "timer3_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER4_CORE, "timer4_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER5_CORE, "timer5_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GDIV(JH7100_CLK_TIMER6_CORE, "timer6_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH71X0_GATE(JH7100_CLK_VP6INTC_APB, "vp6intc_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GATE(JH7100_CLK_PWM_APB, "pwm_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GATE(JH7100_CLK_MSI_APB, "msi_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GATE(JH7100_CLK_TEMP_APB, "temp_apb", 0, JH7100_CLK_APB2_BUS),
+ JH71X0_GDIV(JH7100_CLK_TEMP_SENSE, "temp_sense", 0, 31, JH7100_CLK_OSC_SYS),
+ JH71X0_GATE(JH7100_CLK_SYSERR_APB, "syserr_apb", 0, JH7100_CLK_APB2_BUS),
};
-static struct jh7100_clk *jh7100_clk_from(struct clk_hw *hw)
-{
- return container_of(hw, struct jh7100_clk, hw);
-}
-
-static struct jh7100_clk_priv *jh7100_priv_from(struct jh7100_clk *clk)
-{
- return container_of(clk, struct jh7100_clk_priv, reg[clk->idx]);
-}
-
-static u32 jh7100_clk_reg_get(struct jh7100_clk *clk)
-{
- struct jh7100_clk_priv *priv = jh7100_priv_from(clk);
- void __iomem *reg = priv->base + 4 * clk->idx;
-
- return readl_relaxed(reg);
-}
-
-static void jh7100_clk_reg_rmw(struct jh7100_clk *clk, u32 mask, u32 value)
-{
- struct jh7100_clk_priv *priv = jh7100_priv_from(clk);
- void __iomem *reg = priv->base + 4 * clk->idx;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value |= readl_relaxed(reg) & ~mask;
- writel_relaxed(value, reg);
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
-}
-
-static int jh7100_clk_enable(struct clk_hw *hw)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
-
- jh7100_clk_reg_rmw(clk, JH7100_CLK_ENABLE, JH7100_CLK_ENABLE);
- return 0;
-}
-
-static void jh7100_clk_disable(struct clk_hw *hw)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
-
- jh7100_clk_reg_rmw(clk, JH7100_CLK_ENABLE, 0);
-}
-
-static int jh7100_clk_is_enabled(struct clk_hw *hw)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
-
- return !!(jh7100_clk_reg_get(clk) & JH7100_CLK_ENABLE);
-}
-
-static unsigned long jh7100_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- u32 div = jh7100_clk_reg_get(clk) & JH7100_CLK_DIV_MASK;
-
- return div ? parent_rate / div : 0;
-}
-
-static int jh7100_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- unsigned long parent = req->best_parent_rate;
- unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
- unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
- unsigned long result = parent / div;
-
- /*
- * we want the result clamped by min_rate and max_rate if possible:
- * case 1: div hits the max divider value, which means it's less than
- * parent / rate, so the result is greater than rate and min_rate in
- * particular. we can't do anything about result > max_rate because the
- * divider doesn't go any further.
- * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
- * always lower or equal to rate and max_rate. however the result may
- * turn out lower than min_rate, but then the next higher rate is fine:
- * div - 1 = ceil(parent / rate) - 1 < parent / rate
- * and thus
- * min_rate <= rate < parent / (div - 1)
- */
- if (result < req->min_rate && div > 1)
- result = parent / (div - 1);
-
- req->rate = result;
- return 0;
-}
-
-static int jh7100_clk_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
- 1UL, (unsigned long)clk->max_div);
-
- jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, div);
- return 0;
-}
-
-static unsigned long jh7100_clk_frac_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- u32 reg = jh7100_clk_reg_get(clk);
- unsigned long div100 = 100 * (reg & JH7100_CLK_INT_MASK) +
- ((reg & JH7100_CLK_FRAC_MASK) >> JH7100_CLK_FRAC_SHIFT);
-
- return (div100 >= JH7100_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
-}
-
-static int jh7100_clk_frac_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long parent100 = 100 * req->best_parent_rate;
- unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
- unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
- JH7100_CLK_FRAC_MIN, JH7100_CLK_FRAC_MAX);
- unsigned long result = parent100 / div100;
-
- /* clamp the result as in jh7100_clk_determine_rate() above */
- if (result > req->max_rate && div100 < JH7100_CLK_FRAC_MAX)
- result = parent100 / (div100 + 1);
- if (result < req->min_rate && div100 > JH7100_CLK_FRAC_MIN)
- result = parent100 / (div100 - 1);
-
- req->rate = result;
- return 0;
-}
-
-static int jh7100_clk_frac_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
- JH7100_CLK_FRAC_MIN, JH7100_CLK_FRAC_MAX);
- u32 value = ((div100 % 100) << JH7100_CLK_FRAC_SHIFT) | (div100 / 100);
-
- jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, value);
- return 0;
-}
-
-static u8 jh7100_clk_get_parent(struct clk_hw *hw)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- u32 value = jh7100_clk_reg_get(clk);
-
- return (value & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT;
-}
-
-static int jh7100_clk_set_parent(struct clk_hw *hw, u8 index)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- u32 value = (u32)index << JH7100_CLK_MUX_SHIFT;
-
- jh7100_clk_reg_rmw(clk, JH7100_CLK_MUX_MASK, value);
- return 0;
-}
-
-static int jh7100_clk_mux_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- return clk_mux_determine_rate_flags(hw, req, 0);
-}
-
-static int jh7100_clk_get_phase(struct clk_hw *hw)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- u32 value = jh7100_clk_reg_get(clk);
-
- return (value & JH7100_CLK_INVERT) ? 180 : 0;
-}
-
-static int jh7100_clk_set_phase(struct clk_hw *hw, int degrees)
-{
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- u32 value;
-
- if (degrees == 0)
- value = 0;
- else if (degrees == 180)
- value = JH7100_CLK_INVERT;
- else
- return -EINVAL;
-
- jh7100_clk_reg_rmw(clk, JH7100_CLK_INVERT, value);
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static void jh7100_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
-{
- static const struct debugfs_reg32 jh7100_clk_reg = {
- .name = "CTRL",
- .offset = 0,
- };
- struct jh7100_clk *clk = jh7100_clk_from(hw);
- struct jh7100_clk_priv *priv = jh7100_priv_from(clk);
- struct debugfs_regset32 *regset;
-
- regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
- if (!regset)
- return;
-
- regset->regs = &jh7100_clk_reg;
- regset->nregs = 1;
- regset->base = priv->base + 4 * clk->idx;
-
- debugfs_create_regset32("registers", 0400, dentry, regset);
-}
-#else
-#define jh7100_clk_debug_init NULL
-#endif
-
-static const struct clk_ops jh7100_clk_gate_ops = {
- .enable = jh7100_clk_enable,
- .disable = jh7100_clk_disable,
- .is_enabled = jh7100_clk_is_enabled,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_div_ops = {
- .recalc_rate = jh7100_clk_recalc_rate,
- .determine_rate = jh7100_clk_determine_rate,
- .set_rate = jh7100_clk_set_rate,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_fdiv_ops = {
- .recalc_rate = jh7100_clk_frac_recalc_rate,
- .determine_rate = jh7100_clk_frac_determine_rate,
- .set_rate = jh7100_clk_frac_set_rate,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_gdiv_ops = {
- .enable = jh7100_clk_enable,
- .disable = jh7100_clk_disable,
- .is_enabled = jh7100_clk_is_enabled,
- .recalc_rate = jh7100_clk_recalc_rate,
- .determine_rate = jh7100_clk_determine_rate,
- .set_rate = jh7100_clk_set_rate,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_mux_ops = {
- .determine_rate = jh7100_clk_mux_determine_rate,
- .set_parent = jh7100_clk_set_parent,
- .get_parent = jh7100_clk_get_parent,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_gmux_ops = {
- .enable = jh7100_clk_enable,
- .disable = jh7100_clk_disable,
- .is_enabled = jh7100_clk_is_enabled,
- .determine_rate = jh7100_clk_mux_determine_rate,
- .set_parent = jh7100_clk_set_parent,
- .get_parent = jh7100_clk_get_parent,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_mdiv_ops = {
- .recalc_rate = jh7100_clk_recalc_rate,
- .determine_rate = jh7100_clk_determine_rate,
- .get_parent = jh7100_clk_get_parent,
- .set_parent = jh7100_clk_set_parent,
- .set_rate = jh7100_clk_set_rate,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_gmd_ops = {
- .enable = jh7100_clk_enable,
- .disable = jh7100_clk_disable,
- .is_enabled = jh7100_clk_is_enabled,
- .recalc_rate = jh7100_clk_recalc_rate,
- .determine_rate = jh7100_clk_determine_rate,
- .get_parent = jh7100_clk_get_parent,
- .set_parent = jh7100_clk_set_parent,
- .set_rate = jh7100_clk_set_rate,
- .debug_init = jh7100_clk_debug_init,
-};
-
-static const struct clk_ops jh7100_clk_inv_ops = {
- .get_phase = jh7100_clk_get_phase,
- .set_phase = jh7100_clk_set_phase,
- .debug_init = jh7100_clk_debug_init,
-};
-
-const struct clk_ops *starfive_jh7100_clk_ops(u32 max)
-{
- if (max & JH7100_CLK_DIV_MASK) {
- if (max & JH7100_CLK_MUX_MASK) {
- if (max & JH7100_CLK_ENABLE)
- return &jh7100_clk_gmd_ops;
- return &jh7100_clk_mdiv_ops;
- }
- if (max & JH7100_CLK_ENABLE)
- return &jh7100_clk_gdiv_ops;
- if (max == JH7100_CLK_FRAC_MAX)
- return &jh7100_clk_fdiv_ops;
- return &jh7100_clk_div_ops;
- }
-
- if (max & JH7100_CLK_MUX_MASK) {
- if (max & JH7100_CLK_ENABLE)
- return &jh7100_clk_gmux_ops;
- return &jh7100_clk_mux_ops;
- }
-
- if (max & JH7100_CLK_ENABLE)
- return &jh7100_clk_gate_ops;
-
- return &jh7100_clk_inv_ops;
-}
-EXPORT_SYMBOL_GPL(starfive_jh7100_clk_ops);
-
static struct clk_hw *jh7100_clk_get(struct of_phandle_args *clkspec, void *data)
{
- struct jh7100_clk_priv *priv = data;
+ struct jh71x0_clk_priv *priv = data;
unsigned int idx = clkspec->args[0];
if (idx < JH7100_CLK_PLL0_OUT)
@@ -605,7 +283,7 @@ static struct clk_hw *jh7100_clk_get(struct of_phandle_args *clkspec, void *data
static int __init clk_starfive_jh7100_probe(struct platform_device *pdev)
{
- struct jh7100_clk_priv *priv;
+ struct jh71x0_clk_priv *priv;
unsigned int idx;
int ret;
@@ -639,12 +317,12 @@ static int __init clk_starfive_jh7100_probe(struct platform_device *pdev)
struct clk_parent_data parents[4] = {};
struct clk_init_data init = {
.name = jh7100_clk_data[idx].name,
- .ops = starfive_jh7100_clk_ops(max),
+ .ops = starfive_jh71x0_clk_ops(max),
.parent_data = parents,
- .num_parents = ((max & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT) + 1,
+ .num_parents = ((max & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT) + 1,
.flags = jh7100_clk_data[idx].flags,
};
- struct jh7100_clk *clk = &priv->reg[idx];
+ struct jh71x0_clk *clk = &priv->reg[idx];
unsigned int i;
for (i = 0; i < init.num_parents; i++) {
@@ -666,7 +344,7 @@ static int __init clk_starfive_jh7100_probe(struct platform_device *pdev)
clk->hw.init = &init;
clk->idx = idx;
- clk->max_div = max & JH7100_CLK_DIV_MASK;
+ clk->max_div = max & JH71X0_CLK_DIV_MASK;
ret = devm_clk_hw_register(priv->dev, &clk->hw);
if (ret)
diff --git a/drivers/clk/starfive/clk-starfive-jh7100.h b/drivers/clk/starfive/clk-starfive-jh7100.h
deleted file mode 100644
index f116be5740a5..000000000000
--- a/drivers/clk/starfive/clk-starfive-jh7100.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLK_STARFIVE_JH7100_H
-#define __CLK_STARFIVE_JH7100_H
-
-#include <linux/bits.h>
-#include <linux/clk-provider.h>
-
-/* register fields */
-#define JH7100_CLK_ENABLE BIT(31)
-#define JH7100_CLK_INVERT BIT(30)
-#define JH7100_CLK_MUX_MASK GENMASK(27, 24)
-#define JH7100_CLK_MUX_SHIFT 24
-#define JH7100_CLK_DIV_MASK GENMASK(23, 0)
-#define JH7100_CLK_FRAC_MASK GENMASK(15, 8)
-#define JH7100_CLK_FRAC_SHIFT 8
-#define JH7100_CLK_INT_MASK GENMASK(7, 0)
-
-/* fractional divider min/max */
-#define JH7100_CLK_FRAC_MIN 100UL
-#define JH7100_CLK_FRAC_MAX 25599UL
-
-/* clock data */
-struct jh7100_clk_data {
- const char *name;
- unsigned long flags;
- u32 max;
- u8 parents[4];
-};
-
-#define JH7100_GATE(_idx, _name, _flags, _parent) [_idx] = { \
- .name = _name, \
- .flags = CLK_SET_RATE_PARENT | (_flags), \
- .max = JH7100_CLK_ENABLE, \
- .parents = { [0] = _parent }, \
-}
-
-#define JH7100__DIV(_idx, _name, _max, _parent) [_idx] = { \
- .name = _name, \
- .flags = 0, \
- .max = _max, \
- .parents = { [0] = _parent }, \
-}
-
-#define JH7100_GDIV(_idx, _name, _flags, _max, _parent) [_idx] = { \
- .name = _name, \
- .flags = _flags, \
- .max = JH7100_CLK_ENABLE | (_max), \
- .parents = { [0] = _parent }, \
-}
-
-#define JH7100_FDIV(_idx, _name, _parent) [_idx] = { \
- .name = _name, \
- .flags = 0, \
- .max = JH7100_CLK_FRAC_MAX, \
- .parents = { [0] = _parent }, \
-}
-
-#define JH7100__MUX(_idx, _name, _nparents, ...) [_idx] = { \
- .name = _name, \
- .flags = 0, \
- .max = ((_nparents) - 1) << JH7100_CLK_MUX_SHIFT, \
- .parents = { __VA_ARGS__ }, \
-}
-
-#define JH7100_GMUX(_idx, _name, _flags, _nparents, ...) [_idx] = { \
- .name = _name, \
- .flags = _flags, \
- .max = JH7100_CLK_ENABLE | \
- (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT), \
- .parents = { __VA_ARGS__ }, \
-}
-
-#define JH7100_MDIV(_idx, _name, _max, _nparents, ...) [_idx] = { \
- .name = _name, \
- .flags = 0, \
- .max = (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT) | (_max), \
- .parents = { __VA_ARGS__ }, \
-}
-
-#define JH7100__GMD(_idx, _name, _flags, _max, _nparents, ...) [_idx] = { \
- .name = _name, \
- .flags = _flags, \
- .max = JH7100_CLK_ENABLE | \
- (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT) | (_max), \
- .parents = { __VA_ARGS__ }, \
-}
-
-#define JH7100__INV(_idx, _name, _parent) [_idx] = { \
- .name = _name, \
- .flags = CLK_SET_RATE_PARENT, \
- .max = JH7100_CLK_INVERT, \
- .parents = { [0] = _parent }, \
-}
-
-struct jh7100_clk {
- struct clk_hw hw;
- unsigned int idx;
- unsigned int max_div;
-};
-
-struct jh7100_clk_priv {
- /* protect clk enable and set rate/parent from happening at the same time */
- spinlock_t rmw_lock;
- struct device *dev;
- void __iomem *base;
- struct clk_hw *pll[3];
- struct jh7100_clk reg[];
-};
-
-const struct clk_ops *starfive_jh7100_clk_ops(u32 max);
-
-#endif
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-aon.c b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
new file mode 100644
index 000000000000..62954eb7b50a
--- /dev/null
+++ b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH7110 Always-On Clock Driver
+ *
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include "clk-starfive-jh7110.h"
+
+/* external clocks */
+#define JH7110_AONCLK_OSC (JH7110_AONCLK_END + 0)
+#define JH7110_AONCLK_GMAC0_RMII_REFIN (JH7110_AONCLK_END + 1)
+#define JH7110_AONCLK_GMAC0_RGMII_RXIN (JH7110_AONCLK_END + 2)
+#define JH7110_AONCLK_STG_AXIAHB (JH7110_AONCLK_END + 3)
+#define JH7110_AONCLK_APB_BUS (JH7110_AONCLK_END + 4)
+#define JH7110_AONCLK_GMAC0_GTXCLK (JH7110_AONCLK_END + 5)
+#define JH7110_AONCLK_RTC_OSC (JH7110_AONCLK_END + 6)
+
+static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
+ /* source */
+ JH71X0__DIV(JH7110_AONCLK_OSC_DIV4, "osc_div4", 4, JH7110_AONCLK_OSC),
+ JH71X0__MUX(JH7110_AONCLK_APB_FUNC, "apb_func", 2,
+ JH7110_AONCLK_OSC_DIV4,
+ JH7110_AONCLK_OSC),
+ /* gmac0 */
+ JH71X0_GATE(JH7110_AONCLK_GMAC0_AHB, "gmac0_ahb", 0, JH7110_AONCLK_STG_AXIAHB),
+ JH71X0_GATE(JH7110_AONCLK_GMAC0_AXI, "gmac0_axi", 0, JH7110_AONCLK_STG_AXIAHB),
+ JH71X0__DIV(JH7110_AONCLK_GMAC0_RMII_RTX, "gmac0_rmii_rtx", 30,
+ JH7110_AONCLK_GMAC0_RMII_REFIN),
+ JH71X0_GMUX(JH7110_AONCLK_GMAC0_TX, "gmac0_tx",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, 2,
+ JH7110_AONCLK_GMAC0_GTXCLK,
+ JH7110_AONCLK_GMAC0_RMII_RTX),
+ JH71X0__INV(JH7110_AONCLK_GMAC0_TX_INV, "gmac0_tx_inv", JH7110_AONCLK_GMAC0_TX),
+ JH71X0__MUX(JH7110_AONCLK_GMAC0_RX, "gmac0_rx", 2,
+ JH7110_AONCLK_GMAC0_RGMII_RXIN,
+ JH7110_AONCLK_GMAC0_RMII_RTX),
+ JH71X0__INV(JH7110_AONCLK_GMAC0_RX_INV, "gmac0_rx_inv", JH7110_AONCLK_GMAC0_RX),
+ /* otpc */
+ JH71X0_GATE(JH7110_AONCLK_OTPC_APB, "otpc_apb", 0, JH7110_AONCLK_APB_BUS),
+ /* rtc */
+ JH71X0_GATE(JH7110_AONCLK_RTC_APB, "rtc_apb", 0, JH7110_AONCLK_APB_BUS),
+ JH71X0__DIV(JH7110_AONCLK_RTC_INTERNAL, "rtc_internal", 1022, JH7110_AONCLK_OSC),
+ JH71X0__MUX(JH7110_AONCLK_RTC_32K, "rtc_32k", 2,
+ JH7110_AONCLK_RTC_OSC,
+ JH7110_AONCLK_RTC_INTERNAL),
+ JH71X0_GATE(JH7110_AONCLK_RTC_CAL, "rtc_cal", 0, JH7110_AONCLK_OSC),
+};
+
+static struct clk_hw *jh7110_aonclk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh71x0_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < JH7110_AONCLK_END)
+ return &priv->reg[idx].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int jh7110_aoncrg_probe(struct platform_device *pdev)
+{
+ struct jh71x0_clk_priv *priv;
+ unsigned int idx;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev,
+ struct_size(priv, reg, JH7110_AONCLK_END),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->rmw_lock);
+ priv->dev = &pdev->dev;
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ for (idx = 0; idx < JH7110_AONCLK_END; idx++) {
+ u32 max = jh7110_aonclk_data[idx].max;
+ struct clk_parent_data parents[4] = {};
+ struct clk_init_data init = {
+ .name = jh7110_aonclk_data[idx].name,
+ .ops = starfive_jh71x0_clk_ops(max),
+ .parent_data = parents,
+ .num_parents =
+ ((max & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT) + 1,
+ .flags = jh7110_aonclk_data[idx].flags,
+ };
+ struct jh71x0_clk *clk = &priv->reg[idx];
+ unsigned int i;
+
+ for (i = 0; i < init.num_parents; i++) {
+ unsigned int pidx = jh7110_aonclk_data[idx].parents[i];
+
+ if (pidx < JH7110_AONCLK_END)
+ parents[i].hw = &priv->reg[pidx].hw;
+ else if (pidx == JH7110_AONCLK_OSC)
+ parents[i].fw_name = "osc";
+ else if (pidx == JH7110_AONCLK_GMAC0_RMII_REFIN)
+ parents[i].fw_name = "gmac0_rmii_refin";
+ else if (pidx == JH7110_AONCLK_GMAC0_RGMII_RXIN)
+ parents[i].fw_name = "gmac0_rgmii_rxin";
+ else if (pidx == JH7110_AONCLK_STG_AXIAHB)
+ parents[i].fw_name = "stg_axiahb";
+ else if (pidx == JH7110_AONCLK_APB_BUS)
+ parents[i].fw_name = "apb_bus";
+ else if (pidx == JH7110_AONCLK_GMAC0_GTXCLK)
+ parents[i].fw_name = "gmac0_gtxclk";
+ else if (pidx == JH7110_AONCLK_RTC_OSC)
+ parents[i].fw_name = "rtc_osc";
+ }
+
+ clk->hw.init = &init;
+ clk->idx = idx;
+ clk->max_div = max & JH71X0_CLK_DIV_MASK;
+
+ ret = devm_clk_hw_register(&pdev->dev, &clk->hw);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_aonclk_get, priv);
+ if (ret)
+ return ret;
+
+ return jh7110_reset_controller_register(priv, "rst-aon", 1);
+}
+
+static const struct of_device_id jh7110_aoncrg_match[] = {
+ { .compatible = "starfive,jh7110-aoncrg" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh7110_aoncrg_match);
+
+static struct platform_driver jh7110_aoncrg_driver = {
+ .probe = jh7110_aoncrg_probe,
+ .driver = {
+ .name = "clk-starfive-jh7110-aon",
+ .of_match_table = jh7110_aoncrg_match,
+ },
+};
+module_platform_driver(jh7110_aoncrg_driver);
+
+MODULE_AUTHOR("Emil Renner Berthing");
+MODULE_DESCRIPTION("StarFive JH7110 always-on clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
new file mode 100644
index 000000000000..e6031345ef05
--- /dev/null
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH7110 System Clock Driver
+ *
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <soc/starfive/reset-starfive-jh71x0.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include "clk-starfive-jh7110.h"
+
+/* external clocks */
+#define JH7110_SYSCLK_OSC (JH7110_SYSCLK_END + 0)
+#define JH7110_SYSCLK_GMAC1_RMII_REFIN (JH7110_SYSCLK_END + 1)
+#define JH7110_SYSCLK_GMAC1_RGMII_RXIN (JH7110_SYSCLK_END + 2)
+#define JH7110_SYSCLK_I2STX_BCLK_EXT (JH7110_SYSCLK_END + 3)
+#define JH7110_SYSCLK_I2STX_LRCK_EXT (JH7110_SYSCLK_END + 4)
+#define JH7110_SYSCLK_I2SRX_BCLK_EXT (JH7110_SYSCLK_END + 5)
+#define JH7110_SYSCLK_I2SRX_LRCK_EXT (JH7110_SYSCLK_END + 6)
+#define JH7110_SYSCLK_TDM_EXT (JH7110_SYSCLK_END + 7)
+#define JH7110_SYSCLK_MCLK_EXT (JH7110_SYSCLK_END + 8)
+#define JH7110_SYSCLK_PLL0_OUT (JH7110_SYSCLK_END + 9)
+#define JH7110_SYSCLK_PLL1_OUT (JH7110_SYSCLK_END + 10)
+#define JH7110_SYSCLK_PLL2_OUT (JH7110_SYSCLK_END + 11)
+
+static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
+ /* root */
+ JH71X0__MUX(JH7110_SYSCLK_CPU_ROOT, "cpu_root", 2,
+ JH7110_SYSCLK_OSC,
+ JH7110_SYSCLK_PLL0_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_CPU_CORE, "cpu_core", 7, JH7110_SYSCLK_CPU_ROOT),
+ JH71X0__DIV(JH7110_SYSCLK_CPU_BUS, "cpu_bus", 2, JH7110_SYSCLK_CPU_CORE),
+ JH71X0__MUX(JH7110_SYSCLK_GPU_ROOT, "gpu_root", 2,
+ JH7110_SYSCLK_PLL2_OUT,
+ JH7110_SYSCLK_PLL1_OUT),
+ JH71X0_MDIV(JH7110_SYSCLK_PERH_ROOT, "perh_root", 2, 2,
+ JH7110_SYSCLK_PLL0_OUT,
+ JH7110_SYSCLK_PLL2_OUT),
+ JH71X0__MUX(JH7110_SYSCLK_BUS_ROOT, "bus_root", 2,
+ JH7110_SYSCLK_OSC,
+ JH7110_SYSCLK_PLL2_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_NOCSTG_BUS, "nocstg_bus", 3, JH7110_SYSCLK_BUS_ROOT),
+ JH71X0__DIV(JH7110_SYSCLK_AXI_CFG0, "axi_cfg0", 3, JH7110_SYSCLK_BUS_ROOT),
+ JH71X0__DIV(JH7110_SYSCLK_STG_AXIAHB, "stg_axiahb", 2, JH7110_SYSCLK_AXI_CFG0),
+ JH71X0_GATE(JH7110_SYSCLK_AHB0, "ahb0", CLK_IS_CRITICAL, JH7110_SYSCLK_STG_AXIAHB),
+ JH71X0_GATE(JH7110_SYSCLK_AHB1, "ahb1", CLK_IS_CRITICAL, JH7110_SYSCLK_STG_AXIAHB),
+ JH71X0__DIV(JH7110_SYSCLK_APB_BUS, "apb_bus", 8, JH7110_SYSCLK_STG_AXIAHB),
+ JH71X0_GATE(JH7110_SYSCLK_APB0, "apb0", CLK_IS_CRITICAL, JH7110_SYSCLK_APB_BUS),
+ JH71X0__DIV(JH7110_SYSCLK_PLL0_DIV2, "pll0_div2", 2, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_PLL1_DIV2, "pll1_div2", 2, JH7110_SYSCLK_PLL1_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_PLL2_DIV2, "pll2_div2", 2, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_AUDIO_ROOT, "audio_root", 8, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_MCLK_INNER, "mclk_inner", 64, JH7110_SYSCLK_AUDIO_ROOT),
+ JH71X0__MUX(JH7110_SYSCLK_MCLK, "mclk", 2,
+ JH7110_SYSCLK_MCLK_INNER,
+ JH7110_SYSCLK_MCLK_EXT),
+ JH71X0_GATE(JH7110_SYSCLK_MCLK_OUT, "mclk_out", 0, JH7110_SYSCLK_MCLK_INNER),
+ JH71X0_MDIV(JH7110_SYSCLK_ISP_2X, "isp_2x", 8, 2,
+ JH7110_SYSCLK_PLL2_OUT,
+ JH7110_SYSCLK_PLL1_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_ISP_AXI, "isp_axi", 4, JH7110_SYSCLK_ISP_2X),
+ JH71X0_GDIV(JH7110_SYSCLK_GCLK0, "gclk0", 0, 62, JH7110_SYSCLK_PLL0_DIV2),
+ JH71X0_GDIV(JH7110_SYSCLK_GCLK1, "gclk1", 0, 62, JH7110_SYSCLK_PLL1_DIV2),
+ JH71X0_GDIV(JH7110_SYSCLK_GCLK2, "gclk2", 0, 62, JH7110_SYSCLK_PLL2_DIV2),
+ /* cores */
+ JH71X0_GATE(JH7110_SYSCLK_CORE, "core", CLK_IS_CRITICAL, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_CORE1, "core1", CLK_IS_CRITICAL, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_CORE2, "core2", CLK_IS_CRITICAL, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_CORE3, "core3", CLK_IS_CRITICAL, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_CORE4, "core4", CLK_IS_CRITICAL, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_DEBUG, "debug", 0, JH7110_SYSCLK_CPU_BUS),
+ JH71X0__DIV(JH7110_SYSCLK_RTC_TOGGLE, "rtc_toggle", 6, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_TRACE0, "trace0", 0, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_TRACE1, "trace1", 0, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_TRACE2, "trace2", 0, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_TRACE3, "trace3", 0, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_TRACE4, "trace4", 0, JH7110_SYSCLK_CPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_TRACE_COM, "trace_com", 0, JH7110_SYSCLK_CPU_BUS),
+ /* noc */
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_CPU_AXI, "noc_bus_cpu_axi", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_CPU_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_AXICFG0_AXI, "noc_bus_axicfg0_axi", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_AXI_CFG0),
+ /* ddr */
+ JH71X0__DIV(JH7110_SYSCLK_OSC_DIV2, "osc_div2", 2, JH7110_SYSCLK_OSC),
+ JH71X0__DIV(JH7110_SYSCLK_PLL1_DIV4, "pll1_div4", 2, JH7110_SYSCLK_PLL1_DIV2),
+ JH71X0__DIV(JH7110_SYSCLK_PLL1_DIV8, "pll1_div8", 2, JH7110_SYSCLK_PLL1_DIV4),
+ JH71X0__MUX(JH7110_SYSCLK_DDR_BUS, "ddr_bus", 4,
+ JH7110_SYSCLK_OSC_DIV2,
+ JH7110_SYSCLK_PLL1_DIV2,
+ JH7110_SYSCLK_PLL1_DIV4,
+ JH7110_SYSCLK_PLL1_DIV8),
+ JH71X0_GATE(JH7110_SYSCLK_DDR_AXI, "ddr_axi", CLK_IS_CRITICAL, JH7110_SYSCLK_DDR_BUS),
+ /* gpu */
+ JH71X0__DIV(JH7110_SYSCLK_GPU_CORE, "gpu_core", 7, JH7110_SYSCLK_GPU_ROOT),
+ JH71X0_GATE(JH7110_SYSCLK_GPU_CORE_CLK, "gpu_core_clk", 0, JH7110_SYSCLK_GPU_CORE),
+ JH71X0_GATE(JH7110_SYSCLK_GPU_SYS_CLK, "gpu_sys_clk", 0, JH7110_SYSCLK_ISP_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_GPU_APB, "gpu_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GDIV(JH7110_SYSCLK_GPU_RTC_TOGGLE, "gpu_rtc_toggle", 0, 12, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_GPU_AXI, "noc_bus_gpu_axi", 0, JH7110_SYSCLK_GPU_CORE),
+ /* isp */
+ JH71X0_GATE(JH7110_SYSCLK_ISP_TOP_CORE, "isp_top_core", 0, JH7110_SYSCLK_ISP_2X),
+ JH71X0_GATE(JH7110_SYSCLK_ISP_TOP_AXI, "isp_top_axi", 0, JH7110_SYSCLK_ISP_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_ISP_AXI, "noc_bus_isp_axi", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_ISP_AXI),
+ /* hifi4 */
+ JH71X0__DIV(JH7110_SYSCLK_HIFI4_CORE, "hifi4_core", 15, JH7110_SYSCLK_BUS_ROOT),
+ JH71X0__DIV(JH7110_SYSCLK_HIFI4_AXI, "hifi4_axi", 2, JH7110_SYSCLK_HIFI4_CORE),
+ /* axi_cfg1 */
+ JH71X0_GATE(JH7110_SYSCLK_AXI_CFG1_MAIN, "axi_cfg1_main", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_ISP_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_AXI_CFG1_AHB, "axi_cfg1_ahb", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_AHB0),
+ /* vout */
+ JH71X0_GATE(JH7110_SYSCLK_VOUT_SRC, "vout_src", 0, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_VOUT_AXI, "vout_axi", 7, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_DISP_AXI, "noc_bus_disp_axi", 0, JH7110_SYSCLK_VOUT_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_VOUT_TOP_AHB, "vout_top_ahb", 0, JH7110_SYSCLK_AHB1),
+ JH71X0_GATE(JH7110_SYSCLK_VOUT_TOP_AXI, "vout_top_axi", 0, JH7110_SYSCLK_VOUT_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_VOUT_TOP_HDMITX0_MCLK, "vout_top_hdmitx0_mclk", 0,
+ JH7110_SYSCLK_MCLK),
+ JH71X0__DIV(JH7110_SYSCLK_VOUT_TOP_MIPIPHY_REF, "vout_top_mipiphy_ref", 2,
+ JH7110_SYSCLK_OSC),
+ /* jpegc */
+ JH71X0__DIV(JH7110_SYSCLK_JPEGC_AXI, "jpegc_axi", 16, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_CODAJ12_AXI, "codaj12_axi", 0, JH7110_SYSCLK_JPEGC_AXI),
+ JH71X0_GDIV(JH7110_SYSCLK_CODAJ12_CORE, "codaj12_core", 0, 16, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_CODAJ12_APB, "codaj12_apb", 0, JH7110_SYSCLK_APB_BUS),
+ /* vdec */
+ JH71X0__DIV(JH7110_SYSCLK_VDEC_AXI, "vdec_axi", 7, JH7110_SYSCLK_BUS_ROOT),
+ JH71X0_GATE(JH7110_SYSCLK_WAVE511_AXI, "wave511_axi", 0, JH7110_SYSCLK_VDEC_AXI),
+ JH71X0_GDIV(JH7110_SYSCLK_WAVE511_BPU, "wave511_bpu", 0, 7, JH7110_SYSCLK_BUS_ROOT),
+ JH71X0_GDIV(JH7110_SYSCLK_WAVE511_VCE, "wave511_vce", 0, 7, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_WAVE511_APB, "wave511_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_VDEC_JPG, "vdec_jpg", 0, JH7110_SYSCLK_JPEGC_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_VDEC_MAIN, "vdec_main", 0, JH7110_SYSCLK_VDEC_AXI),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_VDEC_AXI, "noc_bus_vdec_axi", 0, JH7110_SYSCLK_VDEC_AXI),
+ /* venc */
+ JH71X0__DIV(JH7110_SYSCLK_VENC_AXI, "venc_axi", 15, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_WAVE420L_AXI, "wave420l_axi", 0, JH7110_SYSCLK_VENC_AXI),
+ JH71X0_GDIV(JH7110_SYSCLK_WAVE420L_BPU, "wave420l_bpu", 0, 15, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0_GDIV(JH7110_SYSCLK_WAVE420L_VCE, "wave420l_vce", 0, 15, JH7110_SYSCLK_PLL2_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_WAVE420L_APB, "wave420l_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_VENC_AXI, "noc_bus_venc_axi", 0, JH7110_SYSCLK_VENC_AXI),
+ /* axi_cfg0 */
+ JH71X0_GATE(JH7110_SYSCLK_AXI_CFG0_MAIN_DIV, "axi_cfg0_main_div", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_AHB1),
+ JH71X0_GATE(JH7110_SYSCLK_AXI_CFG0_MAIN, "axi_cfg0_main", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_AXI_CFG0),
+ JH71X0_GATE(JH7110_SYSCLK_AXI_CFG0_HIFI4, "axi_cfg0_hifi4", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_HIFI4_AXI),
+ /* intmem */
+ JH71X0_GATE(JH7110_SYSCLK_AXIMEM2_AXI, "aximem2_axi", 0, JH7110_SYSCLK_AXI_CFG0),
+ /* qspi */
+ JH71X0_GATE(JH7110_SYSCLK_QSPI_AHB, "qspi_ahb", 0, JH7110_SYSCLK_AHB1),
+ JH71X0_GATE(JH7110_SYSCLK_QSPI_APB, "qspi_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0__DIV(JH7110_SYSCLK_QSPI_REF_SRC, "qspi_ref_src", 16, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0_GMUX(JH7110_SYSCLK_QSPI_REF, "qspi_ref", 0, 2,
+ JH7110_SYSCLK_OSC,
+ JH7110_SYSCLK_QSPI_REF_SRC),
+ /* sdio */
+ JH71X0_GATE(JH7110_SYSCLK_SDIO0_AHB, "sdio0_ahb", 0, JH7110_SYSCLK_AHB0),
+ JH71X0_GATE(JH7110_SYSCLK_SDIO1_AHB, "sdio1_ahb", 0, JH7110_SYSCLK_AHB0),
+ JH71X0_GDIV(JH7110_SYSCLK_SDIO0_SDCARD, "sdio0_sdcard", 0, 15, JH7110_SYSCLK_AXI_CFG0),
+ JH71X0_GDIV(JH7110_SYSCLK_SDIO1_SDCARD, "sdio1_sdcard", 0, 15, JH7110_SYSCLK_AXI_CFG0),
+ /* stg */
+ JH71X0__DIV(JH7110_SYSCLK_USB_125M, "usb_125m", 15, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0_GATE(JH7110_SYSCLK_NOC_BUS_STG_AXI, "noc_bus_stg_axi", CLK_IS_CRITICAL,
+ JH7110_SYSCLK_NOCSTG_BUS),
+ /* gmac1 */
+ JH71X0_GATE(JH7110_SYSCLK_GMAC1_AHB, "gmac1_ahb", 0, JH7110_SYSCLK_AHB0),
+ JH71X0_GATE(JH7110_SYSCLK_GMAC1_AXI, "gmac1_axi", 0, JH7110_SYSCLK_STG_AXIAHB),
+ JH71X0__DIV(JH7110_SYSCLK_GMAC_SRC, "gmac_src", 7, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_GMAC1_GTXCLK, "gmac1_gtxclk", 15, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0__DIV(JH7110_SYSCLK_GMAC1_RMII_RTX, "gmac1_rmii_rtx", 30,
+ JH7110_SYSCLK_GMAC1_RMII_REFIN),
+ JH71X0_GDIV(JH7110_SYSCLK_GMAC1_PTP, "gmac1_ptp", 0, 31, JH7110_SYSCLK_GMAC_SRC),
+ JH71X0__MUX(JH7110_SYSCLK_GMAC1_RX, "gmac1_rx", 2,
+ JH7110_SYSCLK_GMAC1_RGMII_RXIN,
+ JH7110_SYSCLK_GMAC1_RMII_RTX),
+ JH71X0__INV(JH7110_SYSCLK_GMAC1_RX_INV, "gmac1_rx_inv", JH7110_SYSCLK_GMAC1_RX),
+ JH71X0_GMUX(JH7110_SYSCLK_GMAC1_TX, "gmac1_tx",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, 2,
+ JH7110_SYSCLK_GMAC1_GTXCLK,
+ JH7110_SYSCLK_GMAC1_RMII_RTX),
+ JH71X0__INV(JH7110_SYSCLK_GMAC1_TX_INV, "gmac1_tx_inv", JH7110_SYSCLK_GMAC1_TX),
+ JH71X0_GATE(JH7110_SYSCLK_GMAC1_GTXC, "gmac1_gtxc", 0, JH7110_SYSCLK_GMAC1_GTXCLK),
+ /* gmac0 */
+ JH71X0_GDIV(JH7110_SYSCLK_GMAC0_GTXCLK, "gmac0_gtxclk", 0, 15, JH7110_SYSCLK_PLL0_OUT),
+ JH71X0_GDIV(JH7110_SYSCLK_GMAC0_PTP, "gmac0_ptp", 0, 31, JH7110_SYSCLK_GMAC_SRC),
+ JH71X0_GDIV(JH7110_SYSCLK_GMAC_PHY, "gmac_phy", 0, 31, JH7110_SYSCLK_GMAC_SRC),
+ JH71X0_GATE(JH7110_SYSCLK_GMAC0_GTXC, "gmac0_gtxc", 0, JH7110_SYSCLK_GMAC0_GTXCLK),
+ /* apb misc */
+ JH71X0_GATE(JH7110_SYSCLK_IOMUX_APB, "iomux_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_MAILBOX_APB, "mailbox_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_INT_CTRL_APB, "int_ctrl_apb", 0, JH7110_SYSCLK_APB_BUS),
+ /* can0 */
+ JH71X0_GATE(JH7110_SYSCLK_CAN0_APB, "can0_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GDIV(JH7110_SYSCLK_CAN0_TIMER, "can0_timer", 0, 24, JH7110_SYSCLK_OSC),
+ JH71X0_GDIV(JH7110_SYSCLK_CAN0_CAN, "can0_can", 0, 63, JH7110_SYSCLK_PERH_ROOT),
+ /* can1 */
+ JH71X0_GATE(JH7110_SYSCLK_CAN1_APB, "can1_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GDIV(JH7110_SYSCLK_CAN1_TIMER, "can1_timer", 0, 24, JH7110_SYSCLK_OSC),
+ JH71X0_GDIV(JH7110_SYSCLK_CAN1_CAN, "can1_can", 0, 63, JH7110_SYSCLK_PERH_ROOT),
+ /* pwm */
+ JH71X0_GATE(JH7110_SYSCLK_PWM_APB, "pwm_apb", 0, JH7110_SYSCLK_APB_BUS),
+ /* wdt */
+ JH71X0_GATE(JH7110_SYSCLK_WDT_APB, "wdt_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_WDT_CORE, "wdt_core", 0, JH7110_SYSCLK_OSC),
+ /* timer */
+ JH71X0_GATE(JH7110_SYSCLK_TIMER_APB, "timer_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_TIMER0, "timer0", 0, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_TIMER1, "timer1", 0, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_TIMER2, "timer2", 0, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_TIMER3, "timer3", 0, JH7110_SYSCLK_OSC),
+ /* temp sensor */
+ JH71X0_GATE(JH7110_SYSCLK_TEMP_APB, "temp_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GDIV(JH7110_SYSCLK_TEMP_CORE, "temp_core", 0, 24, JH7110_SYSCLK_OSC),
+ /* spi */
+ JH71X0_GATE(JH7110_SYSCLK_SPI0_APB, "spi0_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_SPI1_APB, "spi1_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_SPI2_APB, "spi2_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_SPI3_APB, "spi3_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_SPI4_APB, "spi4_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_SPI5_APB, "spi5_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_SPI6_APB, "spi6_apb", 0, JH7110_SYSCLK_APB_BUS),
+ /* i2c */
+ JH71X0_GATE(JH7110_SYSCLK_I2C0_APB, "i2c0_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_I2C1_APB, "i2c1_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_I2C2_APB, "i2c2_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_I2C3_APB, "i2c3_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_I2C4_APB, "i2c4_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_I2C5_APB, "i2c5_apb", 0, JH7110_SYSCLK_APB_BUS),
+ JH71X0_GATE(JH7110_SYSCLK_I2C6_APB, "i2c6_apb", 0, JH7110_SYSCLK_APB_BUS),
+ /* uart */
+ JH71X0_GATE(JH7110_SYSCLK_UART0_APB, "uart0_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_UART0_CORE, "uart0_core", 0, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_UART1_APB, "uart1_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_UART1_CORE, "uart1_core", 0, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_UART2_APB, "uart2_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_UART2_CORE, "uart2_core", 0, JH7110_SYSCLK_OSC),
+ JH71X0_GATE(JH7110_SYSCLK_UART3_APB, "uart3_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_UART3_CORE, "uart3_core", 0, 10, JH7110_SYSCLK_PERH_ROOT),
+ JH71X0_GATE(JH7110_SYSCLK_UART4_APB, "uart4_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_UART4_CORE, "uart4_core", 0, 10, JH7110_SYSCLK_PERH_ROOT),
+ JH71X0_GATE(JH7110_SYSCLK_UART5_APB, "uart5_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_UART5_CORE, "uart5_core", 0, 10, JH7110_SYSCLK_PERH_ROOT),
+ /* pwmdac */
+ JH71X0_GATE(JH7110_SYSCLK_PWMDAC_APB, "pwmdac_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_PWMDAC_CORE, "pwmdac_core", 0, 256, JH7110_SYSCLK_AUDIO_ROOT),
+ /* spdif */
+ JH71X0_GATE(JH7110_SYSCLK_SPDIF_APB, "spdif_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GATE(JH7110_SYSCLK_SPDIF_CORE, "spdif_core", 0, JH7110_SYSCLK_MCLK),
+ /* i2stx0 */
+ JH71X0_GATE(JH7110_SYSCLK_I2STX0_APB, "i2stx0_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_I2STX0_BCLK_MST, "i2stx0_bclk_mst", 0, 32, JH7110_SYSCLK_MCLK),
+ JH71X0__INV(JH7110_SYSCLK_I2STX0_BCLK_MST_INV, "i2stx0_bclk_mst_inv",
+ JH7110_SYSCLK_I2STX0_BCLK_MST),
+ JH71X0_MDIV(JH7110_SYSCLK_I2STX0_LRCK_MST, "i2stx0_lrck_mst", 64, 2,
+ JH7110_SYSCLK_I2STX0_BCLK_MST_INV,
+ JH7110_SYSCLK_I2STX0_BCLK_MST),
+ JH71X0__MUX(JH7110_SYSCLK_I2STX0_BCLK, "i2stx0_bclk", 2,
+ JH7110_SYSCLK_I2STX0_BCLK_MST,
+ JH7110_SYSCLK_I2STX_BCLK_EXT),
+ JH71X0__INV(JH7110_SYSCLK_I2STX0_BCLK_INV, "i2stx0_bclk_inv", JH7110_SYSCLK_I2STX0_BCLK),
+ JH71X0__MUX(JH7110_SYSCLK_I2STX0_LRCK, "i2stx0_lrck", 2,
+ JH7110_SYSCLK_I2STX0_LRCK_MST,
+ JH7110_SYSCLK_I2STX_LRCK_EXT),
+ /* i2stx1 */
+ JH71X0_GATE(JH7110_SYSCLK_I2STX1_APB, "i2stx1_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_I2STX1_BCLK_MST, "i2stx1_bclk_mst", 0, 32, JH7110_SYSCLK_MCLK),
+ JH71X0__INV(JH7110_SYSCLK_I2STX1_BCLK_MST_INV, "i2stx1_bclk_mst_inv",
+ JH7110_SYSCLK_I2STX1_BCLK_MST),
+ JH71X0_MDIV(JH7110_SYSCLK_I2STX1_LRCK_MST, "i2stx1_lrck_mst", 64, 2,
+ JH7110_SYSCLK_I2STX1_BCLK_MST_INV,
+ JH7110_SYSCLK_I2STX1_BCLK_MST),
+ JH71X0__MUX(JH7110_SYSCLK_I2STX1_BCLK, "i2stx1_bclk", 2,
+ JH7110_SYSCLK_I2STX1_BCLK_MST,
+ JH7110_SYSCLK_I2STX_BCLK_EXT),
+ JH71X0__INV(JH7110_SYSCLK_I2STX1_BCLK_INV, "i2stx1_bclk_inv", JH7110_SYSCLK_I2STX1_BCLK),
+ JH71X0__MUX(JH7110_SYSCLK_I2STX1_LRCK, "i2stx1_lrck", 2,
+ JH7110_SYSCLK_I2STX1_LRCK_MST,
+ JH7110_SYSCLK_I2STX_LRCK_EXT),
+ /* i2srx */
+ JH71X0_GATE(JH7110_SYSCLK_I2SRX_APB, "i2srx_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_I2SRX_BCLK_MST, "i2srx_bclk_mst", 0, 32, JH7110_SYSCLK_MCLK),
+ JH71X0__INV(JH7110_SYSCLK_I2SRX_BCLK_MST_INV, "i2srx_bclk_mst_inv",
+ JH7110_SYSCLK_I2SRX_BCLK_MST),
+ JH71X0_MDIV(JH7110_SYSCLK_I2SRX_LRCK_MST, "i2srx_lrck_mst", 64, 2,
+ JH7110_SYSCLK_I2SRX_BCLK_MST_INV,
+ JH7110_SYSCLK_I2SRX_BCLK_MST),
+ JH71X0__MUX(JH7110_SYSCLK_I2SRX_BCLK, "i2srx_bclk", 2,
+ JH7110_SYSCLK_I2SRX_BCLK_MST,
+ JH7110_SYSCLK_I2SRX_BCLK_EXT),
+ JH71X0__INV(JH7110_SYSCLK_I2SRX_BCLK_INV, "i2srx_bclk_inv", JH7110_SYSCLK_I2SRX_BCLK),
+ JH71X0__MUX(JH7110_SYSCLK_I2SRX_LRCK, "i2srx_lrck", 2,
+ JH7110_SYSCLK_I2SRX_LRCK_MST,
+ JH7110_SYSCLK_I2SRX_LRCK_EXT),
+ /* pdm */
+ JH71X0_GDIV(JH7110_SYSCLK_PDM_DMIC, "pdm_dmic", 0, 64, JH7110_SYSCLK_MCLK),
+ JH71X0_GATE(JH7110_SYSCLK_PDM_APB, "pdm_apb", 0, JH7110_SYSCLK_APB0),
+ /* tdm */
+ JH71X0_GATE(JH7110_SYSCLK_TDM_AHB, "tdm_ahb", 0, JH7110_SYSCLK_AHB0),
+ JH71X0_GATE(JH7110_SYSCLK_TDM_APB, "tdm_apb", 0, JH7110_SYSCLK_APB0),
+ JH71X0_GDIV(JH7110_SYSCLK_TDM_INTERNAL, "tdm_internal", 0, 64, JH7110_SYSCLK_MCLK),
+ JH71X0__MUX(JH7110_SYSCLK_TDM_TDM, "tdm_tdm", 2,
+ JH7110_SYSCLK_TDM_INTERNAL,
+ JH7110_SYSCLK_TDM_EXT),
+ JH71X0__INV(JH7110_SYSCLK_TDM_TDM_INV, "tdm_tdm_inv", JH7110_SYSCLK_TDM_TDM),
+ /* jtag */
+ JH71X0__DIV(JH7110_SYSCLK_JTAG_CERTIFICATION_TRNG, "jtag_certification_trng", 4,
+ JH7110_SYSCLK_OSC),
+};
+
+static struct clk_hw *jh7110_sysclk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh71x0_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < JH7110_SYSCLK_END)
+ return &priv->reg[idx].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void jh7110_reset_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static void jh7110_reset_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct jh71x0_reset_adev *rdev = to_jh71x0_reset_adev(adev);
+
+ kfree(rdev);
+}
+
+int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
+ const char *adev_name,
+ u32 adev_id)
+{
+ struct jh71x0_reset_adev *rdev;
+ struct auxiliary_device *adev;
+ int ret;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return -ENOMEM;
+
+ rdev->base = priv->base;
+
+ adev = &rdev->adev;
+ adev->name = adev_name;
+ adev->dev.parent = priv->dev;
+ adev->dev.release = jh7110_reset_adev_release;
+ adev->id = adev_id;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(priv->dev,
+ jh7110_reset_unregister_adev, adev);
+}
+EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
+
+static int __init jh7110_syscrg_probe(struct platform_device *pdev)
+{
+ struct jh71x0_clk_priv *priv;
+ unsigned int idx;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev,
+ struct_size(priv, reg, JH7110_SYSCLK_END),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->rmw_lock);
+ priv->dev = &pdev->dev;
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /*
+ * These PLL clocks are not actually fixed factor clocks and can be
+ * controlled by the syscon registers of JH7110. They will be dropped
+ * and registered in the PLL clock driver instead.
+ */
+ /* 24MHz -> 1000.0MHz */
+ priv->pll[0] = devm_clk_hw_register_fixed_factor(priv->dev, "pll0_out",
+ "osc", 0, 125, 3);
+ if (IS_ERR(priv->pll[0]))
+ return PTR_ERR(priv->pll[0]);
+
+ /* 24MHz -> 1066.0MHz */
+ priv->pll[1] = devm_clk_hw_register_fixed_factor(priv->dev, "pll1_out",
+ "osc", 0, 533, 12);
+ if (IS_ERR(priv->pll[1]))
+ return PTR_ERR(priv->pll[1]);
+
+ /* 24MHz -> 1188.0MHz */
+ priv->pll[2] = devm_clk_hw_register_fixed_factor(priv->dev, "pll2_out",
+ "osc", 0, 99, 2);
+ if (IS_ERR(priv->pll[2]))
+ return PTR_ERR(priv->pll[2]);
+
+ for (idx = 0; idx < JH7110_SYSCLK_END; idx++) {
+ u32 max = jh7110_sysclk_data[idx].max;
+ struct clk_parent_data parents[4] = {};
+ struct clk_init_data init = {
+ .name = jh7110_sysclk_data[idx].name,
+ .ops = starfive_jh71x0_clk_ops(max),
+ .parent_data = parents,
+ .num_parents =
+ ((max & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT) + 1,
+ .flags = jh7110_sysclk_data[idx].flags,
+ };
+ struct jh71x0_clk *clk = &priv->reg[idx];
+ unsigned int i;
+
+ for (i = 0; i < init.num_parents; i++) {
+ unsigned int pidx = jh7110_sysclk_data[idx].parents[i];
+
+ if (pidx < JH7110_SYSCLK_END)
+ parents[i].hw = &priv->reg[pidx].hw;
+ else if (pidx == JH7110_SYSCLK_OSC)
+ parents[i].fw_name = "osc";
+ else if (pidx == JH7110_SYSCLK_GMAC1_RMII_REFIN)
+ parents[i].fw_name = "gmac1_rmii_refin";
+ else if (pidx == JH7110_SYSCLK_GMAC1_RGMII_RXIN)
+ parents[i].fw_name = "gmac1_rgmii_rxin";
+ else if (pidx == JH7110_SYSCLK_I2STX_BCLK_EXT)
+ parents[i].fw_name = "i2stx_bclk_ext";
+ else if (pidx == JH7110_SYSCLK_I2STX_LRCK_EXT)
+ parents[i].fw_name = "i2stx_lrck_ext";
+ else if (pidx == JH7110_SYSCLK_I2SRX_BCLK_EXT)
+ parents[i].fw_name = "i2srx_bclk_ext";
+ else if (pidx == JH7110_SYSCLK_I2SRX_LRCK_EXT)
+ parents[i].fw_name = "i2srx_lrck_ext";
+ else if (pidx == JH7110_SYSCLK_TDM_EXT)
+ parents[i].fw_name = "tdm_ext";
+ else if (pidx == JH7110_SYSCLK_MCLK_EXT)
+ parents[i].fw_name = "mclk_ext";
+ else
+ parents[i].hw = priv->pll[pidx - JH7110_SYSCLK_PLL0_OUT];
+ }
+
+ clk->hw.init = &init;
+ clk->idx = idx;
+ clk->max_div = max & JH71X0_CLK_DIV_MASK;
+
+ ret = devm_clk_hw_register(&pdev->dev, &clk->hw);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_sysclk_get, priv);
+ if (ret)
+ return ret;
+
+ return jh7110_reset_controller_register(priv, "rst-sys", 0);
+}
+
+static const struct of_device_id jh7110_syscrg_match[] = {
+ { .compatible = "starfive,jh7110-syscrg" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver jh7110_syscrg_driver = {
+ .driver = {
+ .name = "clk-starfive-jh7110-sys",
+ .of_match_table = jh7110_syscrg_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(jh7110_syscrg_driver, jh7110_syscrg_probe);
diff --git a/drivers/clk/starfive/clk-starfive-jh7110.h b/drivers/clk/starfive/clk-starfive-jh7110.h
new file mode 100644
index 000000000000..f29682b8d400
--- /dev/null
+++ b/drivers/clk/starfive/clk-starfive-jh7110.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CLK_STARFIVE_JH7110_H
+#define __CLK_STARFIVE_JH7110_H
+
+#include "clk-starfive-jh71x0.h"
+
+int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
+ const char *adev_name,
+ u32 adev_id);
+
+#endif
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.c b/drivers/clk/starfive/clk-starfive-jh71x0.c
new file mode 100644
index 000000000000..b372083d11c3
--- /dev/null
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH71X0 Clock Generator Driver
+ *
+ * Copyright (C) 2021-2022 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "clk-starfive-jh71x0.h"
+
+static struct jh71x0_clk *jh71x0_clk_from(struct clk_hw *hw)
+{
+ return container_of(hw, struct jh71x0_clk, hw);
+}
+
+static struct jh71x0_clk_priv *jh71x0_priv_from(struct jh71x0_clk *clk)
+{
+ return container_of(clk, struct jh71x0_clk_priv, reg[clk->idx]);
+}
+
+static u32 jh71x0_clk_reg_get(struct jh71x0_clk *clk)
+{
+ struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
+ void __iomem *reg = priv->base + 4 * clk->idx;
+
+ return readl_relaxed(reg);
+}
+
+static void jh71x0_clk_reg_rmw(struct jh71x0_clk *clk, u32 mask, u32 value)
+{
+ struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
+ void __iomem *reg = priv->base + 4 * clk->idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ value |= readl_relaxed(reg) & ~mask;
+ writel_relaxed(value, reg);
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static int jh71x0_clk_enable(struct clk_hw *hw)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+
+ jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, JH71X0_CLK_ENABLE);
+ return 0;
+}
+
+static void jh71x0_clk_disable(struct clk_hw *hw)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+
+ jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, 0);
+}
+
+static int jh71x0_clk_is_enabled(struct clk_hw *hw)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+
+ return !!(jh71x0_clk_reg_get(clk) & JH71X0_CLK_ENABLE);
+}
+
+static unsigned long jh71x0_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ u32 div = jh71x0_clk_reg_get(clk) & JH71X0_CLK_DIV_MASK;
+
+ return div ? parent_rate / div : 0;
+}
+
+static int jh71x0_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ unsigned long parent = req->best_parent_rate;
+ unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
+ unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
+ unsigned long result = parent / div;
+
+ /*
+ * we want the result clamped by min_rate and max_rate if possible:
+ * case 1: div hits the max divider value, which means it's less than
+ * parent / rate, so the result is greater than rate and min_rate in
+ * particular. we can't do anything about result > max_rate because the
+ * divider doesn't go any further.
+ * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
+ * always lower or equal to rate and max_rate. however the result may
+ * turn out lower than min_rate, but then the next higher rate is fine:
+ * div - 1 = ceil(parent / rate) - 1 < parent / rate
+ * and thus
+ * min_rate <= rate < parent / (div - 1)
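+	 *
+	 * Illustrative example (numbers picked for clarity, not taken from
+	 * the datasheet): parent = 100 MHz and min_rate = rate = 30 MHz give
+	 * div = DIV_ROUND_UP(100, 30) = 4 and result = 25 MHz, which is below
+	 * min_rate, so div - 1 = 3 is used and ~33.3 MHz is returned instead.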
+ */
+ if (result < req->min_rate && div > 1)
+ result = parent / (div - 1);
+
+ req->rate = result;
+ return 0;
+}
+
+static int jh71x0_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
+ 1UL, (unsigned long)clk->max_div);
+
+ jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, div);
+ return 0;
+}
+
+static unsigned long jh71x0_clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ u32 reg = jh71x0_clk_reg_get(clk);
+ unsigned long div100 = 100 * (reg & JH71X0_CLK_INT_MASK) +
+ ((reg & JH71X0_CLK_FRAC_MASK) >> JH71X0_CLK_FRAC_SHIFT);
+
+ return (div100 >= JH71X0_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
+}
+
+static int jh71x0_clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long parent100 = 100 * req->best_parent_rate;
+ unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
+ unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
+ JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
+ unsigned long result = parent100 / div100;
+
+ /* clamp the result as in jh71x0_clk_determine_rate() above */
+ if (result > req->max_rate && div100 < JH71X0_CLK_FRAC_MAX)
+ result = parent100 / (div100 + 1);
+ if (result < req->min_rate && div100 > JH71X0_CLK_FRAC_MIN)
+ result = parent100 / (div100 - 1);
+
+ req->rate = result;
+ return 0;
+}
+
+static int jh71x0_clk_frac_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
+ JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
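+	/*
+	 * div100 is the divider scaled by 100: the integer part is stored in
+	 * the low byte and the remaining hundredths in the FRAC field, so a
+	 * divider of 2.5 is encoded as int = 2, frac = 50.
+	 */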
+ u32 value = ((div100 % 100) << JH71X0_CLK_FRAC_SHIFT) | (div100 / 100);
+
+ jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, value);
+ return 0;
+}
+
+static u8 jh71x0_clk_get_parent(struct clk_hw *hw)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ u32 value = jh71x0_clk_reg_get(clk);
+
+ return (value & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT;
+}
+
+static int jh71x0_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ u32 value = (u32)index << JH71X0_CLK_MUX_SHIFT;
+
+ jh71x0_clk_reg_rmw(clk, JH71X0_CLK_MUX_MASK, value);
+ return 0;
+}
+
+static int jh71x0_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static int jh71x0_clk_get_phase(struct clk_hw *hw)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ u32 value = jh71x0_clk_reg_get(clk);
+
+ return (value & JH71X0_CLK_INVERT) ? 180 : 0;
+}
+
+static int jh71x0_clk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ u32 value;
+
+ if (degrees == 0)
+ value = 0;
+ else if (degrees == 180)
+ value = JH71X0_CLK_INVERT;
+ else
+ return -EINVAL;
+
+ jh71x0_clk_reg_rmw(clk, JH71X0_CLK_INVERT, value);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void jh71x0_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ static const struct debugfs_reg32 jh71x0_clk_reg = {
+ .name = "CTRL",
+ .offset = 0,
+ };
+ struct jh71x0_clk *clk = jh71x0_clk_from(hw);
+ struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
+ struct debugfs_regset32 *regset;
+
+ regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ regset->regs = &jh71x0_clk_reg;
+ regset->nregs = 1;
+ regset->base = priv->base + 4 * clk->idx;
+
+ debugfs_create_regset32("registers", 0400, dentry, regset);
+}
+#else
+#define jh71x0_clk_debug_init NULL
+#endif
+
+static const struct clk_ops jh71x0_clk_gate_ops = {
+ .enable = jh71x0_clk_enable,
+ .disable = jh71x0_clk_disable,
+ .is_enabled = jh71x0_clk_is_enabled,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_div_ops = {
+ .recalc_rate = jh71x0_clk_recalc_rate,
+ .determine_rate = jh71x0_clk_determine_rate,
+ .set_rate = jh71x0_clk_set_rate,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_fdiv_ops = {
+ .recalc_rate = jh71x0_clk_frac_recalc_rate,
+ .determine_rate = jh71x0_clk_frac_determine_rate,
+ .set_rate = jh71x0_clk_frac_set_rate,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_gdiv_ops = {
+ .enable = jh71x0_clk_enable,
+ .disable = jh71x0_clk_disable,
+ .is_enabled = jh71x0_clk_is_enabled,
+ .recalc_rate = jh71x0_clk_recalc_rate,
+ .determine_rate = jh71x0_clk_determine_rate,
+ .set_rate = jh71x0_clk_set_rate,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_mux_ops = {
+ .determine_rate = jh71x0_clk_mux_determine_rate,
+ .set_parent = jh71x0_clk_set_parent,
+ .get_parent = jh71x0_clk_get_parent,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_gmux_ops = {
+ .enable = jh71x0_clk_enable,
+ .disable = jh71x0_clk_disable,
+ .is_enabled = jh71x0_clk_is_enabled,
+ .determine_rate = jh71x0_clk_mux_determine_rate,
+ .set_parent = jh71x0_clk_set_parent,
+ .get_parent = jh71x0_clk_get_parent,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_mdiv_ops = {
+ .recalc_rate = jh71x0_clk_recalc_rate,
+ .determine_rate = jh71x0_clk_determine_rate,
+ .get_parent = jh71x0_clk_get_parent,
+ .set_parent = jh71x0_clk_set_parent,
+ .set_rate = jh71x0_clk_set_rate,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_gmd_ops = {
+ .enable = jh71x0_clk_enable,
+ .disable = jh71x0_clk_disable,
+ .is_enabled = jh71x0_clk_is_enabled,
+ .recalc_rate = jh71x0_clk_recalc_rate,
+ .determine_rate = jh71x0_clk_determine_rate,
+ .get_parent = jh71x0_clk_get_parent,
+ .set_parent = jh71x0_clk_set_parent,
+ .set_rate = jh71x0_clk_set_rate,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
+static const struct clk_ops jh71x0_clk_inv_ops = {
+ .get_phase = jh71x0_clk_get_phase,
+ .set_phase = jh71x0_clk_set_phase,
+ .debug_init = jh71x0_clk_debug_init,
+};
+
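+/*
+ * The "max" value doubles as a capability mask: the divider width, mux field
+ * and enable bit together select which combination of gate, mux and divider
+ * ops the clock needs, with the phase-invert ops as the fallback.
+ */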
+const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
+{
+ if (max & JH71X0_CLK_DIV_MASK) {
+ if (max & JH71X0_CLK_MUX_MASK) {
+ if (max & JH71X0_CLK_ENABLE)
+ return &jh71x0_clk_gmd_ops;
+ return &jh71x0_clk_mdiv_ops;
+ }
+ if (max & JH71X0_CLK_ENABLE)
+ return &jh71x0_clk_gdiv_ops;
+ if (max == JH71X0_CLK_FRAC_MAX)
+ return &jh71x0_clk_fdiv_ops;
+ return &jh71x0_clk_div_ops;
+ }
+
+ if (max & JH71X0_CLK_MUX_MASK) {
+ if (max & JH71X0_CLK_ENABLE)
+ return &jh71x0_clk_gmux_ops;
+ return &jh71x0_clk_mux_ops;
+ }
+
+ if (max & JH71X0_CLK_ENABLE)
+ return &jh71x0_clk_gate_ops;
+
+ return &jh71x0_clk_inv_ops;
+}
+EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
new file mode 100644
index 000000000000..34bb11c72eb7
--- /dev/null
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CLK_STARFIVE_JH71X0_H
+#define __CLK_STARFIVE_JH71X0_H
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+
+/* register fields */
+#define JH71X0_CLK_ENABLE BIT(31)
+#define JH71X0_CLK_INVERT BIT(30)
+#define JH71X0_CLK_MUX_MASK GENMASK(27, 24)
+#define JH71X0_CLK_MUX_SHIFT 24
+#define JH71X0_CLK_DIV_MASK GENMASK(23, 0)
+#define JH71X0_CLK_FRAC_MASK GENMASK(15, 8)
+#define JH71X0_CLK_FRAC_SHIFT 8
+#define JH71X0_CLK_INT_MASK GENMASK(7, 0)
+
+/* fractional divider min/max */
+#define JH71X0_CLK_FRAC_MIN 100UL
+#define JH71X0_CLK_FRAC_MAX 25599UL
+
+/* clock data */
+struct jh71x0_clk_data {
+ const char *name;
+ unsigned long flags;
+ u32 max;
+ u8 parents[4];
+};
+
+#define JH71X0_GATE(_idx, _name, _flags, _parent) \
+[_idx] = { \
+ .name = _name, \
+ .flags = CLK_SET_RATE_PARENT | (_flags), \
+ .max = JH71X0_CLK_ENABLE, \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH71X0__DIV(_idx, _name, _max, _parent) \
+[_idx] = { \
+ .name = _name, \
+ .flags = 0, \
+ .max = _max, \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH71X0_GDIV(_idx, _name, _flags, _max, _parent) \
+[_idx] = { \
+ .name = _name, \
+ .flags = _flags, \
+ .max = JH71X0_CLK_ENABLE | (_max), \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH71X0_FDIV(_idx, _name, _parent) \
+[_idx] = { \
+ .name = _name, \
+ .flags = 0, \
+ .max = JH71X0_CLK_FRAC_MAX, \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH71X0__MUX(_idx, _name, _nparents, ...) \
+[_idx] = { \
+ .name = _name, \
+ .flags = 0, \
+ .max = ((_nparents) - 1) << JH71X0_CLK_MUX_SHIFT, \
+ .parents = { __VA_ARGS__ }, \
+}
+
+#define JH71X0_GMUX(_idx, _name, _flags, _nparents, ...) \
+[_idx] = { \
+ .name = _name, \
+ .flags = _flags, \
+ .max = JH71X0_CLK_ENABLE | \
+ (((_nparents) - 1) << JH71X0_CLK_MUX_SHIFT), \
+ .parents = { __VA_ARGS__ }, \
+}
+
+#define JH71X0_MDIV(_idx, _name, _max, _nparents, ...) \
+[_idx] = { \
+ .name = _name, \
+ .flags = 0, \
+ .max = (((_nparents) - 1) << JH71X0_CLK_MUX_SHIFT) | (_max), \
+ .parents = { __VA_ARGS__ }, \
+}
+
+#define JH71X0__GMD(_idx, _name, _flags, _max, _nparents, ...) \
+[_idx] = { \
+ .name = _name, \
+ .flags = _flags, \
+ .max = JH71X0_CLK_ENABLE | \
+ (((_nparents) - 1) << JH71X0_CLK_MUX_SHIFT) | (_max), \
+ .parents = { __VA_ARGS__ }, \
+}
+
+#define JH71X0__INV(_idx, _name, _parent) \
+[_idx] = { \
+ .name = _name, \
+ .flags = CLK_SET_RATE_PARENT, \
+ .max = JH71X0_CLK_INVERT, \
+ .parents = { [0] = _parent }, \
+}
+
+struct jh71x0_clk {
+ struct clk_hw hw;
+ unsigned int idx;
+ unsigned int max_div;
+};
+
+struct jh71x0_clk_priv {
+ /* protect clk enable and set rate/parent from happening at the same time */
+ spinlock_t rmw_lock;
+ struct device *dev;
+ void __iomem *base;
+ struct clk_hw *pll[3];
+ struct jh71x0_clk reg[];
+};
+
+const struct clk_ops *starfive_jh71x0_clk_ops(u32 max);
+
+#endif
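As a hedged illustration of how the table macros and the ops lookup above fit together (the entry name and index below are made up, not part of the real JH7110 clock list):

static const struct jh71x0_clk_data example_clks[] = {
	/* gated divider: enable bit plus a maximum divider of 8, parented to the external oscillator */
	JH71X0_GDIV(0, "example_bus", 0, 8, JH7110_SYSCLK_OSC),
};

/*
 * starfive_jh71x0_clk_ops(example_clks[0].max) sees the divider field and the
 * enable bit set but no mux field, so it returns &jh71x0_clk_gdiv_ops.
 */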
diff --git a/drivers/clk/stm32/clk-stm32mp13.c b/drivers/clk/stm32/clk-stm32mp13.c
index 1192eee8abe4..c4a737482fe5 100644
--- a/drivers/clk/stm32/clk-stm32mp13.c
+++ b/drivers/clk/stm32/clk-stm32mp13.c
@@ -1593,15 +1593,13 @@ static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev)
return ret;
}
-static int stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
+static void stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev_of_node(dev);
for_each_available_child_of_node(np, child)
of_clk_del_provider(child);
-
- return 0;
}
static struct platform_driver stm32mp13_rcc_clocks_driver = {
@@ -1610,7 +1608,7 @@ static struct platform_driver stm32mp13_rcc_clocks_driver = {
.of_match_table = stm32mp13_match_data,
},
.probe = stm32mp1_rcc_clocks_probe,
- .remove = stm32mp1_rcc_clocks_remove,
+ .remove_new = stm32mp1_rcc_clocks_remove,
};
static int __init stm32mp13_clocks_init(void)
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 461537679c04..b547198a2c65 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -9,112 +9,113 @@ if SUNXI_CCU
config SUNIV_F1C100S_CCU
tristate "Support for the Allwinner newer F1C100s CCU"
- default MACH_SUNIV
+ default y
depends on MACH_SUNIV || COMPILE_TEST
config SUN20I_D1_CCU
- tristate "Support for the Allwinner D1 CCU"
- default RISCV && ARCH_SUNXI
- depends on (RISCV && ARCH_SUNXI) || COMPILE_TEST
+ tristate "Support for the Allwinner D1/R528/T113 CCU"
+ default y
+ depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN20I_D1_R_CCU
- tristate "Support for the Allwinner D1 PRCM CCU"
- default RISCV && ARCH_SUNXI
- depends on (RISCV && ARCH_SUNXI) || COMPILE_TEST
+ tristate "Support for the Allwinner D1/R528/T113 PRCM CCU"
+ default y
+ depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN50I_A64_CCU
tristate "Support for the Allwinner A64 CCU"
- default ARM64 && ARCH_SUNXI
- depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on ARM64 || COMPILE_TEST
config SUN50I_A100_CCU
tristate "Support for the Allwinner A100 CCU"
- default ARM64 && ARCH_SUNXI
- depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on ARM64 || COMPILE_TEST
config SUN50I_A100_R_CCU
tristate "Support for the Allwinner A100 PRCM CCU"
- default ARM64 && ARCH_SUNXI
- depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on ARM64 || COMPILE_TEST
config SUN50I_H6_CCU
tristate "Support for the Allwinner H6 CCU"
- default ARM64 && ARCH_SUNXI
- depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on ARM64 || COMPILE_TEST
config SUN50I_H616_CCU
tristate "Support for the Allwinner H616 CCU"
- default ARM64 && ARCH_SUNXI
- depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on ARM64 || COMPILE_TEST
config SUN50I_H6_R_CCU
tristate "Support for the Allwinner H6 and H616 PRCM CCU"
- default ARM64 && ARCH_SUNXI
- depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on ARM64 || COMPILE_TEST
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
- default MACH_SUN4I
- default MACH_SUN7I
+ default y
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- default MACH_SUN5I
+ default y
depends on MACH_SUN5I || COMPILE_TEST
depends on SUNXI_CCU=y
config SUN6I_A31_CCU
tristate "Support for the Allwinner A31/A31s CCU"
- default MACH_SUN6I
+ default y
depends on MACH_SUN6I || COMPILE_TEST
config SUN6I_RTC_CCU
tristate "Support for the Allwinner H616/R329 RTC CCU"
- default ARCH_SUNXI
- depends on ARCH_SUNXI || COMPILE_TEST
+ default y
+ depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
- default MACH_SUN8I
+ default y
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
tristate "Support for the Allwinner A33 CCU"
- default MACH_SUN8I
+ default y
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
tristate "Support for the Allwinner A83T CCU"
- default MACH_SUN8I
+ default y
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
tristate "Support for the Allwinner H3 CCU"
- default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
- depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ default y
+ depends on MACH_SUN8I || ARM64 || COMPILE_TEST
config SUN8I_V3S_CCU
tristate "Support for the Allwinner V3s CCU"
- default MACH_SUN8I
+ default y
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
tristate "Support for the Allwinner SoCs DE2 CCU"
- default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
+ default y
+ depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_R40_CCU
tristate "Support for the Allwinner R40 CCU"
- default MACH_SUN8I
+ default y
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
tristate "Support for the Allwinner A80 CCU"
- default MACH_SUN9I
+ default y
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
tristate "Support for Allwinner SoCs' PRCM CCUs"
- default MACH_SUN8I || (ARCH_SUNXI && ARM64)
+ default y
+ depends on MACH_SUN8I || ARM64 || COMPILE_TEST
endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index 8ef3cdeb7962..48a8fb2c43b7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -240,7 +240,7 @@ static const struct clk_parent_data cpux_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
};
static SUNXI_CCU_MUX_DATA(cpux_clk, "cpux", cpux_parents,
- 0x500, 24, 3, CLK_SET_RATE_PARENT);
+ 0x500, 24, 3, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
static const struct clk_hw *cpux_hws[] = { &cpux_clk.common.hw };
static SUNXI_CCU_M_HWS(cpux_axi_clk, "cpux-axi",
@@ -469,6 +469,11 @@ static SUNXI_CCU_GATE_HWS(bus_i2c2_clk, "bus-i2c2", apb1_hws,
static SUNXI_CCU_GATE_HWS(bus_i2c3_clk, "bus-i2c3", apb1_hws,
0x91c, BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_can0_clk, "bus-can0", apb1_hws,
+ 0x92c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_can1_clk, "bus-can1", apb1_hws,
+ 0x92c, BIT(1), 0);
+
static const struct clk_parent_data spi_parents[] = {
{ .fw_name = "hosc" },
{ .hw = &pll_periph0_clk.hw },
@@ -997,6 +1002,8 @@ static struct ccu_common *sun20i_d1_ccu_clks[] = {
&bus_i2c1_clk.common,
&bus_i2c2_clk.common,
&bus_i2c3_clk.common,
+ &bus_can0_clk.common,
+ &bus_can1_clk.common,
&spi0_clk.common,
&spi1_clk.common,
&bus_spi0_clk.common,
@@ -1147,6 +1154,8 @@ static struct clk_hw_onecell_data sun20i_d1_hw_clks = {
[CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
[CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
[CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_CAN0] = &bus_can0_clk.common.hw,
+ [CLK_BUS_CAN1] = &bus_can1_clk.common.hw,
[CLK_SPI0] = &spi0_clk.common.hw,
[CLK_SPI1] = &spi1_clk.common.hw,
[CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
@@ -1252,6 +1261,8 @@ static struct ccu_reset_map sun20i_d1_ccu_resets[] = {
[RST_BUS_I2C1] = { 0x91c, BIT(17) },
[RST_BUS_I2C2] = { 0x91c, BIT(18) },
[RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_CAN0] = { 0x92c, BIT(16) },
+ [RST_BUS_CAN1] = { 0x92c, BIT(17) },
[RST_BUS_SPI0] = { 0x96c, BIT(16) },
[RST_BUS_SPI1] = { 0x96c, BIT(17) },
[RST_BUS_EMAC] = { 0x97c, BIT(16) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.h b/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
index e303176f0d4e..b14da36e2537 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
@@ -10,6 +10,6 @@
#include <dt-bindings/clock/sun20i-d1-ccu.h>
#include <dt-bindings/reset/sun20i-d1-ccu.h>
-#define CLK_NUMBER (CLK_FANOUT2 + 1)
+#define CLK_NUMBER (CLK_BUS_CAN1 + 1)
#endif /* _CCU_SUN20I_D1_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index d3fcb983c17c..bfebe8dbbe65 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -434,8 +434,13 @@ static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc24M",
static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc24M",
0x0cc, BIT(19), 0);
-static const char * const dram_parents[] = { "pll-ddr", "pll-periph0-2x" };
-static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
+/* H3 has broken MDFS hardware, so the mux/divider cannot be changed. */
+static CLK_FIXED_FACTOR_HW(h3_dram_clk, "dram",
+ &pll_ddr_clk.common.hw,
+ 1, 1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+static const char * const h5_dram_parents[] = { "pll-ddr", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX(h5_dram_clk, "dram", h5_dram_parents,
0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL);
static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
@@ -592,7 +597,7 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&usb_ohci1_clk.common,
&usb_ohci2_clk.common,
&usb_ohci3_clk.common,
- &dram_clk.common,
+ &h5_dram_clk.common,
&dram_ve_clk.common,
&dram_csi_clk.common,
&dram_deinterlace_clk.common,
@@ -732,7 +737,7 @@ static struct clk_hw_onecell_data sun8i_h3_hw_clks = {
[CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
[CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
[CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
- [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM] = &h3_dram_clk.hw,
[CLK_DRAM_VE] = &dram_ve_clk.common.hw,
[CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
[CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
@@ -848,7 +853,7 @@ static struct clk_hw_onecell_data sun50i_h5_hw_clks = {
[CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
[CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
[CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
- [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM] = &h5_dram_clk.common.hw,
[CLK_DRAM_VE] = &dram_ve_clk.common.hw,
[CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
[CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
index de33414fc5c2..23a8d44e2449 100644
--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -10,11 +10,11 @@
#include "ccu_common.h"
/**
- * sunxi_ccu_set_mmc_timing_mode: Configure the MMC clock timing mode
+ * sunxi_ccu_set_mmc_timing_mode - Configure the MMC clock timing mode
* @clk: clock to be configured
* @new_mode: true for new timing mode introduced in A83T and later
*
- * Returns 0 on success, -ENOTSUPP if the clock does not support
+ * Return: %0 on success, %-ENOTSUPP if the clock does not support
* switching modes.
*/
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode)
@@ -46,8 +46,8 @@ EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode);
 * sunxi_ccu_get_mmc_timing_mode - Get the current MMC clock timing mode
* @clk: clock to query
*
- * Returns 0 if the clock is in old timing mode, > 0 if it is in
- * new timing mode, and -ENOTSUPP if the clock does not support
+ * Return: %0 if the clock is in old timing mode, > %0 if it is in
+ * new timing mode, and %-ENOTSUPP if the clock does not support
* this function.
*/
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 57cf2d615148..cc94a694cb67 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -10,9 +10,9 @@
#include "ccu_gate.h"
#include "ccu_mp.h"
-static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_m, unsigned int max_p,
- unsigned int *m, unsigned int *p)
+static unsigned long ccu_mp_find_best(unsigned long parent, unsigned long rate,
+ unsigned int max_m, unsigned int max_p,
+ unsigned int *m, unsigned int *p)
{
unsigned long best_rate = 0;
unsigned int best_m = 0, best_p = 0;
@@ -35,6 +35,8 @@ static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
*m = best_m;
*p = best_p;
+
+ return best_rate;
}
static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
@@ -109,8 +111,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
- ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
- rate = *parent_rate / p / m;
+ rate = ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
} else {
rate = ccu_mp_find_best_with_parent_adj(hw, parent_rate, rate,
max_m, max_p);
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index c4fb82af97e8..8aa35d5804f3 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -15,8 +15,8 @@ struct _ccu_nk {
unsigned long k, min_k, max_k;
};
-static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
- struct _ccu_nk *nk)
+static unsigned long ccu_nk_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nk *nk)
{
unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0;
@@ -39,6 +39,8 @@ static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
nk->k = best_k;
nk->n = best_n;
+
+ return best_rate;
}
static void ccu_nk_disable(struct clk_hw *hw)
@@ -104,8 +106,7 @@ static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
_nk.min_k = nk->k.min ?: 1;
_nk.max_k = nk->k.max ?: 1 << nk->k.width;
- ccu_nk_find_best(*parent_rate, rate, &_nk);
- rate = *parent_rate * _nk.n * _nk.k;
+ rate = ccu_nk_find_best(*parent_rate, rate, &_nk);
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate / nk->fixed_post_div;
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 67da2c189b53..a0978a50edae 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -16,8 +16,8 @@ struct _ccu_nkm {
unsigned long m, min_m, max_m;
};
-static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
- struct _ccu_nkm *nkm)
+static unsigned long ccu_nkm_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nkm *nkm)
{
unsigned long best_rate = 0;
unsigned long best_n = 0, best_k = 0, best_m = 0;
@@ -45,6 +45,8 @@ static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
nkm->n = best_n;
nkm->k = best_k;
nkm->m = best_m;
+
+ return best_rate;
}
static void ccu_nkm_disable(struct clk_hw *hw)
@@ -122,9 +124,7 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= nkm->fixed_post_div;
- ccu_nkm_find_best(*parent_rate, rate, &_nkm);
-
- rate = *parent_rate * _nkm.n * _nkm.k / _nkm.m;
+ rate = ccu_nkm_find_best(*parent_rate, rate, &_nkm);
if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nkm->fixed_post_div;
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 39413cb0985c..99359a06892d 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -29,8 +29,8 @@ static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
return rate;
}
-static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
- struct _ccu_nkmp *nkmp)
+static unsigned long ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nkmp *nkmp)
{
unsigned long best_rate = 0;
unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
@@ -65,6 +65,8 @@ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
nkmp->k = best_k;
nkmp->m = best_m;
nkmp->p = best_p;
+
+ return best_rate;
}
static void ccu_nkmp_disable(struct clk_hw *hw)
@@ -150,10 +152,8 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
_nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
- ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
+ rate = ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
- rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
- _nkmp.m, _nkmp.p);
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate / nkmp->fixed_post_div;
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 9ca9257f4426..c1fd11542c45 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -27,8 +27,8 @@ static unsigned long ccu_nm_calc_rate(unsigned long parent,
return rate;
}
-static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
- struct _ccu_nm *nm)
+static unsigned long ccu_nm_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nm *nm)
{
unsigned long best_rate = 0;
unsigned long best_n = 0, best_m = 0;
@@ -52,6 +52,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
nm->n = best_n;
nm->m = best_m;
+
+ return best_rate;
}
static void ccu_nm_disable(struct clk_hw *hw)
@@ -157,8 +159,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
_nm.min_m = 1;
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
- ccu_nm_find_best(*parent_rate, rate, &_nm);
- rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
+ rate = ccu_nm_find_best(*parent_rate, rate, &_nm);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 41433927b55c..58fa5a59e0c7 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -2081,7 +2081,10 @@ struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev)
{
struct tegra_dfll *td = platform_get_drvdata(pdev);
- /* Try to prevent removal while the DFLL is active */
+ /*
+	 * Note that exiting early here doesn't prevent the driver from being
+	 * unbound; it only leaks some resources.
+ */
if (td->mode != DFLL_DISABLED) {
dev_err(&pdev->dev,
"must disable DFLL before removing driver\n");
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 5e339ad0a97c..2a164e565c86 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -612,20 +612,19 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return 0;
}
-static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
+static void tegra124_dfll_fcpu_remove(struct platform_device *pdev)
{
struct tegra_dfll_soc_data *soc;
+ /*
+	 * Note that exiting early here is dangerous, as *soc is freed once
+	 * this function returns.
+ */
soc = tegra_dfll_unregister(pdev);
- if (IS_ERR(soc)) {
- dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
- PTR_ERR(soc));
- return PTR_ERR(soc);
- }
+ if (IS_ERR(soc))
+ return;
tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
-
- return 0;
}
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
@@ -636,7 +635,7 @@ static const struct dev_pm_ops tegra124_dfll_pm_ops = {
static struct platform_driver tegra124_dfll_fcpu_driver = {
.probe = tegra124_dfll_fcpu_probe,
- .remove = tegra124_dfll_fcpu_remove,
+ .remove_new = tegra124_dfll_fcpu_remove,
.driver = {
.name = "tegra124-dfll",
.of_match_table = tegra124_dfll_fcpu_of_match,
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 422d78247553..dcacc5064d33 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -21,24 +21,24 @@
#define MISC_CLK_ENB 0x48
#define OSC_CTRL 0x50
-#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
-#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
-#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
-#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
-#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
-#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
-
-#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
-#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
-#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
-#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
+#define OSC_CTRL_OSC_FREQ_MASK (3u<<30)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30)
+#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30)
+#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30)
+#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3u<<28)
+#define OSC_CTRL_PLL_REF_DIV_1 (0u<<28)
+#define OSC_CTRL_PLL_REF_DIV_2 (1u<<28)
+#define OSC_CTRL_PLL_REF_DIV_4 (2u<<28)
#define OSC_FREQ_DET 0x58
-#define OSC_FREQ_DET_TRIG (1<<31)
+#define OSC_FREQ_DET_TRIG (1u<<31)
#define OSC_FREQ_DET_STATUS 0x5c
-#define OSC_FREQ_DET_BUSY (1<<31)
-#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+#define OSC_FREQ_DET_BUSY (1u<<31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFFu
#define TEGRA20_CLK_PERIPH_BANKS 3
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index f5e7e2049241..6ecbba4342c5 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -931,13 +931,11 @@ free:
return err;
}
-static int ti_adpll_remove(struct platform_device *pdev)
+static void ti_adpll_remove(struct platform_device *pdev)
{
struct ti_adpll_data *d = dev_get_drvdata(&pdev->dev);
ti_adpll_free_resources(d);
-
- return 0;
}
static struct platform_driver ti_adpll_driver = {
@@ -946,7 +944,7 @@ static struct platform_driver ti_adpll_driver = {
.of_match_table = ti_adpll_match,
},
.probe = ti_adpll_probe,
- .remove = ti_adpll_remove,
+ .remove_new = ti_adpll_remove,
};
static int __init ti_adpll_init(void)
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index f73f402ff7de..b6fce916967c 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -512,16 +512,16 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
struct clk_hw_omap *hw;
struct clk *clk;
struct omap_clkctrl_clk *clkctrl_clk = NULL;
- const __be32 *addrp;
bool legacy_naming;
const char *clkctrl_name;
u32 addr;
int ret;
char *c;
u16 soc_mask = 0;
+ struct resource res;
- addrp = of_get_address(node, 0, NULL, NULL);
- addr = (u32)of_translate_address(node, addrp);
+ of_address_to_resource(node, 0, &res);
+ addr = (u32)res.start;
#ifdef CONFIG_ARCH_OMAP4
if (of_machine_is_compatible("ti,omap4"))
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 46c66fac48e6..a61213311d6c 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -87,15 +87,8 @@ static int uniphier_clk_probe(struct platform_device *pdev)
hw_data->hws[p->idx] = hw;
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- hw_data);
-}
-
-static int uniphier_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
-
- return 0;
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ hw_data);
}
static const struct of_device_id uniphier_clk_match[] = {
@@ -220,7 +213,6 @@ static const struct of_device_id uniphier_clk_match[] = {
static struct platform_driver uniphier_clk_driver = {
.probe = uniphier_clk_probe,
- .remove = uniphier_clk_remove,
.driver = {
.name = "uniphier-clk",
.of_match_table = uniphier_clk_match,
diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
index 16dae35ab370..01d07f1bf01b 100644
--- a/drivers/clk/visconti/pll.h
+++ b/drivers/clk/visconti/pll.h
@@ -15,7 +15,6 @@
struct visconti_pll_provider {
void __iomem *reg_base;
- struct regmap *regmap;
struct clk_hw_onecell_data clk_data;
struct device_node *node;
};
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
index fdc060e75839..aed7d22fae63 100644
--- a/drivers/clk/x86/clk-fch.c
+++ b/drivers/clk/x86/clk-fch.c
@@ -92,14 +92,14 @@ static int fch_clk_probe(struct platform_device *pdev)
return 0;
}
-static int fch_clk_remove(struct platform_device *pdev)
+static void fch_clk_remove(struct platform_device *pdev)
{
int i, clks;
struct pci_dev *rdev;
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!rdev)
- return -ENODEV;
+ return;
clks = pci_match_id(fch_pci_ids, rdev) ? CLK_MAX_FIXED : ST_MAX_CLKS;
@@ -107,7 +107,6 @@ static int fch_clk_remove(struct platform_device *pdev)
clk_hw_unregister(hws[i]);
pci_dev_put(rdev);
- return 0;
}
static struct platform_driver fch_clk_driver = {
@@ -116,6 +115,6 @@ static struct platform_driver fch_clk_driver = {
.suppress_bind_attrs = true,
},
.probe = fch_clk_probe,
- .remove = fch_clk_remove,
+ .remove_new = fch_clk_remove,
};
builtin_platform_driver(fch_clk_driver);
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index e746e3f8d05a..2974dd0ec6f4 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -367,7 +367,7 @@ err_unreg_clk_plt:
return err;
}
-static int plt_clk_remove(struct platform_device *pdev)
+static void plt_clk_remove(struct platform_device *pdev)
{
struct clk_plt_data *data;
@@ -377,7 +377,6 @@ static int plt_clk_remove(struct platform_device *pdev)
clkdev_drop(data->mclk_lookup);
plt_clk_unregister_loop(data, PMC_CLK_NUM);
plt_clk_unregister_parents(data);
- return 0;
}
static struct platform_driver plt_clk_driver = {
@@ -385,6 +384,6 @@ static struct platform_driver plt_clk_driver = {
.name = "clk-pmc-atom",
},
.probe = plt_clk_probe,
- .remove = plt_clk_remove,
+ .remove_new = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index eb1dfe7ecc1b..e83f104fad02 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -8,12 +8,14 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/iopoll.h>
@@ -37,6 +39,7 @@
#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_FRAC_SHIFT 8
#define WZRD_CLKOUT_FRAC_MASK 0x3ff
+#define WZRD_CLKOUT0_FRAC_MASK GENMASK(17, 8)
#define WZRD_DR_MAX_INT_DIV_VALUE 255
#define WZRD_DR_STATUS_REG_OFFSET 0x04
@@ -49,6 +52,22 @@
#define WZRD_USEC_POLL 10
#define WZRD_TIMEOUT_POLL 1000
+
+/* Divider limits, from UG572 Table 3-4 for Ultrascale+ */
+#define DIV_O 0x01
+#define DIV_ALL 0x03
+
+#define WZRD_M_MIN 2
+#define WZRD_M_MAX 128
+#define WZRD_D_MIN 1
+#define WZRD_D_MAX 106
+#define WZRD_VCO_MIN 800000000
+#define WZRD_VCO_MAX 1600000000
+#define WZRD_O_MIN 1
+#define WZRD_O_MAX 128
+#define WZRD_MIN_ERR 20000
+#define WZRD_FRAC_POINTS 1000
+
/* Get the mask from width */
#define div_mask(width) ((1 << (width)) - 1)
@@ -97,6 +116,9 @@ struct clk_wzrd {
* @width: width of the divider bit field
* @flags: clk_wzrd divider flags
* @table: array of value/divider pairs, last entry should have div = 0
+ * @m: value of the multiplier
+ * @d: value of the common divider
+ * @o: value of the leaf divider
* @lock: register lock
*/
struct clk_wzrd_divider {
@@ -107,6 +129,9 @@ struct clk_wzrd_divider {
u8 width;
u8 flags;
const struct clk_div_table *table;
+ u32 m;
+ u32 d;
+ u32 o;
spinlock_t *lock; /* divider lock */
};
@@ -198,12 +223,155 @@ static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
return *prate / div;
}
+static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ unsigned long vco_freq, freq, diff;
+ u32 m, d, o;
+
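+	/*
+	 * Brute-force search: try every (m, d, o) combination that keeps the
+	 * VCO within range and stop at the first one whose output is within
+	 * WZRD_MIN_ERR of the requested rate.
+	 */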
+ for (m = WZRD_M_MIN; m <= WZRD_M_MAX; m++) {
+ for (d = WZRD_D_MIN; d <= WZRD_D_MAX; d++) {
+ vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
+ if (vco_freq >= WZRD_VCO_MIN && vco_freq <= WZRD_VCO_MAX) {
+ for (o = WZRD_O_MIN; o <= WZRD_O_MAX; o++) {
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs(freq - rate);
+
+ if (diff < WZRD_MIN_ERR) {
+ divider->m = m;
+ divider->d = d;
+ divider->o = o;
+ return 0;
+ }
+ }
+ }
+ }
+ }
+ return -EBUSY;
+}
+
+static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ unsigned long vco_freq, rate_div, clockout0_div;
+ u32 reg, pre, value, f;
+ int err;
+
+ err = clk_wzrd_get_divisors(hw, rate, parent_rate);
+ if (err)
+ return err;
+
+ vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
+ rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
+
+ clockout0_div = div_u64(rate_div, WZRD_FRAC_POINTS);
+
+ pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
+ f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
+ f &= WZRD_CLKOUT_FRAC_MASK;
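+	/* f holds the fractional part of the O divider in 1/1000ths */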
+
+ reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
+ FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
+
+ writel(reg, divider->base + WZRD_CLK_CFG_REG(2));
+ /* Set divisor and clear phase offset */
+ reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
+ FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
+ writel(reg, divider->base + WZRD_CLK_CFG_REG(0));
+ writel(divider->o, divider->base + WZRD_CLK_CFG_REG(2));
+ writel(0, divider->base + WZRD_CLK_CFG_REG(3));
+ /* Check status register */
+ err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
+ value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
+ value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+}
+
+static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return ret;
+}
+
+static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u32 m, d, o, div, reg, f;
+
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(0));
+ d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
+ m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(2));
+ o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
+ f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
+
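+	/* The effective divider is d * (o + f/1000), kept scaled by 1000 for rounding */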
+ div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
+ return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
+ divider->flags, divider->width);
+}
+
+static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ unsigned long int_freq;
+ u32 m, d, o, div, f;
+ int err;
+
+ err = clk_wzrd_get_divisors(hw, rate, *prate);
+ if (err)
+ return err;
+
+ m = divider->m;
+ d = divider->d;
+ o = divider->o;
+
+ div = d * o;
+ int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table,
+ divider->flags, divider->width);
+
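+	/*
+	 * If the integer divisors alone cannot reach the requested rate,
+	 * quantize it to the nearest rate the fractional O divider can hit.
+	 */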
+ if (rate > int_freq) {
+ f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
+ rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
+ }
+ return rate;
+}
+
static const struct clk_ops clk_wzrd_clk_divider_ops = {
.round_rate = clk_wzrd_round_rate,
.set_rate = clk_wzrd_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate,
};
+static const struct clk_ops clk_wzrd_clk_div_all_ops = {
+ .round_rate = clk_wzrd_round_rate_all,
+ .set_rate = clk_wzrd_dynamic_all,
+ .recalc_rate = clk_wzrd_recalc_rate_all,
+};
+
static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -280,7 +448,7 @@ static struct clk *clk_wzrd_register_divf(struct device *dev,
void __iomem *base, u16 offset,
u8 shift, u8 width,
u8 clk_divider_flags,
- const struct clk_div_table *table,
+ u32 div_type,
spinlock_t *lock)
{
struct clk_wzrd_divider *div;
@@ -307,7 +475,6 @@ static struct clk *clk_wzrd_register_divf(struct device *dev,
div->flags = clk_divider_flags;
div->lock = lock;
div->hw.init = &init;
- div->table = table;
hw = &div->hw;
ret = devm_clk_hw_register(dev, hw);
@@ -324,7 +491,7 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
void __iomem *base, u16 offset,
u8 shift, u8 width,
u8 clk_divider_flags,
- const struct clk_div_table *table,
+ u32 div_type,
spinlock_t *lock)
{
struct clk_wzrd_divider *div;
@@ -337,7 +504,12 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &clk_wzrd_clk_divider_ops;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else if (div_type == DIV_O)
+ init.ops = &clk_wzrd_clk_divider_ops;
+ else
+ init.ops = &clk_wzrd_clk_div_all_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -349,7 +521,6 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
div->flags = clk_divider_flags;
div->lock = lock;
div->hw.init = &init;
- div->table = table;
hw = &div->hw;
ret = devm_clk_hw_register(dev, hw);
@@ -425,6 +596,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
const char *clk_name;
void __iomem *ctrl_reg;
struct clk_wzrd *clk_wzrd;
+ const char *clkout_name;
struct device_node *np = pdev->dev.of_node;
int nr_outputs;
unsigned long flags = 0;
@@ -469,6 +641,26 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
+ ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
+ if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_out0", dev_name(&pdev->dev));
+ if (nr_outputs == 1) {
+ clk_wzrd->clkout[0] = clk_wzrd_register_divider
+ (&pdev->dev, clkout_name,
+ __clk_get_name(clk_wzrd->clk_in1), 0,
+ clk_wzrd->base, WZRD_CLK_CFG_REG(3),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ DIV_ALL, &clkwzrd_lock);
+
+ goto out;
+ }
+
reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0));
reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;
@@ -476,20 +668,11 @@ static int clk_wzrd_probe(struct platform_device *pdev)
reg = reg & WZRD_CLKFBOUT_MULT_MASK;
reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
mult = (reg * 1000) + reg_f;
- clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
+ clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
if (!clk_name) {
ret = -ENOMEM;
goto err_disable_clk;
}
-
- ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
- if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
- ret = -EINVAL;
- goto err_disable_clk;
- }
- if (nr_outputs == 1)
- flags = CLK_SET_RATE_PARENT;
-
clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
(&pdev->dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
@@ -500,7 +683,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
+ clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
if (!clk_name) {
ret = -ENOMEM;
goto err_rm_int_clk;
@@ -521,9 +704,8 @@ static int clk_wzrd_probe(struct platform_device *pdev)
/* register div per output */
for (i = nr_outputs - 1; i >= 0 ; i--) {
- const char *clkout_name;
-
- clkout_name = kasprintf(GFP_KERNEL, "%s_out%d", dev_name(&pdev->dev), i);
+ clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%s_out%d", dev_name(&pdev->dev), i);
if (!clkout_name) {
ret = -ENOMEM;
goto err_rm_int_clk;
@@ -537,7 +719,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
- NULL, &clkwzrd_lock);
+ DIV_O, &clkwzrd_lock);
else
clk_wzrd->clkout[i] = clk_wzrd_register_divider
(&pdev->dev, clkout_name,
@@ -546,7 +728,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
- NULL, &clkwzrd_lock);
+ DIV_O, &clkwzrd_lock);
if (IS_ERR(clk_wzrd->clkout[i])) {
int j;
@@ -559,8 +741,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
}
}
- kfree(clk_name);
-
+out:
clk_wzrd->clk_data.clks = clk_wzrd->clkout;
clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
@@ -585,7 +766,6 @@ static int clk_wzrd_probe(struct platform_device *pdev)
err_rm_int_clks:
clk_unregister(clk_wzrd->clks_internal[1]);
err_rm_int_clk:
- kfree(clk_name);
clk_unregister(clk_wzrd->clks_internal[0]);
err_disable_clk:
clk_disable_unprepare(clk_wzrd->axi_clk);
@@ -593,7 +773,7 @@ err_disable_clk:
return ret;
}
-static int clk_wzrd_remove(struct platform_device *pdev)
+static void clk_wzrd_remove(struct platform_device *pdev)
{
int i;
struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
@@ -611,8 +791,6 @@ static int clk_wzrd_remove(struct platform_device *pdev)
}
clk_disable_unprepare(clk_wzrd->axi_clk);
-
- return 0;
}
static const struct of_device_id clk_wzrd_ids[] = {
@@ -630,7 +808,7 @@ static struct platform_driver clk_wzrd_driver = {
.pm = &clk_wzrd_dev_pm_ops,
},
.probe = clk_wzrd_probe,
- .remove = clk_wzrd_remove,
+ .remove_new = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index d66b1315114e..0786f15ebbe8 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -702,13 +702,11 @@ error_clk_provider:
* Return: Returns 0 on success
* Negative error code otherwise
*/
-static int xvcu_remove(struct platform_device *pdev)
+static void xvcu_remove(struct platform_device *pdev)
{
struct xvcu_device *xvcu;
xvcu = platform_get_drvdata(pdev);
- if (!xvcu)
- return -ENODEV;
xvcu_unregister_clock_provider(xvcu);
@@ -716,8 +714,6 @@ static int xvcu_remove(struct platform_device *pdev)
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->aclk);
-
- return 0;
}
static const struct of_device_id xvcu_of_id_table[] = {
@@ -733,7 +729,7 @@ static struct platform_driver xvcu_driver = {
.of_match_table = xvcu_of_id_table,
},
.probe = xvcu_probe,
- .remove = xvcu_remove,
+ .remove_new = xvcu_remove,
};
module_platform_driver(xvcu_driver);
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 0d3e1377b092..7411a7fd50ac 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -341,7 +341,5 @@ struct clk_hw *zynqmp_clk_register_pll(const char *name, u32 clk_id,
return ERR_PTR(ret);
}
- clk_hw_set_rate_range(hw, PS_PLL_VCO_MIN, PS_PLL_VCO_MAX);
-
return hw;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4469e7f555e9..526382dc7482 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -441,7 +441,7 @@ config CLKSRC_EXYNOS_MCT
config CLKSRC_SAMSUNG_PWM
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on HAS_IOMEM
- depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -479,6 +479,15 @@ config MTK_TIMER
help
Support for Mediatek timer driver.
+config MTK_CPUX_TIMER
+ bool "MediaTek CPUX timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ default ARCH_MEDIATEK
+ select TIMER_OF
+ select CLKSRC_MMIO
+ help
+	  Support for the MediaTek CPUXGPT timer driver.
+
config SPRD_TIMER
bool "Spreadtrum timer driver" if EXPERT
depends on HAS_IOMEM
@@ -706,7 +715,7 @@ config INGENIC_OST
config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
- depends on OF || COMPILE_TEST
+ depends on OF && ARM
select TIMER_OF
help
This option enables Microchip PIT64B timer for Atmel
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 64ab547de97b..f12d3987a960 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
+obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 279ddff81ab4..82338773602c 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
+#include <asm/time.h>
/*
* The I/O port the PMTMR resides at.
@@ -210,8 +211,9 @@ static int __init init_acpi_pm_clocksource(void)
return -ENODEV;
}
- return clocksource_register_hz(&clocksource_acpi_pm,
- PMTMR_TICKS_PER_SEC);
+ if (tsc_clocksource_watchdog_disabled())
+ clocksource_acpi_pm.flags |= CLOCK_SOURCE_MUST_VERIFY;
+ return clocksource_register_hz(&clocksource_acpi_pm, PMTMR_TICKS_PER_SEC);
}
/* We use fs_initcall because we want the PCI fixups to have run
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index ab190dffb1ed..ca8d29ab70da 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -333,11 +333,6 @@ static int em_sti_probe(struct platform_device *pdev)
return 0;
}
-static int em_sti_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
static const struct of_device_id em_sti_dt_ids[] = {
{ .compatible = "renesas,em-sti", },
{},
@@ -346,10 +341,10 @@ MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
static struct platform_driver em_sti_device_driver = {
.probe = em_sti_probe,
- .remove = em_sti_remove,
.driver = {
.name = "em_sti",
.of_match_table = em_sti_dt_ids,
+ .suppress_bind_attrs = true,
}
};
@@ -368,4 +363,3 @@ module_exit(em_sti_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index bfd60093ee1c..ef8cb1b71be4 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -682,7 +682,7 @@ static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
* processor cannot use the global comparator.
*/
if (frc_shared)
- return ret;
+ return 0;
return exynos4_clockevent_init();
}
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index c0cef92b12b8..bcd9042a0c9f 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -49,7 +49,7 @@ static bool direct_mode_enabled;
static int stimer0_irq = -1;
static int stimer0_message_sint;
-static DEFINE_PER_CPU(long, stimer0_evt);
+static __maybe_unused DEFINE_PER_CPU(long, stimer0_evt);
/*
* Common code for stimer0 interrupts coming via Direct Mode or
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(hv_stimer0_isr);
* stimer0 interrupt handler for architectures that support
* per-cpu interrupts, which also implies Direct Mode.
*/
-static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+static irqreturn_t __maybe_unused hv_stimer0_percpu_isr(int irq, void *dev_id)
{
hv_stimer0_isr();
return IRQ_HANDLED;
@@ -196,6 +196,7 @@ void __weak hv_remove_stimer0_handler(void)
{
};
+#ifdef CONFIG_ACPI
/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
static int hv_setup_stimer0_irq(void)
{
@@ -230,6 +231,16 @@ static void hv_remove_stimer0_irq(void)
stimer0_irq = -1;
}
}
+#else
+static int hv_setup_stimer0_irq(void)
+{
+ return 0;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+}
+#endif
/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
int hv_stimer_alloc(bool have_percpu_irqs)
@@ -506,9 +517,6 @@ static bool __init hv_init_tsc_clocksource(void)
{
union hv_reference_tsc_msr tsc_msr;
- if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
- return false;
-
/*
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
@@ -525,6 +533,9 @@ static bool __init hv_init_tsc_clocksource(void)
hyperv_cs_msr.rating = 250;
}
+ if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+ return false;
+
hv_read_reference_counter = read_hv_clock_tsc;
/*
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 24ed0f1f089b..089ce64b1c3f 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -9,13 +9,12 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 7b952aa52c0b..e81c588d9afe 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -1145,17 +1145,12 @@ static int sh_cmt_probe(struct platform_device *pdev)
return 0;
}
-static int sh_cmt_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
- .remove = sh_cmt_remove,
.driver = {
.name = "sh_cmt",
.of_match_table = of_match_ptr(sh_cmt_of_table),
+ .suppress_bind_attrs = true,
},
.id_table = sh_cmt_id_table,
};
@@ -1179,4 +1174,3 @@ module_exit(sh_cmt_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 169a1fccc497..34872df5458a 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -484,11 +484,6 @@ static int sh_mtu2_probe(struct platform_device *pdev)
return 0;
}
-static int sh_mtu2_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent */
-}
-
static const struct platform_device_id sh_mtu2_id_table[] = {
{ "sh-mtu2", 0 },
{ },
@@ -503,10 +498,10 @@ MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
static struct platform_driver sh_mtu2_device_driver = {
.probe = sh_mtu2_probe,
- .remove = sh_mtu2_remove,
.driver = {
.name = "sh_mtu2",
.of_match_table = of_match_ptr(sh_mtu2_of_table),
+ .suppress_bind_attrs = true,
},
.id_table = sh_mtu2_id_table,
};
@@ -530,4 +525,3 @@ module_exit(sh_mtu2_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index b00dec0655cb..beffff81c00f 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -632,11 +632,6 @@ static int sh_tmu_probe(struct platform_device *pdev)
return 0;
}
-static int sh_tmu_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
static const struct platform_device_id sh_tmu_id_table[] = {
{ "sh-tmu", SH_TMU },
{ "sh-tmu-sh3", SH_TMU_SH3 },
@@ -652,10 +647,10 @@ MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
- .remove = sh_tmu_remove,
.driver = {
.name = "sh_tmu",
.of_match_table = of_match_ptr(sh_tmu_of_table),
+ .suppress_bind_attrs = true,
},
.id_table = sh_tmu_id_table,
};
@@ -679,4 +674,3 @@ module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 6cfe2ab73eb0..9a55e733ae99 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -17,6 +17,9 @@
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#include <linux/timex.h>
@@ -31,6 +34,7 @@
/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;
+static unsigned int clint_ipi_irq;
static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
@@ -41,12 +45,10 @@ u64 __iomem *clint_time_val;
EXPORT_SYMBOL(clint_time_val);
#endif
-static void clint_send_ipi(const struct cpumask *target)
+#ifdef CONFIG_SMP
+static void clint_send_ipi(unsigned int cpu)
{
- unsigned int cpu;
-
- for_each_cpu(cpu, target)
- writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+ writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}
static void clint_clear_ipi(void)
@@ -54,10 +56,18 @@ static void clint_clear_ipi(void)
writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}
-static struct riscv_ipi_ops clint_ipi_ops = {
- .ipi_inject = clint_send_ipi,
- .ipi_clear = clint_clear_ipi,
-};
+static void clint_ipi_interrupt(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ clint_clear_ipi();
+ ipi_mux_process();
+
+ chained_irq_exit(chip, desc);
+}
+#endif
#ifdef CONFIG_64BIT
#define clint_get_cycles() readq_relaxed(clint_timer_val)
@@ -125,12 +135,19 @@ static int clint_timer_starting_cpu(unsigned int cpu)
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
+ enable_percpu_irq(clint_ipi_irq,
+ irq_get_trigger_type(clint_ipi_irq));
return 0;
}
static int clint_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(clint_timer_irq);
+ /*
+ * Don't disable IPI when CPU goes offline because
+ * the masking/unmasking of virtual IPIs is done
+ * via generic IPI-Mux
+ */
return 0;
}
@@ -170,6 +187,12 @@ static int __init clint_timer_init_dt(struct device_node *np)
return -ENODEV;
}
+ /* Find parent irq domain and map ipi irq */
+ if (!clint_ipi_irq &&
+ oirq.args[0] == RV_IRQ_SOFT &&
+ irq_find_host(oirq.np))
+ clint_ipi_irq = irq_of_parse_and_map(np, i);
+
/* Find parent irq domain and map timer irq */
if (!clint_timer_irq &&
oirq.args[0] == RV_IRQ_TIMER &&
@@ -177,9 +200,9 @@ static int __init clint_timer_init_dt(struct device_node *np)
clint_timer_irq = irq_of_parse_and_map(np, i);
}
- /* If CLINT timer irq not found then fail */
- if (!clint_timer_irq) {
- pr_err("%pOFP: timer irq not found\n", np);
+ /* If CLINT ipi or timer irq not found then fail */
+ if (!clint_ipi_irq || !clint_timer_irq) {
+ pr_err("%pOFP: ipi/timer irq not found\n", np);
return -ENODEV;
}
@@ -219,6 +242,19 @@ static int __init clint_timer_init_dt(struct device_node *np)
goto fail_iounmap;
}
+#ifdef CONFIG_SMP
+ rc = ipi_mux_create(BITS_PER_BYTE, clint_send_ipi);
+ if (rc <= 0) {
+ pr_err("unable to create muxed IPIs\n");
+ rc = (rc < 0) ? rc : -ENODEV;
+ goto fail_free_irq;
+ }
+
+ irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
+ riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
+ clint_clear_ipi();
+#endif
+
rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
"clockevents/clint/timer:starting",
clint_timer_starting_cpu,
@@ -228,13 +264,10 @@ static int __init clint_timer_init_dt(struct device_node *np)
goto fail_free_irq;
}
- riscv_set_ipi_ops(&clint_ipi_ops);
- clint_clear_ipi();
-
return 0;
fail_free_irq:
- free_irq(clint_timer_irq, &clint_clock_event);
+ free_percpu_irq(clint_timer_irq, &clint_clock_event);
fail_iounmap:
iounmap(base);
return rc;
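The CLINT change routes software interrupts through the generic IPI mux: ipi_mux_create() returns the base virq of the muxed IPIs, riscv_ipi_set_virq_range() hands that range to the arch code, and the per-hart software interrupt is demultiplexed by a chained handler that calls ipi_mux_process(). A sketch of that demux shape, assuming a hypothetical example_ack_parent():

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static void example_ack_parent(void)
{
	/* clear the parent (per-hart software-interrupt) pending bit here */
}

static void example_ipi_demux(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	example_ack_parent();
	ipi_mux_process();	/* fan out to the muxed virtual IPIs */

	chained_irq_exit(chip, desc);
}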
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index 9996c0542520..b1c248498be4 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -257,21 +257,25 @@ int __init davinci_timer_register(struct clk *clk,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
pr_err("Unable to request memory region\n");
- return -EBUSY;
+ rv = -EBUSY;
+ goto exit_clk_disable;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
pr_err("Unable to map the register range\n");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto exit_mem_region;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
- if (!clockevent)
- return -ENOMEM;
+ if (!clockevent) {
+ rv = -ENOMEM;
+ goto exit_iounmap_base;
+ }
clockevent->dev.name = "tim12";
clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
@@ -296,7 +300,7 @@ int __init davinci_timer_register(struct clk *clk,
"clockevent/tim12", clockevent);
if (rv) {
pr_err("Unable to request the clockevent interrupt\n");
- return rv;
+ goto exit_free_clockevent;
}
davinci_clocksource.dev.rating = 300;
@@ -323,13 +327,27 @@ int __init davinci_timer_register(struct clk *clk,
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
pr_err("Unable to register clocksource\n");
- return rv;
+ goto exit_free_irq;
}
sched_clock_register(davinci_timer_read_sched_clock,
DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
return 0;
+
+exit_free_irq:
+ free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+ clockevent);
+exit_free_clockevent:
+ kfree(clockevent);
+exit_iounmap_base:
+ iounmap(base);
+exit_mem_region:
+ release_mem_region(timer_cfg->reg.start,
+ resource_size(&timer_cfg->reg));
+exit_clk_disable:
+ clk_disable_unprepare(clk);
+ return rv;
}
static int __init of_davinci_timer_register(struct device_node *np)
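The DaVinci hunk converts early-exit error returns into a single reverse-order unwind, so every resource taken before a failure is released. A minimal sketch of the idiom under hypothetical names (example_setup() is not part of the patch):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>

static int __init example_setup(unsigned long pa, size_t len)
{
	void __iomem *base;
	void *buf;
	int ret;

	if (!request_mem_region(pa, len, "example")) {
		ret = -EBUSY;
		goto out;
	}

	base = ioremap(pa, len);
	if (!base) {
		ret = -ENOMEM;
		goto out_release;
	}

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	return 0;	/* resources stay held for the life of the timer */

out_unmap:
	iounmap(base);
out_release:
	release_mem_region(pa, len);
out:
	return ret;
}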
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 7b2c70f2f353..ca3e4cbc80c6 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -420,25 +420,6 @@ static int __init _mxc_timer_init(struct imx_timer *imxtm)
return mxc_clockevent_init(imxtm);
}
-void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
-{
- struct imx_timer *imxtm;
-
- imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
- BUG_ON(!imxtm);
-
- imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
- imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");
-
- imxtm->base = ioremap(pbase, SZ_4K);
- BUG_ON(!imxtm->base);
-
- imxtm->type = type;
- imxtm->irq = irq;
-
- _mxc_timer_init(imxtm);
-}
-
static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
struct imx_timer *imxtm;
diff --git a/drivers/clocksource/timer-mediatek-cpux.c b/drivers/clocksource/timer-mediatek-cpux.c
new file mode 100644
index 000000000000..a8e3df4c09fd
--- /dev/null
+++ b/drivers/clocksource/timer-mediatek-cpux.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MediaTek SoCs CPUX General Purpose Timer handling
+ *
+ * Based on timer-mediatek.c:
+ * Copyright (C) 2014 Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * Copyright (C) 2022 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_SYNC_TICKS 3
+
+/* cpux mcusys wrapper */
+#define CPUX_CON_REG 0x0
+#define CPUX_IDX_REG 0x4
+
+/* cpux */
+#define CPUX_IDX_GLOBAL_CTRL 0x0
+ #define CPUX_ENABLE BIT(0)
+ #define CPUX_CLK_DIV_MASK GENMASK(10, 8)
+ #define CPUX_CLK_DIV1 BIT(8)
+ #define CPUX_CLK_DIV2 BIT(9)
+ #define CPUX_CLK_DIV4 BIT(10)
+#define CPUX_IDX_GLOBAL_IRQ 0x30
+
+static u32 mtk_cpux_readl(u32 reg_idx, struct timer_of *to)
+{
+ writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
+ return readl(timer_of_base(to) + CPUX_CON_REG);
+}
+
+static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to)
+{
+ writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
+ writel(val, timer_of_base(to) + CPUX_CON_REG);
+}
+
+static void mtk_cpux_set_irq(struct timer_of *to, bool enable)
+{
+ const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask);
+ u32 val;
+
+ val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to);
+
+ if (enable)
+ val |= *irq_mask;
+ else
+ val &= ~(*irq_mask);
+
+ mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to);
+}
+
+static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+ /* Clear any irq */
+ mtk_cpux_set_irq(to_timer_of(clkevt), false);
+
+ /*
+ * Disabling CPUXGPT timer will crash the platform, especially
+ * if Trusted Firmware is using it (usually, for sleep states),
+ * so we only mask the IRQ and call it a day.
+ */
+ return 0;
+}
+
+static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt)
+{
+ mtk_cpux_set_irq(to_timer_of(clkevt), true);
+ return 0;
+}
+
+static struct timer_of to = {
+ /*
+ * There are per-cpu interrupts for the CPUX General Purpose Timer
+ * but since this timer feeds the AArch64 System Timer we can rely
+ * on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ.
+ */
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mtk-cpuxgpt",
+ .cpumask = cpu_possible_mask,
+ .rating = 10,
+ .set_state_shutdown = mtk_cpux_clkevt_shutdown,
+ .tick_resume = mtk_cpux_clkevt_resume,
+ },
+};
+
+static int __init mtk_cpux_init(struct device_node *node)
+{
+ u32 freq, val;
+ int ret;
+
+ /* If this fails, bad things are about to happen... */
+ ret = timer_of_init(node, &to);
+ if (ret) {
+ WARN(1, "Cannot start CPUX timers.\n");
+ return ret;
+ }
+
+ /*
+ * Check if we're given a clock with the right frequency for this
+ * timer, otherwise warn but keep going with the setup anyway, as
+ * that makes it possible to still boot the kernel, even though
+ * it may not work correctly (random lockups, etc).
+ * The reason behind this is that having an early UART may not be
+ * possible for everyone and this gives a chance to retrieve kmsg
+ * for eventual debugging even on consumer devices.
+ */
+ freq = timer_of_rate(&to);
+ if (freq > 13000000)
+ WARN(1, "Requested unsupported timer frequency %u\n", freq);
+
+ /* Clock input is 26MHz, set DIV2 to achieve 13MHz clock */
+ val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to);
+ val &= ~CPUX_CLK_DIV_MASK;
+ val |= CPUX_CLK_DIV2;
+ mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to);
+
+ /* Enable all CPUXGPT timers */
+ val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to);
+ mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to);
+
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ return 0;
+}
+TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init);
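The new file keeps the early-boot probing model of the original driver: TIMER_OF_DECLARE() ties the init function to the compatible string so it runs from the flattened device tree before the driver core is up, and timer_of_init() maps the registers and clock described by that node. A bare-bones sketch of that skeleton, with the example_* names and "vendor,example-timer" compatible being hypothetical:

#include <linux/clocksource.h>
#include <linux/of.h>
#include "timer-of.h"

static struct timer_of example_to = {
	.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};

static int __init example_timer_init(struct device_node *node)
{
	int ret = timer_of_init(node, &example_to);

	if (ret)
		return ret;

	/* program the hardware through timer_of_base(&example_to) here */
	return 0;
}
TIMER_OF_DECLARE(example_timer, "vendor,example-timer", example_timer_init);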
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index d5b29fd03ca2..7bcb4a3f26fb 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -22,19 +22,6 @@
#define TIMER_SYNC_TICKS (3)
-/* cpux mcusys wrapper */
-#define CPUX_CON_REG 0x0
-#define CPUX_IDX_REG 0x4
-
-/* cpux */
-#define CPUX_IDX_GLOBAL_CTRL 0x0
- #define CPUX_ENABLE BIT(0)
- #define CPUX_CLK_DIV_MASK GENMASK(10, 8)
- #define CPUX_CLK_DIV1 BIT(8)
- #define CPUX_CLK_DIV2 BIT(9)
- #define CPUX_CLK_DIV4 BIT(10)
-#define CPUX_IDX_GLOBAL_IRQ 0x30
-
/* gpt */
#define GPT_IRQ_EN_REG 0x00
#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
@@ -85,52 +72,6 @@
static void __iomem *gpt_sched_reg __read_mostly;
-static u32 mtk_cpux_readl(u32 reg_idx, struct timer_of *to)
-{
- writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
- return readl(timer_of_base(to) + CPUX_CON_REG);
-}
-
-static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to)
-{
- writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
- writel(val, timer_of_base(to) + CPUX_CON_REG);
-}
-
-static void mtk_cpux_set_irq(struct timer_of *to, bool enable)
-{
- const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask);
- u32 val;
-
- val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to);
-
- if (enable)
- val |= *irq_mask;
- else
- val &= ~(*irq_mask);
-
- mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to);
-}
-
-static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt)
-{
- /* Clear any irq */
- mtk_cpux_set_irq(to_timer_of(clkevt), false);
-
- /*
- * Disabling CPUXGPT timer will crash the platform, especially
- * if Trusted Firmware is using it (usually, for sleep states),
- * so we only mask the IRQ and call it a day.
- */
- return 0;
-}
-
-static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt)
-{
- mtk_cpux_set_irq(to_timer_of(clkevt), true);
- return 0;
-}
-
static void mtk_syst_ack_irq(struct timer_of *to)
{
/* Clear and disable interrupt */
@@ -340,60 +281,6 @@ static struct timer_of to = {
},
};
-static int __init mtk_cpux_init(struct device_node *node)
-{
- static struct timer_of to_cpux;
- u32 freq, val;
- int ret;
-
- /*
- * There are per-cpu interrupts for the CPUX General Purpose Timer
- * but since this timer feeds the AArch64 System Timer we can rely
- * on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ.
- */
- to_cpux.flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
- to_cpux.clkevt.name = "mtk-cpuxgpt";
- to_cpux.clkevt.rating = 10;
- to_cpux.clkevt.cpumask = cpu_possible_mask;
- to_cpux.clkevt.set_state_shutdown = mtk_cpux_clkevt_shutdown;
- to_cpux.clkevt.tick_resume = mtk_cpux_clkevt_resume;
-
- /* If this fails, bad things are about to happen... */
- ret = timer_of_init(node, &to_cpux);
- if (ret) {
- WARN(1, "Cannot start CPUX timers.\n");
- return ret;
- }
-
- /*
- * Check if we're given a clock with the right frequency for this
- * timer, otherwise warn but keep going with the setup anyway, as
- * that makes it possible to still boot the kernel, even though
- * it may not work correctly (random lockups, etc).
- * The reason behind this is that having an early UART may not be
- * possible for everyone and this gives a chance to retrieve kmsg
- * for eventual debugging even on consumer devices.
- */
- freq = timer_of_rate(&to_cpux);
- if (freq > 13000000)
- WARN(1, "Requested unsupported timer frequency %u\n", freq);
-
- /* Clock input is 26MHz, set DIV2 to achieve 13MHz clock */
- val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux);
- val &= ~CPUX_CLK_DIV_MASK;
- val |= CPUX_CLK_DIV2;
- mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to_cpux);
-
- /* Enable all CPUXGPT timers */
- val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux);
- mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to_cpux);
-
- clockevents_config_and_register(&to_cpux.clkevt, timer_of_rate(&to_cpux),
- TIMER_SYNC_TICKS, 0xffffffff);
-
- return 0;
-}
-
static int __init mtk_syst_init(struct device_node *node)
{
int ret;
@@ -452,4 +339,3 @@ static int __init mtk_gpt_init(struct device_node *node)
}
TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
-TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index d5f1436f33d9..57209bb38c70 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -92,6 +93,8 @@ struct mchp_pit64b_clksrc {
static void __iomem *mchp_pit64b_cs_base;
/* Default cycles for clockevent timer. */
static u64 mchp_pit64b_ce_cycles;
+/* Delay timer. */
+static struct delay_timer mchp_pit64b_dt;
static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
{
@@ -169,6 +172,11 @@ static u64 notrace mchp_pit64b_sched_read_clk(void)
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
+static unsigned long notrace mchp_pit64b_dt_read(void)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
@@ -376,6 +384,10 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
+ mchp_pit64b_dt.read_current_timer = mchp_pit64b_dt_read;
+ mchp_pit64b_dt.freq = clk_rate;
+ register_current_timer_delay(&mchp_pit64b_dt);
+
return 0;
}
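The PIT64B addition registers the free-running counter as an ARM delay timer, so udelay()/ndelay() poll real hardware instead of a calibrated busy loop. A sketch of that registration, with example_counter_read() and example_delay_timer_init() as hypothetical names:

#include <linux/delay.h>
#include <linux/init.h>

static unsigned long example_counter_read(void)
{
	return 0;	/* read an upward-counting free-running counter here */
}

static struct delay_timer example_delay_timer;

static void __init example_delay_timer_init(unsigned long rate_hz)
{
	example_delay_timer.read_current_timer = example_counter_read;
	example_delay_timer.freq = rate_hz;
	register_current_timer_delay(&example_delay_timer);
}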
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index a0d66fabf073..5f0f10c7e222 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -28,6 +28,7 @@
#include <asm/timex.h>
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
+static bool riscv_timer_cannot_wake_cpu;
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
@@ -73,10 +74,15 @@ static u64 notrace riscv_sched_clock(void)
static struct clocksource riscv_clocksource = {
.name = "riscv_clocksource",
- .rating = 300,
+ .rating = 400,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = riscv_clocksource_rdtime,
+#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
+ .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
+#else
+ .vdso_clock_mode = VDSO_CLOCKMODE_NONE,
+#endif
};
static int riscv_timer_starting_cpu(unsigned int cpu)
@@ -85,6 +91,8 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
+ if (riscv_timer_cannot_wake_cpu)
+ ce->features |= CLOCK_EVT_FEAT_C3STOP;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
enable_percpu_irq(riscv_clock_event_irq,
@@ -139,6 +147,13 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (cpuid != smp_processor_id())
return 0;
+ child = of_find_compatible_node(NULL, NULL, "riscv,timer");
+ if (child) {
+ riscv_timer_cannot_wake_cpu = of_property_read_bool(child,
+ "riscv,timer-cannot-wake-cpu");
+ of_node_put(child);
+ }
+
domain = NULL;
child = of_get_compatible_child(n, "riscv,cpu-intc");
if (!child) {
@@ -177,6 +192,11 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return error;
}
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+ static_branch_enable(&riscv_sstc_available);
+ }
+
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
@@ -184,11 +204,6 @@ static int __init riscv_timer_init_dt(struct device_node *n)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
- if (riscv_isa_extension_available(NULL, SSTC)) {
- pr_info("Timer interrupt in S-mode is available via sstc extension\n");
- static_branch_enable(&riscv_sstc_available);
- }
-
return error;
}
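Two independent tweaks land in the RISC-V timer: the clocksource gains a vDSO clock mode so gettimeofday() can stay out of the kernel, and a "riscv,timer-cannot-wake-cpu" DT property now flags the clockevent CLOCK_EVT_FEAT_C3STOP so tick broadcast covers deep idle. A sketch of the latter, with example_ce/example_apply_quirk as hypothetical names:

#include <linux/clockchips.h>
#include <linux/of.h>

static struct clock_event_device example_ce = {
	.name		= "example",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
};

static void example_apply_quirk(struct device_node *np)
{
	/* timer stops in deep idle: let the broadcast timer take over */
	if (of_property_read_bool(np, "riscv,timer-cannot-wake-cpu"))
		example_ce.features |= CLOCK_EVT_FEAT_C3STOP;
}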
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index db2841d0beb8..a4c95161cb22 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -195,11 +195,6 @@ out_clk_disable:
return ret;
}
-static int stm32_clkevent_lp_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent */
-}
-
static const struct of_device_id stm32_clkevent_lp_of_match[] = {
{ .compatible = "st,stm32-lptimer-timer", },
{},
@@ -207,15 +202,14 @@ static const struct of_device_id stm32_clkevent_lp_of_match[] = {
MODULE_DEVICE_TABLE(of, stm32_clkevent_lp_of_match);
static struct platform_driver stm32_clkevent_lp_driver = {
- .probe = stm32_clkevent_lp_probe,
- .remove = stm32_clkevent_lp_remove,
+ .probe = stm32_clkevent_lp_probe,
.driver = {
.name = "stm32-lptimer-timer",
- .of_match_table = of_match_ptr(stm32_clkevent_lp_of_match),
+ .of_match_table = stm32_clkevent_lp_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(stm32_clkevent_lp_driver);
MODULE_ALIAS("platform:stm32-lptimer-timer");
MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index e5a70aa1deb4..7bdcc60ad43c 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -144,7 +144,8 @@ static struct timer_of to = {
.clkevt = {
.name = "sun4i_tick",
.rating = 350,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = sun4i_clkevt_shutdown,
.set_state_periodic = sun4i_clkevt_set_periodic,
.set_state_oneshot = sun4i_clkevt_set_oneshot,
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index ea742889ee06..83d08591ea0a 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -447,15 +447,13 @@ unregister_tsc:
return err;
}
-static int tegra186_timer_remove(struct platform_device *pdev)
+static void tegra186_timer_remove(struct platform_device *pdev)
{
struct tegra186_timer *tegra = platform_get_drvdata(pdev);
clocksource_unregister(&tegra->usec);
clocksource_unregister(&tegra->osc);
clocksource_unregister(&tegra->tsc);
-
- return 0;
}
static int __maybe_unused tegra186_timer_suspend(struct device *dev)
@@ -505,10 +503,9 @@ static struct platform_driver tegra186_wdt_driver = {
.of_match_table = tegra186_timer_of_match,
},
.probe = tegra186_timer_probe,
- .remove = tegra186_timer_remove,
+ .remove_new = tegra186_timer_remove,
};
module_platform_driver(tegra186_wdt_driver);
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra186 timers driver");
-MODULE_LICENSE("GPL v2");
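Both the Tegra186 and OMAP timer hunks convert to the void-returning .remove_new callback: the core ignored the old return value anyway, so teardown failures are now logged rather than returned. A hedged sketch with all example_* names hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* nothing to propagate; the device is going away regardless */
	dev_info(&pdev->dev, "removed\n");
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver		= {
		.name = "example",
	},
};
module_platform_driver(example_driver);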
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 632523c1232f..c2dcd8d68e45 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -251,24 +251,24 @@ static void __init dmtimer_systimer_assign_alwon(void)
counter_32k = -ENODEV;
for_each_matching_node(np, dmtimer_match_table) {
+ struct resource res;
if (!dmtimer_is_preferred(np))
continue;
- if (of_property_read_bool(np, "ti,timer-alwon")) {
- const __be32 *addr;
-
- addr = of_get_address(np, 0, NULL, NULL);
- pa = of_translate_address(np, addr);
- if (pa) {
- /* Quirky omap3 boards must use dmtimer12 */
- if (quirk_unreliable_oscillator &&
- pa == 0x48318000)
- continue;
-
- of_node_put(np);
- break;
- }
- }
+ if (!of_property_read_bool(np, "ti,timer-alwon"))
+ continue;
+
+ if (of_address_to_resource(np, 0, &res))
+ continue;
+
+ pa = res.start;
+
+ /* Quirky omap3 boards must use dmtimer12 */
+ if (quirk_unreliable_oscillator && pa == 0x48318000)
+ continue;
+
+ of_node_put(np);
+ break;
}
/* Usually no need for dmtimer clocksource if we have counter32 */
@@ -285,24 +285,22 @@ static void __init dmtimer_systimer_assign_alwon(void)
static u32 __init dmtimer_systimer_find_first_available(void)
{
struct device_node *np;
- const __be32 *addr;
u32 pa = 0;
for_each_matching_node(np, dmtimer_match_table) {
+ struct resource res;
if (!dmtimer_is_preferred(np))
continue;
- addr = of_get_address(np, 0, NULL, NULL);
- pa = of_translate_address(np, addr);
- if (pa) {
- if (pa == clocksource || pa == clockevent) {
- pa = 0;
- continue;
- }
-
- of_node_put(np);
- break;
- }
+ if (of_address_to_resource(np, 0, &res))
+ continue;
+
+ if (res.start == clocksource || res.start == clockevent)
+ continue;
+
+ pa = res.start;
+ of_node_put(np);
+ break;
}
return pa;
@@ -586,7 +584,7 @@ static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
- name, of_find_property(np, "ti,timer-alwon", NULL) ?
+ name, of_property_read_bool(np, "ti,timer-alwon") ?
"always-on " : "", t->rate, np->parent);
return 0;
@@ -787,7 +785,7 @@ static int __init dmtimer_clocksource_init(struct device_node *np)
t->base + t->ctrl);
pr_info("TI gptimer clocksource: %s%pOF\n",
- of_find_property(np, "ti,timer-alwon", NULL) ?
+ of_property_read_bool(np, "ti,timer-alwon") ?
"always-on " : "", np->parent);
if (!dmtimer_sched_clock_counter) {
@@ -812,7 +810,7 @@ err_out_free:
*/
static int __init dmtimer_systimer_init(struct device_node *np)
{
- const __be32 *addr;
+ struct resource res;
u32 pa;
/* One time init for the preferred timer configuration */
@@ -826,8 +824,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
return -EINVAL;
}
- addr = of_get_address(np, 0, NULL, NULL);
- pa = of_translate_address(np, addr);
+
+ of_address_to_resource(np, 0, &res);
+ pa = (u32)res.start;
if (!pa)
return -EINVAL;
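The systimer hunks replace the of_get_address()/of_translate_address() pair with of_address_to_resource(), which hands back the already-translated CPU physical address in a struct resource and fails cleanly when the node has no usable "reg". A small sketch of the helper, assuming a hypothetical example_get_timer_pa():

#include <linux/of.h>
#include <linux/of_address.h>

static int example_get_timer_pa(struct device_node *np, resource_size_t *pa)
{
	struct resource res;
	int ret;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	*pa = res.start;
	return 0;
}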
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index b24b903a8822..349236a7ba5f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -1104,13 +1104,13 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, timer);
if (dev->of_node) {
- if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
+ if (of_property_read_bool(dev->of_node, "ti,timer-alwon"))
timer->capability |= OMAP_TIMER_ALWON;
- if (of_find_property(dev->of_node, "ti,timer-dsp", NULL))
+ if (of_property_read_bool(dev->of_node, "ti,timer-dsp"))
timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
- if (of_find_property(dev->of_node, "ti,timer-pwm", NULL))
+ if (of_property_read_bool(dev->of_node, "ti,timer-pwm"))
timer->capability |= OMAP_TIMER_HAS_PWM;
- if (of_find_property(dev->of_node, "ti,timer-secure", NULL))
+ if (of_property_read_bool(dev->of_node, "ti,timer-secure"))
timer->capability |= OMAP_TIMER_SECURE;
} else {
timer->id = pdev->id;
@@ -1177,7 +1177,7 @@ err_disable:
* In addition to freeing platform resources it also deletes the timer
* entry from the local list.
*/
-static int omap_dm_timer_remove(struct platform_device *pdev)
+static void omap_dm_timer_remove(struct platform_device *pdev)
{
struct dmtimer *timer;
unsigned long flags;
@@ -1197,7 +1197,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- return ret;
+ if (ret)
+ dev_err(&pdev->dev, "Unable to determine timer entry in list of drivers on remove\n");
}
static const struct omap_dm_timer_ops dmtimer_ops = {
@@ -1272,7 +1273,7 @@ MODULE_DEVICE_TABLE(of, omap_timer_match);
static struct platform_driver omap_dm_timer_driver = {
.probe = omap_dm_timer_probe,
- .remove = omap_dm_timer_remove,
+ .remove_new = omap_dm_timer_remove,
.driver = {
.name = "omap_timer",
.of_match_table = omap_timer_match,
@@ -1283,5 +1284,4 @@ static struct platform_driver omap_dm_timer_driver = {
module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 3cb61fa2c5c3..9af280735cba 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-config COMEDI
+menuconfig COMEDI
tristate "Data acquisition support (comedi)"
help
Enable support for a wide range of data acquisition devices
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index e2114bcf815a..8e43918d38c4 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1215,6 +1215,7 @@ static int check_insn_config_length(struct comedi_insn *insn,
case INSN_CONFIG_GET_CLOCK_SRC:
case INSN_CONFIG_SET_OTHER_SRC:
case INSN_CONFIG_GET_COUNTER_STATUS:
+ case INSN_CONFIG_GET_PWM_OUTPUT:
case INSN_CONFIG_PWM_SET_H_BRIDGE:
case INSN_CONFIG_PWM_GET_H_BRIDGE:
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
@@ -3382,7 +3383,7 @@ static int __init comedi_init(void)
if (retval)
goto out_unregister_chrdev_region;
- comedi_class = class_create(THIS_MODULE, "comedi");
+ comedi_class = class_create("comedi");
if (IS_ERR(comedi_class)) {
retval = PTR_ERR(comedi_class);
pr_err("failed to create class\n");
diff --git a/drivers/comedi/drivers/adv_pci1760.c b/drivers/comedi/drivers/adv_pci1760.c
index fcfc2e299110..27f3890f471d 100644
--- a/drivers/comedi/drivers/adv_pci1760.c
+++ b/drivers/comedi/drivers/adv_pci1760.c
@@ -58,7 +58,7 @@
#define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */
#define PCI1760_CMD_SET_DO 0x01 /* Set output state */
#define PCI1760_CMD_GET_DO 0x02 /* Read output status */
-#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */
+#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */
#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */
#define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */
#define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 0b5c0af1cebf..c02dc19a679b 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -795,7 +795,7 @@ static int __init comedi_test_init(void)
}
if (!config_mode) {
- ctcls = class_create(THIS_MODULE, CLASS_NAME);
+ ctcls = class_create(CLASS_NAME);
if (IS_ERR(ctcls)) {
pr_warn("comedi_test: unable to create class\n");
goto clean3;
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index deed4afadb29..d9cb937665cf 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -97,10 +97,6 @@ struct quad8 {
struct quad8_reg __iomem *reg;
};
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@@ -133,6 +129,9 @@ struct quad8 {
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18
+/* Each Counter is 24 bits wide */
+#define LS7267_CNTR_MAX GENMASK(23, 0)
+
static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
unsigned long irqflags;
int i;
- flags = ioread8(&chan->control);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (unsigned long)(borrow ^ carry) << 24;
+ *val = 0;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
unsigned long irqflags;
int i;
- /* Only 24-bit values are supported */
- if (val > 0xFFFFFF)
+ if (val > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
/* Handle Index signals */
if (synapse->signal->id >= 16) {
- if (priv->preset_enable[count->id])
+ if (!priv->preset_enable[count->id])
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;
@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
- /* Only 24-bit values are supported */
- if (preset > 0xFFFFFF)
+ if (preset > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
*ceiling = priv->preset[count->id];
break;
default:
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- *ceiling = 0x1FFFFFF;
+ *ceiling = LS7267_CNTR_MAX;
break;
}
@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
- /* Only 24-bit values are supported */
- if (ceiling > 0xFFFFFF)
+ if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
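The 104-QUAD-8 cleanup names the 24-bit counter width once, as LS7267_CNTR_MAX = GENMASK(23, 0), and uses it for every range check instead of repeating 0xFFFFFF. A sketch of that pattern with a hypothetical EXAMPLE_CNTR_MAX:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_CNTR_MAX	GENMASK(23, 0)	/* 24-bit counter */

static int example_validate(u64 val)
{
	if (val > EXAMPLE_CNTR_MAX)
		return -ERANGE;

	return 0;
}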
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index d388bf26f4dc..4228be917038 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -29,6 +29,28 @@ config 104_QUAD_8
array module parameter. The interrupt line numbers for the devices may
be configured via the irq array module parameter.
+config FTM_QUADDEC
+ tristate "Flex Timer Module Quadrature decoder driver"
+ depends on SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ Select this option to enable the Flex Timer Quadrature decoder
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ftm-quaddec.
+
+config INTEL_QEP
+ tristate "Intel Quadrature Encoder Peripheral driver"
+ depends on X86
+ depends on PCI
+ help
+ Select this option to enable the Intel Quadrature Encoder Peripheral
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-qep.
+
config INTERRUPT_CNT
tristate "Interrupt counter driver"
depends on GPIOLIB
@@ -39,15 +61,28 @@ config INTERRUPT_CNT
To compile this driver as a module, choose M here: the
module will be called interrupt-cnt.
-config STM32_TIMER_CNT
- tristate "STM32 Timer encoder counter driver"
- depends on MFD_STM32_TIMERS || COMPILE_TEST
+config MICROCHIP_TCB_CAPTURE
+ tristate "Microchip Timer Counter Capture driver"
+ depends on SOC_AT91SAM9 || SOC_SAM_V7 || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ select REGMAP_MMIO
help
- Select this option to enable STM32 Timer quadrature encoder
- and counter driver.
+ Select this option to enable the Microchip Timer Counter Block
+ capture driver.
To compile this driver as a module, choose M here: the
- module will be called stm32-timer-cnt.
+ module will be called microchip-tcb-capture.
+
+config RZ_MTU3_CNT
+ tristate "Renesas RZ/G2L MTU3a counter driver"
+ depends on RZ_MTU3 || COMPILE_TEST
+ help
+	  Enable support for the MTU3a counter driver found on Renesas RZ/G2L
+	  and similar SoCs. This IP supports both 16-bit and 32-bit phase
+	  counting modes.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rz-mtu3-cnt.
config STM32_LPTIMER_CNT
tristate "STM32 LP Timer encoder counter driver"
@@ -59,47 +94,15 @@ config STM32_LPTIMER_CNT
To compile this driver as a module, choose M here: the
module will be called stm32-lptimer-cnt.
-config TI_EQEP
- tristate "TI eQEP counter driver"
- depends on (SOC_AM33XX || COMPILE_TEST)
- select REGMAP_MMIO
- help
- Select this option to enable the Texas Instruments Enhanced Quadrature
- Encoder Pulse (eQEP) counter driver.
-
- To compile this driver as a module, choose M here: the module will be
- called ti-eqep.
-
-config FTM_QUADDEC
- tristate "Flex Timer Module Quadrature decoder driver"
- depends on HAS_IOMEM && OF
- help
- Select this option to enable the Flex Timer Quadrature decoder
- driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ftm-quaddec.
-
-config MICROCHIP_TCB_CAPTURE
- tristate "Microchip Timer Counter Capture driver"
- depends on HAS_IOMEM && OF
- select REGMAP_MMIO
+config STM32_TIMER_CNT
+ tristate "STM32 Timer encoder counter driver"
+ depends on MFD_STM32_TIMERS || COMPILE_TEST
help
- Select this option to enable the Microchip Timer Counter Block
- capture driver.
+ Select this option to enable STM32 Timer quadrature encoder
+ and counter driver.
To compile this driver as a module, choose M here: the
- module will be called microchip-tcb-capture.
-
-config INTEL_QEP
- tristate "Intel Quadrature Encoder Peripheral driver"
- depends on PCI
- help
- Select this option to enable the Intel Quadrature Encoder Peripheral
- driver.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-qep.
+ module will be called stm32-timer-cnt.
config TI_ECAP_CAPTURE
tristate "TI eCAP capture driver"
@@ -116,4 +119,15 @@ config TI_ECAP_CAPTURE
To compile this driver as a module, choose M here: the module
will be called ti-ecap-capture.
+config TI_EQEP
+ tristate "TI eQEP counter driver"
+ depends on (SOC_AM33XX || COMPILE_TEST)
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Quadrature
+ Encoder Pulse (eQEP) counter driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ti-eqep.
+
endif # COUNTER
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index b9a369e0d4fc..933fdd50b3e4 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -8,6 +8,7 @@ counter-y := counter-core.o counter-sysfs.o counter-chrdev.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
obj-$(CONFIG_INTERRUPT_CNT) += interrupt-cnt.o
+obj-$(CONFIG_RZ_MTU3_CNT) += rz-mtu3-cnt.o
obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
obj-$(CONFIG_TI_EQEP) += ti-eqep.o
diff --git a/drivers/counter/rz-mtu3-cnt.c b/drivers/counter/rz-mtu3-cnt.c
new file mode 100644
index 000000000000..48c83933aa2f
--- /dev/null
+++ b/drivers/counter/rz-mtu3-cnt.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L MTU3a Counter driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/counter.h>
+#include <linux/mfd/rz-mtu3.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
+/*
+ * Register descriptions
+ * TSR: Timer Status Register
+ * TMDR1: Timer Mode Register 1
+ * TMDR3: Timer Mode Register 3
+ * TIOR: Timer I/O Control Register
+ * TCR: Timer Control Register
+ * TCNT: Timer Counter
+ * TGRA: Timer general register A
+ * TCNTLW: Timer Longword Counter
+ * TGRALW: Timer longword general register A
+ */
+
+#define RZ_MTU3_TSR_TCFD BIT(7) /* Count Direction Flag */
+
+#define RZ_MTU3_TMDR1_PH_CNT_MODE_1 (4) /* Phase counting mode 1 */
+#define RZ_MTU3_TMDR1_PH_CNT_MODE_2 (5) /* Phase counting mode 2 */
+#define RZ_MTU3_TMDR1_PH_CNT_MODE_3 (6) /* Phase counting mode 3 */
+#define RZ_MTU3_TMDR1_PH_CNT_MODE_4 (7) /* Phase counting mode 4 */
+#define RZ_MTU3_TMDR1_PH_CNT_MODE_5 (9) /* Phase counting mode 5 */
+#define RZ_MTU3_TMDR1_PH_CNT_MODE_MASK (0xf)
+
+/*
+ * LWA: MTU1/MTU2 Combination Longword Access Control
+ * 0: 16-bit, 1: 32-bit
+ */
+#define RZ_MTU3_TMDR3_LWA (0)
+
+/*
+ * PHCKSEL: External Input Phase Clock Select
+ * 0: MTCLKA and MTCLKB, 1: MTCLKC and MTCLKD
+ */
+#define RZ_MTU3_TMDR3_PHCKSEL (1)
+
+#define RZ_MTU3_16_BIT_MTU1_CH (0)
+#define RZ_MTU3_16_BIT_MTU2_CH (1)
+#define RZ_MTU3_32_BIT_CH (2)
+
+#define RZ_MTU3_TIOR_NO_OUTPUT (0) /* Output prohibited */
+#define RZ_MTU3_TIOR_IC_BOTH (10) /* Input capture at both edges */
+
+#define SIGNAL_A_ID (0)
+#define SIGNAL_B_ID (1)
+#define SIGNAL_C_ID (2)
+#define SIGNAL_D_ID (3)
+
+#define RZ_MTU3_MAX_HW_CNTR_CHANNELS (2)
+#define RZ_MTU3_MAX_LOGICAL_CNTR_CHANNELS (3)
+
+/**
+ * struct rz_mtu3_cnt - MTU3 counter private data
+ *
+ * @clk: MTU3 module clock
+ * @lock: Lock to prevent concurrent access for ceiling and count
+ * @ch: HW channels for the counters
+ * @count_is_enabled: Enabled state of Counter value channel
+ * @mtu_16bit_max: Cache for 16-bit counters
+ * @mtu_32bit_max: Cache for 32-bit counters
+ */
+struct rz_mtu3_cnt {
+ struct clk *clk;
+ struct mutex lock;
+ struct rz_mtu3_channel *ch;
+ bool count_is_enabled[RZ_MTU3_MAX_LOGICAL_CNTR_CHANNELS];
+ union {
+ u16 mtu_16bit_max[RZ_MTU3_MAX_HW_CNTR_CHANNELS];
+ u32 mtu_32bit_max;
+ };
+};
+
+static const enum counter_function rz_mtu3_count_functions[] = {
+ COUNTER_FUNCTION_QUADRATURE_X4,
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_QUADRATURE_X2_B,
+};
+
+static inline size_t rz_mtu3_get_hw_ch(const size_t id)
+{
+ return (id == RZ_MTU3_32_BIT_CH) ? 0 : id;
+}
+
+static inline struct rz_mtu3_channel *rz_mtu3_get_ch(struct counter_device *counter, int id)
+{
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ const size_t ch_id = rz_mtu3_get_hw_ch(id);
+
+ return &priv->ch[ch_id];
+}
+
+static bool rz_mtu3_is_counter_invalid(struct counter_device *counter, int id)
+{
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ unsigned long tmdr;
+
+ pm_runtime_get_sync(priv->ch->dev);
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+ pm_runtime_put(priv->ch->dev);
+
+ if (id == RZ_MTU3_32_BIT_CH && test_bit(RZ_MTU3_TMDR3_LWA, &tmdr))
+ return false;
+
+ if (id != RZ_MTU3_32_BIT_CH && !test_bit(RZ_MTU3_TMDR3_LWA, &tmdr))
+ return false;
+
+ return true;
+}
+
+static int rz_mtu3_lock_if_counter_is_valid(struct counter_device *counter,
+ struct rz_mtu3_channel *const ch,
+ struct rz_mtu3_cnt *const priv,
+ int id)
+{
+ mutex_lock(&priv->lock);
+
+ if (ch->is_busy && !priv->count_is_enabled[id]) {
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ if (rz_mtu3_is_counter_invalid(counter, id)) {
+ mutex_unlock(&priv->lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int rz_mtu3_lock_if_count_is_enabled(struct rz_mtu3_channel *const ch,
+ struct rz_mtu3_cnt *const priv,
+ int id)
+{
+ mutex_lock(&priv->lock);
+
+ if (ch->is_busy && !priv->count_is_enabled[id]) {
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rz_mtu3_count_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+
+ ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(ch->dev);
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ *val = rz_mtu3_32bit_ch_read(ch, RZ_MTU3_TCNTLW);
+ else
+ *val = rz_mtu3_16bit_ch_read(ch, RZ_MTU3_TCNT);
+ pm_runtime_put(ch->dev);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
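Every register accessor in this new driver brackets the MMIO access with pm_runtime_get_sync()/pm_runtime_put(), so the MTU3 block is only powered while a read or write is in flight. A sketch of that bracket outside this driver, with example_read_powered() being a hypothetical name:

#include <linux/io.h>
#include <linux/pm_runtime.h>

static u32 example_read_powered(struct device *dev, void __iomem *reg)
{
	u32 val;

	pm_runtime_get_sync(dev);	/* power the block up, synchronously */
	val = readl(reg);
	pm_runtime_put(dev);		/* drop the reference again */

	return val;
}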
+static int rz_mtu3_count_write(struct counter_device *counter,
+ struct counter_count *count, const u64 val)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+
+ ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(ch->dev);
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TCNTLW, val);
+ else
+ rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TCNT, val);
+ pm_runtime_put(ch->dev);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_count_function_read_helper(struct rz_mtu3_channel *const ch,
+ struct rz_mtu3_cnt *const priv,
+ enum counter_function *function)
+{
+ u8 timer_mode;
+
+ pm_runtime_get_sync(ch->dev);
+ timer_mode = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TMDR1);
+ pm_runtime_put(ch->dev);
+
+ switch (timer_mode & RZ_MTU3_TMDR1_PH_CNT_MODE_MASK) {
+ case RZ_MTU3_TMDR1_PH_CNT_MODE_1:
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
+ return 0;
+ case RZ_MTU3_TMDR1_PH_CNT_MODE_2:
+ *function = COUNTER_FUNCTION_PULSE_DIRECTION;
+ return 0;
+ case RZ_MTU3_TMDR1_PH_CNT_MODE_4:
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_B;
+ return 0;
+ default:
+ /*
+ * TODO:
+ * - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_3
+ * - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_5
+ */
+ return -EINVAL;
+ }
+}
+
+static int rz_mtu3_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+
+ ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ ret = rz_mtu3_count_function_read_helper(ch, priv, function);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rz_mtu3_count_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ u8 timer_mode;
+ int ret;
+
+ ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ switch (function) {
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ timer_mode = RZ_MTU3_TMDR1_PH_CNT_MODE_1;
+ break;
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
+ timer_mode = RZ_MTU3_TMDR1_PH_CNT_MODE_2;
+ break;
+ case COUNTER_FUNCTION_QUADRATURE_X2_B:
+ timer_mode = RZ_MTU3_TMDR1_PH_CNT_MODE_4;
+ break;
+ default:
+ /*
+ * TODO:
+ * - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_3
+ * - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_5
+ */
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(ch->dev);
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TMDR1, timer_mode);
+ pm_runtime_put(ch->dev);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_count_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_direction *direction)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+ u8 tsr;
+
+ ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(ch->dev);
+ tsr = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TSR);
+ pm_runtime_put(ch->dev);
+
+ *direction = (tsr & RZ_MTU3_TSR_TCFD) ?
+ COUNTER_COUNT_DIRECTION_FORWARD : COUNTER_COUNT_DIRECTION_BACKWARD;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *ceiling)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ const size_t ch_id = rz_mtu3_get_hw_ch(count->id);
+ int ret;
+
+ ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ switch (count->id) {
+ case RZ_MTU3_16_BIT_MTU1_CH:
+ case RZ_MTU3_16_BIT_MTU2_CH:
+ *ceiling = priv->mtu_16bit_max[ch_id];
+ break;
+ case RZ_MTU3_32_BIT_CH:
+ *ceiling = priv->mtu_32bit_max;
+ break;
+ default:
+ /* should never reach this path */
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+}
+
+static int rz_mtu3_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 ceiling)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ const size_t ch_id = rz_mtu3_get_hw_ch(count->id);
+ int ret;
+
+ ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ switch (count->id) {
+ case RZ_MTU3_16_BIT_MTU1_CH:
+ case RZ_MTU3_16_BIT_MTU2_CH:
+ if (ceiling > U16_MAX) {
+ mutex_unlock(&priv->lock);
+ return -ERANGE;
+ }
+ priv->mtu_16bit_max[ch_id] = ceiling;
+ break;
+ case RZ_MTU3_32_BIT_CH:
+ if (ceiling > U32_MAX) {
+ mutex_unlock(&priv->lock);
+ return -ERANGE;
+ }
+ priv->mtu_32bit_max = ceiling;
+ break;
+ default:
+ /* should never reach this path */
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(ch->dev);
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TGRALW, ceiling);
+ else
+ rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, ceiling);
+
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
+ pm_runtime_put(ch->dev);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static void rz_mtu3_32bit_cnt_setting(struct counter_device *counter)
+{
+ struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
+ struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
+
+ /* Phase counting mode 1 is used as default in initialization. */
+ rz_mtu3_8bit_ch_write(ch1, RZ_MTU3_TMDR1, RZ_MTU3_TMDR1_PH_CNT_MODE_1);
+
+ rz_mtu3_8bit_ch_write(ch1, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
+ rz_mtu3_8bit_ch_write(ch1, RZ_MTU3_TIOR, RZ_MTU3_TIOR_IC_BOTH);
+
+ rz_mtu3_enable(ch1);
+ rz_mtu3_enable(ch2);
+}
+
+static void rz_mtu3_16bit_cnt_setting(struct counter_device *counter, int id)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, id);
+
+ /* Phase counting mode 1 is used as default in initialization. */
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TMDR1, RZ_MTU3_TMDR1_PH_CNT_MODE_1);
+
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TIOR, RZ_MTU3_TIOR_NO_OUTPUT);
+ rz_mtu3_enable(ch);
+}
+
+static int rz_mtu3_initialize_counter(struct counter_device *counter, int id)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, id);
+ struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
+ struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
+
+ switch (id) {
+ case RZ_MTU3_16_BIT_MTU1_CH:
+ case RZ_MTU3_16_BIT_MTU2_CH:
+ if (!rz_mtu3_request_channel(ch))
+ return -EBUSY;
+
+ rz_mtu3_16bit_cnt_setting(counter, id);
+ return 0;
+ case RZ_MTU3_32_BIT_CH:
+ /*
+		 * 32-bit phase counting needs MTU1 and MTU2 to form a 32-bit
+		 * cascade counter.
+ */
+ if (!rz_mtu3_request_channel(ch1))
+ return -EBUSY;
+
+ if (!rz_mtu3_request_channel(ch2)) {
+ rz_mtu3_release_channel(ch1);
+ return -EBUSY;
+ }
+
+ rz_mtu3_32bit_cnt_setting(counter);
+ return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+}
+
+static void rz_mtu3_terminate_counter(struct counter_device *counter, int id)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, id);
+ struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
+ struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
+
+ if (id == RZ_MTU3_32_BIT_CH) {
+ rz_mtu3_release_channel(ch2);
+ rz_mtu3_release_channel(ch1);
+ rz_mtu3_disable(ch2);
+ rz_mtu3_disable(ch1);
+ } else {
+ rz_mtu3_release_channel(ch);
+ rz_mtu3_disable(ch);
+ }
+}
+
+static int rz_mtu3_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
+ struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+
+ ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ *enable = rz_mtu3_is_enabled(ch1) && rz_mtu3_is_enabled(ch2);
+ else
+ *enable = rz_mtu3_is_enabled(ch);
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
+{
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret = 0;
+
+ if (enable) {
+ pm_runtime_get_sync(ch->dev);
+ mutex_lock(&priv->lock);
+ ret = rz_mtu3_initialize_counter(counter, count->id);
+ if (ret == 0)
+ priv->count_is_enabled[count->id] = true;
+ mutex_unlock(&priv->lock);
+ } else {
+ mutex_lock(&priv->lock);
+ rz_mtu3_terminate_counter(counter, count->id);
+ priv->count_is_enabled[count->id] = false;
+ mutex_unlock(&priv->lock);
+ pm_runtime_put(ch->dev);
+ }
+
+ return ret;
+}
+
+static int rz_mtu3_lock_if_ch0_is_enabled(struct rz_mtu3_cnt *const priv)
+{
+ mutex_lock(&priv->lock);
+ if (priv->ch->is_busy && !(priv->count_is_enabled[RZ_MTU3_16_BIT_MTU1_CH] ||
+ priv->count_is_enabled[RZ_MTU3_32_BIT_CH])) {
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rz_mtu3_cascade_counts_enable_get(struct counter_device *counter,
+ u8 *cascade_enable)
+{
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ unsigned long tmdr;
+ int ret;
+
+ ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(priv->ch->dev);
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+ pm_runtime_put(priv->ch->dev);
+ *cascade_enable = test_bit(RZ_MTU3_TMDR3_LWA, &tmdr);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_cascade_counts_enable_set(struct counter_device *counter,
+ u8 cascade_enable)
+{
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+
+ ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(priv->ch->dev);
+ rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3,
+ RZ_MTU3_TMDR3_LWA, cascade_enable);
+ pm_runtime_put(priv->ch->dev);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_ext_input_phase_clock_select_get(struct counter_device *counter,
+ u32 *ext_input_phase_clock_select)
+{
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ unsigned long tmdr;
+ int ret;
+
+ ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(priv->ch->dev);
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+ pm_runtime_put(priv->ch->dev);
+ *ext_input_phase_clock_select = test_bit(RZ_MTU3_TMDR3_PHCKSEL, &tmdr);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rz_mtu3_ext_input_phase_clock_select_set(struct counter_device *counter,
+ u32 ext_input_phase_clock_select)
+{
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret;
+
+ ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(priv->ch->dev);
+ rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3,
+ RZ_MTU3_TMDR3_PHCKSEL,
+ ext_input_phase_clock_select);
+ pm_runtime_put(priv->ch->dev);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static struct counter_comp rz_mtu3_count_ext[] = {
+ COUNTER_COMP_DIRECTION(rz_mtu3_count_direction_read),
+ COUNTER_COMP_ENABLE(rz_mtu3_count_enable_read,
+ rz_mtu3_count_enable_write),
+ COUNTER_COMP_CEILING(rz_mtu3_count_ceiling_read,
+ rz_mtu3_count_ceiling_write),
+};
+
+static const enum counter_synapse_action rz_mtu3_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static int rz_mtu3_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
+{
+ const bool is_signal_ab = (synapse->signal->id == SIGNAL_A_ID) ||
+ (synapse->signal->id == SIGNAL_B_ID);
+ struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ enum counter_function function;
+ bool mtclkc_mtclkd;
+ unsigned long tmdr;
+ int ret;
+
+ ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
+ if (ret)
+ return ret;
+
+ ret = rz_mtu3_count_function_read_helper(ch, priv, &function);
+ if (ret) {
+ mutex_unlock(&priv->lock);
+ return ret;
+ }
+
+ /* Default action mode */
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+
+ if (count->id != RZ_MTU3_16_BIT_MTU1_CH) {
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+ mtclkc_mtclkd = test_bit(RZ_MTU3_TMDR3_PHCKSEL, &tmdr);
+ if ((mtclkc_mtclkd && is_signal_ab) ||
+ (!mtclkc_mtclkd && !is_signal_ab)) {
+ mutex_unlock(&priv->lock);
+ return 0;
+ }
+ }
+
+ switch (function) {
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
+ /*
+		 * Rising edges on signal A (signal C) update the respective
+		 * count. The input level of signal B (signal D) determines the
+		 * direction.
+ */
+ if (synapse->signal->id == SIGNAL_A_ID ||
+ synapse->signal->id == SIGNAL_C_ID)
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ case COUNTER_FUNCTION_QUADRATURE_X2_B:
+ /*
+ * Any state transition on quadrature pair signal B (signal D)
+ * updates the respective count.
+ */
+ if (synapse->signal->id == SIGNAL_B_ID ||
+ synapse->signal->id == SIGNAL_D_ID)
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+		/* counts up/down on both edges of the A (C) and B (D) signals */
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ default:
+ /* should never reach this path */
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static const struct counter_ops rz_mtu3_cnt_ops = {
+ .count_read = rz_mtu3_count_read,
+ .count_write = rz_mtu3_count_write,
+ .function_read = rz_mtu3_count_function_read,
+ .function_write = rz_mtu3_count_function_write,
+ .action_read = rz_mtu3_action_read,
+};
+
+#define RZ_MTU3_PHASE_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+}
+
+static struct counter_signal rz_mtu3_signals[] = {
+ RZ_MTU3_PHASE_SIGNAL(SIGNAL_A_ID, "MTU1 MTCLKA"),
+ RZ_MTU3_PHASE_SIGNAL(SIGNAL_B_ID, "MTU1 MTCLKB"),
+ RZ_MTU3_PHASE_SIGNAL(SIGNAL_C_ID, "MTU2 MTCLKC"),
+ RZ_MTU3_PHASE_SIGNAL(SIGNAL_D_ID, "MTU2 MTCLKD"),
+};
+
+static struct counter_synapse rz_mtu3_mtu1_count_synapses[] = {
+ {
+ .actions_list = rz_mtu3_synapse_actions,
+ .num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
+ .signal = rz_mtu3_signals,
+ },
+ {
+ .actions_list = rz_mtu3_synapse_actions,
+ .num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
+ .signal = rz_mtu3_signals + 1,
+ }
+};
+
+static struct counter_synapse rz_mtu3_mtu2_count_synapses[] = {
+ {
+ .actions_list = rz_mtu3_synapse_actions,
+ .num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
+ .signal = rz_mtu3_signals,
+ },
+ {
+ .actions_list = rz_mtu3_synapse_actions,
+ .num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
+ .signal = rz_mtu3_signals + 1,
+ },
+ {
+ .actions_list = rz_mtu3_synapse_actions,
+ .num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
+ .signal = rz_mtu3_signals + 2,
+ },
+ {
+ .actions_list = rz_mtu3_synapse_actions,
+ .num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
+ .signal = rz_mtu3_signals + 3,
+ }
+};
+
+static struct counter_count rz_mtu3_counts[] = {
+ {
+ .id = RZ_MTU3_16_BIT_MTU1_CH,
+ .name = "Channel 1 Count",
+ .functions_list = rz_mtu3_count_functions,
+ .num_functions = ARRAY_SIZE(rz_mtu3_count_functions),
+ .synapses = rz_mtu3_mtu1_count_synapses,
+ .num_synapses = ARRAY_SIZE(rz_mtu3_mtu1_count_synapses),
+ .ext = rz_mtu3_count_ext,
+ .num_ext = ARRAY_SIZE(rz_mtu3_count_ext),
+ },
+ {
+ .id = RZ_MTU3_16_BIT_MTU2_CH,
+ .name = "Channel 2 Count",
+ .functions_list = rz_mtu3_count_functions,
+ .num_functions = ARRAY_SIZE(rz_mtu3_count_functions),
+ .synapses = rz_mtu3_mtu2_count_synapses,
+ .num_synapses = ARRAY_SIZE(rz_mtu3_mtu2_count_synapses),
+ .ext = rz_mtu3_count_ext,
+ .num_ext = ARRAY_SIZE(rz_mtu3_count_ext),
+ },
+ {
+ .id = RZ_MTU3_32_BIT_CH,
+ .name = "Channel 1 and 2 (cascaded) Count",
+ .functions_list = rz_mtu3_count_functions,
+ .num_functions = ARRAY_SIZE(rz_mtu3_count_functions),
+ .synapses = rz_mtu3_mtu2_count_synapses,
+ .num_synapses = ARRAY_SIZE(rz_mtu3_mtu2_count_synapses),
+ .ext = rz_mtu3_count_ext,
+ .num_ext = ARRAY_SIZE(rz_mtu3_count_ext),
+ }
+};
+
+static const char *const rz_mtu3_ext_input_phase_clock_select[] = {
+ "MTCLKA-MTCLKB",
+ "MTCLKC-MTCLKD",
+};
+
+static DEFINE_COUNTER_ENUM(rz_mtu3_ext_input_phase_clock_select_enum,
+ rz_mtu3_ext_input_phase_clock_select);
+
+static struct counter_comp rz_mtu3_device_ext[] = {
+ COUNTER_COMP_DEVICE_BOOL("cascade_counts_enable",
+ rz_mtu3_cascade_counts_enable_get,
+ rz_mtu3_cascade_counts_enable_set),
+ COUNTER_COMP_DEVICE_ENUM("external_input_phase_clock_select",
+ rz_mtu3_ext_input_phase_clock_select_get,
+ rz_mtu3_ext_input_phase_clock_select_set,
+ rz_mtu3_ext_input_phase_clock_select_enum),
+};
+
+static int rz_mtu3_cnt_pm_runtime_suspend(struct device *dev)
+{
+ struct clk *const clk = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(clk);
+
+ return 0;
+}
+
+static int rz_mtu3_cnt_pm_runtime_resume(struct device *dev)
+{
+ struct clk *const clk = dev_get_drvdata(dev);
+
+ clk_prepare_enable(clk);
+
+ return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(rz_mtu3_cnt_pm_ops,
+ rz_mtu3_cnt_pm_runtime_suspend,
+ rz_mtu3_cnt_pm_runtime_resume, NULL);
+
+static void rz_mtu3_cnt_pm_disable(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+}
+
+static int rz_mtu3_cnt_probe(struct platform_device *pdev)
+{
+ struct rz_mtu3 *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct counter_device *counter;
+ struct rz_mtu3_channel *ch;
+ struct rz_mtu3_cnt *priv;
+ unsigned int i;
+ int ret;
+
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
+ return -ENOMEM;
+
+ priv = counter_priv(counter);
+ priv->clk = ddata->clk;
+ priv->mtu_32bit_max = U32_MAX;
+ priv->ch = &ddata->channels[RZ_MTU3_CHAN_1];
+ ch = &priv->ch[0];
+ for (i = 0; i < RZ_MTU3_MAX_HW_CNTR_CHANNELS; i++) {
+ ch->dev = dev;
+ priv->mtu_16bit_max[i] = U16_MAX;
+ ch++;
+ }
+
+ mutex_init(&priv->lock);
+ platform_set_drvdata(pdev, priv->clk);
+ clk_prepare_enable(priv->clk);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = devm_add_action_or_reset(&pdev->dev, rz_mtu3_cnt_pm_disable, dev);
+ if (ret < 0)
+ goto disable_clock;
+
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &rz_mtu3_cnt_ops;
+ counter->counts = rz_mtu3_counts;
+ counter->num_counts = ARRAY_SIZE(rz_mtu3_counts);
+ counter->signals = rz_mtu3_signals;
+ counter->num_signals = ARRAY_SIZE(rz_mtu3_signals);
+ counter->ext = rz_mtu3_device_ext;
+ counter->num_ext = ARRAY_SIZE(rz_mtu3_device_ext);
+
+ /* Register Counter device */
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to add counter\n");
+ goto disable_clock;
+ }
+
+ return 0;
+
+disable_clock:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static struct platform_driver rz_mtu3_cnt_driver = {
+ .probe = rz_mtu3_cnt_probe,
+ .driver = {
+ .name = "rz-mtu3-counter",
+ .pm = pm_ptr(&rz_mtu3_cnt_pm_ops),
+ },
+};
+module_platform_driver(rz_mtu3_cnt_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_ALIAS("platform:rz-mtu3-counter");
+MODULE_DESCRIPTION("Renesas RZ/G2L MTU3a counter driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(COUNTER);
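Note: the driver above registers with the generic Counter subsystem, so the MTU1/MTU2 counts and the cascaded 32-bit count become visible through the counter sysfs ABI. A minimal user-space sketch follows, assuming the device probes as counter0 and that the cascaded Count is exposed as count2; both names are assumptions for illustration, not taken from the patch.

/*
 * Hedged sketch: enable the cascaded 32-bit count on the rz-mtu3 counter
 * and read it back through the generic Counter sysfs interface.
 * The "counter0" instance name and the count2 index are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char buf[32];
	FILE *f;

	/* Switch MTU1/MTU2 into 32-bit cascade mode (device-level ext attribute). */
	write_attr("/sys/bus/counter/devices/counter0/cascade_counts_enable", "1");
	/* Enable the cascaded count (Count id RZ_MTU3_32_BIT_CH, assumed count2). */
	write_attr("/sys/bus/counter/devices/counter0/count2/enable", "1");

	f = fopen("/sys/bus/counter/devices/counter0/count2/count", "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror("count");
		return EXIT_FAILURE;
	}
	printf("cascaded count: %s", buf);
	fclose(f);
	return 0;
}

The device-level cascade_counts_enable attribute corresponds to the RZ_MTU3_TMDR3_LWA bit toggled by rz_mtu3_cascade_counts_enable_set() above.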
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2a84fc63371e..2c839bd2b051 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -3,7 +3,6 @@ menu "CPU Frequency scaling"
config CPU_FREQ
bool "CPU Frequency scaling"
- select SRCU
help
CPU Frequency scaling allows you to change the clock speed of
CPUs on the fly. This is a nice method to save power, because
@@ -37,7 +36,7 @@ config CPU_FREQ_STAT
choice
prompt "Default CPUFreq governor"
- default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1110_CPUFREQ
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if ARM64 || ARM
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -271,15 +270,6 @@ config LOONGSON2_CPUFREQ
Loongson2F and its successors support this feature.
If in doubt, say N.
-
-config LOONGSON1_CPUFREQ
- tristate "Loongson1 CPUFreq Driver"
- depends on LOONGSON1_LS1B
- help
- This option adds a CPUFreq driver for loongson1 processors which
- support software configurable cpu frequency.
-
- If in doubt, say N.
endif
if SPARC64
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0a0352d8fa45..123b4bbfcfee 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -95,7 +95,7 @@ config ARM_BRCMSTB_AVS_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
- a standard CPUfreq interface to to the firmware.
+ a standard CPUfreq interface to the firmware.
Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
@@ -189,84 +189,6 @@ config ARM_RASPBERRYPI_CPUFREQ
If in doubt, say N.
-config ARM_S3C_CPUFREQ
- bool
- help
- Internal configuration node for common cpufreq on Samsung SoC
-
-config ARM_S3C24XX_CPUFREQ
- bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
- depends on ARCH_S3C24XX
- select ARM_S3C_CPUFREQ
- help
- This enables the CPUfreq driver for the Samsung S3C24XX family
- of CPUs.
-
- For details, take a look at <file:Documentation/cpu-freq>.
-
- If in doubt, say N.
-
-config ARM_S3C24XX_CPUFREQ_DEBUG
- bool "Debug CPUfreq Samsung driver core"
- depends on ARM_S3C24XX_CPUFREQ
- help
- Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
-
-config ARM_S3C24XX_CPUFREQ_IODEBUG
- bool "Debug CPUfreq Samsung driver IO timing"
- depends on ARM_S3C24XX_CPUFREQ
- help
- Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
-
-config ARM_S3C24XX_CPUFREQ_DEBUGFS
- bool "Export debugfs for CPUFreq"
- depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
- help
- Export status information via debugfs.
-
-config ARM_S3C2410_CPUFREQ
- bool
- depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
- help
- CPU Frequency scaling support for S3C2410
-
-config ARM_S3C2412_CPUFREQ
- bool
- depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
- default y
- select S3C2412_IOTIMING
- help
- CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
-
-config ARM_S3C2416_CPUFREQ
- bool "S3C2416 CPU Frequency scaling support"
- depends on CPU_S3C2416
- help
- This adds the CPUFreq driver for the Samsung S3C2416 and
- S3C2450 SoC. The S3C2416 supports changing the rate of the
- armdiv clock source and also entering a so called dynamic
- voltage scaling mode in which it is possible to reduce the
- core voltage of the CPU.
-
- If in doubt, say N.
-
-config ARM_S3C2416_CPUFREQ_VCORESCALE
- bool "Allow voltage scaling for S3C2416 arm core"
- depends on ARM_S3C2416_CPUFREQ && REGULATOR
- help
- Enable CPU voltage scaling when entering the dvs mode.
- It uses information gathered through existing hardware and
- tests but not documented in any datasheet.
-
- If in doubt, say N.
-
-config ARM_S3C2440_CPUFREQ
- bool "S3C2440/S3C2442 CPU Frequency scaling support"
- depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
- default y
- help
- CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
-
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
@@ -286,9 +208,6 @@ config ARM_S5PV210_CPUFREQ
If in doubt, say N.
-config ARM_SA1100_CPUFREQ
- bool
-
config ARM_SA1110_CPUFREQ
bool
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 32a7029e25ed..ef8510774913 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -71,15 +71,8 @@ obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
-obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
-obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
-obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
-obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
-obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
@@ -111,7 +104,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
-obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 78adfb2ffff6..29904395e95f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -20,6 +20,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
+#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -965,7 +966,7 @@ static void __init acpi_cpufreq_boost_init(void)
acpi_cpufreq_driver.boost_enabled = boost_state(0);
}
-static int __init acpi_cpufreq_init(void)
+static int __init acpi_cpufreq_probe(struct platform_device *pdev)
{
int ret;
@@ -1010,13 +1011,32 @@ static int __init acpi_cpufreq_init(void)
return ret;
}
-static void __exit acpi_cpufreq_exit(void)
+static int acpi_cpufreq_remove(struct platform_device *pdev)
{
pr_debug("%s\n", __func__);
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
+
+ return 0;
+}
+
+static struct platform_driver acpi_cpufreq_platdrv = {
+ .driver = {
+ .name = "acpi-cpufreq",
+ },
+ .remove = acpi_cpufreq_remove,
+};
+
+static int __init acpi_cpufreq_init(void)
+{
+ return platform_driver_probe(&acpi_cpufreq_platdrv, acpi_cpufreq_probe);
+}
+
+static void __exit acpi_cpufreq_exit(void)
+{
+ platform_driver_unregister(&acpi_cpufreq_platdrv);
}
module_param(acpi_pstate_strict, uint, 0644);
@@ -1027,18 +1047,4 @@ MODULE_PARM_DESC(acpi_pstate_strict,
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
-static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
- X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
- X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
-
-static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
- {ACPI_PROCESSOR_OBJECT_HID, },
- {ACPI_PROCESSOR_DEVICE_HID, },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, processor_device_ids);
-
-MODULE_ALIAS("acpi");
+MODULE_ALIAS("platform:acpi-cpufreq");
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 204e39006dda..5a3d4aa0f45a 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -59,8 +59,172 @@
* we disable it by default to go acpi-cpufreq on these processors and add a
* module parameter to be able to enable it manually for debugging.
*/
+static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
-static int cppc_load __initdata;
+static struct cpufreq_driver amd_pstate_epp_driver;
+static int cppc_state = AMD_PSTATE_DISABLE;
+
+/*
+ * AMD Energy Preference Performance (EPP)
+ * The EPP is used in the CCLK DPM controller to drive
+ * the frequency at which a core operates during
+ * short periods of activity. EPP values are utilized for
+ * different OS profiles (balanced, performance, power savings).
+ * The display strings corresponding to each EPP index in
+ * energy_perf_strings[] are:
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
+static const char * const energy_perf_strings[] = {
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
+ NULL
+};
+
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0,
+ [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
+ };
+
+typedef int (*cppc_mode_transition_fn)(int);
+
+static inline int get_mode_idx_from_str(const char *str, size_t size)
+{
+ int i;
+
+	for (i = 0; i < AMD_PSTATE_MAX; i++) {
+ if (!strncmp(str, amd_pstate_mode_string[i], size))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static DEFINE_MUTEX(amd_pstate_limits_lock);
+static DEFINE_MUTEX(amd_pstate_driver_lock);
+
+static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+{
+ u64 epp;
+ int ret;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (!cppc_req_cached) {
+ epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ &cppc_req_cached);
+ if (epp)
+ return epp;
+ }
+ epp = (cppc_req_cached >> 24) & 0xFF;
+ } else {
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return -EIO;
+ }
+ }
+
+ return (s16)(epp & 0xff);
+}
+
+static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ epp = amd_pstate_get_epp(cpudata, 0);
+ if (epp < 0)
+ return epp;
+
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ index = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ index = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ index = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ index = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (!ret)
+ cpudata->epp_cached = epp;
+ } else {
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ cpudata->epp_cached = epp;
+ }
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
+ int pref_index)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index) {
+ pr_debug("EPP pref_index is invalid\n");
+ return -EINVAL;
+ }
+
+ if (epp == -EINVAL)
+ epp = epp_values[pref_index];
+
+ if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(cpudata, epp);
+
+ return ret;
+}
static inline int pstate_enable(bool enable)
{
@@ -70,11 +234,21 @@ static inline int pstate_enable(bool enable)
static int cppc_enable(bool enable)
{
int cpu, ret = 0;
+ struct cppc_perf_ctrls perf_ctrls;
for_each_present_cpu(cpu) {
ret = cppc_set_enable(cpu, enable);
if (ret)
return ret;
+
+ /* Enable autonomous mode for EPP */
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ /* Set desired perf as zero to allow EPP firmware control */
+ perf_ctrls.desired_perf = 0;
+ ret = cppc_set_perf(cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -135,7 +309,22 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
- return 0;
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
+
+ ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+ if (ret) {
+ pr_warn("failed to get auto_sel, ret: %d\n", ret);
+ return 0;
+ }
+
+ ret = cppc_set_auto_sel(cpudata->cpu,
+ (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
+
+ if (ret)
+ pr_warn("failed to set auto_sel, ret: %d\n", ret);
+
+ return ret;
}
DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
@@ -212,12 +401,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
}
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+ u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
{
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+
+ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
+ min_perf = des_perf;
+ des_perf = 0;
+ }
+
value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf);
@@ -272,7 +467,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
cpufreq_freq_transition_begin(policy, &freqs);
amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, false);
+ max_perf, false, policy->governor->flags);
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
@@ -306,7 +501,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (max_perf < min_perf)
max_perf = min_perf;
- amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
+ policy->governor->flags);
+ cpufreq_cpu_put(policy);
}
static int amd_get_min_freq(struct amd_cpudata *cpudata)
@@ -417,7 +614,7 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
return;
cpudata->boost_supported = true;
- amd_pstate_driver.boost_enabled = true;
+ current_pstate_driver->boost_enabled = true;
}
static void amd_perf_ctl_reset(unsigned int cpu)
@@ -500,6 +697,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
amd_pstate_boost_init(cpudata);
+ if (!current_pstate_driver->adjust_perf)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
return 0;
@@ -560,7 +759,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
if (max_freq < 0)
return max_freq;
- return sprintf(&buf[0], "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n", max_freq);
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
@@ -573,7 +772,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
if (freq < 0)
return freq;
- return sprintf(&buf[0], "%u\n", freq);
+ return sysfs_emit(buf, "%u\n", freq);
}
/*
@@ -588,13 +787,208 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
perf = READ_ONCE(cpudata->highest_perf);
- return sprintf(&buf[0], "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int offset = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+
+ sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+}
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ char str_preference[21];
+ ssize_t ret;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return -EINVAL;
+
+ mutex_lock(&amd_pstate_limits_lock);
+ ret = amd_pstate_set_energy_pref_index(cpudata, ret);
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ return ret ?: count;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int preference;
+
+ preference = amd_pstate_get_energy_pref_index(cpudata);
+ if (preference < 0)
+ return preference;
+
+ return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+static void amd_pstate_driver_cleanup(void)
+{
+ amd_pstate_enable(false);
+ cppc_state = AMD_PSTATE_DISABLE;
+ current_pstate_driver = NULL;
+}
+
+static int amd_pstate_register_driver(int mode)
+{
+ int ret;
+
+ if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+ else if (mode == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+ else
+ return -EINVAL;
+
+ cppc_state = mode;
+ ret = cpufreq_register_driver(current_pstate_driver);
+ if (ret) {
+ amd_pstate_driver_cleanup();
+ return ret;
+ }
+ return 0;
+}
+
+static int amd_pstate_unregister_driver(int dummy)
+{
+ cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_driver_cleanup();
+ return 0;
+}
+
+static int amd_pstate_change_mode_without_dvr_change(int mode)
+{
+ int cpu = 0;
+
+ cppc_state = mode;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
+
+ for_each_present_cpu(cpu) {
+ cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
+ }
+
+ return 0;
+}
+
+static int amd_pstate_change_driver_mode(int mode)
+{
+ int ret;
+
+ ret = amd_pstate_unregister_driver(0);
+ if (ret)
+ return ret;
+
+ ret = amd_pstate_register_driver(mode);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
+ [AMD_PSTATE_DISABLE] = {
+ [AMD_PSTATE_DISABLE] = NULL,
+ [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver,
+ [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver,
+ [AMD_PSTATE_GUIDED] = amd_pstate_register_driver,
+ },
+ [AMD_PSTATE_PASSIVE] = {
+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
+ [AMD_PSTATE_PASSIVE] = NULL,
+ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
+ [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change,
+ },
+ [AMD_PSTATE_ACTIVE] = {
+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
+ [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode,
+ [AMD_PSTATE_ACTIVE] = NULL,
+ [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode,
+ },
+ [AMD_PSTATE_GUIDED] = {
+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
+ [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change,
+ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
+ [AMD_PSTATE_GUIDED] = NULL,
+ },
+};
+
+static ssize_t amd_pstate_show_status(char *buf)
+{
+ if (!current_pstate_driver)
+ return sysfs_emit(buf, "disable\n");
+
+ return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
+}
+
+static int amd_pstate_update_status(const char *buf, size_t size)
+{
+ int mode_idx;
+
+ if (size > strlen("passive") || size < strlen("active"))
+ return -EINVAL;
+
+ mode_idx = get_mode_idx_from_str(buf, size);
+
+ if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
+ return -EINVAL;
+
+ if (mode_state_machine[cppc_state][mode_idx])
+ return mode_state_machine[cppc_state][mode_idx](mode_idx);
+
+ return 0;
+}
+
+static ssize_t show_status(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_show_status(buf);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_update_status(buf, p ? p - buf : count);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret < 0 ? ret : count;
}
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_rw(energy_performance_preference);
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+define_one_global_rw(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@@ -603,6 +997,314 @@ static struct freq_attr *amd_pstate_attr[] = {
NULL,
};
+static struct freq_attr *amd_pstate_epp_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
+
+static struct attribute *pstate_global_attributes[] = {
+ &status.attr,
+ NULL
+};
+
+static const struct attribute_group amd_pstate_global_attr_group = {
+ .name = "amd_pstate",
+ .attrs = pstate_global_attributes,
+};
+
+static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+{
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ struct amd_cpudata *cpudata;
+ struct device *dev;
+ u64 value;
+
+ /*
+	 * Resetting PERF_CTL_MSR will put the CPU at the P0 frequency,
+	 * which is ideal for the initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+ cpudata->epp_policy = 0;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = amd_get_min_freq(cpudata);
+ max_freq = amd_get_max_freq(cpudata);
+ nominal_freq = amd_get_nominal_freq(cpudata);
+ lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
+ dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
+ min_freq, max_freq);
+ ret = -EINVAL;
+ goto free_cpudata1;
+ }
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+ policy->driver_data = cpudata;
+
+ cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ policy->fast_switch_possible = true;
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_cap1_cached, value);
+ }
+ amd_pstate_boost_init(cpudata);
+
+ return 0;
+
+free_cpudata1:
+ kfree(cpudata);
+ return ret;
+}
+
+static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+{
+ pr_debug("CPU %d exiting\n", policy->cpu);
+ policy->fast_switch_possible = false;
+ return 0;
+}
+
+static void amd_pstate_epp_init(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u32 max_perf, min_perf;
+ u64 value;
+ s16 epp;
+
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_perf = max_perf;
+
+ /* Initial min/max values for CPPC Performance Controls Register */
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
+
+	/* The CPPC EPP feature requires the desired perf field to be set to zero */
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(0);
+
+ if (cpudata->epp_policy == cpudata->policy)
+ goto skip_epp;
+
+ cpudata->epp_policy = cpudata->policy;
+
+ /* Get BIOS pre-defined epp value */
+ epp = amd_pstate_get_epp(cpudata, value);
+ if (epp < 0) {
+		/*
+		 * This return value can only be negative for shared-memory
+		 * systems where EPP register read/write is not supported.
+ */
+ goto skip_epp;
+ }
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ epp = 0;
+
+ /* Set initial EPP value */
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_set_epp(cpudata, epp);
+skip_epp:
+ cpufreq_cpu_put(policy);
+}
+
+static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+ cpudata->policy = policy->policy;
+
+ amd_pstate_epp_init(policy->cpu);
+
+ return 0;
+}
+
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+ u64 value, max_perf;
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+ pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ amd_pstate_epp_reenable(cpudata);
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ int min_perf;
+ u64 value;
+
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ mutex_lock(&amd_pstate_limits_lock);
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+ /* Set max perf same as min perf */
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(min_perf);
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.desired_perf = 0;
+ perf_ctrls.max_perf = min_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+ mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+ if (cpudata->suspended)
+ return 0;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ amd_pstate_epp_offline(policy);
+
+ return 0;
+}
+
+static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
+ return 0;
+}
+
+static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ /* avoid suspending when EPP is not enabled */
+ if (cppc_state != AMD_PSTATE_ACTIVE)
+ return 0;
+
+	/* set this flag to avoid setting the core offline */
+ cpudata->suspended = true;
+
+	/* disable CPPC in low-level firmware */
+ ret = amd_pstate_enable(false);
+ if (ret)
+ pr_err("failed to suspend, return %d\n", ret);
+
+ return 0;
+}
+
+static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->suspended) {
+ mutex_lock(&amd_pstate_limits_lock);
+
+		/* enable amd-pstate when resuming from the suspend state */
+ amd_pstate_epp_reenable(cpudata);
+
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
@@ -616,8 +1318,23 @@ static struct cpufreq_driver amd_pstate_driver = {
.attr = amd_pstate_attr,
};
+static struct cpufreq_driver amd_pstate_epp_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = amd_pstate_epp_verify_policy,
+ .setpolicy = amd_pstate_epp_set_policy,
+ .init = amd_pstate_epp_cpu_init,
+ .exit = amd_pstate_epp_cpu_exit,
+ .offline = amd_pstate_epp_cpu_offline,
+ .online = amd_pstate_epp_cpu_online,
+ .suspend = amd_pstate_epp_suspend,
+ .resume = amd_pstate_epp_resume,
+ .name = "amd_pstate_epp",
+ .attr = amd_pstate_epp_attr,
+};
+
static int __init amd_pstate_init(void)
{
+ struct device *dev_root;
int ret;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -625,10 +1342,10 @@ static int __init amd_pstate_init(void)
/*
* by default the pstate driver is disabled to load
* enable the amd_pstate passive mode driver explicitly
- * with amd_pstate=passive in kernel command line
+ * with amd_pstate=passive or other modes in kernel command line
*/
- if (!cppc_load) {
- pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+ if (cppc_state == AMD_PSTATE_DISABLE) {
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -644,7 +1361,8 @@ static int __init amd_pstate_init(void)
/* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+ if (cppc_state != AMD_PSTATE_ACTIVE)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_enable, cppc_enable);
@@ -655,34 +1373,60 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
- pr_err("failed to enable amd-pstate with return %d\n", ret);
+ pr_err("failed to enable with return %d\n", ret);
return ret;
}
- ret = cpufreq_register_driver(&amd_pstate_driver);
+ ret = cpufreq_register_driver(current_pstate_driver);
if (ret)
- pr_err("failed to register amd_pstate_driver with return %d\n",
- ret);
+ pr_err("failed to register with return %d\n", ret);
+
+ dev_root = bus_get_dev_root(&cpu_subsys);
+ if (dev_root) {
+ ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
+ put_device(dev_root);
+ if (ret) {
+ pr_err("sysfs attribute export failed with error %d.\n", ret);
+ goto global_attr_free;
+ }
+ }
+
+ return ret;
+global_attr_free:
+ cpufreq_unregister_driver(current_pstate_driver);
return ret;
}
device_initcall(amd_pstate_init);
static int __init amd_pstate_param(char *str)
{
+ size_t size;
+ int mode_idx;
+
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable")) {
- cppc_load = 0;
- pr_info("driver is explicitly disabled\n");
- } else if (!strcmp(str, "passive"))
- cppc_load = 1;
+ size = strlen(str);
+ mode_idx = get_mode_idx_from_str(str, size);
- return 0;
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
}
early_param("amd_pstate", amd_pstate_param);
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index d1801281cdd9..021f423705e1 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -189,8 +189,8 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
*info = match->data;
*reg_base = of_iomap(args.np, 0);
- if (IS_ERR(*reg_base))
- return PTR_ERR(*reg_base);
+ if (!*reg_base)
+ return -ENOMEM;
return 0;
}
@@ -280,6 +280,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
policy->fast_switch_possible = true;
+ policy->suspend_freq = freq_table[0].frequency;
if (policy_has_boost_freq(policy)) {
ret = cpufreq_enable_boost_support();
@@ -321,7 +322,6 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.get = apple_soc_cpufreq_get_rate,
.init = apple_soc_cpufreq_init,
.exit = apple_soc_cpufreq_exit,
@@ -329,6 +329,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.fast_switch = apple_soc_cpufreq_fast_switch,
.register_em = cpufreq_register_em_with_opp,
.attr = apple_soc_cpufreq_hw_attr,
+ .suspend = cpufreq_generic_suspend,
};
static int __init apple_soc_cpufreq_module_init(void)
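The apple-soc-cpufreq hunks set policy->suspend_freq to the first frequency-table entry and wire .suspend to cpufreq_generic_suspend, so the core pins the cluster to that frequency across system suspend. Below is a simplified sketch of the helper's behaviour, not the verbatim core implementation.

/*
 * Hedged sketch of cpufreq_generic_suspend() behaviour: if the driver set
 * policy->suspend_freq, the core retargets the policy to it on suspend.
 * Simplified; error reporting and debug prints are omitted.
 */
static int example_generic_suspend(struct cpufreq_policy *policy)
{
	if (!policy->suspend_freq)
		return 0;	/* driver did not request a suspend frequency */

	/* apple-soc-cpufreq sets this to freq_table[0].frequency above */
	return __cpufreq_driver_target(policy, policy->suspend_freq,
				       CPUFREQ_RELATION_H);
}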
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index c10fc33b29b1..b74289a95a17 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -ENODEV;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
dev_err(cpu_dev, "Cannot get clock for CPU0\n");
return PTR_ERR(clk);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4153150e20db..ffea6402189d 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -751,10 +751,7 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- int ret;
-
- ret = cpufreq_unregister_driver(&brcm_avs_driver);
- WARN_ON(ret);
+ cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 432dfb4e8027..022e3555407c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu);
- min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+ perf_caps->highest_perf);
if ((min_cap == 0) || (max_cap < min_cap))
return 0;
return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
- min_cap = div_u64(max_cap * perf_caps->lowest_perf,
- perf_caps->highest_perf);
-
- perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+ perf_caps->highest_perf);
+ perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+ max_cap);
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
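The cppc_cpufreq hunks above cast max_cap to u64 before the multiplication so the intermediate product cannot wrap in 32 bits when the CPPC perf values are large. A small stand-alone illustration with made-up numbers:

/*
 * Hedged illustration of why the (u64) cast matters: with 32-bit operands
 * the intermediate product max_cap * lowest_perf can wrap before the
 * division. Values below are made up for demonstration only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_cap = 1024;          /* arch_scale_cpu_capacity() */
	uint32_t lowest_perf = 0x500000;  /* hypothetical CPPC lowest_perf */
	uint32_t highest_perf = 0xa00000; /* hypothetical CPPC highest_perf */

	uint32_t wrong = (uint32_t)(max_cap * lowest_perf) / highest_perf;
	uint64_t right = ((uint64_t)max_cap * lowest_perf) / highest_perf;

	printf("32-bit intermediate: %u\n", wrong);   /* wrapped, prints 102 */
	printf("64-bit intermediate: %llu\n",
	       (unsigned long long)right);            /* correct, prints 512 */
	return 0;
}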
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 8ab672883043..338cf6cc6596 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -6,7 +6,6 @@
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "cpufreq-dt.h"
@@ -137,6 +136,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
+ { .compatible = "nvidia,tegra234", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
@@ -150,6 +150,8 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", },
+ { .compatible = "qcom,sm6375", },
+ { .compatible = "qcom,sm7225", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
@@ -177,7 +179,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
struct device_node *np = of_cpu_device_node_get(0);
bool ret = false;
- if (of_get_property(np, "operating-points-v2", NULL))
+ if (of_property_present(np, "operating-points-v2"))
ret = true;
of_node_put(np);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7e56a42750ea..6b52ebe5a890 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -73,6 +73,11 @@ static inline bool has_target(void)
return cpufreq_driver->target_index || cpufreq_driver->target;
}
+bool has_target_index(void)
+{
+ return !!cpufreq_driver->target_index;
+}
+
/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
@@ -725,9 +730,9 @@ static ssize_t store_##file_name \
unsigned long val; \
int ret; \
\
- ret = sscanf(buf, "%lu", &val); \
- if (ret != 1) \
- return -EINVAL; \
+ ret = kstrtoul(buf, 0, &val); \
+ if (ret) \
+ return ret; \
\
ret = freq_qos_update_request(policy->object##_freq_req, val);\
return ret >= 0 ? count : ret; \
@@ -993,7 +998,7 @@ static const struct sysfs_ops sysfs_ops = {
.store = store,
};
-static struct kobj_type ktype_cpufreq = {
+static const struct kobj_type ktype_cpufreq = {
.sysfs_ops = &sysfs_ops,
.default_groups = cpufreq_groups,
.release = cpufreq_sysfs_release,
@@ -1727,7 +1732,7 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
* MHz. In such cases it is better to avoid getting into
* unnecessary frequency updates.
*/
- if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
+ if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
return policy->cur;
cpufreq_out_of_sync(policy, new_freq);
@@ -2904,12 +2909,12 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
*/
-int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
- if (!cpufreq_driver || (driver != cpufreq_driver))
- return -EINVAL;
+ if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
+ return;
pr_debug("unregistering driver %s\n", driver->name);
@@ -2926,19 +2931,22 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpus_read_unlock();
-
- return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
struct cpufreq_governor *gov = cpufreq_default_governor();
+ struct device *dev_root;
if (cpufreq_disabled())
return -ENODEV;
- cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
+ dev_root = bus_get_dev_root(&cpu_subsys);
+ if (dev_root) {
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
+ put_device(dev_root);
+ }
BUG_ON(!cpufreq_global_kobject);
if (!strlen(default_governor))
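The store_##file_name macro in the cpufreq core now parses with kstrtoul() instead of sscanf("%lu"), so trailing garbage and out-of-range input are rejected and the real error code is returned to user space. Roughly what one expansion looks like after this change, expanded by hand for illustration:

/* Hedged, hand-expanded view of the scaling_max_freq store handler. */
static ssize_t store_example_scaling_max_freq(struct cpufreq_policy *policy,
					      const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* base 0: decimal, 0x.., or octal */
	if (ret)
		return ret;		/* -EINVAL / -ERANGE propagated */

	ret = freq_qos_update_request(policy->max_freq_req, val);
	return ret >= 0 ? count : ret;
}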
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 9e97f60f8199..ebb3a8102681 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -133,12 +133,14 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
+ cpufreq_unregister_driver(&davinci_driver);
+
clk_put(cpufreq.armclk);
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
- return cpufreq_unregister_driver(&davinci_driver);
+ return 0;
}
static struct platform_driver davinci_cpufreq_driver = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 67e56cf638ef..c4d4643b6ca6 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -355,8 +355,13 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
{
int ret;
- if (!policy->freq_table)
+ if (!policy->freq_table) {
+ /* Freq table must be passed by drivers with target_index() */
+ if (has_target_index())
+ return -EINVAL;
+
return 0;
+ }
ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
if (ret)
@@ -367,4 +372,3 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 76e553af2071..535867a7dfdd 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
cpu_dev = get_cpu_device(0);
- if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
+ if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
return -ENODEV;
if (of_machine_is_compatible("fsl,imx7ulp")) {
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index ad4ce8493144..48e1772e98fd 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -222,7 +222,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
u32 val;
int ret;
- if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+ if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret)
return ret;
@@ -279,7 +279,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
u32 val;
int ret = 0;
- if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+ if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret)
return ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fd73d6d2b808..2548ec92faa2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -452,20 +452,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
(u32) cpu->acpi_perf_data.states[i].control);
}
- /*
- * The _PSS table doesn't contain whole turbo frequency range.
- * This just contains +1 MHZ above the max non turbo frequency,
- * with control value corresponding to max turbo ratio. But
- * when cpufreq set policy is called, it will call with this
- * max frequency, which will cause a reduced performance as
- * this driver uses real max turbo frequency as the max
- * frequency. So correct this frequency in _PSS table to
- * correct max turbo frequency based on the turbo state.
- * Also need to convert to MHz as _PSS freq is in MHz.
- */
- if (!global.turbo_disabled)
- cpu->acpi_perf_data.states[0].core_frequency =
- policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");
@@ -1487,10 +1473,13 @@ static struct kobject *intel_pstate_kobject;
static void __init intel_pstate_sysfs_expose_params(void)
{
+ struct device *dev_root = bus_get_dev_root(&cpu_subsys);
int rc;
- intel_pstate_kobject = kobject_create_and_add("intel_pstate",
- &cpu_subsys.dev_root->kobj);
+ if (dev_root) {
+ intel_pstate_kobject = kobject_create_and_add("intel_pstate", &dev_root->kobj);
+ put_device(dev_root);
+ }
if (WARN_ON(!intel_pstate_kobject))
return;
@@ -2398,12 +2387,6 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
{}
};
-static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(SKYLAKE, core_funcs),
- {}
-};
-
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -2422,12 +2405,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu->epp_default = -EINVAL;
if (hwp_active) {
- const struct x86_cpu_id *id;
-
intel_pstate_hwp_enable(cpu);
- id = x86_match_cpu(intel_pstate_hwp_boost_ids);
- if (id && intel_pstate_acpi_pm_profile_server())
+ if (intel_pstate_acpi_pm_profile_server())
hwp_boost = true;
}
} else if (hwp_active) {
@@ -3372,6 +3352,7 @@ static const struct x86_cpu_id intel_epp_balance_perf[] = {
* AlderLake Mobile CPUs.
*/
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
{}
};
@@ -3530,4 +3511,3 @@ early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 70ad8fe1d78b..95588101efbd 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
deleted file mode 100644
index fb72d709db56..000000000000
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * CPU Frequency Scaling for Loongson 1 SoC
- *
- * Copyright (C) 2014-2016 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <cpufreq.h>
-#include <loongson1.h>
-
-struct ls1x_cpufreq {
- struct device *dev;
- struct clk *clk; /* CPU clk */
- struct clk *mux_clk; /* MUX of CPU clk */
- struct clk *pll_clk; /* PLL clk */
- struct clk *osc_clk; /* OSC clk */
- unsigned int max_freq;
- unsigned int min_freq;
-};
-
-static struct ls1x_cpufreq *cpufreq;
-
-static int ls1x_cpufreq_notifier(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- if (val == CPUFREQ_POSTCHANGE)
- current_cpu_data.udelay_val = loops_per_jiffy;
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block ls1x_cpufreq_notifier_block = {
- .notifier_call = ls1x_cpufreq_notifier
-};
-
-static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- unsigned int old_freq, new_freq;
-
- old_freq = policy->cur;
- new_freq = policy->freq_table[index].frequency;
-
- /*
- * The procedure of reconfiguring CPU clk is as below.
- *
- * - Reparent CPU clk to OSC clk
- * - Reset CPU clock (very important)
- * - Reconfigure CPU DIV
- * - Reparent CPU clk back to CPU DIV clk
- */
-
- clk_set_parent(policy->clk, cpufreq->osc_clk);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
- LS1X_CLK_PLL_DIV);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
- LS1X_CLK_PLL_DIV);
- clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
- clk_set_parent(policy->clk, cpufreq->mux_clk);
- dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
-
- return 0;
-}
-
-static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- struct cpufreq_frequency_table *freq_tbl;
- unsigned int pll_freq, freq;
- int steps, i;
-
- pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
-
- steps = 1 << DIV_CPU_WIDTH;
- freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
- if (!freq_tbl)
- return -ENOMEM;
-
- for (i = 0; i < (steps - 1); i++) {
- freq = pll_freq / (i + 1);
- if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
- freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
- else
- freq_tbl[i].frequency = freq;
- dev_dbg(cpu_dev,
- "cpufreq table: index %d: frequency %d\n", i,
- freq_tbl[i].frequency);
- }
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
-
- policy->clk = cpufreq->clk;
- cpufreq_generic_init(policy, freq_tbl, 0);
-
- return 0;
-}
-
-static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
-{
- kfree(policy->freq_table);
- return 0;
-}
-
-static struct cpufreq_driver ls1x_cpufreq_driver = {
- .name = "cpufreq-ls1x",
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = ls1x_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = ls1x_cpufreq_init,
- .exit = ls1x_cpufreq_exit,
- .attr = cpufreq_generic_attr,
-};
-
-static int ls1x_cpufreq_remove(struct platform_device *pdev)
-{
- cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-
- return 0;
-}
-
-static int ls1x_cpufreq_probe(struct platform_device *pdev)
-{
- struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
- struct clk *clk;
- int ret;
-
- if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- cpufreq =
- devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
-
- cpufreq->dev = &pdev->dev;
-
- clk = devm_clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->clk));
- return PTR_ERR(clk);
- }
- cpufreq->mux_clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->mux_clk));
- return PTR_ERR(clk);
- }
- cpufreq->pll_clk = clk;
-
- clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->osc_clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->osc_clk = clk;
-
- cpufreq->max_freq = pdata->max_freq;
- cpufreq->min_freq = pdata->min_freq;
-
- ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq driver: %d\n", ret);
- return ret;
- }
-
- ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq notifier: %d\n",ret);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
- }
-
- return ret;
-}
-
-static struct platform_driver ls1x_cpufreq_platdrv = {
- .probe = ls1x_cpufreq_probe,
- .remove = ls1x_cpufreq_remove,
- .driver = {
- .name = "ls1x-cpufreq",
- },
-};
-
-module_platform_driver(ls1x_cpufreq_platdrv);
-
-MODULE_ALIAS("platform:ls1x-cpufreq");
-MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
-MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 28d346062166..f9306410a07f 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -23,7 +23,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/time.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#define DBG(fmt...) pr_debug(fmt)
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f80339779084..b22f5cc8a463 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -317,13 +317,16 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+
+ return 0;
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
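Note on the mediatek-cpufreq-hw hunks above: the remove callback now returns 0 unconditionally because cpufreq_unregister_driver() no longer reports a meaningful error, and MODULE_DEVICE_TABLE(of, ...) is added so the module alias is emitted and the driver can be autoloaded from the devicetree compatible. A small sketch of the match-table pattern, with hypothetical names standing in for cpufreq_mtk_offsets:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

/* hypothetical per-SoC data, illustrative only */
struct example_soc_data { unsigned int reg_enable; };
static const struct example_soc_data example_offsets = { .reg_enable = 0x0 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-cpufreq-hw", .data = &example_offsets },
	{ /* sentinel */ }
};
/* emits the alias table consumed by udev/modprobe for autoloading */
MODULE_DEVICE_TABLE(of, example_of_match);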
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 7f2680bc9a0f..9a39a7ccfae9 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -373,13 +373,13 @@ static struct device *of_get_cci(struct device *cpu_dev)
struct platform_device *pdev;
np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
- if (IS_ERR_OR_NULL(np))
- return NULL;
+ if (!np)
+ return ERR_PTR(-ENODEV);
pdev = of_find_device_by_node(np);
of_node_put(np);
- if (IS_ERR_OR_NULL(pdev))
- return NULL;
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
return &pdev->dev;
}
@@ -401,7 +401,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->ccifreq_bound = false;
if (info->soc_data->ccifreq_supported) {
info->cci_dev = of_get_cci(info->cpu_dev);
- if (IS_ERR_OR_NULL(info->cci_dev)) {
+ if (IS_ERR(info->cci_dev)) {
ret = PTR_ERR(info->cci_dev);
dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
return -ENODEV;
@@ -420,7 +420,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = PTR_ERR(info->inter_clk);
dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get intermediate clk\n", cpu);
- goto out_free_resources;
+ goto out_free_mux_clock;
}
info->proc_reg = regulator_get_optional(cpu_dev, "proc");
@@ -428,13 +428,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = PTR_ERR(info->proc_reg);
dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get proc regulator\n", cpu);
- goto out_free_resources;
+ goto out_free_inter_clock;
}
ret = regulator_enable(info->proc_reg);
if (ret) {
dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
- goto out_free_resources;
+ goto out_free_proc_reg;
}
/* Both presence and absence of sram regulator are valid cases. */
@@ -442,14 +442,14 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (IS_ERR(info->sram_reg)) {
ret = PTR_ERR(info->sram_reg);
if (ret == -EPROBE_DEFER)
- goto out_free_resources;
+ goto out_disable_proc_reg;
info->sram_reg = NULL;
} else {
ret = regulator_enable(info->sram_reg);
if (ret) {
dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
- goto out_free_resources;
+ goto out_free_sram_reg;
}
}
@@ -458,13 +458,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (ret) {
dev_err(cpu_dev,
"cpu%d: failed to get OPP-sharing information\n", cpu);
- goto out_free_resources;
+ goto out_disable_sram_reg;
}
ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
if (ret) {
dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
- goto out_free_resources;
+ goto out_disable_sram_reg;
}
ret = clk_prepare_enable(info->cpu_clk);
@@ -533,43 +533,41 @@ out_disable_mux_clock:
out_free_opp_table:
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
-out_free_resources:
- if (regulator_is_enabled(info->proc_reg))
- regulator_disable(info->proc_reg);
- if (info->sram_reg && regulator_is_enabled(info->sram_reg))
+out_disable_sram_reg:
+ if (info->sram_reg)
regulator_disable(info->sram_reg);
- if (!IS_ERR(info->proc_reg))
- regulator_put(info->proc_reg);
- if (!IS_ERR(info->sram_reg))
+out_free_sram_reg:
+ if (info->sram_reg)
regulator_put(info->sram_reg);
- if (!IS_ERR(info->cpu_clk))
- clk_put(info->cpu_clk);
- if (!IS_ERR(info->inter_clk))
- clk_put(info->inter_clk);
+
+out_disable_proc_reg:
+ regulator_disable(info->proc_reg);
+
+out_free_proc_reg:
+ regulator_put(info->proc_reg);
+
+out_free_inter_clock:
+ clk_put(info->inter_clk);
+
+out_free_mux_clock:
+ clk_put(info->cpu_clk);
return ret;
}
static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
- if (!IS_ERR(info->proc_reg)) {
- regulator_disable(info->proc_reg);
- regulator_put(info->proc_reg);
- }
- if (!IS_ERR(info->sram_reg)) {
+ regulator_disable(info->proc_reg);
+ regulator_put(info->proc_reg);
+ if (info->sram_reg) {
regulator_disable(info->sram_reg);
regulator_put(info->sram_reg);
}
- if (!IS_ERR(info->cpu_clk)) {
- clk_disable_unprepare(info->cpu_clk);
- clk_put(info->cpu_clk);
- }
- if (!IS_ERR(info->inter_clk)) {
- clk_disable_unprepare(info->inter_clk);
- clk_put(info->inter_clk);
- }
-
+ clk_disable_unprepare(info->cpu_clk);
+ clk_put(info->cpu_clk);
+ clk_disable_unprepare(info->inter_clk);
+ clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
}
@@ -695,6 +693,15 @@ static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1360000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1360000,
+ .ccifreq_supported = false,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -713,20 +720,29 @@ static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
.ccifreq_supported = true,
};
+static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1310000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1310000,
+ .ccifreq_supported = false,
+};
+
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt7622", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt7623", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt8167", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
+ { .compatible = "mediatek,mt7623", .data = &mt7622_platform_data },
+ { .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
{ .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
{ .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt8516", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8516", .data = &mt8516_platform_data },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
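Note on the mtk_cpu_dvfs_info_init() hunks above: they replace the catch-all out_free_resources label, which had to probe regulator_is_enabled() and IS_ERR() to work out what had actually been acquired, with one label per resource unwound in reverse acquisition order. A condensed sketch of that idiom, using hypothetical resource names rather than the driver's full set:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_acquire(struct device *dev)
{
	struct clk *mux_clk, *inter_clk;
	struct regulator *proc_reg;
	int ret;

	mux_clk = clk_get(dev, "cpu");
	if (IS_ERR(mux_clk))
		return PTR_ERR(mux_clk);

	inter_clk = clk_get(dev, "intermediate");
	if (IS_ERR(inter_clk)) {
		ret = PTR_ERR(inter_clk);
		goto out_free_mux_clock;
	}

	proc_reg = regulator_get_optional(dev, "proc");
	if (IS_ERR(proc_reg)) {
		ret = PTR_ERR(proc_reg);
		goto out_free_inter_clock;
	}

	ret = regulator_enable(proc_reg);
	if (ret)
		goto out_free_proc_reg;

	return 0;

	/* each label releases exactly the resources acquired before the jump */
out_free_proc_reg:
	regulator_put(proc_reg);
out_free_inter_clock:
	clk_put(inter_clk);
out_free_mux_clock:
	clk_put(mux_clk);
	return ret;
}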
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1b50df06c6bc..81649a1969b6 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -184,7 +184,9 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
static int omap_cpufreq_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&omap_driver);
+ cpufreq_unregister_driver(&omap_driver);
+
+ return 0;
}
static struct platform_driver omap_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 9f3fc7a073d0..1d2cfea9858a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -31,6 +31,7 @@
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/slab.h>
+#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -384,7 +385,7 @@ out_free:
return ret;
}
-static int __init pcc_cpufreq_probe(void)
+static int __init pcc_cpufreq_evaluate(void)
{
acpi_status status;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -576,7 +577,7 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
.name = "pcc-cpufreq",
};
-static int __init pcc_cpufreq_init(void)
+static int __init pcc_cpufreq_probe(struct platform_device *pdev)
{
int ret;
@@ -587,9 +588,9 @@ static int __init pcc_cpufreq_init(void)
if (acpi_disabled)
return -ENODEV;
- ret = pcc_cpufreq_probe();
+ ret = pcc_cpufreq_evaluate();
if (ret) {
- pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n");
+ pr_debug("pcc_cpufreq_probe: PCCH evaluation failed\n");
return ret;
}
@@ -607,21 +608,35 @@ static int __init pcc_cpufreq_init(void)
return ret;
}
-static void __exit pcc_cpufreq_exit(void)
+static int pcc_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&pcc_cpufreq_driver);
pcc_clear_mapping();
free_percpu(pcc_cpu_info);
+
+ return 0;
}
-static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
- {ACPI_PROCESSOR_OBJECT_HID, },
- {ACPI_PROCESSOR_DEVICE_HID, },
- {},
+static struct platform_driver pcc_cpufreq_platdrv = {
+ .driver = {
+ .name = "pcc-cpufreq",
+ },
+ .remove = pcc_cpufreq_remove,
};
-MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+static int __init pcc_cpufreq_init(void)
+{
+ return platform_driver_probe(&pcc_cpufreq_platdrv, pcc_cpufreq_probe);
+}
+
+static void __exit pcc_cpufreq_exit(void)
+{
+ platform_driver_unregister(&pcc_cpufreq_platdrv);
+}
+
+MODULE_ALIAS("platform:pcc-cpufreq");
MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
MODULE_VERSION(PCC_VERSION);
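Note on the pcc-cpufreq hunks above: they convert a bare module_init()/module_exit() pair into a platform driver. The old probe helper becomes pcc_cpufreq_evaluate(), the init path becomes the platform probe, and registration goes through platform_driver_probe(), the variant to use when the probe function is __init and discarded after boot. A minimal sketch of that shape, with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* one-shot firmware/ACPI discovery and driver registration go here */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* undo whatever probe set up */
	return 0;
}

/* .probe is intentionally not set; platform_driver_probe() supplies it */
static struct platform_driver example_platdrv = {
	.driver = {
		.name = "example-cpufreq",
	},
	.remove = example_remove,
};

static int __init example_init(void)
{
	return platform_driver_probe(&example_platdrv, example_probe);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_platdrv);
}
module_exit(example_exit);

MODULE_ALIAS("platform:example-cpufreq");
MODULE_LICENSE("GPL");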
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 4b8ee2014da6..ec75e79659ac 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/hardirq.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -546,7 +546,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
- if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ if (!of_property_read_bool(cpunode, "dynamic-power-step"))
return 1;
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
@@ -576,7 +576,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
u32 pvr;
const u32 *value;
- if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ if (!of_property_read_bool(cpunode, "dynamic-power-step"))
return 1;
hi_freq = cur_freq;
@@ -632,7 +632,7 @@ static int __init pmac_cpufreq_setup(void)
/* Check for 7447A based MacRISC3 */
if (of_machine_is_compatible("MacRISC3") &&
- of_get_property(cpunode, "dynamic-power-step", NULL) &&
+ of_property_read_bool(cpunode, "dynamic-power-step") &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
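Note on the pmac32 hunks above: open-coded of_get_property(node, "prop", NULL) == NULL tests are replaced with of_property_read_bool(), which expresses the "is this boolean property present" check directly and works for properties that carry no value. A tiny sketch with a hypothetical helper:

#include <linux/of.h>

/* true if the (possibly empty) property exists on the node */
static bool example_has_power_step(const struct device_node *np)
{
	return of_property_read_bool(np, "dynamic-power-step");
}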
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index ba9c31d98bd6..2cd2b06849a2 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/irq.h>
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 340fed35e45d..a78d7a27b4b5 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -11,10 +11,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
-#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>
@@ -29,6 +28,8 @@
#define GT_IRQ_STATUS BIT(2)
+#define MAX_FREQ_DOMAINS 3
+
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
@@ -43,7 +44,6 @@ struct qcom_cpufreq_soc_data {
struct qcom_cpufreq_data {
void __iomem *base;
- struct resource *res;
/*
* Mutex to synchronize between de-init sequence and re-starting LMh
@@ -58,8 +58,6 @@ struct qcom_cpufreq_data {
struct clk_hw cpu_clk;
bool per_core_dcvs;
-
- struct freq_qos_request throttle_freq_req;
};
static struct {
@@ -143,40 +141,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
return lval * xo_rate;
}
-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
+ unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
+ soc_data = qcom_cpufreq.soc_data;
- return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
}
-/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
- const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
- unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
- soc_data = qcom_cpufreq.soc_data;
- index = readl_relaxed(data->base + soc_data->reg_perf_state);
- index = min(index, LUT_MAX_ENTRIES - 1);
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return policy->freq_table[index].frequency;
+ return qcom_cpufreq_get_freq(cpu);
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -347,8 +347,6 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
-
/* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
@@ -441,14 +439,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
if (data->throttle_irq < 0)
return data->throttle_irq;
- ret = freq_qos_add_request(&policy->constraints,
- &data->throttle_freq_req, FREQ_QOS_MAX,
- FREQ_QOS_MAX_DEFAULT_VALUE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
- return ret;
- }
-
data->cancel_throttle = false;
data->policy = policy;
@@ -515,7 +505,6 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
if (data->throttle_irq <= 0)
return;
- freq_qos_remove_request(&data->throttle_freq_req);
free_irq(data->throttle_irq, data);
}
@@ -588,16 +577,12 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
struct qcom_cpufreq_data *data = policy->driver_data;
- struct resource *res = data->res;
- void __iomem *base = data->base;
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
- iounmap(base);
- release_mem_region(res->start, resource_size(res));
return 0;
}
@@ -678,10 +663,9 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
- num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4);
- if (num_domains <= 0)
- return num_domains;
+ for (num_domains = 0; num_domains < MAX_FREQ_DOMAINS; num_domains++)
+ if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
+ break;
qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
GFP_KERNEL);
@@ -689,6 +673,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return -ENOMEM;
qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+ if (!qcom_cpufreq.soc_data)
+ return -ENODEV;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
if (!clk_data)
@@ -699,17 +685,15 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
for (i = 0; i < num_domains; i++) {
struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
struct clk_init_data clk_init = {};
- struct resource *res;
void __iomem *base;
- base = devm_platform_get_and_ioremap_resource(pdev, i, &res);
+ base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(base)) {
- dev_err(dev, "Failed to map resource %pR\n", res);
+ dev_err(dev, "Failed to map resource index %d\n", i);
return PTR_ERR(base);
}
data->base = base;
- data->res = res;
/* Register CPU clock for each frequency domain */
clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
@@ -748,7 +732,9 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+
+ return 0;
}
static struct platform_driver qcom_cpufreq_hw_driver = {
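Note on the qcom-cpufreq-hw hunks above: they drop the cached struct resource together with the manual iounmap()/release_mem_region() in the exit path, map each frequency domain with devm_platform_ioremap_resource() so the mapping is released automatically, count domains by probing the platform MEM resources rather than parsing the "reg" property, and report the LMh throttle frequency from hw_get() only when a throttle interrupt is actually wired up. A condensed sketch of the resource counting and devm mapping, with hypothetical names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#define EXAMPLE_MAX_FREQ_DOMAINS	3

static int example_map_domains(struct platform_device *pdev,
			       void __iomem *bases[EXAMPLE_MAX_FREQ_DOMAINS])
{
	int num_domains, i;

	/* count how many MEM resources the device actually provides */
	for (num_domains = 0; num_domains < EXAMPLE_MAX_FREQ_DOMAINS; num_domains++)
		if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
			break;

	for (i = 0; i < num_domains; i++) {
		/* devm_* mapping is torn down automatically on detach */
		bases[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(bases[i]))
			return PTR_ERR(bases[i]);
	}

	return num_domains;
}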
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
deleted file mode 100644
index 5dcfbf0bfb74..000000000000
--- a/drivers/cpufreq/s3c2410-cpufreq.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 CPU Frequency scaling
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2410_CLKDIVN_PDIVN (1<<0)
-#define S3C2410_CLKDIVN_HDIVN (1<<1)
-
-/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
-
-static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- u32 clkdiv = 0;
-
- if (cfg->divs.h_divisor == 2)
- clkdiv |= S3C2410_CLKDIVN_HDIVN;
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2410_CLKDIVN_PDIVN;
-
- s3c24xx_write_clkdivn(clkdiv);
-}
-
-static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long hclk, fclk, pclk;
- unsigned int hdiv, pdiv;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- hclk_max = cfg->max.hclk;
-
- cfg->freq.armclk = fclk;
-
- s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
- __func__, fclk, hclk_max);
-
- hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
- hclk = fclk / hdiv;
-
- if (hclk > cfg->max.hclk) {
- s3c_freq_dbg("%s: hclk too big\n", __func__);
- return -EINVAL;
- }
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
- pclk = hclk / pdiv;
-
- if (pclk > cfg->max.pclk) {
- s3c_freq_dbg("%s: pclk too big\n", __func__);
- return -EINVAL;
- }
-
- pdiv *= hdiv;
-
- /* record the result */
- cfg->divs.p_divisor = pdiv;
- cfg->divs.h_divisor = hdiv;
-
- return 0;
-}
-
-static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
- .max = {
- .fclk = 200000000,
- .hclk = 100000000,
- .pclk = 50000000,
- },
-
- /* transition latency is about 5ms worst-case, so
- * set 10ms to be sure */
- .latency = 10000000,
-
- .locktime_m = 150,
- .locktime_u = 150,
- .locktime_bits = 12,
-
- .need_pll = 1,
-
- .name = "s3c2410",
- .calc_iotiming = s3c2410_iotiming_calc,
- .set_iotiming = s3c2410_iotiming_set,
- .get_iotiming = s3c2410_iotiming_get,
-
- .set_fvco = s3c2410_set_fvco,
- .set_refresh = s3c2410_cpufreq_setrefresh,
- .set_divs = s3c2410_cpufreq_setdivs,
- .calc_divs = s3c2410_cpufreq_calcdivs,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
-};
-
-static int s3c2410_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- return s3c_cpufreq_register(&s3c2410_cpufreq_info);
-}
-
-static struct subsys_interface s3c2410_cpufreq_interface = {
- .name = "s3c2410_cpufreq",
- .subsys = &s3c2410_subsys,
- .add_dev = s3c2410_cpufreq_add,
-};
-
-static int __init s3c2410_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2410_cpufreq_interface);
-}
-arch_initcall(s3c2410_cpufreq_init);
-
-static int s3c2410a_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- /* alter the maximum freq settings for S3C2410A. If a board knows
- * it only has a maximum of 200, then it should register its own
- * limits. */
-
- s3c2410_cpufreq_info.max.fclk = 266000000;
- s3c2410_cpufreq_info.max.hclk = 133000000;
- s3c2410_cpufreq_info.max.pclk = 66500000;
- s3c2410_cpufreq_info.name = "s3c2410a";
-
- return s3c2410_cpufreq_add(dev, sif);
-}
-
-static struct subsys_interface s3c2410a_cpufreq_interface = {
- .name = "s3c2410a_cpufreq",
- .subsys = &s3c2410a_subsys,
- .add_dev = s3c2410a_cpufreq_add,
-};
-
-static int __init s3c2410a_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2410a_cpufreq_interface);
-}
-arch_initcall(s3c2410a_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
deleted file mode 100644
index 5945945ead7c..000000000000
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2412 CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2412_CLKDIVN_PDIVN (1<<2)
-#define S3C2412_CLKDIVN_HDIVN_MASK (3<<0)
-#define S3C2412_CLKDIVN_ARMDIVN (1<<3)
-#define S3C2412_CLKDIVN_DVSEN (1<<4)
-#define S3C2412_CLKDIVN_HALFHCLK (1<<5)
-#define S3C2412_CLKDIVN_USB48DIV (1<<6)
-#define S3C2412_CLKDIVN_UARTDIV_MASK (15<<8)
-#define S3C2412_CLKDIVN_UARTDIV_SHIFT (8)
-#define S3C2412_CLKDIVN_I2SDIV_MASK (15<<12)
-#define S3C2412_CLKDIVN_I2SDIV_SHIFT (12)
-#define S3C2412_CLKDIVN_CAMDIV_MASK (15<<16)
-#define S3C2412_CLKDIVN_CAMDIV_SHIFT (16)
-
-/* our clock resources. */
-static struct clk *xtal;
-static struct clk *fclk;
-static struct clk *hclk;
-static struct clk *armclk;
-
-/* HDIV: 1, 2, 3, 4, 6, 8 */
-
-static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned int hdiv, pdiv, armdiv, dvs;
- unsigned long hclk, fclk, armclk, armdiv_clk;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- armclk = cfg->freq.armclk;
- hclk_max = cfg->max.hclk;
-
- /* We can't run hclk above armclk, since at best we have to
- * have armclk and hclk in dvs mode. */
-
- if (hclk_max > armclk)
- hclk_max = armclk;
-
- s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
- __func__, fclk, armclk, hclk_max);
- s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
- __func__, cfg->freq.fclk, cfg->freq.armclk,
- cfg->freq.hclk, cfg->freq.pclk);
-
- armdiv = fclk / armclk;
-
- if (armdiv < 1)
- armdiv = 1;
- if (armdiv > 2)
- armdiv = 2;
-
- cfg->divs.arm_divisor = armdiv;
- armdiv_clk = fclk / armdiv;
-
- hdiv = armdiv_clk / hclk_max;
- if (hdiv < 1)
- hdiv = 1;
-
- cfg->freq.hclk = hclk = armdiv_clk / hdiv;
-
- /* set dvs depending on whether we reached armclk or not. */
- cfg->divs.dvs = dvs = armclk < armdiv_clk;
-
- /* update the actual armclk we achieved. */
- cfg->freq.armclk = dvs ? hclk : armdiv_clk;
-
- s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
- __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
-
- if (hdiv > 4)
- goto invalid;
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
-
- if ((hclk / pdiv) > cfg->max.pclk)
- pdiv++;
-
- cfg->freq.pclk = hclk / pdiv;
-
- s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
-
- if (pdiv > 2)
- goto invalid;
-
- pdiv *= hdiv;
-
- /* store the result, and then return */
-
- cfg->divs.h_divisor = hdiv * armdiv;
- cfg->divs.p_divisor = pdiv * armdiv;
-
- return 0;
-
-invalid:
- return -EINVAL;
-}
-
-static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long clkdiv;
- unsigned long olddiv;
-
- olddiv = clkdiv = s3c24xx_read_clkdivn();
-
- /* clear off current clock info */
-
- clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
- clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
- clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
-
- if (cfg->divs.arm_divisor == 2)
- clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
-
- clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2412_CLKDIVN_PDIVN;
-
- s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
- s3c24xx_write_clkdivn(clkdiv);
-
- clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
-}
-
-/* set the default cpu frequency information, based on a 200MHz part
- * as we have no other way of detecting the speed rating in software.
- */
-
-static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
- .max = {
- .fclk = 200000000,
- .hclk = 100000000,
- .pclk = 50000000,
- },
-
- .latency = 5000000, /* 5ms */
-
- .locktime_m = 150,
- .locktime_u = 150,
- .locktime_bits = 16,
-
- .name = "s3c2412",
- .set_refresh = s3c2412_cpufreq_setrefresh,
- .set_divs = s3c2412_cpufreq_setdivs,
- .calc_divs = s3c2412_cpufreq_calcdivs,
-
- .calc_iotiming = s3c2412_iotiming_calc,
- .set_iotiming = s3c2412_iotiming_set,
- .get_iotiming = s3c2412_iotiming_get,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
-};
-
-static int s3c2412_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- unsigned long fclk_rate;
-
- hclk = clk_get(NULL, "hclk");
- if (IS_ERR(hclk)) {
- pr_err("cannot find hclk clock\n");
- return -ENOENT;
- }
-
- fclk = clk_get(NULL, "fclk");
- if (IS_ERR(fclk)) {
- pr_err("cannot find fclk clock\n");
- goto err_fclk;
- }
-
- fclk_rate = clk_get_rate(fclk);
- if (fclk_rate > 200000000) {
- pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
- fclk_rate / 1000000);
- s3c2412_cpufreq_info.max.fclk = 266000000;
- s3c2412_cpufreq_info.max.hclk = 133000000;
- s3c2412_cpufreq_info.max.pclk = 66000000;
- }
-
- armclk = clk_get(NULL, "armclk");
- if (IS_ERR(armclk)) {
- pr_err("cannot find arm clock\n");
- goto err_armclk;
- }
-
- xtal = clk_get(NULL, "xtal");
- if (IS_ERR(xtal)) {
- pr_err("cannot find xtal clock\n");
- goto err_xtal;
- }
-
- return s3c_cpufreq_register(&s3c2412_cpufreq_info);
-
-err_xtal:
- clk_put(armclk);
-err_armclk:
- clk_put(fclk);
-err_fclk:
- clk_put(hclk);
-
- return -ENOENT;
-}
-
-static struct subsys_interface s3c2412_cpufreq_interface = {
- .name = "s3c2412_cpufreq",
- .subsys = &s3c2412_subsys,
- .add_dev = s3c2412_cpufreq_add,
-};
-
-static int s3c2412_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2412_cpufreq_interface);
-}
-arch_initcall(s3c2412_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
deleted file mode 100644
index 5c221bc90210..000000000000
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ /dev/null
@@ -1,492 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * S3C2416/2450 CPUfreq Support
- *
- * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on s3c64xx_cpufreq.c
- *
- * Copyright 2009 Wolfson Microelectronics plc
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reboot.h>
-#include <linux/module.h>
-
-static DEFINE_MUTEX(cpufreq_lock);
-
-struct s3c2416_data {
- struct clk *armdiv;
- struct clk *armclk;
- struct clk *hclk;
-
- unsigned long regulator_latency;
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct regulator *vddarm;
-#endif
-
- struct cpufreq_frequency_table *freq_table;
-
- bool is_dvs;
- bool disable_dvs;
-};
-
-static struct s3c2416_data s3c2416_cpufreq;
-
-struct s3c2416_dvfs {
- unsigned int vddarm_min;
- unsigned int vddarm_max;
-};
-
-/* pseudo-frequency for dvs mode */
-#define FREQ_DVS 132333
-
-/* frequency to sleep and reboot in
- * it's essential to leave dvs, as some boards do not reconfigure the
- * regulator on reboot
- */
-#define FREQ_SLEEP 133333
-
-/* Sources for the ARMCLK */
-#define SOURCE_HCLK 0
-#define SOURCE_ARMDIV 1
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-/* S3C2416 only supports changing the voltage in the dvs-mode.
- * Voltages down to 1.0V seem to work, so we take what the regulator
- * can get us.
- */
-static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
- [SOURCE_HCLK] = { 950000, 1250000 },
- [SOURCE_ARMDIV] = { 1250000, 1350000 },
-};
-#endif
-
-static struct cpufreq_frequency_table s3c2416_freq_table[] = {
- { 0, SOURCE_HCLK, FREQ_DVS },
- { 0, SOURCE_ARMDIV, 133333 },
- { 0, SOURCE_ARMDIV, 266666 },
- { 0, SOURCE_ARMDIV, 400000 },
- { 0, 0, CPUFREQ_TABLE_END },
-};
-
-static struct cpufreq_frequency_table s3c2450_freq_table[] = {
- { 0, SOURCE_HCLK, FREQ_DVS },
- { 0, SOURCE_ARMDIV, 133500 },
- { 0, SOURCE_ARMDIV, 267000 },
- { 0, SOURCE_ARMDIV, 534000 },
- { 0, 0, CPUFREQ_TABLE_END },
-};
-
-static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
- if (cpu != 0)
- return 0;
-
- /* return our pseudo-frequency when in dvs mode */
- if (s3c_freq->is_dvs)
- return FREQ_DVS;
-
- return clk_get_rate(s3c_freq->armclk) / 1000;
-}
-
-static int s3c2416_cpufreq_set_armdiv(struct s3c2416_data *s3c_freq,
- unsigned int freq)
-{
- int ret;
-
- if (clk_get_rate(s3c_freq->armdiv) / 1000 != freq) {
- ret = clk_set_rate(s3c_freq->armdiv, freq * 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set armdiv rate %dkHz: %d\n",
- freq, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
-{
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct s3c2416_dvfs *dvfs;
-#endif
- int ret;
-
- if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: already in dvs mode, nothing to do\n");
- return 0;
- }
-
- pr_debug("cpufreq: switching armclk to hclk (%lukHz)\n",
- clk_get_rate(s3c_freq->hclk) / 1000);
- ret = clk_set_parent(s3c_freq->armclk, s3c_freq->hclk);
- if (ret < 0) {
- pr_err("cpufreq: Failed to switch armclk to hclk: %d\n", ret);
- return ret;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- /* changing the core voltage is only allowed when in dvs mode */
- if (s3c_freq->vddarm) {
- dvfs = &s3c2416_dvfs_table[idx];
-
- pr_debug("cpufreq: setting regulator to %d-%d\n",
- dvfs->vddarm_min, dvfs->vddarm_max);
- ret = regulator_set_voltage(s3c_freq->vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
-
- /* when lowering the voltage failed, there is nothing to do */
- if (ret != 0)
- pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
- }
-#endif
-
- s3c_freq->is_dvs = 1;
-
- return 0;
-}
-
-static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
-{
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct s3c2416_dvfs *dvfs;
-#endif
- int ret;
-
- if (!s3c_freq->is_dvs) {
- pr_debug("cpufreq: not in dvs mode, so can't leave\n");
- return 0;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- if (s3c_freq->vddarm) {
- dvfs = &s3c2416_dvfs_table[idx];
-
- pr_debug("cpufreq: setting regulator to %d-%d\n",
- dvfs->vddarm_min, dvfs->vddarm_max);
- ret = regulator_set_voltage(s3c_freq->vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
- if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
- return ret;
- }
- }
-#endif
-
- /* force armdiv to hclk frequency for transition from dvs */
- if (clk_get_rate(s3c_freq->armdiv) > clk_get_rate(s3c_freq->hclk)) {
- pr_debug("cpufreq: force armdiv to hclk frequency (%lukHz)\n",
- clk_get_rate(s3c_freq->hclk) / 1000);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq,
- clk_get_rate(s3c_freq->hclk) / 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n",
- clk_get_rate(s3c_freq->hclk) / 1000, ret);
- return ret;
- }
- }
-
- pr_debug("cpufreq: switching armclk parent to armdiv (%lukHz)\n",
- clk_get_rate(s3c_freq->armdiv) / 1000);
-
- ret = clk_set_parent(s3c_freq->armclk, s3c_freq->armdiv);
- if (ret < 0) {
- pr_err("cpufreq: Failed to switch armclk clock parent to armdiv: %d\n",
- ret);
- return ret;
- }
-
- s3c_freq->is_dvs = 0;
-
- return 0;
-}
-
-static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- unsigned int new_freq;
- int idx, ret, to_dvs = 0;
-
- mutex_lock(&cpufreq_lock);
-
- idx = s3c_freq->freq_table[index].driver_data;
-
- if (idx == SOURCE_HCLK)
- to_dvs = 1;
-
- /* switching to dvs when it's not allowed */
- if (to_dvs && s3c_freq->disable_dvs) {
- pr_debug("cpufreq: entering dvs mode not allowed\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* When leaving dvs mode, always switch the armdiv to the hclk rate.
- * The S3C2416 has stability issues when switching directly to
- * higher frequencies.
- */
- new_freq = (s3c_freq->is_dvs && !to_dvs)
- ? clk_get_rate(s3c_freq->hclk) / 1000
- : s3c_freq->freq_table[index].frequency;
-
- if (to_dvs) {
- pr_debug("cpufreq: enter dvs\n");
- ret = s3c2416_cpufreq_enter_dvs(s3c_freq, idx);
- } else if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: leave dvs\n");
- ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
- } else {
- pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
- }
-
-out:
- mutex_unlock(&cpufreq_lock);
-
- return ret;
-}
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
-{
- int count, v, i, found;
- struct cpufreq_frequency_table *pos;
- struct s3c2416_dvfs *dvfs;
-
- count = regulator_count_voltages(s3c_freq->vddarm);
- if (count < 0) {
- pr_err("cpufreq: Unable to check supported voltages\n");
- return;
- }
-
- if (!count)
- goto out;
-
- cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
- dvfs = &s3c2416_dvfs_table[pos->driver_data];
- found = 0;
-
- /* Check only the min-voltage, more is always ok on S3C2416 */
- for (i = 0; i < count; i++) {
- v = regulator_list_voltage(s3c_freq->vddarm, i);
- if (v >= dvfs->vddarm_min)
- found = 1;
- }
-
- if (!found) {
- pr_debug("cpufreq: %dkHz unsupported by regulator\n",
- pos->frequency);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- }
- }
-
-out:
- /* Guessed */
- s3c_freq->regulator_latency = 1 * 1000 * 1000;
-}
-#endif
-
-static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- int ret;
- struct cpufreq_policy *policy;
-
- mutex_lock(&cpufreq_lock);
-
- /* disable further changes */
- s3c_freq->disable_dvs = 1;
-
- mutex_unlock(&cpufreq_lock);
-
- /* some boards don't reconfigure the regulator on reboot, which
- * could lead to undervolting the cpu when the clock is reset.
- * Therefore we always leave the DVS mode on reboot.
- */
- if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: leave dvs on reboot\n");
-
- policy = cpufreq_cpu_get(0);
- if (!policy) {
- pr_debug("cpufreq: get no policy for cpu0\n");
- return NOTIFY_BAD;
- }
-
- ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
- cpufreq_cpu_put(policy);
-
- if (ret < 0)
- return NOTIFY_BAD;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
- .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
-};
-
-static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_frequency_table *pos;
- struct clk *msysclk;
- unsigned long rate;
- int ret;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- msysclk = clk_get(NULL, "msysclk");
- if (IS_ERR(msysclk)) {
- ret = PTR_ERR(msysclk);
- pr_err("cpufreq: Unable to obtain msysclk: %d\n", ret);
- return ret;
- }
-
- /*
- * S3C2416 and S3C2450 share the same processor-ID and also provide no
- * other means to distinguish them other than through the rate of
- * msysclk. On S3C2416 msysclk runs at 800MHz and on S3C2450 at 533MHz.
- */
- rate = clk_get_rate(msysclk);
- if (rate == 800 * 1000 * 1000) {
- pr_info("cpufreq: msysclk running at %lukHz, using S3C2416 frequency table\n",
- rate / 1000);
- s3c_freq->freq_table = s3c2416_freq_table;
- policy->cpuinfo.max_freq = 400000;
- } else if (rate / 1000 == 534000) {
- pr_info("cpufreq: msysclk running at %lukHz, using S3C2450 frequency table\n",
- rate / 1000);
- s3c_freq->freq_table = s3c2450_freq_table;
- policy->cpuinfo.max_freq = 534000;
- }
-
- /* not needed anymore */
- clk_put(msysclk);
-
- if (s3c_freq->freq_table == NULL) {
- pr_err("cpufreq: No frequency information for this CPU, msysclk at %lukHz\n",
- rate / 1000);
- return -ENODEV;
- }
-
- s3c_freq->is_dvs = 0;
-
- s3c_freq->armdiv = clk_get(NULL, "armdiv");
- if (IS_ERR(s3c_freq->armdiv)) {
- ret = PTR_ERR(s3c_freq->armdiv);
- pr_err("cpufreq: Unable to obtain ARMDIV: %d\n", ret);
- return ret;
- }
-
- s3c_freq->hclk = clk_get(NULL, "hclk");
- if (IS_ERR(s3c_freq->hclk)) {
- ret = PTR_ERR(s3c_freq->hclk);
- pr_err("cpufreq: Unable to obtain HCLK: %d\n", ret);
- goto err_hclk;
- }
-
- /* check hclk rate; we only support the common 133MHz for now.
- * hclk could also run at 66MHz, but this is not often used.
- */
- rate = clk_get_rate(s3c_freq->hclk);
- if (rate < 133 * 1000 * 1000) {
- pr_err("cpufreq: HCLK not at 133MHz\n");
- ret = -EINVAL;
- goto err_armclk;
- }
-
- s3c_freq->armclk = clk_get(NULL, "armclk");
- if (IS_ERR(s3c_freq->armclk)) {
- ret = PTR_ERR(s3c_freq->armclk);
- pr_err("cpufreq: Unable to obtain ARMCLK: %d\n", ret);
- goto err_armclk;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- s3c_freq->vddarm = regulator_get(NULL, "vddarm");
- if (IS_ERR(s3c_freq->vddarm)) {
- ret = PTR_ERR(s3c_freq->vddarm);
- pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
- goto err_vddarm;
- }
-
- s3c2416_cpufreq_cfg_regulator(s3c_freq);
-#else
- s3c_freq->regulator_latency = 0;
-#endif
-
- cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
- /* special handling for dvs mode */
- if (pos->driver_data == 0) {
- if (!s3c_freq->hclk) {
- pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
- pos->frequency);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- } else {
- continue;
- }
- }
-
- /* Check for frequencies we can generate */
- rate = clk_round_rate(s3c_freq->armdiv,
- pos->frequency * 1000);
- rate /= 1000;
- if (rate != pos->frequency) {
- pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n",
- pos->frequency, rate);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- }
- }
-
- /* Datasheet says PLL stabilisation time must be at least 300us,
- * so add some fudge. (reference in LOCKCON0 register description)
- */
- cpufreq_generic_init(policy, s3c_freq->freq_table,
- (500 * 1000) + s3c_freq->regulator_latency);
- register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
-
- return 0;
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-err_vddarm:
- clk_put(s3c_freq->armclk);
-#endif
-err_armclk:
- clk_put(s3c_freq->hclk);
-err_hclk:
- clk_put(s3c_freq->armdiv);
-
- return ret;
-}
-
-static struct cpufreq_driver s3c2416_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = s3c2416_cpufreq_set_target,
- .get = s3c2416_cpufreq_get_speed,
- .init = s3c2416_cpufreq_driver_init,
- .name = "s3c2416",
- .attr = cpufreq_generic_attr,
-};
-
-static int __init s3c2416_cpufreq_init(void)
-{
- return cpufreq_register_driver(&s3c2416_cpufreq_driver);
-}
-module_init(s3c2416_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
deleted file mode 100644
index 2011fb9c03a4..000000000000
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ /dev/null
@@ -1,321 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- * Vincent Sanders <vince@simtec.co.uk>
- *
- * S3C2440/S3C2442 CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2440_CLKDIVN_PDIVN (1<<0)
-#define S3C2440_CLKDIVN_HDIVN_MASK (3<<1)
-#define S3C2440_CLKDIVN_HDIVN_1 (0<<1)
-#define S3C2440_CLKDIVN_HDIVN_2 (1<<1)
-#define S3C2440_CLKDIVN_HDIVN_4_8 (2<<1)
-#define S3C2440_CLKDIVN_HDIVN_3_6 (3<<1)
-#define S3C2440_CLKDIVN_UCLK (1<<3)
-
-#define S3C2440_CAMDIVN_CAMCLK_MASK (0xf<<0)
-#define S3C2440_CAMDIVN_CAMCLK_SEL (1<<4)
-#define S3C2440_CAMDIVN_HCLK3_HALF (1<<8)
-#define S3C2440_CAMDIVN_HCLK4_HALF (1<<9)
-#define S3C2440_CAMDIVN_DVSEN (1<<12)
-
-#define S3C2442_CAMDIVN_CAMCLK_DIV3 (1<<5)
-
-static struct clk *xtal;
-static struct clk *fclk;
-static struct clk *hclk;
-static struct clk *armclk;
-
-/* HDIV: 1, 2, 3, 4, 6, 8 */
-
-static inline int within_khz(unsigned long a, unsigned long b)
-{
- long diff = a - b;
-
- return (diff >= -1000 && diff <= 1000);
-}
-
-/**
- * s3c2440_cpufreq_calcdivs - calculate divider settings
- * @cfg: The cpu frequency settings.
- *
- * Calculate the divider values for the given frequency settings
- * specified in @cfg. The values are stored in @cfg for later use
- * by the relevant set routine if the request settings can be reached.
- */
-static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned int hdiv, pdiv;
- unsigned long hclk, fclk, armclk;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- armclk = cfg->freq.armclk;
- hclk_max = cfg->max.hclk;
-
- s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
- __func__, fclk, armclk, hclk_max);
-
- if (armclk > fclk) {
- pr_warn("%s: armclk > fclk\n", __func__);
- armclk = fclk;
- }
-
- /* if we are in DVS, we need HCLK to be <= ARMCLK */
- if (armclk < fclk && armclk < hclk_max)
- hclk_max = armclk;
-
- for (hdiv = 1; hdiv < 9; hdiv++) {
- if (hdiv == 5 || hdiv == 7)
- hdiv++;
-
- hclk = (fclk / hdiv);
- if (hclk <= hclk_max || within_khz(hclk, hclk_max))
- break;
- }
-
- s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
-
- if (hdiv > 8)
- goto invalid;
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
-
- if ((hclk / pdiv) > cfg->max.pclk)
- pdiv++;
-
- s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
-
- if (pdiv > 2)
- goto invalid;
-
- pdiv *= hdiv;
-
- /* calculate a valid armclk */
-
- if (armclk < hclk)
- armclk = hclk;
-
- /* if we're running armclk lower than fclk, this really means
- * that the system should go into dvs mode, which means that
- * armclk is connected to hclk. */
- if (armclk < fclk) {
- cfg->divs.dvs = 1;
- armclk = hclk;
- } else
- cfg->divs.dvs = 0;
-
- cfg->freq.armclk = armclk;
-
- /* store the result, and then return */
-
- cfg->divs.h_divisor = hdiv;
- cfg->divs.p_divisor = pdiv;
-
- return 0;
-
- invalid:
- return -EINVAL;
-}
-
-#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
- S3C2440_CAMDIVN_HCLK4_HALF)
-
-/**
- * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
- * @cfg: The cpu frequency settings.
- *
- * Set the divisors from the settings in @cfg, which were generated
- * during the calculation phase by s3c2440_cpufreq_calcdivs().
- */
-static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long clkdiv, camdiv;
-
- s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
- cfg->divs.h_divisor, cfg->divs.p_divisor);
-
- clkdiv = s3c24xx_read_clkdivn();
- camdiv = s3c2440_read_camdivn();
-
- clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
- camdiv &= ~CAMDIVN_HCLK_HALF;
-
- switch (cfg->divs.h_divisor) {
- case 1:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
- break;
-
- case 2:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
- break;
-
- case 6:
- camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
- fallthrough;
- case 3:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
- break;
-
- case 8:
- camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
- fallthrough;
- case 4:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
- break;
-
- default:
- BUG(); /* we don't expect to get here. */
- }
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2440_CLKDIVN_PDIVN;
-
- /* todo - set pclk. */
-
- /* Write the divisors first with hclk intentionally halved so that
- * when we write clkdiv we will under-frequency instead of over. We
- * then make a short delay and remove the hclk halving if necessary.
- */
-
- s3c2440_write_camdivn(camdiv | CAMDIVN_HCLK_HALF);
- s3c24xx_write_clkdivn(clkdiv);
-
- ndelay(20);
- s3c2440_write_camdivn(camdiv);
-
- clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
-}
-
-static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
- int *divs,
- struct cpufreq_frequency_table *table,
- size_t table_size)
-{
- unsigned long freq;
- int index = 0;
- int div;
-
- for (div = *divs; div > 0; div = *divs++) {
- freq = fclk / div;
-
- if (freq > max_hclk && div != 1)
- continue;
-
- freq /= 1000; /* table is in kHz */
- index = s3c_cpufreq_addfreq(table, index, table_size, freq);
- if (index < 0)
- break;
- }
-
- return index;
-}
-
-static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
-
-static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
- struct cpufreq_frequency_table *table,
- size_t table_size)
-{
- int ret;
-
- WARN_ON(cfg->info == NULL);
- WARN_ON(cfg->board == NULL);
-
- ret = run_freq_for(cfg->info->max.hclk,
- cfg->info->max.fclk,
- hclk_divs,
- table, table_size);
-
- s3c_freq_dbg("%s: returning %d\n", __func__, ret);
-
- return ret;
-}
-
-static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
- .max = {
- .fclk = 400000000,
- .hclk = 133333333,
- .pclk = 66666666,
- },
-
- .locktime_m = 300,
- .locktime_u = 300,
- .locktime_bits = 16,
-
- .name = "s3c244x",
- .calc_iotiming = s3c2410_iotiming_calc,
- .set_iotiming = s3c2410_iotiming_set,
- .get_iotiming = s3c2410_iotiming_get,
- .set_fvco = s3c2410_set_fvco,
-
- .set_refresh = s3c2410_cpufreq_setrefresh,
- .set_divs = s3c2440_cpufreq_setdivs,
- .calc_divs = s3c2440_cpufreq_calcdivs,
- .calc_freqtable = s3c2440_cpufreq_calctable,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
-};
-
-static int s3c2440_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- xtal = s3c_cpufreq_clk_get(NULL, "xtal");
- hclk = s3c_cpufreq_clk_get(NULL, "hclk");
- fclk = s3c_cpufreq_clk_get(NULL, "fclk");
- armclk = s3c_cpufreq_clk_get(NULL, "armclk");
-
- if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
- pr_err("%s: failed to get clocks\n", __func__);
- return -ENOENT;
- }
-
- return s3c_cpufreq_register(&s3c2440_cpufreq_info);
-}
-
-static struct subsys_interface s3c2440_cpufreq_interface = {
- .name = "s3c2440_cpufreq",
- .subsys = &s3c2440_subsys,
- .add_dev = s3c2440_cpufreq_add,
-};
-
-static int s3c2440_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2440_cpufreq_interface);
-}
-
-/* arch_initcall adds the clocks we need, so use subsys_initcall. */
-subsys_initcall(s3c2440_cpufreq_init);
-
-static struct subsys_interface s3c2442_cpufreq_interface = {
- .name = "s3c2442_cpufreq",
- .subsys = &s3c2442_subsys,
- .add_dev = s3c2440_cpufreq_add,
-};
-
-static int s3c2442_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2442_cpufreq_interface);
-}
-subsys_initcall(s3c2442_cpufreq_init);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
deleted file mode 100644
index 93971dfe7c75..000000000000
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX CPU Frequency scaling - debugfs status support
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/err.h>
-
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-
-static struct dentry *dbgfs_root;
-static struct dentry *dbgfs_file_io;
-static struct dentry *dbgfs_file_info;
-static struct dentry *dbgfs_file_board;
-
-#define print_ns(x) ((x) / 10), ((x) % 10)
-
-static void show_max(struct seq_file *seq, struct s3c_freq *f)
-{
- seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
- f->fclk, f->hclk, f->pclk, f->armclk);
-}
-
-static int board_show(struct seq_file *seq, void *p)
-{
- struct s3c_cpufreq_config *cfg;
- struct s3c_cpufreq_board *brd;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- brd = cfg->board;
- if (!brd) {
- seq_printf(seq, "no board definition set?\n");
- return 0;
- }
-
- seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
- seq_printf(seq, "auto_io=%u\n", brd->auto_io);
- seq_printf(seq, "need_io=%u\n", brd->need_io);
-
- show_max(seq, &brd->max);
-
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(board);
-
-static int info_show(struct seq_file *seq, void *p)
-{
- struct s3c_cpufreq_config *cfg;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
- seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
- cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
- seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.hclk);
- seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
- seq_printf(seq, "\n");
-
- show_max(seq, &cfg->max);
-
- seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
- cfg->divs.h_divisor, cfg->divs.p_divisor,
- cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
- seq_printf(seq, "\n");
-
- seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(info);
-
-static int io_show(struct seq_file *seq, void *p)
-{
- void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
- struct s3c_cpufreq_config *cfg;
- struct s3c_iotimings *iot;
- union s3c_iobank *iob;
- int bank;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- show_bank = cfg->info->debug_io_show;
- if (!show_bank) {
- seq_printf(seq, "no code to show bank timing\n");
- return 0;
- }
-
- iot = s3c_cpufreq_getiotimings();
- if (!iot) {
- seq_printf(seq, "no io timings registered\n");
- return 0;
- }
-
- seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
-
- for (bank = 0; bank < MAX_BANKS; bank++) {
- iob = &iot->bank[bank];
-
- seq_printf(seq, "bank %d: ", bank);
-
- if (!iob->io_2410) {
- seq_printf(seq, "nothing set\n");
- continue;
- }
-
- show_bank(seq, cfg, iob);
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(io);
-
-static int __init s3c_freq_debugfs_init(void)
-{
- dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
- if (IS_ERR(dbgfs_root)) {
- pr_err("%s: error creating debugfs root\n", __func__);
- return PTR_ERR(dbgfs_root);
- }
-
- dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
- NULL, &io_fops);
-
- dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
- NULL, &info_fops);
-
- dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
- NULL, &board_fops);
-
- return 0;
-}
-
-late_initcall(s3c_freq_debugfs_init);
-
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
deleted file mode 100644
index 7380c32b238e..000000000000
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ /dev/null
@@ -1,648 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/slab.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-/* note, cpufreq support deals in kHz, no Hz */
-static struct cpufreq_driver s3c24xx_driver;
-static struct s3c_cpufreq_config cpu_cur;
-static struct s3c_iotimings s3c24xx_iotiming;
-static struct cpufreq_frequency_table *pll_reg;
-static unsigned int last_target = ~0;
-static unsigned int ftab_size;
-static struct cpufreq_frequency_table *ftab;
-
-static struct clk *_clk_mpll;
-static struct clk *_clk_xtal;
-static struct clk *clk_fclk;
-static struct clk *clk_hclk;
-static struct clk *clk_pclk;
-static struct clk *clk_arm;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
-struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
-{
- return &cpu_cur;
-}
-
-struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
-{
- return &s3c24xx_iotiming;
-}
-#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */
-
-static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
-{
- unsigned long fclk, pclk, hclk, armclk;
-
- cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
- cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
- cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
- cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
-
- cfg->pll.driver_data = s3c24xx_read_mpllcon();
- cfg->pll.frequency = fclk;
-
- cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
-
- cfg->divs.h_divisor = fclk / hclk;
- cfg->divs.p_divisor = fclk / pclk;
-}
-
-static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
-{
- unsigned long pll = cfg->pll.frequency;
-
- cfg->freq.fclk = pll;
- cfg->freq.hclk = pll / cfg->divs.h_divisor;
- cfg->freq.pclk = pll / cfg->divs.p_divisor;
-
- /* convert hclk into 10ths of nanoseconds for io calcs */
- cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
-}
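
The hclk period is carried around in tenths of a nanosecond so the IO timing code can stay in integer arithmetic; the print_ns() macro in the debugfs file above splits that value back into whole and fractional nanoseconds. A minimal userspace sketch of the same arithmetic, using a made-up 133 MHz HCLK rather than anything read from hardware:

#include <stdio.h>

/* period in tenths of a nanosecond, as in s3c_cpufreq_calc() above */
static unsigned long hclk_tns(unsigned long hclk_hz)
{
	return 1000000000UL / (hclk_hz / 10);
}

int main(void)
{
	unsigned long tns = hclk_tns(133000000UL);	/* assumed 133 MHz HCLK */

	/* same whole/fraction split as the print_ns() macro */
	printf("hclk period is %lu.%lu ns\n", tns / 10, tns % 10);
	return 0;
}
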
-
-static inline int closer(unsigned int target, unsigned int n, unsigned int c)
-{
- int diff_cur = abs(target - c);
- int diff_new = abs(target - n);
-
- return (diff_new < diff_cur);
-}
-
-static void s3c_cpufreq_show(const char *pfx,
- struct s3c_cpufreq_config *cfg)
-{
- s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
- pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
- cfg->freq.hclk, cfg->divs.h_divisor,
- cfg->freq.pclk, cfg->divs.p_divisor);
-}
-
-/* functions to wrapper the driver info calls to do the cpu specific work */
-
-static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
-{
- if (cfg->info->set_iotiming)
- (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
-}
-
-static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
-{
- if (cfg->info->calc_iotiming)
- return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
-
- return 0;
-}
-
-static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
-{
- (cfg->info->set_refresh)(cfg);
-}
-
-static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- (cfg->info->set_divs)(cfg);
-}
-
-static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- return (cfg->info->calc_divs)(cfg);
-}
-
-static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
-{
- cfg->mpll = _clk_mpll;
- (cfg->info->set_fvco)(cfg);
-}
-
-static inline void s3c_cpufreq_updateclk(struct clk *clk,
- unsigned int freq)
-{
- clk_set_rate(clk, freq);
-}
-
-static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
- unsigned int target_freq,
- struct cpufreq_frequency_table *pll)
-{
- struct s3c_cpufreq_freqs freqs;
- struct s3c_cpufreq_config cpu_new;
- unsigned long flags;
-
- cpu_new = cpu_cur; /* copy new from current */
-
- s3c_cpufreq_show("cur", &cpu_cur);
-
- /* TODO - check for DMA currently outstanding */
-
- cpu_new.pll = pll ? *pll : cpu_cur.pll;
-
- if (pll)
- freqs.pll_changing = 1;
-
- /* update our frequencies */
-
- cpu_new.freq.armclk = target_freq;
- cpu_new.freq.fclk = cpu_new.pll.frequency;
-
- if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
- pr_err("no divisors for %d\n", target_freq);
- goto err_notpossible;
- }
-
- s3c_freq_dbg("%s: got divs\n", __func__);
-
- s3c_cpufreq_calc(&cpu_new);
-
- s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
-
- if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
- if (s3c_cpufreq_calcio(&cpu_new) < 0) {
- pr_err("%s: no IO timings\n", __func__);
- goto err_notpossible;
- }
- }
-
- s3c_cpufreq_show("new", &cpu_new);
-
- /* setup our cpufreq parameters */
-
- freqs.old = cpu_cur.freq;
- freqs.new = cpu_new.freq;
-
- freqs.freqs.old = cpu_cur.freq.armclk / 1000;
- freqs.freqs.new = cpu_new.freq.armclk / 1000;
-
- /* update f/h/p clock settings before we issue the change
- * notification, so that drivers do not need to do anything
- * special if they want to recalculate on CPUFREQ_PRECHANGE. */
-
- s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
- s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
- s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
- s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
-
- /* start the frequency change */
- cpufreq_freq_transition_begin(policy, &freqs.freqs);
-
- /* If hclk is staying the same, then we do not need to
- * re-write the IO or the refresh timings whilst we are changing
- * speed. */
-
- local_irq_save(flags);
-
- /* is our memory clock slowing down? */
- if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
- s3c_cpufreq_setrefresh(&cpu_new);
- s3c_cpufreq_setio(&cpu_new);
- }
-
- if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
- /* not changing PLL, just set the divisors */
-
- s3c_cpufreq_setdivs(&cpu_new);
- } else {
- if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
- /* slow the cpu down, then set divisors */
-
- s3c_cpufreq_setfvco(&cpu_new);
- s3c_cpufreq_setdivs(&cpu_new);
- } else {
- /* set the divisors, then speed up */
-
- s3c_cpufreq_setdivs(&cpu_new);
- s3c_cpufreq_setfvco(&cpu_new);
- }
- }
-
- /* did our memory clock speed up */
- if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
- s3c_cpufreq_setrefresh(&cpu_new);
- s3c_cpufreq_setio(&cpu_new);
- }
-
- /* update our current settings */
- cpu_cur = cpu_new;
-
- local_irq_restore(flags);
-
- /* notify everyone we've done this */
- cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
-
- s3c_freq_dbg("%s: finished\n", __func__);
- return 0;
-
- err_notpossible:
- pr_err("no compatible settings for %d\n", target_freq);
- return -EINVAL;
-}
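
The ordering above is the heart of s3c_cpufreq_settarget(): refresh and IO timings are rewritten before hclk slows down and after it speeds up, and the PLL is dropped before the divisors when slowing but raised after them when speeding up, so every intermediate state stays within spec. A small standalone model of that decision logic with the hardware writes replaced by printf (the function and the clock values below are illustrative, not the driver's):

#include <stdio.h>

static void retarget(unsigned long cur_fclk, unsigned long new_fclk,
		     unsigned long cur_hclk, unsigned long new_hclk)
{
	if (new_hclk < cur_hclk)
		printf("  rewrite refresh/IO timings (hclk slowing down)\n");

	if (new_fclk == cur_fclk)
		printf("  set divisors only\n");
	else if (new_fclk < cur_fclk)
		printf("  lower PLL, then set divisors\n");
	else
		printf("  set divisors, then raise PLL\n");

	if (new_hclk > cur_hclk)
		printf("  rewrite refresh/IO timings (hclk sped up)\n");
}

int main(void)
{
	printf("slowing down:\n");
	retarget(400000000, 200000000, 133000000, 66000000);
	printf("speeding up:\n");
	retarget(200000000, 400000000, 66000000, 133000000);
	return 0;
}
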
-
-/* s3c_cpufreq_target
- *
- * called by the cpufreq core to adjust the frequency that the CPU
- * is currently running at.
- */
-
-static int s3c_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_frequency_table *pll;
- unsigned int index;
-
-	/* avoid repeated calls which cause a needless amount of duplicated
- * logging output (and CPU time as the calculation process is
- * done) */
- if (target_freq == last_target)
- return 0;
-
- last_target = target_freq;
-
- s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
- __func__, policy, target_freq, relation);
-
- if (ftab) {
- index = cpufreq_frequency_table_target(policy, target_freq,
- relation);
-
- s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
- target_freq, index, ftab[index].frequency);
- target_freq = ftab[index].frequency;
- }
-
- target_freq *= 1000; /* convert target to Hz */
-
- /* find the settings for our new frequency */
-
- if (!pll_reg || cpu_cur.lock_pll) {
- /* either we've not got any PLL values, or we've locked
- * to the current one. */
- pll = NULL;
- } else {
- struct cpufreq_policy tmp_policy;
-
- /* we keep the cpu pll table in Hz, to ensure we get an
- * accurate value for the PLL output. */
-
- tmp_policy.min = policy->min * 1000;
- tmp_policy.max = policy->max * 1000;
- tmp_policy.cpu = policy->cpu;
- tmp_policy.freq_table = pll_reg;
-
- /* cpufreq_frequency_table_target returns the index
- * of the table entry, not the value of
- * the table entry's index field. */
-
- index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
- relation);
- pll = pll_reg + index;
-
- s3c_freq_dbg("%s: target %u => %u\n",
- __func__, target_freq, pll->frequency);
-
- target_freq = pll->frequency;
- }
-
- return s3c_cpufreq_settarget(policy, target_freq, pll);
-}
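
The comments above make the distinction that trips people up: cpufreq_frequency_table_target() hands back the array index of the chosen entry, and the PLL table is kept in Hz so the match is exact. A toy version of that lookup, reduced to a simple nearest-match rule (the real helper also honours the relation argument) and using invented PLL values:

#include <stdio.h>

#define TABLE_END	~0u

struct freq_entry {
	unsigned int driver_data;	/* PLL register setting */
	unsigned int frequency;		/* output rate, kept in Hz */
};

static const struct freq_entry pll_reg[] = {
	{ 0x1, 200000000 },
	{ 0x2, 300000000 },
	{ 0x3, 400000000 },
	{ 0x0, TABLE_END },
};

/* returns the index of the closest entry, not its driver_data */
static unsigned int table_target(unsigned int target)
{
	unsigned int best = 0, i;

	for (i = 0; pll_reg[i].frequency != TABLE_END; i++) {
		unsigned int db = pll_reg[best].frequency > target ?
			pll_reg[best].frequency - target : target - pll_reg[best].frequency;
		unsigned int di = pll_reg[i].frequency > target ?
			pll_reg[i].frequency - target : target - pll_reg[i].frequency;

		if (di < db)
			best = i;
	}
	return best;
}

int main(void)
{
	unsigned int index = table_target(340000000);
	const struct freq_entry *pll = pll_reg + index;	/* as in: pll = pll_reg + index */

	printf("index %u -> %u Hz (driver_data 0x%x)\n",
	       index, pll->frequency, pll->driver_data);
	return 0;
}
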
-
-struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
-{
- struct clk *clk;
-
- clk = clk_get(dev, name);
- if (IS_ERR(clk))
- pr_err("failed to get clock '%s'\n", name);
-
- return clk;
-}
-
-static int s3c_cpufreq_init(struct cpufreq_policy *policy)
-{
- policy->clk = clk_arm;
- policy->cpuinfo.transition_latency = cpu_cur.info->latency;
- policy->freq_table = ftab;
-
- return 0;
-}
-
-static int __init s3c_cpufreq_initclks(void)
-{
- _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
- _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
- clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
- clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
- clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
- clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
-
- if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
- IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
- pr_err("%s: could not get clock(s)\n", __func__);
- return -ENOENT;
- }
-
- pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
- __func__,
- clk_get_rate(clk_fclk) / 1000,
- clk_get_rate(clk_hclk) / 1000,
- clk_get_rate(clk_pclk) / 1000,
- clk_get_rate(clk_arm) / 1000);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static struct cpufreq_frequency_table suspend_pll;
-static unsigned int suspend_freq;
-
-static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- suspend_pll.frequency = clk_get_rate(_clk_mpll);
- suspend_pll.driver_data = s3c24xx_read_mpllcon();
- suspend_freq = clk_get_rate(clk_arm);
-
- return 0;
-}
-
-static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
-
- last_target = ~0; /* invalidate last_target setting */
-
- /* whilst we will be called later on, we try and re-set the
- * cpu frequencies as soon as possible so that we do not end
- * up resuming devices and then immediately having to re-set
- * a number of settings once these devices have restarted.
- *
- * as a note, it is expected devices are not used until they
- * have been un-suspended and at that time they should have
- * used the updated clock settings.
- */
-
- ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
- if (ret) {
- pr_err("%s: failed to reset pll/freq\n", __func__);
- return ret;
- }
-
- return 0;
-}
-#else
-#define s3c_cpufreq_resume NULL
-#define s3c_cpufreq_suspend NULL
-#endif
-
-static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .target = s3c_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = s3c_cpufreq_init,
- .suspend = s3c_cpufreq_suspend,
- .resume = s3c_cpufreq_resume,
- .name = "s3c24xx",
-};
-
-
-int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
-{
- if (!info || !info->name) {
- pr_err("%s: failed to pass valid information\n", __func__);
- return -EINVAL;
- }
-
- pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
- info->name);
-
- /* check our driver info has valid data */
-
- BUG_ON(info->set_refresh == NULL);
- BUG_ON(info->set_divs == NULL);
- BUG_ON(info->calc_divs == NULL);
-
- /* info->set_fvco is optional, depending on whether there
- * is a need to set the clock code. */
-
- cpu_cur.info = info;
-
- /* Note, driver registering should probably update locktime */
-
- return 0;
-}
-
-int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
-{
- struct s3c_cpufreq_board *ours;
-
- if (!board) {
- pr_info("%s: no board data\n", __func__);
- return -EINVAL;
- }
-
- /* Copy the board information so that each board can make this
- * initdata. */
-
- ours = kzalloc(sizeof(*ours), GFP_KERNEL);
- if (!ours)
- return -ENOMEM;
-
- *ours = *board;
- cpu_cur.board = ours;
-
- return 0;
-}
-
-static int __init s3c_cpufreq_auto_io(void)
-{
- int ret;
-
- if (!cpu_cur.info->get_iotiming) {
- pr_err("%s: get_iotiming undefined\n", __func__);
- return -ENOENT;
- }
-
- pr_info("%s: working out IO settings\n", __func__);
-
- ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
- if (ret)
- pr_err("%s: failed to get timings\n", __func__);
-
- return ret;
-}
-
-/* if one or the other is zero, then return the other, otherwise return the min */
-#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
-
-/**
- * s3c_cpufreq_freq_min - find the minimum settings for the given freq.
- * @dst: The destination structure
- * @a: One argument.
- * @b: The other argument.
- *
- * Create a minimum of each frequency entry in the 'struct s3c_freq',
- * unless the entry is zero when it is ignored and the non-zero argument
- * used.
- */
-static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
- struct s3c_freq *a, struct s3c_freq *b)
-{
- dst->fclk = do_min(a->fclk, b->fclk);
- dst->hclk = do_min(a->hclk, b->hclk);
- dst->pclk = do_min(a->pclk, b->pclk);
- dst->armclk = do_min(a->armclk, b->armclk);
-}
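
do_min() treats a zero field as "no limit", so a board or SoC description can leave entries unset and the merged maximum is still sensible. A throwaway userspace check of that behaviour (the kernel macro ends in min(); here it is spelled out with a ternary, and the numbers are invented):

#include <stdio.h>

/* if one or the other is zero, return the other, otherwise return the min */
#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : \
			((_a) < (_b) ? (_a) : (_b)))

int main(void)
{
	unsigned long board_armclk = 0;		/* board places no limit */
	unsigned long info_armclk = 400000000;	/* SoC caps armclk at 400 MHz */
	unsigned long board_hclk = 100000000;	/* board limits hclk to 100 MHz */
	unsigned long info_hclk = 133000000;

	printf("armclk max = %lu\n", do_min(board_armclk, info_armclk));
	printf("hclk   max = %lu\n", do_min(board_hclk, info_hclk));
	return 0;
}
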
-
-static inline u32 calc_locktime(u32 freq, u32 time_us)
-{
- u32 result;
-
- result = freq * time_us;
- result = DIV_ROUND_UP(result, 1000 * 1000);
-
- return result;
-}
-
-static void s3c_cpufreq_update_loctkime(void)
-{
- unsigned int bits = cpu_cur.info->locktime_bits;
- u32 rate = (u32)clk_get_rate(_clk_xtal);
- u32 val;
-
- if (bits == 0) {
- WARN_ON(1);
- return;
- }
-
- val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
- val |= calc_locktime(rate, cpu_cur.info->locktime_m);
-
- pr_info("%s: new locktime is 0x%08x\n", __func__, val);
- s3c24xx_write_locktime(val);
-}
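
calc_locktime() is just "crystal rate times lock time in microseconds, rounded up to whole cycles", and the two counts are then packed into a single value, the locktime_u count shifted up by locktime_bits with the locktime_m count in the low bits. The same arithmetic worked through with assumed numbers (12 MHz crystal, 300 us lock times, 16-bit fields; none of these come from a datasheet):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int calc_locktime(unsigned int freq, unsigned int time_us)
{
	return DIV_ROUND_UP(freq * time_us, 1000 * 1000);
}

int main(void)
{
	unsigned int xtal = 12000000;	/* assumed 12 MHz crystal */
	unsigned int bits = 16;		/* assumed field width */
	unsigned int val;

	val  = calc_locktime(xtal, 300) << bits;	/* upper count (locktime_u) */
	val |= calc_locktime(xtal, 300);		/* lower count (locktime_m) */

	printf("locktime value = 0x%08x\n", val);	/* prints 0x0e100e10 */
	return 0;
}
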
-
-static int s3c_cpufreq_build_freq(void)
-{
- int size, ret;
-
- kfree(ftab);
-
- size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
- size++;
-
- ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL);
- if (!ftab)
- return -ENOMEM;
-
- ftab_size = size;
-
- ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
- s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
-
- return 0;
-}
-
-static int __init s3c_cpufreq_initcall(void)
-{
- int ret = 0;
-
- if (cpu_cur.info && cpu_cur.board) {
- ret = s3c_cpufreq_initclks();
- if (ret)
- goto out;
-
- /* get current settings */
- s3c_cpufreq_getcur(&cpu_cur);
- s3c_cpufreq_show("cur", &cpu_cur);
-
- if (cpu_cur.board->auto_io) {
- ret = s3c_cpufreq_auto_io();
- if (ret) {
- pr_err("%s: failed to get io timing\n",
- __func__);
- goto out;
- }
- }
-
- if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
- pr_err("%s: no IO support registered\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- if (!cpu_cur.info->need_pll)
- cpu_cur.lock_pll = 1;
-
- s3c_cpufreq_update_loctkime();
-
- s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
- &cpu_cur.info->max);
-
- if (cpu_cur.info->calc_freqtable)
- s3c_cpufreq_build_freq();
-
- ret = cpufreq_register_driver(&s3c24xx_driver);
- }
-
- out:
- return ret;
-}
-
-late_initcall(s3c_cpufreq_initcall);
-
-/**
- * s3c_plltab_register - register CPU PLL table.
- * @plls: The list of PLL entries.
- * @plls_no: The size of the PLL entries @plls.
- *
- * Register the given set of PLLs with the system.
- */
-int s3c_plltab_register(struct cpufreq_frequency_table *plls,
- unsigned int plls_no)
-{
- struct cpufreq_frequency_table *vals;
- unsigned int size;
-
- size = sizeof(*vals) * (plls_no + 1);
-
- vals = kzalloc(size, GFP_KERNEL);
- if (vals) {
- memcpy(vals, plls, size);
- pll_reg = vals;
-
- /* write a terminating entry, we don't store it in the
- * table that is stored in the kernel */
- vals += plls_no;
- vals->frequency = CPUFREQ_TABLE_END;
-
- pr_info("%d PLL entries\n", plls_no);
- } else
- pr_err("no memory for PLL tables\n");
-
- return vals ? 0 : -ENOMEM;
-}
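
s3c_plltab_register() copies the caller's table and appends a CPUFREQ_TABLE_END terminator instead of storing a count, which is what every walker of the table expects. A self-contained userspace illustration of that copy-and-terminate pattern (entry layout and values are made up for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_END	~0u	/* stand-in for CPUFREQ_TABLE_END */

struct pll_entry {
	unsigned int driver_data;	/* PLL register value */
	unsigned int frequency;		/* output rate in kHz */
};

static struct pll_entry *plltab_register(const struct pll_entry *plls,
					 unsigned int plls_no)
{
	struct pll_entry *vals = calloc(plls_no + 1, sizeof(*vals));

	if (!vals)
		return NULL;

	memcpy(vals, plls, plls_no * sizeof(*vals));
	vals[plls_no].frequency = TABLE_END;	/* terminating entry */
	return vals;
}

int main(void)
{
	static const struct pll_entry plls[] = {
		{ 0x00043011, 200000 },
		{ 0x000a1031, 400000 },
	};
	struct pll_entry *tab = plltab_register(plls, 2);
	struct pll_entry *p;

	for (p = tab; p && p->frequency != TABLE_END; p++)
		printf("%u kHz (reg 0x%08x)\n", p->frequency, p->driver_data);

	free(tab);
	return 0;
}
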
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
deleted file mode 100644
index 252b9fc26124..000000000000
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpu-sa1100.c: clock scaling for the SA1100
- *
- * Copyright (C) 2000 2001, The Delft University of Technology
- *
- * Authors:
- * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
- * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
- * - major rewrite for linux-2.3.99
- * - rewritten for the more generic power management scheme in
- * linux-2.4.5-rmk1
- *
- * This software has been developed while working on the LART
- * computing board (http://www.lartmaker.nl/), which is
- * sponsored by the Mobile Multi-media Communications
- * (http://www.mobimedia.org/) and Ubiquitous Communications
- * (http://www.ubicom.tudelft.nl/) projects.
- *
- * The authors can be reached at:
- *
- * Erik Mouw
- * Information and Communication Theory Group
- * Faculty of Information Technology and Systems
- * Delft University of Technology
- * P.O. Box 5031
- * 2600 GA Delft
- * The Netherlands
- *
- * Theory of operations
- * ====================
- *
- * Clock scaling can be used to lower the power consumption of the CPU
- * core. This will give you a somewhat longer running time.
- *
- * The SA-1100 has a single register to change the core clock speed:
- *
- * PPCR 0x90020014 PLL config
- *
- * However, the DRAM timings are closely related to the core clock
- * speed, so we need to change these, too. The used registers are:
- *
- * MDCNFG 0xA0000000 DRAM config
- * MDCAS0 0xA0000004 Access waveform
- * MDCAS1 0xA0000008 Access waveform
- * MDCAS2 0xA000000C Access waveform
- *
- * Care must be taken to change the DRAM parameters the correct way,
- * because otherwise the DRAM becomes unusable and the kernel will
- * crash.
- *
- * The simple solution to avoid a kernel crash is to put the actual
- * clock change in ROM and jump to that code from the kernel. The main
- * disadvantage is that the ROM has to be modified, which is not
- * possible on all SA-1100 platforms. Another disadvantage is that
- * jumping to ROM makes clock switching unnecessarily complicated.
- *
- * The idea behind this driver is that the memory configuration can be
- * changed while running from DRAM (even with interrupts turned on!)
- * as long as all re-configuration steps yield a valid DRAM
- * configuration. The advantages are clear: it will run on all SA-1100
- * platforms, and the code is very simple.
- *
- * If you really want to understand what is going on in
- * sa1100_update_dram_timings(), you'll have to read sections 8.2,
- * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
- * Developers Manual" (available for free from Intel).
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/io.h>
-
-#include <asm/cputype.h>
-
-#include <mach/generic.h>
-#include <mach/hardware.h>
-
-struct sa1100_dram_regs {
- int speed;
- u32 mdcnfg;
- u32 mdcas0;
- u32 mdcas1;
- u32 mdcas2;
-};
-
-
-static struct cpufreq_driver sa1100_driver;
-
-static struct sa1100_dram_regs sa1100_dram_settings[] = {
- /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
- { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
- { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
- { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
- {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
- {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
- {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
- {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
- {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
- {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
- {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
- {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
- {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
- {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
- {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
- {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
- {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
- { 0, 0, 0, 0, 0 } /* last entry */
-};
-
-static void sa1100_update_dram_timings(int current_speed, int new_speed)
-{
- struct sa1100_dram_regs *settings = sa1100_dram_settings;
-
- /* find speed */
- while (settings->speed != 0) {
- if (new_speed == settings->speed)
- break;
-
- settings++;
- }
-
- if (settings->speed == 0) {
- panic("%s: couldn't find dram setting for speed %d\n",
- __func__, new_speed);
- }
-
- /* No risk, no fun: run with interrupts on! */
- if (new_speed > current_speed) {
- /* We're going FASTER, so first relax the memory
- * timings before changing the core frequency
- */
-
- /* Half the memory access clock */
- MDCNFG |= MDCNFG_CDB2;
-
- /* The order of these statements IS important, keep 8
- * pulses!!
- */
- MDCAS2 = settings->mdcas2;
- MDCAS1 = settings->mdcas1;
- MDCAS0 = settings->mdcas0;
- MDCNFG = settings->mdcnfg;
- } else {
- /* We're going SLOWER: first decrease the core
- * frequency and then tighten the memory settings.
- */
-
- /* Half the memory access clock */
- MDCNFG |= MDCNFG_CDB2;
-
- /* The order of these statements IS important, keep 8
- * pulses!!
- */
- MDCAS0 = settings->mdcas0;
- MDCAS1 = settings->mdcas1;
- MDCAS2 = settings->mdcas2;
- MDCNFG = settings->mdcnfg;
- }
-}
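
This follows the rule laid out in the header comment: relax the DRAM timings before raising the core clock, tighten them only after lowering it, so every intermediate combination is a legal DRAM configuration. A tiny host-side model of the lookup and the ordering, with register writes replaced by printf and only a few rows kept from the table above:

#include <stdio.h>

struct dram_regs {
	int speed;		/* kHz */
	unsigned int mdcnfg;
};

static const struct dram_regs settings[] = {
	{  59000, 0x00dc88a3 },
	{ 147500, 0x02352123 },
	{ 280200, 0x0431c2a3 },
	{ 0, 0 }		/* last entry */
};

static void change_speed(int cur, int new_speed)
{
	const struct dram_regs *s = settings;

	while (s->speed && s->speed != new_speed)
		s++;
	if (!s->speed) {
		printf("no DRAM setting for %d kHz\n", new_speed);
		return;
	}

	if (new_speed > cur)
		printf("relax timings (MDCNFG 0x%08x), then raise the clock\n", s->mdcnfg);
	else
		printf("lower the clock, then tighten timings (MDCNFG 0x%08x)\n", s->mdcnfg);
}

int main(void)
{
	change_speed(59000, 147500);	/* speeding up */
	change_speed(280200, 147500);	/* slowing down */
	return 0;
}
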
-
-static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
-{
- unsigned int cur = sa11x0_getspeed(0);
- unsigned int new_freq;
-
- new_freq = sa11x0_freq_table[ppcr].frequency;
-
- if (new_freq > cur)
- sa1100_update_dram_timings(cur, new_freq);
-
- PPCR = ppcr;
-
- if (new_freq < cur)
- sa1100_update_dram_timings(cur, new_freq);
-
- return 0;
-}
-
-static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
-{
- cpufreq_generic_init(policy, sa11x0_freq_table, 0);
- return 0;
-}
-
-static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = sa1100_target,
- .get = sa11x0_getspeed,
- .init = sa1100_cpu_init,
- .name = "sa1100",
-};
-
-static int __init sa1100_dram_init(void)
-{
- if (cpu_is_sa1100())
- return cpufreq_register_driver(&sa1100_driver);
- else
- return -ENODEV;
-}
-
-arch_initcall(sa1100_dram_init);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 1a83c8678a63..bb7f591a8b05 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -344,14 +344,8 @@ static int __init sa1110_clk_init(void)
if (!name[0]) {
if (machine_is_assabet())
name = "TC59SM716-CL3";
- if (machine_is_pt_system3())
- name = "K4S641632D";
- if (machine_is_h3100())
- name = "KM416S4030CT";
if (machine_is_jornada720() || machine_is_h3600())
name = "K4S281632B-1H";
- if (machine_is_nanoengine())
- name = "MT48LC8M16A2TG-75";
}
sdram = sa1110_find_sdram(name);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 513a071845c2..f34e6382a4c5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -310,7 +310,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
- if (of_find_property(dev->of_node, "#clock-cells", NULL))
+ if (of_property_present(dev->of_node, "#clock-cells"))
devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
#endif
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index c6fdf019dbde..78b875db6b66 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 1583a370da39..4321d7bbe769 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -10,9 +10,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 7a1ea6fdcab6..aae951d4e77c 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
@@ -221,4 +220,3 @@ module_init(tegra_cpufreq_init);
MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 4596c3e323aa..c8d03346068a 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -12,6 +12,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <asm/smp_plat.h>
@@ -65,12 +66,36 @@ struct tegra_cpufreq_soc {
struct tegra194_cpufreq_data {
void __iomem *regs;
- struct cpufreq_frequency_table **tables;
+ struct cpufreq_frequency_table **bpmp_luts;
const struct tegra_cpufreq_soc *soc;
+ bool icc_dram_bw_scaling;
};
static struct workqueue_struct *read_counters_wq;
+static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ struct dev_pm_opp *opp;
+ struct device *dev;
+ int ret;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq_khz * KHZ, true);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret)
+ data->icc_dram_bw_scaling = false;
+
+ dev_pm_opp_put(opp);
+ return ret;
+}
+
static void tegra_get_cpu_mpidr(void *mpidr)
{
*((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
@@ -354,7 +379,7 @@ static unsigned int tegra194_get_speed(u32 cpu)
* to the last written ndiv value from freq_table. This is
* done to return consistent value.
*/
- cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) {
+ cpufreq_for_each_valid_entry(pos, data->bpmp_luts[clusterid]) {
if (pos->driver_data != ndiv)
continue;
@@ -369,16 +394,93 @@ static unsigned int tegra194_get_speed(u32 cpu)
return rate;
}
+static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *bpmp_lut,
+ struct cpufreq_frequency_table **opp_table)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *freq_table = NULL;
+ struct cpufreq_frequency_table *pos;
+ struct device *cpu_dev;
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+ int ret, max_opps;
+ int j = 0;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
+ return -ENODEV;
+ }
+
+ /* Initialize OPP table mentioned in operating-points-v2 property in DT */
+ ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
+ if (!ret) {
+ max_opps = dev_pm_opp_get_opp_count(cpu_dev);
+ if (max_opps <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ return max_opps;
+ }
+
+ /* Disable all opps and cross-validate against LUT later */
+ for (rate = 0; ; rate++) {
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ dev_pm_opp_disable(cpu_dev, rate);
+ }
+ } else {
+ dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
+ data->icc_dram_bw_scaling = false;
+ return ret;
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
+ if (!freq_table)
+ return -ENOMEM;
+
+ /*
+ * Cross check the frequencies from BPMP-FW LUT against the OPP's present in DT.
+ * Enable only those DT OPP's which are present in LUT also.
+ */
+ cpufreq_for_each_valid_entry(pos, bpmp_lut) {
+ opp = dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * KHZ, false);
+ if (IS_ERR(opp))
+ continue;
+
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
+ if (ret < 0)
+ return ret;
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = pos->frequency;
+ j++;
+ }
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = CPUFREQ_TABLE_END;
+
+ *opp_table = &freq_table[0];
+
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+ return ret;
+}
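
The new table is effectively the intersection of what the BPMP firmware LUT advertises and what the device tree OPPs describe: everything in the OPP table is disabled first, and an entry is re-enabled and copied only when the firmware lists it too. A userspace sketch of that intersection with plain arrays standing in for the OPP library (the frequencies are invented):

#include <stdio.h>

#define TABLE_END	~0u

/* kHz values reported by firmware (stand-in for the BPMP LUT) */
static const unsigned int bpmp_lut[] = { 729600, 1190400, 1651200, 2201600, TABLE_END };
/* kHz values described by the device tree OPP table */
static const unsigned int dt_opps[] = { 729600, 1190400, 2201600 };

static int in_dt(unsigned int khz)
{
	unsigned int i;

	for (i = 0; i < sizeof(dt_opps) / sizeof(dt_opps[0]); i++)
		if (dt_opps[i] == khz)
			return 1;
	return 0;
}

int main(void)
{
	unsigned int freq_table[8];
	unsigned int j = 0;
	const unsigned int *pos;

	for (pos = bpmp_lut; *pos != TABLE_END; pos++)
		if (in_dt(*pos))
			freq_table[j++] = *pos;	/* re-enable and copy, as the driver does */

	freq_table[j] = TABLE_END;

	for (j = 0; freq_table[j] != TABLE_END; j++)
		printf("%u kHz\n", freq_table[j]);
	return 0;
}
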
+
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 start_cpu, cpu;
u32 clusterid;
+ int ret;
data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
-
- if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
+ if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
return -EINVAL;
start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
@@ -387,9 +489,22 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
if (cpu_possible(cpu))
cpumask_set_cpu(cpu, policy->cpus);
}
- policy->freq_table = data->tables[clusterid];
policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;
+ bpmp_lut = data->bpmp_luts[clusterid];
+
+ if (data->icc_dram_bw_scaling) {
+ ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
+ if (!ret) {
+ policy->freq_table = freq_table;
+ return 0;
+ }
+ }
+
+ data->icc_dram_bw_scaling = false;
+ policy->freq_table = bpmp_lut;
+ pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
+
return 0;
}
@@ -406,12 +521,16 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
*/
data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);
+ if (data->icc_dram_bw_scaling)
+ tegra_cpufreq_set_bw(policy, tbl->frequency);
+
return 0;
}
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
@@ -438,8 +557,8 @@ static void tegra194_cpufreq_free_resources(void)
}
static struct cpufreq_frequency_table *
-init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
- unsigned int cluster_id)
+tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpmp,
+ unsigned int cluster_id)
{
struct cpufreq_frequency_table *freq_table;
struct mrq_cpu_ndiv_limits_response resp;
@@ -514,6 +633,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
const struct tegra_cpufreq_soc *soc;
struct tegra194_cpufreq_data *data;
struct tegra_bpmp *bpmp;
+ struct device *cpu_dev;
int err, i;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -529,9 +649,9 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
return -EINVAL;
}
- data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
- sizeof(*data->tables), GFP_KERNEL);
- if (!data->tables)
+ data->bpmp_luts = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
+ sizeof(*data->bpmp_luts), GFP_KERNEL);
+ if (!data->bpmp_luts)
return -ENOMEM;
if (soc->actmon_cntr_base) {
@@ -555,15 +675,26 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
}
for (i = 0; i < data->soc->num_clusters; i++) {
- data->tables[i] = init_freq_table(pdev, bpmp, i);
- if (IS_ERR(data->tables[i])) {
- err = PTR_ERR(data->tables[i]);
+ data->bpmp_luts[i] = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, i);
+ if (IS_ERR(data->bpmp_luts[i])) {
+ err = PTR_ERR(data->bpmp_luts[i]);
goto err_free_res;
}
}
tegra194_cpufreq_driver.driver_data = data;
+ /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -EPROBE_DEFER;
+
+ if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
+ err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (!err)
+ data->icc_dram_bw_scaling = true;
+ }
+
err = cpufreq_register_driver(&tegra194_cpufreq_driver);
if (!err)
goto put_bpmp;
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index ab7ac7df9e62..a573186704a5 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/types.h>
@@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void)
struct device_node *np = of_cpu_device_node_get(0);
bool ret = false;
- if (of_get_property(np, "operating-points-v2", NULL))
+ if (of_property_present(np, "operating-points-v2"))
ret = true;
of_node_put(np);
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index ff71dd662880..cac5997dca50 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -74,6 +74,7 @@ endmenu
config HALTPOLL_CPUIDLE
tristate "Halt poll cpuidle driver"
depends on X86 && KVM_GUEST
+ select CPU_IDLE_GOV_HALTPOLL
default y
help
This option enables halt poll cpuidle driver, which allows to poll
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 747aa537389b..a1ee475d180d 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -24,6 +24,14 @@ config ARM_PSCI_CPUIDLE
It provides an idle driver that is capable of detecting and
managing idle states through the PSCI firmware interface.
+ The driver has limitations when used with PREEMPT_RT:
+ - If the idle states are described with the non-hierarchical layout,
+ all idle states are still available.
+
+ - If the idle states are described with the hierarchical layout,
+ only the idle states defined per CPU are available, but not the ones
+ being shared among a group of CPUs (aka cluster idle states).
+
config ARM_PSCI_CPUIDLE_DOMAIN
bool "PSCI CPU idle Domain"
depends on ARM_PSCI_CPUIDLE
@@ -102,6 +110,7 @@ config ARM_MVEBU_V7_CPUIDLE
config ARM_TEGRA_CPUIDLE
bool "CPU Idle Driver for NVIDIA Tegra SoCs"
depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
+ depends on ARCH_SUSPEND_POSSIBLE
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND
help
@@ -110,6 +119,7 @@ config ARM_TEGRA_CPUIDLE
config ARM_QCOM_SPM_CPUIDLE
bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
+ depends on ARCH_SUSPEND_POSSIBLE
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 8c758920d699..7cfb980a357d 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -31,8 +31,8 @@
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int arm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int arm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
/*
* Pass idle state index to arm_cpuidle_suspend which in turn
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index abe51185f243..74972deda0ea 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -64,7 +64,8 @@ static struct cpuidle_driver bl_idle_little_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 700,
.target_residency = 2500,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM little-cluster power down",
},
@@ -85,7 +86,8 @@ static struct cpuidle_driver bl_idle_big_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 500,
.target_residency = 2000,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM big-cluster power down",
},
@@ -120,15 +122,17 @@ static int notrace bl_powerdown_finisher(unsigned long arg)
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int bl_enter_powerdown(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
cpu_pm_enter();
+ ct_cpuidle_enter();
cpu_suspend(0, bl_powerdown_finisher);
/* signals the MCPM core that CPU is out of low power state */
mcpm_cpu_powered_up();
+ ct_cpuidle_exit();
cpu_pm_exit();
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index 3a39a7f48b77..e66df22f9695 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -32,7 +32,7 @@ static int default_enter_idle(struct cpuidle_device *dev,
local_irq_enable();
return index;
}
- default_idle();
+ arch_cpu_idle();
return index;
}
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 01a856971f05..563dba609b98 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -25,9 +25,9 @@
static int (*mvebu_v7_cpu_suspend)(int);
-static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int ret;
bool deepidle = false;
@@ -36,7 +36,10 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)
deepidle = true;
+ ct_cpuidle_enter();
ret = mvebu_v7_cpu_suspend(deepidle);
+ ct_cpuidle_exit();
+
cpu_pm_exit();
if (ret)
@@ -53,6 +56,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 100,
.power_usage = 50,
.target_residency = 1000,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU IDLE",
.desc = "CPU power down",
},
@@ -61,7 +65,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 1000,
.power_usage = 5,
.target_residency = 10000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU DEEP IDLE",
.desc = "CPU and L2 Fabric power down",
},
@@ -76,7 +80,7 @@ static struct cpuidle_driver armada370_idle_driver = {
.exit_latency = 100,
.power_usage = 5,
.target_residency = 1000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "Deep Idle",
.desc = "CPU and L2 Fabric power down",
},
@@ -91,6 +95,7 @@ static struct cpuidle_driver armada38x_idle_driver = {
.exit_latency = 10,
.power_usage = 5,
.target_residency = 100,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "Idle",
.desc = "CPU and SCU power down",
},
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index c80cf9ddabd8..c2d6d9c3c930 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -64,8 +64,11 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
- /* Allow power off when OSI has been successfully enabled. */
- if (use_osi)
+ /*
+ * Allow power off when OSI has been successfully enabled.
+ * PREEMPT_RT is not yet ready to enter domain idle states.
+ */
+ if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT))
pd->power_off = psci_pd_power_off;
else
pd->flags |= GENPD_FLAG_ALWAYS_ON;
@@ -103,7 +106,8 @@ static void psci_pd_remove(void)
struct psci_pd_provider *pd_provider, *it;
struct generic_pm_domain *genpd;
- list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+ list_for_each_entry_safe_reverse(pd_provider, it,
+ &psci_pd_providers, link) {
of_genpd_del_provider(pd_provider->node);
genpd = of_genpd_remove_last(pd_provider->node);
@@ -162,7 +166,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
* initialize a genpd/genpd-of-provider pair when it's found.
*/
for_each_child_of_node(np, node) {
- if (!of_find_property(node, "#power-domain-cells", NULL))
+ if (!of_property_present(node, "#power-domain-cells"))
continue;
ret = psci_pd_init(node, use_osi);
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 57bc3e3ae391..bf68920d038a 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
@@ -49,14 +48,9 @@ static inline u32 psci_get_domain_state(void)
return __this_cpu_read(domain_state);
}
-static inline int psci_enter_state(int idx, u32 state)
-{
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
-}
-
-static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
@@ -69,12 +63,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
state = psci_get_domain_state();
if (!state)
@@ -82,12 +74,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -192,12 +182,12 @@ static void psci_idle_init_cpuhp(void)
pr_warn("Failed %d while setup cpuhp state\n", err);
}
-static int psci_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
- return psci_enter_state(idx, state[idx]);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}
static const struct of_device_id psci_idle_state_match[] = {
@@ -231,6 +221,9 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
if (!psci_has_osi_support())
return 0;
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return 0;
+
data->dev = psci_dt_attach_cpu(cpu);
if (IS_ERR_OR_NULL(data->dev))
return PTR_ERR_OR_ZERO(data->dev);
@@ -240,6 +233,7 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
psci_cpuidle_use_cpuhp = true;
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 1bad4d2b7be3..a7d33f3ee01e 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -33,16 +33,16 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;
-static int snooze_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle
+int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ int index)
{
u64 snooze_exit_time;
set_thread_flag(TIF_POLLING_NRFLAG);
pseries_idle_prolog();
- local_irq_enable();
+ raw_local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout;
dev->poll_time_limit = false;
@@ -65,14 +65,14 @@ static int snooze_loop(struct cpuidle_device *dev,
HMT_medium();
clear_thread_flag(TIF_POLLING_NRFLAG);
- local_irq_disable();
+ raw_local_irq_disable();
pseries_idle_epilog();
return index;
}
-static void check_and_cede_processor(void)
+static __cpuidle void check_and_cede_processor(void)
{
/*
* Ensure our interrupt state is properly tracked,
@@ -216,9 +216,9 @@ static int __init parse_cede_parameters(void)
#define NR_DEDICATED_STATES 2 /* snooze, CEDE */
static u8 cede_latency_hint[NR_DEDICATED_STATES];
-static int dedicated_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle
+int dedicated_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ int index)
{
u8 old_latency_hint;
@@ -230,7 +230,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
HMT_medium();
check_and_cede_processor();
- local_irq_disable();
+ raw_local_irq_disable();
get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->cede_latency_hint = old_latency_hint;
@@ -239,9 +239,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
return index;
}
-static int shared_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle
+int shared_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ int index)
{
pseries_idle_prolog();
@@ -255,7 +255,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
*/
check_and_cede_processor();
- local_irq_disable();
+ raw_local_irq_disable();
pseries_idle_epilog();
return index;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index beedf22cbe78..1fc9968eae19 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -11,13 +11,12 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <soc/qcom/spm.h>
#include <asm/proc-fns.h>
@@ -58,8 +57,8 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
return ret;
}
-static int spm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int spm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 05fe2902df9a..e8094fc92491 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
@@ -15,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -93,8 +93,8 @@ static int sbi_suspend(u32 state)
return sbi_suspend_finisher(state, 0, 0);
}
-static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
u32 state = states[idx];
@@ -106,9 +106,9 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
idx, state);
}
-static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
u32 *states = data->states;
@@ -121,12 +121,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
+
+ ct_cpuidle_enter();
if (sbi_is_domain_state_available())
state = sbi_get_domain_state();
@@ -135,12 +135,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
ret = sbi_suspend(state) ? -1 : idx;
- ct_irq_enter_irqson();
+ ct_cpuidle_exit();
+
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -251,6 +251,7 @@ static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle =
sbi_enter_s2idle_domain_idle_state;
@@ -496,7 +497,7 @@ static int sbi_genpd_probe(struct device_node *np)
* initialize a genpd/genpd-of-provider pair when it's found.
*/
for_each_child_of_node(np, node) {
- if (!of_find_property(node, "#power-domain-cells", NULL))
+ if (!of_property_present(node, "#power-domain-cells"))
continue;
ret = sbi_pd_init(node);
@@ -547,8 +548,8 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
for_each_possible_cpu(cpu) {
np = of_cpu_device_node_get(cpu);
if (np &&
- of_find_property(np, "power-domains", NULL) &&
- of_find_property(np, "power-domain-names", NULL)) {
+ of_property_present(np, "power-domains") &&
+ of_property_present(np, "power-domain-names")) {
continue;
} else {
sbi_cpuidle_use_osi = false;
@@ -612,7 +613,7 @@ static int __init sbi_cpuidle_init(void)
* 2) SBI HSM extension is available
*/
if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
- sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+ !sbi_probe_extension(SBI_EXT_HSM)) {
pr_info("HSM suspend not available\n");
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 9845629aeb6d..b203a93deac5 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -160,8 +160,8 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
return 0;
}
-static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
- int index, unsigned int cpu)
+static __cpuidle int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
{
int err;
@@ -180,9 +180,11 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
}
local_fiq_disable();
- RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
+ tegra_pm_set_cpu_in_lp2();
cpu_pm_enter();
+ ct_cpuidle_enter();
+
switch (index) {
case TEGRA_C7:
err = tegra_cpuidle_c7_enter();
@@ -197,8 +199,10 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
break;
}
+ ct_cpuidle_exit();
+
cpu_pm_exit();
- RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
+ tegra_pm_clear_cpu_in_lp2();
local_fiq_enable();
return err ?: index;
@@ -222,10 +226,11 @@ static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
return index;
}
-static int tegra_cpuidle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
+ bool do_rcu = drv->states[index].flags & CPUIDLE_FLAG_RCU_IDLE;
unsigned int cpu = cpu_logical_map(dev->cpu);
int ret;
@@ -233,9 +238,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
if (dev->states_usage[index].disable)
return -1;
- if (index == TEGRA_C1)
+ if (index == TEGRA_C1) {
+ if (do_rcu)
+ ct_cpuidle_enter();
ret = arm_cpuidle_simple_enter(dev, drv, index);
- else
+ if (do_rcu)
+ ct_cpuidle_exit();
+ } else
ret = tegra_cpuidle_state_enter(dev, index, cpu);
if (ret < 0) {
@@ -285,7 +294,8 @@ static struct cpuidle_driver tegra_idle_driver = {
.exit_latency = 2000,
.target_residency = 2200,
.power_usage = 100,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C7",
.desc = "CPU core powered off",
},
@@ -295,6 +305,7 @@ static struct cpuidle_driver tegra_idle_driver = {
.target_residency = 10000,
.power_usage = 0,
.flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE |
CPUIDLE_FLAG_COUPLED,
.name = "CC6",
.desc = "CPU cluster powered off",
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6eceb1988243..8e929f6602ce 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/idle.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
@@ -136,11 +137,13 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_s2idle_proper(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, int index)
+static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
{
- ktime_t time_start, time_end;
struct cpuidle_state *target_state = &drv->states[index];
+ ktime_t time_start, time_end;
+
+ instrumentation_begin();
time_start = ns_to_ktime(local_clock());
@@ -151,13 +154,18 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+ raw_local_irq_disable();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
tick_unfreeze();
start_critical_timings();
@@ -165,6 +173,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
dev->states_usage[index].s2idle_usage++;
+ instrumentation_end();
}
/**
@@ -199,8 +208,9 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* @drv: cpuidle driver for this cpu
* @index: index into the states table in @drv of the state to enter
*/
-int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
- int index)
+noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int entered_state;
@@ -208,6 +218,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
+ instrumentation_begin();
+
/*
* Tell the time framework to switch to a broadcast timer because our
* local timer will be shut down. If a local timer is used from another
@@ -234,11 +246,33 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
+
+ /*
+ * NOTE!!
+ *
+ * For cpuidle_state::enter() methods that do *NOT* set
+ * CPUIDLE_FLAG_RCU_IDLE RCU will be disabled here and these functions
+ * must be marked either noinstr or __cpuidle.
+ *
+ * For cpuidle_state::enter() methods that *DO* set
+ * CPUIDLE_FLAG_RCU_IDLE this isn't required, but they must mark the
+ * function calling ct_cpuidle_enter() as noinstr/__cpuidle and all
+ * functions called within the RCU-idle region.
+ */
entered_state = target_state->enter(dev, drv, index);
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+
+ if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter))
+ raw_local_irq_disable();
+
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
start_critical_timings();
sched_clock_idle_wakeup_event();
@@ -248,12 +282,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
- if (broadcast) {
- if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
-
+ if (broadcast)
tick_broadcast_exit();
- }
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
@@ -305,6 +335,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
dev->states_usage[index].rejected++;
}
+ instrumentation_end();
+
return entered_state;
}
@@ -394,7 +426,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* @dev: the cpuidle device
*
*/
-u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+__cpuidle u64 cpuidle_poll_time(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
@@ -776,7 +808,7 @@ static int __init cpuidle_init(void)
if (cpuidle_disabled())
return -ENODEV;
- return cpuidle_add_interface(cpu_subsys.dev_root);
+ return cpuidle_add_interface();
}
module_param(off, int, 0444);
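
For reference, a minimal sketch of the calling convention that the NOTE in cpuidle_enter_state() above spells out for drivers which set CPUIDLE_FLAG_RCU_IDLE. This is not part of the series; my_soc_enter() and my_soc_do_idle() are hypothetical names, and only the ct_cpuidle_enter()/ct_cpuidle_exit() bracketing and the __cpuidle marking follow the stated rules (compare the tegra_cpuidle_enter() hunk earlier):

/* Hypothetical driver callback; assumes the state sets CPUIDLE_FLAG_RCU_IDLE. */
static __cpuidle int my_soc_enter(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	int ret;

	ct_cpuidle_enter();		/* RCU stops watching from here on */
	ret = my_soc_do_idle(index);	/* must itself be noinstr/__cpuidle */
	ct_cpuidle_exit();		/* RCU watches again */

	return ret;
}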
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 9f336af17fa6..52701d9588f1 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -30,7 +30,7 @@ extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
struct device;
-extern int cpuidle_add_interface(struct device *dev);
+extern int cpuidle_add_interface(void);
extern void cpuidle_remove_interface(struct device *dev);
extern int cpuidle_add_device_sysfs(struct cpuidle_device *device);
extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index f70aa17e2a8e..d9cda7f6ccb9 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -183,11 +183,15 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
else if (s->target_residency_ns < 0)
s->target_residency_ns = 0;
+ else
+ s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
if (s->exit_latency > 0)
s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
else if (s->exit_latency_ns < 0)
s->exit_latency_ns = 0;
+ else
+ s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC);
}
}
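
The __cpuidle_driver_init() hunk above makes the microsecond and nanosecond fields mirror each other regardless of which one the driver filled in. A small worked illustration (numbers are examples, not from the patch):

/*
 * With the new 'else' branches, a state that sets only the nanosecond
 * fields now gets the derived microsecond values too:
 *   s->target_residency_ns = 1500000  ->  s->target_residency = 1500 us
 *   s->exit_latency_ns     =  800000  ->  s->exit_latency     =  800 us
 * States that set the microsecond fields keep the old behaviour, e.g.
 *   s->exit_latency = 2000            ->  s->exit_latency_ns = 2000000 ns
 */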
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 7ca3d7d9b5ea..12fec92a85fd 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include "dt_idle_states.h"
@@ -77,7 +76,7 @@ static int init_state_node(struct cpuidle_state *idle_state,
if (err)
desc = state_node->name;
- idle_state->flags = 0;
+ idle_state->flags = CPUIDLE_FLAG_RCU_IDLE;
if (of_property_read_bool(state_node, "local-timer-stop"))
idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
/*
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index d9262db79cae..987fc5f3997d 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -2,8 +2,13 @@
/*
* Timer events oriented CPU idle governor
*
+ * TEO governor:
* Copyright (C) 2018 - 2021 Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * Util-awareness mechanism:
+ * Copyright (C) 2022 Arm Ltd.
+ * Author: Kajetan Puchalski <kajetan.puchalski@arm.com>
*/
/**
@@ -99,15 +104,56 @@
* select the given idle state instead of the candidate one.
*
* 3. By default, select the candidate state.
+ *
+ * Util-awareness mechanism:
+ *
+ * The idea behind the util-awareness extension is that there are two distinct
+ * scenarios for the CPU which should result in two different approaches to idle
+ * state selection - utilized and not utilized.
+ *
+ * In this case, 'utilized' means that the average runqueue util of the CPU is
+ * above a certain threshold.
+ *
+ * When the CPU is utilized while going into idle, more likely than not it will
+ * be woken up to do more work soon and so a shallower idle state should be
+ * selected to minimise latency and maximise performance. When the CPU is not
+ * being utilized, the usual metrics-based approach to selecting the deepest
+ * available idle state should be preferred to take advantage of the power
+ * saving.
+ *
+ * In order to achieve this, the governor uses a utilization threshold.
+ * The threshold is computed per-CPU as a percentage of the CPU's capacity
+ * by bit shifting the capacity value. Based on testing, the shift of 6 (~1.56%)
+ * seems to be getting the best results.
+ *
+ * Before selecting the next idle state, the governor compares the current CPU
+ * util to the precomputed util threshold. If it's below, it defaults to the
+ * TEO metrics mechanism. If it's above, the closest shallower idle state will
+ * be selected instead, as long as it is not a polling state.
*/
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/topology.h>
#include <linux/tick.h>
/*
+ * The number of bits to shift the CPU's capacity by in order to determine
+ * the utilized threshold.
+ *
+ * 6 was chosen based on testing as the number that achieved the best balance
+ * of power and performance on average.
+ *
+ * The resulting threshold is high enough to not be triggered by background
+ * noise and low enough to react quickly when activity starts to ramp up.
+ */
+#define UTIL_THRESHOLD_SHIFT 6
+
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -137,9 +183,11 @@ struct teo_bin {
* @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
- * @total: Grand total of the "intercepts" and "hits" mertics for all bins.
+ * @total: Grand total of the "intercepts" and "hits" metrics for all bins.
* @next_recent_idx: Index of the next @recent_idx entry to update.
* @recent_idx: Indices of bins corresponding to recent "intercepts".
+ * @util_threshold: Threshold above which the CPU is considered utilized
+ * @utilized: Whether the last sleep on the CPU happened while utilized
*/
struct teo_cpu {
s64 time_span_ns;
@@ -148,11 +196,30 @@ struct teo_cpu {
unsigned int total;
int next_recent_idx;
int recent_idx[NR_RECENT];
+ unsigned long util_threshold;
+ bool utilized;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
/**
+ * teo_cpu_is_utilized - Check if the CPU's util is above the threshold
+ * @cpu: Target CPU
+ * @cpu_data: Governor CPU data for the target CPU
+ */
+#ifdef CONFIG_SMP
+static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
+{
+ return sched_cpu_util(cpu) > cpu_data->util_threshold;
+}
+#else
+static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
+{
+ return false;
+}
+#endif
+
+/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
@@ -258,15 +325,17 @@ static s64 teo_middle_of_bin(int idx, struct cpuidle_driver *drv)
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
* @duration_ns: Idle duration value to match.
+ * @no_poll: Don't consider polling states.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- s64 duration_ns)
+ s64 duration_ns, bool no_poll)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable ||
+ (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
continue;
state_idx = i;
@@ -321,6 +390,22 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
+ cpu_data->utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
+ /*
+ * If the CPU is being utilized over the threshold and there are only 2
+ * states to choose from, the metrics need not be considered, so choose
+ * the shallowest non-polling state and exit.
+ */
+ if (drv->state_count < 3 && cpu_data->utilized) {
+ for (i = 0; i < drv->state_count; ++i) {
+ if (!dev->states_usage[i].disable &&
+ !(drv->states[i].flags & CPUIDLE_FLAG_POLLING)) {
+ idx = i;
+ goto end;
+ }
+ }
+ }
+
/*
* Find the deepest idle state whose target residency does not exceed
* the current sleep length and the deepest idle state not deeper than
@@ -452,6 +537,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx > constraint_idx)
idx = constraint_idx;
+ /*
+ * If the CPU is being utilized over the threshold, choose a shallower
+ * non-polling state to improve latency
+ */
+ if (cpu_data->utilized)
+ idx = teo_find_shallower_state(drv, dev, idx, duration_ns, true);
+
end:
/*
* Don't stop the tick if the selected state is a polling one or if the
@@ -469,7 +561,7 @@ end:
*/
if (idx > idx0 &&
drv->states[idx].target_residency_ns > delta_tick)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
}
return idx;
@@ -508,9 +600,11 @@ static int teo_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu);
int i;
memset(cpu_data, 0, sizeof(*cpu_data));
+ cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT;
for (i = 0; i < NR_RECENT; i++)
cpu_data->recent_idx[i] = -1;
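
A quick worked example of the util-awareness threshold described above (the capacity figures are illustrative, not taken from this patch):

/*
 * UTIL_THRESHOLD_SHIFT == 6, so teo_enable_device() computes
 *   util_threshold = arch_scale_cpu_capacity(cpu) >> 6
 * e.g. capacity 1024 -> threshold 16 (~1.56%), capacity 384 -> threshold 6.
 * teo_select() then treats the CPU as utilized whenever sched_cpu_util()
 * exceeds that threshold and, via teo_find_shallower_state(..., true),
 * drops to the closest shallower non-polling state.
 */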
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index f7e83613ae94..bdcfeaecd228 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -13,11 +13,13 @@
static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- u64 time_start = local_clock();
+ u64 time_start;
+
+ time_start = local_clock();
dev->poll_time_limit = false;
- local_irq_enable();
+ raw_local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
u64 limit;
@@ -36,6 +38,8 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
}
}
}
+ raw_local_irq_disable();
+
current_clr_polling();
return index;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 2b496a53cbca..d6f5da61cb7d 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -119,11 +119,18 @@ static struct attribute_group cpuidle_attr_group = {
/**
* cpuidle_add_interface - add CPU global sysfs attributes
- * @dev: the target device
*/
-int cpuidle_add_interface(struct device *dev)
+int cpuidle_add_interface(void)
{
- return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
+ struct device *dev_root = bus_get_dev_root(&cpu_subsys);
+ int retval;
+
+ if (!dev_root)
+ return -EINVAL;
+
+ retval = sysfs_create_group(&dev_root->kobj, &cpuidle_attr_group);
+ put_device(dev_root);
+ return retval;
}
/**
@@ -200,7 +207,7 @@ static void cpuidle_sysfs_release(struct kobject *kobj)
complete(&kdev->kobj_unregister);
}
-static struct kobj_type ktype_cpuidle = {
+static const struct kobj_type ktype_cpuidle = {
.sysfs_ops = &cpuidle_sysfs_ops,
.release = cpuidle_sysfs_release,
};
@@ -447,7 +454,7 @@ static void cpuidle_state_sysfs_release(struct kobject *kobj)
complete(&state_obj->kobj_unregister);
}
-static struct kobj_type ktype_state_cpuidle = {
+static const struct kobj_type ktype_state_cpuidle = {
.sysfs_ops = &cpuidle_state_sysfs_ops,
.default_groups = cpuidle_state_default_groups,
.release = cpuidle_state_sysfs_release,
@@ -594,7 +601,7 @@ static struct attribute *cpuidle_driver_default_attrs[] = {
};
ATTRIBUTE_GROUPS(cpuidle_driver_default);
-static struct kobj_type ktype_driver_cpuidle = {
+static const struct kobj_type ktype_driver_cpuidle = {
.sysfs_ops = &cpuidle_driver_sysfs_ops,
.default_groups = cpuidle_driver_default_groups,
.release = cpuidle_driver_sysfs_release,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dfb103f81a64..9c440cd0fed0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -240,21 +240,6 @@ config CRYPTO_DEV_TALITOS2
Say 'Y' here to use the Freescale Security Engine (SEC)
version 2 and following as found on MPC83xx, MPC85xx, etc ...
-config CRYPTO_DEV_IXP4XX
- tristate "Driver for IXP4xx crypto hardware acceleration"
- depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
- select CRYPTO_AES
- select CRYPTO_DES
- select CRYPTO_ECB
- select CRYPTO_CBC
- select CRYPTO_CTR
- select CRYPTO_LIB_DES
- select CRYPTO_AEAD
- select CRYPTO_AUTHENC
- select CRYPTO_SKCIPHER
- help
- Driver for the IXP4xx NPE crypto engine.
-
config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
@@ -390,16 +375,6 @@ if CRYPTO_DEV_NX
source "drivers/crypto/nx/Kconfig"
endif
-config CRYPTO_DEV_UX500
- tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
- depends on ARCH_U8500
- help
- Driver for ST-Ericsson UX500 crypto engine.
-
-if CRYPTO_DEV_UX500
- source "drivers/crypto/ux500/Kconfig"
-endif # if CRYPTO_DEV_UX500
-
config CRYPTO_DEV_ATMEL_AUTHENC
bool "Support for Atmel IPSEC/SSL hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
@@ -512,10 +487,10 @@ config CRYPTO_DEV_MXS_DCP
To compile this driver as a module, choose M here: the module
will be called mxs-dcp.
-source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
+source "drivers/crypto/intel/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
@@ -784,7 +759,7 @@ config CRYPTO_DEV_ARTPEC6
config CRYPTO_DEV_CCREE
tristate "Support for ARM TrustZone CryptoCell family of security processors"
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
- default n
+ depends on HAS_IOMEM
select CRYPTO_HASH
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
@@ -820,6 +795,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
+ select CRYPTO_DES
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -830,7 +806,6 @@ config CRYPTO_DEV_SA2UL
used for crypto offload. Select this if you want to use hardware
acceleration for cryptographic algorithms on these devices.
-source "drivers/crypto/keembay/Kconfig"
source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fa8bf1be1a8c..51d36701e785 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
-obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -33,7 +32,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
@@ -43,7 +41,6 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
@@ -52,4 +49,4 @@ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
-obj-y += keembay/
+obj-y += intel/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 9f6594699835..a6865ff4d400 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -118,6 +118,7 @@ static const struct ce_variant ce_d1_variant = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
{ "ram", 0, 400000000 },
+ { "trng", 0, 0 },
},
.esr = ESR_D1,
.prng = CE_ALG_PRNG,
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 8177aaba4434..27029fb77e29 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -105,7 +105,7 @@
#define MAX_SG 8
-#define CE_MAX_CLOCKS 3
+#define CE_MAX_CLOCKS 4
#define MAXFLOW 4
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 902f6be057ec..16966cc94e24 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -151,7 +151,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
}
rctx->p_iv[i] = a;
/* we only need to set up all the other IVs for decryption */
- if (rctx->op_dir & SS_ENCRYPTION)
+ if (rctx->op_dir == SS_ENCRYPTION)
return 0;
todo = min(len, sg_dma_len(sg));
len -= todo;
@@ -452,7 +452,7 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
kfree_sensitive(op->key);
op->keylen = keylen;
- op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ op->key = kmemdup(key, keylen, GFP_KERNEL);
if (!op->key)
return -ENOMEM;
@@ -475,7 +475,7 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
kfree_sensitive(op->key);
op->keylen = keylen;
- op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ op->key = kmemdup(key, keylen, GFP_KERNEL);
if (!op->key)
return -ENOMEM;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index ac2329e2b0e5..c9dc06f97857 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -527,7 +528,7 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
init_completion(&ss->flows[i].complete);
ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!ss->flows[i].biv) {
err = -ENOMEM;
goto error_engine;
@@ -535,7 +536,7 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
for (j = 0; j < MAX_SG; j++) {
ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!ss->flows[i].iv[j]) {
err = -ENOMEM;
goto error_engine;
@@ -544,13 +545,15 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
/* the padding could be up to two blocks. */
ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!ss->flows[i].pad) {
err = -ENOMEM;
goto error_engine;
}
- ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE,
- GFP_KERNEL | GFP_DMA);
+ ss->flows[i].result =
+ devm_kmalloc(ss->dev, max(SHA256_DIGEST_SIZE,
+ dma_get_cache_alignment()),
+ GFP_KERNEL);
if (!ss->flows[i].result) {
err = -ENOMEM;
goto error_engine;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 36a82b22953c..577bf636f7fb 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -79,10 +79,10 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
memcpy(tfmctx->key, key, keylen);
}
- tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ tfmctx->ipad = kzalloc(bs, GFP_KERNEL);
if (!tfmctx->ipad)
return -ENOMEM;
- tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ tfmctx->opad = kzalloc(bs, GFP_KERNEL);
if (!tfmctx->opad) {
ret = -ENOMEM;
goto err_opad;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index dd677e9ed06f..70c7b5d571b8 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -11,6 +11,8 @@
*/
#include "sun8i-ss.h"
#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/pm_runtime.h>
#include <crypto/internal/rng.h>
@@ -25,7 +27,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
ctx->seed = NULL;
}
if (!ctx->seed)
- ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+ ctx->seed = kmalloc(slen, GFP_KERNEL);
if (!ctx->seed)
return -ENOMEM;
@@ -58,6 +60,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
struct rng_alg *alg = crypto_rng_alg(tfm);
struct sun8i_ss_alg_template *algt;
+ unsigned int todo_with_padding;
struct sun8i_ss_dev *ss;
dma_addr_t dma_iv, dma_dst;
unsigned int todo;
@@ -81,7 +84,11 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
todo -= todo % PRNG_DATA_SIZE;
- d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ todo_with_padding = ALIGN(todo, dma_get_cache_alignment());
+ if (todo_with_padding < todo || todo < dlen)
+ return -EOVERFLOW;
+
+ d = kzalloc(todo_with_padding, GFP_KERNEL);
if (!d)
return -ENOMEM;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 280f4b0e7133..d553f3f1efbe 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
{
struct skcipher_request *req;
struct scatterlist *dst;
- dma_addr_t addr;
req = skcipher_request_cast(pd_uinfo->async_req);
@@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
- addr = dma_map_page(dev->core_dev->device, sg_page(dst),
- dst->offset, dst->length, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
+ DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
@@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_tfm_ctx(ahash_req->base.tfm);
+ ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
- crypto_tfm_ctx(ahash_req->base.tfm));
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
@@ -1103,7 +1101,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
u32 clr_val)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
index ae2710ae8d8f..db6c5b4cdc40 100644
--- a/drivers/crypto/aspeed/Kconfig
+++ b/drivers/crypto/aspeed/Kconfig
@@ -46,3 +46,14 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
crypto driver.
Supports AES/DES symmetric-key encryption and decryption
with ECB/CBC/CFB/OFB/CTR options.
+
+config CRYPTO_DEV_ASPEED_ACRY
+ bool "Enable Aspeed ACRY RSA Engine"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_ENGINE
+ select CRYPTO_RSA
+ help
+ Select here to enable Aspeed ECC/RSA Engine (ACRY)
+ RSA driver.
+ Supports 256 bits to 4096 bits RSA encryption/decryption
+ and signature/verification.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
index a0ed40ddaad1..15862752c053 100644
--- a/drivers/crypto/aspeed/Makefile
+++ b/drivers/crypto/aspeed/Makefile
@@ -5,3 +5,7 @@ obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
aspeed_crypto-objs := aspeed-hace.o \
$(hace-hash-y) \
$(hace-crypto-y)
+
+aspeed_acry-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += $(aspeed_acry-y)
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
new file mode 100644
index 000000000000..470122c87fea
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 Aspeed Technology Inc.
+ */
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/scatterwalk.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/count_zeros.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/regmap.h>
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define ACRY_DBG(d, fmt, ...) \
+ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define ACRY_DBG(d, fmt, ...) \
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/*****************************
+ * *
+ * ACRY register definitions *
+ * *
+ * ***************************/
+#define ASPEED_ACRY_TRIGGER 0x000 /* ACRY Engine Control: trigger */
+#define ASPEED_ACRY_DMA_CMD 0x048 /* ACRY Engine Control: Command */
+#define ASPEED_ACRY_DMA_SRC_BASE 0x04C /* ACRY DRAM base address for DMA */
+#define ASPEED_ACRY_DMA_LEN 0x050 /* ACRY Data Length of DMA */
+#define ASPEED_ACRY_RSA_KEY_LEN 0x058 /* ACRY RSA Exp/Mod Key Length (Bits) */
+#define ASPEED_ACRY_INT_MASK 0x3F8 /* ACRY Interrupt Mask */
+#define ASPEED_ACRY_STATUS 0x3FC /* ACRY Interrupt Status */
+
+/* rsa trigger */
+#define ACRY_CMD_RSA_TRIGGER BIT(0)
+#define ACRY_CMD_DMA_RSA_TRIGGER BIT(1)
+
+/* rsa dma cmd */
+#define ACRY_CMD_DMA_SRAM_MODE_RSA (0x3 << 4)
+#define ACRY_CMD_DMEM_AHB BIT(8)
+#define ACRY_CMD_DMA_SRAM_AHB_ENGINE 0
+
+/* rsa key len */
+#define RSA_E_BITS_LEN(x) ((x) << 16)
+#define RSA_M_BITS_LEN(x) (x)
+
+/* acry isr */
+#define ACRY_RSA_ISR BIT(1)
+
+#define ASPEED_ACRY_BUFF_SIZE 0x1800 /* DMA buffer size */
+#define ASPEED_ACRY_SRAM_MAX_LEN 2048 /* ACRY SRAM maximum length (Bytes) */
+#define ASPEED_ACRY_RSA_MAX_KEY_LEN 512 /* ACRY RSA maximum key length (Bytes) */
+
+#define CRYPTO_FLAGS_BUSY BIT(1)
+#define BYTES_PER_DWORD 4
+
+/*****************************
+ * *
+ * AHBC register definitions *
+ * *
+ * ***************************/
+#define AHBC_REGION_PROT 0x240
+#define REGION_ACRYM BIT(23)
+
+#define ast_acry_write(acry, val, offset) \
+ writel((val), (acry)->regs + (offset))
+
+#define ast_acry_read(acry, offset) \
+ readl((acry)->regs + (offset))
+
+struct aspeed_acry_dev;
+
+typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);
+
+struct aspeed_acry_dev {
+ void __iomem *regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct regmap *ahbc;
+
+ struct akcipher_request *req;
+ struct tasklet_struct done_task;
+ aspeed_acry_fn_t resume;
+ unsigned long flags;
+
+ /* ACRY output SRAM buffer */
+ void __iomem *acry_sram;
+
+ /* ACRY input DMA buffer */
+ void *buf_addr;
+ dma_addr_t buf_dma_addr;
+
+ struct crypto_engine *crypt_engine_rsa;
+
+ /* ACRY SRAM memory mapped */
+ int exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
+ int mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
+ int data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
+};
+
+struct aspeed_acry_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct aspeed_acry_dev *acry_dev;
+
+ struct rsa_key key;
+ int enc;
+ u8 *n;
+ u8 *e;
+ u8 *d;
+ size_t n_sz;
+ size_t e_sz;
+ size_t d_sz;
+
+ aspeed_acry_fn_t trigger;
+
+ struct crypto_akcipher *fallback_tfm;
+};
+
+struct aspeed_acry_alg {
+ struct aspeed_acry_dev *acry_dev;
+ struct akcipher_alg akcipher;
+};
+
+enum aspeed_rsa_key_mode {
+ ASPEED_RSA_EXP_MODE = 0,
+ ASPEED_RSA_MOD_MODE,
+ ASPEED_RSA_DATA_MODE,
+};
+
+static inline struct akcipher_request *
+ akcipher_request_cast(struct crypto_async_request *req)
+{
+ return container_of(req, struct akcipher_request, base);
+}
+
+static int aspeed_acry_do_fallback(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ int err;
+
+ akcipher_request_set_tfm(req, ctx->fallback_tfm);
+
+ if (ctx->enc)
+ err = crypto_akcipher_encrypt(req);
+ else
+ err = crypto_akcipher_decrypt(req);
+
+ akcipher_request_set_tfm(req, cipher);
+
+ return err;
+}
+
+static bool aspeed_acry_need_fallback(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+
+ return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
+}
+
+static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
+ struct akcipher_request *req)
+{
+ if (aspeed_acry_need_fallback(req)) {
+ ACRY_DBG(acry_dev, "SW fallback\n");
+ return aspeed_acry_do_fallback(req);
+ }
+
+ return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
+}
+
+static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct akcipher_request *req = akcipher_request_cast(areq);
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+
+ acry_dev->req = req;
+ acry_dev->flags |= CRYPTO_FLAGS_BUSY;
+
+ return ctx->trigger(acry_dev);
+}
+
+static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
+{
+ struct akcipher_request *req = acry_dev->req;
+
+ acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);
+
+ return err;
+}
+
+/*
+ * Copy data to the DMA buffer for the engine to use.
+ */
+static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
+ u8 *buf, struct scatterlist *src,
+ size_t nbytes)
+{
+ static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
+ int i = 0, j;
+ int data_idx;
+
+ ACRY_DBG(acry_dev, "\n");
+
+ scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);
+
+ for (j = nbytes - 1; j >= 0; j--) {
+ data_idx = acry_dev->data_byte_mapping[i];
+ buf[data_idx] = dram_buffer[j];
+ i++;
+ }
+
+ for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
+ data_idx = acry_dev->data_byte_mapping[i];
+ buf[data_idx] = 0;
+ }
+}
+
+/*
+ * Copy the exponent/modulus to the DMA buffer for the engine to use.
+ *
+ * Params:
+ * - mode 0 : Exponential
+ * - mode 1 : Modulus
+ *
+ * Example:
+ * - DRAM memory layout:
+ * D[0], D[4], D[8], D[12]
+ * - ACRY SRAM memory layout should reverse the order of source data:
+ * D[12], D[8], D[4], D[0]
+ */
+static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
+ const void *xbuf, size_t nbytes,
+ enum aspeed_rsa_key_mode mode)
+{
+ const u8 *src = xbuf;
+ __le32 *dw_buf = buf;
+ int nbits, ndw;
+ int i, j, idx;
+ u32 data = 0;
+
+ ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);
+
+ if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
+ return -ENOMEM;
+
+ /* Remove the leading zeros */
+ while (nbytes > 0 && src[0] == 0) {
+ src++;
+ nbytes--;
+ }
+
+ nbits = nbytes * 8;
+ if (nbytes > 0)
+ nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);
+
+ /* double-word alignment */
+ ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);
+
+ if (nbytes > 0) {
+ i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
+ i %= BYTES_PER_DWORD;
+
+ for (j = ndw; j > 0; j--) {
+ for (; i < BYTES_PER_DWORD; i++) {
+ data <<= 8;
+ data |= *src++;
+ }
+
+ i = 0;
+
+ if (mode == ASPEED_RSA_EXP_MODE)
+ idx = acry_dev->exp_dw_mapping[j - 1];
+ else /* mode == ASPEED_RSA_MOD_MODE */
+ idx = acry_dev->mod_dw_mapping[j - 1];
+
+ dw_buf[idx] = cpu_to_le32(data);
+ }
+ }
+
+ return nbits;
+}
+
+static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
+{
+ struct akcipher_request *req = acry_dev->req;
+ u8 __iomem *sram_buffer = acry_dev->acry_sram;
+ struct scatterlist *out_sg = req->dst;
+ static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
+ int leading_zero = 1;
+ int result_nbytes;
+ int i = 0, j;
+ int data_idx;
+
+ /* Set Data Memory to AHB(CPU) Access Mode */
+ ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
+
+ /* Disable ACRY SRAM protection */
+ regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
+ REGION_ACRYM, 0);
+
+ result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;
+
+ for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
+ data_idx = acry_dev->data_byte_mapping[j];
+ if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
+ result_nbytes--;
+ } else {
+ leading_zero = 0;
+ dram_buffer[i] = readb(sram_buffer + data_idx);
+ i++;
+ }
+ }
+
+ ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
+ result_nbytes, req->dst_len);
+
+ if (result_nbytes <= req->dst_len) {
+ scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
+ 1);
+ req->dst_len = result_nbytes;
+
+ } else {
+ dev_err(acry_dev->dev, "RSA engine error!\n");
+ }
+
+ memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+
+ return aspeed_acry_complete(acry_dev, 0);
+}
+
+static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
+{
+ struct akcipher_request *req = acry_dev->req;
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ int ne, nm;
+
+ if (!ctx->n || !ctx->n_sz) {
+ dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
+ return -EINVAL;
+ }
+
+ memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+
+ /* Copy source data to DMA buffer */
+ aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
+ req->src, req->src_len);
+
+ nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
+ ctx->n_sz, ASPEED_RSA_MOD_MODE);
+ if (ctx->enc) {
+ if (!ctx->e || !ctx->e_sz) {
+ dev_err(acry_dev->dev, "%s: key e is not set\n",
+ __func__);
+ return -EINVAL;
+ }
+ /* Copy key e to DMA buffer */
+ ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
+ ctx->e, ctx->e_sz,
+ ASPEED_RSA_EXP_MODE);
+ } else {
+ if (!ctx->d || !ctx->d_sz) {
+ dev_err(acry_dev->dev, "%s: key d is not set\n",
+ __func__);
+ return -EINVAL;
+ }
+ /* Copy key d to DMA buffer */
+ ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
+ ctx->key.d, ctx->key.d_sz,
+ ASPEED_RSA_EXP_MODE);
+ }
+
+ ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
+ ASPEED_ACRY_DMA_SRC_BASE);
+ ast_acry_write(acry_dev, (ne << 16) + nm,
+ ASPEED_ACRY_RSA_KEY_LEN);
+ ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
+ ASPEED_ACRY_DMA_LEN);
+
+ acry_dev->resume = aspeed_acry_rsa_transfer;
+
+ /* Enable ACRY SRAM protection */
+ regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
+ REGION_ACRYM, REGION_ACRYM);
+
+ ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
+ ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
+ ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);
+
+ /* Trigger RSA engines */
+ ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
+ ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);
+
+ return 0;
+}
+
+static int aspeed_acry_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+
+ ctx->trigger = aspeed_acry_rsa_trigger;
+ ctx->enc = 1;
+
+ return aspeed_acry_handle_queue(acry_dev, req);
+}
+
+static int aspeed_acry_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+
+ ctx->trigger = aspeed_acry_rsa_trigger;
+ ctx->enc = 0;
+
+ return aspeed_acry_handle_queue(acry_dev, req);
+}
+
+static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
+{
+ return kmemdup(src, len, GFP_KERNEL);
+}
+
+static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
+ size_t len)
+{
+ ctx->n_sz = len;
+ ctx->n = aspeed_rsa_key_copy(value, len);
+ if (!ctx->n)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
+ size_t len)
+{
+ ctx->e_sz = len;
+ ctx->e = aspeed_rsa_key_copy(value, len);
+ if (!ctx->e)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
+ size_t len)
+{
+ ctx->d_sz = len;
+ ctx->d = aspeed_rsa_key_copy(value, len);
+ if (!ctx->d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
+{
+ kfree_sensitive(ctx->n);
+ kfree_sensitive(ctx->e);
+ kfree_sensitive(ctx->d);
+ ctx->n_sz = 0;
+ ctx->e_sz = 0;
+ ctx->d_sz = 0;
+}
+
+static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, int priv)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+ int ret;
+
+ if (priv)
+ ret = rsa_parse_priv_key(&ctx->key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&ctx->key, key, keylen);
+
+ if (ret) {
+ dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * The Aspeed engine supports key lengths of up to 4096 bits;
+ * larger keys are handled by the software fallback instead.
+ */
+ if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
+ return 0;
+
+ ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
+ if (ret)
+ goto err;
+
+ ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
+ if (ret)
+ goto err;
+
+ if (priv) {
+ ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_err(acry_dev->dev, "rsa set key failed\n");
+ aspeed_rsa_key_free(ctx);
+
+ return ret;
+}
+
+static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
+}
+
+static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
+}
+
+static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
+ return crypto_akcipher_maxsize(ctx->fallback_tfm);
+
+ return ctx->n_sz;
+}
+
+static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct aspeed_acry_alg *acry_alg;
+
+ acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);
+
+ ctx->acry_dev = acry_alg->acry_dev;
+
+ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ crypto_free_akcipher(ctx->fallback_tfm);
+}
+
+static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
+ {
+ .akcipher = {
+ .encrypt = aspeed_acry_rsa_enc,
+ .decrypt = aspeed_acry_rsa_dec,
+ .sign = aspeed_acry_rsa_dec,
+ .verify = aspeed_acry_rsa_enc,
+ .set_pub_key = aspeed_acry_rsa_set_pub_key,
+ .set_priv_key = aspeed_acry_rsa_set_priv_key,
+ .max_size = aspeed_acry_rsa_max_size,
+ .init = aspeed_acry_rsa_init_tfm,
+ .exit = aspeed_acry_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "aspeed-rsa",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct aspeed_acry_ctx),
+ },
+ },
+ },
+};
+
+static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
+ aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
+ rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+ if (rc) {
+ ACRY_DBG(acry_dev, "Failed to register %s\n",
+ aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
+ }
+ }
+}
+
+static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
+ crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+}
+
+/* ACRY interrupt service routine. */
+static irqreturn_t aspeed_acry_irq(int irq, void *dev)
+{
+ struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
+ u32 sts;
+
+ sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
+ ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);
+
+ ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);
+
+ if (sts & ACRY_RSA_ISR) {
+ /* Stop RSA engine */
+ ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);
+
+ if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&acry_dev->done_task);
+ else
+ dev_err(acry_dev->dev, "RSA no active requests.\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ACRY SRAM has its own memory layout.
+ * Set up the DRAM-to-SRAM index mapping for later use.
+ */
+static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
+{
+ int i, j = 0;
+
+ for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
+ acry_dev->exp_dw_mapping[i] = j;
+ acry_dev->mod_dw_mapping[i] = j + 4;
+ acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
+ acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
+ acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
+ acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
+ j++;
+ j = j % 4 ? j : j + 8;
+ }
+}
+
+static void aspeed_acry_done_task(unsigned long data)
+{
+ struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;
+
+ (void)acry_dev->resume(acry_dev);
+}
+
+static const struct of_device_id aspeed_acry_of_matches[] = {
+ { .compatible = "aspeed,ast2600-acry", },
+ {},
+};
+
+static int aspeed_acry_probe(struct platform_device *pdev)
+{
+ struct aspeed_acry_dev *acry_dev;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
+ GFP_KERNEL);
+ if (!acry_dev)
+ return -ENOMEM;
+
+ acry_dev->dev = dev;
+
+ platform_set_drvdata(pdev, acry_dev);
+
+ acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(acry_dev->regs))
+ return PTR_ERR(acry_dev->regs);
+
+ acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(acry_dev->acry_sram))
+ return PTR_ERR(acry_dev->acry_sram);
+
+ /* Get irq number and register it */
+ acry_dev->irq = platform_get_irq(pdev, 0);
+ if (acry_dev->irq < 0)
+ return -ENXIO;
+
+ rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
+ dev_name(dev), acry_dev);
+ if (rc) {
+ dev_err(dev, "Failed to request irq.\n");
+ return rc;
+ }
+
+ acry_dev->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(acry_dev->clk)) {
+ dev_err(dev, "Failed to get acry clk\n");
+ return PTR_ERR(acry_dev->clk);
+ }
+
+ acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "aspeed,ahbc");
+ if (IS_ERR(acry_dev->ahbc)) {
+ dev_err(dev, "Failed to get AHBC regmap\n");
+ return -ENODEV;
+ }
+
+ /* Initialize crypto hardware engine structure for RSA */
+ acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
+ if (!acry_dev->crypt_engine_rsa) {
+ rc = -ENOMEM;
+ goto clk_exit;
+ }
+
+ rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
+ if (rc)
+ goto err_engine_rsa_start;
+
+ tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
+ (unsigned long)acry_dev);
+
+ /* Set Data Memory to AHB(CPU) Access Mode */
+ ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
+
+ /* Initialize ACRY SRAM index */
+ aspeed_acry_sram_mapping(acry_dev);
+
+ acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
+ &acry_dev->buf_dma_addr,
+ GFP_KERNEL);
+ if (!acry_dev->buf_addr) {
+ rc = -ENOMEM;
+ goto err_engine_rsa_start;
+ }
+
+ aspeed_acry_register(acry_dev);
+
+ dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");
+
+ return 0;
+
+err_engine_rsa_start:
+ crypto_engine_exit(acry_dev->crypt_engine_rsa);
+clk_exit:
+ clk_disable_unprepare(acry_dev->clk);
+
+ return rc;
+}
+
+static int aspeed_acry_remove(struct platform_device *pdev)
+{
+ struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);
+
+ aspeed_acry_unregister(acry_dev);
+ crypto_engine_exit(acry_dev->crypt_engine_rsa);
+ tasklet_kill(&acry_dev->done_task);
+ clk_disable_unprepare(acry_dev->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
+
+static struct platform_driver aspeed_acry_driver = {
+ .probe = aspeed_acry_probe,
+ .remove = aspeed_acry_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_acry_of_matches,
+ },
+};
+
+module_platform_driver(aspeed_acry_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
+MODULE_LICENSE("GPL");
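
As a hedged aside on the new driver above, the key-length arithmetic in aspeed_acry_rsa_ctx_copy() and the value programmed into ASPEED_ACRY_RSA_KEY_LEN work out as follows (the modulus in the example is illustrative):

/*
 * Example: a 2048-bit modulus whose most significant byte is 0xb3.
 *   nbytes = 256 after stripping leading zero bytes
 *   nbits  = 256 * 8 = 2048
 *   count_leading_zeros(0xb3) on a 64-bit kernel is 56, and
 *   BITS_PER_LONG - 8 is also 56, so nbits stays at 2048.
 * aspeed_acry_rsa_trigger() then writes (ne << 16) + nm to the key-length
 * register, which matches RSA_E_BITS_LEN(ne) | RSA_M_BITS_LEN(nm).
 */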
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index 656cb92c8bb6..d2871e1de9c2 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -99,7 +99,6 @@ static int aspeed_hace_probe(struct platform_device *pdev)
const struct of_device_id *hace_dev_id;
struct aspeed_engine_hash *hash_engine;
struct aspeed_hace_dev *hace_dev;
- struct resource *res;
int rc;
hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
@@ -118,11 +117,9 @@ static int aspeed_hace_probe(struct platform_device *pdev)
hash_engine = &hace_dev->hash_engine;
crypto_engine = &hace_dev->crypto_engine;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
platform_set_drvdata(pdev, hace_dev);
- hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ hace_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(hace_dev->regs))
return PTR_ERR(hace_dev->regs);
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index f2cde23b56ae..05d0a15d546d 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -183,7 +183,7 @@ struct aspeed_sham_ctx {
struct aspeed_hace_dev *hace_dev;
unsigned long flags; /* hmac flag */
- struct aspeed_sha_hmac_ctx base[0];
+ struct aspeed_sha_hmac_ctx base[];
};
struct aspeed_sham_reqctx {
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 886bf258544c..143d33fbb316 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -493,17 +493,11 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
if (req->cryptlen < ivsize)
return;
- if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ if (rctx->mode & AES_FLAGS_ENCRYPT)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst)
- memcpy(req->iv, rctx->lastc, ivsize);
- else
- scatterwalk_map_and_copy(req->iv, req->src,
- req->cryptlen - ivsize,
- ivsize, 0);
- }
+ else
+ memcpy(req->iv, rctx->lastc, ivsize);
}
static inline struct atmel_aes_ctr_ctx *
@@ -554,7 +548,7 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
}
if (dd->is_async)
- dd->areq->complete(dd->areq, err);
+ crypto_request_complete(dd->areq, err);
tasklet_schedule(&dd->queue_task);
@@ -955,7 +949,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(areq->tfm);
@@ -1146,7 +1140,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
if (opmode != AES_FLAGS_ECB &&
- !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ !(mode & AES_FLAGS_ENCRYPT)) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1341,7 +1335,7 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.init = atmel_aes_init_tfm,
@@ -1879,7 +1873,7 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ err = xts_verify_key(tfm, key, keylen);
if (err)
return err;
@@ -2510,6 +2504,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
case 0x700:
+ case 0x600:
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 53100fb9b07b..aac64b555204 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -3,7 +3,7 @@
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
*/
#include <linux/delay.h>
@@ -313,11 +313,10 @@ static struct kpp_alg atmel_ecdh_nist_p256 = {
static int atmel_ecc_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atmel_i2c_client_priv *i2c_priv;
int ret;
- ret = atmel_i2c_probe(client, id);
+ ret = atmel_i2c_probe(client);
if (ret)
return ret;
@@ -411,6 +410,6 @@ static void __exit atmel_ecc_exit(void)
module_init(atmel_ecc_init);
module_exit(atmel_ecc_exit);
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 81ce09bedda8..83a9093eff25 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -3,7 +3,7 @@
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
*/
#include <linux/bitrev.h>
@@ -59,7 +59,7 @@ void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
* Read the word from Configuration zone that contains the lock bytes
* (UserExtra, Selector, LockValue, LockConfig).
*/
- cmd->param1 = CONFIG_ZONE;
+ cmd->param1 = CONFIGURATION_ZONE;
cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
cmd->count = READ_COUNT;
@@ -324,7 +324,7 @@ free_cmd:
return ret;
}
-int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+int atmel_i2c_probe(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv;
struct device *dev = &client->dev;
@@ -390,6 +390,6 @@ static void __exit atmel_i2c_exit(void)
module_init(atmel_i2c_init);
module_exit(atmel_i2c_exit);
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index 48929efe2a5b..c0bd429ee2c7 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
*/
#ifndef __ATMEL_I2C_H__
@@ -63,7 +63,7 @@ struct atmel_i2c_cmd {
#define STATUS_WAKE_SUCCESSFUL 0x11
/* Definitions for eeprom organization */
-#define CONFIG_ZONE 0
+#define CONFIGURATION_ZONE 0
/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX 1 /* buffer index of data in response */
@@ -167,7 +167,7 @@ struct atmel_i2c_work_data {
struct atmel_i2c_cmd cmd;
};
-int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
+int atmel_i2c_probe(struct i2c_client *client);
void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
void (*cbk)(struct atmel_i2c_work_data *work_data,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index ca4b01926d1b..6bef634d3c86 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -292,7 +292,7 @@ static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
clk_disable(dd->iclk);
if ((dd->is_async || dd->force_complete) && req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
/* handle new request */
tasklet_schedule(&dd->queue_task);
@@ -1080,7 +1080,7 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(async_req->tfm);
@@ -1948,14 +1948,32 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+ struct scatterlist *sgbuf;
size_t hs = ctx->hash_size;
size_t i, num_words = hs / sizeof(u32);
bool use_dma = false;
u32 mr;
/* Special case for empty message. */
- if (!req->nbytes)
- return atmel_sha_complete(dd, -EINVAL); // TODO:
+ if (!req->nbytes) {
+ req->nbytes = 0;
+ ctx->bufcnt = 0;
+ ctx->digcnt[0] = 0;
+ ctx->digcnt[1] = 0;
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ atmel_sha_fill_padding(ctx, 64);
+ break;
+
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
+ atmel_sha_fill_padding(ctx, 128);
+ break;
+ }
+ sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt);
+ }
/* Check DMA threshold and alignment. */
if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
@@ -1985,12 +2003,20 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ /* Special case for empty message. */
+ if (!req->nbytes) {
+ sgbuf = &dd->tmp;
+ req->nbytes = ctx->bufcnt;
+ } else {
+ sgbuf = req->src;
+ }
+
/* Process data. */
if (use_dma)
- return atmel_sha_dma_start(dd, req->src, req->nbytes,
+ return atmel_sha_dma_start(dd, sgbuf, req->nbytes,
atmel_sha_hmac_final_done);
- return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
+ return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true,
atmel_sha_hmac_final_done);
}
@@ -2099,10 +2125,9 @@ struct atmel_sha_authenc_reqctx {
unsigned int digestlen;
};
-static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
- int err)
+static void atmel_sha_authenc_complete(void *data, int err)
{
- struct ahash_request *req = areq->data;
+ struct ahash_request *req = data;
struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
@@ -2509,6 +2534,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
case 0x700:
+ case 0x600:
case 0x510:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 272a06f0b588..44a185a84760 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -93,11 +93,10 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
static int atmel_sha204a_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atmel_i2c_client_priv *i2c_priv;
int ret;
- ret = atmel_i2c_probe(client, id);
+ ret = atmel_i2c_probe(client);
if (ret)
return ret;
@@ -127,7 +126,7 @@ static void atmel_sha204a_remove(struct i2c_client *client)
kfree((void *)i2c_priv->hwrng.priv);
}
-static const struct of_device_id atmel_sha204a_dt_ids[] = {
+static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
{ .compatible = "atmel,atsha204", },
{ .compatible = "atmel,atsha204a", },
{ /* sentinel */ }
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 8b7bc1076e0d..c9ded8be9c39 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -565,17 +565,12 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
if (req->cryptlen < ivsize)
return;
- if (rctx->mode & TDES_FLAGS_ENCRYPT) {
+ if (rctx->mode & TDES_FLAGS_ENCRYPT)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst)
- memcpy(req->iv, rctx->lastc, ivsize);
- else
- scatterwalk_map_and_copy(req->iv, req->src,
- req->cryptlen - ivsize,
- ivsize, 0);
- }
+ else
+ memcpy(req->iv, rctx->lastc, ivsize);
}
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
@@ -590,7 +585,7 @@ static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
atmel_tdes_set_iv_as_last_ciphertext_block(dd);
- req->base.complete(&req->base, err);
+ skcipher_request_complete(req, err);
}
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
@@ -619,7 +614,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = skcipher_request_cast(async_req);
@@ -722,7 +717,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
- !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ !(mode & TDES_FLAGS_ENCRYPT)) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 51c66afbe677..8493a45e1bd4 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1621,7 +1621,7 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
crypto_skcipher_ctx(cipher);
int ret;
- ret = xts_check_key(&cipher->base, key, keylen);
+ ret = xts_verify_key(cipher, key, keylen);
if (ret)
return ret;
@@ -2143,13 +2143,13 @@ static void artpec6_crypto_task(unsigned long data)
list_for_each_entry_safe(req, n, &complete_in_progress,
complete_in_progress) {
- req->req->complete(req->req, -EINPROGRESS);
+ crypto_request_complete(req->req, -EINPROGRESS);
}
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
- req->complete(req, 0);
+ crypto_request_complete(req, 0);
}
static void
@@ -2161,7 +2161,7 @@ artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
cipher_req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- req->complete(req, 0);
+ skcipher_request_complete(cipher_req, 0);
}
static void
@@ -2173,7 +2173,7 @@ artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
cipher_req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- req->complete(req, 0);
+ skcipher_request_complete(cipher_req, 0);
}
static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
@@ -2211,12 +2211,12 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
}
}
- req->complete(req, result);
+ aead_request_complete(areq, result);
}
static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
- req->complete(req, 0);
+ crypto_request_complete(req, 0);
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c8c799428fe0..70b911baab26 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1614,7 +1614,7 @@ static void finish_req(struct iproc_reqctx_s *rctx, int err)
spu_chunk_cleanup(rctx);
if (areq)
- areq->complete(areq, err);
+ crypto_request_complete(areq, err);
}
/**
@@ -2570,66 +2570,29 @@ static int aead_need_fallback(struct aead_request *req)
return payload_len > ctx->max_payload;
}
-static void aead_complete(struct crypto_async_request *areq, int err)
-{
- struct aead_request *req =
- container_of(areq, struct aead_request, base);
- struct iproc_reqctx_s *rctx = aead_request_ctx(req);
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
- flow_log("%s() err:%d\n", __func__, err);
-
- areq->tfm = crypto_aead_tfm(aead);
-
- areq->complete = rctx->old_complete;
- areq->data = rctx->old_data;
-
- areq->complete(areq, err);
-}
-
static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct iproc_reqctx_s *rctx = aead_request_ctx(req);
struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
- int err;
- u32 req_flags;
+ struct aead_request *subreq;
flow_log("%s() enc:%u\n", __func__, is_encrypt);
- if (ctx->fallback_cipher) {
- /* Store the cipher tfm and then use the fallback tfm */
- rctx->old_tfm = tfm;
- aead_request_set_tfm(req, ctx->fallback_cipher);
- /*
- * Save the callback and chain ourselves in, so we can restore
- * the tfm
- */
- rctx->old_complete = req->base.complete;
- rctx->old_data = req->base.data;
- req_flags = aead_request_flags(req);
- aead_request_set_callback(req, req_flags, aead_complete, req);
- err = is_encrypt ? crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req);
-
- if (err == 0) {
- /*
- * fallback was synchronous (did not return
- * -EINPROGRESS). So restore request state here.
- */
- aead_request_set_callback(req, req_flags,
- rctx->old_complete, req);
- req->base.data = rctx->old_data;
- aead_request_set_tfm(req, aead);
- flow_log("%s() fallback completed successfully\n\n",
- __func__);
- }
- } else {
- err = -EINVAL;
- }
+ if (!ctx->fallback_cipher)
+ return -EINVAL;
- return err;
+ subreq = &rctx->req;
+ aead_request_set_tfm(subreq, ctx->fallback_cipher);
+ aead_request_set_callback(subreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return is_encrypt ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
}
static int aead_enqueue(struct aead_request *req, bool is_encrypt)
@@ -4243,6 +4206,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm)
static int aead_cra_init(struct crypto_aead *aead)
{
+ unsigned int reqsize = sizeof(struct iproc_reqctx_s);
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
@@ -4254,7 +4218,6 @@ static int aead_cra_init(struct crypto_aead *aead)
flow_log("%s()\n", __func__);
- crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
ctx->is_esp = false;
ctx->salt_len = 0;
ctx->salt_offset = 0;
@@ -4263,22 +4226,29 @@ static int aead_cra_init(struct crypto_aead *aead)
get_random_bytes(ctx->iv, MAX_IV_SIZE);
flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
- if (!err) {
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- flow_log("%s() creating fallback cipher\n", __func__);
-
- ctx->fallback_cipher =
- crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->fallback_cipher)) {
- pr_err("%s() Error: failed to allocate fallback for %s\n",
- __func__, alg->cra_name);
- return PTR_ERR(ctx->fallback_cipher);
- }
- }
+ if (err)
+ goto out;
+
+ if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
+ goto reqsize;
+
+ flow_log("%s() creating fallback cipher\n", __func__);
+
+ ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_cipher)) {
+ pr_err("%s() Error: failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fallback_cipher);
}
+ reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
+
+reqsize:
+ crypto_aead_set_reqsize(aead, reqsize);
+
+out:
return err;
}
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index d6d87332140a..e36881c983cf 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -339,15 +339,12 @@ struct iproc_reqctx_s {
/* hmac context */
bool is_sw_hmac;
- /* aead context */
- struct crypto_tfm *old_tfm;
- crypto_completion_t old_complete;
- void *old_data;
-
gfp_t gfp;
/* Buffers used to build SPU request and response messages */
struct spu_msg_buf msg_buf;
+
+ struct aead_request req;
};
/*
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 1f65df489847..87781c1534ee 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -83,7 +83,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
output_len = info->input_len - CAAM_BLOB_OVERHEAD;
}
- desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL | GFP_DMA);
+ desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -104,7 +104,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ecc15bc521db..feb86013dbf6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2023 NXP
*
* Based on talitos crypto API driver.
*
@@ -59,6 +59,12 @@
#include <crypto/engine.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
/*
* crypto alg
@@ -998,6 +1004,13 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
crypto_finalize_aead_request(jrp->engine, req, ecode);
}
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+ return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ dma_get_cache_alignment());
+}
+
static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -1025,8 +1038,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
* This is used e.g. by the CTS mode.
*/
if (ivsize && !ecode) {
- memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
- ivsize);
+ memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1379,8 +1391,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
0, 0, 0);
@@ -1608,6 +1619,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ unsigned int aligned_size;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
@@ -1681,8 +1693,12 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/*
* allocate space for base edesc and hw desc commands, link tables, IV
*/
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
+ aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+ aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
+ aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
+ (dma_get_cache_alignment() - 1);
+ aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
+ edesc = kzalloc(aligned_size, flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1701,7 +1717,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/* Make sure IV is located in a DMAable area */
if (ivsize) {
- iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
+ iv = skcipher_edesc_iv(edesc);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
@@ -3526,13 +3542,14 @@ int caam_algapi_init(struct device *ctrldev)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
+ struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
u32 cha_vid, cha_inst, aes_rn;
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_vid = rd_reg32(&perfmon->cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ cha_inst = rd_reg32(&perfmon->cha_num_ls);
des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
@@ -3540,23 +3557,23 @@ int caam_algapi_init(struct device *ctrldev)
ccha_inst = 0;
ptha_inst = 0;
- aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
- CHA_ID_LS_AES_MASK;
+ aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
+ struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
u32 aesa, mdha;
- aesa = rd_reg32(&priv->ctrl->vreg.aesa);
- mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+ aesa = rd_reg32(&vreg->aesa);
+ mdha = rd_reg32(&vreg->mdha);
aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
- des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
aes_inst = aesa & CHA_VER_NUM_MASK;
md_inst = mdha & CHA_VER_NUM_MASK;
- ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
- ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c37b67be0492..743ce50c14f2 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -20,6 +20,11 @@
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
/*
* crypto alg
@@ -959,7 +964,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return (struct aead_edesc *)drv_ctx;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = qi_cache_alloc(GFP_DMA | flags);
+ edesc = qi_cache_alloc(flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -1202,6 +1207,12 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
false);
}
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+ return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ dma_get_cache_alignment());
+}
+
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
struct skcipher_edesc *edesc;
@@ -1234,8 +1245,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
* This is used e.g. by the CTS mode.
*/
if (!ecode)
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
- ivsize);
+ memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1257,6 +1267,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
+ unsigned int len;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
@@ -1317,8 +1328,12 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+
+ len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes;
+ len = ALIGN(len, dma_get_cache_alignment());
+ len += ivsize;
+
+ if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1327,7 +1342,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
+ edesc = qi_cache_alloc(flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1335,9 +1350,16 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return ERR_PTR(-ENOMEM);
}
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = skcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
+ iv = skcipher_edesc_iv(edesc);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
@@ -1349,13 +1371,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return ERR_PTR(-ENOMEM);
}
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = skcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 1b0dd742c53f..5c8d35edaa1c 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -16,7 +16,9 @@
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
+#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
+#include <linux/kernel.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
@@ -370,7 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct dpaa2_sg_entry *sg_table;
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -1189,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -3220,14 +3222,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
int ret = -ENOMEM;
struct dpaa2_fl_entry *in_fle, *out_fle;
- req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
if (!req_ctx)
return -ENOMEM;
in_fle = &req_ctx->fd_flt[1];
out_fle = &req_ctx->fd_flt[0];
- flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL);
if (!flc)
goto err_flc;
@@ -3316,7 +3318,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -3411,7 +3419,7 @@ static void ahash_done(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_bi(void *cbk_ctx, u32 status)
@@ -3449,7 +3457,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
@@ -3476,7 +3484,7 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
@@ -3514,7 +3522,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static int ahash_update_ctx(struct ahash_request *req)
@@ -3560,7 +3568,7 @@ static int ahash_update_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -3654,7 +3662,7 @@ static int ahash_final_ctx(struct ahash_request *req)
int ret;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return -ENOMEM;
@@ -3743,7 +3751,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -3836,7 +3844,7 @@ static int ahash_digest(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -3913,7 +3921,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int ret = -ENOMEM;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return ret;
@@ -4012,7 +4020,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4125,7 +4133,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -4230,7 +4238,7 @@ static int ahash_update_first(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4926,6 +4934,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
{
struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
struct device *dev = priv->dev;
+ unsigned int alignmask;
int err;
/*
@@ -4936,13 +4945,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
!(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
return 0;
- priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
- GFP_KERNEL | GFP_DMA);
+ alignmask = DPAA2_CSCN_ALIGN - 1;
+ alignmask |= dma_get_cache_alignment() - 1;
+ priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
+ GFP_KERNEL);
if (!priv->cscn_mem)
return -ENOMEM;
- priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
- priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, priv->cscn_dma)) {
dev_err(dev, "Error mapping CSCN memory area\n");
@@ -5174,7 +5184,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
priv->domain = iommu_get_domain_for_dev(dev);
qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
- 0, SLAB_CACHE_DMA, NULL);
+ 0, 0, NULL);
if (!qi_cache) {
dev_err(dev, "Can't allocate SEC cache\n");
return -ENOMEM;
@@ -5451,7 +5461,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
DPAA2_CSCN_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
dev_dbg_ratelimited(dev, "Dropping request\n");
return -EBUSY;
}
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index d35253407ade..abb502bb675c 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -7,13 +7,14 @@
#ifndef _CAAMALG_QI2_H_
#define _CAAMALG_QI2_H_
+#include <crypto/internal/skcipher.h>
+#include <linux/compiler_attributes.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <linux/threads.h>
#include <linux/netdevice.h>
#include "dpseci.h"
#include "desc_constr.h"
-#include <crypto/skcipher.h>
#define DPAA2_CAAM_STORE_SIZE 16
/* NAPI weight *must* be a multiple of the store size. */
@@ -36,8 +37,6 @@
* @tx_queue_attr: array of Tx queue attributes
* @cscn_mem: pointer to memory region containing the congestion SCN
* its size is larger than needed to accommodate alignment
- * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
- * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
* @cscn_dma: dma address used by the QMAN to write CSCN messages
* @dev: device associated with the DPSECI object
* @mc_io: pointer to MC portal's I/O object
@@ -58,7 +57,6 @@ struct dpaa2_caam_priv {
/* congestion */
void *cscn_mem;
- void *cscn_mem_aligned;
dma_addr_t cscn_dma;
struct device *dev;
@@ -158,7 +156,7 @@ struct ahash_edesc {
struct caam_flc {
u32 flc[16];
u32 sh_desc[MAX_SDLEN];
-} ____cacheline_aligned;
+} __aligned(CRYPTO_DMA_ALIGN);
enum optype {
ENCRYPT = 0,
@@ -180,7 +178,7 @@ enum optype {
* @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
*/
struct caam_request {
- struct dpaa2_fl_entry fd_flt[2];
+ struct dpaa2_fl_entry fd_flt[2] __aligned(CRYPTO_DMA_ALIGN);
dma_addr_t fd_flt_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 1050e965a438..80deb003f0a5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -66,6 +66,8 @@
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#define CAAM_CRA_PRIORITY 3000
@@ -365,7 +367,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
dma_addr_t key_dma;
int ret;
- desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
@@ -432,7 +434,13 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dev_dbg(jrdev, "keylen %d\n", keylen);
if (keylen > blocksize) {
- hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -606,7 +614,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
* by CAAM, not crypto engine.
*/
if (!has_bklog)
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);
}
@@ -668,7 +676,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
* by CAAM, not crypto engine.
*/
if (!has_bklog)
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);
@@ -702,7 +710,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
if (!edesc) {
dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
return NULL;
@@ -1948,12 +1956,14 @@ int caam_algapi_hash_init(struct device *ctrldev)
* presence and attributes of MD block.
*/
if (priv->era < 10) {
- md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+ struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
+
+ md_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ md_inst = (rd_reg32(&perfmon->cha_num_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
} else {
- u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+ u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_inst = mdha & CHA_VER_NUM_MASK;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index aef031946f33..72afc249d42f 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -16,6 +16,8 @@
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
@@ -310,8 +312,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc, hw desc commands and link tables */
- edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
if (!edesc)
goto dst_fail;
@@ -898,7 +899,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
if (!nbytes)
return NULL;
- dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
+ dst = kzalloc(dstlen, GFP_KERNEL);
if (!dst)
return NULL;
@@ -910,7 +911,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
/**
* caam_read_raw_data - Read a raw byte stream as a positive integer.
* The function skips the buffer's leading zeros, copies the remaining data
- * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * to a buffer allocated in the GFP_KERNEL zone and returns
* the address of the new buffer.
*
* @buf : The data to read
@@ -923,7 +924,7 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
if (!*nbytes)
return NULL;
- return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
+ return kmemdup(buf, *nbytes, GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
@@ -949,13 +950,13 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
if (!rsa_key->e)
goto err;
/*
* Skip leading zeros and copy the positive integer to a buffer
- * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * allocated in the GFP_KERNEL zone. The decryption descriptor
* expects a positive integer for the RSA modulus and uses its length as
* decryption output length.
*/
@@ -983,6 +984,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct caam_rsa_key *rsa_key = &ctx->key;
size_t p_sz = raw_key->p_sz;
size_t q_sz = raw_key->q_sz;
+ unsigned aligned_size;
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
@@ -994,11 +996,13 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
goto free_p;
rsa_key->q_sz = q_sz;
- rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+ aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
+ rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
if (!rsa_key->tmp1)
goto free_q;
- rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+ aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
+ rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
if (!rsa_key->tmp2)
goto free_tmp1;
@@ -1051,17 +1055,17 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
if (!rsa_key->d)
goto err;
- rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
if (!rsa_key->e)
goto err;
/*
* Skip leading zeros and copy the positive integer to a buffer
- * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * allocated in the GFP_KERNEL zone. The decryption descriptor
* expects a positive integer for the RSA modulus and uses its length as
* decryption output length.
*/
@@ -1164,10 +1168,10 @@ int caam_pkc_init(struct device *ctrldev)
/* Determine public key hardware accelerator presence. */
if (priv->era < 10) {
- pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
} else {
- pkha = rd_reg32(&priv->ctrl->vreg.pkha);
+ pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
pk_inst = pkha & CHA_VER_NUM_MASK;
/*
@@ -1185,8 +1189,7 @@ int caam_pkc_init(struct device *ctrldev)
return 0;
/* allocate zero buffer, used for padding input */
- zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
- GFP_KERNEL);
+ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
if (!zero_buffer)
return -ENOMEM;
diff --git a/drivers/crypto/caam/caamprng.c b/drivers/crypto/caam/caamprng.c
index 4839e66300a2..6e4c1191cb28 100644
--- a/drivers/crypto/caam/caamprng.c
+++ b/drivers/crypto/caam/caamprng.c
@@ -8,6 +8,8 @@
#include <linux/completion.h>
#include <crypto/internal/rng.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
@@ -75,6 +77,7 @@ static int caam_prng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
+ unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment());
struct caam_prng_ctx ctx;
struct device *jrdev;
dma_addr_t dst_dma;
@@ -82,7 +85,10 @@ static int caam_prng_generate(struct crypto_rng *tfm,
u8 *buf;
int ret;
- buf = kzalloc(dlen, GFP_KERNEL);
+ if (aligned_dlen < dlen)
+ return -EOVERFLOW;
+
+ buf = kzalloc(aligned_dlen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -94,7 +100,7 @@ static int caam_prng_generate(struct crypto_rng *tfm,
return ret;
}
- desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
goto out1;
@@ -156,7 +162,7 @@ static int caam_prng_seed(struct crypto_rng *tfm,
return ret;
}
- desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
if (!desc) {
caam_jr_free(jrdev);
return -ENOMEM;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 1f0e82050976..50eb55da45c2 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -12,6 +12,8 @@
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/kfifo.h>
#include "compat.h"
@@ -176,17 +178,18 @@ static int caam_init(struct hwrng *rng)
int err;
ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
- GFP_DMA | GFP_KERNEL);
+ GFP_KERNEL);
if (!ctx->desc_sync)
return -ENOMEM;
ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
- GFP_DMA | GFP_KERNEL);
+ GFP_KERNEL);
if (!ctx->desc_async)
return -ENOMEM;
- if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
- GFP_DMA | GFP_KERNEL))
+ if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE,
+ dma_get_cache_alignment()),
+ GFP_KERNEL))
return -ENOMEM;
INIT_WORK(&ctx->worker, caam_rng_worker);
@@ -224,10 +227,10 @@ int caam_rng_init(struct device *ctrldev)
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
- rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
else
- rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+ rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
if (!rng_inst)
return 0;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 32253a064d0f..bedcc2ab3a00 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,7 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*/
#include <linux/device.h>
@@ -199,7 +199,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
u32 *desc, status;
int sh_idx, ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -276,7 +276,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
const u32 rdsta_mask = rdsta_if | rdsta_pr;
+
+ /* Clear the contents before using the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
}
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
- /* Clear the contents before recreating the descriptor */
- memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
kfree(desc);
@@ -395,7 +397,7 @@ start_rng:
RTMCTL_SAMP_MODE_RAW_ES_SC);
}
-static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
{
static const struct {
u16 ip_id;
@@ -421,12 +423,12 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
u16 ip_id;
int i;
- ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+ ccbvid = rd_reg32(&perfmon->ccb_id);
era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
if (era) /* This is '0' prior to CAAM ERA-6 */
return era;
- id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+ id_ms = rd_reg32(&perfmon->caam_id_ms);
ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
@@ -444,9 +446,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
* In case this property is not passed an attempt to retrieve the CAAM
* era via register reads will be made.
*
- * @ctrl: controller region
+ * @perfmon: Performance Monitor Registers
*/
-static int caam_get_era(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era(struct caam_perfmon __iomem *perfmon)
{
struct device_node *caam_node;
int ret;
@@ -459,7 +461,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
if (!ret)
return prop;
else
- return caam_get_era_from_hw(ctrl);
+ return caam_get_era_from_hw(perfmon);
}
/*
@@ -626,12 +628,14 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
+ struct caam_perfmon __iomem *perfmon;
struct dentry *dfs_root;
u32 scfgr, comp_params;
u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
bool pr_support = false;
+ bool reg_access = true;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -645,6 +649,17 @@ static int caam_probe(struct platform_device *pdev)
caam_imx = (bool)imx_soc_match;
if (imx_soc_match) {
+ /*
+ * Until Layerscape and i.MX OP-TEE get in sync,
+ * only i.MX OP-TEE use cases disallow access to
+ * caam page 0 (controller) registers.
+ */
+ np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+ ctrlpriv->optee_en = !!np;
+ of_node_put(np);
+
+ reg_access = !ctrlpriv->optee_en;
+
if (!imx_soc_match->data) {
dev_err(dev, "No clock data provided for i.MX SoC");
return -EINVAL;
@@ -665,10 +680,38 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ u32 reg;
+
+ if (of_property_read_u32_index(np, "reg", 0, &reg)) {
+ dev_err(dev, "%s read reg property error\n",
+ np->full_name);
+ continue;
+ }
+
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl + reg);
+
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
+ /*
+ * Wherever possible, instead of accessing registers from the global page,
+ * use the alias registers in the first (cf. DT nodes order)
+ * job ring's page.
+ */
+ perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+ (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+ caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
(CSTA_PLEND | CSTA_ALT_PLEND));
- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
- if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+ comp_params = rd_reg32(&perfmon->comp_parms_ms);
+ if (reg_access && comp_params & CTPR_MS_PS &&
+ rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
caam_ptr_sz = sizeof(u64);
else
caam_ptr_sz = sizeof(u32);
@@ -733,6 +776,9 @@ static int caam_probe(struct platform_device *pdev)
}
#endif
+ if (!reg_access)
+ goto set_dma_mask;
+
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register.
@@ -772,13 +818,14 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
+set_dma_mask:
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
return ret;
}
- ctrlpriv->era = caam_get_era(ctrl);
+ ctrlpriv->era = caam_get_era(perfmon);
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
dfs_root = debugfs_create_dir(dev_name(dev), NULL);
@@ -789,7 +836,7 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- caam_debugfs_init(ctrlpriv, dfs_root);
+ caam_debugfs_init(ctrlpriv, perfmon, dfs_root);
/* Check to see if (DPAA 1.x) QI present. If so, enable */
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -808,26 +855,16 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
-
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
return -ENOMEM;
}
- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ls);
+ if (!reg_access)
+ goto report_live;
+
+ comp_params = rd_reg32(&perfmon->comp_parms_ls);
ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
/*
@@ -836,15 +873,21 @@ static int caam_probe(struct platform_device *pdev)
* check both here.
*/
if (ctrlpriv->era < 10) {
- rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+ rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
- (rd_reg32(&ctrl->perfmon.cha_num_ls) & CHA_ID_LS_AES_MASK);
+ (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
} else {
- rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+ struct version_regs __iomem *vreg;
+
+ vreg = ctrlpriv->total_jobrs ?
+ (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+ (struct version_regs __iomem *)&ctrl->vreg;
+
+ rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
CHA_VER_VID_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
- (rd_reg32(&ctrl->vreg.aesa) & CHA_VER_MISC_AES_NUM_MASK);
+ (rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
}
/*
@@ -923,10 +966,11 @@ static int caam_probe(struct platform_device *pdev)
clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
+report_live:
/* NOTE: RTIC detection ought to go here, around Si time */
- caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
- (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+ caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
+ (u64)rd_reg32(&perfmon->caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 806bb20d2aa1..6358d3cabf57 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
#include <linux/debugfs.h>
#include "compat.h"
@@ -42,16 +42,15 @@ void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
}
#endif
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon,
+ struct dentry *root)
{
- struct caam_perfmon *perfmon;
-
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
- perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
ctrlpriv->ctl = debugfs_create_dir("ctl", root);
@@ -78,6 +77,9 @@ void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
&perfmon->status, &caam_fops_u32_ro);
+ if (ctrlpriv->optee_en)
+ return;
+
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 661d768acdbf..8b5d1acd21a7 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -1,16 +1,19 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
#ifndef CAAM_DEBUGFS_H
#define CAAM_DEBUGFS_H
struct dentry;
struct caam_drv_private;
+struct caam_perfmon;
#ifdef CONFIG_DEBUG_FS
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon, struct dentry *root);
#else
static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon,
struct dentry *root)
{}
#endif
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 62ce6421bb3f..824c94d44f94 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -163,7 +163,8 @@ static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
- if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+ /* Avoid gcc warning: memcpy with data == NULL */
+ if (!IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG) || data)
memcpy(offset, data, len);
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
index 0eca8c2fd916..020a9d8a8a07 100644
--- a/drivers/crypto/caam/dpseci-debugfs.c
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -8,7 +8,7 @@
static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
{
- struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
+ struct dpaa2_caam_priv *priv = file->private;
u32 fqid, fcnt, bcnt;
int i, err;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 572cf66c887a..86ed1b91c22d 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -94,6 +94,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
+ u8 optee_en; /* Nonzero if OP-TEE f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 724fdec18bf9..96dea5304d22 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -4,7 +4,7 @@
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
*/
#include <linux/of_irq.h>
@@ -72,19 +72,27 @@ static void caam_jr_crypto_engine_exit(void *data)
crypto_engine_exit(jrpriv->engine);
}
-static int caam_reset_hw_jr(struct device *dev)
+/*
+ * Put the CAAM in quiesce, i.e. stop processing
+ *
+ * Must be called with itr disabled
+ */
+static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
unsigned int timeout = 100000;
- /*
- * mask interrupts since we are going to poll
- * for reset completion status
- */
- clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+ /* Check the current status */
+ if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
+ goto wait_quiesce_completion;
- /* initiate flush (required prior to reset) */
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ /* Reset the field */
+ clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);
+
+ /* initiate flush / park (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);
+
+wait_quiesce_completion:
while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
JRINT_ERR_HALT_INPROGRESS) && --timeout)
cpu_relax();
@@ -95,8 +103,35 @@ static int caam_reset_hw_jr(struct device *dev)
return -EIO;
}
+ return 0;
+}
+
+/*
+ * Flush the job ring, so the jobs running will be stopped, jobs queued will be
+ * invalidated and the CAAM will no longer fetch from the input ring.
+ *
+ * Must be called with itr disabled
+ */
+static int caam_jr_flush(struct device *dev)
+{
+ return caam_jr_stop_processing(dev, JRCR_RESET);
+}
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+ int err;
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+ err = caam_jr_flush(dev);
+ if (err)
+ return err;
+
/* initiate reset */
- timeout = 100000;
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
cpu_relax();
@@ -163,6 +198,11 @@ static int caam_jr_remove(struct platform_device *pdev)
return ret;
}
+static void caam_jr_platform_shutdown(struct platform_device *pdev)
+{
+ caam_jr_remove(pdev);
+}
+
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
@@ -618,6 +658,7 @@ static struct platform_driver caam_jr_driver = {
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
+ .shutdown = caam_jr_platform_shutdown,
};
static int __init jr_driver_init(void)
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index b0e8a4939b4f..88cc4fe2a585 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -64,7 +64,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
if (local_max > max_keylen)
return -EINVAL;
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return ret;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index c36f27376d7e..2ad2c1035856 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -8,7 +8,13 @@
*/
#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <soc/fsl/qman.h>
#include "debugfs.h"
@@ -614,7 +620,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
struct qman_fq *fq;
int ret;
- fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+ fq = kzalloc(sizeof(*fq), GFP_KERNEL);
if (!fq)
return -ENOMEM;
@@ -755,8 +761,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
napi_enable(irqtask);
}
- qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
- SLAB_CACHE_DMA, NULL);
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ dma_get_cache_alignment(), 0, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 5894f16f8fe3..a96e3d213c06 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -9,6 +9,8 @@
#ifndef __QI_H__
#define __QI_H__
+#include <crypto/algapi.h>
+#include <linux/compiler_attributes.h>
#include <soc/fsl/qman.h>
#include "compat.h"
#include "desc.h"
@@ -58,8 +60,10 @@ enum optype {
* @qidev: device pointer for CAAM/QI backend
*/
struct caam_drv_ctx {
- u32 prehdr[2];
- u32 sh_desc[MAX_SDLEN];
+ struct {
+ u32 prehdr[2];
+ u32 sh_desc[MAX_SDLEN];
+ } __aligned(CRYPTO_DMA_ALIGN);
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
@@ -67,7 +71,7 @@ struct caam_drv_ctx {
int cpu;
enum optype op_type;
struct device *qidev;
-} ____cacheline_aligned;
+};
/**
* caam_drv_req - The request structure the driver application should fill while
@@ -88,7 +92,7 @@ struct caam_drv_req {
struct caam_drv_ctx *drv_ctx;
caam_qi_cbk cbk;
void *app_ctx;
-} ____cacheline_aligned;
+} __aligned(CRYPTO_DMA_ALIGN);
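Grouping the words the device actually DMAs into an anonymous struct and aligning only that group (plus the request structure) to CRYPTO_DMA_ALIGN is the pattern the two hunks above apply. A minimal sketch on an invented structure, assuming only the first two members are device-visible:

    #include <crypto/algapi.h>      /* CRYPTO_DMA_ALIGN */
    #include <linux/types.h>

    /* Sketch: only the members the device reads are forced onto a DMA-safe boundary. */
    struct example_drv_ctx {
            struct {
                    u32 hw_header[2];       /* fetched by the device */
                    u32 hw_desc[16];        /* fetched by the device */
            } __aligned(CRYPTO_DMA_ALIGN);

            int cpu;                        /* CPU-only bookkeeping */
            void *priv;
    };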
/**
* caam_drv_ctx_init - Initialise a CAAM/QI driver context
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 9eca0c302186..ee476c6c7f82 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -28,7 +28,7 @@ static void cvm_callback(u32 status, void *arg)
{
struct crypto_async_request *req = (struct crypto_async_request *)arg;
- req->complete(req, !status);
+ crypto_request_complete(req, !status);
}
static inline void update_input_iv(struct cpt_request_info *req_info,
@@ -232,13 +232,12 @@ static int cvm_decrypt(struct skcipher_request *req)
static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
const u8 *key1 = key;
const u8 *key2 = key + (keylen / 2);
- err = xts_check_key(tfm, key, keylen);
+ err = xts_verify_key(cipher, key, keylen);
if (err)
return err;
ctx->key_len = keylen;
@@ -289,8 +288,7 @@ static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen, u8 cipher_type)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher);
ctx->cipher_type = cipher_type;
if (!cvm_validate_keylen(ctx, keylen)) {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 0653484df23f..b0e53034164a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -199,7 +199,7 @@ static void nitrox_aead_callback(void *arg, int err)
err = -EINVAL;
}
- areq->base.complete(&areq->base, err);
+ aead_request_complete(areq, err);
}
static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen)
@@ -434,7 +434,7 @@ static void nitrox_rfc4106_callback(void *arg, int err)
err = -EINVAL;
}
- areq->base.complete(&areq->base, err);
+ aead_request_complete(areq, err);
}
static int nitrox_rfc4106_enc(struct aead_request *areq)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 432a61aca0c5..65114f766e7d 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 248b4fff1c72..138261dcd032 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -337,12 +337,11 @@ static int nitrox_3des_decrypt(struct skcipher_request *skreq)
static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
struct flexi_crypto_context *fctx;
int aes_keylen, ret;
- ret = xts_check_key(tfm, key, keylen);
+ ret = xts_verify_key(cipher, key, keylen);
if (ret)
return ret;
@@ -362,8 +361,7 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
struct flexi_crypto_context *fctx;
int aes_keylen;
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index db362fe472ea..f6196495e862 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -10,7 +10,8 @@ ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
- tee-dev.o
+ tee-dev.o \
+ platform-access.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 73442a382f68..ecd58b38c46e 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -146,7 +146,7 @@ static void ccp_crypto_complete(void *data, int err)
/* Only propagate the -EINPROGRESS if necessary */
if (crypto_cmd->ret == -EBUSY) {
crypto_cmd->ret = -EINPROGRESS;
- req->complete(req, -EINPROGRESS);
+ crypto_request_complete(req, -EINPROGRESS);
}
return;
@@ -159,18 +159,18 @@ static void ccp_crypto_complete(void *data, int err)
held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
- backlog->req->complete(backlog->req, -EINPROGRESS);
+ crypto_request_complete(backlog->req, -EINPROGRESS);
}
/* Transition the state from -EBUSY to -EINPROGRESS first */
if (crypto_cmd->ret == -EBUSY)
- req->complete(req, -EINPROGRESS);
+ crypto_request_complete(req, -EINPROGRESS);
/* Completion callbacks */
ret = err;
if (ctx->complete)
ret = ctx->complete(req, ret);
- req->complete(req, ret);
+ crypto_request_complete(req, ret);
/* Submit the next cmd */
while (held) {
@@ -186,12 +186,12 @@ static void ccp_crypto_complete(void *data, int err)
ctx = crypto_tfm_ctx_dma(held->req->tfm);
if (ctx->complete)
ret = ctx->complete(held->req, ret);
- held->req->complete(held->req, ret);
+ crypto_request_complete(held->req, ret);
next = ccp_crypto_cmd_complete(held, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
- backlog->req->complete(backlog->req, -EINPROGRESS);
+ crypto_request_complete(backlog->req, -EINPROGRESS);
}
kfree(held);
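These hunks swap direct req->complete() calls for the crypto_request_complete() helper, which takes the async request and the final error code. A hedged sketch of a completion path using it (the function and its arguments are invented; the helper itself lives in the core crypto headers):

    #include <linux/crypto.h>

    /*
     * Sketch only: complete a finished request and, if another request was
     * backlogged behind it, tell its owner that it is now in progress.
     */
    static void example_complete(struct crypto_async_request *req,
                                 struct crypto_async_request *backlog, int err)
    {
            if (backlog)
                    crypto_request_complete(backlog, -EINPROGRESS);

            crypto_request_complete(req, err);
    }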
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 9f753cb4f5f1..b386a7063818 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -642,14 +642,26 @@ static void ccp_dma_release(struct ccp_device *ccp)
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
- if (dma_chan->client_count)
- dma_release_channel(dma_chan);
-
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
}
+static void ccp_dma_release_channels(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -770,8 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- ccp_dma_release(ccp);
+ ccp_dma_release_channels(ccp);
dma_async_device_unregister(dma_dev);
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
new file mode 100644
index 000000000000..939c924fc383
--- /dev/null
+++ b/drivers/crypto/ccp/platform-access.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ *
+ * Some of this code is adapted from drivers/i2c/busses/i2c-designware-amdpsp.c
+ * developed by Jan Dabros <jsd@semihalf.com> and Copyright (C) 2022 Google Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "platform-access.h"
+
+#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define DOORBELL_CMDRESP_STS GENMASK(7, 0)
+
+/* The recovery field must be 0 before commands can be sent */
+static int check_recovery(u32 __iomem *cmd)
+{
+ return FIELD_GET(PSP_CMDRESP_RECOVERY, ioread32(cmd));
+}
+
+static int wait_cmd(u32 __iomem *cmd)
+{
+ u32 tmp, expected;
+
+ /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
+ expected = FIELD_PREP(PSP_CMDRESP_RESP, 1);
+
+ /*
+ * Check for readiness of PSP mailbox in a tight loop in order to
+ * process further as soon as the command has been consumed.
+ */
+ return readl_poll_timeout(cmd, tmp, (tmp & expected), 0,
+ PSP_CMD_TIMEOUT_US);
+}
+
+int psp_check_platform_access_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_platform_access_status);
+
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
+ struct psp_request *req)
+{
+ struct psp_device *psp = psp_get_master_device();
+ u32 __iomem *cmd, *lo, *hi;
+ struct psp_platform_access_device *pa_dev;
+ phys_addr_t req_addr;
+ u32 cmd_reg;
+ int ret;
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ pa_dev = psp->platform_access_data;
+ cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
+ lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
+ hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
+
+ mutex_lock(&pa_dev->mailbox_mutex);
+
+ if (check_recovery(cmd)) {
+ dev_dbg(psp->dev, "platform mailbox is in recovery\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (wait_cmd(cmd)) {
+ dev_dbg(psp->dev, "platform mailbox is not done processing command\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * Fill the mailbox with the address of the command-response buffer, which
+ * will be used for sending requests as well as reading the status returned
+ * by the PSP. Use the physical address, since the PSP will map this region.
+ */
+ req_addr = __psp_pa(req);
+ iowrite32(lower_32_bits(req_addr), lo);
+ iowrite32(upper_32_bits(req_addr), hi);
+
+ print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ /* Write command register to trigger processing */
+ cmd_reg = FIELD_PREP(PSP_CMDRESP_CMD, msg);
+ iowrite32(cmd_reg, cmd);
+
+ if (wait_cmd(cmd)) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* Ensure it was triggered by this driver */
+ if (ioread32(lo) != lower_32_bits(req_addr) ||
+ ioread32(hi) != upper_32_bits(req_addr)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Store the status in request header for caller to investigate */
+ cmd_reg = ioread32(cmd);
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+ if (req->header.status) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&pa_dev->mailbox_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(psp_send_platform_access_msg);
+
+int psp_ring_platform_doorbell(int msg, u32 *result)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_platform_access_device *pa_dev;
+ u32 __iomem *button, *cmd;
+ int ret, val;
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ pa_dev = psp->platform_access_data;
+ button = psp->io_regs + pa_dev->vdata->doorbell_button_reg;
+ cmd = psp->io_regs + pa_dev->vdata->doorbell_cmd_reg;
+
+ mutex_lock(&pa_dev->doorbell_mutex);
+
+ if (wait_cmd(cmd)) {
+ dev_err(psp->dev, "doorbell command not done processing\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ iowrite32(FIELD_PREP(DOORBELL_CMDRESP_STS, msg), cmd);
+ iowrite32(PSP_DRBL_RING, button);
+
+ if (wait_cmd(cmd)) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ val = FIELD_GET(DOORBELL_CMDRESP_STS, ioread32(cmd));
+ if (val) {
+ if (result)
+ *result = val;
+ ret = -EIO;
+ goto unlock;
+ }
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pa_dev->doorbell_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(psp_ring_platform_doorbell);
+
+void platform_access_dev_destroy(struct psp_device *psp)
+{
+ struct psp_platform_access_device *pa_dev = psp->platform_access_data;
+
+ if (!pa_dev)
+ return;
+
+ mutex_destroy(&pa_dev->mailbox_mutex);
+ mutex_destroy(&pa_dev->doorbell_mutex);
+ psp->platform_access_data = NULL;
+}
+
+int platform_access_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_platform_access_device *pa_dev;
+
+ pa_dev = devm_kzalloc(dev, sizeof(*pa_dev), GFP_KERNEL);
+ if (!pa_dev)
+ return -ENOMEM;
+
+ psp->platform_access_data = pa_dev;
+ pa_dev->psp = psp;
+ pa_dev->dev = dev;
+
+ pa_dev->vdata = (struct platform_access_vdata *)psp->vdata->platform_access;
+
+ mutex_init(&pa_dev->mailbox_mutex);
+ mutex_init(&pa_dev->doorbell_mutex);
+
+ dev_dbg(dev, "platform access enabled\n");
+
+ return 0;
+}
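psp_ring_platform_doorbell() gives other kernel code a way to poke the PSP with a single command value and read back a status. A hedged caller sketch; the command value is purely illustrative and the prototype is assumed to be exported through a linux/psp-platform-access.h style header:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/psp-platform-access.h>  /* assumed location of the prototype */
    #include <linux/types.h>

    /* Sketch: ring the PSP doorbell with a hypothetical command and report the result. */
    static int example_ring_doorbell(void)
    {
            u32 result = 0;
            int ret;

            ret = psp_ring_platform_doorbell(0x01 /* hypothetical command */, &result);
            if (ret == -EIO)
                    pr_warn("PSP doorbell returned status %#x\n", result);

            return ret;
    }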
diff --git a/drivers/crypto/ccp/platform-access.h b/drivers/crypto/ccp/platform-access.h
new file mode 100644
index 000000000000..a83f03beb869
--- /dev/null
+++ b/drivers/crypto/ccp/platform-access.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_PLATFORM_ACCESS_H__
+#define __PSP_PLATFORM_ACCESS_H__
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_platform_access_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct platform_access_vdata *vdata;
+
+ struct mutex mailbox_mutex;
+ struct mutex doorbell_mutex;
+
+ void *platform_access_data;
+};
+
+void platform_access_dev_destroy(struct psp_device *psp);
+int platform_access_dev_init(struct psp_device *psp);
+
+#endif /* __PSP_PLATFORM_ACCESS_H__ */
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index c9c741ac8442..e3d6955d3265 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -14,6 +14,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "platform-access.h"
struct psp_device *psp_master;
@@ -42,18 +43,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
+ /* Clear the interrupt status by writing the same value we read. */
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+
/* invoke subdevice interrupt handlers */
if (status) {
if (psp->sev_irq_handler)
psp->sev_irq_handler(irq, psp->sev_irq_data, status);
-
- if (psp->tee_irq_handler)
- psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
- /* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
-
return IRQ_HANDLED;
}
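Acknowledging the status register before dispatching to the sub-handlers means an event that fires while a handler runs raises a fresh interrupt instead of being folded into the already-consumed status. A small sketch of the read/ack/dispatch ordering; the register mapping passed through 'data' and the 0x10 status offset are invented:

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/printk.h>

    static irqreturn_t example_irq(int irq, void *data)
    {
            void __iomem *regs = data;
            u32 status;

            status = ioread32(regs + 0x10);

            /* Ack first by writing the bits back, then dispatch on the snapshot. */
            iowrite32(status, regs + 0x10);

            if (status)
                    pr_debug("example irq: handling status %#x\n", status);

            return IRQ_HANDLED;
    }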
@@ -105,6 +103,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static void psp_init_platform_access(struct psp_device *psp)
+{
+ int ret;
+
+ ret = platform_access_dev_init(psp);
+ if (ret) {
+ dev_warn(psp->dev, "platform access init failed: %d\n", ret);
+ return;
+ }
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -121,6 +130,9 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (psp->vdata->platform_access)
+ psp_init_platform_access(psp);
+
return 0;
}
@@ -201,6 +213,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ platform_access_dev_destroy(psp);
+
sp_free_psp_irq(sp, psp);
if (sp->clear_psp_master_device)
@@ -219,18 +233,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp)
psp_set_sev_irq_handler(psp, NULL, NULL);
}
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
- void *data)
-{
- psp->tee_irq_data = data;
- psp->tee_irq_handler = handler;
-}
-
-void psp_clear_tee_irq_handler(struct psp_device *psp)
-{
- psp_set_tee_irq_handler(psp, NULL, NULL);
-}
-
struct psp_device *psp_get_master_device(void)
{
struct sp_device *sp = sp_get_psp_master_device();
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index d528eb04c3ef..505e4bdeaca8 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -17,9 +17,6 @@
#include "sp-dev.h"
-#define PSP_CMDRESP_RESP BIT(31)
-#define PSP_CMDRESP_ERR_MASK 0xffff
-
#define MAX_PSP_NAME_LEN 16
extern struct psp_device *psp_master;
@@ -40,11 +37,9 @@ struct psp_device {
psp_irq_handler_t sev_irq_handler;
void *sev_irq_data;
- psp_irq_handler_t tee_irq_handler;
- void *tee_irq_data;
-
void *sev_data;
void *tee_data;
+ void *platform_access_data;
unsigned int capability;
};
@@ -53,10 +48,6 @@ void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
void *data);
void psp_clear_sev_irq_handler(struct psp_device *psp);
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
- void *data);
-void psp_clear_tee_irq_handler(struct psp_device *psp);
-
struct psp_device *psp_get_master_device(void);
#define PSP_CAPABILITY_SEV BIT(0)
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 06fc7156c04f..f97166fba9d9 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -7,6 +7,7 @@
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -24,8 +25,10 @@
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
+#include <linux/psp.h>
#include <asm/smp.h>
+#include <asm/cacheflush.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -56,6 +59,7 @@ MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on m
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
@@ -100,7 +104,7 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
/* Check if it is SEV command completion: */
reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
+ if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
sev->int_rcvd = 1;
wake_up(&sev->int_queue);
}
@@ -344,9 +348,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
- reg = cmd;
- reg <<= SEV_CMDRESP_CMD_SHIFT;
- reg |= SEV_CMDRESP_IOC;
+ reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
@@ -364,11 +366,11 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
psp_timeout = psp_cmd_timeout;
if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+ *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
+ if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
+ cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
ret = -EIO;
} else {
ret = sev_write_init_ex_file_if_required(cmd);
@@ -440,12 +442,19 @@ static int __sev_init_ex_locked(int *error)
return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
}
+static inline int __sev_do_init_locked(int *psp_ret)
+{
+ if (sev_init_ex_buffer)
+ return __sev_init_ex_locked(psp_ret);
+ else
+ return __sev_init_locked(psp_ret);
+}
+
static int __sev_platform_init_locked(int *error)
{
+ int rc = 0, psp_ret = SEV_RET_NO_FW_CALL;
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc = 0, psp_ret = -1;
- int (*init_function)(int *error);
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -456,15 +465,12 @@ static int __sev_platform_init_locked(int *error)
return 0;
if (sev_init_ex_buffer) {
- init_function = __sev_init_ex_locked;
rc = sev_read_init_ex_file();
if (rc)
return rc;
- } else {
- init_function = __sev_init_locked;
}
- rc = init_function(&psp_ret);
+ rc = __sev_do_init_locked(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
* Initialization command returned an integrity check failure
@@ -473,9 +479,11 @@ static int __sev_platform_init_locked(int *error)
* initialization function should succeed by replacing the state
* with a reset state.
*/
- dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
- rc = init_function(&psp_ret);
+ dev_err(sev->dev,
+"SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
+ rc = __sev_do_init_locked(&psp_ret);
}
+
if (error)
*error = psp_ret;
@@ -881,7 +889,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
input_address = (void __user *)input.address;
if (input.address && input.length) {
- id_blob = kzalloc(input.length, GFP_KERNEL);
+ /*
+ * The length of the ID shouldn't be assumed by software since
+ * it may change in the future. The allocation size is limited
+ * to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator.
+ * If the allocation fails, simply return ENOMEM rather than
+ * warning in the kernel log.
+ */
+ id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
if (!id_blob)
return -ENOMEM;
@@ -1327,7 +1342,10 @@ void sev_pci_init(void)
/* Obtain the TMR memory area for SEV-ES use */
sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
- if (!sev_es_tmr)
+ if (sev_es_tmr)
+ /* Must flush the cache before giving it to the firmware */
+ clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
+ else
dev_warn(sev->dev,
"SEV: TMR allocation failed, SEV-ES support unavailable\n");
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 666c21eb81ab..778c95155e74 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -25,8 +25,8 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
+#define SEV_CMDRESP_CMD GENMASK(26, 16)
#define SEV_CMD_COMPLETE BIT(1)
-#define SEV_CMDRESP_CMD_SHIFT 16
#define SEV_CMDRESP_IOC BIT(0)
struct sev_misc_dev {
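Replacing the open-coded shift with GENMASK() plus FIELD_PREP()/FIELD_GET() keeps the field layout in one place and lets the compiler check the mask. A self-contained sketch of the idiom; the 26:16 command field mirrors the define above, while the low 16-bit status field is an assumption for illustration:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    /* Illustrative register layout: bits 26:16 carry the command, bit 0 requests an IRQ. */
    #define EXAMPLE_CMD     GENMASK(26, 16)
    #define EXAMPLE_IOC     BIT(0)
    #define EXAMPLE_STS     GENMASK(15, 0)

    static u32 example_build_cmd(unsigned int cmd)
    {
            /* FIELD_PREP() shifts the value into the field described by the mask. */
            return FIELD_PREP(EXAMPLE_CMD, cmd) | EXAMPLE_IOC;
    }

    static unsigned int example_read_status(u32 reg)
    {
            /* FIELD_GET() extracts and right-aligns the field. */
            return FIELD_GET(EXAMPLE_STS, reg);
    }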
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 20377e67f65d..1253a0217985 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -53,9 +53,19 @@ struct tee_vdata {
const unsigned int ring_rptr_reg;
};
+struct platform_access_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int doorbell_button_reg;
+ const unsigned int doorbell_cmd_reg;
+};
+
struct psp_vdata {
const struct sev_vdata *sev;
const struct tee_vdata *tee;
+ const struct platform_access_vdata *platform_access;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 084d052fddcc..aa15bc4cac2b 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -342,52 +342,61 @@ static int __maybe_unused sp_pci_resume(struct device *dev)
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
- .cmdresp_reg = 0x10580,
- .cmdbuff_addr_lo_reg = 0x105e0,
- .cmdbuff_addr_hi_reg = 0x105e4,
+ .cmdresp_reg = 0x10580, /* C2PMSG_32 */
+ .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */
+ .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */
};
static const struct sev_vdata sevv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .cmdresp_reg = 0x10980, /* C2PMSG_32 */
+ .cmdbuff_addr_lo_reg = 0x109e0, /* C2PMSG_56 */
+ .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */
};
static const struct tee_vdata teev1 = {
- .cmdresp_reg = 0x10544,
- .cmdbuff_addr_lo_reg = 0x10548,
- .cmdbuff_addr_hi_reg = 0x1054c,
- .ring_wptr_reg = 0x10550,
- .ring_rptr_reg = 0x10554,
+ .cmdresp_reg = 0x10544, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
+ .ring_wptr_reg = 0x10550, /* C2PMSG_20 */
+ .ring_rptr_reg = 0x10554, /* C2PMSG_21 */
+};
+
+static const struct platform_access_vdata pa_v1 = {
+ .cmdresp_reg = 0x10570, /* C2PMSG_28 */
+ .cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */
+ .cmdbuff_addr_hi_reg = 0x10578, /* C2PMSG_30 */
+ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
+ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
};
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
- .feature_reg = 0x105fc,
- .inten_reg = 0x10610,
- .intsts_reg = 0x10614,
+ .feature_reg = 0x105fc, /* C2PMSG_63 */
+ .inten_reg = 0x10610, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv3 = {
.tee = &teev1,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .platform_access = &pa_v1,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
#endif
@@ -451,9 +460,9 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
- { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5c9d47f3be37..5560bf8329a1 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -8,12 +8,13 @@
* Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/gfp.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
#include <linux/psp-tee.h>
#include "psp-dev.h"
@@ -69,7 +70,7 @@ static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
while (--nloop) {
*reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
- if (*reg & PSP_CMDRESP_RESP)
+ if (FIELD_GET(PSP_CMDRESP_RESP, *reg))
return 0;
usleep_range(10000, 10100);
@@ -149,9 +150,9 @@ static int tee_init_ring(struct psp_tee_device *tee)
goto free_buf;
}
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
- reg & PSP_CMDRESP_ERR_MASK);
+ if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010lx)\n",
+ FIELD_GET(PSP_CMDRESP_STS, reg));
tee_free_ring(tee);
ret = -EIO;
}
@@ -179,9 +180,9 @@ static void tee_destroy_ring(struct psp_tee_device *tee)
ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
if (ret) {
dev_err(tee->dev, "tee: ring destroy command timed out\n");
- } else if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
- reg & PSP_CMDRESP_ERR_MASK);
+ } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n",
+ FIELD_GET(PSP_CMDRESP_STS, reg));
}
free_ring:
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 309da6334a0a..2cd44d7457a4 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -460,7 +460,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
}
if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
- xts_check_key(tfm, key, keylen)) {
+ xts_verify_key(sktfm, key, keylen)) {
dev_dbg(dev, "weak XTS key");
return -EINVAL;
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index d489c6f80892..c57f929805d5 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -350,9 +350,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Get device resources */
/* First CC registers space */
- req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
- new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ new_drvdata->cc_base = devm_platform_get_and_ioremap_resource(plat_dev,
+ 0, &req_mem_cc_regs);
if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 68d65773ef2b..0eade4fa6695 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -220,7 +220,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req,
reqctx->verify = VERIFY_HW;
}
chcr_dec_wrcount(dev);
- req->base.complete(&req->base, err);
+ aead_request_complete(req, err);
return err;
}
@@ -1235,7 +1235,7 @@ complete:
complete(&ctx->cbc_aes_aio_done);
}
chcr_dec_wrcount(dev);
- req->base.complete(&req->base, err);
+ skcipher_request_complete(req, err);
return err;
}
@@ -2132,7 +2132,7 @@ unmap:
out:
chcr_dec_wrcount(dev);
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
/*
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 7e7a8f01ea6b..8e4a49b7ab4f 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -879,7 +879,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
static void hifn_init_dma(struct hifn_device *dev)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
u32 dptr = dev->desc_dma;
int i;
@@ -1072,7 +1072,7 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
u8 *buf, unsigned dlen, unsigned slen,
u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
struct hifn_crypt_command *cry_cmd;
u8 *buf_pos = buf;
u16 cmd_len;
@@ -1113,7 +1113,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
struct hifn_context *ctx, struct hifn_request_context *rctx,
void *priv, unsigned int nbytes)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int cmd_len, sa_idx;
u8 *buf, *buf_pos;
u16 mask;
@@ -1231,7 +1231,7 @@ err_out:
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size, int last)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int idx;
dma_addr_t addr;
@@ -1264,7 +1264,7 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
static void hifn_setup_res_desc(struct hifn_device *dev)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
@@ -1290,7 +1290,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev)
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size, int last)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int idx;
dma_addr_t addr;
@@ -1705,12 +1705,12 @@ static void hifn_process_ready(struct skcipher_request *req, int error)
hifn_cipher_walk_exit(&rctx->walk);
}
- req->base.complete(&req->base, error);
+ skcipher_request_complete(req, error);
}
static void hifn_clear_rings(struct hifn_device *dev, int error)
{
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int i, u;
dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
@@ -1784,7 +1784,7 @@ static void hifn_work(struct work_struct *work)
spin_lock_irqsave(&dev->lock, flags);
if (dev->active == 0) {
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
dev->flags &= ~HIFN_FLAG_CMD_BUSY;
@@ -1815,7 +1815,7 @@ static void hifn_work(struct work_struct *work)
if (reset) {
if (++dev->reset >= 5) {
int i;
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
dev_info(&dev->pdev->dev,
"r: %08x, active: %d, started: %d, "
@@ -1848,8 +1848,8 @@ static void hifn_work(struct work_struct *work)
static irqreturn_t hifn_interrupt(int irq, void *data)
{
- struct hifn_device *dev = (struct hifn_device *)data;
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_device *dev = data;
+ struct hifn_dma *dma = dev->desc_virt;
u32 dmacsr, restart;
dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
@@ -1914,7 +1914,7 @@ static void hifn_flush(struct hifn_device *dev)
unsigned long flags;
struct crypto_async_request *async_req;
struct skcipher_request *req;
- struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+ struct hifn_dma *dma = dev->desc_virt;
int i;
for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
@@ -2054,7 +2054,7 @@ static int hifn_process_queue(struct hifn_device *dev)
break;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = skcipher_request_cast(async_req);
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 743ce4fc3158..e8690c223584 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_HISI_SEC2
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_SM4_GENERIC
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
@@ -42,7 +42,7 @@ config CRYPTO_DEV_HISI_SEC2
config CRYPTO_DEV_HISI_QM
tristate
depends on ARM64 || COMPILE_TEST
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on UACCE || UACCE=n
depends on ACPI
help
@@ -51,7 +51,7 @@ config CRYPTO_DEV_HISI_QM
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HiSilicon ZIP accelerator"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
depends on UACCE || UACCE=n
@@ -62,7 +62,7 @@ config CRYPTO_DEV_HISI_ZIP
config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE accelerator"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
@@ -82,3 +82,10 @@ config CRYPTO_DEV_HISI_TRNG
select CRYPTO_RNG
help
Support for HiSilicon TRNG Driver.
+
+config CRYPTO_DEV_HISTB_TRNG
+ tristate "Support for HiSTB TRNG Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select HW_RANDOM
+ help
+ Support for HiSTB TRNG Driver.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 8595a5a5d228..fc51e0edec69 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
hisi_qm-objs = qm.o sgl.o debugfs.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
-obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
+obj-y += trng/
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 923f9c279265..5d0adfb54a34 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 007ac7a69ce7..ad0c042b5e66 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
@@ -95,8 +94,6 @@
#define QM_VFT_CFG_RDY 0x10006c
#define QM_VFT_CFG_OP_WR 0x100058
#define QM_VFT_CFG_TYPE 0x10005c
-#define QM_SQC_VFT 0x0
-#define QM_CQC_VFT 0x1
#define QM_VFT_CFG 0x100060
#define QM_VFT_CFG_OP_ENABLE 0x100054
#define QM_PM_CTRL 0x100148
@@ -118,7 +115,7 @@
#define QM_SQC_VFT_BASE_SHIFT_V2 28
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
-#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
+#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -164,7 +161,6 @@
/* interfunction communication */
#define QM_IFC_READY_STATUS 0x100128
-#define QM_IFC_C_STS_M 0x10012C
#define QM_IFC_INT_SET_P 0x100130
#define QM_IFC_INT_CFG 0x100134
#define QM_IFC_INT_SOURCE_P 0x100138
@@ -198,7 +194,6 @@
#define PCI_BAR_2 2
#define PCI_BAR_4 4
-#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
#define QM_DBG_READ_LEN 256
@@ -212,8 +207,6 @@
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
#define QM_QOS_PARAM_NUM 2
-#define QM_QOS_VAL_NUM 1
-#define QM_QOS_BDF_PARAM_NUM 4
#define QM_QOS_MAX_VAL 1000
#define QM_QOS_RATE 100
#define QM_QOS_EXPAND_RATE 1000
@@ -225,38 +218,34 @@
#define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
#define QM_SHAPER_CBS_B 1
-#define QM_SHAPER_CBS_S 16
#define QM_SHAPER_VFT_OFFSET 6
-#define WAIT_FOR_QOS_VF 100
#define QM_QOS_MIN_ERROR_RATE 5
-#define QM_QOS_TYPICAL_NUM 8
#define QM_SHAPER_MIN_CBS_S 8
#define QM_QOS_TICK 0x300U
#define QM_QOS_DIVISOR_CLK 0x1f40U
#define QM_QOS_MAX_CIR_B 200
#define QM_QOS_MIN_CIR_B 100
#define QM_QOS_MAX_CIR_U 6
-#define QM_QOS_MAX_CIR_S 11
#define QM_AUTOSUSPEND_DELAY 3000
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
- (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
- ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
- ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
+ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
+ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
+ ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_SQC_W13(priority, orders, alg_type) \
- (((priority) << QM_SQ_PRIORITY_SHIFT) | \
- ((orders) << QM_SQ_ORDERS_SHIFT) | \
+ (((priority) << QM_SQ_PRIORITY_SHIFT) | \
+ ((orders) << QM_SQ_ORDERS_SHIFT) | \
(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
- (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
- ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
- ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
+ (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
+ ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
+ ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
@@ -367,6 +356,16 @@ struct hisi_qm_resource {
struct list_head list;
};
+/**
+ * struct qm_hw_err - Structure describing the device errors
+ * @list: hardware error list
+ * @timestamp: timestamp when the error occurred
+ */
+struct qm_hw_err {
+ struct list_head list;
+ unsigned long long timestamp;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -706,7 +705,7 @@ static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
((u64)randata << QM_DB_RAND_SHIFT_V2) |
- ((u64)index << QM_DB_INDEX_SHIFT_V2) |
+ ((u64)index << QM_DB_INDEX_SHIFT_V2) |
((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
writeq(doorbell, io_base);
@@ -905,7 +904,7 @@ static void qm_work_process(struct work_struct *work)
}
}
-static bool do_qm_irq(struct hisi_qm *qm)
+static bool do_qm_eq_irq(struct hisi_qm *qm)
{
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qm_poll_data *poll_data;
@@ -925,12 +924,12 @@ static bool do_qm_irq(struct hisi_qm *qm)
return false;
}
-static irqreturn_t qm_irq(int irq, void *data)
+static irqreturn_t qm_eq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
bool ret;
- ret = do_qm_irq(qm);
+ ret = do_qm_eq_irq(qm);
if (ret)
return IRQ_HANDLED;
@@ -1304,7 +1303,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
- *number = (QM_SQC_VFT_NUM_MASK_v2 &
+ *number = (QM_SQC_VFT_NUM_MASK_V2 &
(sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
return 0;
@@ -1892,8 +1891,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
* @qm: The qm we create a qp from.
* @alg_type: Accelerator specific algorithm type in sqc.
*
- * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
- * qp memory fails.
+ * Return the created qp, or a negative error code if it fails.
*/
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
@@ -2062,7 +2060,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
* @arg: Accelerator specific argument.
*
* After this function, qp can receive request from user. Return 0 if
- * successful, Return -EBUSY if failed.
+ * successful, or a negative error code on failure.
*/
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
@@ -2363,7 +2361,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
return -EINVAL;
}
- vma->vm_flags |= VM_IO;
+ vm_flags_set(vma, VM_IO);
return remap_pfn_range(vma, vma->vm_start,
phys_base >> PAGE_SHIFT,
@@ -2469,6 +2467,113 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
return -EINVAL;
}
+/**
+ * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
+ * according to user's configuration of error threshold.
+ * @qm: the uacce device
+ */
+static int qm_hw_err_isolate(struct hisi_qm *qm)
+{
+ struct qm_hw_err *err, *tmp, *hw_err;
+ struct qm_err_isolate *isolate;
+ u32 count = 0;
+
+ isolate = &qm->isolate_data;
+
+#define SECONDS_PER_HOUR 3600
+
+ /* All the hw errs are processed by the PF driver */
+ if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
+ return 0;
+
+ hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL);
+ if (!hw_err)
+ return -ENOMEM;
+
+ /*
+ * Time-stamp every slot AER error. Then check the AER error log when the
+ * next device AER error occurs. If the number of slot AER errors within
+ * the last hour exceeds the configured error threshold, the isolation
+ * state is set to true, and AER error logs older than one hour are cleared.
+ */
+ mutex_lock(&isolate->isolate_lock);
+ hw_err->timestamp = jiffies;
+ list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) {
+ if ((hw_err->timestamp - err->timestamp) / HZ >
+ SECONDS_PER_HOUR) {
+ list_del(&err->list);
+ kfree(err);
+ } else {
+ count++;
+ }
+ }
+ list_add(&hw_err->list, &isolate->qm_hw_errs);
+ mutex_unlock(&isolate->isolate_lock);
+
+ if (count >= isolate->err_threshold)
+ isolate->is_isolate = true;
+
+ return 0;
+}
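The isolation check above is a sliding-window counter: each error is timestamped, entries older than an hour are pruned, and the device is isolated once enough errors survive inside the window. A standalone sketch of that window logic (no locking, fixed-size history, purely illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define WINDOW_SECONDS  3600
    #define MAX_EVENTS      64      /* illustrative fixed-size history */

    static unsigned long long events[MAX_EVENTS];
    static size_t nr_events;

    /* Record one error at time 'now' and return true once the threshold is hit. */
    static bool record_error(unsigned long long now, unsigned int threshold)
    {
            size_t i, kept = 0;

            /* Prune entries older than the window, keeping the rest in place. */
            for (i = 0; i < nr_events; i++)
                    if (now - events[i] <= WINDOW_SECONDS)
                            events[kept++] = events[i];
            nr_events = kept;

            if (nr_events < MAX_EVENTS)
                    events[nr_events++] = now;

            return nr_events >= threshold;
    }

    int main(void)
    {
            printf("%d\n", record_error(0, 3));     /* 0: one error in the window */
            printf("%d\n", record_error(10, 3));    /* 0: two errors in the window */
            printf("%d\n", record_error(20, 3));    /* 1: threshold reached */
            return 0;
    }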
+
+static void qm_hw_err_destroy(struct hisi_qm *qm)
+{
+ struct qm_hw_err *err, *tmp;
+
+ mutex_lock(&qm->isolate_data.isolate_lock);
+ list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ mutex_unlock(&qm->isolate_data.isolate_lock);
+}
+
+static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
+{
+ struct hisi_qm *qm = uacce->priv;
+ struct hisi_qm *pf_qm;
+
+ if (uacce->is_vf)
+ pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ else
+ pf_qm = qm;
+
+ return pf_qm->isolate_data.is_isolate ?
+ UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL;
+}
+
+static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
+{
+ struct hisi_qm *qm = uacce->priv;
+
+ /* Must be set by PF */
+ if (uacce->is_vf)
+ return -EPERM;
+
+ if (qm->isolate_data.is_isolate)
+ return -EPERM;
+
+ qm->isolate_data.err_threshold = num;
+
+ /* After the policy is updated, the hardware error list needs to be reset */
+ qm_hw_err_destroy(qm);
+
+ return 0;
+}
+
+static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce)
+{
+ struct hisi_qm *qm = uacce->priv;
+ struct hisi_qm *pf_qm;
+
+ if (uacce->is_vf) {
+ pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ return pf_qm->isolate_data.err_threshold;
+ }
+
+ return qm->isolate_data.err_threshold;
+}
+
static const struct uacce_ops uacce_qm_ops = {
.get_available_instances = hisi_qm_get_available_instances,
.get_queue = hisi_qm_uacce_get_queue,
@@ -2478,8 +2583,22 @@ static const struct uacce_ops uacce_qm_ops = {
.mmap = hisi_qm_uacce_mmap,
.ioctl = hisi_qm_uacce_ioctl,
.is_q_updated = hisi_qm_is_q_updated,
+ .get_isolate_state = hisi_qm_get_isolate_state,
+ .isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
+ .isolate_err_threshold_read = hisi_qm_isolate_threshold_read,
};
+static void qm_remove_uacce(struct hisi_qm *qm)
+{
+ struct uacce_device *uacce = qm->uacce;
+
+ if (qm->use_sva) {
+ qm_hw_err_destroy(qm);
+ uacce_remove(uacce);
+ qm->uacce = NULL;
+ }
+}
+
static int qm_alloc_uacce(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2506,8 +2625,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
qm->use_sva = true;
} else {
/* only consider sva case */
- uacce_remove(uacce);
- qm->uacce = NULL;
+ qm_remove_uacce(qm);
return -EINVAL;
}
@@ -2540,6 +2658,8 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
qm->uacce = uacce;
+ INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
+ mutex_init(&qm->isolate_data.isolate_lock);
return 0;
}
@@ -3074,7 +3194,6 @@ static int qm_stop_started_qp(struct hisi_qm *qm)
return 0;
}
-
/**
* qm_clear_queues() - Clear all queues memory in a qm.
* @qm: The qm in which the queues will be cleared.
@@ -3371,7 +3490,7 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
act_q_num = q_num;
}
- act_q_num = min_t(int, act_q_num, max_qp_num);
+ act_q_num = min(act_q_num, max_qp_num);
ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
if (ret) {
for (j = num_vfs; j > i; j--)
@@ -3558,7 +3677,7 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
qos_val = ir / QM_QOS_RATE;
ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
- ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
+ ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
err_get_status:
clear_bit(QM_RESETTING, &qm->misc_ctl);
@@ -3571,7 +3690,7 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
unsigned long *val,
unsigned int *fun_index)
{
- struct bus_type *bus_type = qm->pdev->dev.bus;
+ const struct bus_type *bus_type = qm->pdev->dev.bus;
char tbuf_bdf[QM_DBG_READ_LEN] = {0};
char val_buf[QM_DBG_READ_LEN] = {0};
struct pci_dev *pdev;
@@ -4029,6 +4148,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return ret;
}
+ if (qm->use_sva) {
+ ret = qm_hw_err_isolate(qm);
+ if (ret)
+ pci_err(pdev, "failed to isolate hw err!\n");
+ }
+
ret = qm_wait_vf_prepare_finish(qm);
if (ret)
pci_err(pdev, "failed to stop by vfs in soft reset!\n");
@@ -4049,13 +4174,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
if (!qm->err_status.is_dev_ecc_mbit &&
qm->err_status.is_qm_ecc_mbit &&
qm->err_ini->close_axi_master_ooo) {
-
qm->err_ini->close_axi_master_ooo(qm);
-
} else if (qm->err_status.is_dev_ecc_mbit &&
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
-
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
qm->io_base + QM_RAS_NFE_ENABLE);
@@ -4336,21 +4458,25 @@ static int qm_controller_reset(struct hisi_qm *qm)
qm->err_ini->show_last_dfx_regs(qm);
ret = qm_soft_reset(qm);
- if (ret) {
- pci_err(pdev, "Controller reset failed (%d)\n", ret);
- qm_reset_bit_clear(qm);
- return ret;
- }
+ if (ret)
+ goto err_reset;
ret = qm_controller_reset_done(qm);
- if (ret) {
- qm_reset_bit_clear(qm);
- return ret;
- }
+ if (ret)
+ goto err_reset;
pci_info(pdev, "Controller reset complete\n");
return 0;
+
+err_reset:
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ qm_reset_bit_clear(qm);
+
+ /* if resetting fails, isolate the device */
+ if (qm->use_sva)
+ qm->isolate_data.is_isolate = true;
+ return ret;
}
/**
@@ -4499,7 +4625,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-
/**
* hisi_qm_dev_shutdown() - Shutdown device.
* @pdev: The device will be shutdown.
@@ -4903,7 +5028,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
return 0;
irq_vector = val & QM_IRQ_VECTOR_MASK;
- ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
if (ret)
dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
@@ -5271,10 +5396,7 @@ int hisi_qm_init(struct hisi_qm *qm)
err_free_qm_memory:
hisi_qm_memory_uninit(qm);
err_alloc_uacce:
- if (qm->use_sva) {
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
- }
+ qm_remove_uacce(qm);
err_irq_register:
qm_irqs_unregister(qm);
err_pci_init:
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 490e1542305e..1189effcdad0 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -504,8 +504,8 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
kfifo_avail(&ctx->queue->softqueue) >
backlog_req->num_elements)) {
sec_send_request(backlog_req, ctx->queue);
- backlog_req->req_base->complete(backlog_req->req_base,
- -EINPROGRESS);
+ crypto_request_complete(backlog_req->req_base,
+ -EINPROGRESS);
list_del(&backlog_req->backlog_head);
}
}
@@ -534,7 +534,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
if (skreq->src != skreq->dst)
dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
DMA_BIDIRECTIONAL);
- skreq->base.complete(&skreq->base, sec_req->err);
+ skcipher_request_complete(skreq, sec_req->err);
}
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index f5bfc9755a4a..074e50ef512c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1459,12 +1459,11 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
break;
backlog_sk_req = backlog_req->c_req.sk_req;
- backlog_sk_req->base.complete(&backlog_sk_req->base,
- -EINPROGRESS);
+ skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
}
- sk_req->base.complete(&sk_req->base, err);
+ skcipher_request_complete(sk_req, err);
}
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
@@ -1736,12 +1735,11 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
break;
backlog_aead_req = backlog_req->aead_req.aead_req;
- backlog_aead_req->base.complete(&backlog_aead_req->base,
- -EINPROGRESS);
+ aead_request_complete(backlog_aead_req, -EINPROGRESS);
atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
}
- a_req->base.complete(&a_req->base, err);
+ aead_request_complete(a_req, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 93572c0d4faa..77f9f131b850 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 2b6f2281cfd6..3df7a256e919 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -70,11 +70,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
HISI_ACC_SGL_ALIGN_SIZE);
/*
- * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
+ * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER,
 * the block size may exceed 2^31 on ia64, so the block size is capped at 2^31
*/
- block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
- PAGE_SHIFT + MAX_ORDER - 1 : 31);
+ block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ?
+ PAGE_SHIFT + MAX_ORDER : 31);
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;
@@ -124,9 +124,8 @@ err_free_mem:
for (j = 0; j < i; j++) {
dma_free_coherent(dev, block_size, block[j].sgl,
block[j].sgl_dma);
- memset(block + j, 0, sizeof(*block));
}
- kfree(pool);
+ kfree_sensitive(pool);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
@@ -250,7 +249,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dev_err(dev, "Get SGL error!\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-ENOMEM);
-
}
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
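The block_size clamp earlier in this file tracks MAX_ORDER now naming the largest allocation order itself, so the biggest coherent block is PAGE_SIZE << MAX_ORDER, still capped at 2^31 for the ia64 corner case. A worked sketch of the arithmetic with typical (assumed) values:

    #include <stdio.h>

    /* Illustrative values: 4 KiB pages and a maximum order of 10. */
    #define EXAMPLE_PAGE_SHIFT      12
    #define EXAMPLE_MAX_ORDER       10

    int main(void)
    {
            unsigned int shift = EXAMPLE_PAGE_SHIFT + EXAMPLE_MAX_ORDER;
            unsigned long block_size = 1UL << (shift < 32 ? shift : 31);

            /* 12 + 10 = 22 -> 4 MiB per block; the 2^31 cap only bites for huge shifts. */
            printf("block_size = %lu bytes (%lu MiB)\n", block_size, block_size >> 20);
            return 0;
    }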
diff --git a/drivers/crypto/hisilicon/trng/Makefile b/drivers/crypto/hisilicon/trng/Makefile
index d909079f351c..cf20b057c66b 100644
--- a/drivers/crypto/hisilicon/trng/Makefile
+++ b/drivers/crypto/hisilicon/trng/Makefile
@@ -1,2 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += hisi-trng-v2.o
hisi-trng-v2-objs = trng.o
+
+obj-$(CONFIG_CRYPTO_DEV_HISTB_TRNG) += histb-trng.o
+histb-trng-objs += trng-stb.o
diff --git a/drivers/crypto/hisilicon/trng/trng-stb.c b/drivers/crypto/hisilicon/trng/trng-stb.c
new file mode 100644
index 000000000000..29200a7d3d81
--- /dev/null
+++ b/drivers/crypto/hisilicon/trng/trng-stb.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Device driver for True RNG in HiSTB SoCs
+ *
+ * Copyright (c) 2023 David Yang
+ */
+
+#include <crypto/internal/rng.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#define HISTB_TRNG_CTRL 0x0
+#define RNG_SOURCE GENMASK(1, 0)
+#define DROP_ENABLE BIT(5)
+#define POST_PROCESS_ENABLE BIT(7)
+#define POST_PROCESS_DEPTH GENMASK(15, 8)
+#define HISTB_TRNG_NUMBER 0x4
+#define HISTB_TRNG_STAT 0x8
+#define DATA_COUNT GENMASK(2, 0) /* max 4 */
+
+struct histb_trng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+};
+
+/*
+ * Observed:
+ * depth = 1 -> ~1ms
+ * depth = 255 -> ~16ms
+ */
+static int histb_trng_wait(void __iomem *base)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(base + HISTB_TRNG_STAT, val,
+ val & DATA_COUNT, 1000, 30 * 1000);
+}
+
+static void histb_trng_init(void __iomem *base, unsigned int depth)
+{
+ u32 val;
+
+ val = readl_relaxed(base + HISTB_TRNG_CTRL);
+
+ val &= ~RNG_SOURCE;
+ val |= 2;
+
+ val &= ~POST_PROCESS_DEPTH;
+ val |= min(depth, 0xffu) << 8;
+
+ val |= POST_PROCESS_ENABLE;
+ val |= DROP_ENABLE;
+
+ writel_relaxed(val, base + HISTB_TRNG_CTRL);
+}
+
+static int histb_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct histb_trng_priv *priv = container_of(rng, typeof(*priv), rng);
+ void __iomem *base = priv->base;
+
+ for (int i = 0; i < max; i += sizeof(u32)) {
+ if (!(readl_relaxed(base + HISTB_TRNG_STAT) & DATA_COUNT)) {
+ if (!wait)
+ return i;
+ if (histb_trng_wait(base)) {
+ pr_err("failed to generate random number, generated %d\n",
+ i);
+ return i ? i : -ETIMEDOUT;
+ }
+ }
+ *(u32 *) (data + i) = readl_relaxed(base + HISTB_TRNG_NUMBER);
+ }
+
+ return max;
+}
+
+static unsigned int histb_trng_get_depth(void __iomem *base)
+{
+ return (readl_relaxed(base + HISTB_TRNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
+}
+
+static ssize_t
+depth_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+
+ return sprintf(buf, "%d\n", histb_trng_get_depth(base));
+}
+
+static ssize_t
+depth_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+ unsigned int depth;
+
+ if (kstrtouint(buf, 0, &depth))
+ return -ERANGE;
+
+ histb_trng_init(base, depth);
+ return count;
+}
+
+static DEVICE_ATTR_RW(depth);
+
+static struct attribute *histb_trng_attrs[] = {
+ &dev_attr_depth.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(histb_trng);
+
+static int histb_trng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct histb_trng_priv *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return -ENOMEM;
+
+ histb_trng_init(base, 144);
+ if (histb_trng_wait(base)) {
+ dev_err(dev, "cannot bring up device\n");
+ return -ENODEV;
+ }
+
+ priv->base = base;
+ priv->rng.name = pdev->name;
+ priv->rng.read = histb_trng_read;
+ ret = devm_hwrng_register(dev, &priv->rng);
+ if (ret) {
+ dev_err(dev, "failed to register hwrng: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ dev_set_drvdata(dev, priv);
+ return 0;
+}
+
+static const struct of_device_id histb_trng_of_match[] = {
+ { .compatible = "hisilicon,histb-trng", },
+ { }
+};
+
+static struct platform_driver histb_trng_driver = {
+ .probe = histb_trng_probe,
+ .driver = {
+ .name = "histb-trng",
+ .of_match_table = histb_trng_of_match,
+ .dev_groups = histb_trng_groups,
+ },
+};
+
+module_platform_driver(histb_trng_driver);
+
+MODULE_DESCRIPTION("HiSTB True RNG");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
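For readers unfamiliar with the hwrng read contract the new driver implements: the callback may return fewer bytes than requested when wait is false, and reports -ETIMEDOUT only if nothing was produced at all. A minimal caller sketch honouring that contract; rng is assumed to be the struct hwrng registered by the driver above:

#include <linux/hw_random.h>
#include <linux/printk.h>

static int read_some_entropy(struct hwrng *rng)
{
	u8 buf[32];
	int n;

	/* wait == true: histb_trng_read() polls DATA_COUNT for up to 30 ms
	 * per stalled word before giving up. */
	n = rng->read(rng, buf, sizeof(buf), true);
	if (n < 0)
		return n;	/* -ETIMEDOUT only when nothing was read */

	/* n may be short of sizeof(buf) when wait == false and the FIFO runs dry */
	pr_info("trng: got %d bytes\n", n);
	return n;
}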
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 1549bec3aea5..f3ce34198775 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 9629e98bd68b..359aa2b41016 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -157,9 +157,9 @@ static inline void img_hash_write(struct img_hash_dev *hdev,
writel_relaxed(value, hdev->io_base + offset);
}
-static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
+static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
- return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
+ return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
@@ -209,7 +209,7 @@ static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
static void img_hash_dma_callback(void *data)
{
- struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+ struct img_hash_dev *hdev = data;
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->bufcnt) {
@@ -283,10 +283,10 @@ static int img_hash_finish(struct ahash_request *req)
static void img_hash_copy_hash(struct ahash_request *req)
{
struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
- u32 *hash = (u32 *)ctx->digest;
+ __be32 *hash = (__be32 *)ctx->digest;
int i;
- for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
+ for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
hash[i] = img_hash_read_result_queue(ctx->hdev);
}
@@ -308,7 +308,7 @@ static void img_hash_finish_req(struct ahash_request *req, int err)
DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
if (req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
@@ -526,7 +526,7 @@ static int img_hash_handle_queue(struct img_hash_dev *hdev,
return res;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
hdev->req = req;
@@ -927,7 +927,7 @@ finish:
img_hash_finish_req(hdev->req, err);
}
-static const struct of_device_id img_hash_match[] = {
+static const struct of_device_id img_hash_match[] __maybe_unused = {
{ .compatible = "img,hash-accelerator" },
{}
};
@@ -966,8 +966,7 @@ static int img_hash_probe(struct platform_device *pdev)
}
/* Write port (DMA or CPU) */
- hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
+ hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
if (IS_ERR(hdev->cpu_addr)) {
err = PTR_ERR(hdev->cpu_addr);
goto res_err;
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index ae6110376e21..9ff02b5abc4a 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -474,7 +474,7 @@ release_fw:
goto retry_fw;
}
- dev_dbg(priv->dev, "Firmware load failed.\n");
+ dev_err(priv->dev, "Firmware load failed.\n");
return ret;
}
@@ -850,7 +850,7 @@ handle_req:
goto request_failed;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
/* In case the send() helper did not issue any command to push
* to the engine because the input data was cached, continue to
@@ -970,17 +970,6 @@ void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
} while (!cdesc->last_seg);
}
-void safexcel_inv_complete(struct crypto_async_request *req, int error)
-{
- struct safexcel_inv_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring)
@@ -1050,7 +1039,7 @@ handle_results:
if (should_complete) {
local_bh_disable();
- req->complete(req, ret);
+ crypto_request_complete(req, ret);
local_bh_enable();
}
@@ -1639,19 +1628,23 @@ static int safexcel_probe_generic(void *pdev,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
- return ret;
+ goto err_cleanup_rings;
}
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
- if (!priv->ring[i].rdr_req)
- return -ENOMEM;
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
- if (!ring_irq)
- return -ENOMEM;
+ if (!ring_irq) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq->priv = priv;
ring_irq->ring = i;
@@ -1665,7 +1658,8 @@ static int safexcel_probe_generic(void *pdev,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
- return irq;
+ ret = irq;
+ goto err_cleanup_rings;
}
priv->ring[i].irq = irq;
@@ -1677,8 +1671,10 @@ static int safexcel_probe_generic(void *pdev,
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
- if (!priv->ring[i].workqueue)
- return -ENOMEM;
+ if (!priv->ring[i].workqueue) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
@@ -1695,16 +1691,26 @@ static int safexcel_probe_generic(void *pdev,
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
return 0;
+
+err_cleanup_rings:
+ for (i = 0; i < priv->config.rings; i++) {
+ if (priv->ring[i].irq)
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
+ if (priv->ring[i].workqueue)
+ destroy_workqueue(priv->ring[i].workqueue);
+ }
+
+ return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 6c2fc662f64f..47ef6c7cd02c 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -884,11 +884,6 @@ struct safexcel_alg_template {
} alg;
};
-struct safexcel_inv_result {
- struct completion completion;
- int error;
-};
-
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void *rdp);
@@ -927,7 +922,6 @@ void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req);
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_inv_complete(struct crypto_async_request *req, int error);
int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
unsigned int keylen, const char *alg,
unsigned int state_sz);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 32a37e3850c5..272c28b5a088 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1091,13 +1091,12 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
- struct safexcel_inv_result *result)
+ struct crypto_wait *result)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ring = ctx->base.ring;
-
- init_completion(&result->completion);
+ int err;
ctx = crypto_tfm_ctx(base->tfm);
ctx->base.exit_inv = true;
@@ -1110,13 +1109,13 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion(&result->completion);
+ err = crypto_wait_req(-EINPROGRESS, result);
- if (result->error) {
+ if (err) {
dev_warn(priv->dev,
"cipher: sync: invalidate: completion error %d\n",
- result->error);
- return result->error;
+ err);
+ return err;
}
return 0;
@@ -1126,12 +1125,12 @@ static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_inv_result result = {};
+ DECLARE_CRYPTO_WAIT(result);
memset(req, 0, sizeof(struct skcipher_request));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ crypto_req_done, &result);
skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
@@ -1141,12 +1140,12 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
- struct safexcel_inv_result result = {};
+ DECLARE_CRYPTO_WAIT(result);
memset(req, 0, sizeof(struct aead_request));
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ crypto_req_done, &result);
aead_request_set_tfm(req, __crypto_aead_cast(tfm));
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ca46328472d4..e17577b785c3 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -625,15 +625,16 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_crypto_priv *priv = ctx->base.priv;
EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req);
- struct safexcel_inv_result result = {};
+ DECLARE_CRYPTO_WAIT(result);
int ring = ctx->base.ring;
+ int err;
memset(req, 0, EIP197_AHASH_REQ_SIZE);
/* create invalidation request */
init_completion(&result.completion);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ crypto_req_done, &result);
ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
ctx = crypto_tfm_ctx(req->base.tfm);
@@ -647,12 +648,11 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion(&result.completion);
+ err = crypto_wait_req(-EINPROGRESS, &result);
- if (result.error) {
- dev_warn(priv->dev, "hash: completion error (%d)\n",
- result.error);
- return result.error;
+ if (err) {
+ dev_warn(priv->dev, "hash: completion error (%d)\n", err);
+ return err;
}
return 0;
@@ -1042,27 +1042,11 @@ static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
return safexcel_ahash_finup(areq);
}
-struct safexcel_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
-{
- struct safexcel_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int safexcel_hmac_init_pad(struct ahash_request *areq,
unsigned int blocksize, const u8 *key,
unsigned int keylen, u8 *ipad, u8 *opad)
{
- struct safexcel_ahash_result result;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret, i;
u8 *keydup;
@@ -1075,16 +1059,12 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
return -ENOMEM;
ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_ahash_complete, &result);
+ crypto_req_done, &result);
sg_init_one(&sg, keydup, keylen);
ahash_request_set_crypt(areq, &sg, ipad, keylen);
- init_completion(&result.completion);
ret = crypto_ahash_digest(areq);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- wait_for_completion_interruptible(&result.completion);
- ret = result.error;
- }
+ ret = crypto_wait_req(ret, &result);
/* Avoid leaking */
kfree_sensitive(keydup);
@@ -1109,16 +1089,15 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
static int safexcel_hmac_init_iv(struct ahash_request *areq,
unsigned int blocksize, u8 *pad, void *state)
{
- struct safexcel_ahash_result result;
struct safexcel_ahash_req *req;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_ahash_complete, &result);
+ crypto_req_done, &result);
sg_init_one(&sg, pad, blocksize);
ahash_request_set_crypt(areq, &sg, pad, blocksize);
- init_completion(&result.completion);
ret = crypto_ahash_init(areq);
if (ret)
@@ -1129,14 +1108,9 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- return ret;
-
- wait_for_completion_interruptible(&result.completion);
- if (result.error)
- return result.error;
+ ret = crypto_wait_req(ret, &result);
- return crypto_ahash_export(areq, state);
+ return ret ?: crypto_ahash_export(areq, state);
}
static int __safexcel_hmac_setkey(const char *alg, const u8 *key,
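The safexcel conversions above drop the driver-private completion wrappers (safexcel_inv_result, safexcel_ahash_result) in favour of the generic crypto wait helpers. A minimal sketch of that pattern, assuming an already-allocated ahash request; the function name is a placeholder, not taken from the driver:

#include <crypto/hash.h>
#include <linux/crypto.h>

static int digest_one_buffer(struct ahash_request *req, struct scatterlist *sg,
			     unsigned int len, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);

	/* crypto_req_done() completes the wait and records the final status;
	 * -EINPROGRESS notifications are filtered out for us. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* crypto_wait_req() returns immediately on synchronous completion and
	 * sleeps on the wait for -EINPROGRESS/-EBUSY, so no manual
	 * init_completion()/wait_for_completion() is needed. */
	return crypto_wait_req(crypto_ahash_digest(req), &wait);
}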
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
new file mode 100644
index 000000000000..3d90c87d4094
--- /dev/null
+++ b/drivers/crypto/intel/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+source "drivers/crypto/intel/keembay/Kconfig"
+source "drivers/crypto/intel/ixp4xx/Kconfig"
+source "drivers/crypto/intel/qat/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
new file mode 100644
index 000000000000..b3d0352ae188
--- /dev/null
+++ b/drivers/crypto/intel/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += keembay/
+obj-y += ixp4xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/intel/ixp4xx/Kconfig b/drivers/crypto/intel/ixp4xx/Kconfig
new file mode 100644
index 000000000000..af3cc5688328
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Kconfig
@@ -0,0 +1,14 @@
+config CRYPTO_DEV_IXP4XX
+ tristate "Driver for IXP4xx crypto hardware acceleration"
+ depends on (ARCH_IXP4XX || COMPILE_TEST) && IXP4XX_QMGR && IXP4XX_NPE
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
+ select CRYPTO_LIB_DES
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ help
+ Driver for the IXP4xx NPE crypto engine.
diff --git a/drivers/crypto/intel/ixp4xx/Makefile b/drivers/crypto/intel/ixp4xx/Makefile
new file mode 100644
index 000000000000..74ebefd93046
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index 984b3cc0237c..ed15379a9818 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -118,9 +118,9 @@ struct crypt_ctl {
u8 mode; /* NPE_OP_* operation mode */
#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
- dma_addr_t icv_rev_aes; /* icv or rev aes */
- dma_addr_t src_buf;
- dma_addr_t dst_buf;
+ u32 icv_rev_aes; /* icv or rev aes */
+ u32 src_buf;
+ u32 dst_buf;
#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
@@ -263,7 +263,9 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
- BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
+ BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
+ IS_ENABLED(CONFIG_64BIT)) &&
+ sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
&crypt_phys, GFP_ATOMIC);
@@ -382,7 +384,7 @@ static void one_packet(dma_addr_t phys)
if (req_ctx->hmac_virt)
finish_scattered_hmac(crypt);
- req->base.complete(&req->base, failed);
+ aead_request_complete(req, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
@@ -407,7 +409,7 @@ static void one_packet(dma_addr_t phys)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
- req->base.complete(&req->base, failed);
+ skcipher_request_complete(req, failed);
break;
}
case CTL_FLAG_GEN_ICV:
@@ -1169,10 +1171,11 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
if (unlikely(lastlen < authsize)) {
+ dma_addr_t dma;
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
- req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
- &crypt->icv_rev_aes);
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
+ crypt->icv_rev_aes = dma;
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
if (!encrypt) {
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/intel/keembay/Kconfig
index 1cd62f9c3e3a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/intel/keembay/Kconfig
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/intel/keembay/Makefile
index 7c12c3c138bd..7c12c3c138bd 100644
--- a/drivers/crypto/keembay/Makefile
+++ b/drivers/crypto/intel/keembay/Makefile
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 9953f5590ac4..ae31be00357a 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -1580,8 +1580,6 @@ static int kmb_ocs_aes_remove(struct platform_device *pdev)
struct ocs_aes_dev *aes_dev;
aes_dev = platform_get_drvdata(pdev);
- if (!aes_dev)
- return -ENODEV;
unregister_aes_algs(aes_dev);
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 2269df17514c..2269df17514c 100644
--- a/drivers/crypto/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index d4bcbed1f546..d4bcbed1f546 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
diff --git a/drivers/crypto/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
index be9f32fc8f42..be9f32fc8f42 100644
--- a/drivers/crypto/keembay/ocs-aes.c
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
diff --git a/drivers/crypto/keembay/ocs-aes.h b/drivers/crypto/intel/keembay/ocs-aes.h
index c035fc48b7ed..c035fc48b7ed 100644
--- a/drivers/crypto/keembay/ocs-aes.h
+++ b/drivers/crypto/intel/keembay/ocs-aes.h
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/intel/keembay/ocs-hcu.c
index deb9bd460ee6..deb9bd460ee6 100644
--- a/drivers/crypto/keembay/ocs-hcu.c
+++ b/drivers/crypto/intel/keembay/ocs-hcu.c
diff --git a/drivers/crypto/keembay/ocs-hcu.h b/drivers/crypto/intel/keembay/ocs-hcu.h
index fbbbb92a0592..fbbbb92a0592 100644
--- a/drivers/crypto/keembay/ocs-hcu.h
+++ b/drivers/crypto/intel/keembay/ocs-hcu.h
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 1220cc86f910..1220cc86f910 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 258c8a626ce0..258c8a626ce0 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
diff --git a/drivers/crypto/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index ff9c8b5897ea..ff9c8b5897ea 100644
--- a/drivers/crypto/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 834a705180c0..7324b86a4f40 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -28,13 +28,31 @@ static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
{0x100, ADF_4XXX_ADMIN_OBJ},
};
+static struct adf_fw_config adf_402xx_fw_cy_config[] = {
+ {0xF0, ADF_402XX_SYM_OBJ},
+ {0xF, ADF_402XX_ASYM_OBJ},
+ {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_402xx_fw_dc_config[] = {
+ {0xF0, ADF_402XX_DC_OBJ},
+ {0xF, ADF_402XX_DC_OBJ},
+ {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
+static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
0x0
};
+static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+ 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+ 0x0
+};
+
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
@@ -206,9 +224,16 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_1;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
- return thrd_to_arb_map;
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return thrd_to_arb_map_cy;
+ case SVC_DC:
+ return thrd_to_arb_map_dc;
+ }
+
+ return NULL;
}
static void get_arb_info(struct arb_info *arb_info)
@@ -286,7 +311,7 @@ static u32 uof_get_num_objs(void)
return ARRAY_SIZE(adf_4xxx_fw_cy_config);
}
-static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
+static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
@@ -298,6 +323,18 @@ static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
return NULL;
}
+static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return adf_402xx_fw_cy_config[obj_num].obj_name;
+ case SVC_DC:
+ return adf_402xx_fw_dc_config[obj_num].obj_name;
+ }
+
+ return NULL;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
switch (get_service_enabled(accel_dev)) {
@@ -310,7 +347,7 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
return 0;
}
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
@@ -337,8 +374,6 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->get_admin_info = get_admin_info;
hw_data->get_accel_cap = get_accel_cap;
hw_data->get_sku = get_sku;
- hw_data->fw_name = ADF_4XXX_FW;
- hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_send_admin_init;
@@ -349,8 +384,19 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->init_device = adf_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ switch (dev_id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_402XX_FW;
+ hw_data->fw_mmp_name = ADF_402XX_MMP;
+ hw_data->uof_get_name = uof_get_name_402xx;
+ break;
+
+ default:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_4xxx;
+ }
hw_data->uof_get_num_objs = uof_get_num_objs;
- hw_data->uof_get_name = uof_get_name;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index e98428ba78e2..085e259c245a 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -56,6 +56,13 @@
#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
+/* Firmware for 402XXX */
+#define ADF_402XX_FW "qat_402xx.bin"
+#define ADF_402XX_MMP "qat_402xx_mmp.bin"
+#define ADF_402XX_SYM_OBJ "qat_402xx_sym.bin"
+#define ADF_402XX_DC_OBJ "qat_402xx_dc.bin"
+#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
/* qat_4xxx fuse bits are different from old GENs, redefine them */
enum icp_qat_4xxx_slice_mask {
@@ -68,7 +75,7 @@ enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
};
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index b3a4c7b23864..ceb87327a5fe 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -16,6 +16,7 @@
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
{ PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -330,7 +331,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
accel_dev->hw_device = hw_data;
- adf_init_hw_data_4xxx(accel_dev->hw_device);
+ adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
@@ -403,38 +404,24 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state.\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err;
}
- ret = adf_sysfs_init(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = hw_data->dev_config(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
+ goto out_err_dev_stop;
- ret = adf_dev_start(accel_dev);
+ ret = adf_sysfs_init(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
@@ -448,9 +435,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index 92ef416ccc78..92ef416ccc78 100644
--- a/drivers/crypto/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index c55c51a07677..475643654e64 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -75,7 +75,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 336a06f11dbd..336a06f11dbd 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 1f4fbf4562b2..bb4dca735ab5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -193,34 +193,20 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err_free_reg;
}
- ret = hw_data->dev_config(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -239,9 +225,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index b6d76825a92c..b6d76825a92c 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 84d9486e04de..84d9486e04de 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index 6b4bf181d15b..6b4bf181d15b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index cf4ef83e186f..e8cc10f64134 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -173,20 +173,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -206,8 +200,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index d581f7c87d6c..d581f7c87d6c 100644
--- a/drivers/crypto/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index b7aa19d2fa80..e14270703670 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -77,7 +77,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
index 008c0a3a9769..008c0a3a9769 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 4ccaf298250c..ca18ae14c099 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -193,34 +193,20 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err_free_reg;
}
- ret = hw_data->dev_config(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -239,9 +225,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 446c3d638605..446c3d638605 100644
--- a/drivers/crypto/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 751d7aa57fc7..751d7aa57fc7 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index a1a62c003ebf..a1a62c003ebf 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 0e642c94b929..37566309df94 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -173,20 +173,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -206,8 +200,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 1fb8d50f509f..1fb8d50f509f 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 284f5aad3ee0..bd19e6460899 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -21,6 +21,8 @@
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -188,7 +190,7 @@ struct adf_hw_device_data {
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
- const u32 *(*get_arb_mapping)(void);
+ const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
int (*init_device)(struct adf_accel_dev *accel_dev);
int (*enable_pm)(struct adf_accel_dev *accel_dev);
bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
@@ -310,6 +312,7 @@ struct adf_accel_dev {
u8 pf_compat_ver;
} vf;
};
+ struct mutex state_lock; /* protect state of the device */
bool is_vf;
u32 accel_id;
};
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 4ce2b666929e..4ce2b666929e 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 3b6184c35081..3b6184c35081 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index fe9bb2f3536a..04af32a2811c 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
@@ -90,9 +89,7 @@ static void adf_device_reset_worker(struct work_struct *work)
struct adf_accel_dev *accel_dev = reset_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+ if (adf_dev_restart(accel_dev)) {
/* The device hung and we can't restart it, so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
kfree(reset_data);
@@ -173,40 +170,6 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
-/**
- * adf_enable_aer() - Enable Advance Error Reporting for acceleration device
- * @accel_dev: Pointer to acceleration device.
- *
- * Function enables PCI Advance Error Reporting for the
- * QAT acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- */
-void adf_enable_aer(struct adf_accel_dev *accel_dev)
-{
- struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
- pci_enable_pcie_error_reporting(pdev);
-}
-EXPORT_SYMBOL_GPL(adf_enable_aer);
-
-/**
- * adf_disable_aer() - Disable Advance Error Reporting for acceleration device
- * @accel_dev: Pointer to acceleration device.
- *
- * Function disables PCI Advance Error Reporting for the
- * QAT acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_disable_aer(struct adf_accel_dev *accel_dev)
-{
- struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
- pci_disable_pcie_error_reporting(pdev);
-}
-EXPORT_SYMBOL_GPL(adf_disable_aer);
-
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
index 1931e5b37f2b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
index 376cde61a60e..376cde61a60e 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 6e5de1dab97b..6e5de1dab97b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 5d8c3bdb258c..5d8c3bdb258c 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
index 421f4fb8b4dd..421f4fb8b4dd 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 7189265573c0..db79759bee61 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -52,11 +52,9 @@ struct service_hndl {
int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
-int adf_dev_init(struct adf_accel_dev *accel_dev);
-int adf_dev_start(struct adf_accel_dev *accel_dev);
-void adf_dev_stop(struct adf_accel_dev *accel_dev);
-void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
@@ -88,8 +86,6 @@ int adf_ae_start(struct adf_accel_dev *accel_dev);
int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
-void adf_enable_aer(struct adf_accel_dev *accel_dev);
-void adf_disable_aer(struct adf_accel_dev *accel_dev);
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 9190532b27eb..29c4422f243c 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -8,7 +10,6 @@
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
-#include <linux/crypto.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -56,7 +57,7 @@ static int adf_chr_drv_create(void)
return -EFAULT;
}
- adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+ adf_ctl_drv.drv_class = class_create(DEVICE_NAME);
if (IS_ERR(adf_ctl_drv.drv_class)) {
pr_err("QAT: class_create failed for adf_ctl\n");
goto err_chrdev_unreg;
@@ -243,8 +244,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!accel_dev->is_vf)
continue;
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
}
}
@@ -253,8 +253,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!adf_dev_started(accel_dev))
continue;
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
}
}
}
@@ -308,23 +307,16 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (!accel_dev)
goto out;
- if (!adf_dev_started(accel_dev)) {
- dev_info(&GET_DEV(accel_dev),
- "Starting acceleration device qat_dev%d.\n",
- ctl_data->device_id);
- ret = adf_dev_init(accel_dev);
- if (!ret)
- ret = adf_dev_start(accel_dev);
- } else {
- dev_info(&GET_DEV(accel_dev),
- "Acceleration device qat_dev%d already started.\n",
- ctl_data->device_id);
- }
+ dev_info(&GET_DEV(accel_dev),
+ "Starting acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
+
+ ret = adf_dev_up(accel_dev, false);
+
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
ctl_data->device_id);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
}
out:
kfree(ctl_data);
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 4c752eed10fe..86ee36feefad 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -223,6 +223,7 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
map->attached = true;
list_add_tail(&map->list, &vfs_table);
}
+ mutex_init(&accel_dev->state_lock);
unlock:
mutex_unlock(&table_lock);
return ret;
@@ -269,6 +270,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
}
}
unlock:
+ mutex_destroy(&accel_dev->state_lock);
list_del(&accel_dev->list);
mutex_unlock(&table_lock);
}
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
index eeb30da7587a..eeb30da7587a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
index 4bf9da2de68a..4bf9da2de68a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
index 47261b1c1da6..47261b1c1da6 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
index 6eae023354d7..6eae023354d7 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_dc.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index d1884547b5a1..d1884547b5a1 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index e4bc07529be4..e4bc07529be4 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
index 70ef11963938..70ef11963938 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
index a716545a764c..a716545a764c 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
index 5859238e37de..5859238e37de 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
index 0b1a6774412e..0b1a6774412e 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_dc.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 3148a62938fd..3148a62938fd 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 4fb4b3df5a18..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
index 8e8efe93f3ee..8e8efe93f3ee 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..17d1b774d4a8 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
index 7037c0892a8a..7037c0892a8a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pm.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index f8f8a9ee29e5..f8f8a9ee29e5 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index 64e4596a24f4..da6956699246 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -36,7 +36,7 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
/* Map worker threads to service arbiters */
- thd_2_arb_cfg = hw_data->get_arb_mapping();
+ thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
for_each_set_bit(i, &ae_mask, hw_data->num_engines)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index cef7bb8ec007..0985f64ab11a 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -56,7 +56,7 @@ int adf_service_unregister(struct service_hndl *service)
*
* Return: 0 on success, error code otherwise.
*/
-int adf_dev_init(struct adf_accel_dev *accel_dev)
+static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -146,7 +146,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return 0;
}
-EXPORT_SYMBOL_GPL(adf_dev_init);
/**
* adf_dev_start() - Start acceleration service for the given accel device
@@ -158,7 +157,7 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
*
* Return: 0 on success, error code otherwise.
*/
-int adf_dev_start(struct adf_accel_dev *accel_dev)
+static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
@@ -219,7 +218,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
}
return 0;
}
-EXPORT_SYMBOL_GPL(adf_dev_start);
/**
* adf_dev_stop() - Stop acceleration service for the given accel device
@@ -231,7 +229,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
*
* Return: void
*/
-void adf_dev_stop(struct adf_accel_dev *accel_dev)
+static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -276,7 +274,6 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
}
-EXPORT_SYMBOL_GPL(adf_dev_stop);
/**
* adf_dev_shutdown() - shutdown acceleration services and data structures
@@ -285,7 +282,7 @@ EXPORT_SYMBOL_GPL(adf_dev_stop);
* Cleanup the ring data structures and the admin comms and arbitration
* services.
*/
-void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
@@ -343,7 +340,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
-EXPORT_SYMBOL_GPL(adf_dev_shutdown);
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
@@ -375,7 +371,7 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
return 0;
}
-int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
@@ -400,3 +396,85 @@ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
return 0;
}
+
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+ accel_dev->accel_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
+ }
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+ accel_dev->accel_id);
+ ret = -EALREADY;
+ goto out;
+ }
+
+ if (config && GET_HW_DATA(accel_dev)->dev_config) {
+ ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = adf_dev_init(accel_dev);
+ if (unlikely(ret))
+ goto out;
+
+ ret = adf_dev_start(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
+
+int adf_dev_restart(struct adf_accel_dev *accel_dev)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EFAULT;
+
+ adf_dev_down(accel_dev, false);
+
+ ret = adf_dev_up(accel_dev, false);
+ /* if the device is already up, return success */
+ if (ret == -EALREADY)
+ return 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_restart);
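The three helpers above (adf_dev_up(), adf_dev_down(), adf_dev_restart()) replace the previously exported adf_dev_init()/adf_dev_start() and adf_dev_stop()/adf_dev_shutdown() pairs with single entry points serialized by state_lock. A minimal probe/remove sketch using the new interface, mirroring the adf_drv.c conversions later in this series (the example_* helpers are illustrative, not part of the patch):

static int example_qat_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = example_alloc_accel(pdev);	/* hypothetical setup helper */
	if (!accel_dev)
		return -ENOMEM;

	/* runs dev_config(), adf_dev_init() and adf_dev_start() under state_lock */
	ret = adf_dev_up(accel_dev, true);
	if (ret)
		adf_dev_down(accel_dev, false);	/* stop + shutdown, no reconfig */

	return ret;
}

static void example_qat_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = example_find_accel(pdev);	/* hypothetical lookup */

	adf_dev_down(accel_dev, false);
}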
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index ad9e135b8560..ad9e135b8560 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
index 204a42438992..204a42438992 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 14c069f0d71a..14c069f0d71a 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
index e8982d1ac896..e8982d1ac896 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 388e58bcbcaf..388e58bcbcaf 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
index 165d266d023d..165d266d023d 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
index c5f6d77d4bb8..c5f6d77d4bb8 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
index 2be048e2287b..2be048e2287b 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
index 1141258db4b6..1141258db4b6 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
index 71bc0e3f1d93..71bc0e3f1d93 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
index 1015155b6374..1015155b6374 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
index f6ee9b38c0e1..f6ee9b38c0e1 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index d85a90cc387b..f44025bb6f99 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -159,7 +159,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ ret = adf_dev_down(accel_dev, true);
if (ret)
return ret;
}
@@ -184,13 +184,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
if (!accel_dev->pf.vf_info)
return -ENOMEM;
- if (adf_dev_init(accel_dev)) {
- dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
- accel_dev->accel_id);
- return -EFAULT;
- }
-
- if (adf_dev_start(accel_dev)) {
+ if (adf_dev_up(accel_dev, false)) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
accel_dev->accel_id);
return -EFAULT;
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index e8b078e719c2..3eb6611ab1b1 100644
--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -50,38 +50,21 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
switch (ret) {
case DEV_DOWN:
- if (!adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already down\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ ret = adf_dev_down(accel_dev, true);
if (ret < 0)
return -EINVAL;
break;
case DEV_UP:
- if (adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already up\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Starting device qat_dev%d\n", accel_id);
- ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
- if (!ret)
- ret = adf_dev_init(accel_dev);
- if (!ret)
- ret = adf_dev_start(accel_dev);
-
+ ret = adf_dev_up(accel_dev, true);
if (ret < 0) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
- adf_dev_shutdown_cache_cfg(accel_dev);
+ adf_dev_down(accel_dev, true);
return ret;
}
break;
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
index 630d0483c4e0..630d0483c4e0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/intel/qat/qat_common/adf_transport.h
index e6ef6f9b7691..e6ef6f9b7691 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.h
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
index 3b6b0267bbec..d3667dbd9826 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
@@ -37,7 +37,7 @@
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index 08bca1c506c0..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
index 8b2c92ba7ca1..8b2c92ba7ca1 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index 8c95fcd8e64b..b05c3957a160 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -71,8 +71,7 @@ static void adf_dev_stop_async(struct work_struct *work)
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
index c141160421e1..c141160421e1 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index a03d43fef2b3..a03d43fef2b3 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 56cb827f93ea..56cb827f93ea 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
index 28fa17f14be4..28fa17f14be4 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 7eb5daef4f88..7eb5daef4f88 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
index 9dddae0009fc..9dddae0009fc 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
index 20b2ee1fc65a..20b2ee1fc65a 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hal.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index 4042739bb6fa..4042739bb6fa 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
index 7ea8962272f2..7ea8962272f2 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
index 208d4554283b..208d4554283b 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 69482abdb8b9..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index b4b9f0aa59b9..538dcbfbcd26 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -435,8 +435,8 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
- keylen = round_up(keylen, 16);
memcpy(cd->ucs_aes.key, key, keylen);
+ keylen = round_up(keylen, 16);
} else {
memcpy(cd->aes.key, key, keylen);
}
@@ -676,7 +676,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EBADMSG;
- areq->base.complete(&areq->base, res);
+ aead_request_complete(areq, res);
}
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
@@ -752,7 +752,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
- sreq->base.complete(&sreq->base, res);
+ skcipher_request_complete(sreq, res);
}
void qat_alg_callback(void *resp)
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
index ff5b4347f783..bb80455b3e81 100644
--- a/drivers/crypto/qat/qat_common/qat_algs_send.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
+#include <crypto/algapi.h>
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"
@@ -34,7 +35,7 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
break;
}
list_del(&req->list);
- req->base->complete(req->base, -EINPROGRESS);
+ crypto_request_complete(req->base, -EINPROGRESS);
}
spin_unlock_bh(&backlog->lock);
}
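The qat_algs.c and qat_algs_send.c hunks also switch completion reporting to the crypto_request_complete() helper (hence the new #include <crypto/algapi.h>), so drivers no longer dereference the callback stored in the async request themselves. A hedged sketch of the pattern at a generic driver completion site:

static void example_req_done(struct crypto_async_request *req, int err)
{
	/* driver-specific DMA unmapping / cleanup would go here */

	/* replaces the old open-coded req->complete(req, err) call */
	crypto_request_complete(req, err);
}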
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
index 0baca16e1eff..0baca16e1eff 100644
--- a/drivers/crypto/qat/qat_common/qat_algs_send.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 935a7e012946..935a7e012946 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 2e89ff08041b..76baed0a76c0 100644
--- a/drivers/crypto/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for (i = 0; i < bl->num_bufs; i++)
- dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, bl_dma_dir);
+ dma_unmap_single(dev, bl->buffers[i].addr,
+ bl->buffers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
if (blp != blpout) {
for (i = 0; i < blout->num_mapped_bufs; i++) {
- dma_unmap_single(dev, blout->bufers[i].addr,
- blout->bufers[i].len,
+ dma_unmap_single(dev, blout->buffers[i].addr,
+ blout->buffers[i].len,
DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -53,6 +53,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
struct qat_request_buffs *buf,
dma_addr_t extra_dst_buff,
size_t sz_extra_dst_buff,
+ unsigned int sskip,
+ unsigned int dskip,
gfp_t flags)
{
struct device *dev = &GET_DEV(accel_dev);
@@ -63,8 +65,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n);
+ size_t sz_out, sz = struct_size(bufl, buffers, n);
int node = dev_to_node(&GET_DEV(accel_dev));
+ unsigned int left;
int bufl_dma_dir;
if (unlikely(!n))
@@ -86,7 +89,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for (i = 0; i < n; i++)
- bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+ bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+ left = sskip;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -94,13 +99,21 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (!sg->length)
continue;
- bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- bufl_dma_dir);
- bufl->bufers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ if (left >= sg->length) {
+ left -= sg->length;
+ continue;
+ }
+ bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ sg->length - left,
+ bufl_dma_dir);
+ bufl->buffers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
goto err_in;
sg_nctr++;
+ if (left) {
+ bufl->buffers[y].len -= left;
+ left = 0;
+ }
}
bufl->num_bufs = sg_nctr;
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
@@ -111,12 +124,14 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
buf->sz = sz;
/* Handle out of place operation */
if (sgl != sglout) {
- struct qat_alg_buf *bufers;
+ struct qat_alg_buf *buffers;
int extra_buff = extra_dst_buff ? 1 : 0;
int n_sglout = sg_nents(sglout);
n = n_sglout + extra_buff;
- sz_out = struct_size(buflout, bufers, n);
+ sz_out = struct_size(buflout, buffers, n);
+ left = dskip;
+
sg_nctr = 0;
if (n > QAT_MAX_BUFF_DESC) {
@@ -129,9 +144,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
buf->sgl_dst_valid = true;
}
- bufers = buflout->bufers;
+ buffers = buflout->buffers;
for (i = 0; i < n; i++)
- bufers[i].addr = DMA_MAPPING_ERROR;
+ buffers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sglout, sg, n_sglout, i) {
int y = sg_nctr;
@@ -139,17 +154,25 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (!sg->length)
continue;
- bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+ if (left >= sg->length) {
+ left -= sg->length;
+ continue;
+ }
+ buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ sg->length - left,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
- bufers[y].len = sg->length;
+ buffers[y].len = sg->length;
sg_nctr++;
+ if (left) {
+ buffers[y].len -= left;
+ left = 0;
+ }
}
if (extra_buff) {
- bufers[sg_nctr].addr = extra_dst_buff;
- bufers[sg_nctr].len = sz_extra_dst_buff;
+ buffers[sg_nctr].addr = extra_dst_buff;
+ buffers[sg_nctr].len = sz_extra_dst_buff;
}
buflout->num_bufs = sg_nctr;
@@ -174,11 +197,11 @@ err_out:
n = sg_nents(sglout);
for (i = 0; i < n; i++) {
- if (buflout->bufers[i].addr == extra_dst_buff)
+ if (buflout->buffers[i].addr == extra_dst_buff)
break;
- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
- dma_unmap_single(dev, buflout->bufers[i].addr,
- buflout->bufers[i].len,
+ if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+ dma_unmap_single(dev, buflout->buffers[i].addr,
+ buflout->buffers[i].len,
DMA_FROM_DEVICE);
}
@@ -191,9 +214,9 @@ err_in:
n = sg_nents(sgl);
for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->bufers[i].addr))
- dma_unmap_single(dev, bufl->bufers[i].addr,
- bufl->bufers[i].len,
+ if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+ dma_unmap_single(dev, bufl->buffers[i].addr,
+ bufl->buffers[i].len,
bufl_dma_dir);
if (!buf->sgl_src_valid)
@@ -212,15 +235,19 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
{
dma_addr_t extra_dst_buff = 0;
size_t sz_extra_dst_buff = 0;
+ unsigned int sskip = 0;
+ unsigned int dskip = 0;
if (params) {
extra_dst_buff = params->extra_dst_buff;
sz_extra_dst_buff = params->sz_extra_dst_buff;
+ sskip = params->sskip;
+ dskip = params->dskip;
}
return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
extra_dst_buff, sz_extra_dst_buff,
- flags);
+ sskip, dskip, flags);
}
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
@@ -231,9 +258,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
int i;
for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bl->bufers[i].addr))
- dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, bl->buffers[i].addr))
+ dma_unmap_single(dev, bl->buffers[i].addr,
+ bl->buffers[i].len, DMA_FROM_DEVICE);
}
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
@@ -248,13 +275,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
size_t sz;
n = sg_nents(sgl);
- sz = struct_size(bufl, bufers, n);
+ sz = struct_size(bufl, buffers, n);
bufl = kzalloc_node(sz, GFP_KERNEL, node);
if (unlikely(!bufl))
return -ENOMEM;
for (i = 0; i < n; i++)
- bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+ bufl->buffers[i].addr = DMA_MAPPING_ERROR;
sg_nctr = 0;
for_each_sg(sgl, sg, n, i) {
@@ -263,11 +290,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
if (!sg->length)
continue;
- bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- bufl->bufers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_FROM_DEVICE);
+ bufl->buffers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
goto err_map;
sg_nctr++;
}
@@ -280,9 +307,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
err_map:
for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->bufers[i].addr))
- dma_unmap_single(dev, bufl->bufers[i].addr,
- bufl->bufers[i].len,
+ if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+ dma_unmap_single(dev, bufl->buffers[i].addr,
+ bufl->buffers[i].len,
DMA_FROM_DEVICE);
kfree(bufl);
*bl = NULL;
@@ -351,7 +378,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
if (ret)
return ret;
- new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+ new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
/* Map new firmware SGL descriptor */
new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index 8ca5e52ee9e2..d87e4f35ac39 100644
--- a/drivers/crypto/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -18,7 +18,7 @@ struct qat_alg_buf_list {
u64 resrvd;
u32 num_bufs;
u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
+ struct qat_alg_buf buffers[];
} __packed;
struct qat_alg_fixed_buf_list {
@@ -42,6 +42,8 @@ struct qat_request_buffs {
struct qat_sgl_to_bufl_params {
dma_addr_t extra_dst_buff;
size_t sz_extra_dst_buff;
+ unsigned int sskip;
+ unsigned int dskip;
};
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
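The new sskip/dskip fields let a caller of qat_bl_sgl_to_bufl() skip a prefix of the source and destination scatterlists before DMA mapping; the zlib-deflate code below uses them to step over the RFC 1950 header in the input and to reserve room for it in the output. A minimal sketch of a caller (the 2-byte values and the function name are illustrative):

static int example_map_skipping_header(struct adf_accel_dev *accel_dev,
				       struct acomp_req *areq,
				       struct qat_request_buffs *buf, gfp_t f)
{
	struct qat_sgl_to_bufl_params params = {0};

	params.sskip = 2;	/* do not map the first 2 bytes of areq->src */
	params.dskip = 2;	/* leave the first 2 bytes of areq->dst unmapped */

	return qat_bl_sgl_to_bufl(accel_dev, areq->src, areq->dst, buf,
				  &params, f);
}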
diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index 1480d36a8d2b..b533984906ec 100644
--- a/drivers/crypto/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -13,6 +13,15 @@
#include "qat_compression.h"
#include "qat_algs_send.h"
+#define QAT_RFC_1950_HDR_SIZE 2
+#define QAT_RFC_1950_FOOTER_SIZE 4
+#define QAT_RFC_1950_CM_DEFLATE 8
+#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
+#define QAT_RFC_1950_CM_MASK 0x0f
+#define QAT_RFC_1950_CM_OFFSET 4
+#define QAT_RFC_1950_DICT_MASK 0x20
+#define QAT_RFC_1950_COMP_HDR 0x785e
+
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
@@ -21,9 +30,12 @@ enum direction {
COMPRESSION = 1,
};
+struct qat_compression_req;
+
struct qat_compression_ctx {
u8 comp_ctx[QAT_COMP_CTX_SIZE];
struct qat_compression_instance *inst;
+ int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};
struct qat_dst {
@@ -94,7 +106,70 @@ static void qat_comp_resubmit(struct work_struct *work)
err:
qat_bl_free_bufl(accel_dev, qat_bufs);
- areq->base.complete(&areq->base, ret);
+ acomp_request_complete(areq, ret);
+}
+
+static int parse_zlib_header(u16 zlib_h)
+{
+ int ret = -EINVAL;
+ __be16 header;
+ u8 *header_p;
+ u8 cmf, flg;
+
+ header = cpu_to_be16(zlib_h);
+ header_p = (u8 *)&header;
+
+ flg = header_p[0];
+ cmf = header_p[1];
+
+ if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
+ return ret;
+
+ if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
+ return ret;
+
+ if (flg & QAT_RFC_1950_DICT_MASK)
+ return ret;
+
+ return 0;
+}
+
+static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
+ void *resp)
+{
+ struct acomp_req *areq = qat_req->acompress_req;
+ enum direction dir = qat_req->dir;
+ __be32 qat_produced_adler;
+
+ qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
+
+ if (dir == COMPRESSION) {
+ __be16 zlib_header;
+
+ zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
+ scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
+ areq->dlen += QAT_RFC_1950_HDR_SIZE;
+
+ scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
+ QAT_RFC_1950_FOOTER_SIZE, 1);
+ areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
+ } else {
+ __be32 decomp_adler;
+ int footer_offset;
+ int consumed;
+
+ consumed = qat_comp_get_consumed_ctr(resp);
+ footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
+ if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
+ return -EBADMSG;
+
+ scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
+ QAT_RFC_1950_FOOTER_SIZE, 0);
+
+ if (qat_produced_adler != decomp_adler)
+ return -EBADMSG;
+ }
+ return 0;
}
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
@@ -167,9 +242,12 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
res = 0;
areq->dlen = produced;
+ if (ctx->qat_comp_callback)
+ res = ctx->qat_comp_callback(qat_req, resp);
+
end:
qat_bl_free_bufl(accel_dev, &qat_req->buf);
- areq->base.complete(&areq->base, res);
+ acomp_request_complete(areq, res);
}
void qat_comp_alg_callback(void *resp)
@@ -215,24 +293,39 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
memset(ctx, 0, sizeof(*ctx));
}
-static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
- enum direction dir)
+static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = qat_comp_alg_init_tfm(acomp_tfm);
+ ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
+
+ return ret;
+}
+
+static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
+ unsigned int shdr, unsigned int sftr,
+ unsigned int dhdr, unsigned int dftr)
{
struct qat_compression_req *qat_req = acomp_request_ctx(areq);
struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_compression_instance *inst = ctx->inst;
- struct qat_sgl_to_bufl_params *p_params = NULL;
gfp_t f = qat_algs_alloc_flags(&areq->base);
- struct qat_sgl_to_bufl_params params;
- unsigned int slen = areq->slen;
- unsigned int dlen = areq->dlen;
+ struct qat_sgl_to_bufl_params params = {0};
+ int slen = areq->slen - shdr - sftr;
+ int dlen = areq->dlen - dhdr - dftr;
dma_addr_t sfbuf, dfbuf;
u8 *req = qat_req->req;
size_t ovf_buff_sz;
int ret;
+ params.sskip = shdr;
+ params.dskip = dhdr;
+
if (!areq->src || !slen)
return -EINVAL;
@@ -254,6 +347,7 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
if (!areq->dst)
return -ENOMEM;
+ dlen -= dhdr + dftr;
areq->dlen = dlen;
qat_req->dst.resubmitted = false;
}
@@ -262,11 +356,10 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
params.extra_dst_buff = inst->dc_data->ovf_buff_p;
ovf_buff_sz = inst->dc_data->ovf_buff_sz;
params.sz_extra_dst_buff = ovf_buff_sz;
- p_params = &params;
}
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
- &qat_req->buf, p_params, f);
+ &qat_req->buf, &params, f);
if (unlikely(ret))
return ret;
@@ -299,12 +392,49 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
static int qat_comp_alg_compress(struct acomp_req *req)
{
- return qat_comp_alg_compress_decompress(req, COMPRESSION);
+ return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}
static int qat_comp_alg_decompress(struct acomp_req *req)
{
- return qat_comp_alg_compress_decompress(req, DECOMPRESSION);
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
+{
+ if (!req->dst && req->dlen != 0)
+ return -EINVAL;
+
+ if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+ return -EINVAL;
+
+ return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
+ QAT_RFC_1950_HDR_SIZE,
+ QAT_RFC_1950_FOOTER_SIZE);
+}
+
+static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ u16 zlib_header;
+ int ret;
+
+ if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+ return -EBADMSG;
+
+ scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
+
+ ret = parse_zlib_header(zlib_header);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
+ return ret;
+ }
+
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
+ QAT_RFC_1950_FOOTER_SIZE, 0, 0);
}
static struct acomp_alg qat_acomp[] = { {
@@ -322,6 +452,21 @@ static struct acomp_alg qat_acomp[] = { {
.decompress = qat_comp_alg_decompress,
.dst_free = sgl_free,
.reqsize = sizeof(struct qat_compression_req),
+}, {
+ .base = {
+ .cra_name = "zlib-deflate",
+ .cra_driver_name = "qat_zlib_deflate",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_rfc1950_init_tfm,
+ .exit = qat_comp_alg_exit_tfm,
+ .compress = qat_comp_alg_rfc1950_compress,
+ .decompress = qat_comp_alg_rfc1950_decompress,
+ .dst_free = sgl_free,
+ .reqsize = sizeof(struct qat_compression_req),
} };
int qat_comp_algs_register(void)
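parse_zlib_header() above only admits deflate streams with a window size of at most 32K and no preset dictionary, and the driver emits the fixed header QAT_RFC_1950_COMP_HDR (0x785e) on compression. A short decomposition of that constant against the RFC 1950 field layout (the arithmetic is a consistency check, not taken from the patch):

/* 0x785e on the wire: CMF = 0x78, FLG = 0x5e                   */
/* CM    = 0x78 & 0x0f = 8   -> deflate                          */
/* CINFO = 0x78 >> 4   = 7   -> 32K LZ77 window                  */
/* FDICT = 0x5e & 0x20 = 0   -> no preset dictionary             */
/* FCHECK: 0x785e = 30814, 30814 % 31 == 0 -> check bits valid   */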
diff --git a/drivers/crypto/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
index 404e32c5e778..404e32c5e778 100644
--- a/drivers/crypto/qat/qat_common/qat_comp_req.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 9fd10f4242f8..3f1f35283266 100644
--- a/drivers/crypto/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -72,7 +72,7 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node)
}
if (!accel_dev) {
- pr_info("QAT: Could not find a device on node %d\n", node);
+ pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
diff --git a/drivers/crypto/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
index aebac2302dcf..aebac2302dcf 100644
--- a/drivers/crypto/qat/qat_common/qat_compression.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
index e31199eade5b..40c8e74d1cf9 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
@@ -70,7 +70,7 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
if (!accel_dev) {
- pr_info("QAT: Could not find a device on node %d\n", node);
+ pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
if (adf_dev_started(tmp_dev) &&
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
index 6a0e961bb9dc..6a0e961bb9dc 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 7bba35280dac..cbb946a80076 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -696,6 +696,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
switch (handle->pci_dev->device) {
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index b7f7869ef8b2..3ba8ca20b3d7 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -732,6 +732,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C3XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 38d6f8e1624a..38d6f8e1624a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index bc80bb475118..1ebe0b351fae 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -106,7 +106,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static const u32 *adf_get_arbiter_mapping(void)
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 7b674bbe4192..7b674bbe4192 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index ebeb17b67fcd..e18860ab5c8e 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -193,34 +193,20 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- adf_enable_aer(accel_dev);
-
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_disable_aer;
+ goto out_err_free_reg;
}
- ret = hw_data->dev_config(accel_dev);
- if (ret)
- goto out_err_disable_aer;
-
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -239,9 +225,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
- adf_disable_aer(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 0153c85ce743..0153c85ce743 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 70e56cc16ece..70e56cc16ece 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 6973fa967bc8..6973fa967bc8 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index c1485e702b3e..96854a1cd87e 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -173,20 +173,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
- ret = adf_dev_init(accel_dev);
- if (ret)
- goto out_err_dev_shutdown;
-
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
- adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -206,8 +200,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5cd332880653..b61e35b932e5 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -66,7 +66,7 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
return;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
@@ -106,7 +106,7 @@ mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
{
ctx->ops->cleanup(req);
local_bh_disable();
- req->complete(req, res);
+ crypto_request_complete(req, res);
local_bh_enable();
}
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index c72b0672fc71..8d84ad45571c 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -1104,47 +1104,27 @@ struct ahash_alg mv_sha256_alg = {
}
};
-struct mv_cesa_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
- int error)
-{
- struct mv_cesa_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
void *state, unsigned int blocksize)
{
- struct mv_cesa_ahash_result result;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- mv_cesa_hmac_ahash_complete, &result);
+ crypto_req_done, &result);
sg_init_one(&sg, pad, blocksize);
ahash_request_set_crypt(req, &sg, pad, blocksize);
- init_completion(&result.completion);
ret = crypto_ahash_init(req);
if (ret)
return ret;
ret = crypto_ahash_update(req);
- if (ret && ret != -EINPROGRESS)
- return ret;
+ ret = crypto_wait_req(ret, &result);
- wait_for_completion_interruptible(&result.completion);
- if (result.error)
- return result.error;
+ if (ret)
+ return ret;
ret = crypto_ahash_export(req, state);
if (ret)
@@ -1158,7 +1138,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
u8 *ipad, u8 *opad,
unsigned int blocksize)
{
- struct mv_cesa_ahash_result result;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
int i;
@@ -1172,17 +1152,12 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
return -ENOMEM;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- mv_cesa_hmac_ahash_complete,
- &result);
+ crypto_req_done, &result);
sg_init_one(&sg, keydup, keylen);
ahash_request_set_crypt(req, &sg, ipad, keylen);
- init_completion(&result.completion);
ret = crypto_ahash_digest(req);
- if (ret == -EINPROGRESS) {
- wait_for_completion_interruptible(&result.completion);
- ret = result.error;
- }
+ ret = crypto_wait_req(ret, &result);
/* Set the memory region to 0 to avoid any leak. */
kfree_sensitive(keydup);
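The mv_cesa hunks above drop the driver-private completion wrapper in favour of the generic crypto wait helpers from <linux/crypto.h>. The resulting synchronous-request pattern looks roughly like this (a sketch; request allocation and data setup are elided):

static int example_sync_digest(struct ahash_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/*
	 * crypto_wait_req() absorbs -EINPROGRESS/-EBUSY, sleeps until
	 * crypto_req_done() fires and returns the final status.
	 */
	return crypto_wait_req(crypto_ahash_digest(req), &wait);
}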
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index f0b5537038c2..388a06e180d6 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -168,7 +168,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
req);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
}
if (res || tdma->cur_dma == tdma_cur)
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 80ba77c793a7..1c2c870e887a 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -138,7 +138,7 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
complete:
if (areq)
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
@@ -188,7 +188,7 @@ static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
pdev = cpt_info->pdev;
do_request_cleanup(pdev, cpt_info);
}
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
}
@@ -398,7 +398,7 @@ static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key1 = key;
int ret;
- ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
ctx->key_len = keylen;
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
index 965297e96954..f0f2942c1d27 100644
--- a/drivers/crypto/marvell/octeontx2/Makefile
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptcommon.o rvu_cptpf.o rvu_cptvf.o
+rvu_cptcommon-objs := cn10k_cpt.o otx2_cptlf.o otx2_cpt_mbox_common.o
rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
- otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
- cn10k_cpt.o otx2_cpt_devlink.o
-rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
- otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
- otx2_cptvf_algs.o cn10k_cpt.o
+ otx2_cptpf_ucode.o otx2_cpt_devlink.o
+rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o \
+ otx2_cptvf_reqmgr.o otx2_cptvf_algs.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 1499ef75b5c2..93d22b328991 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -7,6 +7,9 @@
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
+static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+
static struct cpt_hw_ops otx2_hw_ops = {
.send_cmd = otx2_cpt_send_cmd,
.cpt_get_compcode = otx2_cpt_get_compcode,
@@ -19,8 +22,8 @@ static struct cpt_hw_ops cn10k_hw_ops = {
.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
};
-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
- struct otx2_cptlf_info *lf)
+static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf)
{
void __iomem *lmtline = lf->lmtline;
u64 val = (lf->slot & 0x7FF);
@@ -68,6 +71,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
@@ -91,3 +95,4 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index c091392b47e0..aaefc7e38e06 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -28,8 +28,6 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
}
-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
- struct otx2_cptlf_info *lf);
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 5012b7e669f0..6019066a6451 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -145,8 +145,6 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val, int blkaddr);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index a317319696ef..115997475beb 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -19,6 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
@@ -36,14 +37,17 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val, int blkaddr)
+static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
+ struct pci_dev *pdev, u64 reg,
+ u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -91,6 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr)
@@ -103,6 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
@@ -115,6 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
{
@@ -170,6 +177,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
{
@@ -202,6 +210,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
{
@@ -216,3 +225,4 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index c8350fcd60fa..71e5f79431af 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -274,6 +274,8 @@ void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
}
cptlf_disable_intrs(lfs);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
int lf_num, int irq_offset,
@@ -321,6 +323,7 @@ free_irq:
otx2_cptlf_unregister_interrupts(lfs);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -334,6 +337,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
free_cpumask_var(lfs->lf[slot].affinity_mask);
}
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -366,6 +370,7 @@ free_affinity_mask:
otx2_cptlf_free_irqs_affinity(lfs);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
int lfs_num)
@@ -422,6 +427,7 @@ clear_lfs_num:
lfs->lfs_num = 0;
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
@@ -431,3 +437,8 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell RVU CPT Common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index a402ccfac557..ddf6e913c1c4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -831,6 +831,8 @@ static struct pci_driver otx2_cpt_pci_driver = {
module_pci_driver(otx2_cpt_pci_driver);
+MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
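With the LF/mbox helpers now living in the separate rvu_cptcommon module, each exported symbol is placed in the CRYPTO_DEV_OCTEONTX2_CPT namespace and both the PF and VF drivers add MODULE_IMPORT_NS(). The general mechanism, sketched with a made-up symbol:

/* provider (rvu_cptcommon): export into the namespace */
int example_common_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_common_helper, CRYPTO_DEV_OCTEONTX2_CPT);

/* consumer (rvu_cptpf / rvu_cptvf): modpost flags any use without this */
MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);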
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 30b423605c9c..e27ddd3c4e55 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -120,7 +120,7 @@ static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
otx2_cpt_info_destroy(pdev, inst_info);
}
if (areq)
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
@@ -170,7 +170,7 @@ static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
pdev = inst_info->pdev;
otx2_cpt_info_destroy(pdev, inst_info);
}
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
}
@@ -412,7 +412,7 @@ static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key1 = key;
int ret;
- ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
ctx->key_len = keylen;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 3411e664cf50..392e9fee05e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -429,6 +429,8 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
+MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d6f9e2fe863d..f6b7bce0e656 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -413,11 +413,11 @@ static int dcp_chan_thread_aes(void *data)
set_current_state(TASK_RUNNING);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
- arq->complete(arq, ret);
+ crypto_request_complete(arq, ret);
}
}
@@ -709,11 +709,11 @@ static int dcp_chan_thread_sha(void *data)
set_current_state(TASK_RUNNING);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (arq) {
ret = dcp_sha_req_to_buf(arq);
- arq->complete(arq, ret);
+ crypto_request_complete(arq, ret);
}
}
@@ -1022,21 +1022,15 @@ static int mxs_dcp_probe(struct platform_device *pdev)
sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
/* DCP clock is optional, only used on some SOCs */
- sdcp->dcp_clk = devm_clk_get(dev, "dcp");
- if (IS_ERR(sdcp->dcp_clk)) {
- if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
- return PTR_ERR(sdcp->dcp_clk);
- sdcp->dcp_clk = NULL;
- }
- ret = clk_prepare_enable(sdcp->dcp_clk);
- if (ret)
- return ret;
+ sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
+ if (IS_ERR(sdcp->dcp_clk))
+ return PTR_ERR(sdcp->dcp_clk);
/* Restart the DCP block. */
ret = stmp_reset_block(sdcp->base);
if (ret) {
dev_err(dev, "Failed reset\n");
- goto err_disable_unprepare_clk;
+ return ret;
}
/* Initialize control register. */
@@ -1076,7 +1070,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
- goto err_disable_unprepare_clk;
+ return ret;
}
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1134,9 +1128,6 @@ err_destroy_aes_thread:
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
-err_disable_unprepare_clk:
- clk_disable_unprepare(sdcp->dcp_clk);
-
return ret;
}
@@ -1156,8 +1147,6 @@ static int mxs_dcp_remove(struct platform_device *pdev)
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
- clk_disable_unprepare(sdcp->dcp_clk);
-
platform_set_drvdata(pdev, NULL);
global_sdcp = NULL;
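The mxs-dcp probe/remove simplification relies on devm_clk_get_optional_enabled(), which folds the optional lookup, clk_prepare_enable() and the devres-managed disable into one call; a minimal sketch of the pattern (device and clock names are illustrative):

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* returns NULL if the clock is absent, an ERR_PTR only on real errors */
	clk = devm_clk_get_optional_enabled(&pdev->dev, "dcp");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* no clk_disable_unprepare() needed in error paths or in ->remove() */
	return 0;
}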
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index f34c75a862f2..8c859872c183 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -72,7 +72,7 @@ static int (*nx842_powernv_exec)(const unsigned char *in,
unsigned int inlen, unsigned char *out,
unsigned int *outlenp, void *workmem, int fc);
-/**
+/*
* setup_indirect_dde - Setup an indirect DDE
*
* The DDE is setup with the DDE count, byte count, and address of
@@ -89,7 +89,7 @@ static void setup_indirect_dde(struct data_descriptor_entry *dde,
dde->address = cpu_to_be64(nx842_get_pa(ddl));
}
-/**
+/*
* setup_direct_dde - Setup single DDE from buffer
*
* The DDE is setup with the buffer and length. The buffer must be properly
@@ -111,7 +111,7 @@ static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
return l;
}
-/**
+/*
* setup_ddl - Setup DDL from buffer
*
* Returns:
@@ -181,9 +181,6 @@ static int setup_ddl(struct data_descriptor_entry *dde,
CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \
(unsigned long)be64_to_cpu((csb)->address))
-/**
- * wait_for_csb
- */
static int wait_for_csb(struct nx842_workmem *wmem,
struct coprocessor_status_block *csb)
{
@@ -632,8 +629,8 @@ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
* @inlen: input buffer size
* @out: output buffer pointer
* @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
+ * @wmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
*
* Returns: see @nx842_powernv_exec()
*/
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 3ea334b7f820..35f2d0d8507e 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -123,14 +123,16 @@ struct ibm_nx842_counters {
atomic64_t decomp_times[32];
};
-static struct nx842_devdata {
+struct nx842_devdata {
struct vio_dev *vdev;
struct device *dev;
struct ibm_nx842_counters *counters;
unsigned int max_sg_len;
unsigned int max_sync_size;
unsigned int max_sync_sg;
-} __rcu *devdata;
+};
+
+static struct nx842_devdata __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);
#define NX842_COUNTER_INC(_x) \
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index d3780be44a76..fce49c0dee3e 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -22,6 +23,8 @@
#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
+#define QCE_DEFAULT_MEM_BANDWIDTH 393600
+
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
@@ -107,7 +110,7 @@ static int qce_handle_queue(struct qce_device *qce,
if (backlog) {
spin_lock_bh(&qce->lock);
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
spin_unlock_bh(&qce->lock);
}
@@ -132,7 +135,7 @@ static void qce_tasklet_req_done(unsigned long data)
spin_unlock_irqrestore(&qce->lock, flags);
if (req)
- req->complete(req, qce->result);
+ crypto_request_complete(req, qce->result);
qce_handle_queue(qce, NULL);
}
@@ -206,22 +209,30 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get(qce->dev, "core");
+ qce->core = devm_clk_get_optional(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
- ret = clk_prepare_enable(qce->core);
+ qce->mem_path = devm_of_icc_get(qce->dev, "memory");
+ if (IS_ERR(qce->mem_path))
+ return PTR_ERR(qce->mem_path);
+
+ ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
if (ret)
return ret;
+ ret = clk_prepare_enable(qce->core);
+ if (ret)
+ goto err_mem_path_disable;
+
ret = clk_prepare_enable(qce->iface);
if (ret)
goto err_clks_core;
@@ -260,6 +271,9 @@ err_clks_iface:
clk_disable_unprepare(qce->iface);
err_clks_core:
clk_disable_unprepare(qce->core);
+err_mem_path_disable:
+ icc_set_bw(qce->mem_path, 0, 0);
+
return ret;
}
@@ -279,6 +293,7 @@ static int qce_crypto_remove(struct platform_device *pdev)
static const struct of_device_id qce_crypto_of_match[] = {
{ .compatible = "qcom,crypto-v5.1", },
{ .compatible = "qcom,crypto-v5.4", },
+ { .compatible = "qcom,qce", },
{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 085774cdf641..228fcd69ec51 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -35,6 +35,7 @@ struct qce_device {
void __iomem *base;
struct device *dev;
struct clk *core, *iface, *bus;
+ struct icc_path *mem_path;
struct qce_dma_data dma;
int burst_size;
unsigned int pipe_pair_id;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index b79e49aa724f..1c4d5fb05d69 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -499,7 +499,7 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
/* Calls the completion. Cannot be called with dev->lock hold. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{
- req->base.complete(&req->base, err);
+ skcipher_request_complete(req, err);
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -1355,7 +1355,7 @@ static void s5p_hash_finish_req(struct ahash_request *req, int err)
spin_unlock_irqrestore(&dd->hash_lock, flags);
if (req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
/**
@@ -1397,7 +1397,7 @@ retry:
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
dd->hash_req = req;
@@ -1991,7 +1991,7 @@ static void s5p_tasklet_cb(unsigned long data)
spin_unlock_irqrestore(&dev->lock, flags);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
dev->req = skcipher_request_cast(async_req);
dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index f4bc06c24ad8..df5f9d675c57 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1037,7 +1037,7 @@ static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
static void sa_aes_dma_in_callback(void *data)
{
- struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct sa_rx_data *rxd = data;
struct skcipher_request *req;
u32 *result;
__be32 *mdptr;
@@ -1351,7 +1351,7 @@ static int sa_decrypt(struct skcipher_request *req)
static void sa_sha_dma_in_callback(void *data)
{
- struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct sa_rx_data *rxd = data;
struct ahash_request *req;
struct crypto_ahash *tfm;
unsigned int authsize;
@@ -1689,7 +1689,7 @@ static void sa_sha_cra_exit(struct crypto_tfm *tfm)
static void sa_aead_dma_in_callback(void *data)
{
- struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct sa_rx_data *rxd = data;
struct aead_request *req;
struct crypto_aead *tfm;
unsigned int start;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 7ab20fb95166..4c799df3e883 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1035,7 +1035,7 @@ static int sahara_sha_process(struct ahash_request *req)
static int sahara_queue_manage(void *data)
{
- struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct sahara_dev *dev = data;
struct crypto_async_request *async_req;
struct crypto_async_request *backlog;
int ret = 0;
@@ -1049,7 +1049,7 @@ static int sahara_queue_manage(void *data)
spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (async_req) {
if (crypto_tfm_alg_type(async_req->tfm) ==
@@ -1065,7 +1065,7 @@ static int sahara_queue_manage(void *data)
ret = sahara_aes_process(req);
}
- async_req->complete(async_req, ret);
+ crypto_request_complete(async_req, ret);
continue;
}
@@ -1270,7 +1270,7 @@ static struct ahash_alg sha_v4_algs[] = {
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
- struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct sahara_dev *dev = data;
unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 4208338e72b6..6b8d731092a4 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -597,7 +597,6 @@ static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp)
static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
{
- unsigned int i;
size_t written;
size_t len;
u32 alen = cryp->areq->assoclen;
@@ -623,8 +622,8 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
written = min_t(size_t, AES_BLOCK_SIZE - len, alen);
scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
- for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
+
+ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
cryp->header_in -= written;
@@ -1363,18 +1362,14 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
u32 out_tag[AES_BLOCK_32];
/* Get and write tag */
- for (i = 0; i < AES_BLOCK_32; i++)
- out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout);
-
+ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
} else {
/* Get and check tag */
u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout);
+ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
if (crypto_memneq(in_tag, out_tag, cryp->authsize))
ret = -EBADMSG;
@@ -1415,12 +1410,9 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
{
- unsigned int i;
u32 block[AES_BLOCK_32];
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
-
+ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
@@ -1429,14 +1421,11 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
- unsigned int i;
u32 block[AES_BLOCK_32] = {0};
scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_in), 0);
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
-
+ writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32));
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
@@ -1480,8 +1469,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
* Same code as stm32_cryp_irq_read_data(), but we want to store
* block value
*/
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
+ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1499,8 +1487,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* f) write padded data */
- for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
+ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
/* g) Empty fifo out */
err = stm32_cryp_wait_output(cryp);
@@ -1580,8 +1567,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
* Same code as stm32_cryp_irq_read_data(), but we want to store
* block value
*/
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
+ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1660,15 +1646,14 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
{
- unsigned int i;
u32 block[AES_BLOCK_32] = {0};
size_t written;
written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
- for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
+
+ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
cryp->header_in -= written;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index d33006d43f76..f0df32382719 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -7,7 +7,6 @@
*/
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -32,6 +31,7 @@
#define HASH_CR 0x00
#define HASH_DIN 0x04
#define HASH_STR 0x08
+#define HASH_UX500_HREG(x) (0x0c + ((x) * 0x04))
#define HASH_IMR 0x20
#define HASH_SR 0x24
#define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
@@ -54,6 +54,10 @@
#define HASH_CR_ALGO_SHA224 0x40000
#define HASH_CR_ALGO_SHA256 0x40080
+#define HASH_CR_UX500_EMPTYMSG BIT(20)
+#define HASH_CR_UX500_ALGO_SHA1 BIT(7)
+#define HASH_CR_UX500_ALGO_SHA256 0x0
+
/* Interrupt */
#define HASH_DINIE BIT(0)
#define HASH_DCIE BIT(1)
@@ -63,7 +67,7 @@
#define HASH_MASK_DATA_INPUT BIT(1)
/* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER 53
+#define HASH_CSR_REGISTER_NUMBER 54
/* Status Flags */
#define HASH_SR_DATA_INPUT_READY BIT(0)
@@ -91,7 +95,7 @@
#define HASH_FLAGS_SHA1 BIT(19)
#define HASH_FLAGS_SHA224 BIT(20)
#define HASH_FLAGS_SHA256 BIT(21)
-#define HASH_FLAGS_ERRORS BIT(22)
+#define HASH_FLAGS_EMPTY BIT(22)
#define HASH_FLAGS_HMAC BIT(23)
#define HASH_OP_UPDATE 1
@@ -115,21 +119,31 @@ enum stm32_hash_data_format {
struct stm32_hash_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
+ struct crypto_shash *xtfm;
unsigned long flags;
u8 key[HASH_MAX_KEY_SIZE];
int keylen;
};
+struct stm32_hash_state {
+ u32 flags;
+
+ u16 bufcnt;
+ u16 buflen;
+
+ u8 buffer[HASH_BUFLEN] __aligned(4);
+
+ /* hash state */
+ u32 hw_context[3 + HASH_CSR_REGISTER_NUMBER];
+};
+
struct stm32_hash_request_ctx {
struct stm32_hash_dev *hdev;
- unsigned long flags;
unsigned long op;
u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
size_t digcnt;
- size_t bufcnt;
- size_t buflen;
/* DMA */
struct scatterlist *sg;
@@ -143,10 +157,7 @@ struct stm32_hash_request_ctx {
u8 data_type;
- u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
-
- /* Export Context */
- u32 *hw_context;
+ struct stm32_hash_state state;
};
struct stm32_hash_algs_info {
@@ -157,6 +168,10 @@ struct stm32_hash_algs_info {
struct stm32_hash_pdata {
struct stm32_hash_algs_info *algs_info;
size_t algs_info_size;
+ bool has_sr;
+ bool has_mdmat;
+ bool broken_emptymsg;
+ bool ux500;
};
struct stm32_hash_dev {
@@ -168,11 +183,11 @@ struct stm32_hash_dev {
phys_addr_t phys_base;
u32 dma_mode;
u32 dma_maxburst;
+ bool polled;
struct ahash_request *req;
struct crypto_engine *engine;
- int err;
unsigned long flags;
struct dma_chan *dma_lch;
@@ -208,6 +223,11 @@ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
u32 status;
+ /* The Ux500 lacks the special status register, we poll the DCAL bit instead */
+ if (!hdev->pdata->has_sr)
+ return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
+ !(status & HASH_STR_DCAL), 10, 10000);
+
return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
!(status & HASH_SR_BUSY), 10, 10000);
}
@@ -249,27 +269,34 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
return 0;
}
-static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_state *state = &rctx->state;
u32 reg = HASH_CR_INIT;
if (!(hdev->flags & HASH_FLAGS_INIT)) {
- switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ switch (state->flags & HASH_FLAGS_ALGO_MASK) {
case HASH_FLAGS_MD5:
reg |= HASH_CR_ALGO_MD5;
break;
case HASH_FLAGS_SHA1:
- reg |= HASH_CR_ALGO_SHA1;
+ if (hdev->pdata->ux500)
+ reg |= HASH_CR_UX500_ALGO_SHA1;
+ else
+ reg |= HASH_CR_ALGO_SHA1;
break;
case HASH_FLAGS_SHA224:
reg |= HASH_CR_ALGO_SHA224;
break;
case HASH_FLAGS_SHA256:
- reg |= HASH_CR_ALGO_SHA256;
+ if (hdev->pdata->ux500)
+ reg |= HASH_CR_UX500_ALGO_SHA256;
+ else
+ reg |= HASH_CR_ALGO_SHA256;
break;
default:
reg |= HASH_CR_ALGO_MD5;
@@ -277,14 +304,15 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
- if (rctx->flags & HASH_FLAGS_HMAC) {
+ if (state->flags & HASH_FLAGS_HMAC) {
hdev->flags |= HASH_FLAGS_HMAC;
reg |= HASH_CR_MODE;
if (ctx->keylen > HASH_LONG_KEY)
reg |= HASH_CR_LKEY;
}
- stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+ if (!hdev->polled)
+ stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
stm32_hash_write(hdev, HASH_CR, reg);
@@ -296,11 +324,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
+ struct stm32_hash_state *state = &rctx->state;
size_t count;
- while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+ while ((state->bufcnt < state->buflen) && rctx->total) {
count = min(rctx->sg->length - rctx->offset, rctx->total);
- count = min(count, rctx->buflen - rctx->bufcnt);
+ count = min_t(size_t, count, state->buflen - state->bufcnt);
if (count <= 0) {
if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -311,10 +340,10 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
}
}
- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
- rctx->offset, count, 0);
+ scatterwalk_map_and_copy(state->buffer + state->bufcnt,
+ rctx->sg, rctx->offset, count, 0);
- rctx->bufcnt += count;
+ state->bufcnt += count;
rctx->offset += count;
rctx->total -= count;
@@ -331,13 +360,23 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
const u8 *buf, size_t length, int final)
{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct stm32_hash_state *state = &rctx->state;
unsigned int count, len32;
const u32 *buffer = (const u32 *)buf;
u32 reg;
- if (final)
+ if (final) {
hdev->flags |= HASH_FLAGS_FINAL;
+ /* Do not process empty messages if hw is buggy. */
+ if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
+ hdev->pdata->broken_emptymsg) {
+ state->flags |= HASH_FLAGS_EMPTY;
+ return 0;
+ }
+ }
+
len32 = DIV_ROUND_UP(length, sizeof(u32));
dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
@@ -345,7 +384,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
hdev->flags |= HASH_FLAGS_CPU;
- stm32_hash_write_ctrl(hdev);
+ stm32_hash_write_ctrl(hdev, length);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -362,6 +401,9 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
stm32_hash_write(hdev, HASH_DIN, buffer[count]);
if (final) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
stm32_hash_set_nblw(hdev, length);
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
@@ -380,29 +422,48 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct stm32_hash_state *state = &rctx->state;
+ u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
+ int i;
- dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+ dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
- final = (rctx->flags & HASH_FLAGS_FINUP);
+ final = state->flags & HASH_FLAGS_FINAL;
- while ((rctx->total >= rctx->buflen) ||
- (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+ while ((rctx->total >= state->buflen) ||
+ (state->bufcnt + rctx->total >= state->buflen)) {
stm32_hash_append_sg(rctx);
- bufcnt = rctx->bufcnt;
- rctx->bufcnt = 0;
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+ bufcnt = state->bufcnt;
+ state->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
+ if (err)
+ return err;
}
stm32_hash_append_sg(rctx);
if (final) {
- bufcnt = rctx->bufcnt;
- rctx->bufcnt = 0;
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
- (rctx->flags & HASH_FLAGS_FINUP));
+ bufcnt = state->bufcnt;
+ state->bufcnt = 0;
+ return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
}
+ if (!(hdev->flags & HASH_FLAGS_INIT))
+ return 0;
+
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
+ if (!hdev->pdata->ux500)
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+ state->flags |= HASH_FLAGS_INIT;
+
return err;
}
@@ -431,11 +492,12 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
reg = stm32_hash_read(hdev, HASH_CR);
- if (mdma)
- reg |= HASH_CR_MDMAT;
- else
- reg &= ~HASH_CR_MDMAT;
-
+ if (!hdev->pdata->has_mdmat) {
+ if (mdma)
+ reg |= HASH_CR_MDMAT;
+ else
+ reg &= ~HASH_CR_MDMAT;
+ }
reg |= HASH_CR_DMAE;
stm32_hash_write(hdev, HASH_CR, reg);
@@ -543,10 +605,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ u32 *buffer = (void *)rctx->state.buffer;
struct scatterlist sg[1], *tsg;
int err = 0, len = 0, reg, ncp = 0;
unsigned int i;
- u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
@@ -556,7 +618,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
if (rctx->nents < 0)
return -EINVAL;
- stm32_hash_write_ctrl(hdev);
+ stm32_hash_write_ctrl(hdev, rctx->total);
if (hdev->flags & HASH_FLAGS_HMAC) {
err = stm32_hash_hmac_dma_send(hdev);
@@ -574,7 +636,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
ncp = sg_pcopy_to_buffer(
rctx->sg, rctx->nents,
- rctx->buffer, sg->length - len,
+ rctx->state.buffer, sg->length - len,
rctx->total - sg->length + len);
sg->length = len;
@@ -685,47 +747,52 @@ static int stm32_hash_init(struct ahash_request *req)
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_state *state = &rctx->state;
rctx->hdev = hdev;
- rctx->flags = HASH_FLAGS_CPU;
+ state->flags = HASH_FLAGS_CPU;
rctx->digcnt = crypto_ahash_digestsize(tfm);
switch (rctx->digcnt) {
case MD5_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_MD5;
+ state->flags |= HASH_FLAGS_MD5;
break;
case SHA1_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_SHA1;
+ state->flags |= HASH_FLAGS_SHA1;
break;
case SHA224_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_SHA224;
+ state->flags |= HASH_FLAGS_SHA224;
break;
case SHA256_DIGEST_SIZE:
- rctx->flags |= HASH_FLAGS_SHA256;
+ state->flags |= HASH_FLAGS_SHA256;
break;
default:
return -EINVAL;
}
- rctx->bufcnt = 0;
- rctx->buflen = HASH_BUFLEN;
+ rctx->state.bufcnt = 0;
+ rctx->state.buflen = HASH_BUFLEN;
rctx->total = 0;
rctx->offset = 0;
rctx->data_type = HASH_DATA_8_BITS;
- memset(rctx->buffer, 0, HASH_BUFLEN);
-
if (ctx->flags & HASH_FLAGS_HMAC)
- rctx->flags |= HASH_FLAGS_HMAC;
+ state->flags |= HASH_FLAGS_HMAC;
- dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+ dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
return 0;
}
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct stm32_hash_state *state = &rctx->state;
+
+ if (!(state->flags & HASH_FLAGS_CPU))
+ return stm32_hash_dma_send(hdev);
+
return stm32_hash_update_cpu(hdev);
}
@@ -733,27 +800,58 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
struct ahash_request *req = hdev->req;
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- int err;
- int buflen = rctx->bufcnt;
+ struct stm32_hash_state *state = &rctx->state;
+ int buflen = state->bufcnt;
- rctx->bufcnt = 0;
+ if (state->flags & HASH_FLAGS_FINUP)
+ return stm32_hash_update_req(hdev);
- if (!(rctx->flags & HASH_FLAGS_CPU))
- err = stm32_hash_dma_send(hdev);
- else
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
+ state->bufcnt = 0;
+ return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
+}
- return err;
+static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = rctx->hdev;
+ int ret;
+
+ dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
+ ctx->keylen);
+
+ if (!ctx->xtfm) {
+ dev_err(hdev->dev, "no fallback engine\n");
+ return;
+ }
+
+ if (ctx->keylen) {
+ ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
+ if (ret) {
+ dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
+ return;
+ }
+ }
+
+ ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
+ if (ret)
+ dev_err(hdev->dev, "shash digest error\n");
}
static void stm32_hash_copy_hash(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
+ struct stm32_hash_dev *hdev = rctx->hdev;
__be32 *hash = (void *)rctx->digest;
unsigned int i, hashsize;
- switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
+ return stm32_hash_emptymsg_fallback(req);
+
+ switch (state->flags & HASH_FLAGS_ALGO_MASK) {
case HASH_FLAGS_MD5:
hashsize = MD5_DIGEST_SIZE;
break;
@@ -770,9 +868,14 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
return;
}
- for (i = 0; i < hashsize / sizeof(u32); i++)
- hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
- HASH_HREG(i)));
+ for (i = 0; i < hashsize / sizeof(u32); i++) {
+ if (hdev->pdata->ux500)
+ hash[i] = cpu_to_be32(stm32_hash_read(hdev,
+ HASH_UX500_HREG(i)));
+ else
+ hash[i] = cpu_to_be32(stm32_hash_read(hdev,
+ HASH_HREG(i)));
+ }
}
static int stm32_hash_finish(struct ahash_request *req)
@@ -795,13 +898,6 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
stm32_hash_copy_hash(req);
err = stm32_hash_finish(req);
- hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
- HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
- HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
- HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
- HASH_FLAGS_HMAC_KEY);
- } else {
- rctx->flags |= HASH_FLAGS_ERRORS;
}
pm_runtime_mark_last_busy(hdev->dev);
@@ -810,73 +906,70 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
crypto_finalize_hash_request(hdev->engine, req, err);
}
-static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
- struct stm32_hash_request_ctx *rctx)
-{
- pm_runtime_get_sync(hdev->dev);
-
- if (!(HASH_FLAGS_INIT & hdev->flags)) {
- stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
- stm32_hash_write(hdev, HASH_STR, 0);
- stm32_hash_write(hdev, HASH_DIN, 0);
- stm32_hash_write(hdev, HASH_IMR, 0);
- hdev->err = 0;
- }
-
- return 0;
-}
-
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
-
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
struct ahash_request *req)
{
return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = container_of(areq, struct ahash_request,
base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_request_ctx *rctx;
+ struct stm32_hash_state *state = &rctx->state;
+ int err = 0;
if (!hdev)
return -ENODEV;
- hdev->req = req;
-
- rctx = ahash_request_ctx(req);
-
dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
rctx->op, req->nbytes);
- return stm32_hash_hw_init(hdev, rctx);
-}
+ pm_runtime_get_sync(hdev->dev);
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
-{
- struct ahash_request *req = container_of(areq, struct ahash_request,
- base);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_request_ctx *rctx;
- int err = 0;
+ hdev->req = req;
+ hdev->flags = 0;
+
+ if (state->flags & HASH_FLAGS_INIT) {
+ u32 *preg = rctx->state.hw_context;
+ u32 reg;
+ int i;
+
+ if (!hdev->pdata->ux500)
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
+ stm32_hash_write(hdev, HASH_STR, *preg++);
+ stm32_hash_write(hdev, HASH_CR, *preg);
+ reg = *preg++ | HASH_CR_INIT;
+ stm32_hash_write(hdev, HASH_CR, reg);
- if (!hdev)
- return -ENODEV;
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ stm32_hash_write(hdev, HASH_CSR(i), *preg++);
- hdev->req = req;
+ hdev->flags |= HASH_FLAGS_INIT;
- rctx = ahash_request_ctx(req);
+ if (state->flags & HASH_FLAGS_HMAC)
+ hdev->flags |= HASH_FLAGS_HMAC |
+ HASH_FLAGS_HMAC_KEY;
+ }
if (rctx->op == HASH_OP_UPDATE)
err = stm32_hash_update_req(hdev);
else if (rctx->op == HASH_OP_FINAL)
err = stm32_hash_final_req(hdev);
+ /* If we have an IRQ, wait for that, else poll for completion */
+ if (err == -EINPROGRESS && hdev->polled) {
+ if (stm32_hash_wait_busy(hdev))
+ err = -ETIMEDOUT;
+ else {
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ err = 0;
+ }
+ }
+
if (err != -EINPROGRESS)
/* done task will not finish it, so do it here */
stm32_hash_finish_req(req, err);
@@ -898,15 +991,16 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
static int stm32_hash_update(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
- if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+ if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
return 0;
rctx->total = req->nbytes;
rctx->sg = req->src;
rctx->offset = 0;
- if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+ if ((state->bufcnt + rctx->total < state->buflen)) {
stm32_hash_append_sg(rctx);
return 0;
}
@@ -917,8 +1011,9 @@ static int stm32_hash_update(struct ahash_request *req)
static int stm32_hash_final(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
- rctx->flags |= HASH_FLAGS_FINUP;
+ state->flags |= HASH_FLAGS_FINAL;
return stm32_hash_enqueue(req, HASH_OP_FINAL);
}
@@ -928,25 +1023,21 @@ static int stm32_hash_finup(struct ahash_request *req)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- int err1, err2;
-
- rctx->flags |= HASH_FLAGS_FINUP;
+ struct stm32_hash_state *state = &rctx->state;
- if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
- rctx->flags &= ~HASH_FLAGS_CPU;
+ if (!req->nbytes)
+ goto out;
- err1 = stm32_hash_update(req);
-
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
- return err1;
+ state->flags |= HASH_FLAGS_FINUP;
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed, except EINPROGRESS
- */
- err2 = stm32_hash_final(req);
+ if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+ state->flags &= ~HASH_FLAGS_CPU;
- return err1 ?: err2;
+out:
+ return stm32_hash_final(req);
}
static int stm32_hash_digest(struct ahash_request *req)
@@ -957,32 +1048,8 @@ static int stm32_hash_digest(struct ahash_request *req)
static int stm32_hash_export(struct ahash_request *req, void *out)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- u32 *preg;
- unsigned int i;
-
- pm_runtime_get_sync(hdev->dev);
-
- while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
- cpu_relax();
-
- rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
- sizeof(u32),
- GFP_KERNEL);
-
- preg = rctx->hw_context;
- *preg++ = stm32_hash_read(hdev, HASH_IMR);
- *preg++ = stm32_hash_read(hdev, HASH_STR);
- *preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
- *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
-
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
-
- memcpy(out, rctx, sizeof(*rctx));
+ memcpy(out, &rctx->state, sizeof(rctx->state));
return 0;
}
@@ -990,31 +1057,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- const u32 *preg = in;
- u32 reg;
- unsigned int i;
-
- memcpy(rctx, in, sizeof(*rctx));
-
- preg = rctx->hw_context;
-
- pm_runtime_get_sync(hdev->dev);
-
- stm32_hash_write(hdev, HASH_IMR, *preg++);
- stm32_hash_write(hdev, HASH_STR, *preg++);
- stm32_hash_write(hdev, HASH_CR, *preg);
- reg = *preg++ | HASH_CR_INIT;
- stm32_hash_write(hdev, HASH_CR, reg);
-
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
- stm32_hash_write(hdev, HASH_CSR(i), *preg++);
-
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
- kfree(rctx->hw_context);
+ stm32_hash_init(req);
+ memcpy(&rctx->state, in, sizeof(rctx->state));
return 0;
}
@@ -1034,6 +1079,29 @@ static int stm32_hash_setkey(struct crypto_ahash *tfm,
return 0;
}
+static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_shash *xtfm;
+
+ /* The fallback is only needed on Ux500 */
+ if (!hdev->pdata->ux500)
+ return 0;
+
+ xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(xtfm)) {
+ dev_err(hdev->dev, "failed to allocate %s fallback\n",
+ name);
+ return PTR_ERR(xtfm);
+ }
+ dev_info(hdev->dev, "allocated %s fallback\n", name);
+ ctx->xtfm = xtfm;
+
+ return 0;
+}
+
static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
const char *algs_hmac_name)
{
@@ -1048,9 +1116,8 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
ctx->flags |= HASH_FLAGS_HMAC;
ctx->enginectx.op.do_one_request = stm32_hash_one_request;
- ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- return 0;
+
+ return stm32_hash_init_fallback(tfm);
}
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
@@ -1078,6 +1145,14 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
return stm32_hash_cra_init_algs(tfm, "sha256");
}
+static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->xtfm)
+ crypto_free_shash(ctx->xtfm);
+}
+
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
@@ -1121,7 +1196,7 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct ahash_alg algs_md5_sha1[] = {
+static struct ahash_alg algs_md5[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1132,7 +1207,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
@@ -1143,6 +1218,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1158,7 +1234,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
@@ -1169,10 +1245,14 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_md5_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
},
+};
+
+static struct ahash_alg algs_sha1[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1183,7 +1263,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
@@ -1194,6 +1274,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1209,7 +1290,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
@@ -1220,13 +1301,14 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha1_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
},
};
-static struct ahash_alg algs_sha224_sha256[] = {
+static struct ahash_alg algs_sha224[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1237,7 +1319,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
@@ -1248,6 +1330,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1263,7 +1346,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
@@ -1274,10 +1357,14 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha224_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
},
+};
+
+static struct ahash_alg algs_sha256[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1288,7 +1375,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.import = stm32_hash_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
@@ -1299,6 +1386,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1314,7 +1402,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",
@@ -1325,6 +1413,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha256_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1370,36 +1459,74 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
return 0;
}
+static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
+ {
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
+ },
+ {
+ .algs_list = algs_sha256,
+ .size = ARRAY_SIZE(algs_sha256),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
+ .algs_info = stm32_hash_algs_info_ux500,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
+ .broken_emptymsg = true,
+ .ux500 = true,
+};
+
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
{
- .algs_list = algs_md5_sha1,
- .size = ARRAY_SIZE(algs_md5_sha1),
+ .algs_list = algs_md5,
+ .size = ARRAY_SIZE(algs_md5),
+ },
+ {
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
},
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
.algs_info = stm32_hash_algs_info_stm32f4,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+ .has_sr = true,
+ .has_mdmat = true,
};
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
{
- .algs_list = algs_md5_sha1,
- .size = ARRAY_SIZE(algs_md5_sha1),
+ .algs_list = algs_md5,
+ .size = ARRAY_SIZE(algs_md5),
},
{
- .algs_list = algs_sha224_sha256,
- .size = ARRAY_SIZE(algs_sha224_sha256),
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
+ },
+ {
+ .algs_list = algs_sha224,
+ .size = ARRAY_SIZE(algs_sha224),
+ },
+ {
+ .algs_list = algs_sha256,
+ .size = ARRAY_SIZE(algs_sha256),
},
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
.algs_info = stm32_hash_algs_info_stm32f7,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+ .has_sr = true,
+ .has_mdmat = true,
};
static const struct of_device_id stm32_hash_of_match[] = {
{
+ .compatible = "stericsson,ux500-hash",
+ .data = &stm32_hash_pdata_ux500,
+ },
+ {
.compatible = "st,stm32f456-hash",
.data = &stm32_hash_pdata_stm32f4,
},
@@ -1441,8 +1568,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
if (!hdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdev->io_base = devm_ioremap_resource(dev, res);
+ hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hdev->io_base))
return PTR_ERR(hdev->io_base);
@@ -1452,16 +1578,23 @@ static int stm32_hash_probe(struct platform_device *pdev)
if (ret)
return ret;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO)
return irq;
- ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
- stm32_hash_irq_thread, IRQF_ONESHOT,
- dev_name(dev), hdev);
- if (ret) {
- dev_err(dev, "Cannot grab IRQ\n");
- return ret;
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(dev, irq,
+ stm32_hash_irq_handler,
+ stm32_hash_irq_thread,
+ IRQF_ONESHOT,
+ dev_name(dev), hdev);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+ } else {
+ dev_info(dev, "No IRQ, use polling mode\n");
+ hdev->polled = true;
}
hdev->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1503,9 +1636,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
case 0:
break;
case -ENOENT:
- dev_dbg(dev, "DMA mode not available\n");
+ case -ENODEV:
+ dev_info(dev, "DMA mode not available\n");
break;
default:
+ dev_err(dev, "DMA init error %d\n", ret);
goto err_dma;
}
@@ -1524,7 +1659,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
if (ret)
goto err_engine_start;
- hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+ if (hdev->pdata->ux500)
+ /* FIXME: implement DMA mode for Ux500 */
+ hdev->dma_mode = 0;
+ else
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
/* Register algos */
ret = stm32_hash_register_algs(hdev);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 71db6450b6aa..bb27f011cf31 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1393,7 +1393,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len += sizeof(struct talitos_desc);
alloc_len += ivsize;
- edesc = kmalloc(alloc_len, GFP_DMA | flags);
+ edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
if (!edesc)
return ERR_PTR(-ENOMEM);
if (ivsize) {
@@ -1560,7 +1560,7 @@ static void skcipher_done(struct device *dev,
kfree(edesc);
- areq->base.complete(&areq->base, err);
+ skcipher_request_complete(areq, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
@@ -1759,7 +1759,7 @@ static void ahash_done(struct device *dev,
kfree(edesc);
- areq->base.complete(&areq->base, err);
+ ahash_request_complete(areq, err);
}
/*
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
deleted file mode 100644
index dcbd7404768f..000000000000
--- a/drivers/crypto/ux500/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) ST-Ericsson SA 2010
-# Author: Shujuan Chen (shujuan.chen@stericsson.com)
-#
-
-config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
-
-config CRYPTO_DEV_UX500_DEBUG
- bool "Activate ux500 platform debug-mode for crypto and hash block"
- depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
- help
- Say Y if you want to add debug prints to ux500_hash and
- ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
deleted file mode 100644
index f1aa4edf66f4..000000000000
--- a/drivers/crypto/ux500/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) ST-Ericsson SA 2010
-# Author: Shujuan Chen (shujuan.chen@stericsson.com)
-#
-
-obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
deleted file mode 100644
index a8f088724772..000000000000
--- a/drivers/crypto/ux500/hash/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) ST-Ericsson SA 2010
-# Author: Shujuan Chen (shujuan.chen@stericsson.com)
-#
-ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_hash_core.o := -DDEBUG
-endif
-
-obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
-ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
deleted file mode 100644
index 7c9bcc15125f..000000000000
--- a/drivers/crypto/ux500/hash/hash_alg.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen (shujuan.chen@stericsson.com)
- * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
- * Author: Berne Hebark (berne.hebark@stericsson.com))
- */
-#ifndef _HASH_ALG_H
-#define _HASH_ALG_H
-
-#include <linux/bitops.h>
-
-#define HASH_BLOCK_SIZE 64
-#define HASH_DMA_FIFO 4
-#define HASH_DMA_ALIGN_SIZE 4
-#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
-#define HASH_BYTES_PER_WORD 4
-
-/* Maximum value of the length's high word */
-#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
-
-/* Power on Reset values HASH registers */
-#define HASH_RESET_CR_VALUE 0x0
-#define HASH_RESET_STR_VALUE 0x0
-
-/* Number of context swap registers */
-#define HASH_CSR_COUNT 52
-
-#define HASH_RESET_CSRX_REG_VALUE 0x0
-#define HASH_RESET_CSFULL_REG_VALUE 0x0
-#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
-
-#define HASH_RESET_INDEX_VAL 0x0
-#define HASH_RESET_BIT_INDEX_VAL 0x0
-#define HASH_RESET_BUFFER_VAL 0x0
-#define HASH_RESET_LEN_HIGH_VAL 0x0
-#define HASH_RESET_LEN_LOW_VAL 0x0
-
-/* Control register bitfields */
-#define HASH_CR_RESUME_MASK 0x11FCF
-
-#define HASH_CR_SWITCHON_POS 31
-#define HASH_CR_SWITCHON_MASK BIT(31)
-
-#define HASH_CR_EMPTYMSG_POS 20
-#define HASH_CR_EMPTYMSG_MASK BIT(20)
-
-#define HASH_CR_DINF_POS 12
-#define HASH_CR_DINF_MASK BIT(12)
-
-#define HASH_CR_NBW_POS 8
-#define HASH_CR_NBW_MASK 0x00000F00UL
-
-#define HASH_CR_LKEY_POS 16
-#define HASH_CR_LKEY_MASK BIT(16)
-
-#define HASH_CR_ALGO_POS 7
-#define HASH_CR_ALGO_MASK BIT(7)
-
-#define HASH_CR_MODE_POS 6
-#define HASH_CR_MODE_MASK BIT(6)
-
-#define HASH_CR_DATAFORM_POS 4
-#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
-
-#define HASH_CR_DMAE_POS 3
-#define HASH_CR_DMAE_MASK BIT(3)
-
-#define HASH_CR_INIT_POS 2
-#define HASH_CR_INIT_MASK BIT(2)
-
-#define HASH_CR_PRIVN_POS 1
-#define HASH_CR_PRIVN_MASK BIT(1)
-
-#define HASH_CR_SECN_POS 0
-#define HASH_CR_SECN_MASK BIT(0)
-
-/* Start register bitfields */
-#define HASH_STR_DCAL_POS 8
-#define HASH_STR_DCAL_MASK BIT(8)
-#define HASH_STR_DEFAULT 0x0
-
-#define HASH_STR_NBLW_POS 0
-#define HASH_STR_NBLW_MASK 0x0000001FUL
-
-#define HASH_NBLW_MAX_VAL 0x1F
-
-/* PrimeCell IDs */
-#define HASH_P_ID0 0xE0
-#define HASH_P_ID1 0x05
-#define HASH_P_ID2 0x38
-#define HASH_P_ID3 0x00
-#define HASH_CELL_ID0 0x0D
-#define HASH_CELL_ID1 0xF0
-#define HASH_CELL_ID2 0x05
-#define HASH_CELL_ID3 0xB1
-
-#define HASH_SET_BITS(reg_name, mask) \
- writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
-
-#define HASH_CLEAR_BITS(reg_name, mask) \
- writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
-
-#define HASH_PUT_BITS(reg, val, shift, mask) \
- writel_relaxed(((readl(reg) & ~(mask)) | \
- (((u32)val << shift) & (mask))), reg)
-
-#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len))
-
-#define HASH_INITIALIZE \
- HASH_PUT_BITS( \
- &device_data->base->cr, \
- 0x01, HASH_CR_INIT_POS, \
- HASH_CR_INIT_MASK)
-
-#define HASH_SET_DATA_FORMAT(data_format) \
- HASH_PUT_BITS( \
- &device_data->base->cr, \
- (u32) (data_format), HASH_CR_DATAFORM_POS, \
- HASH_CR_DATAFORM_MASK)
-#define HASH_SET_NBLW(val) \
- HASH_PUT_BITS( \
- &device_data->base->str, \
- (u32) (val), HASH_STR_NBLW_POS, \
- HASH_STR_NBLW_MASK)
-#define HASH_SET_DCAL \
- HASH_PUT_BITS( \
- &device_data->base->str, \
- 0x01, HASH_STR_DCAL_POS, \
- HASH_STR_DCAL_MASK)
-
-/* Hardware access method */
-enum hash_mode {
- HASH_MODE_CPU,
- HASH_MODE_DMA
-};
-
-/**
- * struct uint64 - Structure to handle 64 bits integers.
- * @high_word: Most significant bits.
- * @low_word: Least significant bits.
- *
- * Used to handle 64 bits integers.
- */
-struct uint64 {
- u32 high_word;
- u32 low_word;
-};
-
-/**
- * struct hash_register - Contains all registers in ux500 hash hardware.
- * @cr: HASH control register (0x000).
- * @din: HASH data input register (0x004).
- * @str: HASH start register (0x008).
- * @hx: HASH digest register 0..7 (0x00c-0x01C).
- * @padding0: Reserved (0x02C).
- * @itcr: Integration test control register (0x080).
- * @itip: Integration test input register (0x084).
- * @itop: Integration test output register (0x088).
- * @padding1: Reserved (0x08C).
- * @csfull: HASH context full register (0x0F8).
- * @csdatain: HASH context swap data input register (0x0FC).
- * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
- * @padding2: Reserved (0x1D0).
- * @periphid0: HASH peripheral identification register 0 (0xFE0).
- * @periphid1: HASH peripheral identification register 1 (0xFE4).
- * @periphid2: HASH peripheral identification register 2 (0xFE8).
- * @periphid3: HASH peripheral identification register 3 (0xFEC).
- * @cellid0: HASH PCell identification register 0 (0xFF0).
- * @cellid1: HASH PCell identification register 1 (0xFF4).
- * @cellid2: HASH PCell identification register 2 (0xFF8).
- * @cellid3: HASH PCell identification register 3 (0xFFC).
- *
- * The device communicates to the HASH via 32-bit-wide control registers
- * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure
- * with the registers used.
- */
-struct hash_register {
- u32 cr;
- u32 din;
- u32 str;
- u32 hx[8];
-
- u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
-
- u32 itcr;
- u32 itip;
- u32 itop;
-
- u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
-
- u32 csfull;
- u32 csdatain;
- u32 csrx[HASH_CSR_COUNT];
-
- u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
-
- u32 periphid0;
- u32 periphid1;
- u32 periphid2;
- u32 periphid3;
-
- u32 cellid0;
- u32 cellid1;
- u32 cellid2;
- u32 cellid3;
-};
-
-/**
- * struct hash_state - Hash context state.
- * @temp_cr: Temporary HASH Control Register.
- * @str_reg: HASH Start Register.
- * @din_reg: HASH Data Input Register.
- * @csr[52]: HASH Context Swap Registers 0-39.
- * @csfull: HASH Context Swap Registers 40 ie Status flags.
- * @csdatain: HASH Context Swap Registers 41 ie Input data.
- * @buffer: Working buffer for messages going to the hardware.
- * @length: Length of the part of message hashed so far (floor(N/64) * 64).
- * @index: Valid number of bytes in buffer (N % 64).
- * @bit_index: Valid number of bits in buffer (N % 8).
- *
- * This structure is used between context switches, i.e. when ongoing jobs are
- * interupted with new jobs. When this happens we need to store intermediate
- * results in software.
- *
- * WARNING: "index" is the member of the structure, to be sure that "buffer"
- * is aligned on a 4-bytes boundary. This is highly implementation dependent
- * and MUST be checked whenever this code is ported on new platforms.
- */
-struct hash_state {
- u32 temp_cr;
- u32 str_reg;
- u32 din_reg;
- u32 csr[52];
- u32 csfull;
- u32 csdatain;
- u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
- struct uint64 length;
- u8 index;
- u8 bit_index;
-};
-
-/**
- * enum hash_device_id - HASH device ID.
- * @HASH_DEVICE_ID_0: Hash hardware with ID 0
- * @HASH_DEVICE_ID_1: Hash hardware with ID 1
- */
-enum hash_device_id {
- HASH_DEVICE_ID_0 = 0,
- HASH_DEVICE_ID_1 = 1
-};
-
-/**
- * enum hash_data_format - HASH data format.
- * @HASH_DATA_32_BITS: 32 bits data format
- * @HASH_DATA_16_BITS: 16 bits data format
- * @HASH_DATA_8_BITS: 8 bits data format.
- * @HASH_DATA_1_BITS: 1 bit data format.
- */
-enum hash_data_format {
- HASH_DATA_32_BITS = 0x0,
- HASH_DATA_16_BITS = 0x1,
- HASH_DATA_8_BITS = 0x2,
- HASH_DATA_1_BIT = 0x3
-};
-
-/**
- * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm.
- * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
- * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used.
- */
-enum hash_algo {
- HASH_ALGO_SHA1 = 0x0,
- HASH_ALGO_SHA256 = 0x1
-};
-
-/**
- * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
- * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
- * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
- */
-enum hash_op {
- HASH_OPER_MODE_HASH = 0x0,
- HASH_OPER_MODE_HMAC = 0x1
-};
-
-/**
- * struct hash_config - Configuration data for the hardware.
- * @data_format: Format of data entered into the hash data in register.
- * @algorithm: Algorithm selection bit.
- * @oper_mode: Operating mode selection bit.
- */
-struct hash_config {
- int data_format;
- int algorithm;
- int oper_mode;
-};
-
-/**
- * struct hash_dma - Structure used for dma.
- * @mask: DMA capabilities bitmap mask.
- * @complete: Used to maintain state for a "completion".
- * @chan_mem2hash: DMA channel.
- * @cfg_mem2hash: DMA channel configuration.
- * @sg_len: Scatterlist length.
- * @sg: Scatterlist.
- * @nents: Number of sg entries.
- */
-struct hash_dma {
- dma_cap_mask_t mask;
- struct completion complete;
- struct dma_chan *chan_mem2hash;
- void *cfg_mem2hash;
- int sg_len;
- struct scatterlist *sg;
- int nents;
-};
-
-/**
- * struct hash_ctx - The context used for hash calculations.
- * @key: The key used in the operation.
- * @keylen: The length of the key.
- * @state: The state of the current calculations.
- * @config: The current configuration.
- * @digestsize: The size of current digest.
- * @device: Pointer to the device structure.
- */
-struct hash_ctx {
- u8 *key;
- u32 keylen;
- struct hash_config config;
- int digestsize;
- struct hash_device_data *device;
-};
-
-/**
- * struct hash_ctx - The request context used for hash calculations.
- * @state: The state of the current calculations.
- * @dma_mode: Used in special cases (workaround), e.g. need to change to
- * cpu mode, if not supported/working in dma mode.
- * @updated: Indicates if hardware is initialized for new operations.
- */
-struct hash_req_ctx {
- struct hash_state state;
- bool dma_mode;
- u8 updated;
-};
-
-/**
- * struct hash_device_data - structure for a hash device.
- * @base: Pointer to virtual base address of the hash device.
- * @phybase: Pointer to physical memory location of the hash device.
- * @list_node: For inclusion in klist.
- * @dev: Pointer to the device dev structure.
- * @ctx_lock: Spinlock for current_ctx.
- * @current_ctx: Pointer to the currently allocated context.
- * @power_state: TRUE = power state on, FALSE = power state off.
- * @power_state_lock: Spinlock for power_state.
- * @regulator: Pointer to the device's power control.
- * @clk: Pointer to the device's clock control.
- * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
- * @dma: Structure used for dma.
- */
-struct hash_device_data {
- struct hash_register __iomem *base;
- phys_addr_t phybase;
- struct klist_node list_node;
- struct device *dev;
- spinlock_t ctx_lock;
- struct hash_ctx *current_ctx;
- bool power_state;
- spinlock_t power_state_lock;
- struct regulator *regulator;
- struct clk *clk;
- bool restore_dev_state;
- struct hash_state state; /* Used for saving and resuming state */
- struct hash_dma dma;
-};
-
-int hash_check_hw(struct hash_device_data *device_data);
-
-int hash_setconfiguration(struct hash_device_data *device_data,
- struct hash_config *config);
-
-void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
-
-void hash_get_digest(struct hash_device_data *device_data,
- u8 *digest, int algorithm);
-
-int hash_hw_update(struct ahash_request *req);
-
-int hash_save_state(struct hash_device_data *device_data,
- struct hash_state *state);
-
-int hash_resume_state(struct hash_device_data *device_data,
- const struct hash_state *state);
-
-#endif
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
deleted file mode 100644
index f104e8a43036..000000000000
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ /dev/null
@@ -1,1966 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cryptographic API.
- * Support for Nomadik hardware crypto engine.
-
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
- */
-
-#define pr_fmt(fmt) "hashX hashX: " fmt
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/klist.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/crypto.h>
-
-#include <linux/regulator/consumer.h>
-#include <linux/dmaengine.h>
-#include <linux/bitops.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <linux/platform_data/crypto-ux500.h>
-
-#include "hash_alg.h"
-
-static int hash_mode;
-module_param(hash_mode, int, 0);
-MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
-
-/* HMAC-SHA1, no key */
-static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
- 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
- 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
- 0x70, 0x69, 0x0e, 0x1d
-};
-
-/* HMAC-SHA256, no key */
-static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
- 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
- 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
- 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
- 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
-};
-
-/**
- * struct hash_driver_data - data specific to the driver.
- *
- * @device_list: A list of registered devices to choose from.
- * @device_allocation: A semaphore initialized with number of devices.
- */
-struct hash_driver_data {
- struct klist device_list;
- struct semaphore device_allocation;
-};
-
-static struct hash_driver_data driver_data;
-
-/* Declaration of functions */
-/**
- * hash_messagepad - Pads a message and writes the nblw bits.
- * @device_data: Structure for the hash device.
- * @message: Last word of a message
- * @index_bytes: The number of bytes in the last message
- *
- * This function manages the final part of the digest calculation, when less
- * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
- *
- */
-static void hash_messagepad(struct hash_device_data *device_data,
- const u32 *message, u8 index_bytes);
-
-/**
- * release_hash_device - Releases a previously allocated hash device.
- * @device_data: Structure for the hash device.
- *
- */
-static void release_hash_device(struct hash_device_data *device_data)
-{
- spin_lock(&device_data->ctx_lock);
- device_data->current_ctx->device = NULL;
- device_data->current_ctx = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- /*
- * The down_interruptible part for this semaphore is called in
- * cryp_get_device_data.
- */
- up(&driver_data.device_allocation);
-}
-
-static void hash_dma_setup_channel(struct hash_device_data *device_data,
- struct device *dev)
-{
- struct hash_platform_data *platform_data = dev->platform_data;
- struct dma_slave_config conf = {
- .direction = DMA_MEM_TO_DEV,
- .dst_addr = device_data->phybase + HASH_DMA_FIFO,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
- .dst_maxburst = 16,
- };
-
- dma_cap_zero(device_data->dma.mask);
- dma_cap_set(DMA_SLAVE, device_data->dma.mask);
-
- device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
- device_data->dma.chan_mem2hash =
- dma_request_channel(device_data->dma.mask,
- platform_data->dma_filter,
- device_data->dma.cfg_mem2hash);
-
- dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
-
- init_completion(&device_data->dma.complete);
-}
-
-static void hash_dma_callback(void *data)
-{
- struct hash_ctx *ctx = data;
-
- complete(&ctx->device->dma.complete);
-}
-
-static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
- int len, enum dma_data_direction direction)
-{
- struct dma_async_tx_descriptor *desc = NULL;
- struct dma_chan *channel = NULL;
-
- if (direction != DMA_TO_DEVICE) {
- dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
- __func__);
- return -EFAULT;
- }
-
- sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
-
- channel = ctx->device->dma.chan_mem2hash;
- ctx->device->dma.sg = sg;
- ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
- ctx->device->dma.sg, ctx->device->dma.nents,
- direction);
-
- if (!ctx->device->dma.sg_len) {
- dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
- __func__);
- return -EFAULT;
- }
-
- dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
- __func__);
- desc = dmaengine_prep_slave_sg(channel,
- ctx->device->dma.sg, ctx->device->dma.sg_len,
- DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(ctx->device->dev,
- "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
- return -EFAULT;
- }
-
- desc->callback = hash_dma_callback;
- desc->callback_param = ctx;
-
- dmaengine_submit(desc);
- dma_async_issue_pending(channel);
-
- return 0;
-}
-
-static void hash_dma_done(struct hash_ctx *ctx)
-{
- struct dma_chan *chan;
-
- chan = ctx->device->dma.chan_mem2hash;
- dmaengine_terminate_all(chan);
- dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
- ctx->device->dma.nents, DMA_TO_DEVICE);
-}
-
-static int hash_dma_write(struct hash_ctx *ctx,
- struct scatterlist *sg, int len)
-{
- int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
- if (error) {
- dev_dbg(ctx->device->dev,
- "%s: hash_set_dma_transfer() failed\n", __func__);
- return error;
- }
-
- return len;
-}
-
-/**
- * get_empty_message_digest - Returns a pre-calculated digest for
- * the empty message.
- * @device_data: Structure for the hash device.
- * @zero_hash: Buffer to return the empty message digest.
- * @zero_hash_size: Hash size of the empty message digest.
- * @zero_digest: True if a zero-message digest was returned.
- */
-static int get_empty_message_digest(
- struct hash_device_data *device_data,
- u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
-{
- int ret = 0;
- struct hash_ctx *ctx = device_data->current_ctx;
- *zero_digest = false;
-
- /**
- * Caller responsible for ctx != NULL.
- */
-
- if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
- if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
- memcpy(zero_hash, &sha1_zero_message_hash[0],
- SHA1_DIGEST_SIZE);
- *zero_hash_size = SHA1_DIGEST_SIZE;
- *zero_digest = true;
- } else if (HASH_ALGO_SHA256 ==
- ctx->config.algorithm) {
- memcpy(zero_hash, &sha256_zero_message_hash[0],
- SHA256_DIGEST_SIZE);
- *zero_hash_size = SHA256_DIGEST_SIZE;
- *zero_digest = true;
- } else {
- dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
- } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
- if (!ctx->keylen) {
- if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
- memcpy(zero_hash, &zero_message_hmac_sha1[0],
- SHA1_DIGEST_SIZE);
- *zero_hash_size = SHA1_DIGEST_SIZE;
- *zero_digest = true;
- } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
- memcpy(zero_hash, &zero_message_hmac_sha256[0],
- SHA256_DIGEST_SIZE);
- *zero_hash_size = SHA256_DIGEST_SIZE;
- *zero_digest = true;
- } else {
- dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
- } else {
- dev_dbg(device_data->dev,
- "%s: Continue hash calculation, since hmac key available\n",
- __func__);
- }
- }
-out:
-
- return ret;
-}
-
-/**
- * hash_disable_power - Request to disable power and clock.
- * @device_data: Structure for the hash device.
- * @save_device_state: If true, saves the current hw state.
- *
- * This function requests disabling of the power (regulator) and clock,
- * and can also save the current hw state.
- */
-static int hash_disable_power(struct hash_device_data *device_data,
- bool save_device_state)
-{
- int ret = 0;
- struct device *dev = device_data->dev;
-
- spin_lock(&device_data->power_state_lock);
- if (!device_data->power_state)
- goto out;
-
- if (save_device_state) {
- hash_save_state(device_data,
- &device_data->state);
- device_data->restore_dev_state = true;
- }
-
- clk_disable(device_data->clk);
- ret = regulator_disable(device_data->regulator);
- if (ret)
- dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
-
- device_data->power_state = false;
-
-out:
- spin_unlock(&device_data->power_state_lock);
-
- return ret;
-}
-
-/**
- * hash_enable_power - Request to enable power and clock.
- * @device_data: Structure for the hash device.
- * @restore_device_state: If true, restores a previous saved hw state.
- *
- * This function requests enabling of the power (regulator) and clock,
- * and can also restore a previously saved hw state.
- */
-static int hash_enable_power(struct hash_device_data *device_data,
- bool restore_device_state)
-{
- int ret = 0;
- struct device *dev = device_data->dev;
-
- spin_lock(&device_data->power_state_lock);
- if (!device_data->power_state) {
- ret = regulator_enable(device_data->regulator);
- if (ret) {
- dev_err(dev, "%s: regulator_enable() failed!\n",
- __func__);
- goto out;
- }
- ret = clk_enable(device_data->clk);
- if (ret) {
- dev_err(dev, "%s: clk_enable() failed!\n", __func__);
- ret = regulator_disable(
- device_data->regulator);
- goto out;
- }
- device_data->power_state = true;
- }
-
- if (device_data->restore_dev_state) {
- if (restore_device_state) {
- device_data->restore_dev_state = false;
- hash_resume_state(device_data, &device_data->state);
- }
- }
-out:
- spin_unlock(&device_data->power_state_lock);
-
- return ret;
-}
-
-/**
- * hash_get_device_data - Checks for an available hash device and return it.
- * @ctx: Structure for the hash context.
- * @device_data: Structure for the hash device.
- *
- * This function checks for an available hash device and returns it to
- * the caller.
- * Note! The caller needs to release the device by calling up().
- */
-static int hash_get_device_data(struct hash_ctx *ctx,
- struct hash_device_data **device_data)
-{
- int ret;
- struct klist_iter device_iterator;
- struct klist_node *device_node;
- struct hash_device_data *local_device_data = NULL;
-
- /* Wait until a device is available */
- ret = down_interruptible(&driver_data.device_allocation);
- if (ret)
- return ret; /* Interrupted */
-
- /* Select a device */
- klist_iter_init(&driver_data.device_list, &device_iterator);
- device_node = klist_next(&device_iterator);
- while (device_node) {
- local_device_data = container_of(device_node,
- struct hash_device_data, list_node);
- spin_lock(&local_device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (local_device_data->current_ctx) {
- device_node = klist_next(&device_iterator);
- } else {
- local_device_data->current_ctx = ctx;
- ctx->device = local_device_data;
- spin_unlock(&local_device_data->ctx_lock);
- break;
- }
- spin_unlock(&local_device_data->ctx_lock);
- }
- klist_iter_exit(&device_iterator);
-
- if (!device_node) {
- /**
- * No free device found.
- * Since we allocated a device with down_interruptible, this
- * should not be able to happen.
- * Number of available devices, which are contained in
- * device_allocation, is therefore decremented by not doing
- * an up(device_allocation).
- */
- return -EBUSY;
- }
-
- *device_data = local_device_data;
-
- return 0;
-}
-
-/**
- * hash_hw_write_key - Writes the key to the hardware registers.
- *
- * @device_data: Structure for the hash device.
- * @key: Key to be written.
- * @keylen: The length of the key.
- *
- * Note! This function DOES NOT write to the NBLW register, even though
- * specified in the hw design spec. Either due to incorrect info in the
- * spec or due to a bug in the hw.
- */
-static void hash_hw_write_key(struct hash_device_data *device_data,
- const u8 *key, unsigned int keylen)
-{
- u32 word = 0;
- int nwords = 1;
-
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-
- while (keylen >= 4) {
- u32 *key_word = (u32 *)key;
-
- HASH_SET_DIN(key_word, nwords);
- keylen -= 4;
- key += 4;
- }
-
- /* Take care of the remaining bytes in the last word */
- if (keylen) {
- word = 0;
- while (keylen) {
- word |= (key[keylen - 1] << (8 * (keylen - 1)));
- keylen--;
- }
-
- HASH_SET_DIN(&word, nwords);
- }
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- HASH_SET_DCAL;
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-}
-
-/**
- * init_hash_hw - Initialise the hash hardware for a new calculation.
- * @device_data: Structure for the hash device.
- * @ctx: The hash context.
- *
- * This function will enable the bits needed to clear and start a new
- * calculation.
- */
-static int init_hash_hw(struct hash_device_data *device_data,
- struct hash_ctx *ctx)
-{
- int ret = 0;
-
- ret = hash_setconfiguration(device_data, &ctx->config);
- if (ret) {
- dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
- __func__);
- return ret;
- }
-
- hash_begin(device_data, ctx);
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
- hash_hw_write_key(device_data, ctx->key, ctx->keylen);
-
- return ret;
-}
-
-/**
- * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
- *
- * @sg: Scatterlist.
- * @size: Size in bytes.
- * @aligned: True if sg data aligned to work in DMA mode.
- *
- */
-static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
-{
- int nents = 0;
- bool aligned_data = true;
-
- while (size > 0 && sg) {
- nents++;
- size -= sg->length;
-
- /* hash_set_dma_transfer will align last nent */
- if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
- (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
- aligned_data = false;
-
- sg = sg_next(sg);
- }
-
- if (aligned)
- *aligned = aligned_data;
-
- if (size != 0)
- return -EFAULT;
-
- return nents;
-}
-
-/**
- * hash_dma_valid_data - checks for dma valid sg data.
- * @sg: Scatterlist.
- * @datasize: Datasize in bytes.
- *
- * NOTE! This function checks that the sg data is valid for dma, since dma
- * only accepts data sizes that are a multiple of the word size.
- */
-static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
-{
- bool aligned;
-
- /* Need to include at least one nent, else error */
- if (hash_get_nents(sg, datasize, &aligned) < 1)
- return false;
-
- return aligned;
-}
-
-/**
- * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- *
- * Initialize structures.
- */
-static int ux500_hash_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
- if (!ctx->key)
- ctx->keylen = 0;
-
- memset(&req_ctx->state, 0, sizeof(struct hash_state));
- req_ctx->updated = 0;
- if (hash_mode == HASH_MODE_DMA) {
- if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
- req_ctx->dma_mode = false; /* Don't use DMA */
-
- pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
- __func__, HASH_DMA_ALIGN_SIZE);
- } else {
- if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
- hash_dma_valid_data(req->src, req->nbytes)) {
- req_ctx->dma_mode = true;
- } else {
- req_ctx->dma_mode = false;
- pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
- __func__,
- HASH_DMA_PERFORMANCE_MIN_SIZE);
- }
- }
- }
- return 0;
-}
-
-/**
- * hash_processblock - This function processes a single block of 512 bits (64
- * bytes), word aligned, starting at message.
- * @device_data: Structure for the hash device.
- * @message: Block (512 bits) of message to be written to
- * the HASH hardware.
- * @length: Message length
- *
- */
-static void hash_processblock(struct hash_device_data *device_data,
- const u32 *message, int length)
-{
- int len = length / HASH_BYTES_PER_WORD;
- /*
- * NBLW bits. Reset the number of bits in last word (NBLW).
- */
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-
- /*
- * Write message data to the HASH_DIN register.
- */
- HASH_SET_DIN(message, len);
-}
-
-/**
- * hash_messagepad - Pads a message and writes the nblw bits.
- * @device_data: Structure for the hash device.
- * @message: Last word of a message.
- * @index_bytes: The number of bytes in the last message.
- *
- * This function manages the final part of the digest calculation, when less
- * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
- *
- */
-static void hash_messagepad(struct hash_device_data *device_data,
- const u32 *message, u8 index_bytes)
-{
- int nwords = 1;
-
- /*
- * Clear hash str register, only clear NBLW
- * since DCAL will be reset by hardware.
- */
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-
- /* Main loop */
- while (index_bytes >= 4) {
- HASH_SET_DIN(message, nwords);
- index_bytes -= 4;
- message++;
- }
-
- if (index_bytes)
- HASH_SET_DIN(message, nwords);
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
- HASH_SET_NBLW(index_bytes * 8);
- dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
- __func__, readl_relaxed(&device_data->base->din),
- readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
- HASH_SET_DCAL;
- dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
- __func__, readl_relaxed(&device_data->base->din),
- readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-}
-
-/**
- * hash_incrementlength - Increments the length of the current message.
- * @ctx: Hash context
- * @incr: Length of message processed already
- *
- * Overflow cannot occur, because conditions for overflow are checked in
- * hash_hw_update.
- */
-static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
-{
- ctx->state.length.low_word += incr;
-
- /* Check for wrap-around */
- if (ctx->state.length.low_word < incr)
- ctx->state.length.high_word++;
-}
-
-/**
- * hash_setconfiguration - Sets the required configuration for the hash
- * hardware.
- * @device_data: Structure for the hash device.
- * @config: Pointer to a configuration structure.
- */
-int hash_setconfiguration(struct hash_device_data *device_data,
- struct hash_config *config)
-{
- int ret = 0;
-
- if (config->algorithm != HASH_ALGO_SHA1 &&
- config->algorithm != HASH_ALGO_SHA256)
- return -EPERM;
-
- /*
- * DATAFORM bits. Set the format of the data to be written to HASH_DIN
- * (this driver configures HASH_DATA_8_BITS in all its init paths).
- */
- HASH_SET_DATA_FORMAT(config->data_format);
-
- /*
- * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
- */
- switch (config->algorithm) {
- case HASH_ALGO_SHA1:
- HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
- break;
-
- case HASH_ALGO_SHA256:
- HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
- break;
-
- default:
- dev_err(device_data->dev, "%s: Incorrect algorithm\n",
- __func__);
- return -EPERM;
- }
-
- /*
- * MODE bit. This bit selects between HASH or HMAC mode for the
- * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
- */
- if (HASH_OPER_MODE_HASH == config->oper_mode)
- HASH_CLEAR_BITS(&device_data->base->cr,
- HASH_CR_MODE_MASK);
- else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
- HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
- if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
- /* Truncate key to blocksize */
- dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
- HASH_SET_BITS(&device_data->base->cr,
- HASH_CR_LKEY_MASK);
- } else {
- dev_dbg(device_data->dev, "%s: LKEY cleared\n",
- __func__);
- HASH_CLEAR_BITS(&device_data->base->cr,
- HASH_CR_LKEY_MASK);
- }
- } else { /* Wrong hash mode */
- ret = -EPERM;
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- }
- return ret;
-}
-
-/**
- * hash_begin - This routine resets some globals and initializes the hash
- * hardware.
- * @device_data: Structure for the hash device.
- * @ctx: Hash context.
- */
-void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
-{
- /* HW and SW initializations */
- /* Note: there is no need to initialize buffer and digest members */
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- /*
- * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
- * prepare it to compute the message digest of a new message.
- */
- HASH_INITIALIZE;
-
- /*
- * NBLW bits. Reset the number of bits in last word (NBLW).
- */
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-}
-
-static int hash_process_data(struct hash_device_data *device_data,
- struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
- int msg_length, u8 *data_buffer, u8 *buffer,
- u8 *index)
-{
- int ret = 0;
- u32 count;
-
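- /* Accumulate input in the block buffer; once a full 512-bit block is available it is fed to the hardware */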
- do {
- if ((*index + msg_length) < HASH_BLOCK_SIZE) {
- for (count = 0; count < msg_length; count++) {
- buffer[*index + count] =
- *(data_buffer + count);
- }
- *index += msg_length;
- msg_length = 0;
- } else {
- if (req_ctx->updated) {
- ret = hash_resume_state(device_data,
- &device_data->state);
- memmove(req_ctx->state.buffer,
- device_data->state.buffer,
- HASH_BLOCK_SIZE);
- if (ret) {
- dev_err(device_data->dev,
- "%s: hash_resume_state() failed!\n",
- __func__);
- goto out;
- }
- } else {
- ret = init_hash_hw(device_data, ctx);
- if (ret) {
- dev_err(device_data->dev,
- "%s: init_hash_hw() failed!\n",
- __func__);
- goto out;
- }
- req_ctx->updated = 1;
- }
- /*
- * If 'data_buffer' is four byte aligned and
- * local buffer does not have any data, we can
- * write data directly from 'data_buffer' to
- * HW peripheral, otherwise we first copy data
- * to a local buffer
- */
- if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
- (0 == *index))
- hash_processblock(device_data,
- (const u32 *)data_buffer,
- HASH_BLOCK_SIZE);
- else {
- for (count = 0;
- count < (u32)(HASH_BLOCK_SIZE - *index);
- count++) {
- buffer[*index + count] =
- *(data_buffer + count);
- }
- hash_processblock(device_data,
- (const u32 *)buffer,
- HASH_BLOCK_SIZE);
- }
- hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
- data_buffer += (HASH_BLOCK_SIZE - *index);
-
- msg_length -= (HASH_BLOCK_SIZE - *index);
- *index = 0;
-
- ret = hash_save_state(device_data,
- &device_data->state);
-
- memmove(device_data->state.buffer,
- req_ctx->state.buffer,
- HASH_BLOCK_SIZE);
- if (ret) {
- dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
- __func__);
- goto out;
- }
- }
- } while (msg_length != 0);
-out:
-
- return ret;
-}
-
-/**
- * hash_dma_final - The hash dma final function for SHA1/SHA256.
- * @req: The hash request for the job.
- */
-static int hash_dma_final(struct ahash_request *req)
-{
- int ret = 0;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct hash_device_data *device_data;
- u8 digest[SHA256_DIGEST_SIZE];
- int bytes_written = 0;
-
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
- (unsigned long)ctx);
-
- if (req_ctx->updated) {
- ret = hash_resume_state(device_data, &device_data->state);
-
- if (ret) {
- dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
- __func__);
- goto out;
- }
- } else {
- ret = hash_setconfiguration(device_data, &ctx->config);
- if (ret) {
- dev_err(device_data->dev,
- "%s: hash_setconfiguration() failed!\n",
- __func__);
- goto out;
- }
-
- /* Enable DMA input */
- if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
- HASH_CLEAR_BITS(&device_data->base->cr,
- HASH_CR_DMAE_MASK);
- } else {
- HASH_SET_BITS(&device_data->base->cr,
- HASH_CR_DMAE_MASK);
- HASH_SET_BITS(&device_data->base->cr,
- HASH_CR_PRIVN_MASK);
- }
-
- HASH_INITIALIZE;
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
- hash_hw_write_key(device_data, ctx->key, ctx->keylen);
-
- /* Number of bits in last word = (nbytes * 8) % 32 */
- HASH_SET_NBLW((req->nbytes * 8) % 32);
- req_ctx->updated = 1;
- }
-
- /* Store the nents in the dma struct. */
- ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
- if (!ctx->device->dma.nents) {
- dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
- __func__);
- ret = ctx->device->dma.nents;
- goto out;
- }
-
- bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
- if (bytes_written != req->nbytes) {
- dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
- __func__);
- ret = bytes_written;
- goto out;
- }
-
- wait_for_completion(&ctx->device->dma.complete);
- hash_dma_done(ctx);
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
- unsigned int keylen = ctx->keylen;
- u8 *key = ctx->key;
-
- dev_dbg(device_data->dev, "%s: keylen: %d\n",
- __func__, ctx->keylen);
- hash_hw_write_key(device_data, key, keylen);
- }
-
- hash_get_digest(device_data, digest, ctx->config.algorithm);
- memcpy(req->result, digest, ctx->digestsize);
-
-out:
- release_hash_device(device_data);
-
- /**
- * Allocated in setkey, and only used in HMAC.
- */
- kfree(ctx->key);
-
- return ret;
-}
-
-/**
- * hash_hw_final - The final hash calculation function
- * @req: The hash request for the job.
- */
-static int hash_hw_final(struct ahash_request *req)
-{
- int ret = 0;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct hash_device_data *device_data;
- u8 digest[SHA256_DIGEST_SIZE];
-
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
- (unsigned long)ctx);
-
- if (req_ctx->updated) {
- ret = hash_resume_state(device_data, &device_data->state);
-
- if (ret) {
- dev_err(device_data->dev,
- "%s: hash_resume_state() failed!\n", __func__);
- goto out;
- }
- } else if (req->nbytes == 0 && ctx->keylen == 0) {
- u8 zero_hash[SHA256_DIGEST_SIZE];
- u32 zero_hash_size = 0;
- bool zero_digest = false;
- /**
- * Use a pre-calculated empty message digest
- * (workaround since hw returns zeroes, hw bug!?)
- */
- ret = get_empty_message_digest(device_data, &zero_hash[0],
- &zero_hash_size, &zero_digest);
- if (!ret && likely(zero_hash_size == ctx->digestsize) &&
- zero_digest) {
- memcpy(req->result, &zero_hash[0], ctx->digestsize);
- goto out;
- } else if (!ret && !zero_digest) {
- dev_dbg(device_data->dev,
- "%s: HMAC zero msg with key, continue...\n",
- __func__);
- } else {
- dev_err(device_data->dev,
- "%s: ret=%d, or wrong digest size? %s\n",
- __func__, ret,
- zero_hash_size == ctx->digestsize ?
- "true" : "false");
- /* Return error */
- goto out;
- }
- } else if (req->nbytes == 0 && ctx->keylen > 0) {
- ret = -EPERM;
- dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
- __func__);
- goto out;
- }
-
- if (!req_ctx->updated) {
- ret = init_hash_hw(device_data, ctx);
- if (ret) {
- dev_err(device_data->dev,
- "%s: init_hash_hw() failed!\n", __func__);
- goto out;
- }
- }
-
- if (req_ctx->state.index) {
- hash_messagepad(device_data, req_ctx->state.buffer,
- req_ctx->state.index);
- } else {
- HASH_SET_DCAL;
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
- }
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
- unsigned int keylen = ctx->keylen;
- u8 *key = ctx->key;
-
- dev_dbg(device_data->dev, "%s: keylen: %d\n",
- __func__, ctx->keylen);
- hash_hw_write_key(device_data, key, keylen);
- }
-
- hash_get_digest(device_data, digest, ctx->config.algorithm);
- memcpy(req->result, digest, ctx->digestsize);
-
-out:
- release_hash_device(device_data);
-
- /**
- * Allocated in setkey, and only used in HMAC.
- */
- kfree(ctx->key);
-
- return ret;
-}
-
-/**
- * hash_hw_update - Updates current HASH computation hashing another part of
- * the message.
- * @req: The hash request containing the message to be hashed
- * (caller allocated).
- */
-int hash_hw_update(struct ahash_request *req)
-{
- int ret = 0;
- u8 index = 0;
- u8 *buffer;
- struct hash_device_data *device_data;
- u8 *data_buffer;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_hash_walk walk;
- int msg_length;
-
- index = req_ctx->state.index;
- buffer = (u8 *)req_ctx->state.buffer;
-
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- msg_length = crypto_hash_walk_first(req, &walk);
-
- /* Empty message ("") is correct indata */
- if (msg_length == 0) {
- ret = 0;
- goto release_dev;
- }
-
- /* Check if ctx->state.length + msg_length
- overflows */
- if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
- HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
- pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
- ret = crypto_hash_walk_done(&walk, -EPERM);
- goto release_dev;
- }
-
- /* Main loop */
- while (0 != msg_length) {
- data_buffer = walk.data;
- ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
- data_buffer, buffer, &index);
-
- if (ret) {
- dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
- __func__);
- crypto_hash_walk_done(&walk, ret);
- goto release_dev;
- }
-
- msg_length = crypto_hash_walk_done(&walk, 0);
- }
-
- req_ctx->state.index = index;
- dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
- __func__, req_ctx->state.index, req_ctx->state.bit_index);
-
-release_dev:
- release_hash_device(device_data);
-
- return ret;
-}
-
-/**
- * hash_resume_state - Function that resumes the state of a calculation.
- * @device_data: Pointer to the device structure.
- * @device_state: The state to be restored in the hash hardware
- */
-int hash_resume_state(struct hash_device_data *device_data,
- const struct hash_state *device_state)
-{
- u32 temp_cr;
- s32 count;
- int hash_mode = HASH_OPER_MODE_HASH;
-
- if (NULL == device_state) {
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- return -EPERM;
- }
-
- /* Check correctness of index and length members */
- if (device_state->index > HASH_BLOCK_SIZE ||
- (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- return -EPERM;
- }
-
- /*
- * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
- * prepare it to compute the message digest of a new message.
- */
- HASH_INITIALIZE;
-
- temp_cr = device_state->temp_cr;
- writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
-
- if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
- hash_mode = HASH_OPER_MODE_HMAC;
- else
- hash_mode = HASH_OPER_MODE_HASH;
-
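- /* Context switch registers from index 36 upwards are only used for HMAC, so skip them in plain HASH mode */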
- for (count = 0; count < HASH_CSR_COUNT; count++) {
- if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
- break;
-
- writel_relaxed(device_state->csr[count],
- &device_data->base->csrx[count]);
- }
-
- writel_relaxed(device_state->csfull, &device_data->base->csfull);
- writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
-
- writel_relaxed(device_state->str_reg, &device_data->base->str);
- writel_relaxed(temp_cr, &device_data->base->cr);
-
- return 0;
-}
-
-/**
- * hash_save_state - Function that saves the state of hardware.
- * @device_data: Pointer to the device structure.
- * @device_state: The structure where the hardware state should be saved.
- */
-int hash_save_state(struct hash_device_data *device_data,
- struct hash_state *device_state)
-{
- u32 temp_cr;
- u32 count;
- int hash_mode = HASH_OPER_MODE_HASH;
-
- if (NULL == device_state) {
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- return -ENOTSUPP;
- }
-
- /* Write dummy value to force digest intermediate calculation. This
- * actually makes sure that there isn't any ongoing calculation in the
- * hardware.
- */
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- temp_cr = readl_relaxed(&device_data->base->cr);
-
- device_state->str_reg = readl_relaxed(&device_data->base->str);
-
- device_state->din_reg = readl_relaxed(&device_data->base->din);
-
- if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
- hash_mode = HASH_OPER_MODE_HMAC;
- else
- hash_mode = HASH_OPER_MODE_HASH;
-
- for (count = 0; count < HASH_CSR_COUNT; count++) {
- if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
- break;
-
- device_state->csr[count] =
- readl_relaxed(&device_data->base->csrx[count]);
- }
-
- device_state->csfull = readl_relaxed(&device_data->base->csfull);
- device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
-
- device_state->temp_cr = temp_cr;
-
- return 0;
-}
-
-/**
- * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
- * @device_data: Structure for the hash device.
- *
- */
-int hash_check_hw(struct hash_device_data *device_data)
-{
- /* Checking Peripheral Ids */
- if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
- HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
- HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
- HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
- HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
- HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
- HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
- HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
- return 0;
- }
-
- dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
- return -ENOTSUPP;
-}
-
-/**
- * hash_get_digest - Gets the digest.
- * @device_data: Pointer to the device structure.
- * @digest: User allocated byte array for the calculated digest.
- * @algorithm: The algorithm in use.
- */
-void hash_get_digest(struct hash_device_data *device_data,
- u8 *digest, int algorithm)
-{
- u32 temp_hx_val, count;
- int loop_ctr;
-
- if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
- dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
- __func__, algorithm);
- return;
- }
-
- if (algorithm == HASH_ALGO_SHA1)
- loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
- else
- loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
-
- dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
- __func__, (unsigned long)digest);
-
- /* Copy result into digest array */
- for (count = 0; count < loop_ctr; count++) {
- temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
- digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
- digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
- digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
- digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
- }
-}
-
-/**
- * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- */
-static int ahash_update(struct ahash_request *req)
-{
- int ret = 0;
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
- if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
- ret = hash_hw_update(req);
- /* Skip update for DMA, all data will be passed to DMA in final */
-
- if (ret) {
- pr_err("%s: hash_hw_update() failed!\n", __func__);
- }
-
- return ret;
-}
-
-/**
- * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- */
-static int ahash_final(struct ahash_request *req)
-{
- int ret = 0;
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
- pr_debug("%s: data size: %d\n", __func__, req->nbytes);
-
- if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
- ret = hash_dma_final(req);
- else
- ret = hash_hw_final(req);
-
- if (ret) {
- pr_err("%s: hash_hw/dma_final() failed\n", __func__);
- }
-
- return ret;
-}
-
-static int hash_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen, int alg)
-{
- int ret = 0;
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- /**
- * Freed in final.
- */
- ctx->key = kmemdup(key, keylen, GFP_KERNEL);
- if (!ctx->key) {
- pr_err("%s: Failed to allocate ctx->key for %d\n",
- __func__, alg);
- return -ENOMEM;
- }
- ctx->keylen = keylen;
-
- return ret;
-}
-
-static int ahash_sha1_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA1;
- ctx->config.oper_mode = HASH_OPER_MODE_HASH;
- ctx->digestsize = SHA1_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int ahash_sha256_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA256;
- ctx->config.oper_mode = HASH_OPER_MODE_HASH;
- ctx->digestsize = SHA256_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int ahash_sha1_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = ahash_sha1_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int ahash_sha256_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = ahash_sha256_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int ahash_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int ahash_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int hmac_sha1_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA1;
- ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
- ctx->digestsize = SHA1_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int hmac_sha256_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA256;
- ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
- ctx->digestsize = SHA256_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int hmac_sha1_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = hmac_sha1_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int hmac_sha256_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = hmac_sha256_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int hmac_sha1_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen)
-{
- return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
-}
-
-static int hmac_sha256_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen)
-{
- return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
-}
-
-struct hash_algo_template {
- struct hash_config conf;
- struct ahash_alg hash;
-};
-
-static int hash_cra_init(struct crypto_tfm *tfm)
-{
- struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct hash_algo_template *hash_alg;
-
- hash_alg = container_of(__crypto_ahash_alg(alg),
- struct hash_algo_template,
- hash);
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct hash_req_ctx));
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = hash_alg->conf.algorithm;
- ctx->config.oper_mode = hash_alg->conf.oper_mode;
-
- ctx->digestsize = hash_alg->hash.halg.digestsize;
-
- return 0;
-}
-
-static struct hash_algo_template hash_algs[] = {
- {
- .conf.algorithm = HASH_ALGO_SHA1,
- .conf.oper_mode = HASH_OPER_MODE_HASH,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = ahash_sha1_digest,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- },
- {
- .conf.algorithm = HASH_ALGO_SHA256,
- .conf.oper_mode = HASH_OPER_MODE_HASH,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = ahash_sha256_digest,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- },
- {
- .conf.algorithm = HASH_ALGO_SHA1,
- .conf.oper_mode = HASH_OPER_MODE_HMAC,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = hmac_sha1_digest,
- .setkey = hmac_sha1_setkey,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac-sha1-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- },
- {
- .conf.algorithm = HASH_ALGO_SHA256,
- .conf.oper_mode = HASH_OPER_MODE_HMAC,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = hmac_sha256_digest,
- .setkey = hmac_sha256_setkey,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac-sha256-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- }
-};
-
-static int ahash_algs_register_all(struct hash_device_data *device_data)
-{
- int ret;
- int i;
- int count;
-
- for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
- ret = crypto_register_ahash(&hash_algs[i].hash);
- if (ret) {
- count = i;
- dev_err(device_data->dev, "%s: alg registration failed\n",
- hash_algs[i].hash.halg.base.cra_driver_name);
- goto unreg;
- }
- }
- return 0;
-unreg:
- for (i = 0; i < count; i++)
- crypto_unregister_ahash(&hash_algs[i].hash);
- return ret;
-}
-
-static void ahash_algs_unregister_all(struct hash_device_data *device_data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
- crypto_unregister_ahash(&hash_algs[i].hash);
-}
-
-/**
- * ux500_hash_probe - Function that probes the hash hardware.
- * @pdev: The platform device.
- */
-static int ux500_hash_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res = NULL;
- struct hash_device_data *device_data;
- struct device *dev = &pdev->dev;
-
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
- if (!device_data) {
- ret = -ENOMEM;
- goto out;
- }
-
- device_data->dev = dev;
- device_data->current_ctx = NULL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
- ret = -ENODEV;
- goto out;
- }
-
- device_data->phybase = res->start;
- device_data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(device_data->base)) {
- ret = PTR_ERR(device_data->base);
- goto out;
- }
- spin_lock_init(&device_data->ctx_lock);
- spin_lock_init(&device_data->power_state_lock);
-
- /* Enable power for HASH1 hardware block */
- device_data->regulator = regulator_get(dev, "v-ape");
- if (IS_ERR(device_data->regulator)) {
- dev_err(dev, "%s: regulator_get() failed!\n", __func__);
- ret = PTR_ERR(device_data->regulator);
- device_data->regulator = NULL;
- goto out;
- }
-
- /* Enable the clock for HASH1 hardware block */
- device_data->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(device_data->clk)) {
- dev_err(dev, "%s: clk_get() failed!\n", __func__);
- ret = PTR_ERR(device_data->clk);
- goto out_regulator;
- }
-
- ret = clk_prepare(device_data->clk);
- if (ret) {
- dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
- goto out_regulator;
- }
-
- /* Enable device power (and clock) */
- ret = hash_enable_power(device_data, false);
- if (ret) {
- dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
- goto out_clk_unprepare;
- }
-
- ret = hash_check_hw(device_data);
- if (ret) {
- dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
- goto out_power;
- }
-
- if (hash_mode == HASH_MODE_DMA)
- hash_dma_setup_channel(device_data, dev);
-
- platform_set_drvdata(pdev, device_data);
-
- /* Put the new device into the device list... */
- klist_add_tail(&device_data->list_node, &driver_data.device_list);
- /* ... and signal that a new device is available. */
- up(&driver_data.device_allocation);
-
- ret = ahash_algs_register_all(device_data);
- if (ret) {
- dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
- __func__);
- goto out_power;
- }
-
- dev_info(dev, "successfully registered\n");
- return 0;
-
-out_power:
- hash_disable_power(device_data, false);
-
-out_clk_unprepare:
- clk_unprepare(device_data->clk);
-
-out_regulator:
- regulator_put(device_data->regulator);
-
-out:
- return ret;
-}
-
-/**
- * ux500_hash_remove - Function that removes the hash device from the platform.
- * @pdev: The platform device.
- */
-static int ux500_hash_remove(struct platform_device *pdev)
-{
- struct hash_device_data *device_data;
- struct device *dev = &pdev->dev;
-
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
- return -ENOMEM;
- }
-
- /* Try to decrease the number of available devices. */
- if (down_trylock(&driver_data.device_allocation))
- return -EBUSY;
-
- /* Check that the device is free */
- spin_lock(&device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (device_data->current_ctx) {
- /* The device is busy */
- spin_unlock(&device_data->ctx_lock);
- /* Return the device to the pool. */
- up(&driver_data.device_allocation);
- return -EBUSY;
- }
-
- spin_unlock(&device_data->ctx_lock);
-
- /* Remove the device from the list */
- if (klist_node_attached(&device_data->list_node))
- klist_remove(&device_data->list_node);
-
- /* If this was the last device, remove the services */
- if (list_empty(&driver_data.device_list.k_list))
- ahash_algs_unregister_all(device_data);
-
- if (hash_disable_power(device_data, false))
- dev_err(dev, "%s: hash_disable_power() failed\n",
- __func__);
-
- clk_unprepare(device_data->clk);
- regulator_put(device_data->regulator);
-
- return 0;
-}
-
-/**
- * ux500_hash_shutdown - Function that shuts down the hash device.
- * @pdev: The platform device
- */
-static void ux500_hash_shutdown(struct platform_device *pdev)
-{
- struct hash_device_data *device_data;
-
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
- __func__);
- return;
- }
-
- /* Check that the device is free */
- spin_lock(&device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (!device_data->current_ctx) {
- if (down_trylock(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
- __func__);
- /**
- * (Allocate the device)
- * Need to set this to a non-null (dummy) value,
- * to avoid usage in case of context switching.
- */
- device_data->current_ctx++;
- }
- spin_unlock(&device_data->ctx_lock);
-
- /* Remove the device from the list */
- if (klist_node_attached(&device_data->list_node))
- klist_remove(&device_data->list_node);
-
- /* If this was the last device, remove the services */
- if (list_empty(&driver_data.device_list.k_list))
- ahash_algs_unregister_all(device_data);
-
- if (hash_disable_power(device_data, false))
- dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
- __func__);
-}
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * ux500_hash_suspend - Function that suspends the hash device.
- * @dev: Device to suspend.
- */
-static int ux500_hash_suspend(struct device *dev)
-{
- int ret;
- struct hash_device_data *device_data;
- struct hash_ctx *temp_ctx = NULL;
-
- device_data = dev_get_drvdata(dev);
- if (!device_data) {
- dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
- return -ENOMEM;
- }
-
- spin_lock(&device_data->ctx_lock);
- if (!device_data->current_ctx)
- device_data->current_ctx++;
- spin_unlock(&device_data->ctx_lock);
-
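- /* current_ctx == NULL + 1 is the dummy value set just above, i.e. no real context owns the device and no hw state needs saving */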
- if (device_data->current_ctx == ++temp_ctx) {
- if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(dev, "%s: down_interruptible() failed\n",
- __func__);
- ret = hash_disable_power(device_data, false);
-
- } else {
- ret = hash_disable_power(device_data, true);
- }
-
- if (ret)
- dev_err(dev, "%s: hash_disable_power()\n", __func__);
-
- return ret;
-}
-
-/**
- * ux500_hash_resume - Function that resumes the hash device.
- * @dev: Device to resume.
- */
-static int ux500_hash_resume(struct device *dev)
-{
- int ret = 0;
- struct hash_device_data *device_data;
- struct hash_ctx *temp_ctx = NULL;
-
- device_data = dev_get_drvdata(dev);
- if (!device_data) {
- dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
- return -ENOMEM;
- }
-
- spin_lock(&device_data->ctx_lock);
- if (device_data->current_ctx == ++temp_ctx)
- device_data->current_ctx = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- if (!device_data->current_ctx)
- up(&driver_data.device_allocation);
- else
- ret = hash_enable_power(device_data, true);
-
- if (ret)
- dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
-
- return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
-
-static const struct of_device_id ux500_hash_match[] = {
- { .compatible = "stericsson,ux500-hash" },
- { },
-};
-MODULE_DEVICE_TABLE(of, ux500_hash_match);
-
-static struct platform_driver hash_driver = {
- .probe = ux500_hash_probe,
- .remove = ux500_hash_remove,
- .shutdown = ux500_hash_shutdown,
- .driver = {
- .name = "hash1",
- .of_match_table = ux500_hash_match,
- .pm = &ux500_hash_pm,
- }
-};
-
-/**
- * ux500_hash_mod_init - The kernel module init function.
- */
-static int __init ux500_hash_mod_init(void)
-{
- klist_init(&driver_data.device_list, NULL, NULL);
- /* Initialize the semaphore to 0 devices (locked state) */
- sema_init(&driver_data.device_allocation, 0);
-
- return platform_driver_register(&hash_driver);
-}
-
-/**
- * ux500_hash_mod_fini - The kernel module exit function.
- */
-static void __exit ux500_hash_mod_fini(void)
-{
- platform_driver_unregister(&hash_driver);
-}
-
-module_init(ux500_hash_mod_init);
-module_exit(ux500_hash_mod_fini);
-
-MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("sha1-all");
-MODULE_ALIAS_CRYPTO("sha256-all");
-MODULE_ALIAS_CRYPTO("hmac-sha1-all");
-MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index b2979be613b8..6963344f6a3a 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -116,7 +116,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
struct virtio_crypto_session_input *input;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
- pkey = kmemdup(key, keylen, GFP_ATOMIC);
+ pkey = kmemdup(key, keylen, GFP_KERNEL);
if (!pkey)
return -ENOMEM;
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index e553ccadbcbc..e5876286828b 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
ctrl_status->status, destroy_session->session_id);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
err = 0;
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 0ac53c422c31..ff4e78117b31 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -104,19 +104,29 @@ config CXL_SUSPEND
depends on SUSPEND && CXL_MEM
config CXL_REGION
- bool
+ bool "CXL: Region Support"
default CXL_BUS
# For MAX_PHYSMEM_BITS
depends on SPARSEMEM
select MEMREGION
select GET_FREE_REGION
+ help
+ Enable the CXL core to enumerate and provision CXL regions. A CXL
+ region is defined by one or more CXL expanders that decode a given
+ system-physical address range. For CXL regions established by
+ platform-firmware this option enables memory error handling to
+ identify the devices participating in a given interleaved memory
+ range. Otherwise, platform-firmware managed CXL is enabled by being
+ placed in the system address map and does not need a driver.
+
+ If unsure say 'y'
config CXL_REGION_INVALIDATION_TEST
bool "CXL: Region Cache Management Bypass (TEST)"
depends on CXL_REGION
help
CXL Region management and security operations potentially invalidate
- the content of CPU caches without notifiying those caches to
+ the content of CPU caches without notifying those caches to
invalidate the affected cachelines. The CXL Region driver attempts
to invalidate caches when those events occur. If that invalidation
fails the region will fail to enable. Reasons for cache
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index ad0849af42d7..7e1765b09e04 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -19,7 +19,7 @@ struct cxl_cxims_data {
/*
* Find a targets entry (n) in the host bridge interleave list.
- * CXL Specfication 3.0 Table 9-22
+ * CXL Specification 3.0 Table 9-22
*/
static int cxl_xor_calc_n(u64 hpa, struct cxl_cxims_data *cximsd, int iw,
int ig)
@@ -731,9 +731,9 @@ static void __exit cxl_acpi_exit(void)
cxl_bus_drain();
}
-module_init(cxl_acpi_init);
+/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
+subsys_initcall(cxl_acpi_init);
module_exit(cxl_acpi_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);
-MODULE_SOFTDEP("pre: cxl_pmem");
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 79c7257f4107..ca4ae31d8f57 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -3,6 +3,8 @@ obj-$(CONFIG_CXL_BUS) += cxl_core.o
obj-$(CONFIG_CXL_SUSPEND) += suspend.o
ccflags-y += -I$(srctree)/drivers/cxl
+CFLAGS_trace.o = -DTRACE_INCLUDE_PATH=. -I$(src)
+
cxl_core-y := port.o
cxl_core-y += pmem.o
cxl_core-y += regs.o
@@ -10,4 +12,5 @@ cxl_core-y += memdev.o
cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
+cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 8c04672dca56..27f0968449de 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -11,18 +11,26 @@ extern struct attribute_group cxl_base_attribute_group;
#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
+extern struct device_attribute dev_attr_create_ram_region;
extern struct device_attribute dev_attr_delete_region;
extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
+extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
+#define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type)
int cxl_region_init(void);
void cxl_region_exit(void);
+int cxl_get_poison_by_endpoint(struct cxl_port *port);
#else
+static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
+{
+ return 0;
+}
static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
}
@@ -37,6 +45,7 @@ static inline void cxl_region_exit(void)
#define CXL_REGION_TYPE(x) NULL
#define SET_CXL_REGION_ATTR(x)
#define CXL_PMEM_REGION_TYPE(x) NULL
+#define CXL_DAX_REGION_TYPE(x) NULL
#endif
struct cxl_send_command;
@@ -56,11 +65,14 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
extern struct rw_semaphore cxl_dpa_rwsem;
-bool is_switch_decoder(struct device *dev);
-struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
-
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);
+enum cxl_poison_trace_type {
+ CXL_POISON_TRACE_LIST,
+ CXL_POISON_TRACE_INJECT,
+ CXL_POISON_TRACE_CLEAR,
+};
+
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index dcc16d7cb8f3..7889ff203a34 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -93,19 +92,66 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
cxl_probe_component_regs(&port->dev, crb, &map.component_map);
if (!map.component_map.hdm_decoder.valid) {
- dev_err(&port->dev, "HDM decoder registers invalid\n");
- return -ENXIO;
+ dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+ /* unique error code to indicate no HDM decoder capability */
+ return -ENODEV;
}
return cxl_map_component_regs(&port->dev, regs, &map,
BIT(CXL_CM_CAP_CAP_ID_HDM));
}
+static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
+{
+ struct cxl_hdm *cxlhdm;
+ void __iomem *hdm;
+ u32 ctrl;
+ int i;
+
+ if (!info)
+ return false;
+
+ cxlhdm = dev_get_drvdata(&info->port->dev);
+ hdm = cxlhdm->regs.hdm_decoder;
+
+ if (!hdm)
+ return true;
+
+ /*
+ * If HDM decoders are present and the driver is in control of
+	 * Mem_Enable, skip DVSEC-based emulation.
+ */
+ if (!info->mem_enabled)
+ return false;
+
+ /*
+ * If any decoders are committed already, there should not be any
+ * emulated DVSEC decoders.
+ */
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+ dev_dbg(&info->port->dev,
+ "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
+ info->port->id, i,
+ FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
+ readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
+ readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
+ readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
+ readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return false;
+ }
+
+ return true;
+}
+
/**
* devm_cxl_setup_hdm - map HDM decoder component registers
* @port: cxl_port to map
+ * @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
+struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm;
@@ -115,10 +161,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
-
cxlhdm->port = port;
+ dev_set_drvdata(dev, cxlhdm);
+
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb) {
+ if (!crb && info && info->mem_enabled) {
+ cxlhdm->decoder_count = info->ranges;
+ return cxlhdm;
+ } else if (!crb) {
dev_err(dev, "No component registers mapped\n");
return ERR_PTR(-ENXIO);
}
@@ -134,7 +184,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
return ERR_PTR(-ENXIO);
}
- dev_set_drvdata(dev, cxlhdm);
+ /*
+ * Now that the hdm capability is parsed, decide if range
+ * register emulation is needed and fixup cxlhdm accordingly.
+ */
+ if (should_emulate_decoders(info)) {
+ dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
+ info->ranges > 1 ? "s" : "");
+ cxlhdm->decoder_count = info->ranges;
+ }
return cxlhdm;
}
@@ -219,8 +277,11 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
lockdep_assert_held_write(&cxl_dpa_rwsem);
- if (!len)
- goto success;
+ if (!len) {
+ dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
+ port->id, cxled->cxld.id);
+ return -EINVAL;
+ }
if (cxled->dpa_res) {
dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
@@ -273,13 +334,12 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
cxled->mode = CXL_DECODER_MIXED;
}
-success:
port->hdm_end++;
get_device(&cxled->cxld.dev);
return 0;
}
-static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
{
@@ -295,6 +355,7 @@ static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
@@ -676,15 +737,64 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
+ /* Userspace is now responsible for reconfiguring this decoder */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ cxled->state = CXL_DECODER_STATE_MANUAL;
+ }
+
+ return 0;
+}
+
+static int cxl_setup_hdm_decoder_from_dvsec(
+ struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+ int which, struct cxl_endpoint_dvsec_info *info)
+{
+ struct cxl_endpoint_decoder *cxled;
+ u64 len;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ len = range_len(&info->dvsec_range[which]);
+ if (!len)
+ return -ENOENT;
+
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->commit = NULL;
+ cxld->reset = NULL;
+ cxld->hpa_range = info->dvsec_range[which];
+
+ /*
+ * Set the emulated decoder as locked pending additional support to
+ * change the range registers at run time.
+ */
+ cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
+ port->commit_end = cxld->id;
+
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+ return rc;
+ }
+ *dpa_base += len;
+ cxled->state = CXL_DECODER_STATE_AUTO;
+
return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map, void __iomem *hdm, int which,
- u64 *dpa_base)
+ u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
- struct cxl_endpoint_decoder *cxled = NULL;
- u64 size, base, skip, dpa_size;
+ u64 size, base, skip, dpa_size, lo, hi;
+ struct cxl_endpoint_decoder *cxled;
bool committed;
u32 remainder;
int i, rc;
@@ -694,12 +804,17 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
unsigned char target_id[8];
} target_list;
- if (is_endpoint_decoder(&cxld->dev))
- cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ if (should_emulate_decoders(info))
+ return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+ which, info);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
- base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
- size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
+ base = (hi << 32) + lo;
+ lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
+ size = (hi << 32) + lo;
committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
cxld->commit = cxl_decoder_commit;
cxld->reset = cxl_decoder_reset;
@@ -732,6 +847,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id);
return -ENXIO;
}
+
+ if (size == 0) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed with zero size\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
port->commit_end = cxld->id;
} else {
/* unless / until type-2 drivers arrive, assume type-3 */
@@ -754,9 +876,14 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- if (!cxled) {
- target_list.value =
- ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
+ port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
+ cxld->interleave_ways, cxld->interleave_granularity);
+
+ if (!info) {
+ lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
+ target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
target_map[i] = target_list.target_id[i];
@@ -773,7 +900,10 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id, size, cxld->interleave_ways);
return -ENXIO;
}
- skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
+ skip = (hi << 32) + lo;
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) {
dev_err(&port->dev,
@@ -783,21 +913,21 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return rc;
}
*dpa_base += dpa_size + skip;
+
+ cxled->state = CXL_DECODER_STATE_AUTO;
+
return 0;
}
-/**
- * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
- * @cxlhdm: Structure to populate with HDM capabilities
- */
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
+static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- struct cxl_port *port = cxlhdm->port;
- int i, committed;
- u64 dpa_base = 0;
+ int committed, i;
u32 ctrl;
+ if (!hdm)
+ return;
+
/*
* Since the register resource was recently claimed via request_region()
* be careful about trusting the "not-committed" status until the commit
@@ -814,6 +944,22 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
/* ensure that future checks of committed can be trusted */
if (committed != cxlhdm->decoder_count)
msleep(20);
+}
+
+/**
+ * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
+ * @cxlhdm: Structure to populate with HDM capabilities
+ * @info: cached DVSEC range register info
+ */
+int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ struct cxl_port *port = cxlhdm->port;
+ int i;
+ u64 dpa_base = 0;
+
+ cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
@@ -826,7 +972,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
cxled = cxl_endpoint_decoder_alloc(port);
if (IS_ERR(cxled)) {
dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
+ "Failed to allocate decoder%d.%d\n",
+ port->id, i);
return PTR_ERR(cxled);
}
cxld = &cxled->cxld;
@@ -836,21 +983,26 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
cxlsd = cxl_switch_decoder_alloc(port, target_count);
if (IS_ERR(cxlsd)) {
dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
+ "Failed to allocate decoder%d.%d\n",
+ port->id, i);
return PTR_ERR(cxlsd);
}
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
+ rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
+ &dpa_base, info);
if (rc) {
+ dev_warn(&port->dev,
+ "Failed to initialize decoder%d.%d\n",
+ port->id, i);
put_device(&cxld->dev);
return rc;
}
rc = add_hdm_decoder(port, cxld, target_map);
if (rc) {
dev_warn(&port->dev,
- "Failed to add decoder to port\n");
+ "Failed to add decoder%d.%d\n", port->id, i);
return rc;
}
}
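
The init_hdm_decoder() and should_emulate_decoders() hunks above drop ioread64_hi_lo() in favour of explicit readl() pairs, so the decoder registers are only ever touched with 32-bit accesses. The helper below restates that composition pattern in one place using the same base-register offsets; the helper itself is illustrative and not part of the patch.

static u64 example_hdm_read_base(void __iomem *hdm, int which)
{
	/* low dword first, then high, composed into a 64-bit base address */
	u64 lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	u64 hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));

	return (hi << 32) + lo;
}
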
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index b03fba212799..23b9ff920d7e 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
+#include <asm/unaligned.h>
+#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
+#include "trace.h"
static bool cxl_raw_allow_all;
@@ -59,12 +62,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
- CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
- CXL_CMD(INJECT_POISON, 0x8, 0, 0),
- CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
- CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
- CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};
/*
@@ -85,6 +83,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
*
* CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
* is kept up to date with patrol notifications and error management.
+ *
+ * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
+ * driver orchestration for safety.
*/
static u16 cxl_disabled_raw_commands[] = {
CXL_MBOX_OP_ACTIVATE_FW,
@@ -93,6 +94,9 @@ static u16 cxl_disabled_raw_commands[] = {
CXL_MBOX_OP_SET_SHUTDOWN_STATE,
CXL_MBOX_OP_SCAN_MEDIA,
CXL_MBOX_OP_GET_SCAN_MEDIA,
+ CXL_MBOX_OP_GET_POISON,
+ CXL_MBOX_OP_INJECT_POISON,
+ CXL_MBOX_OP_CLEAR_POISON,
};
/*
@@ -117,6 +121,43 @@ static bool cxl_is_security_command(u16 opcode)
return false;
}
+static bool cxl_is_poison_command(u16 opcode)
+{
+#define CXL_MBOX_OP_POISON_CMDS 0x43
+
+ if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
+ return true;
+
+ return false;
+}
+
+static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
+ u16 opcode)
+{
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_POISON:
+ set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
+ break;
+ case CXL_MBOX_OP_INJECT_POISON:
+ set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
+ break;
+ case CXL_MBOX_OP_CLEAR_POISON:
+ set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
+ break;
+ case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
+ set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
+ break;
+ case CXL_MBOX_OP_SCAN_MEDIA:
+ set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
+ break;
+ case CXL_MBOX_OP_GET_SCAN_MEDIA:
+ set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
+ break;
+ default:
+ break;
+ }
+}
+
static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
struct cxl_mem_command *c;
@@ -170,6 +211,12 @@ int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
out_size = mbox_cmd->size_out;
min_out = mbox_cmd->min_out;
rc = cxlds->mbox_send(cxlds, mbox_cmd);
+ /*
+ * EIO is reserved for a payload size mismatch and mbox_send()
+ * may not return this error.
+ */
+ if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
+ return -ENXIO;
if (rc)
return rc;
@@ -445,9 +492,14 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
* structures.
*/
cxl_for_each_cmd(cmd) {
- const struct cxl_command_info *info = &cmd->info;
+ struct cxl_command_info info = cmd->info;
+
+ if (test_bit(info.id, cxlmd->cxlds->enabled_cmds))
+ info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
+ if (test_bit(info.id, cxlmd->cxlds->exclusive_cmds))
+ info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
- if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
+ if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
return -EFAULT;
if (j == n_commands)
@@ -550,9 +602,9 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
return 0;
}
-static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
+static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 *size, u8 *out)
{
- u32 remaining = size;
+ u32 remaining = *size;
u32 offset = 0;
while (remaining) {
@@ -576,6 +628,17 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+
+ /*
+ * The output payload length that indicates the number
+ * of valid bytes can be smaller than the Log buffer
+ * size.
+ */
+ if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
+ offset += mbox_cmd.size_out;
+ break;
+ }
+
if (rc < 0)
return rc;
@@ -584,6 +647,8 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8
offset += xfer_size;
}
+ *size = offset;
+
return 0;
}
@@ -608,13 +673,19 @@ static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
u16 opcode = le16_to_cpu(cel_entry[i].opcode);
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
- if (!cmd) {
+ if (!cmd && !cxl_is_poison_command(opcode)) {
dev_dbg(cxlds->dev,
- "Opcode 0x%04x unsupported by driver", opcode);
+ "Opcode 0x%04x unsupported by driver\n", opcode);
continue;
}
- set_bit(cmd->info.id, cxlds->enabled_cmds);
+ if (cmd)
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
+
+ if (cxl_is_poison_command(opcode))
+ cxl_set_poison_cmd_enabled(&cxlds->poison, opcode);
+
+ dev_dbg(cxlds->dev, "Opcode 0x%04x enabled\n", opcode);
}
}
@@ -694,7 +765,7 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
goto out;
}
- rc = cxl_xfer_log(cxlds, &uuid, size, log);
+ rc = cxl_xfer_log(cxlds, &uuid, &size, log);
if (rc) {
kvfree(log);
goto out;
@@ -717,6 +788,203 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+static const uuid_t gen_media_event_uuid =
+ UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6);
+
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+static const uuid_t dram_event_uuid =
+ UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24);
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+static const uuid_t mem_mod_event_uuid =
+ UUID_INIT(0xfe927475, 0xdd59, 0x4339,
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
+
+static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ struct cxl_event_record_raw *record)
+{
+ uuid_t *id = &record->hdr.id;
+
+ if (uuid_equal(id, &gen_media_event_uuid)) {
+ struct cxl_event_gen_media *rec =
+ (struct cxl_event_gen_media *)record;
+
+ trace_cxl_general_media(cxlmd, type, rec);
+ } else if (uuid_equal(id, &dram_event_uuid)) {
+ struct cxl_event_dram *rec = (struct cxl_event_dram *)record;
+
+ trace_cxl_dram(cxlmd, type, rec);
+ } else if (uuid_equal(id, &mem_mod_event_uuid)) {
+ struct cxl_event_mem_module *rec =
+ (struct cxl_event_mem_module *)record;
+
+ trace_cxl_memory_module(cxlmd, type, rec);
+ } else {
+ /* For unknown record types print just the header */
+ trace_cxl_generic_event(cxlmd, type, record);
+ }
+}
+
+static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
+ enum cxl_event_log_type log,
+ struct cxl_get_event_payload *get_pl)
+{
+ struct cxl_mbox_clear_event_payload *payload;
+ u16 total = le16_to_cpu(get_pl->record_count);
+ u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
+ size_t pl_size = struct_size(payload, handles, max_handles);
+ struct cxl_mbox_cmd mbox_cmd;
+ u16 cnt;
+ int rc = 0;
+ int i;
+
+ /* Payload size may limit the max handles */
+ if (pl_size > cxlds->payload_size) {
+ max_handles = (cxlds->payload_size - sizeof(*payload)) /
+ sizeof(__le16);
+ pl_size = struct_size(payload, handles, max_handles);
+ }
+
+ payload = kvzalloc(pl_size, GFP_KERNEL);
+ if (!payload)
+ return -ENOMEM;
+
+ *payload = (struct cxl_mbox_clear_event_payload) {
+ .event_log = log,
+ };
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
+ .payload_in = payload,
+ .size_in = pl_size,
+ };
+
+ /*
+ * Clear Event Records uses u8 for the handle cnt while Get Event
+ * Record can return up to 0xffff records.
+ */
+ i = 0;
+ for (cnt = 0; cnt < total; cnt++) {
+ payload->handles[i++] = get_pl->records[cnt].hdr.handle;
+ dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n",
+			log, le16_to_cpu(payload->handles[i - 1]));
+
+ if (i == max_handles) {
+ payload->nr_recs = i;
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc)
+ goto free_pl;
+ i = 0;
+ }
+ }
+
+ /* Clear what is left if any */
+ if (i) {
+ payload->nr_recs = i;
+ mbox_cmd.size_in = struct_size(payload, handles, i);
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc)
+ goto free_pl;
+ }
+
+free_pl:
+ kvfree(payload);
+ return rc;
+}
+
+static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
+ enum cxl_event_log_type type)
+{
+ struct cxl_get_event_payload *payload;
+ struct cxl_mbox_cmd mbox_cmd;
+ u8 log_type = type;
+ u16 nr_rec;
+
+ mutex_lock(&cxlds->event.log_lock);
+ payload = cxlds->event.buf;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = cxlds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
+
+ do {
+ int rc, i;
+
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc) {
+ dev_err_ratelimited(cxlds->dev,
+ "Event log '%d': Failed to query event records : %d",
+ type, rc);
+ break;
+ }
+
+ nr_rec = le16_to_cpu(payload->record_count);
+ if (!nr_rec)
+ break;
+
+ for (i = 0; i < nr_rec; i++)
+ cxl_event_trace_record(cxlds->cxlmd, type,
+ &payload->records[i]);
+
+ if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
+ trace_cxl_overflow(cxlds->cxlmd, type, payload);
+
+ rc = cxl_clear_event_record(cxlds, type, payload);
+ if (rc) {
+ dev_err_ratelimited(cxlds->dev,
+ "Event log '%d': Failed to clear events : %d",
+ type, rc);
+ break;
+ }
+ } while (nr_rec);
+
+ mutex_unlock(&cxlds->event.log_lock);
+}
+
+/**
+ * cxl_mem_get_event_records - Get Event Records from the device
+ * @cxlds: The device data for the operation
+ * @status: Event Status register value identifying which events are available.
+ *
+ * Retrieve all event records available on the device, report them as trace
+ * events, and clear them.
+ *
+ * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
+ * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
+ */
+void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status)
+{
+ dev_dbg(cxlds->dev, "Reading event logs: %x\n", status);
+
+ if (status & CXLDEV_EVENT_STATUS_FATAL)
+ cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL);
+ if (status & CXLDEV_EVENT_STATUS_FAIL)
+ cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL);
+ if (status & CXLDEV_EVENT_STATUS_WARN)
+ cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN);
+ if (status & CXLDEV_EVENT_STATUS_INFO)
+ cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
+
/**
* cxl_mem_get_partition_info - Get partition info
* @cxlds: The device data for the operation
@@ -770,6 +1038,7 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
struct cxl_mbox_cmd mbox_cmd;
+ u32 val;
int rc;
mbox_cmd = (struct cxl_mbox_cmd) {
@@ -793,6 +1062,11 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
cxlds->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+ if (test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) {
+ val = get_unaligned_le24(id.poison_list_max_mer);
+ cxlds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
+ }
+
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
@@ -857,6 +1131,117 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
+int cxl_set_timestamp(struct cxl_dev_state *cxlds)
+{
+ struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_mbox_set_timestamp_in pi;
+ int rc;
+
+ pi.timestamp = cpu_to_le64(ktime_get_real_ns());
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_TIMESTAMP,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ };
+
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ /*
+ * Command is optional. Devices may have another way of providing
+ * a timestamp, or may return all 0s in timestamp fields.
+	 * Don't report an error if this command isn't supported.
+ */
+ if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
+
+int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
+ struct cxl_region *cxlr)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_mbox_poison_out *po;
+ struct cxl_mbox_poison_in pi;
+ struct cxl_mbox_cmd mbox_cmd;
+ int nr_records = 0;
+ int rc;
+
+ rc = mutex_lock_interruptible(&cxlds->poison.lock);
+ if (rc)
+ return rc;
+
+ po = cxlds->poison.list_out;
+ pi.offset = cpu_to_le64(offset);
+ pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = cxlds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
+ do {
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc)
+ break;
+
+ for (int i = 0; i < le16_to_cpu(po->count); i++)
+ trace_cxl_poison(cxlmd, cxlr, &po->record[i],
+ po->flags, po->overflow_ts,
+ CXL_POISON_TRACE_LIST);
+
+ /* Protect against an uncleared _FLAG_MORE */
+ nr_records = nr_records + le16_to_cpu(po->count);
+ if (nr_records >= cxlds->poison.max_errors) {
+ dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
+ nr_records);
+ break;
+ }
+ } while (po->flags & CXL_POISON_FLAG_MORE);
+
+ mutex_unlock(&cxlds->poison.lock);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
+
+static void free_poison_buf(void *buf)
+{
+ kvfree(buf);
+}
+
+/* Get Poison List output buffer is protected by cxlds->poison.lock */
+static int cxl_poison_alloc_buf(struct cxl_dev_state *cxlds)
+{
+ cxlds->poison.list_out = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+ if (!cxlds->poison.list_out)
+ return -ENOMEM;
+
+ return devm_add_action_or_reset(cxlds->dev, free_poison_buf,
+ cxlds->poison.list_out);
+}
+
+int cxl_poison_state_init(struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ if (!test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds))
+ return 0;
+
+ rc = cxl_poison_alloc_buf(cxlds);
+ if (rc) {
+ clear_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds);
+ return rc;
+ }
+
+ mutex_init(&cxlds->poison.lock);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
+
struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
struct cxl_dev_state *cxlds;
@@ -868,6 +1253,7 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
}
mutex_init(&cxlds->mbox_mutex);
+ mutex_init(&cxlds->event.log_lock);
cxlds->dev = dev;
return cxlds;
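
cxl_mem_get_event_records() above is exported with the expectation that the PCI driver's event handling reads the device's event status register and passes the raw mask straight through; the bits checked are the CXLDEV_EVENT_STATUS_* flags tested at the end of that function. A sketch of such a caller follows; the register field name (cxlds->regs.status) and CXLDEV_DEV_EVENT_STATUS_OFFSET are assumptions about the surrounding driver and are not shown in this hunk.

static void example_event_notification(struct cxl_dev_state *cxlds)
{
	/* raw mask decoded against CXLDEV_EVENT_STATUS_{INFO,WARN,FAIL,FATAL} */
	u32 status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);

	cxl_mem_get_event_records(cxlds, status);
}
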
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a74a93310d26..057a43267290 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -6,6 +6,7 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
+#include "trace.h"
#include "core.h"
static DECLARE_RWSEM(cxl_memdev_rwsem);
@@ -27,7 +28,7 @@ static void cxl_memdev_release(struct device *dev)
kfree(cxlmd);
}
-static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
kgid_t *gid)
{
return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
@@ -106,6 +107,232 @@ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(numa_node);
+static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ u64 offset, length;
+ int rc = 0;
+
+ /* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
+ if (resource_size(&cxlds->pmem_res)) {
+ offset = cxlds->pmem_res.start;
+ length = resource_size(&cxlds->pmem_res);
+ rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ if (rc)
+ return rc;
+ }
+ if (resource_size(&cxlds->ram_res)) {
+ offset = cxlds->ram_res.start;
+ length = resource_size(&cxlds->ram_res);
+ rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ /*
+ * Invalid Physical Address is not an error for
+ * volatile addresses. Device support is optional.
+ */
+ if (rc == -EFAULT)
+ rc = 0;
+ }
+ return rc;
+}
+
+int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
+{
+ struct cxl_port *port;
+ int rc;
+
+ port = dev_get_drvdata(&cxlmd->dev);
+ if (!port || !is_cxl_endpoint(port))
+ return -EINVAL;
+
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc)
+ return rc;
+
+ if (port->commit_end == -1) {
+ /* No regions mapped to this memdev */
+ rc = cxl_get_poison_by_memdev(cxlmd);
+ } else {
+ /* Regions mapped, collect poison by endpoint */
+ rc = cxl_get_poison_by_endpoint(port);
+ }
+ up_read(&cxl_dpa_rwsem);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
+
+struct cxl_dpa_to_region_context {
+ struct cxl_region *cxlr;
+ u64 dpa;
+};
+
+static int __cxl_dpa_to_region(struct device *dev, void *arg)
+{
+ struct cxl_dpa_to_region_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ u64 dpa = ctx->dpa;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return 0;
+
+ if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ return 0;
+
+ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+ dev_name(&cxled->cxld.region->dev));
+
+ ctx->cxlr = cxled->cxld.region;
+
+ return 1;
+}
+
+static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dpa_to_region_context ctx;
+ struct cxl_port *port;
+
+ ctx = (struct cxl_dpa_to_region_context) {
+ .dpa = dpa,
+ };
+ port = dev_get_drvdata(&cxlmd->dev);
+ if (port && is_cxl_endpoint(port) && port->commit_end != -1)
+ device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+
+ return ctx.cxlr;
+}
+
+static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ if (!resource_size(&cxlds->dpa_res)) {
+ dev_dbg(cxlds->dev, "device has no dpa resource\n");
+ return -EINVAL;
+ }
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
+ dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
+ dpa, &cxlds->dpa_res);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(dpa, 64)) {
+ dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_mbox_inject_poison inject;
+ struct cxl_poison_record record;
+ struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_region *cxlr;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc)
+ return rc;
+
+ rc = cxl_validate_poison_dpa(cxlmd, dpa);
+ if (rc)
+ goto out;
+
+ inject.address = cpu_to_le64(dpa);
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_INJECT_POISON,
+ .size_in = sizeof(inject),
+ .payload_in = &inject,
+ };
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc)
+ goto out;
+
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ dev_warn_once(cxlds->dev,
+ "poison inject dpa:%#llx region: %s\n", dpa,
+ dev_name(&cxlr->dev));
+
+ record = (struct cxl_poison_record) {
+ .address = cpu_to_le64(dpa),
+ .length = cpu_to_le32(1),
+ };
+ trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
+out:
+ up_read(&cxl_dpa_rwsem);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
+
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_mbox_clear_poison clear;
+ struct cxl_poison_record record;
+ struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_region *cxlr;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc)
+ return rc;
+
+ rc = cxl_validate_poison_dpa(cxlmd, dpa);
+ if (rc)
+ goto out;
+
+ /*
+ * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
+ * is defined to accept 64 bytes of write-data, along with the
+ * address to clear. This driver uses zeroes as write-data.
+ */
+ clear = (struct cxl_mbox_clear_poison) {
+ .address = cpu_to_le64(dpa)
+ };
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_CLEAR_POISON,
+ .size_in = sizeof(clear),
+ .payload_in = &clear,
+ };
+
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc)
+ goto out;
+
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
+ dpa, dev_name(&cxlr->dev));
+
+ record = (struct cxl_poison_record) {
+ .address = cpu_to_le64(dpa),
+ .length = cpu_to_le32(1),
+ };
+ trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
+out:
+ up_read(&cxl_dpa_rwsem);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);
+
static struct attribute *cxl_memdev_attributes[] = {
&dev_attr_serial.attr,
&dev_attr_firmware_version.attr,
@@ -162,7 +389,7 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};
-bool is_cxl_memdev(struct device *dev)
+bool is_cxl_memdev(const struct device *dev)
{
return dev->type == &cxl_memdev_type;
}
@@ -242,10 +469,11 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
if (!cxlmd)
return ERR_PTR(-ENOMEM);
- rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
+ rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
if (rc < 0)
goto err;
cxlmd->id = rc;
+ cxlmd->depth = -1;
dev = &cxlmd->dev;
device_initialize(dev);
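
cxl_inject_poison() and cxl_clear_poison() above return early when CONFIG_DEBUG_FS is disabled because their intended consumer is a debugfs attribute in the endpoint driver; that wiring is not part of this hunk. The sketch below shows what such an attribute could look like, with the helper and fops names being illustrative.

static int example_inject_poison_set(void *data, u64 dpa)
{
	struct cxl_memdev *cxlmd = data;

	return cxl_inject_poison(cxlmd, dpa);
}
DEFINE_DEBUGFS_ATTRIBUTE(example_inject_poison_fops, NULL,
			 example_inject_poison_set, "%llx\n");

/*
 * Hooked up against a per-memdev debugfs directory, for example:
 * debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
 *		       &example_inject_poison_fops);
 */
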
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 57764e9cd19d..f332fe7af92b 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -9,6 +9,7 @@
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
+#include "trace.h"
/**
* DOC: cxl core pci
@@ -141,11 +142,10 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
-static int wait_for_valid(struct cxl_dev_state *cxlds)
+static int wait_for_valid(struct pci_dev *pdev, int d)
{
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- int d = cxlds->cxl_dvsec, rc;
u32 val;
+ int rc;
/*
* Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
@@ -213,11 +213,6 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}
-static bool range_contains(struct range *r1, struct range *r2)
-{
- return r1->start <= r2->start && r1->end >= r2->end;
-}
-
/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
@@ -229,8 +224,6 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
cxld = to_cxl_decoder(dev);
- if (!(cxld->flags & CXL_DECODER_F_LOCK))
- return 0;
if (!(cxld->flags & CXL_DECODER_F_RAM))
return 0;
@@ -260,94 +253,11 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
- struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+int cxl_dvsec_rr_decode(struct device *dev, int d,
+ struct cxl_endpoint_dvsec_info *info)
{
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- struct cxl_port *port = cxlhdm->port;
- struct device *dev = cxlds->dev;
- struct cxl_port *root;
- int i, rc, allowed;
- u32 global_ctrl;
-
- global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
-
- /*
- * If the HDM Decoder Capability is already enabled then assume
- * that some other agent like platform firmware set it up.
- */
- if (global_ctrl & CXL_HDM_DECODER_ENABLE) {
- rc = devm_cxl_enable_mem(&port->dev, cxlds);
- if (rc)
- return false;
- return true;
- }
-
- root = to_cxl_port(port->dev.parent);
- while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
- root = to_cxl_port(root->dev.parent);
- if (!is_cxl_root(root)) {
- dev_err(dev, "Failed to acquire root port for HDM enable\n");
- return false;
- }
-
- for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
- struct device *cxld_dev;
-
- cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
- dvsec_range_allowed);
- if (!cxld_dev) {
- dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
- continue;
- }
- dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
- put_device(cxld_dev);
- allowed++;
- }
-
- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
- }
-
- /*
- * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
- * [High,Low] when HDM operation is enabled the range register values
- * are ignored by the device, but the spec also recommends matching the
- * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
- * are expected even though Linux does not require or maintain that
- * match. If at least one DVSEC range is enabled and allowed, skip HDM
- * Decoder Capability Enable.
- */
- if (info->mem_enabled)
- return false;
-
- rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
- if (rc)
- return false;
-
- rc = devm_cxl_enable_mem(&port->dev, cxlds);
- if (rc)
- return false;
-
- return true;
-}
-
-/**
- * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
- * @cxlds: Device state
- * @cxlhdm: Mapped HDM decoder Capability
- *
- * Try to enable the endpoint's HDM Decoder Capability
- */
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
-{
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- struct cxl_endpoint_dvsec_info info = { 0 };
+ struct pci_dev *pdev = to_pci_dev(dev);
int hdm_count, rc, i, ranges = 0;
- struct device *dev = &pdev->dev;
- int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
if (!d) {
@@ -378,7 +288,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
if (!hdm_count || hdm_count > 2)
return -EINVAL;
- rc = wait_for_valid(cxlds);
+ rc = wait_for_valid(pdev, d);
if (rc) {
dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
return rc;
@@ -389,9 +299,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
* disabled, and they will remain moot after the HDM Decoder
* capability is enabled.
*/
- info.mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
- if (!info.mem_enabled)
- goto hdm_init;
+ info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
+ if (!info->mem_enabled)
+ return 0;
for (i = 0; i < hdm_count; i++) {
u64 base, size;
@@ -410,6 +320,13 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
return rc;
size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
+ if (!size) {
+ info->dvsec_range[i] = (struct range) {
+ .start = 0,
+ .end = CXL_RESOURCE_NONE,
+ };
+ continue;
+ }
rc = pci_read_config_dword(
pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
@@ -425,29 +342,94 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
- info.dvsec_range[i] = (struct range) {
+ info->dvsec_range[i] = (struct range) {
.start = base,
.end = base + size - 1
};
- if (size)
- ranges++;
+ ranges++;
}
- info.ranges = ranges;
+ info->ranges = ranges;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
+
+/**
+ * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
+ * @cxlds: Device state
+ * @cxlhdm: Mapped HDM decoder Capability
+ * @info: Cached DVSEC range registers info
+ *
+ * Try to enable the endpoint's HDM Decoder Capability
+ */
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ struct cxl_port *port = cxlhdm->port;
+ struct device *dev = cxlds->dev;
+ struct cxl_port *root;
+ int i, rc, allowed;
+ u32 global_ctrl = 0;
+
+ if (hdm)
+ global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
/*
- * If DVSEC ranges are being used instead of HDM decoder registers there
- * is no use in trying to manage those.
+ * If the HDM Decoder Capability is already enabled then assume
+ * that some other agent like platform firmware set it up.
*/
-hdm_init:
- if (!__cxl_hdm_decode_init(cxlds, cxlhdm, &info)) {
- dev_err(dev,
- "Legacy range registers configuration prevents HDM operation.\n");
- return -EBUSY;
+ if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
+ return devm_cxl_enable_mem(&port->dev, cxlds);
+ else if (!hdm)
+ return -ENODEV;
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return -ENODEV;
}
- return 0;
+ for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
+ struct device *cxld_dev;
+
+ cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
+ dvsec_range_allowed);
+ if (!cxld_dev) {
+ dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
+ continue;
+ }
+ dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
+ put_device(cxld_dev);
+ allowed++;
+ }
+
+ if (!allowed) {
+ cxl_set_mem_enable(cxlds, 0);
+ info->mem_enabled = 0;
+ }
+
+ /*
+ * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+ * [High,Low] when HDM operation is enabled the range register values
+ * are ignored by the device, but the spec also recommends matching the
+ * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+ * are expected even though Linux does not require or maintain that
+ * match. If at least one DVSEC range is enabled and allowed, skip HDM
+ * Decoder Capability Enable.
+ */
+ if (info->mem_enabled)
+ return 0;
+
+ rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enable_mem(&port->dev, cxlds);
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
@@ -459,79 +441,33 @@ EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
-static struct pci_doe_mb *find_cdat_doe(struct device *uport)
-{
- struct cxl_memdev *cxlmd;
- struct cxl_dev_state *cxlds;
- unsigned long index;
- void *entry;
-
- cxlmd = to_cxl_memdev(uport);
- cxlds = cxlmd->cxlds;
-
- xa_for_each(&cxlds->doe_mbs, index, entry) {
- struct pci_doe_mb *cur = entry;
-
- if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
- CXL_DOE_PROTOCOL_TABLE_ACCESS))
- return cur;
- }
-
- return NULL;
-}
-
-#define CDAT_DOE_REQ(entry_handle) \
+#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \
FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
-static void cxl_doe_task_complete(struct pci_doe_task *task)
-{
- complete(task->private);
-}
-
-struct cdat_doe_task {
- u32 request_pl;
- u32 response_pl[32];
- struct completion c;
- struct pci_doe_task task;
-};
-
-#define DECLARE_CDAT_DOE_TASK(req, cdt) \
-struct cdat_doe_task cdt = { \
- .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \
- .request_pl = req, \
- .task = { \
- .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \
- .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \
- .request_pl = &cdt.request_pl, \
- .request_pl_sz = sizeof(cdt.request_pl), \
- .response_pl = cdt.response_pl, \
- .response_pl_sz = sizeof(cdt.response_pl), \
- .complete = cxl_doe_task_complete, \
- .private = &cdt.c, \
- } \
-}
-
static int cxl_cdat_get_length(struct device *dev,
struct pci_doe_mb *cdat_doe,
size_t *length)
{
- DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
+ __le32 request = CDAT_DOE_REQ(0);
+ __le32 response[2];
int rc;
- rc = pci_doe_submit_task(cdat_doe, &t.task);
+ rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS,
+ &request, sizeof(request),
+ &response, sizeof(response));
if (rc < 0) {
- dev_err(dev, "DOE submit failed: %d", rc);
+ dev_err(dev, "DOE failed: %d", rc);
return rc;
}
- wait_for_completion(&t.c);
- if (t.task.rv < sizeof(u32))
+ if (rc < sizeof(response))
return -EIO;
- *length = t.response_pl[1];
+ *length = le32_to_cpu(response[1]);
dev_dbg(dev, "CDAT length %zu\n", *length);
return 0;
@@ -539,44 +475,56 @@ static int cxl_cdat_get_length(struct device *dev,
static int cxl_cdat_read_table(struct device *dev,
struct pci_doe_mb *cdat_doe,
- struct cxl_cdat *cdat)
+ void *cdat_table, size_t *cdat_length)
{
- size_t length = cdat->length;
- u32 *data = cdat->table;
+ size_t length = *cdat_length + sizeof(__le32);
+ __le32 *data = cdat_table;
int entry_handle = 0;
+ __le32 saved_dw = 0;
do {
- DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ __le32 request = CDAT_DOE_REQ(entry_handle);
+ struct cdat_entry_header *entry;
size_t entry_dw;
- u32 *entry;
int rc;
- rc = pci_doe_submit_task(cdat_doe, &t.task);
+ rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS,
+ &request, sizeof(request),
+ data, length);
if (rc < 0) {
- dev_err(dev, "DOE submit failed: %d", rc);
+ dev_err(dev, "DOE failed: %d", rc);
return rc;
}
- wait_for_completion(&t.c);
- /* 1 DW header + 1 DW data min */
- if (t.task.rv < (2 * sizeof(u32)))
+
+ /* 1 DW Table Access Response Header + CDAT entry */
+ entry = (struct cdat_entry_header *)(data + 1);
+ if ((entry_handle == 0 &&
+ rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
+ (entry_handle > 0 &&
+ (rc < sizeof(__le32) + sizeof(*entry) ||
+ rc != sizeof(__le32) + le16_to_cpu(entry->length))))
return -EIO;
/* Get the CXL table access header entry handle */
entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
- t.response_pl[0]);
- entry = t.response_pl + 1;
- entry_dw = t.task.rv / sizeof(u32);
+ le32_to_cpu(data[0]));
+ entry_dw = rc / sizeof(__le32);
/* Skip Header */
entry_dw -= 1;
- entry_dw = min(length / sizeof(u32), entry_dw);
- /* Prevent length < 1 DW from causing a buffer overflow */
- if (entry_dw) {
- memcpy(data, entry, entry_dw * sizeof(u32));
- length -= entry_dw * sizeof(u32);
- data += entry_dw;
- }
+ /*
+ * Table Access Response Header overwrote the last DW of
+ * previous entry, so restore that DW
+ */
+ *data = saved_dw;
+ length -= entry_dw * sizeof(__le32);
+ data += entry_dw;
+ saved_dw = *data;
} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+ /* Length in CDAT header may exceed concatenation of CDAT entries */
+ *cdat_length -= length - sizeof(__le32);
+
return 0;
}
@@ -588,13 +536,19 @@ static int cxl_cdat_read_table(struct device *dev,
*/
void read_cdat_data(struct cxl_port *port)
{
- struct pci_doe_mb *cdat_doe;
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
+ struct device *host = cxlmd->dev.parent;
struct device *dev = &port->dev;
- struct device *uport = port->uport;
+ struct pci_doe_mb *cdat_doe;
size_t cdat_length;
+ void *cdat_table;
int rc;
- cdat_doe = find_cdat_doe(uport);
+ if (!dev_is_pci(host))
+ return;
+ cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
+ PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS);
if (!cdat_doe) {
dev_dbg(dev, "No CDAT mailbox\n");
return;
@@ -607,18 +561,130 @@ void read_cdat_data(struct cxl_port *port)
return;
}
- port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
- if (!port->cdat.table)
+ cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
+ GFP_KERNEL);
+ if (!cdat_table)
return;
- port->cdat.length = cdat_length;
- rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
+ rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
if (rc) {
/* Don't leave table data allocated on error */
- devm_kfree(dev, port->cdat.table);
- port->cdat.table = NULL;
- port->cdat.length = 0;
+ devm_kfree(dev, cdat_table);
dev_err(dev, "CDAT data read error\n");
+ return;
}
+
+ port->cdat.table = cdat_table + sizeof(__le32);
+ port->cdat.length = cdat_length;
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ void __iomem *addr;
+ u32 status;
+
+ if (!cxlds->regs.ras)
+ return;
+
+ addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+ status = readl(addr);
+ if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
+ writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
+ trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+/* CXL spec rev3.0 8.2.4.16.1 */
+static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+{
+ void __iomem *addr;
+ u32 *log_addr;
+ int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
+
+ addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+ log_addr = log;
+
+ for (i = 0; i < log_u32_size; i++) {
+ *log_addr = readl(addr);
+ log_addr++;
+ addr += sizeof(u32);
+ }
+}
+
+/*
+ * Log the state of the RAS status registers and prepare them to log the
+ * next error status. Return true if a reset is needed.
+ */
+static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+{
+ u32 hl[CXL_HEADERLOG_SIZE_U32];
+ void __iomem *addr;
+ u32 status;
+ u32 fe;
+
+ if (!cxlds->regs.ras)
+ return false;
+
+ addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+ status = readl(addr);
+ if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
+ return false;
+
+ /* If multiple errors, log header points to first error from ctrl reg */
+ if (hweight32(status) > 1) {
+ void __iomem *rcc_addr =
+ cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ readl(rcc_addr)));
+ } else {
+ fe = status;
+ }
+
+ header_log_copy(cxlds, hl);
+ trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
+ writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
+
+ return true;
+}
+
+pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ struct device *dev = &cxlmd->dev;
+ bool ue;
+
+ /*
+ * A frozen channel indicates an impending reset which is fatal to
+ * CXL.mem operation, and will likely crash the system. On the off
+ * chance the situation is recoverable dump the status of the RAS
+ * capability registers and bounce the active state of the memdev.
+ */
+ ue = cxl_report_and_clear(cxlds);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ if (ue) {
+ device_release_driver(dev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(&pdev->dev,
+ "%s: frozen state error detected, disable CXL.mem\n",
+ dev_name(dev));
+ device_release_driver(dev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(&pdev->dev,
+ "failure state error detected, request disconnect\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
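
cxl_error_detected() and cxl_cor_error_detected() are exported so the PCI driver can plug them into its struct pci_error_handlers. The sketch below shows that wiring under the assumption that the driver supplies its own reset/resume callbacks, which are omitted because they are not part of this hunk.

static const struct pci_error_handlers example_cxl_error_handlers = {
	.error_detected		= cxl_error_detected,
	.cor_error_detected	= cxl_cor_error_detected,
	/* .slot_reset and .resume are driver-specific and not shown here */
};
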
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index f3d2169b6731..f8c38d997252 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
- struct cxl_port *port = find_cxl_root(start);
+ struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
struct device *dev;
if (!port)
@@ -227,34 +227,16 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
return cxl_nvd;
}
-static void cxl_nvd_unregister(void *_cxl_nvd)
+static void cxlmd_release_nvdimm(void *_cxlmd)
{
- struct cxl_nvdimm *cxl_nvd = _cxl_nvd;
- struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct cxl_nvdimm *cxl_nvd = cxlmd->cxl_nvd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
- /*
- * Either the bridge is in ->remove() context under the device_lock(),
- * or cxlmd_release_nvdimm() is cancelling the bridge's release action
- * for @cxl_nvd and doing it itself (while manually holding the bridge
- * lock).
- */
- device_lock_assert(&cxl_nvb->dev);
cxl_nvd->cxlmd = NULL;
cxlmd->cxl_nvd = NULL;
+ cxlmd->cxl_nvb = NULL;
device_unregister(&cxl_nvd->dev);
-}
-
-static void cxlmd_release_nvdimm(void *_cxlmd)
-{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
-
- device_lock(&cxl_nvb->dev);
- if (cxlmd->cxl_nvd)
- devm_release_action(&cxl_nvb->dev, cxl_nvd_unregister,
- cxlmd->cxl_nvd);
- device_unlock(&cxl_nvb->dev);
put_device(&cxl_nvb->dev);
}
@@ -271,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
struct device *dev;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb)
return -ENODEV;
@@ -293,22 +275,6 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));
- /*
- * The two actions below arrange for @cxl_nvd to be deleted when either
- * the top-level PMEM bridge goes down, or the endpoint device goes
- * through ->remove().
- */
- device_lock(&cxl_nvb->dev);
- if (cxl_nvb->dev.driver)
- rc = devm_add_action_or_reset(&cxl_nvb->dev, cxl_nvd_unregister,
- cxl_nvd);
- else
- rc = -ENXIO;
- device_unlock(&cxl_nvb->dev);
-
- if (rc)
- goto err_alloc;
-
/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index b631a0520456..da2068475fa2 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
@@ -38,7 +37,7 @@ static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(devtype);
-static int cxl_device_id(struct device *dev)
+static int cxl_device_id(const struct device *dev)
{
if (dev->type == &cxl_nvdimm_bridge_type)
return CXL_DEVICE_NVDIMM_BRIDGE;
@@ -46,6 +45,8 @@ static int cxl_device_id(struct device *dev)
return CXL_DEVICE_NVDIMM;
if (dev->type == CXL_PMEM_REGION_TYPE())
return CXL_DEVICE_PMEM_REGION;
+ if (dev->type == CXL_DAX_REGION_TYPE())
+ return CXL_DEVICE_DAX_REGION;
if (is_cxl_port(dev)) {
if (is_cxl_root(to_cxl_port(dev)))
return CXL_DEVICE_ROOT;
@@ -180,17 +181,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- switch (cxled->mode) {
- case CXL_DECODER_RAM:
- return sysfs_emit(buf, "ram\n");
- case CXL_DECODER_PMEM:
- return sysfs_emit(buf, "pmem\n");
- case CXL_DECODER_NONE:
- return sysfs_emit(buf, "none\n");
- case CXL_DECODER_MIXED:
- default:
- return sysfs_emit(buf, "mixed\n");
- }
+ return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
@@ -304,6 +295,7 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
SET_CXL_REGION_ATTR(create_pmem_region)
+ SET_CXL_REGION_ATTR(create_ram_region)
SET_CXL_REGION_ATTR(delete_region)
NULL,
};
@@ -315,6 +307,13 @@ static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}
+static bool can_create_ram(struct cxl_root_decoder *cxlrd)
+{
+ unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;
+
+ return (cxlrd->cxlsd.cxld.flags & flags) == flags;
+}
+
static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
@@ -323,7 +322,11 @@ static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *
if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
return 0;
- if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
+ if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
+ return 0;
+
+ if (a == CXL_REGION_ATTR(delete_region) &&
+ !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
return 0;
return a->mode;
@@ -444,6 +447,7 @@ bool is_endpoint_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_endpoint_type;
}
+EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);
bool is_root_decoder(struct device *dev)
{
@@ -455,6 +459,7 @@ bool is_switch_decoder(struct device *dev)
{
return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
+EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
@@ -482,6 +487,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
+EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
static void cxl_ep_release(struct cxl_ep *ep)
{
@@ -523,13 +529,13 @@ static const struct device_type cxl_port_type = {
.groups = cxl_port_attribute_groups,
};
-bool is_cxl_port(struct device *dev)
+bool is_cxl_port(const struct device *dev)
{
return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
-struct cxl_port *to_cxl_port(struct device *dev)
+struct cxl_port *to_cxl_port(const struct device *dev)
{
if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
"not a cxl_port device\n"))
@@ -583,6 +589,29 @@ static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}
+static void cxl_unlink_parent_dport(void *_port)
+{
+ struct cxl_port *port = _port;
+
+ sysfs_remove_link(&port->dev.kobj, "parent_dport");
+}
+
+static int devm_cxl_link_parent_dport(struct device *host,
+ struct cxl_port *port,
+ struct cxl_dport *parent_dport)
+{
+ int rc;
+
+ if (!parent_dport)
+ return 0;
+
+ rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport->kobj,
+ "parent_dport");
+ if (rc)
+ return rc;
+ return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
+}
+
static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport,
@@ -692,6 +721,10 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
if (rc)
return ERR_PTR(rc);
+ rc = devm_cxl_link_parent_dport(host, port, parent_dport);
+ if (rc)
+ return ERR_PTR(rc);
+
return port;
err:
@@ -789,41 +822,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false;
}
-/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
-static int match_root_child(struct device *dev, const void *match)
+struct cxl_port *find_cxl_root(struct cxl_port *port)
{
- const struct device *iter = NULL;
- struct cxl_dport *dport;
- struct cxl_port *port;
-
- if (!dev_is_cxl_root_child(dev))
- return 0;
+ struct cxl_port *iter = port;
- port = to_cxl_port(dev);
- iter = match;
- while (iter) {
- dport = cxl_find_dport_by_dev(port, iter);
- if (dport)
- break;
- iter = iter->parent;
- }
+ while (iter && !is_cxl_root(iter))
+ iter = to_cxl_port(iter->dev.parent);
- return !!iter;
-}
-
-struct cxl_port *find_cxl_root(struct device *dev)
-{
- struct device *port_dev;
- struct cxl_port *root;
-
- port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
- if (!port_dev)
+ if (!iter)
return NULL;
-
- root = to_cxl_port(port_dev->parent);
- get_device(&root->dev);
- put_device(port_dev);
- return root;
+ get_device(&iter->dev);
+ return iter;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
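
The reworked find_cxl_root() above replaces a bus-wide device search with a plain walk up the port hierarchy until a root port is reached. A minimal userspace sketch of that ancestor-walk pattern, with a hypothetical node type standing in for struct cxl_port (not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for struct cxl_port: each node knows its parent. */
struct node {
	const char *name;
	struct node *parent;
	bool is_root;
};

/* Walk ->parent until a root is found, mirroring the new find_cxl_root(). */
static struct node *find_root(struct node *n)
{
	struct node *iter = n;

	while (iter && !iter->is_root)
		iter = iter->parent;
	return iter;	/* NULL if the chain never reaches a root */
}

int main(void)
{
	struct node root = { "root0", NULL, true };
	struct node sw   = { "port1", &root, false };
	struct node ep   = { "endpoint2", &sw, false };
	struct node *r = find_root(&ep);

	printf("root of %s: %s\n", ep.name, r ? r->name : "(none)");
	return 0;
}
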
@@ -1137,7 +1146,7 @@ static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
}
/*
- * All users of grandparent() are using it to walk PCIe-like swich port
+ * All users of grandparent() are using it to walk PCIe-like switch port
* hierarchy. A PCIe switch is comprised of a bridge device representing the
* upstream switch port and N bridges representing downstream switch ports. When
* bridges stack the grand-parent of a downstream switch port is another
@@ -1164,6 +1173,7 @@ static void delete_endpoint(void *data)
device_lock(parent);
if (parent->driver && !endpoint->dead) {
+ devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
@@ -1179,6 +1189,7 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
get_device(&endpoint->dev);
dev_set_drvdata(dev, endpoint);
+ cxlmd->depth = endpoint->depth;
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
@@ -1194,6 +1205,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
*/
static void delete_switch_port(struct cxl_port *port)
{
+ devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
devm_release_action(port->dev.parent, cxl_unlink_uport, port);
devm_release_action(port->dev.parent, unregister_port, port);
}
@@ -1212,50 +1224,55 @@ static void reap_dports(struct cxl_port *port)
}
}
+struct detach_ctx {
+ struct cxl_memdev *cxlmd;
+ int depth;
+};
+
+static int port_has_memdev(struct device *dev, const void *data)
+{
+ const struct detach_ctx *ctx = data;
+ struct cxl_port *port;
+
+ if (!is_cxl_port(dev))
+ return 0;
+
+ port = to_cxl_port(dev);
+ if (port->depth != ctx->depth)
+ return 0;
+
+ return !!cxl_ep_load(port, ctx->cxlmd);
+}
+
static void cxl_detach_ep(void *data)
{
struct cxl_memdev *cxlmd = data;
- struct device *iter;
- for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
- struct device *dport_dev = grandparent(iter);
+ for (int i = cxlmd->depth - 1; i >= 1; i--) {
struct cxl_port *port, *parent_port;
+ struct detach_ctx ctx = {
+ .cxlmd = cxlmd,
+ .depth = i,
+ };
+ struct device *dev;
struct cxl_ep *ep;
bool died = false;
- if (!dport_dev)
- break;
-
- port = find_cxl_port(dport_dev, NULL);
- if (!port)
- continue;
-
- if (is_cxl_root(port)) {
- put_device(&port->dev);
+ dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
+ port_has_memdev);
+ if (!dev)
continue;
- }
+ port = to_cxl_port(dev);
parent_port = to_cxl_port(port->dev.parent);
device_lock(&parent_port->dev);
- if (!parent_port->dev.driver) {
- /*
- * The bottom-up race to delete the port lost to a
- * top-down port disable, give up here, because the
- * parent_port ->remove() will have cleaned up all
- * descendants.
- */
- device_unlock(&parent_port->dev);
- put_device(&port->dev);
- continue;
- }
-
device_lock(&port->dev);
ep = cxl_ep_load(port, cxlmd);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
cxl_ep_remove(port, ep);
if (ep && !port->dead && xa_empty(&port->endpoints) &&
- !is_cxl_root(parent_port)) {
+ !is_cxl_root(parent_port) && parent_port->dev.driver) {
/*
* This was the last ep attached to a dynamically
* enumerated port. Block new cxl_add_ep() and garbage
@@ -1591,6 +1608,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
}
cxlrd->calc_hb = calc_hb;
+ mutex_init(&cxlrd->range_lock);
cxld = &cxlsd->cxld;
cxld->dev.type = &cxl_decoder_root_type;
@@ -1826,7 +1844,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv)
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
-static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
cxl_device_id(dev));
@@ -1884,7 +1902,7 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
/* for user tooling to ensure port disable work has completed */
-static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
+static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
if (sysfs_streq(buf, "1")) {
flush_workqueue(cxl_bus_wq);
@@ -1974,6 +1992,6 @@ static void cxl_core_exit(void)
debugfs_remove_recursive(cxl_debugfs);
}
-module_init(cxl_core_init);
+subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 02f28da519e3..f822de44bee0 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
+#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -45,7 +46,10 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
- rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
+ if (cxlr->mode != CXL_DECODER_PMEM)
+ rc = sysfs_emit(buf, "\n");
+ else
+ rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
up_read(&cxl_region_rwsem);
return rc;
@@ -130,8 +134,12 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_endpoint_decoder *cxled = p->targets[i];
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_ep *ep;
- int rc;
+ int rc = 0;
+
+ if (cxlds->rcd)
+ goto endpoint_reset;
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@@ -143,11 +151,13 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- rc = cxld->reset(cxld);
+ if (cxld->reset)
+ rc = cxld->reset(cxld);
if (rc)
return rc;
}
+endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld);
if (rc)
return rc;
@@ -156,6 +166,22 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
return 0;
}
+static int commit_decoder(struct cxl_decoder *cxld)
+{
+ struct cxl_switch_decoder *cxlsd = NULL;
+
+ if (cxld->commit)
+ return cxld->commit(cxld);
+
+ if (is_switch_decoder(&cxld->dev))
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+
+ if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
+ "->commit() is required\n"))
+ return -ENXIO;
+ return 0;
+}
+
static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
@@ -174,8 +200,7 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
iter = to_cxl_port(iter->dev.parent)) {
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- if (cxld->commit)
- rc = cxld->commit(cxld);
+ rc = commit_decoder(cxld);
if (rc)
break;
}
@@ -186,7 +211,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- cxld->reset(cxld);
+ if (cxld->reset)
+ cxld->reset(cxld);
}
cxled->cxld.reset(&cxled->cxld);
@@ -285,8 +311,12 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
struct device *dev = kobj_to_dev(kobj);
struct cxl_region *cxlr = to_cxl_region(dev);
+ /*
+ * Support tooling that expects to find a 'uuid' attribute for all
+ * regions regardless of mode.
+ */
if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
- return 0;
+ return 0444;
return a->mode;
}
@@ -399,7 +429,7 @@ static ssize_t interleave_granularity_store(struct device *dev,
* When the host-bridge is interleaved, disallow region granularity !=
* root granularity. Regions with a granularity less than the root
* interleave result in needing multiple endpoints to support a single
- * slot in the interleave (possible to suport in the future). Regions
+ * slot in the interleave (possible to support in the future). Regions
* with a granularity greater than the root interleave result in invalid
* DPA translations (invalid to support).
*/
@@ -443,6 +473,15 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(resource);
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
+}
+static DEVICE_ATTR_RO(mode);
+
static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
@@ -493,7 +532,12 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr)
if (device_is_registered(&cxlr->dev))
lockdep_assert_held_write(&cxl_region_rwsem);
if (p->res) {
- remove_resource(p->res);
+ /*
+ * Autodiscovered regions may not have been able to insert their
+ * resource.
+ */
+ if (p->res->parent)
+ remove_resource(p->res);
kfree(p->res);
p->res = NULL;
}
@@ -570,6 +614,7 @@ static struct attribute *cxl_region_attrs[] = {
&dev_attr_interleave_granularity.attr,
&dev_attr_resource.attr,
&dev_attr_size.attr,
+ &dev_attr_mode.attr,
NULL,
};
@@ -991,10 +1036,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
int i, distance;
/*
- * Passthrough ports impose no distance requirements between
+ * Passthrough decoders impose no distance requirements between
* peers
*/
- if (port->nr_dports == 1)
+ if (cxl_rr->nr_targets == 1)
distance = 0;
else
distance = p->nr_targets / cxl_rr->nr_targets;
@@ -1073,12 +1118,35 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return rc;
}
- cxld->interleave_ways = iw;
- cxld->interleave_granularity = ig;
- cxld->hpa_range = (struct range) {
- .start = p->res->start,
- .end = p->res->end,
- };
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ if (cxld->interleave_ways != iw ||
+ cxld->interleave_granularity != ig ||
+ cxld->hpa_range.start != p->res->start ||
+ cxld->hpa_range.end != p->res->end ||
+ ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
+ dev_err(&cxlr->dev,
+ "%s:%s %s expected iw: %d ig: %d %pr\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ __func__, iw, ig, p->res);
+ dev_err(&cxlr->dev,
+ "%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ __func__, cxld->interleave_ways,
+ cxld->interleave_granularity,
+ (cxld->flags & CXL_DECODER_F_ENABLE) ?
+ "enabled" :
+ "disabled",
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return -ENXIO;
+ }
+ } else {
+ cxld->interleave_ways = iw;
+ cxld->interleave_granularity = ig;
+ cxld->hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+ }
dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
dev_name(&port->dev), iw, ig);
add_target:
@@ -1089,7 +1157,17 @@ add_target:
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
return -ENXIO;
}
- cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
+ dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlsd->cxld.dev),
+ dev_name(ep->dport->dport),
+ cxl_rr->nr_targets_set);
+ return -ENXIO;
+ }
+ } else
+ cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
inc = 1;
out_target_set:
cxl_rr->nr_targets_set += inc;
@@ -1126,14 +1204,26 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd;
struct cxl_port *iter;
struct cxl_ep *ep;
int i;
+ /*
+ * In the auto-discovery case skip automatic teardown since the
+ * address space is already active
+ */
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
+ return;
+
for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+
+ if (cxlds->rcd)
+ continue;
iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1149,22 +1239,32 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_dev_state *cxlds;
+ int i, rc, rch = 0, vh = 0;
struct cxl_memdev *cxlmd;
struct cxl_port *iter;
struct cxl_ep *ep;
- int i, rc;
for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+
+ /* validate that all targets agree on topology */
+ if (!cxlds->rcd) {
+ vh++;
+ } else {
+ rch++;
+ continue;
+ }
iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
/*
- * Descend the topology tree programming targets while
- * looking for conflicts.
+ * Descend the topology tree programming / validating
+ * targets while looking for conflicts.
*/
for (ep = cxl_ep_load(iter, cxlmd); iter;
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
@@ -1176,32 +1276,22 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
}
}
+ if (rch && vh) {
+ dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
+ cxl_region_teardown_targets(cxlr);
+ return -ENXIO;
+ }
+
return 0;
}
-static int cxl_region_attach(struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled, int pos)
+static int cxl_region_validate_position(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ int pos)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *ep_port, *root_port, *iter;
struct cxl_region_params *p = &cxlr->params;
- struct cxl_dport *dport;
- int i, rc = -ENXIO;
-
- if (cxled->mode == CXL_DECODER_DEAD) {
- dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
- return -ENODEV;
- }
-
- /* all full of members, or interleave config not established? */
- if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
- dev_dbg(&cxlr->dev, "region already active\n");
- return -EBUSY;
- } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
- dev_dbg(&cxlr->dev, "interleave config missing\n");
- return -ENXIO;
- }
+ int i;
if (pos < 0 || pos >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
@@ -1240,6 +1330,256 @@ static int cxl_region_attach(struct cxl_region *cxlr,
}
}
+ return 0;
+}
+
+static int cxl_region_attach_position(struct cxl_region *cxlr,
+ struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled,
+ const struct cxl_dport *dport, int pos)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *iter;
+ int rc;
+
+ if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+ return -ENXIO;
+ }
+
+ for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+ return rc;
+}
+
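
cxl_region_attach_position() above programs every port between the endpoint and the root and, on failure, walks the path again to detach whatever was attached. A simplified standalone sketch of that attach-with-rollback pattern (hypothetical port chain, not the kernel API; the kernel re-walks the full chain because its detach helper tolerates ports that were never attached, while this sketch only unwinds what succeeded):

#include <stdio.h>

#define NPORTS 3

/* Stand-ins for cxl_port_attach_region()/cxl_port_detach_region(). */
static int attach(int port)
{
	if (port == 2)		/* simulate a failure at the last hop */
		return -1;
	printf("attach port %d\n", port);
	return 0;
}

static void detach(int port)
{
	printf("detach port %d\n", port);
}

int main(void)
{
	int i, rc = 0;

	for (i = 0; i < NPORTS; i++) {
		rc = attach(i);
		if (rc)
			break;
	}

	if (rc) {
		/* unwind everything attached so far, as the err: path does */
		for (i = i - 1; i >= 0; i--)
			detach(i);
	}
	return rc ? 1 : 0;
}
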
+static int cxl_region_attach_auto(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (cxled->state != CXL_DECODER_STATE_AUTO) {
+ dev_err(&cxlr->dev,
+ "%s: unable to add decoder to autodetected region\n",
+ dev_name(&cxled->cxld.dev));
+ return -EINVAL;
+ }
+
+ if (pos >= 0) {
+ dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
+ dev_name(&cxled->cxld.dev), pos);
+ return -EINVAL;
+ }
+
+ if (p->nr_targets >= p->interleave_ways) {
+ dev_err(&cxlr->dev, "%s: no more target slots available\n",
+ dev_name(&cxled->cxld.dev));
+ return -ENXIO;
+ }
+
+ /*
+ * Temporarily record the endpoint decoder into the target array. Yes,
+ * this means that userspace can view devices in the wrong position
+ * before the region activates, and must be careful to understand when
+ * it might be racing region autodiscovery.
+ */
+ pos = p->nr_targets;
+ p->targets[pos] = cxled;
+ cxled->pos = pos;
+ p->nr_targets++;
+
+ return 0;
+}
+
+static struct cxl_port *next_port(struct cxl_port *port)
+{
+ if (!port->parent_dport)
+ return NULL;
+ return port->parent_dport->port;
+}
+
+static int decoder_match_range(struct device *dev, void *data)
+{
+ struct cxl_endpoint_decoder *cxled = data;
+ struct cxl_switch_decoder *cxlsd;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
+}
+
+static void find_positions(const struct cxl_switch_decoder *cxlsd,
+ const struct cxl_port *iter_a,
+ const struct cxl_port *iter_b, int *a_pos,
+ int *b_pos)
+{
+ int i;
+
+ for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
+ if (cxlsd->target[i] == iter_a->parent_dport)
+ *a_pos = i;
+ else if (cxlsd->target[i] == iter_b->parent_dport)
+ *b_pos = i;
+ if (*a_pos >= 0 && *b_pos >= 0)
+ break;
+ }
+}
+
+static int cmp_decode_pos(const void *a, const void *b)
+{
+ struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
+ struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
+ struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
+ struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
+ struct cxl_port *port_a = cxled_to_port(cxled_a);
+ struct cxl_port *port_b = cxled_to_port(cxled_b);
+ struct cxl_port *iter_a, *iter_b, *port = NULL;
+ struct cxl_switch_decoder *cxlsd;
+ struct device *dev;
+ int a_pos, b_pos;
+ unsigned int seq;
+
+ /* Exit early if any prior sorting failed */
+ if (cxled_a->pos < 0 || cxled_b->pos < 0)
+ return 0;
+
+ /*
+ * Walk up the hierarchy to find a shared port, find the decoder that
+ * maps the range, compare the relative position of those dport
+ * mappings.
+ */
+ for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
+ struct cxl_port *next_a, *next_b;
+
+ next_a = next_port(iter_a);
+ if (!next_a)
+ break;
+
+ for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
+ next_b = next_port(iter_b);
+ if (next_a != next_b)
+ continue;
+ port = next_a;
+ break;
+ }
+
+ if (port)
+ break;
+ }
+
+ if (!port) {
+ dev_err(cxlmd_a->dev.parent,
+ "failed to find shared port with %s\n",
+ dev_name(cxlmd_b->dev.parent));
+ goto err;
+ }
+
+ dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
+ if (!dev) {
+ struct range *range = &cxled_a->cxld.hpa_range;
+
+ dev_err(port->uport,
+ "failed to find decoder that maps %#llx-%#llx\n",
+ range->start, range->end);
+ goto err;
+ }
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ do {
+ seq = read_seqbegin(&cxlsd->target_lock);
+ find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
+ } while (read_seqretry(&cxlsd->target_lock, seq));
+
+ put_device(dev);
+
+ if (a_pos < 0 || b_pos < 0) {
+ dev_err(port->uport,
+ "failed to find shared decoder for %s and %s\n",
+ dev_name(cxlmd_a->dev.parent),
+ dev_name(cxlmd_b->dev.parent));
+ goto err;
+ }
+
+ dev_dbg(port->uport, "%s comes %s %s\n", dev_name(cxlmd_a->dev.parent),
+ a_pos - b_pos < 0 ? "before" : "after",
+ dev_name(cxlmd_b->dev.parent));
+
+ return a_pos - b_pos;
+err:
+ cxled_a->pos = -1;
+ return 0;
+}
+
+static int cxl_region_sort_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+ sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
+ NULL);
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ /*
+ * Record that sorting failed, but still continue to restore
+ * cxled->pos with its ->targets[] position so that follow-on
+ * code paths can reliably do p->targets[cxled->pos] to
+ * self-reference their entry.
+ */
+ if (cxled->pos < 0)
+ rc = -ENXIO;
+ cxled->pos = i;
+ }
+
+ dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
+ return rc;
+}
+
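
cxl_region_sort_targets() above sorts the temporarily recorded endpoint decoders by decode position and then rewrites ->pos from the resulting array index, so that p->targets[cxled->pos] is always self-consistent. A minimal userspace analog of that "sort, then renumber by index" pattern using qsort() (hypothetical item type, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an endpoint decoder: decode_pos is the order
 * discovered from the topology, pos is the final slot in the region. */
struct target {
	int decode_pos;
	int pos;
};

static int cmp_decode_pos(const void *a, const void *b)
{
	const struct target *ta = *(const struct target * const *)a;
	const struct target *tb = *(const struct target * const *)b;

	return ta->decode_pos - tb->decode_pos;
}

int main(void)
{
	struct target t2 = { .decode_pos = 2 };
	struct target t0 = { .decode_pos = 0 };
	struct target t1 = { .decode_pos = 1 };
	struct target *targets[] = { &t2, &t0, &t1 };
	int i;

	qsort(targets, 3, sizeof(targets[0]), cmp_decode_pos);

	/* Renumber ->pos so targets[t->pos] == t, as the kernel helper does */
	for (i = 0; i < 3; i++)
		targets[i]->pos = i;

	for (i = 0; i < 3; i++)
		printf("slot %d: decode_pos %d\n", i, targets[i]->decode_pos);
	return 0;
}
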
+static int cxl_region_attach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_port *ep_port, *root_port;
+ struct cxl_dport *dport;
+ int rc = -ENXIO;
+
+ if (cxled->mode != cxlr->mode) {
+ dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
+ dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
+ return -EINVAL;
+ }
+
+ if (cxled->mode == CXL_DECODER_DEAD) {
+ dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
+ return -ENODEV;
+ }
+
+ /* all full of members, or interleave config not established? */
+ if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "region already active\n");
+ return -EBUSY;
+ } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "interleave config missing\n");
+ return -ENXIO;
+ }
+
ep_port = cxled_to_port(cxled);
root_port = cxlrd_to_port(cxlrd);
dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
@@ -1250,13 +1590,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO;
}
- if (cxlrd->calc_hb(cxlrd, pos) != dport) {
- dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
- dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
- dev_name(&cxlrd->cxlsd.cxld.dev));
- return -ENXIO;
- }
-
if (cxled->cxld.target_type != cxlr->type) {
dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
@@ -1280,13 +1613,58 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -EINVAL;
}
- for (iter = ep_port; !is_cxl_root(iter);
- iter = to_cxl_port(iter->dev.parent)) {
- rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ int i;
+
+ rc = cxl_region_attach_auto(cxlr, cxled, pos);
if (rc)
- goto err;
+ return rc;
+
+ /* await more targets to arrive... */
+ if (p->nr_targets < p->interleave_ways)
+ return 0;
+
+ /*
+ * All targets are here, which implies all PCI enumeration that
+ * affects this region has been completed. Walk the topology to
+ * sort the devices into their relative region decode position.
+ */
+ rc = cxl_region_sort_targets(cxlr);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ ep_port = cxled_to_port(cxled);
+ dport = cxl_find_dport_by_dev(root_port,
+ ep_port->host_bridge);
+ rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
+ dport, i);
+ if (rc)
+ return rc;
+ }
+
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+ return rc;
+
+ /*
+ * If target setup succeeds in the autodiscovery case
+ * then the region is already committed.
+ */
+ p->state = CXL_CONFIG_COMMIT;
+
+ return 0;
}
+ rc = cxl_region_validate_position(cxlr, cxled, pos);
+ if (rc)
+ return rc;
+
+ rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
+ if (rc)
+ return rc;
+
p->targets[pos] = cxled;
cxled->pos = pos;
p->nr_targets++;
@@ -1296,6 +1674,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc)
goto err_decrement;
p->state = CXL_CONFIG_ACTIVE;
+ set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
}
cxled->cxld.interleave_ways = p->interleave_ways;
@@ -1309,10 +1688,8 @@ static int cxl_region_attach(struct cxl_region *cxlr,
err_decrement:
p->nr_targets--;
-err:
- for (iter = ep_port; !is_cxl_root(iter);
- iter = to_cxl_port(iter->dev.parent))
- cxl_port_detach_region(iter, cxlr, cxled);
+ cxled->pos = -1;
+ p->targets[pos] = NULL;
return rc;
}
@@ -1384,31 +1761,23 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
up_write(&cxl_region_rwsem);
}
-static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
+static int attach_target(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ unsigned int state)
{
- struct device *dev;
- int rc;
-
- dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
- if (!dev)
- return -ENODEV;
-
- if (!is_endpoint_decoder(dev)) {
- put_device(dev);
- return -EINVAL;
- }
+ int rc = 0;
- rc = down_write_killable(&cxl_region_rwsem);
+ if (state == TASK_INTERRUPTIBLE)
+ rc = down_write_killable(&cxl_region_rwsem);
+ else
+ down_write(&cxl_region_rwsem);
if (rc)
- goto out;
+ return rc;
+
down_read(&cxl_dpa_rwsem);
- rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
- if (rc == 0)
- set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+ rc = cxl_region_attach(cxlr, cxled, pos);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
-out:
- put_device(dev);
return rc;
}
@@ -1446,8 +1815,23 @@ static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
if (sysfs_streq(buf, "\n"))
rc = detach_target(cxlr, pos);
- else
- rc = attach_target(cxlr, buf, pos);
+ else {
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
+ if (!dev)
+ return -ENODEV;
+
+ if (!is_endpoint_decoder(dev)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
+ TASK_INTERRUPTIBLE);
+out:
+ put_device(dev);
+ }
if (rc < 0)
return rc;
@@ -1651,6 +2035,15 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
struct device *dev;
int rc;
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
+ default:
+ dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+ return ERR_PTR(-EINVAL);
+ }
+
cxlr = cxl_region_alloc(cxlrd, id);
if (IS_ERR(cxlr))
return cxlr;
@@ -1679,12 +2072,38 @@ err:
return ERR_PTR(rc);
}
+static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
+{
+ return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
+}
+
static ssize_t create_pmem_region_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ return __create_region_show(to_cxl_root_decoder(dev), buf);
+}
- return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
+static ssize_t create_ram_region_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return __create_region_show(to_cxl_root_decoder(dev), buf);
+}
+
+static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
+ enum cxl_decoder_mode mode, int id)
+{
+ int rc;
+
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0)
+ return ERR_PTR(rc);
+
+ if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
+ memregion_free(rc);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_EXPANDER);
}
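
__create_region() above reserves the decoder's next region id with a compare-and-exchange: a fresh id is allocated first, and the swap only succeeds if the caller-supplied id still matches cxlrd->region_id, so two racing writers cannot both claim the same id. A hedged, standalone C11 sketch of that reservation pattern (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int region_id = 0;	/* next id advertised to userspace */
static int next_free_id = 1;		/* stand-in for memregion_alloc() */

/* Claim @id: only succeeds if @id is still the advertised next id. */
static int create_region(int id)
{
	int new_id = next_free_id++;	/* allocate the replacement id first */
	int expected = id;

	if (!atomic_compare_exchange_strong(&region_id, &expected, new_id)) {
		/* lost the race; a real implementation would free new_id */
		return -1;
	}
	return id;	/* the caller now owns region 'id' */
}

int main(void)
{
	printf("first claim of id 0: %d\n", create_region(0));	/* succeeds */
	printf("stale claim of id 0: %d\n", create_region(0));	/* fails: -1 */
	return 0;
}
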
static ssize_t create_pmem_region_store(struct device *dev,
@@ -1693,29 +2112,39 @@ static ssize_t create_pmem_region_store(struct device *dev,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
struct cxl_region *cxlr;
- int id, rc;
+ int rc, id;
rc = sscanf(buf, "region%d\n", &id);
if (rc != 1)
return -EINVAL;
- rc = memregion_alloc(GFP_KERNEL);
- if (rc < 0)
- return rc;
+ cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
- if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
- memregion_free(rc);
- return -EBUSY;
- }
+ return len;
+}
+DEVICE_ATTR_RW(create_pmem_region);
+
+static ssize_t create_ram_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_region *cxlr;
+ int rc, id;
- cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
- CXL_DECODER_EXPANDER);
+ rc = sscanf(buf, "region%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
return len;
}
-DEVICE_ATTR_RW(create_pmem_region);
+DEVICE_ATTR_RW(create_ram_region);
static ssize_t region_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1809,6 +2238,130 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+struct cxl_poison_context {
+ struct cxl_port *port;
+ enum cxl_decoder_mode mode;
+ u64 offset;
+};
+
+static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
+ struct cxl_poison_context *ctx)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ u64 offset, length;
+ int rc = 0;
+
+ /*
+ * Collect poison for the remaining unmapped resources
+ * after poison is collected by committed endpoints.
+ *
+ * Knowing that PMEM must always follow RAM, get poison
+ * for unmapped resources based on the last decoder's mode:
+ * ram: scan remains of ram range, then any pmem range
+ * pmem: scan remains of pmem range
+ */
+
+ if (ctx->mode == CXL_DECODER_RAM) {
+ offset = ctx->offset;
+ length = resource_size(&cxlds->ram_res) - offset;
+ rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ if (rc == -EFAULT)
+ rc = 0;
+ if (rc)
+ return rc;
+ }
+ if (ctx->mode == CXL_DECODER_PMEM) {
+ offset = ctx->offset;
+ length = resource_size(&cxlds->dpa_res) - offset;
+ if (!length)
+ return 0;
+ } else if (resource_size(&cxlds->pmem_res)) {
+ offset = cxlds->pmem_res.start;
+ length = resource_size(&cxlds->pmem_res);
+ } else {
+ return 0;
+ }
+
+ return cxl_mem_get_poison(cxlmd, offset, length, NULL);
+}
+
+static int poison_by_decoder(struct device *dev, void *arg)
+{
+ struct cxl_poison_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ u64 offset, length;
+ int rc = 0;
+
+ if (!is_endpoint_decoder(dev))
+ return rc;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return rc;
+
+ /*
+ * Regions are only created with single mode decoders: pmem or ram.
+ * Linux does not support mixed mode decoders. This means that
+ * reading poison per endpoint decoder adheres to the requirement
+ * that poison reads of pmem and ram must be separated.
+ * CXL 3.0 Spec 8.2.9.8.4.1
+ */
+ if (cxled->mode == CXL_DECODER_MIXED) {
+ dev_dbg(dev, "poison list read unsupported in mixed mode\n");
+ return rc;
+ }
+
+ cxlmd = cxled_to_memdev(cxled);
+ if (cxled->skip) {
+ offset = cxled->dpa_res->start - cxled->skip;
+ length = cxled->skip;
+ rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ rc = 0;
+ if (rc)
+ return rc;
+ }
+
+ offset = cxled->dpa_res->start;
+ length = cxled->dpa_res->end - offset + 1;
+ rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
+ if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ rc = 0;
+ if (rc)
+ return rc;
+
+ /* Iterate until commit_end is reached */
+ if (cxled->cxld.id == ctx->port->commit_end) {
+ ctx->offset = cxled->dpa_res->end + 1;
+ ctx->mode = cxled->mode;
+ return 1;
+ }
+
+ return 0;
+}
+
+int cxl_get_poison_by_endpoint(struct cxl_port *port)
+{
+ struct cxl_poison_context ctx;
+ int rc = 0;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ ctx = (struct cxl_poison_context) {
+ .port = port
+ };
+
+ rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
+ if (rc == 1)
+ rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport), &ctx);
+
+ up_read(&cxl_region_rwsem);
+ return rc;
+}
+
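
cxl_get_poison_by_endpoint() above walks the committed endpoint decoders in order, and poison_by_decoder() returns 1 once the decoder whose id matches the port's commit_end has been read, so the remaining unmapped DPA can then be scanned separately. A simplified standalone sketch of that "walk children until the sentinel, then handle the remainder" flow (hypothetical types and ranges, not the kernel API):

#include <stdio.h>

struct decoder {
	int id;
	unsigned long long start, end;	/* mapped DPA range, inclusive */
};

/* Stand-in for cxl_mem_get_poison(): just report the range scanned. */
static void scan_poison(unsigned long long start, unsigned long long end)
{
	printf("scan poison: %#llx-%#llx\n", start, end);
}

int main(void)
{
	struct decoder decoders[] = {
		{ .id = 0, .start = 0x0,    .end = 0x0fff },
		{ .id = 1, .start = 0x1000, .end = 0x1fff },
	};
	unsigned long long dpa_size = 0x4000;	/* whole device */
	int commit_end = 1;			/* last committed decoder id */
	unsigned long long offset = 0;
	int i;

	/* mapped ranges, decoder by decoder, stopping at commit_end */
	for (i = 0; i < 2; i++) {
		scan_poison(decoders[i].start, decoders[i].end);
		if (decoders[i].id == commit_end) {
			offset = decoders[i].end + 1;
			break;
		}
	}

	/* remaining unmapped capacity, as cxl_get_poison_unmapped() does */
	if (offset < dpa_size)
		scan_poison(offset, dpa_size - 1);
	return 0;
}
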
static struct lock_class_key cxl_pmem_region_key;
static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
@@ -1847,7 +2400,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
* bridge for one device is the same for all.
*/
if (i == 0) {
- cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb) {
cxlr_pmem = ERR_PTR(-ENODEV);
goto out;
@@ -1876,6 +2429,75 @@ out:
return cxlr_pmem;
}
+static void cxl_dax_region_release(struct device *dev)
+{
+ struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
+
+ kfree(cxlr_dax);
+}
+
+static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_dax_region_type = {
+ .name = "cxl_dax_region",
+ .release = cxl_dax_region_release,
+ .groups = cxl_dax_region_attribute_groups,
+};
+
+static bool is_cxl_dax_region(struct device *dev)
+{
+ return dev->type == &cxl_dax_region_type;
+}
+
+struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
+ "not a cxl_dax_region device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_dax_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
+
+static struct lock_class_key cxl_dax_region_key;
+
+static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_dax_region *cxlr_dax;
+ struct device *dev;
+
+ down_read(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT) {
+ cxlr_dax = ERR_PTR(-ENXIO);
+ goto out;
+ }
+
+ cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
+ if (!cxlr_dax) {
+ cxlr_dax = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ cxlr_dax->hpa_range.start = p->res->start;
+ cxlr_dax->hpa_range.end = p->res->end;
+
+ dev = &cxlr_dax->dev;
+ cxlr_dax->cxlr = cxlr;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlr->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_dax_region_type;
+out:
+ up_read(&cxl_region_rwsem);
+
+ return cxlr_dax;
+}
+
static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
@@ -1960,6 +2582,227 @@ err_bridge:
return rc;
}
+static void cxlr_dax_unregister(void *_cxlr_dax)
+{
+ struct cxl_dax_region *cxlr_dax = _cxlr_dax;
+
+ device_unregister(&cxlr_dax->dev);
+}
+
+static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
+{
+ struct cxl_dax_region *cxlr_dax;
+ struct device *dev;
+ int rc;
+
+ cxlr_dax = cxl_dax_region_alloc(cxlr);
+ if (IS_ERR(cxlr_dax))
+ return PTR_ERR(cxlr_dax);
+
+ dev = &cxlr_dax->dev;
+ rc = dev_set_name(dev, "dax_region%d", cxlr->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
+ cxlr_dax);
+err:
+ put_device(dev);
+ return rc;
+}
+
+static int match_decoder_by_range(struct device *dev, void *data)
+{
+ struct range *r1, *r2 = data;
+ struct cxl_root_decoder *cxlrd;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxlrd = to_cxl_root_decoder(dev);
+ r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ return range_contains(r1, r2);
+}
+
+static int match_region_by_range(struct device *dev, void *data)
+{
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ struct range *r = data;
+ int rc = 0;
+
+ if (!is_cxl_region(dev))
+ return 0;
+
+ cxlr = to_cxl_region(dev);
+ p = &cxlr->params;
+
+ down_read(&cxl_region_rwsem);
+ if (p->res && p->res->start == r->start && p->res->end == r->end)
+ rc = 1;
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
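
match_decoder_by_range() above selects the root decoder whose programmed HPA window fully contains the endpoint decoder's HPA range, while match_region_by_range() requires an exact start/end match. A minimal sketch of those two containment tests on inclusive [start, end] ranges (assumed semantics of the kernel's range_contains(), shown here with a local struct):

#include <stdio.h>
#include <stdbool.h>

struct range { unsigned long long start, end; };	/* inclusive bounds */

/* true if r1 fully contains r2 */
static bool range_contains(const struct range *r1, const struct range *r2)
{
	return r1->start <= r2->start && r1->end >= r2->end;
}

static bool range_equals(const struct range *r1, const struct range *r2)
{
	return r1->start == r2->start && r1->end == r2->end;
}

int main(void)
{
	struct range window = { 0x100000000ULL, 0x1ffffffffULL };	/* CXL window */
	struct range hpa    = { 0x100000000ULL, 0x17fffffffULL };	/* endpoint decode */

	printf("window contains hpa: %d\n", range_contains(&window, &hpa));
	printf("window equals hpa:   %d\n", range_equals(&window, &hpa));
	return 0;
}
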
+/* Establish an empty region covering the given HPA range */
+static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxlrd_to_port(cxlrd);
+ struct range *hpa = &cxled->cxld.hpa_range;
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ struct resource *res;
+ int rc;
+
+ do {
+ cxlr = __create_region(cxlrd, cxled->mode,
+ atomic_read(&cxlrd->region_id));
+ } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
+
+ if (IS_ERR(cxlr)) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s: %s failed assign region: %ld\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ __func__, PTR_ERR(cxlr));
+ return cxlr;
+ }
+
+ down_write(&cxl_region_rwsem);
+ p = &cxlr->params;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s: %s autodiscovery interrupted\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ __func__);
+ rc = -EBUSY;
+ goto err;
+ }
+
+ set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
+
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
+ dev_name(&cxlr->dev));
+ rc = insert_resource(cxlrd->res, res);
+ if (rc) {
+ /*
+ * Platform-firmware may not have split resources like "System
+ * RAM" on CXL window boundaries see cxl_region_iomem_release()
+ */
+ dev_warn(cxlmd->dev.parent,
+ "%s:%s: %s %s cannot insert resource\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ __func__, dev_name(&cxlr->dev));
+ }
+
+ p->res = res;
+ p->interleave_ways = cxled->cxld.interleave_ways;
+ p->interleave_granularity = cxled->cxld.interleave_granularity;
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
+ if (rc)
+ goto err;
+
+ dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
+ dev_name(&cxlr->dev), p->res, p->interleave_ways,
+ p->interleave_granularity);
+
+ /* ...to match put_device() in cxl_add_to_region() */
+ get_device(&cxlr->dev);
+ up_write(&cxl_region_rwsem);
+
+ return cxlr;
+
+err:
+ up_write(&cxl_region_rwsem);
+ devm_release_action(port->uport, unregister_region, cxlr);
+ return ERR_PTR(rc);
+}
+
+int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct range *hpa = &cxled->cxld.hpa_range;
+ struct cxl_decoder *cxld = &cxled->cxld;
+ struct device *cxlrd_dev, *region_dev;
+ struct cxl_root_decoder *cxlrd;
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ bool attach = false;
+ int rc;
+
+ cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
+ match_decoder_by_range);
+ if (!cxlrd_dev) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxld->dev),
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return -ENXIO;
+ }
+
+ cxlrd = to_cxl_root_decoder(cxlrd_dev);
+
+ /*
+ * Ensure that if multiple threads race to construct_region() for @hpa
+ * one does the construction and the others add to that.
+ */
+ mutex_lock(&cxlrd->range_lock);
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ match_region_by_range);
+ if (!region_dev) {
+ cxlr = construct_region(cxlrd, cxled);
+ region_dev = &cxlr->dev;
+ } else
+ cxlr = to_cxl_region(region_dev);
+ mutex_unlock(&cxlrd->range_lock);
+
+ rc = PTR_ERR_OR_ZERO(cxlr);
+ if (rc)
+ goto out;
+
+ attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
+
+ down_read(&cxl_region_rwsem);
+ p = &cxlr->params;
+ attach = p->state == CXL_CONFIG_COMMIT;
+ up_read(&cxl_region_rwsem);
+
+ if (attach) {
+ /*
+ * If device_attach() fails the range may still be active via
+ * the platform-firmware memory map, otherwise the driver for
+ * regions is local to this file, so driver matching can't fail.
+ */
+ if (device_attach(&cxlr->dev) < 0)
+ dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
+ p->res);
+ }
+
+ put_device(region_dev);
+out:
+ put_device(cxlrd_dev);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
+
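
cxl_add_to_region() above serializes autodiscovery with cxlrd->range_lock so that, when several endpoints race for the same HPA range, exactly one thread constructs the region and the others simply attach to it. A hedged userspace sketch of that find-or-create-under-a-lock pattern using pthreads (illustrative names and a fixed-size table, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static char regions[4][32];	/* stand-in for the decoder's child regions */
static int nr_regions;

/* Return the index of the region covering @name, creating it if needed. */
static int find_or_create(const char *name)
{
	int i;

	pthread_mutex_lock(&range_lock);
	for (i = 0; i < nr_regions; i++)
		if (!strcmp(regions[i], name))
			goto out;
	i = nr_regions++;
	snprintf(regions[i], sizeof(regions[i]), "%s", name);
	printf("constructed region for %s\n", name);
out:
	pthread_mutex_unlock(&range_lock);
	return i;
}

static void *endpoint(void *arg)
{
	/* two endpoints racing for the same HPA range land in one region */
	find_or_create("0x100000000-0x1ffffffff");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, endpoint, NULL);
	pthread_create(&b, NULL, endpoint, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("regions created: %d\n", nr_regions);
	return 0;
}
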
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
@@ -1967,7 +2810,7 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
if (!cpu_cache_has_invalidate_memregion()) {
if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
- dev_warn(
+ dev_warn_once(
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
@@ -1984,6 +2827,15 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return 0;
}
+static int is_system_ram(struct resource *res, void *arg)
+{
+ struct cxl_region *cxlr = arg;
+ struct cxl_region_params *p = &cxlr->params;
+
+ dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
+ return 1;
+}
+
static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
@@ -2017,6 +2869,17 @@ out:
switch (cxlr->mode) {
case CXL_DECODER_PMEM:
return devm_cxl_add_pmem_region(cxlr);
+ case CXL_DECODER_RAM:
+ /*
+	 * The region cannot be managed by CXL if any portion of
+	 * The region cannot be managed by CXL if any portion of
+ * it is already online as 'System RAM'
+ */
+ if (walk_iomem_res_desc(IORES_DESC_NONE,
+ IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+ p->res->start, p->res->end, cxlr,
+ is_system_ram) > 0)
+ return 0;
+ return devm_cxl_add_dax_region(cxlr);
default:
dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
cxlr->mode);
diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c
new file mode 100644
index 000000000000..d0403dc3c8ab
--- /dev/null
+++ b/drivers/cxl/core/trace.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include <cxl.h>
+#include "core.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int gran = p->interleave_granularity;
+ int ways = p->interleave_ways;
+ u64 offset;
+
+ /* Is the hpa within this region at all */
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in region\n", hpa);
+ return false;
+ }
+
+ /* Is the hpa in an expected chunk for its pos(-ition) */
+ offset = hpa - p->res->start;
+ offset = do_div(offset, gran * ways);
+ if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
+ return true;
+
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
+
+ return false;
+}
+
+static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
+ struct cxl_region_params *p = &cxlr->params;
+ int pos = cxled->pos;
+ u16 eig = 0;
+ u8 eiw = 0;
+
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
+ /*
+ * The device position in the region interleave set was removed
+ * from the offset at HPA->DPA translation. To reconstruct the
+ * HPA, place the 'pos' in the offset.
+ *
+ * The placement of 'pos' in the HPA is determined by interleave
+ * ways and granularity and is defined in the CXL Spec 3.0 Section
+ * 8.2.4.19.13 Implementation Note: Device Decode Logic
+ */
+
+ /* Remove the dpa base */
+ dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+
+ mask_upper = GENMASK_ULL(51, eig + 8);
+
+ if (eiw < 8) {
+ hpa_offset = (dpa_offset & mask_upper) << eiw;
+ hpa_offset |= pos << (eig + 8);
+ } else {
+ bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
+ bits_upper = bits_upper * 3;
+ hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
+ }
+
+ /* The lower bits remain unchanged */
+ hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+
+ /* Apply the hpa_offset to the region base address */
+ hpa = hpa_offset + p->res->start;
+
+ if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
+ return ULLONG_MAX;
+
+ return hpa;
+}
+
+u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd,
+ u64 dpa)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = NULL;
+
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxlmd == cxled_to_memdev(cxled))
+ break;
+ }
+ if (!cxled || cxlmd != cxled_to_memdev(cxled))
+ return ULLONG_MAX;
+
+ return cxl_dpa_to_hpa(dpa, cxlr, cxled);
+}
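
The HPA reconstruction in cxl_dpa_to_hpa() above re-inserts the device position into the interleaved address: the DPA bits above the granularity are shifted up by the encoded interleave ways, the position is placed directly above the granularity bits, and the low bits pass through unchanged. A standalone worked example of the power-of-two case (eiw < 8), using example values rather than the kernel helpers:

#include <stdio.h>
#include <stdint.h>

/* Rebuild an HPA offset from a DPA offset for a power-of-two interleave:
 * granularity = 256 << eig bytes, ways = 1 << eiw devices, pos = device slot. */
static uint64_t dpa_to_hpa_offset(uint64_t dpa_offset, int pos, int eig, int eiw)
{
	uint64_t low_mask = (1ULL << (eig + 8)) - 1;	/* bits within one chunk */
	uint64_t hpa_offset;

	hpa_offset = (dpa_offset & ~low_mask) << eiw;	/* upper bits make room */
	hpa_offset |= (uint64_t)pos << (eig + 8);	/* slot in the interleave */
	hpa_offset |= dpa_offset & low_mask;		/* offset inside the chunk */
	return hpa_offset;
}

int main(void)
{
	/* 2-way interleave (eiw=1), 256B granularity (eig=0), device in slot 1:
	 * the second 256B chunk on that device (DPA offset 0x100) is the fourth
	 * 256B chunk of the region (HPA offset 0x300). */
	printf("hpa offset = %#llx\n",
	       (unsigned long long)dpa_to_hpa_offset(0x100, 1, 0, 1));
	return 0;
}
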
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
new file mode 100644
index 000000000000..a0b5819bc70b
--- /dev/null
+++ b/drivers/cxl/core/trace.h
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cxl
+
+#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CXL_EVENTS_H
+
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+#include <asm-generic/unaligned.h>
+
+#include <cxl.h>
+#include <cxlmem.h>
+#include "core.h"
+
+#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0)
+#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1)
+#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2)
+#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3)
+#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4)
+#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5)
+#define CXL_RAS_UC_MEM_BE_PARITY BIT(6)
+#define CXL_RAS_UC_MEM_DATA_ECC BIT(7)
+#define CXL_RAS_UC_REINIT_THRESH BIT(8)
+#define CXL_RAS_UC_RSVD_ENCODE BIT(9)
+#define CXL_RAS_UC_POISON BIT(10)
+#define CXL_RAS_UC_RECV_OVERFLOW BIT(11)
+#define CXL_RAS_UC_INTERNAL_ERR BIT(14)
+#define CXL_RAS_UC_IDE_TX_ERR BIT(15)
+#define CXL_RAS_UC_IDE_RX_ERR BIT(16)
+
+#define show_uc_errs(status) __print_flags(status, " | ", \
+ { CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
+ { CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
+ { CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
+ { CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
+ { CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
+ { CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
+ { CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
+ { CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
+ { CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
+ { CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
+ { CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
+ { CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
+ { CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
+ { CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
+ { CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
+)
+
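
show_uc_errs() above relies on the tracing core's __print_flags() to render the RAS status bitmask as a " | " separated list of names. A hedged userspace approximation of that decoding in plain C (a small lookup table, not the tracepoint infrastructure):

#include <stdio.h>
#include <stdint.h>

struct flag_name { uint32_t bit; const char *name; };

/* Print the set bits of @status as "A | B | C", like __print_flags(). */
static void print_flags(uint32_t status, const struct flag_name *tbl, int n)
{
	const char *sep = "";
	int i;

	for (i = 0; i < n; i++) {
		if (status & tbl[i].bit) {
			printf("%s%s", sep, tbl[i].name);
			sep = " | ";
		}
	}
	printf("\n");
}

int main(void)
{
	/* a few of the RAS uncorrectable-error bits from the table above */
	const struct flag_name uc[] = {
		{ 1u << 0,  "Cache Data Parity Error" },
		{ 1u << 7,  "Memory Data ECC Error" },
		{ 1u << 10, "Received Poison From Peer" },
	};

	print_flags((1u << 7) | (1u << 10), uc, 3);
	return 0;
}
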
+TRACE_EVENT(cxl_aer_uncorrectable_error,
+ TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl),
+ TP_ARGS(cxlmd, status, fe, hl),
+ TP_STRUCT__entry(
+ __string(memdev, dev_name(&cxlmd->dev))
+ __string(host, dev_name(cxlmd->dev.parent))
+ __field(u64, serial)
+ __field(u32, status)
+ __field(u32, first_error)
+ __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
+ ),
+ TP_fast_assign(
+ __assign_str(memdev, dev_name(&cxlmd->dev));
+ __assign_str(host, dev_name(cxlmd->dev.parent));
+ __entry->serial = cxlmd->cxlds->serial;
+ __entry->status = status;
+ __entry->first_error = fe;
+ /*
+ * Embed the 512B headerlog data for user app retrieval and
+ * parsing, but no need to print this in the trace buffer.
+ */
+ memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
+ ),
+ TP_printk("memdev=%s host=%s serial=%lld: status: '%s' first_error: '%s'",
+ __get_str(memdev), __get_str(host), __entry->serial,
+ show_uc_errs(__entry->status),
+ show_uc_errs(__entry->first_error)
+ )
+);
+
+#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0)
+#define CXL_RAS_CE_MEM_DATA_ECC BIT(1)
+#define CXL_RAS_CE_CRC_THRESH BIT(2)
+#define CLX_RAS_CE_RETRY_THRESH BIT(3)
+#define CXL_RAS_CE_CACHE_POISON BIT(4)
+#define CXL_RAS_CE_MEM_POISON BIT(5)
+#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6)
+
+#define show_ce_errs(status) __print_flags(status, " | ", \
+ { CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
+ { CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
+ { CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
+ { CLX_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
+ { CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
+ { CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
+ { CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
+)
+
+TRACE_EVENT(cxl_aer_correctable_error,
+ TP_PROTO(const struct cxl_memdev *cxlmd, u32 status),
+ TP_ARGS(cxlmd, status),
+ TP_STRUCT__entry(
+ __string(memdev, dev_name(&cxlmd->dev))
+ __string(host, dev_name(cxlmd->dev.parent))
+ __field(u64, serial)
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(memdev, dev_name(&cxlmd->dev));
+ __assign_str(host, dev_name(cxlmd->dev.parent));
+ __entry->serial = cxlmd->cxlds->serial;
+ __entry->status = status;
+ ),
+ TP_printk("memdev=%s host=%s serial=%lld: status: '%s'",
+ __get_str(memdev), __get_str(host), __entry->serial,
+ show_ce_errs(__entry->status)
+ )
+);
+
+#define cxl_event_log_type_str(type) \
+ __print_symbolic(type, \
+ { CXL_EVENT_TYPE_INFO, "Informational" }, \
+ { CXL_EVENT_TYPE_WARN, "Warning" }, \
+ { CXL_EVENT_TYPE_FAIL, "Failure" }, \
+ { CXL_EVENT_TYPE_FATAL, "Fatal" })
+
+TRACE_EVENT(cxl_overflow,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_get_event_payload *payload),
+
+ TP_ARGS(cxlmd, log, payload),
+
+ TP_STRUCT__entry(
+ __string(memdev, dev_name(&cxlmd->dev))
+ __string(host, dev_name(cxlmd->dev.parent))
+ __field(int, log)
+ __field(u64, serial)
+ __field(u64, first_ts)
+ __field(u64, last_ts)
+ __field(u16, count)
+ ),
+
+ TP_fast_assign(
+ __assign_str(memdev, dev_name(&cxlmd->dev));
+ __assign_str(host, dev_name(cxlmd->dev.parent));
+ __entry->serial = cxlmd->cxlds->serial;
+ __entry->log = log;
+ __entry->count = le16_to_cpu(payload->overflow_err_count);
+ __entry->first_ts = le64_to_cpu(payload->first_overflow_timestamp);
+ __entry->last_ts = le64_to_cpu(payload->last_overflow_timestamp);
+ ),
+
+ TP_printk("memdev=%s host=%s serial=%lld: log=%s : %u records from %llu to %llu",
+ __get_str(memdev), __get_str(host), __entry->serial,
+ cxl_event_log_type_str(__entry->log), __entry->count,
+ __entry->first_ts, __entry->last_ts)
+
+);
+
+/*
+ * Common Event Record Format
+ * CXL 3.0 section 8.2.9.2.1; Table 8-42
+ */
+#define CXL_EVENT_RECORD_FLAG_PERMANENT BIT(2)
+#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED BIT(3)
+#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
+#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
+#define show_hdr_flags(flags) __print_flags(flags, " | ", \
+ { CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
+ { CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
+ { CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
+ { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
+)
+
+/*
+ * Define macros for the common header of each CXL event.
+ *
+ * Tracepoints using these macros must do 3 things:
+ *
+ * 1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
+ * 2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
+ * pass the dev, log, and CXL event header
+ * 3) Use CXL_EVT_TP_printk() instead of TP_printk()
+ *
+ * See the generic_event tracepoint as an example.
+ */
+#define CXL_EVT_TP_entry \
+ __string(memdev, dev_name(&cxlmd->dev)) \
+ __string(host, dev_name(cxlmd->dev.parent)) \
+ __field(int, log) \
+ __field_struct(uuid_t, hdr_uuid) \
+ __field(u64, serial) \
+ __field(u32, hdr_flags) \
+ __field(u16, hdr_handle) \
+ __field(u16, hdr_related_handle) \
+ __field(u64, hdr_timestamp) \
+ __field(u8, hdr_length) \
+ __field(u8, hdr_maint_op_class)
+
+#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
+ __assign_str(memdev, dev_name(&(cxlmd)->dev)); \
+ __assign_str(host, dev_name((cxlmd)->dev.parent)); \
+ __entry->log = (l); \
+ __entry->serial = (cxlmd)->cxlds->serial; \
+ memcpy(&__entry->hdr_uuid, &(hdr).id, sizeof(uuid_t)); \
+ __entry->hdr_length = (hdr).length; \
+ __entry->hdr_flags = get_unaligned_le24((hdr).flags); \
+ __entry->hdr_handle = le16_to_cpu((hdr).handle); \
+ __entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
+ __entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
+ __entry->hdr_maint_op_class = (hdr).maint_op_class
+
+#define CXL_EVT_TP_printk(fmt, ...) \
+ TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
+ "len=%d flags='%s' handle=%x related_handle=%x " \
+ "maint_op_class=%u : " fmt, \
+ __get_str(memdev), __get_str(host), __entry->serial, \
+ cxl_event_log_type_str(__entry->log), \
+ __entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
+ show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
+ __entry->hdr_related_handle, __entry->hdr_maint_op_class, \
+ ##__VA_ARGS__)
+
+TRACE_EVENT(cxl_generic_event,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_record_raw *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+ __array(u8, data, CXL_EVENT_RECORD_DATA_LENGTH)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ memcpy(__entry->data, &rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
+ ),
+
+ CXL_EVT_TP_printk("%s",
+ __print_hex(__entry->data, CXL_EVENT_RECORD_DATA_LENGTH))
+);
+
+/*
+ * Physical Address field masks
+ *
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ *
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CXL_DPA_FLAGS_MASK 0x3F
+#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
+
+#define CXL_DPA_VOLATILE BIT(0)
+#define CXL_DPA_NOT_REPAIRABLE BIT(1)
+#define show_dpa_flags(flags) __print_flags(flags, "|", \
+ { CXL_DPA_VOLATILE, "VOLATILE" }, \
+ { CXL_DPA_NOT_REPAIRABLE, "NOT_REPAIRABLE" } \
+)
+
+/*
+ * General Media Event Record - GMER
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT BIT(0)
+#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT BIT(1)
+#define CXL_GMER_EVT_DESC_POISON_LIST_OVERFLOW BIT(2)
+#define show_event_desc_flags(flags) __print_flags(flags, "|", \
+ { CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT, "UNCORRECTABLE_EVENT" }, \
+ { CXL_GMER_EVT_DESC_THRESHOLD_EVENT, "THRESHOLD_EVENT" }, \
+ { CXL_GMER_EVT_DESC_POISON_LIST_OVERFLOW, "POISON_LIST_OVERFLOW" } \
+)
+
+#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
+#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
+#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
+#define show_mem_event_type(type) __print_symbolic(type, \
+ { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+)
+
+#define CXL_GMER_TRANS_UNKNOWN 0x00
+#define CXL_GMER_TRANS_HOST_READ 0x01
+#define CXL_GMER_TRANS_HOST_WRITE 0x02
+#define CXL_GMER_TRANS_HOST_SCAN_MEDIA 0x03
+#define CXL_GMER_TRANS_HOST_INJECT_POISON 0x04
+#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB 0x05
+#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT 0x06
+#define show_trans_type(type) __print_symbolic(type, \
+ { CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
+ { CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
+ { CXL_GMER_TRANS_HOST_WRITE, "Host Write" }, \
+ { CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
+ { CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
+)
+
+#define CXL_GMER_VALID_CHANNEL BIT(0)
+#define CXL_GMER_VALID_RANK BIT(1)
+#define CXL_GMER_VALID_DEVICE BIT(2)
+#define CXL_GMER_VALID_COMPONENT BIT(3)
+#define show_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_GMER_VALID_RANK, "RANK" }, \
+ { CXL_GMER_VALID_DEVICE, "DEVICE" }, \
+ { CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
+)
+
+TRACE_EVENT(cxl_general_media,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_gen_media *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+ /* General Media */
+ __field(u64, dpa)
+ __field(u8, descriptor)
+ __field(u8, type)
+ __field(u8, transaction_type)
+ __field(u8, channel)
+ __field(u32, device)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u16, validity_flags)
+ /* Following are out of order to pack trace record */
+ __field(u8, rank)
+ __field(u8, dpa_flags)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+
+ /* General Media */
+ __entry->dpa = le64_to_cpu(rec->phys_addr);
+ __entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK;
+ /* Mask after flags have been parsed */
+ __entry->dpa &= CXL_DPA_MASK;
+ __entry->descriptor = rec->descriptor;
+ __entry->type = rec->type;
+ __entry->transaction_type = rec->transaction_type;
+ __entry->channel = rec->channel;
+ __entry->rank = rec->rank;
+ __entry->device = get_unaligned_le24(rec->device);
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ __entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
+ ),
+
+ CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
+ "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
+ "device=%x comp_id=%s validity_flags='%s'",
+ __entry->dpa, show_dpa_flags(__entry->dpa_flags),
+ show_event_desc_flags(__entry->descriptor),
+ show_mem_event_type(__entry->type),
+ show_trans_type(__entry->transaction_type),
+ __entry->channel, __entry->rank, __entry->device,
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_valid_flags(__entry->validity_flags)
+ )
+);
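
The CXL_DPA_FLAGS_MASK / CXL_DPA_MASK pair above splits the event's physical address field the same way TP_fast_assign() does: the low six bits carry the VOLATILE and NOT_REPAIRABLE flags, the remainder is the device physical address. A standalone sketch of that split; the SKETCH_ macros mirror the definitions above and are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_DPA_FLAGS_MASK		0x3FULL
#define SKETCH_DPA_MASK			(~SKETCH_DPA_FLAGS_MASK)
#define SKETCH_DPA_VOLATILE		(1ULL << 0)
#define SKETCH_DPA_NOT_REPAIRABLE	(1ULL << 1)

int main(void)
{
	uint64_t phys_addr = 0x1f40000041ULL;	/* example field value */
	uint64_t flags = phys_addr & SKETCH_DPA_FLAGS_MASK;

	/* Mask the flags off after they have been parsed, as above. */
	printf("dpa=%#llx volatile=%d not_repairable=%d\n",
	       (unsigned long long)(phys_addr & SKETCH_DPA_MASK),
	       !!(flags & SKETCH_DPA_VOLATILE),
	       !!(flags & SKETCH_DPA_NOT_REPAIRABLE));
	return 0;
}
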
+
+/*
+ * DRAM Event Record - DER
+ *
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+/*
+ * DRAM Event Record defines many fields the same as the General Media Event
+ * Record. Reuse those definitions as appropriate.
+ */
+#define CXL_DER_VALID_CHANNEL BIT(0)
+#define CXL_DER_VALID_RANK BIT(1)
+#define CXL_DER_VALID_NIBBLE BIT(2)
+#define CXL_DER_VALID_BANK_GROUP BIT(3)
+#define CXL_DER_VALID_BANK BIT(4)
+#define CXL_DER_VALID_ROW BIT(5)
+#define CXL_DER_VALID_COLUMN BIT(6)
+#define CXL_DER_VALID_CORRECTION_MASK BIT(7)
+#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_DER_VALID_RANK, "RANK" }, \
+ { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_DER_VALID_BANK, "BANK" }, \
+ { CXL_DER_VALID_ROW, "ROW" }, \
+ { CXL_DER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
+)
+
+TRACE_EVENT(cxl_dram,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_dram *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+ /* DRAM */
+ __field(u64, dpa)
+ __field(u8, descriptor)
+ __field(u8, type)
+ __field(u8, transaction_type)
+ __field(u8, channel)
+ __field(u16, validity_flags)
+ __field(u16, column) /* Out of order to pack trace record */
+ __field(u32, nibble_mask)
+ __field(u32, row)
+ __array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
+ __field(u8, rank) /* Out of order to pack trace record */
+ __field(u8, bank_group) /* Out of order to pack trace record */
+ __field(u8, bank) /* Out of order to pack trace record */
+ __field(u8, dpa_flags) /* Out of order to pack trace record */
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+
+ /* DRAM */
+ __entry->dpa = le64_to_cpu(rec->phys_addr);
+ __entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK;
+ __entry->dpa &= CXL_DPA_MASK;
+ __entry->descriptor = rec->descriptor;
+ __entry->type = rec->type;
+ __entry->transaction_type = rec->transaction_type;
+ __entry->validity_flags = get_unaligned_le16(rec->validity_flags);
+ __entry->channel = rec->channel;
+ __entry->rank = rec->rank;
+ __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
+ __entry->bank_group = rec->bank_group;
+ __entry->bank = rec->bank;
+ __entry->row = get_unaligned_le24(rec->row);
+ __entry->column = get_unaligned_le16(rec->column);
+ memcpy(__entry->cor_mask, &rec->correction_mask,
+ CXL_EVENT_DER_CORRECTION_MASK_SIZE);
+ ),
+
+ CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
+ "transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
+ "bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
+ "validity_flags='%s'",
+ __entry->dpa, show_dpa_flags(__entry->dpa_flags),
+ show_event_desc_flags(__entry->descriptor),
+ show_mem_event_type(__entry->type),
+ show_trans_type(__entry->transaction_type),
+ __entry->channel, __entry->rank, __entry->nibble_mask,
+ __entry->bank_group, __entry->bank,
+ __entry->row, __entry->column,
+ __print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
+ show_dram_valid_flags(__entry->validity_flags)
+ )
+);
+
+/*
+ * Memory Module Event Record - MMER
+ *
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CXL_MMER_HEALTH_STATUS_CHANGE 0x00
+#define CXL_MMER_MEDIA_STATUS_CHANGE 0x01
+#define CXL_MMER_LIFE_USED_CHANGE 0x02
+#define CXL_MMER_TEMP_CHANGE 0x03
+#define CXL_MMER_DATA_PATH_ERROR 0x04
+#define CXL_MMER_LSA_ERROR 0x05
+#define show_dev_evt_type(type) __print_symbolic(type, \
+ { CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
+ { CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
+ { CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
+ { CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
+ { CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_MMER_LSA_ERROR, "LSA Error" } \
+)
+
+/*
+ * Device Health Information - DHI
+ *
+ * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ */
+#define CXL_DHI_HS_MAINTENANCE_NEEDED BIT(0)
+#define CXL_DHI_HS_PERFORMANCE_DEGRADED BIT(1)
+#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED BIT(2)
+#define show_health_status_flags(flags) __print_flags(flags, "|", \
+ { CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
+ { CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
+ { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
+)
+
+#define CXL_DHI_MS_NORMAL 0x00
+#define CXL_DHI_MS_NOT_READY 0x01
+#define CXL_DHI_MS_WRITE_PERSISTENCY_LOST 0x02
+#define CXL_DHI_MS_ALL_DATA_LOST 0x03
+#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_POWER_LOSS 0x04
+#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_SHUTDOWN 0x05
+#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_IMMINENT 0x06
+#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_POWER_LOSS 0x07
+#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_SHUTDOWN 0x08
+#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_IMMINENT 0x09
+#define show_media_status(ms) __print_symbolic(ms, \
+ { CXL_DHI_MS_NORMAL, \
+ "Normal" }, \
+ { CXL_DHI_MS_NOT_READY, \
+ "Not Ready" }, \
+ { CXL_DHI_MS_WRITE_PERSISTENCY_LOST, \
+ "Write Persistency Lost" }, \
+ { CXL_DHI_MS_ALL_DATA_LOST, \
+ "All Data Lost" }, \
+ { CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_POWER_LOSS, \
+ "Write Persistency Loss in the Event of Power Loss" }, \
+ { CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_SHUTDOWN, \
+ "Write Persistency Loss in Event of Shutdown" }, \
+ { CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_IMMINENT, \
+ "Write Persistency Loss Imminent" }, \
+ { CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_POWER_LOSS, \
+ "All Data Loss in Event of Power Loss" }, \
+ { CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_SHUTDOWN, \
+ "All Data loss in the Event of Shutdown" }, \
+ { CXL_DHI_MS_WRITE_ALL_DATA_LOSS_IMMINENT, \
+ "All Data Loss Imminent" } \
+)
+
+#define CXL_DHI_AS_NORMAL 0x0
+#define CXL_DHI_AS_WARNING 0x1
+#define CXL_DHI_AS_CRITICAL 0x2
+#define show_two_bit_status(as) __print_symbolic(as, \
+ { CXL_DHI_AS_NORMAL, "Normal" }, \
+ { CXL_DHI_AS_WARNING, "Warning" }, \
+ { CXL_DHI_AS_CRITICAL, "Critical" } \
+)
+#define show_one_bit_status(as) __print_symbolic(as, \
+ { CXL_DHI_AS_NORMAL, "Normal" }, \
+ { CXL_DHI_AS_WARNING, "Warning" } \
+)
+
+#define CXL_DHI_AS_LIFE_USED(as) (as & 0x3)
+#define CXL_DHI_AS_DEV_TEMP(as) ((as & 0xC) >> 2)
+#define CXL_DHI_AS_COR_VOL_ERR_CNT(as) ((as & 0x10) >> 4)
+#define CXL_DHI_AS_COR_PER_ERR_CNT(as) ((as & 0x20) >> 5)
+
+TRACE_EVENT(cxl_memory_module,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_mem_module *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+
+ /* Memory Module Event */
+ __field(u8, event_type)
+
+ /* Device Health Info */
+ __field(u8, health_status)
+ __field(u8, media_status)
+ __field(u8, life_used)
+ __field(u32, dirty_shutdown_cnt)
+ __field(u32, cor_vol_err_cnt)
+ __field(u32, cor_per_err_cnt)
+ __field(s16, device_temp)
+ __field(u8, add_status)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+
+ /* Memory Module Event */
+ __entry->event_type = rec->event_type;
+
+ /* Device Health Info */
+ __entry->health_status = rec->info.health_status;
+ __entry->media_status = rec->info.media_status;
+ __entry->life_used = rec->info.life_used;
+ __entry->dirty_shutdown_cnt = get_unaligned_le32(rec->info.dirty_shutdown_cnt);
+ __entry->cor_vol_err_cnt = get_unaligned_le32(rec->info.cor_vol_err_cnt);
+ __entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
+ __entry->device_temp = get_unaligned_le16(rec->info.device_temp);
+ __entry->add_status = rec->info.add_status;
+ ),
+
+ CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
+ "as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
+ "as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
+ "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
+ show_dev_evt_type(__entry->event_type),
+ show_health_status_flags(__entry->health_status),
+ show_media_status(__entry->media_status),
+ show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
+ show_two_bit_status(CXL_DHI_AS_DEV_TEMP(__entry->add_status)),
+ show_one_bit_status(CXL_DHI_AS_COR_VOL_ERR_CNT(__entry->add_status)),
+ show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
+ __entry->life_used, __entry->device_temp,
+ __entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
+ __entry->cor_per_err_cnt
+ )
+);
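
The CXL_DHI_AS_*() helpers above unpack the Additional Status byte of the health info record: bits [1:0] are the life used status, bits [3:2] the device temperature status, bit 4 the corrected volatile error count status, and bit 5 the corrected persistent error count status. A standalone equivalent of that unpacking; field positions are copied from the macros above, the example value is invented:

#include <stdio.h>

int main(void)
{
	unsigned int add_status = 0x26;	/* example: binary 0010 0110 */

	printf("life_used=%u dev_temp=%u cor_vol=%u cor_per=%u\n",
	       add_status & 0x3,		/* bits [1:0] */
	       (add_status & 0xC) >> 2,		/* bits [3:2] */
	       (add_status & 0x10) >> 4,	/* bit 4 */
	       (add_status & 0x20) >> 5);	/* bit 5 */
	return 0;
}
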
+
+#define show_poison_trace_type(type) \
+ __print_symbolic(type, \
+ { CXL_POISON_TRACE_LIST, "List" }, \
+ { CXL_POISON_TRACE_INJECT, "Inject" }, \
+ { CXL_POISON_TRACE_CLEAR, "Clear" })
+
+#define __show_poison_source(source) \
+ __print_symbolic(source, \
+ { CXL_POISON_SOURCE_UNKNOWN, "Unknown" }, \
+ { CXL_POISON_SOURCE_EXTERNAL, "External" }, \
+ { CXL_POISON_SOURCE_INTERNAL, "Internal" }, \
+ { CXL_POISON_SOURCE_INJECTED, "Injected" }, \
+ { CXL_POISON_SOURCE_VENDOR, "Vendor" })
+
+#define show_poison_source(source) \
+ (((source > CXL_POISON_SOURCE_INJECTED) && \
+ (source != CXL_POISON_SOURCE_VENDOR)) ? "Reserved" \
+ : __show_poison_source(source))
+
+#define show_poison_flags(flags) \
+ __print_flags(flags, "|", \
+ { CXL_POISON_FLAG_MORE, "More" }, \
+ { CXL_POISON_FLAG_OVERFLOW, "Overflow" }, \
+ { CXL_POISON_FLAG_SCANNING, "Scanning" })
+
+#define __cxl_poison_addr(record) \
+ (le64_to_cpu(record->address))
+#define cxl_poison_record_dpa(record) \
+ (__cxl_poison_addr(record) & CXL_POISON_START_MASK)
+#define cxl_poison_record_source(record) \
+ (__cxl_poison_addr(record) & CXL_POISON_SOURCE_MASK)
+#define cxl_poison_record_dpa_length(record) \
+ (le32_to_cpu(record->length) * CXL_POISON_LEN_MULT)
+#define cxl_poison_overflow(flags, time) \
+ (flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)
+
+u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
+
+TRACE_EVENT(cxl_poison,
+
+ TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
+ const struct cxl_poison_record *record, u8 flags,
+ __le64 overflow_ts, enum cxl_poison_trace_type trace_type),
+
+ TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),
+
+ TP_STRUCT__entry(
+ __string(memdev, dev_name(&cxlmd->dev))
+ __string(host, dev_name(cxlmd->dev.parent))
+ __field(u64, serial)
+ __field(u8, trace_type)
+ __string(region, region)
+ __field(u64, overflow_ts)
+ __field(u64, hpa)
+ __field(u64, dpa)
+ __field(u32, dpa_length)
+ __array(char, uuid, 16)
+ __field(u8, source)
+ __field(u8, flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(memdev, dev_name(&cxlmd->dev));
+ __assign_str(host, dev_name(cxlmd->dev.parent));
+ __entry->serial = cxlmd->cxlds->serial;
+ __entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts);
+ __entry->dpa = cxl_poison_record_dpa(record);
+ __entry->dpa_length = cxl_poison_record_dpa_length(record);
+ __entry->source = cxl_poison_record_source(record);
+ __entry->trace_type = trace_type;
+ __entry->flags = flags;
+ if (region) {
+ __assign_str(region, dev_name(&region->dev));
+ memcpy(__entry->uuid, &region->params.uuid, 16);
+ __entry->hpa = cxl_trace_hpa(region, cxlmd,
+ __entry->dpa);
+ } else {
+ __assign_str(region, "");
+ memset(__entry->uuid, 0, 16);
+ __entry->hpa = ULLONG_MAX;
+ }
+ ),
+
+ TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
+ "region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
+ "source=%s flags=%s overflow_time=%llu",
+ __get_str(memdev),
+ __get_str(host),
+ __entry->serial,
+ show_poison_trace_type(__entry->trace_type),
+ __get_str(region),
+ __entry->uuid,
+ __entry->hpa,
+ __entry->dpa,
+ __entry->dpa_length,
+ show_poison_source(__entry->source),
+ show_poison_flags(__entry->flags),
+ __entry->overflow_ts
+ )
+);
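
The cxl_poison_record_*() macros above decode a struct cxl_poison_record: bits [63:6] of the address field are the starting DPA, bits [2:0] the poison source, and the length field counts 64-byte units (see the CXL_POISON_* definitions added to cxlmem.h later in this patch). A standalone sketch of the same decode, with SKETCH_ constants standing in for the real ones and an invented record value:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_POISON_START_MASK	(~0x3FULL)	/* bits [63:6] */
#define SKETCH_POISON_SOURCE_MASK	0x7ULL		/* bits [2:0] */
#define SKETCH_POISON_LEN_MULT		64		/* 64-byte units */

int main(void)
{
	uint64_t address = 0x40000003ULL;	/* example record fields, */
	uint32_t length  = 2;			/* already cpu-endian here */

	printf("dpa=%#llx source=%llu len=%u bytes\n",
	       (unsigned long long)(address & SKETCH_POISON_START_MASK),
	       (unsigned long long)(address & SKETCH_POISON_SOURCE_MASK),
	       length * SKETCH_POISON_LEN_MULT);
	return 0;
}
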
+
+#endif /* _CXL_EVENTS_H */
+
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 1b1cf459ac77..044a92d9813e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -130,6 +130,7 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
#define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4
#define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0))
+#define CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK BIT(8)
#define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8
#define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC
@@ -140,6 +141,8 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
#define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0)
#define CXL_RAS_HEADER_LOG_OFFSET 0x18
#define CXL_RAS_CAPABILITY_LENGTH 0x58
+#define CXL_HEADERLOG_SIZE SZ_512
+#define CXL_HEADERLOG_SIZE_U32 SZ_512 / sizeof(u32)
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
@@ -154,6 +157,22 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
+/* CXL 3.0 8.2.8.3.1 Event Status Register */
+#define CXLDEV_DEV_EVENT_STATUS_OFFSET 0x00
+#define CXLDEV_EVENT_STATUS_INFO BIT(0)
+#define CXLDEV_EVENT_STATUS_WARN BIT(1)
+#define CXLDEV_EVENT_STATUS_FAIL BIT(2)
+#define CXLDEV_EVENT_STATUS_FATAL BIT(3)
+
+#define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO | \
+ CXLDEV_EVENT_STATUS_WARN | \
+ CXLDEV_EVENT_STATUS_FAIL | \
+ CXLDEV_EVENT_STATUS_FATAL)
+
+/* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */
+#define CXLDEV_EVENT_INT_MODE_MASK GENMASK(1, 0)
+#define CXLDEV_EVENT_INT_MSGNUM_MASK GENMASK(7, 4)
+
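These two masks carve up each per-log "settings" byte of the Event Interrupt Policy: bits [1:0] select the interrupt mode and bits [7:4] the MSI/MSI-X message number; cxl_event_req_irq() in drivers/cxl/pci.c (later in this patch) extracts both with FIELD_GET(). A minimal sketch of the same decode with an invented example value:

#include <stdio.h>

int main(void)
{
	unsigned int setting = 0x31;	/* example: msgnum 3, mode 1 (MSI/MSI-X) */

	printf("int_mode=%u msgnum=%u\n",
	       setting & 0x3,		/* GENMASK(1, 0) */
	       (setting >> 4) & 0xF);	/* GENMASK(7, 4) */
	return 0;
}
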
/* CXL 2.0 8.2.8.4 Mailbox Registers */
#define CXLDEV_MBOX_CAPS_OFFSET 0x00
#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
@@ -259,6 +278,8 @@ resource_size_t cxl_rcrb_to_component(struct device *dev,
* cxl_decoder flags that define the type of memory / devices this
* decoder supports as well as configuration lock status See "CXL 2.0
* 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
+ * Additionally indicate whether decoder settings were autodetected
+ * or user customized.
*/
#define CXL_DECODER_F_RAM BIT(0)
#define CXL_DECODER_F_PMEM BIT(1)
@@ -318,12 +339,36 @@ enum cxl_decoder_mode {
CXL_DECODER_DEAD,
};
+static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
+{
+ static const char * const names[] = {
+ [CXL_DECODER_NONE] = "none",
+ [CXL_DECODER_RAM] = "ram",
+ [CXL_DECODER_PMEM] = "pmem",
+ [CXL_DECODER_MIXED] = "mixed",
+ };
+
+ if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED)
+ return names[mode];
+ return "mixed";
+}
+
+/*
+ * Track whether this decoder is reserved for region autodiscovery, or
+ * free for userspace provisioning.
+ */
+enum cxl_decoder_state {
+ CXL_DECODER_STATE_MANUAL,
+ CXL_DECODER_STATE_AUTO,
+};
+
/**
* struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
* @cxld: base cxl_decoder_object
* @dpa_res: actively claimed DPA span of this decoder
* @skip: offset into @dpa_res where @cxld.hpa_range maps
* @mode: which memory type / access-mode-partition this decoder targets
+ * @state: autodiscovery state
* @pos: interleave position in @cxld.region
*/
struct cxl_endpoint_decoder {
@@ -331,6 +376,7 @@ struct cxl_endpoint_decoder {
struct resource *dpa_res;
resource_size_t skip;
enum cxl_decoder_mode mode;
+ enum cxl_decoder_state state;
int pos;
};
@@ -364,6 +410,7 @@ typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd,
* @region_id: region id for next region provisioning event
* @calc_hb: which host bridge covers the n'th position by granularity
* @platform_data: platform specific configuration data
+ * @range_lock: sync region autodiscovery by address range
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
@@ -371,6 +418,7 @@ struct cxl_root_decoder {
atomic_t region_id;
cxl_calc_hb_fn calc_hb;
void *platform_data;
+ struct mutex range_lock;
struct cxl_switch_decoder cxlsd;
};
@@ -420,6 +468,13 @@ struct cxl_region_params {
*/
#define CXL_REGION_F_INCOHERENT 0
+/*
+ * Indicate whether this region has been assembled by autodetection or
+ * userspace assembly. Prevent endpoint decoders outside of automatic
+ * detection from being added to the region.
+ */
+#define CXL_REGION_F_AUTO 1
+
/**
* struct cxl_region - CXL region
* @dev: This region's device
@@ -475,6 +530,12 @@ struct cxl_pmem_region {
struct cxl_pmem_region_mapping mapping[];
};
+struct cxl_dax_region {
+ struct device dev;
+ struct cxl_region *cxlr;
+ struct range hpa_range;
+};
+
/**
* struct cxl_port - logical collection of upstream port devices and
* downstream port devices to construct a CXL memory
@@ -588,8 +649,8 @@ static inline bool is_cxl_root(struct cxl_port *port)
return port->uport == port->dev.parent;
}
-bool is_cxl_port(struct device *dev);
-struct cxl_port *to_cxl_port(struct device *dev);
+bool is_cxl_port(const struct device *dev);
+struct cxl_port *to_cxl_port(const struct device *dev);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
struct pci_bus *bus);
@@ -597,7 +658,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct device *dev);
+struct cxl_port *find_cxl_root(struct cxl_port *port);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -615,8 +676,10 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
struct cxl_decoder *to_cxl_decoder(struct device *dev);
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
+bool is_switch_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets,
@@ -630,10 +693,28 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
+/**
+ * struct cxl_endpoint_dvsec_info - Cached DVSEC info
+ * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
+ * @ranges: Number of active HDM ranges this device uses.
+ * @port: endpoint port associated with this info instance
+ * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
+ */
+struct cxl_endpoint_dvsec_info {
+ bool mem_enabled;
+ int ranges;
+ struct cxl_port *port;
+ struct range dvsec_range[2];
+};
+
struct cxl_hdm;
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
+struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info);
+int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+int cxl_dvsec_rr_decode(struct device *dev, int dvsec,
+ struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
@@ -667,6 +748,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
#define CXL_DEVICE_MEMORY_EXPANDER 5
#define CXL_DEVICE_REGION 6
#define CXL_DEVICE_PMEM_REGION 7
+#define CXL_DEVICE_DAX_REGION 8
#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
@@ -678,11 +760,14 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
+int cxl_add_to_region(struct cxl_port *root,
+ struct cxl_endpoint_decoder *cxled);
+struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
@@ -692,6 +777,15 @@ static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
return NULL;
}
+static inline int cxl_add_to_region(struct cxl_port *root,
+ struct cxl_endpoint_decoder *cxled)
+{
+ return 0;
+}
+static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
+{
+ return NULL;
+}
#endif
/*
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index ab138004f644..db12b6313afb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -4,6 +4,7 @@
#define __CXL_MEM_H__
#include <uapi/linux/cxl_mem.h>
#include <linux/cdev.h>
+#include <linux/uuid.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -38,6 +39,7 @@
* @cxl_nvb: coordinate removal of @cxl_nvd if present
* @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
* @id: id number of this memdev instance.
+ * @depth: endpoint port depth
*/
struct cxl_memdev {
struct device dev;
@@ -47,6 +49,7 @@ struct cxl_memdev {
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
int id;
+ int depth;
};
static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -72,13 +75,16 @@ cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
return to_cxl_memdev(port->uport);
}
-bool is_cxl_memdev(struct device *dev);
+bool is_cxl_memdev(const struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
return is_cxl_memdev(port->uport);
}
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
+int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped);
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
@@ -121,7 +127,7 @@ struct cxl_mbox_cmd {
};
/*
- * Per CXL 2.0 Section 8.2.8.4.5.1
+ * Per CXL 3.0 Section 8.2.8.4.5.1
*/
#define CMD_CMD_RC_TABLE \
C(SUCCESS, 0, NULL), \
@@ -139,14 +145,22 @@ struct cxl_mbox_cmd {
C(FWROLLBACK, -ENXIO, "rolled back to the previous active FW"), \
C(FWRESET, -ENXIO, "FW failed to activate, needs cold reset"), \
C(HANDLE, -ENXIO, "one or more Event Record Handles were invalid"), \
- C(PADDR, -ENXIO, "physical address specified is invalid"), \
+ C(PADDR, -EFAULT, "physical address specified is invalid"), \
C(POISONLMT, -ENXIO, "poison injection limit has been reached"), \
C(MEDIAFAILURE, -ENXIO, "permanent issue with the media"), \
C(ABORT, -ENXIO, "background cmd was aborted by device"), \
C(SECURITY, -ENXIO, "not valid in the current security state"), \
C(PASSPHRASE, -ENXIO, "phrase doesn't match current set passphrase"), \
C(MBUNSUPPORTED, -ENXIO, "unsupported on the mailbox it was issued on"),\
- C(PAYLOADLEN, -ENXIO, "invalid payload length")
+ C(PAYLOADLEN, -ENXIO, "invalid payload length"), \
+ C(LOG, -ENXIO, "invalid or unsupported log page"), \
+ C(INTERRUPTED, -ENXIO, "asynchronous event occurred"), \
+ C(FEATUREVERSION, -ENXIO, "unsupported feature version"), \
+ C(FEATURESELVALUE, -ENXIO, "unsupported feature selection value"), \
+ C(FEATURETRANSFERIP, -ENXIO, "feature transfer in progress"), \
+ C(FEATURETRANSFEROOO, -ENXIO, "feature transfer out of order"), \
+ C(RESOURCEEXHAUSTED, -ENXIO, "resources are exhausted"), \
+ C(EXTLIST, -ENXIO, "invalid Extent List"), \
#undef C
#define C(a, b, c) CXL_MBOX_CMD_RC_##a
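The new entries above extend the existing CMD_CMD_RC_TABLE X-macro: the table is written once and C() is redefined before each expansion, e.g. to the CXL_MBOX_CMD_RC_##a enumerators directly above and, judging by cxl_mbox_cmd_rc2errno() in the following hunk, to errno/message pairs elsewhere in this header. A small self-contained illustration of that pattern; the table, names, and values here are invented for the example and are not the driver's actual expansion:

#include <stdio.h>

#define RC_TABLE			\
	C(SUCCESS, 0, "ok"),		\
	C(BUSY, -16, "device busy"),	\
	C(INPUT, -22, "bad input")

/* First expansion: an enum of symbolic return codes. */
#define C(name, err, msg) RC_##name
enum { RC_TABLE, RC_MAX };
#undef C

/* Second expansion: a parallel table of errno values and messages. */
#define C(name, err, msg) { err, msg }
static const struct { int err; const char *msg; } rc_table[] = { RC_TABLE };
#undef C

int main(void)
{
	printf("RC_BUSY -> err=%d msg=%s\n",
	       rc_table[RC_BUSY].err, rc_table[RC_BUSY].msg);
	return 0;
}
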
@@ -182,15 +196,62 @@ static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd)
#define CXL_CAPACITY_MULTIPLIER SZ_256M
/**
- * struct cxl_endpoint_dvsec_info - Cached DVSEC info
- * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE
- * @ranges: Number of active HDM ranges this device uses.
- * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
+ * Event Interrupt Policy
+ *
+ * CXL rev 3.0 section 8.2.9.2.4; Table 8-52
*/
-struct cxl_endpoint_dvsec_info {
- bool mem_enabled;
- int ranges;
- struct range dvsec_range[2];
+enum cxl_event_int_mode {
+ CXL_INT_NONE = 0x00,
+ CXL_INT_MSI_MSIX = 0x01,
+ CXL_INT_FW = 0x02
+};
+struct cxl_event_interrupt_policy {
+ u8 info_settings;
+ u8 warn_settings;
+ u8 failure_settings;
+ u8 fatal_settings;
+} __packed;
+
+/**
+ * struct cxl_event_state - Event log driver state
+ *
+ * @event_buf: Buffer to receive event data
+ * @event_log_lock: Serialize event_buf and log use
+ */
+struct cxl_event_state {
+ struct cxl_get_event_payload *buf;
+ struct mutex log_lock;
+};
+
+/* Device enabled poison commands */
+enum poison_cmd_enabled_bits {
+ CXL_POISON_ENABLED_LIST,
+ CXL_POISON_ENABLED_INJECT,
+ CXL_POISON_ENABLED_CLEAR,
+ CXL_POISON_ENABLED_SCAN_CAPS,
+ CXL_POISON_ENABLED_SCAN_MEDIA,
+ CXL_POISON_ENABLED_SCAN_RESULTS,
+ CXL_POISON_ENABLED_MAX
+};
+
+/**
+ * struct cxl_poison_state - Driver poison state info
+ *
+ * @max_errors: Maximum media error records held in device cache
+ * @enabled_cmds: All poison commands enabled in the CEL
+ * @list_out: The poison list payload returned by device
+ * @lock: Protect reads of the poison list
+ *
+ * Reads of the poison list are synchronized to ensure that a reader
+ * does not get an incomplete list because their request overlapped
+ * (was interrupted or preceded by) another read request of the same
+ * DPA range. CXL Spec 3.0 Section 8.2.9.8.4.1
+ */
+struct cxl_poison_state {
+ u32 max_errors;
+ DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX);
+ struct cxl_mbox_poison_out *list_out;
+ struct mutex lock; /* Protect reads of poison list */
};
/**
@@ -227,7 +288,8 @@ struct cxl_endpoint_dvsec_info {
* @component_reg_phys: register base of component registers
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
- * @doe_mbs: PCI DOE mailbox array
+ * @event: event log driver state
+ * @poison: poison driver state info
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
@@ -264,7 +326,8 @@ struct cxl_dev_state {
resource_size_t component_reg_phys;
u64 serial;
- struct xarray doe_mbs;
+ struct cxl_event_state event;
+ struct cxl_poison_state poison;
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
@@ -272,8 +335,13 @@ struct cxl_dev_state {
enum cxl_opcode {
CXL_MBOX_OP_INVALID = 0x0000,
CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
+ CXL_MBOX_OP_GET_EVENT_RECORD = 0x0100,
+ CXL_MBOX_OP_CLEAR_EVENT_RECORD = 0x0101,
+ CXL_MBOX_OP_GET_EVT_INT_POLICY = 0x0102,
+ CXL_MBOX_OP_SET_EVT_INT_POLICY = 0x0103,
CXL_MBOX_OP_GET_FW_INFO = 0x0200,
CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
+ CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
CXL_MBOX_OP_GET_LOG = 0x0401,
CXL_MBOX_OP_IDENTIFY = 0x4000,
@@ -347,6 +415,136 @@ struct cxl_mbox_identify {
u8 qos_telemetry_caps;
} __packed;
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+ uuid_t id;
+ u8 length;
+ u8 flags[3];
+ __le16 handle;
+ __le16 related_handle;
+ __le64 timestamp;
+ u8 maint_op_class;
+ u8 reserved[15];
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_record_raw {
+ struct cxl_event_record_hdr hdr;
+ u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * Get Event Records output payload
+ * CXL rev 3.0 section 8.2.9.2.2; Table 8-50
+ */
+#define CXL_GET_EVENT_FLAG_OVERFLOW BIT(0)
+#define CXL_GET_EVENT_FLAG_MORE_RECORDS BIT(1)
+struct cxl_get_event_payload {
+ u8 flags;
+ u8 reserved1;
+ __le16 overflow_err_count;
+ __le64 first_overflow_timestamp;
+ __le64 last_overflow_timestamp;
+ __le16 record_count;
+ u8 reserved2[10];
+ struct cxl_event_record_raw records[];
+} __packed;
+
+/*
+ * CXL rev 3.0 section 8.2.9.2.2; Table 8-49
+ */
+enum cxl_event_log_type {
+ CXL_EVENT_TYPE_INFO = 0x00,
+ CXL_EVENT_TYPE_WARN,
+ CXL_EVENT_TYPE_FAIL,
+ CXL_EVENT_TYPE_FATAL,
+ CXL_EVENT_TYPE_MAX
+};
+
+/*
+ * Clear Event Records input payload
+ * CXL rev 3.0 section 8.2.9.2.3; Table 8-51
+ */
+struct cxl_mbox_clear_event_payload {
+ u8 event_log; /* enum cxl_event_log_type */
+ u8 clear_flags;
+ u8 nr_recs;
+ u8 reserved[3];
+ __le16 handles[];
+} __packed;
+#define CXL_CLEAR_EVENT_MAX_HANDLES U8_MAX
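
Since handles[] above is a flexible-array member, the number of handles a single Clear Event Records command can carry is bounded by the mailbox payload size (and, because nr_recs is a u8, by U8_MAX). A sizing sketch under an assumed 1 KiB payload; the struct below only mirrors the layout above for sizeof() purposes and the names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the Clear Event Records payload header, for sizing only. */
struct sketch_clear_event {
	uint8_t event_log;
	uint8_t clear_flags;
	uint8_t nr_recs;
	uint8_t reserved[3];
	uint16_t handles[];
};

int main(void)
{
	size_t payload_size = 1024;	/* example mailbox payload size */
	size_t max_handles = (payload_size - sizeof(struct sketch_clear_event)) /
			     sizeof(uint16_t);

	printf("handles per clear command: %zu\n", max_handles);
	return 0;
}
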
+
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 device[3];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 reserved[46];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
+struct cxl_event_dram {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ u8 column[2];
+ u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+ u8 reserved[0x17];
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ */
+struct cxl_get_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 add_status;
+ u8 life_used;
+ u8 device_temp[2];
+ u8 dirty_shutdown_cnt[4];
+ u8 cor_vol_err_cnt[4];
+ u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+struct cxl_event_mem_module {
+ struct cxl_event_record_hdr hdr;
+ u8 event_type;
+ struct cxl_get_health_info info;
+ u8 reserved[0x3d];
+} __packed;
+
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
__le64 active_persistent_cap;
@@ -372,6 +570,67 @@ struct cxl_mbox_set_partition_info {
#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0)
+/* Set Timestamp CXL 3.0 Spec 8.2.9.4.2 */
+struct cxl_mbox_set_timestamp_in {
+ __le64 timestamp;
+
+} __packed;
+
+/* Get Poison List CXL 3.0 Spec 8.2.9.8.4.1 */
+struct cxl_mbox_poison_in {
+ __le64 offset;
+ __le64 length;
+} __packed;
+
+struct cxl_mbox_poison_out {
+ u8 flags;
+ u8 rsvd1;
+ __le64 overflow_ts;
+ __le16 count;
+ u8 rsvd2[20];
+ struct cxl_poison_record {
+ __le64 address;
+ __le32 length;
+ __le32 rsvd;
+ } __packed record[];
+} __packed;
+
+/*
+ * Get Poison List address field encodes the starting
+ * address of poison, and the source of the poison.
+ */
+#define CXL_POISON_START_MASK GENMASK_ULL(63, 6)
+#define CXL_POISON_SOURCE_MASK GENMASK(2, 0)
+
+/* Get Poison List record length is in units of 64 bytes */
+#define CXL_POISON_LEN_MULT 64
+
+/* Kernel defined maximum for a list of poison errors */
+#define CXL_POISON_LIST_MAX 1024
+
+/* Get Poison List: Payload out flags */
+#define CXL_POISON_FLAG_MORE BIT(0)
+#define CXL_POISON_FLAG_OVERFLOW BIT(1)
+#define CXL_POISON_FLAG_SCANNING BIT(2)
+
+/* Get Poison List: Poison Source */
+#define CXL_POISON_SOURCE_UNKNOWN 0
+#define CXL_POISON_SOURCE_EXTERNAL 1
+#define CXL_POISON_SOURCE_INTERNAL 2
+#define CXL_POISON_SOURCE_INJECTED 3
+#define CXL_POISON_SOURCE_VENDOR 7
+
+/* Inject & Clear Poison CXL 3.0 Spec 8.2.9.8.4.2/3 */
+struct cxl_mbox_inject_poison {
+ __le64 address;
+};
+
+/* Clear Poison CXL 3.0 Spec 8.2.9.8.4.3 */
+struct cxl_mbox_clear_poison {
+ __le64 address;
+ u8 write_data[CXL_POISON_LEN_MULT];
+} __packed;
+
/**
* struct cxl_mem_command - Driver representation of a memory device command
* @info: Command information as it exists for the UAPI
@@ -393,7 +652,6 @@ struct cxl_mem_command {
struct cxl_command_info info;
enum cxl_opcode opcode;
u32 flags;
-#define CXL_CMD_FLAG_NONE 0
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};
@@ -441,6 +699,15 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
+void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status);
+int cxl_set_timestamp(struct cxl_dev_state *cxlds);
+int cxl_poison_state_init(struct cxl_dev_state *cxlds);
+int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
+ struct cxl_region *cxlr);
+int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
+int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+
#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
void cxl_mem_active_dec(void);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 920909791bb9..0465ef963cd6 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -53,6 +53,12 @@
#define CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK GENMASK(15, 8)
#define CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK GENMASK(31, 16)
+/*
+ * NOTE: Currently all the functions which are enabled for CXL require their
+ * vectors to be in the first 16. Use this as the default max.
+ */
+#define CXL_PCI_DEFAULT_MAX_VECTORS 16
+
/* Register Block Identifier (RBI) */
enum cxl_regloc_type {
CXL_REGLOC_RBI_EMPTY = 0,
@@ -62,8 +68,26 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES
};
+struct cdat_header {
+ __le32 length;
+ u8 revision;
+ u8 checksum;
+ u8 reserved[6];
+ __le32 sequence;
+} __packed;
+
+struct cdat_entry_header {
+ u8 type;
+ u8 reserved;
+ __le16 length;
+} __packed;
+
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
void read_cdat_data(struct cxl_port *port);
+void cxl_cor_error_detected(struct pci_dev *pdev);
+pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 39c4b54f0715..10caf180b3fa 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -94,6 +94,26 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
return 0;
}
+static int cxl_debugfs_poison_inject(void *data, u64 dpa)
+{
+ struct cxl_memdev *cxlmd = data;
+
+ return cxl_inject_poison(cxlmd, dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
+ cxl_debugfs_poison_inject, "%llx\n");
+
+static int cxl_debugfs_poison_clear(void *data, u64 dpa)
+{
+ struct cxl_memdev *cxlmd = data;
+
+ return cxl_clear_poison(cxlmd, dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
+ cxl_debugfs_poison_clear, "%llx\n");
+
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
@@ -117,6 +137,14 @@ static int cxl_mem_probe(struct device *dev)
dentry = cxl_debugfs_create_dir(dev_name(dev));
debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
+
+ if (test_bit(CXL_POISON_ENABLED_INJECT, cxlds->poison.enabled_cmds))
+ debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
+ &cxl_poison_inject_fops);
+ if (test_bit(CXL_POISON_ENABLED_CLEAR, cxlds->poison.enabled_cmds))
+ debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
+ &cxl_poison_clear_fops);
+
rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
if (rc)
return rc;
@@ -176,10 +204,53 @@ unlock:
return devm_add_action_or_reset(dev, enable_suspend, NULL);
}
+static ssize_t trigger_poison_list_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool trigger;
+ int rc;
+
+ if (kstrtobool(buf, &trigger) || !trigger)
+ return -EINVAL;
+
+ rc = cxl_trigger_poison_list(to_cxl_memdev(dev));
+
+ return rc ? rc : len;
+}
+static DEVICE_ATTR_WO(trigger_poison_list);
+
+static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ if (a == &dev_attr_trigger_poison_list.attr) {
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (!test_bit(CXL_POISON_ENABLED_LIST,
+ to_cxl_memdev(dev)->cxlds->poison.enabled_cmds))
+ return 0;
+ }
+ return a->mode;
+}
+
+static struct attribute *cxl_mem_attrs[] = {
+ &dev_attr_trigger_poison_list.attr,
+ NULL
+};
+
+static struct attribute_group cxl_mem_group = {
+ .attrs = cxl_mem_attrs,
+ .is_visible = cxl_mem_visible,
+};
+
+__ATTRIBUTE_GROUPS(cxl_mem);
+
static struct cxl_driver cxl_mem_driver = {
.name = "cxl_mem",
.probe = cxl_mem_probe,
.id = CXL_DEVICE_MEMORY_EXPANDER,
+ .drv = {
+ .dev_groups = cxl_mem_groups,
+ },
};
module_cxl_driver(cxl_mem_driver);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 33083a522fd1..f7a5b8e9c102 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -8,14 +8,11 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/cxl.h>
/**
* DOC: cxl pci
@@ -162,7 +159,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
/* #4 */
- dev_dbg(dev, "Sending command\n");
+ dev_dbg(dev, "Sending command: 0x%04x\n", mbox_cmd->opcode);
writel(CXLDEV_MBOX_CTRL_DOORBELL,
cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
@@ -359,68 +356,304 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
-static void cxl_pci_destroy_doe(void *mbs)
+/*
+ * Assume that any RCIEP that emits the CXL memory expander class code
+ * is an RCD
+ */
+static bool is_cxl_restricted(struct pci_dev *pdev)
{
- xa_destroy(mbs);
+ return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}
-static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
+/*
+ * CXL v3.0 6.2.3 Table 6-4
+ * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flit
+ * mode, otherwise it is in 68B flit mode.
+ */
+static bool cxl_pci_flit_256(struct pci_dev *pdev)
{
- struct device *dev = cxlds->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 off = 0;
+ u16 lnksta2;
- xa_init(&cxlds->doe_mbs);
- if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
- dev_err(dev, "Failed to create XArray for DOE's\n");
- return;
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
+ return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
+}
+
+static int cxl_pci_ras_unmask(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ void __iomem *addr;
+ u32 orig_val, val, mask;
+ u16 cap;
+ int rc;
+
+ if (!cxlds->regs.ras) {
+ dev_dbg(&pdev->dev, "No RAS registers.\n");
+ return 0;
}
- /*
- * Mailbox creation is best effort. Higher layers must determine if
- * the lack of a mailbox for their protocol is a device failure or not.
- */
- pci_doe_for_each_off(pdev, off) {
- struct pci_doe_mb *doe_mb;
-
- doe_mb = pcim_doe_create_mb(pdev, off);
- if (IS_ERR(doe_mb)) {
- dev_err(dev, "Failed to create MB object for MB @ %x\n",
- off);
- continue;
- }
+ /* BIOS has CXL error control */
+ if (!host_bridge->native_cxl_error)
+ return -ENXIO;
- if (!pci_request_config_region_exclusive(pdev, off,
- PCI_DOE_CAP_SIZEOF,
- dev_name(dev)))
- pci_err(pdev, "Failed to exclude DOE registers\n");
+ rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
+ if (rc)
+ return rc;
- if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
- dev_err(dev, "xa_insert failed to insert MB @ %x\n",
- off);
- continue;
- }
+ if (cap & PCI_EXP_DEVCTL_URRE) {
+ addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
+ orig_val = readl(addr);
+
+ mask = CXL_RAS_UNCORRECTABLE_MASK_MASK;
+ if (!cxl_pci_flit_256(pdev))
+ mask &= ~CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
+ val = orig_val & ~mask;
+ writel(val, addr);
+ dev_dbg(&pdev->dev,
+ "Uncorrectable RAS Errors Mask: %#x -> %#x\n",
+ orig_val, val);
+ }
- dev_dbg(dev, "Created DOE mailbox @%x\n", off);
+ if (cap & PCI_EXP_DEVCTL_CERE) {
+ addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
+ orig_val = readl(addr);
+ val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
+ writel(val, addr);
+ dev_dbg(&pdev->dev, "Correctable RAS Errors Mask: %#x -> %#x\n",
+ orig_val, val);
}
+
+ return 0;
+}
+
+static void free_event_buf(void *buf)
+{
+ kvfree(buf);
}
/*
- * Assume that any RCIEP that emits the CXL memory expander class code
- * is an RCD
+ * There is a single buffer for reading event logs from the mailbox. All logs
+ * share this buffer protected by cxlds->event.log_lock.
*/
-static bool is_cxl_restricted(struct pci_dev *pdev)
+static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
{
- return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
+ struct cxl_get_event_payload *buf;
+
+ buf = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cxlds->event.buf = buf;
+
+ return devm_add_action_or_reset(cxlds->dev, free_event_buf, buf);
+}
+
+static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+{
+ int nvecs;
+
+ /*
+ * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
+ * not generate INTx messages if that function participates in
+ * CXL.cache or CXL.mem.
+ *
+ * Additionally, pci_alloc_irq_vectors() handles calling
+ * pci_free_irq_vectors() automatically despite not being a
+ * pcim_*() helper. See pci_setup_msi_context().
+ */
+ nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvecs < 1) {
+ dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+struct cxl_dev_id {
+ struct cxl_dev_state *cxlds;
+};
+
+static irqreturn_t cxl_event_thread(int irq, void *id)
+{
+ struct cxl_dev_id *dev_id = id;
+ struct cxl_dev_state *cxlds = dev_id->cxlds;
+ u32 status;
+
+ do {
+ /*
+ * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
+ * ignore the reserved upper 32 bits
+ */
+ status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
+ /* Ignore logs unknown to the driver */
+ status &= CXLDEV_EVENT_STATUS_ALL;
+ if (!status)
+ break;
+ cxl_mem_get_event_records(cxlds, status);
+ cond_resched();
+ } while (status);
+
+ return IRQ_HANDLED;
}
-static void disable_aer(void *pdev)
+static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
{
- pci_disable_pcie_error_reporting(pdev);
+ struct device *dev = cxlds->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct cxl_dev_id *dev_id;
+ int irq;
+
+ if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
+ return -ENXIO;
+
+ /* dev_id must be globally unique and must contain the cxlds */
+ dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
+ if (!dev_id)
+ return -ENOMEM;
+ dev_id->cxlds = cxlds;
+
+ irq = pci_irq_vector(pdev,
+ FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
+ if (irq < 0)
+ return irq;
+
+ return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
+ IRQF_SHARED | IRQF_ONESHOT, NULL,
+ dev_id);
+}
+
+static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
+ struct cxl_event_interrupt_policy *policy)
+{
+ struct cxl_mbox_cmd mbox_cmd = {
+ .opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
+ .payload_out = policy,
+ .size_out = sizeof(*policy),
+ };
+ int rc;
+
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc < 0)
+ dev_err(cxlds->dev, "Failed to get event interrupt policy : %d",
+ rc);
+
+ return rc;
+}
+
+static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
+ struct cxl_event_interrupt_policy *policy)
+{
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ *policy = (struct cxl_event_interrupt_policy) {
+ .info_settings = CXL_INT_MSI_MSIX,
+ .warn_settings = CXL_INT_MSI_MSIX,
+ .failure_settings = CXL_INT_MSI_MSIX,
+ .fatal_settings = CXL_INT_MSI_MSIX,
+ };
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
+ .payload_in = policy,
+ .size_in = sizeof(*policy),
+ };
+
+ rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc < 0) {
+ dev_err(cxlds->dev, "Failed to set event interrupt policy : %d",
+ rc);
+ return rc;
+ }
+
+ /* Retrieve final interrupt settings */
+ return cxl_event_get_int_policy(cxlds, policy);
+}
+
+static int cxl_event_irqsetup(struct cxl_dev_state *cxlds)
+{
+ struct cxl_event_interrupt_policy policy;
+ int rc;
+
+ rc = cxl_event_config_msgnums(cxlds, &policy);
+ if (rc)
+ return rc;
+
+ rc = cxl_event_req_irq(cxlds, policy.info_settings);
+ if (rc) {
+ dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n");
+ return rc;
+ }
+
+ rc = cxl_event_req_irq(cxlds, policy.warn_settings);
+ if (rc) {
+ dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n");
+ return rc;
+ }
+
+ rc = cxl_event_req_irq(cxlds, policy.failure_settings);
+ if (rc) {
+ dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n");
+ return rc;
+ }
+
+ rc = cxl_event_req_irq(cxlds, policy.fatal_settings);
+ if (rc) {
+ dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static bool cxl_event_int_is_fw(u8 setting)
+{
+ u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);
+
+ return mode == CXL_INT_FW;
+}
+
+static int cxl_event_config(struct pci_host_bridge *host_bridge,
+ struct cxl_dev_state *cxlds)
+{
+ struct cxl_event_interrupt_policy policy;
+ int rc;
+
+ /*
+ * When BIOS maintains CXL error reporting control, it will process
+ * event records. Only one agent can do so.
+ */
+ if (!host_bridge->native_cxl_error)
+ return 0;
+
+ rc = cxl_mem_alloc_event_buf(cxlds);
+ if (rc)
+ return rc;
+
+ rc = cxl_event_get_int_policy(cxlds, &policy);
+ if (rc)
+ return rc;
+
+ if (cxl_event_int_is_fw(policy.info_settings) ||
+ cxl_event_int_is_fw(policy.warn_settings) ||
+ cxl_event_int_is_fw(policy.failure_settings) ||
+ cxl_event_int_is_fw(policy.fatal_settings)) {
+ dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n");
+ return -EBUSY;
+ }
+
+ rc = cxl_event_irqsetup(cxlds);
+ if (rc)
+ return rc;
+
+ cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+
+ return 0;
}
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
struct cxl_dev_state *cxlds;
@@ -436,6 +669,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = pcim_enable_device(pdev);
if (rc)
return rc;
+ pci_set_master(pdev);
cxlds = cxl_dev_state_create(&pdev->dev);
if (IS_ERR(cxlds))
@@ -469,8 +703,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds->component_reg_phys = map.resource;
- devm_cxl_pci_create_doe(cxlds);
-
rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
&map, BIT(CXL_CM_CAP_CAP_ID_RAS));
if (rc)
@@ -484,6 +716,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
+ rc = cxl_set_timestamp(cxlds);
+ if (rc)
+ return rc;
+
+ rc = cxl_poison_state_init(cxlds);
+ if (rc)
+ return rc;
+
rc = cxl_dev_state_identify(cxlds);
if (rc)
return rc;
@@ -492,16 +732,22 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
+ rc = cxl_alloc_irq_vectors(pdev);
+ if (rc)
+ return rc;
+
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (cxlds->regs.ras) {
- pci_enable_pcie_error_reporting(pdev);
- rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
- if (rc)
- return rc;
- }
+ rc = cxl_event_config(host_bridge, cxlds);
+ if (rc)
+ return rc;
+
+ rc = cxl_pci_ras_unmask(pdev);
+ if (rc)
+ dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
+
pci_save_state(pdev);
return rc;
@@ -514,96 +760,6 @@ static const struct pci_device_id cxl_mem_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
-/* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
-{
- void __iomem *addr;
- u32 *log_addr;
- int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
-
- addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
- log_addr = log;
-
- for (i = 0; i < log_u32_size; i++) {
- *log_addr = readl(addr);
- log_addr++;
- addr += sizeof(u32);
- }
-}
-
-/*
- * Log the state of the RAS status registers and prepare them to log the
- * next error status. Return 1 if reset needed.
- */
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
-{
- struct cxl_memdev *cxlmd = cxlds->cxlmd;
- struct device *dev = &cxlmd->dev;
- u32 hl[CXL_HEADERLOG_SIZE_U32];
- void __iomem *addr;
- u32 status;
- u32 fe;
-
- if (!cxlds->regs.ras)
- return false;
-
- addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
- status = readl(addr);
- if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
- return false;
-
- /* If multiple errors, log header points to first error from ctrl reg */
- if (hweight32(status) > 1) {
- addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
- fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, readl(addr)));
- } else {
- fe = status;
- }
-
- header_log_copy(cxlds, hl);
- trace_cxl_aer_uncorrectable_error(dev, status, fe, hl);
- writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
-
- return true;
-}
-
-static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
- struct cxl_memdev *cxlmd = cxlds->cxlmd;
- struct device *dev = &cxlmd->dev;
- bool ue;
-
- /*
- * A frozen channel indicates an impending reset which is fatal to
- * CXL.mem operation, and will likely crash the system. On the off
- * chance the situation is recoverable dump the status of the RAS
- * capability registers and bounce the active state of the memdev.
- */
- ue = cxl_report_and_clear(cxlds);
-
- switch (state) {
- case pci_channel_io_normal:
- if (ue) {
- device_release_driver(dev);
- return PCI_ERS_RESULT_NEED_RESET;
- }
- return PCI_ERS_RESULT_CAN_RECOVER;
- case pci_channel_io_frozen:
- dev_warn(&pdev->dev,
- "%s: frozen state error detected, disable CXL.mem\n",
- dev_name(dev));
- device_release_driver(dev);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- dev_warn(&pdev->dev,
- "failure state error detected, request disconnect\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
@@ -628,25 +784,6 @@ static void cxl_error_resume(struct pci_dev *pdev)
dev->driver ? "successful" : "failed");
}
-static void cxl_cor_error_detected(struct pci_dev *pdev)
-{
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
- struct cxl_memdev *cxlmd = cxlds->cxlmd;
- struct device *dev = &cxlmd->dev;
- void __iomem *addr;
- u32 status;
-
- if (!cxlds->regs.ras)
- return;
-
- addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
- status = readl(addr);
- if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
- writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
- trace_cxl_aer_correctable_error(dev, status);
- }
-}
-
static const struct pci_error_handlers cxl_error_handlers = {
.error_detected = cxl_error_detected,
.slot_reset = cxl_slot_reset,
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index eedefebc4283..71cfa1fdf902 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -76,6 +76,7 @@ static int cxl_nvdimm_probe(struct device *dev)
return rc;
set_bit(NDD_LABELING, &flags);
+ set_bit(NDD_REGISTER_SYNC, &flags);
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
@@ -225,11 +226,35 @@ static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
+static int detach_nvdimm(struct device *dev, void *data)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ bool release = false;
+
+ if (!is_cxl_nvdimm(dev))
+ return 0;
+
+ device_lock(dev);
+ if (!dev->driver)
+ goto out;
+
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
+ release = true;
+out:
+ device_unlock(dev);
+ if (release)
+ device_release_driver(dev);
+ return 0;
+}
+
static void unregister_nvdimm_bus(void *_cxl_nvb)
{
struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus;
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, detach_nvdimm);
+
cxl_nvb->nvdimm_bus = NULL;
nvdimm_bus_unregister(nvdimm_bus);
}
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 5453771bf330..eb57324c4ad4 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -30,57 +30,124 @@ static void schedule_detach(void *cxlmd)
schedule_cxl_memdev_detach(cxlmd);
}
-static int cxl_port_probe(struct device *dev)
+static int discover_region(struct device *dev, void *root)
+{
+ struct cxl_endpoint_decoder *cxled;
+ int rc;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
+ return 0;
+
+ if (cxled->state != CXL_DECODER_STATE_AUTO)
+ return 0;
+
+ /*
+ * Region enumeration is opportunistic, if this add-event fails,
+ * continue to the next endpoint decoder.
+ */
+ rc = cxl_add_to_region(root, cxled);
+ if (rc)
+ dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
+ cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
+
+ return 0;
+}
+
+static int cxl_switch_port_probe(struct cxl_port *port)
{
- struct cxl_port *port = to_cxl_port(dev);
struct cxl_hdm *cxlhdm;
int rc;
+ rc = devm_cxl_port_enumerate_dports(port);
+ if (rc < 0)
+ return rc;
- if (!is_cxl_endpoint(port)) {
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
- if (rc == 1)
- return devm_cxl_add_passthrough_decoder(port);
- }
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
- cxlhdm = devm_cxl_setup_hdm(port);
- if (IS_ERR(cxlhdm))
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
return PTR_ERR(cxlhdm);
+ }
- if (is_cxl_endpoint(port)) {
- struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ if (rc == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
- /* Cache the data early to ensure is_visible() works */
- read_cdat_data(port);
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
- get_device(&cxlmd->dev);
- rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
- if (rc)
- return rc;
+static int cxl_endpoint_port_probe(struct cxl_port *port)
+{
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ struct cxl_port *root;
+ int rc;
- rc = cxl_hdm_decode_init(cxlds, cxlhdm);
- if (rc)
- return rc;
+ rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
+ if (rc < 0)
+ return rc;
- rc = cxl_await_media_ready(cxlds);
- if (rc) {
- dev_err(dev, "Media not active (%d)\n", rc);
- return rc;
- }
- }
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm))
+ return PTR_ERR(cxlhdm);
+
+ /* Cache the data early to ensure is_visible() works */
+ read_cdat_data(port);
+
+ get_device(&cxlmd->dev);
+ rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
+ if (rc)
+ return rc;
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
- rc = devm_cxl_enumerate_decoders(cxlhdm);
+ rc = cxl_await_media_ready(cxlds);
if (rc) {
- dev_err(dev, "Couldn't enumerate decoders (%d)\n", rc);
+ dev_err(&port->dev, "Media not active (%d)\n", rc);
return rc;
}
+ rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ /*
+ * This can't fail in practice as CXL root exit unregisters all
+ * descendant ports and that in turn synchronizes with cxl_port_probe()
+ */
+ root = find_cxl_root(port);
+
+ /*
+ * Now that all endpoint decoders are successfully enumerated, try to
+ * assemble regions from committed decoders
+ */
+ device_for_each_child(&port->dev, root, discover_region);
+ put_device(&root->dev);
+
return 0;
}
+static int cxl_port_probe(struct device *dev)
+{
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if (is_cxl_endpoint(port))
+ return cxl_endpoint_port_probe(port);
+ return cxl_switch_port_probe(port);
+}
+
static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 5fdf269a822e..a88744244149 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig DAX
tristate "DAX: direct access to differentiated memory"
- select SRCU
default m if NVDIMM_DAX
if DAX
@@ -45,12 +44,25 @@ config DEV_DAX_HMEM
Say M if unsure.
+config DEV_DAX_CXL
+ tristate "CXL DAX: direct access to CXL RAM regions"
+ depends on CXL_BUS && CXL_REGION && DEV_DAX
+ default CXL_REGION && DEV_DAX
+ help
+ CXL RAM regions are either mapped by platform-firmware
+ and published in the initial system-memory map as "System RAM", mapped
+ by platform-firmware as "Soft Reserved", or dynamically provisioned
+ after boot by the CXL driver. In the latter two cases a device-dax
+ instance is created to access that unmapped-by-default address range.
+	  Per usual it can remain as dedicated access via a device interface, or be
+	  converted to "System RAM" via the dax_kmem facility.
+
config DEV_DAX_HMEM_DEVICES
- depends on DEV_DAX_HMEM && DAX=y
+ depends on DEV_DAX_HMEM && DAX
def_bool y
config DEV_DAX_KMEM
- tristate "KMEM DAX: volatile-use of persistent memory"
+ tristate "KMEM DAX: map dax-devices as System-RAM"
default DEV_DAX
depends on DEV_DAX
depends on MEMORY_HOTPLUG # for add_memory() and friends
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 90a56ca3b345..5ed5c39857c8 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -3,10 +3,12 @@ obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+obj-$(CONFIG_DEV_DAX_CXL) += dax_cxl.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
dax_pmem-y := pmem.o
+dax_cxl-y := cxl.o
obj-y += hmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 1dad813ee4a6..227800053309 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -18,7 +18,7 @@ struct dax_id {
char dev_name[DAX_NAME_LEN];
};
-static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
/*
* We only ever expect to handle device-dax instances, i.e. the
@@ -56,6 +56,25 @@ static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
return match;
}
+static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
+{
+ enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
+ type = DAXDRV_KMEM_TYPE;
+
+ if (dax_drv->type == type)
+ return 1;
+
+ /* default to device mode if dax_kmem is disabled */
+ if (dax_drv->type == DAXDRV_DEVICE_TYPE &&
+ !IS_ENABLED(CONFIG_DEV_DAX_KMEM))
+ return 1;
+
+ return 0;
+}
+
enum id_action {
ID_REMOVE,
ID_ADD,
@@ -216,14 +235,9 @@ static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
struct dax_device_driver *dax_drv = to_dax_drv(drv);
- /*
- * All but the 'device-dax' driver, which has 'match_always'
- * set, requires an exact id match.
- */
- if (dax_drv->match_always)
+ if (dax_match_id(dax_drv, dev))
return 1;
-
- return dax_match_id(dax_drv, dev);
+ return dax_match_type(dax_drv, dev);
}
/*
@@ -427,8 +441,8 @@ static void unregister_dev_dax(void *dev)
dev_dbg(dev, "%s\n", __func__);
kill_dev_dax(dev_dax);
- free_dev_dax_ranges(dev_dax);
device_del(dev);
+ free_dev_dax_ranges(dev_dax);
put_device(dev);
}
@@ -1413,13 +1427,10 @@ err_id:
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
-static int match_always_count;
-
int __dax_driver_register(struct dax_device_driver *dax_drv,
struct module *module, const char *mod_name)
{
struct device_driver *drv = &dax_drv->drv;
- int rc = 0;
/*
* dax_bus_probe() calls dax_drv->probe() unconditionally.
@@ -1434,26 +1445,7 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
drv->mod_name = mod_name;
drv->bus = &dax_bus_type;
- /* there can only be one default driver */
- mutex_lock(&dax_bus_lock);
- match_always_count += dax_drv->match_always;
- if (match_always_count > 1) {
- match_always_count--;
- WARN_ON(1);
- rc = -EINVAL;
- }
- mutex_unlock(&dax_bus_lock);
- if (rc)
- return rc;
-
- rc = driver_register(drv);
- if (rc && dax_drv->match_always) {
- mutex_lock(&dax_bus_lock);
- match_always_count -= dax_drv->match_always;
- mutex_unlock(&dax_bus_lock);
- }
-
- return rc;
+ return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);
@@ -1463,7 +1455,6 @@ void dax_driver_unregister(struct dax_device_driver *dax_drv)
struct dax_id *dax_id, *_id;
mutex_lock(&dax_bus_lock);
- match_always_count -= dax_drv->match_always;
list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
list_del(&dax_id->list);
kfree(dax_id);
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index fbb940293d6d..8cd79ab34292 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -11,7 +11,10 @@ struct dax_device;
struct dax_region;
void dax_region_put(struct dax_region *dax_region);
-#define IORESOURCE_DAX_STATIC (1UL << 0)
+/* dax bus specific ioresource flags */
+#define IORESOURCE_DAX_STATIC BIT(0)
+#define IORESOURCE_DAX_KMEM BIT(1)
+
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct range *range, int target_node, unsigned int align,
unsigned long flags);
@@ -25,10 +28,15 @@ struct dev_dax_data {
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data);
+enum dax_driver_type {
+ DAXDRV_KMEM_TYPE,
+ DAXDRV_DEVICE_TYPE,
+};
+
struct dax_device_driver {
struct device_driver drv;
struct list_head ids;
- int match_always;
+ enum dax_driver_type type;
int (*probe)(struct dev_dax *dev);
void (*remove)(struct dev_dax *dev);
};
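
A minimal sketch of how a dax driver opts into the new type-based matching added
above, assuming the existing dax_driver_register()/dax_driver_unregister() helpers
declared alongside struct dax_device_driver; the example_* names are illustrative
and not part of this series:

#include <linux/module.h>
#include "bus.h"

static int example_dax_probe(struct dev_dax *dev_dax)
{
	/* regions flagged IORESOURCE_DAX_KMEM are routed here by dax_match_type() */
	return 0;
}

static struct dax_device_driver example_dax_driver = {
	.probe = example_dax_probe,
	.type  = DAXDRV_KMEM_TYPE,
};

static int __init example_dax_init(void)
{
	return dax_driver_register(&example_dax_driver);
}
module_init(example_dax_init);

static void __exit example_dax_exit(void)
{
	dax_driver_unregister(&example_dax_driver);
}
module_exit(example_dax_exit);
MODULE_LICENSE("GPL");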
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
new file mode 100644
index 000000000000..ccdf8de85bd5
--- /dev/null
+++ b/drivers/dax/cxl.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
+#include <linux/module.h>
+#include <linux/dax.h>
+
+#include "../cxl/cxl.h"
+#include "bus.h"
+
+static int cxl_dax_region_probe(struct device *dev)
+{
+ struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
+ int nid = phys_to_target_node(cxlr_dax->hpa_range.start);
+ struct cxl_region *cxlr = cxlr_dax->cxlr;
+ struct dax_region *dax_region;
+ struct dev_dax_data data;
+ struct dev_dax *dev_dax;
+
+ if (nid == NUMA_NO_NODE)
+ nid = memory_add_physaddr_to_nid(cxlr_dax->hpa_range.start);
+
+ dax_region = alloc_dax_region(dev, cxlr->id, &cxlr_dax->hpa_range, nid,
+ PMD_SIZE, IORESOURCE_DAX_KMEM);
+ if (!dax_region)
+ return -ENOMEM;
+
+ data = (struct dev_dax_data) {
+ .dax_region = dax_region,
+ .id = -1,
+ .size = range_len(&cxlr_dax->hpa_range),
+ };
+ dev_dax = devm_create_dev_dax(&data);
+ if (IS_ERR(dev_dax))
+ return PTR_ERR(dev_dax);
+
+ /* child dev_dax instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
+ return 0;
+}
+
+static struct cxl_driver cxl_dax_region_driver = {
+ .name = "cxl_dax_region",
+ .probe = cxl_dax_region_probe,
+ .id = CXL_DEVICE_DAX_REGION,
+ .drv = {
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_cxl_driver(cxl_dax_region_driver);
+MODULE_ALIAS_CXL(CXL_DEVICE_DAX_REGION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_IMPORT_NS(CXL);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 5494d745ced5..af9930c03c9c 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
vma->vm_ops = &dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ vm_flags_set(vma, VM_HUGEPAGE);
return 0;
}
@@ -475,8 +475,7 @@ EXPORT_SYMBOL_GPL(dev_dax_probe);
static struct dax_device_driver device_dax_driver = {
.probe = dev_dax_probe,
- /* all probe actions are unwound by devm, so .remove isn't necessary */
- .match_always = 1,
+ .type = DAXDRV_DEVICE_TYPE,
};
static int __init dax_init(void)
diff --git a/drivers/dax/hmem/Makefile b/drivers/dax/hmem/Makefile
index 57377b4c3d47..d4c4cd6bccd7 100644
--- a/drivers/dax/hmem/Makefile
+++ b/drivers/dax/hmem/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
+# device_hmem.o deliberately precedes dax_hmem.o for initcall ordering
obj-$(CONFIG_DEV_DAX_HMEM_DEVICES) += device_hmem.o
+obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
device_hmem-y := device.o
dax_hmem-y := hmem.o
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index 903325aac991..f9e1a76a04a9 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -8,6 +8,8 @@
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
+static bool platform_initialized;
+static DEFINE_MUTEX(hmem_resource_lock);
static struct resource hmem_active = {
.name = "HMEM devices",
.start = 0,
@@ -15,80 +17,66 @@ static struct resource hmem_active = {
.flags = IORESOURCE_MEM,
};
-void hmem_register_device(int target_nid, struct resource *r)
+int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
+{
+ struct resource *res;
+ int rc = 0;
+
+ mutex_lock(&hmem_resource_lock);
+ for (res = hmem_active.child; res; res = res->sibling) {
+ rc = fn(host, (int) res->desc, res);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&hmem_resource_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(walk_hmem_resources);
+
+static void __hmem_register_resource(int target_nid, struct resource *res)
{
- /* define a clean / non-busy resource for the platform device */
- struct resource res = {
- .start = r->start,
- .end = r->end,
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_SOFT_RESERVED,
- };
struct platform_device *pdev;
- struct memregion_info info;
- int rc, id;
+ struct resource *new;
+ int rc;
- if (nohmem)
+ new = __request_region(&hmem_active, res->start, resource_size(res), "",
+ 0);
+ if (!new) {
+ pr_debug("hmem range %pr already active\n", res);
return;
+ }
- rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
- IORES_DESC_SOFT_RESERVED);
- if (rc != REGION_INTERSECTS)
- return;
+ new->desc = target_nid;
- id = memregion_alloc(GFP_KERNEL);
- if (id < 0) {
- pr_err("memregion allocation failure for %pr\n", &res);
+ if (platform_initialized)
return;
- }
- pdev = platform_device_alloc("hmem", id);
+ pdev = platform_device_alloc("hmem_platform", 0);
if (!pdev) {
- pr_err("hmem device allocation failure for %pr\n", &res);
- goto out_pdev;
- }
-
- if (!__request_region(&hmem_active, res.start, resource_size(&res),
- dev_name(&pdev->dev), 0)) {
- dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
- goto out_active;
- }
-
- pdev->dev.numa_node = numa_map_to_online_node(target_nid);
- info = (struct memregion_info) {
- .target_node = target_nid,
- };
- rc = platform_device_add_data(pdev, &info, sizeof(info));
- if (rc < 0) {
- pr_err("hmem memregion_info allocation failure for %pr\n", &res);
- goto out_resource;
- }
-
- rc = platform_device_add_resources(pdev, &res, 1);
- if (rc < 0) {
- pr_err("hmem resource allocation failure for %pr\n", &res);
- goto out_resource;
+ pr_err_once("failed to register device-dax hmem_platform device\n");
+ return;
}
rc = platform_device_add(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev, "device add failed for %pr\n", &res);
- goto out_resource;
- }
+ if (rc)
+ platform_device_put(pdev);
+ else
+ platform_initialized = true;
+}
- return;
+void hmem_register_resource(int target_nid, struct resource *res)
+{
+ if (nohmem)
+ return;
-out_resource:
- __release_region(&hmem_active, res.start, resource_size(&res));
-out_active:
- platform_device_put(pdev);
-out_pdev:
- memregion_free(id);
+ mutex_lock(&hmem_resource_lock);
+ __hmem_register_resource(target_nid, res);
+ mutex_unlock(&hmem_resource_lock);
}
static __init int hmem_register_one(struct resource *res, void *data)
{
- hmem_register_device(phys_to_target_node(res->start), res);
+ hmem_register_resource(phys_to_target_node(res->start), res);
return 0;
}
@@ -104,4 +92,4 @@ static __init int hmem_init(void)
* As this is a fallback for address ranges unclaimed by the ACPI HMAT
* parsing it must be at an initcall level greater than hmat_init().
*/
-late_initcall(hmem_init);
+device_initcall(hmem_init);
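
A hedged sketch of a walk_hmem_resources() consumer; the callback shape mirrors
how the walker above invokes it (host device, target node recovered from
res->desc, and the resource itself). The example_* names are illustrative only:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dax.h>

static int example_claim_range(struct device *host, int target_nid,
			       const struct resource *res)
{
	dev_info(host, "soft-reserved range %pr (target node %d)\n",
		 res, target_nid);
	/* a non-zero return value stops the walk early */
	return 0;
}

static int example_platform_probe(struct platform_device *pdev)
{
	return walk_hmem_resources(&pdev->dev, example_claim_range);
}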
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 1bf040dbc834..e5fe8b39fb94 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -3,6 +3,7 @@
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
+#include <linux/dax.h>
#include "../bus.h"
static bool region_idle;
@@ -10,30 +11,32 @@ module_param_named(region_idle, region_idle, bool, 0644);
static int dax_hmem_probe(struct platform_device *pdev)
{
+ unsigned long flags = IORESOURCE_DAX_KMEM;
struct device *dev = &pdev->dev;
struct dax_region *dax_region;
struct memregion_info *mri;
struct dev_dax_data data;
struct dev_dax *dev_dax;
- struct resource *res;
- struct range range;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+ /*
+ * @region_idle == true indicates that an administrative agent
+ * wants to manipulate the range partitioning before the devices
+ * are created, so do not send them to the dax_kmem driver by
+ * default.
+ */
+ if (region_idle)
+ flags = 0;
mri = dev->platform_data;
- range.start = res->start;
- range.end = res->end;
- dax_region = alloc_dax_region(dev, pdev->id, &range, mri->target_node,
- PMD_SIZE, 0);
+ dax_region = alloc_dax_region(dev, pdev->id, &mri->range,
+ mri->target_node, PMD_SIZE, flags);
if (!dax_region)
return -ENOMEM;
data = (struct dev_dax_data) {
.dax_region = dax_region,
.id = -1,
- .size = region_idle ? 0 : resource_size(res),
+ .size = region_idle ? 0 : range_len(&mri->range),
};
dev_dax = devm_create_dev_dax(&data);
if (IS_ERR(dev_dax))
@@ -44,22 +47,131 @@ static int dax_hmem_probe(struct platform_device *pdev)
return 0;
}
-static int dax_hmem_remove(struct platform_device *pdev)
-{
- /* devm handles teardown */
- return 0;
-}
-
static struct platform_driver dax_hmem_driver = {
.probe = dax_hmem_probe,
- .remove = dax_hmem_remove,
.driver = {
.name = "hmem",
},
};
-module_platform_driver(dax_hmem_driver);
+static void release_memregion(void *data)
+{
+ memregion_free((long) data);
+}
+
+static void release_hmem(void *pdev)
+{
+ platform_device_unregister(pdev);
+}
+
+static int hmem_register_device(struct device *host, int target_nid,
+ const struct resource *res)
+{
+ struct platform_device *pdev;
+ struct memregion_info info;
+ long id;
+ int rc;
+
+ if (IS_ENABLED(CONFIG_CXL_REGION) &&
+ region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
+ IORES_DESC_CXL) != REGION_DISJOINT) {
+ dev_dbg(host, "deferring range to CXL: %pr\n", res);
+ return 0;
+ }
+
+ rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
+ IORES_DESC_SOFT_RESERVED);
+ if (rc != REGION_INTERSECTS)
+ return 0;
+
+ id = memregion_alloc(GFP_KERNEL);
+ if (id < 0) {
+ dev_err(host, "memregion allocation failure for %pr\n", res);
+ return -ENOMEM;
+ }
+ rc = devm_add_action_or_reset(host, release_memregion, (void *) id);
+ if (rc)
+ return rc;
+
+ pdev = platform_device_alloc("hmem", id);
+ if (!pdev) {
+ dev_err(host, "device allocation failure for %pr\n", res);
+ return -ENOMEM;
+ }
+
+ pdev->dev.numa_node = numa_map_to_online_node(target_nid);
+ info = (struct memregion_info) {
+ .target_node = target_nid,
+ .range = {
+ .start = res->start,
+ .end = res->end,
+ },
+ };
+ rc = platform_device_add_data(pdev, &info, sizeof(info));
+ if (rc < 0) {
+ dev_err(host, "memregion_info allocation failure for %pr\n",
+ res);
+ goto out_put;
+ }
+
+ rc = platform_device_add(pdev);
+ if (rc < 0) {
+ dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev),
+ res);
+ goto out_put;
+ }
+
+ return devm_add_action_or_reset(host, release_hmem, pdev);
+
+out_put:
+ platform_device_put(pdev);
+ return rc;
+}
+
+static int dax_hmem_platform_probe(struct platform_device *pdev)
+{
+ return walk_hmem_resources(&pdev->dev, hmem_register_device);
+}
+
+static struct platform_driver dax_hmem_platform_driver = {
+ .probe = dax_hmem_platform_probe,
+ .driver = {
+ .name = "hmem_platform",
+ },
+};
+
+static __init int dax_hmem_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&dax_hmem_platform_driver);
+ if (rc)
+ return rc;
+
+ rc = platform_driver_register(&dax_hmem_driver);
+ if (rc)
+ platform_driver_unregister(&dax_hmem_platform_driver);
+
+ return rc;
+}
+
+static __exit void dax_hmem_exit(void)
+{
+ platform_driver_unregister(&dax_hmem_driver);
+ platform_driver_unregister(&dax_hmem_platform_driver);
+}
+
+module_init(dax_hmem_init);
+module_exit(dax_hmem_exit);
+
+/* Allow for CXL to define its own dax regions */
+#if IS_ENABLED(CONFIG_CXL_REGION)
+#if IS_MODULE(CONFIG_CXL_ACPI)
+MODULE_SOFTDEP("pre: cxl_acpi");
+#endif
+#endif
MODULE_ALIAS("platform:hmem*");
+MODULE_ALIAS("platform:hmem_platform*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 4852a2dbdb27..7b36db6f1cbd 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -146,7 +146,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
i, range.start, range.end);
- release_resource(res);
+ remove_resource(res);
kfree(res);
data->res[i] = NULL;
if (mapped)
@@ -195,7 +195,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
rc = remove_memory(range.start, range_len(&range));
if (rc == 0) {
- release_resource(data->res[i]);
+ remove_resource(data->res[i]);
kfree(data->res[i]);
data->res[i] = NULL;
success++;
@@ -239,6 +239,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
static struct dax_device_driver device_dax_kmem_driver = {
.probe = dev_dax_kmem_probe,
.remove = dev_dax_kmem_remove,
+ .type = DAXDRV_KMEM_TYPE,
};
static int __init dax_kmem_init(void)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index da4438f3188c..c4c4728a36e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
-
+ *
* Return: the holder's data which represents the holder if registered,
* otherwize NULL.
*/
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index c40c2ebfdae9..ed3dac546dd6 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -294,9 +294,7 @@ EXPORT_SYMBOL_GPL(dca3_get_tag);
*/
u8 dca_get_tag(int cpu)
{
- struct device *dev = NULL;
-
- return dca_common_get_tag(dev, cpu);
+ return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 21ebd0af268b..fcc83ede0909 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -74,7 +74,7 @@ int __init dca_sysfs_init(void)
idr_init(&dca_idr);
spin_lock_init(&dca_idr_lock);
- dca_class = class_create(THIS_MODULE, "dca");
+ dca_class = class_create("dca");
if (IS_ERR(dca_class)) {
idr_destroy(&dca_idr);
return PTR_ERR(dca_class);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 9754d8b31621..3c4862a752b5 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig PM_DEVFREQ
bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
- select SRCU
select PM_OPP
help
A device may have a list of frequencies and voltages available.
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index f041edccd107..3ebac2496679 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -469,7 +469,7 @@ ATTRIBUTE_GROUPS(devfreq_event);
static int __init devfreq_event_init(void)
{
- devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
+ devfreq_event_class = class_create("devfreq-event");
if (IS_ERR(devfreq_event_class)) {
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_event_class);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 817c71da391a..e36cbb920ec8 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1988,7 +1988,7 @@ DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
static int __init devfreq_init(void)
{
- devfreq_class = class_create(THIS_MODULE, "devfreq");
+ devfreq_class = class_create("devfreq");
if (IS_ERR(devfreq_class)) {
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_class);
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index a443e7c42daf..896a6cc93b00 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -621,8 +621,7 @@ static int exynos_ppmu_parse_dt(struct platform_device *pdev,
}
/* Maps the memory mapped IO to control PPMU register */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 027e8f336acc..88414445adf3 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -432,7 +432,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
goto err;
/* Create child platform device for the interconnect provider */
- if (of_get_property(dev->of_node, "#interconnect-cells", NULL)) {
+ if (of_property_present(dev->of_node, "#interconnect-cells")) {
bus->icc_pdev = platform_device_register_data(
dev, "exynos-generic-icc",
PLATFORM_DEVID_AUTO, NULL, 0);
@@ -513,7 +513,7 @@ static struct platform_driver exynos_bus_platdrv = {
.driver = {
.name = "exynos-bus",
.pm = &exynos_bus_pm,
- .of_match_table = of_match_ptr(exynos_bus_of_match),
+ .of_match_table = exynos_bus_of_match,
},
};
module_platform_driver(exynos_bus_platdrv);
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index f69d68122b9b..6cfbbf0720bd 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -112,7 +112,7 @@ static void dma_buf_sysfs_release(struct kobject *kobj)
kfree(sysfs_entry);
}
-static struct kobj_type dma_buf_ktype = {
+static const struct kobj_type dma_buf_ktype = {
.sysfs_ops = &dma_buf_stats_sysfs_ops,
.release = dma_buf_sysfs_release,
.default_groups = dma_buf_stats_default_groups,
@@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
kset_unregister(dma_buf_stats_kset);
}
-int dma_buf_stats_setup(struct dma_buf *dmabuf)
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
{
struct dma_buf_sysfs_entry *sysfs_entry;
int ret;
- if (!dmabuf || !dmabuf->file)
- return -EINVAL;
-
if (!dmabuf->exp_name) {
pr_err("exporter name must not be empty if stats needed\n");
return -EINVAL;
@@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
/* create the directory for buffer stats */
ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
- "%lu", file_inode(dmabuf->file)->i_ino);
+ "%lu", file_inode(file)->i_ino);
if (ret)
goto err_sysfs_dmabuf;
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h
index a49c6e2650cc..7a8a995b75ba 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.h
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.h
@@ -13,7 +13,7 @@
int dma_buf_init_sysfs_statistics(void);
void dma_buf_uninit_sysfs_statistics(void);
-int dma_buf_stats_setup(struct dma_buf *dmabuf);
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
void dma_buf_stats_teardown(struct dma_buf *dmabuf);
#else
@@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
static inline void dma_buf_uninit_sysfs_statistics(void) {}
-static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
{
return 0;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b6c36914e7c6..aa4ea8530cb3 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
return -EINVAL;
dmabuf = file->private_data;
-
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
+ if (dmabuf) {
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+ }
return 0;
}
@@ -528,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file)
return file->f_op == &dma_buf_fops;
}
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
{
static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
- struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+ struct file *file;
if (IS_ERR(inode))
return ERR_CAST(inode);
- inode->i_size = dmabuf->size;
- inode_set_bytes(inode, dmabuf->size);
+ inode->i_size = size;
+ inode_set_bytes(inode, size);
/*
* The ->i_ino acquired from get_next_ino() is not unique thus
@@ -552,8 +553,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
- file->private_data = dmabuf;
- file->f_path.dentry->d_fsdata = dmabuf;
return file;
@@ -619,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
size_t alloc_size = sizeof(struct dma_buf);
int ret;
- if (!exp_info->resv)
- alloc_size += sizeof(struct dma_resv);
- else
- /* prevent &dma_buf[1] == dma_buf->resv */
- alloc_size += 1;
-
- if (WARN_ON(!exp_info->priv
- || !exp_info->ops
- || !exp_info->ops->map_dma_buf
- || !exp_info->ops->unmap_dma_buf
- || !exp_info->ops->release)) {
+ if (WARN_ON(!exp_info->priv || !exp_info->ops
+ || !exp_info->ops->map_dma_buf
+ || !exp_info->ops->unmap_dma_buf
+ || !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- }
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
(exp_info->ops->pin || exp_info->ops->unpin)))
@@ -643,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
+ file = dma_buf_getfile(exp_info->size, exp_info->flags);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_module;
+ }
+
+ if (!exp_info->resv)
+ alloc_size += sizeof(struct dma_resv);
+ else
+ /* prevent &dma_buf[1] == dma_buf->resv */
+ alloc_size += 1;
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
ret = -ENOMEM;
- goto err_module;
+ goto err_file;
}
dmabuf->priv = exp_info->priv;
@@ -658,43 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+ INIT_LIST_HEAD(&dmabuf->attachments);
if (!resv) {
- resv = (struct dma_resv *)&dmabuf[1];
- dma_resv_init(resv);
+ dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+ dma_resv_init(dmabuf->resv);
+ } else {
+ dmabuf->resv = resv;
}
- dmabuf->resv = resv;
- file = dma_buf_getfile(dmabuf, exp_info->flags);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
+ ret = dma_buf_stats_setup(dmabuf, file);
+ if (ret)
goto err_dmabuf;
- }
+ file->private_data = dmabuf;
+ file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- INIT_LIST_HEAD(&dmabuf->attachments);
-
mutex_lock(&db_list.lock);
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
- ret = dma_buf_stats_setup(dmabuf);
- if (ret)
- goto err_sysfs;
-
return dmabuf;
-err_sysfs:
- /*
- * Set file->f_path.dentry->d_fsdata to NULL so that when
- * dma_buf_release() gets invoked by dentry_ops, it exits
- * early before calling the release() dma_buf op.
- */
- file->f_path.dentry->d_fsdata = NULL;
- fput(file);
err_dmabuf:
+ if (!resv)
+ dma_resv_fini(dmabuf->resv);
kfree(dmabuf);
+err_file:
+ fput(file);
err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
@@ -834,7 +828,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
* - dma_buf_attach()
* - dma_buf_dynamic_attach()
* - dma_buf_detach()
- * - dma_buf_export(
+ * - dma_buf_export()
* - dma_buf_fd()
* - dma_buf_get()
* - dma_buf_put()
@@ -1263,7 +1257,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
*
* @dmabuf: [in] buffer which is moving
*
- * Informs all attachmenst that they need to destroy and recreated all their
+ * Informs all attachments that they need to destroy and recreate all their
* mappings.
*/
void dma_buf_move_notify(struct dma_buf *dmabuf)
@@ -1281,11 +1275,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
/**
* DOC: cpu access
*
- * There are mutliple reasons for supporting CPU access to a dma buffer object:
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
*
* - Fallback operations in the kernel, for example when a device is connected
* over USB and the kernel needs to shuffle the data around first before
- * sending it away. Cache coherency is handled by braketing any transactions
+ * sending it away. Cache coherency is handled by bracketing any transactions
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
@@ -1312,7 +1306,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* replace ION buffers mmap support was needed.
*
* There is no special interfaces, userspace simply calls mmap on the dma-buf
- * fd. But like for CPU access there's a need to braket the actual access,
+ * fd. But like for CPU access there's a need to bracket the actual access,
* which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
* DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
* be restarted.
@@ -1386,10 +1380,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* After the cpu access is complete the caller should call
- * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
* This function will also wait for any DMA transactions tracked through
@@ -1429,7 +1423,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* This terminates CPU access started with dma_buf_begin_cpu_access().
*
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 5c8a7084577b..9b3ce8948351 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -123,12 +123,23 @@ static void dma_fence_array_release(struct dma_fence *fence)
dma_fence_free(fence);
}
+static void dma_fence_array_set_deadline(struct dma_fence *fence,
+ ktime_t deadline)
+{
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i)
+ dma_fence_set_deadline(array->fences[i], deadline);
+}
+
const struct dma_fence_ops dma_fence_array_ops = {
.get_driver_name = dma_fence_array_get_driver_name,
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
.release = dma_fence_array_release,
+ .set_deadline = dma_fence_array_set_deadline,
};
EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index a0d920576ba6..9663ba1bb6ac 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -206,6 +206,17 @@ static void dma_fence_chain_release(struct dma_fence *fence)
dma_fence_free(fence);
}
+
+static void dma_fence_chain_set_deadline(struct dma_fence *fence,
+ ktime_t deadline)
+{
+ dma_fence_chain_for_each(fence, fence) {
+ struct dma_fence *f = dma_fence_chain_contained(fence);
+
+ dma_fence_set_deadline(f, deadline);
+ }
+}
+
const struct dma_fence_ops dma_fence_chain_ops = {
.use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
@@ -213,6 +224,7 @@ const struct dma_fence_ops dma_fence_chain_ops = {
.enable_signaling = dma_fence_chain_enable_signaling,
.signaled = dma_fence_chain_signaled,
.release = dma_fence_chain_release,
+ .set_deadline = dma_fence_chain_set_deadline,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 406b4e26f538..f177c56269bb 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &dma_fence_stub.flags);
+ &fence->flags);
dma_fence_signal(fence);
@@ -913,6 +913,65 @@ err_free_cb:
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
+ * DOC: deadline hints
+ *
+ * In an ideal world, it would be possible to pipeline a workload sufficiently
+ * that a utilization based device frequency governor could arrive at a minimum
+ * frequency that meets the requirements of the use-case, in order to minimize
+ * power consumption. But in the real world there are many workloads which
+ * defy this ideal. For example, but not limited to:
+ *
+ * * Workloads that ping-pong between device and CPU, with alternating periods
+ * of CPU waiting for device, and device waiting on CPU. This can result in
+ *   devfreq and cpufreq seeing idle time in their respective domains and, as
+ *   a result, reducing frequency.
+ *
+ * * Workloads that interact with a periodic time based deadline, such as double
+ * buffered GPU rendering vs vblank sync'd page flipping. In this scenario,
+ * missing a vblank deadline results in an *increase* in idle time on the GPU
+ * (since it has to wait an additional vblank period), sending a signal to
+ * the GPU's devfreq to reduce frequency, when in fact the opposite is what is
+ * needed.
+ *
+ * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
+ * The deadline hint provides a way for the waiting driver, or userspace, to
+ * convey an appropriate sense of urgency to the signaling driver.
+ *
+ * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
+ * facing APIs). The time could either be some point in the future (such as
+ * the vblank based deadline for page-flipping, or the start of a compositor's
+ * composition cycle), or the current time to indicate an immediate deadline
+ * hint (i.e. forward progress cannot be made until this fence is signaled).
+ *
+ * Multiple deadlines may be set on a given fence, even in parallel. See the
+ * documentation for &dma_fence_ops.set_deadline.
+ *
+ * The deadline hint is just that, a hint. The driver that created the fence
+ * may react by increasing frequency, making different scheduling choices, etc.
+ * Or doing nothing at all.
+ */
+
+/**
+ * dma_fence_set_deadline - set desired fence-wait deadline hint
+ * @fence: the fence that is to be waited on
+ * @deadline: the time by which the waiter hopes for the fence to be
+ * signaled
+ *
+ * Give the fence signaler a hint about an upcoming deadline, such as
+ * vblank, by which point the waiter would prefer the fence to be
+ * signaled. This is intended to give feedback to the fence signaler
+ * to aid in power management decisions, such as boosting GPU frequency
+ * if a periodic vblank deadline is approaching but the fence is not
+ * yet signaled.
+ */
+void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
+{
+ if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
+ fence->ops->set_deadline(fence, deadline);
+}
+EXPORT_SYMBOL(dma_fence_set_deadline);
+
+/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
* @seq: the seq_file to put the textual description into
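
A hedged sketch (an assumed consumer, not part of this patch) of a waiter handing
the signaler a vblank-relative deadline hint before blocking, per the deadline
hints documentation above; wait_with_deadline() is an illustrative name:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>

static long wait_with_deadline(struct dma_fence *fence, ktime_t next_vblank)
{
	/* hint only: the signaler may boost clocks, reschedule, or ignore it */
	dma_fence_set_deadline(fence, next_vblank);

	/* wait semantics are unchanged by the hint */
	return dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
}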
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index c9e41e8a9e27..84ae708fafe7 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -314,7 +314,7 @@ static int dma_heap_init(void)
if (ret)
return ret;
- dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+ dma_heap_class = class_create(DEVNAME);
if (IS_ERR(dma_heap_class)) {
unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
return PTR_ERR(dma_heap_class);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 1c76aed8e262..2a594b754af1 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -684,6 +684,28 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
+/**
+ * dma_resv_set_deadline - Set a deadline on reservation's objects fences
+ * @obj: the reservation object
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ * @deadline: the requested deadline (MONOTONIC)
+ *
+ * May be called without holding the dma_resv lock. Sets @deadline on
+ * all fences filtered by @usage.
+ */
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+ ktime_t deadline)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, obj, usage);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_fence_set_deadline(fence, deadline);
+ }
+ dma_resv_iter_end(&cursor);
+}
+EXPORT_SYMBOL_GPL(dma_resv_set_deadline);
/**
* dma_resv_test_signaled - Test if a reservation object's fences have been
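
A similarly hedged sketch of fanning a scanout deadline out over a reservation
object, as a KMS atomic-commit helper might do; hint_scanout_deadline() is an
illustrative name:

#include <linux/dma-resv.h>
#include <linux/ktime.h>

static void hint_scanout_deadline(struct dma_resv *resv, ktime_t vblank)
{
	/* no dma_resv lock required, per the kernel-doc above */
	dma_resv_set_deadline(resv, DMA_RESV_USAGE_WRITE, vblank);
}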
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 1131fb943992..a7f048048864 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -407,4 +407,3 @@ static int add_default_cma_heap(void)
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index e8bd10e60998..ee7059399e9c 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -41,12 +41,11 @@ struct dma_heap_attachment {
bool mapped;
};
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
-#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
| __GFP_NORETRY) & ~__GFP_RECLAIM) \
| __GFP_COMP)
-static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
* The selection of the orders used for allocation (1MB, 64K, 4K) is designed
* to match with the sizes often found in IOMMUs. Using order 4 pages instead
@@ -440,4 +439,3 @@ static int system_heap_create(void)
return 0;
}
module_init(system_heap_create);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 283816fbd72f..01f2e86f3f7c 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
static int list_limit = 1024;
module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+ void *vaddr;
+
+ dma_resv_assert_held(buf->resv);
+
+ vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+ return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+
+ dma_resv_assert_held(buf->resv);
+
+ vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
enum dma_data_direction direction)
{
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
.mmap = mmap_udmabuf,
+ .vmap = vmap_udmabuf,
+ .vunmap = vunmap_udmabuf,
.begin_cpu_access = begin_cpu_udmabuf,
.end_cpu_access = end_cpu_udmabuf,
};
@@ -402,4 +430,3 @@ module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b6d48d54f42f..f5f422f9b850 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -245,7 +245,7 @@ config FSL_RAID
config HISI_DMA
tristate "HiSilicon DMA Engine support"
- depends on ARM64 || COMPILE_TEST
+ depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -610,18 +610,6 @@ config SPRD_DMA
help
Enable support for the on-chip DMA controller on Spreadtrum platform.
-config S3C24XX_DMAC
- bool "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support for the Samsung S3C24XX DMA controller driver. The
- DMA controller is having multiple DMA channels which can be
- configured for different peripherals like audio, UART, SPI.
- The DMA controller can transfer data from memory to peripheral,
- periphal to memory, periphal to periphal and memory to memory.
-
config TXX9_DMAC
tristate "Toshiba TXx9 SoC DMA support"
depends on MACH_TX49XX
@@ -635,6 +623,7 @@ config TEGRA186_GPC_DMA
depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT
depends on IOMMU_API
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Support for the NVIDIA Tegra General Purpose Central DMA controller.
The DMA controller has multiple DMA channels which can be configured
@@ -728,6 +717,20 @@ config XILINX_DMA
the scatter gather interface with multiple channels independent
configuration support.
+config XILINX_XDMA
+ tristate "Xilinx DMA/Bridge Subsystem DMA Engine"
+ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select REGMAP_MMIO
+ help
+ Enable support for Xilinx DMA/Bridge Subsystem DMA engine. The DMA
+ provides high performance block data movement between Host memory
+	  and the DMA subsystem. These direct memory transfers can be in both
+	  the Host to Card (H2C) and Card to Host (C2H) directions.
+ The core also provides up to 16 user interrupt wires that generate
+ interrupts to the host.
+
config XILINX_ZYNQMP_DMA
tristate "Xilinx ZynqMP DMA Engine"
depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5b55ada052a7..a4fd1ce29510 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -70,7 +70,6 @@ obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
-obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 90f28bda29c8..4cf8da77bdd9 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -75,6 +75,7 @@
#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_GLOBAL_INTSTATE(idx) (0x0050 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
admac_stop_chan(adchan);
admac_reset_rings(adchan);
- adchan->current_tx = NULL;
+ if (adchan->current_tx) {
+ list_add_tail(&adchan->current_tx->node, &adchan->to_free);
+ adchan->current_tx = NULL;
+ }
/*
* Descriptors can only be freed after the tasklet
* has been killed (in admac_synchronize).
@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
static irqreturn_t admac_interrupt(int irq, void *devid)
{
struct admac_data *ad = devid;
- u32 rx_intstate, tx_intstate;
+ u32 rx_intstate, tx_intstate, global_intstate;
int i;
rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+ global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
- if (!tx_intstate && !rx_intstate)
+ if (!tx_intstate && !rx_intstate && !global_intstate)
return IRQ_NONE;
for (i = 0; i < ad->nchannels; i += 2) {
@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
rx_intstate >>= 1;
}
+ if (global_intstate) {
+ dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
+ global_intstate);
+ writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+ }
+
return IRQ_HANDLED;
}
@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index d6c9781cd46a..7da6d9b6098e 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include "dmaengine.h"
@@ -186,6 +187,7 @@
enum atc_status {
AT_XDMAC_CHAN_IS_CYCLIC = 0,
AT_XDMAC_CHAN_IS_PAUSED,
+ AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
};
struct at_xdmac_layout {
@@ -240,9 +242,11 @@ struct at_xdmac_chan {
struct at_xdmac {
struct dma_device dma;
void __iomem *regs;
+ struct device *dev;
int irq;
struct clk *clk;
u32 save_gim;
+ u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
const struct at_xdmac_layout *layout;
struct at_xdmac_chan chan[];
@@ -345,6 +349,11 @@ static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}
+static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
+{
+ return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
+}
+
static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
@@ -361,13 +370,65 @@ MODULE_PARM_DESC(init_nr_desc_per_channel,
"initial descriptors per channel (default: 64)");
+static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+ if (!desc->active_xfer)
+ continue;
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+ }
+}
+
+static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *desc, *_desc;
+ int ret;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+ if (!desc->active_xfer)
+ continue;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
- return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return false;
+
+ ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
+ return ret;
}
-static void at_xdmac_off(struct at_xdmac *atxdmac)
+static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
{
+ struct dma_chan *chan, *_chan;
+ struct at_xdmac_chan *atchan;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return;
+
at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
/* Wait that all chans are disabled. */
@@ -375,6 +436,18 @@ static void at_xdmac_off(struct at_xdmac *atxdmac)
cpu_relax();
at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
+
+ /* Decrement runtime PM ref counter for each active descriptor. */
+ if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
+ device_node) {
+ atchan = to_at_xdmac_chan(chan);
+ at_xdmac_runtime_suspend_descriptors(atchan);
+ }
+ }
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
}
/* Call with lock hold. */
@@ -383,6 +456,11 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return;
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
@@ -462,7 +540,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
-
}
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -1456,14 +1533,14 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+ struct dma_tx_state *txstate)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
- int residue, retry;
+ int residue, retry, pm_status;
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@@ -1473,6 +1550,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if (ret == DMA_COMPLETE || !txstate)
return ret;
+ pm_status = pm_runtime_resume_and_get(atxdmac->dev);
+ if (pm_status < 0)
+ return DMA_ERROR;
+
spin_lock_irqsave(&atchan->lock, flags);
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
@@ -1590,6 +1671,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
spin_unlock:
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
return ret;
}
@@ -1636,6 +1719,11 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *bad_desc;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return;
/*
* The descriptor currently at the head of the active list is
@@ -1665,12 +1753,16 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
bad_desc->lld.mbr_ubc);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
/* Then continue with usual descriptor management */
}
static void at_xdmac_tasklet(struct tasklet_struct *t)
{
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
u32 error_mask;
@@ -1720,6 +1812,13 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
at_xdmac_advance_work(atchan);
spin_unlock_irq(&atchan->lock);
+
+ /*
+ * Decrement runtime PM ref counter incremented in
+ * at_xdmac_start_xfer().
+ */
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
}
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
@@ -1806,46 +1905,98 @@ static int at_xdmac_device_config(struct dma_chan *chan,
return ret;
}
+static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
+ struct at_xdmac_chan *atchan)
+{
+ at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
+ (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+ cpu_relax();
+}
+
+static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&atchan->lock, flags);
+ set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
+ at_xdmac_device_pause_set(atxdmac, atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+}
+
static int at_xdmac_device_pause(struct dma_chan *chan)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
+ int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
return 0;
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&atchan->lock, flags);
- at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
- while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
- & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
- cpu_relax();
+
+ at_xdmac_device_pause_set(atxdmac, atchan);
+ /* Decrement runtime PM ref counter for each active descriptor. */
+ at_xdmac_runtime_suspend_descriptors(atchan);
+
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
return 0;
}
+static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&atchan->lock, flags);
+ at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+}
+
static int at_xdmac_device_resume(struct dma_chan *chan)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
+ int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&atchan->lock, flags);
- if (!at_xdmac_chan_is_paused(atchan)) {
- spin_unlock_irqrestore(&atchan->lock, flags);
- return 0;
- }
+ if (!at_xdmac_chan_is_paused(atchan))
+ goto unlock;
+
+ /* Increment runtime PM ref counter for each active descriptor. */
+ ret = at_xdmac_runtime_resume_descriptors(atchan);
+ if (ret < 0)
+ goto unlock;
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+
+unlock:
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
- return 0;
+ return ret;
}
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
@@ -1854,9 +2005,14 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
+ int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1867,12 +2023,24 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
list_del(&desc->xfer_node);
list_splice_tail_init(&desc->descs_list,
&atchan->free_descs_list);
+ /*
+ * We incremented the runtime PM reference count on
+ * at_xdmac_start_xfer() for this descriptor. Now it's time
+ * to release it.
+ */
+ if (desc->active_xfer) {
+ pm_runtime_put_autosuspend(atxdmac->dev);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ }
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
return 0;
}
@@ -1974,23 +2142,36 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
if (at_xdmac_chan_is_cyclic(atchan)) {
- if (!at_xdmac_chan_is_paused(atchan))
- at_xdmac_device_pause(chan);
+ if (!at_xdmac_chan_is_paused(atchan)) {
+ dev_warn(chan2dev(chan), "%s: channel %d not paused\n",
+ __func__, chan->chan_id);
+ at_xdmac_device_pause_internal(atchan);
+ at_xdmac_runtime_suspend_descriptors(atchan);
+ }
atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
}
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
- at_xdmac_off(atxdmac);
+ at_xdmac_off(atxdmac, false);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_noidle(atxdmac->dev);
clk_disable_unprepare(atxdmac->clk);
+
return 0;
}
@@ -2000,13 +2181,14 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
struct at_xdmac_chan *atchan;
struct dma_chan *chan, *_chan;
struct platform_device *pdev = container_of(dev, struct platform_device, dev);
- int i;
- int ret;
+ int i, ret;
ret = clk_prepare_enable(atxdmac->clk);
if (ret)
return ret;
+ pm_runtime_get_noresume(atxdmac->dev);
+
at_xdmac_axi_config(pdev);
/* Clear pending interrupts. */
@@ -2019,20 +2201,59 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
+
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
if (at_xdmac_chan_is_cyclic(atchan)) {
- if (at_xdmac_chan_is_paused(atchan))
- at_xdmac_device_resume(chan);
+ /*
+ * Resume only channels not explicitly paused by
+ * consumers.
+ */
+ if (at_xdmac_chan_is_paused_internal(atchan)) {
+ ret = at_xdmac_runtime_resume_descriptors(atchan);
+ if (ret < 0)
+ return ret;
+ at_xdmac_device_resume_internal(atchan);
+ }
+
+ /*
+ * We may resume from a deep sleep state where power
+ * to the DMA controller is cut off. Thus, restore the
+ * suspend state of channels set through the dmaengine API.
+ */
+ else if (at_xdmac_chan_is_paused(atchan))
+ at_xdmac_device_pause_set(atxdmac, atchan);
+
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
wmb();
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ if (atxdmac->save_gs & atchan->mask)
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
+{
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
+
+ clk_disable(atxdmac->clk);
+
return 0;
}
+static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
+{
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
+
+ return clk_enable(atxdmac->clk);
+}
+
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
@@ -2071,6 +2292,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->regs = base;
atxdmac->irq = irq;
+ atxdmac->dev = &pdev->dev;
atxdmac->layout = of_device_get_match_data(&pdev->dev);
if (!atxdmac->layout)
@@ -2135,11 +2357,20 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- /* Disable all chans and interrupts. */
- at_xdmac_off(atxdmac);
+ platform_set_drvdata(pdev, atxdmac);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
/* Init channels. */
INIT_LIST_HEAD(&atxdmac->dma.channels);
+
+ /* Disable all chans and interrupts. */
+ at_xdmac_off(atxdmac, true);
+
for (i = 0; i < nr_channels; i++) {
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
@@ -2159,12 +2390,11 @@ static int at_xdmac_probe(struct platform_device *pdev)
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
cpu_relax();
}
- platform_set_drvdata(pdev, atxdmac);
ret = dma_async_device_register(&atxdmac->dma);
if (ret) {
dev_err(&pdev->dev, "fail to register DMA engine device\n");
- goto err_clk_disable;
+ goto err_pm_disable;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -2179,10 +2409,18 @@ static int at_xdmac_probe(struct platform_device *pdev)
at_xdmac_axi_config(pdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err_dma_unregister:
dma_async_device_unregister(&atxdmac->dma);
+err_pm_disable:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(atxdmac->clk);
err_free_irq:
@@ -2195,9 +2433,12 @@ static int at_xdmac_remove(struct platform_device *pdev)
struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
int i;
- at_xdmac_off(atxdmac);
+ at_xdmac_off(atxdmac, true);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&atxdmac->dma);
+ pm_runtime_disable(atxdmac->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
clk_disable_unprepare(atxdmac->clk);
free_irq(atxdmac->irq, atxdmac);
@@ -2215,6 +2456,8 @@ static int at_xdmac_remove(struct platform_device *pdev)
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
.prepare = atmel_xdmac_prepare,
SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
+ SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
+ atmel_xdmac_runtime_resume, NULL)
};
static const struct of_device_id atmel_xdmac_dt_ids[] = {
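
The at_xdmac changes above all follow the usual runtime PM autosuspend pattern: take a reference with pm_runtime_resume_and_get() before touching hardware, then mark the device busy and drop the reference with pm_runtime_put_autosuspend() so the block can power down after the 500 ms delay configured in probe. A minimal sketch of that pattern against a generic device; the foo_* names are illustrative and not part of this driver:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Sketch: enable autosuspend once, typically at probe time. */
static void foo_pm_init(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 500);	/* idle time before suspend, in ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
}

/* Sketch: bracket every register access with a get/put pair. */
static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* power the block up, or fail */
	if (ret < 0)
		return ret;

	/* ... access hardware registers here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* may suspend after the delay elapses */
	return 0;
}
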
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 630dfbb01a40..0807fb9eb262 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -878,7 +878,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
- struct resource *res;
void __iomem *base;
int rc;
int i, j;
@@ -902,8 +901,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index c465758e7193..0553956f7456 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -38,7 +38,7 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
{
int rv;
const u32 *regaddr_p;
- u64 regaddr64, size64;
+ struct resource res;
unsigned int psize;
/* Create our state struct */
@@ -56,21 +56,18 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
}
/* Get address and size of the sram */
- regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
- if (!regaddr_p) {
+ rv = of_address_to_resource(sram_node, 0, &res);
+ if (rv) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Invalid device node !\n", owner);
- rv = -EINVAL;
goto error_free;
}
- regaddr64 = of_translate_address(sram_node, regaddr_p);
-
- bcom_sram->base_phys = (phys_addr_t) regaddr64;
- bcom_sram->size = (unsigned int) size64;
+ bcom_sram->base_phys = res.start;
+ bcom_sram->size = resource_size(&res);
/* Request region */
- if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+ if (!request_mem_region(res.start, resource_size(&res), owner)) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Couldn't request region !\n", owner);
rv = -EBUSY;
@@ -79,7 +76,7 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
/* Map SRAM */
/* sram is not really __iomem */
- bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+ bcom_sram->base_virt = (void *)ioremap(res.start, resource_size(&res));
if (!bcom_sram->base_virt) {
printk(KERN_ERR "%s: bcom_sram_init: "
@@ -120,7 +117,7 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
return 0;
error_release:
- release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+ release_mem_region(res.start, resource_size(&res));
error_free:
kfree(bcom_sram);
bcom_sram = NULL;
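
In the bestcomm/sram.c hunk above, the open-coded of_get_address() plus of_translate_address() pair is replaced by of_address_to_resource(), which returns the translated base address and size in a single struct resource. A short sketch of the newer idiom, assuming a generic "foo" owner string and reducing error handling to a NULL return:

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Sketch: map the first "reg" region of a device-tree node. */
static void __iomem *foo_map_node(struct device_node *np)
{
	struct resource res;

	if (of_address_to_resource(np, 0, &res))	/* fills start, end, flags */
		return NULL;

	if (!request_mem_region(res.start, resource_size(&res), "foo"))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
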
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index f30dabc99795..a812b9b00e6b 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -910,7 +910,6 @@ static int axi_dmac_probe(struct platform_device *pdev)
{
struct dma_device *dma_dev;
struct axi_dmac *dmac;
- struct resource *res;
struct regmap *regmap;
unsigned int version;
int ret;
@@ -925,8 +924,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (dmac->irq == 0)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->base = devm_ioremap_resource(&pdev->dev, res);
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
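
Both the bcm2835-dma and dma-axi-dmac hunks above drop the temporary struct resource by calling devm_platform_ioremap_resource(), which combines platform_get_resource() and devm_ioremap_resource(). A minimal sketch, with foo_probe as a hypothetical probe callback:

#include <linux/platform_device.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM index 0, then requests and maps it (managed). */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
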
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c741b6431958..826b98284fa1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -172,7 +172,7 @@ static ssize_t memcpy_count_show(struct device *dev,
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->memcpy_count;
- err = sprintf(buf, "%lu\n", count);
+ err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -194,7 +194,7 @@ static ssize_t bytes_transferred_show(struct device *dev,
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->bytes_transferred;
- err = sprintf(buf, "%lu\n", count);
+ err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -212,7 +212,7 @@ static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
mutex_lock(&dma_list_mutex);
chan = dev_to_dma_chan(dev);
if (chan)
- err = sprintf(buf, "%d\n", chan->client_count);
+ err = sysfs_emit(buf, "%d\n", chan->client_count);
else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
- goto out;
+ chan->client_count++;
+ return 0;
}
if (!try_module_get(owner))
@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
+ chan->client_count++;
+
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
-out:
- chan->client_count++;
return 0;
err_out:
@@ -1322,11 +1323,8 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
-static void dmam_device_release(struct device *dev, void *res)
+static void dmaenginem_async_device_unregister(void *device)
{
- struct dma_device *device;
-
- device = *(struct dma_device **)res;
dma_async_device_unregister(device);
}
@@ -1338,22 +1336,13 @@ static void dmam_device_release(struct device *dev, void *res)
*/
int dmaenginem_async_device_register(struct dma_device *device)
{
- void *p;
int ret;
- p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
ret = dma_async_device_register(device);
- if (!ret) {
- *(struct dma_device **)p = device;
- devres_add(device->dev, p);
- } else {
- devres_free(p);
- }
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
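
The dmaenginem_async_device_register() rework above swaps the hand-rolled devres_alloc()/devres_add() bookkeeping for devm_add_action_or_reset(), which either registers the cleanup action or runs it immediately on failure. The same pattern in isolation, with hypothetical foo_* names standing in for the real objects:

#include <linux/device.h>

struct foo;				/* hypothetical resource type */
void foo_destroy(struct foo *f);	/* hypothetical teardown helper */

static void foo_devm_destroy(void *data)
{
	foo_destroy(data);
}

/* Sketch: tie teardown of an already-created object to @dev's lifetime. */
static int foo_create_managed(struct device *dev, struct foo *f)
{
	/*
	 * On failure the action is executed right away, so the caller
	 * never has to undo the creation by hand.
	 */
	return devm_add_action_or_reset(dev, foo_devm_destroy, f);
}
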
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a183d93bd7e2..6937cc0c0b65 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -21,10 +21,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -46,6 +48,10 @@
DMA_SLAVE_BUSWIDTH_32_BYTES | \
DMA_SLAVE_BUSWIDTH_64_BYTES)
+#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)
+#define AXI_DMA_FLAG_HAS_RESETS BIT(1)
+#define AXI_DMA_FLAG_USE_CFG2 BIT(2)
+
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
@@ -86,7 +92,8 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan,
cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
- if (chan->chip->dw->hdata->reg_map_8_channels) {
+ if (chan->chip->dw->hdata->reg_map_8_channels &&
+ !chan->chip->dw->hdata->use_cfg2) {
cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
@@ -325,8 +332,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
completed_length = completed_blocks * len;
bytes = length - completed_length;
- } else {
- bytes = vd_to_axi_desc(vdesc)->length;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -1018,6 +1023,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
@@ -1032,6 +1042,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
@@ -1136,7 +1147,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
axi_chan_disable(chan);
ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
- !(val & chan_active), 1000, 10000);
+ !(val & chan_active), 1000, 50000);
if (ret == -ETIMEDOUT)
dev_warn(dchan2dev(dchan),
"%s failed to stop\n", axi_chan_name(chan));
@@ -1363,11 +1374,11 @@ static int parse_device_properties(struct axi_dma_chip *chip)
static int dw_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
- struct resource *mem;
struct dw_axi_dma *dw;
struct dw_axi_dma_hcfg *hdata;
+ struct reset_control *resets;
+ unsigned int flags;
u32 i;
int ret;
@@ -1391,17 +1402,29 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(chip->dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+ flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->apb_regs))
return PTR_ERR(chip->apb_regs);
}
+ if (flags & AXI_DMA_FLAG_HAS_RESETS) {
+ resets = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(resets))
+ return PTR_ERR(resets);
+
+ ret = reset_control_deassert(resets);
+ if (ret)
+ return ret;
+ }
+
+ chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);
+
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
if (IS_ERR(chip->core_clk))
return PTR_ERR(chip->core_clk);
@@ -1552,8 +1575,15 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
};
static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,axi-dma-1.01a" },
- { .compatible = "intel,kmb-axi-dma" },
+ {
+ .compatible = "snps,axi-dma-1.01a"
+ }, {
+ .compatible = "intel,kmb-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
+ }, {
+ .compatible = "starfive,jh7110-axi-dma",
+ .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
+ },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
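
Instead of matching compatible strings in probe, the dw-axi-dmac driver now encodes per-variant quirks as a bitmask in the .data field of each of_device_id entry and reads it back with of_device_get_match_data(). A sketch of that pattern; the vendor strings and FOO_FLAG_* names are made up for illustration:

#include <linux/bits.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define FOO_FLAG_HAS_RESETS	BIT(0)
#define FOO_FLAG_USE_CFG2	BIT(1)

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1" },
	{ .compatible = "vendor,foo-v2",
	  .data = (void *)(FOO_FLAG_HAS_RESETS | FOO_FLAG_USE_CFG2) },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	unsigned int flags = (uintptr_t)of_device_get_match_data(&pdev->dev);

	if (flags & FOO_FLAG_HAS_RESETS) {
		/* acquire and deassert resets for this variant */
	}

	return 0;
}
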
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index e9d5eb0fd594..eb267cb24f67 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -33,6 +33,7 @@ struct dw_axi_dma_hcfg {
/* Register map for DMAX_NUM_CHANNELS <= 8 */
bool reg_map_8_channels;
bool restrict_axi_burst_len;
+ bool use_cfg2;
};
struct axi_dma_chan {
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig
index 7ff17b2db6a1..2b6f2679508d 100644
--- a/drivers/dma/dw-edma/Kconfig
+++ b/drivers/dma/dw-edma/Kconfig
@@ -9,11 +9,14 @@ config DW_EDMA
Support the Synopsys DesignWare eDMA controller, normally
implemented on endpoints SoCs.
+if DW_EDMA
+
config DW_EDMA_PCIE
tristate "Synopsys DesignWare eDMA PCIe driver"
depends on PCI && PCI_MSI
- select DW_EDMA
help
Provides the glue logic between the Synopsys DesignWare
eDMA controller and an endpoint PCIe device. This also serves
as a reference design for anyone who wants to use this IP.
+
+endif # DW_EDMA
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c54b24ff5206..7d2b73ef0872 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -39,6 +39,17 @@ struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
return container_of(vd, struct dw_edma_desc, vd);
}
+static inline
+u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
+{
+ struct dw_edma_chip *chip = chan->dw->chip;
+
+ if (chip->ops->pci_address)
+ return chip->ops->pci_address(chip->dev, cpu_addr);
+
+ return cpu_addr;
+}
+
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *burst;
@@ -170,7 +181,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
struct dw_edma_chunk *child;
struct dw_edma_desc *desc;
@@ -178,16 +189,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
vd = vchan_next_desc(&chan->vc);
if (!vd)
- return;
+ return 0;
desc = vd2dw_edma_desc(vd);
if (!desc)
- return;
+ return 0;
child = list_first_entry_or_null(&desc->chunk->list,
struct dw_edma_chunk, list);
if (!child)
- return;
+ return 0;
dw_edma_v0_core_start(child, !desc->xfer_sz);
desc->xfer_sz += child->ll_region.sz;
@@ -195,6 +206,26 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
list_del(&child->list);
kfree(child);
desc->chunks_alloc--;
+
+ return 1;
+}
+
+static void dw_edma_device_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if (chan->dir == EDMA_DIR_READ)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ } else {
+ if (chan->dir == EDMA_DIR_WRITE)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ }
}
static int dw_edma_device_config(struct dma_chan *dchan,
@@ -277,9 +308,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
unsigned long flags;
+ if (!chan->configured)
+ return;
+
spin_lock_irqsave(&chan->vc.lock, flags);
- if (chan->configured && chan->request == EDMA_REQ_NONE &&
- chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+ if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+ chan->status == EDMA_ST_IDLE) {
chan->status = EDMA_ST_BUSY;
dw_edma_start_transfer(chan);
}
@@ -327,11 +361,12 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
enum dma_transfer_direction dir = xfer->direction;
- phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
+ u64 src_addr, dst_addr;
+ size_t fsz = 0;
u32 cnt = 0;
int i;
@@ -381,9 +416,9 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (xfer->xfer.sg.len < 1)
return NULL;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (!xfer->xfer.il->numf)
+ if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
return NULL;
- if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
return NULL;
} else {
return NULL;
@@ -405,16 +440,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
dst_addr = chan->config.dst_addr;
}
+ if (dir == DMA_DEV_TO_MEM)
+ src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
+ else
+ dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);
+
if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (xfer->xfer.il->numf > 0)
- cnt = xfer->xfer.il->numf;
- else
- cnt = xfer->xfer.il->frame_size;
+ cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
+ fsz = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
@@ -436,7 +474,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
else if (xfer->type == EDMA_XFER_INTERLEAVED)
- burst->sz = xfer->xfer.il->sgl[i].size;
+ burst->sz = xfer->xfer.il->sgl[i % fsz].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
@@ -455,6 +493,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ burst->dar = dst_addr;
}
} else {
burst->dar = dst_addr;
@@ -470,25 +510,24 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ burst->sar = src_addr;
}
}
if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
- } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
- xfer->xfer.il->frame_size > 0) {
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
struct dma_interleaved_template *il = xfer->xfer.il;
- struct data_chunk *dc = &il->sgl[i];
+ struct data_chunk *dc = &il->sgl[i % fsz];
- if (il->src_sgl) {
- src_addr += burst->sz;
+ src_addr += burst->sz;
+ if (il->src_sgl)
src_addr += dmaengine_get_src_icg(il, dc);
- }
- if (il->dst_sgl) {
- dst_addr += burst->sz;
+ dst_addr += burst->sz;
+ if (il->dst_sgl)
dst_addr += dmaengine_get_dst_icg(il, dc);
- }
}
}
@@ -568,14 +607,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
switch (chan->request) {
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
- if (desc->chunks_alloc) {
- chan->status = EDMA_ST_BUSY;
- dw_edma_start_transfer(chan);
- } else {
+ if (!desc->chunks_alloc) {
list_del(&vd->node);
vchan_cookie_complete(vd);
- chan->status = EDMA_ST_IDLE;
}
+
+ /* Continue transferring if there are remaining chunks or issued requests.
+ */
+ chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
break;
case EDMA_REQ_STOP:
@@ -701,92 +740,76 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
}
}
-static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
- u32 wr_alloc, u32 rd_alloc)
+static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
struct dw_edma_chip *chip = dw->chip;
- struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 alloc, off_alloc;
- u32 i, j, cnt;
- int err = 0;
+ u32 i, ch_cnt;
u32 pos;
- if (write) {
- i = 0;
- cnt = dw->wr_ch_cnt;
- dma = &dw->wr_edma;
- alloc = wr_alloc;
- off_alloc = 0;
- } else {
- i = dw->wr_ch_cnt;
- cnt = dw->rd_ch_cnt;
- dma = &dw->rd_edma;
- alloc = rd_alloc;
- off_alloc = wr_alloc;
- }
+ ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+ dma = &dw->dma;
INIT_LIST_HEAD(&dma->channels);
- for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+
+ for (i = 0; i < ch_cnt; i++) {
chan = &dw->chan[i];
- dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
- if (!dt_region)
- return -ENOMEM;
+ chan->dw = dw;
- chan->vc.chan.private = dt_region;
+ if (i < dw->wr_ch_cnt) {
+ chan->id = i;
+ chan->dir = EDMA_DIR_WRITE;
+ } else {
+ chan->id = i - dw->wr_ch_cnt;
+ chan->dir = EDMA_DIR_READ;
+ }
- chan->dw = dw;
- chan->id = j;
- chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- if (write)
- chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
+ if (chan->dir == EDMA_DIR_WRITE)
+ chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
else
- chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
- write ? "write" : "read", j, chan->ll_max);
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read",
+ chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
+ else if (chan->dir == EDMA_DIR_WRITE)
+ pos = chan->id % wr_alloc;
else
- pos = off_alloc + (j % alloc);
+ pos = wr_alloc + chan->id % rd_alloc;
irq = &dw->irq[pos];
- if (write)
- irq->wr_mask |= BIT(j);
+ if (chan->dir == EDMA_DIR_WRITE)
+ irq->wr_mask |= BIT(chan->id);
else
- irq->rd_mask |= BIT(j);
+ irq->rd_mask |= BIT(chan->id);
irq->dw = dw;
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
- write ? "write" : "read", j,
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
chan->vc.desc_free = vchan_free_desc;
- vchan_init(&chan->vc, dma);
+ chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
+ &dw->chip->dt_region_wr[chan->id] :
+ &dw->chip->dt_region_rd[chan->id];
- if (write) {
- dt_region->paddr = chip->dt_region_wr[j].paddr;
- dt_region->vaddr = chip->dt_region_wr[j].vaddr;
- dt_region->sz = chip->dt_region_wr[j].sz;
- } else {
- dt_region->paddr = chip->dt_region_rd[j].paddr;
- dt_region->vaddr = chip->dt_region_rd[j].vaddr;
- dt_region->sz = chip->dt_region_rd[j].sz;
- }
+ vchan_init(&chan->vc, dma);
dw_edma_v0_core_device_config(chan);
}
@@ -797,16 +820,16 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
+ dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma->chancnt = cnt;
/* Set DMA channel callbacks */
dma->dev = chip->dev;
dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
dma->device_free_chan_resources = dw_edma_free_chan_resources;
+ dma->device_caps = dw_edma_device_caps;
dma->device_config = dw_edma_device_config;
dma->device_pause = dw_edma_device_pause;
dma->device_resume = dw_edma_device_resume;
@@ -820,9 +843,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_set_max_seg_size(dma->dev, U32_MAX);
/* Register DMA device */
- err = dma_async_device_register(dma);
-
- return err;
+ return dma_async_device_register(dma);
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
@@ -893,10 +914,8 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw_edma_interrupt_read,
IRQF_SHARED, dw->name,
&dw->irq[i]);
- if (err) {
- dw->nr_irqs = i;
- return err;
- }
+ if (err)
+ goto err_irq_free;
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[i].msi);
@@ -905,6 +924,14 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw->nr_irqs = i;
}
+ return 0;
+
+err_irq_free:
+ for (i--; i >= 0; i--) {
+ irq = chip->ops->irq_vector(dev, i);
+ free_irq(irq, &dw->irq[i]);
+ }
+
return err;
}
@@ -951,7 +978,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (!dw->chan)
return -ENOMEM;
- snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
+ snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
+ dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
dw_edma_v0_core_off(dw);
@@ -961,13 +989,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
return err;
- /* Setup write channels */
- err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
- if (err)
- goto err_irq_free;
-
- /* Setup read channels */
- err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
+ /* Setup write/read channels */
+ err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@@ -993,6 +1016,10 @@ int dw_edma_remove(struct dw_edma_chip *chip)
struct dw_edma *dw = chip->dw;
int i;
+ /* Skip removal if no private data found */
+ if (!dw)
+ return -ENODEV;
+
/* Disable eDMA */
dw_edma_v0_core_off(dw);
@@ -1001,23 +1028,13 @@ int dw_edma_remove(struct dw_edma_chip *chip)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+ dma_async_device_unregister(&dw->dma);
+ list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
- dma_async_device_unregister(&dw->rd_edma);
- list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
- vc.chan.device_node) {
- tasklet_kill(&chan->vc.task);
- list_del(&chan->vc.chan.device_node);
- }
-
- /* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(dw);
-
return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
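
With the interleaved-transfer fixes above, the core walks numf frames of frame_size chunks each, reusing the template's sgl[] on every frame and advancing both addresses by the burst size regardless of src_sgl/dst_sgl. A simplified, stand-alone model of that address stepping (plain C, not driver code; inter-chunk gaps are omitted):

#include <stddef.h>
#include <stdint.h>

struct chunk {
	size_t size;		/* bytes carried by one burst */
};

static void walk_interleaved(uint64_t src, uint64_t dst,
			     const struct chunk *sgl, size_t frame_size,
			     size_t numf)
{
	size_t cnt = numf * frame_size;		/* one burst per chunk, per frame */
	size_t i;

	for (i = 0; i < cnt; i++) {
		size_t sz = sgl[i % frame_size].size;	/* wrap sgl[] each frame */

		/* a real driver would program a burst of sz bytes here */
		src += sz;
		dst += sz;
	}
}
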
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 85df2d511907..0ab2b6dba880 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -96,12 +96,11 @@ struct dw_edma_irq {
};
struct dw_edma {
- char name[20];
+ char name[32];
- struct dma_device wr_edma;
- u16 wr_ch_cnt;
+ struct dma_device dma;
- struct dma_device rd_edma;
+ u16 wr_ch_cnt;
u16 rd_ch_cnt;
struct dw_edma_irq *irq;
@@ -112,9 +111,6 @@ struct dw_edma {
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index d6b5e2463884..2b40f2b44f5e 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -95,8 +95,23 @@ static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
return pci_irq_vector(to_pci_dev(dev), nr);
}
+static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus_region region;
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = cpu_addr,
+ .end = cpu_addr,
+ };
+
+ pcibios_resource_to_bus(pdev->bus, &region, &res);
+ return region.start;
+}
+
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
+ .pci_address = dw_edma_pcie_address,
};
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
@@ -207,7 +222,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->id = pdev->devfn;
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
@@ -226,21 +240,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -251,21 +265,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -289,24 +303,24 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
- chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
+ chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
- chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
+ chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
- chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
+ chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
- chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
+ chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 77e6cfe52e0a..32f834a3848a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
@@ -53,8 +55,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_32(dw, rd_##name, value); \
} while (0)
-#ifdef CONFIG_64BIT
-
#define SET_64(dw, name, value) \
writeq(value, &(__dw_regs(dw)->name))
@@ -80,8 +80,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_64(dw, rd_##name, value); \
} while (0)
-#endif /* CONFIG_64BIT */
-
#define SET_COMPAT(dw, name, value) \
writel(value, &(__dw_regs(dw)->type.unroll.name))
@@ -161,72 +159,6 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_32(ll, value) \
- writel(value, ll)
-
-#ifdef CONFIG_64BIT
-
-static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
- u64 value, void __iomem *addr)
-{
- if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
- u32 viewport_sel;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&dw->lock, flags);
-
- viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
- if (dir == EDMA_DIR_READ)
- viewport_sel |= BIT(31);
-
- writel(viewport_sel,
- &(__dw_regs(dw)->type.legacy.viewport_sel));
- writeq(value, addr);
-
- raw_spin_unlock_irqrestore(&dw->lock, flags);
- } else {
- writeq(value, addr);
- }
-}
-
-static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
- const void __iomem *addr)
-{
- u32 value;
-
- if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
- u32 viewport_sel;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&dw->lock, flags);
-
- viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
- if (dir == EDMA_DIR_READ)
- viewport_sel |= BIT(31);
-
- writel(viewport_sel,
- &(__dw_regs(dw)->type.legacy.viewport_sel));
- value = readq(addr);
-
- raw_spin_unlock_irqrestore(&dw->lock, flags);
- } else {
- value = readq(addr);
- }
-
- return value;
-}
-
-#define SET_CH_64(dw, dir, ch, name, value) \
- writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
-
-#define GET_CH_64(dw, dir, ch, name) \
- readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-
-#define SET_LL_64(ll, value) \
- writeq(value, ll)
-
-#endif /* CONFIG_64BIT */
-
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
@@ -298,17 +230,53 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
GET_RW_32(dw, dir, int_status));
}
+static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
+ u32 control, u32 size, u64 sar, u64 dar)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
+
+ lli->control = control;
+ lli->transfer_size = size;
+ lli->sar.reg = sar;
+ lli->dar.reg = dar;
+ } else {
+ struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &lli->control);
+ writel(size, &lli->transfer_size);
+ writeq(sar, &lli->sar.reg);
+ writeq(dar, &lli->dar.reg);
+ }
+}
+
+static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ int i, u32 control, u64 pointer)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
+
+ llp->control = control;
+ llp->llp.reg = pointer;
+ } else {
+ struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &llp->control);
+ writeq(pointer, &llp->llp.reg);
+ }
+}
+
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma_v0_lli __iomem *lli;
- struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
int j;
- lli = chunk->ll_region.vaddr;
-
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -320,41 +288,16 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_EDMA_V0_RIE;
}
- /* Channel control */
- SET_LL_32(&lli[i].control, control);
- /* Transfer size */
- SET_LL_32(&lli[i].transfer_size, child->sz);
- /* SAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].sar.reg, child->sar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
- SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
- #endif /* CONFIG_64BIT */
- /* DAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].dar.reg, child->dar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
- SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
- #endif /* CONFIG_64BIT */
- i++;
+
+ dw_edma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
}
- llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
- /* Channel control */
- SET_LL_32(&llp->control, control);
- /* Linked list */
- #ifdef CONFIG_64BIT
- SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
- #else /* CONFIG_64BIT */
- SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
- SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
+ dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -504,8 +447,3 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_edma_v0_debugfs_on(dw);
}
-
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
-{
- dw_edma_v0_debugfs_off(dw);
-}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 75aec6d31b21..ab96a1f48080 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -23,6 +23,5 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 5226c9014703..0745d9e7d259 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -13,76 +13,79 @@
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
-#define REGS_ADDR(name) \
- ((void __force *)&regs->name)
-#define REGISTER(name) \
- { #name, REGS_ADDR(name) }
-
-#define WR_REGISTER(name) \
- { #name, REGS_ADDR(wr_##name) }
-#define RD_REGISTER(name) \
- { #name, REGS_ADDR(rd_##name) }
-
-#define WR_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.wr_##name) }
+#define REGS_ADDR(dw, name) \
+ ({ \
+ struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
+ \
+ (void __iomem *)&__regs->name; \
+ })
+
+#define REGS_CH_ADDR(dw, name, _dir, _ch) \
+ ({ \
+ struct dw_edma_v0_ch_regs __iomem *__ch_regs; \
+ \
+ if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \
+ __ch_regs = REGS_ADDR(dw, type.legacy.ch); \
+ else if (_dir == EDMA_DIR_READ) \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \
+ else \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \
+ \
+ (void __iomem *)&__ch_regs->name; \
+ })
+
+#define REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, name) }
+
+#define CTX_REGISTER(dw, name, dir, ch) \
+ { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch }
+
+#define WR_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, wr_##name) }
+#define RD_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, rd_##name) }
+
+#define WR_REGISTER_LEGACY(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.rd_##name) }
+ { dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) }
-#define WR_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.wr_##name) }
-#define RD_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.rd_##name) }
+#define WR_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) }
+#define RD_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) }
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dw_edma *dw;
-static struct dw_edma_v0_regs __iomem *regs;
-
-static struct {
- void __iomem *start;
- void __iomem *end;
-} lim[2][EDMA_V0_MAX_NR_CH];
-
-struct debugfs_entries {
+struct dw_edma_debugfs_entry {
+ struct dw_edma *dw;
const char *name;
- dma_addr_t *reg;
+ void __iomem *reg;
+ enum dw_edma_dir dir;
+ u16 ch;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
- void __iomem *reg = (void __force __iomem *)data;
+ struct dw_edma_debugfs_entry *entry = data;
+ struct dw_edma *dw = entry->dw;
+ void __iomem *reg = entry->reg;
+
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
- reg >= (void __iomem *)&regs->type.legacy.ch) {
- void __iomem *ptr = &regs->type.legacy.ch;
- u32 viewport_sel = 0;
+ reg >= REGS_ADDR(dw, type.legacy.ch)) {
unsigned long flags;
- u16 ch;
-
- for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
- ptr += (reg - lim[0][ch].start);
- goto legacy_sel_wr;
- }
-
- for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
- ptr += (reg - lim[1][ch].start);
- goto legacy_sel_rd;
- }
-
- return 0;
-legacy_sel_rd:
- viewport_sel = BIT(31);
-legacy_sel_wr:
- viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ u32 viewport_sel;
+
+ viewport_sel = entry->dir == EDMA_DIR_READ ? BIT(31) : 0;
+ viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch);
raw_spin_lock_irqsave(&dw->lock, flags);
- writel(viewport_sel, &regs->type.legacy.viewport_sel);
- *val = readl(ptr);
+ writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel));
+ *val = readl(reg);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
@@ -93,222 +96,197 @@ legacy_sel_wr:
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
-static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
- int nr_entries, struct dentry *dir)
+static void dw_edma_debugfs_create_x32(struct dw_edma *dw,
+ const struct dw_edma_debugfs_entry ini[],
+ int nr_entries, struct dentry *dent)
{
+ struct dw_edma_debugfs_entry *entries;
int i;
+ entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
+ GFP_KERNEL);
+ if (!entries)
+ return;
+
for (i = 0; i < nr_entries; i++) {
- if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
- entries[i].reg, &fops_x32))
- break;
+ entries[i] = ini[i];
+
+ debugfs_create_file_unsafe(entries[i].name, 0444, dent,
+ &entries[i], &fops_x32);
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
- struct dentry *dir)
+static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ u16 ch, struct dentry *dent)
{
- int nr_entries;
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ch_control1),
- REGISTER(ch_control2),
- REGISTER(transfer_size),
- REGISTER(sar.lsb),
- REGISTER(sar.msb),
- REGISTER(dar.lsb),
- REGISTER(dar.msb),
- REGISTER(llp.lsb),
- REGISTER(llp.msb),
+ struct dw_edma_debugfs_entry debugfs_regs[] = {
+ CTX_REGISTER(dw, ch_control1, dir, ch),
+ CTX_REGISTER(dw, ch_control2, dir, ch),
+ CTX_REGISTER(dw, transfer_size, dir, ch),
+ CTX_REGISTER(dw, sar.lsb, dir, ch),
+ CTX_REGISTER(dw, sar.msb, dir, ch),
+ CTX_REGISTER(dw, dar.lsb, dir, ch),
+ CTX_REGISTER(dw, dar.msb, dir, ch),
+ CTX_REGISTER(dw, llp.lsb, dir, ch),
+ CTX_REGISTER(dw, llp.msb, dir, ch),
};
+ int nr_entries;
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
}
-static void dw_edma_debugfs_regs_wr(struct dentry *dir)
+static noinline_for_stack void
+dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- WR_REGISTER(engine_en),
- WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight.lsb),
- WR_REGISTER(ch_arb_weight.msb),
+ WR_REGISTER(dw, engine_en),
+ WR_REGISTER(dw, doorbell),
+ WR_REGISTER(dw, ch_arb_weight.lsb),
+ WR_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- WR_REGISTER(int_status),
- WR_REGISTER(int_mask),
- WR_REGISTER(int_clear),
- WR_REGISTER(err_status),
- WR_REGISTER(done_imwr.lsb),
- WR_REGISTER(done_imwr.msb),
- WR_REGISTER(abort_imwr.lsb),
- WR_REGISTER(abort_imwr.msb),
- WR_REGISTER(ch01_imwr_data),
- WR_REGISTER(ch23_imwr_data),
- WR_REGISTER(ch45_imwr_data),
- WR_REGISTER(ch67_imwr_data),
- WR_REGISTER(linked_list_err_en),
+ WR_REGISTER(dw, int_status),
+ WR_REGISTER(dw, int_mask),
+ WR_REGISTER(dw, int_clear),
+ WR_REGISTER(dw, err_status),
+ WR_REGISTER(dw, done_imwr.lsb),
+ WR_REGISTER(dw, done_imwr.msb),
+ WR_REGISTER(dw, abort_imwr.lsb),
+ WR_REGISTER(dw, abort_imwr.msb),
+ WR_REGISTER(dw, ch01_imwr_data),
+ WR_REGISTER(dw, ch23_imwr_data),
+ WR_REGISTER(dw, ch45_imwr_data),
+ WR_REGISTER(dw, ch67_imwr_data),
+ WR_REGISTER(dw, linked_list_err_en),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
- WR_REGISTER_UNROLL(ch0_pwr_en),
- WR_REGISTER_UNROLL(ch1_pwr_en),
- WR_REGISTER_UNROLL(ch2_pwr_en),
- WR_REGISTER_UNROLL(ch3_pwr_en),
- WR_REGISTER_UNROLL(ch4_pwr_en),
- WR_REGISTER_UNROLL(ch5_pwr_en),
- WR_REGISTER_UNROLL(ch6_pwr_en),
- WR_REGISTER_UNROLL(ch7_pwr_en),
+ WR_REGISTER_UNROLL(dw, engine_chgroup),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ WR_REGISTER_UNROLL(dw, ch0_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch1_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch2_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch3_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch4_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch5_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch6_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(WRITE_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[0][i].start = &regs->type.unroll.ch[i].wr;
- lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs_rd(struct dentry *dir)
+static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
+ struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- RD_REGISTER(engine_en),
- RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight.lsb),
- RD_REGISTER(ch_arb_weight.msb),
+ RD_REGISTER(dw, engine_en),
+ RD_REGISTER(dw, doorbell),
+ RD_REGISTER(dw, ch_arb_weight.lsb),
+ RD_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- RD_REGISTER(int_status),
- RD_REGISTER(int_mask),
- RD_REGISTER(int_clear),
- RD_REGISTER(err_status.lsb),
- RD_REGISTER(err_status.msb),
- RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr.lsb),
- RD_REGISTER(done_imwr.msb),
- RD_REGISTER(abort_imwr.lsb),
- RD_REGISTER(abort_imwr.msb),
- RD_REGISTER(ch01_imwr_data),
- RD_REGISTER(ch23_imwr_data),
- RD_REGISTER(ch45_imwr_data),
- RD_REGISTER(ch67_imwr_data),
+ RD_REGISTER(dw, int_status),
+ RD_REGISTER(dw, int_mask),
+ RD_REGISTER(dw, int_clear),
+ RD_REGISTER(dw, err_status.lsb),
+ RD_REGISTER(dw, err_status.msb),
+ RD_REGISTER(dw, linked_list_err_en),
+ RD_REGISTER(dw, done_imwr.lsb),
+ RD_REGISTER(dw, done_imwr.msb),
+ RD_REGISTER(dw, abort_imwr.lsb),
+ RD_REGISTER(dw, abort_imwr.msb),
+ RD_REGISTER(dw, ch01_imwr_data),
+ RD_REGISTER(dw, ch23_imwr_data),
+ RD_REGISTER(dw, ch45_imwr_data),
+ RD_REGISTER(dw, ch67_imwr_data),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
- RD_REGISTER_UNROLL(ch0_pwr_en),
- RD_REGISTER_UNROLL(ch1_pwr_en),
- RD_REGISTER_UNROLL(ch2_pwr_en),
- RD_REGISTER_UNROLL(ch3_pwr_en),
- RD_REGISTER_UNROLL(ch4_pwr_en),
- RD_REGISTER_UNROLL(ch5_pwr_en),
- RD_REGISTER_UNROLL(ch6_pwr_en),
- RD_REGISTER_UNROLL(ch7_pwr_en),
+ RD_REGISTER_UNROLL(dw, engine_chgroup),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ RD_REGISTER_UNROLL(dw, ch0_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch1_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch2_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch3_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch4_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch5_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch6_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(READ_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(READ_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[1][i].start = &regs->type.unroll.ch[i].rd;
- lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs(void)
+static void dw_edma_debugfs_regs(struct dw_edma *dw)
{
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ctrl_data_arb_prior),
- REGISTER(ctrl),
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
+ REGISTER(dw, ctrl_data_arb_prior),
+ REGISTER(dw, ctrl),
};
- struct dentry *regs_dir;
+ struct dentry *regs_dent;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
- dw_edma_debugfs_regs_wr(regs_dir);
- dw_edma_debugfs_regs_rd(regs_dir);
+ dw_edma_debugfs_regs_wr(dw, regs_dent);
+ dw_edma_debugfs_regs_rd(dw, regs_dent);
}
-void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
+void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
- dw = _dw;
- if (!dw)
- return;
-
- regs = dw->chip->reg_base;
- if (!regs)
- return;
-
- dw->debugfs = debugfs_create_dir(dw->name, NULL);
- if (!dw->debugfs)
+ if (!debugfs_initialized())
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
- debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
-
- dw_edma_debugfs_regs();
-}
-
-void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
-{
- dw = _dw;
- if (!dw)
- return;
+ debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
- debugfs_remove_recursive(dw->debugfs);
- dw->debugfs = NULL;
+ dw_edma_debugfs_regs(dw);
}
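
For context on the conversion above: the rewrite threads the struct dw_edma pointer through every helper (and into each debugfs entry) instead of relying on the old file-scope dw/regs/lim statics, and it stops checking debugfs return values, since debugfs creation failures are intentionally non-fatal. A minimal sketch of that general pattern follows; the names x32_ctx, x32_get and x32_create are illustrative only and are not the driver's actual dw_edma_debugfs_entry machinery.

#include <linux/debugfs.h>
#include <linux/io.h>

/* Hypothetical per-file context replacing file-scope statics. */
struct x32_ctx {
	struct dw_edma *dw;
	void __iomem *reg;
};

static int x32_get(void *data, u64 *val)
{
	struct x32_ctx *ctx = data;

	*val = readl(ctx->reg);		/* read the mapped register on demand */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, x32_get, NULL, "0x%08llx\n");

static void x32_create(const char *name, struct x32_ctx *ctx, struct dentry *dir)
{
	/* No error checking needed: a failed debugfs call degrades gracefully. */
	debugfs_create_file_unsafe(name, 0444, dir, ctx, &fops_x32);
}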
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index 3391b86edf5a..fb3342d97d6d 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -13,15 +13,10 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-
-static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
-{
-}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_EDMA_V0_DEBUG_FS_H */
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 97ba3bfc10b1..5f7d690e3dba 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -889,7 +889,8 @@ static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
return NULL;
}
-static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
+static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
+ enum dma_status *status)
{
struct dw_desc *desc;
unsigned long flags;
@@ -903,6 +904,8 @@ static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
residue = desc->residue;
if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
residue -= dwc_get_sent(dwc);
+ if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
+ *status = DMA_PAUSED;
} else {
residue = desc->total_len;
}
@@ -932,11 +935,7 @@ dwc_tx_status(struct dma_chan *chan,
if (ret == DMA_COMPLETE)
return ret;
- dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
-
- if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
- return DMA_PAUSED;
-
+ dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
return ret;
}
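
The dw/core.c hunk folds the pause check into residue retrieval so a paused channel reports DMA_PAUSED together with an up-to-date residue, rather than residue with DMA_IN_PROGRESS. A hedged client-side illustration using the standard dmaengine status API (the function name is hypothetical):

#include <linux/dmaengine.h>
#include <linux/printk.h>

static void report_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	/* A paused channel now reports DMA_PAUSED and still fills in the residue. */
	if (status == DMA_PAUSED || status == DMA_IN_PROGRESS)
		pr_info("dma: %u bytes remaining (%s)\n", state.residue,
			status == DMA_PAUSED ? "paused" : "in progress");
}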
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d19ea885c63e..5338a94f1a69 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1431,4 +1431,3 @@ subsys_initcall(ep93xx_dma_module_init);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 76cbf54aec58..e40769666e39 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -272,7 +272,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
const struct fsl_edma_drvdata *drvdata = NULL;
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs;
- struct resource *res;
int len, chans;
int ret, i;
@@ -298,8 +297,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
@@ -323,8 +321,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
- fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+ fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
+ 1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
/* on error: disable all previously enabled clks */
fsl_disable_clocks(fsl_edma, i);
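
This fsl-edma hunk, like the fsl-qdma and idma64 hunks below, only switches to the combined helper. Roughly, and simplified from the in-tree implementation, devm_platform_ioremap_resource() is equivalent to the open-coded pair it replaces:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *ioremap_mem_index(struct platform_device *pdev,
				       unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}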
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 045ead46ec8f..eddb2688f234 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1119,7 +1119,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
int ret, i;
int blk_num, blk_off;
u32 len, chans, queues;
- struct resource *res;
struct fsl_qdma_chan *fsl_chan;
struct fsl_qdma_engine *fsl_qdma;
struct device_node *np = pdev->dev.of_node;
@@ -1183,18 +1182,15 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->status[i])
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_qdma->ctrl_base))
return PTR_ERR(fsl_qdma->ctrl_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(fsl_qdma->status_base))
return PTR_ERR(fsl_qdma->status_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(fsl_qdma->block_base))
return PTR_ERR(fsl_qdma->block_base);
fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index c33087c5cd02..0ac634a51c5e 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -137,8 +137,11 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
u32 status_err, u32 status_xfer)
{
struct idma64_chan *idma64c = &idma64->chan[c];
+ struct dma_chan_percpu *stat;
struct idma64_desc *desc;
+ stat = this_cpu_ptr(idma64c->vchan.chan.local);
+
spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
@@ -149,6 +152,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
dma_writel(idma64, CLEAR(XFER), idma64c->mask);
desc->status = DMA_COMPLETE;
vchan_cookie_complete(&desc->vdesc);
+ stat->bytes_transferred += desc->length;
idma64_start_transfer(idma64c);
}
@@ -627,7 +631,6 @@ static int idma64_platform_probe(struct platform_device *pdev)
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
struct device *sysdev = dev->parent;
- struct resource *mem;
int ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -638,8 +641,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
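
Besides the ioremap cleanup, the idma64 hunk starts crediting completed bytes to the per-CPU dmaengine channel statistics on transfer completion. As a sketch (assuming the usual dmaengine core behaviour of summing the per-CPU counters when they are exposed), the aggregate can be read back like this:

#include <linux/cpumask.h>
#include <linux/dmaengine.h>
#include <linux/percpu.h>

static unsigned long chan_bytes_transferred(struct dma_chan *chan)
{
	unsigned long total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(chan->local, cpu)->bytes_transferred;

	return total;
}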
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index a1e9f2b3a37c..dc096839ac63 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,7 +1,7 @@
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
obj-$(CONFIG_INTEL_IDXD) += idxd.o
-idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e13e92609943..ecbf67c2ad2b 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -11,7 +11,9 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
+#include <linux/highmem.h>
#include <uapi/linux/idxd.h>
+#include <linux/xarray.h>
#include "registers.h"
#include "idxd.h"
@@ -22,6 +24,13 @@ struct idxd_cdev_context {
};
/*
+ * Since user file names are global across DSA devices, define their ida
+ * as global to avoid conflicting file names.
+ */
+static DEFINE_IDA(file_ida);
+static DEFINE_MUTEX(ida_lock);
+
+/*
* ictx is an array based off of accelerator types. enum idxd_type
* is used as index
*/
@@ -34,8 +43,119 @@ struct idxd_user_context {
struct idxd_wq *wq;
struct task_struct *task;
unsigned int pasid;
+ struct mm_struct *mm;
unsigned int flags;
struct iommu_sva *sva;
+ struct idxd_dev idxd_dev;
+ u64 counters[COUNTER_MAX];
+ int id;
+ pid_t pid;
+};
+
+static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
+static void idxd_xa_pasid_remove(struct idxd_user_context *ctx);
+
+static inline struct idxd_user_context *dev_to_uctx(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_user_context, idxd_dev);
+}
+
+static ssize_t cr_faults_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_user_context *ctx = dev_to_uctx(dev);
+
+ return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULTS]);
+}
+static DEVICE_ATTR_RO(cr_faults);
+
+static ssize_t cr_fault_failures_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_user_context *ctx = dev_to_uctx(dev);
+
+ return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULT_FAILS]);
+}
+static DEVICE_ATTR_RO(cr_fault_failures);
+
+static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_user_context *ctx = dev_to_uctx(dev);
+
+ return sysfs_emit(buf, "%u\n", ctx->pid);
+}
+static DEVICE_ATTR_RO(pid);
+
+static struct attribute *cdev_file_attributes[] = {
+ &dev_attr_cr_faults.attr,
+ &dev_attr_cr_fault_failures.attr,
+ &dev_attr_pid.attr,
+ NULL
+};
+
+static umode_t cdev_file_attr_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct idxd_user_context *ctx = dev_to_uctx(dev);
+ struct idxd_wq *wq = ctx->wq;
+
+ if (!wq_pasid_enabled(wq))
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group cdev_file_attribute_group = {
+ .attrs = cdev_file_attributes,
+ .is_visible = cdev_file_attr_visible,
+};
+
+static const struct attribute_group *cdev_file_attribute_groups[] = {
+ &cdev_file_attribute_group,
+ NULL
+};
+
+static void idxd_file_dev_release(struct device *dev)
+{
+ struct idxd_user_context *ctx = dev_to_uctx(dev);
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ mutex_lock(&ida_lock);
+ ida_free(&file_ida, ctx->id);
+ mutex_unlock(&ida_lock);
+
+ /* Wait for in-flight operations to complete. */
+ if (wq_shared(wq)) {
+ idxd_device_drain_pasid(idxd, ctx->pasid);
+ } else {
+ if (device_user_pasid_enabled(idxd)) {
+ /* The wq disable in the disable pasid function will drain the wq */
+ rc = idxd_wq_disable_pasid(wq);
+ if (rc < 0)
+ dev_err(dev, "wq disable pasid failed.\n");
+ } else {
+ idxd_wq_drain(wq);
+ }
+ }
+
+ if (ctx->sva) {
+ idxd_cdev_evl_drain_pasid(wq, ctx->pasid);
+ iommu_sva_unbind_device(ctx->sva);
+ idxd_xa_pasid_remove(ctx);
+ }
+ kfree(ctx);
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_put(wq);
+ mutex_unlock(&wq->wq_lock);
+}
+
+static struct device_type idxd_cdev_file_type = {
+ .name = "idxd_file",
+ .release = idxd_file_dev_release,
+ .groups = cdev_file_attribute_groups,
};
static void idxd_cdev_dev_release(struct device *dev)
@@ -68,15 +188,46 @@ static inline struct idxd_wq *inode_wq(struct inode *inode)
return idxd_cdev->wq;
}
+static void idxd_xa_pasid_remove(struct idxd_user_context *ctx)
+{
+ struct idxd_wq *wq = ctx->wq;
+ void *ptr;
+
+ mutex_lock(&wq->uc_lock);
+ ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
+ if (ptr != (void *)ctx)
+ dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
+ ctx->pasid);
+ mutex_unlock(&wq->uc_lock);
+}
+
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
+{
+ struct idxd_user_context *ctx;
+
+ if (index >= COUNTER_MAX)
+ return;
+
+ mutex_lock(&wq->uc_lock);
+ ctx = xa_load(&wq->upasid_xa, pasid);
+ if (!ctx) {
+ mutex_unlock(&wq->uc_lock);
+ return;
+ }
+ ctx->counters[index]++;
+ mutex_unlock(&wq->uc_lock);
+}
+
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
struct idxd_user_context *ctx;
struct idxd_device *idxd;
struct idxd_wq *wq;
- struct device *dev;
+ struct device *dev, *fdev;
int rc = 0;
struct iommu_sva *sva;
unsigned int pasid;
+ struct idxd_cdev *idxd_cdev;
wq = inode_wq(inode);
idxd = wq->idxd;
@@ -97,6 +248,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq;
filp->private_data = ctx;
+ ctx->pid = current->pid;
if (device_user_pasid_enabled(idxd)) {
sva = iommu_sva_bind_device(dev, current->mm);
@@ -108,65 +260,118 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
- iommu_sva_unbind_device(sva);
rc = -EINVAL;
- goto failed;
+ goto failed_get_pasid;
}
ctx->sva = sva;
ctx->pasid = pasid;
+ ctx->mm = current->mm;
+
+ mutex_lock(&wq->uc_lock);
+ rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
+ mutex_unlock(&wq->uc_lock);
+ if (rc < 0)
+ dev_warn(dev, "PASID entry already exist in xarray.\n");
if (wq_dedicated(wq)) {
rc = idxd_wq_set_pasid(wq, pasid);
if (rc < 0) {
iommu_sva_unbind_device(sva);
dev_err(dev, "wq set pasid failed: %d\n", rc);
- goto failed;
+ goto failed_set_pasid;
}
}
}
+ idxd_cdev = wq->idxd_cdev;
+ mutex_lock(&ida_lock);
+ ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
+ mutex_unlock(&ida_lock);
+ if (ctx->id < 0) {
+ dev_warn(dev, "ida alloc failure\n");
+ goto failed_ida;
+ }
+ ctx->idxd_dev.type = IDXD_DEV_CDEV_FILE;
+ fdev = user_ctx_dev(ctx);
+ device_initialize(fdev);
+ fdev->parent = cdev_dev(idxd_cdev);
+ fdev->bus = &dsa_bus_type;
+ fdev->type = &idxd_cdev_file_type;
+
+ rc = dev_set_name(fdev, "file%d", ctx->id);
+ if (rc < 0) {
+ dev_warn(dev, "set name failure\n");
+ goto failed_dev_name;
+ }
+
+ rc = device_add(fdev);
+ if (rc < 0) {
+ dev_warn(dev, "file device add failure\n");
+ goto failed_dev_add;
+ }
+
idxd_wq_get(wq);
mutex_unlock(&wq->wq_lock);
return 0;
- failed:
+failed_dev_add:
+failed_dev_name:
+ put_device(fdev);
+failed_ida:
+failed_set_pasid:
+ if (device_user_pasid_enabled(idxd))
+ idxd_xa_pasid_remove(ctx);
+failed_get_pasid:
+ if (device_user_pasid_enabled(idxd))
+ iommu_sva_unbind_device(sva);
+failed:
mutex_unlock(&wq->wq_lock);
kfree(ctx);
return rc;
}
+static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_evl *evl = idxd->evl;
+ union evl_status_reg status;
+ u16 h, t, size;
+ int ent_size = evl_ent_size(idxd);
+ struct __evl_entry *entry_head;
+
+ if (!evl)
+ return;
+
+ spin_lock(&evl->lock);
+ status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ t = status.tail;
+ h = evl->head;
+ size = evl->size;
+
+ while (h != t) {
+ entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
+ if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
+ set_bit(h, evl->bmap);
+ h = (h + 1) % size;
+ }
+ spin_unlock(&evl->lock);
+
+ drain_workqueue(wq->wq);
+}
+
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
struct idxd_user_context *ctx = filep->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
- int rc;
dev_dbg(dev, "%s called\n", __func__);
filep->private_data = NULL;
- /* Wait for in-flight operations to complete. */
- if (wq_shared(wq)) {
- idxd_device_drain_pasid(idxd, ctx->pasid);
- } else {
- if (device_user_pasid_enabled(idxd)) {
- /* The wq disable in the disable pasid function will drain the wq */
- rc = idxd_wq_disable_pasid(wq);
- if (rc < 0)
- dev_err(dev, "wq disable pasid failed.\n");
- } else {
- idxd_wq_drain(wq);
- }
- }
+ device_unregister(user_ctx_dev(ctx));
- if (ctx->sva)
- iommu_sva_unbind_device(ctx->sva);
- kfree(ctx);
- mutex_lock(&wq->wq_lock);
- idxd_wq_put(wq);
- mutex_unlock(&wq->wq_lock);
return 0;
}
@@ -201,7 +406,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
- vma->vm_flags |= VM_DONTCOPY;
+ vm_flags_set(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -297,6 +502,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
struct idxd_cdev *idxd_cdev;
idxd_cdev = wq->idxd_cdev;
+ ida_destroy(&file_ida);
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev));
@@ -330,6 +536,13 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
}
mutex_lock(&wq->wq_lock);
+
+ wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
+ if (!wq->wq) {
+ rc = -ENOMEM;
+ goto wq_err;
+ }
+
wq->type = IDXD_WQT_USER;
rc = drv_enable_wq(wq);
if (rc < 0)
@@ -348,7 +561,9 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
err_cdev:
drv_disable_wq(wq);
err:
+ destroy_workqueue(wq->wq);
wq->type = IDXD_WQT_NONE;
+wq_err:
mutex_unlock(&wq->wq_lock);
return rc;
}
@@ -361,6 +576,8 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
idxd_wq_del_cdev(wq);
drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
+ destroy_workqueue(wq->wq);
+ wq->wq = NULL;
mutex_unlock(&wq->wq_lock);
}
@@ -407,3 +624,70 @@ void idxd_cdev_remove(void)
ida_destroy(&ictx[i].minor_ida);
}
}
+
+/**
+ * idxd_copy_cr - copy completion record to user address space found by wq and
+ * PASID
+ * @wq: work queue
+ * @pasid: PASID
+ * @addr: user fault address to write
+ * @cr: completion record
+ * @len: number of bytes to copy
+ *
+ * This is called by a work item that handles completion record faults.
+ *
+ * Return: number of bytes copied.
+ */
+int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
+ void *cr, int len)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int left = len, status_size = 1;
+ struct idxd_user_context *ctx;
+ struct mm_struct *mm;
+
+ mutex_lock(&wq->uc_lock);
+
+ ctx = xa_load(&wq->upasid_xa, pasid);
+ if (!ctx) {
+ dev_warn(dev, "No user context\n");
+ goto out;
+ }
+
+ mm = ctx->mm;
+ /*
+ * The completion record fault handling work is running in kernel
+ * thread context. It temporarily switches to the mm to copy cr
+ * to addr in the mm.
+ */
+ kthread_use_mm(mm);
+ left = copy_to_user((void __user *)addr + status_size, cr + status_size,
+ len - status_size);
+ /*
+ * Copy status only after the rest of the completion record is copied
+ * successfully so that the user gets the complete completion record
+ * when a non-zero status is polled.
+ */
+ if (!left) {
+ u8 status;
+
+ /*
+ * Ensure that the completion record's status field is written
+ * after the rest of the completion record has been written.
+ * This ensures that the user receives the correct completion
+ * record information once polling for a non-zero status.
+ */
+ wmb();
+ status = *(u8 *)cr;
+ if (put_user(status, (u8 __user *)addr))
+ left += status_size;
+ } else {
+ left += status_size;
+ }
+ kthread_unuse_mm(mm);
+
+out:
+ mutex_unlock(&wq->uc_lock);
+
+ return len - left;
+}
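
idxd_copy_cr() above runs from a kernel worker that has no user context of its own, so it temporarily adopts the faulting process's mm before using the normal uaccess helpers. Stripped of the idxd specifics, the underlying pattern is the following generic sketch (the helper name is hypothetical):

#include <linux/kthread.h>
#include <linux/mm_types.h>
#include <linux/uaccess.h>

static int write_to_user_mm(struct mm_struct *mm, void __user *dst,
			    const void *src, size_t len)
{
	unsigned long left;

	kthread_use_mm(mm);		/* adopt the target address space */
	left = copy_to_user(dst, src, len);
	kthread_unuse_mm(mm);		/* drop it again */

	return left ? -EFAULT : 0;
}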
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
index 3df21615f888..5fd38d1b9d28 100644
--- a/drivers/dma/idxd/compat.c
+++ b/drivers/dma/idxd/compat.c
@@ -16,7 +16,7 @@ extern void device_driver_detach(struct device *dev);
static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count)
{
- struct bus_type *bus = drv->bus;
+ const struct bus_type *bus = drv->bus;
struct device *dev;
int rc = -ENODEV;
@@ -32,7 +32,7 @@ static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count)
{
- struct bus_type *bus = drv->bus;
+ const struct bus_type *bus = drv->bus;
struct device *dev;
struct device_driver *alt_drv = NULL;
int rc = -ENODEV;
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
new file mode 100644
index 000000000000..9cfbd9b14c4c
--- /dev/null
+++ b/drivers/dma/idxd/debugfs.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+static struct dentry *idxd_debugfs_dir;
+
+static void dump_event_entry(struct idxd_device *idxd, struct seq_file *s,
+ u16 index, int *count, bool processed)
+{
+ struct idxd_evl *evl = idxd->evl;
+ struct dsa_evl_entry *entry;
+ struct dsa_completion_record *cr;
+ u64 *raw;
+ int i;
+ int evl_strides = evl_ent_size(idxd) / sizeof(u64);
+
+ entry = (struct dsa_evl_entry *)evl->log + index;
+
+ if (!entry->e.desc_valid)
+ return;
+
+ seq_printf(s, "Event Log entry %d (real index %u) processed: %u\n",
+ *count, index, processed);
+
+ seq_printf(s, "desc valid %u wq idx valid %u\n"
+ "batch %u fault rw %u priv %u error 0x%x\n"
+ "wq idx %u op %#x pasid %u batch idx %u\n"
+ "fault addr %#llx\n",
+ entry->e.desc_valid, entry->e.wq_idx_valid,
+ entry->e.batch, entry->e.fault_rw, entry->e.priv,
+ entry->e.error, entry->e.wq_idx, entry->e.operation,
+ entry->e.pasid, entry->e.batch_idx, entry->e.fault_addr);
+
+ cr = &entry->cr;
+ seq_printf(s, "status %#x result %#x fault_info %#x bytes_completed %u\n"
+ "fault addr %#llx inv flags %#x\n\n",
+ cr->status, cr->result, cr->fault_info, cr->bytes_completed,
+ cr->fault_addr, cr->invalid_flags);
+
+ raw = (u64 *)entry;
+
+ for (i = 0; i < evl_strides; i++)
+ seq_printf(s, "entry[%d] = %#llx\n", i, raw[i]);
+
+ seq_puts(s, "\n");
+ *count += 1;
+}
+
+static int debugfs_evl_show(struct seq_file *s, void *d)
+{
+ struct idxd_device *idxd = s->private;
+ struct idxd_evl *evl = idxd->evl;
+ union evl_status_reg evl_status;
+ u16 h, t, evl_size, i;
+ int count = 0;
+ bool processed = true;
+
+ if (!evl || !evl->log)
+ return 0;
+
+ spin_lock(&evl->lock);
+
+ h = evl->head;
+ evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ t = evl_status.tail;
+ evl_size = evl->size;
+
+ seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
+ evl_status.head, evl_status.tail, evl_status.int_pending);
+
+ i = t;
+ while (1) {
+ i = (i + 1) % evl_size;
+ if (i == t)
+ break;
+
+ if (processed && i == h)
+ processed = false;
+ dump_event_entry(idxd, s, i, &count, processed);
+ }
+
+ spin_unlock(&evl->lock);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(debugfs_evl);
+
+int idxd_device_init_debugfs(struct idxd_device *idxd)
+{
+ if (IS_ERR_OR_NULL(idxd_debugfs_dir))
+ return 0;
+
+ idxd->dbgfs_dir = debugfs_create_dir(dev_name(idxd_confdev(idxd)), idxd_debugfs_dir);
+ if (IS_ERR(idxd->dbgfs_dir))
+ return PTR_ERR(idxd->dbgfs_dir);
+
+ if (idxd->evl) {
+ idxd->dbgfs_evl_file = debugfs_create_file("event_log", 0400,
+ idxd->dbgfs_dir, idxd,
+ &debugfs_evl_fops);
+ if (IS_ERR(idxd->dbgfs_evl_file)) {
+ debugfs_remove_recursive(idxd->dbgfs_dir);
+ idxd->dbgfs_dir = NULL;
+ return PTR_ERR(idxd->dbgfs_evl_file);
+ }
+ }
+
+ return 0;
+}
+
+void idxd_device_remove_debugfs(struct idxd_device *idxd)
+{
+ debugfs_remove_recursive(idxd->dbgfs_dir);
+}
+
+int idxd_init_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return 0;
+
+ idxd_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(idxd_debugfs_dir))
+ return PTR_ERR(idxd_debugfs_dir);
+ return 0;
+}
+
+void idxd_remove_debugfs(void)
+{
+ debugfs_remove_recursive(idxd_debugfs_dir);
+}
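
The new event_log file is a standard one-shot seq_file reader: DEFINE_SHOW_ATTRIBUTE(debugfs_evl) supplies the debugfs_evl_fops used above. For reference, the macro from <linux/seq_file.h> expands to roughly this boilerplate:

static int debugfs_evl_open(struct inode *inode, struct file *file)
{
	return single_open(file, debugfs_evl_show, inode->i_private);
}

static const struct file_operations debugfs_evl_fops = {
	.owner		= THIS_MODULE,
	.open		= debugfs_evl_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};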
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 06f5d3783d77..5abbcc61c528 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -699,9 +699,13 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->num_engines = 0;
group->num_wqs = 0;
group->use_rdbuf_limit = false;
- group->rdbufs_allowed = 0;
+ /*
+ * The default value is the total number of read buffers
+ * reported in GRPCAP.
+ */
+ group->rdbufs_allowed = idxd->max_rdbufs;
group->rdbufs_reserved = 0;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
@@ -748,6 +752,101 @@ void idxd_device_clear_state(struct idxd_device *idxd)
spin_unlock(&idxd->dev_lock);
}
+static int idxd_device_evl_setup(struct idxd_device *idxd)
+{
+ union gencfg_reg gencfg;
+ union evlcfg_reg evlcfg;
+ union genctrl_reg genctrl;
+ struct device *dev = &idxd->pdev->dev;
+ void *addr;
+ dma_addr_t dma_addr;
+ int size;
+ struct idxd_evl *evl = idxd->evl;
+ unsigned long *bmap;
+ int rc;
+
+ if (!evl)
+ return 0;
+
+ size = evl_size(idxd);
+
+ bmap = bitmap_zalloc(size, GFP_KERNEL);
+ if (!bmap) {
+ rc = -ENOMEM;
+ goto err_bmap;
+ }
+
+ /*
+ * The address needs to be page aligned. dma_alloc_coherent() already returns
+ * memory aligned to at least a page, so no manual alignment is required.
+ */
+ addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (!addr) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ memset(addr, 0, size);
+
+ spin_lock(&evl->lock);
+ evl->log = addr;
+ evl->dma = dma_addr;
+ evl->log_size = size;
+ evl->bmap = bmap;
+
+ memset(&evlcfg, 0, sizeof(evlcfg));
+ evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
+ evlcfg.size = evl->size;
+
+ iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
+ iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.evl_int_en = 1;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+ gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ gencfg.evl_en = 1;
+ iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+ spin_unlock(&evl->lock);
+ return 0;
+
+err_alloc:
+ bitmap_free(bmap);
+err_bmap:
+ return rc;
+}
+
+static void idxd_device_evl_free(struct idxd_device *idxd)
+{
+ union gencfg_reg gencfg;
+ union genctrl_reg genctrl;
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_evl *evl = idxd->evl;
+
+ gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ if (!gencfg.evl_en)
+ return;
+
+ spin_lock(&evl->lock);
+ gencfg.evl_en = 0;
+ iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.evl_int_en = 0;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+ iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
+ iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+ dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
+ bitmap_free(evl->bmap);
+ evl->log = NULL;
+ evl->size = IDXD_EVL_SIZE_MIN;
+ spin_unlock(&evl->lock);
+}
+
static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;
@@ -868,12 +967,16 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->priority = wq->priority;
if (idxd->hw.gen_cap.block_on_fault &&
- test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
+ test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
+ !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
wq->wqcfg->bof = 1;
if (idxd->hw.wq_cap.wq_ats_support)
wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
+ if (idxd->hw.wq_cap.wq_prs_support)
+ wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
+
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
@@ -934,11 +1037,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->grpcfg.flags.tc_b = group->tc_b;
group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
- if (group->rdbufs_allowed)
- group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
- else
- group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
-
+ group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
}
@@ -1172,8 +1271,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
spin_unlock(&ie->list_lock);
list_for_each_entry_safe(desc, itr, &flist, list) {
+ struct dma_async_tx_descriptor *tx;
+
list_del(&desc->list);
ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+ /*
+ * wq is being disabled. Any remaining descriptors are
+ * likely to be stuck and can be dropped. The callback could
+ * point to code that is no longer accessible, for example
+ * if the dmatest module has been unloaded.
+ */
+ tx = &desc->txd;
+ tx->callback = NULL;
+ tx->callback_result = NULL;
idxd_dma_complete_txd(desc, ctype, true);
}
}
@@ -1183,7 +1293,7 @@ static void idxd_device_set_perm_entry(struct idxd_device *idxd,
{
union msix_perm mperm;
- if (ie->pasid == INVALID_IOASID)
+ if (ie->pasid == IOMMU_PASID_INVALID)
return;
mperm.bits = 0;
@@ -1213,7 +1323,7 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
idxd_device_clear_perm_entry(idxd, ie);
ie->vector = -1;
ie->int_handle = INVALID_INT_HANDLE;
- ie->pasid = INVALID_IOASID;
+ ie->pasid = IOMMU_PASID_INVALID;
}
int idxd_wq_request_irq(struct idxd_wq *wq)
@@ -1229,7 +1339,7 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
ie = &wq->ie;
ie->vector = pci_irq_vector(pdev, ie->id);
- ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+ ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
idxd_device_set_perm_entry(idxd, ie);
rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
@@ -1254,7 +1364,7 @@ err_int_handle:
free_irq(ie->vector, ie);
err_irq:
idxd_device_clear_perm_entry(idxd, ie);
- ie->pasid = INVALID_IOASID;
+ ie->pasid = IOMMU_PASID_INVALID;
return rc;
}
@@ -1390,8 +1500,7 @@ err_res_alloc:
err_irq:
idxd_wq_unmap_portal(wq);
err_map_portal:
- rc = idxd_wq_disable(wq, false);
- if (rc < 0)
+ if (idxd_wq_disable(wq, false))
dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
return rc;
@@ -1408,11 +1517,11 @@ void drv_disable_wq(struct idxd_wq *wq)
dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq));
- idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
idxd_wq_free_irq(wq);
idxd_wq_reset(wq);
+ idxd_wq_free_resources(wq);
percpu_ref_exit(&wq->wq_active);
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
@@ -1441,15 +1550,24 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
if (rc < 0)
return -ENXIO;
+ rc = idxd_device_evl_setup(idxd);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
+ return rc;
+ }
+
/* Start device */
rc = idxd_device_enable(idxd);
- if (rc < 0)
+ if (rc < 0) {
+ idxd_device_evl_free(idxd);
return rc;
+ }
/* Setup DMA device without channels */
rc = idxd_register_dma_device(idxd);
if (rc < 0) {
idxd_device_disable(idxd);
+ idxd_device_evl_free(idxd);
idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
return rc;
}
@@ -1478,6 +1596,7 @@ void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
idxd_device_disable(idxd);
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
idxd_device_reset(idxd);
+ idxd_device_evl_free(idxd);
}
static enum idxd_dev_type dev_types[] = {
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index e0874cb4721c..eb35ca313684 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -63,12 +63,6 @@ static void op_flag_setup(unsigned long flags, u32 *desc_flags)
*desc_flags |= IDXD_OP_FLAG_RCI;
}
-static inline void set_completion_address(struct idxd_desc *desc,
- u64 *compl_addr)
-{
- *compl_addr = desc->compl_dma;
-}
-
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
struct dsa_hw_desc *hw, char opcode,
u64 addr_f1, u64 addr_f2, u64 len,
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 7ced8d283d98..5428a2e1b1ec 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -10,9 +10,9 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
-#include <linux/ioasid.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -32,6 +32,7 @@ enum idxd_dev_type {
IDXD_DEV_GROUP,
IDXD_DEV_ENGINE,
IDXD_DEV_CDEV,
+ IDXD_DEV_CDEV_FILE,
IDXD_DEV_MAX_TYPE,
};
@@ -127,6 +128,12 @@ struct idxd_pmu {
#define IDXD_MAX_PRIORITY 0xf
+enum {
+ COUNTER_FAULTS = 0,
+ COUNTER_FAULT_FAILS,
+ COUNTER_MAX
+};
+
enum idxd_wq_state {
IDXD_WQ_DISABLED = 0,
IDXD_WQ_ENABLED,
@@ -136,6 +143,7 @@ enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
WQ_FLAG_BLOCK_ON_FAULT,
WQ_FLAG_ATS_DISABLE,
+ WQ_FLAG_PRS_DISABLE,
};
enum idxd_wq_type {
@@ -185,6 +193,7 @@ struct idxd_wq {
struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
+ struct workqueue_struct *wq;
struct idxd_device *idxd;
int id;
struct idxd_irq_entry ie;
@@ -214,6 +223,10 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
+
+ /* Lock to protect upasid_xa access. */
+ struct mutex uc_lock;
+ struct xarray upasid_xa;
};
struct idxd_engine {
@@ -232,6 +245,7 @@ struct idxd_hw {
union engine_cap_reg engine_cap;
struct opcap opcap;
u32 cmd_cap;
+ union iaa_cap_reg iaa_cap;
};
enum idxd_device_state {
@@ -258,6 +272,32 @@ struct idxd_driver_data {
struct device_type *dev_type;
int compl_size;
int align;
+ int evl_cr_off;
+ int cr_status_off;
+ int cr_result_off;
+};
+
+struct idxd_evl {
+ /* Lock to protect event log access. */
+ spinlock_t lock;
+ void *log;
+ dma_addr_t dma;
+ /* Total size of event log = number of entries * entry size. */
+ unsigned int log_size;
+ /* The number of entries in the event log. */
+ u16 size;
+ u16 head;
+ unsigned long *bmap;
+ bool batch_fail[IDXD_MAX_BATCH_IDENT];
+};
+
+struct idxd_evl_fault {
+ struct work_struct work;
+ struct idxd_wq *wq;
+ u8 status;
+
+ /* always make this the last member */
+ struct __evl_entry entry[];
};
struct idxd_device {
@@ -316,8 +356,24 @@ struct idxd_device {
struct idxd_pmu *idxd_pmu;
unsigned long *opcap_bmap;
+ struct idxd_evl *evl;
+ struct kmem_cache *evl_cache;
+
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_evl_file;
};
+static inline unsigned int evl_ent_size(struct idxd_device *idxd)
+{
+ return idxd->hw.gen_cap.evl_support ?
+ (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
+}
+
+static inline unsigned int evl_size(struct idxd_device *idxd)
+{
+ return idxd->evl->size * evl_ent_size(idxd);
+}
+
/* IDXD software descriptor */
struct idxd_desc {
union {
@@ -351,6 +407,7 @@ enum idxd_completion_status {
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
+#define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev)
#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
@@ -598,6 +655,7 @@ int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
+void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
@@ -662,6 +720,9 @@ void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
+int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
+ void *buf, int len);
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
@@ -678,4 +739,10 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif
+/* debugfs */
+int idxd_device_init_debugfs(struct idxd_device *idxd);
+void idxd_device_remove_debugfs(struct idxd_device *idxd);
+int idxd_init_debugfs(void);
+void idxd_remove_debugfs(void);
+
#endif
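
To make the evl_ent_size()/evl_size() helpers above concrete: if GENCAP reports evl_support == 1, an entry is 32 * (1 << 1) = 64 bytes, so the minimum log of IDXD_EVL_SIZE_MIN (64) entries needs 64 * 64 bytes = 4 KiB of coherent memory; evl_support == 2 doubles the entry size to 128 bytes. These figures are illustrative; the actual values come from the device's capability register.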
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 529ea09c9094..1aa823974cda 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -9,7 +9,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
-#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
@@ -47,6 +46,9 @@ static struct idxd_driver_data idxd_driver_data[] = {
.compl_size = sizeof(struct dsa_completion_record),
.align = 32,
.dev_type = &dsa_device_type,
+ .evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+ .cr_status_off = offsetof(struct dsa_completion_record, status),
+ .cr_result_off = offsetof(struct dsa_completion_record, result),
},
[IDXD_TYPE_IAX] = {
.name_prefix = "iax",
@@ -54,6 +56,9 @@ static struct idxd_driver_data idxd_driver_data[] = {
.compl_size = sizeof(struct iax_completion_record),
.align = 64,
.dev_type = &iax_device_type,
+ .evl_cr_off = offsetof(struct iax_evl_entry, cr),
+ .cr_status_off = offsetof(struct iax_completion_record, status),
+ .cr_result_off = offsetof(struct iax_completion_record, error_code),
},
};
@@ -105,7 +110,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
ie = idxd_get_ie(idxd, msix_idx);
ie->id = msix_idx;
ie->int_handle = INVALID_INT_HANDLE;
- ie->pasid = INVALID_IOASID;
+ ie->pasid = IOMMU_PASID_INVALID;
spin_lock_init(&ie->list_lock);
init_llist_head(&ie->pending_llist);
@@ -200,6 +205,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
+ mutex_init(&wq->uc_lock);
+ xa_init(&wq->upasid_xa);
idxd->wqs[i] = wq;
}
@@ -295,13 +302,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
}
idxd->groups[i] = group;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
group->tc_a = -1;
group->tc_b = -1;
}
+ /*
+ * The default value is the total number of read buffers
+ * reported in GRPCAP.
+ */
+ group->rdbufs_allowed = idxd->max_rdbufs;
}
return 0;
@@ -327,6 +339,33 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
destroy_workqueue(idxd->wq);
}
+static int idxd_init_evl(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_evl *evl;
+
+ if (idxd->hw.gen_cap.evl_support == 0)
+ return 0;
+
+ evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
+ if (!evl)
+ return -ENOMEM;
+
+ spin_lock_init(&evl->lock);
+ evl->size = IDXD_EVL_SIZE_MIN;
+
+ idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
+ sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
+ 0, 0, NULL);
+ if (!idxd->evl_cache) {
+ kfree(evl);
+ return -ENOMEM;
+ }
+
+ idxd->evl = evl;
+ return 0;
+}
+
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -352,8 +391,14 @@ static int idxd_setup_internals(struct idxd_device *idxd)
goto err_wkq_create;
}
+ rc = idxd_init_evl(idxd);
+ if (rc < 0)
+ goto err_evl;
+
return 0;
+ err_evl:
+ destroy_workqueue(idxd->wq);
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
@@ -384,7 +429,7 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
-static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
+void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
int i, j, nr;
@@ -456,6 +501,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
}
multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
+
+ /* read iaa cap */
+ if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
+ idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
@@ -511,6 +560,27 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->sva = NULL;
}
+static int idxd_enable_sva(struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+ if (ret)
+ return ret;
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (ret)
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+
+ return ret;
+}
+
+static void idxd_disable_sva(struct pci_dev *pdev)
+{
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+}
+
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -525,7 +595,7 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
+ if (idxd_enable_sva(pdev)) {
dev_warn(dev, "Unable to turn on user SVA feature.\n");
} else {
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
@@ -573,21 +643,19 @@ static int idxd_probe(struct idxd_device *idxd)
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
if (device_user_pasid_enabled(idxd))
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ idxd_disable_sva(pdev);
return rc;
}
static void idxd_cleanup(struct idxd_device *idxd)
{
- struct device *dev = &idxd->pdev->dev;
-
perfmon_pmu_remove(idxd);
idxd_cleanup_interrupts(idxd);
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
if (device_user_pasid_enabled(idxd))
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ idxd_disable_sva(idxd->pdev);
}
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -637,6 +705,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dev_register;
}
+ rc = idxd_device_init_debugfs(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD debugfs failed to setup\n");
+
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
@@ -699,13 +771,14 @@ static void idxd_remove(struct pci_dev *pdev)
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
+ idxd_device_remove_debugfs(idxd);
irq_entry = idxd_get_ie(idxd, 0);
free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
if (device_user_pasid_enabled(idxd))
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ idxd_disable_sva(pdev);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
@@ -756,6 +829,10 @@ static int __init idxd_init_module(void)
if (err)
goto err_cdev_register;
+ err = idxd_init_debugfs();
+ if (err)
+ goto err_debugfs;
+
err = pci_register_driver(&idxd_pci_driver);
if (err)
goto err_pci_register;
@@ -763,6 +840,8 @@ static int __init idxd_init_module(void)
return 0;
err_pci_register:
+ idxd_remove_debugfs();
+err_debugfs:
idxd_cdev_remove();
err_cdev_register:
idxd_driver_unregister(&idxd_user_drv);
@@ -783,5 +862,6 @@ static void __exit idxd_exit_module(void)
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
perfmon_exit();
+ idxd_remove_debugfs();
}
module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index aa314ebec587..b501320a9c7a 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -7,6 +7,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/iommu.h>
+#include <linux/sched/mm.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
@@ -80,7 +82,7 @@ static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie)
desc.opcode = DSA_OPCODE_DRAIN;
desc.priv = 1;
- if (ie->pasid != INVALID_IOASID)
+ if (ie->pasid != IOMMU_PASID_INVALID)
desc.pasid = ie->pasid;
desc.int_handle = ie->int_handle;
portal = idxd_wq_portal_addr(wq);
@@ -217,13 +219,187 @@ static void idxd_int_handle_revoke(struct work_struct *work)
kfree(revoke);
}
-static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
+static void idxd_evl_fault_work(struct work_struct *work)
{
+ struct idxd_evl_fault *fault = container_of(work, struct idxd_evl_fault, work);
+ struct idxd_wq *wq = fault->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_evl *evl = idxd->evl;
+ struct __evl_entry *entry_head = fault->entry;
+ void *cr = (void *)entry_head + idxd->data->evl_cr_off;
+ int cr_size = idxd->data->compl_size;
+ u8 *status = (u8 *)cr + idxd->data->cr_status_off;
+ u8 *result = (u8 *)cr + idxd->data->cr_result_off;
+ int copied, copy_size;
+ bool *bf;
+
+ switch (fault->status) {
+ case DSA_COMP_CRA_XLAT:
+ if (entry_head->batch && entry_head->first_err_in_batch)
+ evl->batch_fail[entry_head->batch_id] = false;
+
+ copy_size = cr_size;
+ idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
+ break;
+ case DSA_COMP_BATCH_EVL_ERR:
+ bf = &evl->batch_fail[entry_head->batch_id];
+
+ copy_size = entry_head->rcr || *bf ? cr_size : 0;
+ if (*bf) {
+ if (*status == DSA_COMP_SUCCESS)
+ *status = DSA_COMP_BATCH_FAIL;
+ *result = 1;
+ *bf = false;
+ }
+ idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
+ break;
+ case DSA_COMP_DRAIN_EVL:
+ copy_size = cr_size;
+ break;
+ default:
+ copy_size = 0;
+ dev_dbg_ratelimited(dev, "Unrecognized error code: %#x\n", fault->status);
+ break;
+ }
+
+ if (copy_size == 0)
+ return;
+
+ /*
+ * Copy completion record to fault_addr in user address space
+ * that is found by wq and PASID.
+ */
+ copied = idxd_copy_cr(wq, entry_head->pasid, entry_head->fault_addr,
+ cr, copy_size);
+ /*
+ * The task that triggered the page fault is not known at this point,
+ * because multiple threads may share the user address space, or the
+ * task may already have exited before this fault is handled.
+ * So if the copy fails, SIGSEGV cannot be sent to the task.
+ * Just print an error for the failure. The user application
+ * waiting for the completion record will time out on this
+ * failure.
+ */
+ switch (fault->status) {
+ case DSA_COMP_CRA_XLAT:
+ if (copied != copy_size) {
+ idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
+ dev_dbg_ratelimited(dev, "Failed to write to completion record: (%d:%d)\n",
+ copy_size, copied);
+ if (entry_head->batch)
+ evl->batch_fail[entry_head->batch_id] = true;
+ }
+ break;
+ case DSA_COMP_BATCH_EVL_ERR:
+ if (copied != copy_size) {
+ idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
+ dev_dbg_ratelimited(dev, "Failed to write to batch completion record: (%d:%d)\n",
+ copy_size, copied);
+ }
+ break;
+ case DSA_COMP_DRAIN_EVL:
+ if (copied != copy_size)
+ dev_dbg_ratelimited(dev, "Failed to write to drain completion record: (%d:%d)\n",
+ copy_size, copied);
+ break;
+ }
+
+ kmem_cache_free(idxd->evl_cache, fault);
+}
+
+static void process_evl_entry(struct idxd_device *idxd,
+ struct __evl_entry *entry_head, unsigned int index)
+{
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_evl *evl = idxd->evl;
+ u8 status;
+
+ if (test_bit(index, evl->bmap)) {
+ clear_bit(index, evl->bmap);
+ } else {
+ status = DSA_COMP_STATUS(entry_head->error);
+
+ if (status == DSA_COMP_CRA_XLAT || status == DSA_COMP_DRAIN_EVL ||
+ status == DSA_COMP_BATCH_EVL_ERR) {
+ struct idxd_evl_fault *fault;
+ int ent_size = evl_ent_size(idxd);
+
+ if (entry_head->rci)
+ dev_dbg(dev, "Completion Int Req set, ignoring!\n");
+
+ if (!entry_head->rcr && status == DSA_COMP_DRAIN_EVL)
+ return;
+
+ fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC);
+ if (fault) {
+ struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx];
+
+ fault->wq = wq;
+ fault->status = status;
+ memcpy(&fault->entry, entry_head, ent_size);
+ INIT_WORK(&fault->work, idxd_evl_fault_work);
+ queue_work(wq->wq, &fault->work);
+ } else {
+ dev_warn(dev, "Failed to service fault work.\n");
+ }
+ } else {
+ dev_warn_ratelimited(dev, "Device error %#x operation: %#x fault addr: %#llx\n",
+ status, entry_head->operation,
+ entry_head->fault_addr);
+ }
+ }
+}
+
+static void process_evl_entries(struct idxd_device *idxd)
+{
+ union evl_status_reg evl_status;
+ unsigned int h, t;
+ struct idxd_evl *evl = idxd->evl;
+ struct __evl_entry *entry_head;
+ unsigned int ent_size = evl_ent_size(idxd);
+ u32 size;
+
+ evl_status.bits = 0;
+ evl_status.int_pending = 1;
+
+ spin_lock(&evl->lock);
+ /* Clear interrupt pending bit */
+ iowrite32(evl_status.bits_upper32,
+ idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
+ h = evl->head;
+ evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ t = evl_status.tail;
+ size = idxd->evl->size;
+
+ while (h != t) {
+ entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
+ process_evl_entry(idxd, entry_head, h);
+ h = (h + 1) % size;
+ }
+
+ evl->head = h;
+ evl_status.head = h;
+ iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ spin_unlock(&evl->lock);
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = ie_to_idxd(irq_entry);
struct device *dev = &idxd->pdev->dev;
union gensts_reg gensts;
u32 val = 0;
int i;
bool err = false;
+ u32 cause;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (!cause)
+ return IRQ_NONE;
+
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_HALT_STATE)
goto halt;
@@ -295,13 +471,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
perfmon_counter_overflow(idxd);
}
+ if (cause & IDXD_INTC_EVL) {
+ val |= IDXD_INTC_EVL;
+ process_evl_entries(idxd);
+ }
+
val ^= cause;
if (val)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
if (!err)
- return 0;
+ goto out;
halt:
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
@@ -324,33 +505,10 @@ halt:
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
- return -ENXIO;
}
}
- return 0;
-}
-
-irqreturn_t idxd_misc_thread(int vec, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = ie_to_idxd(irq_entry);
- int rc;
- u32 cause;
-
- cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- if (cause)
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
- while (cause) {
- rc = process_misc_interrupts(idxd, cause);
- if (rc < 0)
- break;
- cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- if (cause)
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- }
-
+out:
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index fe3b8d04f9db..7b54a3939ea1 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -3,6 +3,8 @@
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_
+#include <uapi/linux/idxd.h>
+
/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
@@ -31,7 +33,9 @@ union gen_cap_reg {
u64 rsvd:3;
u64 dest_readback:1;
u64 drain_readback:1;
- u64 rsvd2:6;
+ u64 rsvd2:3;
+ u64 evl_support:2;
+ u64 batch_continuation:1;
u64 max_xfer_shift:5;
u64 max_batch_shift:4;
u64 max_ims_mult:6;
@@ -55,7 +59,8 @@ union wq_cap_reg {
u64 occupancy:1;
u64 occupancy_int:1;
u64 op_config:1;
- u64 rsvd3:9;
+ u64 wq_prs_support:1;
+ u64 rsvd4:8;
};
u64 bits;
} __packed;
@@ -117,7 +122,8 @@ union gencfg_reg {
u32 rdbuf_limit:8;
u32 rsvd:4;
u32 user_int_en:1;
- u32 rsvd2:19;
+ u32 evl_en:1;
+ u32 rsvd2:18;
};
u32 bits;
} __packed;
@@ -127,7 +133,8 @@ union genctrl_reg {
struct {
u32 softerr_int_en:1;
u32 halt_int_en:1;
- u32 rsvd:30;
+ u32 evl_int_en:1;
+ u32 rsvd:29;
};
u32 bits;
} __packed;
@@ -162,6 +169,7 @@ enum idxd_device_reset_type {
#define IDXD_INTC_OCCUPY 0x04
#define IDXD_INTC_PERFMON_OVFL 0x08
#define IDXD_INTC_HALT_STATE 0x10
+#define IDXD_INTC_EVL 0x20
#define IDXD_INTC_INT_HANDLE_REVOKED 0x80000000
#define IDXD_CMD_OFFSET 0xa0
@@ -276,6 +284,45 @@ union sw_err_reg {
u64 bits[4];
} __packed;
+union iaa_cap_reg {
+ struct {
+ u64 dec_aecs_format_ver:1;
+ u64 drop_init_bits:1;
+ u64 chaining:1;
+ u64 force_array_output_mod:1;
+ u64 load_part_aecs:1;
+ u64 comp_early_abort:1;
+ u64 nested_comp:1;
+ u64 diction_comp:1;
+ u64 header_gen:1;
+ u64 crypto_gcm:1;
+ u64 crypto_cfb:1;
+ u64 crypto_xts:1;
+ u64 rsvd:52;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_IAACAP_OFFSET 0x180
+
+#define IDXD_EVLCFG_OFFSET 0xe0
+union evlcfg_reg {
+ struct {
+ u64 pasid_en:1;
+ u64 priv:1;
+ u64 rsvd:10;
+ u64 base_addr:52;
+
+ u64 size:16;
+ u64 pasid:20;
+ u64 rsvd2:28;
+ };
+ u64 bits[2];
+} __packed;
+
+#define IDXD_EVL_SIZE_MIN 0x0040
+#define IDXD_EVL_SIZE_MAX 0xffff
+
union msix_perm {
struct {
u32 rsvd:2;
@@ -325,7 +372,7 @@ union wqcfg {
u32 mode:1; /* shared or dedicated */
u32 bof:1; /* block on fault */
u32 wq_ats_disable:1;
- u32 rsvd2:1;
+ u32 wq_prs_disable:1;
u32 priority:4;
u32 pasid:20;
u32 pasid_en:1;
@@ -513,4 +560,73 @@ union filter_cfg {
u64 val;
} __packed;
+#define IDXD_EVLSTATUS_OFFSET 0xf0
+
+union evl_status_reg {
+ struct {
+ u32 head:16;
+ u32 rsvd:16;
+ u32 tail:16;
+ u32 rsvd2:14;
+ u32 int_pending:1;
+ u32 rsvd3:1;
+ };
+ struct {
+ u32 bits_lower32;
+ u32 bits_upper32;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_MAX_BATCH_IDENT 256
+
+struct __evl_entry {
+ u64 rsvd:2;
+ u64 desc_valid:1;
+ u64 wq_idx_valid:1;
+ u64 batch:1;
+ u64 fault_rw:1;
+ u64 priv:1;
+ u64 err_info_valid:1;
+ u64 error:8;
+ u64 wq_idx:8;
+ u64 batch_id:8;
+ u64 operation:8;
+ u64 pasid:20;
+ u64 rsvd2:4;
+
+ u16 batch_idx;
+ u16 rsvd3;
+ union {
+ /* Invalid Flags 0x11 */
+ u32 invalid_flags;
+ /* Invalid Int Handle 0x19 */
+ /* Page fault 0x1a */
+ /* Page fault 0x06, 0x1f, only operand_id */
+ /* Page fault before drain or in batch, 0x26, 0x27 */
+ struct {
+ u16 int_handle;
+ u16 rci:1;
+ u16 ims:1;
+ u16 rcr:1;
+ u16 first_err_in_batch:1;
+ u16 rsvd4_2:9;
+ u16 operand_id:3;
+ };
+ };
+ u64 fault_addr;
+ u64 rsvd5;
+} __packed;
+
+struct dsa_evl_entry {
+ struct __evl_entry e;
+ struct dsa_completion_record cr;
+} __packed;
+
+struct iax_evl_entry {
+ struct __evl_entry e;
+ u64 rsvd[4];
+ struct iax_completion_record cr;
+} __packed;
+
#endif
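Aside on the new EVLSTATUS layout added above: the head and tail indices of the event log ring live in one 64-bit register, split across two 32-bit bitfield groups. The following standalone sketch is illustrative only (it is not part of the patch and the sample value is made up); it mirrors the field widths of evl_status_reg and assumes a little-endian host with GCC-style bitfield allocation, as the driver header itself does.

/*
 * Illustrative decode of a sample 64-bit EVLSTATUS value using a
 * bitfield union laid out like evl_status_reg above. Sample value and
 * program are hypothetical; field widths follow the hunk.
 */
#include <stdint.h>
#include <stdio.h>

union evl_status {
	struct {
		uint32_t head:16;
		uint32_t rsvd:16;
		uint32_t tail:16;
		uint32_t rsvd2:14;
		uint32_t int_pending:1;
		uint32_t rsvd3:1;
	};
	uint64_t bits;
};

int main(void)
{
	union evl_status st;

	st.bits = 0x400000030000000aULL;	/* head = 10, tail = 3, int_pending = 1 */
	printf("head=%u tail=%u int_pending=%u\n",
	       st.head, st.tail, st.int_pending);
	return 0;
}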
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3229dfc78650..293739ac5596 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -387,7 +387,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
return -EPERM;
if (val < 0 || val > 7)
@@ -429,7 +429,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
return -EPERM;
if (val < 0 || val > 7)
@@ -822,10 +822,14 @@ static ssize_t wq_block_on_fault_store(struct device *dev,
if (rc < 0)
return rc;
- if (bof)
+ if (bof) {
+ if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
+ return -EOPNOTSUPP;
+
set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
- else
+ } else {
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+ }
return count;
}
@@ -1109,6 +1113,44 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
static struct device_attribute dev_attr_wq_ats_disable =
__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
+static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags));
+}
+
+static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+ bool prs_dis;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (!idxd->hw.wq_cap.wq_prs_support)
+ return -EOPNOTSUPP;
+
+ rc = kstrtobool(buf, &prs_dis);
+ if (rc < 0)
+ return rc;
+
+ if (prs_dis) {
+ set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
+ /* when PRS is disabled, BOF needs to be off as well */
+ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+ } else {
+ clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
+ }
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_prs_disable =
+ __ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store);
+
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = confdev_to_wq(dev);
@@ -1239,6 +1281,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_max_transfer_size.attr,
&dev_attr_wq_max_batch_size.attr,
&dev_attr_wq_ats_disable.attr,
+ &dev_attr_wq_prs_disable.attr,
&dev_attr_wq_occupancy.attr,
&dev_attr_wq_enqcmds_retries.attr,
&dev_attr_wq_op_config.attr,
@@ -1260,6 +1303,13 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
idxd->data->type == IDXD_TYPE_IAX;
}
+static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return attr == &dev_attr_wq_prs_disable.attr &&
+ !idxd->hw.wq_cap.wq_prs_support;
+}
+
static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1273,6 +1323,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
return 0;
+ if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
+ return 0;
+
return attr->mode;
}
@@ -1292,6 +1345,7 @@ static void idxd_conf_wq_release(struct device *dev)
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
+ xa_destroy(&wq->upasid_xa);
kfree(wq);
}
@@ -1452,15 +1506,13 @@ static ssize_t errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- int i, out = 0;
+ DECLARE_BITMAP(swerr_bmap, 256);
+ bitmap_zero(swerr_bmap, 256);
spin_lock(&idxd->dev_lock);
- for (i = 0; i < 4; i++)
- out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
+ multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4);
spin_unlock(&idxd->dev_lock);
- out--;
- out += sysfs_emit_at(buf, out, "\n");
- return out;
+ return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap);
}
static DEVICE_ATTR_RO(errors);
@@ -1563,6 +1615,59 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att
}
static DEVICE_ATTR_RW(cmd_status);
+static ssize_t iaa_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->hw.version < DEVICE_VERSION_2)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits);
+}
+static DEVICE_ATTR_RO(iaa_cap);
+
+static ssize_t event_log_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (!idxd->evl)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", idxd->evl->size);
+}
+
+static ssize_t event_log_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+ unsigned long val;
+ int rc;
+
+ if (!idxd->evl)
+ return -EOPNOTSUPP;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
+ (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
+ return -EINVAL;
+
+ idxd->evl->size = val;
+ return count;
+}
+static DEVICE_ATTR_RW(event_log_size);
+
static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
struct idxd_device *idxd)
{
@@ -1585,6 +1690,21 @@ static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
idxd->data->type == IDXD_TYPE_IAX;
}
+static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return attr == &dev_attr_iaa_cap.attr &&
+ (idxd->data->type != IDXD_TYPE_IAX ||
+ idxd->hw.version < DEVICE_VERSION_2);
+}
+
+static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return (attr == &dev_attr_event_log_size.attr &&
+ !idxd->hw.gen_cap.evl_support);
+}
+
static umode_t idxd_device_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1597,6 +1717,12 @@ static umode_t idxd_device_attr_visible(struct kobject *kobj,
if (idxd_device_attr_read_buffers_invisible(attr, idxd))
return 0;
+ if (idxd_device_attr_iaa_cap_invisible(attr, idxd))
+ return 0;
+
+ if (idxd_device_attr_event_log_size_invisible(attr, idxd))
+ return 0;
+
return attr->mode;
}
@@ -1622,6 +1748,8 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_read_buffer_limit.attr,
&dev_attr_cdev_major.attr,
&dev_attr_cmd_status.attr,
+ &dev_attr_iaa_cap.attr,
+ &dev_attr_event_log_size.attr,
NULL,
};
@@ -1643,6 +1771,8 @@ static void idxd_conf_device_release(struct device *dev)
bitmap_free(idxd->wq_enable_map);
kfree(idxd->wqs);
kfree(idxd->engines);
+ kfree(idxd->evl);
+ kmem_cache_destroy(idxd->evl_cache);
ida_free(&idxd_ida, idxd->id);
bitmap_free(idxd->opcap_bmap);
kfree(idxd);
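The reworked errors_show() above folds the four 64-bit SWERROR words into a single 256-bit bitmap and prints it with the kernel's "%*pb" bitmap format. The small standalone loop below is only a plain-C illustration of that word-to-bit mapping (it is not the kernel helper, and the sample values are invented).

/* Illustrative only: map four u64 error words onto 256 bit positions. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sw_err[4] = { 0x5, 0x0, 0x1, 0x0 };	/* made-up sample */
	int word, bit;

	for (word = 0; word < 4; word++)
		for (bit = 0; bit < 64; bit++)
			if (sw_err[word] & (1ULL << bit))
				printf("error bit %d set\n", word * 64 + bit);
	return 0;
}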
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index e4ea107ce78c..ad084552640f 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -886,7 +886,6 @@ static int img_mdc_runtime_resume(struct device *dev)
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
- struct resource *res;
unsigned int i;
u32 val;
int ret;
@@ -898,8 +897,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdma->regs = devm_ioremap_resource(&pdev->dev, res);
+ mdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdma->regs))
return PTR_ERR(mdma->regs);
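Many of the conversions that follow (imx-dma, imx-sdma, mcf-edma, mtk-hsdma, mmp_pdma, mmp_tdma, moxart, mv_xor_v2, mxs-dma, nbpfaxi, pxa_dma, bam_dma, ...) make the same substitution shown here: the platform_get_resource()/devm_ioremap_resource() pair becomes one devm_platform_ioremap_resource() call. The schematic probe below is a generic sketch, not any of these drivers.

/* Schematic probe showing the helper used throughout these conversions. */
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Equivalent to the older two-step form:
	 *   struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}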
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 65c6094ce063..f040751690af 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -750,7 +750,6 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
break;
- memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
dma_async_tx_descriptor_init(&desc->desc, chan);
desc->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */
@@ -1038,7 +1037,6 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
- struct resource *res;
int ret, i;
int irq, irq_err;
@@ -1049,8 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->dev = &pdev->dev;
imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imxdma->base = devm_ioremap_resource(&pdev->dev, res);
+ imxdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imxdma->base))
return PTR_ERR(imxdma->base);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fbea5f62dd98..7a912f90c2a9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -954,7 +954,10 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
desc = sdmac->desc;
if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP) {
- sdma_update_channel_loop(sdmac);
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ sdma_update_channel_loop(sdmac);
+ else
+ vchan_cyclic_callback(&desc->vd);
} else {
mxc_sdma_handle_channel_normal(sdmac);
vchan_cookie_complete(&desc->vd);
@@ -1074,6 +1077,10 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
break;
+ case IMX_DMATYPE_HDMI:
+ emi_2_per = sdma->script_addrs->hdmi_dma_addr;
+ sdmac->is_ram_script = true;
+ break;
default:
dev_err(sdma->dev, "Unsupported transfer type %d\n",
peripheral_type);
@@ -1125,11 +1132,16 @@ static int sdma_load_context(struct sdma_channel *sdmac)
/* Send by context the event mask,base address for peripheral
* and watermark level
*/
- context->gReg[0] = sdmac->event_mask[1];
- context->gReg[1] = sdmac->event_mask[0];
- context->gReg[2] = sdmac->per_addr;
- context->gReg[6] = sdmac->shp_addr;
- context->gReg[7] = sdmac->watermark_level;
+ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
+ context->gReg[4] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ } else {
+ context->gReg[0] = sdmac->event_mask[1];
+ context->gReg[1] = sdmac->event_mask[0];
+ context->gReg[2] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ context->gReg[7] = sdmac->watermark_level;
+ }
bd0->mode.command = C0_SETDM;
bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
@@ -1513,7 +1525,7 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
desc->sdmac = sdmac;
desc->num_bd = bds;
- if (sdma_alloc_bd(desc))
+ if (bds && sdma_alloc_bd(desc))
goto err_desc_out;
/* No slave_config called in MEMCPY case, so do here */
@@ -1521,10 +1533,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
- goto err_desc_out;
+ goto err_bd_out;
return desc;
+err_bd_out:
+ sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:
@@ -1678,13 +1692,16 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- int num_periods = buf_len / period_len;
+ int num_periods = 0;
int channel = sdmac->channel;
int i = 0, buf = 0;
struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ num_periods = buf_len / period_len;
+
sdma_config_write(chan, &sdmac->slave_config, direction);
desc = sdma_transfer_init(sdmac, direction, num_periods);
@@ -1701,6 +1718,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
goto err_bd_out;
}
+ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+
while (buf < buf_len) {
struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
@@ -1761,6 +1781,10 @@ static int sdma_config_write(struct dma_chan *chan,
sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
SDMA_WATERMARK_LEVEL_HWML;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ } else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->per_address2 = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = 0;
} else {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -2167,7 +2191,6 @@ static int sdma_probe(struct platform_device *pdev)
const char *fw_name;
int ret;
int irq;
- struct resource *iores;
struct resource spba_res;
int i;
struct sdma_engine *sdma;
@@ -2190,8 +2213,7 @@ static int sdma_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+ sdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdma->regs))
return PTR_ERR(sdma->regs);
@@ -2232,6 +2254,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 5d707ff63554..c4602bfc9c74 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -15,7 +15,6 @@
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
-#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
@@ -1191,13 +1190,13 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
/* disable relaxed ordering */
- err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
+ err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
if (err)
return pcibios_err_to_errno(err);
/* clear relaxed ordering enable */
- val16 &= ~IOAT_DEVCTRL_ROE;
- err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
+ val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
if (err)
return pcibios_err_to_errno(err);
@@ -1380,15 +1379,11 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (is_skx_ioat(pdev))
device->version = IOAT_VER_3_2;
err = ioat3_dma_probe(device, ioat_dca_enabled);
-
- if (device->version >= IOAT_VER_3_3)
- pci_enable_pcie_error_reporting(pdev);
} else
return -ENODEV;
if (err) {
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
- pci_disable_pcie_error_reporting(pdev);
return -ENODEV;
}
@@ -1411,7 +1406,6 @@ static void ioat_remove(struct pci_dev *pdev)
device->dca = NULL;
}
- pci_disable_pcie_error_reporting(pdev);
ioat_dma_remove(device);
}
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f55a5f92f185..54cf0ad39887 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -14,13 +14,6 @@
#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
-/* PCIe config registers */
-
-/* EXPCAPID + N */
-#define IOAT_DEVCTRL_OFFSET 0x8
-/* relaxed ordering enable */
-#define IOAT_DEVCTRL_ROE 0x10
-
/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
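The ioat hunks above drop the driver-private IOAT_DEVCTRL_OFFSET/IOAT_DEVCTRL_ROE definitions in favour of the standard PCI_EXP_DEVCTL register and PCI_EXP_DEVCTL_RELAX_EN bit, accessed through the pcie_capability_*() accessors. As a hedged aside (not what the patch does), the same clear-one-bit read-modify-write can also be expressed with the PCI core's clear helper; the function below is purely illustrative.

/* Illustrative use of the standard symbols now referenced by ioat. */
#include <linux/pci.h>

static int example_disable_relaxed_ordering(struct pci_dev *pdev)
{
	/* Clears PCI_EXP_DEVCTL_RELAX_EN in the PCIe Device Control register. */
	return pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
					  PCI_EXP_DEVCTL_RELAX_EN);
}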
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index baab1ca9f621..d799b99c18bd 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1797,6 +1797,5 @@ static int __init ipu_init(void)
subsys_initcall(ipu_init);
MODULE_DESCRIPTION("IPU core driver");
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 9b9184f964be..1709d159af7e 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
}
}
-static int ldma_cfg_init(struct ldma_dev *d)
+static int ldma_parse_dt(struct ldma_dev *d)
{
struct fwnode_handle *fwnode = dev_fwnode(d->dev);
struct ldma_port *p;
@@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
p->ldev = d;
}
- ret = ldma_cfg_init(d);
- if (ret)
- return ret;
-
dma_dev->dev = &pdev->dev;
ch_mask = (unsigned long)d->channels_mask;
@@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
ldma_dma_init_v3X(j, d);
}
+ ret = ldma_parse_dt(d);
+ if (ret)
+ return ret;
+
dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
dma_dev->device_free_chan_resources = ldma_free_chan_resources;
dma_dev->device_terminate_all = ldma_terminate_all;
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e12b754e6398..ebd8733f72ad 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -182,7 +182,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *mcf_edma;
struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
- struct resource *res;
int ret, i, len, chans;
pdata = dev_get_platdata(&pdev->dev);
@@ -210,9 +209,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
mutex_init(&mcf_edma->fsl_edma_mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcf_edma->membase))
return PTR_ERR(mcf_edma->membase);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f7717c44b887..69cc61c0b262 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -896,7 +896,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_device *hsdma;
struct mtk_hsdma_vchan *vc;
struct dma_device *dd;
- struct resource *res;
int i, err;
hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
@@ -905,8 +904,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
dd = &hsdma->ddev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsdma->base = devm_ioremap_resource(&pdev->dev, res);
+ hsdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsdma->base))
return PTR_ERR(hsdma->base);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index e8d71b35593e..ebdfdcbb4f7a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1022,7 +1022,6 @@ static int mmp_pdma_probe(struct platform_device *op)
struct mmp_pdma_device *pdev;
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- struct resource *iores;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,8 +1036,7 @@ static int mmp_pdma_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index a262e0eb4cc9..d49fa6bc6775 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -15,7 +15,7 @@
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/platform_data/dma-mmp_tdma.h>
+#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
@@ -639,7 +639,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
enum mmp_tdma_type type;
const struct of_device_id *of_id;
struct mmp_tdma_device *tdev;
- struct resource *iores;
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
@@ -663,17 +662,13 @@ static int mmp_tdma_probe(struct platform_device *pdev)
irq_num++;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+ tdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
INIT_LIST_HEAD(&tdev->device.channels);
- if (pdev->dev.of_node)
- pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
- else
- pool = sram_get_gpool("asram");
+ pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
if (!pool) {
dev_err(&pdev->dev, "asram pool not available\n");
return -ENOMEM;
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 7459382a8353..7565ad98ba66 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -563,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
void __iomem *dma_base_addr;
int ret, i;
unsigned int irq;
@@ -580,8 +579,7 @@ static int moxart_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dma_base_addr = devm_ioremap_resource(dev, res);
+ dma_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dma_base_addr))
return PTR_ERR(dma_base_addr);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 113834e1167b..0e1e9ca1c005 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -714,7 +714,6 @@ static int mv_xor_v2_resume(struct platform_device *dev)
static int mv_xor_v2_probe(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev;
- struct resource *res;
int i, ret = 0;
struct dma_device *dma_dev;
struct mv_xor_v2_sw_desc *sw_desc;
@@ -726,13 +725,11 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (!xor_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+ xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xor_dev->dma_base))
return PTR_ERR(xor_dev->dma_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+ xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(xor_dev->glob_base))
return PTR_ERR(xor_dev->glob_base);
@@ -742,32 +739,18 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (ret)
return ret;
- xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
- if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
- if (!IS_ERR(xor_dev->reg_clk)) {
- ret = clk_prepare_enable(xor_dev->reg_clk);
- if (ret)
- return ret;
- } else {
- return PTR_ERR(xor_dev->reg_clk);
- }
- }
+ xor_dev->reg_clk = devm_clk_get_optional_enabled(&pdev->dev, "reg");
+ if (IS_ERR(xor_dev->reg_clk))
+ return PTR_ERR(xor_dev->reg_clk);
- xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
- ret = EPROBE_DEFER;
- goto disable_reg_clk;
- }
- if (!IS_ERR(xor_dev->clk)) {
- ret = clk_prepare_enable(xor_dev->clk);
- if (ret)
- goto disable_reg_clk;
- }
+ xor_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(xor_dev->clk))
+ return PTR_ERR(xor_dev->clk);
ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
mv_xor_v2_set_msi_msg);
if (ret)
- goto disable_clk;
+ return ret;
xor_dev->irq = msi_get_virq(&pdev->dev, 0);
@@ -869,10 +852,6 @@ free_hw_desq:
xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
platform_msi_domain_free_irqs(&pdev->dev);
-disable_clk:
- clk_disable_unprepare(xor_dev->clk);
-disable_reg_clk:
- clk_disable_unprepare(xor_dev->reg_clk);
return ret;
}
@@ -892,9 +871,6 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
tasklet_kill(&xor_dev->irq_tasklet);
- clk_disable_unprepare(xor_dev->clk);
- clk_disable_unprepare(xor_dev->reg_clk);
-
return 0;
}
@@ -920,4 +896,3 @@ static struct platform_driver mv_xor_v2_driver = {
module_platform_driver(mv_xor_v2_driver);
MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
-MODULE_LICENSE("GPL");
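The mv_xor_v2 probe above switches to devm_clk_get_optional_enabled() and devm_clk_get_enabled(), which look up, prepare and enable a clock and register the matching unwind actions, so the explicit clk_disable_unprepare() calls disappear from the error and remove paths. The probe sketch below is generic and schematic, not the driver itself.

/* Schematic of the managed-clock pattern adopted above. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *reg_clk, *core_clk;

	/* Optional clock: if no "reg" clock exists, a NULL (no-op) clk is returned. */
	reg_clk = devm_clk_get_optional_enabled(&pdev->dev, "reg");
	if (IS_ERR(reg_clk))
		return PTR_ERR(reg_clk);

	core_clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	/* No clk_disable_unprepare() needed in remove or error paths. */
	return 0;
}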
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index dc147cc2436e..acc4d53e4630 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -746,7 +746,6 @@ static int mxs_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
- struct resource *iores;
int ret, i;
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
@@ -763,8 +762,7 @@ static int mxs_dma_probe(struct platform_device *pdev)
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
+ mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs_dma->base))
return PTR_ERR(mxs_dma->base);
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index a7063e9cd551..e72e8c10355e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1294,7 +1294,6 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
- struct resource *iomem;
const struct nbpf_config *cfg;
int num_channels;
int ret, irq, eirq, i;
@@ -1318,8 +1317,7 @@ static int nbpf_probe(struct platform_device *pdev)
dma_dev = &nbpf->dma_dev;
dma_dev->dev = dev;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nbpf->base = devm_ioremap_resource(dev, iomem);
+ nbpf->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nbpf->base))
return PTR_ERR(nbpf->base);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index ac61ecda2926..775a7f408b9a 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -264,7 +264,7 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
}
/* Silently fail if there is not even the "dmas" property */
- if (!of_find_property(np, "dmas", NULL))
+ if (!of_property_present(np, "dmas"))
return ERR_PTR(-ENODEV);
count = of_property_count_strings(np, "dma-names");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 6b5e91f26afc..686c270ef710 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4299,9 +4299,8 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
if (ppc440spe_adma_devices[i] == -1)
continue;
- size += scnprintf(buf + size, PAGE_SIZE - size,
- "PPC440SP(E)-ADMA.%d: %s\n", i,
- ppc_adma_errors[ppc440spe_adma_devices[i]]);
+ size += sysfs_emit_at(buf, size, "PPC440SP(E)-ADMA.%d: %s\n",
+ i, ppc_adma_errors[ppc440spe_adma_devices[i]]);
}
return size;
}
@@ -4309,9 +4308,8 @@ static DRIVER_ATTR_RO(devices);
static ssize_t enable_show(struct device_driver *dev, char *buf)
{
- return snprintf(buf, PAGE_SIZE,
- "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
- ppc440spe_r6_enabled ? "EN" : "DIS");
+ return sysfs_emit(buf, "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
+ ppc440spe_r6_enabled ? "EN" : "DIS");
}
static ssize_t enable_store(struct device_driver *dev, const char *buf,
@@ -4362,7 +4360,7 @@ static ssize_t poly_show(struct device_driver *dev, char *buf)
reg &= 0xFF;
#endif
- size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
+ size = sysfs_emit(buf, "PPC440SP(e) RAID-6 driver "
"uses 0x1%02x polynomial.\n", reg);
return size;
}
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 377da23012ac..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
u32 tail;
+ unsigned long flags;
if (soc) {
desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
desc->dw0 &= ~DWORD0_SOC;
}
- mutex_lock(&cmd_q->q_mutex);
+ spin_lock_irqsave(&cmd_q->q_lock, flags);
/* Copy 32-byte command descriptor to hw queue. */
memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
/* Turn the queue back on using our cached control register */
pt_start_queue(cmd_q);
- mutex_unlock(&cmd_q->q_mutex);
+ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
return 0;
}
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
cmd_q->pt = pt;
cmd_q->dma_pool = dma_pool;
- mutex_init(&cmd_q->q_mutex);
+ spin_lock_init(&cmd_q->q_lock);
/* Page alignment satisfies our needs for N <= 128 */
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
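The ptdma hunks above replace the command-queue mutex with a spinlock taken via spin_lock_irqsave(), which, unlike a mutex, may be taken from atomic context such as a completion callback. The fragment below is a minimal sketch of that locking pattern with invented names; it is not code from the driver.

/* Minimal sketch of the irqsave spinlock pattern (names are illustrative). */
#include <linux/spinlock.h>

struct example_queue {
	spinlock_t q_lock;
	unsigned int qidx;
};

static void example_push(struct example_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->q_lock, flags);
	q->qidx++;		/* touch queue state with interrupts masked */
	spin_unlock_irqrestore(&q->q_lock, flags);
}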
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index cc22d162ce25..1aa65e5de0f3 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -254,7 +254,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was nothing active, start processing */
- if (engine_is_idle)
+ if (engine_is_idle && desc)
pt_cmd_callback(desc, 0);
}
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index d093c43b7d13..21b4bf895200 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */
- struct mutex q_mutex ____cacheline_aligned;
+ spinlock_t q_lock ____cacheline_aligned;
unsigned int qidx;
unsigned int qsize;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 22a392fe6d32..1b046d9a3a26 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1346,7 +1346,6 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- struct resource *iores;
int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@@ -1358,8 +1357,7 @@ static int pxad_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- pdev->base = devm_ioremap_resource(&op->dev, iores);
+ pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 2ff787df513e..1e47d27e1f81 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1237,7 +1237,6 @@ static int bam_dma_probe(struct platform_device *pdev)
{
struct bam_device *bdev;
const struct of_device_id *match;
- struct resource *iores;
int ret, i;
bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
@@ -1254,8 +1253,7 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->layout = match->data;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ bdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdev->regs))
return PTR_ERR(bdev->regs);
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 061add832295..932628b319c8 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
if (spi->cmd == SPI_RX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
} else if (spi->cmd == SPI_TX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
} else { /* SPI_DUPLEX */
@@ -1965,7 +1966,6 @@ error_alloc_ev_ring:
error_config_int:
gpi_free_ring(&gpii->ev_ring, gpii);
exit_gpi_init:
- mutex_unlock(&gpii->ctrl_lock);
return ret;
}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 62026607f3f8..05e96b31d871 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
deleted file mode 100644
index a09eeb545f7d..000000000000
--- a/drivers/dma/s3c24xx-dma.c
+++ /dev/null
@@ -1,1428 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * S3C24XX DMA handling
- *
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * based on amba-pl08x.c
- *
- * Copyright (c) 2006 ARM Ltd.
- * Copyright (c) 2010 ST-Ericsson SA
- *
- * Author: Peter Pearse <peter.pearse@arm.com>
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
- * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
- * that can be routed to any of the 4 to 8 hardware-channels.
- *
- * Therefore on these DMA controllers the number of channels
- * and the number of incoming DMA signals are two totally different things.
- * It is usually not possible to theoretically handle all physical signals,
- * so a multiplexing scheme with possible denial of use is necessary.
- *
- * Open items:
- * - bursts
- */
-
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
-#include <linux/platform_data/dma-s3c24xx.h>
-
-#include "dmaengine.h"
-#include "virt-dma.h"
-
-#define MAX_DMA_CHANNELS 8
-
-#define S3C24XX_DISRC 0x00
-#define S3C24XX_DISRCC 0x04
-#define S3C24XX_DISRCC_INC_INCREMENT 0
-#define S3C24XX_DISRCC_INC_FIXED BIT(0)
-#define S3C24XX_DISRCC_LOC_AHB 0
-#define S3C24XX_DISRCC_LOC_APB BIT(1)
-
-#define S3C24XX_DIDST 0x08
-#define S3C24XX_DIDSTC 0x0c
-#define S3C24XX_DIDSTC_INC_INCREMENT 0
-#define S3C24XX_DIDSTC_INC_FIXED BIT(0)
-#define S3C24XX_DIDSTC_LOC_AHB 0
-#define S3C24XX_DIDSTC_LOC_APB BIT(1)
-#define S3C24XX_DIDSTC_INT_TC0 0
-#define S3C24XX_DIDSTC_INT_RELOAD BIT(2)
-
-#define S3C24XX_DCON 0x10
-
-#define S3C24XX_DCON_TC_MASK 0xfffff
-#define S3C24XX_DCON_DSZ_BYTE (0 << 20)
-#define S3C24XX_DCON_DSZ_HALFWORD (1 << 20)
-#define S3C24XX_DCON_DSZ_WORD (2 << 20)
-#define S3C24XX_DCON_DSZ_MASK (3 << 20)
-#define S3C24XX_DCON_DSZ_SHIFT 20
-#define S3C24XX_DCON_AUTORELOAD 0
-#define S3C24XX_DCON_NORELOAD BIT(22)
-#define S3C24XX_DCON_HWTRIG BIT(23)
-#define S3C24XX_DCON_HWSRC_SHIFT 24
-#define S3C24XX_DCON_SERV_SINGLE 0
-#define S3C24XX_DCON_SERV_WHOLE BIT(27)
-#define S3C24XX_DCON_TSZ_UNIT 0
-#define S3C24XX_DCON_TSZ_BURST4 BIT(28)
-#define S3C24XX_DCON_INT BIT(29)
-#define S3C24XX_DCON_SYNC_PCLK 0
-#define S3C24XX_DCON_SYNC_HCLK BIT(30)
-#define S3C24XX_DCON_DEMAND 0
-#define S3C24XX_DCON_HANDSHAKE BIT(31)
-
-#define S3C24XX_DSTAT 0x14
-#define S3C24XX_DSTAT_STAT_BUSY BIT(20)
-#define S3C24XX_DSTAT_CURRTC_MASK 0xfffff
-
-#define S3C24XX_DMASKTRIG 0x20
-#define S3C24XX_DMASKTRIG_SWTRIG BIT(0)
-#define S3C24XX_DMASKTRIG_ON BIT(1)
-#define S3C24XX_DMASKTRIG_STOP BIT(2)
-
-#define S3C24XX_DMAREQSEL 0x24
-#define S3C24XX_DMAREQSEL_HW BIT(0)
-
-/*
- * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
- * for a DMA source. Instead only specific channels are valid.
- * All of these SoCs have 4 physical channels and the number of request
- * source bits is 3. Additionally we also need 1 bit to mark the channel
- * as valid.
- * Therefore we separate the chansel element of the channel data into 4
- * parts of 4 bits each, to hold the information if the channel is valid
- * and the hw request source to use.
- *
- * Example:
- * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
- * For it the chansel field would look like
- *
- * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
- * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
- * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2
- */
-#define S3C24XX_CHANSEL_WIDTH 4
-#define S3C24XX_CHANSEL_VALID BIT(3)
-#define S3C24XX_CHANSEL_REQ_MASK 7
-
-/*
- * struct soc_data - vendor-specific config parameters for individual SoCs
- * @stride: spacing between the registers of each channel
- * @has_reqsel: does the controller use the newer requestselection mechanism
- * @has_clocks: are controllable dma-clocks present
- */
-struct soc_data {
- int stride;
- bool has_reqsel;
- bool has_clocks;
-};
-
-/*
- * enum s3c24xx_dma_chan_state - holds the virtual channel states
- * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
- * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum s3c24xx_dma_chan_state {
- S3C24XX_DMA_CHAN_IDLE,
- S3C24XX_DMA_CHAN_RUNNING,
- S3C24XX_DMA_CHAN_WAITING,
-};
-
-/*
- * struct s3c24xx_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct s3c24xx_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- size_t len;
- struct list_head node;
-};
-
-/*
- * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
- * @vd: virtual DMA descriptor
- * @dsg_list: list of children sg's
- * @at: sg currently being transfered
- * @width: transfer width
- * @disrcc: value for source control register
- * @didstc: value for destination control register
- * @dcon: base value for dcon register
- * @cyclic: indicate cyclic transfer
- */
-struct s3c24xx_txd {
- struct virt_dma_desc vd;
- struct list_head dsg_list;
- struct list_head *at;
- u8 width;
- u32 disrcc;
- u32 didstc;
- u32 dcon;
- bool cyclic;
-};
-
-struct s3c24xx_dma_chan;
-
-/*
- * struct s3c24xx_dma_phy - holder for the physical channels
- * @id: physical index to this channel
- * @valid: does the channel have all required elements
- * @base: virtual memory base (remapped) for the this channel
- * @irq: interrupt for this channel
- * @clk: clock for this channel
- * @lock: a lock to use when altering an instance of this struct
- * @serving: virtual channel currently being served by this physicalchannel
- * @host: a pointer to the host (internal use)
- */
-struct s3c24xx_dma_phy {
- unsigned int id;
- bool valid;
- void __iomem *base;
- int irq;
- struct clk *clk;
- spinlock_t lock;
- struct s3c24xx_dma_chan *serving;
- struct s3c24xx_dma_engine *host;
-};
-
-/*
- * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
- * @id: the id of the channel
- * @name: name of the channel
- * @vc: wrapped virtual channel
- * @phy: the physical channel utilized by this channel, if there is one
- * @runtime_addr: address for RX/TX according to the runtime config
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- */
-struct s3c24xx_dma_chan {
- int id;
- const char *name;
- struct virt_dma_chan vc;
- struct s3c24xx_dma_phy *phy;
- struct dma_slave_config cfg;
- struct s3c24xx_txd *at;
- struct s3c24xx_dma_engine *host;
- enum s3c24xx_dma_chan_state state;
- bool slave;
-};
-
-/*
- * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
- * @pdev: the corresponding platform device
- * @pdata: platform data passed in from the platform/machine
- * @base: virtual memory base (remapped)
- * @slave: slave engine for this instance
- * @memcpy: memcpy engine for this instance
- * @phy_chans: array of data for the physical channels
- */
-struct s3c24xx_dma_engine {
- struct platform_device *pdev;
- const struct s3c24xx_dma_platdata *pdata;
- struct soc_data *sdata;
- void __iomem *base;
- struct dma_device slave;
- struct dma_device memcpy;
- struct s3c24xx_dma_phy *phy_chans;
-};
-
-/*
- * Physical channel handling
- */
-
-/*
- * Check whether a certain channel is busy or not.
- */
-static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
-{
- unsigned int val = readl(phy->base + S3C24XX_DSTAT);
- return val & S3C24XX_DSTAT_STAT_BUSY;
-}
-
-static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
- struct s3c24xx_dma_phy *phy)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- int phyvalid;
-
- /* every phy is valid for memcopy channels */
- if (!s3cchan->slave)
- return true;
-
- /* On newer variants all phys can be used for all virtual channels */
- if (s3cdma->sdata->has_reqsel)
- return true;
-
- phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
- return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
-}
-
-/*
- * Allocate a physical channel for a virtual channel
- *
- * Try to locate a physical channel to be used for this transfer. If all
- * are taken return NULL and the requester will have to cope by using
- * some fallback PIO mode or retrying later.
- */
-static
-struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy = NULL;
- unsigned long flags;
- int i;
- int ret;
-
- for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
- phy = &s3cdma->phy_chans[i];
-
- if (!phy->valid)
- continue;
-
- if (!s3c24xx_dma_phy_valid(s3cchan, phy))
- continue;
-
- spin_lock_irqsave(&phy->lock, flags);
-
- if (!phy->serving) {
- phy->serving = s3cchan;
- spin_unlock_irqrestore(&phy->lock, flags);
- break;
- }
-
- spin_unlock_irqrestore(&phy->lock, flags);
- }
-
- /* No physical channel available, cope with it */
- if (i == s3cdma->pdata->num_phy_channels) {
- dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
- return NULL;
- }
-
- /* start the phy clock */
- if (s3cdma->sdata->has_clocks) {
- ret = clk_enable(phy->clk);
- if (ret) {
- dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
- phy->id, ret);
- phy->serving = NULL;
- return NULL;
- }
- }
-
- return phy;
-}
-
-/*
- * Mark the physical channel as free.
- *
- * This drops the link between the physical and virtual channel.
- */
-static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
-{
- struct s3c24xx_dma_engine *s3cdma = phy->host;
-
- if (s3cdma->sdata->has_clocks)
- clk_disable(phy->clk);
-
- phy->serving = NULL;
-}
-
-/*
- * Stops the channel by writing the stop bit.
- * This should not be used for an on-going transfer, but as a method of
- * shutting down a channel (eg, when it's no longer used) or terminating a
- * transfer.
- */
-static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
-{
- writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
-}
-
-/*
- * Virtual channel handling
- */
-
-static inline
-struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
-}
-
-static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- struct s3c24xx_txd *txd = s3cchan->at;
- u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
-
- return tc * txd->width;
-}
-
-static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- unsigned long flags;
- int ret = 0;
-
- /* Reject definitely invalid configurations */
- if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
- config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
- return -EINVAL;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
-
- if (!s3cchan->slave) {
- ret = -EINVAL;
- goto out;
- }
-
- s3cchan->cfg = *config;
-
-out:
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
-}
-
-/*
- * Transfer handling
- */
-
-static inline
-struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
-{
- return container_of(tx, struct s3c24xx_txd, vd.tx);
-}
-
-static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
-{
- struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
-
- if (txd) {
- INIT_LIST_HEAD(&txd->dsg_list);
- txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
- }
-
- return txd;
-}
-
-static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
-{
- struct s3c24xx_sg *dsg, *_dsg;
-
- list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
- list_del(&dsg->node);
- kfree(dsg);
- }
-
- kfree(txd);
-}
-
-static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
- struct s3c24xx_txd *txd)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
- u32 dcon = txd->dcon;
- u32 val;
-
- /* transfer-size and -count from len and width */
- switch (txd->width) {
- case 1:
- dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
- break;
- case 2:
- dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
- break;
- case 4:
- dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
- break;
- }
-
- if (s3cchan->slave) {
- struct s3c24xx_dma_channel *cdata =
- &pdata->channels[s3cchan->id];
-
- if (s3cdma->sdata->has_reqsel) {
- writel_relaxed((cdata->chansel << 1) |
- S3C24XX_DMAREQSEL_HW,
- phy->base + S3C24XX_DMAREQSEL);
- } else {
- int csel = cdata->chansel >> (phy->id *
- S3C24XX_CHANSEL_WIDTH);
-
- csel &= S3C24XX_CHANSEL_REQ_MASK;
- dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
- dcon |= S3C24XX_DCON_HWTRIG;
- }
- } else {
- if (s3cdma->sdata->has_reqsel)
- writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
- }
-
- writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
- writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
- writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
- writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
- writel_relaxed(dcon, phy->base + S3C24XX_DCON);
-
- val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
- val &= ~S3C24XX_DMASKTRIG_STOP;
- val |= S3C24XX_DMASKTRIG_ON;
-
- /* trigger the dma operation for memcpy transfers */
- if (!s3cchan->slave)
- val |= S3C24XX_DMASKTRIG_SWTRIG;
-
- writel(val, phy->base + S3C24XX_DMASKTRIG);
-}
-
-/*
- * Set the initial DMA register values and start first sg.
- */
-static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
- struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
-
- list_del(&txd->vd.node);
-
- s3cchan->at = txd;
-
- /* Wait for channel inactive */
- while (s3c24xx_dma_phy_busy(phy))
- cpu_relax();
-
- /* point to the first element of the sg list */
- txd->at = txd->dsg_list.next;
- s3c24xx_dma_start_next_sg(s3cchan, txd);
-}
-
-/*
- * Try to allocate a physical channel. When successful, assign it to
- * this virtual channel, and initiate the next descriptor. The
- * virtual channel lock must be held at this point.
- */
-static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy;
-
- phy = s3c24xx_dma_get_phy(s3cchan);
- if (!phy) {
- dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
- s3cchan->name);
- s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
- return;
- }
-
- dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
- phy->id, s3cchan->name);
-
- s3cchan->phy = phy;
- s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
-
- s3c24xx_dma_start_next_txd(s3cchan);
-}
-
-static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
- struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
-
- dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
- phy->id, s3cchan->name);
-
- /*
- * We do this without taking the lock; we're really only concerned
- * about whether this pointer is NULL or not, and we're guaranteed
- * that this will only be called when it _already_ is non-NULL.
- */
- phy->serving = s3cchan;
- s3cchan->phy = phy;
- s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
- s3c24xx_dma_start_next_txd(s3cchan);
-}
-
-/*
- * Free a physical DMA channel, potentially reallocating it to another
- * virtual channel if we have any pending.
- */
-static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_chan *p, *next;
-
-retry:
- next = NULL;
-
- /* Find a waiting virtual channel for the next transfer. */
- list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
- if (p->state == S3C24XX_DMA_CHAN_WAITING) {
- next = p;
- break;
- }
-
- if (!next) {
- list_for_each_entry(p, &s3cdma->slave.channels,
- vc.chan.device_node)
- if (p->state == S3C24XX_DMA_CHAN_WAITING &&
- s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
- next = p;
- break;
- }
- }
-
- /* Ensure that the physical channel is stopped */
- s3c24xx_dma_terminate_phy(s3cchan->phy);
-
- if (next) {
- bool success;
-
- /*
- * Eww. We know this isn't going to deadlock
- * but lockdep probably doesn't.
- */
- spin_lock(&next->vc.lock);
- /* Re-check the state now that we have the lock */
- success = next->state == S3C24XX_DMA_CHAN_WAITING;
- if (success)
- s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
- spin_unlock(&next->vc.lock);
-
- /* If the state changed, try to find another channel */
- if (!success)
- goto retry;
- } else {
- /* No more jobs, so free up the physical channel */
- s3c24xx_dma_put_phy(s3cchan->phy);
- }
-
- s3cchan->phy = NULL;
- s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-}
-
-static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
-{
- struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
-
- if (!s3cchan->slave)
- dma_descriptor_unmap(&vd->tx);
-
- s3c24xx_dma_free_txd(txd);
-}
-
-static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
-{
- struct s3c24xx_dma_phy *phy = data;
- struct s3c24xx_dma_chan *s3cchan = phy->serving;
- struct s3c24xx_txd *txd;
-
- dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
-
- /*
- * Interrupts happen to notify the completion of a transfer and the
- * channel should have moved into its stop state already on its own.
- * Therefore interrupts on channels not bound to a virtual channel
- * should never happen. Nevertheless send a terminate command to the
- * channel if the unlikely case happens.
- */
- if (unlikely(!s3cchan)) {
- dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
- phy->id);
-
- s3c24xx_dma_terminate_phy(phy);
-
- return IRQ_HANDLED;
- }
-
- spin_lock(&s3cchan->vc.lock);
- txd = s3cchan->at;
- if (txd) {
- /* when more sg's are in this txd, start the next one */
- if (!list_is_last(txd->at, &txd->dsg_list)) {
- txd->at = txd->at->next;
- if (txd->cyclic)
- vchan_cyclic_callback(&txd->vd);
- s3c24xx_dma_start_next_sg(s3cchan, txd);
- } else if (!txd->cyclic) {
- s3cchan->at = NULL;
- vchan_cookie_complete(&txd->vd);
-
- /*
- * And start the next descriptor (if any),
- * otherwise free this channel.
- */
- if (vchan_next_desc(&s3cchan->vc))
- s3c24xx_dma_start_next_txd(s3cchan);
- else
- s3c24xx_dma_phy_free(s3cchan);
- } else {
- vchan_cyclic_callback(&txd->vd);
-
- /* Cyclic: reset at beginning */
- txd->at = txd->dsg_list.next;
- s3c24xx_dma_start_next_sg(s3cchan, txd);
- }
- }
- spin_unlock(&s3cchan->vc.lock);
-
- return IRQ_HANDLED;
-}
-
-/*
- * The DMA ENGINE API
- */
-
-static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- LIST_HEAD(head);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
-
- if (!s3cchan->phy && !s3cchan->at) {
- dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
- s3cchan->id);
- ret = -EINVAL;
- goto unlock;
- }
-
- s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-
- /* Mark physical channel as free */
- if (s3cchan->phy)
- s3c24xx_dma_phy_free(s3cchan);
-
- /* Dequeue current job */
- if (s3cchan->at) {
- vchan_terminate_vdesc(&s3cchan->at->vd);
- s3cchan->at = NULL;
- }
-
- /* Dequeue jobs not yet fired as well */
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
-
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-
- return 0;
-
-unlock:
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- return ret;
-}
-
-static void s3c24xx_dma_synchronize(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
-
- vchan_synchronize(&s3cchan->vc);
-}
-
-static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
-{
- /* Ensure all queued descriptors are freed */
- vchan_free_chan_resources(to_virt_chan(chan));
-}
-
-static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- struct virt_dma_desc *vd;
- unsigned long flags;
- enum dma_status ret;
- size_t bytes = 0;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
-
- /*
- * There's no point calculating the residue if there's
- * no txstate to store the value.
- */
- if (ret == DMA_COMPLETE || !txstate) {
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
- }
-
- vd = vchan_find_desc(&s3cchan->vc, cookie);
- if (vd) {
- /* On the issued list, so hasn't been processed yet */
- txd = to_s3c24xx_txd(&vd->tx);
-
- list_for_each_entry(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
- } else {
- /*
- * Currently running, so sum over the pending sg's and
- * the currently active one.
- */
- txd = s3cchan->at;
-
- dsg = list_entry(txd->at, struct s3c24xx_sg, node);
- list_for_each_entry_from(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
-
- bytes += s3c24xx_dma_getbytes_chan(s3cchan);
- }
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- /*
- * This cookie not complete yet
- * Get number of bytes left in the active transactions and queue
- */
- dma_set_residue(txstate, bytes);
-
- /* Whether waiting or running, we're in progress */
- return ret;
-}
-
-/*
- * Initialize a descriptor to be used by memcpy submit
- */
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- int src_mod, dest_mod;
-
- dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
- len, s3cchan->name);
-
- if ((len & S3C24XX_DCON_TC_MASK) != len) {
- dev_err(&s3cdma->pdev->dev, "memcpy size %zu to large\n", len);
- return NULL;
- }
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->src_addr = src;
- dsg->dst_addr = dest;
- dsg->len = len;
-
- /*
- * Determine a suitable transfer width.
- * The DMA controller cannot fetch/store information which is not
- * naturally aligned on the bus, i.e., a 4 byte fetch must start at
- * an address divisible by 4 - more generally addr % width must be 0.
- */
- src_mod = src % 4;
- dest_mod = dest % 4;
- switch (len % 4) {
- case 0:
- txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
- break;
- case 2:
- txd->width = ((src_mod == 2 || src_mod == 0) &&
- (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
- break;
- default:
- txd->width = 1;
- break;
- }
-
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
- txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
- S3C24XX_DCON_SERV_WHOLE;
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- unsigned sg_len;
- dma_addr_t slave_addr;
- u32 hwcfg = 0;
- int i;
-
- dev_dbg(&s3cdma->pdev->dev,
- "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
- size, period, s3cchan->name);
-
- if (!is_slave_direction(direction)) {
- dev_err(&s3cdma->pdev->dev,
- "direction %d unsupported\n", direction);
- return NULL;
- }
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- txd->cyclic = 1;
-
- if (cdata->handshake)
- txd->dcon |= S3C24XX_DCON_HANDSHAKE;
-
- switch (cdata->bus) {
- case S3C24XX_DMA_APB:
- txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_APB;
- break;
- case S3C24XX_DMA_AHB:
- txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_AHB;
- break;
- }
-
- /*
- * Always assume our peripheral destination is a fixed
- * address in memory.
- */
- hwcfg |= S3C24XX_DISRCC_INC_FIXED;
-
- /*
- * Individual dma operations are requested by the slave,
- * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
- */
- txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
-
- if (direction == DMA_MEM_TO_DEV) {
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
- S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = hwcfg;
- slave_addr = s3cchan->cfg.dst_addr;
- txd->width = s3cchan->cfg.dst_addr_width;
- } else {
- txd->disrcc = hwcfg;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
- S3C24XX_DIDSTC_INC_INCREMENT;
- slave_addr = s3cchan->cfg.src_addr;
- txd->width = s3cchan->cfg.src_addr_width;
- }
-
- sg_len = size / period;
-
- for (i = 0; i < sg_len; i++) {
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->len = period;
- /* Check last period length */
- if (i == sg_len - 1)
- dsg->len = size - period * i;
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = addr + period * i;
- dsg->dst_addr = slave_addr;
- } else { /* DMA_DEV_TO_MEM */
- dsg->src_addr = slave_addr;
- dsg->dst_addr = addr + period * i;
- }
- }
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- struct scatterlist *sg;
- dma_addr_t slave_addr;
- u32 hwcfg = 0;
- int tmp;
-
- dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
- sg_dma_len(sgl), s3cchan->name);
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- if (cdata->handshake)
- txd->dcon |= S3C24XX_DCON_HANDSHAKE;
-
- switch (cdata->bus) {
- case S3C24XX_DMA_APB:
- txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_APB;
- break;
- case S3C24XX_DMA_AHB:
- txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_AHB;
- break;
- }
-
- /*
- * Always assume our peripheral destination is a fixed
- * address in memory.
- */
- hwcfg |= S3C24XX_DISRCC_INC_FIXED;
-
- /*
- * Individual dma operations are requested by the slave,
- * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
- */
- txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
-
- if (direction == DMA_MEM_TO_DEV) {
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
- S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = hwcfg;
- slave_addr = s3cchan->cfg.dst_addr;
- txd->width = s3cchan->cfg.dst_addr_width;
- } else if (direction == DMA_DEV_TO_MEM) {
- txd->disrcc = hwcfg;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
- S3C24XX_DIDSTC_INC_INCREMENT;
- slave_addr = s3cchan->cfg.src_addr;
- txd->width = s3cchan->cfg.src_addr_width;
- } else {
- s3c24xx_dma_free_txd(txd);
- dev_err(&s3cdma->pdev->dev,
- "direction %d unsupported\n", direction);
- return NULL;
- }
-
- for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else { /* DMA_DEV_TO_MEM */
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
- }
- }
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-/*
- * Slave transactions callback to the slave device to allow
- * synchronization of slave DMA signals with the DMAC enable
- */
-static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
- if (vchan_issue_pending(&s3cchan->vc)) {
- if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
- s3c24xx_dma_phy_alloc_and_start(s3cchan);
- }
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-}
-
-/*
- * Bringup and teardown
- */
-
-/*
- * Initialise the DMAC memcpy/slave channels.
- * Make a local wrapper to hold required data
- */
-static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
- struct dma_device *dmadev, unsigned int channels, bool slave)
-{
- struct s3c24xx_dma_chan *chan;
- int i;
-
- INIT_LIST_HEAD(&dmadev->channels);
-
- /*
- * Register as many memcpy channels as we have physical channels;
- * we won't always be able to use them all, but the code will have
- * to cope with that situation.
- */
- for (i = 0; i < channels; i++) {
- chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
-
- chan->id = i;
- chan->host = s3cdma;
- chan->state = S3C24XX_DMA_CHAN_IDLE;
-
- if (slave) {
- chan->slave = true;
- chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
- if (!chan->name)
- return -ENOMEM;
- } else {
- chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
- if (!chan->name)
- return -ENOMEM;
- }
- dev_dbg(dmadev->dev,
- "initialize virtual channel \"%s\"\n",
- chan->name);
-
- chan->vc.desc_free = s3c24xx_dma_desc_free;
- vchan_init(&chan->vc, dmadev);
- }
- dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
- i, slave ? "slave" : "memcpy");
- return i;
-}
-
-static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
-{
- struct s3c24xx_dma_chan *chan = NULL;
- struct s3c24xx_dma_chan *next;
-
- list_for_each_entry_safe(chan,
- next, &dmadev->channels, vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
- tasklet_kill(&chan->vc.task);
- }
-}
-
-/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
-static struct soc_data soc_s3c2410 = {
- .stride = 0x40,
- .has_reqsel = false,
- .has_clocks = false,
-};
-
-/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
-static struct soc_data soc_s3c2412 = {
- .stride = 0x40,
- .has_reqsel = true,
- .has_clocks = true,
-};
-
-/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
-static struct soc_data soc_s3c2443 = {
- .stride = 0x100,
- .has_reqsel = true,
- .has_clocks = true,
-};
-
-static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
- {
- .name = "s3c2410-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2410,
- }, {
- .name = "s3c2412-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2412,
- }, {
- .name = "s3c2443-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2443,
- },
- { },
-};
-
-static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
-{
- return (struct soc_data *)
- platform_get_device_id(pdev)->driver_data;
-}
-
-static int s3c24xx_dma_probe(struct platform_device *pdev)
-{
- const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s3c24xx_dma_engine *s3cdma;
- struct soc_data *sdata;
- struct resource *res;
- int ret;
- int i;
-
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
- /* Basic sanity check */
- if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
- pdata->num_phy_channels, MAX_DMA_CHANNELS);
- return -EINVAL;
- }
-
- sdata = s3c24xx_dma_get_soc_data(pdev);
- if (!sdata)
- return -EINVAL;
-
- s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
- if (!s3cdma)
- return -ENOMEM;
-
- s3cdma->pdev = pdev;
- s3cdma->pdata = pdata;
- s3cdma->sdata = sdata;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(s3cdma->base))
- return PTR_ERR(s3cdma->base);
-
- s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
- pdata->num_phy_channels,
- sizeof(struct s3c24xx_dma_phy),
- GFP_KERNEL);
- if (!s3cdma->phy_chans)
- return -ENOMEM;
-
- /* acquire irqs and clocks for all physical channels */
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- char clk_name[6];
-
- phy->id = i;
- phy->base = s3cdma->base + (i * sdata->stride);
- phy->host = s3cdma;
-
- phy->irq = platform_get_irq(pdev, i);
- if (phy->irq < 0)
- continue;
-
- ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
- 0, pdev->name, phy);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
- i, ret);
- continue;
- }
-
- if (sdata->has_clocks) {
- sprintf(clk_name, "dma.%d", i);
- phy->clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(phy->clk) && sdata->has_clocks) {
- dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
- i, PTR_ERR(phy->clk));
- continue;
- }
-
- ret = clk_prepare(phy->clk);
- if (ret) {
- dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
- i, ret);
- continue;
- }
- }
-
- spin_lock_init(&phy->lock);
- phy->valid = true;
-
- dev_dbg(&pdev->dev, "physical channel %d is %s\n",
- i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
- }
-
- /* Initialize memcpy engine */
- dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
- dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
- s3cdma->memcpy.dev = &pdev->dev;
- s3cdma->memcpy.device_free_chan_resources =
- s3c24xx_dma_free_chan_resources;
- s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
- s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
- s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
- s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
- s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
- s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
-
- /* Initialize slave engine for SoC internal dedicated peripherals */
- dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
- dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
- s3cdma->slave.dev = &pdev->dev;
- s3cdma->slave.device_free_chan_resources =
- s3c24xx_dma_free_chan_resources;
- s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
- s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
- s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
- s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
- s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
- s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
- s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
- s3cdma->slave.filter.map = pdata->slave_map;
- s3cdma->slave.filter.mapcnt = pdata->slavecnt;
- s3cdma->slave.filter.fn = s3c24xx_dma_filter;
-
- /* Register as many memcpy channels as there are physical channels */
- ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
- pdata->num_phy_channels, false);
- if (ret <= 0) {
- dev_warn(&pdev->dev,
- "%s failed to enumerate memcpy channels - %d\n",
- __func__, ret);
- goto err_memcpy;
- }
-
- /* Register slave channels */
- ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
- pdata->num_channels, true);
- if (ret <= 0) {
- dev_warn(&pdev->dev,
- "%s failed to enumerate slave channels - %d\n",
- __func__, ret);
- goto err_slave;
- }
-
- ret = dma_async_device_register(&s3cdma->memcpy);
- if (ret) {
- dev_warn(&pdev->dev,
- "%s failed to register memcpy as an async device - %d\n",
- __func__, ret);
- goto err_memcpy_reg;
- }
-
- ret = dma_async_device_register(&s3cdma->slave);
- if (ret) {
- dev_warn(&pdev->dev,
- "%s failed to register slave as an async device - %d\n",
- __func__, ret);
- goto err_slave_reg;
- }
-
- platform_set_drvdata(pdev, s3cdma);
- dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
- pdata->num_phy_channels);
-
- return 0;
-
-err_slave_reg:
- dma_async_device_unregister(&s3cdma->memcpy);
-err_memcpy_reg:
- s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
-err_slave:
- s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-err_memcpy:
- if (sdata->has_clocks)
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- if (phy->valid)
- clk_unprepare(phy->clk);
- }
-
- return ret;
-}
-
-static void s3c24xx_dma_free_irq(struct platform_device *pdev,
- struct s3c24xx_dma_engine *s3cdma)
-{
- int i;
-
- for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
-
- devm_free_irq(&pdev->dev, phy->irq, phy);
- }
-}
-
-static int s3c24xx_dma_remove(struct platform_device *pdev)
-{
- const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
- struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
- int i;
-
- dma_async_device_unregister(&s3cdma->slave);
- dma_async_device_unregister(&s3cdma->memcpy);
-
- s3c24xx_dma_free_irq(pdev, s3cdma);
-
- s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
- s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-
- if (sdata->has_clocks)
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- if (phy->valid)
- clk_unprepare(phy->clk);
- }
-
- return 0;
-}
-
-static struct platform_driver s3c24xx_dma_driver = {
- .driver = {
- .name = "s3c24xx-dma",
- },
- .id_table = s3c24xx_dma_driver_ids,
- .probe = s3c24xx_dma_probe,
- .remove = s3c24xx_dma_remove,
-};
-
-module_platform_driver(s3c24xx_dma_driver);
-
-bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
-{
- struct s3c24xx_dma_chan *s3cchan;
-
- if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
- return false;
-
- s3cchan = to_s3c24xx_dma_chan(chan);
-
- return s3cchan->id == (uintptr_t)param;
-}
-EXPORT_SYMBOL(s3c24xx_dma_filter);
-
-MODULE_DESCRIPTION("S3C24XX DMA Driver");
-MODULE_AUTHOR("Heiko Stuebner");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 6b524eb6bcf3..d1c6956af452 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -96,7 +96,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
- desc->in_use = true;
desc->dirn = DMA_MEM_TO_MEM;
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -290,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
struct sf_pdma_desc *desc;
desc = to_sf_pdma_desc(vdesc);
- desc->in_use = false;
+ kfree(desc);
}
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
@@ -494,7 +493,6 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
struct sf_pdma *pdma;
- struct resource *res;
int ret, n_chans;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@@ -519,8 +517,7 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+ pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index dcb3687bd5da..5c398a83b491 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -78,7 +78,6 @@ struct sf_pdma_desc {
u64 src_addr;
struct virt_dma_desc vdesc;
struct sf_pdma_chan *chan;
- bool in_use;
enum dma_transfer_direction dirn;
struct dma_async_tx_descriptor *async_tx;
};
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 476847a4916b..9479f29692d3 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -20,6 +20,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -66,8 +67,6 @@ struct rz_dmac_chan {
struct rz_dmac_desc *desc;
int descs_allocated;
- enum dma_slave_buswidth src_word_size;
- enum dma_slave_buswidth dst_word_size;
dma_addr_t src_per_address;
dma_addr_t dst_per_address;
@@ -92,6 +91,7 @@ struct rz_dmac_chan {
struct rz_dmac {
struct dma_device engine;
struct device *dev;
+ struct reset_control *rstc;
void __iomem *base;
void __iomem *ext_base;
@@ -601,9 +601,7 @@ static int rz_dmac_config(struct dma_chan *chan,
u32 val;
channel->src_per_address = config->src_addr;
- channel->src_word_size = config->src_addr_width;
channel->dst_per_address = config->dst_addr;
- channel->dst_word_size = config->dst_addr_width;
val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
if (val == CHCFG_DS_INVALID)
@@ -889,6 +887,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
/* Initialize the channels. */
INIT_LIST_HEAD(&dmac->engine.channels);
+ dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(dmac->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
+ "failed to get resets\n");
+
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
@@ -896,6 +899,10 @@ static int rz_dmac_probe(struct platform_device *pdev)
goto err_pm_disable;
}
+ ret = reset_control_deassert(dmac->rstc);
+ if (ret)
+ goto err_pm_runtime_put;
+
for (i = 0; i < dmac->n_channels; i++) {
ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
if (ret < 0)
@@ -940,6 +947,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
dma_register_err:
of_dma_controller_free(pdev->dev.of_node);
err:
+ reset_control_assert(dmac->rstc);
channel_num = i ? i - 1 : 0;
for (i = 0; i < channel_num; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -950,6 +958,7 @@ err:
channel->lmdesc.base_dma);
}
+err_pm_runtime_put:
pm_runtime_put(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
@@ -972,6 +981,7 @@ static int rz_dmac_remove(struct platform_device *pdev)
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&dmac->engine);
+ reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 158e5e7defae..588c5f409a80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -1047,6 +1047,5 @@ static void __exit shdma_exit(void)
}
module_exit(shdma_exit);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 5edaeb89d1e6..b14cf350b669 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -768,7 +768,6 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
- struct resource *mem;
unsigned int i;
int ret;
@@ -789,8 +788,7 @@ static int usb_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index ee3cbbf51006..e415bd9f4f2b 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -179,7 +179,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device_node *dma_node;
struct stm32_dmamux_data *stm32_dmamux;
- struct resource *res;
void __iomem *iomem;
struct reset_control *rst;
int i, count, ret;
@@ -238,8 +237,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
pm_runtime_get_noresume(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
@@ -400,4 +398,3 @@ arch_initcall(stm32_dmamux_init);
MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index b9d4c843635f..1d0e9dd72ab3 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1580,7 +1580,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
struct stm32_mdma_device *dmadev;
struct dma_device *dd;
struct device_node *of_node;
- struct resource *res;
struct reset_control *rst;
u32 nr_channels, nr_requests;
int i, count, ret;
@@ -1622,8 +1621,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
count);
dmadev->nr_ahb_addr_masks = count;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ dmadev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);
@@ -1816,4 +1814,3 @@ subsys_initcall(stm32_mdma_init);
MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index f291b1b4db32..e86c8829513a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1144,15 +1144,13 @@ handle_pending:
static int sun4i_dma_probe(struct platform_device *pdev)
{
struct sun4i_dma_dev *priv;
- struct resource *res;
int i, j, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index b7557f437936..ebfd29888b2f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -1283,7 +1284,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sun6i_dma_dev *sdc;
- struct resource *res;
int ret, i;
sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
@@ -1294,8 +1294,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc->cfg)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdc->base = devm_ioremap_resource(&pdev->dev, res);
+ sdc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdc->base))
return PTR_ERR(sdc->base);
@@ -1334,6 +1333,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&sdc->pending);
spin_lock_init(&sdc->lock);
+ dma_set_max_seg_size(&pdev->dev, SZ_32M - 1);
+
dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 1d1180db6d4e..8f67f453a492 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -711,6 +711,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
return err;
}
+ vchan_terminate_vdesc(&tdc->dma_desc->vd);
tegra_dma_disable(tdc);
tdc->dma_desc = NULL;
}
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index eaafcbe4ca94..cc6b91f48979 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -233,11 +233,6 @@ static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
writel(val, tdma->base_addr + reg);
}
-static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
-{
- return readl(tdma->base_addr + reg);
-}
-
static inline void tdc_write(struct tegra_dma_channel *tdc,
u32 reg, u32 val)
{
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index ae39b52012b2..b97004036071 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
int ret;
/* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+ tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
@@ -837,7 +837,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
- struct resource *res;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@@ -857,8 +856,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index b53d05b11ca5..acc950bf609c 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -10,6 +10,8 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j7200.o \
k3-psil-am64.o \
k3-psil-j721s2.o \
- k3-psil-am62.o
+ k3-psil-am62.o \
+ k3-psil-am62a.o \
+ k3-psil-j784s4.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 695915dba707..c3555cfb0681 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1039,7 +1039,6 @@ static int cppi41_dma_probe(struct platform_device *pdev)
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
- struct resource *mem;
int index;
int irq;
int ret;
@@ -1072,18 +1071,15 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (index < 0)
return index;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
- cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
+ cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(cdd->ctrl_mem))
return PTR_ERR(cdd->ctrl_mem);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
- cdd->sched_mem = devm_ioremap_resource(dev, mem);
+ cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1);
if (IS_ERR(cdd->sched_mem))
return PTR_ERR(cdd->sched_mem);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
- cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
+ cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2);
if (IS_ERR(cdd->qmgr_mem))
return PTR_ERR(cdd->qmgr_mem);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index fa06d7e6d8e3..9ea91c640c32 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -318,14 +318,6 @@ static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
edma_write(ecc, offset, val);
}
-static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
-{
- unsigned val = edma_read(ecc, offset);
-
- val &= and;
- edma_write(ecc, offset, val);
-}
-
static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
unsigned val = edma_read(ecc, offset);
diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c
new file mode 100644
index 000000000000..ca9d71f91422
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62a.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62a_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62a_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ PSIL_PDMA_XY_PKT(0xc30c),
+ PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62a_ep_map = {
+ .name = "am62a",
+ .src = am62a_src_ep_map,
+ .src_count = ARRAY_SIZE(am62a_src_ep_map),
+ .dst = am62a_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62a_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j784s4.c b/drivers/dma/ti/k3-psil-j784s4.c
new file mode 100644
index 000000000000..12bfa2478f92
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j784s4.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j784s4_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-4 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ PSIL_PDMA_MCASP(0x4403),
+ PSIL_PDMA_MCASP(0x4404),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4620),
+ PSIL_PDMA_XY_PKT(0x4621),
+ PSIL_PDMA_XY_PKT(0x4622),
+ PSIL_PDMA_XY_PKT(0x4623),
+ PSIL_PDMA_XY_PKT(0x4624),
+ PSIL_PDMA_XY_PKT(0x4625),
+ PSIL_PDMA_XY_PKT(0x4626),
+ PSIL_PDMA_XY_PKT(0x4627),
+ PSIL_PDMA_XY_PKT(0x4628),
+ PSIL_PDMA_XY_PKT(0x4629),
+ PSIL_PDMA_XY_PKT(0x462a),
+ PSIL_PDMA_XY_PKT(0x462b),
+ PSIL_PDMA_XY_PKT(0x462c),
+ PSIL_PDMA_XY_PKT(0x462d),
+ PSIL_PDMA_XY_PKT(0x462e),
+ PSIL_PDMA_XY_PKT(0x462f),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0x4640),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4900),
+ PSIL_CSI2RX(0x4901),
+ PSIL_CSI2RX(0x4902),
+ PSIL_CSI2RX(0x4903),
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
+ PSIL_CSI2RX(0x4980),
+ PSIL_CSI2RX(0x4981),
+ PSIL_CSI2RX(0x4982),
+ PSIL_CSI2RX(0x4983),
+ PSIL_CSI2RX(0x4984),
+ PSIL_CSI2RX(0x4985),
+ PSIL_CSI2RX(0x4986),
+ PSIL_CSI2RX(0x4987),
+ PSIL_CSI2RX(0x4988),
+ PSIL_CSI2RX(0x4989),
+ PSIL_CSI2RX(0x498a),
+ PSIL_CSI2RX(0x498b),
+ PSIL_CSI2RX(0x498c),
+ PSIL_CSI2RX(0x498d),
+ PSIL_CSI2RX(0x498e),
+ PSIL_CSI2RX(0x498f),
+ PSIL_CSI2RX(0x4990),
+ PSIL_CSI2RX(0x4991),
+ PSIL_CSI2RX(0x4992),
+ PSIL_CSI2RX(0x4993),
+ PSIL_CSI2RX(0x4994),
+ PSIL_CSI2RX(0x4995),
+ PSIL_CSI2RX(0x4996),
+ PSIL_CSI2RX(0x4997),
+ PSIL_CSI2RX(0x4998),
+ PSIL_CSI2RX(0x4999),
+ PSIL_CSI2RX(0x499a),
+ PSIL_CSI2RX(0x499b),
+ PSIL_CSI2RX(0x499c),
+ PSIL_CSI2RX(0x499d),
+ PSIL_CSI2RX(0x499e),
+ PSIL_CSI2RX(0x499f),
+ /* MAIN_CPSW9G */
+ PSIL_ETHERNET(0x4a00),
+ /* MAIN-SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
+ /* MCU_CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* MCU_SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j784s4_dst_ep_map[] = {
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0xc640),
+ PSIL_ETHERNET(0xc641),
+ PSIL_ETHERNET(0xc642),
+ PSIL_ETHERNET(0xc643),
+ PSIL_ETHERNET(0xc644),
+ PSIL_ETHERNET(0xc645),
+ PSIL_ETHERNET(0xc646),
+ PSIL_ETHERNET(0xc647),
+ /* MAIN_CPSW9G */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* MAIN-SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ PSIL_PDMA_XY_PKT(0xc608),
+ PSIL_PDMA_XY_PKT(0xc609),
+ PSIL_PDMA_XY_PKT(0xc60a),
+ PSIL_PDMA_XY_PKT(0xc60b),
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0xc620),
+ PSIL_PDMA_XY_PKT(0xc621),
+ PSIL_PDMA_XY_PKT(0xc622),
+ PSIL_PDMA_XY_PKT(0xc623),
+ PSIL_PDMA_XY_PKT(0xc624),
+ PSIL_PDMA_XY_PKT(0xc625),
+ PSIL_PDMA_XY_PKT(0xc626),
+ PSIL_PDMA_XY_PKT(0xc627),
+ PSIL_PDMA_XY_PKT(0xc628),
+ PSIL_PDMA_XY_PKT(0xc629),
+ PSIL_PDMA_XY_PKT(0xc62a),
+ PSIL_PDMA_XY_PKT(0xc62b),
+ PSIL_PDMA_XY_PKT(0xc62c),
+ PSIL_PDMA_XY_PKT(0xc62d),
+ PSIL_PDMA_XY_PKT(0xc62e),
+ PSIL_PDMA_XY_PKT(0xc62f),
+ /* MCU_CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA_MISC_G0 - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA_MISC_G1 - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j784s4_ep_map = {
+ .name = "j784s4",
+ .src = j784s4_src_ep_map,
+ .src_count = ARRAY_SIZE(j784s4_src_ep_map),
+ .dst = j784s4_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j784s4_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index 74fa9ec02968..c383723d1c8f 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -43,5 +43,7 @@ extern struct psil_ep_map j7200_ep_map;
extern struct psil_ep_map am64_ep_map;
extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
+extern struct psil_ep_map am62a_ep_map;
+extern struct psil_ep_map j784s4_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 8b6533a1eeeb..c11389d67a3f 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -24,6 +24,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_ep_map },
{ .family = "J721S2", .data = &j721s2_ep_map },
{ .family = "AM62X", .data = &am62_ep_map },
+ { .family = "AM62AX", .data = &am62a_ep_map },
+ { .family = "J784S4", .data = &j784s4_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index ce8b80bb34d7..fc3a2a05ab7b 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -135,6 +135,7 @@ struct udma_match_data {
u32 flags;
u32 statictr_z_mask;
u8 burst_size[3];
+ struct udma_soc_data *soc_data;
};
struct udma_soc_data {
@@ -304,6 +305,8 @@ struct udma_chan {
/* Channel configuration parameters */
struct udma_chan_config config;
+ /* Channel configuration parameters (backup) */
+ struct udma_chan_config backup_config;
/* dmapool for packet mode descriptors */
bool use_dma_pool;
@@ -762,11 +765,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
if (uc->desc->dir == DMA_DEV_TO_MEM) {
udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
- udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ if (uc->config.ep_type != PSIL_EP_NATIVE)
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
} else {
udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
- if (!uc->bchan)
+ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
}
@@ -2962,6 +2966,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
struct scatterlist *sgent;
struct cppi5_tr_type15_t *tr_req = NULL;
enum dma_slave_buswidth dev_width;
+ u32 csf = CPPI5_TR_CSF_SUPR_EVT;
u16 tr_cnt0, tr_cnt1;
dma_addr_t dev_addr;
struct udma_desc *d;
@@ -3032,6 +3037,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
asel = 0;
+ csf |= CPPI5_TR_CSF_EOL_ICNT0;
} else {
asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
dev_addr |= asel;
@@ -3055,7 +3061,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
uc->config.tr_trigger_type,
CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
@@ -3101,8 +3107,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
false, true,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
uc->config.tr_trigger_type,
CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
@@ -3146,8 +3151,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->residue += sg_len;
}
- cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
- CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);
return d;
}
@@ -3676,6 +3680,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int num_tr;
size_t tr_size = sizeof(struct cppi5_tr_type15_t);
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+ u32 csf = CPPI5_TR_CSF_SUPR_EVT;
if (uc->config.dir != DMA_MEM_TO_MEM) {
dev_err(chan->device->dev,
@@ -3706,13 +3711,15 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ } else {
+ csf |= CPPI5_TR_CSF_EOL_ICNT0;
}
tr_req = d->hwdesc[0].tr_req_base;
cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[0].flags, csf);
tr_req[0].addr = src;
tr_req[0].icnt0 = tr0_cnt0;
@@ -3731,7 +3738,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (num_tr == 2) {
cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[1].flags, csf);
tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
tr_req[1].icnt0 = tr1_cnt0;
@@ -3746,8 +3753,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
tr_req[1].dicnt3 = 1;
}
- cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
- CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);
if (uc->config.metadata_size)
d->vd.tx.metadata_ops = &metadata_ops;
@@ -4295,6 +4301,25 @@ static struct udma_match_data j721e_mcu_data = {
},
};
+static struct udma_soc_data am62a_dmss_csi_soc_data = {
+ .oes = {
+ .bcdma_rchan_data = 0xe00,
+ .bcdma_rchan_ring = 0x1000,
+ },
+};
+
+static struct udma_match_data am62a_bcdma_csirx_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &am62a_dmss_csi_soc_data,
+};
+
static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
@@ -4344,6 +4369,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,am64-dmss-pktdma",
.data = &am64_pktdma_data,
},
+ {
+ .compatible = "ti,am62a-dmss-bcdma-csirx",
+ .data = &am62a_bcdma_csirx_data,
+ },
{ /* Sentinel */ },
};
@@ -4386,6 +4415,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_soc_data },
{ .family = "J721S2", .data = &j721e_soc_data},
{ .family = "AM62X", .data = &am64_soc_data },
+ { .family = "AM62AX", .data = &am64_soc_data },
+ { .family = "J784S4", .data = &j721e_soc_data },
{ /* sentinel */ }
};
@@ -4774,7 +4805,10 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].num = rm_res->desc[i].num;
}
}
+ } else {
+ i = 0;
}
+
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
@@ -5270,12 +5304,15 @@ static int udma_probe(struct platform_device *pdev)
}
ud->match_data = match->data;
- soc = soc_device_match(k3_soc_devices);
- if (!soc) {
- dev_err(dev, "No compatible SoC found\n");
- return -ENODEV;
+ ud->soc_data = ud->match_data->soc_data;
+ if (!ud->soc_data) {
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
}
- ud->soc_data = soc->data;
ret = udma_get_mmrs(pdev, ud);
if (ret)
@@ -5344,7 +5381,6 @@ static int udma_probe(struct platform_device *pdev)
dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi.domain) {
- dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
@@ -5491,11 +5527,63 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
+static int udma_pm_suspend(struct device *dev)
+{
+ struct udma_dev *ud = dev_get_drvdata(dev);
+ struct dma_device *dma_dev = &ud->ddev;
+ struct dma_chan *chan;
+ struct udma_chan *uc;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ uc = to_udma_chan(chan);
+ /* backup the channel configuration */
+ memcpy(&uc->backup_config, &uc->config,
+ sizeof(struct udma_chan_config));
+ dev_dbg(dev, "Suspending channel %s\n",
+ dma_chan_name(chan));
+ ud->ddev.device_free_chan_resources(chan);
+ }
+ }
+
+ return 0;
+}
+
+static int udma_pm_resume(struct device *dev)
+{
+ struct udma_dev *ud = dev_get_drvdata(dev);
+ struct dma_device *dma_dev = &ud->ddev;
+ struct dma_chan *chan;
+ struct udma_chan *uc;
+ int ret;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ uc = to_udma_chan(chan);
+ /* restore the channel configuration */
+ memcpy(&uc->config, &uc->backup_config,
+ sizeof(struct udma_chan_config));
+ dev_dbg(dev, "Resuming channel %s\n",
+ dma_chan_name(chan));
+ ret = ud->ddev.device_alloc_chan_resources(chan);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops udma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
+};
+
static struct platform_driver udma_driver = {
.driver = {
.name = "ti-udma",
.of_match_table = udma_of_match,
.suppress_bind_attrs = true,
+ .pm = &udma_pm_ops,
},
.probe = udma_probe,
};
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 27f5019bdc1e..02e1c08c596d 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1658,7 +1658,6 @@ static int omap_dma_probe(struct platform_device *pdev)
{
const struct omap_dma_config *conf;
struct omap_dmadev *od;
- struct resource *res;
int rc, i, irq;
u32 val;
@@ -1666,8 +1665,7 @@ static int omap_dma_probe(struct platform_device *pdev)
if (!od)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- od->base = devm_ioremap_resource(&pdev->dev, res);
+ od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 767bb45f641f..ebaa93644c94 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_XDMA) += xdma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DPDMA) += xilinx_dpdma.o
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
new file mode 100644
index 000000000000..dd98b4526b90
--- /dev/null
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DMA_XDMA_REGS_H
+#define __DMA_XDMA_REGS_H
+
+/* The length of register space exposed to host */
+#define XDMA_REG_SPACE_LEN 65536
+
+/*
+ * maximum number of DMA channels for each direction:
+ * Host to Card (H2C) or Card to Host (C2H)
+ */
+#define XDMA_MAX_CHANNELS 4
+
+/*
+ * Macros to define the number of descriptor blocks that can be used in one
+ * DMA transfer request.
+ * The DMA engine uses a linked list of descriptor blocks that specify the
+ * source, destination, and length of the DMA transfers.
+ */
+#define XDMA_DESC_BLOCK_NUM BIT(7)
+#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1)
+
+/* descriptor definitions */
+#define XDMA_DESC_ADJACENT 32
+#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1)
+#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8)
+#define XDMA_DESC_MAGIC 0xad4bUL
+#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16)
+#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0)
+#define XDMA_DESC_STOPPED BIT(0)
+#define XDMA_DESC_COMPLETED BIT(1)
+#define XDMA_DESC_BLEN_BITS 28
+#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE)
+
+/* macros to construct the descriptor control word */
+#define XDMA_DESC_CONTROL(adjacent, flag) \
+ (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \
+ FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \
+ FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
+#define XDMA_DESC_CONTROL_LAST \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
+
+/*
+ * Descriptor for a single contiguous memory block transfer.
+ *
+ * Multiple descriptors are linked by means of the next pointer. An additional
+ * extra adjacent number gives the amount of extra contiguous descriptors.
+ *
+ * The descriptors are in root complex memory, and the bytes in the 32-bit
+ * words must be in little-endian byte ordering.
+ */
+struct xdma_hw_desc {
+ __le32 control;
+ __le32 bytes;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le64 next_desc;
+};
+
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 4096
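+
+/*
+ * Illustrative example: the control word of the final descriptor in a
+ * transfer chain is built with XDMA_DESC_CONTROL_LAST, i.e. the 0xad4b
+ * magic, no extra adjacent descriptors and the STOPPED/COMPLETED flags.
+ * With a hypothetical 'struct xdma_hw_desc *last' this would read:
+ *
+ *	last->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+ */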
+
+/*
+ * Channel registers
+ */
+#define XDMA_CHAN_IDENTIFIER 0x0
+#define XDMA_CHAN_CONTROL 0x4
+#define XDMA_CHAN_CONTROL_W1S 0x8
+#define XDMA_CHAN_CONTROL_W1C 0xc
+#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_COMPLETED_DESC 0x48
+#define XDMA_CHAN_ALIGNMENTS 0x4c
+#define XDMA_CHAN_INTR_ENABLE 0x90
+#define XDMA_CHAN_INTR_ENABLE_W1S 0x94
+#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c
+
+#define XDMA_CHAN_STRIDE 0x100
+#define XDMA_CHAN_H2C_OFFSET 0x0
+#define XDMA_CHAN_C2H_OFFSET 0x1000
+#define XDMA_CHAN_H2C_TARGET 0x0
+#define XDMA_CHAN_C2H_TARGET 0x1
+
+/* macro to check if channel is available */
+#define XDMA_CHAN_MAGIC 0x1fc0
+#define XDMA_CHAN_CHECK_TARGET(id, target) \
+ (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target))
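+
+/*
+ * Illustrative use: with 'id' read from a channel's XDMA_CHAN_IDENTIFIER
+ * register, an H2C engine can be detected with a check such as the one
+ * below (register_h2c_channel() is a hypothetical helper):
+ *
+ *	if (XDMA_CHAN_CHECK_TARGET(id, XDMA_CHAN_H2C_TARGET))
+ *		register_h2c_channel();
+ */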
+
+/* bits of the channel control register */
+#define CHAN_CTRL_RUN_STOP BIT(0)
+#define CHAN_CTRL_IE_DESC_STOPPED BIT(1)
+#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2)
+#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
+#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
+#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
+#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
+#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
+#define CHAN_CTRL_POLL_MODE_WB BIT(26)
+
+#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \
+ CHAN_CTRL_IE_DESC_STOPPED | \
+ CHAN_CTRL_IE_DESC_COMPLETED | \
+ CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
+/* bits of the channel interrupt enable mask */
+#define CHAN_IM_DESC_ERROR BIT(19)
+#define CHAN_IM_READ_ERROR BIT(9)
+#define CHAN_IM_IDLE_STOPPED BIT(6)
+#define CHAN_IM_MAGIC_STOPPED BIT(4)
+#define CHAN_IM_DESC_COMPLETED BIT(2)
+#define CHAN_IM_DESC_STOPPED BIT(1)
+
+#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \
+ CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \
+ CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED)
+
+/*
+ * Channel SGDMA registers
+ */
+#define XDMA_SGDMA_IDENTIFIER 0x4000
+#define XDMA_SGDMA_DESC_LO 0x4080
+#define XDMA_SGDMA_DESC_HI 0x4084
+#define XDMA_SGDMA_DESC_ADJ 0x4088
+#define XDMA_SGDMA_DESC_CREDIT 0x408c
+
+/* bits of the SG DMA control register */
+#define XDMA_CTRL_RUN_STOP BIT(0)
+#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
+#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
+#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
+#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
+#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
+#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
+#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
+#define XDMA_CTRL_POLL_MODE_WB BIT(26)
+
+/*
+ * interrupt registers
+ */
+#define XDMA_IRQ_IDENTIFIER 0x2000
+#define XDMA_IRQ_USER_INT_EN 0x2004
+#define XDMA_IRQ_USER_INT_EN_W1S 0x2008
+#define XDMA_IRQ_USER_INT_EN_W1C 0x200c
+#define XDMA_IRQ_CHAN_INT_EN 0x2010
+#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014
+#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018
+#define XDMA_IRQ_USER_INT_REQ 0x2040
+#define XDMA_IRQ_CHAN_INT_REQ 0x2044
+#define XDMA_IRQ_USER_INT_PEND 0x2048
+#define XDMA_IRQ_CHAN_INT_PEND 0x204c
+#define XDMA_IRQ_USER_VEC_NUM 0x2080
+#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0
+
+#define XDMA_IRQ_VEC_SHIFT 8
+
+#endif /* __DMA_XDMA_REGS_H */
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
new file mode 100644
index 000000000000..93ee298d52b8
--- /dev/null
+++ b/drivers/dma/xilinx/xdma.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx DMA/Bridge Subsystem
+ *
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
+ * between Host memory and the DMA subsystem. It does this by operating on
+ * 'descriptors' that contain information about the source, destination and
+ * amount of data to transfer. These direct memory transfers can be in both
+ * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
+ * configured to have a single AXI4 Master interface shared by all channels
+ * or one AXI4-Stream interface for each channel enabled. Memory transfers are
+ * specified on a per-channel basis in descriptor linked lists, which the DMA
+ * fetches from host memory and processes. Events such as descriptor completion
+ * and errors are signaled using interrupts. The core also provides up to 16
+ * user interrupt wires that generate interrupts to the host.
+ */
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/regmap.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/amd_xdma.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/amd_xdma.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include "../virt-dma.h"
+#include "xdma-regs.h"
+
+/* mmio regmap config for all XDMA registers */
+static const struct regmap_config xdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = XDMA_REG_SPACE_LEN,
+};
+
+/**
+ * struct xdma_desc_block - Descriptor block
+ * @virt_addr: Virtual address of block start
+ * @dma_addr: DMA address of block start
+ */
+struct xdma_desc_block {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_chan - Driver specific DMA channel structure
+ * @vchan: Virtual channel
+ * @xdev_hdl: Pointer to DMA device structure
+ * @base: Offset of channel registers
+ * @desc_pool: Descriptor pool
+ * @busy: Busy flag of the channel
+ * @dir: Transferring direction of the channel
+ * @cfg: Transferring config of the channel
+ * @irq: IRQ assigned to the channel
+ */
+struct xdma_chan {
+ struct virt_dma_chan vchan;
+ void *xdev_hdl;
+ u32 base;
+ struct dma_pool *desc_pool;
+ bool busy;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ u32 irq;
+};
+
+/**
+ * struct xdma_desc - DMA desc structure
+ * @vdesc: Virtual DMA descriptor
+ * @chan: DMA channel pointer
+ * @dir: Transferring direction of the request
+ * @dev_addr: Physical address on DMA device side
+ * @desc_blocks: Hardware descriptor blocks
+ * @dblk_num: Number of hardware descriptor blocks
+ * @desc_num: Number of hardware descriptors
+ * @completed_desc_num: Completed hardware descriptors
+ */
+struct xdma_desc {
+ struct virt_dma_desc vdesc;
+ struct xdma_chan *chan;
+ enum dma_transfer_direction dir;
+ u64 dev_addr;
+ struct xdma_desc_block *desc_blocks;
+ u32 dblk_num;
+ u32 desc_num;
+ u32 completed_desc_num;
+};
+
+#define XDMA_DEV_STATUS_REG_DMA BIT(0)
+#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
+
+/**
+ * struct xdma_device - DMA device structure
+ * @pdev: Platform device pointer
+ * @dma_dev: DMA device structure
+ * @rmap: MMIO regmap for DMA registers
+ * @h2c_chans: Host to Card channels
+ * @c2h_chans: Card to Host channels
+ * @h2c_chan_num: Number of H2C channels
+ * @c2h_chan_num: Number of C2H channels
+ * @irq_start: Start IRQ assigned to device
+ * @irq_num: Number of IRQ assigned to device
+ * @status: Initialization status
+ */
+struct xdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *rmap;
+ struct xdma_chan *h2c_chans;
+ struct xdma_chan *c2h_chans;
+ u32 h2c_chan_num;
+ u32 c2h_chan_num;
+ u32 irq_start;
+ u32 irq_num;
+ u32 status;
+};
+
+#define xdma_err(xdev, fmt, args...) \
+ dev_err(&(xdev)->pdev->dev, fmt, ##args)
+#define XDMA_CHAN_NUM(_xd) ({ \
+ typeof(_xd) (xd) = (_xd); \
+ ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
+
+/* Get the last desc in a desc block */
+static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
+{
+ return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
+}
+
+/**
+ * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ u32 last_blk_desc, desc_control;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
+ for (i = 1; i < sw_desc->dblk_num; i++) {
+ block = &sw_desc->desc_blocks[i - 1];
+ desc = xdma_blk_last_desc(block);
+
+ if (!(i & XDMA_DESC_BLOCK_MASK)) {
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+ continue;
+ }
+ desc->control = cpu_to_le32(desc_control);
+ desc->next_desc = cpu_to_le64(block[1].dma_addr);
+ }
+
+ /* update the last block */
+ last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
+ desc = xdma_blk_last_desc(block);
+ desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
+ desc->control = cpu_to_le32(desc_control);
+ }
+
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
+ desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+}
+
+static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xdma_chan, vchan.chan);
+}
+
+static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct xdma_desc, vdesc);
+}
+
+/**
+ * xdma_channel_init - Initialize DMA channel registers
+ * @chan: DMA channel pointer
+ */
+static int xdma_channel_init(struct xdma_chan *chan)
+{
+ struct xdma_device *xdev = chan->xdev_hdl;
+ int ret;
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_NON_INCR_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
+ CHAN_IM_ALL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_free_desc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void xdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct xdma_desc *sw_desc;
+ int i;
+
+ sw_desc = to_xdma_desc(vdesc);
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ if (!sw_desc->desc_blocks[i].virt_addr)
+ break;
+ dma_pool_free(sw_desc->chan->desc_pool,
+ sw_desc->desc_blocks[i].virt_addr,
+ sw_desc->desc_blocks[i].dma_addr);
+ }
+ kfree(sw_desc->desc_blocks);
+ kfree(sw_desc);
+}
+
+/**
+ * xdma_alloc_desc - Allocate descriptor
+ * @chan: DMA channel pointer
+ * @desc_num: Number of hardware descriptors
+ */
+static struct xdma_desc *
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
+{
+ struct xdma_desc *sw_desc;
+ struct xdma_hw_desc *desc;
+ dma_addr_t dma_addr;
+ u32 dblk_num;
+ void *addr;
+ int i, j;
+
+ sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->chan = chan;
+ sw_desc->desc_num = desc_num;
+ dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
+ sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
+ GFP_NOWAIT);
+ if (!sw_desc->desc_blocks)
+ goto failed;
+
+ sw_desc->dblk_num = dblk_num;
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
+ if (!addr)
+ goto failed;
+
+ sw_desc->desc_blocks[i].virt_addr = addr;
+ sw_desc->desc_blocks[i].dma_addr = dma_addr;
+ for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
+ desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
+ }
+
+ xdma_link_desc_blocks(sw_desc);
+
+ return sw_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
+ * xdma_xfer_start - Start DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_start(struct xdma_chan *xchan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct xdma_desc_block *block;
+ u32 val, completed_blocks;
+ struct xdma_desc *desc;
+ int ret;
+
+ /*
+ * Return an error if there is no submitted descriptor or the channel is busy.
+ * The vchan lock must be held where this function is called.
+ */
+ if (!vd || xchan->busy)
+ return -EINVAL;
+
+ /* clear run stop bit to get ready for transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ desc = to_xdma_desc(vd);
+ if (desc->dir != xchan->dir) {
+ xdma_err(xdev, "incorrect request direction");
+ return -EINVAL;
+ }
+
+ /* set DMA engine to the first descriptor block */
+ completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
+ block = &desc->desc_blocks[completed_blocks];
+ val = lower_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
+ if (ret)
+ return ret;
+
+ val = upper_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
+ if (ret)
+ return ret;
+
+ if (completed_blocks + 1 == desc->dblk_num)
+ val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ else
+ val = XDMA_DESC_ADJACENT - 1;
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
+ if (ret)
+ return ret;
+
+ /* kick off DMA transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
+ CHAN_CTRL_START);
+ if (ret)
+ return ret;
+
+ xchan->busy = true;
+ return 0;
+}
+
+/**
+ * xdma_alloc_channels - Detect and allocate DMA channels
+ * @xdev: DMA device pointer
+ * @dir: Channel direction
+ */
+static int xdma_alloc_channels(struct xdma_device *xdev,
+ enum dma_transfer_direction dir)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
+ struct xdma_chan **chans, *xchan;
+ u32 base, identifier, target;
+ u32 *chan_num;
+ int i, j, ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ base = XDMA_CHAN_H2C_OFFSET;
+ target = XDMA_CHAN_H2C_TARGET;
+ chans = &xdev->h2c_chans;
+ chan_num = &xdev->h2c_chan_num;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ base = XDMA_CHAN_C2H_OFFSET;
+ target = XDMA_CHAN_C2H_TARGET;
+ chans = &xdev->c2h_chans;
+ chan_num = &xdev->c2h_chan_num;
+ } else {
+ xdma_err(xdev, "invalid direction specified");
+ return -EINVAL;
+ }
+
+ /* detect number of available DMA channels */
+ for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ /* check if this is an available DMA channel */
+ if (XDMA_CHAN_CHECK_TARGET(identifier, target))
+ (*chan_num)++;
+ }
+
+ if (!*chan_num) {
+ xdma_err(xdev, "does not probe any channel");
+ return -EINVAL;
+ }
+
+ *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
+ GFP_KERNEL);
+ if (!*chans)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
+ continue;
+
+ if (j == *chan_num) {
+ xdma_err(xdev, "invalid channel number");
+ return -EIO;
+ }
+
+ /* init channel structure and hardware */
+ xchan = &(*chans)[j];
+ xchan->xdev_hdl = xdev;
+ xchan->base = base + i * XDMA_CHAN_STRIDE;
+ xchan->dir = dir;
+
+ ret = xdma_channel_init(xchan);
+ if (ret)
+ return ret;
+ xchan->vchan.desc_free = xdma_free_desc;
+ vchan_init(&xchan->vchan, &xdev->dma_dev);
+
+ j++;
+ }
+
+ dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
+ (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
+
+ return 0;
+}
+
+/**
+ * xdma_issue_pending - Issue pending transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_issue_pending(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+ if (vchan_issue_pending(&xdma_chan->vchan))
+ xdma_xfer_start(xdma_chan);
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+}
+
+/**
+ * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
+ * @chan: DMA channel pointer
+ * @sgl: Transfer scatter gather list
+ * @sg_len: Length of scatter gather list
+ * @dir: Transfer direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct dma_async_tx_descriptor *tx_desc;
+ u32 desc_num = 0, i, len, rest;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+ struct xdma_desc *sw_desc;
+ u64 dev_addr, *src, *dst;
+ struct scatterlist *sg;
+ u64 addr;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = dir;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ dblk = sw_desc->desc_blocks;
+ desc = dblk->virt_addr;
+ desc_num = 1;
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ rest = sg_dma_len(sg);
+
+ do {
+ len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(*src);
+ desc->dst_addr = cpu_to_le64(*dst);
+
+ if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
+ dblk++;
+ desc = dblk->virt_addr;
+ } else {
+ desc++;
+ }
+
+ desc_num++;
+ dev_addr += len;
+ addr += len;
+ rest -= len;
+ } while (rest);
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
+ * xdma_device_config - Configure the DMA channel
+ * @chan: DMA channel
+ * @cfg: channel configuration
+ */
+static int xdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+/**
+ * xdma_free_chan_resources - Free channel resources
+ * @chan: DMA channel
+ */
+static void xdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ vchan_free_chan_resources(&xdma_chan->vchan);
+ dma_pool_destroy(xdma_chan->desc_pool);
+ xdma_chan->desc_pool = NULL;
+}
+
+/**
+ * xdma_alloc_chan_resources - Allocate channel resources
+ * @chan: DMA channel
+ */
+static int xdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ struct device *dev = xdev->dma_dev.dev;
+
+ while (dev && !dev_is_pci(dev))
+ dev = dev->parent;
+ if (!dev) {
+ xdma_err(xdev, "unable to find pci device");
+ return -EINVAL;
+ }
+
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
+ dev, XDMA_DESC_BLOCK_SIZE,
+ XDMA_DESC_BLOCK_ALIGN, 0);
+ if (!xdma_chan->desc_pool) {
+ xdma_err(xdev, "unable to allocate descriptor pool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * xdma_channel_isr - XDMA channel interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the DMA channel structure
+ */
+static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+{
+ struct xdma_chan *xchan = dev_id;
+ u32 complete_desc_num = 0;
+ struct xdma_device *xdev;
+ struct virt_dma_desc *vd;
+ struct xdma_desc *desc;
+ int ret;
+
+ spin_lock(&xchan->vchan.lock);
+
+ /* get submitted request */
+ vd = vchan_next_desc(&xchan->vchan);
+ if (!vd)
+ goto out;
+
+ xchan->busy = false;
+ desc = to_xdma_desc(vd);
+ xdev = xchan->xdev_hdl;
+
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
+ &complete_desc_num);
+ if (ret)
+ goto out;
+
+ desc->completed_desc_num += complete_desc_num;
+ /*
+ * if all data blocks are transferred, remove and complete the request
+ */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
+
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
+
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+
+out:
+ spin_unlock(&xchan->vchan.lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xdma_irq_fini - Uninitialize IRQ
+ * @xdev: DMA device pointer
+ */
+static void xdma_irq_fini(struct xdma_device *xdev)
+{
+ int i;
+
+ /* disable interrupt */
+ regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
+
+ /* free irq handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ for (i = 0; i < xdev->c2h_chan_num; i++)
+ free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
+}
+
+/**
+ * xdma_set_vector_reg - configure hardware IRQ registers
+ * @xdev: DMA device pointer
+ * @vec_tbl_start: Start of IRQ registers
+ * @irq_start: Start of IRQ
+ * @irq_num: Number of IRQ
+ */
+static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
+ u32 irq_start, u32 irq_num)
+{
+ u32 shift, i, val = 0;
+ int ret;
+
+ /* Each IRQ register is 32 bit and contains 4 IRQs */
+ while (irq_num > 0) {
+ for (i = 0; i < 4 && irq_num > 0; i++) {
+ shift = XDMA_IRQ_VEC_SHIFT * i;
+ val |= irq_start << shift;
+ irq_start++;
+ irq_num--;
+ }
+
+ /* write IRQ register */
+ ret = regmap_write(xdev->rmap, vec_tbl_start, val);
+ if (ret)
+ return ret;
+ vec_tbl_start += sizeof(u32);
+ val = 0;
+ }
+
+ return 0;
+}
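/*
 * Worked example (assumed values, for illustration): with irq_start = 3 and
 * irq_num = 4, a single register write is performed with
 *   val = 3 | (4 << 8) | (5 << 16) | (6 << 24) = 0x06050403,
 * i.e. one 8-bit vector number per byte, the lowest channel in byte 0.
 */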
+
+/**
+ * xdma_irq_init - initialize IRQs
+ * @xdev: DMA device pointer
+ */
+static int xdma_irq_init(struct xdma_device *xdev)
+{
+ u32 irq = xdev->irq_start;
+ u32 user_irq_start;
+ int i, j, ret;
+
+ /* return failure if there are not enough IRQs */
+ if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
+ xdma_err(xdev, "not enough irq");
+ return -EINVAL;
+ }
+
+ /* setup H2C interrupt handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-h2c-channel", &xdev->h2c_chans[i]);
+ if (ret) {
+ xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+ i, irq, ret);
+ goto failed_init_h2c;
+ }
+ xdev->h2c_chans[i].irq = irq;
+ irq++;
+ }
+
+ /* setup C2H interrupt handler */
+ for (j = 0; j < xdev->c2h_chan_num; j++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-c2h-channel", &xdev->c2h_chans[j]);
+ if (ret) {
+ xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+ j, irq, ret);
+ goto failed_init_c2h;
+ }
+ xdev->c2h_chans[j].irq = irq;
+ irq++;
+ }
+
+ /* config hardware IRQ registers */
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
+ XDMA_CHAN_NUM(xdev));
+ if (ret) {
+ xdma_err(xdev, "failed to set channel vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+
+ /* config user IRQ registers if needed */
+ user_irq_start = XDMA_CHAN_NUM(xdev);
+ if (xdev->irq_num > user_irq_start) {
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
+ user_irq_start,
+ xdev->irq_num - user_irq_start);
+ if (ret) {
+ xdma_err(xdev, "failed to set user vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+ }
+
+ /* enable interrupt */
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
+ if (ret)
+ goto failed_init_c2h;
+
+ return 0;
+
+failed_init_c2h:
+ while (j--)
+ free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
+failed_init_h2c:
+ while (i--)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ return ret;
+}
+
+static bool xdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_chan_info *chan_info = param;
+
+ return chan_info->dir == xdma_chan->dir;
+}
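/*
 * Hedged sketch (not part of this patch) of how a PCIe parent driver could
 * populate the filter map in its struct xdma_platdata so that
 * dma_request_chan(dev, "h2c") resolves through xdma_filter_fn() above. The
 * device name "my-client" is an assumption made for illustration only.
 */
static struct xdma_chan_info example_h2c_info = { .dir = DMA_MEM_TO_DEV };
static struct xdma_chan_info example_c2h_info = { .dir = DMA_DEV_TO_MEM };

static struct dma_slave_map example_xdma_map[] = {
	{ "my-client", "h2c", &example_h2c_info },
	{ "my-client", "c2h", &example_c2h_info },
};
/* then: pdata->device_map = example_xdma_map;
 *       pdata->device_map_cnt = ARRAY_SIZE(example_xdma_map);
 */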
+
+/**
+ * xdma_disable_user_irq - Disable user interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ */
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return;
+ }
+ index -= XDMA_CHAN_NUM(xdev);
+
+ regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
+}
+EXPORT_SYMBOL(xdma_disable_user_irq);
+
+/**
+ * xdma_enable_user_irq - Enable user logic interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ */
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+ int ret;
+
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return -EINVAL;
+ }
+ index -= XDMA_CHAN_NUM(xdev);
+
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(xdma_enable_user_irq);
+
+/**
+ * xdma_get_user_irq - Get system IRQ number
+ * @pdev: Pointer to the platform_device structure
+ * @user_irq_index: User logic IRQ wire index
+ *
+ * Return: The system IRQ number allocated for the given wire index.
+ */
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq index");
+ return -EINVAL;
+ }
+
+ return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
+}
+EXPORT_SYMBOL(xdma_get_user_irq);
+
+/**
+ * xdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+static int xdma_remove(struct platform_device *pdev)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
+ xdma_irq_fini(xdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
+ dma_async_device_unregister(&xdev->dma_dev);
+
+ return 0;
+}
+
+/**
+ * xdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct xdma_device *xdev;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret = -ENODEV;
+
+ if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
+ dev_err(&pdev->dev, "invalid max dma channels %d",
+ pdata->max_dma_channels);
+ return -EINVAL;
+ }
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, xdev);
+ xdev->pdev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get irq resource");
+ goto failed;
+ }
+ xdev->irq_start = res->start;
+ xdev->irq_num = res->end - res->start + 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get io resource");
+ goto failed;
+ }
+
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg_base)) {
+ xdma_err(xdev, "ioremap failed");
+ ret = PTR_ERR(reg_base);
+ goto failed;
+ }
+
+ xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+ &xdma_regmap_config);
+ if (IS_ERR(xdev->rmap)) {
+ ret = PTR_ERR(xdev->rmap);
+ xdma_err(xdev, "config regmap failed: %d", ret);
+ goto failed;
+ }
+ INIT_LIST_HEAD(&xdev->dma_dev.channels);
+
+ ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ xdma_err(xdev, "config H2C channels failed: %d", ret);
+ goto failed;
+ }
+
+ ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ xdma_err(xdev, "config C2H channels failed: %d", ret);
+ goto failed;
+ }
+
+ dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+
+ xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
+ xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
+ xdev->dma_dev.device_tx_status = dma_cookie_status;
+ xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
+ xdev->dma_dev.device_config = xdma_device_config;
+ xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.filter.map = pdata->device_map;
+ xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
+ xdev->dma_dev.filter.fn = xdma_filter_fn;
+
+ ret = dma_async_device_register(&xdev->dma_dev);
+ if (ret) {
+ xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_REG_DMA;
+
+ ret = xdma_irq_init(xdev);
+ if (ret) {
+ xdma_err(xdev, "failed to init msix: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
+
+ return 0;
+
+failed:
+ xdma_remove(pdev);
+
+ return ret;
+}
+
+static const struct platform_device_id xdma_id_table[] = {
+ { "xdma", 0},
+ { },
+};
+
+static struct platform_driver xdma_driver = {
+ .driver = {
+ .name = "xdma",
+ },
+ .id_table = xdma_id_table,
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("AMD XDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
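For context, a minimal consumer-side sketch (not part of this patch) of how a client driver could drive one H2C channel of this controller through the standard dmaengine slave API; the channel name "h2c" and the card-side address are assumptions matching the hypothetical filter map sketched above xdma_filter_fn().

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Submit one scatter-gather list to the card at card_addr (hypothetical). */
static int example_xdma_h2c_xfer(struct device *dev, struct sg_table *sgt,
				 dma_addr_t card_addr)
{
	struct dma_slave_config cfg = { .dst_addr = card_addr };
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "h2c");	/* resolved via the filter map */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	tx = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
				     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* channel kept until the transfer completes; released by the caller */
	return 0;
out:
	dma_release_channel(chan);
	return ret;
}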
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a8d23cdf883e..ac09f0e5f58d 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3143,8 +3143,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
- if (err < 0)
+ if (err < 0) {
+ of_node_put(child);
goto error;
+ }
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 21472a5d7636..9360f43b8e0f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -890,7 +890,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
struct platform_device *pdev)
{
struct zynqmp_dma_chan *chan;
- struct resource *res;
struct device_node *node = pdev->dev.of_node;
int err;
@@ -900,8 +899,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
chan->dev = zdev->dev;
chan->zdev = zdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chan->regs = devm_ioremap_resource(&pdev->dev, res);
+ chan->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chan->regs))
return PTR_ERR(chan->regs);
@@ -1062,7 +1060,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
zdev->dev = &pdev->dev;
INIT_LIST_HEAD(&zdev->common.channels);
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA not available for address range\n");
+ return ret;
+ }
dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
p = &zdev->common;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 4cfdefbd744d..68f576700911 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -542,4 +542,12 @@ config EDAC_DMC520
Support for error detection and correction on the
SoCs with ARM DMC-520 DRAM controller.
+config EDAC_ZYNQMP
+ tristate "Xilinx ZynqMP OCM Controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ This driver supports error detection and correction for the
+ Xilinx ZynqMP OCM (On Chip Memory) controller. It can also be
+ built as a module. In that case it will be called zynqmp_edac.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 2d1641a27a28..9b025c5b3061 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
+obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index e7e8e624a436..8b31cd54bdb6 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -2149,10 +2149,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
}
edac->sb_irq = platform_get_irq(pdev, 0);
- if (edac->sb_irq < 0) {
- dev_err(&pdev->dev, "No SBERR IRQ resource\n");
+ if (edac->sb_irq < 0)
return edac->sb_irq;
- }
irq_set_chained_handler_and_data(edac->sb_irq,
altr_edac_a10_irq_handler,
@@ -2184,10 +2182,9 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
}
#else
edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ if (edac->db_irq < 0)
return edac->db_irq;
- }
+
irq_set_chained_handler_and_data(edac->db_irq,
altr_edac_a10_irq_handler, edac);
#endif
@@ -2226,6 +2223,5 @@ static struct platform_driver altr_edac_a10_driver = {
};
module_platform_driver(altr_edac_a10_driver);
-MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e3318e5575a3..5c4292e65b96 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,11 +13,9 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
-static struct amd64_family_type *fam_type;
-
-static inline u32 get_umc_reg(u32 reg)
+static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
{
- if (!fam_type->flags.zn_regs_v2)
+ if (!pvt->flags.zn_regs_v2)
return reg;
switch (reg) {
@@ -182,21 +180,6 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
* other archs, we might not have access to the caches directly.
*/
-static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
-{
- /*
- * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
- * are shifted down by 0x5, so scrubval 0x5 is written to the register
- * as 0x0, scrubval 0x6 as 0x1, etc.
- */
- if (scrubval >= 0x5 && scrubval <= 0x14) {
- scrubval -= 0x5;
- pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
- pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
- } else {
- pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
- }
-}
/*
* Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
@@ -229,9 +212,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->umc) {
- __f17h_set_scrubval(pvt, scrubval);
- } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
+ if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
@@ -271,16 +252,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
- if (pvt->umc) {
- amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
- if (scrubval & BIT(0)) {
- amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
- scrubval &= 0xF;
- scrubval += 0x5;
- } else {
- scrubval = 0;
- }
- } else if (pvt->fam == 0x15) {
+ if (pvt->fam == 0x15) {
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
@@ -463,7 +435,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
#define for_each_umc(i) \
- for (i = 0; i < fam_type->max_mcs; i++)
+ for (i = 0; i < pvt->max_mcs; i++)
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
@@ -1284,40 +1256,102 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
-static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
{
unsigned long edac_cap = EDAC_FLAG_NONE;
u8 bit;
- if (pvt->umc) {
- u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
+ bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
+ ? 19
+ : 17;
- for_each_umc(i) {
- if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
- continue;
+ if (pvt->dclr0 & BIT(bit))
+ edac_cap = EDAC_FLAG_SECDED;
- umc_en_mask |= BIT(i);
+ return edac_cap;
+}
- /* UMC Configuration bit 12 (DimmEccEn) */
- if (pvt->umc[i].umc_cfg & BIT(12))
- dimm_ecc_en_mask |= BIT(i);
- }
+static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
+{
+ u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
+ unsigned long edac_cap = EDAC_FLAG_NONE;
- if (umc_en_mask == dimm_ecc_en_mask)
- edac_cap = EDAC_FLAG_SECDED;
- } else {
- bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
- ? 19
- : 17;
+ for_each_umc(i) {
+ if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
+ continue;
- if (pvt->dclr0 & BIT(bit))
- edac_cap = EDAC_FLAG_SECDED;
+ umc_en_mask |= BIT(i);
+
+ /* UMC Configuration bit 12 (DimmEccEn) */
+ if (pvt->umc[i].umc_cfg & BIT(12))
+ dimm_ecc_en_mask |= BIT(i);
}
+ if (umc_en_mask == dimm_ecc_en_mask)
+ edac_cap = EDAC_FLAG_SECDED;
+
return edac_cap;
}
-static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
+/*
+ * debug routine to display the memory sizes of all logical DIMMs and their
+ * CSROWs
+ */
+static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+{
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+ int dimm, size0, size1;
+
+ if (pvt->fam == 0xf) {
+ /* K8 families < revF not supported yet */
+ if (pvt->ext_model < K8_REV_F)
+ return;
+
+ WARN_ON(ctrl != 0);
+ }
+
+ if (pvt->fam == 0x10) {
+ dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
+ : pvt->dbam0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
+ pvt->csels[1].csbases :
+ pvt->csels[0].csbases;
+ } else if (ctrl) {
+ dbam = pvt->dbam0;
+ dcsb = pvt->csels[1].csbases;
+ }
+ edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, dbam);
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
+
+ /* Dump memory sizes for DIMM and its CSROWs */
+ for (dimm = 0; dimm < 4; dimm++) {
+ size0 = 0;
+ if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
+ /*
+ * For F15m60h, we need multiplier for LRDIMM cs_size
+ * calculation. We pass dimm value to the dbam_to_cs
+ * mapper so we can find the multiplier from the
+ * corresponding DCSM.
+ */
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam),
+ dimm);
+
+ size1 = 0;
+ if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam),
+ dimm);
+
+ amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+ dimm * 2, size0,
+ dimm * 2 + 1, size1);
+ }
+}
+
static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
@@ -1360,7 +1394,7 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
-static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
+static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
u8 base, count = 0;
int cs_mode = 0;
@@ -1392,7 +1426,85 @@ static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
-static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
+static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ unsigned int cs_mode, int csrow_nr)
+{
+ u32 addr_mask_orig, addr_mask_deinterleaved;
+ u32 msb, weight, num_zero_bits;
+ int cs_mask_nr = csrow_nr;
+ int dimm, size = 0;
+
+ /* No Chip Selects are enabled. */
+ if (!cs_mode)
+ return size;
+
+ /* Requested size of an even CS but none are enabled. */
+ if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
+ return size;
+
+ /* Requested size of an odd CS but none are enabled. */
+ if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
+ return size;
+
+ /*
+ * Family 17h introduced systems with one mask per DIMM,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 and CS1 -> MASK0 / DIMM0
+ * CS2 and CS3 -> MASK1 / DIMM1
+ *
+ * Family 19h Model 10h introduced systems with one mask per Chip Select,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 -> MASK0 -> DIMM0
+ * CS1 -> MASK1 -> DIMM0
+ * CS2 -> MASK2 -> DIMM1
+ * CS3 -> MASK3 -> DIMM1
+ *
+ * Keep the mask number equal to the Chip Select number for newer systems,
+ * and shift the mask number for older systems.
+ */
+ dimm = csrow_nr >> 1;
+
+ if (!pvt->flags.zn_regs_v2)
+ cs_mask_nr >>= 1;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+ addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+ else
+ addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
+
+ /*
+ * The number of zero bits in the mask is equal to the number of bits
+ * in a full mask minus the number of bits in the current mask.
+ *
+ * The MSB is the number of bits in the full mask because BIT[0] is
+ * always 0.
+ *
+ * In the special 3 Rank interleaving case, a single bit is flipped
+ * without swapping with the most significant bit. This can be handled
+ * by keeping the MSB where it is and ignoring the single zero bit.
+ */
+ msb = fls(addr_mask_orig) - 1;
+ weight = hweight_long(addr_mask_orig);
+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
+
+ /* Take the number of zero bits off from the top of the mask. */
+ addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+
+ edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+ edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ size = (addr_mask_deinterleaved >> 2) + 1;
+
+ /* Return size in MBs. */
+ return size >> 10;
+}
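/*
 * Worked example (hypothetical mask value, for illustration only): with
 * addr_mask_orig = 0x00fffffe (bits [23:1] set), msb = fls() - 1 = 23,
 * weight = 23 and num_zero_bits = 0, so addr_mask_deinterleaved =
 * GENMASK(23, 1) = 0x00fffffe and size = (0x00fffffe >> 2) + 1 = 4194304 kB,
 * i.e. 4096 MB is returned for that chip select.
 */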
+
+static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, cs0, cs1, cs_mode;
@@ -1402,10 +1514,10 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
cs0 = dimm * 2;
cs1 = dimm * 2 + 1;
- cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
+ cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
+ size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
+ size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -1413,7 +1525,7 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
}
}
-static void __dump_misc_regs_df(struct amd64_pvt *pvt)
+static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i, tmp, umc_base;
@@ -1446,21 +1558,17 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
amd_smn_read(pvt->mc_node_id,
- umc_base + get_umc_reg(UMCCH_ADDR_CFG),
+ umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
&tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
- debug_display_dimm_sizes_df(pvt, i);
+ umc_debug_display_dimm_sizes(pvt, i);
}
-
- edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
- pvt->dhar, dhar_base(pvt));
}
-/* Display and decode various NB registers for debug purposes. */
-static void __dump_misc_regs(struct amd64_pvt *pvt)
+static void dct_dump_misc_regs(struct amd64_pvt *pvt)
{
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
@@ -1480,26 +1588,17 @@ static void __dump_misc_regs(struct amd64_pvt *pvt)
(pvt->fam == 0xf) ? k8_dhar_offset(pvt)
: f10_dhar_offset(pvt));
- debug_display_dimm_sizes(pvt, 0);
+ dct_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (pvt->fam == 0xf)
return;
- debug_display_dimm_sizes(pvt, 1);
+ dct_debug_display_dimm_sizes(pvt, 1);
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
-}
-
-/* Display and decode various NB registers for debug purposes. */
-static void dump_misc_regs(struct amd64_pvt *pvt)
-{
- if (pvt->umc)
- __dump_misc_regs_df(pvt);
- else
- __dump_misc_regs(pvt);
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
@@ -1509,7 +1608,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
/*
* See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
-static void prep_chip_selects(struct amd64_pvt *pvt)
+static void dct_prep_chip_selects(struct amd64_pvt *pvt)
{
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
@@ -1517,21 +1616,23 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
- } else if (pvt->fam >= 0x17) {
- int umc;
-
- for_each_umc(umc) {
- pvt->csels[umc].b_cnt = 4;
- pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
- }
-
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
-static void read_umc_base_mask(struct amd64_pvt *pvt)
+static void umc_prep_chip_selects(struct amd64_pvt *pvt)
+{
+ int umc;
+
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 4;
+ pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
+ }
+}
+
+static void umc_read_base_mask(struct amd64_pvt *pvt)
{
u32 umc_base_reg, umc_base_reg_sec;
u32 umc_mask_reg, umc_mask_reg_sec;
@@ -1562,7 +1663,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
- umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
+ umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
@@ -1585,15 +1686,10 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
/*
* Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
-static void read_dct_base_mask(struct amd64_pvt *pvt)
+static void dct_read_base_mask(struct amd64_pvt *pvt)
{
int cs;
- prep_chip_selects(pvt);
-
- if (pvt->umc)
- return read_umc_base_mask(pvt);
-
for_each_chip_select(cs, 0, pvt) {
int reg0 = DCSB0 + (cs * 4);
int reg1 = DCSB1 + (cs * 4);
@@ -1633,7 +1729,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
-static void determine_memory_type_df(struct amd64_pvt *pvt)
+static void umc_determine_memory_type(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i;
@@ -1650,7 +1746,7 @@ static void determine_memory_type_df(struct amd64_pvt *pvt)
* Check if the system supports the "DDR Type" field in UMC Config
* and has DDR5 DIMMs in use.
*/
- if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
+ if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR5;
else if (umc->dimm_cfg & BIT(4))
@@ -1670,13 +1766,10 @@ static void determine_memory_type_df(struct amd64_pvt *pvt)
}
}
-static void determine_memory_type(struct amd64_pvt *pvt)
+static void dct_determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
- if (pvt->umc)
- return determine_memory_type_df(pvt);
-
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
@@ -1726,30 +1819,14 @@ static void determine_memory_type(struct amd64_pvt *pvt)
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
}
+
+ edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
return;
ddr3:
pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
-/* Get the number of DCT channels the memory controller is using. */
-static int k8_early_channel_count(struct amd64_pvt *pvt)
-{
- int flag;
-
- if (pvt->ext_model >= K8_REV_F)
- /* RevF (NPT) and later */
- flag = pvt->dclr0 & WIDTH_128;
- else
- /* RevE and earlier */
- flag = pvt->dclr0 & REVE_WIDTH_128;
-
- /* not used */
- pvt->dclr1 = 0;
-
- return (flag) ? 2 : 1;
-}
-
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
@@ -2001,69 +2078,6 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
}
}
-/*
- * Get the number of DCT channels in use.
- *
- * Return:
- * number of Memory Channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
-static int f1x_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, j, channels = 0;
-
- /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
- if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
- return 2;
-
- /*
- * Need to check if in unganged mode: In such, there are 2 channels,
- * but they are not in 128 bit mode and thus the above 'dclr0' status
- * bit will be OFF.
- *
- * Need to check DCT0[0] and DCT1[0] to see if only one of them has
- * their CSEnable bit on. If so, then SINGLE DIMM case.
- */
- edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
-
- /*
- * Check DRAM Bank Address Mapping values for each DIMM to see if there
- * is more than just one DIMM present in unganged mode. Need to check
- * both controllers since DIMMs can be placed in either one.
- */
- for (i = 0; i < 2; i++) {
- u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
-
- for (j = 0; j < 4; j++) {
- if (DBAM_DIMM(j, dbam) > 0) {
- channels++;
- break;
- }
- }
- }
-
- if (channels > 2)
- channels = 2;
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
-static int f17_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, channels = 0;
-
- /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
- for_each_umc(i)
- channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
@@ -2191,84 +2205,6 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
-static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
- unsigned int cs_mode, int csrow_nr)
-{
- u32 addr_mask_orig, addr_mask_deinterleaved;
- u32 msb, weight, num_zero_bits;
- int cs_mask_nr = csrow_nr;
- int dimm, size = 0;
-
- /* No Chip Selects are enabled. */
- if (!cs_mode)
- return size;
-
- /* Requested size of an even CS but none are enabled. */
- if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
- return size;
-
- /* Requested size of an odd CS but none are enabled. */
- if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
- return size;
-
- /*
- * Family 17h introduced systems with one mask per DIMM,
- * and two Chip Selects per DIMM.
- *
- * CS0 and CS1 -> MASK0 / DIMM0
- * CS2 and CS3 -> MASK1 / DIMM1
- *
- * Family 19h Model 10h introduced systems with one mask per Chip Select,
- * and two Chip Selects per DIMM.
- *
- * CS0 -> MASK0 -> DIMM0
- * CS1 -> MASK1 -> DIMM0
- * CS2 -> MASK2 -> DIMM1
- * CS3 -> MASK3 -> DIMM1
- *
- * Keep the mask number equal to the Chip Select number for newer systems,
- * and shift the mask number for older systems.
- */
- dimm = csrow_nr >> 1;
-
- if (!fam_type->flags.zn_regs_v2)
- cs_mask_nr >>= 1;
-
- /* Asymmetric dual-rank DIMM support. */
- if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
- addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
- else
- addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
-
- /*
- * The number of zero bits in the mask is equal to the number of bits
- * in a full mask minus the number of bits in the current mask.
- *
- * The MSB is the number of bits in the full mask because BIT[0] is
- * always 0.
- *
- * In the special 3 Rank interleaving case, a single bit is flipped
- * without swapping with the most significant bit. This can be handled
- * by keeping the MSB where it is and ignoring the single zero bit.
- */
- msb = fls(addr_mask_orig) - 1;
- weight = hweight_long(addr_mask_orig);
- num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
-
- /* Take the number of zero bits off from the top of the mask. */
- addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
-
- edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
- edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
- edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
-
- /* Register [31:1] = Address [39:9]. Size is in kBs here. */
- size = (addr_mask_deinterleaved >> 2) + 1;
-
- /* Return size in MBs. */
- return size >> 10;
-}
-
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
@@ -2792,227 +2728,6 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
}
/*
- * debug routine to display the memory sizes of all logical DIMMs and its
- * CSROWs
- */
-static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
-{
- int dimm, size0, size1;
- u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
- u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-
- if (pvt->fam == 0xf) {
- /* K8 families < revF not supported yet */
- if (pvt->ext_model < K8_REV_F)
- return;
- else
- WARN_ON(ctrl != 0);
- }
-
- if (pvt->fam == 0x10) {
- dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
- : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
- pvt->csels[1].csbases :
- pvt->csels[0].csbases;
- } else if (ctrl) {
- dbam = pvt->dbam0;
- dcsb = pvt->csels[1].csbases;
- }
- edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
- ctrl, dbam);
-
- edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
-
- /* Dump memory sizes for DIMM and its CSROWs */
- for (dimm = 0; dimm < 4; dimm++) {
-
- size0 = 0;
- if (dcsb[dimm*2] & DCSB_CS_ENABLE)
- /*
- * For F15m60h, we need multiplier for LRDIMM cs_size
- * calculation. We pass dimm value to the dbam_to_cs
- * mapper so we can find the multiplier from the
- * corresponding DCSM.
- */
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
- DBAM_DIMM(dimm, dbam),
- dimm);
-
- size1 = 0;
- if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
- DBAM_DIMM(dimm, dbam),
- dimm);
-
- amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
- dimm * 2, size0,
- dimm * 2 + 1, size1);
- }
-}
-
-static struct amd64_family_type family_types[] = {
- [K8_CPUS] = {
- .ctl_name = "K8",
- .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
- .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = k8_early_channel_count,
- .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
- .dbam_to_cs = k8_dbam_to_chip_select,
- }
- },
- [F10_CPUS] = {
- .ctl_name = "F10h",
- .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
- .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f10_dbam_to_chip_select,
- }
- },
- [F15_CPUS] = {
- .ctl_name = "F15h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_dbam_to_chip_select,
- }
- },
- [F15_M30H_CPUS] = {
- .ctl_name = "F15h_M30h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F15_M60H_CPUS] = {
- .ctl_name = "F15h_M60h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_m60h_dbam_to_chip_select,
- }
- },
- [F16_CPUS] = {
- .ctl_name = "F16h",
- .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F16_M30H_CPUS] = {
- .ctl_name = "F16h_M30h",
- .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F17_CPUS] = {
- .ctl_name = "F17h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M10H_CPUS] = {
- .ctl_name = "F17h_M10h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M30H_CPUS] = {
- .ctl_name = "F17h_M30h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
- .max_mcs = 8,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M60H_CPUS] = {
- .ctl_name = "F17h_M60h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M70H_CPUS] = {
- .ctl_name = "F17h_M70h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F19_CPUS] = {
- .ctl_name = "F19h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
- .max_mcs = 8,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F19_M10H_CPUS] = {
- .ctl_name = "F19h_M10h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
- .max_mcs = 12,
- .flags.zn_regs_v2 = 1,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F19_M50H_CPUS] = {
- .ctl_name = "F19h_M50h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
-};
-
-/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
* uses those to find the symbol in error and thus the DIMM.
@@ -3259,10 +2974,14 @@ static inline void decode_bus_error(int node_id, struct mce *m)
* Currently, we can derive the channel number by looking at the 6th nibble in
* the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
* number.
+ *
+ * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
+ * the MCA_SYND[ErrorInformation] field.
*/
-static int find_umc_channel(struct mce *m)
+static void umc_get_err_info(struct mce *m, struct err_info *err)
{
- return (m->ipid & GENMASK(31, 0)) >> 20;
+ err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
+ err->csrow = m->synd & 0x7;
}
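A worked decode with hypothetical register values, following the comment above (the values are illustrative only, not taken from real hardware):

	/* Hypothetical ipid/synd values, worked through the extraction above. */
	struct mce m = { .ipid = 0x00150000, .synd = 0x5 };
	struct err_info err = {};

	umc_get_err_info(&m, &err);
	/* err.channel == (0x00150000 & GENMASK(31, 0)) >> 20 == 1  (6th nibble Y) */
	/* err.csrow   == (0x5 & 0x7)                         == 5                 */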
static void decode_umc_error(int node_id, struct mce *m)
@@ -3284,8 +3003,6 @@ static void decode_umc_error(int node_id, struct mce *m)
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
- err.channel = find_umc_channel(m);
-
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
@@ -3300,7 +3017,7 @@ static void decode_umc_error(int node_id, struct mce *m)
err.err_code = ERR_CHANNEL;
}
- err.csrow = m->synd & 0x7;
+ pvt->ops->get_err_info(m, &err);
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
@@ -3316,37 +3033,10 @@ log_error:
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
- * Reserve F0 and F6 on systems with a UMC.
*/
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
- if (pvt->umc) {
- pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
- if (!pvt->F0) {
- edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
- return -ENODEV;
- }
-
- pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
- if (!pvt->F6) {
- pci_dev_put(pvt->F0);
- pvt->F0 = NULL;
-
- edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
- return -ENODEV;
- }
-
- if (!pci_ctl_dev)
- pci_ctl_dev = &pvt->F0->dev;
-
- edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
- edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
- edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
-
- return 0;
- }
-
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
@@ -3374,37 +3064,11 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
return 0;
}
-static void free_mc_sibling_devs(struct amd64_pvt *pvt)
-{
- if (pvt->umc) {
- pci_dev_put(pvt->F0);
- pci_dev_put(pvt->F6);
- } else {
- pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F2);
- }
-}
-
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
pvt->ecc_sym_sz = 4;
- if (pvt->umc) {
- u8 i;
-
- for_each_umc(i) {
- /* Check enabled channels only: */
- if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
- if (pvt->umc[i].ecc_ctrl & BIT(9)) {
- pvt->ecc_sym_sz = 16;
- return;
- } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
- pvt->ecc_sym_sz = 8;
- return;
- }
- }
- }
- } else if (pvt->fam >= 0x10) {
+ if (pvt->fam >= 0x10) {
u32 tmp;
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -3421,7 +3085,7 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
/*
* Retrieve the hardware registers of the memory controller.
*/
-static void __read_mc_regs_df(struct amd64_pvt *pvt)
+static void umc_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
@@ -3433,7 +3097,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
- amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
+ amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
@@ -3445,7 +3109,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
* Retrieve the hardware registers of the memory controller (this includes the
* 'Address Map' and 'Misc' device regs)
*/
-static void read_mc_regs(struct amd64_pvt *pvt)
+static void dct_read_mc_regs(struct amd64_pvt *pvt)
{
unsigned int range;
u64 msr_val;
@@ -3466,13 +3130,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
edac_dbg(0, " TOP_MEM2 disabled\n");
}
- if (pvt->umc) {
- __read_mc_regs_df(pvt);
- amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
-
- goto skip;
- }
-
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
read_dram_ctl_register(pvt);
@@ -3513,14 +3170,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
-skip:
- read_dct_base_mask(pvt);
-
- determine_memory_type(pvt);
-
- if (!pvt->umc)
- edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
-
determine_ecc_sym_sz(pvt);
}
@@ -3558,36 +3207,47 @@ skip:
* encompasses
*
*/
-static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
+static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
- int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
- if (!pvt->umc) {
- csrow_nr >>= 1;
- cs_mode = DBAM_DIMM(csrow_nr, dbam);
- } else {
- cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
- }
+ csrow_nr >>= 1;
+ cs_mode = DBAM_DIMM(csrow_nr, dbam);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
- csrow_nr_orig, dct, cs_mode);
+ csrow_nr, dct, cs_mode);
+ edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
+
+ return nr_pages;
+}
+
+static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
+{
+ int csrow_nr = csrow_nr_orig;
+ u32 cs_mode, nr_pages;
+
+ cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
+
+ nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
+ nr_pages <<= 20 - PAGE_SHIFT;
+
+ edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
+ csrow_nr_orig, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
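The dbam_to_cs()/umc_addr_mask_to_cs_size() helpers return the chip-select size in MiB, and the shift by (20 - PAGE_SHIFT) converts that to pages. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

	u32 nr_pages = 2048;		/* 2048 MiB chip select               */
	nr_pages <<= 20 - PAGE_SHIFT;	/* 2048 << 8 == 524288 pages == 2 GiB */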
-static int init_csrows_df(struct mem_ctl_info *mci)
+static void umc_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
enum dev_type dev_type = DEV_UNKNOWN;
struct dimm_info *dimm;
- int empty = 1;
u8 umc, cs;
if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
@@ -3608,40 +3268,34 @@ static int init_csrows_df(struct mem_ctl_info *mci)
if (!csrow_enabled(cs, umc, pvt))
continue;
- empty = 0;
dimm = mci->csrows[cs]->channels[umc]->dimm;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, cs);
- dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
+ dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
dimm->mtype = pvt->umc[umc].dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
dimm->grain = 64;
}
}
-
- return empty;
}
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
*/
-static int init_csrows(struct mem_ctl_info *mci)
+static void dct_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
struct csrow_info *csrow;
struct dimm_info *dimm;
- int i, j, empty = 1;
int nr_pages = 0;
+ int i, j;
u32 val;
- if (pvt->umc)
- return init_csrows_df(mci);
-
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
@@ -3664,19 +3318,18 @@ static int init_csrows(struct mem_ctl_info *mci)
continue;
csrow = mci->csrows[i];
- empty = 0;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, i);
if (row_dct0) {
- nr_pages = get_csrow_nr_pages(pvt, 0, i);
+ nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
csrow->channels[0]->dimm->nr_pages = nr_pages;
}
/* K8 has only one DCT */
if (pvt->fam != 0xf && row_dct1) {
- int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
+ int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
nr_pages += row_dct1_pages;
@@ -3691,15 +3344,13 @@ static int init_csrows(struct mem_ctl_info *mci)
: EDAC_SECDED;
}
- for (j = 0; j < pvt->channel_count; j++) {
+ for (j = 0; j < pvt->max_mcs; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
dimm->grain = 64;
}
}
-
- return empty;
}
/* get all cores on this DCT */
@@ -3862,59 +3513,66 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
amd64_warn("Error restoring NB MCGCTL settings!\n");
}
-static bool ecc_enabled(struct amd64_pvt *pvt)
+static bool dct_ecc_enabled(struct amd64_pvt *pvt)
{
u16 nid = pvt->mc_node_id;
bool nb_mce_en = false;
- u8 ecc_en = 0, i;
+ u8 ecc_en = 0;
u32 value;
- if (boot_cpu_data.x86 >= 0x17) {
- u8 umc_en_mask = 0, ecc_en_mask = 0;
- struct amd64_umc *umc;
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
- for_each_umc(i) {
- umc = &pvt->umc[i];
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
- /* Only check enabled UMCs. */
- if (!(umc->sdp_ctrl & UMC_SDP_INIT))
- continue;
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
+ if (!nb_mce_en)
+ edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+ MSR_IA32_MCG_CTL, nid);
- umc_en_mask |= BIT(i);
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
- if (umc->umc_cap_hi & UMC_ECC_ENABLED)
- ecc_en_mask |= BIT(i);
- }
+ if (!ecc_en || !nb_mce_en)
+ return false;
+ else
+ return true;
+}
- /* Check whether at least one UMC is enabled: */
- if (umc_en_mask)
- ecc_en = umc_en_mask == ecc_en_mask;
- else
- edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
+static bool umc_ecc_enabled(struct amd64_pvt *pvt)
+{
+ u8 umc_en_mask = 0, ecc_en_mask = 0;
+ u16 nid = pvt->mc_node_id;
+ struct amd64_umc *umc;
+ u8 ecc_en = 0, i;
- /* Assume UMC MCA banks are enabled. */
- nb_mce_en = true;
- } else {
- amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
+
+ /* Only check enabled UMCs. */
+ if (!(umc->sdp_ctrl & UMC_SDP_INIT))
+ continue;
- ecc_en = !!(value & NBCFG_ECC_ENABLE);
+ umc_en_mask |= BIT(i);
- nb_mce_en = nb_mce_bank_enabled_on_node(nid);
- if (!nb_mce_en)
- edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
- MSR_IA32_MCG_CTL, nid);
+ if (umc->umc_cap_hi & UMC_ECC_ENABLED)
+ ecc_en_mask |= BIT(i);
}
+ /* Check whether at least one UMC is enabled: */
+ if (umc_en_mask)
+ ecc_en = umc_en_mask == ecc_en_mask;
+ else
+ edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
+
edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
- if (!ecc_en || !nb_mce_en)
+ if (!ecc_en)
return false;
else
return true;
}
static inline void
-f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
@@ -3944,142 +3602,234 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
}
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->umc) {
- f17h_determine_edac_ctl_cap(mci, pvt);
- } else {
- if (pvt->nbcap & NBCAP_SECDED)
- mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+ if (pvt->nbcap & NBCAP_SECDED)
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & NBCAP_CHIPKILL)
- mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- }
+ if (pvt->nbcap & NBCAP_CHIPKILL)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- mci->edac_cap = determine_edac_cap(pvt);
+ mci->edac_cap = dct_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = fam_type->ctl_name;
+ mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
mci->set_sdram_scrub_rate = set_scrub_rate;
mci->get_sdram_scrub_rate = get_scrub_rate;
+
+ dct_init_csrows(mci);
}
-/*
- * returns a pointer to the family descriptor on success, NULL otherwise.
- */
-static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
+static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+
+ umc_determine_edac_ctl_cap(mci, pvt);
+
+ mci->edac_cap = umc_determine_edac_cap(pvt);
+ mci->mod_name = EDAC_MOD_STR;
+ mci->ctl_name = pvt->ctl_name;
+ mci->dev_name = pci_name(pvt->F3);
+ mci->ctl_page_to_phys = NULL;
+
+ umc_init_csrows(mci);
+}
+
+static int dct_hw_info_get(struct amd64_pvt *pvt)
+{
+ int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
+
+ if (ret)
+ return ret;
+
+ dct_prep_chip_selects(pvt);
+ dct_read_base_mask(pvt);
+ dct_read_mc_regs(pvt);
+ dct_determine_memory_type(pvt);
+
+ return 0;
+}
+
+static int umc_hw_info_get(struct amd64_pvt *pvt)
+{
+ pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc)
+ return -ENOMEM;
+
+ umc_prep_chip_selects(pvt);
+ umc_read_base_mask(pvt);
+ umc_read_mc_regs(pvt);
+ umc_determine_memory_type(pvt);
+
+ return 0;
+}
+
+static void hw_info_put(struct amd64_pvt *pvt)
+{
+ pci_dev_put(pvt->F1);
+ pci_dev_put(pvt->F2);
+ kfree(pvt->umc);
+}
+
+static struct low_ops umc_ops = {
+ .hw_info_get = umc_hw_info_get,
+ .ecc_enabled = umc_ecc_enabled,
+ .setup_mci_misc_attrs = umc_setup_mci_misc_attrs,
+ .dump_misc_regs = umc_dump_misc_regs,
+ .get_err_info = umc_get_err_info,
+};
+
+/* Use Family 16h versions for defaults and adjust as needed below. */
+static struct low_ops dct_ops = {
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .hw_info_get = dct_hw_info_get,
+ .ecc_enabled = dct_ecc_enabled,
+ .setup_mci_misc_attrs = dct_setup_mci_misc_attrs,
+ .dump_misc_regs = dct_dump_misc_regs,
+};
+
+static int per_family_init(struct amd64_pvt *pvt)
{
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
+ pvt->max_mcs = 2;
+
+ /*
+ * Decide on which ops group to use here and do any family/model
+ * overrides below.
+ */
+ if (pvt->fam >= 0x17)
+ pvt->ops = &umc_ops;
+ else
+ pvt->ops = &dct_ops;
switch (pvt->fam) {
case 0xf:
- fam_type = &family_types[K8_CPUS];
- pvt->ops = &family_types[K8_CPUS].ops;
+ pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
+ "K8 revF or later" : "K8 revE or earlier";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
+ pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
+ pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
break;
case 0x10:
- fam_type = &family_types[F10_CPUS];
- pvt->ops = &family_types[F10_CPUS].ops;
+ pvt->ctl_name = "F10h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
+ pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
break;
case 0x15:
- if (pvt->model == 0x30) {
- fam_type = &family_types[F15_M30H_CPUS];
- pvt->ops = &family_types[F15_M30H_CPUS].ops;
+ switch (pvt->model) {
+ case 0x30:
+ pvt->ctl_name = "F15h_M30h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
break;
- } else if (pvt->model == 0x60) {
- fam_type = &family_types[F15_M60H_CPUS];
- pvt->ops = &family_types[F15_M60H_CPUS].ops;
+ case 0x60:
+ pvt->ctl_name = "F15h_M60h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
+ pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
+ break;
+ case 0x13:
+ /* Richland is only client */
+ return -ENODEV;
+ default:
+ pvt->ctl_name = "F15h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
+ pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
break;
- /* Richland is only client */
- } else if (pvt->model == 0x13) {
- return NULL;
- } else {
- fam_type = &family_types[F15_CPUS];
- pvt->ops = &family_types[F15_CPUS].ops;
}
break;
case 0x16:
- if (pvt->model == 0x30) {
- fam_type = &family_types[F16_M30H_CPUS];
- pvt->ops = &family_types[F16_M30H_CPUS].ops;
+ switch (pvt->model) {
+ case 0x30:
+ pvt->ctl_name = "F16h_M30h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
+ break;
+ default:
+ pvt->ctl_name = "F16h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
break;
}
- fam_type = &family_types[F16_CPUS];
- pvt->ops = &family_types[F16_CPUS].ops;
break;
case 0x17:
- if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
- fam_type = &family_types[F17_M10H_CPUS];
- pvt->ops = &family_types[F17_M10H_CPUS].ops;
+ switch (pvt->model) {
+ case 0x10 ... 0x2f:
+ pvt->ctl_name = "F17h_M10h";
break;
- } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
- fam_type = &family_types[F17_M30H_CPUS];
- pvt->ops = &family_types[F17_M30H_CPUS].ops;
+ case 0x30 ... 0x3f:
+ pvt->ctl_name = "F17h_M30h";
+ pvt->max_mcs = 8;
break;
- } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
- fam_type = &family_types[F17_M60H_CPUS];
- pvt->ops = &family_types[F17_M60H_CPUS].ops;
+ case 0x60 ... 0x6f:
+ pvt->ctl_name = "F17h_M60h";
break;
- } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
- fam_type = &family_types[F17_M70H_CPUS];
- pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ case 0x70 ... 0x7f:
+ pvt->ctl_name = "F17h_M70h";
+ break;
+ default:
+ pvt->ctl_name = "F17h";
break;
}
- fallthrough;
- case 0x18:
- fam_type = &family_types[F17_CPUS];
- pvt->ops = &family_types[F17_CPUS].ops;
+ break;
- if (pvt->fam == 0x18)
- family_types[F17_CPUS].ctl_name = "F18h";
+ case 0x18:
+ pvt->ctl_name = "F18h";
break;
case 0x19:
- if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
- fam_type = &family_types[F19_M10H_CPUS];
- pvt->ops = &family_types[F19_M10H_CPUS].ops;
+ switch (pvt->model) {
+ case 0x00 ... 0x0f:
+ pvt->ctl_name = "F19h";
+ pvt->max_mcs = 8;
+ break;
+ case 0x10 ... 0x1f:
+ pvt->ctl_name = "F19h_M10h";
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
break;
- } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
- fam_type = &family_types[F17_M70H_CPUS];
- pvt->ops = &family_types[F17_M70H_CPUS].ops;
- fam_type->ctl_name = "F19h_M20h";
+ case 0x20 ... 0x2f:
+ pvt->ctl_name = "F19h_M20h";
break;
- } else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
- fam_type = &family_types[F19_M50H_CPUS];
- pvt->ops = &family_types[F19_M50H_CPUS].ops;
- fam_type->ctl_name = "F19h_M50h";
+ case 0x50 ... 0x5f:
+ pvt->ctl_name = "F19h_M50h";
break;
- } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
- fam_type = &family_types[F19_M10H_CPUS];
- pvt->ops = &family_types[F19_M10H_CPUS].ops;
- fam_type->ctl_name = "F19h_MA0h";
+ case 0xa0 ... 0xaf:
+ pvt->ctl_name = "F19h_MA0h";
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
break;
}
- fam_type = &family_types[F19_CPUS];
- pvt->ops = &family_types[F19_CPUS].ops;
- family_types[F19_CPUS].ctl_name = "F19h";
break;
default:
amd64_err("Unsupported family!\n");
- return NULL;
+ return -ENODEV;
}
- return fam_type;
+ return 0;
}
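With the family_types[] table gone, a hypothetical new DCT-based model would be wired up by starting from the base ops group selected above and overriding the per-model fields; a rough sketch (the case label, name and PCI IDs are placeholders, not real hardware):

	case 0x1b:				/* hypothetical family */
		pvt->ctl_name = "F1Bh";
		pvt->f1_id = 0xdead;		/* placeholder PCI IDs */
		pvt->f2_id = 0xbeef;
		pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
		break;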
static const struct attribute_group *amd64_edac_attr_groups[] = {
@@ -4090,67 +3840,17 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
-static int hw_info_get(struct amd64_pvt *pvt)
-{
- u16 pci_id1, pci_id2;
- int ret;
-
- if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
- if (!pvt->umc)
- return -ENOMEM;
-
- pci_id1 = fam_type->f0_id;
- pci_id2 = fam_type->f6_id;
- } else {
- pci_id1 = fam_type->f1_id;
- pci_id2 = fam_type->f2_id;
- }
-
- ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
- if (ret)
- return ret;
-
- read_mc_regs(pvt);
-
- return 0;
-}
-
-static void hw_info_put(struct amd64_pvt *pvt)
-{
- if (pvt->F0 || pvt->F1)
- free_mc_sibling_devs(pvt);
-
- kfree(pvt->umc);
-}
-
static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- int ret = -EINVAL;
-
- /*
- * We need to determine how many memory channels there are. Then use
- * that information for calculating the size of the dynamic instance
- * tables in the 'mci' structure.
- */
- pvt->channel_count = pvt->ops->early_channel_count(pvt);
- if (pvt->channel_count < 0)
- return ret;
+ int ret = -ENOMEM;
- ret = -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
-
- /*
- * Always allocate two channels since we can have setups with DIMMs on
- * only one channel. Also, this simplifies handling later for the price
- * of a couple of KBs tops.
- */
- layers[1].size = fam_type->max_mcs;
+ layers[1].size = pvt->max_mcs;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
@@ -4160,10 +3860,7 @@ static int init_one_instance(struct amd64_pvt *pvt)
mci->pvt_info = pvt;
mci->pdev = &pvt->F3->dev;
- setup_mci_misc_attrs(mci);
-
- if (init_csrows(mci))
- mci->edac_cap = EDAC_FLAG_NONE;
+ pvt->ops->setup_mci_misc_attrs(mci);
ret = -ENODEV;
if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
@@ -4180,7 +3877,7 @@ static bool instance_has_memory(struct amd64_pvt *pvt)
bool cs_enabled = false;
int cs = 0, dct = 0;
- for (dct = 0; dct < fam_type->max_mcs; dct++) {
+ for (dct = 0; dct < pvt->max_mcs; dct++) {
for_each_chip_select(cs, dct, pvt)
cs_enabled |= csrow_enabled(cs, dct, pvt);
}
@@ -4209,12 +3906,11 @@ static int probe_one_instance(unsigned int nid)
pvt->mc_node_id = nid;
pvt->F3 = F3;
- ret = -ENODEV;
- fam_type = per_family_init(pvt);
- if (!fam_type)
+ ret = per_family_init(pvt);
+ if (ret < 0)
goto err_enable;
- ret = hw_info_get(pvt);
+ ret = pvt->ops->hw_info_get(pvt);
if (ret < 0)
goto err_enable;
@@ -4224,7 +3920,7 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
- if (!ecc_enabled(pvt)) {
+ if (!pvt->ops->ecc_enabled(pvt)) {
ret = -ENODEV;
if (!ecc_enable_override)
@@ -4250,13 +3946,10 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
- amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (pvt->fam == 0xf ?
- (pvt->ext_model >= K8_REV_F ? "revF or later "
- : "revE or earlier ")
- : ""), pvt->mc_node_id);
+ amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
- dump_misc_regs(pvt);
+ /* Display and decode various registers for debug purposes. */
+ pvt->ops->dump_misc_regs(pvt);
return ret;
@@ -4370,12 +4063,12 @@ static int __init amd64_edac_init(void)
}
/* register stuff with EDAC MCE */
- if (boot_cpu_data.x86 >= 0x17)
+ if (boot_cpu_data.x86 >= 0x17) {
amd_register_ecc_decoder(decode_umc_error);
- else
+ } else {
amd_register_ecc_decoder(decode_bus_error);
-
- setup_pci_device();
+ setup_pci_device();
+ }
#ifdef CONFIG_X86_32
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
@@ -4427,10 +4120,8 @@ module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
- "Dave Peterson, Thayne Harbaugh");
-MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
- EDAC_AMD64_VERSION);
+MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
+MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " EDAC_AMD64_VERSION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 38e5ad95d010..e84fe0d4120a 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -114,22 +114,6 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
-#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
-#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
-#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
-#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
-#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
-#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
-#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
-#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
-#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
-#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
-#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
-#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
-#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
-#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
/*
* Function 1 - Address Map
@@ -215,8 +199,6 @@
#define DCT_SEL_HI 0x114
#define F15H_M60H_SCRCTRL 0x1C8
-#define F17H_SCR_BASE_ADDR 0x48
-#define F17H_SCR_LIMIT_ADDR 0x4C
/*
* Function 3 - Misc Control
@@ -291,25 +273,6 @@
#define UMC_SDP_INIT BIT(31)
-enum amd_families {
- K8_CPUS = 0,
- F10_CPUS,
- F15_CPUS,
- F15_M30H_CPUS,
- F15_M60H_CPUS,
- F16_CPUS,
- F16_M30H_CPUS,
- F17_CPUS,
- F17_M10H_CPUS,
- F17_M30H_CPUS,
- F17_M60H_CPUS,
- F17_M70H_CPUS,
- F19_CPUS,
- F19_M10H_CPUS,
- F19_M50H_CPUS,
- NUM_FAMILIES,
-};
-
/* Error injection control structure */
struct error_injection {
u32 section;
@@ -352,11 +315,21 @@ struct amd64_umc {
enum mem_type dram_type;
};
+struct amd64_family_flags {
+ /*
+ * Indicates that the system supports the new register offsets, etc.
+ * first introduced with Family 19h Model 10h.
+ */
+ __u64 zn_regs_v2 : 1,
+
+ __reserved : 63;
+};
+
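One consumer of this flag is the register-offset selection seen earlier in umc_read_mc_regs() via get_umc_reg(pvt, UMCCH_DIMM_CFG). A minimal sketch of such a helper, assuming a hypothetical UMCCH_DIMM_CFG_DDR5 offset for the Family 19h Model 10h layout:

	/* Sketch only; UMCCH_DIMM_CFG_DDR5 is a hypothetical placeholder offset. */
	static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
	{
		if (!pvt->flags.zn_regs_v2)
			return reg;

		if (reg == UMCCH_DIMM_CFG)
			return UMCCH_DIMM_CFG_DDR5;

		return reg;
	}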
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
- struct pci_dev *F0, *F1, *F2, *F3, *F6;
+ struct pci_dev *F1, *F2, *F3;
u16 mc_node_id; /* MC index of this MC node */
u8 fam; /* CPU family */
@@ -364,7 +337,6 @@ struct amd64_pvt {
u8 stepping; /* ... stepping */
int ext_model; /* extended model value of this node */
- int channel_count;
/* Raw registers */
u32 dclr0; /* DRAM Configuration Low DCT0 reg */
@@ -394,6 +366,12 @@ struct amd64_pvt {
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
+ const char *ctl_name;
+ u16 f1_id, f2_id;
+ /* Maximum number of memory controllers per die/node. */
+ u8 max_mcs;
+
+ struct amd64_family_flags flags;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
@@ -484,30 +462,15 @@ struct ecc_settings {
* functions and per device encoding/decoding logic.
*/
struct low_ops {
- int (*early_channel_count) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
- struct err_info *);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
- unsigned cs_mode, int cs_mask_nr);
-};
-
-struct amd64_family_flags {
- /*
- * Indicates that the system supports the new register offsets, etc.
- * first introduced with Family 19h Model 10h.
- */
- __u64 zn_regs_v2 : 1,
-
- __reserved : 63;
-};
-
-struct amd64_family_type {
- const char *ctl_name;
- u16 f0_id, f1_id, f2_id, f6_id;
- /* Maximum number of memory controllers per die/node. */
- u8 max_mcs;
- struct amd64_family_flags flags;
- struct low_ops ops;
+ void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci, u64 sys_addr,
+ struct err_info *err);
+ int (*dbam_to_cs)(struct amd64_pvt *pvt, u8 dct,
+ unsigned int cs_mode, int cs_mask_nr);
+ int (*hw_info_get)(struct amd64_pvt *pvt);
+ bool (*ecc_enabled)(struct amd64_pvt *pvt);
+ void (*setup_mci_misc_attrs)(struct mem_ctl_info *mci);
+ void (*dump_misc_regs)(struct amd64_pvt *pvt);
+ void (*get_err_info)(struct mce *m, struct err_info *err);
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 7508aa416ddb..ca718f63fcbc 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -593,5 +593,5 @@ module_init(amd8111_edac_init);
module_exit(amd8111_edac_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index 169353710982..28610ba514f4 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -354,5 +354,5 @@ module_init(amd8131_edac_init);
module_exit(amd8131_edac_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index ac7c9b42d4c7..7221b4bb6df2 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1462,7 +1462,7 @@ module_init(e752x_init);
module_exit(e752x_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
module_param(force_function_unhide, int, 0444);
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 497e710fca3d..5852b95fa470 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -596,8 +596,7 @@ module_init(e7xxx_init);
module_exit(e7xxx_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
- "Based on.work by Dan Hollis et al");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 19522c568aa5..0689e1510721 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -34,6 +34,9 @@
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
+/* Default workqueue processing interval on this instance, in msecs */
+#define DEFAULT_POLL_INTERVAL 1000
+
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
* whole one second to save timers firing all over the period
* between integral seconds
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -394,17 +397,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
* Then restart the workq on the new delay
*/
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
- unsigned long value)
+ unsigned long msec)
{
- unsigned long jiffs = msecs_to_jiffies(value);
-
- if (value == 1000)
- jiffs = round_jiffies_relative(value);
-
- edac_dev->poll_msec = value;
- edac_dev->delay = jiffs;
+ edac_dev->poll_msec = msec;
+ edac_dev->delay = msecs_to_jiffies(msec);
- edac_mod_work(&edac_dev->work, jiffs);
+ /* See comment in edac_device_workq_setup() above */
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_mod_work(&edac_dev->work, edac_dev->delay);
}
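The reworked helper keeps the rounding rule from edac_device_workq_setup(): only the default 1000 ms period is aligned to whole seconds. Illustrative calls, assuming an already-registered edac_dev:

	edac_device_reset_delay_period(edac_dev, 2000);				/* plain 2 s delay          */
	edac_device_reset_delay_period(edac_dev, DEFAULT_POLL_INTERVAL);	/* re-aligned to whole secs */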
int edac_device_alloc_index(void)
@@ -443,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
- /*
- * enable workq processing on this instance,
- * default = 1000 msec
- */
- edac_device_workq_setup(edac_dev, 1000);
+ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index ac678b4a21fc..010c26be5846 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -228,8 +228,9 @@ static struct kobj_type ktype_device_ctrl = {
*/
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
+ struct device *dev_root;
struct bus_type *edac_subsys;
- int err;
+ int err = -ENODEV;
edac_dbg(1, "\n");
@@ -247,15 +248,16 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
*/
edac_dev->owner = THIS_MODULE;
- if (!try_module_get(edac_dev->owner)) {
- err = -ENODEV;
+ if (!try_module_get(edac_dev->owner))
goto err_out;
- }
/* register */
- err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
- &edac_subsys->dev_root->kobj,
- "%s", edac_dev->name);
+ dev_root = bus_get_dev_root(edac_subsys);
+ if (dev_root) {
+ err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
+ &dev_root->kobj, "%s", edac_dev->name);
+ put_device(dev_root);
+ }
if (err) {
edac_dbg(1, "Failed to register '.../edac/%s'\n",
edac_dev->name);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 763c076d96f2..47593afdc234 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -53,7 +53,7 @@ bool edac_stop_work(struct delayed_work *work);
bool edac_mod_work(struct delayed_work *work, unsigned long delay);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
- *edac_dev, unsigned long value);
+ *edac_dev, unsigned long msec);
extern void edac_mc_reset_delay_period(unsigned long value);
/*
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 888d5728ecef..287cc51dbc86 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -337,8 +337,9 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
*/
static int edac_pci_main_kobj_setup(void)
{
- int err;
+ int err = -ENODEV;
struct bus_type *edac_subsys;
+ struct device *dev_root;
edac_dbg(0, "\n");
@@ -357,7 +358,6 @@ static int edac_pci_main_kobj_setup(void)
*/
if (!try_module_get(THIS_MODULE)) {
edac_dbg(1, "try_module_get() failed\n");
- err = -ENODEV;
goto decrement_count_fail;
}
@@ -369,9 +369,13 @@ static int edac_pci_main_kobj_setup(void)
}
/* Instanstiate the pci object */
- err = kobject_init_and_add(edac_pci_top_main_kobj,
- &ktype_edac_pci_main_kobj,
- &edac_subsys->dev_root->kobj, "pci");
+ dev_root = bus_get_dev_root(edac_subsys);
+ if (dev_root) {
+ err = kobject_init_and_add(edac_pci_top_main_kobj,
+ &ktype_edac_pci_main_kobj,
+ &dev_root->kobj, "pci");
+ put_device(dev_root);
+ }
if (err) {
edac_dbg(1, "Failed to register '.../edac/pci'\n");
goto kobject_init_and_add_fail;
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 61b76ec226af..19fba258ae10 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
drvdata = mci->pvt_info;
platform_set_drvdata(pdev, mci);
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
- return -ENOMEM;
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ res = -ENOMEM;
+ goto free;
+ }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -243,6 +245,7 @@ err2:
edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
+free:
edac_mc_free(mci);
return res;
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 65aeea53e2df..a897b6aff368 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -13,7 +13,7 @@
#include "edac_module.h"
#include "skx_common.h"
-#define I10NM_REVISION "v0.0.5"
+#define I10NM_REVISION "v0.0.6"
#define EDAC_MOD_STR "i10nm_edac"
/* Debug macros */
@@ -22,25 +22,34 @@
#define I10NM_GET_SCK_BAR(d, reg) \
pci_read_config_dword((d)->uracu, 0xd0, &(reg))
-#define I10NM_GET_IMC_BAR(d, i, reg) \
- pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
+#define I10NM_GET_IMC_BAR(d, i, reg) \
+ pci_read_config_dword((d)->uracu, \
+ (res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)\
- pci_read_config_dword((d)->sad_all, (offset) + (i) * 8, &(reg))
+ pci_read_config_dword((d)->sad_all, (offset) + (i) * \
+ (res_cfg->type == GNR ? 12 : 8), &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg) \
pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg) \
- pci_read_config_dword((d)->pcu_cr3, 0x90, &(reg))
+ pci_read_config_dword((d)->pcu_cr3, \
+ res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
+#define I10NM_GET_CAPID5_CFG(d, reg) \
+ pci_read_config_dword((d)->pcu_cr3, \
+ res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j) \
- readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
+ readl((m)->mbase + ((m)->hbm_mc ? 0x80c : \
+ (res_cfg->type == GNR ? 0xc0c : 0x2080c)) + \
(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i) \
readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i) \
- readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
+ readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : \
+ (res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i) \
- readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
+ readl((m)->mbase + ((m)->hbm_mc ? 0x814 : \
+ (res_cfg->type == GNR ? 0xc14 : 0x20814)) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset) \
readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
@@ -56,7 +65,10 @@
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg) \
((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
+#define I10NM_GNR_IMC_MMIO_OFFSET 0x24c000
+#define I10NM_GNR_IMC_MMIO_SIZE 0x4000
#define I10NM_HBM_IMC_MMIO_SIZE 0x9000
+#define I10NM_DDR_IMC_CH_CNT(reg) GET_BITFIELD(reg, 21, 24)
#define I10NM_IS_HBM_PRESENT(reg) GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg) GET_BITFIELD(reg, 29, 29)
@@ -148,35 +160,47 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
static void enable_retry_rd_err_log(bool enable)
{
+ int i, j, imc_num, chan_num;
struct skx_imc *imc;
struct skx_dev *d;
- int i, j;
edac_dbg(2, "\n");
- list_for_each_entry(d, i10nm_edac_list, list)
- for (i = 0; i < I10NM_NUM_IMC; i++) {
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ imc_num = res_cfg->ddr_imc_num;
+ chan_num = res_cfg->ddr_chan_num;
+
+ for (i = 0; i < imc_num; i++) {
imc = &d->imc[i];
if (!imc->mbase)
continue;
- for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
- if (imc->hbm_mc) {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm0,
- res_cfg->offsets_demand_hbm0,
- NULL);
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm1,
- res_cfg->offsets_demand_hbm1,
- NULL);
- } else {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub,
- res_cfg->offsets_demand,
- res_cfg->offsets_demand2);
- }
+ for (j = 0; j < chan_num; j++)
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub,
+ res_cfg->offsets_demand,
+ res_cfg->offsets_demand2);
+ }
+
+ imc_num += res_cfg->hbm_imc_num;
+ chan_num = res_cfg->hbm_chan_num;
+
+ for (; i < imc_num; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase || !imc->hbm_mc)
+ continue;
+
+ for (j = 0; j < chan_num; j++) {
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm0,
+ res_cfg->offsets_demand_hbm0,
+ NULL);
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm1,
+ res_cfg->offsets_demand_hbm1,
+ NULL);
}
+ }
}
}
@@ -311,6 +335,79 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
return pdev;
}
+/**
+ * i10nm_get_imc_num() - Get the number of present DDR memory controllers.
+ *
+ * @cfg : The pointer to the structure of EDAC resource configurations.
+ *
+ * For Granite Rapids CPUs, the number of present DDR memory controllers read
+ * at runtime overwrites the value statically configured in @cfg->ddr_imc_num.
+ * For other CPUs, the number of present DDR memory controllers is statically
+ * configured in @cfg->ddr_imc_num.
+ *
+ * RETURNS : 0 on success, < 0 on failure.
+ */
+static int i10nm_get_imc_num(struct res_config *cfg)
+{
+ int n, imc_num, chan_num = 0;
+ struct skx_dev *d;
+ u32 reg;
+
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus],
+ res_cfg->pcu_cr3_bdf.dev,
+ res_cfg->pcu_cr3_bdf.fun);
+ if (!d->pcu_cr3)
+ continue;
+
+ if (I10NM_GET_CAPID5_CFG(d, reg))
+ continue;
+
+ n = I10NM_DDR_IMC_CH_CNT(reg);
+
+ if (!chan_num) {
+ chan_num = n;
+ edac_dbg(2, "Get DDR CH number: %d\n", chan_num);
+ } else if (chan_num != n) {
+ i10nm_printk(KERN_NOTICE, "Get DDR CH numbers: %d, %d\n", chan_num, n);
+ }
+ }
+
+ switch (cfg->type) {
+ case GNR:
+ /*
+ * One channel per DDR memory controller for Granite Rapids CPUs.
+ */
+ imc_num = chan_num;
+
+ if (!imc_num) {
+ i10nm_printk(KERN_ERR, "Invalid DDR MC number\n");
+ return -ENODEV;
+ }
+
+ if (imc_num > I10NM_NUM_DDR_IMC) {
+ i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
+ return -EINVAL;
+ }
+
+ if (cfg->ddr_imc_num != imc_num) {
+ /*
+ * Store the number of present DDR memory controllers.
+ */
+ cfg->ddr_imc_num = imc_num;
+ edac_dbg(2, "Set DDR MC number: %d", imc_num);
+ }
+
+ return 0;
+ default:
+ /*
+ * For other CPUs, the number of present DDR memory controllers
+ * is statically pre-configured in cfg->ddr_imc_num.
+ */
+ return 0;
+ }
+}
+
static bool i10nm_check_2lm(struct res_config *cfg)
{
struct skx_dev *d;
@@ -318,9 +415,9 @@ static bool i10nm_check_2lm(struct res_config *cfg)
int i;
list_for_each_entry(d, i10nm_edac_list, list) {
- d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[1],
- PCI_SLOT(cfg->sad_all_devfn),
- PCI_FUNC(cfg->sad_all_devfn));
+ d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus],
+ res_cfg->sad_all_bdf.dev,
+ res_cfg->sad_all_bdf.fun);
if (!d->sad_all)
continue;
@@ -337,20 +434,39 @@ static bool i10nm_check_2lm(struct res_config *cfg)
}
/*
- * Check whether the error comes from DDRT by ICX/Tremont model specific error code.
- * Refer to SDM vol3B 16.11.3 Intel IMC MC error codes for IA32_MCi_STATUS.
+ * Check whether the error comes from DDRT by ICX/Tremont/SPR model specific error code.
+ * Refer to SDM vol3B 17.11.3/17.13.2 Intel IMC MC error codes for IA32_MCi_STATUS.
*/
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
- switch (mscod) {
- case 0x0106: case 0x0107:
- case 0x0800: case 0x0804:
- case 0x0806 ... 0x0808:
- case 0x080a ... 0x080e:
- case 0x0810: case 0x0811:
- case 0x0816: case 0x081e:
- case 0x081f:
- return true;
+ switch (res_cfg->type) {
+ case I10NM:
+ switch (mscod) {
+ case 0x0106: case 0x0107:
+ case 0x0800: case 0x0804:
+ case 0x0806 ... 0x0808:
+ case 0x080a ... 0x080e:
+ case 0x0810: case 0x0811:
+ case 0x0816: case 0x081e:
+ case 0x081f:
+ return true;
+ }
+
+ break;
+ case SPR:
+ switch (mscod) {
+ case 0x0800: case 0x0804:
+ case 0x0806 ... 0x0808:
+ case 0x080a ... 0x080e:
+ case 0x0810: case 0x0811:
+ case 0x0816: case 0x081e:
+ case 0x081f:
+ return true;
+ }
+
+ break;
+ default:
+ return false;
}
return false;
@@ -358,6 +474,7 @@ static bool i10nm_mscod_is_ddrt(u32 mscod)
static bool i10nm_mc_decode_available(struct mce *mce)
{
+#define ICX_IMCx_CHy 0x06666000
u8 bank;
if (!decoding_via_mca || mem_cfg_2lm)
@@ -371,21 +488,26 @@ static bool i10nm_mc_decode_available(struct mce *mce)
switch (res_cfg->type) {
case I10NM:
- if (bank < 13 || bank > 26)
- return false;
-
- /* DDRT errors can't be decoded from MCA bank registers */
- if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
+ /* Check whether the bank is one of {13,14,17,18,21,22,25,26} */
+ if (!(ICX_IMCx_CHy & (1 << bank)))
return false;
-
- if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
+ break;
+ case SPR:
+ if (bank < 13 || bank > 20)
return false;
-
- /* Check whether one of {13,14,17,18,21,22,25,26} */
- return ((bank - 13) & BIT(1)) == 0;
+ break;
default:
return false;
}
+
+ /* DDRT errors can't be decoded from MCA bank registers */
+ if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
+ return false;
+
+ if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
+ return false;
+
+ return true;
}
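ICX_IMCx_CHy encodes the eight valid ICX memory-controller bank numbers as set bits: 0x06666000 == BIT(13)|BIT(14)|BIT(17)|BIT(18)|BIT(21)|BIT(22)|BIT(25)|BIT(26), i.e. two channel banks per IMC, spaced four bank numbers apart. A hypothetical helper equivalent to the inline check:

	static bool bank_is_icx_imc_channel(u8 bank)
	{
		/* true for banks 13, 14, 17, 18, 21, 22, 25, 26 only */
		return bank <= 26 && (ICX_IMCx_CHy & (1U << bank));
	}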
static bool i10nm_mc_decode(struct decoded_addr *res)
@@ -407,9 +529,29 @@ static bool i10nm_mc_decode(struct decoded_addr *res)
switch (res_cfg->type) {
case I10NM:
- bank = m->bank - 13;
- res->imc = bank / 4;
- res->channel = bank % 2;
+ bank = m->bank - 13;
+ res->imc = bank / 4;
+ res->channel = bank % 2;
+ res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
+ res->row = GET_BITFIELD(m->misc, 19, 39);
+ res->bank_group = GET_BITFIELD(m->misc, 40, 41);
+ res->bank_address = GET_BITFIELD(m->misc, 42, 43);
+ res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
+ res->rank = GET_BITFIELD(m->misc, 56, 58);
+ res->dimm = res->rank >> 2;
+ res->rank = res->rank % 4;
+ break;
+ case SPR:
+ bank = m->bank - 13;
+ res->imc = bank / 2;
+ res->channel = bank % 2;
+ res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
+ res->row = GET_BITFIELD(m->misc, 19, 36);
+ res->bank_group = GET_BITFIELD(m->misc, 37, 38);
+ res->bank_address = GET_BITFIELD(m->misc, 39, 40);
+ res->bank_group |= GET_BITFIELD(m->misc, 41, 41) << 2;
+ res->rank = GET_BITFIELD(m->misc, 57, 57);
+ res->dimm = GET_BITFIELD(m->misc, 58, 58);
break;
default:
return false;
@@ -421,18 +563,101 @@ static bool i10nm_mc_decode(struct decoded_addr *res)
return false;
}
- res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
- res->row = GET_BITFIELD(m->misc, 19, 39);
- res->bank_group = GET_BITFIELD(m->misc, 40, 41);
- res->bank_address = GET_BITFIELD(m->misc, 42, 43);
- res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
- res->rank = GET_BITFIELD(m->misc, 56, 58);
- res->dimm = res->rank >> 2;
- res->rank = res->rank % 4;
-
return true;
}
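Worked out for the bank ranges above, the (imc, channel) mapping resolves as follows (illustration only):

	/*
	 * ICX:  imc = (bank - 13) / 4, channel = (bank - 13) % 2
	 *       bank 13 -> imc 0 ch 0    bank 14 -> imc 0 ch 1
	 *       bank 17 -> imc 1 ch 0    bank 18 -> imc 1 ch 1
	 *       bank 21 -> imc 2 ch 0    bank 22 -> imc 2 ch 1
	 *       bank 25 -> imc 3 ch 0    bank 26 -> imc 3 ch 1
	 *
	 * SPR:  imc = (bank - 13) / 2, channel = (bank - 13) % 2
	 *       banks 13..20 -> imc 0..3, channel alternating 0/1
	 */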
+/**
+ * get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
+ *
+ * @d : The pointer to the structure of CPU socket EDAC device.
+ * @logical_idx : The logical index of the present memory controller (0 ~ max present MC# - 1).
+ * @physical_idx : To store the corresponding physical index of @logical_idx.
+ *
+ * RETURNS : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
+ */
+static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *physical_idx)
+{
+#define GNR_MAX_IMC_PCI_CNT 28
+
+ struct pci_dev *mdev;
+ int i, logical = 0;
+
+ /*
+ * Detect present memory controllers from { PCI device: 8-5, function 7-1 }
+ */
+ for (i = 0; i < GNR_MAX_IMC_PCI_CNT; i++) {
+ mdev = pci_get_dev_wrapper(d->seg,
+ d->bus[res_cfg->ddr_mdev_bdf.bus],
+ res_cfg->ddr_mdev_bdf.dev + i / 7,
+ res_cfg->ddr_mdev_bdf.fun + i % 7);
+
+ if (mdev) {
+ if (logical == logical_idx) {
+ *physical_idx = i;
+ return mdev;
+ }
+
+ pci_dev_put(mdev);
+ logical++;
+ }
+ }
+
+ return NULL;
+}
+
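With gnr_cfg's ddr_mdev_bdf = {0, 5, 1} (defined further below), the loop above sweeps device 5 function 1 through device 8 function 7. A worked index split, assuming those BDF values:

	/* i = 9:  dev = 5 + 9 / 7  = 6, fn = 1 + 9 % 7  = 3  ->  probes xx:06.3 */
	/* i = 27: dev = 5 + 27 / 7 = 8, fn = 1 + 27 % 7 = 7  ->  probes xx:08.7 */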
+/**
+ * get_ddr_munit() - Get the resource of the i-th DDR memory controller.
+ *
+ * @d : The pointer to the structure of CPU socket EDAC device.
+ * @i : The index of the CPU socket relative DDR memory controller.
+ * @offset : To store the MMIO offset of the i-th DDR memory controller.
+ * @size : To store the MMIO size of the i-th DDR memory controller.
+ *
+ * RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
+ */
+static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsigned long *size)
+{
+ struct pci_dev *mdev;
+ int physical_idx;
+ u32 reg;
+
+ switch (res_cfg->type) {
+ case GNR:
+ if (I10NM_GET_IMC_BAR(d, 0, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get mc0 bar\n");
+ return NULL;
+ }
+
+ mdev = get_gnr_mdev(d, i, &physical_idx);
+ if (!mdev)
+ return NULL;
+
+ *offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
+ I10NM_GNR_IMC_MMIO_OFFSET +
+ physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
+ *size = I10NM_GNR_IMC_MMIO_SIZE;
+
+ break;
+ default:
+ if (I10NM_GET_IMC_BAR(d, i, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get mc%d bar\n", i);
+ return NULL;
+ }
+
+ mdev = pci_get_dev_wrapper(d->seg,
+ d->bus[res_cfg->ddr_mdev_bdf.bus],
+ res_cfg->ddr_mdev_bdf.dev + i,
+ res_cfg->ddr_mdev_bdf.fun);
+ if (!mdev)
+ return NULL;
+
+ *offset = I10NM_GET_IMC_MMIO_OFFSET(reg);
+ *size = I10NM_GET_IMC_MMIO_SIZE(reg);
+ }
+
+ return mdev;
+}
+
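For Granite Rapids, the per-controller MMIO window is derived from the socket BAR plus fixed offsets. A worked example, assuming physical_idx == 2 and the constants defined above:

	/*
	 * *offset = I10NM_GET_IMC_MMIO_OFFSET(reg)
	 *           + I10NM_GNR_IMC_MMIO_OFFSET      (0x24c000)
	 *           + 2 * I10NM_GNR_IMC_MMIO_SIZE    (2 * 0x4000)
	 *         = I10NM_GET_IMC_MMIO_OFFSET(reg) + 0x254000
	 * *size   = I10NM_GNR_IMC_MMIO_SIZE == 0x4000
	 */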
static int i10nm_get_ddr_munits(void)
{
struct pci_dev *mdev;
@@ -444,11 +669,15 @@ static int i10nm_get_ddr_munits(void)
u64 base;
list_for_each_entry(d, i10nm_edac_list, list) {
- d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);
+ d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus],
+ res_cfg->util_all_bdf.dev,
+ res_cfg->util_all_bdf.fun);
if (!d->util_all)
return -ENODEV;
- d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);
+ d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus],
+ res_cfg->uracu_bdf.dev,
+ res_cfg->uracu_bdf.fun);
if (!d->uracu)
return -ENODEV;
@@ -461,9 +690,9 @@ static int i10nm_get_ddr_munits(void)
edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
j++, base, reg);
- for (i = 0; i < I10NM_NUM_DDR_IMC; i++) {
- mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
- 12 + i, 0);
+ for (i = 0; i < res_cfg->ddr_imc_num; i++) {
+ mdev = get_ddr_munit(d, i, &off, &size);
+
if (i == 0 && !mdev) {
i10nm_printk(KERN_ERR, "No IMC found\n");
return -ENODEV;
@@ -473,13 +702,6 @@ static int i10nm_get_ddr_munits(void)
d->imc[i].mdev = mdev;
- if (I10NM_GET_IMC_BAR(d, i, reg)) {
- i10nm_printk(KERN_ERR, "Failed to get mc bar\n");
- return -ENODEV;
- }
-
- off = I10NM_GET_IMC_MMIO_OFFSET(reg);
- size = I10NM_GET_IMC_MMIO_SIZE(reg);
edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
i, base + off, size, reg);
@@ -519,7 +741,6 @@ static int i10nm_get_hbm_munits(void)
u64 base;
list_for_each_entry(d, i10nm_edac_list, list) {
- d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[1], 30, 3);
if (!d->pcu_cr3)
return -ENODEV;
@@ -540,11 +761,13 @@ static int i10nm_get_hbm_munits(void)
}
base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);
- lmc = I10NM_NUM_DDR_IMC;
+ lmc = res_cfg->ddr_imc_num;
+
+ for (i = 0; i < res_cfg->hbm_imc_num; i++) {
+ mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus],
+ res_cfg->hbm_mdev_bdf.dev + i / 4,
+ res_cfg->hbm_mdev_bdf.fun + i % 4);
- for (i = 0; i < I10NM_NUM_HBM_IMC; i++) {
- mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
- 12 + i / 4, 1 + i % 4);
if (i == 0 && !mdev) {
i10nm_printk(KERN_ERR, "No hbm mc found\n");
return -ENODEV;
@@ -594,8 +817,16 @@ static struct res_config i10nm_cfg0 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 4,
+ .ddr_chan_num = 2,
+ .ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
- .sad_all_devfn = PCI_DEVFN(29, 0),
+ .sad_all_bdf = {1, 29, 0},
+ .pcu_cr3_bdf = {1, 30, 3},
+ .util_all_bdf = {1, 29, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 12, 0},
+ .hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
.offsets_scrub = offsets_scrub_icx,
.offsets_demand = offsets_demand_icx,
@@ -605,8 +836,16 @@ static struct res_config i10nm_cfg1 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xd0,
+ .ddr_imc_num = 4,
+ .ddr_chan_num = 2,
+ .ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
- .sad_all_devfn = PCI_DEVFN(29, 0),
+ .sad_all_bdf = {1, 29, 0},
+ .pcu_cr3_bdf = {1, 30, 3},
+ .util_all_bdf = {1, 29, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 12, 0},
+ .hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
.offsets_scrub = offsets_scrub_icx,
.offsets_demand = offsets_demand_icx,
@@ -616,10 +855,21 @@ static struct res_config spr_cfg = {
.type = SPR,
.decs_did = 0x3252,
.busno_cfg_offset = 0xd0,
+ .ddr_imc_num = 4,
+ .ddr_chan_num = 2,
+ .ddr_dimm_num = 2,
+ .hbm_imc_num = 16,
+ .hbm_chan_num = 2,
+ .hbm_dimm_num = 1,
.ddr_chan_mmio_sz = 0x8000,
.hbm_chan_mmio_sz = 0x4000,
.support_ddr5 = true,
- .sad_all_devfn = PCI_DEVFN(10, 0),
+ .sad_all_bdf = {1, 10, 0},
+ .pcu_cr3_bdf = {1, 30, 3},
+ .util_all_bdf = {1, 29, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 12, 0},
+ .hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
.offsets_scrub = offsets_scrub_spr,
.offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
@@ -630,6 +880,23 @@ static struct res_config spr_cfg = {
.offsets_demand_hbm1 = offsets_demand_spr_hbm1,
};
+static struct res_config gnr_cfg = {
+ .type = GNR,
+ .decs_did = 0x3252,
+ .busno_cfg_offset = 0xd0,
+ .ddr_imc_num = 12,
+ .ddr_chan_num = 1,
+ .ddr_dimm_num = 2,
+ .ddr_chan_mmio_sz = 0x4000,
+ .support_ddr5 = true,
+ .sad_all_bdf = {0, 13, 0},
+ .pcu_cr3_bdf = {0, 5, 0},
+ .util_all_bdf = {0, 13, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 5, 1},
+ .sad_all_offset = 0x300,
+};
+
static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
@@ -637,6 +904,9 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SIERRAFOREST_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -656,7 +926,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
{
struct skx_pvt *pvt = mci->pvt_info;
struct skx_imc *imc = pvt->imc;
- u32 mtr, amap, mcddrtcfg;
+ u32 mtr, amap, mcddrtcfg = 0;
struct dimm_info *dimm;
int i, j, ndimms;
@@ -666,7 +936,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
ndimms = 0;
amap = I10NM_GET_AMAP(imc, i);
- mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
+
+ if (res_cfg->type != GNR)
+ mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
+
for (j = 0; j < imc->num_dimms; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
@@ -752,6 +1025,7 @@ static int __init i10nm_init(void)
struct skx_dev *d;
int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
u64 tolm, tohm;
+ int imc_num;
edac_dbg(2, "\n");
@@ -784,6 +1058,10 @@ static int __init i10nm_init(void)
return -ENODEV;
}
+ rc = i10nm_get_imc_num(cfg);
+ if (rc < 0)
+ goto fail;
+
mem_cfg_2lm = i10nm_check_2lm(cfg);
skx_set_mem_cfg(mem_cfg_2lm);
@@ -792,6 +1070,8 @@ static int __init i10nm_init(void)
if (i10nm_get_hbm_munits() && rc)
goto fail;
+ imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num;
+
list_for_each_entry(d, i10nm_edac_list, list) {
rc = skx_get_src_id(d, 0xf8, &src_id);
if (rc < 0)
@@ -802,7 +1082,7 @@ static int __init i10nm_init(void)
goto fail;
edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
- for (i = 0; i < I10NM_NUM_IMC; i++) {
+ for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
@@ -812,12 +1092,12 @@ static int __init i10nm_init(void)
d->imc[i].node_id = node_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
- d->imc[i].num_channels = I10NM_NUM_HBM_CHANNELS;
- d->imc[i].num_dimms = I10NM_NUM_HBM_DIMMS;
+ d->imc[i].num_channels = cfg->hbm_chan_num;
+ d->imc[i].num_dimms = cfg->hbm_dimm_num;
} else {
d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
- d->imc[i].num_channels = I10NM_NUM_DDR_CHANNELS;
- d->imc[i].num_dimms = I10NM_NUM_DDR_DIMMS;
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
}
rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index ba46057d4220..4b5a71f8739d 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1573,13 +1573,10 @@ module_init(i5000_init);
module_exit(i5000_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR
- ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
-MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
- I5000_REVISION);
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " I5000_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
module_param(misc_messages, int, 0444);
MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
-
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index f5d82518c15e..d470afe65001 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -909,7 +909,7 @@ static void i5100_do_inject(struct mem_ctl_info *mci)
*
* The injection code don't work without setting this register.
* The register needs to be flipped off then on else the hardware
- * will only preform the first injection.
+ * will only perform the first injection.
*
* Stop condition bits 7:4
* 1010 - Stop after one injection
@@ -1220,6 +1220,5 @@ module_init(i5100_init);
module_exit(i5100_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR
- ("Arthur Jones <ajones@riverbed.com>");
+MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index fbec90d00f1e..b8a497f0de28 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -355,8 +355,7 @@ module_init(i82860_init);
module_exit(i82860_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
- "Ben Woodard <woodard@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
module_param(edac_op_state, int, 0444);
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 35ceaca578e1..7c5e2b3c0daa 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -72,5 +72,4 @@ module_exit(fsl_ddr_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state,
- "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index e50d7928bf8f..55320546c174 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -711,5 +711,4 @@ module_exit(mpc85xx_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state,
- "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 97a27e42dd61..265e0fb39bc7 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -76,6 +76,8 @@
#define DRP0_INTERRUPT_ENABLE BIT(6)
#define SB_DB_DRP_INTERRUPT_ENABLE 0x3
+#define ECC_POLL_MSEC 5000
+
enum {
LLCC_DRAM_CE = 0,
LLCC_DRAM_UE,
@@ -213,7 +215,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
for (i = 0; i < reg_data.reg_cnt; i++) {
synd_reg = reg_data.synd_reg + (i * 4);
- ret = regmap_read(drv->regmap, drv->offsets[bank] + synd_reg,
+ ret = regmap_read(drv->regmaps[bank], synd_reg,
&synd_val);
if (ret)
goto clear;
@@ -222,8 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
reg_data.name, i, synd_val);
}
- ret = regmap_read(drv->regmap,
- drv->offsets[bank] + reg_data.count_status_reg,
+ ret = regmap_read(drv->regmaps[bank], reg_data.count_status_reg,
&err_cnt);
if (ret)
goto clear;
@@ -233,8 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
reg_data.name, err_cnt);
- ret = regmap_read(drv->regmap,
- drv->offsets[bank] + reg_data.ways_status_reg,
+ ret = regmap_read(drv->regmaps[bank], reg_data.ways_status_reg,
&err_ways);
if (ret)
goto clear;
@@ -252,7 +252,7 @@ clear:
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
- struct llcc_drv_data *drv = edev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
@@ -285,19 +285,17 @@ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
return ret;
}
-static irqreturn_t
-llcc_ecc_irq_handler(int irq, void *edev_ctl)
+static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
/* Iterate over the banks and look for Tag RAM or Data RAM errors */
for (i = 0; i < drv->num_banks; i++) {
- ret = regmap_read(drv->regmap,
- drv->offsets[i] + DRP_INTERRUPT_STATUS,
+ ret = regmap_read(drv->regmaps[i], DRP_INTERRUPT_STATUS,
&drp_error);
if (!ret && (drp_error & SB_ECC_ERROR)) {
@@ -312,8 +310,7 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
if (!ret)
irq_rc = IRQ_HANDLED;
- ret = regmap_read(drv->regmap,
- drv->offsets[i] + TRP_INTERRUPT_0_STATUS,
+ ret = regmap_read(drv->regmaps[i], TRP_INTERRUPT_0_STATUS,
&trp_error);
if (!ret && (trp_error & SB_ECC_ERROR)) {
@@ -332,6 +329,11 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
return irq_rc;
}
+static void llcc_ecc_check(struct edac_device_ctl_info *edev_ctl)
+{
+ llcc_ecc_irq_handler(0, edev_ctl);
+}
+
static int qcom_llcc_edac_probe(struct platform_device *pdev)
{
struct llcc_drv_data *llcc_driv_data = pdev->dev.platform_data;
@@ -358,31 +360,32 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
- edev_ctl->pvt_info = llcc_driv_data;
-
- rc = edac_device_add_device(edev_ctl);
- if (rc)
- goto out_mem;
-
- platform_set_drvdata(pdev, edev_ctl);
- /* Request for ecc irq */
+ /* Check if LLCC driver has passed ECC IRQ */
ecc_irq = llcc_driv_data->ecc_irq;
- if (ecc_irq < 0) {
- rc = -ENODEV;
- goto out_dev;
- }
- rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
+ if (ecc_irq > 0) {
+ /* Use interrupt mode if IRQ is available */
+ rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
- if (rc)
- goto out_dev;
+ if (!rc) {
+ edac_op_state = EDAC_OPSTATE_INT;
+ goto irq_done;
+ }
+ }
- return rc;
+ /* Fall back to polling mode otherwise */
+ edev_ctl->poll_msec = ECC_POLL_MSEC;
+ edev_ctl->edac_check = llcc_ecc_check;
+ edac_op_state = EDAC_OPSTATE_POLL;
-out_dev:
- edac_device_del_device(edev_ctl->dev);
-out_mem:
- edac_device_free_ctl_info(edev_ctl);
+irq_done:
+ rc = edac_device_add_device(edev_ctl);
+ if (rc) {
+ edac_device_free_ctl_info(edev_ctl);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, edev_ctl);
return rc;
}
@@ -397,12 +400,19 @@ static int qcom_llcc_edac_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id qcom_llcc_edac_id_table[] = {
+ { .name = "qcom_llcc_edac" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
+
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
.remove = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
+ .id_table = qcom_llcc_edac_id_table,
};
module_platform_driver(qcom_llcc_edac_driver);
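[Illustrative note, not part of the patch] The qcom_llcc_edac_probe() rework above registers the EDAC device only after choosing between interrupt mode (when the LLCC driver handed over a valid ecc_irq and the request succeeded) and a 5-second polling fallback that reuses the IRQ handler via llcc_ecc_check(). A minimal userspace sketch of that decision flow follows; request_ecc_irq() is a hypothetical stub standing in for devm_request_irq(), not a real kernel API.

#include <stdio.h>

#define ECC_POLL_MSEC 5000

enum op_state { OPSTATE_INT, OPSTATE_POLL };

/* Stand-in for devm_request_irq(): 0 on success, negative on failure. */
static int request_ecc_irq(int irq)
{
	return irq == 42 ? 0 : -1;	/* pretend only IRQ 42 can be claimed */
}

static enum op_state pick_mode(int ecc_irq, int *poll_msec)
{
	if (ecc_irq > 0 && request_ecc_irq(ecc_irq) == 0)
		return OPSTATE_INT;	/* interrupt mode */

	*poll_msec = ECC_POLL_MSEC;	/* otherwise fall back to polling */
	return OPSTATE_POLL;
}

int main(void)
{
	int poll_msec = 0;
	enum op_state mode = pick_mode(-1, &poll_msec);

	printf("mode=%s poll_msec=%d\n",
	       mode == OPSTATE_INT ? "interrupt" : "poll", poll_msec);
	return 0;
}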
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index d0aef83dca2a..61e979d5437a 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -415,8 +415,7 @@ module_init(r82600_init);
module_exit(r82600_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
- "on behalf of EADS Astrium");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
module_param(disable_hardware_scrub, bool, 0644);
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 9397abb42c49..0a862336a7ce 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -510,7 +510,7 @@ rir_found:
}
static u8 skx_close_row[] = {
- 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
};
static u8 skx_close_column[] = {
@@ -518,7 +518,7 @@ static u8 skx_close_column[] = {
};
static u8 skx_open_row[] = {
- 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
};
static u8 skx_open_column[] = {
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index f0f8e98f6efb..ce3e0069e028 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -560,44 +560,28 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
tp_event = HW_EVENT_ERR_CORRECTED;
}
- /*
- * According to Intel Architecture spec vol 3B,
- * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
- * memory errors should fit one of these masks:
- * 000f 0000 1mmm cccc (binary)
- * 000f 0010 1mmm cccc (binary) [RAM used as cache]
- * where:
- * f = Correction Report Filtering Bit. If 1, subsequent errors
- * won't be shown
- * mmm = error type
- * cccc = channel
- * If the mask doesn't match, report an error to the parsing logic
- */
- if (!((errcode & 0xef80) == 0x80 || (errcode & 0xef80) == 0x280)) {
- optype = "Can't parse: it is not a mem";
- } else {
- switch (optypenum) {
- case 0:
- optype = "generic undef request error";
- break;
- case 1:
- optype = "memory read error";
- break;
- case 2:
- optype = "memory write error";
- break;
- case 3:
- optype = "addr/cmd error";
- break;
- case 4:
- optype = "memory scrubbing error";
- scrub_err = true;
- break;
- default:
- optype = "reserved";
- break;
- }
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ scrub_err = true;
+ break;
+ default:
+ optype = "reserved";
+ break;
}
+
if (res->decoded_by_adxl) {
len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
@@ -632,12 +616,18 @@ static bool skx_error_in_1st_level_mem(const struct mce *m)
if (!skx_mem_cfg_2lm)
return false;
- errcode = GET_BITFIELD(m->status, 0, 15);
+ errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
- if ((errcode & 0xef80) != 0x280)
- return false;
+ return errcode == MCACOD_EXT_MEM_ERR;
+}
- return true;
+static bool skx_error_in_mem(const struct mce *m)
+{
+ u32 errcode;
+
+ errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
+
+ return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
}
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
@@ -651,13 +641,13 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
- /* ignore unless this is memory related with an address */
- if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+ /* Ignore unless this is memory related with an address */
+ if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
res.mce = mce;
- res.addr = mce->addr;
+ res.addr = mce->addr & MCI_ADDR_PHYSADDR;
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
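[Illustrative note, not part of the patch] skx_mce_check_error() now classifies an MCE as memory-related by masking the low 16 bits of IA32_MCi_STATUS with MCACOD_MEM_ERR_MASK and comparing against MCACOD_MEM_CTL_ERR / MCACOD_EXT_MEM_ERR (definitions added to skx_common.h below). A self-contained sketch of that classification; the status values are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define GET_BITFIELD(v, lo, hi) (((v) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

#define MCACOD_MEM_ERR_MASK	0xef80
#define MCACOD_MEM_CTL_ERR	0x80	/* 1LM memory, or 2LM "far" memory */
#define MCACOD_EXT_MEM_ERR	0x280	/* 2LM "near" memory used as cache */

static bool error_in_mem(uint64_t status)
{
	uint32_t errcode = GET_BITFIELD(status, 0, 15) & MCACOD_MEM_ERR_MASK;

	return errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR;
}

int main(void)
{
	/* Illustrative IA32_MCi_STATUS values, not captured from real hardware */
	uint64_t mem_read_err = 0x9c00000000010091ULL;	/* 1mmm cccc = 1001 0001: read error, channel 1 */
	uint64_t non_mem_err  = 0x9c00000000000005ULL;

	printf("0x%llx -> %s\n", (unsigned long long)mem_read_err,
	       error_in_mem(mem_read_err) ? "memory error" : "not memory");
	printf("0x%llx -> %s\n", (unsigned long long)non_mem_err,
	       error_in_mem(non_mem_err) ? "memory error" : "not memory");
	return 0;
}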
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 0cbadd3d2cd3..b6d3607dffe2 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -33,7 +33,7 @@
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 4
+#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
@@ -57,6 +57,30 @@
#define MCI_MISC_ECC_DDRT 8 /* read from DDRT */
/*
+ * According to Intel Architecture spec vol 3B,
+ * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
+ * memory errors should fit one of these masks:
+ * 000f 0000 1mmm cccc (binary)
+ * 000f 0010 1mmm cccc (binary) [RAM used as cache]
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ */
+#define MCACOD_MEM_ERR_MASK 0xef80
+/*
+ * Errors from either the memory of the 1-level memory system or the
+ * 2nd level memory (the slow "far" memory) of the 2-level memory system.
+ */
+#define MCACOD_MEM_CTL_ERR 0x80
+/*
+ * Errors from the 1st level memory (the fast "near" memory as cache)
+ * of the 2-level memory system.
+ */
+#define MCACOD_EXT_MEM_ERR 0x280
+
+/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
* memory controllers on the die.
@@ -105,7 +129,8 @@ struct skx_pvt {
enum type {
SKX,
I10NM,
- SPR
+ SPR,
+ GNR
};
enum {
@@ -149,19 +174,47 @@ struct decoded_addr {
bool decoded_by_adxl;
};
+struct pci_bdf {
+ u32 bus : 8;
+ u32 dev : 5;
+ u32 fun : 3;
+};
+
struct res_config {
enum type type;
/* Configuration agent device ID */
unsigned int decs_did;
/* Default bus number configuration register offset */
int busno_cfg_offset;
+ /* DDR memory controllers per socket */
+ int ddr_imc_num;
+ /* DDR channels per DDR memory controller */
+ int ddr_chan_num;
+ /* DDR DIMMs per DDR memory channel */
+ int ddr_dimm_num;
/* Per DDR channel memory-mapped I/O size */
int ddr_chan_mmio_sz;
+ /* HBM memory controllers per socket */
+ int hbm_imc_num;
+ /* HBM channels per HBM memory controller */
+ int hbm_chan_num;
+ /* HBM DIMMs per HBM memory channel */
+ int hbm_dimm_num;
/* Per HBM channel memory-mapped I/O size */
int hbm_chan_mmio_sz;
bool support_ddr5;
- /* SAD device number and function number */
- unsigned int sad_all_devfn;
+ /* SAD device BDF */
+ struct pci_bdf sad_all_bdf;
+ /* PCU device BDF */
+ struct pci_bdf pcu_cr3_bdf;
+ /* UTIL device BDF */
+ struct pci_bdf util_all_bdf;
+ /* URACU device BDF */
+ struct pci_bdf uracu_bdf;
+ /* DDR mdev device BDF */
+ struct pci_bdf ddr_mdev_bdf;
+ /* HBM mdev device BDF */
+ struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
/* Offsets of retry_rd_err_log registers */
u32 *offsets_scrub;
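[Illustrative note, not part of the patch] The new struct pci_bdf above replaces the packed sad_all_devfn value with explicit bus/device/function bitfields, one per global PCI device the driver needs to locate. The sketch below mirrors that layout and composes a devfn the same way the kernel's PCI_DEVFN(slot, func) macro does ((slot << 3) | func); the BDF numbers are illustrative, not the real register locations on any platform.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the new struct pci_bdf from skx_common.h */
struct pci_bdf {
	uint32_t bus : 8;
	uint32_t dev : 5;
	uint32_t fun : 3;
};

/* Same composition as the kernel's PCI_DEVFN(slot, func) macro */
static unsigned int devfn(const struct pci_bdf *bdf)
{
	return ((bdf->dev & 0x1f) << 3) | (bdf->fun & 0x07);
}

int main(void)
{
	/* Illustrative BDF only */
	struct pci_bdf sad_all = { .bus = 30, .dev = 29, .fun = 0 };

	printf("bus 0x%02x devfn 0x%02x\n", sad_all.bus, devfn(&sad_all));
	return 0;
}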
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
new file mode 100644
index 000000000000..ac7d1e0b324c
--- /dev/null
+++ b/drivers/edac/zynqmp_edac.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP OCM ECC Driver
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+#define ZYNQMP_OCM_EDAC_MSG_SIZE 256
+
+#define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm"
+
+/* Error/Interrupt registers */
+#define ERR_CTRL_OFST 0x0
+#define OCM_ISR_OFST 0x04
+#define OCM_IMR_OFST 0x08
+#define OCM_IEN_OFST 0x0C
+#define OCM_IDS_OFST 0x10
+
+/* ECC control register */
+#define ECC_CTRL_OFST 0x14
+
+/* Correctable error info registers */
+#define CE_FFA_OFST 0x1C
+#define CE_FFD0_OFST 0x20
+#define CE_FFD1_OFST 0x24
+#define CE_FFD2_OFST 0x28
+#define CE_FFD3_OFST 0x2C
+#define CE_FFE_OFST 0x30
+
+/* Uncorrectable error info registers */
+#define UE_FFA_OFST 0x34
+#define UE_FFD0_OFST 0x38
+#define UE_FFD1_OFST 0x3C
+#define UE_FFD2_OFST 0x40
+#define UE_FFD3_OFST 0x44
+#define UE_FFE_OFST 0x48
+
+/* ECC control register bit field definitions */
+#define ECC_CTRL_CLR_CE_ERR 0x40
+#define ECC_CTRL_CLR_UE_ERR 0x80
+
+/* Fault injection data and count registers */
+#define OCM_FID0_OFST 0x4C
+#define OCM_FID1_OFST 0x50
+#define OCM_FID2_OFST 0x54
+#define OCM_FID3_OFST 0x58
+#define OCM_FIC_OFST 0x74
+
+#define UE_MAX_BITPOS_LOWER 31
+#define UE_MIN_BITPOS_UPPER 32
+#define UE_MAX_BITPOS_UPPER 63
+
+/* Interrupt masks */
+#define OCM_CEINTR_MASK BIT(6)
+#define OCM_UEINTR_MASK BIT(7)
+#define OCM_ECC_ENABLE_MASK BIT(0)
+
+#define OCM_FICOUNT_MASK GENMASK(23, 0)
+#define OCM_NUM_UE_BITPOS 2
+#define OCM_BASEVAL 0xFFFC0000
+#define EDAC_DEVICE "ZynqMP-OCM"
+
+/**
+ * struct ecc_error_info - ECC error log information
+ * @addr: Fault generated at this address
+ * @fault_lo: Generated fault data (lower 32-bit)
+ * @fault_hi: Generated fault data (upper 32-bit)
+ */
+struct ecc_error_info {
+ u32 addr;
+ u32 fault_lo;
+ u32 fault_hi;
+};
+
+/**
+ * struct ecc_status - ECC status information to report
+ * @ce_cnt: Correctable error count
+ * @ue_cnt: Uncorrectable error count
+ * @ceinfo: Correctable error log information
+ * @ueinfo: Uncorrectable error log information
+ */
+struct ecc_status {
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_error_info ceinfo;
+ struct ecc_error_info ueinfo;
+};
+
+/**
+ * struct edac_priv - OCM private instance data
+ * @baseaddr: Base address of the OCM
+ * @message: Buffer for framing the event specific info
+ * @stat: ECC status information
+ * @ce_cnt: Correctable Error count
+ * @ue_cnt: Uncorrectable Error count
+ * @debugfs_dir: Directory entry for debugfs
+ * @ce_bitpos: Bit position for Correctable Error
+ * @ue_bitpos: Array to store UnCorrectable Error bit positions
+ * @fault_injection_cnt: Fault Injection Counter value
+ */
+struct edac_priv {
+ void __iomem *baseaddr;
+ char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
+ struct ecc_status stat;
+ u32 ce_cnt;
+ u32 ue_cnt;
+#ifdef CONFIG_EDAC_DEBUG
+ struct dentry *debugfs_dir;
+ u8 ce_bitpos;
+ u8 ue_bitpos[OCM_NUM_UE_BITPOS];
+ u32 fault_injection_cnt;
+#endif
+};
+
+/**
+ * get_error_info - Get the current ECC error info
+ * @base: Pointer to the base address of the OCM
+ * @p: Pointer to the OCM ECC status structure
+ * @mask: Status register mask value
+ *
+ * Determines whether there is any ECC error
+ *
+ */
+static void get_error_info(void __iomem *base, struct ecc_status *p, int mask)
+{
+ if (mask & OCM_CEINTR_MASK) {
+ p->ce_cnt++;
+ p->ceinfo.fault_lo = readl(base + CE_FFD0_OFST);
+ p->ceinfo.fault_hi = readl(base + CE_FFD1_OFST);
+ p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
+ writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
+ } else if (mask & OCM_UEINTR_MASK) {
+ p->ue_cnt++;
+ p->ueinfo.fault_lo = readl(base + UE_FFD0_OFST);
+ p->ueinfo.fault_hi = readl(base + UE_FFD1_OFST);
+ p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
+ writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
+ }
+}
+
+/**
+ * handle_error - Handle error types CE and UE
+ * @dci: Pointer to the EDAC device instance
+ * @p: Pointer to the OCM ECC status structure
+ *
+ * Handles correctable and uncorrectable errors.
+ */
+static void handle_error(struct edac_device_ctl_info *dci, struct ecc_status *p)
+{
+ struct edac_priv *priv = dci->pvt_info;
+ struct ecc_error_info *pinf;
+
+ if (p->ce_cnt) {
+ pinf = &p->ceinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
+ "CE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
+ edac_device_handle_ce(dci, 0, 0, priv->message);
+ }
+
+ if (p->ue_cnt) {
+ pinf = &p->ueinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
+ "UE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
+ edac_device_handle_ue(dci, 0, 0, priv->message);
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * intr_handler - ISR routine
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * Return: IRQ_NONE, if CE/UE interrupt not set or IRQ_HANDLED otherwise
+ */
+static irqreturn_t intr_handler(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *dci = dev_id;
+ struct edac_priv *priv = dci->pvt_info;
+ int regval;
+
+ regval = readl(priv->baseaddr + OCM_ISR_OFST);
+ if (!(regval & (OCM_CEINTR_MASK | OCM_UEINTR_MASK))) {
+ WARN_ONCE(1, "Unhandled IRQ%d, ISR: 0x%x", irq, regval);
+ return IRQ_NONE;
+ }
+
+ get_error_info(priv->baseaddr, &priv->stat, regval);
+
+ priv->ce_cnt += priv->stat.ce_cnt;
+ priv->ue_cnt += priv->stat.ue_cnt;
+ handle_error(dci, &priv->stat);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * get_eccstate - Return the ECC status
+ * @base: Pointer to the OCM base address
+ *
+ * Get the ECC enable/disable status
+ *
+ * Return: ECC status 0/1.
+ */
+static bool get_eccstate(void __iomem *base)
+{
+ return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/**
+ * write_fault_count - write fault injection count
+ * @priv: Pointer to the EDAC private struct
+ *
+ * Update the fault injection count register; once the counter reaches
+ * zero, it injects errors
+ */
+static void write_fault_count(struct edac_priv *priv)
+{
+ u32 ficount = priv->fault_injection_cnt;
+
+ if (ficount & ~OCM_FICOUNT_MASK) {
+ ficount &= OCM_FICOUNT_MASK;
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "Fault injection count value truncated to %d\n", ficount);
+ }
+
+ writel(ficount, priv->baseaddr + OCM_FIC_OFST);
+}
+
+/*
+ * To get the Correctable Error injected, the following steps are needed:
+ * - Setup the optional Fault Injection Count:
+ * echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
+ * - Write the Correctable Error bit position value:
+ * echo <bit_pos val> > /sys/kernel/debug/edac/ocm/inject_ce_bitpos
+ */
+static ssize_t inject_ce_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct edac_priv *priv = edac_dev->pvt_info;
+ int ret;
+
+ if (!data)
+ return -EFAULT;
+
+ ret = kstrtou8_from_user(data, count, 0, &priv->ce_bitpos);
+ if (ret)
+ return ret;
+
+ if (priv->ce_bitpos > UE_MAX_BITPOS_UPPER)
+ return -EINVAL;
+
+ if (priv->ce_bitpos <= UE_MAX_BITPOS_LOWER) {
+ writel(BIT(priv->ce_bitpos), priv->baseaddr + OCM_FID0_OFST);
+ writel(0, priv->baseaddr + OCM_FID1_OFST);
+ } else {
+ writel(BIT(priv->ce_bitpos - UE_MIN_BITPOS_UPPER),
+ priv->baseaddr + OCM_FID1_OFST);
+ writel(0, priv->baseaddr + OCM_FID0_OFST);
+ }
+
+ write_fault_count(priv);
+
+ return count;
+}
+
+static const struct file_operations inject_ce_fops = {
+ .open = simple_open,
+ .write = inject_ce_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * To get the Uncorrectable Error injected, the following steps are needed:
+ * - Setup the optional Fault Injection Count:
+ * echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
+ * - Write the Uncorrectable Error bit position values:
+ * echo <bit_pos0 val>,<bit_pos1 val> > /sys/kernel/debug/edac/ocm/inject_ue_bitpos
+ */
+static ssize_t inject_ue_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct edac_priv *priv = edac_dev->pvt_info;
+ char buf[6], *pbuf, *token[2];
+ u64 ue_bitpos;
+ int i, ret;
+ u8 len;
+
+ if (!data)
+ return -EFAULT;
+
+ len = min_t(size_t, count, sizeof(buf));
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pbuf = &buf[0];
+ for (i = 0; i < OCM_NUM_UE_BITPOS; i++)
+ token[i] = strsep(&pbuf, ",");
+
+ ret = kstrtou8(token[0], 0, &priv->ue_bitpos[0]);
+ if (ret)
+ return ret;
+
+ ret = kstrtou8(token[1], 0, &priv->ue_bitpos[1]);
+ if (ret)
+ return ret;
+
+ if (priv->ue_bitpos[0] > UE_MAX_BITPOS_UPPER ||
+ priv->ue_bitpos[1] > UE_MAX_BITPOS_UPPER)
+ return -EINVAL;
+
+ if (priv->ue_bitpos[0] == priv->ue_bitpos[1]) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Bit positions should not be equal\n");
+ return -EINVAL;
+ }
+
+ ue_bitpos = BIT(priv->ue_bitpos[0]) | BIT(priv->ue_bitpos[1]);
+
+ writel((u32)ue_bitpos, priv->baseaddr + OCM_FID0_OFST);
+ writel((u32)(ue_bitpos >> 32), priv->baseaddr + OCM_FID1_OFST);
+
+ write_fault_count(priv);
+
+ return count;
+}
+
+static const struct file_operations inject_ue_fops = {
+ .open = simple_open,
+ .write = inject_ue_write,
+ .llseek = generic_file_llseek,
+};
+
+static void setup_debugfs(struct edac_device_ctl_info *edac_dev)
+{
+ struct edac_priv *priv = edac_dev->pvt_info;
+
+ priv->debugfs_dir = edac_debugfs_create_dir("ocm");
+ if (!priv->debugfs_dir)
+ return;
+
+ edac_debugfs_create_x32("inject_fault_count", 0644, priv->debugfs_dir,
+ &priv->fault_injection_cnt);
+ edac_debugfs_create_file("inject_ue_bitpos", 0644, priv->debugfs_dir,
+ edac_dev, &inject_ue_fops);
+ edac_debugfs_create_file("inject_ce_bitpos", 0644, priv->debugfs_dir,
+ edac_dev, &inject_ce_fops);
+}
+#endif
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct edac_priv *priv;
+ void __iomem *baseaddr;
+ struct resource *res;
+ int irq, ret;
+
+ baseaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ if (!get_eccstate(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_DEVICE, "ECC not enabled\n");
+ return -ENXIO;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
+ 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!dci)
+ return -ENOMEM;
+
+ priv = dci->pvt_info;
+ platform_set_drvdata(pdev, dci);
+ dci->dev = &pdev->dev;
+ priv->baseaddr = baseaddr;
+ dci->mod_name = pdev->dev.driver->name;
+ dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
+ dci->dev_name = dev_name(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto free_dev_ctl;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, intr_handler, 0,
+ dev_name(&pdev->dev), dci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
+ goto free_dev_ctl;
+ }
+
+ /* Enable UE, CE interrupts */
+ writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IEN_OFST);
+
+#ifdef CONFIG_EDAC_DEBUG
+ setup_debugfs(dci);
+#endif
+
+ ret = edac_device_add_device(dci);
+ if (ret)
+ goto free_dev_ctl;
+
+ return 0;
+
+free_dev_ctl:
+ edac_device_free_ctl_info(dci);
+
+ return ret;
+}
+
+static int edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct edac_priv *priv = dci->pvt_info;
+
+ /* Disable UE, CE interrupts */
+ writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IDS_OFST);
+
+#ifdef CONFIG_EDAC_DEBUG
+ debugfs_remove_recursive(priv->debugfs_dir);
+#endif
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_ocm_edac_match[] = {
+ { .compatible = "xlnx,zynqmp-ocmc-1.0"},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
+
+static struct platform_driver zynqmp_ocm_edac_driver = {
+ .driver = {
+ .name = "zynqmp-ocm-edac",
+ .of_match_table = zynqmp_ocm_edac_match,
+ },
+ .probe = edac_probe,
+ .remove = edac_remove,
+};
+
+module_platform_driver(zynqmp_ocm_edac_driver);
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("Xilinx ZynqMP OCM ECC driver");
+MODULE_LICENSE("GPL");
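[Illustrative note, not part of the patch] The inject_ce_write()/inject_ue_write() handlers in the new driver above split a bit position within the 64-bit OCM word across the two 32-bit fault-injection data registers: bits 0-31 go into OCM_FID0, bits 32-63 into OCM_FID1. A small standalone sketch of that split, reusing the driver's UE_* bit-position limits:

#include <stdio.h>
#include <stdint.h>

#define UE_MAX_BITPOS_LOWER	31
#define UE_MIN_BITPOS_UPPER	32
#define UE_MAX_BITPOS_UPPER	63

/* Compute the FID0/FID1 register values for a single fault bit position */
static int fault_regs(unsigned int bitpos, uint32_t *fid0, uint32_t *fid1)
{
	if (bitpos > UE_MAX_BITPOS_UPPER)
		return -1;

	if (bitpos <= UE_MAX_BITPOS_LOWER) {
		*fid0 = 1u << bitpos;
		*fid1 = 0;
	} else {
		*fid0 = 0;
		*fid1 = 1u << (bitpos - UE_MIN_BITPOS_UPPER);
	}
	return 0;
}

int main(void)
{
	uint32_t fid0, fid1;

	for (unsigned int pos = 30; pos <= 34; pos++)
		if (!fault_regs(pos, &fid0, &fid1))
			printf("bitpos %2u -> FID0=0x%08x FID1=0x%08x\n",
			       pos, fid0, fid1);
	return 0;
}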
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 65bffde137e3..713582cc27d1 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -127,9 +127,9 @@ static int eisa_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int eisa_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int eisa_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct eisa_device *edev = to_eisa_device(dev);
+ const struct eisa_device *edev = to_eisa_device(dev);
add_uevent_var(env, "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig);
return 0;
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 930c2332c3c4..8173e60bb808 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -20,8 +20,8 @@ static struct eisa_root_device pci_eisa_root;
static int __init pci_eisa_init(struct pci_dev *pdev)
{
- int rc, i;
struct resource *res, *bus_res = NULL;
+ int rc;
if ((rc = pci_enable_device (pdev))) {
dev_err(&pdev->dev, "Could not enable device\n");
@@ -38,7 +38,7 @@ static int __init pci_eisa_init(struct pci_dev *pdev)
* eisa_root_register() can only deal with a single io port resource,
* so we use the first valid io port resource.
*/
- pci_bus_for_each_resource(pdev->bus, res, i)
+ pci_bus_for_each_resource(pdev->bus, res)
if (res && (res->flags & IORESOURCE_IO)) {
bus_res = res;
break;
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 89a6449e3f4a..2c55f06ba699 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -537,6 +537,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
cht_wc_extcon_set_5v_boost(ext, false);
break;
case INTEL_CHT_WC_LENOVO_YOGABOOK1:
+ case INTEL_CHT_WC_LENOVO_YT3_X90:
/* Do this first, as it may very well return -EPROBE_DEFER. */
ret = cht_wc_extcon_get_role_sw_and_regulator(ext);
if (ret)
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e1c71359b605..d43ba8e7260d 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1013,7 +1013,7 @@ ATTRIBUTE_GROUPS(extcon);
static int create_extcon_class(void)
{
if (!extcon_class) {
- extcon_class = class_create(THIS_MODULE, "extcon");
+ extcon_class = class_create("extcon");
if (IS_ERR(extcon_class))
return PTR_ERR(extcon_class);
extcon_class->dev_groups = extcon_groups;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9c89f7d53e99..2c16ee8fd842 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -111,6 +111,7 @@ struct inbound_transaction_resource {
struct client_resource resource;
struct fw_card *card;
struct fw_request *request;
+ bool is_fcp;
void *data;
size_t length;
};
@@ -643,19 +644,14 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
client->device->max_speed);
}
-static inline bool is_fcp_request(struct fw_request *request)
-{
- return request == NULL;
-}
-
static void release_request(struct client *client,
struct client_resource *resource)
{
struct inbound_transaction_resource *r = container_of(resource,
struct inbound_transaction_resource, resource);
- if (is_fcp_request(r->request))
- kfree(r->data);
+ if (r->is_fcp)
+ fw_request_put(r->request);
else
fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
@@ -669,15 +665,20 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
struct address_handler_resource *handler = callback_data;
+ bool is_fcp = is_in_fcp_region(offset, length);
struct inbound_transaction_resource *r;
struct inbound_transaction_event *e;
size_t event_size0;
- void *fcp_frame = NULL;
int ret;
/* card may be different from handler->client->device->card */
fw_card_get(card);
+	// Extend the lifetime of the request so that its payload remains safely accessible in
+	// the process context of the client.
+ if (is_fcp)
+ fw_request_get(request);
+
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (r == NULL || e == NULL)
@@ -685,21 +686,10 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
r->card = card;
r->request = request;
+ r->is_fcp = is_fcp;
r->data = payload;
r->length = length;
- if (is_fcp_request(request)) {
- /*
- * FIXME: Let core-transaction.c manage a
- * single reference-counted copy?
- */
- fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
- if (fcp_frame == NULL)
- goto failed;
-
- r->data = fcp_frame;
- }
-
r->resource.release = release_request;
ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
if (ret < 0)
@@ -741,10 +731,11 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
failed:
kfree(r);
kfree(e);
- kfree(fcp_frame);
- if (!is_fcp_request(request))
+ if (!is_fcp)
fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+ else
+ fw_request_put(request);
fw_card_put(card);
}
@@ -819,17 +810,19 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
r = container_of(resource, struct inbound_transaction_resource,
resource);
- if (is_fcp_request(r->request))
+ if (r->is_fcp) {
+ fw_request_put(r->request);
goto out;
+ }
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;
- kfree(r->request);
+ fw_request_put(r->request);
goto out;
}
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
ret = -EFAULT;
- kfree(r->request);
+ fw_request_put(r->request);
goto out;
}
fw_send_response(r->card, r->request, a->rcode);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index adddd8c45d0c..aa597cda0d88 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -133,7 +133,7 @@ static void get_ids(const u32 *directory, int *id)
}
}
-static void get_modalias_ids(struct fw_unit *unit, int *id)
+static void get_modalias_ids(const struct fw_unit *unit, int *id)
{
get_ids(&fw_parent_device(unit)->config_rom[5], id);
get_ids(unit->directory, id);
@@ -195,7 +195,7 @@ static void fw_unit_remove(struct device *dev)
driver->remove(fw_unit(dev));
}
-static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
+static int get_modalias(const struct fw_unit *unit, char *buffer, size_t buffer_size)
{
int id[] = {0, 0, 0, 0};
@@ -206,9 +206,9 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
id[0], id[1], id[2], id[3]);
}
-static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fw_unit_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct fw_unit *unit = fw_unit(dev);
+ const struct fw_unit *unit = fw_unit(dev);
char modalias[64];
get_modalias(unit, modalias, sizeof(modalias));
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index af498d767702..a9f70c96323e 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -535,12 +535,6 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
-static bool is_in_fcp_region(u64 offset, size_t length)
-{
- return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
- offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
-}
-
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -617,6 +611,7 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
EXPORT_SYMBOL(fw_core_remove_address_handler);
struct fw_request {
+ struct kref kref;
struct fw_packet response;
u32 request_header[4];
int ack;
@@ -625,13 +620,33 @@ struct fw_request {
u32 data[];
};
+void fw_request_get(struct fw_request *request)
+{
+ kref_get(&request->kref);
+}
+
+static void release_request(struct kref *kref)
+{
+ struct fw_request *request = container_of(kref, struct fw_request, kref);
+
+ kfree(request);
+}
+
+void fw_request_put(struct fw_request *request)
+{
+ kref_put(&request->kref, release_request);
+}
+
static void free_response_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- struct fw_request *request;
+ struct fw_request *request = container_of(packet, struct fw_request, response);
- request = container_of(packet, struct fw_request, response);
- kfree(request);
+	// Decrease the reference count since the response is no longer in-flight.
+ fw_request_put(request);
+
+ // Decrease the reference count to release the object.
+ fw_request_put(request);
}
int fw_get_response_length(struct fw_request *r)
@@ -782,6 +797,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
if (request == NULL)
return NULL;
+ kref_init(&request->kref);
request->response.speed = p->speed;
request->response.timestamp =
@@ -800,16 +816,22 @@ static struct fw_request *allocate_request(struct fw_card *card,
return request;
}
+/**
+ * fw_send_response: - send response packet for asynchronous transaction.
+ * @card: interface to send the response at.
+ * @request: firewire request data for the transaction.
+ * @rcode: response code to send.
+ *
+ * Submit a response packet into the asynchronous response transmission queue. The @request
+ * is released once the transmission finishes successfully.
+ */
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
- if (WARN_ONCE(!request, "invalid for FCP address handlers"))
- return;
-
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
- kfree(request);
+ fw_request_put(request);
return;
}
@@ -821,6 +843,9 @@ void fw_send_response(struct fw_card *card,
fw_fill_response(&request->response, request->request_header,
rcode, NULL, 0);
+	// Increase the reference count so that the object is kept alive while the response is in-flight.
+ fw_request_get(request);
+
card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
@@ -910,7 +935,7 @@ static void handle_fcp_region_request(struct fw_card *card,
rcu_read_lock();
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, NULL, tcode,
+ handler->address_callback(card, request, tcode,
destination, source,
p->generation, offset,
request->data,
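[Illustrative note, not part of the patch] With the hunks above, struct fw_request is reference counted: allocate_request() initialises the kref, fw_send_response() takes an extra reference for the queued response packet, and free_response_callback() drops both that in-flight reference and the base one, freeing the object. A minimal userspace model of that get/put lifetime (a plain counter standing in for struct kref):

#include <stdio.h>
#include <stdlib.h>

struct request {
	int refcount;
	int id;
};

static struct request *request_new(int id)
{
	struct request *r = malloc(sizeof(*r));

	r->refcount = 1;	/* kref_init(): base reference */
	r->id = id;
	return r;
}

static void request_get(struct request *r) { r->refcount++; }

static void request_put(struct request *r)
{
	if (--r->refcount == 0) {
		printf("request %d freed\n", r->id);
		free(r);
	}
}

/* Models fw_send_response(): keep the object alive while the response is queued */
static void send_response(struct request *r)
{
	request_get(r);
	printf("request %d: response queued (refcount=%d)\n", r->id, r->refcount);
}

/* Models free_response_callback(): the response has left the transmit queue */
static void response_done(struct request *r)
{
	request_put(r);	/* drop the in-flight reference */
	request_put(r);	/* drop the base reference, freeing the object */
}

int main(void)
{
	struct request *r = request_new(1);

	send_response(r);
	response_done(r);
	return 0;
}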
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 71d5f16f311c..eafa4eaae737 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -244,6 +244,9 @@ int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
+void fw_request_get(struct fw_request *request);
+void fw_request_put(struct fw_request *request);
+
#define FW_PHY_CONFIG_NO_NODE_ID -1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
@@ -254,4 +257,10 @@ static inline bool is_ping_packet(u32 *data)
return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
+static inline bool is_in_fcp_region(u64 offset, size_t length)
+{
+ return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
+ offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
+}
+
#endif /* _FIREWIRE_CORE_H */
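[Illustrative note, not part of the patch] is_in_fcp_region() moves into core.h so core-cdev.c can recognise FCP requests directly instead of relying on the old "request == NULL" convention. The check is a simple address-range test against the CSR FCP command/response area; the sketch below reproduces it standalone. The CSR constant values are quoted from memory of the IEEE 1394 register layout and should be treated as illustrative.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

/* CSR layout per IEEE 1394 (values illustrative) */
#define CSR_REGISTER_BASE	0xfffff0000000ULL
#define CSR_FCP_COMMAND		0xB00
#define CSR_FCP_END		0xF00

static bool is_in_fcp_region(uint64_t offset, size_t length)
{
	return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	       offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
}

int main(void)
{
	uint64_t fcp_cmd = CSR_REGISTER_BASE | CSR_FCP_COMMAND;

	printf("FCP command write: %d\n", is_in_fcp_region(fcp_cmd, 8));
	printf("Config ROM read:   %d\n", is_in_fcp_region(CSR_REGISTER_BASE | 0x400, 4));
	return 0;
}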
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 659256927b42..48b879e9e831 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -251,7 +251,7 @@ static inline void __init init_ohci1394_controller(int num, int slot, int func)
}
/**
- * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them
+ * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them
* Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
*/
void __init init_ohci1394_dma_on_all_controllers(void)
@@ -283,7 +283,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
}
/**
- * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization
+ * setup_ohci1394_dma - enables early OHCI1394 DMA initialization
*/
static int __init setup_ohci1394_dma(char *opt)
{
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index af22be84034b..538bd677c254 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -706,21 +706,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int rcode;
if (destination == IEEE1394_ALL_NODES) {
- kfree(r);
-
- return;
- }
-
- if (offset != dev->handler.offset)
+		// Although a response to a broadcast packet is not strictly required, the
+		// fw_send_response() function should still be called to maintain the reference
+		// counting of the object. In this case, the call simply releases the
+		// object by dropping its reference count.
+ rcode = RCODE_COMPLETE;
+ } else if (offset != dev->handler.offset) {
rcode = RCODE_ADDRESS_ERROR;
- else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
+ } else if (tcode != TCODE_WRITE_BLOCK_REQUEST) {
rcode = RCODE_TYPE_ERROR;
- else if (fwnet_incoming_packet(dev, payload, length,
- source, generation, false) != 0) {
+ } else if (fwnet_incoming_packet(dev, payload, length,
+ source, generation, false) != 0) {
dev_err(&dev->netdev->dev, "incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
- } else
+ } else {
rcode = RCODE_COMPLETE;
+ }
fw_send_response(card, r, rcode);
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 60051c0cabea..26db5b8dfc1e 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1117,7 +1117,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
tgt->workarounds = w;
}
-static struct scsi_host_template scsi_driver_template;
+static const struct scsi_host_template scsi_driver_template;
static void sbp2_remove(struct fw_unit *unit);
static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
@@ -1586,7 +1586,7 @@ static struct attribute *sbp2_scsi_sysfs_attrs[] = {
ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
-static struct scsi_host_template scsi_driver_template = {
+static const struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = "sbp2",
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 99d439480612..f29d77ecf72d 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -56,9 +56,9 @@ static void ffa_device_remove(struct device *dev)
ffa_drv->remove(to_ffa_dev(dev));
}
-static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ const struct ffa_device *ffa_dev = to_ffa_dev(dev);
return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
ffa_dev->vm_id, &ffa_dev->uuid);
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index a14f65444b35..ea0f5083ac47 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -23,6 +23,38 @@ config ARM_SCMI_PROTOCOL
if ARM_SCMI_PROTOCOL
+config ARM_SCMI_NEED_DEBUGFS
+ bool
+ help
+ This declares whether at least one SCMI facility is configured
+	  which needs debugfs support. When selected, it causes the creation
+ of a common SCMI debugfs root directory.
+
+config ARM_SCMI_RAW_MODE_SUPPORT
+ bool "Enable support for SCMI Raw transmission mode"
+ depends on DEBUG_FS
+ select ARM_SCMI_NEED_DEBUGFS
+ help
+ Enable support for SCMI Raw transmission mode.
+
+	  If enabled, allows the direct injection and snooping of SCMI bare
+ messages through a dedicated debugfs interface.
+ It is meant to be used by SCMI compliance/testing suites.
+
+ When enabled regular SCMI drivers interactions are inhibited in
+ order to avoid unexpected interactions with the SCMI Raw message
+ flow. If unsure say N.
+
+config ARM_SCMI_RAW_MODE_SUPPORT_COEX
+ bool "Allow SCMI Raw mode coexistence with normal SCMI stack"
+ depends on ARM_SCMI_RAW_MODE_SUPPORT
+ help
+ Allow SCMI Raw transmission mode to coexist with normal SCMI stack.
+
+ This will allow regular SCMI drivers to register with the core and
+	  operate normally, which could make an SCMI test suite using the
+ SCMI Raw mode support unreliable. If unsure, say N.
+
config ARM_SCMI_HAVE_TRANSPORT
bool
help
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ea86f8cc8f7..b31d78fa66cc 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
scmi-bus-y = bus.o
+scmi-core-objs := $(scmi-bus-y)
+
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
@@ -8,9 +11,11 @@ scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
-scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
- $(scmi-transport-y)
+scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
+
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
+
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 35bb70724d44..c15928b8c5cc 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -7,17 +7,184 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "common.h"
+BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
+EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
+
static DEFINE_IDA(scmi_bus_id);
-static DEFINE_IDR(scmi_protocols);
-static DEFINE_SPINLOCK(protocol_lock);
+
+static DEFINE_IDR(scmi_requested_devices);
+/* Protect access to scmi_requested_devices */
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
+/* Track globally the creation of SCMI SystemPower related devices */
+static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper let an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices and then the requested device is advertised to any
+ * registered party via the @scmi_requested_devices_nh notification chain.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ unsigned int id = 0;
+ struct list_head *head, *phead = NULL;
+ struct scmi_requested_dev *rdev;
+
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
+ !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
+ pr_warn("SCMI Raw mode active. Rejecting '%s'/0x%02X\n",
+ id_table->name, id_table->protocol_id);
+ return -EINVAL;
+ }
+
+ /*
+	 * Search for the matching protocol rdev list and then search it for
+	 * any existing equally named device...fails if any duplicate is found.
+ */
+ mutex_lock(&scmi_requested_devices_mtx);
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
+
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, eventually creating such head if not already
+ * there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id,
+ id_table->protocol_id + 1, GFP_KERNEL);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ }
+ list_add(&rdev->node, phead);
+
+out:
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ if (!ret)
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_REQUEST,
+ (void *)rdev->id_table);
+
+ return ret;
+}
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * The unrequested device, described by the provided id_table, is at first
+ * removed from the IDR @scmi_requested_devices and then the removal is
+ * advertised to any registered party via the @scmi_requested_devices_nh
+ * notification chain.
+ */
+static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+
+ mutex_unlock(&scmi_requested_devices_mtx);
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_UNREQUEST,
+ (void *)victim->id_table);
+ kfree(victim);
+ mutex_lock(&scmi_requested_devices_mtx);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+}
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
@@ -57,11 +224,11 @@ static int scmi_match_by_id_table(struct device *dev, void *data)
struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
- !strcmp(sdev->name, id_table->name);
+ (id_table->name && !strcmp(sdev->name, id_table->name));
}
-struct scmi_device *scmi_child_dev_find(struct device *parent,
- int prot_id, const char *name)
+static struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name)
{
struct scmi_device_id id_table;
struct device *dev;
@@ -76,30 +243,6 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
return to_scmi_dev(dev);
}
-const struct scmi_protocol *scmi_protocol_get(int protocol_id)
-{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
- if (!proto || !try_module_get(proto->owner)) {
- pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
- return NULL;
- }
-
- pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
-
- return proto;
-}
-
-void scmi_protocol_put(int protocol_id)
-{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
- if (proto)
- module_put(proto->owner);
-}
-
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -120,12 +263,13 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
-static struct bus_type scmi_bus_type = {
+struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
};
+EXPORT_SYMBOL_GPL(scmi_bus_type);
int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
const char *mod_name)
@@ -146,7 +290,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
retval = driver_register(&driver->driver);
if (!retval)
- pr_debug("registered new scmi driver %s\n", driver->name);
+ pr_debug("Registered new scmi driver %s\n", driver->name);
return retval;
}
@@ -164,13 +308,53 @@ static void scmi_device_release(struct device *dev)
kfree(to_scmi_dev(dev));
}
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name)
+static void __scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+ pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(scmi_dev->dev.parent->of_node),
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+
+ if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
+ atomic_set(&scmi_syspower_registered, 0);
+
+ kfree_const(scmi_dev->name);
+ ida_free(&scmi_bus_id, scmi_dev->id);
+ device_unregister(&scmi_dev->dev);
+}
+
+static struct scmi_device *
+__scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
{
int id, retval;
struct scmi_device *scmi_dev;
+ /*
+ * If the same protocol/name device already exist under the same parent
+ * (i.e. SCMI instance) just return the existent device.
+ * This avoids any race between the SCMI driver, creating devices for
+ * each DT defined protocol at probe time, and the concurrent
+ * registration of SCMI drivers.
+ */
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ return scmi_dev;
+
+ /*
+ * Ignore any possible subsequent failures while creating the device
+ * since we are doomed anyway at that point; not using a mutex which
+ * spans across this whole function to keep things simple and to avoid
+ * to serialize all the __scmi_device_create calls across possibly
+ * different SCMI server instances (parent)
+ */
+ if (protocol == SCMI_PROTOCOL_SYSTEM &&
+ atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) {
+ dev_warn(parent,
+ "SCMI SystemPower protocol device must be unique !\n");
+ return NULL;
+ }
+
scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
if (!scmi_dev)
return NULL;
@@ -191,7 +375,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
scmi_dev->dev.parent = parent;
- scmi_dev->dev.of_node = np;
+ device_set_node(&scmi_dev->dev, of_fwnode_handle(np));
scmi_dev->dev.bus = &scmi_bus_type;
scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
@@ -200,6 +384,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
if (retval)
goto put_dev;
+ pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ dev_name(&scmi_dev->dev), protocol, name);
+
return scmi_dev;
put_dev:
kfree_const(scmi_dev->name);
@@ -208,77 +396,85 @@ put_dev:
return NULL;
}
-void scmi_device_destroy(struct scmi_device *scmi_dev)
-{
- kfree_const(scmi_dev->name);
- scmi_handle_put(scmi_dev->handle);
- ida_free(&scmi_bus_id, scmi_dev->id);
- device_unregister(&scmi_dev->dev);
-}
-
-void scmi_device_link_add(struct device *consumer, struct device *supplier)
-{
- struct device_link *link;
-
- link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
-
- WARN_ON(!link);
-}
-
-void scmi_set_handle(struct scmi_device *scmi_dev)
-{
- scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
- if (scmi_dev->handle)
- scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
-}
-
-int scmi_protocol_register(const struct scmi_protocol *proto)
+/**
+ * scmi_device_create - A method to create one or more SCMI devices
+ *
+ * @np: A reference to the device node to use for the new device(s)
+ * @parent: The parent device to use identifying a specific SCMI instance
+ * @protocol: The SCMI protocol to be associated with this device
+ * @name: The requested-name of the device to be created; this is optional
+ * and if no @name is provided, all the devices currently known to
+ * be requested on the SCMI bus for @protocol will be created.
+ *
+ * This method can be invoked to create a single well-defined device (like
+ * a transport device or a device requested by an SCMI driver loaded after
+ * the core SCMI stack has been probed), or to create all the devices currently
+ * known to have been requested by the loaded SCMI drivers for a specific
+ * protocol (typically during SCMI core protocol enumeration at probe time).
+ *
+ * Return: The created device (or one of them if @name was NOT provided and
+ * multiple devices were created) or NULL if no device was created;
+ * note that NULL indicates an error ONLY in case a specific @name
+ * was provided: when @name param was not provided, a number of devices
+ * could have been potentially created for a whole protocol, unless no
+ * device was found to have been requested for that specific protocol.
+ */
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name)
{
- int ret;
-
- if (!proto) {
- pr_err("invalid protocol\n");
- return -EINVAL;
- }
-
- if (!proto->instance_init) {
- pr_err("missing init for protocol 0x%x\n", proto->id);
- return -EINVAL;
+ struct list_head *phead;
+ struct scmi_requested_dev *rdev;
+ struct scmi_device *scmi_dev = NULL;
+
+ if (name)
+ return __scmi_device_create(np, parent, protocol, name);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, protocol);
+ /* Nothing to do. */
+ if (!phead) {
+ mutex_unlock(&scmi_requested_devices_mtx);
+ return NULL;
}
- spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, (void *)proto,
- proto->id, proto->id + 1, GFP_ATOMIC);
- spin_unlock(&protocol_lock);
- if (ret != proto->id) {
- pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
- proto->id, ret);
- return ret;
+ /* Walk the list of requested devices for protocol and create them */
+ list_for_each_entry(rdev, phead, node) {
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ /* Report errors and carry on... */
+ if (sdev)
+ scmi_dev = sdev;
+ else
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
}
+ mutex_unlock(&scmi_requested_devices_mtx);
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
-
- return 0;
+ return scmi_dev;
}
-EXPORT_SYMBOL_GPL(scmi_protocol_register);
+EXPORT_SYMBOL_GPL(scmi_device_create);
-void scmi_protocol_unregister(const struct scmi_protocol *proto)
+void scmi_device_destroy(struct device *parent, int protocol, const char *name)
{
- spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, proto->id);
- spin_unlock(&protocol_lock);
-
- pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+ struct scmi_device *scmi_dev;
- return;
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ __scmi_device_destroy(scmi_dev);
}
-EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+EXPORT_SYMBOL_GPL(scmi_device_destroy);
static int __scmi_devices_unregister(struct device *dev, void *data)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- scmi_device_destroy(scmi_dev);
+ __scmi_device_destroy(scmi_dev);
return 0;
}
@@ -287,20 +483,33 @@ static void scmi_devices_unregister(void)
bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
}
-int __init scmi_bus_init(void)
+static int __init scmi_bus_init(void)
{
int retval;
retval = bus_register(&scmi_bus_type);
if (retval)
- pr_err("scmi protocol bus register failed (%d)\n", retval);
+ pr_err("SCMI protocol bus register failed (%d)\n", retval);
+
+ pr_info("SCMI protocol bus registered\n");
return retval;
}
+subsys_initcall(scmi_bus_init);
-void __exit scmi_bus_exit(void)
+static void __exit scmi_bus_exit(void)
{
+ /*
+ * Destroy all remaining devices: just in case the drivers were
+ * manually unbound first and then the modules unloaded.
+ */
scmi_devices_unregister();
bus_unregister(&scmi_bus_type);
ida_destroy(&scmi_bus_id);
}
+module_exit(scmi_bus_exit);
+
+MODULE_ALIAS("scmi-core");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol bus");
+MODULE_LICENSE("GPL");
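
As a quick orientation aid (not part of the patch), the sketch below shows how the reworked bus helpers introduced above are meant to be called, going by their kernel-doc: a named call creates one specific child device, while passing NULL as the name instantiates every device currently requested for that protocol. The protocol constants and the "cpufreq" name are purely illustrative placeholders.

/*
 * Illustrative sketch only -- not part of this patch. "np" and "parent"
 * stand for the protocol DT node and the SCMI instance device.
 */
static void example_scmi_dev_lifecycle(struct device_node *np,
				       struct device *parent)
{
	struct scmi_device *sdev;

	/* Create a single, well-defined named device for one protocol */
	sdev = scmi_device_create(np, parent, SCMI_PROTOCOL_PERF, "cpufreq");
	if (!sdev)
		pr_err("failed to create the requested perf device\n");

	/* With a NULL name, create all devices requested for a protocol */
	sdev = scmi_device_create(np, parent, SCMI_PROTOCOL_POWER, NULL);
	/* NULL here is an error only if a request was actually pending */

	/* Later, tear down a specific named child of this SCMI instance */
	scmi_device_destroy(parent, SCMI_PROTOCOL_PERF, "cpufreq");
}
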
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index a1c0154c31c6..c46dc5215af7 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -27,6 +27,48 @@
#include "protocols.h"
#include "notify.h"
+#define SCMI_MAX_CHANNELS 256
+
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+enum scmi_error_codes {
+ SCMI_SUCCESS = 0, /* Success */
+ SCMI_ERR_SUPPORT = -1, /* Not supported */
+ SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
+ SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
+ SCMI_ERR_ENTRY = -4, /* Not found */
+ SCMI_ERR_RANGE = -5, /* Value out of range */
+ SCMI_ERR_BUSY = -6, /* Device busy */
+ SCMI_ERR_COMMS = -7, /* Communication Error */
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+};
+
+static const int scmi_linux_errmap[] = {
+ /* better than a switch-case as long as the error values are contiguous */
+ 0, /* SCMI_SUCCESS */
+ -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
+ -EINVAL, /* SCMI_ERR_PARAMS */
+ -EACCES, /* SCMI_ERR_ACCESS */
+ -ENOENT, /* SCMI_ERR_ENTRY */
+ -ERANGE, /* SCMI_ERR_RANGE */
+ -EBUSY, /* SCMI_ERR_BUSY */
+ -ECOMM, /* SCMI_ERR_COMMS */
+ -EIO, /* SCMI_ERR_GENERIC */
+ -EREMOTEIO, /* SCMI_ERR_HARDWARE */
+ -EPROTO, /* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
+ return -EIO;
+}
+
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -96,18 +138,19 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
-int scmi_handle_put(const struct scmi_handle *handle);
-void scmi_device_link_add(struct device *consumer, struct device *supplier);
-struct scmi_handle *scmi_handle_get(struct device *dev);
-void scmi_set_handle(struct scmi_device *scmi_dev);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-int __init scmi_bus_init(void);
-void __exit scmi_bus_exit(void);
+extern struct bus_type scmi_bus_type;
+
+#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0
+#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1
+extern struct blocking_notifier_head scmi_requested_devices_nh;
-const struct scmi_protocol *scmi_protocol_get(int protocol_id);
-void scmi_protocol_put(int protocol_id);
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name);
+void scmi_device_destroy(struct device *parent, int protocol, const char *name);
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
@@ -116,6 +159,8 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
*
+ * @id: An identifier for this channel: this matches the protocol number
+ * used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @rx_timeout_ms: The configured RX timeout in milliseconds.
@@ -127,6 +172,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
+ int id;
struct device *dev;
unsigned int rx_timeout_ms;
struct scmi_handle *handle;
@@ -153,7 +199,7 @@ struct scmi_chan_info {
*/
struct scmi_transport_ops {
int (*link_supplier)(struct device *dev);
- bool (*chan_available)(struct device *dev, int idx);
+ bool (*chan_available)(struct device_node *of_node, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
int (*chan_free)(int id, void *p, void *data);
@@ -170,11 +216,6 @@ struct scmi_transport_ops {
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
-int scmi_protocol_device_request(const struct scmi_device_id *id_table);
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
-struct scmi_device *scmi_child_dev_find(struct device *parent,
- int prot_id, const char *name);
-
/**
* struct scmi_desc - Description of SoC integration
*
@@ -215,6 +256,36 @@ struct scmi_desc {
const bool atomic_enabled;
};
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return cinfo->no_completion_irq || desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
+{
+ return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return is_polling_required(cinfo, desc) &&
+ is_transport_polling_capable(desc);
+}
+
+void scmi_xfer_raw_put(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);
+
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms);
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
extern const struct scmi_desc scmi_mailbox_desc;
#endif
@@ -229,7 +300,6 @@ extern const struct scmi_desc scmi_optee_desc;
#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
/* shmem related declarations */
struct scmi_shared_mem;
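
The error-map helpers moved into this header are small enough that a worked example may help: the negative SCMI status coming back from the platform firmware is negated to index scmi_linux_errmap, and anything out of range collapses to -EIO. A minimal sketch, assuming the header above is included; the status value is made up for illustration.

/* Illustrative only: map a firmware status word to a Linux errno. */
static int example_check_status(int fw_status)
{
	int ret = scmi_to_linux_errno(fw_status);

	/* e.g. fw_status == SCMI_ERR_ACCESS (-3) yields -EACCES here */
	if (ret)
		pr_warn("SCMI command failed: %d\n", ret);

	return ret;
}
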
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index f818d00bb2c6..e7d97b59963b 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -14,7 +14,10 @@
* Copyright (C) 2018-2021 ARM Ltd.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bitmap.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
@@ -34,22 +37,15 @@
#include "common.h"
#include "notify.h"
+#include "raw_mode.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
-enum scmi_error_codes {
- SCMI_SUCCESS = 0, /* Success */
- SCMI_ERR_SUPPORT = -1, /* Not supported */
- SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
- SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
- SCMI_ERR_ENTRY = -4, /* Not found */
- SCMI_ERR_RANGE = -5, /* Value out of range */
- SCMI_ERR_BUSY = -6, /* Device busy */
- SCMI_ERR_COMMS = -7, /* Communication Error */
- SCMI_ERR_GENERIC = -8, /* Generic Error */
- SCMI_ERR_HARDWARE = -9, /* Hardware Error */
- SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-};
+static DEFINE_IDA(scmi_id);
+
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
@@ -58,18 +54,7 @@ static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
-static DEFINE_IDR(scmi_requested_devices);
-static DEFINE_MUTEX(scmi_requested_devices_mtx);
-
-/* Track globally the creation of SCMI SystemPower related devices */
-static bool scmi_syspower_registered;
-/* Protect access to scmi_syspower_registered */
-static DEFINE_MUTEX(scmi_syspower_mtx);
-
-struct scmi_requested_dev {
- const struct scmi_device_id *id_table;
- struct list_head node;
-};
+static struct dentry *scmi_top_dentry;
/**
* struct scmi_xfers_info - Structure to manage transfer information
@@ -118,8 +103,23 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+};
+
+/**
* struct scmi_info - Structure representing a SCMI instance
*
+ * @id: A sequence number starting from zero identifying this instance
* @dev: Device pointer
* @desc: SoC description for this instance
* @version: SCMI revision information containing protocol version,
@@ -147,8 +147,15 @@ struct scmi_protocol_instance {
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
+ * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
+ * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
+ * bus
+ * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
+ * @dbg: A pointer to debugfs related data (if any)
+ * @raw: An opaque reference handle used by SCMI Raw mode.
*/
struct scmi_info {
+ int id;
struct device *dev;
const struct scmi_desc *desc;
struct scmi_revision_info version;
@@ -166,32 +173,114 @@ struct scmi_info {
void *notify_priv;
struct list_head node;
int users;
+ struct notifier_block bus_nb;
+ struct notifier_block dev_req_nb;
+ /* Serialize device creation process for this instance */
+ struct mutex devreq_mtx;
+ struct scmi_debug_info *dbg;
+ void *raw;
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
+#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
-static const int scmi_linux_errmap[] = {
- /* better than switch case as long as return value is continuous */
- 0, /* SCMI_SUCCESS */
- -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
- -EINVAL, /* SCMI_ERR_PARAM */
- -EACCES, /* SCMI_ERR_ACCESS */
- -ENOENT, /* SCMI_ERR_ENTRY */
- -ERANGE, /* SCMI_ERR_RANGE */
- -EBUSY, /* SCMI_ERR_BUSY */
- -ECOMM, /* SCMI_ERR_COMMS */
- -EIO, /* SCMI_ERR_GENERIC */
- -EREMOTEIO, /* SCMI_ERR_HARDWARE */
- -EPROTO, /* SCMI_ERR_PROTOCOL */
-};
+static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto || !try_module_get(proto->owner)) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+static void scmi_protocol_put(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (proto)
+ module_put(proto->owner);
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
+{
+ int ret;
+
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ spin_lock(&protocol_lock);
+ ret = idr_alloc(&scmi_protocols, (void *)proto,
+ proto->id, proto->id + 1, GFP_ATOMIC);
+ spin_unlock(&protocol_lock);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
+
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
+{
+ spin_lock(&protocol_lock);
+ idr_remove(&scmi_protocols, proto->id);
+ spin_unlock(&protocol_lock);
-static inline int scmi_to_linux_errno(int errno)
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ * @name: The optional name of the device to be created: if not provided this
+ * call will lead to the creation of all the devices currently requested
+ * for the specified protocol.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info,
+ int prot_id, const char *name)
{
- int err_idx = -errno;
+ struct scmi_device *sdev;
- if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
- return scmi_linux_errmap[err_idx];
- return -EIO;
+ mutex_lock(&info->devreq_mtx);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
+ if (name && !sdev)
+ dev_err(info->dev,
+ "failed to create device for protocol 0x%X (%s)\n",
+ prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
+}
+
+static void scmi_destroy_protocol_devices(struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ mutex_lock(&info->devreq_mtx);
+ scmi_device_destroy(info->dev, prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
}
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
@@ -311,8 +400,6 @@ static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
if (xfer_id != next_token)
atomic_add((int)(xfer_id - next_token), &transfer_last_id);
- /* Set in-flight */
- set_bit(xfer_id, minfo->xfer_alloc_table);
xfer->hdr.seq = (u16)xfer_id;
return 0;
@@ -331,32 +418,123 @@ static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
}
/**
+ * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper assumes that the xfer to be registered as in-flight
+ * had been built using an xfer sequence number which still corresponds to a
+ * free slot in the xfer_alloc_table.
+ *
+ * Context: Assumed to be called with @xfer_lock already acquired.
+ */
+static inline void
+scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ /* Set in-flight */
+ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+ xfer->pending = true;
+}
+
+/**
+ * scmi_xfer_inflight_register - Try to register an xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper does NOT assume anything about the sequence number
+ * that was baked into the provided xfer, so it first checks whether it can
+ * be mapped to a free slot and fails with an error if another xfer with the
+ * same sequence number is currently still registered as in-flight.
+ *
+ * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
+ * could not be mapped to a free slot in the xfer_alloc_table.
+ */
+static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
+ * in-flight on the TX channel, if possible.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: The xfer to register
+ *
+ * Return: 0 on Success, error otherwise
+ */
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
+}
+
+/**
+ * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
+ * as pending in-flight
+ *
+ * @xfer: The xfer to act upon
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Return: 0 on Success or error otherwise
+ */
+static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ /* Set a new monotonic token as the xfer sequence number */
+ ret = scmi_xfer_token_set(minfo, xfer);
+ if (!ret)
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
- * @set_pending: If true a monotonic token is picked and the xfer is added to
- * the pending hash table.
*
* Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
- * Picks an xfer from the free list @free_xfers (if any available) and, if
- * required, sets a monotonically increasing token and stores the inflight xfer
- * into the @pending_xfers hashtable for later retrieval.
+ * Picks an xfer from the free list @free_xfers (if any available) and performs
+ * a basic initialization.
+ *
+ * Note that, at this point, no sequence number has been assigned yet to the
+ * allocated xfer, nor has it been registered as a pending transaction.
*
* The successfully initialized xfer is refcounted.
*
- * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
- * @free_xfers.
+ * Context: Holds @xfer_lock while manipulating @free_xfers.
*
- * Return: 0 if all went fine, else corresponding error.
+ * Return: An initialized xfer if all went fine, else an error pointer.
*/
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
- struct scmi_xfers_info *minfo,
- bool set_pending)
+ struct scmi_xfers_info *minfo)
{
- int ret;
unsigned long flags;
struct scmi_xfer *xfer;
@@ -376,31 +554,71 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
*/
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
- if (set_pending) {
- /* Pick and set monotonic token */
- ret = scmi_xfer_token_set(minfo, xfer);
- if (!ret) {
- hash_add(minfo->pending_xfers, &xfer->node,
- xfer->hdr.seq);
- xfer->pending = true;
- } else {
- dev_err(handle->dev,
- "Failed to get monotonic token %d\n", ret);
- hlist_add_head(&xfer->node, &minfo->free_xfers);
- xfer = ERR_PTR(ret);
- }
- }
-
- if (!IS_ERR(xfer)) {
- refcount_set(&xfer->users, 1);
- atomic_set(&xfer->busy, SCMI_XFER_FREE);
- }
+ refcount_set(&xfer->users, 1);
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return xfer;
}
/**
+ * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Note that xfer is taken from the TX channel structures.
+ *
+ * Return: A valid xfer on Success, or an error-pointer otherwise
+ */
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
+{
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer = scmi_xfer_get(handle, &info->tx_minfo);
+ if (!IS_ERR(xfer))
+ xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
+
+ return xfer;
+}
+
+/**
+ * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
+ * to use for a specific protocol_id Raw transaction.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol_id: Identifier of the protocol
+ *
+ * Note that in a regular SCMI stack, usually, a protocol has to be defined in
+ * the DT to have an associated channel and be usable; but in Raw mode any
+ * protocol in range is allowed, re-using the Base channel, so as to enable
+ * fuzzing on any protocol without the need of a fully compiled DT.
+ *
+ * Return: A reference to the channel to use, or an ERR_PTR
+ */
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_chan_info *cinfo;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ cinfo = idr_find(&info->tx_idr, protocol_id);
+ if (!cinfo) {
+ if (protocol_id == SCMI_PROTOCOL_BASE)
+ return ERR_PTR(-EINVAL);
+ /* Use Base channel for protocols not defined for DT */
+ cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!cinfo)
+ return ERR_PTR(-EINVAL);
+ dev_warn_once(handle->dev,
+ "Using Base channel for protocol 0x%X\n",
+ protocol_id);
+ }
+
+ return cinfo;
+}
+
+/**
* __scmi_xfer_put() - Release a message
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -429,6 +647,24 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
}
/**
+ * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: A reference to the xfer to put
+ *
+ * Note that, as with other xfer_put() handlers, the xfer is effectively
+ * released only once there are no more users on the system.
+ */
+void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
+ xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
+ return __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+/**
* scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -623,25 +859,6 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo);
}
-static inline bool is_polling_required(struct scmi_chan_info *cinfo,
- struct scmi_info *info)
-{
- return cinfo->no_completion_irq || info->desc->force_polling;
-}
-
-static inline bool is_transport_polling_capable(struct scmi_info *info)
-{
- return info->desc->ops->poll_done ||
- info->desc->sync_cmds_completed_on_ret;
-}
-
-static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
- struct scmi_info *info)
-{
- return is_polling_required(cinfo, info) &&
- is_transport_polling_capable(info);
-}
-
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
@@ -652,7 +869,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
ktime_t ts;
ts = ktime_get_boottime();
- xfer = scmi_xfer_get(cinfo->handle, minfo, false);
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
@@ -667,9 +884,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
- xfer->hdr.seq, xfer->hdr.status,
- xfer->rx.buf, xfer->rx.len);
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "NOTI", xfer->hdr.seq,
+ xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -678,6 +895,12 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
xfer->hdr.protocol_id, xfer->hdr.seq,
MSG_TYPE_NOTIFICATION);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+ scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
+ cinfo->id);
+ }
+
__scmi_xfer_put(minfo, xfer);
scmi_clear_channel(info, cinfo);
@@ -691,6 +914,9 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
+
if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
scmi_clear_channel(info, cinfo);
return;
@@ -705,9 +931,11 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id,
xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
- "DLYD" : "RESP",
+ (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
+ (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
@@ -722,6 +950,18 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
complete(&xfer->done);
}
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ /*
+ * When in polling mode, avoid queueing the Raw xfer on the IRQ
+ * RX path since it will already have been queued at the end of
+ * the TX poll loop.
+ */
+ if (!xfer->hdr.poll_completion)
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
+
scmi_xfer_command_release(info, xfer);
}
@@ -785,36 +1025,18 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
ktime_after(ktime_get(), stop);
}
-/**
- * scmi_wait_for_message_response - An helper to group all the possible ways of
- * waiting for a synchronous message response.
- *
- * @cinfo: SCMI channel info
- * @xfer: Reference to the transfer being waited for.
- *
- * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
- * configuration flags like xfer->hdr.poll_completion.
- *
- * Return: 0 on Success, error otherwise.
- */
-static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer)
+static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, unsigned int timeout_ms)
{
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct device *dev = info->dev;
- int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
-
- trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq,
- timeout_ms,
- xfer->hdr.poll_completion);
+ int ret = 0;
if (xfer->hdr.poll_completion) {
/*
* Real polling is needed only if transport has NOT declared
* itself to support synchronous commands replies.
*/
- if (!info->desc->sync_cmds_completed_on_ret) {
+ if (!desc->sync_cmds_completed_on_ret) {
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
@@ -833,6 +1055,8 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
if (!ret) {
unsigned long flags;
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
/*
* Do not fetch_response if an out-of-order delayed
@@ -840,16 +1064,27 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
*/
spin_lock_irqsave(&xfer->lock, flags);
if (xfer->state == SCMI_XFER_SENT_OK) {
- info->desc->ops->fetch_response(cinfo, xfer);
+ desc->ops->fetch_response(cinfo, xfer);
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
/* Trace polled replies. */
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
- "RESP",
+ trace_scmi_msg_dump(info->id, cinfo->id,
+ xfer->hdr.protocol_id, xfer->hdr.id,
+ !SCMI_XFER_IS_RAW(xfer) ?
+ "RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
+
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
}
} else {
/* And we wait for the response. */
@@ -865,6 +1100,59 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
}
/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ info->desc->max_rx_timeout_ms,
+ xfer->hdr.poll_completion);
+
+ return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
+ info->desc->max_rx_timeout_ms);
+}
+
+/**
+ * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
+ * reply to an xfer raw request on a specific channel for the required timeout.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ * @timeout_ms: The maximum timeout in milliseconds
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms)
+{
+ int ret;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
+ if (ret)
+ dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+
+ return ret;
+}
+
+/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
@@ -884,7 +1172,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_chan_info *cinfo;
/* Check for polling request on custom command xfers at first */
- if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
+ if (xfer->hdr.poll_completion &&
+ !is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
@@ -895,7 +1184,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return -EINVAL;
/* True ONLY if also supported by transport. */
- if (is_polling_enabled(cinfo, info))
+ if (is_polling_enabled(cinfo, info->desc))
xfer->hdr.poll_completion = true;
/*
@@ -910,6 +1199,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
+ /* Clear any stale status */
+ xfer->hdr.status = SCMI_SUCCESS;
xfer->state = SCMI_XFER_SENT_OK;
/*
* Even though spinlocking is not needed here since no race is possible
@@ -926,9 +1217,9 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
- xfer->hdr.seq, xfer->hdr.status,
- xfer->tx.buf, xfer->tx.len);
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "CMND", xfer->hdr.seq,
+ xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
@@ -952,8 +1243,6 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
xfer->rx.len = info->desc->max_msg_size;
}
-#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
-
/**
* do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
@@ -1041,13 +1330,22 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(pi->handle, minfo, true);
+ xfer = scmi_xfer_get(pi->handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
return ret;
}
+ /* Pick a sequence number and register this xfer as in-flight */
+ ret = scmi_xfer_pending_set(xfer, minfo);
+ if (ret) {
+ dev_err(pi->handle->dev,
+ "Failed to get monotonic token %d\n", ret);
+ __scmi_xfer_put(minfo, xfer);
+ return ret;
+ }
+
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.type = MSG_TYPE_COMMAND;
@@ -1820,20 +2118,14 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
bool ret;
struct scmi_info *info = handle_to_scmi_info(handle);
- ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+ ret = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
*atomic_threshold = info->atomic_threshold;
return ret;
}
-static inline
-struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
-{
- info->users++;
- return &info->handle;
-}
-
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
@@ -1845,7 +2137,7 @@ struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
*
* Return: pointer to handle if successful, NULL on error
*/
-struct scmi_handle *scmi_handle_get(struct device *dev)
+static struct scmi_handle *scmi_handle_get(struct device *dev)
{
struct list_head *p;
struct scmi_info *info;
@@ -1855,7 +2147,8 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
- handle = scmi_handle_get_from_info_unlocked(info);
+ info->users++;
+ handle = &info->handle;
break;
}
}
@@ -1876,7 +2169,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
* Return: 0 is successfully released
* if null was passed, it returns -EINVAL;
*/
-int scmi_handle_put(const struct scmi_handle *handle)
+static int scmi_handle_put(const struct scmi_handle *handle)
{
struct scmi_info *info;
@@ -1892,6 +2185,23 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
+static void scmi_device_link_add(struct device *consumer,
+ struct device *supplier)
+{
+ struct device_link *link;
+
+ link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ WARN_ON(!link);
+}
+
+static void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+ scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+ if (scmi_dev->handle)
+ scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
+}
+
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
struct scmi_xfers_info *info)
{
@@ -1911,8 +2221,8 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
hash_init(info->pending_xfers);
/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
- info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
- sizeof(long), GFP_KERNEL);
+ info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
+ GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
@@ -1979,29 +2289,26 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return ret;
ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
- if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ if (!ret && !idr_is_empty(&sinfo->rx_idr))
ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
return ret;
}
-static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
int prot_id, bool tx)
{
int ret, idx;
+ char name[32];
struct scmi_chan_info *cinfo;
struct idr *idr;
+ struct scmi_device *tdev = NULL;
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
- /* check if already allocated, used for multiple device per protocol */
- cinfo = idr_find(idr, prot_id);
- if (cinfo)
- return 0;
-
- if (!info->desc->ops->chan_available(dev, idx)) {
+ if (!info->desc->ops->chan_available(of_node, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
@@ -2012,27 +2319,52 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (!cinfo)
return -ENOMEM;
- cinfo->dev = dev;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+ /* Create a unique name for this transport device */
+ snprintf(name, 32, "__scmi_transport_device_%s_%02X",
+ idx ? "rx" : "tx", prot_id);
+ /* Create a uniquely named, dedicated transport device for this chan */
+ tdev = scmi_device_create(of_node, info->dev, prot_id, name);
+ if (!tdev) {
+ dev_err(info->dev,
+ "failed to create transport device (%s)\n", name);
+ devm_kfree(info->dev, cinfo);
+ return -EINVAL;
+ }
+ of_node_get(of_node);
+
+ cinfo->id = prot_id;
+ cinfo->dev = &tdev->dev;
ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
- if (ret)
+ if (ret) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
return ret;
+ }
- if (tx && is_polling_required(cinfo, info)) {
- if (is_transport_polling_capable(info))
- dev_info(dev,
+ if (tx && is_polling_required(cinfo, info->desc)) {
+ if (is_transport_polling_capable(info->desc))
+ dev_info(&tdev->dev,
"Enabled polling mode TX channel - prot_id:%d\n",
prot_id);
else
- dev_warn(dev,
+ dev_warn(&tdev->dev,
"Polling mode NOT supported by transport.\n");
}
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
- dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+ dev_err(info->dev,
+ "unable to allocate SCMI idr slot err %d\n", ret);
+ /* Destroy channel and device only if created by this call. */
+ if (tdev) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
+ }
return ret;
}
@@ -2041,13 +2373,14 @@ idr_alloc:
}
static inline int
-scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
+ int prot_id)
{
- int ret = scmi_chan_setup(info, dev, prot_id, true);
+ int ret = scmi_chan_setup(info, of_node, prot_id, true);
if (!ret) {
/* Rx is optional, report only memory errors */
- ret = scmi_chan_setup(info, dev, prot_id, false);
+ ret = scmi_chan_setup(info, of_node, prot_id, false);
if (ret && ret != -ENOMEM)
ret = 0;
}
@@ -2056,306 +2389,264 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
}
/**
- * scmi_get_protocol_device - Helper to get/create an SCMI device.
- *
- * @np: A device node representing a valid active protocols for the referred
- * SCMI instance.
- * @info: The referred SCMI instance for which we are getting/creating this
- * device.
- * @prot_id: The protocol ID.
- * @name: The device name.
- *
- * Referring to the specific SCMI instance identified by @info, this helper
- * takes care to return a properly initialized device matching the requested
- * @proto_id and @name: if device was still not existent it is created as a
- * child of the specified SCMI instance @info and its transport properly
- * initialized as usual.
- *
- * Return: A properly initialized scmi device, NULL otherwise.
+ * scmi_channels_setup - Helper to initialize all required channels
+ *
+ * @info: The SCMI instance descriptor.
+ *
+ * Initialize all the channels described in the DT against the underlying
+ * configured transport, using custom-defined dedicated devices instead of
+ * borrowing devices from the SCMI drivers; this way channels are initialized
+ * upfront during core SCMI stack probing and are no longer coupled with SCMI
+ * devices used by SCMI drivers.
+ *
+ * Note that, even though a pair of TX/RX channels is associated with each
+ * protocol defined in the DT, a distinct freshly initialized channel is
+ * created only if the DT node for the protocol at hand describes a dedicated
+ * channel: in all the other cases the common BASE protocol channel is reused.
+ *
+ * Return: 0 on Success
*/
-static inline struct scmi_device *
-scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static int scmi_channels_setup(struct scmi_info *info)
{
- struct scmi_device *sdev;
+ int ret;
+ struct device_node *child, *top_np = info->dev->of_node;
+
+ /* Initialize a common generic channel at first */
+ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
- /* Already created for this parent SCMI instance ? */
- sdev = scmi_child_dev_find(info->dev, prot_id, name);
- if (sdev)
- return sdev;
+ for_each_available_child_of_node(top_np, child) {
+ u32 prot_id;
- mutex_lock(&scmi_syspower_mtx);
- if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
- dev_warn(info->dev,
- "SCMI SystemPower protocol device must be unique !\n");
- mutex_unlock(&scmi_syspower_mtx);
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
- return NULL;
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(info->dev,
+ "Out of range protocol %d\n", prot_id);
+
+ ret = scmi_txrx_setup(info, child, prot_id);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
}
- pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+ return 0;
+}
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (!sdev) {
- dev_err(info->dev, "failed to create %d protocol device\n",
- prot_id);
- mutex_unlock(&scmi_syspower_mtx);
+static int scmi_chan_destroy(int id, void *p, void *idr)
+{
+ struct scmi_chan_info *cinfo = p;
- return NULL;
+ if (cinfo->dev) {
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
+
+ of_node_put(cinfo->dev->of_node);
+ scmi_device_destroy(info->dev, id, sdev->name);
+ cinfo->dev = NULL;
}
- if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
- dev_err(&sdev->dev, "failed to setup transport\n");
- scmi_device_destroy(sdev);
- mutex_unlock(&scmi_syspower_mtx);
+ idr_remove(idr, id);
- return NULL;
- }
+ return 0;
+}
- if (prot_id == SCMI_PROTOCOL_SYSTEM)
- scmi_syspower_registered = true;
+static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
+{
+ /* At first free all channels at the transport layer ... */
+ idr_for_each(idr, info->desc->ops->chan_free, idr);
- mutex_unlock(&scmi_syspower_mtx);
+ /* ...then destroy all underlying devices */
+ idr_for_each(idr, scmi_chan_destroy, idr);
- return sdev;
+ idr_destroy(idr);
}
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static void scmi_cleanup_txrx_channels(struct scmi_info *info)
{
- struct scmi_device *sdev;
+ scmi_cleanup_channels(info, &info->tx_idr);
- sdev = scmi_get_protocol_device(np, info, prot_id, name);
- if (!sdev)
- return;
-
- /* setup handle now as the transport is ready */
- scmi_set_handle(sdev);
+ scmi_cleanup_channels(info, &info->rx_idr);
}
-/**
- * scmi_create_protocol_devices - Create devices for all pending requests for
- * this SCMI instance.
- *
- * @np: The device node describing the protocol
- * @info: The SCMI instance descriptor
- * @prot_id: The protocol ID
- *
- * All devices previously requested for this instance (if any) are found and
- * created by scanning the proper @&scmi_requested_devices entry.
- */
-static void scmi_create_protocol_devices(struct device_node *np,
- struct scmi_info *info, int prot_id)
+static int scmi_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct list_head *phead;
+ struct scmi_info *info = bus_nb_to_scmi_info(nb);
+ struct scmi_device *sdev = to_scmi_dev(data);
- mutex_lock(&scmi_requested_devices_mtx);
- phead = idr_find(&scmi_requested_devices, prot_id);
- if (phead) {
- struct scmi_requested_dev *rdev;
+ /* Skip transport devices and devices of different SCMI instances */
+ if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
+ sdev->dev.parent != info->dev)
+ return NOTIFY_DONE;
- list_for_each_entry(rdev, phead, node)
- scmi_create_protocol_device(np, info, prot_id,
- rdev->id_table->name);
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ /* setup handle now as the transport is ready */
+ scmi_set_handle(sdev);
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ scmi_handle_put(sdev->handle);
+ sdev->handle = NULL;
+ break;
+ default:
+ return NOTIFY_DONE;
}
- mutex_unlock(&scmi_requested_devices_mtx);
+
+ dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
+ sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
+ "about to be BOUND." : "UNBOUND.");
+
+ return NOTIFY_OK;
}
-/**
- * scmi_protocol_device_request - Helper to request a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be created.
- *
- * This helper let an SCMI driver request specific devices identified by the
- * @id_table to be created for each active SCMI instance.
- *
- * The requested device name MUST NOT be already existent for any protocol;
- * at first the freshly requested @id_table is annotated in the IDR table
- * @scmi_requested_devices, then a matching device is created for each already
- * active SCMI instance. (if any)
- *
- * This way the requested device is created straight-away for all the already
- * initialized(probed) SCMI instances (handles) and it remains also annotated
- * as pending creation if the requesting SCMI driver was loaded before some
- * SCMI instance and related transports were available: when such late instance
- * is probed, its probe will take care to scan the list of pending requested
- * devices and create those on its own (see @scmi_create_protocol_devices and
- * its enclosing loop)
- *
- * Return: 0 on Success
- */
-int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+static int scmi_device_request_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- int ret = 0;
- unsigned int id = 0;
- struct list_head *head, *phead = NULL;
- struct scmi_requested_dev *rdev;
- struct scmi_info *info;
+ struct device_node *np;
+ struct scmi_device_id *id_table = data;
+ struct scmi_info *info = req_nb_to_scmi_info(nb);
- pr_debug("Requesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
+ np = idr_find(&info->active_protocols, id_table->protocol_id);
+ if (!np)
+ return NOTIFY_DONE;
- /*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
- */
- mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
- list_for_each_entry(rdev, head, node) {
- if (!strcmp(rdev->id_table->name, id_table->name)) {
- pr_err("Ignoring duplicate request [%d] %s\n",
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- ret = -EINVAL;
- goto out;
- }
- }
- }
+ dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
+ action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
+ id_table->name, id_table->protocol_id);
- /*
- * No duplicate found for requested id_table, so let's create a new
- * requested device entry for this new valid request.
- */
- rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- ret = -ENOMEM;
- goto out;
+ switch (action) {
+ case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
+ scmi_create_protocol_devices(np, info, id_table->protocol_id,
+ id_table->name);
+ break;
+ case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
+ scmi_destroy_protocol_devices(info, id_table->protocol_id,
+ id_table->name);
+ break;
+ default:
+ return NOTIFY_DONE;
}
- rdev->id_table = id_table;
- /*
- * Append the new requested device table descriptor to the head of the
- * related protocol list, eventually creating such head if not already
- * there.
- */
- if (!phead) {
- phead = kzalloc(sizeof(*phead), GFP_KERNEL);
- if (!phead) {
- kfree(rdev);
- ret = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(phead);
-
- ret = idr_alloc(&scmi_requested_devices, (void *)phead,
- id_table->protocol_id,
- id_table->protocol_id + 1, GFP_KERNEL);
- if (ret != id_table->protocol_id) {
- pr_err("Failed to save SCMI device - ret:%d\n", ret);
- kfree(rdev);
- kfree(phead);
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- }
- list_add(&rdev->node, phead);
+ return NOTIFY_OK;
+}
- /*
- * Now effectively create and initialize the requested device for every
- * already initialized SCMI instance which has registered the requested
- * protocol as a valid active one: i.e. defined in DT and supported by
- * current platform FW.
- */
- mutex_lock(&scmi_list_mutex);
- list_for_each_entry(info, &scmi_list, node) {
- struct device_node *child;
-
- child = idr_find(&info->active_protocols,
- id_table->protocol_id);
- if (child) {
- struct scmi_device *sdev;
-
- sdev = scmi_get_protocol_device(child, info,
- id_table->protocol_id,
- id_table->name);
- if (sdev) {
- /* Set handle if not already set: device existed */
- if (!sdev->handle)
- sdev->handle =
- scmi_handle_get_from_info_unlocked(info);
- /* Relink consumer and suppliers */
- if (sdev->handle)
- scmi_device_link_add(&sdev->dev,
- sdev->handle->dev);
- }
- } else {
- dev_err(info->dev,
- "Failed. SCMI protocol %d not active.\n",
- id_table->protocol_id);
- }
- }
- mutex_unlock(&scmi_list_mutex);
+static void scmi_debugfs_common_cleanup(void *d)
+{
+ struct scmi_debug_info *dbg = d;
-out:
- mutex_unlock(&scmi_requested_devices_mtx);
+ if (!dbg)
+ return;
- return ret;
+ debugfs_remove_recursive(dbg->top_dentry);
+ kfree(dbg->name);
+ kfree(dbg->type);
}
-/**
- * scmi_protocol_device_unrequest - Helper to unrequest a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be unrequested.
- *
- * An helper to let an SCMI driver release its request about devices; note that
- * devices are created and initialized once the first SCMI driver request them
- * but they destroyed only on SCMI core unloading/unbinding.
- *
- * The current SCMI transport layer uses such devices as internal references and
- * as such they could be shared as same transport between multiple drivers so
- * that cannot be safely destroyed till the whole SCMI stack is removed.
- * (unless adding further burden of refcounting.)
- */
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
{
- struct list_head *phead;
+ char top_dir[16];
+ struct dentry *trans, *top_dentry;
+ struct scmi_debug_info *dbg;
+ const char *c_ptr = NULL;
- pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
+ dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return NULL;
- mutex_lock(&scmi_requested_devices_mtx);
- phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
- if (phead) {
- struct scmi_requested_dev *victim, *tmp;
+ dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
+ if (!dbg->name) {
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
- list_for_each_entry_safe(victim, tmp, phead, node) {
- if (!strcmp(victim->id_table->name, id_table->name)) {
- list_del(&victim->node);
- kfree(victim);
- break;
- }
- }
+ of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
+ dbg->type = kstrdup(c_ptr, GFP_KERNEL);
+ if (!dbg->type) {
+ kfree(dbg->name);
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
- if (list_empty(phead)) {
- idr_remove(&scmi_requested_devices,
- id_table->protocol_id);
- kfree(phead);
- }
+ snprintf(top_dir, 16, "%d", info->id);
+ top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
+ trans = debugfs_create_dir("transport", top_dentry);
+
+ dbg->is_atomic = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
+
+ debugfs_create_str("instance_name", 0400, top_dentry,
+ (char **)&dbg->name);
+
+ debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
+ &info->atomic_threshold);
+
+ debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
+
+ debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
+
+ debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
+ (u32 *)&info->desc->max_rx_timeout_ms);
+
+ debugfs_create_u32("max_msg_size", 0400, trans,
+ (u32 *)&info->desc->max_msg_size);
+
+ debugfs_create_u32("tx_max_msg", 0400, trans,
+ (u32 *)&info->tx_minfo.max_msg);
+
+ debugfs_create_u32("rx_max_msg", 0400, trans,
+ (u32 *)&info->rx_minfo.max_msg);
+
+ dbg->top_dentry = top_dentry;
+
+ if (devm_add_action_or_reset(info->dev,
+ scmi_debugfs_common_cleanup, dbg)) {
+ scmi_debugfs_common_cleanup(dbg);
+ return NULL;
}
- mutex_unlock(&scmi_requested_devices_mtx);
+
+ return dbg;
}
-static int scmi_cleanup_txrx_channels(struct scmi_info *info)
+static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
{
- int ret;
- struct idr *idr = &info->tx_idr;
+ int id, num_chans = 0, ret = 0;
+ struct scmi_chan_info *cinfo;
+ u8 channels[SCMI_MAX_CHANNELS] = {};
+ DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
+
+ if (!info->dbg)
+ return -EINVAL;
+
+ /* Enumerate all channels to collect their ids */
+ idr_for_each_entry(&info->tx_idr, cinfo, id) {
+ /*
+ * Cannot happen, but be defensive.
+ * Zero as num_chans is ok, warn and carry on.
+ */
+ if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
+ dev_warn(info->dev,
+ "SCMI RAW - Error enumerating channels\n");
+ break;
+ }
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->tx_idr);
+ if (!test_bit(cinfo->id, protos)) {
+ channels[num_chans++] = cinfo->id;
+ set_bit(cinfo->id, protos);
+ }
+ }
- idr = &info->rx_idr;
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->rx_idr);
+ info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
+ info->id, channels, num_chans,
+ info->desc, info->tx_minfo.max_msg);
+ if (IS_ERR(info->raw)) {
+ dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
+ ret = PTR_ERR(info->raw);
+ info->raw = NULL;
+ }
return ret;
}
@@ -2366,6 +2657,7 @@ static int scmi_probe(struct platform_device *pdev)
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
+ bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
@@ -2377,12 +2669,19 @@ static int scmi_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
+ info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
+ if (info->id < 0)
+ return info->id;
+
info->dev = dev;
info->desc = desc;
+ info->bus_nb.notifier_call = scmi_bus_notifier;
+ info->dev_req_nb.notifier_call = scmi_device_request_notifier;
INIT_LIST_HEAD(&info->node);
idr_init(&info->protocols);
mutex_init(&info->protocols_mtx);
idr_init(&info->active_protocols);
+ mutex_init(&info->devreq_mtx);
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
@@ -2406,21 +2705,52 @@ static int scmi_probe(struct platform_device *pdev)
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
- return ret;
+ goto clear_ida;
}
- ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ /* Setup all channels described in the DT at first */
+ ret = scmi_channels_setup(info);
if (ret)
- return ret;
+ goto clear_ida;
- ret = scmi_xfer_info_init(info);
+ ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
if (ret)
goto clear_txrx_setup;
+ ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ if (ret)
+ goto clear_bus_notifier;
+
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ if (scmi_top_dentry) {
+ info->dbg = scmi_debugfs_common_setup(info);
+ if (!info->dbg)
+ dev_warn(dev, "Failed to setup SCMI debugfs.\n");
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ ret = scmi_debugfs_raw_mode_setup(info);
+ if (!coex) {
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ /* Bail out anyway when coex disabled. */
+ return 0;
+ }
+
+ /* Coex enabled, carry on in any case. */
+ dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
+ }
+ }
+
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
- if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+ if (info->desc->atomic_enabled &&
+ !is_transport_polling_capable(info->desc))
dev_err(dev,
"Transport is not polling capable. Atomic mode not supported.\n");
@@ -2432,6 +2762,8 @@ static int scmi_probe(struct platform_device *pdev)
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
dev_err(dev, "unable to communicate with SCMI\n");
+ if (coex)
+ return 0;
goto notification_exit;
}
@@ -2467,29 +2799,36 @@ static int scmi_probe(struct platform_device *pdev)
}
of_node_get(child);
- scmi_create_protocol_devices(child, info, prot_id);
+ scmi_create_protocol_devices(child, info, prot_id, NULL);
}
return 0;
notification_exit:
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
scmi_notification_exit(&info->handle);
+clear_dev_req_notifier:
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+clear_bus_notifier:
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
clear_txrx_setup:
scmi_cleanup_txrx_channels(info);
+clear_ida:
+ ida_free(&scmi_id, info->id);
return ret;
}
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
-{
- idr_remove(idr, id);
-}
-
static int scmi_remove(struct platform_device *pdev)
{
- int ret, id;
+ int id;
struct scmi_info *info = platform_get_drvdata(pdev);
struct device_node *child;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
+
mutex_lock(&scmi_list_mutex);
if (info->users)
dev_warn(&pdev->dev,
@@ -2507,10 +2846,14 @@ static int scmi_remove(struct platform_device *pdev)
of_node_put(child);
idr_destroy(&info->active_protocols);
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
+
/* Safe to free channels since no more users */
- ret = scmi_cleanup_txrx_channels(info);
- if (ret)
- dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
+ scmi_cleanup_txrx_channels(info);
+
+ ida_free(&scmi_id, info->id);
return 0;
}
@@ -2639,6 +2982,19 @@ static void __exit scmi_transports_exit(void)
__scmi_transports_setup(false);
}
+static struct dentry *scmi_debugfs_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("scmi", NULL);
+ if (IS_ERR(d)) {
+ pr_err("Could NOT create SCMI top dentry.\n");
+ return NULL;
+ }
+
+ return d;
+}
+
static int __init scmi_driver_init(void)
{
int ret;
@@ -2647,13 +3003,14 @@ static int __init scmi_driver_init(void)
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
- scmi_bus_init();
-
/* Initialize any compiled-in transport which provided an init/exit */
ret = scmi_transports_init();
if (ret)
return ret;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
+ scmi_top_dentry = scmi_debugfs_init();
+
scmi_base_register();
scmi_clock_register();
@@ -2667,7 +3024,7 @@ static int __init scmi_driver_init(void)
return platform_driver_register(&scmi_driver);
}
-subsys_initcall(scmi_driver_init);
+module_init(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
@@ -2682,11 +3039,11 @@ static void __exit scmi_driver_exit(void)
scmi_system_unregister();
scmi_powercap_unregister();
- scmi_bus_exit();
-
scmi_transports_exit();
platform_driver_unregister(&scmi_driver);
+
+ debugfs_remove_recursive(scmi_top_dentry);
}
module_exit(scmi_driver_exit);
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 1e40cb035044..1efa5e9392c4 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -19,13 +19,15 @@
* struct scmi_mailbox - Structure representing a SCMI mailbox transport
*
* @cl: Mailbox Client
- * @chan: Transmit/Receive mailbox channel
+ * @chan: Transmit/Receive mailbox uni/bi-directional channel
+ * @chan_receiver: Optional Receiver mailbox unidirectional channel
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
*/
struct scmi_mailbox {
struct mbox_client cl;
struct mbox_chan *chan;
+ struct mbox_chan *chan_receiver;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
};
@@ -46,12 +48,100 @@ static void rx_callback(struct mbox_client *cl, void *m)
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
-static bool mailbox_chan_available(struct device *dev, int idx)
+static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
- return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+ int num_mb;
+
+ /*
+ * Just check if bidirectional channels are involved, and check the
+ * index accordingly; proper full validation will be made later
+ * in mailbox_chan_setup().
+ */
+ num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
+ if (num_mb == 3 && idx == 1)
+ idx = 2;
+
+ return !of_parse_phandle_with_args(of_node, "mboxes",
"#mbox-cells", idx, NULL);
}
+/**
+ * mailbox_chan_validate - Validate transport configuration and map channels
+ *
+ * @cdev: Reference to the underlying transport device carrying the
+ * of_node descriptor to analyze.
+ * @a2p_rx_chan: A reference to an optional unidirectional channel to use
+ * for replies on the a2p channel. Set as zero if not present.
+ * @p2a_chan: A reference to the optional p2a channel.
+ * Set as zero if not present.
+ *
+ * At first, validate the transport configuration as described in terms of
+ * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
+ * appropriate to be used in the current configuration.
+ *
+ * Return: 0 on Success or error
+ */
+static int mailbox_chan_validate(struct device *cdev,
+ int *a2p_rx_chan, int *p2a_chan)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);
+
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 ||
+ (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) {
+ dev_warn(cdev,
+ "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
+ of_node_full_name(np), num_mb, num_sh);
+ return -EINVAL;
+ }
+
+ /* Bail out if provided shmem descriptors do not refer distinct areas */
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ /* Calculate channel IDs to use depending on mboxes/shmem layout */
+ if (!ret) {
+ switch (num_mb) {
+ case 1:
+ *a2p_rx_chan = 0;
+ *p2a_chan = 0;
+ break;
+ case 2:
+ if (num_sh == 2) {
+ *a2p_rx_chan = 0;
+ *p2a_chan = 1;
+ } else {
+ *a2p_rx_chan = 1;
+ *p2a_chan = 0;
+ }
+ break;
+ case 3:
+ *a2p_rx_chan = 1;
+ *p2a_chan = 2;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
@@ -59,11 +149,18 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
struct device *cdev = cinfo->dev;
struct scmi_mailbox *smbox;
struct device_node *shmem;
- int ret, idx = tx ? 0 : 1;
+ int ret, a2p_rx_chan, p2a_chan, idx = tx ? 0 : 1;
struct mbox_client *cl;
resource_size_t size;
struct resource res;
+ ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan);
+ if (ret)
+ return ret;
+
+ if (!tx && !p2a_chan)
+ return -ENODEV;
+
smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
if (!smbox)
return -ENOMEM;
@@ -93,15 +190,26 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
cl->tx_block = false;
cl->knows_txdone = tx;
- smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
+ smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
if (IS_ERR(smbox->chan)) {
ret = PTR_ERR(smbox->chan);
if (ret != -EPROBE_DEFER)
- dev_err(cdev, "failed to request SCMI %s mailbox\n",
- tx ? "Tx" : "Rx");
+ dev_err(cdev,
+ "failed to request SCMI %s mailbox\n", desc);
return ret;
}
+ /* Additional unidirectional channel for TX if needed */
+ if (tx && a2p_rx_chan) {
+ smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
+ if (IS_ERR(smbox->chan_receiver)) {
+ ret = PTR_ERR(smbox->chan_receiver);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
+ return ret;
+ }
+ }
+
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
@@ -115,13 +223,13 @@ static int mailbox_chan_free(int id, void *p, void *data)
if (smbox && !IS_ERR(smbox->chan)) {
mbox_free_channel(smbox->chan);
+ mbox_free_channel(smbox->chan_receiver);
cinfo->transport_info = NULL;
smbox->chan = NULL;
+ smbox->chan_receiver = NULL;
smbox->cinfo = NULL;
}
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 2a7aeab40e54..e123de6e8c67 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -328,11 +328,11 @@ static int scmi_optee_link_supplier(struct device *dev)
return 0;
}
-static bool scmi_optee_chan_available(struct device *dev, int idx)
+static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
{
u32 channel_id;
- return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
+ return !of_property_read_u32_index(of_node, "linaro,optee-channel-id",
idx, &channel_id);
}
@@ -403,7 +403,7 @@ out:
static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
- if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
+ if (of_property_present(cinfo->dev->of_node, "shmem"))
return setup_static_shmem(dev, cinfo, channel);
else
return setup_dynamic_shmem(dev, channel);
@@ -481,8 +481,6 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
cinfo->transport_info = NULL;
channel->cinfo = NULL;
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 2f3bf691db7c..78e1a01eb656 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -115,6 +115,7 @@ struct scmi_msg_hdr {
* - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
* - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
* (Missing synchronous response is assumed OK and ignored)
+ * @flags: Optional flags associated to this xfer.
* @lock: A spinlock to protect state and busy fields.
* @priv: A pointer for transport private usage.
*/
@@ -135,6 +136,12 @@ struct scmi_xfer {
#define SCMI_XFER_RESP_OK 1
#define SCMI_XFER_DRESP_OK 2
int state;
+#define SCMI_XFER_FLAG_IS_RAW BIT(0)
+#define SCMI_XFER_IS_RAW(x) ((x)->flags & SCMI_XFER_FLAG_IS_RAW)
+#define SCMI_XFER_FLAG_CHAN_SET BIT(1)
+#define SCMI_XFER_IS_CHAN_SET(x) \
+ ((x)->flags & SCMI_XFER_FLAG_CHAN_SET)
+ int flags;
/* A lock to protect state and busy fields */
spinlock_t lock;
void *priv;
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
new file mode 100644
index 000000000000..d40df099fd51
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -0,0 +1,1443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Raw mode support
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * When enabled the SCMI Raw mode support exposes a userspace API which allows
+ * to send and receive SCMI commands, replies and notifications from a user
+ * application through injection and snooping of bare SCMI messages in binary
+ * little-endian format.
+ *
+ * Such injected SCMI transactions will then be routed through the SCMI core
+ * stack towards the SCMI backend server using whatever SCMI transport is
+ * currently configured on the system under test.
+ *
+ * It is meant to help in running any sort of SCMI backend server testing, no
+ * matter where the server is placed, as long as it is normally reachable via
+ * the transport configured on the system.
+ *
+ * It is activated by a Kernel configuration option since it is NOT meant to
+ * be used in production but only during development and in CI deployments.
+ *
+ * In order to avoid possible interferences between the SCMI Raw transactions
+ * originated from a test-suite and the normal operations of the SCMI drivers,
+ * when Raw mode is enabled, by default, all the regular SCMI drivers are
+ * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
+ * latter case the regular SCMI stack drivers will be loaded as usual and it is
+ * up to the user of this interface to take care of manually inhibiting the
+ * regular SCMI drivers in order to avoid interferences during the test runs.
+ *
+ * The exposed API is as follows.
+ *
+ * All SCMI Raw entries are rooted under a common /raw debugfs top directory,
+ * which in turn is rooted under the corresponding underlying SCMI instance.
+ *
+ * /sys/kernel/debug/scmi/
+ * `-- 0
+ * |-- atomic_threshold_us
+ * |-- instance_name
+ * |-- raw
+ * | |-- channels
+ * | | |-- 0x10
+ * | | | |-- message
+ * | | | `-- message_async
+ * | | `-- 0x13
+ * | | |-- message
+ * | | `-- message_async
+ * | |-- errors
+ * | |-- message
+ * | |-- message_async
+ * | |-- notification
+ * | `-- reset
+ * `-- transport
+ * |-- is_atomic
+ * |-- max_msg_size
+ * |-- max_rx_timeout_ms
+ * |-- rx_max_msg
+ * |-- tx_max_msg
+ * `-- type
+ *
+ * where:
+ *
+ * - errors: used to read back timed-out and unexpected replies
+ * - message*: used to send sync/async commands and read back immediate and
+ * delayed responses (if any)
+ * - notification: used to read any notification being emitted by the system
+ * (if previously enabled by the user app)
+ * - reset: used to flush the queues of messages (of any kind) still pending
+ * to be read; this is useful at test-suite start/stop to get
+ * rid of any unread messages from the previous run.
+ *
+ * with the per-channel entries rooted at /channels being present only on a
+ * system where multiple transport channels have been configured.
+ *
+ * Such per-channel entries can be used to explicitly choose a specific channel
+ * for SCMI bare message injection, in contrast with the general entries above
+ * where, instead, the selection of the proper channel to use is automatically
+ * performed based on the protocol embedded in the injected message and on how the
+ * transport is configured on the system.
+ *
+ * Note that other common general entries are available under transport/ to let
+ * user applications properly set their expectations in terms of
+ * timeouts and message characteristics.
+ *
+ * Each write to the message* entries causes one command request to be built
+ * and sent while the replies or delayed response are read back from those same
+ * entries one message at a time (receiving an EOF at each message boundary).
+ *
+ * The user application running the test is in charge of handling timeouts
+ * on replies and properly choosing SCMI sequence numbers for the outgoing
+ * requests (using the same sequence number is supported but discouraged).
+ *
+ * Injection of multiple in-flight requests is supported as long as the user
+ * application uses properly distinct sequence numbers for concurrent requests
+ * and takes care to properly manage all the related issues about concurrency
+ * and command/reply pairing. Keep in mind that, anyway, the real level of
+ * parallelism attainable in such a scenario is dependent on the characteristics
+ * of the underlying transport being used.
+ *
+ * Since the SCMI core regular stack is partially used to deliver and collect
+ * the messages, late replies arriving after timeouts and any other sort of
+ * unexpected message can be identified by the SCMI core as usual and they will
+ * be reported as messages under "errors" for later analysis.
+ */
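As a rough userspace sketch (illustrative only, not part of this patch): assuming a little-endian host, an SCMI instance exposed as "0" under the debugfs layout documented above, and the header bit layout defined by the SCMI specification (message id in bits [7:0], type in [9:8], protocol id in [17:10], token in [27:18]), injecting one synchronous Base protocol PROTOCOL_VERSION command and reading back its reply could look like:

/* Illustrative userspace sketch - not part of the kernel sources */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Base protocol (0x10), PROTOCOL_VERSION (0x0), command type, token 0x1 */
	uint32_t hdr = (0x0 << 0) | (0x0 << 8) | (0x10 << 10) | (0x1 << 18);
	uint8_t reply[128];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
	if (fd < 0)
		return 1;

	/* One write carries the whole message (header only for this command) */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* Blocks until the reply is queued; EOF marks the message boundary */
	n = read(fd, reply, sizeof(reply));
	if (n > 0)
		printf("reply: %zd bytes\n", n);

	close(fd);
	return 0;
}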
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "common.h"
+
+#include "raw_mode.h"
+
+#include <trace/events/scmi.h>
+
+#define SCMI_XFER_RAW_MAX_RETRIES 10
+
+/**
+ * struct scmi_raw_queue - Generic Raw queue descriptor
+ *
+ * @free_bufs: A freelists listhead used to keep unused raw buffers
+ * @free_bufs_lock: Spinlock used to protect access to @free_bufs
+ * @msg_q: A listhead to a queue of snooped messages waiting to be read out
+ * @msg_q_lock: Spinlock used to protect access to @msg_q
+ * @wq: A waitqueue used to wait and poll on related @msg_q
+ */
+struct scmi_raw_queue {
+ struct list_head free_bufs;
+ /* Protect free_bufs[] lists */
+ spinlock_t free_bufs_lock;
+ struct list_head msg_q;
+ /* Protect msg_q[] lists */
+ spinlock_t msg_q_lock;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
+ *
+ * @id: Sequential Raw instance ID.
+ * @handle: Pointer to SCMI entity handle to use
+ * @desc: Pointer to the transport descriptor to use
+ * @tx_max_msg: Maximum number of concurrent TX in-flight messages
+ * @q: An array of Raw queue descriptors
+ * @chans_q: An XArray mapping optional additional per-channel queues
+ * @free_waiters: Head of freelist for unused waiters
+ * @free_mtx: A mutex to protect the waiters freelist
+ * @active_waiters: Head of list for currently active and used waiters
+ * @active_mtx: A mutex to protect the active waiters list
+ * @waiters_work: A work descriptor to be used with the workqueue machinery
+ * @wait_wq: A workqueue reference to the created workqueue
+ * @dentry: Top debugfs root dentry for SCMI Raw
+ * @gid: A group ID used for devres accounting
+ *
+ * Note that this descriptor is passed back to the core after SCMI Raw is
+ * initialized as an opaque handle to use by subsequent SCMI Raw call hooks.
+ *
+ */
+struct scmi_raw_mode_info {
+ unsigned int id;
+ const struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ int tx_max_msg;
+ struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
+ struct xarray chans_q;
+ struct list_head free_waiters;
+ /* Protect free_waiters list */
+ struct mutex free_mtx;
+ struct list_head active_waiters;
+ /* Protect active_waiters list */
+ struct mutex active_mtx;
+ struct work_struct waiters_work;
+ struct workqueue_struct *wait_wq;
+ struct dentry *dentry;
+ void *gid;
+};
+
+/**
+ * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
+ *
+ * @start_jiffies: The timestamp in jiffies of when this structure was queued.
+ * @cinfo: A reference to the channel to use for this transaction
+ * @xfer: A reference to the xfer to be waited for
+ * @async_response: A completion to be, optionally, used for async waits: it
+ * will be set up by @scmi_do_xfer_raw_start, if needed, to be
+ * pointed at by xfer->async_done.
+ * @node: A list node.
+ */
+struct scmi_xfer_raw_waiter {
+ unsigned long start_jiffies;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer *xfer;
+ struct completion async_response;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_raw_buffer - Structure to hold a full SCMI message
+ *
+ * @max_len: The maximum allowed message size (header included) that can be
+ * stored into @msg
+ * @msg: A message buffer used to collect a full message grabbed from an xfer.
+ * @node: A list node.
+ */
+struct scmi_raw_buffer {
+ size_t max_len;
+ struct scmi_msg msg;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
+ * layer
+ *
+ * @chan_id: The preferred channel to use: if zero the channel is automatically
+ * selected based on protocol.
+ * @raw: A reference to the Raw instance.
+ * @tx: A message buffer used to collect TX message on write.
+ * @tx_size: The effective size of the TX message.
+ * @tx_req_size: The final expected size of the complete TX message.
+ * @rx: A message buffer to collect RX message on read.
+ * @rx_size: The effective size of the RX message.
+ */
+struct scmi_dbg_raw_data {
+ u8 chan_id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_msg tx;
+ size_t tx_size;
+ size_t tx_req_size;
+ struct scmi_msg rx;
+ size_t rx_size;
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
+ unsigned int chan_id)
+{
+ if (!chan_id)
+ return raw->q[idx];
+
+ return xa_load(&raw->chans_q, chan_id);
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb = NULL;
+ struct list_head *head = &q->free_bufs;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ if (!list_empty(head)) {
+ rb = list_first_entry(head, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ list_add_tail(&rb->node, &q->free_bufs);
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+}
+
+static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ list_add_tail(&rb->node, &q->msg_q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ wake_up_interruptible(&q->wq);
+}
+
+static struct scmi_raw_buffer*
+scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb = NULL;
+
+ if (!list_empty(&q->msg_q)) {
+ rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+
+ return rb;
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb;
+
+ do {
+ rb = scmi_raw_buffer_dequeue(q);
+ if (rb)
+ scmi_raw_buffer_put(q, rb);
+ } while (rb);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
+ struct scmi_chan_info *cinfo, bool async)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->free_mtx);
+ if (!list_empty(&raw->free_waiters)) {
+ rw = list_first_entry(&raw->free_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+
+ if (async) {
+ reinit_completion(&rw->async_response);
+ xfer->async_done = &rw->async_response;
+ }
+
+ rw->cinfo = cinfo;
+ rw->xfer = xfer;
+ }
+ mutex_unlock(&raw->free_mtx);
+
+ return rw;
+}
+
+static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ if (rw->xfer) {
+ rw->xfer->async_done = NULL;
+ rw->xfer = NULL;
+ }
+
+ mutex_lock(&raw->free_mtx);
+ list_add_tail(&rw->node, &raw->free_waiters);
+ mutex_unlock(&raw->free_mtx);
+}
+
+static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ /* A timestamp for the deferred worker to know how much this has aged */
+ rw->start_jiffies = jiffies;
+
+ trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
+ rw->xfer->hdr.protocol_id,
+ rw->xfer->hdr.seq,
+ raw->desc->max_rx_timeout_ms,
+ rw->xfer->hdr.poll_completion);
+
+ mutex_lock(&raw->active_mtx);
+ list_add_tail(&rw->node, &raw->active_waiters);
+ mutex_unlock(&raw->active_mtx);
+
+ /* kick waiter work */
+ queue_work(raw->wait_wq, &raw->waiters_work);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->active_mtx);
+ if (!list_empty(&raw->active_waiters)) {
+ rw = list_first_entry(&raw->active_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+ }
+ mutex_unlock(&raw->active_mtx);
+
+ return rw;
+}
+
+/**
+ * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
+ *
+ * @work: A reference to the work.
+ *
+ * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
+ * cannot wait to receive its response (if any) in the context of the injection
+ * routines so as not to leave the userspace write syscall, which delivered the
+ * SCMI message to send, pending till eventually a reply is received.
+ * Userspace should and will poll/wait instead on the read syscalls which will
+ * be in charge of reading a received reply (if any).
+ *
+ * Even though reply messages are collected and reported into the SCMI Raw layer
+ * on the RX path, nonetheless we have to properly wait for their completion as
+ * usual (and async_completion too if needed) in order to properly release the
+ * xfer structure at the end: to do this out of the context of the write/send
+ * these waiting jobs are delegated to this deferred worker.
+ *
+ * Any sent xfer, to be waited for, is timestamped and queued for later
+ * consumption by this worker: queue aging is accounted for while choosing a
+ * timeout for the completion, BUT we do not really care here if we end up
+ * accidentally waiting for a bit too long.
+ */
+static void scmi_xfer_raw_worker(struct work_struct *work)
+{
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+ unsigned long max_tmo;
+
+ raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
+ dev = raw->handle->dev;
+ max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
+
+ do {
+ int ret = 0;
+ unsigned int timeout_ms;
+ unsigned long aging;
+ struct scmi_xfer *xfer;
+ struct scmi_xfer_raw_waiter *rw;
+ struct scmi_chan_info *cinfo;
+
+ rw = scmi_xfer_raw_waiter_dequeue(raw);
+ if (!rw)
+ return;
+
+ cinfo = rw->cinfo;
+ xfer = rw->xfer;
+ /*
+ * Waiters are queued by wait-deadline at the end, so some of
+ * them could have already expired when processed, BUT we
+ * have to check the completion status anyway just in case a
+ * virtually expired (aged) transaction was indeed completed
+ * fine and we'll have to wait for the asynchronous part (if
+ * any): for this reason a 1 ms timeout is used for already
+ * expired/aged xfers.
+ */
+ aging = jiffies - rw->start_jiffies;
+ timeout_ms = max_tmo > aging ?
+ jiffies_to_msecs(max_tmo - aging) : 1;
+
+ ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
+ timeout_ms);
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (raw->desc->ops->mark_txdone)
+ raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
+
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+ /* Wait also for an async delayed response if needed */
+ if (!ret && xfer->async_done) {
+ unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+
+ if (!wait_for_completion_timeout(xfer->async_done, tmo))
+ dev_err(dev,
+ "timed out in RAW delayed resp - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+ }
+
+ /* Release waiter and xfer */
+ scmi_xfer_raw_put(raw->handle, xfer);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ } while (1);
+}
+
+static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
+{
+ int i;
+
+ dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
+
+ for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
+ scmi_raw_buffer_queue_flush(raw->q[i]);
+}
+
+/**
+ * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
+ * bare SCMI message.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @p: A pointer to return the initialized Raw xfer.
+ *
+ * After an xfer is picked from the TX pool and filled in with the message
+ * content, the xfer is registered as pending with the core in the usual way
+ * using the original sequence number provided by the user with the message.
+ *
+ * Note that, in case the testing user application is NOT using distinct
+ * sequence numbers between successive SCMI messages, such registration could
+ * fail temporarily if the previous message, using the same sequence number,
+ * had still not been released; in such a case we just wait and retry.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
+ size_t len, struct scmi_xfer **p)
+{
+ u32 msg_hdr;
+ size_t tx_size;
+ struct scmi_xfer *xfer;
+ int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
+ struct device *dev = raw->handle->dev;
+
+ if (!buf || len < sizeof(u32))
+ return -EINVAL;
+
+ tx_size = len - sizeof(u32);
+ /* Ensure we have sane transfer sizes */
+ if (tx_size > raw->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_raw_get(raw->handle);
+ if (IS_ERR(xfer)) {
+ dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
+ return PTR_ERR(xfer);
+ }
+
+ /* Build xfer from the provided SCMI bare LE message */
+ msg_hdr = le32_to_cpu(*((__le32 *)buf));
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
+ /* Polling not supported */
+ xfer->hdr.poll_completion = false;
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->tx.len = tx_size;
+ xfer->rx.len = raw->desc->max_msg_size;
+ /* Clear the whole TX buffer */
+ memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
+ if (xfer->tx.len)
+ memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
+ *p = xfer;
+
+ /*
+ * In flight registration can temporarily fail in case of Raw messages
+ * if the user injects messages without using monotonically increasing
+ * sequence numbers since, in Raw mode, the xfer (and the token) is
+ * finally released later by a deferred worker. Just retry for a while.
+ */
+ do {
+ ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
+ if (ret) {
+ dev_dbg(dev,
+ "...retrying[%d] inflight registration\n",
+ retry);
+ msleep(raw->desc->max_rx_timeout_ms /
+ SCMI_XFER_RAW_MAX_RETRIES);
+ }
+ } while (ret && --retry);
+
+ if (ret) {
+ dev_warn(dev,
+ "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
+ xfer->hdr.seq, msg_hdr);
+ scmi_xfer_raw_put(raw->handle, xfer);
+ }
+
+ return ret;
+}
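Since the in-flight registration retries above only trigger when a sequence number is reused while the previous xfer is still pending release, a test application usually just rolls its own token allocator. A minimal hypothetical userspace helper, assuming the SCMI header bit layout from the specification (message id in bits [7:0], protocol id in [17:10], 10-bit token in [27:18], message type left as 0 for commands):

#include <stdint.h>

/* Hypothetical helper: build a command header carrying a fresh 10-bit token */
static uint32_t scmi_test_next_hdr(uint8_t msg_id, uint8_t prot_id)
{
	static uint16_t token;

	token = (token + 1) & 0x3ff;		/* tokens wrap at 10 bits */

	return ((uint32_t)msg_id) |		/* message id, bits [7:0]    */
	       ((uint32_t)prot_id << 10) |	/* protocol id, bits [17:10] */
	       ((uint32_t)token << 18);		/* token, bits [27:18]       */
}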
+
+/**
+ * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
+ *
+ * @raw: A reference to the Raw instance.
+ * @xfer: The xfer to send
+ * @chan_id: The channel ID to use; if zero the channel is automatically
+ * selected based on the protocol used.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * This function sends a previously built raw xfer using an appropriate channel
+ * and queues the related waiting work.
+ *
+ * Note that we need to know explicitly if the required command is meant to be
+ * asynchronous in kind since we have to properly set up the waiter.
+ * (and deducing this from the payload is weak and does not scale given there is
+ * NOT a common header-flag stating if the command is asynchronous or not)
+ *
+ * Return: 0 on Success
+ */
+static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer *xfer, u8 chan_id,
+ bool async)
+{
+ int ret;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ if (!chan_id)
+ chan_id = xfer->hdr.protocol_id;
+ else
+ xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;
+
+ cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
+ if (IS_ERR(cinfo))
+ return PTR_ERR(cinfo);
+
+ rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
+ if (!rw) {
+ dev_warn(dev, "RAW - Cannot get a free waiter !\n");
+ return -ENOMEM;
+ }
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, raw->desc))
+ xfer->hdr.poll_completion = true;
+
+ reinit_completion(&xfer->done);
+ /* Make sure xfer state update is visible before sending */
+ smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);
+
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
+ ret = raw->desc->ops->send_message(rw->cinfo, xfer);
+ if (ret) {
+ dev_err(dev, "Failed to send RAW message %d\n", ret);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ return ret;
+ }
+
+ trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "cmnd", xfer->hdr.seq,
+ xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
+ scmi_xfer_raw_waiter_enqueue(raw, rw);
+
+ return ret;
+}
+
+/**
+ * scmi_raw_message_send - A helper to build and send an SCMI command using
+ * the provided SCMI bare message buffer
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @chan_id: The channel ID to use.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, u8 chan_id, bool async)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+
+ ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
+ if (ret)
+ scmi_xfer_raw_put(raw->handle, xfer);
+
+ return ret;
+}
+
+static struct scmi_raw_buffer *
+scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ while (list_empty(&q->msg_q)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ if (o_nonblock)
+ return ERR_PTR(-EAGAIN);
+
+ if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
+ return ERR_PTR(-ERESTARTSYS);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ }
+
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+/**
+ * scmi_raw_message_receive - A helper to dequeue and report the next
+ * available enqueued raw message payload that has been collected.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer to get hold of the whole SCMI message received and represented
+ * in little-endian binary format.
+ * @len: Length of @buf.
+ * @size: The effective size of the message copied into @buf
+ * @idx: The index of the queue to pick the next queued message from.
+ * @chan_id: The channel ID to use.
+ * @o_nonblock: A flag to request a non-blocking message dequeue.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, size_t *size,
+ unsigned int idx, unsigned int chan_id,
+ bool o_nonblock)
+{
+ int ret = 0;
+ struct scmi_raw_buffer *rb;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_select(raw, idx, chan_id);
+ if (!q)
+ return -ENODEV;
+
+ rb = scmi_raw_message_dequeue(q, o_nonblock);
+ if (IS_ERR(rb)) {
+ dev_dbg(raw->handle->dev, "RAW - No message available!\n");
+ return PTR_ERR(rb);
+ }
+
+ if (rb->msg.len <= len) {
+ memcpy(buf, rb->msg.buf, rb->msg.len);
+ *size = rb->msg.len;
+ } else {
+ ret = -ENOSPC;
+ }
+
+ scmi_raw_buffer_put(q, rb);
+
+ return ret;
+}
+
+/* SCMI Raw debugfs helpers */
+
+static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos,
+ unsigned int idx)
+{
+ ssize_t cnt;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (!rd->rx_size) {
+ int ret;
+
+ ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
+ &rd->rx_size, idx, rd->chan_id,
+ filp->f_flags & O_NONBLOCK);
+ if (ret) {
+ rd->rx_size = 0;
+ return ret;
+ }
+
+ /* Reset any previous filepos change, including writes */
+ *ppos = 0;
+ } else if (*ppos == rd->rx_size) {
+ /* Return EOF once all the message has been read-out */
+ rd->rx_size = 0;
+ return 0;
+ }
+
+ cnt = simple_read_from_buffer(buf, count, ppos,
+ rd->rx.buf, rd->rx_size);
+
+ return cnt;
+}
+
+static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos,
+ bool async)
+{
+ int ret;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (count > rd->tx.len - rd->tx_size)
+ return -ENOSPC;
+
+ /* On first write attempt @count carries the total full message size. */
+ if (!rd->tx_size)
+ rd->tx_req_size = count;
+
+ /*
+ * Gather a full message, possibly across multiple interrupted writes,
+ * before sending it with a single RAW xfer.
+ */
+ if (rd->tx_size < rd->tx_req_size) {
+ size_t cnt;
+
+ cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
+ buf, count);
+ rd->tx_size += cnt;
+ if (cnt < count)
+ return cnt;
+ }
+
+ ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
+ rd->chan_id, async);
+
+ /* Reset ppos for next message ... */
+ rd->tx_size = 0;
+ *ppos = 0;
+
+ return ret ?: count;
+}
+
+static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
+ struct poll_table_struct *wait,
+ unsigned int idx)
+{
+ unsigned long flags;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+ struct scmi_raw_queue *q;
+ __poll_t mask = 0;
+
+ q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
+ if (!q)
+ return mask;
+
+ poll_wait(filp, &q->wq, wait);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ if (!list_empty(&q->msg_q))
+ mask = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return mask;
+}
+
+static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_REPLY_QUEUE);
+}
+
+static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+}
+
+static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
+}
+
+static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+{
+ u8 id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_dbg_raw_data *rd;
+ const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ raw = inode->i_private;
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
+ if (!rd->rx.buf) {
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
+ if (!rd->tx.buf) {
+ kfree(rd->rx.buf);
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ /* Grab channel ID from debugfs entry naming if any */
+ if (!kstrtou8(id_str, 16, &id))
+ rd->chan_id = id;
+
+ rd->raw = raw;
+ filp->private_data = rd;
+
+ return 0;
+}
+
+static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ kfree(rd->rx.buf);
+ kfree(rd->tx.buf);
+ kfree(rd);
+
+ return 0;
+}
+
+static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ scmi_xfer_raw_reset(rd->raw);
+
+ return count;
+}
+
+static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .write = scmi_dbg_raw_mode_reset_write,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_NOTIF_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_notif_read,
+ .poll = scmi_test_dbg_raw_mode_notif_poll,
+ .owner = THIS_MODULE,
+};
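As with the other entries, the notification entry supports poll(); a hedged userspace sketch (the instance number "0" and buffer sizing are assumptions) waiting up to five seconds for one notification:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Illustrative only: wait up to 5s for a notification and read it out */
static ssize_t read_one_notification(unsigned char *buf, unsigned int len)
{
	struct pollfd pfd = { .events = POLLIN };
	ssize_t n = -1;

	pfd.fd = open("/sys/kernel/debug/scmi/0/raw/notification",
		      O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0)
		return -1;

	/* POLLIN is reported only when the notification queue is non-empty */
	if (poll(&pfd, 1, 5000) == 1 && (pfd.revents & POLLIN))
		n = read(pfd.fd, buf, len);

	close(pfd.fd);
	return n;
}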
+
+static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_ERRS_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_errors_read,
+ .poll = scmi_test_dbg_raw_mode_errors_poll,
+ .owner = THIS_MODULE,
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_raw_buffer *rb;
+ struct device *dev = raw->handle->dev;
+ struct scmi_raw_queue *q;
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+
+ rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
+ if (!rb)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&q->free_bufs_lock);
+ INIT_LIST_HEAD(&q->free_bufs);
+ for (i = 0; i < raw->tx_max_msg; i++, rb++) {
+ rb->max_len = raw->desc->max_msg_size + sizeof(u32);
+ rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
+ if (!rb->msg.buf)
+ return ERR_PTR(-ENOMEM);
+ scmi_raw_buffer_put(q, rb);
+ }
+
+ spin_lock_init(&q->msg_q_lock);
+ INIT_LIST_HEAD(&q->msg_q);
+ init_waitqueue_head(&q->wq);
+
+ return q;
+}
+
+static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
+ if (!rw)
+ return -ENOMEM;
+
+ raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
+ WQ_UNBOUND | WQ_FREEZABLE |
+ WQ_HIGHPRI, WQ_SYSFS, raw->id);
+ if (!raw->wait_wq)
+ return -ENOMEM;
+
+ mutex_init(&raw->free_mtx);
+ INIT_LIST_HEAD(&raw->free_waiters);
+ mutex_init(&raw->active_mtx);
+ INIT_LIST_HEAD(&raw->active_waiters);
+
+ for (i = 0; i < raw->tx_max_msg; i++, rw++) {
+ init_completion(&rw->async_response);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ }
+ INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
+
+ return 0;
+}
+
+static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ u8 *channels, int num_chans)
+{
+ int ret, idx;
+ void *gid;
+ struct device *dev = raw->handle->dev;
+
+ gid = devres_open_group(dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
+ raw->q[idx] = scmi_raw_queue_init(raw);
+ if (IS_ERR(raw->q[idx])) {
+ ret = PTR_ERR(raw->q[idx]);
+ goto err;
+ }
+ }
+
+ xa_init(&raw->chans_q);
+ if (num_chans > 1) {
+ int i;
+
+ for (i = 0; i < num_chans; i++) {
+ void *xret;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_init(raw);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto err_xa;
+ }
+
+ xret = xa_store(&raw->chans_q, channels[i], q,
+ GFP_KERNEL);
+ if (xa_err(xret)) {
+ dev_err(dev,
+ "Fail to allocate Raw queue 0x%02X\n",
+ channels[i]);
+ ret = xa_err(xret);
+ goto err_xa;
+ }
+ }
+ }
+
+ ret = scmi_xfer_raw_worker_init(raw);
+ if (ret)
+ goto err_xa;
+
+ devres_close_group(dev, gid);
+ raw->gid = gid;
+
+ return 0;
+
+err_xa:
+ xa_destroy(&raw->chans_q);
+err:
+ devres_release_group(dev, gid);
+ return ret;
+}
+
+/**
+ * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @top_dentry: A reference to the top Raw debugfs dentry
+ * @instance_id: The ID of the underlying SCMI platform instance represented by
+ * this Raw instance
+ * @channels: The list of the existing channels
+ * @num_chans: The number of entries in @channels
+ * @desc: Reference to the transport operations
+ * @tx_max_msg: Max number of in-flight messages allowed by the transport
+ *
+ * This function prepares the SCMI Raw stack and creates the debugfs API.
+ *
+ * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
+ */
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg)
+{
+ int ret;
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+
+ if (!handle || !desc)
+ return ERR_PTR(-EINVAL);
+
+ dev = handle->dev;
+ raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return ERR_PTR(-ENOMEM);
+
+ raw->handle = handle;
+ raw->desc = desc;
+ raw->tx_max_msg = tx_max_msg;
+ raw->id = instance_id;
+
+ ret = scmi_raw_mode_setup(raw, channels, num_chans);
+ if (ret) {
+ devm_kfree(dev, raw);
+ return ERR_PTR(ret);
+ }
+
+ raw->dentry = debugfs_create_dir("raw", top_dentry);
+
+ debugfs_create_file("reset", 0200, raw->dentry, raw,
+ &scmi_dbg_raw_mode_reset_fops);
+
+ debugfs_create_file("message", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file("notification", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_notification_fops);
+
+ debugfs_create_file("errors", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_errors_fops);
+
+ /*
+ * Expose per-channel entries if multiple channels are available.
+ * Just ignore errors while setting up these interfaces since we
+ * already have working core Raw support anyway.
+ */
+ if (num_chans > 1) {
+ int i;
+ struct dentry *top_chans;
+
+ top_chans = debugfs_create_dir("channels", raw->dentry);
+
+ for (i = 0; i < num_chans; i++) {
+ char cdir[8];
+ struct dentry *chd;
+
+ snprintf(cdir, 8, "0x%02X", channels[i]);
+ chd = debugfs_create_dir(cdir, top_chans);
+
+ debugfs_create_file("message", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+ }
+ }
+
+ dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
+
+ return raw;
+}
+
+/**
+ * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
+ *
+ * @r: An opaque handle to an initialized SCMI Raw instance
+ */
+void scmi_raw_mode_cleanup(void *r)
+{
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ debugfs_remove_recursive(raw->dentry);
+
+ cancel_work_sync(&raw->waiters_work);
+ destroy_workqueue(raw->wait_wq);
+ xa_destroy(&raw->chans_q);
+}
+
+static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
+ struct scmi_xfer *xfer)
+{
+ __le32 *m;
+ size_t msg_size;
+
+ if (!xfer || !msg || !msg_len)
+ return -EINVAL;
+
+ /* Account for hdr ...*/
+ msg_size = xfer->rx.len + sizeof(u32);
+ /* ... and status if needed */
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ msg_size += sizeof(u32);
+
+ if (msg_size > *msg_len)
+ return -ENOSPC;
+
+ m = msg;
+ *m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ *++m = cpu_to_le32(xfer->hdr.status);
+
+ memcpy(++m, xfer->rx.buf, xfer->rx.len);
+
+ *msg_len = msg_size;
+
+ return 0;
+}
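The collected message therefore carries a 4-byte little-endian header, a 4-byte status word for anything but notifications, and then the payload. A hedged sketch of how a little-endian userspace reader might unpack a reply obtained from the message entry:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative parser for a reply laid out as in scmi_xfer_raw_collect() */
static void parse_raw_reply(const uint8_t *msg, size_t len)
{
	uint32_t hdr, status;

	if (len < 2 * sizeof(uint32_t))
		return;

	memcpy(&hdr, msg, sizeof(hdr));		/* valid as-is on LE hosts */
	memcpy(&status, msg + 4, sizeof(status));

	printf("prot:0x%02X msg:0x%02X token:%u status:%d payload:%zu bytes\n",
	       (hdr >> 10) & 0xff, hdr & 0xff, (hdr >> 18) & 0x3ff,
	       (int32_t)status, len - 2 * sizeof(uint32_t));
}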
+
+/**
+ * scmi_raw_message_report - Helper to report back valid responses/notifications
+ * to raw message requests.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @xfer: The xfer containing the message to be reported
+ * @idx: The index of the queue.
+ * @chan_id: The channel ID to use.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the regular RX
+ * path to save and enqueue the response/notification payload carried by this
+ * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
+ *
+ * This way the caller can free the related xfer immediately afterwards and the
+ * user can read back the raw message payload at its own pace (if ever) without
+ * holding an xfer for too long.
+ */
+void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id)
+{
+ int ret;
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+ struct device *dev;
+ struct scmi_raw_queue *q;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
+ return;
+
+ dev = raw->handle->dev;
+ q = scmi_raw_queue_select(raw, idx,
+ SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+
+ /*
+ * Grab the msg_q_lock upfront to avoid a possible race between
+ * realizing the free list was empty and effectively picking the next
+ * buffer to use from the oldest one enqueued and still unread on this
+ * msg_q.
+ *
+ * Note that these locks are not taken together anywhere else, so there is no
+ * risk of deadlocks due to inversion.
+ */
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_get(q);
+ if (!rb) {
+ /*
+ * Immediate and delayed replies to previously injected Raw
+ * commands MUST be read back from userspace to free the buffers;
+ * if this is not happening, something is seriously broken and
+ * must be fixed at the application level: complain loudly.
+ */
+ if (idx == SCMI_RAW_REPLY_QUEUE) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ dev_warn(dev,
+ "RAW[%d] - Buffers exhausted. Dropping report.\n",
+ idx);
+ return;
+ }
+
+ /*
+ * Notifications and errors queues are instead handled in a
+ * circular manner: unread old buffers are just overwritten by
+ * newer ones.
+ *
+ * The main reason for this is that notifications originated
+ * by Raw requests cannot be distinguished from normal ones, so
+ * the Raw buffer queues risk being flooded and depleted by
+ * notifications if they are mistakenly left enabled or when in
+ * coexistence mode.
+ */
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ if (WARN_ON(!rb)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ return;
+ }
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ dev_warn_once(dev,
+ "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
+ idx);
+ }
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
+ if (ret) {
+ dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
+ scmi_raw_buffer_put(q, rb);
+ return;
+ }
+
+ scmi_raw_buffer_enqueue(q, rb);
+}
+
+static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, u32 msg_hdr)
+{
+ /* Unpack received HDR as it is */
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+
+ memset(xfer->rx.buf, 0x00, xfer->rx.len);
+
+ raw->desc->ops->fetch_response(cinfo, xfer);
+}
+
+/**
+ * scmi_raw_error_report - Helper to report back timed-out or generally
+ * unexpected replies.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @cinfo: A reference to the channel to use to retrieve the broken xfer
+ * @msg_hdr: The SCMI message header of the message to fetch and report
+ * @priv: Any private data related to the xfer.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the RX path in
+ * case of errors to save and enqueue the bad message payload carried by the
+ * message that has just been received.
+ *
+ * Note that we have to manually fetch any available payload into a temporary
+ * xfer to be able to save and enqueue the message, since the regular RX error
+ * path that called this would not have fetched the message payload, having
+ * classified it as an error.
+ */
+void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
+{
+ struct scmi_xfer xfer;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ xfer.rx.len = raw->desc->max_msg_size;
+ xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
+ if (!xfer.rx.buf) {
+ dev_info(raw->handle->dev,
+ "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
+ msg_hdr);
+ return;
+ }
+
+ /* Any transport-provided priv must be passed back down to transport */
+ if (priv)
+ /* Ensure priv is visible */
+ smp_store_mb(xfer.priv, priv);
+
+ scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
+ scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);
+
+ kfree(xfer.rx.buf);
+}
diff --git a/drivers/firmware/arm_scmi/raw_mode.h b/drivers/firmware/arm_scmi/raw_mode.h
new file mode 100644
index 000000000000..8af756a83fd1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * Raw mode support header.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_RAW_MODE_H
+#define _SCMI_RAW_MODE_H
+
+#include "common.h"
+
+enum {
+ SCMI_RAW_REPLY_QUEUE,
+ SCMI_RAW_NOTIF_QUEUE,
+ SCMI_RAW_ERRS_QUEUE,
+ SCMI_RAW_MAX_QUEUE
+};
+
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg);
+void scmi_raw_mode_cleanup(void *raw);
+
+void scmi_raw_message_report(void *raw, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id);
+void scmi_raw_error_report(void *raw, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv);
+
+#endif /* _SCMI_RAW_MODE_H */
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 1dfe534b8518..87b4f4d35f06 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
+ size_t len = ioread32(&shmem->length);
+
xfer->hdr.status = ioread32(shmem->msg_payload);
/* Skip the length of header and status in shmem area i.e 8 bytes */
- xfer->rx.len = min_t(size_t, xfer->rx.len,
- ioread32(&shmem->length) - 8);
+ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
size_t max_len, struct scmi_xfer *xfer)
{
+ size_t len = ioread32(&shmem->length);
+
/* Skip only the length of header in shmem area i.e 4 bytes */
- xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 87a7b13cf868..93272e4bbd12 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -52,9 +52,9 @@ static irqreturn_t smc_msg_done_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static bool smc_chan_available(struct device *dev, int idx)
+static bool smc_chan_available(struct device_node *of_node, int idx)
{
- struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
if (!np)
return false;
@@ -171,8 +171,6 @@ static int smc_chan_free(int id, void *p, void *data)
cinfo->transport_info = NULL;
scmi_info->cinfo = NULL;
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 33c9b81a55cd..d68c01cb7aa0 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
}
vioch->shutdown_done = &vioch_shutdown_done;
- virtio_break_device(vioch->vqueue->vdev);
if (!vioch->is_rx && vioch->deferred_tx_wq)
/* Cannot be kicked anymore after this...*/
vioch->deferred_tx_wq = NULL;
@@ -386,7 +385,7 @@ static int virtio_link_supplier(struct device *dev)
return 0;
}
-static bool virtio_chan_available(struct device *dev, int idx)
+static bool virtio_chan_available(struct device_node *of_node, int idx)
{
struct scmi_vio_channel *channels, *vioch = NULL;
@@ -482,10 +481,14 @@ static int virtio_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_vio_channel *vioch = cinfo->transport_info;
+ /*
+ * Break the device to inhibit any further traffic while the channels are
+ * being shut down: doing it later, while holding vioch->lock, creates
+ * unsafe locking dependency chains, as reported by LOCKDEP.
+ */
+ virtio_break_device(vioch->vqueue->vdev);
scmi_vio_channel_cleanup_sync(vioch);
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 1e1a51510e83..f9040bd61081 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
+static int sdei_hp_state;
+
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
@@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void)
{
int err;
- WARN_ON_ONCE(preemptible());
-
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to mask CPU[%u]: %d\n",
@@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void)
static void _ipi_mask_cpu(void *ignored)
{
+ WARN_ON_ONCE(preemptible());
sdei_mask_local_cpu();
}
@@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void)
{
int err;
- WARN_ON_ONCE(preemptible());
-
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to unmask CPU[%u]: %d\n",
@@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void)
static void _ipi_unmask_cpu(void *ignored)
{
+ WARN_ON_ONCE(preemptible());
sdei_unmask_local_cpu();
}
@@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored)
{
int err;
+ WARN_ON_ONCE(preemptible());
+
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
NULL);
if (err && err != -EIO)
@@ -389,8 +391,6 @@ static void _local_event_enable(void *data)
int err;
struct sdei_crosscall_args *arg = data;
- WARN_ON_ONCE(preemptible());
-
err = sdei_api_event_enable(arg->event->event_num);
sdei_cross_call_return(arg, err);
@@ -479,8 +479,6 @@ static void _local_event_unregister(void *data)
int err;
struct sdei_crosscall_args *arg = data;
- WARN_ON_ONCE(preemptible());
-
err = sdei_api_event_unregister(arg->event->event_num);
sdei_cross_call_return(arg, err);
@@ -561,8 +559,6 @@ static void _local_event_register(void *data)
struct sdei_registered_event *reg;
struct sdei_crosscall_args *arg = data;
- WARN_ON(preemptible());
-
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
reg, 0, 0);
@@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
{
int rv;
+ WARN_ON_ONCE(preemptible());
+
switch (action) {
case CPU_PM_ENTER:
rv = sdei_mask_local_cpu();
@@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+ cpuhp_remove_state(sdei_entry_point);
err = sdei_unregister_shared();
if (err)
@@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev)
return err;
}
- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
- if (err)
+ if (err < 0) {
pr_warn("Failed to re-register CPU hotplug notifier...\n");
+ return err;
+ }
- return err;
+ sdei_hp_state = err;
+ return 0;
}
static int sdei_device_restore(struct device *dev)
@@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
* We are going to reset the interface, after this there is no point
* doing work when we take CPUs offline.
*/
- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+ cpuhp_remove_state(sdei_hp_state);
sdei_platform_reset();
@@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev)
goto remove_cpupm;
}
- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
- if (err) {
+ if (err < 0) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
+ sdei_hp_state = err;
+
return 0;
remove_reboot:
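
For reference, the dynamically allocated hotplug-state pattern adopted above, as a standalone sketch; the example_* names are made up, while CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() and cpuhp_remove_state() are the real kernel APIs:

#include <linux/cpuhotplug.h>

static int example_hp_state;

static int example_cpu_up(unsigned int cpu)
{
	return 0;	/* per-CPU bring-up work would go here */
}

static int example_cpu_down(unsigned int cpu)
{
	return 0;	/* per-CPU tear-down work would go here */
}

static int example_register(void)
{
	int ret;

	/* With CPUHP_AP_ONLINE_DYN the core allocates a state and returns it */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_up, example_cpu_down);
	if (ret < 0)
		return ret;

	example_hp_state = ret;		/* remember it for later removal */
	return 0;
}

static void example_unregister(void)
{
	cpuhp_remove_state(example_hp_state);
}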
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 5f47dbf4889a..0ea5206be4c9 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -255,4 +255,3 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index f558b390fbfe..e4ccfb6a8fa5 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -301,6 +302,7 @@ struct cs_dsp_ops {
static const struct cs_dsp_ops cs_dsp_adsp1_ops;
static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
static const struct cs_dsp_ops cs_dsp_halo_ops;
+static const struct cs_dsp_ops cs_dsp_halo_ao_ops;
struct cs_dsp_buf {
struct list_head list;
@@ -456,6 +458,33 @@ static const struct {
},
};
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
+ unsigned int off);
+
+static int cs_dsp_debugfs_read_controls_show(struct seq_file *s, void *ignored)
+{
+ struct cs_dsp *dsp = s->private;
+ struct cs_dsp_coeff_ctl *ctl;
+ unsigned int reg;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ cs_dsp_coeff_base_reg(ctl, &reg, 0);
+ seq_printf(s, "%22.*s: %#8zx %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
+ ctl->subname_len, ctl->subname, ctl->len,
+ cs_dsp_mem_region_name(ctl->alg_region.type),
+ ctl->offset, reg, ctl->fw_name, ctl->alg_region.alg, ctl->type,
+ ctl->flags & WMFW_CTL_FLAG_VOLATILE ? 'V' : '-',
+ ctl->flags & WMFW_CTL_FLAG_SYS ? 'S' : '-',
+ ctl->flags & WMFW_CTL_FLAG_READABLE ? 'R' : '-',
+ ctl->flags & WMFW_CTL_FLAG_WRITEABLE ? 'W' : '-',
+ ctl->enabled ? "enabled" : "disabled",
+ ctl->set ? "dirty" : "clean");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cs_dsp_debugfs_read_controls);
+
/**
* cs_dsp_init_debugfs() - Create and populate DSP representation in debugfs
* @dsp: pointer to DSP structure
@@ -478,6 +507,9 @@ void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
debugfs_create_file(cs_dsp_debugfs_fops[i].name, 0444, root,
dsp, &cs_dsp_debugfs_fops[i].fops);
+ debugfs_create_file("controls", 0444, root, dsp,
+ &cs_dsp_debugfs_read_controls_fops);
+
dsp->debugfs_root = root;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
@@ -1300,6 +1332,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
int regions = 0;
int ret, offset, type;
+ if (!firmware)
+ return 0;
+
ret = -EINVAL;
pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
@@ -2821,7 +2856,10 @@ EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP);
*/
int cs_dsp_halo_init(struct cs_dsp *dsp)
{
- dsp->ops = &cs_dsp_halo_ops;
+ if (dsp->no_core_startstop)
+ dsp->ops = &cs_dsp_halo_ao_ops;
+ else
+ dsp->ops = &cs_dsp_halo_ops;
return cs_dsp_common_init(dsp);
}
@@ -3187,6 +3225,14 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = {
.stop_core = cs_dsp_halo_stop_core,
};
+static const struct cs_dsp_ops cs_dsp_halo_ao_ops = {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_halo_validate_version,
+ .setup_algs = cs_dsp_halo_setup_algs,
+ .region_to_reg = cs_dsp_halo_region_to_reg,
+ .show_fw_status = cs_dsp_halo_show_fw_status,
+};
+
/**
* cs_dsp_chunk_write() - Format data to a DSP memory chunk
* @ch: Pointer to the chunk structure
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 66727ad3361b..03708ab64e56 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -304,7 +304,7 @@ static struct attribute *dmi_sysfs_sel_attrs[] = {
};
ATTRIBUTE_GROUPS(dmi_sysfs_sel);
-static struct kobj_type dmi_system_event_log_ktype = {
+static const struct kobj_type dmi_system_event_log_ktype = {
.release = dmi_entry_free,
.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
.default_groups = dmi_sysfs_sel_groups,
@@ -418,10 +418,10 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
state->pos, state->count);
case DMI_SEL_ACCESS_METHOD_GPNV:
- pr_info("dmi-sysfs: GPNV support missing.\n");
+ pr_info_ratelimited("dmi-sysfs: GPNV support missing.\n");
return -EIO;
default:
- pr_info("dmi-sysfs: Unknown access method %02x\n",
+ pr_info_ratelimited("dmi-sysfs: Unknown access method %02x\n",
sel.access_method);
return -EIO;
}
@@ -563,7 +563,7 @@ static void dmi_sysfs_entry_release(struct kobject *kobj)
kfree(entry);
}
-static struct kobj_type dmi_sysfs_entry_ktype = {
+static const struct kobj_type dmi_sysfs_entry_ktype = {
.release = dmi_sysfs_entry_release,
.sysfs_ops = &dmi_sysfs_attr_ops,
.default_groups = dmi_sysfs_entry_groups,
@@ -603,16 +603,16 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
"%d-%d", dh->type, entry->instance);
- if (*ret) {
- kobject_put(&entry->kobj);
- return;
- }
-
/* Thread on the global list for cleanup */
spin_lock(&entry_list_lock);
list_add_tail(&entry->list, &entry_list);
spin_unlock(&entry_list_lock);
+ if (*ret) {
+ kobject_put(&entry->kobj);
+ return;
+ }
+
/* Handle specializations by type */
switch (dh->type) {
case DMI_ENTRY_SYSTEM_EVENT_LOG:
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 5cc238916551..55dec4eb2c00 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -608,7 +608,7 @@ static void edd_release(struct kobject * kobj)
kfree(dev);
}
-static struct kobj_type edd_ktype = {
+static const struct kobj_type edd_ktype = {
.release = edd_release,
.sysfs_ops = &edd_attr_ops,
};
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index 36d3b8b9da47..fa9c1c3bf168 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -12,7 +12,6 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/printk.h>
#include <linux/bcd.h>
#include <acpi/ghes.h>
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index 53e435c4f310..a55771b99a97 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -9,7 +9,6 @@
#include <linux/cper.h>
#include "cper_cxl.h"
-#include <linux/cxl_err.h>
#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
@@ -19,6 +18,17 @@
#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
"Restricted CXL Host Downstream Port",
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index 4d6c5327471a..f80a9af3d16e 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -10,11 +10,14 @@
#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/screen_info.h>
+#include <linux/string.h>
#include <asm/early_ioremap.h>
static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
+static u16 cur_line_y, max_line_y;
+static u32 efi_x_array[1024];
static u32 efi_x, efi_y;
static u64 fb_base;
static bool fb_wb;
@@ -85,9 +88,17 @@ static void efi_earlycon_clear_scanline(unsigned int y)
static void efi_earlycon_scroll_up(void)
{
unsigned long *dst, *src;
+ u16 maxlen = 0;
u16 len;
u32 i, height;
+ /* Find the cached maximum x coordinate */
+ for (i = 0; i < max_line_y; i++) {
+ if (efi_x_array[i] > maxlen)
+ maxlen = efi_x_array[i];
+ }
+ maxlen *= 4;
+
len = screen_info.lfb_linelength;
height = screen_info.lfb_height;
@@ -102,7 +113,7 @@ static void efi_earlycon_scroll_up(void)
return;
}
- memmove(dst, src, len);
+ memmove(dst, src, maxlen);
efi_earlycon_unmap(src, len);
efi_earlycon_unmap(dst, len);
@@ -135,6 +146,7 @@ static void
efi_earlycon_write(struct console *con, const char *str, unsigned int num)
{
struct screen_info *si;
+ u32 cur_efi_x = efi_x;
unsigned int len;
const char *s;
void *dst;
@@ -143,16 +155,10 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
len = si->lfb_linelength;
while (num) {
- unsigned int linemax;
- unsigned int h, count = 0;
-
- for (s = str; *s && *s != '\n'; s++) {
- if (count == num)
- break;
- count++;
- }
+ unsigned int linemax = (si->lfb_width - efi_x) / font->width;
+ unsigned int h, count;
- linemax = (si->lfb_width - efi_x) / font->width;
+ count = strnchrnul(str, num, '\n') - str;
if (count > linemax)
count = linemax;
@@ -181,6 +187,7 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
str += count;
if (num > 0 && *s == '\n') {
+ cur_efi_x = efi_x;
efi_x = 0;
efi_y += font->height;
str++;
@@ -188,6 +195,7 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
}
if (efi_x + font->width > si->lfb_width) {
+ cur_efi_x = efi_x;
efi_x = 0;
efi_y += font->height;
}
@@ -195,6 +203,9 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
if (efi_y + font->height > si->lfb_height) {
u32 i;
+ efi_x_array[cur_line_y] = cur_efi_x;
+ cur_line_y = (cur_line_y + 1) % max_line_y;
+
efi_y -= font->height;
efi_earlycon_scroll_up();
@@ -204,6 +215,14 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
}
}
+static bool __initdata fb_probed;
+
+void __init efi_earlycon_reprobe(void)
+{
+ if (fb_probed)
+ setup_earlycon("efifb");
+}
+
static int __init efi_earlycon_setup(struct earlycon_device *device,
const char *opt)
{
@@ -211,15 +230,17 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
u16 xres, yres;
u32 i;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ fb_wb = opt && !strcmp(opt, "ram");
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) {
+ fb_probed = true;
return -ENODEV;
+ }
fb_base = screen_info.lfb_base;
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- fb_wb = opt && !strcmp(opt, "ram");
-
si = &screen_info;
xres = si->lfb_width;
yres = si->lfb_height;
@@ -235,7 +256,15 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (!font)
return -ENODEV;
- efi_y = rounddown(yres, font->height) - font->height;
+ /* Fill the cache with maximum possible value of x coordinate */
+ memset32(efi_x_array, rounddown(xres, font->width), ARRAY_SIZE(efi_x_array));
+ efi_y = rounddown(yres, font->height);
+
+ /* Make sure we have cache for the x coordinate for the full screen */
+ max_line_y = efi_y / font->height + 1;
+ cur_line_y = 0;
+
+ efi_y -= font->height;
for (i = 0; i < (yres - efi_y) / font->height; i++)
efi_earlycon_scroll_up();
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 1639159493e3..ef0820f1a924 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -72,6 +72,9 @@ static void __init init_screen_info(void)
if (memblock_is_map_memory(screen_info.lfb_base))
memblock_mark_nomap(screen_info.lfb_base,
screen_info.lfb_size);
+
+ if (IS_ENABLED(CONFIG_EFI_EARLYCON))
+ efi_earlycon_reprobe();
}
}
@@ -92,7 +95,7 @@ static int __init uefi_init(u64 efi_system_table)
if (IS_ENABLED(CONFIG_64BIT))
set_bit(EFI_64BIT, &efi.flags);
- retval = efi_systab_check_header(&systab->hdr, 2);
+ retval = efi_systab_check_header(&systab->hdr);
if (retval)
goto out;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 09716eebe8ac..abeff7dc0b58 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -187,8 +187,27 @@ static const struct attribute_group efi_subsys_attr_group = {
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
+static bool generic_ops_supported(void)
+{
+ unsigned long name_size;
+ efi_status_t status;
+ efi_char16_t name;
+ efi_guid_t guid;
+
+ name_size = sizeof(name);
+
+ status = efi.get_next_variable(&name_size, &name, &guid);
+ if (status == EFI_UNSUPPORTED)
+ return false;
+
+ return true;
+}
+
static int generic_ops_register(void)
{
+ if (!generic_ops_supported())
+ return 0;
+
generic_ops.get_variable = efi.get_variable;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
@@ -197,11 +216,14 @@ static int generic_ops_register(void)
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
}
- return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+ return efivars_register(&generic_efivars, &generic_ops);
}
static void generic_ops_unregister(void)
{
+ if (!generic_ops.get_variable)
+ return;
+
efivars_unregister(&generic_efivars);
}
@@ -394,8 +416,8 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
- destroy_workqueue(efi_rts_wq);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_destroy_wq;
}
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
@@ -443,7 +465,10 @@ err_unregister:
err_put:
kobject_put(efi_kobj);
efi_kobj = NULL;
- destroy_workqueue(efi_rts_wq);
+err_destroy_wq:
+ if (efi_rts_wq)
+ destroy_workqueue(efi_rts_wq);
+
return error;
}
@@ -478,7 +503,7 @@ void __init efi_find_mirror(void)
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -496,6 +521,12 @@ int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
+ /* skip bogus entries (including empty ones) */
+ if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
+ (md->num_pages <= 0) ||
+ (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
+ continue;
+
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
@@ -506,6 +537,9 @@ int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
return -ENOENT;
}
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ __weak __alias(__efi_mem_desc_lookup);
+
/*
* Calculate the highest address of an efi memory descriptor.
*/
@@ -532,6 +566,10 @@ void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
*/
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
+ /* efi_mem_reserve() does not work under Xen */
+ if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
+ return;
+
if (!memblock_is_region_reserved(addr, size))
memblock_reserve(addr, size);
@@ -580,13 +618,20 @@ static __init int match_config_table(const efi_guid_t *guid,
int i;
for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
- if (!efi_guidcmp(*guid, table_types[i].guid)) {
- *(table_types[i].ptr) = table;
+ if (efi_guidcmp(*guid, table_types[i].guid))
+ continue;
+
+ if (!efi_config_table_is_usable(guid, table)) {
if (table_types[i].name[0])
- pr_cont("%s=0x%lx ",
+ pr_cont("(%s=0x%lx unusable) ",
table_types[i].name, table);
return 1;
}
+
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ", table_types[i].name, table);
+ return 1;
}
return 0;
@@ -717,20 +762,13 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
return 0;
}
-int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
- int min_major_version)
+int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
pr_err("System table signature incorrect!\n");
return -EINVAL;
}
- if ((systab_hdr->revision >> 16) < min_major_version)
- pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
- systab_hdr->revision >> 16,
- systab_hdr->revision & 0xffff,
- min_major_version);
-
return 0;
}
@@ -761,6 +799,7 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
char vendor[100] = "unknown";
const efi_char16_t *c16;
size_t i;
+ u16 rev;
c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
@@ -771,10 +810,14 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
}
- pr_info("EFI v%u.%.02u by %s\n",
- systab_hdr->revision >> 16,
- systab_hdr->revision & 0xffff,
- vendor);
+ rev = (u16)systab_hdr->revision;
+ pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
+
+ rev %= 10;
+ if (rev)
+ pr_cont(".%u", rev);
+
+ pr_cont(" by %s\n", vendor);
if (IS_ENABLED(CONFIG_X86_64) &&
systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
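
As a worked example of the revision formatting above (the numbers are mine, not from the patch): UEFI stores the minor revision as a two-digit decimal in the low 16 bits of the revision field, so a system table revision of (2 << 16) | 70 is now printed as "EFI v2.7" (70 / 10 = 7, zero remainder suppressed), while (2 << 16) | 31 is printed as "EFI v2.3.1".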
@@ -1004,6 +1047,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; ) {
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 2a2f52b017e7..87729c365be1 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -247,7 +247,7 @@ void __init efi_esrt_init(void)
int rc;
phys_addr_t end;
- if (!efi_enabled(EFI_MEMMAP))
+ if (!efi_enabled(EFI_MEMMAP) && !efi_enabled(EFI_PARAVIRT))
return;
pr_debug("esrt-init: loading.\n");
@@ -258,20 +258,15 @@ void __init efi_esrt_init(void)
if (rc < 0 ||
(!(md.attribute & EFI_MEMORY_RUNTIME) &&
md.type != EFI_BOOT_SERVICES_DATA &&
- md.type != EFI_RUNTIME_SERVICES_DATA)) {
+ md.type != EFI_RUNTIME_SERVICES_DATA &&
+ md.type != EFI_ACPI_RECLAIM_MEMORY &&
+ md.type != EFI_ACPI_MEMORY_NVS)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
- max = efi_mem_desc_end(&md);
- if (max < efi.esrt) {
- pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n",
- (void *)efi.esrt, (void *)max);
- return;
- }
-
+ max = efi_mem_desc_end(&md) - efi.esrt;
size = sizeof(*esrt);
- max -= efi.esrt;
if (max < size) {
pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index be8b8c6e8b40..3abb2b357482 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -23,8 +23,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
- -fno-unwind-tables -fno-asynchronous-unwind-tables \
- $(call cc-option,-mbranch-protection=none)
+ -fno-unwind-tables -fno-asynchronous-unwind-tables
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
@@ -87,7 +86,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
screen_info.o efi-stub-entry.o
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o arm64-entry.o smbios.o
+lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
@@ -141,7 +140,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
#
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS64
+STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# For RISC-V, we don't need anything special other than arm64. Keep all the
# symbols in .init section and make sure that no absolute symbols references
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 43e9a4cab9f5..89ef820f3b34 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -1,7 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
# to be include'd by arch/$(ARCH)/boot/Makefile after setting
-# EFI_ZBOOT_PAYLOAD, EFI_ZBOOT_BFD_TARGET and EFI_ZBOOT_MACH_TYPE
+# EFI_ZBOOT_PAYLOAD, EFI_ZBOOT_BFD_TARGET, EFI_ZBOOT_MACH_TYPE and
+# EFI_ZBOOT_FORWARD_CFI
+
+quiet_cmd_copy_and_pad = PAD $@
+ cmd_copy_and_pad = cp $< $@ && \
+ truncate -s $(shell hexdump -s16 -n4 -e '"%u"' $<) $@
+
+# Pad the file to the size of the uncompressed image in memory, including BSS
+$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
+ $(call if_changed,copy_and_pad)
comp-type-$(CONFIG_KERNEL_GZIP) := gzip
comp-type-$(CONFIG_KERNEL_LZ4) := lz4
@@ -10,26 +19,32 @@ comp-type-$(CONFIG_KERNEL_LZO) := lzo
comp-type-$(CONFIG_KERNEL_XZ) := xzkern
comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
-# Copy the SizeOfHeaders, SizeOfCode and SizeOfImage fields from the payload to
-# the end of the compressed image. Note that this presupposes a PE header
-# offset of 64 bytes, which is what arm64, RISC-V and LoongArch use.
-quiet_cmd_compwithsize = $(quiet_cmd_$(comp-type-y))
- cmd_compwithsize = $(cmd_$(comp-type-y)) && ( \
- dd status=none if=$< bs=4 count=1 skip=37 ; \
- dd status=none if=$< bs=4 count=1 skip=23 ; \
- dd status=none if=$< bs=4 count=1 skip=36 ) >> $@
+# In GZIP, the appended le32 carrying the uncompressed size is part of the
+# format, but in other cases, we just append it at the end for convenience,
+# causing the original tools to complain when checking image integrity.
+# So disregard it when calculating the payload size in the zimage header.
+zboot-method-y := $(comp-type-y)_with_size
+zboot-size-len-y := 4
-$(obj)/vmlinuz: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
- $(call if_changed,compwithsize)
+zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
+zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
-OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \
+$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,$(zboot-method-y))
+
+OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
--rename-section .data=.gzdata,load,alloc,readonly,contents
$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
$(call if_changed,objcopy)
+aflags-zboot-header-$(EFI_ZBOOT_FORWARD_CFI) := \
+ -DPE_DLL_CHAR_EX=IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT
+
AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
-DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
- -DCOMP_TYPE="\"$(comp-type-y)\""
+ -DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
+ -DCOMP_TYPE="\"$(comp-type-y)\"" \
+ $(aflags-zboot-header-y)
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
@@ -44,4 +59,4 @@ OBJCOPYFLAGS_vmlinuz.efi := -O binary
$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
$(call if_changed,objcopy)
-targets += zboot-header.o vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
+targets += zboot-header.o vmlinux.bin vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
diff --git a/drivers/firmware/efi/libstub/arm64-entry.S b/drivers/firmware/efi/libstub/arm64-entry.S
deleted file mode 100644
index b5c17e89a4fc..000000000000
--- a/drivers/firmware/efi/libstub/arm64-entry.S
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * EFI entry point.
- *
- * Copyright (C) 2013, 2014 Red Hat, Inc.
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
- /*
- * The entrypoint of a arm64 bare metal image is at offset #0 of the
- * image, so this is a reasonable default for primary_entry_offset.
- * Only when the EFI stub is integrated into the core kernel, it is not
- * guaranteed that the PE/COFF header has been copied to memory too, so
- * in this case, primary_entry_offset should be overridden by the
- * linker and point to primary_entry() directly.
- */
- .weak primary_entry_offset
-
-SYM_CODE_START(efi_enter_kernel)
- /*
- * efi_pe_entry() will have copied the kernel image if necessary and we
- * end up here with device tree address in x1 and the kernel entry
- * point stored in x0. Save those values in registers which are
- * callee preserved.
- */
- ldr w2, =primary_entry_offset
- add x19, x0, x2 // relocated Image entrypoint
-
- mov x0, x1 // DTB address
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- /*
- * Clean the remainder of this routine to the PoC
- * so that we can safely disable the MMU and caches.
- */
- adr x4, 1f
- dc civac, x4
- dsb sy
-
- /* Turn off Dcache and MMU */
- mrs x4, CurrentEL
- cmp x4, #CurrentEL_EL2
- mrs x4, sctlr_el1
- b.ne 0f
- mrs x4, sctlr_el2
-0: bic x4, x4, #SCTLR_ELx_M
- bic x4, x4, #SCTLR_ELx_C
- b.eq 1f
- b 2f
-
- .balign 32
-1: pre_disable_mmu_workaround
- msr sctlr_el2, x4
- isb
- br x19 // jump to kernel entrypoint
-
-2: pre_disable_mmu_workaround
- msr sctlr_el1, x4
- isb
- br x19 // jump to kernel entrypoint
-
- .org 1b + 32
-SYM_CODE_END(efi_enter_kernel)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 7327b98d8e3f..770b8ecb7398 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -58,7 +58,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_handle_t image_handle)
{
efi_status_t status;
- unsigned long kernel_size, kernel_memsize = 0;
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
u32 phys_seed = 0;
u64 min_kimg_align = efi_get_kimg_min_align();
@@ -85,14 +85,17 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
}
- if (image->image_base != _text)
+ if (image->image_base != _text) {
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+ image->image_base = _text;
+ }
if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
+ kernel_codesize = __inittext_end - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
@@ -121,7 +124,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*/
*image_addr = (u64)_text;
*reserve_size = 0;
- goto clean_image_to_poc;
+ return EFI_SUCCESS;
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
@@ -137,14 +140,22 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
+ caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
+ efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
+
+ return EFI_SUCCESS;
+}
-clean_image_to_poc:
+asmlinkage void primary_entry(void);
+
+unsigned long primary_entry_offset(void)
+{
/*
- * Clean the copied Image to the PoC, and ensure it is not shadowed by
- * stale icache entries from before relocation.
+ * When built as part of the kernel, the EFI stub cannot branch to the
+ * kernel proper via the image header, as the PE/COFF header is
+ * strictly not part of the in-memory presentation of the image, only
+ * of the file representation. So instead, we need to jump to the
+ * actual entrypoint in the .text region of the image.
*/
- dcache_clean_poc(*image_addr, *image_addr + kernel_size);
- asm("ic ialluis");
-
- return EFI_SUCCESS;
+ return (char *)primary_entry - _text;
}
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index ff2d18c42ee7..446e35eaf3d9 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <asm/efi.h>
+#include <asm/image.h>
#include <asm/memory.h>
#include <asm/sysreg.h>
@@ -16,17 +17,43 @@
static bool system_needs_vamap(void)
{
- const u8 *type1_family = efi_get_smbios_string(1, family);
+ const struct efi_smbios_type4_record *record;
+ const u32 __aligned(1) *socid;
+ const u8 *version;
/*
- * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
- * has not been called prior.
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior. Most Altra systems
+ * can be identified by the SMCCC soc ID, which is conveniently exposed
+ * via the type 4 SMBIOS records. Otherwise, test the processor version
+ * field. eMAG systems all appear to have the processor version field
+ * set to "eMAG".
*/
- if (!type1_family || strcmp(type1_family, "Altra"))
+ record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
+ if (!record)
return false;
- efi_warn("Working around broken SetVirtualAddressMap()\n");
- return true;
+ socid = (u32 *)record->processor_id;
+ switch (*socid & 0xffff000f) {
+ static char const altra[] = "Ampere(TM) Altra(TM) Processor";
+ static char const emag[] = "eMAG";
+
+ default:
+ version = efi_get_smbios_string(&record->header, 4,
+ processor_version);
+ if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
+ strncmp(version, emag, sizeof(emag) - 1)))
+ break;
+
+ fallthrough;
+
+ case 0x0a160001: // Altra
+ case 0x0a160002: // Altra Max
+ efi_warn("Working around broken SetVirtualAddressMap()\n");
+ return true;
+ }
+
+ return false;
}
efi_status_t check_platform_features(void)
@@ -56,21 +83,58 @@ efi_status_t check_platform_features(void)
return EFI_SUCCESS;
}
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+#define DCTYPE "civac"
+#else
+#define DCTYPE "cvau"
+#endif
+
+u32 __weak code_size;
+
void efi_cache_sync_image(unsigned long image_base,
- unsigned long alloc_size,
- unsigned long code_size)
+ unsigned long alloc_size)
{
u32 ctr = read_cpuid_effective_cachetype();
u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr,
CTR_EL0_DminLine_SHIFT);
- do {
- asm("dc civac, %0" :: "r"(image_base));
- image_base += lsize;
- alloc_size -= lsize;
- } while (alloc_size >= lsize);
+ /* only perform the cache maintenance if needed for I/D coherency */
+ if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
+ unsigned long base = image_base;
+ unsigned long size = code_size;
+
+ do {
+ asm("dc " DCTYPE ", %0" :: "r"(base));
+ base += lsize;
+ size -= lsize;
+ } while (size >= lsize);
+ }
asm("ic ialluis");
dsb(ish);
isb();
+
+ efi_remap_image(image_base, alloc_size, code_size);
+}
+
+unsigned long __weak primary_entry_offset(void)
+{
+ /*
+ * By default, we can invoke the kernel via the branch instruction in
+ * the image header, so offset #0. This will be overridden by the EFI
+ * stub build that is linked into the core kernel, as in that case, the
+ * image header may not have been loaded into memory, or may be mapped
+ * with non-executable permissions.
+ */
+ return 0;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+{
+ void (* __noreturn enter_kernel)(u64, u64, u64, u64);
+
+ enter_kernel = (void *)entrypoint + primary_entry_offset();
+ enter_kernel(fdt_addr, 0, 0, 0);
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c
index 5245c4f031c0..cc4dcaea67fa 100644
--- a/drivers/firmware/efi/libstub/efi-stub-entry.c
+++ b/drivers/firmware/efi/libstub/efi-stub-entry.c
@@ -5,6 +5,15 @@
#include "efistub.h"
+static unsigned long screen_info_offset;
+
+struct screen_info *alloc_screen_info(void)
+{
+ if (IS_ENABLED(CONFIG_ARM))
+ return __alloc_screen_info();
+ return (void *)&screen_info + screen_info_offset;
+}
+
/*
* EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and
* LoongArch. This is the entrypoint that is described in the PE/COFF header
@@ -56,6 +65,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
return status;
}
+ screen_info_offset = image_addr - (unsigned long)image->image_base;
+
status = efi_stub_common(handle, image, image_addr, cmdline_ptr);
efi_free(image_size, image_addr);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f5a4bdacac64..1e0203d74691 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -651,3 +651,70 @@ efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
return status;
}
+
+/**
+ * efi_remap_image - Remap a loaded image with the appropriate permissions
+ * for code and data
+ *
+ * @image_base: the base of the image in memory
+ * @alloc_size: the size of the area in memory occupied by the image
+ * @code_size: the size of the leading part of the image containing code
+ * and read-only data
+ *
+ * efi_remap_image() uses the EFI memory attribute protocol to remap the code
+ * region of the loaded image read-only/executable, and the remainder
+ * read-write/non-executable. The code region is assumed to start at the base
+ * of the image, and will therefore cover the PE/COFF header as well.
+ */
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size)
+{
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ efi_memory_attribute_protocol_t *memattr;
+ efi_status_t status;
+ u64 attr;
+
+ /*
+ * If the firmware implements the EFI_MEMORY_ATTRIBUTE_PROTOCOL, let's
+ * invoke it to remap the text/rodata region of the decompressed image
+ * as read-only and the data/bss region as non-executable.
+ */
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
+ if (status != EFI_SUCCESS)
+ return;
+
+ // Get the current attributes for the entire region
+ status = memattr->get_memory_attributes(memattr, image_base,
+ alloc_size, &attr);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to retrieve memory attributes for image region: 0x%lx\n",
+ status);
+ return;
+ }
+
+ // Mark the code region as read-only
+ status = memattr->set_memory_attributes(memattr, image_base, code_size,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to remap code region read-only\n");
+ return;
+ }
+
+ // If the entire region was already mapped as non-exec, clear the
+ // attribute from the code region. Otherwise, set it on the data
+ // region.
+ if (attr & EFI_MEMORY_XP) {
+ status = memattr->clear_memory_attributes(memattr, image_base,
+ code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap code region executable\n");
+ } else {
+ status = memattr->set_memory_attributes(memattr,
+ image_base + code_size,
+ alloc_size - code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap data region non-executable\n");
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 2955c1ac6a36..f9c1e8a2bd1d 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -47,11 +47,6 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
-struct screen_info * __weak alloc_screen_info(void)
-{
- return &screen_info;
-}
-
void __weak free_screen_info(struct screen_info *si)
{
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 5b8f2c411ed8..67d5a20802e0 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -442,6 +442,26 @@ union efi_dxe_services_table {
} mixed_mode;
};
+typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
+
+union efi_memory_attribute_protocol {
+ struct {
+ efi_status_t (__efiapi *get_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
+
+ efi_status_t (__efiapi *set_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+
+ efi_status_t (__efiapi *clear_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+ };
+ struct {
+ u32 get_memory_attributes;
+ u32 set_memory_attributes;
+ u32 clear_memory_attributes;
+ } mixed_mode;
+};
+
typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
union efi_uga_draw_protocol {
@@ -1042,11 +1062,11 @@ efi_enable_reset_attack_mitigation(void) { }
void efi_retrieve_tpm2_eventlog(void);
struct screen_info *alloc_screen_info(void);
+struct screen_info *__alloc_screen_info(void);
void free_screen_info(struct screen_info *si);
void efi_cache_sync_image(unsigned long image_base,
- unsigned long alloc_size,
- unsigned long code_size);
+ unsigned long alloc_size);
struct efi_smbios_record {
u8 type;
@@ -1054,6 +1074,8 @@ struct efi_smbios_record {
u16 handle;
};
+const struct efi_smbios_record *efi_get_smbios_record(u8 type);
+
struct efi_smbios_type1_record {
struct efi_smbios_record header;
@@ -1067,13 +1089,48 @@ struct efi_smbios_type1_record {
u8 family;
};
-#define efi_get_smbios_string(__type, __name) ({ \
- int size = sizeof(struct efi_smbios_type ## __type ## _record); \
+struct efi_smbios_type4_record {
+ struct efi_smbios_record header;
+
+ u8 socket;
+ u8 processor_type;
+ u8 processor_family;
+ u8 processor_manufacturer;
+ u8 processor_id[8];
+ u8 processor_version;
+ u8 voltage;
+ u16 external_clock;
+ u16 max_speed;
+ u16 current_speed;
+ u8 status;
+ u8 processor_upgrade;
+ u16 l1_cache_handle;
+ u16 l2_cache_handle;
+ u16 l3_cache_handle;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 core_count;
+ u8 enabled_core_count;
+ u8 thread_count;
+ u16 processor_characteristics;
+ u16 processor_family2;
+ u16 core_count2;
+ u16 enabled_core_count2;
+ u16 thread_count2;
+ u16 thread_enabled;
+};
+
+#define efi_get_smbios_string(__record, __type, __name) ({ \
int off = offsetof(struct efi_smbios_type ## __type ## _record, \
__name); \
- __efi_get_smbios_string(__type, off, size); \
+ __efi_get_smbios_string((__record), __type, off); \
})
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset);
+
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size);
#endif
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
index eee7ed43cdfb..72c71ae201f0 100644
--- a/drivers/firmware/efi/libstub/loongarch-stub.c
+++ b/drivers/firmware/efi/libstub/loongarch-stub.c
@@ -21,26 +21,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_loaded_image_t *image,
efi_handle_t image_handle)
{
- int nr_pages = round_up(kernel_asize, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- efi_physical_addr_t kernel_addr = EFI_KIMG_PREFERRED_ADDRESS;
efi_status_t status;
+ unsigned long kernel_addr = 0;
- /*
- * Allocate space for the kernel image at the preferred offset. This is
- * the only location in memory from where we can execute the image, so
- * no point in falling back to another allocation.
- */
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &kernel_addr);
- if (status != EFI_SUCCESS)
- return status;
-
- *image_addr = EFI_KIMG_PREFERRED_ADDRESS;
- *image_size = kernel_asize;
+ kernel_addr = (unsigned long)&kernel_offset - kernel_offset;
+
+ status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize,
+ EFI_KIMG_PREFERRED_ADDRESS, efi_get_kimg_min_align(), 0x0);
- memcpy((void *)EFI_KIMG_PREFERRED_ADDRESS,
- (void *)&kernel_offset - kernel_offset,
- kernel_fsize);
+ *image_addr = kernel_addr;
+ *image_size = kernel_asize;
return status;
}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 1692d19ae80f..32c7a54923b4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -101,6 +101,7 @@ efi_status_t efi_random_alloc(unsigned long size,
* to calculate the randomly chosen address, and allocate it directly
* using EFI_ALLOCATE_ADDRESS.
*/
+ status = EFI_OUT_OF_RESOURCES;
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
efi_memory_desc_t *md = (void *)map->map + map_offset;
efi_physical_addr_t target;
diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c
index 8e76a8b384ba..4be1c4d1f922 100644
--- a/drivers/firmware/efi/libstub/screen_info.c
+++ b/drivers/firmware/efi/libstub/screen_info.c
@@ -15,18 +15,11 @@
* early, but it only works if the EFI stub is part of the core kernel image
* itself. The zboot decompressor can only use the configuration table
* approach.
- *
- * In order to support both methods from the same build of the EFI stub
- * library, provide this dummy global definition of struct screen_info. If it
- * is required to satisfy a link dependency, it means we need to override the
- * __weak alloc and free methods with the ones below, and those will be pulled
- * in as well.
*/
-struct screen_info screen_info;
static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID;
-struct screen_info *alloc_screen_info(void)
+struct screen_info *__alloc_screen_info(void)
{
struct screen_info *si;
efi_status_t status;
diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
index 460418b7f5f5..c217de2cc8d5 100644
--- a/drivers/firmware/efi/libstub/smbios.c
+++ b/drivers/firmware/efi/libstub/smbios.c
@@ -22,21 +22,30 @@ struct efi_smbios_protocol {
u8 minor_version;
};
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
+const struct efi_smbios_record *efi_get_smbios_record(u8 type)
{
struct efi_smbios_record *record;
efi_smbios_protocol_t *smbios;
efi_status_t status;
u16 handle = 0xfffe;
- const u8 *strtable;
status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
(void **)&smbios) ?:
efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
if (status != EFI_SUCCESS)
return NULL;
+ return record;
+}
+
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset)
+{
+ const u8 *strtable;
+
+ if (!record)
+ return NULL;
- strtable = (u8 *)record + recsize;
+ strtable = (u8 *)record + record->length;
for (int i = 1; i < ((u8 *)record)[offset]; i++) {
int len = strlen(strtable);
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index ec4525d40e0c..fb676ded47fa 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -17,7 +17,7 @@ __efistub_efi_zboot_header:
.long MZ_MAGIC
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
- .long __efistub__gzdata_size - 12 // payload size
+ .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
.long 0, 0 // reserved
.asciz COMP_TYPE // compression type
.org .Ldoshdr + 0x38
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short 0
+ .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -78,9 +78,36 @@ __efistub_efi_zboot_header:
.quad 0 // ExceptionTable
.quad 0 // CertificationTable
.quad 0 // BaseRelocationTable
-#ifdef CONFIG_DEBUG_EFI
+#if defined(PE_DLL_CHAR_EX) || defined(CONFIG_DEBUG_EFI)
.long .Lefi_debug_table - .Ldoshdr // DebugTable
.long .Lefi_debug_table_size
+
+ .section ".rodata", "a"
+ .p2align 2
+.Lefi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY[]
+#ifdef PE_DLL_CHAR_EX
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS // Type
+ .long 4 // SizeOfData
+ .long 0 // RVA
+ .long .Lefi_dll_characteristics_ex - .Ldoshdr // FileOffset
+#endif
+#ifdef CONFIG_DEBUG_EFI
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
+ .long .Lefi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long .Lefi_debug_entry - .Ldoshdr // FileOffset
+#endif
+ .set .Lefi_debug_table_size, . - .Lefi_debug_table
+ .previous
#endif
.Lsection_table:
@@ -110,23 +137,11 @@ __efistub_efi_zboot_header:
.set .Lsection_count, (. - .Lsection_table) / 40
+#ifdef PE_DLL_CHAR_EX
+.Lefi_dll_characteristics_ex:
+ .long PE_DLL_CHAR_EX
+#endif
#ifdef CONFIG_DEBUG_EFI
- .section ".rodata", "a"
- .align 2
-.Lefi_debug_table:
- // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
- .long 0 // Characteristics
- .long 0 // TimeDateStamp
- .short 0 // MajorVersion
- .short 0 // MinorVersion
- .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
- .long .Lefi_debug_entry_size // SizeOfData
- .long 0 // RVA
- .long .Lefi_debug_entry - .Ldoshdr // FileOffset
-
- .set .Lefi_debug_table_size, . - .Lefi_debug_table
- .previous
-
.Lefi_debug_entry:
// EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
.ascii "NB10" // Signature
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index 66be5fdc6b58..e5d7fa1f1d8f 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -50,18 +50,22 @@ static unsigned long alloc_preferred_address(unsigned long alloc_size)
}
void __weak efi_cache_sync_image(unsigned long image_base,
- unsigned long alloc_size,
- unsigned long code_size)
+ unsigned long alloc_size)
{
// Provided by the arch to perform the cache maintenance necessary for
// executable code loaded into memory to be safe for execution.
}
+struct screen_info *alloc_screen_info(void)
+{
+ return __alloc_screen_info();
+}
+
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
{
unsigned long compressed_size = _gzdata_end - _gzdata_start;
- unsigned long image_base, alloc_size, code_size;
+ unsigned long image_base, alloc_size;
efi_loaded_image_t *image;
efi_status_t status;
char *cmdline_ptr;
@@ -89,10 +93,6 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
alloc_size = round_up(get_unaligned_le32(_gzdata_end - 4),
EFI_ALLOC_ALIGN);
- // SizeOfHeaders and SizeOfCode from the compressee's PE/COFF header
- code_size = get_unaligned_le32(_gzdata_end - 8) +
- get_unaligned_le32(_gzdata_end - 12);
-
// If the architecture has a preferred address for the image,
// try that first.
image_base = alloc_preferred_address(alloc_size);
@@ -135,7 +135,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
goto free_image;
}
- efi_cache_sync_image(image_base, alloc_size, code_size);
+ efi_cache_sync_image(image_base, alloc_size);
status = efi_stub_common(handle, image, image_base, cmdline_ptr);
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index 93d33f68333b..ac8c0ef85158 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -2,6 +2,8 @@
ENTRY(__efistub_efi_zboot_header);
+PROVIDE(zboot_code_size = ABSOLUTE(0));
+
SECTIONS
{
.head : ALIGN(4096) {
@@ -17,6 +19,11 @@ SECTIONS
*(.gzdata)
__efistub__gzdata_end = .;
*(.rodata* .init.rodata* .srodata*)
+
+ . = ALIGN(4);
+ __efistub_code_size = .;
+ LONG(zboot_code_size);
+
_etext = ALIGN(4096);
. = _etext;
}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 0a9aba5f9cef..ab85bf8e165a 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
return -ENOMEM;
}
- if (tbl->version > 1) {
+ if (tbl->version > 2) {
pr_warn("Unexpected EFI Memory Attributes table version %d\n",
tbl->version);
goto unmap;
@@ -129,6 +129,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn)
{
efi_memory_attributes_table_t *tbl;
+ bool has_bti = false;
int i, ret;
if (tbl_size <= sizeof(*tbl))
@@ -150,6 +151,10 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
return -ENOMEM;
}
+ if (tbl->version > 1 &&
+ (tbl->flags & EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD))
+ has_bti = true;
+
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI Memory Attributes table:\n");
@@ -169,7 +174,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
efi_md_typeattr_format(buf, sizeof(buf), &md));
if (valid) {
- ret = fn(mm, &md);
+ ret = fn(mm, &md, has_bti);
if (ret)
pr_err("Error updating mappings, skipping subsequent md's\n");
}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 7feee3d9c2bf..a400c4312c82 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
\
if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \
pr_warn_once("EFI Runtime Services are disabled!\n"); \
+ efi_rts_work.status = EFI_DEVICE_ERROR; \
goto exit; \
} \
\
@@ -157,7 +158,7 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
* none of the remaining functions are actually ever called at runtime.
* So let's just use a single lock to serialize all Runtime Services calls.
*/
-static DEFINE_SEMAPHORE(efi_runtime_lock);
+static DEFINE_SEMAPHORE(efi_runtime_lock, 1);
/*
* Expose the EFI runtime lock to the UV platform
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 7882d4b3f2be..456d0e5eaf78 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -264,6 +264,22 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"Lenovo ideapad D330-10IGM"),
},
},
+ {
+ /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "IdeaPad Duet 3 10IGL5"),
+ },
+ },
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ /* Non-exact match to match both F and L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ },
+ },
{},
};
@@ -333,7 +349,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
+__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
@@ -347,7 +363,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd)
screen_info.lfb_height = temp;
screen_info.lfb_linelength = 4 * screen_info.lfb_width;
}
+}
+__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
pd->dev.fwnode = &efifb_fwnode;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ba9f18312f5..bfc5fa6aa47b 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -6,6 +6,8 @@
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*/
+#define pr_fmt(fmt) "efivars: " fmt
+
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/errno.h>
@@ -19,7 +21,7 @@
/* Private pointer to registered efivars */
static struct efivars *__efivars;
-static DEFINE_SEMAPHORE(efivars_lock);
+static DEFINE_SEMAPHORE(efivars_lock, 1);
static efi_status_t check_var_size(bool nonblocking, u32 attributes,
unsigned long size)
@@ -40,45 +42,47 @@ static efi_status_t check_var_size(bool nonblocking, u32 attributes,
}
/**
- * efivars_kobject - get the kobject for the registered efivars
+ * efivar_is_available - check if efivars is available
*
- * If efivars_register() has not been called we return NULL,
- * otherwise return the kobject used at registration time.
+ * @return true iff efivars is currently registered
*/
-struct kobject *efivars_kobject(void)
+bool efivar_is_available(void)
{
- if (!__efivars)
- return NULL;
-
- return __efivars->kobject;
+ return __efivars != NULL;
}
-EXPORT_SYMBOL_GPL(efivars_kobject);
+EXPORT_SYMBOL_GPL(efivar_is_available);
/**
* efivars_register - register an efivars
* @efivars: efivars to register
* @ops: efivars operations
- * @kobject: @efivars-specific kobject
*
* Only a single efivars can be registered at any time.
*/
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject)
+ const struct efivar_operations *ops)
{
+ int rv;
+
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (__efivars) {
+ pr_warn("efivars already registered\n");
+ rv = -EBUSY;
+ goto out;
+ }
+
efivars->ops = ops;
- efivars->kobject = kobject;
__efivars = efivars;
pr_info("Registered efivars operations\n");
-
+ rv = 0;
+out:
up(&efivars_lock);
- return 0;
+ return rv;
}
EXPORT_SYMBOL_GPL(efivars_register);
@@ -97,7 +101,7 @@ int efivars_unregister(struct efivars *efivars)
return -EINTR;
if (!__efivars) {
- printk(KERN_ERR "efivars not registered\n");
+ pr_err("efivars not registered\n");
rv = -EINVAL;
goto out;
}
@@ -117,7 +121,7 @@ out:
}
EXPORT_SYMBOL_GPL(efivars_unregister);
-int efivar_supports_writes(void)
+bool efivar_supports_writes(void)
{
return __efivars && __efivars->ops->set_variable;
}
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 9f190eab43ed..1bc7cbf2f65d 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -44,14 +44,6 @@ config GOOGLE_COREBOOT_TABLE
device tree node /firmware/coreboot.
If unsure say N.
-config GOOGLE_COREBOOT_TABLE_ACPI
- tristate
- select GOOGLE_COREBOOT_TABLE
-
-config GOOGLE_COREBOOT_TABLE_OF
- tristate
- select GOOGLE_COREBOOT_TABLE
-
config GOOGLE_MEMCONSOLE
tristate
depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 2652c396c423..33ae94745aef 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -93,14 +93,19 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
for (i = 0; i < header->table_entries; i++) {
entry = ptr_entry;
- device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+ if (entry->size < sizeof(*entry)) {
+ dev_warn(dev, "coreboot table entry too small!\n");
+ return -EINVAL;
+ }
+
+ device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
if (!device)
return -ENOMEM;
device->dev.parent = dev;
device->dev.bus = &coreboot_bus_type;
device->dev.release = coreboot_device_release;
- memcpy(&device->entry, ptr_entry, entry->size);
+ memcpy(device->raw, ptr_entry, entry->size);
switch (device->entry.tag) {
case LB_TAG_CBMEM_ENTRY:
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 37f4d335a606..d814dca33a08 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -79,6 +79,7 @@ struct coreboot_device {
struct lb_cbmem_ref cbmem_ref;
struct lb_cbmem_entry cbmem_entry;
struct lb_framebuffer framebuffer;
+ DECLARE_FLEX_ARRAY(u8, raw);
};
};
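The DECLARE_FLEX_ARRAY() member gives coreboot_table_populate() a destination that overlays the typed union members, so a whole table entry of entry->size bytes can be copied without overrunning any fixed-size field. A stand-alone sketch of the pattern, with an illustrative (non-coreboot) entry layout:
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
/* Illustrative entry layout, not the coreboot one. */
struct example_dev {
        struct device dev;
        union {
                struct {
                        u32 tag;
                        u32 size;
                } entry;                        /* typed view of the header */
                DECLARE_FLEX_ARRAY(u8, raw);    /* raw bytes of the whole entry */
        };
};
static struct example_dev *example_copy_entry(const void *src, size_t size)
{
        /* Allocate the fixed part plus however large this entry is. */
        struct example_dev *d = kzalloc(sizeof(d->dev) + size, GFP_KERNEL);
        if (!d)
                return NULL;
        /* Copying into the flex array cannot overflow a fixed-size member. */
        memcpy(d->raw, src, size);
        return d;
}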
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index c6dcc1ef93ac..c323a818805c 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
fb->green_mask_pos == formats[i].green.offset &&
fb->green_mask_size == formats[i].green.length &&
fb->blue_mask_pos == formats[i].blue.offset &&
- fb->blue_mask_size == formats[i].blue.length &&
- fb->reserved_mask_pos == formats[i].transp.offset &&
- fb->reserved_mask_size == formats[i].transp.length)
+ fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 4e2575dfeb90..96ea1fa76d35 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
memcpy(data, gsmi_dev.data_buf->start, *data_size);
/* All variables have the following attributes */
- *attr = EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS;
+ if (attr)
+ *attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
@@ -1029,7 +1030,7 @@ static __init int gsmi_init(void)
}
#ifdef CONFIG_EFI
- ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
+ ret = efivars_register(&efivars, &efivar_ops);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index dca79caccd01..47db49911e7b 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -310,9 +310,8 @@ static int imx_scu_probe(struct platform_device *pdev)
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(sc_chan->ch)) {
ret = PTR_ERR(sc_chan->ch);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %s ret %d\n",
- chan_name, ret);
+ dev_err_probe(dev, ret, "Failed to request mbox chan %s\n",
+ chan_name);
kfree(chan_name);
return ret;
}
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 2a4f07423365..84b673427073 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -180,7 +180,11 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LVDS SS */
{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
+ { "lvds0-pwm", IMX_SC_R_LVDS_0_PWM_0, 1, false, 0 },
+ { "lvds0-lpi2c", IMX_SC_R_LVDS_0_I2C_0, 2, true, 0 },
{ "lvds1", IMX_SC_R_LVDS_1, 1, false, 0 },
+ { "lvds1-pwm", IMX_SC_R_LVDS_1_PWM_0, 1, false, 0 },
+ { "lvds1-lpi2c", IMX_SC_R_LVDS_1_I2C_0, 2, true, 0 },
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 77aa5c6398aa..798bcdb05d84 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -82,7 +82,7 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
if (!sm_phy_base)
- return 0;
+ return NULL;
return ioremap_cache(sm_phy_base, size);
}
@@ -311,11 +311,14 @@ static int __init meson_sm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fw);
- pr_info("secure-monitor enabled\n");
+ if (devm_of_platform_populate(dev))
+ goto out_in_base;
if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
goto out_in_base;
+ pr_info("secure-monitor enabled\n");
+
return 0;
out_in_base:
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index e7bcfca4159f..d9629ff87861 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -108,9 +108,10 @@ bool psci_power_state_is_valid(u32 state)
return !(state & ~valid_mask);
}
-static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -118,9 +119,10 @@ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
return res.a0;
}
-static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -128,7 +130,7 @@ static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
return res.a0;
}
-static int psci_to_linux_errno(int errno)
+static __always_inline int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
@@ -165,11 +167,13 @@ int psci_set_osi_mode(bool enable)
err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
if (err < 0)
- pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err);
+ pr_info(FW_BUG "failed to set %s mode: %d\n",
+ enable ? "OSI" : "PC", err);
return psci_to_linux_errno(err);
}
-static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+static __always_inline int
+__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
@@ -177,13 +181,15 @@ static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
return psci_to_linux_errno(err);
}
-static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
state, entry_point);
}
-static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
state, entry_point);
@@ -440,6 +446,9 @@ static const struct file_operations psci_debugfs_ops = {
static int __init psci_debugfs_init(void)
{
+ if (!invoke_psci_fn || !psci_ops.get_version)
+ return 0;
+
return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
&psci_debugfs_ops));
}
@@ -447,10 +456,12 @@ late_initcall(psci_debugfs_init)
#endif
#ifdef CONFIG_CPU_IDLE
-static int psci_suspend_finisher(unsigned long state)
+static noinstr int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
- phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+ phys_addr_t pa_cpu_resume;
+
+ pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
@@ -462,11 +473,22 @@ int psci_cpu_suspend_enter(u32 state)
if (!psci_power_state_loses_context(state)) {
struct arm_cpuidle_irq_context context;
+ ct_cpuidle_enter();
arm_cpuidle_save_irq_context(&context);
ret = psci_ops.cpu_suspend(state, 0);
arm_cpuidle_restore_irq_context(&context);
+ ct_cpuidle_exit();
} else {
+ /*
+ * ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
+ */
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_enter();
+
ret = cpu_suspend(state, psci_suspend_finisher);
+
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_exit();
}
return ret;
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 9f918b9e6f8f..029e6d117cb8 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
index d111833364ba..16cf88acfa8e 100644
--- a/drivers/firmware/qcom_scm-smc.c
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -8,7 +8,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
@@ -52,29 +52,97 @@ static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
-static void __scm_smc_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res, bool atomic)
+static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
{
- int retry_count = 0;
+ memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
+
+ resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
+
+ resume->args[1] = QCOM_SCM_ARGS(1);
+
+ resume->args[2] = smc_call_ctx;
+}
+
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+{
+ int ret;
+ struct arm_smccc_res get_wq_res;
+ struct arm_smccc_args get_wq_ctx = {0};
+
+ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+ /* Guaranteed to return only success or error, no WAITQ_* */
+ __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
+ ret = get_wq_res.a0;
+ if (ret)
+ return ret;
+
+ *wq_ctx = get_wq_res.a1;
+ *flags = get_wq_res.a2;
+ *more_pending = get_wq_res.a3;
+
+ return 0;
+}
+
+static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
+ struct arm_smccc_res *res)
+{
+ int ret;
+ u32 wq_ctx, smc_call_ctx;
+ struct arm_smccc_args resume;
+ struct arm_smccc_args *smc = waitq;
+
+ do {
+ __scm_smc_do_quirk(smc, res);
+
+ if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
+ wq_ctx = res->a1;
+ smc_call_ctx = res->a2;
+
+ ret = qcom_scm_wait_for_wq_completion(wq_ctx);
+ if (ret)
+ return ret;
+
+ fill_wq_resume_args(&resume, smc_call_ctx);
+ smc = &resume;
+ }
+ } while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
+
+ return 0;
+}
+
+static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int ret, retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
- return;
+ return 0;
}
do {
mutex_lock(&qcom_scm_lock);
- __scm_smc_do_quirk(smc, res);
+ ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ return ret;
+
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ return 0;
}
@@ -83,7 +151,7 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int i;
+ int i, ret;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
@@ -135,13 +203,17 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
- __scm_smc_do(&smc, &smc_res, atomic);
+ /* ret error check follows after args_virt cleanup */
+ ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
+ if (ret)
+ return ret;
+
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
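The wait-queue support boils down to a simple handshake: issue the SMC; if firmware answers QCOM_SCM_WAITQ_SLEEP it hands back a wait-queue context and a call context, the caller blocks until the wake-up interrupt completes that context, and then re-enters firmware with a WAITQ_RESUME call. A stand-alone model of that control flow, with hypothetical helper names standing in for __scm_smc_do_quirk(), qcom_scm_wait_for_wq_completion() and fill_wq_resume_args():
/* Hypothetical stand-ins; not real kernel APIs. */
struct fw_result { long status; unsigned int wq_ctx, call_ctx; };
void firmware_call(const void *req, struct fw_result *res);
void wait_for_wakeup(unsigned int wq_ctx);
const void *make_resume_call(unsigned int call_ctx);
#define WAITQ_SLEEP 2  /* mirrors QCOM_SCM_WAITQ_SLEEP */
static long issue_call_with_waitq(const void *req)
{
        struct fw_result res;
        for (;;) {
                firmware_call(req, &res);
                if (res.status != WAITQ_SLEEP)
                        return res.status;      /* final result or error */
                /* Firmware parked the call: block until the wake-up IRQ fires. */
                wait_for_wakeup(res.wq_ctx);
                /* Re-enter firmware with a RESUME call carrying the saved context. */
                req = make_resume_call(res.call_ctx);
        }
}
The atomic path above skips this loop entirely, since it can neither sleep nor take the qcom_scm_lock mutex.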
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index cdbfe54c8146..fde33acd46b7 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -4,15 +4,18 @@
*/
#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
@@ -33,6 +36,7 @@ struct qcom_scm {
struct clk *iface_clk;
struct clk *bus_clk;
struct icc_path *path;
+ struct completion waitq_comp;
struct reset_controller_dev reset;
/* control access to the interconnect path */
@@ -63,6 +67,9 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
BIT(2), BIT(1), BIT(4), BIT(6)
};
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -898,7 +905,7 @@ static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
* Return negative errno on failure or 0 on success with @srcvm updated.
*/
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *srcvm,
+ u64 *srcvm,
const struct qcom_scm_vmperm *newvm,
unsigned int dest_cnt)
{
@@ -915,9 +922,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
__le32 *src;
void *ptr;
int ret, i, b;
- unsigned long srcvm_bits = *srcvm;
+ u64 srcvm_bits = *srcvm;
- src_sz = hweight_long(srcvm_bits) * sizeof(*src);
+ src_sz = hweight64(srcvm_bits) * sizeof(*src);
mem_to_map_sz = sizeof(*mem_to_map);
dest_sz = dest_cnt * sizeof(*destvm);
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
@@ -930,8 +937,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
/* Fill source vmid detail */
src = ptr;
i = 0;
- for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
- src[i++] = cpu_to_le32(b);
+ for (b = 0; b < BITS_PER_TYPE(u64); b++) {
+ if (srcvm_bits & BIT(b))
+ src[i++] = cpu_to_le32(b);
+ }
/* Fill details of mem buff to map */
mem_to_map = ptr + ALIGN(src_sz, SZ_64);
@@ -1325,11 +1334,79 @@ bool qcom_scm_is_available(void)
}
EXPORT_SYMBOL(qcom_scm_is_available);
+static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
+{
+ /* FW currently only supports a single wq_ctx (zero).
+ * TODO: Update this logic to include dynamic allocation and lookup of
+ * completion structs when FW supports more wq_ctx values.
+ */
+ if (wq_ctx != 0) {
+ dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ complete(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
+{
+ int ret;
+ struct qcom_scm *scm = data;
+ u32 wq_ctx, flags, more_pending = 0;
+
+ do {
+ ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
+ if (ret) {
+ dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
+ goto out;
+ }
+
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
+ flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
+ dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ goto out;
+ }
+
+ ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
+ if (ret)
+ goto out;
+ } while (more_pending);
+
+out:
+ return IRQ_HANDLED;
+}
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
unsigned long clks;
- int ret;
+ int irq, ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
@@ -1402,6 +1479,19 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
+ init_completion(&__scm->waitq_comp);
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ if (irq != -ENXIO)
+ return irq;
+ } else {
+ ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", __scm);
+ if (ret < 0)
+ return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ }
+
__get_convention();
/*
@@ -1418,8 +1508,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
/* Clean shutdown, disable download mode to allow normal restart */
- if (download_mode)
- qcom_scm_set_download_mode(false);
+ qcom_scm_set_download_mode(false);
}
static const struct of_device_id qcom_scm_dt_match[] = {
@@ -1454,6 +1543,7 @@ static const struct of_device_id qcom_scm_dt_match[] = {
},
{ .compatible = "qcom,scm-msm8994" },
{ .compatible = "qcom,scm-msm8996" },
+ { .compatible = "qcom,scm-sm6375", .data = (void *)SCM_HAS_CORE_CLK },
{ .compatible = "qcom,scm" },
{}
};
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index db3d08a01209..e6e512bd57d1 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -60,6 +60,9 @@ struct qcom_scm_res {
u64 result[MAX_QCOM_SCM_RETS];
};
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx);
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
+
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
@@ -129,6 +132,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
+#define QCOM_SCM_SVC_WAITQ 0x24
+#define QCOM_SCM_WAITQ_RESUME 0x02
+#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
@@ -137,6 +144,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_WAITQ_SLEEP 2
static inline int qcom_scm_remap_error(int err)
{
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index 60ccf3e90d7d..db818f9dcb8e 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -17,9 +17,13 @@ static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
bool __ro_after_init smccc_trng_available = false;
u64 __ro_after_init smccc_has_sve_hint = false;
+s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
+s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
{
+ struct arm_smccc_res res;
+
smccc_version = version;
smccc_conduit = conduit;
@@ -27,6 +31,18 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
smccc_version >= ARM_SMCCC_VERSION_1_3)
smccc_has_sve_hint = true;
+
+ if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
+ (smccc_conduit != SMCCC_CONDUIT_NONE)) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_SOC_ID, &res);
+ if ((s32)res.a0 >= 0) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
+ smccc_soc_id_version = (s32)res.a0;
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
+ smccc_soc_id_revision = (s32)res.a0;
+ }
+ }
}
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
@@ -44,6 +60,16 @@ u32 arm_smccc_get_version(void)
}
EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+s32 arm_smccc_get_soc_id_version(void)
+{
+ return smccc_soc_id_version;
+}
+
+s32 arm_smccc_get_soc_id_revision(void)
+{
+ return smccc_soc_id_revision;
+}
+
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
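With the SoC ID probed once in arm_smccc_version_init(), later consumers can read the cached values without issuing further SMCs. A minimal usage sketch of the getters added above:
#include <linux/arm-smccc.h>
#include <linux/printk.h>
static void example_report_soc_id(void)
{
        s32 version = arm_smccc_get_soc_id_version();
        if (version == SMCCC_RET_NOT_SUPPORTED)
                return;         /* firmware does not implement ARCH_SOC_ID */
        if (version < 0)
                return;         /* probing failed at init time */
        pr_info("SoC ID version %d, revision %d\n",
                version, arm_smccc_get_soc_id_revision());
}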
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index dd7c3d5e8b0b..890eb454599a 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -42,41 +42,23 @@ static int __init smccc_soc_init(void)
if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
return 0;
- if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE) {
- pr_err("%s: invalid SMCCC conduit\n", __func__);
- return -EOPNOTSUPP;
- }
-
- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_SOC_ID, &res);
-
- if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ soc_id_version = arm_smccc_get_soc_id_version();
+ if (soc_id_version == SMCCC_RET_NOT_SUPPORTED) {
pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
return 0;
}
- if ((int)res.a0 < 0) {
- pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n",
- res.a0);
- return -EINVAL;
- }
-
- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
- if ((int)res.a0 < 0) {
+ if (soc_id_version < 0) {
pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
return -EINVAL;
}
- soc_id_version = res.a0;
-
- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
- if ((int)res.a0 < 0) {
+ soc_id_rev = arm_smccc_get_soc_id_revision();
+ if (soc_id_rev < 0) {
pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
return -EINVAL;
}
- soc_id_rev = res.a0;
-
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b4081f4d88a3..80f4e2d14e04 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1133,18 +1133,22 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return ret;
genpool = svc_create_memory_pool(pdev, sh_memory);
- if (!genpool)
- return -ENOMEM;
+ if (IS_ERR(genpool))
+ return PTR_ERR(genpool);
/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
- if (!controller)
- return -ENOMEM;
+ if (!controller) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
- if (!chans)
- return -ENOMEM;
+ if (!chans) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
controller->dev = dev;
controller->num_chans = SVC_NUM_CHANNEL;
@@ -1159,7 +1163,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to allocate FIFO\n");
- return ret;
+ goto err_destroy_pool;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -1198,19 +1202,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = platform_device_add(svc->stratix10_svc_rsu);
if (ret) {
platform_device_put(svc->stratix10_svc_rsu);
- return ret;
+ goto err_free_kfifo;
}
svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unregister_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- return ret;
+ goto err_unregister_dev;
}
dev_set_drvdata(dev, svc);
@@ -1219,8 +1224,12 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return 0;
+err_unregister_dev:
+ platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
+err_destroy_pool:
+ gen_pool_destroy(genpool);
return ret;
}
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 3fd3563d962b..3c197db42c9d 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -81,6 +81,8 @@ static __init int sysfb_init(void)
if (disabled)
goto unlock_mutex;
+ sysfb_apply_efi_quirks();
+
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
@@ -107,7 +109,7 @@ static __init int sysfb_init(void)
goto unlock_mutex;
}
- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
if (ret)
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index a353e27f83f5..74363ed7501f 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -27,25 +27,58 @@ static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
__init bool sysfb_parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
{
- const struct simplefb_format *f;
__u8 type;
+ u32 bits_per_pixel;
unsigned int i;
type = si->orig_video_isVGA;
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
+ /*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ * bits; including alpha, but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It's not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel from the color bits, reserved bits and
+ * reported lfb_depth, whichever is highest. In the loop below,
+ * ignore simplefb formats with alpha bits, as EFI and VESA
+ * don't specify alpha channels.
+ */
+ if (si->lfb_depth > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
+ } else {
+ bits_per_pixel = si->lfb_depth;
+ }
+
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- f = &formats[i];
- if (si->lfb_depth == f->bits_per_pixel &&
+ const struct simplefb_format *f = &formats[i];
+
+ if (f->transp.length)
+ continue; /* transparent formats are unsupported by VESA/EFI */
+
+ if (bits_per_pixel == f->bits_per_pixel &&
si->red_size == f->red.length &&
si->red_pos == f->red.offset &&
si->green_size == f->green.length &&
si->green_pos == f->green.offset &&
si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset &&
- si->rsvd_size == f->transp.length &&
- si->rsvd_pos == f->transp.offset) {
+ si->blue_pos == f->blue.offset) {
mode->format = f->name;
mode->width = si->lfb_width;
mode->height = si->lfb_height;
@@ -110,7 +143,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
if (!pd)
return ERR_PTR(-ENOMEM);
- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_resources(pd, &res, 1);
if (ret)
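A worked example of the bits_per_pixel heuristic described in the comment above, assuming illustrative screen_info values for an XRGB8888 framebuffer whose firmware reported lfb_depth = 24:
#include <linux/minmax.h>
#include <linux/types.h>
static u32 example_bits_per_pixel(void)
{
        /* Illustrative screen_info values: XRGB8888 reported with depth 24. */
        u32 red_size = 8,   red_pos = 16;
        u32 green_size = 8, green_pos = 8;
        u32 blue_size = 8,  blue_pos = 0;
        u32 rsvd_size = 8,  rsvd_pos = 24;
        u32 lfb_depth = 24;
        u32 bpp = max(max3(red_size + red_pos,
                           green_size + green_pos,
                           blue_size + blue_pos),
                      rsvd_size + rsvd_pos);            /* = 32 */
        return max_t(u32, bpp, lfb_depth);              /* still 32 */
}
With those values the channel extents give 32, which wins over the reported depth of 24 and matches simplefb's x8r8g8b8 format.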
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 3ca2b5d9e66f..6dfe3d34109e 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -193,7 +193,7 @@ static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
},
};
u32 fd = 0, len = 0;
- int remaining, err;
+ int remaining, err, close_err;
mutex_lock(&bpmp_debug_lock);
err = mrq_debug_open(bpmp, name, &fd, &len, 0);
@@ -231,7 +231,9 @@ static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
*nbytes = len;
close:
- err = mrq_debug_close(bpmp, fd);
+ close_err = mrq_debug_close(bpmp, fd);
+ if (!err)
+ err = close_err;
out:
mutex_unlock(&bpmp_debug_lock);
return err;
@@ -319,7 +321,7 @@ static int bpmp_debug_show(struct seq_file *m, void *p)
},
};
u32 fd = 0, len = 0;
- int remaining, err;
+ int remaining, err, close_err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
@@ -353,7 +355,9 @@ static int bpmp_debug_show(struct seq_file *m, void *p)
}
close:
- err = mrq_debug_close(bpmp, fd);
+ close_err = mrq_debug_close(bpmp, fd);
+ if (!err)
+ err = close_err;
out:
mutex_unlock(&bpmp_debug_lock);
return err;
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 042c2043929d..8b5e5daa9fae 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -764,19 +764,19 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
- if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+ if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
err = tegra_bpmp_init_clocks(bpmp);
if (err < 0)
goto free_mrq;
}
- if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
err = tegra_bpmp_init_resets(bpmp);
if (err < 0)
goto free_mrq;
}
- if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+ if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
err = tegra_bpmp_init_powergates(bpmp);
if (err < 0)
goto free_mrq;
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 6ea5789a89e2..2de0fb139ce1 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -104,7 +104,7 @@ static void mox_kobj_release(struct kobject *kobj)
kfree(to_rwtm(kobj)->kobj);
}
-static struct kobj_type mox_kobj_ktype = {
+static const struct kobj_type mox_kobj_ktype = {
.release = mox_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 129f68d7a6f5..a736db4a5825 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
}
/* Add new entry if not present */
- feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
+ feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
if (!feature_data)
return -ENOMEM;
@@ -738,8 +738,31 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
- type, value, NULL);
+ u32 reg = (type == PM_TAPDELAY_INPUT) ? SD_ITAPDLY : SD_OTAPDLYSEL;
+ u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
+
+ if (value) {
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+ }
+
+ /*
+ * Work around completely misdesigned firmware API on Xilinx ZynqMP.
+ * The IOCTL_SET_SD_TAPDELAY firmware call allows the caller to only
+ * ever set IOU_SLCR SD_ITAPDLY Register SD0_ITAPDLYENA/SD1_ITAPDLYENA
+ * bits, but there is no matching call to clear those bits. If those
+ * bits are not cleared, SDMMC tuning may fail.
+ *
+ * Luckily, there are PM_MMIO_READ/PM_MMIO_WRITE calls which seem to
+ * allow complete unrestricted access to all address space, including
+ * IOU_SLCR SD_ITAPDLY Register and all the other registers, access
+ * to which was supposed to be protected by the current firmware API.
+ *
+ * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter
+ * part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits.
+ */
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
@@ -949,6 +972,39 @@ int zynqmp_pm_fpga_get_status(u32 *value)
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
+ * zynqmp_pm_fpga_get_config_status - Get the FPGA configuration status.
+ * @value: Buffer to store FPGA configuration status.
+ *
+ * This function provides access to the pmufw to get the FPGA configuration
+ * status
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int zynqmp_pm_fpga_get_config_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 buf, lower_addr, upper_addr;
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ lower_addr = lower_32_bits((u64)&buf);
+ upper_addr = upper_32_bits((u64)&buf);
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_READ,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET,
+ lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG,
+ ret_payload);
+
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_config_status);
+
+/**
* zynqmp_pm_pinctrl_request - Request Pin from firmware
* @pin: Pin number to request
*
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 6ce143dafd04..0a00763b9f28 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -246,7 +246,7 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE
tristate "Intel MAX10 BMC Secure Update driver"
- depends on MFD_INTEL_M10_BMC
+ depends on MFD_INTEL_M10_BMC_CORE
select FW_LOADER
select FW_UPLOAD
help
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 0804b7a0c298..2e7b41629406 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -39,6 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
+ * @pdata: afu platform device's pdata.
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index e5020e2b1f3d..674e9772f0ea 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -41,7 +41,7 @@ struct dfl_afu_mmio_region {
};
/**
- * struct fpga_afu_dma_region - afu DMA region data structure
+ * struct dfl_afu_dma_region - afu DMA region data structure
*
* @user_addr: region userspace virtual address.
* @length: region length.
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 587c82be12f7..7422d2bc6f37 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -141,7 +141,7 @@
* @fab_port_id: used to indicate current working mode of fabric counters.
* @fab_lock: lock to protect fabric counters working mode.
* @cpu: active CPU to which the PMU is bound for accesses.
- * @cpuhp_node: node for CPU hotplug notifier link.
+ * @node: node for CPU hotplug notifier link.
* @cpuhp_state: state for CPU hotplug notification;
*/
struct fme_perf_priv {
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index d61ce9a18879..cdcf6dea4cc9 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -164,7 +164,7 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
- *
+ * @feature: sub feature info
* @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, and error code otherwise.
@@ -273,7 +273,7 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
}
/**
- * dfl_fme_destroy_bridge - destroy all fpga bridge platform device
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform device
* @pdata: fme platform device's pdata
*/
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
index 096a699089d3..761f80f63312 100644
--- a/drivers/fpga/dfl-fme-pr.h
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -58,7 +58,7 @@ struct dfl_fme_bridge {
};
/**
- * struct dfl_fme_bridge_pdata - platform data for FME bridge platform device.
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
*
* @cdev: container device.
* @port_id: port id.
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 0914e7328b1a..1bc04378118c 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>
-#include <linux/aer.h>
#include "dfl.h"
@@ -376,10 +375,6 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
return ret;
}
- ret = pci_enable_pcie_error_reporting(pcidev);
- if (ret && ret != -EINVAL)
- dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
-
pci_set_master(pcidev);
ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
@@ -387,24 +382,22 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "No suitable DMA support available.\n");
- goto disable_error_report_exit;
+ return ret;
}
ret = cci_init_drvdata(pcidev);
if (ret) {
dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
- goto disable_error_report_exit;
+ return ret;
}
ret = cci_enumerate_feature_devs(pcidev);
- if (!ret)
+ if (ret) {
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
return ret;
+ }
- dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
-
-disable_error_report_exit:
- pci_disable_pcie_error_reporting(pcidev);
- return ret;
+ return 0;
}
static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
@@ -448,7 +441,6 @@ static void cci_pci_remove(struct pci_dev *pcidev)
cci_pci_sriov_configure(pcidev, 0);
cci_remove_feature_devs(pcidev);
- pci_disable_pcie_error_reporting(pcidev);
}
static struct pci_driver cci_pci_driver = {
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index b9aae85ba930..dd7a783d53b5 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -13,6 +13,7 @@
#include <linux/dfl.h>
#include <linux/fpga-dfl.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/uaccess.h>
#include "dfl.h"
@@ -45,7 +46,7 @@ static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
};
/**
- * dfl_dev_info - dfl feature device information.
+ * struct dfl_dev_info - dfl feature device information.
* @name: name string of the feature platform device.
* @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
* @id: idr id of the feature dev.
@@ -67,7 +68,7 @@ static struct dfl_dev_info dfl_devs[] = {
};
/**
- * dfl_chardev_info - chardev information of dfl feature device
+ * struct dfl_chardev_info - chardev information of dfl feature device
* @name: name string of the char device.
* @devt: devt of the char device.
*/
@@ -293,9 +294,9 @@ static void dfl_bus_remove(struct device *dev)
ddrv->remove(ddev);
}
-static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct dfl_device *ddev = to_dfl_dev(dev);
+ const struct dfl_device *ddev = to_dfl_dev(dev);
return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
ddev->type, ddev->feature_id);
@@ -342,6 +343,8 @@ static void release_dfl_dev(struct device *dev)
if (ddev->mmio_res.parent)
release_resource(&ddev->mmio_res);
+ kfree(ddev->params);
+
ida_free(&dfl_device_ida, ddev->id);
kfree(ddev->irqs);
kfree(ddev);
@@ -380,7 +383,16 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
ddev->type = feature_dev_id_type(pdev);
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
+ ddev->dfh_version = feature->dfh_version;
ddev->cdev = pdata->dfl_cdev;
+ if (feature->param_size) {
+ ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
+ if (!ddev->params) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ddev->param_size = feature->param_size;
+ }
/* add mmio resource */
parent_res = &pdev->resource[feature->resource_index];
@@ -708,20 +720,27 @@ struct build_feature_devs_info {
* struct dfl_feature_info - sub feature info collected during feature dev build
*
* @fid: id of this sub feature.
+ * @revision: revision of this sub feature
+ * @dfh_version: version of Device Feature Header (DFH)
* @mmio_res: mmio resource of this sub feature.
* @ioaddr: mapped base address of mmio resource.
* @node: node in sub_features linked list.
* @irq_base: start of irq index in this sub feature.
* @nr_irqs: number of irqs of this sub feature.
+ * @param_size: size of DFH parameters.
+ * @params: DFH parameter data.
*/
struct dfl_feature_info {
u16 fid;
u8 revision;
+ u8 dfh_version;
struct resource mmio_res;
void __iomem *ioaddr;
struct list_head node;
unsigned int irq_base;
unsigned int nr_irqs;
+ unsigned int param_size;
+ u64 params[];
};
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
@@ -797,7 +816,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
+ feature->dfh_version = finfo->dfh_version;
+
+ if (finfo->param_size) {
+ feature->params = devm_kmemdup(binfo->dev,
+ finfo->params, finfo->param_size,
+ GFP_KERNEL);
+ if (!feature->params)
+ return -ENOMEM;
+ feature->param_size = finfo->param_size;
+ }
/*
* the FIU header feature has some fundamental functions (sriov
* set, port enable/disable) needed for the dfl bus device and
@@ -934,56 +963,115 @@ static u16 feature_id(u64 value)
return 0;
}
+static u64 *find_param(u64 *params, resource_size_t max, int param_id)
+{
+ u64 *end = params + max / sizeof(u64);
+ u64 v, next;
+
+ while (params < end) {
+ v = *params;
+ if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v))
+ return params;
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ break;
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ params += next;
+ }
+
+ return NULL;
+}
+
+/**
+ * dfh_find_param() - find parameter block for the given parameter id
+ * @dfl_dev: dfl device
+ * @param_id: id of dfl parameter
+ * @psize: destination to store size of parameter data in bytes
+ *
+ * Return: pointer to start of parameter data, PTR_ERR otherwise.
+ */
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize)
+{
+ u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id);
+
+ if (!phdr)
+ return ERR_PTR(-ENOENT);
+
+ if (psize)
+ *psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64);
+
+ return phdr + 1;
+}
+EXPORT_SYMBOL_GPL(dfh_find_param);
+
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
- resource_size_t ofst, u16 fid,
- unsigned int *irq_base, unsigned int *nr_irqs)
+ resource_size_t ofst, struct dfl_feature_info *finfo)
{
void __iomem *base = binfo->ioaddr + ofst;
unsigned int i, ibase, inr = 0;
+ void *params = finfo->params;
enum dfl_id_type type;
+ u16 fid = finfo->fid;
int virq;
+ u64 *p;
u64 v;
- type = feature_dev_id_type(binfo->feature_dev);
+ switch (finfo->dfh_version) {
+ case 0:
+ /*
+ * DFHv0 only provides MMIO resource information for each feature
+ * in the DFL header. There is no generic interrupt information.
+ * Instead, features with interrupt functionality provide
+ * the information in feature specific registers.
+ */
+ type = feature_dev_id_type(binfo->feature_dev);
+ if (type == PORT_ID) {
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ } else if (type == FME_ID) {
+ switch (fid) {
+ case FME_FEATURE_ID_GLOBAL_ERR:
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ }
+ break;
- /*
- * Ideally DFL framework should only read info from DFL header, but
- * current version DFL only provides mmio resources information for
- * each feature in DFL Header, no field for interrupt resources.
- * Interrupt resource information is provided by specific mmio
- * registers of each private feature which supports interrupt. So in
- * order to parse and assign irq resources, DFL framework has to look
- * into specific capability registers of these private features.
- *
- * Once future DFL version supports generic interrupt resource
- * information in common DFL headers, the generic interrupt parsing
- * code will be added. But in order to be compatible to old version
- * DFL, the driver may still fall back to these quirks.
- */
- if (type == PORT_ID) {
- switch (fid) {
- case PORT_FEATURE_ID_UINT:
- v = readq(base + PORT_UINT_CAP);
- ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
- inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
- break;
- case PORT_FEATURE_ID_ERROR:
- v = readq(base + PORT_ERROR_CAP);
- ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ case 1:
+ /*
+ * DFHv1 provides interrupt resource information in DFHv1
+ * parameter blocks.
+ */
+ p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X);
+ if (!p)
break;
- }
- } else if (type == FME_ID) {
- if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
- v = readq(base + FME_ERROR_CAP);
- ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
- }
+
+ p++;
+ ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p);
+ inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p);
+ break;
+
+ default:
+ dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version);
+ break;
}
if (!inr) {
- *irq_base = 0;
- *nr_irqs = 0;
+ finfo->irq_base = 0;
+ finfo->nr_irqs = 0;
return 0;
}
@@ -1006,12 +1094,37 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
}
}
- *irq_base = ibase;
- *nr_irqs = inr;
+ finfo->irq_base = ibase;
+ finfo->nr_irqs = inr;
return 0;
}
+static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max)
+{
+ int size = 0;
+ u64 v, next;
+
+ if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS,
+ readq(dfh_base + DFHv1_CSR_SIZE_GRP)))
+ return 0;
+
+ while (size + DFHv1_PARAM_HDR < max) {
+ v = readq(dfh_base + DFHv1_PARAM_HDR + size);
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ if (!next)
+ return -EINVAL;
+
+ size += next * sizeof(u64);
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ return size;
+ }
+
+ return -ENOENT;
+}
+
/*
* when create sub feature instances, for private features, it doesn't need
* to provide resource size and feature id as they could be read from DFH
@@ -1023,39 +1136,69 @@ static int
create_feature_instance(struct build_feature_devs_info *binfo,
resource_size_t ofst, resource_size_t size, u16 fid)
{
- unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ resource_size_t start, end;
+ int dfh_psize = 0;
u8 revision = 0;
+ u64 v, addr_off;
+ u8 dfh_ver = 0;
int ret;
- u64 v;
if (fid != FEATURE_ID_AFU) {
v = readq(binfo->ioaddr + ofst);
revision = FIELD_GET(DFH_REVISION, v);
-
+ dfh_ver = FIELD_GET(DFH_VERSION, v);
/* read feature size and id if inputs are invalid */
size = size ? size : feature_size(v);
fid = fid ? fid : feature_id(v);
+ if (dfh_ver == 1) {
+ dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size);
+ if (dfh_psize < 0) {
+ dev_err(binfo->dev,
+ "failed to read size of DFHv1 parameters %d\n",
+ dfh_psize);
+ return dfh_psize;
+ }
+ dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize);
+ }
}
if (binfo->len - ofst < size)
return -EINVAL;
- ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
- if (ret)
- return ret;
-
- finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ finfo = kzalloc(struct_size(finfo, params, dfh_psize / sizeof(u64)), GFP_KERNEL);
if (!finfo)
return -ENOMEM;
+ memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize);
+ finfo->param_size = dfh_psize;
+
finfo->fid = fid;
finfo->revision = revision;
- finfo->mmio_res.start = binfo->start + ofst;
- finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
+ finfo->dfh_version = dfh_ver;
+ if (dfh_ver == 1) {
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR);
+ addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v);
+ if (FIELD_GET(DFHv1_CSR_ADDR_REL, v))
+ start = addr_off << 1;
+ else
+ start = binfo->start + ofst + addr_off;
+
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP);
+ end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1;
+ } else {
+ start = binfo->start + ofst;
+ end = start + size - 1;
+ }
finfo->mmio_res.flags = IORESOURCE_MEM;
- finfo->irq_base = irq_base;
- finfo->nr_irqs = nr_irqs;
+ finfo->mmio_res.start = start;
+ finfo->mmio_res.end = end;
+
+ ret = parse_feature_irqs(binfo, ofst, finfo);
+ if (ret) {
+ kfree(finfo);
+ return ret;
+ }
list_add_tail(&finfo->node, &binfo->sub_features);
binfo->feature_num++;
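Each DFHv1 parameter block is a 64-bit header (id, version, next-offset in u64 units, end-of-parameters bit) followed by its payload, and dfh_find_param() returns a pointer to the first payload word. A hedged consumer sketch, assuming the DFHv1_PARAM_MSI_X_* definitions from dfl.h are visible to the caller:
#include <linux/bitfield.h>
#include <linux/dfl.h>
#include <linux/err.h>
static int example_get_msix_info(struct dfl_device *ddev)
{
        size_t psize;
        u64 *p;
        /* Look up the MSI-X parameter block copied from the DFHv1 header. */
        p = dfh_find_param(ddev, DFHv1_PARAM_ID_MSI_X, &psize);
        if (IS_ERR(p))
                return PTR_ERR(p);
        /* The first payload word carries the start vector and vector count. */
        dev_info(&ddev->dev, "MSI-X start %llu, count %llu (%zu payload bytes)\n",
                 FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p),
                 FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p), psize);
        return 0;
}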
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 06cfcd5e84bb..1d724a28f00a 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -74,11 +74,47 @@
#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_VERSION GENMASK_ULL(59, 52) /* DFH version */
#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
#define DFH_TYPE_AFU 1
#define DFH_TYPE_PRIVATE 3
#define DFH_TYPE_FIU 4
+/*
+ * DFHv1 Register Offset definitions
+ * In DFHv1, DFH + GUID + CSR_START + CSR_SIZE_GROUP + PARAM_HDR + PARAM_DATA
+ * as common header registers
+ */
+#define DFHv1_CSR_ADDR 0x18 /* CSR Register start address */
+#define DFHv1_CSR_SIZE_GRP 0x20 /* Size of Reg Block and Group/tag */
+#define DFHv1_PARAM_HDR 0x28 /* Optional First Param header */
+
+/*
+ * CSR Rel Bit, 1'b0 = relative (offset from feature DFH start),
+ * 1'b1 = absolute (ARM or other non-PCIe use)
+ */
+#define DFHv1_CSR_ADDR_REL BIT_ULL(0)
+
+/* CSR Header Register Bit Definitions */
+#define DFHv1_CSR_ADDR_MASK GENMASK_ULL(63, 1) /* 63:1 of CSR address */
+
+/* CSR SIZE Group Register Bit Definitions */
+#define DFHv1_CSR_SIZE_GRP_INSTANCE_ID GENMASK_ULL(15, 0) /* Enumeration instantiated IP */
+#define DFHv1_CSR_SIZE_GRP_GROUPING_ID GENMASK_ULL(30, 16) /* Group Features/interfaces */
+#define DFHv1_CSR_SIZE_GRP_HAS_PARAMS BIT_ULL(31) /* Presence of Parameters */
+#define DFHv1_CSR_SIZE_GRP_SIZE GENMASK_ULL(63, 32) /* Size of CSR Block in bytes */
+
+/* PARAM Header Register Bit Definitions */
+#define DFHv1_PARAM_HDR_ID GENMASK_ULL(15, 0) /* Id of this Param */
+#define DFHv1_PARAM_HDR_VER GENMASK_ULL(31, 16) /* Version Param */
+#define DFHv1_PARAM_HDR_NEXT_OFFSET GENMASK_ULL(63, 35) /* Offset of next Param */
+#define DFHv1_PARAM_HDR_NEXT_EOP BIT_ULL(32)
+#define DFHv1_PARAM_DATA 0x08 /* Offset of Param data from Param header */
+
+#define DFHv1_PARAM_ID_MSI_X 0x1
+#define DFHv1_PARAM_MSI_X_NUMV GENMASK_ULL(63, 32)
+#define DFHv1_PARAM_MSI_X_STARTV GENMASK_ULL(31, 0)
+
/* Next AFU Register Bitfield */
#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
@@ -231,6 +267,7 @@ struct dfl_feature_irq_ctx {
*
* @dev: ptr to pdev of the feature device which has the sub feature.
* @id: sub feature id.
+ * @revision: revision of this sub feature.
* @resource_index: each sub feature has one mmio resource for its registers.
* this index is used to find its mmio resource from the
* feature dev (platform device)'s resources.
@@ -240,6 +277,9 @@ struct dfl_feature_irq_ctx {
* @ops: ops of this sub feature.
* @ddev: ptr to the dfl device of this sub feature.
* @priv: priv data of this feature.
+ * @dfh_version: version of the DFH
+ * @param_size: size of dfh parameters
+ * @params: point to memory copy of dfh parameters
*/
struct dfl_feature {
struct platform_device *dev;
@@ -252,6 +292,9 @@ struct dfl_feature {
const struct dfl_feature_ops *ops;
struct dfl_device *ddev;
void *priv;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
};
#define FEATURE_DEV_ID_UNUSED (-1)
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 727704431f61..a6c25dee9cc1 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
/**
* fpga_bridge_get - get an exclusive reference to an fpga bridge
* @dev: parent device that fpga bridge was registered with
- * @info: fpga manager info
+ * @info: fpga image specific information
*
* Given a device, get an exclusive reference to an fpga bridge.
*
@@ -293,12 +293,15 @@ static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fpga_bridge *bridge = to_fpga_bridge(dev);
- int enable = 1;
+ int state = 1;
- if (bridge->br_ops && bridge->br_ops->enable_show)
- enable = bridge->br_ops->enable_show(bridge);
+ if (bridge->br_ops && bridge->br_ops->enable_show) {
+ state = bridge->br_ops->enable_show(bridge);
+ if (state < 0)
+ return state;
+ }
- return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
}
static DEVICE_ATTR_RO(name);
@@ -360,7 +363,6 @@ fpga_bridge_register(struct device *parent, const char *name,
bridge->dev.parent = parent;
bridge->dev.of_node = parent->of_node;
bridge->dev.id = id;
- of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
ret = dev_set_name(&bridge->dev, "br%d", id);
if (ret)
@@ -372,6 +374,8 @@ fpga_bridge_register(struct device *parent, const char *name,
return ERR_PTR(ret);
}
+ of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+
return bridge;
error_device:
@@ -413,7 +417,7 @@ static void fpga_bridge_dev_release(struct device *dev)
static int __init fpga_bridge_dev_init(void)
{
- fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
+ fpga_bridge_class = class_create("fpga_bridge");
if (IS_ERR(fpga_bridge_class))
return PTR_ERR(fpga_bridge_class);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 8efa67620e21..eb583f86a0b9 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -971,7 +971,7 @@ static int __init fpga_mgr_class_init(void)
{
pr_info("FPGA manager framework\n");
- fpga_mgr_class = class_create(THIS_MODULE, "fpga_manager");
+ fpga_mgr_class = class_create("fpga_manager");
if (IS_ERR(fpga_mgr_class))
return PTR_ERR(fpga_mgr_class);
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 27ff9dea04ae..ccf6fdab1360 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -293,7 +293,7 @@ static void fpga_region_dev_release(struct device *dev)
*/
static int __init fpga_region_init(void)
{
- fpga_region_class = class_create(THIS_MODULE, "fpga_region");
+ fpga_region_class = class_create("fpga_region");
if (IS_ERR(fpga_region_class))
return PTR_ERR(fpga_region_class);
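
The fpga-bridge, fpga-mgr and fpga-region hunks above (and the gnss core further down) are part of the tree-wide switch to the single-argument class_create(): the module owner argument is dropped and only the class name is passed. A minimal sketch of the updated call pattern follows; the "example" class name is made up.

#include <linux/device/class.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *example_class;

static int __init example_init(void)
{
	/* new-style call: name only, no THIS_MODULE */
	example_class = class_create("example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);

	return 0;
}

static void __exit example_exit(void)
{
	class_destroy(example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");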
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 79d48852825e..d7e2f9f461bc 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -14,6 +14,12 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+struct m10bmc_sec;
+
+struct m10bmc_sec_ops {
+ int (*rsu_status)(struct m10bmc_sec *sec);
+};
+
struct m10bmc_sec {
struct device *dev;
struct intel_m10bmc *m10bmc;
@@ -21,6 +27,7 @@ struct m10bmc_sec {
char *fw_name;
u32 fw_name_id;
bool cancel_request;
+ const struct m10bmc_sec_ops *ops;
};
static DEFINE_XARRAY_ALLOC(fw_upload_xa);
@@ -31,6 +38,71 @@ static DEFINE_XARRAY_ALLOC(fw_upload_xa);
#define REH_MAGIC GENMASK(15, 0)
#define REH_SHA_NUM_BYTES GENMASK(31, 16)
+static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 write_count = size / stride;
+ u32 leftover_offset = write_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp = 0;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
+ buf + offset, write_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
+ if (leftover_size) {
+ memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
+ ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
+ leftover_tmp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 read_count = size / stride;
+ u32 leftover_offset = read_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
+ if (leftover_size) {
+ ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
+ if (ret)
+ return ret;
+ memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
+ }
+
+ return 0;
+}
+
+
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
u32 prog_addr, u32 reh_addr, char *buf)
@@ -38,11 +110,9 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
struct m10bmc_sec *sec = dev_get_drvdata(dev);
int sha_num_bytes, i, ret, cnt = 0;
u8 hash[REH_SHA384_SIZE];
- unsigned int stride;
u32 magic;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
- ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
+ ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
if (ret)
return ret;
@@ -50,19 +120,16 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return sysfs_emit(buf, "hash not programmed\n");
sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
- if ((sha_num_bytes % stride) ||
- (sha_num_bytes != REH_SHA256_SIZE &&
- sha_num_bytes != REH_SHA384_SIZE)) {
+ if (sha_num_bytes != REH_SHA256_SIZE &&
+ sha_num_bytes != REH_SHA384_SIZE) {
dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
sha_num_bytes);
return -EINVAL;
}
- ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
- hash, sha_num_bytes / stride);
+ ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
if (ret) {
- dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
- reh_addr, sha_num_bytes / stride, ret);
+ dev_err(dev, "failed to read root entry hash\n");
return ret;
}
@@ -73,16 +140,24 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return cnt;
}
-#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
+#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
-{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_root_entry_hash(dev, csr_map->_name##_magic, \
+ csr_map->_name##_prog_addr, \
+ csr_map->_name##_reh_addr, \
+ buf); \
+} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)
-DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
+DEVICE_ATTR_SEC_REH_RO(bmc);
+DEVICE_ATTR_SEC_REH_RO(sr);
+DEVICE_ATTR_SEC_REH_RO(pr);
#define CSK_BIT_LEN 128U
#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
@@ -90,27 +165,16 @@ DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
- unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
+ unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
struct m10bmc_sec *sec = dev_get_drvdata(dev);
DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
__le32 csk_le32[CSK_32ARRAY_SIZE];
u32 csk32[CSK_32ARRAY_SIZE];
int ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
- if (size % stride) {
- dev_err(sec->dev,
- "CSK vector size (0x%x) not aligned to stride (0x%x)\n",
- size, stride);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
- size / stride);
+ ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
if (ret) {
- dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
- addr, size / stride, ret);
+ dev_err(sec->dev, "failed to read CSK vector\n");
return ret;
}
@@ -122,18 +186,25 @@ show_canceled_csk(struct device *dev, u32 addr, char *buf)
return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
-#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
+#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
-{ return show_canceled_csk(dev, _addr, buf); } \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_canceled_csk(dev, \
+ csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
+ buf); \
+} \
static DEVICE_ATTR_RO(_name##_canceled_csks)
#define CSK_VEC_OFFSET 0x34
-DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
+DEVICE_ATTR_SEC_CSK_RO(bmc);
+DEVICE_ATTR_SEC_CSK_RO(sr);
+DEVICE_ATTR_SEC_CSK_RO(pr);
#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
@@ -141,31 +212,21 @@ static ssize_t flash_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct m10bmc_sec *sec = dev_get_drvdata(dev);
- unsigned int stride, num_bits;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ unsigned int num_bits;
u8 *flash_buf;
int cnt, ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
- if (FLASH_COUNT_SIZE % stride) {
- dev_err(sec->dev,
- "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
- FLASH_COUNT_SIZE, stride);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
if (!flash_buf)
return -ENOMEM;
- ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
- flash_buf, FLASH_COUNT_SIZE / stride);
+ ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
+ FLASH_COUNT_SIZE);
if (ret) {
- dev_err(sec->dev,
- "failed to read flash count: %x cnt %x: %d\n",
- STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
+ dev_err(sec->dev, "failed to read flash count\n");
goto exit_free;
}
cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
@@ -200,25 +261,94 @@ static const struct attribute_group *m10bmc_sec_attr_groups[] = {
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 auth_result;
- dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
+ dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
- if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
+ if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}
+static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(DRBL_RSU_STATUS, doorbell);
+}
+
+static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 auth_result;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
+}
+
+static bool rsu_status_ok(u32 status)
+{
+ return (status == RSU_STAT_NORMAL ||
+ status == RSU_STAT_NIOS_OK ||
+ status == RSU_STAT_USER_OK ||
+ status == RSU_STAT_FACTORY_OK);
+}
+
+static bool rsu_progress_done(u32 progress)
+{
+ return (progress == RSU_PROG_IDLE ||
+ progress == RSU_PROG_RSU_DONE);
+}
+
+static bool rsu_progress_busy(u32 progress)
+{
+ return (progress == RSU_PROG_AUTHENTICATING ||
+ progress == RSU_PROG_COPYING ||
+ progress == RSU_PROG_UPDATE_CANCEL ||
+ progress == RSU_PROG_PROGRAM_KEY_HASH);
+}
+
+static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
+ u32 *progress, u32 *status)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
+ if (ret)
+ return ret;
+
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return ret;
+
+ *status = ret;
+ *progress = rsu_prog(*doorbell_reg);
+
+ return 0;
+}
+
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
- rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
+ if (!rsu_progress_done(rsu_prog(doorbell))) {
log_error_regs(sec, doorbell);
return FW_UPLOAD_ERR_BUSY;
}
@@ -226,19 +356,15 @@ static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_NONE;
}
-static inline bool rsu_start_done(u32 doorbell)
+static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
- u32 status, progress;
-
- if (doorbell & DRBL_RSU_REQUEST)
+ if (doorbell_reg & DRBL_RSU_REQUEST)
return false;
- status = rsu_stat(doorbell);
if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
return true;
- progress = rsu_prog(doorbell);
- if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
+ if (!rsu_progress_done(progress))
return true;
return false;
@@ -246,11 +372,12 @@ static inline bool rsu_start_done(u32 doorbell)
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
- u32 doorbell, status;
- int ret;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, progress, status;
+ int ret, err;
ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ csr_map->base + csr_map->doorbell,
DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
DRBL_RSU_REQUEST |
FIELD_PREP(DRBL_HOST_STATUS,
@@ -258,26 +385,25 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- doorbell,
- rsu_start_done(doorbell),
- NIOS_HANDSHAKE_INTERVAL_US,
- NIOS_HANDSHAKE_TIMEOUT_US);
+ ret = read_poll_timeout(m10bmc_sec_progress_status, err,
+ err < 0 || rsu_start_done(doorbell_reg, progress, status),
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US,
+ false,
+ sec, &doorbell_reg, &progress, &status);
if (ret == -ETIMEDOUT) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
- } else if (ret) {
+ } else if (err) {
return FW_UPLOAD_ERR_RW_ERROR;
}
- status = rsu_stat(doorbell);
if (status == RSU_STAT_WEAROUT) {
dev_warn(sec->dev, "Excessive flash update count detected\n");
return FW_UPLOAD_ERR_WEAROUT;
} else if (status == RSU_STAT_ERASE_FAIL) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
@@ -286,11 +412,12 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned long poll_timeout;
u32 doorbell, progress;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -300,7 +427,7 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
if (time_after(jiffies, poll_timeout))
break;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
@@ -319,11 +446,12 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
- u32 doorbell;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, status;
int ret;
ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_WRITE_DONE));
@@ -331,68 +459,58 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- doorbell,
- rsu_prog(doorbell) != RSU_PROG_READY,
+ csr_map->base + csr_map->doorbell,
+ doorbell_reg,
+ rsu_prog(doorbell_reg) != RSU_PROG_READY,
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
}
- switch (rsu_stat(doorbell)) {
- case RSU_STAT_NORMAL:
- case RSU_STAT_NIOS_OK:
- case RSU_STAT_USER_OK:
- case RSU_STAT_FACTORY_OK:
- break;
- default:
- log_error_regs(sec, doorbell);
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_HW_ERROR;
+ status = ret;
+
+ if (!rsu_status_ok(status)) {
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
return FW_UPLOAD_ERR_NONE;
}
-static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
+static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
- if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
+ u32 progress, status;
+
+ if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
return -EIO;
- switch (rsu_stat(*doorbell)) {
- case RSU_STAT_NORMAL:
- case RSU_STAT_NIOS_OK:
- case RSU_STAT_USER_OK:
- case RSU_STAT_FACTORY_OK:
- break;
- default:
+ if (!rsu_status_ok(status))
return -EINVAL;
- }
- switch (rsu_prog(*doorbell)) {
- case RSU_PROG_IDLE:
- case RSU_PROG_RSU_DONE:
+ if (rsu_progress_done(progress))
return 0;
- case RSU_PROG_AUTHENTICATING:
- case RSU_PROG_COPYING:
- case RSU_PROG_UPDATE_CANCEL:
- case RSU_PROG_PROGRAM_KEY_HASH:
+
+ if (rsu_progress_busy(progress))
return -EAGAIN;
- default:
- return -EINVAL;
- }
+
+ return -EINVAL;
}
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -400,7 +518,7 @@ static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_BUSY;
ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_ABORT_RSU));
@@ -421,39 +539,50 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
if (!size || size > M10BMC_STAGING_SIZE)
return FW_UPLOAD_ERR_INVALID_SIZE;
+ if (sec->m10bmc->flash_bulk_ops)
+ if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
+ return FW_UPLOAD_ERR_BUSY;
+
ret = rsu_check_idle(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
ret = rsu_update_init(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
ret = rsu_prog_ready(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
- if (sec->cancel_request)
- return rsu_cancel(sec);
+ if (sec->cancel_request) {
+ ret = rsu_cancel(sec);
+ goto unlock_flash;
+ }
return FW_UPLOAD_ERR_NONE;
+
+unlock_flash:
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
+ return ret;
}
#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
-static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
- u32 offset, u32 size, u32 *written)
+static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
+ u32 offset, u32 size, u32 *written)
{
struct m10bmc_sec *sec = fwl->dd_handle;
- u32 blk_size, doorbell, extra_offset;
- unsigned int stride, extra = 0;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ u32 blk_size, doorbell;
int ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (sec->cancel_request)
return rsu_cancel(sec);
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
@@ -461,28 +590,12 @@ static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data
return FW_UPLOAD_ERR_HW_ERROR;
}
- WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
+ WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
- ret = regmap_bulk_write(sec->m10bmc->regmap,
- M10BMC_STAGING_BASE + offset,
- (void *)data + offset,
- blk_size / stride);
+ ret = m10bmc_sec_write(sec, data, offset, blk_size);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- /*
- * If blk_size is not aligned to stride, then handle the extra
- * bytes with regmap_write.
- */
- if (blk_size % stride) {
- extra_offset = offset + ALIGN_DOWN(blk_size, stride);
- memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
- ret = regmap_write(sec->m10bmc->regmap,
- M10BMC_STAGING_BASE + extra_offset, extra);
- if (ret)
- return FW_UPLOAD_ERR_RW_ERROR;
- }
-
*written = blk_size;
return FW_UPLOAD_ERR_NONE;
}
@@ -539,16 +652,27 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
struct m10bmc_sec *sec = fwl->dd_handle;
(void)rsu_cancel(sec);
+
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
static const struct fw_upload_ops m10bmc_ops = {
.prepare = m10bmc_sec_prepare,
- .write = m10bmc_sec_write,
+ .write = m10bmc_sec_fw_write,
.poll_complete = m10bmc_sec_poll_complete,
.cancel = m10bmc_sec_cancel,
.cleanup = m10bmc_sec_cleanup,
};
+static const struct m10bmc_sec_ops m10sec_n3000_ops = {
+ .rsu_status = m10bmc_sec_n3000_rsu_status,
+};
+
+static const struct m10bmc_sec_ops m10sec_n6000_ops = {
+ .rsu_status = m10bmc_sec_n6000_rsu_status,
+};
+
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
@@ -564,6 +688,7 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
sec->dev = &pdev->dev;
sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+ sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
dev_set_drvdata(&pdev->dev, sec);
ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
@@ -574,20 +699,27 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
sec->fw_name_id);
sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
- if (!sec->fw_name)
- return -ENOMEM;
+ if (!sec->fw_name) {
+ ret = -ENOMEM;
+ goto fw_name_fail;
+ }
fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
&m10bmc_ops, sec);
if (IS_ERR(fwl)) {
dev_err(sec->dev, "Firmware Upload driver failed to start\n");
- kfree(sec->fw_name);
- xa_erase(&fw_upload_xa, sec->fw_name_id);
- return PTR_ERR(fwl);
+ ret = PTR_ERR(fwl);
+ goto fw_uploader_fail;
}
sec->fwl = fwl;
return 0;
+
+fw_uploader_fail:
+ kfree(sec->fw_name);
+fw_name_fail:
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return ret;
}
static int m10bmc_sec_remove(struct platform_device *pdev)
@@ -604,9 +736,15 @@ static int m10bmc_sec_remove(struct platform_device *pdev)
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{
.name = "d5005bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
+ },
+ {
+ .name = "n6000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
},
{ }
};
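
One pattern worth noting in the intel-m10-bmc-sec-update changes above is that rsu_update_init() now polls a helper, m10bmc_sec_progress_status(), through read_poll_timeout() instead of polling a single register. read_poll_timeout() accepts any callable, so a helper that fills in several outputs per iteration can be polled directly. A self-contained sketch of that pattern follows; every name prefixed "example_" is hypothetical.

#include <linux/iopoll.h>
#include <linux/types.h>

#define EXAMPLE_READY	1

struct example_dev;

/* Stand-in for a helper that reads several values in one call. */
static int example_read_state(struct example_dev *dev, u32 *progress, u32 *status)
{
	*progress = 0;
	*status = EXAMPLE_READY;	/* stand-in for a real register read */
	return 0;
}

static int example_wait_ready(struct example_dev *dev)
{
	u32 progress, status;
	int err, ret;

	/* Poll example_read_state() until it fails or reports READY. */
	ret = read_poll_timeout(example_read_state, err,
				err < 0 || status == EXAMPLE_READY,
				100, 1000000, false,
				dev, &progress, &status);
	if (ret)	/* typically -ETIMEDOUT */
		return ret;

	return err < 0 ? err : 0;
}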
diff --git a/drivers/fpga/lattice-sysconfig-spi.c b/drivers/fpga/lattice-sysconfig-spi.c
index 2702b26b7f55..44691cfcf50a 100644
--- a/drivers/fpga/lattice-sysconfig-spi.c
+++ b/drivers/fpga/lattice-sysconfig-spi.c
@@ -3,6 +3,7 @@
* Lattice FPGA programming over slave SPI sysCONFIG interface.
*/
+#include <linux/of.h>
#include <linux/spi/spi.h>
#include "lattice-sysconfig.h"
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
index 7436976ea904..d6070e7f5205 100644
--- a/drivers/fpga/microchip-spi.c
+++ b/drivers/fpga/microchip-spi.c
@@ -6,6 +6,7 @@
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/fpga/fpga-mgr.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
@@ -33,7 +34,7 @@
#define MPF_BITS_PER_COMPONENT_SIZE 22
-#define MPF_STATUS_POLL_RETRIES 10000
+#define MPF_STATUS_POLL_TIMEOUT (2 * USEC_PER_SEC)
#define MPF_STATUS_BUSY BIT(0)
#define MPF_STATUS_READY BIT(1)
#define MPF_STATUS_SPI_VIOLATION BIT(2)
@@ -42,46 +43,55 @@
struct mpf_priv {
struct spi_device *spi;
bool program_mode;
+ u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
+ u8 rx;
};
-static int mpf_read_status(struct spi_device *spi)
+static int mpf_read_status(struct mpf_priv *priv)
{
- u8 status = 0, status_command = MPF_SPI_READ_STATUS;
- struct spi_transfer xfers[2] = { 0 };
- int ret;
-
/*
* HW status is returned on MISO in the first byte after CS went
* active. However, the first reading can be inadequate, so we submit
* two identical SPI transfers and use the result of the latter one.
*/
- xfers[0].tx_buf = &status_command;
- xfers[1].tx_buf = &status_command;
- xfers[0].rx_buf = &status;
- xfers[1].rx_buf = &status;
- xfers[0].len = 1;
- xfers[1].len = 1;
- xfers[0].cs_change = 1;
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ },
+ };
+ u8 status;
+ int ret;
- ret = spi_sync_transfer(spi, xfers, 2);
+ priv->tx = MPF_SPI_READ_STATUS;
+
+ ret = spi_sync_transfer(priv->spi, xfers, 2);
+ if (ret)
+ return ret;
+
+ status = priv->rx;
if ((status & MPF_STATUS_SPI_VIOLATION) ||
(status & MPF_STATUS_SPI_ERROR))
- ret = -EIO;
+ return -EIO;
- return ret ? : status;
+ return status;
}
static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
{
struct mpf_priv *priv = mgr->priv;
- struct spi_device *spi;
bool program_mode;
int status;
- spi = priv->spi;
program_mode = priv->program_mode;
- status = mpf_read_status(spi);
+ status = mpf_read_status(priv);
if (!program_mode && !status)
return FPGA_MGR_STATE_OPERATING;
@@ -185,52 +195,53 @@ static int mpf_ops_parse_header(struct fpga_manager *mgr,
return 0;
}
-/* Poll HW status until busy bit is cleared and mask bits are set. */
-static int mpf_poll_status(struct spi_device *spi, u8 mask)
+static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
{
- int status, retries = MPF_STATUS_POLL_RETRIES;
-
- while (retries--) {
- status = mpf_read_status(spi);
- if (status < 0)
- return status;
+ int ret, status;
- if (status & MPF_STATUS_BUSY)
- continue;
-
- if (!mask || (status & mask))
- return status;
- }
+ /*
+ * Busy poll HW status. Polling stops if any of the following
+ * conditions are met:
+ * - timeout is reached
+ * - mpf_read_status() returns an error
+ * - busy bit is cleared AND mask bits are set
+ */
+ ret = read_poll_timeout(mpf_read_status, status,
+ (status < 0) ||
+ ((status & (MPF_STATUS_BUSY | mask)) == mask),
+ 0, MPF_STATUS_POLL_TIMEOUT, false, priv);
+ if (ret < 0)
+ return ret;
- return -EBUSY;
+ return status;
}
-static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
+static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
{
- int status = mpf_poll_status(spi, 0);
+ int status = mpf_poll_status(priv, 0);
if (status < 0)
return status;
- return spi_write(spi, buf, buf_size);
+ return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
}
-static int mpf_spi_write_then_read(struct spi_device *spi,
+static int mpf_spi_write_then_read(struct mpf_priv *priv,
const void *txbuf, size_t txbuf_size,
void *rxbuf, size_t rxbuf_size)
{
const u8 read_command[] = { MPF_SPI_READ_DATA };
int ret;
- ret = mpf_spi_write(spi, txbuf, txbuf_size);
+ ret = mpf_spi_write(priv, txbuf, txbuf_size);
if (ret)
return ret;
- ret = mpf_poll_status(spi, MPF_STATUS_READY);
+ ret = mpf_poll_status(priv, MPF_STATUS_READY);
if (ret < 0)
return ret;
- return spi_write_then_read(spi, read_command, sizeof(read_command),
+ return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
rxbuf, rxbuf_size);
}
@@ -242,7 +253,6 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
u32 isc_ret = 0;
int ret;
@@ -251,9 +261,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EOPNOTSUPP;
}
- spi = priv->spi;
-
- ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
+ ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
&isc_ret, sizeof(isc_ret));
if (ret || isc_ret) {
dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
@@ -261,7 +269,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EFAULT;
}
- ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
+ ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
if (ret) {
dev_err(dev, "Failed to enter program mode: %d\n", ret);
return ret;
@@ -272,13 +280,32 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return 0;
}
+static int mpf_spi_frame_write(struct mpf_priv *priv, const char *buf)
+{
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .len = 1,
+ }, {
+ .tx_buf = buf,
+ .len = MPF_SPI_FRAME_SIZE,
+ },
+ };
+ int ret;
+
+ ret = mpf_poll_status(priv, 0);
+ if (ret < 0)
+ return ret;
+
+ priv->tx = MPF_SPI_FRAME;
+
+ return spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
+}
+
static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
- u8 spi_frame_command[] = { MPF_SPI_FRAME };
- struct spi_transfer xfers[2] = { 0 };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
int ret, i;
if (count % MPF_SPI_FRAME_SIZE) {
@@ -287,19 +314,8 @@ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count
return -EINVAL;
}
- spi = priv->spi;
-
- xfers[0].tx_buf = spi_frame_command;
- xfers[0].len = sizeof(spi_frame_command);
-
for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
- xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
- xfers[1].len = MPF_SPI_FRAME_SIZE;
-
- ret = mpf_poll_status(spi, 0);
- if (ret >= 0)
- ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
-
+ ret = mpf_spi_frame_write(priv, buf + i * MPF_SPI_FRAME_SIZE);
if (ret) {
dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
i, count / MPF_SPI_FRAME_SIZE);
@@ -317,12 +333,9 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
const u8 release_command[] = { MPF_SPI_RELEASE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
int ret;
- spi = priv->spi;
-
- ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
+ ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
if (ret) {
dev_err(dev, "Failed to disable ISC: %d\n", ret);
return ret;
@@ -330,7 +343,7 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
usleep_range(1000, 2000);
- ret = mpf_spi_write(spi, release_command, sizeof(release_command));
+ ret = mpf_spi_write(priv, release_command, sizeof(release_command));
if (ret) {
dev_err(dev, "Failed to exit program mode: %d\n", ret);
return ret;
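
Two things stand out in the microchip-spi conversion above: status polling now goes through read_poll_timeout() instead of a hand-rolled retry counter, and the one-byte SPI command/response buffers move into the driver's private structure with __aligned(ARCH_KMALLOC_MINALIGN), so they are no longer stack variables handed to the SPI core. Below is a minimal sketch of that buffer pattern, assuming the private struct itself is heap-allocated; all "example" names are hypothetical.

#include <linux/slab.h>
#include <linux/spi/spi.h>

struct example_spi_priv {
	struct spi_device *spi;
	/* buffers live in driver data, not on the stack; alignment mirrors the hunk above */
	u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
	u8 rx;
};

static int example_read_byte(struct example_spi_priv *priv, u8 cmd)
{
	struct spi_transfer xfer = {
		.tx_buf = &priv->tx,
		.rx_buf = &priv->rx,
		.len = 1,
	};
	int ret;

	priv->tx = cmd;

	ret = spi_sync_transfer(priv->spi, &xfer, 1);
	if (ret)
		return ret;

	return priv->rx;
}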
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 357cea58ec98..f7f01982a512 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
/* Allocate buffers from the service layer's pool. */
for (i = 0; i < NUM_SVC_BUFS; i++) {
kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
- if (!kbuf) {
+ if (IS_ERR(kbuf)) {
s10_free_buffers(mgr);
- ret = -ENOMEM;
+ ret = PTR_ERR(kbuf);
goto init_done;
}
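
The stratix10-soc fix above matters because, as the hunk implies, stratix10_svc_allocate_memory() reports failure with an ERR_PTR() value rather than NULL, so the old !kbuf test could never trigger. A generic sketch of the check-and-propagate pattern follows, using a hypothetical allocator that always fails.

#include <linux/err.h>

/* Hypothetical allocator that encodes errors in the returned pointer. */
static void *example_alloc(unsigned long size)
{
	return ERR_PTR(-ENOMEM);	/* stand-in: always fails */
}

static int example_get_buffer(void **buf, unsigned long size)
{
	void *p = example_alloc(size);

	if (IS_ERR(p))
		return PTR_ERR(p);

	*buf = p;
	return 0;
}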
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 2d9c491f7be9..b76d85449b8f 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -69,7 +69,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
if (err)
return err;
- status = readl(priv->io_base);
+ status = xlnx_pr_decouple_read(priv, CTRL_OFFSET);
clk_disable(priv->clk);
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index c60f20949c47..f3434e2c487b 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -77,6 +77,26 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status;
+ int ret;
+
+ ret = zynqmp_pm_fpga_get_config_status(&status);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", status);
+}
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *zynqmp_fpga_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(zynqmp_fpga);
+
static const struct fpga_manager_ops zynqmp_fpga_ops = {
.state = zynqmp_fpga_ops_state,
.write_init = zynqmp_fpga_ops_write_init,
@@ -113,6 +133,7 @@ static struct platform_driver zynqmp_fpga_driver = {
.driver = {
.name = "zynqmp_fpga_manager",
.of_match_table = of_match_ptr(zynqmp_fpga_of_match),
+ .dev_groups = zynqmp_fpga_groups,
},
};
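
The zynqmp-fpga hunks above expose the configuration status as a read-only sysfs attribute published through the driver's dev_groups, so the file is created and removed along with the bound device and can be read from the device's sysfs directory. A stripped-down sketch of that registration pattern follows; every "example" name and the stand-in value are made up.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* stand-in value */
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
		.dev_groups = example_groups,	/* attributes tied to device lifetime */
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");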
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 694e80c06665..0b927c9f4267 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -897,10 +897,10 @@ static const struct attribute_group *cfam_attr_groups[] = {
NULL,
};
-static char *cfam_devnode(struct device *dev, umode_t *mode,
+static char *cfam_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
- struct fsi_slave *slave = to_fsi_slave(dev);
+ const struct fsi_slave *slave = to_fsi_slave(dev);
#ifdef CONFIG_FSI_NEW_DEV_NODE
return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
@@ -915,7 +915,7 @@ static const struct device_type cfam_type = {
.groups = cfam_attr_groups
};
-static char *fsi_cdev_devnode(struct device *dev, umode_t *mode,
+static char *fsi_cdev_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
#ifdef CONFIG_FSI_NEW_DEV_NODE
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 77a4b280c552..48f2ee0f78c4 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -387,7 +387,7 @@ static int __init gnss_module_init(void)
return ret;
}
- gnss_class = class_create(THIS_MODULE, "gnss");
+ gnss_class = class_create("gnss");
if (IS_ERR(gnss_class)) {
ret = PTR_ERR(gnss_class);
pr_err("failed to create class: %d\n", ret);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index ec7cfd4f52b1..5521f060d58e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -3,14 +3,6 @@
# GPIO infrastructure and drivers
#
-config ARCH_HAVE_CUSTOM_GPIO_H
- bool
- help
- Selecting this config option from the architecture Kconfig allows
- the architecture to provide a custom asm/gpio.h implementation
- overriding the default implementations. New uses of this are
- strongly discouraged.
-
menuconfig GPIOLIB
bool "GPIO Support"
help
@@ -47,6 +39,14 @@ config GPIOLIB_IRQCHIP
select IRQ_DOMAIN
bool
+config OF_GPIO_MM_GPIOCHIP
+ bool
+ help
+ This adds support for the legacy 'struct of_mm_gpio_chip' interface
+ from PowerPC. Existing drivers using this interface need to select
+ this symbol, but new drivers should use the generic gpio-regmap
+ infrastructure instead.
+
config DEBUG_GPIO
bool "Debug GPIO calls"
depends on DEBUG_KERNEL
@@ -100,7 +100,7 @@ config GPIO_GENERIC
tristate
config GPIO_REGMAP
- depends on REGMAP
+ select REGMAP
tristate
# put drivers in the right section, in alphabetical order
@@ -139,6 +139,7 @@ config GPIO_ALTERA
tristate "Altera GPIO"
depends on OF_GPIO
select GPIOLIB_IRQCHIP
+ select OF_GPIO_MM_GPIOCHIP
help
Say Y or M here to build support for the Altera PIO device.
@@ -354,16 +355,6 @@ config GPIO_IMX_SCU
def_bool y
depends on IMX_SCU
-config GPIO_IOP
- tristate "Intel IOP GPIO"
- depends on ARCH_IOP32X || COMPILE_TEST
- select GPIO_GENERIC
- help
- Say yes here to support the GPIO functionality of a number of Intel
- IOP32X or IOP33X series of chips.
-
- If unsure, say N.
-
config GPIO_IXP4XX
bool "Intel IXP4xx GPIO"
depends on ARCH_IXP4XX
@@ -390,6 +381,18 @@ config GPIO_LOONGSON
help
Driver for GPIO functionality on Loongson-2F/3A/3B processors.
+config GPIO_LOONGSON_64BIT
+ tristate "Loongson 64 bit GPIO support"
+ depends on LOONGARCH || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ help
+ Say yes here to support the GPIO functionality of a number of
+ Loongson series of chips. The Loongson GPIO controller supports
+ up to 60 GPIOS in total, 4 of which are dedicated GPIO pins, and
+ the remaining 56 are reused with other functions, with edge or
+ level triggered interrupts.
+
config GPIO_LPC18XX
tristate "NXP LPC18XX/43XX GPIO support"
default y if ARCH_LPC18XX
@@ -421,6 +424,7 @@ config GPIO_MENZ127
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
depends on LANTIQ && SOC_XWAY
+ select OF_GPIO_MM_GPIOCHIP
help
This enables support for memory mapped GPIOs on the External Bus Unit
(EBU) found on Lantiq SoCs. The GPIOs are output only as they are
@@ -429,6 +433,7 @@ config GPIO_MM_LANTIQ
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
+ select OF_GPIO_MM_GPIOCHIP
config GPIO_MPC8XXX
bool "MPC512x/MPC8xxx/QorIQ GPIO support"
@@ -495,14 +500,6 @@ config GPIO_PL061
help
Say yes here to support the PrimeCell PL061 GPIO device.
-config GPIO_PMIC_EIC_SPRD
- tristate "Spreadtrum PMIC EIC support"
- depends on MFD_SC27XX_PMIC || COMPILE_TEST
- depends on OF_GPIO
- select GPIOLIB_IRQCHIP
- help
- Say yes here to support Spreadtrum PMIC EIC device.
-
config GPIO_PXA
bool "PXA GPIO support"
depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
@@ -626,6 +623,17 @@ config GPIO_SYSCON
help
Say yes here to support GPIO functionality though SYSCON driver.
+config GPIO_TANGIER
+ tristate
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO support for Intel Tangier and compatible platforms.
+ Currently supported:
+ - Elkhart Lake
+ - Merrifield
+
+ If built as a module, its name will be gpio-tangier.
+
config GPIO_TB10X
bool
select GPIO_GENERIC
@@ -762,7 +770,7 @@ config GPIO_XTENSA
config GPIO_ZEVIO
bool "LSI ZEVIO SoC memory mapped GPIOs"
- depends on ARM && OF_GPIO
+ depends on ARM
help
Say yes here to support the GPIO controller in LSI ZEVIO SoCs.
@@ -831,6 +839,7 @@ menu "Port-mapped I/O GPIO drivers"
config GPIO_I8255
tristate
+ select GPIO_REGMAP
help
Enables support for the i8255 interface library functions. The i8255
interface library provides functions to facilitate communication with
@@ -845,6 +854,8 @@ config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
depends on PC104
select ISA_BUS_API
+ select REGMAP_MMIO
+ select REGMAP_IRQ
select GPIOLIB_IRQCHIP
select GPIO_I8255
help
@@ -870,8 +881,10 @@ config GPIO_104_IDI_48
tristate "ACCES 104-IDI-48 GPIO support"
depends on PC104
select ISA_BUS_API
+ select REGMAP_MMIO
+ select REGMAP_IRQ
select GPIOLIB_IRQCHIP
- select GPIO_I8255
+ select GPIO_REGMAP
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
104-IDI-48AC, 104-IDI-48B, 104-IDI-48BC). The base port addresses for
@@ -893,6 +906,7 @@ config GPIO_GPIO_MM
tristate "Diamond Systems GPIO-MM GPIO support"
depends on PC104
select ISA_BUS_API
+ select REGMAP_MMIO
select GPIO_I8255
help
Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
@@ -1004,6 +1018,16 @@ config GPIO_ADNP
enough to represent all pins, but the driver will assume a
register layout for 64 pins (8 registers).
+config GPIO_FXL6408
+ tristate "FXL6408 I2C GPIO expander"
+ select GPIO_REGMAP
+ select REGMAP_I2C
+ help
+ GPIO driver for Fairchild Semiconductor FXL6408 GPIO expander.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-fxl6408.
+
config GPIO_GW_PLD
tristate "Gateworks PLD GPIO Expander"
depends on OF_GPIO
@@ -1239,6 +1263,17 @@ config HTC_EGPIO
several HTC phones. It provides basic support for input
pins, output pins, and IRQs.
+config GPIO_ELKHARTLAKE
+ tristate "Intel Elkhart Lake PSE GPIO support"
+ depends on X86 || COMPILE_TEST
+ select GPIO_TANGIER
+ help
+ Select this option to enable GPIO support for Intel Elkhart Lake
+ PSE GPIO IP.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-elkhartlake.
+
config GPIO_JANZ_TTL
tristate "Janz VMOD-TTL Digital IO Module"
depends on MFD_JANZ_CMODIO
@@ -1257,6 +1292,18 @@ config GPIO_KEMPLD
This driver can also be built as a module. If so, the module will be
called gpio-kempld.
+config GPIO_LJCA
+ tristate "INTEL La Jolla Cove Adapter GPIO support"
+ depends on MFD_LJCA
+ select GPIOLIB_IRQCHIP
+ default MFD_LJCA
+ help
+ Select this option to enable GPIO driver for the INTEL
+ La Jolla Cove Adapter (LJCA) board.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-ljca.
+
config GPIO_LP3943
tristate "TI/National Semiconductor LP3943 GPIO expander"
depends on MFD_LP3943
@@ -1315,6 +1362,14 @@ config GPIO_PALMAS
Select this option to enable GPIO driver for the TI PALMAS
series chip family.
+config GPIO_PMIC_EIC_SPRD
+ tristate "Spreadtrum PMIC EIC support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support Spreadtrum PMIC EIC device.
+
config GPIO_RC5T583
bool "RICOH RC5T583 GPIO"
depends on MFD_RC5T583
@@ -1438,13 +1493,6 @@ config GPIO_TWL6040
Say yes here to access the GPO signals of twl6040
audio chip from Texas Instruments.
-config GPIO_UCB1400
- tristate "Philips UCB1400 GPIO"
- depends on UCB1400_CORE
- help
- This enables support for the Philips UCB1400 GPIO pins.
- The UCB1400 is an AC97 audio codec.
-
config GPIO_WHISKEY_COVE
tristate "GPIO support for Whiskey Cove PMIC"
depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC_BXTWC
@@ -1516,7 +1564,7 @@ config GPIO_BT8XX
config GPIO_MERRIFIELD
tristate "Intel Merrifield GPIO support"
depends on X86_INTEL_MID
- select GPIOLIB_IRQCHIP
+ select GPIO_TANGIER
help
Say Y here to support Intel Merrifield GPIO.
@@ -1531,6 +1579,7 @@ config GPIO_MLXBF2
tristate "Mellanox BlueField 2 SoC GPIO"
depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 010587025fc8..20036af3acb1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -54,12 +54,14 @@ obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_DLN2) += gpio-dln2.o
obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o
obj-$(CONFIG_GPIO_EIC_SPRD) += gpio-eic-sprd.o
+obj-$(CONFIG_GPIO_ELKHARTLAKE) += gpio-elkhartlake.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EN7523) += gpio-en7523.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_EXAR) += gpio-exar.o
obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o
+obj-$(CONFIG_GPIO_FXL6408) += gpio-fxl6408.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
@@ -72,15 +74,16 @@ obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDIO_16) += gpio-idio-16.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o
-obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
obj-$(CONFIG_GPIO_LATCH) += gpio-latch.o
+obj-$(CONFIG_GPIO_LJCA) += gpio-ljca.o
obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
+obj-$(CONFIG_GPIO_LOONGSON_64BIT) += gpio-loongson-64bit.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
@@ -146,6 +149,7 @@ obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
+obj-$(CONFIG_GPIO_TANGIER) += gpio-tangier.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
@@ -166,7 +170,6 @@ obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
-obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 76560744587a..189c3abe7e79 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -59,11 +59,6 @@ the device tree back-end. It is legacy and should not be used in new code.
Work items:
-- Get rid of struct of_mm_gpio_chip altogether: use the generic MMIO
- GPIO for all current users (see below). Delete struct of_mm_gpio_chip,
- to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_add()
- of_mm_gpiochip_remove() from the kernel.
-
- Change all consumer drivers that #include <linux/of_gpio.h> to
#include <linux/gpio/consumer.h> and stop doing custom parsing of the
GPIO lines from the device tree. This can be tricky and often involves
@@ -81,6 +76,16 @@ Work items:
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+Get rid of <linux/gpio/legacy-of-mm-gpiochip.h>
+
+Work items:
+
+- Get rid of struct of_mm_gpio_chip altogether: use the generic MMIO
+ GPIO for all current users (see below). Delete struct of_mm_gpio_chip,
+ to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove(),
+ CONFIG_OF_GPIO_MM_GPIOCHIP from the kernel.
+
+
Get rid of <linux/gpio.h>
This legacy header is a one stop shop for anything GPIO is closely tied
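
The TODO items carried and extended above push consumer drivers toward the descriptor-based GPIO API in <linux/gpio/consumer.h> instead of hand-parsing GPIO properties. A minimal sketch of what that looks like on the consumer side; the device and the "reset" property name are illustrative only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_claim_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Looks up the device's "reset" GPIO from firmware (DT/ACPI). */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);

	return 0;
}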
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 7b8829c8e423..f2253fd5ab4b 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -8,17 +8,14 @@
*/
#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/err.h>
#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include "gpio-i8255.h"
@@ -38,212 +35,101 @@ static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
+#define DIO48E_ENABLE_INTERRUPT 0xB
+#define DIO48E_DISABLE_INTERRUPT DIO48E_ENABLE_INTERRUPT
+#define DIO48E_CLEAR_INTERRUPT 0xF
+
#define DIO48E_NUM_PPI 2
-/**
- * struct dio48e_reg - device register structure
- * @ppi: Programmable Peripheral Interface groups
- * @enable_buffer: Enable/Disable Buffer groups
- * @unused1: Unused
- * @enable_interrupt: Write: Enable Interrupt
- * Read: Disable Interrupt
- * @unused2: Unused
- * @enable_counter: Write: Enable Counter/Timer Addressing
- * Read: Disable Counter/Timer Addressing
- * @unused3: Unused
- * @clear_interrupt: Clear Interrupt
- */
-struct dio48e_reg {
- struct i8255 ppi[DIO48E_NUM_PPI];
- u8 enable_buffer[DIO48E_NUM_PPI];
- u8 unused1;
- u8 enable_interrupt;
- u8 unused2;
- u8 enable_counter;
- u8 unused3;
- u8 clear_interrupt;
+static const struct regmap_range dio48e_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x9), regmap_reg_range(0xB, 0xB),
+ regmap_reg_range(0xD, 0xD), regmap_reg_range(0xF, 0xF),
};
-
-/**
- * struct dio48e_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @ppi_state: PPI device states
- * @lock: synchronization lock to prevent I/O race conditions
- * @reg: I/O address offset for the device registers
- * @irq_mask: I/O bits affected by interrupts
- */
-struct dio48e_gpio {
- struct gpio_chip chip;
- struct i8255_state ppi_state[DIO48E_NUM_PPI];
- raw_spinlock_t lock;
- struct dio48e_reg __iomem *reg;
- unsigned char irq_mask;
+static const struct regmap_range dio48e_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x6),
+ regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD),
+ regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_range dio48e_volatile_ranges[] = {
+ i8255_volatile_regmap_range(0x0), i8255_volatile_regmap_range(0x4),
+ regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD),
+ regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_range dio48e_precious_ranges[] = {
+ regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD),
+ regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_access_table dio48e_wr_table = {
+ .yes_ranges = dio48e_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_wr_ranges),
+};
+static const struct regmap_access_table dio48e_rd_table = {
+ .yes_ranges = dio48e_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_rd_ranges),
+};
+static const struct regmap_access_table dio48e_volatile_table = {
+ .yes_ranges = dio48e_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_volatile_ranges),
+};
+static const struct regmap_access_table dio48e_precious_table = {
+ .yes_ranges = dio48e_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_precious_ranges),
+};
+static const struct regmap_config dio48e_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .max_register = 0xF,
+ .wr_table = &dio48e_wr_table,
+ .rd_table = &dio48e_rd_table,
+ .volatile_table = &dio48e_volatile_table,
+ .precious_table = &dio48e_precious_table,
+ .cache_type = REGCACHE_FLAT,
+ .use_raw_spinlock = true,
};
-static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- if (i8255_get_direction(dio48egpio->ppi_state, offset))
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_direction_input(dio48egpio->reg->ppi, dio48egpio->ppi_state,
- offset);
-
- return 0;
-}
-
-static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_direction_output(dio48egpio->reg->ppi, dio48egpio->ppi_state,
- offset, value);
-
- return 0;
-}
-
-static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- return i8255_get(dio48egpio->reg->ppi, offset);
-}
-
-static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_get_multiple(dio48egpio->reg->ppi, mask, bits, chip->ngpio);
-
- return 0;
-}
-
-static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_set(dio48egpio->reg->ppi, dio48egpio->ppi_state, offset, value);
-}
-
-static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_set_multiple(dio48egpio->reg->ppi, dio48egpio->ppi_state, mask,
- bits, chip->ngpio);
-}
-
-static void dio48e_irq_ack(struct irq_data *data)
-{
-}
-
-static void dio48e_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- unsigned long flags;
-
- /* only bit 3 on each respective Port C supports interrupts */
- if (offset != 19 && offset != 43)
- return;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (offset == 19)
- dio48egpio->irq_mask &= ~BIT(0);
- else
- dio48egpio->irq_mask &= ~BIT(1);
- gpiochip_disable_irq(chip, offset);
-
- if (!dio48egpio->irq_mask)
- /* disable interrupts */
- ioread8(&dio48egpio->reg->enable_interrupt);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-}
-
-static void dio48e_irq_unmask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- unsigned long flags;
-
- /* only bit 3 on each respective Port C supports interrupts */
- if (offset != 19 && offset != 43)
- return;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (!dio48egpio->irq_mask) {
- /* enable interrupts */
- iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
- iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
+/* only bit 3 on each respective Port C supports interrupts */
+#define DIO48E_REGMAP_IRQ(_ppi) \
+ [19 + (_ppi) * 24] = { \
+ .mask = BIT(_ppi), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_RISING }, \
}
- gpiochip_enable_irq(chip, offset);
- if (offset == 19)
- dio48egpio->irq_mask |= BIT(0);
- else
- dio48egpio->irq_mask |= BIT(1);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-}
-
-static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- const unsigned long offset = irqd_to_hwirq(data);
-
- /* only bit 3 on each respective Port C supports interrupts */
- if (offset != 19 && offset != 43)
- return -EINVAL;
-
- if (flow_type != IRQ_TYPE_NONE && flow_type != IRQ_TYPE_EDGE_RISING)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip dio48e_irqchip = {
- .name = "104-dio-48e",
- .irq_ack = dio48e_irq_ack,
- .irq_mask = dio48e_irq_mask,
- .irq_unmask = dio48e_irq_unmask,
- .irq_set_type = dio48e_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
+static const struct regmap_irq dio48e_regmap_irqs[] = {
+ DIO48E_REGMAP_IRQ(0), DIO48E_REGMAP_IRQ(1),
};
-static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
+static int dio48e_handle_mask_sync(struct regmap *const map, const int index,
+ const unsigned int mask_buf_def,
+ const unsigned int mask_buf,
+ void *const irq_drv_data)
{
- struct dio48e_gpio *const dio48egpio = dev_id;
- struct gpio_chip *const chip = &dio48egpio->chip;
- const unsigned long irq_mask = dio48egpio->irq_mask;
- unsigned long gpio;
+ unsigned int *const irq_mask = irq_drv_data;
+ const unsigned int prev_mask = *irq_mask;
+ int err;
+ unsigned int val;
- for_each_set_bit(gpio, &irq_mask, 2)
- generic_handle_domain_irq(chip->irq.domain,
- 19 + gpio*24);
+ /* exit early if no change since the previous mask */
+ if (mask_buf == prev_mask)
+ return 0;
- raw_spin_lock(&dio48egpio->lock);
+ /* remember the current mask for the next mask sync */
+ *irq_mask = mask_buf;
- iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
+ /* if all previously masked, enable interrupts when unmasking */
+ if (prev_mask == mask_buf_def) {
+ err = regmap_write(map, DIO48E_CLEAR_INTERRUPT, 0x00);
+ if (err)
+ return err;
+ return regmap_write(map, DIO48E_ENABLE_INTERRUPT, 0x00);
+ }
- raw_spin_unlock(&dio48egpio->lock);
+ /* if all are currently masked, disable interrupts */
+ if (mask_buf == mask_buf_def)
+ return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val);
- return IRQ_HANDLED;
+ return 0;
}
#define DIO48E_NGPIO 48
@@ -266,41 +152,24 @@ static const char *dio48e_names[DIO48E_NGPIO] = {
"PPI Group 1 Port C 5", "PPI Group 1 Port C 6", "PPI Group 1 Port C 7"
};
-static int dio48e_irq_init_hw(struct gpio_chip *gc)
+static int dio48e_irq_init_hw(struct regmap *const map)
{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(gc);
+ unsigned int val;
/* Disable IRQ by default */
- ioread8(&dio48egpio->reg->enable_interrupt);
-
- return 0;
-}
-
-static void dio48e_init_ppi(struct i8255 __iomem *const ppi,
- struct i8255_state *const ppi_state)
-{
- const unsigned long ngpio = 24;
- const unsigned long mask = GENMASK(ngpio - 1, 0);
- const unsigned long bits = 0;
- unsigned long i;
-
- /* Initialize all GPIO to output 0 */
- for (i = 0; i < DIO48E_NUM_PPI; i++) {
- i8255_mode0_output(&ppi[i]);
- i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
- }
+ return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val);
}
static int dio48e_probe(struct device *dev, unsigned int id)
{
- struct dio48e_gpio *dio48egpio;
const char *const name = dev_name(dev);
- struct gpio_irq_chip *girq;
+ struct i8255_regmap_config config = {};
+ void __iomem *regs;
+ struct regmap *map;
int err;
-
- dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
- if (!dio48egpio)
- return -ENOMEM;
+ struct regmap_irq_chip *chip;
+ unsigned int irq_mask;
+ struct regmap_irq_chip_data *chip_data;
if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -308,53 +177,48 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- dio48egpio->reg = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
- if (!dio48egpio->reg)
+ regs = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
+ if (!regs)
return -ENOMEM;
- dio48egpio->chip.label = name;
- dio48egpio->chip.parent = dev;
- dio48egpio->chip.owner = THIS_MODULE;
- dio48egpio->chip.base = -1;
- dio48egpio->chip.ngpio = DIO48E_NGPIO;
- dio48egpio->chip.names = dio48e_names;
- dio48egpio->chip.get_direction = dio48e_gpio_get_direction;
- dio48egpio->chip.direction_input = dio48e_gpio_direction_input;
- dio48egpio->chip.direction_output = dio48e_gpio_direction_output;
- dio48egpio->chip.get = dio48e_gpio_get;
- dio48egpio->chip.get_multiple = dio48e_gpio_get_multiple;
- dio48egpio->chip.set = dio48e_gpio_set;
- dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
-
- girq = &dio48egpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = dio48e_irq_init_hw;
-
- raw_spin_lock_init(&dio48egpio->lock);
-
- i8255_state_init(dio48egpio->ppi_state, DIO48E_NUM_PPI);
- dio48e_init_ppi(dio48egpio->reg->ppi, dio48egpio->ppi_state);
-
- err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
+ map = devm_regmap_init_mmio(dev, regs, &dio48e_regmap_config);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "Unable to initialize register map\n");
- err = devm_request_irq(dev, irq[id], dio48e_irq_handler, 0, name,
- dio48egpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->irq_drv_data = devm_kzalloc(dev, sizeof(irq_mask), GFP_KERNEL);
+ if (!chip->irq_drv_data)
+ return -ENOMEM;
+
+ chip->name = name;
+ chip->mask_base = DIO48E_ENABLE_INTERRUPT;
+ chip->ack_base = DIO48E_CLEAR_INTERRUPT;
+ chip->no_status = true;
+ chip->num_regs = 1;
+ chip->irqs = dio48e_regmap_irqs;
+ chip->num_irqs = ARRAY_SIZE(dio48e_regmap_irqs);
+ chip->handle_mask_sync = dio48e_handle_mask_sync;
+
+ /* Initialize to prevent spurious interrupts before we're ready */
+ err = dio48e_irq_init_hw(map);
+ if (err)
return err;
- }
- return 0;
+ err = devm_regmap_add_irq_chip(dev, map, irq[id], 0, 0, chip, &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ config.parent = dev;
+ config.map = map;
+ config.num_ppi = DIO48E_NUM_PPI;
+ config.names = dio48e_names;
+ config.domain = regmap_irq_get_domain(chip_data);
+
+ return devm_i8255_regmap_register(dev, &config);
}
static struct isa_driver dio48e_driver = {
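
For reference, a worked trace of dio48e_handle_mask_sync() above (illustrative only; it assumes the two regmap_irq entries use masks BIT(0) and BIT(1), so mask_buf_def is 0x3 — the macro definition is outside this excerpt):

	/*
	 * prev_mask 0x3 -> mask_buf 0x2: first line unmasked; the callback acks
	 *     via DIO48E_CLEAR_INTERRUPT, then arms DIO48E_ENABLE_INTERRUPT.
	 * prev_mask 0x2 -> mask_buf 0x0: second line unmasked; interrupts are
	 *     already enabled, so only *irq_mask (chip->irq_drv_data) is updated.
	 * prev_mask 0x0 -> mask_buf 0x3: everything masked again; a read of
	 *     DIO48E_DISABLE_INTERRUPT shuts the interrupt logic off.
	 */
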
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index c5e231fde1af..ba73ee9c0c29 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -8,23 +8,18 @@
*/
#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
+#include <linux/err.h>
+#include <linux/gpio/regmap.h>
#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
-#include "gpio-i8255.h"
-
-MODULE_IMPORT_NS(I8255);
-
#define IDI_48_EXTENT 8
#define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT)
@@ -38,185 +33,84 @@ static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
-/**
- * struct idi_48_reg - device register structure
- * @port0: Port 0 Inputs
- * @unused: Unused
- * @port1: Port 1 Inputs
- * @irq: Read: IRQ Status Register/IRQ Clear
- * Write: IRQ Enable/Disable
- */
-struct idi_48_reg {
- u8 port0[3];
- u8 unused;
- u8 port1[3];
- u8 irq;
-};
+#define IDI48_IRQ_STATUS 0x7
+#define IDI48_IRQ_ENABLE IDI48_IRQ_STATUS
-/**
- * struct idi_48_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @lock: synchronization lock to prevent I/O race conditions
- * @irq_mask: input bits affected by interrupts
- * @reg: I/O address offset for the device registers
- * @cos_enb: Change-Of-State IRQ enable boundaries mask
- */
-struct idi_48_gpio {
- struct gpio_chip chip;
- spinlock_t lock;
- unsigned char irq_mask[6];
- struct idi_48_reg __iomem *reg;
- unsigned char cos_enb;
-};
-
-static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+static int idi_48_reg_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask)
{
- return GPIO_LINE_DIRECTION_IN;
-}
+ const unsigned int line = offset % 8;
+ const unsigned int stride = offset / 8;
+ const unsigned int port = (stride / 3) * 4;
+ const unsigned int port_stride = stride % 3;
-static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- return 0;
-}
-
-static int idi_48_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- void __iomem *const ppi = idi48gpio->reg;
-
- return i8255_get(ppi, offset);
-}
-
-static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
-{
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- void __iomem *const ppi = idi48gpio->reg;
-
- i8255_get_multiple(ppi, mask, bits, chip->ngpio);
+ *reg = base + port + port_stride;
+ *mask = BIT(line);
return 0;
}
-static void idi_48_irq_ack(struct irq_data *data)
-{
-}
-
-static void idi_48_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned int offset = irqd_to_hwirq(data);
- const unsigned long boundary = offset / 8;
- const unsigned long mask = BIT(offset % 8);
- unsigned long flags;
-
- spin_lock_irqsave(&idi48gpio->lock, flags);
-
- idi48gpio->irq_mask[boundary] &= ~mask;
- gpiochip_disable_irq(chip, offset);
-
- /* Exit early if there are still input lines with IRQ unmasked */
- if (idi48gpio->irq_mask[boundary])
- goto exit;
-
- idi48gpio->cos_enb &= ~BIT(boundary);
-
- iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
-
-exit:
- spin_unlock_irqrestore(&idi48gpio->lock, flags);
-}
-
-static void idi_48_irq_unmask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned int offset = irqd_to_hwirq(data);
- const unsigned long boundary = offset / 8;
- const unsigned long mask = BIT(offset % 8);
- unsigned int prev_irq_mask;
- unsigned long flags;
-
- spin_lock_irqsave(&idi48gpio->lock, flags);
-
- prev_irq_mask = idi48gpio->irq_mask[boundary];
-
- gpiochip_enable_irq(chip, offset);
- idi48gpio->irq_mask[boundary] |= mask;
-
- /* Exit early if IRQ was already unmasked for this boundary */
- if (prev_irq_mask)
- goto exit;
-
- idi48gpio->cos_enb |= BIT(boundary);
-
- iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
-
-exit:
- spin_unlock_irqrestore(&idi48gpio->lock, flags);
-}
-
-static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- /* The only valid irq types are none and both-edges */
- if (flow_type != IRQ_TYPE_NONE &&
- (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip idi_48_irqchip = {
- .name = "104-idi-48",
- .irq_ack = idi_48_irq_ack,
- .irq_mask = idi_48_irq_mask,
- .irq_unmask = idi_48_irq_unmask,
- .irq_set_type = idi_48_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
+static const struct regmap_range idi_48_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x6),
+};
+static const struct regmap_range idi_48_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x7),
+};
+static const struct regmap_range idi_48_precious_ranges[] = {
+ regmap_reg_range(0x7, 0x7),
+};
+static const struct regmap_access_table idi_48_wr_table = {
+ .no_ranges = idi_48_wr_ranges,
+ .n_no_ranges = ARRAY_SIZE(idi_48_wr_ranges),
+};
+static const struct regmap_access_table idi_48_rd_table = {
+ .yes_ranges = idi_48_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idi_48_rd_ranges),
+};
+static const struct regmap_access_table idi_48_precious_table = {
+ .yes_ranges = idi_48_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idi_48_precious_ranges),
+};
+static const struct regmap_config idi48_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .max_register = 0x6,
+ .wr_table = &idi_48_wr_table,
+ .rd_table = &idi_48_rd_table,
+ .precious_table = &idi_48_precious_table,
+ .use_raw_spinlock = true,
};
-static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
-{
- struct idi_48_gpio *const idi48gpio = dev_id;
- unsigned long cos_status;
- unsigned long boundary;
- unsigned long irq_mask;
- unsigned long bit_num;
- unsigned long gpio;
- struct gpio_chip *const chip = &idi48gpio->chip;
-
- spin_lock(&idi48gpio->lock);
-
- cos_status = ioread8(&idi48gpio->reg->irq);
-
- /* IRQ Status (bit 6) is active low (0 = IRQ generated by device) */
- if (cos_status & BIT(6)) {
- spin_unlock(&idi48gpio->lock);
- return IRQ_NONE;
- }
-
- /* Bit 0-5 indicate which Change-Of-State boundary triggered the IRQ */
- cos_status &= 0x3F;
-
- for_each_set_bit(boundary, &cos_status, 6) {
- irq_mask = idi48gpio->irq_mask[boundary];
-
- for_each_set_bit(bit_num, &irq_mask, 8) {
- gpio = bit_num + boundary * 8;
+#define IDI48_NGPIO 48
- generic_handle_domain_irq(chip->irq.domain,
- gpio);
- }
+#define IDI48_REGMAP_IRQ(_id) \
+ [_id] = { \
+ .mask = BIT((_id) / 8), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \
}
- spin_unlock(&idi48gpio->lock);
-
- return IRQ_HANDLED;
-}
+static const struct regmap_irq idi48_regmap_irqs[IDI48_NGPIO] = {
+ IDI48_REGMAP_IRQ(0), IDI48_REGMAP_IRQ(1), IDI48_REGMAP_IRQ(2), /* 0-2 */
+ IDI48_REGMAP_IRQ(3), IDI48_REGMAP_IRQ(4), IDI48_REGMAP_IRQ(5), /* 3-5 */
+ IDI48_REGMAP_IRQ(6), IDI48_REGMAP_IRQ(7), IDI48_REGMAP_IRQ(8), /* 6-8 */
+ IDI48_REGMAP_IRQ(9), IDI48_REGMAP_IRQ(10), IDI48_REGMAP_IRQ(11), /* 9-11 */
+ IDI48_REGMAP_IRQ(12), IDI48_REGMAP_IRQ(13), IDI48_REGMAP_IRQ(14), /* 12-14 */
+ IDI48_REGMAP_IRQ(15), IDI48_REGMAP_IRQ(16), IDI48_REGMAP_IRQ(17), /* 15-17 */
+ IDI48_REGMAP_IRQ(18), IDI48_REGMAP_IRQ(19), IDI48_REGMAP_IRQ(20), /* 18-20 */
+ IDI48_REGMAP_IRQ(21), IDI48_REGMAP_IRQ(22), IDI48_REGMAP_IRQ(23), /* 21-23 */
+ IDI48_REGMAP_IRQ(24), IDI48_REGMAP_IRQ(25), IDI48_REGMAP_IRQ(26), /* 24-26 */
+ IDI48_REGMAP_IRQ(27), IDI48_REGMAP_IRQ(28), IDI48_REGMAP_IRQ(29), /* 27-29 */
+ IDI48_REGMAP_IRQ(30), IDI48_REGMAP_IRQ(31), IDI48_REGMAP_IRQ(32), /* 30-32 */
+ IDI48_REGMAP_IRQ(33), IDI48_REGMAP_IRQ(34), IDI48_REGMAP_IRQ(35), /* 33-35 */
+ IDI48_REGMAP_IRQ(36), IDI48_REGMAP_IRQ(37), IDI48_REGMAP_IRQ(38), /* 36-38 */
+ IDI48_REGMAP_IRQ(39), IDI48_REGMAP_IRQ(40), IDI48_REGMAP_IRQ(41), /* 39-41 */
+ IDI48_REGMAP_IRQ(42), IDI48_REGMAP_IRQ(43), IDI48_REGMAP_IRQ(44), /* 42-44 */
+ IDI48_REGMAP_IRQ(45), IDI48_REGMAP_IRQ(46), IDI48_REGMAP_IRQ(47), /* 45-47 */
+};
-#define IDI48_NGPIO 48
static const char *idi48_names[IDI48_NGPIO] = {
"Bit 0 A", "Bit 1 A", "Bit 2 A", "Bit 3 A", "Bit 4 A", "Bit 5 A",
"Bit 6 A", "Bit 7 A", "Bit 8 A", "Bit 9 A", "Bit 10 A", "Bit 11 A",
@@ -228,75 +122,58 @@ static const char *idi48_names[IDI48_NGPIO] = {
"Bit 18 B", "Bit 19 B", "Bit 20 B", "Bit 21 B", "Bit 22 B", "Bit 23 B"
};
-static int idi_48_irq_init_hw(struct gpio_chip *gc)
-{
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(gc);
-
- /* Disable IRQ by default */
- iowrite8(0, &idi48gpio->reg->irq);
- ioread8(&idi48gpio->reg->irq);
-
- return 0;
-}
-
static int idi_48_probe(struct device *dev, unsigned int id)
{
- struct idi_48_gpio *idi48gpio;
const char *const name = dev_name(dev);
- struct gpio_irq_chip *girq;
+ struct gpio_regmap_config config = {};
+ void __iomem *regs;
+ struct regmap *map;
+ struct regmap_irq_chip *chip;
+ struct regmap_irq_chip_data *chip_data;
int err;
- idi48gpio = devm_kzalloc(dev, sizeof(*idi48gpio), GFP_KERNEL);
- if (!idi48gpio)
- return -ENOMEM;
-
if (!devm_request_region(dev, base[id], IDI_48_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
base[id], base[id] + IDI_48_EXTENT);
return -EBUSY;
}
- idi48gpio->reg = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
- if (!idi48gpio->reg)
+ regs = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
+ if (!regs)
return -ENOMEM;
- idi48gpio->chip.label = name;
- idi48gpio->chip.parent = dev;
- idi48gpio->chip.owner = THIS_MODULE;
- idi48gpio->chip.base = -1;
- idi48gpio->chip.ngpio = IDI48_NGPIO;
- idi48gpio->chip.names = idi48_names;
- idi48gpio->chip.get_direction = idi_48_gpio_get_direction;
- idi48gpio->chip.direction_input = idi_48_gpio_direction_input;
- idi48gpio->chip.get = idi_48_gpio_get;
- idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
-
- girq = &idi48gpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = idi_48_irq_init_hw;
-
- spin_lock_init(&idi48gpio->lock);
+ map = devm_regmap_init_mmio(dev, regs, &idi48_regmap_config);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "Unable to initialize register map\n");
- err = devm_gpiochip_add_data(dev, &idi48gpio->chip, idi48gpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
-
- err = devm_request_irq(dev, irq[id], idi_48_irq_handler, IRQF_SHARED,
- name, idi48gpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- return err;
- }
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
- return 0;
+ chip->name = name;
+ chip->status_base = IDI48_IRQ_STATUS;
+ chip->unmask_base = IDI48_IRQ_ENABLE;
+ chip->clear_on_unmask = true;
+ chip->num_regs = 1;
+ chip->irqs = idi48_regmap_irqs;
+ chip->num_irqs = ARRAY_SIZE(idi48_regmap_irqs);
+
+ err = devm_regmap_add_irq_chip(dev, map, irq[id], IRQF_SHARED, 0, chip,
+ &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ config.parent = dev;
+ config.regmap = map;
+ config.ngpio = IDI48_NGPIO;
+ config.names = idi48_names;
+ config.reg_dat_base = GPIO_REGMAP_ADDR(0x0);
+ config.ngpio_per_reg = 8;
+ config.reg_mask_xlate = idi_48_reg_mask_xlate;
+ config.irq_domain = regmap_irq_get_domain(chip_data);
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
}
static struct isa_driver idi_48_driver = {
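
The custom idi_48_reg_mask_xlate() above is needed because the 104-IDI-48 register map has an unused byte at offset 0x3 between the two input-port groups, so a plain linear offset-to-register mapping would land on it. A worked example (illustrative only):

	/*
	 * GPIO offset 27 ("Bit 3 B"):
	 *   line        = 27 % 8      = 3
	 *   stride      = 27 / 8      = 3
	 *   port        = (3 / 3) * 4 = 4   (jumps over the unused 0x3 register)
	 *   port_stride = 3 % 3       = 0
	 * With reg_dat_base at 0x0 this yields *reg = 0x4 (first Port 1 input
	 * register) and *mask = BIT(3).
	 */
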
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index a6439e3daff0..9b01c391efce 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -307,6 +307,7 @@ static void adnp_irq_mask(struct irq_data *d)
unsigned int pos = d->hwirq & 7;
adnp->irq_enable[reg] &= ~BIT(pos);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void adnp_irq_unmask(struct irq_data *d)
@@ -316,6 +317,7 @@ static void adnp_irq_unmask(struct irq_data *d)
unsigned int reg = d->hwirq >> adnp->reg_shift;
unsigned int pos = d->hwirq & 7;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
adnp->irq_enable[reg] |= BIT(pos);
}
@@ -372,13 +374,15 @@ static void adnp_irq_bus_unlock(struct irq_data *d)
mutex_unlock(&adnp->irq_lock);
}
-static struct irq_chip adnp_irq_chip = {
+static const struct irq_chip adnp_irq_chip = {
.name = "gpio-adnp",
.irq_mask = adnp_irq_mask,
.irq_unmask = adnp_irq_unmask,
.irq_set_type = adnp_irq_set_type,
.irq_bus_lock = adnp_irq_bus_lock,
.irq_bus_sync_unlock = adnp_irq_bus_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int adnp_irq_setup(struct adnp *adnp)
@@ -469,7 +473,8 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
return err;
girq = &chip->irq;
- girq->chip = &adnp_irq_chip;
+ gpio_irq_chip_set_chip(girq, &adnp_irq_chip);
+
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
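
The adnp hunks above show the pattern that repeats throughout this series: once the irq_chip is const and flagged IRQCHIP_IMMUTABLE, the driver itself must pair gpiochip_enable_irq()/gpiochip_disable_irq() with its unmask/mask paths. A minimal sketch of that pairing (generic names, not taken from any driver here):

	static void example_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* device-specific masking of the line would go here */
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));
	}

	static void example_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_enable_irq(gc, irqd_to_hwirq(d));
		/* device-specific unmasking of the line would go here */
	}

	static const struct irq_chip example_irq_chip = {
		.name		= "example",
		.irq_mask	= example_irq_mask,
		.irq_unmask	= example_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};
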
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 6d17d262ad91..20a686f12df7 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -10,19 +10,20 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/machine.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+
#define AGGREGATOR_MAX_GPIOS 512
/*
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index b59fae993626..54d7c450c596 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -7,7 +7,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
-#include <linux/of_gpio.h> /* For of_mm_gpio_chip */
+#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/platform_device.h>
#define ALTERA_GPIO_MAX_NGPIO 32
@@ -24,14 +24,12 @@
* @interrupt_trigger : specifies the hardware configured IRQ trigger type
* (rising, falling, both, high)
* @mapped_irq : kernel mapped irq number.
-* @irq_chip : IRQ chip configuration
*/
struct altera_gpio_chip {
struct of_mm_gpio_chip mmchip;
raw_spinlock_t gpio_lock;
int interrupt_trigger;
int mapped_irq;
- struct irq_chip irq_chip;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
@@ -43,6 +41,7 @@ static void altera_gpio_irq_unmask(struct irq_data *d)
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
mm_gc = &altera_gc->mmchip;
+ gpiochip_enable_irq(&mm_gc->gc, irqd_to_hwirq(d));
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
@@ -68,6 +67,7 @@ static void altera_gpio_irq_mask(struct irq_data *d)
intmask &= ~BIT(irqd_to_hwirq(d));
writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+ gpiochip_disable_irq(&mm_gc->gc, irqd_to_hwirq(d));
}
/*
@@ -233,6 +233,17 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static const struct irq_chip altera_gpio_irq_chip = {
+ .name = "altera-gpio",
+ .irq_mask = altera_gpio_irq_mask,
+ .irq_unmask = altera_gpio_irq_unmask,
+ .irq_set_type = altera_gpio_irq_set_type,
+ .irq_startup = altera_gpio_irq_startup,
+ .irq_shutdown = altera_gpio_irq_mask,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int altera_gpio_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -278,15 +289,9 @@ static int altera_gpio_probe(struct platform_device *pdev)
}
altera_gc->interrupt_trigger = reg;
- altera_gc->irq_chip.name = "altera-gpio";
- altera_gc->irq_chip.irq_mask = altera_gpio_irq_mask;
- altera_gc->irq_chip.irq_unmask = altera_gpio_irq_unmask;
- altera_gc->irq_chip.irq_set_type = altera_gpio_irq_set_type;
- altera_gc->irq_chip.irq_startup = altera_gpio_irq_startup;
- altera_gc->irq_chip.irq_shutdown = altera_gpio_irq_mask;
-
girq = &altera_gc->mmchip.gc.irq;
- girq->chip = &altera_gc->irq_chip;
+ gpio_irq_chip_set_chip(girq, &altera_gpio_irq_chip);
+
if (altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
girq->parent_handler = altera_gpio_irq_leveL_high_handler;
else
@@ -330,7 +335,7 @@ MODULE_DEVICE_TABLE(of, altera_gpio_of_match);
static struct platform_driver altera_gpio_driver = {
.driver = {
.name = "altera_gpio",
- .of_match_table = of_match_ptr(altera_gpio_of_match),
+ .of_match_table = altera_gpio_of_match,
},
.probe = altera_gpio_probe,
.remove = altera_gpio_remove,
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 454cefbeecf0..72755fee6478 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -29,7 +30,7 @@ struct aspeed_sgpio_pdata {
struct aspeed_sgpio {
struct gpio_chip chip;
- struct irq_chip intc;
+ struct device *dev;
struct clk *pclk;
raw_spinlock_t lock;
void __iomem *base;
@@ -296,6 +297,10 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
addr = bank_reg(gpio, bank, reg_irq_enable);
+ /* Unmasking the IRQ */
+ if (set)
+ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
+
raw_spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(addr);
@@ -307,6 +312,12 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
iowrite32(reg, addr);
raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ /* Masking the IRQ */
+ if (!set)
+ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d));
+
+
}
static void aspeed_sgpio_irq_mask(struct irq_data *d)
@@ -401,6 +412,27 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
+static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ const struct aspeed_sgpio_bank *bank;
+ struct aspeed_sgpio *gpio;
+ u32 bit;
+ int offset;
+
+ irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+ seq_printf(p, dev_name(gpio->dev));
+}
+
+static const struct irq_chip aspeed_sgpio_irq_chip = {
+ .irq_ack = aspeed_sgpio_irq_ack,
+ .irq_mask = aspeed_sgpio_irq_mask,
+ .irq_unmask = aspeed_sgpio_irq_unmask,
+ .irq_set_type = aspeed_sgpio_set_type,
+ .irq_print_chip = aspeed_sgpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
struct platform_device *pdev)
{
@@ -423,14 +455,8 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
}
- gpio->intc.name = dev_name(&pdev->dev);
- gpio->intc.irq_ack = aspeed_sgpio_irq_ack;
- gpio->intc.irq_mask = aspeed_sgpio_irq_mask;
- gpio->intc.irq_unmask = aspeed_sgpio_irq_unmask;
- gpio->intc.irq_set_type = aspeed_sgpio_set_type;
-
irq = &gpio->chip.irq;
- irq->chip = &gpio->intc;
+ gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip);
irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
@@ -524,6 +550,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
+ gpio->dev = &pdev->dev;
+
pdata = device_get_match_data(&pdev->dev);
if (!pdata)
return -EINVAL;
@@ -609,4 +637,3 @@ static struct platform_driver aspeed_sgpio_driver = {
module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe);
MODULE_DESCRIPTION("Aspeed Serial GPIO Driver");
-MODULE_LICENSE("GPL");
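
Because the aspeed-sgpio irq_chip is now a shared const structure, the per-device name shown in /proc/interrupts comes from the .irq_print_chip callback instead of a per-instance .name field. The general shape of such a callback (generic sketch; struct example_gpio is hypothetical, and seq_puts() is used here to avoid treating the device name as a format string):

	static void example_irq_print_chip(struct irq_data *d, struct seq_file *p)
	{
		struct example_gpio *priv =
			gpiochip_get_data(irq_data_get_irq_chip_data(d));

		seq_puts(p, dev_name(priv->dev));
	}
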
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index a94da80d3a95..da33bbbdacb9 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -53,7 +54,7 @@ struct aspeed_gpio_config {
*/
struct aspeed_gpio {
struct gpio_chip chip;
- struct irq_chip irqc;
+ struct device *dev;
raw_spinlock_t lock;
void __iomem *base;
int irq;
@@ -566,6 +567,10 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
addr = bank_reg(gpio, bank, reg_irq_enable);
+ /* Unmasking the IRQ */
+ if (set)
+ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
+
raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
@@ -579,6 +584,10 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (copro)
aspeed_gpio_copro_release(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ /* Masking the IRQ */
+ if (!set)
+ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d));
}
static void aspeed_gpio_irq_mask(struct irq_data *d)
@@ -1080,6 +1089,30 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
+static void aspeed_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ const struct aspeed_gpio_bank *bank;
+ struct aspeed_gpio *gpio;
+ u32 bit;
+ int rc, offset;
+
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ if (rc)
+ return;
+
+ seq_printf(p, dev_name(gpio->dev));
+}
+
+static const struct irq_chip aspeed_gpio_irq_chip = {
+ .irq_ack = aspeed_gpio_irq_ack,
+ .irq_mask = aspeed_gpio_irq_mask,
+ .irq_unmask = aspeed_gpio_irq_unmask,
+ .irq_set_type = aspeed_gpio_set_type,
+ .irq_print_chip = aspeed_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
@@ -1137,8 +1170,9 @@ MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
+ struct gpio_irq_chip *girq;
struct aspeed_gpio *gpio;
- int rc, i, banks, err;
+ int rc, irq, i, banks, err;
u32 ngpio;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -1149,6 +1183,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
+ gpio->dev = &pdev->dev;
+
raw_spin_lock_init(&gpio->lock);
gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node);
@@ -1201,31 +1237,23 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
}
- /* Optionally set up an irqchip if there is an IRQ */
- rc = platform_get_irq(pdev, 0);
- if (rc > 0) {
- struct gpio_irq_chip *girq;
-
- gpio->irq = rc;
- girq = &gpio->chip.irq;
- girq->chip = &gpio->irqc;
- girq->chip->name = dev_name(&pdev->dev);
- girq->chip->irq_ack = aspeed_gpio_irq_ack;
- girq->chip->irq_mask = aspeed_gpio_irq_mask;
- girq->chip->irq_unmask = aspeed_gpio_irq_unmask;
- girq->chip->irq_set_type = aspeed_gpio_set_type;
- girq->parent_handler = aspeed_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = gpio->irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
- girq->init_valid_mask = aspeed_init_irq_valid_mask;
- }
+ /* Set up an irqchip */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ gpio->irq = irq;
+ girq = &gpio->chip.irq;
+ gpio_irq_chip_set_chip(girq, &aspeed_gpio_irq_chip);
+
+ girq->parent_handler = aspeed_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = gpio->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->init_valid_mask = aspeed_init_irq_valid_mask;
gpio->offset_timer =
devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 3958c6d97639..aa0a954b8392 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -71,6 +71,7 @@ static void ath79_gpio_irq_unmask(struct irq_data *data)
u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
+ gpiochip_enable_irq(&ctrl->gc, irqd_to_hwirq(data));
raw_spin_lock_irqsave(&ctrl->lock, flags);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -85,6 +86,7 @@ static void ath79_gpio_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ctrl->lock, flags);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+ gpiochip_disable_irq(&ctrl->gc, irqd_to_hwirq(data));
}
static void ath79_gpio_irq_enable(struct irq_data *data)
@@ -169,13 +171,15 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
return 0;
}
-static struct irq_chip ath79_gpio_irqchip = {
+static const struct irq_chip ath79_gpio_irqchip = {
.name = "gpio-ath79",
.irq_enable = ath79_gpio_irq_enable,
.irq_disable = ath79_gpio_irq_disable,
.irq_mask = ath79_gpio_irq_mask,
.irq_unmask = ath79_gpio_irq_unmask,
.irq_set_type = ath79_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void ath79_gpio_irq_handler(struct irq_desc *desc)
@@ -274,7 +278,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
/* Optional interrupt setup */
if (!np || of_property_read_bool(np, "interrupt-controller")) {
girq = &ctrl->gc.irq;
- girq->chip = &ath79_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip);
girq->parent_handler = ath79_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index 137aea49ba02..3720b90cad10 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -70,6 +70,7 @@ static void cdns_gpio_irq_mask(struct irq_data *d)
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
iowrite32(BIT(d->hwirq), cgpio->regs + CDNS_GPIO_IRQ_DIS);
+ gpiochip_disable_irq(chip, irqd_to_hwirq(d));
}
static void cdns_gpio_irq_unmask(struct irq_data *d)
@@ -77,6 +78,7 @@ static void cdns_gpio_irq_unmask(struct irq_data *d)
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
+ gpiochip_enable_irq(chip, irqd_to_hwirq(d));
iowrite32(BIT(d->hwirq), cgpio->regs + CDNS_GPIO_IRQ_EN);
}
@@ -138,11 +140,13 @@ static void cdns_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
-static struct irq_chip cdns_gpio_irqchip = {
+static const struct irq_chip cdns_gpio_irqchip = {
.name = "cdns-gpio",
.irq_mask = cdns_gpio_irq_mask,
.irq_unmask = cdns_gpio_irq_unmask,
- .irq_set_type = cdns_gpio_irq_set_type
+ .irq_set_type = cdns_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int cdns_gpio_probe(struct platform_device *pdev)
@@ -222,7 +226,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
girq = &cgpio->gc.irq;
- girq->chip = &cdns_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &cdns_gpio_irqchip);
girq->parent_handler = cdns_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index fa51a91afa54..aaaf61dc2632 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -24,8 +24,6 @@
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
-#include <asm-generic/gpio.h>
-
#define MAX_REGS_BANKS 5
#define MAX_INT_PER_BANK 32
@@ -252,7 +250,6 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
#ifdef CONFIG_OF_GPIO
- chips->chip.of_gpio_n_cells = 2;
chips->chip.parent = dev;
chips->chip.request = gpiochip_generic_request;
chips->chip.free = gpiochip_generic_free;
@@ -325,7 +322,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -534,7 +531,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
}
/*
- * Arrange gpio_to_irq() support, handling either direct IRQs or
+ * Arrange gpiod_to_irq() support, handling either direct IRQs or
* banked IRQs. Having GPIOs in the first GPIO bank use direct
* IRQs, while the others use banked IRQs, would need some setup
* tweaks to recognize hardware which can do that.
@@ -642,9 +639,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
context->set_falling = readl_relaxed(&g->set_falling);
}
- /* Clear Bank interrupt enable bit */
- writel_relaxed(0, base + BINTEN);
-
/* Clear all interrupt status registers */
writel_relaxed(GENMASK(31, 0), &g->intstat);
}
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 8d722e026e9c..84352a6f4973 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -91,7 +91,6 @@ enum sprd_eic_type {
struct sprd_eic {
struct gpio_chip chip;
- struct irq_chip intc;
void __iomem *base[SPRD_EIC_MAX_BANK];
enum sprd_eic_type type;
spinlock_t lock;
@@ -255,6 +254,8 @@ static void sprd_eic_irq_mask(struct irq_data *data)
default:
dev_err(chip->parent, "Unsupported EIC type.\n");
}
+
+ gpiochip_disable_irq(chip, offset);
}
static void sprd_eic_irq_unmask(struct irq_data *data)
@@ -263,6 +264,8 @@ static void sprd_eic_irq_unmask(struct irq_data *data)
struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
u32 offset = irqd_to_hwirq(data);
+ gpiochip_enable_irq(chip, offset);
+
switch (sprd_eic->type) {
case SPRD_EIC_DEBOUNCE:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IE, 1);
@@ -564,6 +567,15 @@ static void sprd_eic_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
+static const struct irq_chip sprd_eic_irq = {
+ .name = "sprd-eic",
+ .irq_ack = sprd_eic_irq_ack,
+ .irq_mask = sprd_eic_irq_mask,
+ .irq_unmask = sprd_eic_irq_unmask,
+ .irq_set_type = sprd_eic_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
static int sprd_eic_probe(struct platform_device *pdev)
{
const struct sprd_eic_variant_data *pdata;
@@ -626,15 +638,8 @@ static int sprd_eic_probe(struct platform_device *pdev)
break;
}
- sprd_eic->intc.name = dev_name(&pdev->dev);
- sprd_eic->intc.irq_ack = sprd_eic_irq_ack;
- sprd_eic->intc.irq_mask = sprd_eic_irq_mask;
- sprd_eic->intc.irq_unmask = sprd_eic_irq_unmask;
- sprd_eic->intc.irq_set_type = sprd_eic_irq_set_type;
- sprd_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
irq = &sprd_eic->chip.irq;
- irq->chip = &sprd_eic->intc;
+ gpio_irq_chip_set_chip(irq, &sprd_eic_irq);
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->parent_handler = sprd_eic_irq_handler;
diff --git a/drivers/gpio/gpio-elkhartlake.c b/drivers/gpio/gpio-elkhartlake.c
new file mode 100644
index 000000000000..a9c8b16215be
--- /dev/null
+++ b/drivers/gpio/gpio-elkhartlake.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Elkhart Lake PSE GPIO driver
+ *
+ * Copyright (c) 2023 Intel Corporation.
+ *
+ * Authors: Pandith N <pandith.n@intel.com>
+ * Raag Jadav <raag.jadav@intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include "gpio-tangier.h"
+
+/* Each Intel EHL PSE GPIO Controller has 30 GPIO pins */
+#define EHL_PSE_NGPIO 30
+
+static int ehl_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tng_gpio *priv;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
+
+ priv->dev = dev;
+ priv->irq = irq;
+
+ priv->info.base = -1;
+ priv->info.ngpio = EHL_PSE_NGPIO;
+
+ priv->wake_regs.gwmr = GWMR_EHL;
+ priv->wake_regs.gwsr = GWSR_EHL;
+ priv->wake_regs.gsir = GSIR_EHL;
+
+ ret = devm_tng_gpio_probe(dev, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "tng_gpio_probe error\n");
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+}
+
+static int ehl_gpio_suspend(struct device *dev)
+{
+ return tng_gpio_suspend(dev);
+}
+
+static int ehl_gpio_resume(struct device *dev)
+{
+ return tng_gpio_resume(dev);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ehl_gpio_pm_ops, ehl_gpio_suspend, ehl_gpio_resume);
+
+static const struct platform_device_id ehl_gpio_ids[] = {
+ { "gpio-elkhartlake" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ehl_gpio_ids);
+
+static struct platform_driver ehl_gpio_driver = {
+ .driver = {
+ .name = "gpio-elkhartlake",
+ .pm = pm_sleep_ptr(&ehl_gpio_pm_ops),
+ },
+ .probe = ehl_gpio_probe,
+ .id_table = ehl_gpio_ids,
+};
+module_platform_driver(ehl_gpio_driver);
+
+MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
+MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
+MODULE_DESCRIPTION("Intel Elkhart Lake PSE GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(GPIO_TANGIER);
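
The new Elkhart Lake driver is thin glue around the shared gpio-tangier library, which is not part of this excerpt; MODULE_IMPORT_NS(GPIO_TANGIER) lets it link against symbols that library presumably exports along these lines (assumed, for illustration):

	/* In gpio-tangier.c (not shown here): */
	EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER);
	EXPORT_SYMBOL_NS_GPL(tng_gpio_suspend, GPIO_TANGIER);
	EXPORT_SYMBOL_NS_GPL(tng_gpio_resume, GPIO_TANGIER);
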
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 2e1779709113..6cedf46efec6 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
+#include <linux/seq_file.h>
#define EP93XX_GPIO_F_INT_STATUS 0x5c
#define EP93XX_GPIO_A_INT_STATUS 0xa0
@@ -40,7 +41,6 @@
#define EP93XX_GPIO_F_IRQ_BASE 80
struct ep93xx_gpio_irq_chip {
- struct irq_chip ic;
u8 irq_offset;
u8 int_unmasked;
u8 int_enabled;
@@ -148,7 +148,7 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
*/
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned int irq = irq_desc_get_irq(desc);
- int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
+ int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
chained_irq_enter(irqchip, desc);
@@ -185,6 +185,7 @@ static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
ep93xx_gpio_update_int_params(epg, eic);
writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
@@ -195,6 +196,7 @@ static void ep93xx_gpio_irq_mask(struct irq_data *d)
eic->int_unmasked &= ~BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, eic);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
@@ -203,6 +205,7 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
eic->int_unmasked |= BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, eic);
}
@@ -320,15 +323,25 @@ static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
return 0;
}
-static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
+static void ep93xx_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
- ic->irq_ack = ep93xx_gpio_irq_ack;
- ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
- ic->irq_mask = ep93xx_gpio_irq_mask;
- ic->irq_unmask = ep93xx_gpio_irq_unmask;
- ic->irq_set_type = ep93xx_gpio_irq_type;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, dev_name(gc->parent));
}
+static const struct irq_chip gpio_eic_irq_chip = {
+ .name = "ep93xx-gpio-eic",
+ .irq_ack = ep93xx_gpio_irq_ack,
+ .irq_mask = ep93xx_gpio_irq_mask,
+ .irq_unmask = ep93xx_gpio_irq_unmask,
+ .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
+ .irq_set_type = ep93xx_gpio_irq_type,
+ .irq_print_chip = ep93xx_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
struct platform_device *pdev,
struct ep93xx_gpio *epg,
@@ -350,8 +363,6 @@ static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
girq = &gc->irq;
if (bank->has_irq || bank->has_hierarchical_irq) {
- struct irq_chip *ic;
-
gc->set_config = ep93xx_gpio_set_config;
egc->eic = devm_kcalloc(dev, 1,
sizeof(*egc->eic),
@@ -359,12 +370,7 @@ static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
if (!egc->eic)
return -ENOMEM;
egc->eic->irq_offset = bank->irq;
- ic = &egc->eic->ic;
- ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
- if (!ic->name)
- return -ENOMEM;
- ep93xx_init_irq_chip(dev, ic);
- girq->chip = ic;
+ gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip);
}
if (bank->has_irq) {
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 2728672ef9f8..31e26072f6ae 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -349,7 +349,7 @@ static const struct of_device_id ftgpio_gpio_of_match[] = {
static struct platform_driver ftgpio_gpio_driver = {
.driver = {
.name = "ftgpio010-gpio",
- .of_match_table = of_match_ptr(ftgpio_gpio_of_match),
+ .of_match_table = ftgpio_gpio_of_match,
},
.probe = ftgpio_gpio_probe,
.remove = ftgpio_gpio_remove,
diff --git a/drivers/gpio/gpio-fxl6408.c b/drivers/gpio/gpio-fxl6408.c
new file mode 100644
index 000000000000..208fa851e82a
--- /dev/null
+++ b/drivers/gpio/gpio-fxl6408.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FXL6408 GPIO driver
+ *
+ * Copyright 2023 Toradex
+ *
+ * Author: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/regmap.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define FXL6408_REG_DEVICE_ID 0x01
+#define FXL6408_MF_FAIRCHILD 0b101
+#define FXL6408_MF_SHIFT 5
+
+/* Bits set here indicate that the GPIO is an output. */
+#define FXL6408_REG_IO_DIR 0x03
+
+/*
+ * Bits set here, when the corresponding bit of IO_DIR is set, drive
+ * the output high instead of low.
+ */
+#define FXL6408_REG_OUTPUT 0x05
+
+/* Bits here make the output High-Z, instead of the OUTPUT value. */
+#define FXL6408_REG_OUTPUT_HIGH_Z 0x07
+
+/* Returns the current status (1 = HIGH) of the input pins. */
+#define FXL6408_REG_INPUT_STATUS 0x0f
+
+/*
+ * Return the current interrupt status
+ * This bit is HIGH if input GPIO != default state (register 09h).
+ * The flag is cleared after being read (bit returns to 0).
+ * The input must go back to default state and change again before this flag is raised again.
+ */
+#define FXL6408_REG_INT_STS 0x13
+
+#define FXL6408_NGPIO 8
+
+static const struct regmap_range rd_range[] = {
+ { FXL6408_REG_DEVICE_ID, FXL6408_REG_DEVICE_ID },
+ { FXL6408_REG_IO_DIR, FXL6408_REG_OUTPUT },
+ { FXL6408_REG_INPUT_STATUS, FXL6408_REG_INPUT_STATUS },
+};
+
+static const struct regmap_range wr_range[] = {
+ { FXL6408_REG_DEVICE_ID, FXL6408_REG_DEVICE_ID },
+ { FXL6408_REG_IO_DIR, FXL6408_REG_OUTPUT },
+ { FXL6408_REG_OUTPUT_HIGH_Z, FXL6408_REG_OUTPUT_HIGH_Z },
+};
+
+static const struct regmap_range volatile_range[] = {
+ { FXL6408_REG_DEVICE_ID, FXL6408_REG_DEVICE_ID },
+ { FXL6408_REG_INPUT_STATUS, FXL6408_REG_INPUT_STATUS },
+};
+
+static const struct regmap_access_table rd_table = {
+ .yes_ranges = rd_range,
+ .n_yes_ranges = ARRAY_SIZE(rd_range),
+};
+
+static const struct regmap_access_table wr_table = {
+ .yes_ranges = wr_range,
+ .n_yes_ranges = ARRAY_SIZE(wr_range),
+};
+
+static const struct regmap_access_table volatile_table = {
+ .yes_ranges = volatile_range,
+ .n_yes_ranges = ARRAY_SIZE(volatile_range),
+};
+
+static const struct regmap_config regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = FXL6408_REG_INT_STS,
+ .wr_table = &wr_table,
+ .rd_table = &rd_table,
+ .volatile_table = &volatile_table,
+
+ .cache_type = REGCACHE_RBTREE,
+ .num_reg_defaults_raw = FXL6408_REG_INT_STS + 1,
+};
+
+static int fxl6408_identify(struct device *dev, struct regmap *regmap)
+{
+ int val, ret;
+
+ ret = regmap_read(regmap, FXL6408_REG_DEVICE_ID, &val);
+ if (ret)
+ return dev_err_probe(dev, ret, "error reading DEVICE_ID\n");
+ if (val >> FXL6408_MF_SHIFT != FXL6408_MF_FAIRCHILD)
+ return dev_err_probe(dev, -ENODEV, "invalid device id 0x%02x\n", val);
+
+ return 0;
+}
+
+static int fxl6408_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int ret;
+ struct gpio_regmap_config gpio_config = {
+ .parent = dev,
+ .ngpio = FXL6408_NGPIO,
+ .reg_dat_base = GPIO_REGMAP_ADDR(FXL6408_REG_INPUT_STATUS),
+ .reg_set_base = GPIO_REGMAP_ADDR(FXL6408_REG_OUTPUT),
+ .reg_dir_out_base = GPIO_REGMAP_ADDR(FXL6408_REG_IO_DIR),
+ .ngpio_per_reg = FXL6408_NGPIO,
+ };
+
+ gpio_config.regmap = devm_regmap_init_i2c(client, &regmap);
+ if (IS_ERR(gpio_config.regmap))
+ return dev_err_probe(dev, PTR_ERR(gpio_config.regmap),
+ "failed to allocate register map\n");
+
+ ret = fxl6408_identify(dev, gpio_config.regmap);
+ if (ret)
+ return ret;
+
+ /* Disable High-Z of outputs, so that our OUTPUT updates actually take effect. */
+ ret = regmap_write(gpio_config.regmap, FXL6408_REG_OUTPUT_HIGH_Z, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to write 'output high Z' register\n");
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+}
+
+static const __maybe_unused struct of_device_id fxl6408_dt_ids[] = {
+ { .compatible = "fcs,fxl6408" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxl6408_dt_ids);
+
+static const struct i2c_device_id fxl6408_id[] = {
+ { "fxl6408", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fxl6408_id);
+
+static struct i2c_driver fxl6408_driver = {
+ .driver = {
+ .name = "fxl6408",
+ .of_match_table = fxl6408_dt_ids,
+ },
+ .probe_new = fxl6408_probe,
+ .id_table = fxl6408_id,
+};
+module_i2c_driver(fxl6408_driver);
+
+MODULE_AUTHOR("Emanuele Ghidoli <emanuele.ghidoli@toradex.com>");
+MODULE_DESCRIPTION("FXL6408 GPIO driver");
+MODULE_LICENSE("GPL");
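
Note on the regmap setup above: IO_DIR and OUTPUT are cached (REGCACHE_RBTREE), while DEVICE_ID and INPUT_STATUS sit in the volatile table, so pin-state reads always go out on the I2C bus. A hypothetical helper (not part of the driver) reading the live input state through the same map:

	static int fxl6408_read_inputs(struct regmap *map, unsigned int *status)
	{
		/* INPUT_STATUS is volatile, so this always performs an I2C transfer */
		return regmap_read(map, FXL6408_REG_INPUT_STATUS, status);
	}
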
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index f6a3de99f7db..7bd4c2a4cc11 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -81,7 +81,6 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
gc->base = -1;
gc->ngpio = (u16)(uintptr_t)of_device_get_match_data(&pdev->dev);
- gc->of_gpio_n_cells = 2;
/* This function adds a memory mapped GPIO chip */
ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 2689671b6b01..43d823a56e59 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -8,13 +8,13 @@
*/
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
#include "gpio-i8255.h"
@@ -30,83 +30,22 @@ MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
#define GPIOMM_NUM_PPI 2
-/**
- * struct gpiomm_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @ppi_state: Programmable Peripheral Interface group states
- * @ppi: Programmable Peripheral Interface groups
- */
-struct gpiomm_gpio {
- struct gpio_chip chip;
- struct i8255_state ppi_state[GPIOMM_NUM_PPI];
- struct i8255 __iomem *ppi;
+static const struct regmap_range gpiomm_volatile_ranges[] = {
+ i8255_volatile_regmap_range(0x0), i8255_volatile_regmap_range(0x4),
+};
+static const struct regmap_access_table gpiomm_volatile_table = {
+ .yes_ranges = gpiomm_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(gpiomm_volatile_ranges),
+};
+static const struct regmap_config gpiomm_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .max_register = 0x7,
+ .volatile_table = &gpiomm_volatile_table,
+ .cache_type = REGCACHE_FLAT,
};
-
-static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- if (i8255_get_direction(gpiommgpio->ppi_state, offset))
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_direction_input(gpiommgpio->ppi, gpiommgpio->ppi_state, offset);
-
- return 0;
-}
-
-static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_direction_output(gpiommgpio->ppi, gpiommgpio->ppi_state, offset,
- value);
-
- return 0;
-}
-
-static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- return i8255_get(gpiommgpio->ppi, offset);
-}
-
-static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_get_multiple(gpiommgpio->ppi, mask, bits, chip->ngpio);
-
- return 0;
-}
-
-static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_set(gpiommgpio->ppi, gpiommgpio->ppi_state, offset, value);
-}
-
-static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_set_multiple(gpiommgpio->ppi, gpiommgpio->ppi_state, mask, bits,
- chip->ngpio);
-}
#define GPIOMM_NGPIO 48
static const char *gpiomm_names[GPIOMM_NGPIO] = {
@@ -120,30 +59,11 @@ static const char *gpiomm_names[GPIOMM_NGPIO] = {
"Port 2C2", "Port 2C3", "Port 2C4", "Port 2C5", "Port 2C6", "Port 2C7",
};
-static void gpiomm_init_dio(struct i8255 __iomem *const ppi,
- struct i8255_state *const ppi_state)
-{
- const unsigned long ngpio = 24;
- const unsigned long mask = GENMASK(ngpio - 1, 0);
- const unsigned long bits = 0;
- unsigned long i;
-
- /* Initialize all GPIO to output 0 */
- for (i = 0; i < GPIOMM_NUM_PPI; i++) {
- i8255_mode0_output(&ppi[i]);
- i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
- }
-}
-
static int gpiomm_probe(struct device *dev, unsigned int id)
{
- struct gpiomm_gpio *gpiommgpio;
const char *const name = dev_name(dev);
- int err;
-
- gpiommgpio = devm_kzalloc(dev, sizeof(*gpiommgpio), GFP_KERNEL);
- if (!gpiommgpio)
- return -ENOMEM;
+ struct i8255_regmap_config config = {};
+ void __iomem *regs;
if (!devm_request_region(dev, base[id], GPIOMM_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -151,34 +71,20 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- gpiommgpio->ppi = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
- if (!gpiommgpio->ppi)
+ regs = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
+ if (!regs)
return -ENOMEM;
- gpiommgpio->chip.label = name;
- gpiommgpio->chip.parent = dev;
- gpiommgpio->chip.owner = THIS_MODULE;
- gpiommgpio->chip.base = -1;
- gpiommgpio->chip.ngpio = GPIOMM_NGPIO;
- gpiommgpio->chip.names = gpiomm_names;
- gpiommgpio->chip.get_direction = gpiomm_gpio_get_direction;
- gpiommgpio->chip.direction_input = gpiomm_gpio_direction_input;
- gpiommgpio->chip.direction_output = gpiomm_gpio_direction_output;
- gpiommgpio->chip.get = gpiomm_gpio_get;
- gpiommgpio->chip.get_multiple = gpiomm_gpio_get_multiple;
- gpiommgpio->chip.set = gpiomm_gpio_set;
- gpiommgpio->chip.set_multiple = gpiomm_gpio_set_multiple;
-
- i8255_state_init(gpiommgpio->ppi_state, GPIOMM_NUM_PPI);
- gpiomm_init_dio(gpiommgpio->ppi, gpiommgpio->ppi_state);
-
- err = devm_gpiochip_add_data(dev, &gpiommgpio->chip, gpiommgpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
+ config.map = devm_regmap_init_mmio(dev, regs, &gpiomm_regmap_config);
+ if (IS_ERR(config.map))
+ return dev_err_probe(dev, PTR_ERR(config.map),
+ "Unable to initialize register map\n");
+
+ config.parent = dev;
+ config.num_ppi = GPIOMM_NUM_PPI;
+ config.names = gpiomm_names;
- return 0;
+ return devm_i8255_regmap_register(dev, &config);
}
static struct isa_driver gpiomm_driver = {
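
The register description handed to regmap above follows the usual i8255 layout, one 4-byte bank per PPI: the three data ports are volatile (live pin state) while the control word stays in the flat cache, which is presumably what lets the i8255 helper read direction back without touching the hardware. Assumed layout, offsets relative to base[id] (illustrative only):

	/*
	 * 0x0-0x2  PPI 0 ports A/B/C   - volatile, always read from hardware
	 * 0x3      PPI 0 control word  - cached by REGCACHE_FLAT
	 * 0x4-0x6  PPI 1 ports A/B/C   - volatile
	 * 0x7      PPI 1 control word  - cached
	 */
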
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
index 55bd69043bf4..29a03de37fd8 100644
--- a/drivers/gpio/gpio-hisi.c
+++ b/drivers/gpio/gpio-hisi.c
@@ -37,7 +37,6 @@ struct hisi_gpio {
struct device *dev;
void __iomem *reg_base;
unsigned int line_num;
- struct irq_chip irq_chip;
int irq;
};
@@ -100,12 +99,14 @@ static void hisi_gpio_irq_set_mask(struct irq_data *d)
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_SET_WX, BIT(irqd_to_hwirq(d)));
+ gpiochip_disable_irq(chip, irqd_to_hwirq(d));
}
static void hisi_gpio_irq_clr_mask(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ gpiochip_enable_irq(chip, irqd_to_hwirq(d));
hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_CLR_WX, BIT(irqd_to_hwirq(d)));
}
@@ -191,20 +192,24 @@ static void hisi_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(irq_c, desc);
}
+static const struct irq_chip hisi_gpio_irq_chip = {
+ .name = "HISI-GPIO",
+ .irq_ack = hisi_gpio_set_ack,
+ .irq_mask = hisi_gpio_irq_set_mask,
+ .irq_unmask = hisi_gpio_irq_clr_mask,
+ .irq_set_type = hisi_gpio_irq_set_type,
+ .irq_enable = hisi_gpio_irq_enable,
+ .irq_disable = hisi_gpio_irq_disable,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
{
struct gpio_chip *chip = &hisi_gpio->chip;
struct gpio_irq_chip *girq_chip = &chip->irq;
- /* Set hooks for irq_chip */
- hisi_gpio->irq_chip.irq_ack = hisi_gpio_set_ack;
- hisi_gpio->irq_chip.irq_mask = hisi_gpio_irq_set_mask;
- hisi_gpio->irq_chip.irq_unmask = hisi_gpio_irq_clr_mask;
- hisi_gpio->irq_chip.irq_set_type = hisi_gpio_irq_set_type;
- hisi_gpio->irq_chip.irq_enable = hisi_gpio_irq_enable;
- hisi_gpio->irq_chip.irq_disable = hisi_gpio_irq_disable;
-
- girq_chip->chip = &hisi_gpio->irq_chip;
+ gpio_irq_chip_set_chip(girq_chip, &hisi_gpio_irq_chip);
girq_chip->default_type = IRQ_TYPE_NONE;
girq_chip->num_parents = 1;
girq_chip->parents = &hisi_gpio->irq;
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index 4e13e937f832..c208ac1c54a6 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
/*
@@ -48,7 +49,7 @@
struct hlwd_gpio {
struct gpio_chip gpioc;
- struct irq_chip irqc;
+ struct device *dev;
void __iomem *regs;
int irq;
u32 edge_emulation;
@@ -123,6 +124,7 @@ static void hlwd_gpio_irq_mask(struct irq_data *data)
mask &= ~BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
+ gpiochip_disable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
}
static void hlwd_gpio_irq_unmask(struct irq_data *data)
@@ -132,6 +134,7 @@ static void hlwd_gpio_irq_unmask(struct irq_data *data)
unsigned long flags;
u32 mask;
+ gpiochip_enable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask |= BIT(data->hwirq);
@@ -202,6 +205,24 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
+static void hlwd_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct hlwd_gpio *hlwd =
+ gpiochip_get_data(irq_data_get_irq_chip_data(data));
+
+ seq_printf(p, dev_name(hlwd->dev));
+}
+
+static const struct irq_chip hlwd_gpio_irq_chip = {
+ .irq_mask = hlwd_gpio_irq_mask,
+ .irq_unmask = hlwd_gpio_irq_unmask,
+ .irq_enable = hlwd_gpio_irq_enable,
+ .irq_set_type = hlwd_gpio_irq_set_type,
+ .irq_print_chip = hlwd_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int hlwd_gpio_probe(struct platform_device *pdev)
{
struct hlwd_gpio *hlwd;
@@ -216,6 +237,8 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
if (IS_ERR(hlwd->regs))
return PTR_ERR(hlwd->regs);
+ hlwd->dev = &pdev->dev;
+
/*
* Claim all GPIOs using the OWNER register. This will not work on
* systems where the AHBPROT memory firewall hasn't been configured to
@@ -259,14 +282,8 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
return hlwd->irq;
}
- hlwd->irqc.name = dev_name(&pdev->dev);
- hlwd->irqc.irq_mask = hlwd_gpio_irq_mask;
- hlwd->irqc.irq_unmask = hlwd_gpio_irq_unmask;
- hlwd->irqc.irq_enable = hlwd_gpio_irq_enable;
- hlwd->irqc.irq_set_type = hlwd_gpio_irq_set_type;
-
girq = &hlwd->gpioc.irq;
- girq->chip = &hlwd->irqc;
+ gpio_irq_chip_set_chip(girq, &hlwd_gpio_irq_chip);
girq->parent_handler = hlwd_gpio_irqhandler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
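
Because the irq_chip is now const and shared, hlwd can no longer stash the per-device name in irqc.name; it is reported at runtime through the new .irq_print_chip hook instead. A functionally equivalent hook (hypothetical foo_ name, reusing struct hlwd_gpio and the includes from the hunk above) could also use seq_puts(), which avoids handing dev_name() to seq_printf() as a format string:

static void foo_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
	struct hlwd_gpio *hlwd =
		gpiochip_get_data(irq_data_get_irq_chip_data(data));

	seq_puts(p, dev_name(hlwd->dev));
}
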
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
index 9b97db418df1..64ab80fc4a1e 100644
--- a/drivers/gpio/gpio-i8255.c
+++ b/drivers/gpio/gpio-i8255.c
@@ -3,48 +3,43 @@
* Intel 8255 Programmable Peripheral Interface
* Copyright (C) 2022 William Breathitt Gray
*/
-#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/io.h>
+#include <linux/gpio/regmap.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
+#include <linux/regmap.h>
#include "gpio-i8255.h"
+#define I8255_NGPIO 24
+#define I8255_NGPIO_PER_REG 8
#define I8255_CONTROL_PORTC_LOWER_DIRECTION BIT(0)
#define I8255_CONTROL_PORTB_DIRECTION BIT(1)
#define I8255_CONTROL_PORTC_UPPER_DIRECTION BIT(3)
#define I8255_CONTROL_PORTA_DIRECTION BIT(4)
#define I8255_CONTROL_MODE_SET BIT(7)
-#define I8255_PORTA 0
-#define I8255_PORTB 1
-#define I8255_PORTC 2
-
-static int i8255_get_port(struct i8255 __iomem *const ppi,
- const unsigned long io_port, const unsigned long mask)
-{
- const unsigned long bank = io_port / 3;
- const unsigned long ppi_port = io_port % 3;
-
- return ioread8(&ppi[bank].port[ppi_port]) & mask;
-}
-
-static u8 i8255_direction_mask(const unsigned long offset)
+#define I8255_PORTA 0x0
+#define I8255_PORTB 0x1
+#define I8255_PORTC 0x2
+#define I8255_CONTROL 0x3
+#define I8255_REG_DAT_BASE I8255_PORTA
+#define I8255_REG_DIR_IN_BASE I8255_CONTROL
+
+static int i8255_direction_mask(const unsigned int offset)
{
- const unsigned long port_offset = offset % 8;
- const unsigned long io_port = offset / 8;
- const unsigned long ppi_port = io_port % 3;
+ const unsigned int stride = offset / I8255_NGPIO_PER_REG;
+ const unsigned int line = offset % I8255_NGPIO_PER_REG;
- switch (ppi_port) {
+ switch (stride) {
case I8255_PORTA:
return I8255_CONTROL_PORTA_DIRECTION;
case I8255_PORTB:
return I8255_CONTROL_PORTB_DIRECTION;
case I8255_PORTC:
/* Port C can be configured by nibble */
- if (port_offset >= 4)
+ if (line >= 4)
return I8255_CONTROL_PORTC_UPPER_DIRECTION;
return I8255_CONTROL_PORTC_LOWER_DIRECTION;
default:
@@ -53,234 +48,93 @@ static u8 i8255_direction_mask(const unsigned long offset)
}
}
-static void i8255_set_port(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long io_port,
- const unsigned long mask, const unsigned long bits)
+static int i8255_ppi_init(struct regmap *const map, const unsigned int base)
{
- const unsigned long bank = io_port / 3;
- const unsigned long ppi_port = io_port % 3;
- unsigned long flags;
- unsigned long out_state;
-
- spin_lock_irqsave(&state[bank].lock, flags);
-
- out_state = ioread8(&ppi[bank].port[ppi_port]);
- out_state = (out_state & ~mask) | (bits & mask);
- iowrite8(out_state, &ppi[bank].port[ppi_port]);
-
- spin_unlock_irqrestore(&state[bank].lock, flags);
+ int err;
+
+ /* Configure all ports to MODE 0 output mode */
+ err = regmap_write(map, base + I8255_CONTROL, I8255_CONTROL_MODE_SET);
+ if (err)
+ return err;
+
+ /* Initialize all GPIO to output 0 */
+ err = regmap_write(map, base + I8255_PORTA, 0x00);
+ if (err)
+ return err;
+ err = regmap_write(map, base + I8255_PORTB, 0x00);
+ if (err)
+ return err;
+ return regmap_write(map, base + I8255_PORTC, 0x00);
}
-/**
- * i8255_direction_input - configure signal offset as input
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @offset: signal offset to configure as input
- *
- * Configures a signal @offset as input for the respective Intel 8255
- * Programmable Peripheral Interface (@ppi) banks. The @state control_state
- * values are updated to reflect the new configuration.
- */
-void i8255_direction_input(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long offset)
+static int i8255_reg_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask)
{
- const unsigned long io_port = offset / 8;
- const unsigned long bank = io_port / 3;
- unsigned long flags;
-
- spin_lock_irqsave(&state[bank].lock, flags);
-
- state[bank].control_state |= I8255_CONTROL_MODE_SET;
- state[bank].control_state |= i8255_direction_mask(offset);
-
- iowrite8(state[bank].control_state, &ppi[bank].control);
-
- spin_unlock_irqrestore(&state[bank].lock, flags);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_direction_input, I8255);
-
-/**
- * i8255_direction_output - configure signal offset as output
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @offset: signal offset to configure as output
- * @value: signal value to output
- *
- * Configures a signal @offset as output for the respective Intel 8255
- * Programmable Peripheral Interface (@ppi) banks and sets the respective signal
- * output to the desired @value. The @state control_state values are updated to
- * reflect the new configuration.
- */
-void i8255_direction_output(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long offset,
- const unsigned long value)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long bank = io_port / 3;
- unsigned long flags;
-
- spin_lock_irqsave(&state[bank].lock, flags);
-
- state[bank].control_state |= I8255_CONTROL_MODE_SET;
- state[bank].control_state &= ~i8255_direction_mask(offset);
-
- iowrite8(state[bank].control_state, &ppi[bank].control);
-
- spin_unlock_irqrestore(&state[bank].lock, flags);
-
- i8255_set(ppi, state, offset, value);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_direction_output, I8255);
-
-/**
- * i8255_get - get signal value at signal offset
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @offset: offset of signal to get
- *
- * Returns the signal value (0=low, 1=high) for the signal at @offset for the
- * respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
- */
-int i8255_get(struct i8255 __iomem *const ppi, const unsigned long offset)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long offset_mask = BIT(offset % 8);
-
- return !!i8255_get_port(ppi, io_port, offset_mask);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_get, I8255);
-
-/**
- * i8255_get_direction - get the I/O direction for a signal offset
- * @state: devices states of the respective PPI banks
- * @offset: offset of signal to get direction
- *
- * Returns the signal direction (0=output, 1=input) for the signal at @offset.
- */
-int i8255_get_direction(const struct i8255_state *const state,
- const unsigned long offset)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long bank = io_port / 3;
-
- return !!(state[bank].control_state & i8255_direction_mask(offset));
-}
-EXPORT_SYMBOL_NS_GPL(i8255_get_direction, I8255);
-
-/**
- * i8255_get_multiple - get multiple signal values at multiple signal offsets
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @mask: mask of signals to get
- * @bits: bitmap to store signal values
- * @ngpio: number of GPIO signals of the respective PPI banks
- *
- * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask
- * for the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
- */
-void i8255_get_multiple(struct i8255 __iomem *const ppi,
- const unsigned long *const mask,
- unsigned long *const bits, const unsigned long ngpio)
-{
- unsigned long offset;
- unsigned long port_mask;
- unsigned long io_port;
- unsigned long port_state;
-
- bitmap_zero(bits, ngpio);
-
- for_each_set_clump8(offset, port_mask, mask, ngpio) {
- io_port = offset / 8;
- port_state = i8255_get_port(ppi, io_port, port_mask);
-
- bitmap_set_value8(bits, port_state, offset);
+ const unsigned int ppi = offset / I8255_NGPIO;
+ const unsigned int ppi_offset = offset % I8255_NGPIO;
+ const unsigned int stride = ppi_offset / I8255_NGPIO_PER_REG;
+ const unsigned int line = ppi_offset % I8255_NGPIO_PER_REG;
+
+ switch (base) {
+ case I8255_REG_DAT_BASE:
+ *reg = base + stride + ppi * 4;
+ *mask = BIT(line);
+ return 0;
+ case I8255_REG_DIR_IN_BASE:
+ *reg = base + ppi * 4;
+ *mask = i8255_direction_mask(ppi_offset);
+ return 0;
+ default:
+ /* Should never reach this path */
+ return -EINVAL;
}
}
-EXPORT_SYMBOL_NS_GPL(i8255_get_multiple, I8255);
/**
- * i8255_mode0_output - configure all PPI ports to MODE 0 output mode
- * @ppi: Intel 8255 Programmable Peripheral Interface bank
+ * devm_i8255_regmap_register - Register an i8255 GPIO controller
+ * @dev: device that is registering this i8255 GPIO device
+ * @config: configuration for i8255_regmap_config
*
- * Configures all Intel 8255 Programmable Peripheral Interface (@ppi) ports to
- * MODE 0 (Basic Input/Output) output mode.
+ * Registers an Intel 8255 Programmable Peripheral Interface GPIO controller.
+ * Returns 0 on success and negative error number on failure.
*/
-void i8255_mode0_output(struct i8255 __iomem *const ppi)
+int devm_i8255_regmap_register(struct device *const dev,
+ const struct i8255_regmap_config *const config)
{
- iowrite8(I8255_CONTROL_MODE_SET, &ppi->control);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_mode0_output, I8255);
+ struct gpio_regmap_config gpio_config = {0};
+ unsigned long i;
+ int err;
-/**
- * i8255_set - set signal value at signal offset
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @offset: offset of signal to set
- * @value: value of signal to set
- *
- * Assigns output @value for the signal at @offset for the respective Intel 8255
- * Programmable Peripheral Interface (@ppi) banks.
- */
-void i8255_set(struct i8255 __iomem *const ppi, struct i8255_state *const state,
- const unsigned long offset, const unsigned long value)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long port_offset = offset % 8;
- const unsigned long mask = BIT(port_offset);
- const unsigned long bits = value << port_offset;
+ if (!config->parent)
+ return -EINVAL;
- i8255_set_port(ppi, state, io_port, mask, bits);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_set, I8255);
+ if (!config->map)
+ return -EINVAL;
-/**
- * i8255_set_multiple - set signal values at multiple signal offsets
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @mask: mask of signals to set
- * @bits: bitmap of signal output values
- * @ngpio: number of GPIO signals of the respective PPI banks
- *
- * Assigns output values defined by @bits for the signals defined by @mask for
- * the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
- */
-void i8255_set_multiple(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long *const mask,
- const unsigned long *const bits,
- const unsigned long ngpio)
-{
- unsigned long offset;
- unsigned long port_mask;
- unsigned long io_port;
- unsigned long value;
+ if (!config->num_ppi)
+ return -EINVAL;
- for_each_set_clump8(offset, port_mask, mask, ngpio) {
- io_port = offset / 8;
- value = bitmap_get_value8(bits, offset);
- i8255_set_port(ppi, state, io_port, port_mask, value);
+ for (i = 0; i < config->num_ppi; i++) {
+ err = i8255_ppi_init(config->map, i * 4);
+ if (err)
+ return err;
}
-}
-EXPORT_SYMBOL_NS_GPL(i8255_set_multiple, I8255);
-
-/**
- * i8255_state_init - initialize i8255_state structure
- * @state: devices states of the respective PPI banks
- * @nbanks: number of Intel 8255 Programmable Peripheral Interface banks
- *
- * Initializes the @state of each Intel 8255 Programmable Peripheral Interface
- * bank for use in i8255 library functions.
- */
-void i8255_state_init(struct i8255_state *const state,
- const unsigned long nbanks)
-{
- unsigned long bank;
- for (bank = 0; bank < nbanks; bank++)
- spin_lock_init(&state[bank].lock);
+ gpio_config.parent = config->parent;
+ gpio_config.regmap = config->map;
+ gpio_config.ngpio = I8255_NGPIO * config->num_ppi;
+ gpio_config.names = config->names;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(I8255_REG_DAT_BASE);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(I8255_REG_DAT_BASE);
+ gpio_config.reg_dir_in_base = GPIO_REGMAP_ADDR(I8255_REG_DIR_IN_BASE);
+ gpio_config.ngpio_per_reg = I8255_NGPIO_PER_REG;
+ gpio_config.irq_domain = config->domain;
+ gpio_config.reg_mask_xlate = i8255_reg_mask_xlate;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
-EXPORT_SYMBOL_NS_GPL(i8255_state_init, I8255);
+EXPORT_SYMBOL_NS_GPL(devm_i8255_regmap_register, I8255);
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
diff --git a/drivers/gpio/gpio-i8255.h b/drivers/gpio/gpio-i8255.h
index d9084aae9446..9dcf639b94df 100644
--- a/drivers/gpio/gpio-i8255.h
+++ b/drivers/gpio/gpio-i8255.h
@@ -3,44 +3,32 @@
#ifndef _I8255_H_
#define _I8255_H_
-#include <linux/spinlock.h>
-#include <linux/types.h>
+struct device;
+struct irq_domain;
+struct regmap;
-/**
- * struct i8255 - Intel 8255 register structure
- * @port: Port A, B, and C
- * @control: Control register
- */
-struct i8255 {
- u8 port[3];
- u8 control;
-};
+#define i8255_volatile_regmap_range(_base) regmap_reg_range(_base, _base + 0x2)
/**
- * struct i8255_state - Intel 8255 state structure
- * @lock: synchronization lock for accessing device state
- * @control_state: Control register state
+ * struct i8255_regmap_config - Configuration for the register map of an i8255
+ * @parent: parent device
+ * @map: regmap for the i8255
+ * @num_ppi: number of i8255 Programmable Peripheral Interface
+ * @names: (optional) array of names for gpios
+ * @domain: (optional) IRQ domain if the controller is interrupt-capable
+ *
+ * Note: The regmap is expected to have cache enabled and i8255 control
+ * registers not marked as volatile.
*/
-struct i8255_state {
- spinlock_t lock;
- u8 control_state;
+struct i8255_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+ int num_ppi;
+ const char *const *names;
+ struct irq_domain *domain;
};
-void i8255_direction_input(struct i8255 __iomem *ppi, struct i8255_state *state,
- unsigned long offset);
-void i8255_direction_output(struct i8255 __iomem *ppi,
- struct i8255_state *state, unsigned long offset,
- unsigned long value);
-int i8255_get(struct i8255 __iomem *ppi, unsigned long offset);
-int i8255_get_direction(const struct i8255_state *state, unsigned long offset);
-void i8255_get_multiple(struct i8255 __iomem *ppi, const unsigned long *mask,
- unsigned long *bits, unsigned long ngpio);
-void i8255_mode0_output(struct i8255 __iomem *const ppi);
-void i8255_set(struct i8255 __iomem *ppi, struct i8255_state *state,
- unsigned long offset, unsigned long value);
-void i8255_set_multiple(struct i8255 __iomem *ppi, struct i8255_state *state,
- const unsigned long *mask, const unsigned long *bits,
- unsigned long ngpio);
-void i8255_state_init(struct i8255_state *const state, unsigned long nbanks);
+int devm_i8255_regmap_register(struct device *dev,
+ const struct i8255_regmap_config *config);
#endif /* _I8255_H_ */
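
A minimal, hypothetical caller of the new interface might look as follows. It assumes a memory-mapped pair of PPIs behind a void __iomem *base and uses the generic devm_regmap_init_mmio() helper (real users may construct their regmap differently), with the data ports marked volatile via i8255_volatile_regmap_range() so that only the control registers are cached, as the kernel-doc note above requires:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

#include "gpio-i8255.h"

static const struct regmap_range foo_volatile_ranges[] = {
	i8255_volatile_regmap_range(0x0),	/* ports A-C of the first PPI */
	i8255_volatile_regmap_range(0x4),	/* ports A-C of the second PPI */
};

static const struct regmap_access_table foo_volatile_table = {
	.yes_ranges	= foo_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(foo_volatile_ranges),
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.reg_stride	= 1,
	.val_bits	= 8,
	.max_register	= 0x7,
	.volatile_table	= &foo_volatile_table,
	.cache_type	= REGCACHE_FLAT,
};

/* Hypothetical probe fragment registering 48 lines (two PPIs). */
static int foo_probe(struct device *dev, void __iomem *base)
{
	struct i8255_regmap_config config = {};
	struct regmap *map;

	map = devm_regmap_init_mmio(dev, base, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	config.parent = dev;
	config.map = map;
	config.num_ppi = 2;

	return devm_i8255_regmap_register(dev, &config);
}
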
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 3b31f5e9bf40..0be9285efebc 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -457,7 +457,7 @@ static int ichx_gpio_probe(struct platform_device *pdev)
init:
ichx_gpiolib_setup(&ichx_priv.chip);
- err = gpiochip_add_data(&ichx_priv.chip, NULL);
+ err = devm_gpiochip_add_data(dev, &ichx_priv.chip, NULL);
if (err) {
dev_err(dev, "Failed to register GPIOs\n");
return err;
@@ -469,19 +469,11 @@ init:
return 0;
}
-static int ichx_gpio_remove(struct platform_device *pdev)
-{
- gpiochip_remove(&ichx_priv.chip);
-
- return 0;
-}
-
static struct platform_driver ichx_gpio_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = ichx_gpio_probe,
- .remove = ichx_gpio_remove,
};
module_platform_driver(ichx_gpio_driver);
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 1cafdf46f875..00f547d26254 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -92,6 +92,8 @@ static void idt_gpio_mask(struct irq_data *d)
writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void idt_gpio_unmask(struct irq_data *d)
@@ -100,6 +102,7 @@ static void idt_gpio_unmask(struct irq_data *d)
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
unsigned long flags;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
ctrl->mask_cache &= ~BIT(d->hwirq);
@@ -119,12 +122,14 @@ static int idt_gpio_irq_init_hw(struct gpio_chip *gc)
return 0;
}
-static struct irq_chip idt_gpio_irqchip = {
+static const struct irq_chip idt_gpio_irqchip = {
.name = "IDTGPIO",
.irq_mask = idt_gpio_mask,
.irq_ack = idt_gpio_ack,
.irq_unmask = idt_gpio_unmask,
- .irq_set_type = idt_gpio_irq_set_type
+ .irq_set_type = idt_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int idt_gpio_probe(struct platform_device *pdev)
@@ -168,7 +173,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
return parent_irq;
girq = &ctrl->gc.irq;
- girq->chip = &idt_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &idt_gpio_irqchip);
girq->init_hw = idt_gpio_irq_init_hw;
girq->parent_handler = idt_gpio_dispatch;
girq->num_parents = 1;
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 17be21b8f3b7..e190bde5397d 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -136,4 +136,3 @@ subsys_initcall_sync(_imx_scu_gpio_init);
MODULE_AUTHOR("Shenwei Wang <shenwei.wang@nxp.com>");
MODULE_DESCRIPTION("NXP GPIO over IMX SCU API");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
deleted file mode 100644
index 7390b5ca09e3..000000000000
--- a/drivers/gpio/gpio-iop.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/plat-iop/gpio.c
- * GPIO handling for Intel IOP3xx processors.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/gpio/driver.h>
-#include <linux/platform_device.h>
-
-#define IOP3XX_GPOE 0x0000
-#define IOP3XX_GPID 0x0004
-#define IOP3XX_GPOD 0x0008
-
-static int iop3xx_gpio_probe(struct platform_device *pdev)
-{
- struct gpio_chip *gc;
- void __iomem *base;
- int err;
-
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
- return -ENOMEM;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- err = bgpio_init(gc, &pdev->dev, 1, base + IOP3XX_GPID,
- base + IOP3XX_GPOD, NULL, NULL, base + IOP3XX_GPOE, 0);
- if (err)
- return err;
-
- gc->base = 0;
- gc->owner = THIS_MODULE;
- gc->label = "gpio-iop";
-
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
-}
-
-static struct platform_driver iop3xx_gpio_driver = {
- .driver = {
- .name = "gpio-iop",
- },
- .probe = iop3xx_gpio_probe,
-};
-
-static int __init iop3xx_gpio_init(void)
-{
- return platform_driver_register(&iop3xx_gpio_driver);
-}
-arch_initcall(iop3xx_gpio_init);
-
-MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
new file mode 100644
index 000000000000..87863f0230f5
--- /dev/null
+++ b/drivers/gpio/gpio-ljca.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-GPIO driver
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dev_printk.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* GPIO commands */
+#define LJCA_GPIO_CONFIG 1
+#define LJCA_GPIO_READ 2
+#define LJCA_GPIO_WRITE 3
+#define LJCA_GPIO_INT_EVENT 4
+#define LJCA_GPIO_INT_MASK 5
+#define LJCA_GPIO_INT_UNMASK 6
+
+#define LJCA_GPIO_CONF_DISABLE BIT(0)
+#define LJCA_GPIO_CONF_INPUT BIT(1)
+#define LJCA_GPIO_CONF_OUTPUT BIT(2)
+#define LJCA_GPIO_CONF_PULLUP BIT(3)
+#define LJCA_GPIO_CONF_PULLDOWN BIT(4)
+#define LJCA_GPIO_CONF_DEFAULT BIT(5)
+#define LJCA_GPIO_CONF_INTERRUPT BIT(6)
+#define LJCA_GPIO_INT_TYPE BIT(7)
+
+#define LJCA_GPIO_CONF_EDGE FIELD_PREP(LJCA_GPIO_INT_TYPE, 1)
+#define LJCA_GPIO_CONF_LEVEL FIELD_PREP(LJCA_GPIO_INT_TYPE, 0)
+
+/* Intentional overlap with PULLUP / PULLDOWN */
+#define LJCA_GPIO_CONF_SET BIT(3)
+#define LJCA_GPIO_CONF_CLR BIT(4)
+
+struct gpio_op {
+ u8 index;
+ u8 value;
+} __packed;
+
+struct gpio_packet {
+ u8 num;
+ struct gpio_op item[];
+} __packed;
+
+#define LJCA_GPIO_BUF_SIZE 60
+struct ljca_gpio_dev {
+ struct platform_device *pdev;
+ struct gpio_chip gc;
+ struct ljca_gpio_info *gpio_info;
+ DECLARE_BITMAP(unmasked_irqs, LJCA_MAX_GPIO_NUM);
+ DECLARE_BITMAP(enabled_irqs, LJCA_MAX_GPIO_NUM);
+ DECLARE_BITMAP(reenable_irqs, LJCA_MAX_GPIO_NUM);
+ u8 *connect_mode;
+ /* mutex to protect irq bus */
+ struct mutex irq_lock;
+ struct work_struct work;
+	/* lock to protect packet transfers to the hardware */
+ struct mutex trans_lock;
+
+ u8 obuf[LJCA_GPIO_BUF_SIZE];
+ u8 ibuf[LJCA_GPIO_BUF_SIZE];
+};
+
+static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
+ packet->num = 1;
+
+ ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_CONFIG, packet,
+ struct_size(packet, item, packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static int ljca_gpio_read(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ struct gpio_packet *ack_packet = (struct gpio_packet *)ljca_gpio->ibuf;
+ unsigned int ibuf_len = LJCA_GPIO_BUF_SIZE;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_READ, packet,
+ struct_size(packet, item, packet->num), ljca_gpio->ibuf, &ibuf_len);
+ if (ret)
+ goto out_unlock;
+
+ if (!ibuf_len || ack_packet->num != packet->num) {
+ dev_err(&ljca_gpio->pdev->dev, "failed gpio_id:%u %u", gpio_id, ack_packet->num);
+ ret = -EIO;
+ }
+
+out_unlock:
+ mutex_unlock(&ljca_gpio->trans_lock);
+ if (ret)
+ return ret;
+ return ack_packet->item[0].value > 0;
+}
+
+static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
+ int value)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = value & 1;
+
+ ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_WRITE, packet,
+ struct_size(packet, item, packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ return ljca_gpio_read(ljca_gpio, offset);
+}
+
+static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = ljca_gpio_write(ljca_gpio, offset, val);
+ if (ret)
+ dev_err(chip->parent, "offset:%u val:%d set value failed %d\n", offset, val, ret);
+}
+
+static int ljca_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ u8 config = LJCA_GPIO_CONF_INPUT | LJCA_GPIO_CONF_CLR;
+
+ return gpio_config(ljca_gpio, offset, config);
+}
+
+static int ljca_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ u8 config = LJCA_GPIO_CONF_OUTPUT | LJCA_GPIO_CONF_CLR;
+ int ret;
+
+ ret = gpio_config(ljca_gpio, offset, config);
+ if (ret)
+ return ret;
+
+ ljca_gpio_set_value(chip, offset, val);
+ return 0;
+}
+
+static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ ljca_gpio->connect_mode[offset] = 0;
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ljca_gpio->connect_mode[offset] |= LJCA_GPIO_CONF_PULLUP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ljca_gpio->connect_mode[offset] |= LJCA_GPIO_CONF_PULLDOWN;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ case PIN_CONFIG_PERSIST_STATE:
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ WARN_ON_ONCE(ngpios != ljca_gpio->gpio_info->num);
+ bitmap_copy(valid_mask, ljca_gpio->gpio_info->valid_pin_map, ngpios);
+
+ return 0;
+}
+
+static void ljca_gpio_irq_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ ljca_gpio_init_valid_mask(chip, valid_mask, ngpios);
+}
+
+static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, bool enable)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = 0;
+
+ ret = ljca_transfer(ljca_gpio->gpio_info->ljca,
+ enable ? LJCA_GPIO_INT_UNMASK : LJCA_GPIO_INT_MASK, packet,
+ struct_size(packet, item, packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static void ljca_gpio_async(struct work_struct *work)
+{
+ struct ljca_gpio_dev *ljca_gpio = container_of(work, struct ljca_gpio_dev, work);
+ int gpio_id;
+ int unmasked;
+
+ for_each_set_bit(gpio_id, ljca_gpio->reenable_irqs, ljca_gpio->gc.ngpio) {
+ clear_bit(gpio_id, ljca_gpio->reenable_irqs);
+ unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
+ if (unmasked)
+ ljca_enable_irq(ljca_gpio, gpio_id, true);
+ }
+}
+
+static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, int len)
+{
+ const struct gpio_packet *packet = evt_data;
+ struct ljca_gpio_dev *ljca_gpio = context;
+ int i;
+ int irq;
+
+ if (cmd != LJCA_GPIO_INT_EVENT)
+ return;
+
+ for (i = 0; i < packet->num; i++) {
+ irq = irq_find_mapping(ljca_gpio->gc.irq.domain, packet->item[i].index);
+ if (!irq) {
+			dev_err(ljca_gpio->gc.parent, "gpio_id %u is not mapped to an IRQ yet\n",
+ packet->item[i].index);
+ return;
+ }
+
+ generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+ set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
+ }
+
+ schedule_work(&ljca_gpio->work);
+}
+
+static void ljca_irq_unmask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ gpiochip_enable_irq(gc, gpio_id);
+ set_bit(gpio_id, ljca_gpio->unmasked_irqs);
+}
+
+static void ljca_irq_mask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ clear_bit(gpio_id, ljca_gpio->unmasked_irqs);
+ gpiochip_disable_irq(gc, gpio_id);
+}
+
+static int ljca_irq_set_type(struct irq_data *irqd, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ ljca_gpio->connect_mode[gpio_id] = LJCA_GPIO_CONF_INTERRUPT;
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLUP);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLDOWN);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLUP);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLDOWN);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ljca_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&ljca_gpio->irq_lock);
+}
+
+static void ljca_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+ int enabled;
+ int unmasked;
+
+ enabled = test_bit(gpio_id, ljca_gpio->enabled_irqs);
+ unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
+
+ if (enabled != unmasked) {
+ if (unmasked) {
+ gpio_config(ljca_gpio, gpio_id, 0);
+ ljca_enable_irq(ljca_gpio, gpio_id, true);
+ set_bit(gpio_id, ljca_gpio->enabled_irqs);
+ } else {
+ ljca_enable_irq(ljca_gpio, gpio_id, false);
+ clear_bit(gpio_id, ljca_gpio->enabled_irqs);
+ }
+ }
+
+ mutex_unlock(&ljca_gpio->irq_lock);
+}
+
+static const struct irq_chip ljca_gpio_irqchip = {
+ .name = "ljca-irq",
+ .irq_mask = ljca_irq_mask,
+ .irq_unmask = ljca_irq_unmask,
+ .irq_set_type = ljca_irq_set_type,
+ .irq_bus_lock = ljca_irq_bus_lock,
+ .irq_bus_sync_unlock = ljca_irq_bus_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int ljca_gpio_probe(struct platform_device *pdev)
+{
+ struct ljca_gpio_dev *ljca_gpio;
+ struct gpio_irq_chip *girq;
+ int ret;
+
+ ljca_gpio = devm_kzalloc(&pdev->dev, sizeof(*ljca_gpio), GFP_KERNEL);
+ if (!ljca_gpio)
+ return -ENOMEM;
+
+ ljca_gpio->gpio_info = dev_get_platdata(&pdev->dev);
+ ljca_gpio->connect_mode = devm_kcalloc(&pdev->dev, ljca_gpio->gpio_info->num,
+ sizeof(*ljca_gpio->connect_mode), GFP_KERNEL);
+ if (!ljca_gpio->connect_mode)
+ return -ENOMEM;
+
+ mutex_init(&ljca_gpio->irq_lock);
+ mutex_init(&ljca_gpio->trans_lock);
+ ljca_gpio->pdev = pdev;
+ ljca_gpio->gc.direction_input = ljca_gpio_direction_input;
+ ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
+ ljca_gpio->gc.get = ljca_gpio_get_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
+ ljca_gpio->gc.set_config = ljca_gpio_set_config;
+ ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
+ ljca_gpio->gc.can_sleep = true;
+ ljca_gpio->gc.parent = &pdev->dev;
+
+ ljca_gpio->gc.base = -1;
+ ljca_gpio->gc.ngpio = ljca_gpio->gpio_info->num;
+ ljca_gpio->gc.label = ACPI_COMPANION(&pdev->dev) ?
+ acpi_dev_name(ACPI_COMPANION(&pdev->dev)) :
+ dev_name(&pdev->dev);
+ ljca_gpio->gc.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, ljca_gpio);
+ ljca_register_event_cb(ljca_gpio->gpio_info->ljca, ljca_gpio_event_cb, ljca_gpio);
+
+ girq = &ljca_gpio->gc.irq;
+ gpio_irq_chip_set_chip(girq, &ljca_gpio_irqchip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->init_valid_mask = ljca_gpio_irq_init_valid_mask;
+
+ INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
+ ret = gpiochip_add_data(&ljca_gpio->gc, ljca_gpio);
+ if (ret) {
+ ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca);
+ mutex_destroy(&ljca_gpio->irq_lock);
+ mutex_destroy(&ljca_gpio->trans_lock);
+ }
+
+ return ret;
+}
+
+static int ljca_gpio_remove(struct platform_device *pdev)
+{
+ struct ljca_gpio_dev *ljca_gpio = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&ljca_gpio->gc);
+ ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca);
+ mutex_destroy(&ljca_gpio->irq_lock);
+ mutex_destroy(&ljca_gpio->trans_lock);
+ return 0;
+}
+
+#define LJCA_GPIO_DRV_NAME "ljca-gpio"
+static const struct platform_device_id ljca_gpio_id[] = {
+ { LJCA_GPIO_DRV_NAME, 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, ljca_gpio_id);
+
+static struct platform_driver ljca_gpio_driver = {
+ .driver.name = LJCA_GPIO_DRV_NAME,
+ .probe = ljca_gpio_probe,
+ .remove = ljca_gpio_remove,
+};
+module_platform_driver(ljca_gpio_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_AUTHOR("Wang Zhifeng <zhifeng.wang@intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LJCA);
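
Lines on this expander behave like any other sleeping gpiochip (can_sleep is set and every access goes over USB), so consumers use the ordinary descriptor API together with threaded interrupts. A purely hypothetical consumer fragment, with "reset" as an assumed connection ID coming from the ACPI/board mapping:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t foo_event_thread(int irq, void *data)
{
	/* ... react to the pin event; sleeping is allowed here ... */
	return IRQ_HANDLED;
}

static int foo_consumer_probe(struct device *dev)
{
	struct gpio_desc *event;
	int irq;

	event = devm_gpiod_get(dev, "reset", GPIOD_IN);
	if (IS_ERR(event))
		return PTR_ERR(event);

	irq = gpiod_to_irq(event);
	if (irq < 0)
		return irq;

	/* The chip sleeps during transfers, so only a threaded handler works. */
	return devm_request_threaded_irq(dev, irq, NULL, foo_event_thread,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 "foo-event", dev);
}
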
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
new file mode 100644
index 000000000000..06213bbfabdd
--- /dev/null
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Loongson GPIO Support
+ *
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+
+enum loongson_gpio_mode {
+ BIT_CTRL_MODE,
+ BYTE_CTRL_MODE,
+};
+
+struct loongson_gpio_chip_data {
+ const char *label;
+ enum loongson_gpio_mode mode;
+ unsigned int conf_offset;
+ unsigned int out_offset;
+ unsigned int in_offset;
+};
+
+struct loongson_gpio_chip {
+ struct gpio_chip chip;
+ struct fwnode_handle *fwnode;
+ spinlock_t lock;
+ void __iomem *reg_base;
+ const struct loongson_gpio_chip_data *chip_data;
+};
+
+static inline struct loongson_gpio_chip *to_loongson_gpio_chip(struct gpio_chip *chip)
+{
+ return container_of(chip, struct loongson_gpio_chip, chip);
+}
+
+static inline void loongson_commit_direction(struct loongson_gpio_chip *lgpio, unsigned int pin,
+ int input)
+{
+ u8 bval = input ? 1 : 0;
+
+ writeb(bval, lgpio->reg_base + lgpio->chip_data->conf_offset + pin);
+}
+
+static void loongson_commit_level(struct loongson_gpio_chip *lgpio, unsigned int pin, int high)
+{
+ u8 bval = high ? 1 : 0;
+
+ writeb(bval, lgpio->reg_base + lgpio->chip_data->out_offset + pin);
+}
+
+static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+ unsigned long flags;
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+
+ spin_lock_irqsave(&lgpio->lock, flags);
+ loongson_commit_direction(lgpio, pin, 1);
+ spin_unlock_irqrestore(&lgpio->lock, flags);
+
+ return 0;
+}
+
+static int loongson_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, int value)
+{
+ unsigned long flags;
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+
+ spin_lock_irqsave(&lgpio->lock, flags);
+ loongson_commit_level(lgpio, pin, value);
+ loongson_commit_direction(lgpio, pin, 0);
+ spin_unlock_irqrestore(&lgpio->lock, flags);
+
+ return 0;
+}
+
+static int loongson_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+ u8 bval;
+ int val;
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+
+ bval = readb(lgpio->reg_base + lgpio->chip_data->in_offset + pin);
+ val = bval & 1;
+
+ return val;
+}
+
+static int loongson_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
+{
+ u8 bval;
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+
+ bval = readb(lgpio->reg_base + lgpio->chip_data->conf_offset + pin);
+ if (bval & 1)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+{
+ unsigned long flags;
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+
+ spin_lock_irqsave(&lgpio->lock, flags);
+ loongson_commit_level(lgpio, pin, value);
+ spin_unlock_irqrestore(&lgpio->lock, flags);
+}
+
+static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct platform_device *pdev = to_platform_device(chip->parent);
+
+ return platform_get_irq(pdev, offset);
+}
+
+static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgpio,
+ struct device_node *np, void __iomem *reg_base)
+{
+ int ret;
+ u32 ngpios;
+
+ lgpio->reg_base = reg_base;
+
+ if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
+ ret = bgpio_init(&lgpio->chip, dev, 8,
+ lgpio->reg_base + lgpio->chip_data->in_offset,
+ lgpio->reg_base + lgpio->chip_data->out_offset,
+ NULL, NULL,
+ lgpio->reg_base + lgpio->chip_data->conf_offset,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ } else {
+ lgpio->chip.direction_input = loongson_gpio_direction_input;
+ lgpio->chip.get = loongson_gpio_get;
+ lgpio->chip.get_direction = loongson_gpio_get_direction;
+ lgpio->chip.direction_output = loongson_gpio_direction_output;
+ lgpio->chip.set = loongson_gpio_set;
+ lgpio->chip.parent = dev;
+ spin_lock_init(&lgpio->lock);
+ }
+
+ device_property_read_u32(dev, "ngpios", &ngpios);
+
+ lgpio->chip.can_sleep = 0;
+ lgpio->chip.ngpio = ngpios;
+ lgpio->chip.label = lgpio->chip_data->label;
+ lgpio->chip.to_irq = loongson_gpio_to_irq;
+
+ return devm_gpiochip_add_data(dev, &lgpio->chip, lgpio);
+}
+
+static int loongson_gpio_probe(struct platform_device *pdev)
+{
+ void __iomem *reg_base;
+ struct loongson_gpio_chip *lgpio;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+
+ lgpio = devm_kzalloc(dev, sizeof(*lgpio), GFP_KERNEL);
+ if (!lgpio)
+ return -ENOMEM;
+
+ lgpio->chip_data = device_get_match_data(dev);
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ return loongson_gpio_init(dev, lgpio, np, reg_base);
+}
+
+static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
+ .label = "ls2k_gpio",
+ .mode = BIT_CTRL_MODE,
+ .conf_offset = 0x0,
+ .in_offset = 0x20,
+ .out_offset = 0x10,
+};
+
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
+ .label = "ls7a_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+};
+
+static const struct of_device_id loongson_gpio_of_match[] = {
+ {
+ .compatible = "loongson,ls2k-gpio",
+ .data = &loongson_gpio_ls2k_data,
+ },
+ {
+ .compatible = "loongson,ls7a-gpio",
+ .data = &loongson_gpio_ls7a_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, loongson_gpio_of_match);
+
+static const struct acpi_device_id loongson_gpio_acpi_match[] = {
+ {
+ .id = "LOON0002",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, loongson_gpio_acpi_match);
+
+static struct platform_driver loongson_gpio_driver = {
+ .driver = {
+ .name = "loongson-gpio",
+ .of_match_table = loongson_gpio_of_match,
+ .acpi_match_table = loongson_gpio_acpi_match,
+ },
+ .probe = loongson_gpio_probe,
+};
+
+static int __init loongson_gpio_setup(void)
+{
+ return platform_driver_register(&loongson_gpio_driver);
+}
+postcore_initcall(loongson_gpio_setup);
+
+MODULE_DESCRIPTION("Loongson gpio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 5d90b3bc5a25..6ca3b969db4d 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO Driver for Loongson 1 SoC
*
- * Copyright (C) 2015-2016 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
+ * Copyright (C) 2015-2023 Keguang Zhang <keguang.zhang@gmail.com>
*/
#include <linux/module.h>
@@ -19,15 +16,19 @@
#define GPIO_DATA 0x20
#define GPIO_OUTPUT 0x30
-static void __iomem *gpio_reg_base;
+struct ls1x_gpio_chip {
+ struct gpio_chip gc;
+ void __iomem *reg_base;
+};
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
+ struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | BIT(offset),
- gpio_reg_base + GPIO_CFG);
+ __raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset),
+ ls1x_gc->reg_base + GPIO_CFG);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
@@ -35,61 +36,75 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
+ struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~BIT(offset),
- gpio_reg_base + GPIO_CFG);
+ __raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) & ~BIT(offset),
+ ls1x_gc->reg_base + GPIO_CFG);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int ls1x_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct gpio_chip *gc;
+ struct ls1x_gpio_chip *ls1x_gc;
int ret;
- gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ ls1x_gc = devm_kzalloc(dev, sizeof(*ls1x_gc), GFP_KERNEL);
+ if (!ls1x_gc)
return -ENOMEM;
- gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(gpio_reg_base))
- return PTR_ERR(gpio_reg_base);
+ ls1x_gc->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ls1x_gc->reg_base))
+ return PTR_ERR(ls1x_gc->reg_base);
- ret = bgpio_init(gc, dev, 4, gpio_reg_base + GPIO_DATA,
- gpio_reg_base + GPIO_OUTPUT, NULL,
- NULL, gpio_reg_base + GPIO_DIR, 0);
+ ret = bgpio_init(&ls1x_gc->gc, dev, 4, ls1x_gc->reg_base + GPIO_DATA,
+ ls1x_gc->reg_base + GPIO_OUTPUT, NULL,
+ NULL, ls1x_gc->reg_base + GPIO_DIR, 0);
if (ret)
goto err;
- gc->owner = THIS_MODULE;
- gc->request = ls1x_gpio_request;
- gc->free = ls1x_gpio_free;
- gc->base = pdev->id * 32;
+ ls1x_gc->gc.owner = THIS_MODULE;
+ ls1x_gc->gc.request = ls1x_gpio_request;
+ ls1x_gc->gc.free = ls1x_gpio_free;
+ /*
+ * Clear ngpio to let gpiolib get the correct number
+ * by reading ngpios property
+ */
+ ls1x_gc->gc.ngpio = 0;
- ret = devm_gpiochip_add_data(dev, gc, NULL);
+ ret = devm_gpiochip_add_data(dev, &ls1x_gc->gc, ls1x_gc);
if (ret)
goto err;
- platform_set_drvdata(pdev, gc);
- dev_info(dev, "Loongson1 GPIO driver registered\n");
+ platform_set_drvdata(pdev, ls1x_gc);
+
+ dev_info(dev, "GPIO controller registered with %d pins\n",
+ ls1x_gc->gc.ngpio);
return 0;
err:
- dev_err(dev, "failed to register GPIO device\n");
+ dev_err(dev, "failed to register GPIO controller\n");
return ret;
}
+static const struct of_device_id ls1x_gpio_dt_ids[] = {
+ { .compatible = "loongson,ls1x-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_gpio_dt_ids);
+
static struct platform_driver ls1x_gpio_driver = {
.probe = ls1x_gpio_probe,
.driver = {
.name = "ls1x-gpio",
+ .of_match_table = ls1x_gpio_dt_ids,
},
};
module_platform_driver(ls1x_gpio_driver);
-MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson1 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 68e982cdee73..7f2fde191755 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -351,6 +351,7 @@ static void max732x_irq_mask(struct irq_data *d)
struct max732x_chip *chip = gpiochip_get_data(gc);
chip->irq_mask_cur &= ~(1 << d->hwirq);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void max732x_irq_unmask(struct irq_data *d)
@@ -358,6 +359,7 @@ static void max732x_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct max732x_chip *chip = gpiochip_get_data(gc);
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
chip->irq_mask_cur |= 1 << d->hwirq;
}
@@ -429,7 +431,7 @@ static int max732x_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static struct irq_chip max732x_irq_chip = {
+static const struct irq_chip max732x_irq_chip = {
.name = "max732x",
.irq_mask = max732x_irq_mask,
.irq_unmask = max732x_irq_unmask,
@@ -437,6 +439,8 @@ static struct irq_chip max732x_irq_chip = {
.irq_bus_sync_unlock = max732x_irq_bus_sync_unlock,
.irq_set_type = max732x_irq_set_type,
.irq_set_wake = max732x_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static uint8_t max732x_irq_pending(struct max732x_chip *chip)
@@ -517,7 +521,7 @@ static int max732x_irq_setup(struct max732x_chip *chip,
}
girq = &chip->gpio_chip.irq;
- girq->chip = &max732x_irq_chip;
+ gpio_irq_chip_set_chip(girq, &max732x_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 92ea8411050d..421d7e3a6c66 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -2,60 +2,25 @@
/*
* Intel Merrifield SoC GPIO driver
*
- * Copyright (c) 2016 Intel Corporation.
+ * Copyright (c) 2016, 2023 Intel Corporation.
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/string_helpers.h>
+#include <linux/types.h>
-#define GCCR 0x000 /* controller configuration */
-#define GPLR 0x004 /* pin level r/o */
-#define GPDR 0x01c /* pin direction */
-#define GPSR 0x034 /* pin set w/o */
-#define GPCR 0x04c /* pin clear w/o */
-#define GRER 0x064 /* rising edge detect */
-#define GFER 0x07c /* falling edge detect */
-#define GFBR 0x094 /* glitch filter bypass */
-#define GIMR 0x0ac /* interrupt mask */
-#define GISR 0x0c4 /* interrupt source */
-#define GITR 0x300 /* input type */
-#define GLPR 0x318 /* level input polarity */
-#define GWMR 0x400 /* wake mask */
-#define GWSR 0x418 /* wake source */
-#define GSIR 0xc00 /* secure input */
+#include "gpio-tangier.h"
/* Intel Merrifield has 192 GPIO pins */
#define MRFLD_NGPIO 192
-struct mrfld_gpio_pinrange {
- unsigned int gpio_base;
- unsigned int pin_base;
- unsigned int npins;
-};
-
-#define GPIO_PINRANGE(gstart, gend, pstart) \
- { \
- .gpio_base = (gstart), \
- .pin_base = (pstart), \
- .npins = (gend) - (gstart) + 1, \
- }
-
-struct mrfld_gpio {
- struct gpio_chip chip;
- void __iomem *reg_base;
- raw_spinlock_t lock;
- struct device *dev;
-};
-
-static const struct mrfld_gpio_pinrange mrfld_gpio_ranges[] = {
+static const struct tng_gpio_pinrange mrfld_gpio_ranges[] = {
GPIO_PINRANGE(0, 11, 146),
GPIO_PINRANGE(12, 13, 144),
GPIO_PINRANGE(14, 15, 35),
@@ -84,323 +49,15 @@ static const struct mrfld_gpio_pinrange mrfld_gpio_ranges[] = {
GPIO_PINRANGE(190, 191, 178),
};
-static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned int offset,
- unsigned int reg_type_offset)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- u8 reg = offset / 32;
-
- return priv->reg_base + reg_type_offset + reg * 4;
-}
-
-static int mrfld_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- void __iomem *gplr = gpio_reg(chip, offset, GPLR);
-
- return !!(readl(gplr) & BIT(offset % 32));
-}
-
-static void mrfld_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpsr, *gpcr;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- if (value) {
- gpsr = gpio_reg(chip, offset, GPSR);
- writel(BIT(offset % 32), gpsr);
- } else {
- gpcr = gpio_reg(chip, offset, GPCR);
- writel(BIT(offset % 32), gpcr);
- }
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static int mrfld_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- unsigned long flags;
- u32 value;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- value = readl(gpdr);
- value &= ~BIT(offset % 32);
- writel(value, gpdr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static int mrfld_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- unsigned long flags;
-
- mrfld_gpio_set(chip, offset, value);
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- value = readl(gpdr);
- value |= BIT(offset % 32);
- writel(value, gpdr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static int mrfld_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
-
- if (readl(gpdr) & BIT(offset % 32))
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
-static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
- unsigned int debounce)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gfbr = gpio_reg(chip, offset, GFBR);
- unsigned long flags;
- u32 value;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- if (debounce)
- value = readl(gfbr) & ~BIT(offset % 32);
- else
- value = readl(gfbr) | BIT(offset % 32);
- writel(value, gfbr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static int mrfld_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
- unsigned long config)
-{
- u32 debounce;
-
- if ((pinconf_to_config_param(config) == PIN_CONFIG_BIAS_DISABLE) ||
- (pinconf_to_config_param(config) == PIN_CONFIG_BIAS_PULL_UP) ||
- (pinconf_to_config_param(config) == PIN_CONFIG_BIAS_PULL_DOWN))
- return gpiochip_generic_config(chip, offset, config);
-
- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
- return -ENOTSUPP;
-
- debounce = pinconf_to_config_argument(config);
- return mrfld_gpio_set_debounce(chip, offset, debounce);
-}
-
-static void mrfld_irq_ack(struct irq_data *d)
-{
- struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
- u32 gpio = irqd_to_hwirq(d);
- void __iomem *gisr = gpio_reg(&priv->chip, gpio, GISR);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- writel(BIT(gpio % 32), gisr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask)
-{
- void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR);
- unsigned long flags;
- u32 value;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- if (unmask)
- value = readl(gimr) | BIT(gpio % 32);
- else
- value = readl(gimr) & ~BIT(gpio % 32);
- writel(value, gimr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void mrfld_irq_mask(struct irq_data *d)
-{
- struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
- u32 gpio = irqd_to_hwirq(d);
-
- mrfld_irq_unmask_mask(priv, gpio, false);
- gpiochip_disable_irq(&priv->chip, gpio);
-}
-
-static void mrfld_irq_unmask(struct irq_data *d)
-{
- struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
- u32 gpio = irqd_to_hwirq(d);
-
- gpiochip_enable_irq(&priv->chip, gpio);
- mrfld_irq_unmask_mask(priv, gpio, true);
-}
-
-static int mrfld_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct mrfld_gpio *priv = gpiochip_get_data(gc);
- u32 gpio = irqd_to_hwirq(d);
- void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
- void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);
- void __iomem *gitr = gpio_reg(&priv->chip, gpio, GITR);
- void __iomem *glpr = gpio_reg(&priv->chip, gpio, GLPR);
- unsigned long flags;
- u32 value;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- if (type & IRQ_TYPE_EDGE_RISING)
- value = readl(grer) | BIT(gpio % 32);
- else
- value = readl(grer) & ~BIT(gpio % 32);
- writel(value, grer);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = readl(gfer) | BIT(gpio % 32);
- else
- value = readl(gfer) & ~BIT(gpio % 32);
- writel(value, gfer);
-
- /*
- * To prevent glitches from triggering an unintended level interrupt,
- * configure GLPR register first and then configure GITR.
- */
- if (type & IRQ_TYPE_LEVEL_LOW)
- value = readl(glpr) | BIT(gpio % 32);
- else
- value = readl(glpr) & ~BIT(gpio % 32);
- writel(value, glpr);
-
- if (type & IRQ_TYPE_LEVEL_MASK) {
- value = readl(gitr) | BIT(gpio % 32);
- writel(value, gitr);
-
- irq_set_handler_locked(d, handle_level_irq);
- } else if (type & IRQ_TYPE_EDGE_BOTH) {
- value = readl(gitr) & ~BIT(gpio % 32);
- writel(value, gitr);
-
- irq_set_handler_locked(d, handle_edge_irq);
- }
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct mrfld_gpio *priv = gpiochip_get_data(gc);
- u32 gpio = irqd_to_hwirq(d);
- void __iomem *gwmr = gpio_reg(&priv->chip, gpio, GWMR);
- void __iomem *gwsr = gpio_reg(&priv->chip, gpio, GWSR);
- unsigned long flags;
- u32 value;
-
- raw_spin_lock_irqsave(&priv->lock, flags);
-
- /* Clear the existing wake status */
- writel(BIT(gpio % 32), gwsr);
-
- if (on)
- value = readl(gwmr) | BIT(gpio % 32);
- else
- value = readl(gwmr) & ~BIT(gpio % 32);
- writel(value, gwmr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
- dev_dbg(priv->dev, "%s wake for gpio %u\n", str_enable_disable(on), gpio);
- return 0;
-}
-
-static const struct irq_chip mrfld_irqchip = {
- .name = "gpio-merrifield",
- .irq_ack = mrfld_irq_ack,
- .irq_mask = mrfld_irq_mask,
- .irq_unmask = mrfld_irq_unmask,
- .irq_set_type = mrfld_irq_set_type,
- .irq_set_wake = mrfld_irq_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-static void mrfld_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct mrfld_gpio *priv = gpiochip_get_data(gc);
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
- unsigned long base, gpio;
-
- chained_irq_enter(irqchip, desc);
-
- /* Check GPIO controller to check which pin triggered the interrupt */
- for (base = 0; base < priv->chip.ngpio; base += 32) {
- void __iomem *gisr = gpio_reg(&priv->chip, base, GISR);
- void __iomem *gimr = gpio_reg(&priv->chip, base, GIMR);
- unsigned long pending, enabled;
-
- pending = readl(gisr);
- enabled = readl(gimr);
-
- /* Only interrupts that are enabled */
- pending &= enabled;
-
- for_each_set_bit(gpio, &pending, 32)
- generic_handle_domain_irq(gc->irq.domain, base + gpio);
- }
-
- chained_irq_exit(irqchip, desc);
-}
-
-static int mrfld_irq_init_hw(struct gpio_chip *chip)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- void __iomem *reg;
- unsigned int base;
-
- for (base = 0; base < priv->chip.ngpio; base += 32) {
- /* Clear the rising-edge detect register */
- reg = gpio_reg(&priv->chip, base, GRER);
- writel(0, reg);
- /* Clear the falling-edge detect register */
- reg = gpio_reg(&priv->chip, base, GFER);
- writel(0, reg);
- }
-
- return 0;
-}
-
-static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
+static const char *mrfld_gpio_get_pinctrl_dev_name(struct tng_gpio *priv)
{
+ struct device *dev = priv->dev;
struct acpi_device *adev;
const char *name;
adev = acpi_dev_get_first_match_dev("INTC1002", NULL, -1);
if (adev) {
- name = devm_kstrdup(priv->dev, acpi_dev_name(adev), GFP_KERNEL);
+ name = devm_kstrdup(dev, acpi_dev_name(adev), GFP_KERNEL);
acpi_dev_put(adev);
} else {
name = "pinctrl-merrifield";
@@ -409,37 +66,10 @@ static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
return name;
}
-static int mrfld_gpio_add_pin_ranges(struct gpio_chip *chip)
-{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
- const struct mrfld_gpio_pinrange *range;
- const char *pinctrl_dev_name;
- unsigned int i;
- int retval;
-
- pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
- if (!pinctrl_dev_name)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
- range = &mrfld_gpio_ranges[i];
- retval = gpiochip_add_pin_range(&priv->chip, pinctrl_dev_name,
- range->gpio_base,
- range->pin_base,
- range->npins);
- if (retval) {
- dev_err(priv->dev, "failed to add GPIO pin range\n");
- return retval;
- }
- }
-
- return 0;
-}
-
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct gpio_irq_chip *girq;
- struct mrfld_gpio *priv;
+ struct device *dev = &pdev->dev;
+ struct tng_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
int retval;
@@ -449,10 +79,8 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev));
- if (retval) {
- dev_err(&pdev->dev, "I/O memory mapping error\n");
- return retval;
- }
+ if (retval)
+ return dev_err_probe(dev, retval, "I/O memory mapping error\n");
base = pcim_iomap_table(pdev)[1];
@@ -462,53 +90,36 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
/* Release the IO mapping, since we already get the info from BAR1 */
pcim_iounmap_regions(pdev, BIT(1));
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->dev = &pdev->dev;
+ priv->dev = dev;
priv->reg_base = pcim_iomap_table(pdev)[0];
- priv->chip.label = dev_name(&pdev->dev);
- priv->chip.parent = &pdev->dev;
- priv->chip.request = gpiochip_generic_request;
- priv->chip.free = gpiochip_generic_free;
- priv->chip.direction_input = mrfld_gpio_direction_input;
- priv->chip.direction_output = mrfld_gpio_direction_output;
- priv->chip.get = mrfld_gpio_get;
- priv->chip.set = mrfld_gpio_set;
- priv->chip.get_direction = mrfld_gpio_get_direction;
- priv->chip.set_config = mrfld_gpio_set_config;
- priv->chip.base = gpio_base;
- priv->chip.ngpio = MRFLD_NGPIO;
- priv->chip.can_sleep = false;
- priv->chip.add_pin_ranges = mrfld_gpio_add_pin_ranges;
+ priv->pin_info.pin_ranges = mrfld_gpio_ranges;
+ priv->pin_info.nranges = ARRAY_SIZE(mrfld_gpio_ranges);
+ priv->pin_info.name = mrfld_gpio_get_pinctrl_dev_name(priv);
+ if (!priv->pin_info.name)
+ return -ENOMEM;
- raw_spin_lock_init(&priv->lock);
+ priv->info.base = gpio_base;
+ priv->info.ngpio = MRFLD_NGPIO;
+ priv->info.first = irq_base;
retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (retval < 0)
return retval;
- girq = &priv->chip.irq;
- gpio_irq_chip_set_chip(girq, &mrfld_irqchip);
- girq->init_hw = mrfld_irq_init_hw;
- girq->parent_handler = mrfld_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents), GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = pci_irq_vector(pdev, 0);
- girq->first = irq_base;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
+ priv->irq = pci_irq_vector(pdev, 0);
- retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
- if (retval) {
- dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
- return retval;
- }
+ priv->wake_regs.gwmr = GWMR_MRFLD;
+ priv->wake_regs.gwsr = GWSR_MRFLD;
+ priv->wake_regs.gsir = GSIR_MRFLD;
+
+ retval = devm_tng_gpio_probe(dev, priv);
+ if (retval)
+ return dev_err_probe(dev, retval, "tng_gpio_probe error\n");
pci_set_drvdata(pdev, priv);
return 0;
@@ -525,9 +136,9 @@ static struct pci_driver mrfld_gpio_driver = {
.id_table = mrfld_gpio_ids,
.probe = mrfld_gpio_probe,
};
-
module_pci_driver(mrfld_gpio_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_DESCRIPTION("Intel Merrifield SoC GPIO driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(GPIO_TANGIER);
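Note: the Merrifield driver is now thin PCI glue over the shared Tangier GPIO library introduced later in this series (gpio-tangier.c), so it must import that library's symbol namespace. A minimal sketch of the pairing, assuming the library exports its symbols into the GPIO_TANGIER namespace (the export side is not visible in this excerpt):

	/* Library side (gpio-tangier.c), assumed: */
	EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER);

	/* Consumer side (gpio-merrifield.c), as added above: */
	MODULE_IMPORT_NS(GPIO_TANGIER);

Without the MODULE_IMPORT_NS() declaration, modpost rejects a module that references symbols exported into a namespace.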
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 77a41151c921..6abe01bc39c3 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/resource.h>
+#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -65,10 +66,10 @@ struct mlxbf2_gpio_context_save_regs {
/* BlueField-2 gpio block context structure. */
struct mlxbf2_gpio_context {
struct gpio_chip gc;
- struct irq_chip irq_chip;
/* YU GPIO blocks address */
void __iomem *gpio_io;
+ struct device *dev;
struct mlxbf2_gpio_context_save_regs *csave_regs;
};
@@ -237,6 +238,7 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
unsigned long flags;
u32 val;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(irqd));
raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
val |= BIT(offset);
@@ -261,6 +263,7 @@ static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
val &= ~BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(irqd));
}
static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
@@ -322,6 +325,24 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
return 0;
}
+static void mlxbf2_gpio_irq_print_chip(struct irq_data *irqd,
+ struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
+
+ seq_printf(p, dev_name(gs->dev));
+}
+
+static const struct irq_chip mlxbf2_gpio_irq_chip = {
+ .irq_set_type = mlxbf2_gpio_irq_set_type,
+ .irq_enable = mlxbf2_gpio_irq_enable,
+ .irq_disable = mlxbf2_gpio_irq_disable,
+ .irq_print_chip = mlxbf2_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
/* BlueField-2 GPIO driver initialization routine. */
static int
mlxbf2_gpio_probe(struct platform_device *pdev)
@@ -340,6 +361,8 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
if (!gs)
return -ENOMEM;
+ gs->dev = dev;
+
/* YU GPIO block address */
gs->gpio_io = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gs->gpio_io))
@@ -376,13 +399,8 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
- gs->irq_chip.name = name;
- gs->irq_chip.irq_set_type = mlxbf2_gpio_irq_set_type;
- gs->irq_chip.irq_enable = mlxbf2_gpio_irq_enable;
- gs->irq_chip.irq_disable = mlxbf2_gpio_irq_disable;
-
girq = &gs->gc.irq;
- girq->chip = &gs->irq_chip;
+ gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
girq->handler = handle_simple_irq;
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
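Note: the mlxbf2 change above, and the similar conversions to a const irq_chip throughout the rest of this series, follow the immutable irq_chip pattern: the unmask/mask (or enable/disable) paths call gpiochip_enable_irq()/gpiochip_disable_irq() themselves, and the chip is tagged IRQCHIP_IMMUTABLE with GPIOCHIP_IRQ_RESOURCE_HELPERS supplying the request/release resources callbacks. A minimal sketch, using hypothetical "foo" names rather than any driver from this series:

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	static void foo_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* ...mask the line in hardware... */
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));
	}

	static void foo_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_enable_irq(gc, irqd_to_hwirq(d));
		/* ...unmask the line in hardware... */
	}

	static const struct irq_chip foo_irq_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};

	/* In probe, instead of assigning girq->chip to a writable struct: */
	/*	gpio_irq_chip_set_chip(&gc->irq, &foo_irq_chip);           */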
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 538e31fe8903..f3c158259636 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -10,8 +10,8 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 000494e0c533..3b0bfff8c778 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 52d7b8d99170..036ad2324892 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -532,17 +532,35 @@ static int msc313_gpio_direction_output(struct gpio_chip *chip, unsigned int off
return 0;
}
+static void msc313_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
+static void msc313_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, d->hwirq);
+ irq_chip_unmask_parent(d);
+}
+
/*
* The interrupt handling happens in the parent interrupt controller,
* we don't do anything here.
*/
-static struct irq_chip msc313_gpio_irqchip = {
+static const struct irq_chip msc313_gpio_irqchip = {
.name = "GPIO",
.irq_eoi = irq_chip_eoi_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
+ .irq_mask = msc313_gpio_irq_mask,
+ .irq_unmask = msc313_gpio_irq_unmask,
.irq_set_type = irq_chip_set_type_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
/*
@@ -644,7 +662,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpiochip->names = gpio->gpio_data->names;
gpioirqchip = &gpiochip->irq;
- gpioirqchip->chip = &msc313_gpio_irqchip;
+ gpio_irq_chip_set_chip(gpioirqchip, &msc313_gpio_irqchip);
gpioirqchip->fwnode = of_node_to_fwnode(dev->of_node);
gpioirqchip->parent_domain = parent_domain;
gpioirqchip->child_to_parent_hwirq = msc313e_gpio_child_to_parent_hwirq;
@@ -655,11 +673,6 @@ static int msc313_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, gpiochip, gpio);
}
-static int msc313_gpio_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id msc313_gpio_of_match[] = {
#ifdef CONFIG_MACH_INFINITY
{
@@ -710,6 +723,5 @@ static struct platform_driver msc313_gpio_driver = {
.pm = &msc313_gpio_ops,
},
.probe = msc313_gpio_probe,
- .remove = msc313_gpio_remove,
};
builtin_platform_driver(msc313_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 91a4232ee58c..a68f682aec01 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1002,7 +1002,7 @@ static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
BUG();
}
- if (IS_ENABLED(CONFIG_PWM))
+ if (IS_REACHABLE(CONFIG_PWM))
mvebu_pwm_suspend(mvchip);
return 0;
@@ -1054,7 +1054,7 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
BUG();
}
- if (IS_ENABLED(CONFIG_PWM))
+ if (IS_REACHABLE(CONFIG_PWM))
mvebu_pwm_resume(mvchip);
return 0;
@@ -1228,7 +1228,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some MVEBU SoCs have simple PWM support for GPIO lines */
- if (IS_ENABLED(CONFIG_PWM)) {
+ if (IS_REACHABLE(CONFIG_PWM)) {
err = mvebu_pwm_probe(pdev, mvchip, id);
if (err)
return err;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index d5626c572d24..9d0cec4b82a3 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
@@ -159,6 +160,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
+ unsigned long flags;
u32 bit, val;
u32 gpio_idx = d->hwirq;
int edge;
@@ -197,6 +199,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
+ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
if (GPIO_EDGE_SEL >= 0) {
val = readl(port->base + GPIO_EDGE_SEL);
if (edge == GPIO_INT_BOTH_EDGES)
@@ -217,15 +221,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
writel(1 << gpio_idx, port->base + GPIO_ISR);
port->pad_type[gpio_idx] = type;
- return 0;
+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+
+ return port->gc.direction_input(&port->gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
void __iomem *reg = port->base;
+ unsigned long flags;
u32 bit, val;
int edge;
+ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
bit = gpio & 0xf;
val = readl(reg);
@@ -240,9 +249,12 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
} else {
pr_err("mxc: invalid configuration for GPIO %d: %x\n",
gpio, edge);
- return;
+ goto unlock;
}
writel(val | (edge << (bit << 1)), reg);
+
+unlock:
+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
}
/* handle 32 interrupts in one status register */
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 7f59e5d936c2..390e619a2831 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -364,4 +364,3 @@ MODULE_AUTHOR("Freescale Semiconductor, "
"Daniel Mack <danielncaiaq.de>, "
"Juergen Beisert <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Freescale MXS GPIO");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 80ddc43fd875..a08be5bf6808 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -47,6 +48,7 @@ struct gpio_regs {
struct gpio_bank {
void __iomem *base;
const struct omap_gpio_reg_offs *regs;
+ struct device *dev;
int irq;
u32 non_wakeup_gpios;
@@ -681,6 +683,7 @@ static void omap_gpio_mask_irq(struct irq_data *d)
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
omap_set_gpio_irqenable(bank, offset, 0);
raw_spin_unlock_irqrestore(&bank->lock, flags);
+ gpiochip_disable_irq(&bank->chip, offset);
}
static void omap_gpio_unmask_irq(struct irq_data *d)
@@ -690,6 +693,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
+ gpiochip_enable_irq(&bank->chip, offset);
raw_spin_lock_irqsave(&bank->lock, flags);
omap_set_gpio_irqenable(bank, offset, 1);
@@ -708,6 +712,40 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
+static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+
+ seq_printf(p, dev_name(bank->dev));
+}
+
+static const struct irq_chip omap_gpio_irq_chip = {
+ .irq_startup = omap_gpio_irq_startup,
+ .irq_shutdown = omap_gpio_irq_shutdown,
+ .irq_mask = omap_gpio_mask_irq,
+ .irq_unmask = omap_gpio_unmask_irq,
+ .irq_set_type = omap_gpio_irq_type,
+ .irq_set_wake = omap_gpio_wake_enable,
+ .irq_bus_lock = omap_gpio_irq_bus_lock,
+ .irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
+ .irq_print_chip = omap_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static const struct irq_chip omap_gpio_irq_chip_nowake = {
+ .irq_startup = omap_gpio_irq_startup,
+ .irq_shutdown = omap_gpio_irq_shutdown,
+ .irq_mask = omap_gpio_mask_irq,
+ .irq_unmask = omap_gpio_unmask_irq,
+ .irq_set_type = omap_gpio_irq_type,
+ .irq_bus_lock = omap_gpio_irq_bus_lock,
+ .irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
+ .irq_print_chip = omap_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
/*---------------------------------------------------------------------*/
static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -986,13 +1024,11 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
writel_relaxed(0, base + bank->regs->ctrl);
}
-static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc,
- struct device *pm_dev)
+static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
{
struct gpio_irq_chip *irq;
static int gpio;
const char *label;
- int irq_base = 0;
int ret;
/*
@@ -1020,34 +1056,20 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc,
if (!label)
return -ENOMEM;
bank->chip.label = label;
- bank->chip.base = gpio;
+ bank->chip.base = -1;
}
bank->chip.ngpio = bank->width;
-#ifdef CONFIG_ARCH_OMAP1
- /*
- * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
- * irq_alloc_descs() since a base IRQ offset will no longer be needed.
- */
- irq_base = devm_irq_alloc_descs(bank->chip.parent,
- -1, 0, bank->width, 0);
- if (irq_base < 0) {
- dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
- return -ENODEV;
- }
-#endif
-
+ irq = &bank->chip.irq;
/* MPUIO is a bit different, reading IRQ status clears it */
if (bank->is_mpuio && !bank->regs->wkup_en)
- irqc->irq_set_wake = NULL;
-
- irq = &bank->chip.irq;
- irq->chip = irqc;
+ gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip_nowake);
+ else
+ gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip);
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->num_parents = 1;
irq->parents = &bank->irq;
- irq->first = irq_base;
ret = gpiochip_add_data(&bank->chip, bank);
if (ret)
@@ -1376,7 +1398,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
const struct omap_gpio_platform_data *pdata;
struct gpio_bank *bank;
- struct irq_chip *irqc;
int ret;
pdata = device_get_match_data(dev);
@@ -1389,21 +1410,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
if (!bank)
return -ENOMEM;
- irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
- return -ENOMEM;
-
- irqc->irq_startup = omap_gpio_irq_startup,
- irqc->irq_shutdown = omap_gpio_irq_shutdown,
- irqc->irq_ack = dummy_irq_chip.irq_ack,
- irqc->irq_mask = omap_gpio_mask_irq,
- irqc->irq_unmask = omap_gpio_unmask_irq,
- irqc->irq_set_type = omap_gpio_irq_type,
- irqc->irq_set_wake = omap_gpio_wake_enable,
- irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
- irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
- irqc->name = dev_name(&pdev->dev);
- irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+ bank->dev = dev;
bank->irq = platform_get_irq(pdev, 0);
if (bank->irq <= 0) {
@@ -1467,7 +1474,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_gpio_mod_init(bank);
- ret = omap_gpio_chip_init(bank, irqc, dev);
+ ret = omap_gpio_chip_init(bank, dev);
if (ret) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index a59d61cd44b2..1286b22ef23a 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -306,34 +306,31 @@ static bool pca953x_check_register(struct pca953x_chip *chip, unsigned int reg,
static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
u32 checkbank)
{
+ int bank_shift;
int bank;
int offset;
- if (reg >= 0x30) {
- /*
- * Reserved block between 14h and 2Fh does not align on
- * expected bank boundaries like other devices.
- */
- int temp = reg - 0x30;
-
- bank = temp / NBANK(chip);
- offset = temp - (bank * NBANK(chip));
- bank += 8;
- } else if (reg >= 0x54) {
+ if (reg >= 0x54) {
/*
* Handle lack of reserved registers after output port
* configuration register to form a bank.
*/
- int temp = reg - 0x54;
-
- bank = temp / NBANK(chip);
- offset = temp - (bank * NBANK(chip));
- bank += 16;
+ reg -= 0x54;
+ bank_shift = 16;
+ } else if (reg >= 0x30) {
+ /*
+ * Reserved block between 14h and 2Fh does not align on
+ * expected bank boundaries like other devices.
+ */
+ reg -= 0x30;
+ bank_shift = 8;
} else {
- bank = reg / NBANK(chip);
- offset = reg - (bank * NBANK(chip));
+ bank_shift = 0;
}
+ bank = bank_shift + reg / NBANK(chip);
+ offset = reg % NBANK(chip);
+
/* Register is not in the matching bank. */
if (!(BIT(bank) & checkbank))
return false;
@@ -464,7 +461,6 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
case PCAL953X_PULL_SEL:
case PCAL953X_INT_MASK:
case PCAL953X_INT_STAT:
- case PCAL953X_OUT_CONF:
pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x20;
break;
case PCAL6524_INT_EDGE:
@@ -474,6 +470,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
case PCAL6524_DEBOUNCE:
pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
break;
+ default:
+ pinctrl = 0;
+ break;
}
return pinctrl + addr + (off / BANK_SZ);
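Note: the pcal6534_check_register() rework above keeps the three address windows but folds them into one bank/offset computation. A standalone sketch of the arithmetic, assuming a hypothetical five-bank device (NBANK == 5) purely for illustration:

	#include <stdio.h>

	#define NBANK 5

	static void decode(unsigned int reg)
	{
		unsigned int bank_shift, bank, offset;

		if (reg >= 0x54) {		/* window after the output config block */
			reg -= 0x54;
			bank_shift = 16;
		} else if (reg >= 0x30) {	/* window after the 0x14..0x2f reserved hole */
			reg -= 0x30;
			bank_shift = 8;
		} else {			/* low window */
			bank_shift = 0;
		}

		bank = bank_shift + reg / NBANK;
		offset = reg % NBANK;
		printf("reg -> bank %u, offset %u\n", bank, offset);
	}

	int main(void)
	{
		decode(0x02);	/* low window:    bank 0,  offset 2 */
		decode(0x33);	/* middle window: bank 8,  offset 3 */
		decode(0x56);	/* high window:   bank 16, offset 2 */
		return 0;
	}

Note also that the test order changes: 0x54 is now checked before 0x30, whereas the old code's `reg >= 0x30` branch would have swallowed the higher window first.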
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index 6c07a8811a7a..6a5a8e593ed5 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -18,11 +18,11 @@
#define SLG7XL45106_GPO_REG 0xDB
/**
- * struct pca9570_platform_data - GPIO platformdata
+ * struct pca9570_chip_data - GPIO platformdata
* @ngpio: no of gpios
* @command: Command to be sent
*/
-struct pca9570_platform_data {
+struct pca9570_chip_data {
u16 ngpio;
u32 command;
};
@@ -36,7 +36,7 @@ struct pca9570_platform_data {
*/
struct pca9570 {
struct gpio_chip chip;
- const struct pca9570_platform_data *p_data;
+ const struct pca9570_chip_data *chip_data;
struct mutex lock;
u8 out;
};
@@ -46,8 +46,8 @@ static int pca9570_read(struct pca9570 *gpio, u8 *value)
struct i2c_client *client = to_i2c_client(gpio->chip.parent);
int ret;
- if (gpio->p_data->command != 0)
- ret = i2c_smbus_read_byte_data(client, gpio->p_data->command);
+ if (gpio->chip_data->command != 0)
+ ret = i2c_smbus_read_byte_data(client, gpio->chip_data->command);
else
ret = i2c_smbus_read_byte(client);
@@ -62,8 +62,8 @@ static int pca9570_write(struct pca9570 *gpio, u8 value)
{
struct i2c_client *client = to_i2c_client(gpio->chip.parent);
- if (gpio->p_data->command != 0)
- return i2c_smbus_write_byte_data(client, gpio->p_data->command, value);
+ if (gpio->chip_data->command != 0)
+ return i2c_smbus_write_byte_data(client, gpio->chip_data->command, value);
return i2c_smbus_write_byte(client, value);
}
@@ -127,8 +127,8 @@ static int pca9570_probe(struct i2c_client *client)
gpio->chip.get = pca9570_get;
gpio->chip.set = pca9570_set;
gpio->chip.base = -1;
- gpio->p_data = device_get_match_data(&client->dev);
- gpio->chip.ngpio = gpio->p_data->ngpio;
+ gpio->chip_data = device_get_match_data(&client->dev);
+ gpio->chip.ngpio = gpio->chip_data->ngpio;
gpio->chip.can_sleep = true;
mutex_init(&gpio->lock);
@@ -141,15 +141,15 @@ static int pca9570_probe(struct i2c_client *client)
return devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
}
-static const struct pca9570_platform_data pca9570_gpio = {
+static const struct pca9570_chip_data pca9570_gpio = {
.ngpio = 4,
};
-static const struct pca9570_platform_data pca9571_gpio = {
+static const struct pca9570_chip_data pca9571_gpio = {
.ngpio = 8,
};
-static const struct pca9570_platform_data slg7xl45106_gpio = {
+static const struct pca9570_chip_data slg7xl45106_gpio = {
.ngpio = 8,
.command = SLG7XL45106_GPO_REG,
};
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index cec2f2c78255..3de1d3ad7472 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -7,18 +7,16 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
-#include <linux/platform_data/pcf857x.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-
static const struct i2c_device_id pcf857x_id[] = {
{ "pcf8574", 8 },
{ "pcf8574a", 8 },
@@ -37,7 +35,6 @@ static const struct i2c_device_id pcf857x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pcf857x_id);
-#ifdef CONFIG_OF
static const struct of_device_id pcf857x_of_table[] = {
{ .compatible = "nxp,pcf8574" },
{ .compatible = "nxp,pcf8574a" },
@@ -55,7 +52,6 @@ static const struct of_device_id pcf857x_of_table[] = {
{ }
};
MODULE_DEVICE_TABLE(of, pcf857x_of_table);
-#endif
/*
* The pcf857x, pca857x, and pca967x chips only expose one read and one
@@ -73,11 +69,11 @@ struct pcf857x {
struct gpio_chip chip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
- unsigned out; /* software latch */
- unsigned status; /* current status */
- unsigned irq_enabled; /* enabled irqs */
+ unsigned int out; /* software latch */
+ unsigned int status; /* current status */
+ unsigned int irq_enabled; /* enabled irqs */
- int (*write)(struct i2c_client *client, unsigned data);
+ int (*write)(struct i2c_client *client, unsigned int data);
int (*read)(struct i2c_client *client);
};
@@ -85,19 +81,19 @@ struct pcf857x {
/* Talk to 8-bit I/O expander */
-static int i2c_write_le8(struct i2c_client *client, unsigned data)
+static int i2c_write_le8(struct i2c_client *client, unsigned int data)
{
return i2c_smbus_write_byte(client, data);
}
static int i2c_read_le8(struct i2c_client *client)
{
- return (int)i2c_smbus_read_byte(client);
+ return i2c_smbus_read_byte(client);
}
/* Talk to 16-bit I/O expander */
-static int i2c_write_le16(struct i2c_client *client, unsigned word)
+static int i2c_write_le16(struct i2c_client *client, unsigned int word)
{
u8 buf[2] = { word & 0xff, word >> 8, };
int status;
@@ -119,10 +115,10 @@ static int i2c_read_le16(struct i2c_client *client)
/*-------------------------------------------------------------------------*/
-static int pcf857x_input(struct gpio_chip *chip, unsigned offset)
+static int pcf857x_input(struct gpio_chip *chip, unsigned int offset)
{
- struct pcf857x *gpio = gpiochip_get_data(chip);
- int status;
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ int status;
mutex_lock(&gpio->lock);
gpio->out |= (1 << offset);
@@ -132,20 +128,35 @@ static int pcf857x_input(struct gpio_chip *chip, unsigned offset)
return status;
}
-static int pcf857x_get(struct gpio_chip *chip, unsigned offset)
+static int pcf857x_get(struct gpio_chip *chip, unsigned int offset)
{
- struct pcf857x *gpio = gpiochip_get_data(chip);
- int value;
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ int value;
value = gpio->read(gpio->client);
return (value < 0) ? value : !!(value & (1 << offset));
}
-static int pcf857x_output(struct gpio_chip *chip, unsigned offset, int value)
+static int pcf857x_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
- struct pcf857x *gpio = gpiochip_get_data(chip);
- unsigned bit = 1 << offset;
- int status;
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ int value = gpio->read(gpio->client);
+
+ if (value < 0)
+ return value;
+
+ *bits &= ~*mask;
+ *bits |= value & *mask;
+
+ return 0;
+}
+
+static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ unsigned int bit = 1 << offset;
+ int status;
mutex_lock(&gpio->lock);
if (value)
@@ -158,16 +169,28 @@ static int pcf857x_output(struct gpio_chip *chip, unsigned offset, int value)
return status;
}
-static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
+static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
{
pcf857x_output(chip, offset, value);
}
+static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->lock);
+ gpio->out &= ~*mask;
+ gpio->out |= *bits & *mask;
+ gpio->write(gpio->client, gpio->out);
+ mutex_unlock(&gpio->lock);
+}
+
/*-------------------------------------------------------------------------*/
static irqreturn_t pcf857x_irq(int irq, void *data)
{
- struct pcf857x *gpio = data;
+ struct pcf857x *gpio = data;
unsigned long change, i, status;
status = gpio->read(gpio->client);
@@ -250,18 +273,11 @@ static const struct irq_chip pcf857x_irq_chip = {
static int pcf857x_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
- struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
- struct pcf857x *gpio;
- unsigned int n_latch = 0;
- int status;
-
- if (IS_ENABLED(CONFIG_OF) && np)
- of_property_read_u32(np, "lines-initial-states", &n_latch);
- else if (pdata)
- n_latch = pdata->n_latch;
- else
- dev_dbg(&client->dev, "no platform data\n");
+ struct pcf857x *gpio;
+ unsigned int n_latch = 0;
+ int status;
+
+ device_property_read_u32(&client->dev, "lines-initial-states", &n_latch);
/* Allocate, initialize, and register this gpio_chip. */
gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
@@ -270,12 +286,14 @@ static int pcf857x_probe(struct i2c_client *client)
mutex_init(&gpio->lock);
- gpio->chip.base = pdata ? pdata->gpio_base : -1;
+ gpio->chip.base = -1;
gpio->chip.can_sleep = true;
gpio->chip.parent = &client->dev;
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
+ gpio->chip.get_multiple = pcf857x_get_multiple;
gpio->chip.set = pcf857x_set;
+ gpio->chip.set_multiple = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = id->driver_data;
@@ -377,17 +395,6 @@ static int pcf857x_probe(struct i2c_client *client)
if (status < 0)
goto fail;
- /* Let platform code set up the GPIOs and their users.
- * Now is the first time anyone could use them.
- */
- if (pdata && pdata->setup) {
- status = pdata->setup(client,
- gpio->chip.base, gpio->chip.ngpio,
- pdata->context);
- if (status < 0)
- dev_warn(&client->dev, "setup --> %d\n", status);
- }
-
dev_info(&client->dev, "probed\n");
return 0;
@@ -399,16 +406,6 @@ fail:
return status;
}
-static void pcf857x_remove(struct i2c_client *client)
-{
- struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
- struct pcf857x *gpio = i2c_get_clientdata(client);
-
- if (pdata && pdata->teardown)
- pdata->teardown(client, gpio->chip.base, gpio->chip.ngpio,
- pdata->context);
-}
-
static void pcf857x_shutdown(struct i2c_client *client)
{
struct pcf857x *gpio = i2c_get_clientdata(client);
@@ -420,10 +417,9 @@ static void pcf857x_shutdown(struct i2c_client *client)
static struct i2c_driver pcf857x_driver = {
.driver = {
.name = "pcf857x",
- .of_match_table = of_match_ptr(pcf857x_of_table),
+ .of_match_table = pcf857x_of_table,
},
.probe_new = pcf857x_probe,
- .remove = pcf857x_remove,
.shutdown = pcf857x_shutdown,
.id_table = pcf857x_id,
};
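Note: the new pcf857x_get_multiple()/pcf857x_set_multiple() callbacks above follow the gpiolib convention that `mask` selects the lines of interest and `bits` carries their values; the set path updates only the masked bits of the software latch and writes the whole latch out in one transfer. A standalone sketch of the bit manipulation with made-up values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long out   = 0xf0;	/* current software latch */
		unsigned long mask  = 0x0f;	/* lines the caller wants to touch */
		unsigned long bits  = 0x05;	/* requested values for those lines */
		unsigned long value = 0xa5;	/* pretend this was read back from the chip */

		/* set_multiple(): change only the masked lines */
		out &= ~mask;
		out |= bits & mask;
		printf("latch after set_multiple: 0x%02lx\n", out);	/* 0xf5 */

		/* get_multiple(): report only the masked lines */
		bits &= ~mask;
		bits |= value & mask;
		printf("bits after get_multiple:  0x%02lx\n", bits);	/* 0x05 */

		return 0;
	}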
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index a86ce748384b..6726c32e31e6 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -107,6 +107,8 @@ static void idio_16_irq_mask(struct irq_data *data)
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
+
+ gpiochip_disable_irq(chip, irqd_to_hwirq(data));
}
static void idio_16_irq_unmask(struct irq_data *data)
@@ -117,6 +119,8 @@ static void idio_16_irq_unmask(struct irq_data *data)
const unsigned long prev_irq_mask = idio16gpio->irq_mask;
unsigned long flags;
+ gpiochip_enable_irq(chip, irqd_to_hwirq(data));
+
idio16gpio->irq_mask |= mask;
if (!prev_irq_mask) {
@@ -138,12 +142,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idio_16_irqchip = {
+static const struct irq_chip idio_16_irqchip = {
.name = "pci-idio-16",
.irq_ack = idio_16_irq_ack,
.irq_mask = idio_16_irq_mask,
.irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type
+ .irq_set_type = idio_16_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
@@ -242,7 +248,7 @@ static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idio_16_state_init(&idio16gpio->state);
girq = &idio16gpio->chip.irq;
- girq->chip = &idio_16_irqchip;
+ gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 8a9b98fa418f..463c0613abb9 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -3,15 +3,6 @@
* GPIO driver for the ACCES PCIe-IDIO-24 family
* Copyright (C) 2018 William Breathitt Gray
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* This driver supports the following ACCES devices: PCIe-IDIO-24,
* PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12.
*/
@@ -396,6 +387,8 @@ static void idio_24_irq_mask(struct irq_data *data)
}
raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+
+ gpiochip_disable_irq(chip, irqd_to_hwirq(data));
}
static void idio_24_irq_unmask(struct irq_data *data)
@@ -408,6 +401,8 @@ static void idio_24_irq_unmask(struct irq_data *data)
const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
+ gpiochip_enable_irq(chip, irqd_to_hwirq(data));
+
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
@@ -437,12 +432,14 @@ static int idio_24_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idio_24_irqchip = {
+static const struct irq_chip idio_24_irqchip = {
.name = "pcie-idio-24",
.irq_ack = idio_24_irq_ack,
.irq_mask = idio_24_irq_mask,
.irq_unmask = idio_24_irq_unmask,
- .irq_set_type = idio_24_irq_set_type
+ .irq_set_type = idio_24_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
@@ -535,7 +532,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idio24gpio->chip.set_multiple = idio_24_gpio_set_multiple;
girq = &idio24gpio->chip.irq;
- girq->chip = &idio_24_irqchip;
+ gpio_irq_chip_set_chip(girq, &idio_24_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index e518490c4b68..c3e4d90f6b18 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -47,7 +47,6 @@ enum {
/**
* struct sprd_pmic_eic - PMIC EIC controller
* @chip: the gpio_chip structure.
- * @intc: the irq_chip structure.
* @map: the regmap from the parent device.
* @offset: the EIC controller's offset address of the PMIC.
* @reg: the array to cache the EIC registers.
@@ -56,7 +55,6 @@ enum {
*/
struct sprd_pmic_eic {
struct gpio_chip chip;
- struct irq_chip intc;
struct regmap *map;
u32 offset;
u8 reg[CACHE_NR_REGS];
@@ -151,15 +149,21 @@ static void sprd_pmic_eic_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+ u32 offset = irqd_to_hwirq(data);
pmic_eic->reg[REG_IE] = 0;
pmic_eic->reg[REG_TRIG] = 0;
+
+ gpiochip_disable_irq(chip, offset);
}
static void sprd_pmic_eic_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+ u32 offset = irqd_to_hwirq(data);
+
+ gpiochip_enable_irq(chip, offset);
pmic_eic->reg[REG_IE] = 1;
pmic_eic->reg[REG_TRIG] = 1;
@@ -292,6 +296,17 @@ static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static const struct irq_chip pmic_eic_irq_chip = {
+ .name = "sprd-pmic-eic",
+ .irq_mask = sprd_pmic_eic_irq_mask,
+ .irq_unmask = sprd_pmic_eic_irq_unmask,
+ .irq_set_type = sprd_pmic_eic_irq_set_type,
+ .irq_bus_lock = sprd_pmic_eic_bus_lock,
+ .irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int sprd_pmic_eic_probe(struct platform_device *pdev)
{
struct gpio_irq_chip *irq;
@@ -338,16 +353,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
- pmic_eic->intc.name = dev_name(&pdev->dev);
- pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;
- pmic_eic->intc.irq_unmask = sprd_pmic_eic_irq_unmask;
- pmic_eic->intc.irq_set_type = sprd_pmic_eic_irq_set_type;
- pmic_eic->intc.irq_bus_lock = sprd_pmic_eic_bus_lock;
- pmic_eic->intc.irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock;
- pmic_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
irq = &pmic_eic->chip.irq;
- irq->chip = &pmic_eic->intc;
+ gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
irq->threaded = true;
ret = devm_gpiochip_add_data(&pdev->dev, &pmic_eic->chip, pmic_eic);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 1198ab0305d0..a1630ed4b741 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -171,11 +171,6 @@ static inline struct pxa_gpio_bank *gpio_to_pxabank(struct gpio_chip *c,
return chip_to_pxachip(c)->banks + gpio / 32;
}
-static inline int gpio_is_pxa_type(int type)
-{
- return (type & MMP_GPIO) == 0;
-}
-
static inline int gpio_is_mmp_type(int type)
{
return (type & MMP_GPIO) != 0;
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index 3c414e0005fc..ecb0d3800dfe 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -234,7 +234,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
}
-static const struct of_device_id rpi_exp_gpio_ids[] = {
+static const struct of_device_id rpi_exp_gpio_ids[] __maybe_unused = {
{ .compatible = "raspberrypi,firmware-gpio" },
{ }
};
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 5b117f3bd322..2525adb52f4f 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -663,7 +663,7 @@ static struct platform_driver gpio_rcar_device_driver = {
.driver = {
.name = "gpio_rcar",
.pm = &gpio_rcar_pm_ops,
- .of_match_table = of_match_ptr(gpio_rcar_of_table),
+ .of_match_table = gpio_rcar_of_table,
}
};
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
index 62ba18b3a602..cb2f63eee2aa 100644
--- a/drivers/gpio/gpio-rda.c
+++ b/drivers/gpio/gpio-rda.c
@@ -38,7 +38,6 @@ struct rda_gpio {
struct gpio_chip chip;
void __iomem *base;
spinlock_t lock;
- struct irq_chip irq_chip;
int irq;
};
@@ -74,6 +73,7 @@ static void rda_gpio_irq_mask(struct irq_data *data)
value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ gpiochip_disable_irq(chip, offset);
}
static void rda_gpio_irq_ack(struct irq_data *data)
@@ -154,6 +154,7 @@ static void rda_gpio_irq_unmask(struct irq_data *data)
u32 offset = irqd_to_hwirq(data);
u32 trigger = irqd_get_trigger_type(data);
+ gpiochip_enable_irq(chip, offset);
rda_gpio_set_irq(chip, offset, trigger);
}
@@ -195,6 +196,16 @@ static void rda_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
+static const struct irq_chip rda_gpio_irq_chip = {
+ .name = "rda-gpio",
+ .irq_ack = rda_gpio_irq_ack,
+ .irq_mask = rda_gpio_irq_mask,
+ .irq_unmask = rda_gpio_irq_unmask,
+ .irq_set_type = rda_gpio_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int rda_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -241,15 +252,8 @@ static int rda_gpio_probe(struct platform_device *pdev)
rda_gpio->chip.base = -1;
if (rda_gpio->irq >= 0) {
- rda_gpio->irq_chip.name = "rda-gpio",
- rda_gpio->irq_chip.irq_ack = rda_gpio_irq_ack,
- rda_gpio->irq_chip.irq_mask = rda_gpio_irq_mask,
- rda_gpio->irq_chip.irq_unmask = rda_gpio_irq_unmask,
- rda_gpio->irq_chip.irq_set_type = rda_gpio_irq_set_type,
- rda_gpio->irq_chip.flags = IRQCHIP_SKIP_SET_WAKE,
-
girq = &rda_gpio->chip.irq;
- girq->chip = &rda_gpio->irq_chip;
+ gpio_irq_chip_set_chip(girq, &rda_gpio_irq_chip);
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
girq->parent_handler = rda_gpio_irq_handler;
@@ -286,4 +290,3 @@ module_platform_driver_probe(rda_gpio_driver, rda_gpio_probe);
MODULE_DESCRIPTION("RDA Micro GPIO driver");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index d35169bde25a..73c7260d89c0 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -4,11 +4,19 @@
*
* Copyright (C) 2016 Russell King
*/
-#include <linux/gpio/driver.h>
-#include <linux/gpio/gpio-reg.h>
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/gpio-reg.h>
struct gpio_reg {
struct gpio_chip gc;
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 6383136cbe59..c08c8e528867 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -5,11 +5,17 @@
* Copyright 2020 Michael Walle <michael@walle.cc>
*/
-#include <linux/gpio/driver.h>
-#include <linux/gpio/regmap.h>
-#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
struct gpio_regmap {
struct device *parent;
@@ -111,6 +117,11 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->reg_dat_base && !gpio->reg_set_base)
+ return GPIO_LINE_DIRECTION_IN;
+ if (gpio->reg_set_base && !gpio->reg_dat_base)
+ return GPIO_LINE_DIRECTION_OUT;
+
if (gpio->reg_dir_out_base) {
base = gpio_regmap_addr(gpio->reg_dir_out_base);
invert = 0;
@@ -249,15 +260,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->ngpio = config->ngpio;
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
-
- /*
- * If our regmap is fast_io we should probably set can_sleep to false.
- * Right now, the regmap doesn't save this property, nor is there any
- * access function for it.
- * The only regmap type which uses fast_io is regmap-mmio. For now,
- * assume a safe default of true here.
- */
- chip->can_sleep = true;
+ chip->can_sleep = regmap_might_sleep(config->regmap);
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
@@ -265,8 +268,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
else if (gpio->reg_set_base)
chip->set = gpio_regmap_set;
+ chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
- chip->get_direction = gpio_regmap_get_direction;
chip->direction_input = gpio_regmap_direction_input;
chip->direction_output = gpio_regmap_direction_output;
}
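Note: with the early returns added to gpio_regmap_get_direction() above, a chip described with only a data register is reported as input-only and one with only a set register as output-only, which is why the callback can now be installed unconditionally. A standalone sketch of the inference rule, using a hypothetical struct that only mirrors the relevant fields:

	#include <stdio.h>

	enum dir { DIR_IN, DIR_OUT, DIR_UNKNOWN };

	struct cfg {
		unsigned int reg_dat_base;	/* input data register, 0 if absent */
		unsigned int reg_set_base;	/* output set register, 0 if absent */
	};

	static enum dir get_direction(const struct cfg *c)
	{
		if (c->reg_dat_base && !c->reg_set_base)
			return DIR_IN;		/* read-only chip */
		if (c->reg_set_base && !c->reg_dat_base)
			return DIR_OUT;		/* write-only chip */
		return DIR_UNKNOWN;		/* consult the direction registers */
	}

	int main(void)
	{
		struct cfg in_only  = { .reg_dat_base = 0x10 };
		struct cfg out_only = { .reg_set_base = 0x20 };
		struct cfg both     = { .reg_dat_base = 0x10, .reg_set_base = 0x20 };

		printf("%d %d %d\n", get_direction(&in_only),
		       get_direction(&out_only), get_direction(&both));
		return 0;
	}

The other change in this hunk swaps the hard-coded can_sleep = true for regmap_might_sleep(), so fast_io (e.g. MMIO) regmaps no longer force the chip onto the sleeping path.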
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 200e43a6f4b4..e5de15a2ab9a 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -299,7 +299,7 @@ static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
}
/*
- * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
+ * gpiod_to_irq() callback function. Creates a mapping between a GPIO pin
* and a virtual IRQ, if not already present.
*/
static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index 3e95da717fc9..767c33ae3213 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -236,7 +236,7 @@ MODULE_DEVICE_TABLE(of, sama5d2_piobu_ids);
static struct platform_driver sama5d2_piobu_driver = {
.driver = {
.name = "sama5d2-piobu",
- .of_match_table = of_match_ptr(sama5d2_piobu_ids)
+ .of_match_table = sama5d2_piobu_ids,
},
.probe = sama5d2_piobu_probe,
};
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 238f3210970c..98939cd4a71e 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
parent = irq_find_host(irq_parent);
+ of_node_put(irq_parent);
if (!parent) {
dev_err(dev, "no IRQ parent domain\n");
return -ENODEV;
@@ -269,7 +270,7 @@ static struct platform_driver sifive_gpio_driver = {
.probe = sifive_gpio_probe,
.driver = {
.name = "sifive_gpio",
- .of_match_table = of_match_ptr(sifive_gpio_match),
+ .of_match_table = sifive_gpio_match,
},
};
builtin_platform_driver(sifive_gpio_driver)
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 60514bc5454f..a1c8702f362c 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -377,8 +377,8 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
ret = fwnode_property_read_string(swnode, "gpio-sim,label", &label);
if (ret) {
- label = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
- dev_name(dev), fwnode_get_name(swnode));
+ label = devm_kasprintf(dev, GFP_KERNEL, "%s-%pfwP",
+ dev_name(dev), swnode);
if (!label)
return -ENOMEM;
}
@@ -736,7 +736,7 @@ static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
gpiod_remove_hogs(dev->hogs);
- for (hog = dev->hogs; !hog->chip_label; hog++) {
+ for (hog = dev->hogs; hog->chip_label; hog++) {
kfree(hog->chip_label);
kfree(hog->line_name);
}
@@ -784,10 +784,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
GFP_KERNEL);
else
hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%s",
+ "gpio-sim.%u-%pfwP",
dev->id,
- fwnode_get_name(
- bank->swnode));
+ bank->swnode);
if (!hog->chip_label) {
gpio_sim_remove_hogs(dev);
return -ENOMEM;
@@ -954,9 +953,9 @@ static void gpio_sim_device_deactivate_unlocked(struct gpio_sim_device *dev)
swnode = dev_fwnode(&dev->pdev->dev);
platform_device_unregister(dev->pdev);
+ gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
dev->pdev = NULL;
- gpio_sim_remove_hogs(dev);
}
static ssize_t
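Note: the hog-removal fix above concerns a sentinel-terminated array: dev->hogs ends with a zeroed entry, so the cleanup loop must keep going while chip_label is non-NULL; the previous `!hog->chip_label` condition stopped at the first entry and leaked every label and line name. A standalone sketch of the corrected iteration, with simplified types:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct hog {
		char *chip_label;	/* NULL marks the terminating entry */
	};

	static void free_hogs(struct hog *hogs)
	{
		struct hog *hog;

		for (hog = hogs; hog->chip_label; hog++)	/* not: !hog->chip_label */
			free(hog->chip_label);
		free(hogs);
	}

	int main(void)
	{
		struct hog *hogs = calloc(3, sizeof(*hogs));

		if (!hogs)
			return 1;
		hogs[0].chip_label = strdup("label-a");
		hogs[1].chip_label = strdup("label-b");
		/* hogs[2] stays zeroed and acts as the sentinel */
		free_hogs(hogs);
		return 0;
	}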
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index f8c5e9fc4bac..051bc99bdfb2 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -10,7 +10,6 @@
struct gpio_siox_ddata {
struct gpio_chip gchip;
- struct irq_chip ichip;
struct mutex lock;
u8 setdata[1];
u8 getdata[3];
@@ -97,9 +96,8 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
static void gpio_siox_irq_ack(struct irq_data *d)
{
- struct irq_chip *ic = irq_data_get_irq_chip(d);
- struct gpio_siox_ddata *ddata =
- container_of(ic, struct gpio_siox_ddata, ichip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);
raw_spin_lock(&ddata->irqlock);
ddata->irq_status &= ~(1 << d->hwirq);
@@ -108,21 +106,21 @@ static void gpio_siox_irq_ack(struct irq_data *d)
static void gpio_siox_irq_mask(struct irq_data *d)
{
- struct irq_chip *ic = irq_data_get_irq_chip(d);
- struct gpio_siox_ddata *ddata =
- container_of(ic, struct gpio_siox_ddata, ichip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);
raw_spin_lock(&ddata->irqlock);
ddata->irq_enable &= ~(1 << d->hwirq);
raw_spin_unlock(&ddata->irqlock);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void gpio_siox_irq_unmask(struct irq_data *d)
{
- struct irq_chip *ic = irq_data_get_irq_chip(d);
- struct gpio_siox_ddata *ddata =
- container_of(ic, struct gpio_siox_ddata, ichip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
raw_spin_lock(&ddata->irqlock);
ddata->irq_enable |= 1 << d->hwirq;
raw_spin_unlock(&ddata->irqlock);
@@ -130,9 +128,8 @@ static void gpio_siox_irq_unmask(struct irq_data *d)
static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
{
- struct irq_chip *ic = irq_data_get_irq_chip(d);
- struct gpio_siox_ddata *ddata =
- container_of(ic, struct gpio_siox_ddata, ichip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);
raw_spin_lock(&ddata->irqlock);
ddata->irq_type[d->hwirq] = type;
@@ -143,8 +140,7 @@ static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
{
- struct gpio_siox_ddata *ddata =
- container_of(chip, struct gpio_siox_ddata, gchip);
+ struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
int ret;
mutex_lock(&ddata->lock);
@@ -167,8 +163,7 @@ static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
static void gpio_siox_set(struct gpio_chip *chip,
unsigned int offset, int value)
{
- struct gpio_siox_ddata *ddata =
- container_of(chip, struct gpio_siox_ddata, gchip);
+ struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
u8 mask = 1 << (19 - offset);
mutex_lock(&ddata->lock);
@@ -208,11 +203,22 @@ static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
return GPIO_LINE_DIRECTION_OUT;
}
+static const struct irq_chip gpio_siox_irq_chip = {
+ .name = "siox-gpio",
+ .irq_ack = gpio_siox_irq_ack,
+ .irq_mask = gpio_siox_irq_mask,
+ .irq_unmask = gpio_siox_irq_unmask,
+ .irq_set_type = gpio_siox_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int gpio_siox_probe(struct siox_device *sdevice)
{
struct gpio_siox_ddata *ddata;
struct gpio_irq_chip *girq;
struct device *dev = &sdevice->dev;
+ struct gpio_chip *gc;
int ret;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
@@ -224,30 +230,25 @@ static int gpio_siox_probe(struct siox_device *sdevice)
mutex_init(&ddata->lock);
raw_spin_lock_init(&ddata->irqlock);
- ddata->gchip.base = -1;
- ddata->gchip.can_sleep = 1;
- ddata->gchip.parent = dev;
- ddata->gchip.owner = THIS_MODULE;
- ddata->gchip.get = gpio_siox_get;
- ddata->gchip.set = gpio_siox_set;
- ddata->gchip.direction_input = gpio_siox_direction_input;
- ddata->gchip.direction_output = gpio_siox_direction_output;
- ddata->gchip.get_direction = gpio_siox_get_direction;
- ddata->gchip.ngpio = 20;
-
- ddata->ichip.name = "siox-gpio";
- ddata->ichip.irq_ack = gpio_siox_irq_ack;
- ddata->ichip.irq_mask = gpio_siox_irq_mask;
- ddata->ichip.irq_unmask = gpio_siox_irq_unmask;
- ddata->ichip.irq_set_type = gpio_siox_irq_set_type;
-
- girq = &ddata->gchip.irq;
- girq->chip = &ddata->ichip;
+ gc = &ddata->gchip;
+ gc->base = -1;
+ gc->can_sleep = 1;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->get = gpio_siox_get;
+ gc->set = gpio_siox_set;
+ gc->direction_input = gpio_siox_direction_input;
+ gc->direction_output = gpio_siox_direction_output;
+ gc->get_direction = gpio_siox_get_direction;
+ gc->ngpio = 20;
+
+ girq = &gc->irq;
+ gpio_irq_chip_set_chip(girq, &gpio_siox_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
girq->threaded = true;
- ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL);
+ ret = devm_gpiochip_add_data(dev, gc, ddata);
if (ret)
dev_err(dev, "Failed to register gpio chip (%d)\n", ret);
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 9bff63990eee..072b4e653216 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -120,6 +120,7 @@ static void sprd_gpio_irq_mask(struct irq_data *data)
u32 offset = irqd_to_hwirq(data);
sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 0);
+ gpiochip_disable_irq(chip, offset);
}
static void sprd_gpio_irq_ack(struct irq_data *data)
@@ -136,6 +137,7 @@ static void sprd_gpio_irq_unmask(struct irq_data *data)
u32 offset = irqd_to_hwirq(data);
sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 1);
+ gpiochip_enable_irq(chip, offset);
}
static int sprd_gpio_irq_set_type(struct irq_data *data,
@@ -205,13 +207,14 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
-static struct irq_chip sprd_gpio_irqchip = {
+static const struct irq_chip sprd_gpio_irqchip = {
.name = "sprd-gpio",
.irq_ack = sprd_gpio_irq_ack,
.irq_mask = sprd_gpio_irq_mask,
.irq_unmask = sprd_gpio_irq_unmask,
.irq_set_type = sprd_gpio_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int sprd_gpio_probe(struct platform_device *pdev)
@@ -245,7 +248,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
irq = &sprd_gpio->chip.irq;
- irq->chip = &sprd_gpio_irqchip;
+ gpio_irq_chip_set_chip(irq, &sprd_gpio_irqchip);
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->parent_handler = sprd_gpio_irq_handler;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 0fa4f0a93378..27cc4da53565 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -234,6 +234,7 @@ static void stmpe_gpio_irq_mask(struct irq_data *d)
int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
+ gpiochip_disable_irq(gc, offset);
}
static void stmpe_gpio_irq_unmask(struct irq_data *d)
@@ -244,6 +245,7 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
int regoffset = offset / 8;
int mask = BIT(offset % 8);
+ gpiochip_enable_irq(gc, offset);
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
}
@@ -357,13 +359,15 @@ static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
}
}
-static struct irq_chip stmpe_gpio_irq_chip = {
+static const struct irq_chip stmpe_gpio_irq_chip = {
.name = "stmpe-gpio",
.irq_bus_lock = stmpe_gpio_irq_lock,
.irq_bus_sync_unlock = stmpe_gpio_irq_sync_unlock,
.irq_mask = stmpe_gpio_irq_mask,
.irq_unmask = stmpe_gpio_irq_unmask,
.irq_set_type = stmpe_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
#define MAX_GPIOS 24
@@ -511,7 +515,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
}
girq = &stmpe_gpio->chip.irq;
- girq->chip = &stmpe_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &stmpe_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 0ce1543426a4..4750ea34204c 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -292,7 +292,7 @@ static int xway_stp_probe(struct platform_device *pdev)
}
/* check which edge trigger we should use, default to a falling edge */
- if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+ if (!of_property_read_bool(pdev->dev.of_node, "lantiq,rising"))
chip->edge = XWAY_STP_FALLING;
clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
new file mode 100644
index 000000000000..e990781935ba
--- /dev/null
+++ b/drivers/gpio/gpio-tangier.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Tangier GPIO driver
+ *
+ * Copyright (c) 2016, 2021, 2023 Intel Corporation.
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Pandith N <pandith.n@intel.com>
+ * Raag Jadav <raag.jadav@intel.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/spinlock.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
+
+#include "gpio-tangier.h"
+
+#define GCCR 0x000 /* Controller configuration */
+#define GPLR 0x004 /* Pin level r/o */
+#define GPDR 0x01c /* Pin direction */
+#define GPSR 0x034 /* Pin set w/o */
+#define GPCR 0x04c /* Pin clear w/o */
+#define GRER 0x064 /* Rising edge detect */
+#define GFER 0x07c /* Falling edge detect */
+#define GFBR 0x094 /* Glitch filter bypass */
+#define GIMR 0x0ac /* Interrupt mask */
+#define GISR 0x0c4 /* Interrupt source */
+#define GITR 0x300 /* Input type */
+#define GLPR 0x318 /* Level input polarity */
+
+/**
+ * struct tng_gpio_context - Context to be saved during suspend-resume
+ * @level: Pin level
+ * @gpdr: Pin direction
+ * @grer: Rising edge detect enable
+ * @gfer: Falling edge detect enable
+ * @gimr: Interrupt mask
+ * @gwmr: Wake mask
+ */
+struct tng_gpio_context {
+ u32 level;
+ u32 gpdr;
+ u32 grer;
+ u32 gfer;
+ u32 gimr;
+ u32 gwmr;
+};
+
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned int offset,
+ unsigned int reg)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ u8 reg_offset = offset / 32;
+
+ return priv->reg_base + reg + reg_offset * 4;
+}
+
+static void __iomem *gpio_reg_and_bit(struct gpio_chip *chip, unsigned int offset,
+ unsigned int reg, u8 *bit)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ u8 reg_offset = offset / 32;
+ u8 shift = offset % 32;
+
+ *bit = shift;
+ return priv->reg_base + reg + reg_offset * 4;
+}
+
+static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *gplr;
+ u8 shift;
+
+ gplr = gpio_reg_and_bit(chip, offset, GPLR, &shift);
+
+ return !!(readl(gplr) & BIT(shift));
+}
+
+static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ unsigned long flags;
+ void __iomem *reg;
+ u8 shift;
+
+ reg = gpio_reg_and_bit(chip, offset, value ? GPSR : GPCR, &shift);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ writel(BIT(shift), reg);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ unsigned long flags;
+ void __iomem *gpdr;
+ u32 value;
+ u8 shift;
+
+ gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(gpdr);
+ value &= ~BIT(shift);
+ writel(value, gpdr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int tng_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ unsigned long flags;
+ void __iomem *gpdr;
+ u8 shift;
+
+ gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift);
+ tng_gpio_set(chip, offset, value);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(gpdr);
+ value |= BIT(shift);
+ writel(value, gpdr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int tng_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *gpdr;
+ u8 shift;
+
+ gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift);
+
+ if (readl(gpdr) & BIT(shift))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int tng_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned int debounce)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ unsigned long flags;
+ void __iomem *gfbr;
+ u32 value;
+ u8 shift;
+
+ gfbr = gpio_reg_and_bit(chip, offset, GFBR, &shift);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(gfbr);
+ if (debounce)
+ value &= ~BIT(shift);
+ else
+ value |= BIT(shift);
+ writel(value, gfbr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return gpiochip_generic_config(chip, offset, config);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ debounce = pinconf_to_config_argument(config);
+ return tng_gpio_set_debounce(chip, offset, debounce);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void tng_irq_ack(struct irq_data *d)
+{
+ struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ unsigned long flags;
+ void __iomem *gisr;
+ u8 shift;
+
+ gisr = gpio_reg_and_bit(&priv->chip, gpio, GISR, &shift);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+ writel(BIT(shift), gisr);
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
+{
+ unsigned long flags;
+ void __iomem *gimr;
+ u32 value;
+ u8 shift;
+
+ gimr = gpio_reg_and_bit(&priv->chip, gpio, GIMR, &shift);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(gimr);
+ if (unmask)
+ value |= BIT(shift);
+ else
+ value &= ~BIT(shift);
+ writel(value, gimr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void tng_irq_mask(struct irq_data *d)
+{
+ struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+
+ tng_irq_unmask_mask(priv, gpio, false);
+ gpiochip_disable_irq(&priv->chip, gpio);
+}
+
+static void tng_irq_unmask(struct irq_data *d)
+{
+ struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(&priv->chip, gpio);
+ tng_irq_unmask_mask(priv, gpio, true);
+}
+
+static int tng_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);
+ void __iomem *gitr = gpio_reg(&priv->chip, gpio, GITR);
+ void __iomem *glpr = gpio_reg(&priv->chip, gpio, GLPR);
+ u8 shift = gpio % 32;
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(grer);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value |= BIT(shift);
+ else
+ value &= ~BIT(shift);
+ writel(value, grer);
+
+ value = readl(gfer);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value |= BIT(shift);
+ else
+ value &= ~BIT(shift);
+ writel(value, gfer);
+
+ /*
+ * To prevent glitches from triggering an unintended level interrupt,
+ * configure the GLPR register first and only then configure GITR.
+ */
+ value = readl(glpr);
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value |= BIT(shift);
+ else
+ value &= ~BIT(shift);
+ writel(value, glpr);
+
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ value = readl(gitr);
+ value |= BIT(shift);
+ writel(value, gitr);
+
+ irq_set_handler_locked(d, handle_level_irq);
+ } else if (type & IRQ_TYPE_EDGE_BOTH) {
+ value = readl(gitr);
+ value &= ~BIT(shift);
+ writel(value, gitr);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+ }
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int tng_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *gwmr = gpio_reg(&priv->chip, gpio, priv->wake_regs.gwmr);
+ void __iomem *gwsr = gpio_reg(&priv->chip, gpio, priv->wake_regs.gwsr);
+ u8 shift = gpio % 32;
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ /* Clear the existing wake status */
+ writel(BIT(shift), gwsr);
+
+ value = readl(gwmr);
+ if (on)
+ value |= BIT(shift);
+ else
+ value &= ~BIT(shift);
+ writel(value, gwmr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ dev_dbg(priv->dev, "%s wake for gpio %lu\n", str_enable_disable(on), gpio);
+ return 0;
+}
+
+static const struct irq_chip tng_irqchip = {
+ .name = "gpio-tangier",
+ .irq_ack = tng_irq_ack,
+ .irq_mask = tng_irq_mask,
+ .irq_unmask = tng_irq_unmask,
+ .irq_set_type = tng_irq_set_type,
+ .irq_set_wake = tng_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void tng_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long base, gpio;
+
+ chained_irq_enter(irqchip, desc);
+
+ /* Check the GPIO controller to see which pin triggered the interrupt */
+ for (base = 0; base < priv->chip.ngpio; base += 32) {
+ void __iomem *gisr = gpio_reg(&priv->chip, base, GISR);
+ void __iomem *gimr = gpio_reg(&priv->chip, base, GIMR);
+ unsigned long pending, enabled;
+
+ pending = readl(gisr);
+ enabled = readl(gimr);
+
+ /* Keep only the interrupts that are enabled */
+ pending &= enabled;
+
+ for_each_set_bit(gpio, &pending, 32)
+ generic_handle_domain_irq(gc->irq.domain, base + gpio);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int tng_irq_init_hw(struct gpio_chip *chip)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ void __iomem *reg;
+ unsigned int base;
+
+ for (base = 0; base < priv->chip.ngpio; base += 32) {
+ /* Clear the rising-edge detect register */
+ reg = gpio_reg(&priv->chip, base, GRER);
+ writel(0, reg);
+
+ /* Clear the falling-edge detect register */
+ reg = gpio_reg(&priv->chip, base, GFER);
+ writel(0, reg);
+ }
+
+ return 0;
+}
+
+static int tng_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct tng_gpio *priv = gpiochip_get_data(chip);
+ const struct tng_gpio_pinrange *range;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < priv->pin_info.nranges; i++) {
+ range = &priv->pin_info.pin_ranges[i];
+ ret = gpiochip_add_pin_range(&priv->chip,
+ priv->pin_info.name,
+ range->gpio_base,
+ range->pin_base,
+ range->npins);
+ if (ret) {
+ dev_err(priv->dev, "failed to add GPIO pin range\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
+{
+ const struct tng_gpio_info *info = &gpio->info;
+ struct gpio_irq_chip *girq;
+ int ret;
+
+ gpio->ctx = devm_kcalloc(dev, DIV_ROUND_UP(info->ngpio, 32), sizeof(*gpio->ctx), GFP_KERNEL);
+ if (!gpio->ctx)
+ return -ENOMEM;
+
+ gpio->chip.label = dev_name(dev);
+ gpio->chip.parent = dev;
+ gpio->chip.request = gpiochip_generic_request;
+ gpio->chip.free = gpiochip_generic_free;
+ gpio->chip.direction_input = tng_gpio_direction_input;
+ gpio->chip.direction_output = tng_gpio_direction_output;
+ gpio->chip.get = tng_gpio_get;
+ gpio->chip.set = tng_gpio_set;
+ gpio->chip.get_direction = tng_gpio_get_direction;
+ gpio->chip.set_config = tng_gpio_set_config;
+ gpio->chip.base = info->base;
+ gpio->chip.ngpio = info->ngpio;
+ gpio->chip.can_sleep = false;
+ gpio->chip.add_pin_ranges = tng_gpio_add_pin_ranges;
+
+ raw_spin_lock_init(&gpio->lock);
+
+ girq = &gpio->chip.irq;
+ gpio_irq_chip_set_chip(girq, &tng_irqchip);
+ girq->init_hw = tng_irq_init_hw;
+ girq->parent_handler = tng_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = gpio->irq;
+ girq->first = info->first;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ ret = devm_gpiochip_add_data(dev, &gpio->chip, gpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "gpiochip_add error\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER);
+
+int tng_gpio_suspend(struct device *dev)
+{
+ struct tng_gpio *priv = dev_get_drvdata(dev);
+ struct tng_gpio_context *ctx = priv->ctx;
+ unsigned long flags;
+ unsigned int base;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ for (base = 0; base < priv->chip.ngpio; base += 32, ctx++) {
+ /* GPLR is RO, values read will be restored using GPSR */
+ ctx->level = readl(gpio_reg(&priv->chip, base, GPLR));
+
+ ctx->gpdr = readl(gpio_reg(&priv->chip, base, GPDR));
+ ctx->grer = readl(gpio_reg(&priv->chip, base, GRER));
+ ctx->gfer = readl(gpio_reg(&priv->chip, base, GFER));
+ ctx->gimr = readl(gpio_reg(&priv->chip, base, GIMR));
+
+ ctx->gwmr = readl(gpio_reg(&priv->chip, base, priv->wake_regs.gwmr));
+ }
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(tng_gpio_suspend, GPIO_TANGIER);
+
+int tng_gpio_resume(struct device *dev)
+{
+ struct tng_gpio *priv = dev_get_drvdata(dev);
+ struct tng_gpio_context *ctx = priv->ctx;
+ unsigned long flags;
+ unsigned int base;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ for (base = 0; base < priv->chip.ngpio; base += 32, ctx++) {
+ /* GPLR is RO, so the saved levels are restored via GPSR */
+ writel(ctx->level, gpio_reg(&priv->chip, base, GPSR));
+
+ writel(ctx->gpdr, gpio_reg(&priv->chip, base, GPDR));
+ writel(ctx->grer, gpio_reg(&priv->chip, base, GRER));
+ writel(ctx->gfer, gpio_reg(&priv->chip, base, GFER));
+ writel(ctx->gimr, gpio_reg(&priv->chip, base, GIMR));
+
+ writel(ctx->gwmr, gpio_reg(&priv->chip, base, priv->wake_regs.gwmr));
+ }
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(tng_gpio_resume, GPIO_TANGIER);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
+MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
+MODULE_DESCRIPTION("Intel Tangier GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tangier.h b/drivers/gpio/gpio-tangier.h
new file mode 100644
index 000000000000..16c4f22908fb
--- /dev/null
+++ b/drivers/gpio/gpio-tangier.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Tangier GPIO functions
+ *
+ * Copyright (c) 2016, 2021, 2023 Intel Corporation.
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Pandith N <pandith.n@intel.com>
+ * Raag Jadav <raag.jadav@intel.com>
+ */
+
+#ifndef _GPIO_TANGIER_H_
+#define _GPIO_TANGIER_H_
+
+#include <linux/gpio/driver.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+struct device;
+
+struct tng_gpio_context;
+
+/* Elkhart Lake specific wake registers */
+#define GWMR_EHL 0x100 /* Wake mask */
+#define GWSR_EHL 0x118 /* Wake source */
+#define GSIR_EHL 0x130 /* Secure input */
+
+/* Merrifield specific wake registers */
+#define GWMR_MRFLD 0x400 /* Wake mask */
+#define GWSR_MRFLD 0x418 /* Wake source */
+#define GSIR_MRFLD 0xc00 /* Secure input */
+
+/**
+ * struct tng_wake_regs - Platform specific wake registers
+ * @gwmr: Wake mask
+ * @gwsr: Wake source
+ * @gsir: Secure input
+ */
+struct tng_wake_regs {
+ u32 gwmr;
+ u32 gwsr;
+ u32 gsir;
+};
+
+/**
+ * struct tng_gpio_pinrange - Map pin numbers to GPIO numbers
+ * @gpio_base: Starting GPIO number of this range
+ * @pin_base: Starting pin number of this range
+ * @npins: Number of pins in this range
+ */
+struct tng_gpio_pinrange {
+ unsigned int gpio_base;
+ unsigned int pin_base;
+ unsigned int npins;
+};
+
+#define GPIO_PINRANGE(gstart, gend, pstart) \
+(struct tng_gpio_pinrange) { \
+ .gpio_base = (gstart), \
+ .pin_base = (pstart), \
+ .npins = (gend) - (gstart) + 1, \
+ }
+
+/**
+ * struct tng_gpio_pin_info - Platform specific pinout information
+ * @pin_ranges: Pin to GPIO mapping
+ * @nranges: Number of pin ranges
+ * @name: Respective pinctrl device name
+ */
+struct tng_gpio_pin_info {
+ const struct tng_gpio_pinrange *pin_ranges;
+ unsigned int nranges;
+ const char *name;
+};
+
+/**
+ * struct tng_gpio_info - Platform specific GPIO and IRQ information
+ * @base: GPIO base to start numbering with
+ * @ngpio: Number of GPIOs supported by the controller
+ * @first: First IRQ to start numbering with
+ */
+struct tng_gpio_info {
+ int base;
+ u16 ngpio;
+ unsigned int first;
+};
+
+/**
+ * struct tng_gpio - Platform specific private data
+ * @chip: Instance of the struct gpio_chip
+ * @reg_base: Base address of MMIO registers
+ * @irq: Interrupt for the GPIO device
+ * @lock: Synchronization lock to prevent I/O race conditions
+ * @dev: The GPIO device
+ * @ctx: Context to be saved during suspend-resume
+ * @wake_regs: Platform specific wake registers
+ * @pin_info: Platform specific pinout information
+ * @info: Platform specific GPIO and IRQ information
+ */
+struct tng_gpio {
+ struct gpio_chip chip;
+ void __iomem *reg_base;
+ int irq;
+ raw_spinlock_t lock;
+ struct device *dev;
+ struct tng_gpio_context *ctx;
+ struct tng_wake_regs wake_regs;
+ struct tng_gpio_pin_info pin_info;
+ struct tng_gpio_info info;
+};
+
+int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio);
+
+int tng_gpio_suspend(struct device *dev);
+int tng_gpio_resume(struct device *dev);
+
+#endif /* _GPIO_TANGIER_H_ */
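
For context, a platform front-end driver would consume the gpio-tangier library roughly as follows. This is a minimal sketch built only from the devm_tng_gpio_probe()/tng_gpio_suspend()/tng_gpio_resume() API and the struct fields shown above; the example_* identifiers, the resource handling, and the pin-range table are assumptions for illustration, not part of this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "gpio-tangier.h"

/* Hypothetical pin-to-GPIO mapping, only to show how GPIO_PINRANGE() is used */
static const struct tng_gpio_pinrange example_ranges[] = {
	GPIO_PINRANGE(0, 11, 146),
	GPIO_PINRANGE(12, 63, 0),
};

static int example_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tng_gpio *gpio;

	gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;

	gpio->dev = dev;
	gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpio->reg_base))
		return PTR_ERR(gpio->reg_base);

	gpio->irq = platform_get_irq(pdev, 0);
	if (gpio->irq < 0)
		return gpio->irq;

	/* Pick the platform's wake register set, e.g. the Merrifield one */
	gpio->wake_regs.gwmr = GWMR_MRFLD;
	gpio->wake_regs.gwsr = GWSR_MRFLD;
	gpio->wake_regs.gsir = GSIR_MRFLD;

	gpio->pin_info.pin_ranges = example_ranges;
	gpio->pin_info.nranges = ARRAY_SIZE(example_ranges);
	gpio->pin_info.name = "pinctrl-example";

	gpio->info.base = -1;
	gpio->info.ngpio = 64;
	gpio->info.first = 0;

	/* tng_gpio_suspend()/resume() look up this drvdata */
	platform_set_drvdata(pdev, gpio);

	return devm_tng_gpio_probe(dev, gpio);
}

/* The exported suspend/resume helpers can back the driver's dev_pm_ops */
static DEFINE_SIMPLE_DEV_PM_OPS(example_gpio_pm_ops,
				tng_gpio_suspend, tng_gpio_resume);
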
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index de6afa3f9716..78f8790168ae 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -167,7 +167,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tb10x_gpio);
- if (of_find_property(np, "interrupt-controller", NULL)) {
+ if (of_property_read_bool(np, "interrupt-controller")) {
struct irq_chip_generic *gc;
ret = platform_get_irq(pdev, 0);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index fdc5bdcd5638..b904de0b1784 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -670,13 +670,14 @@ static unsigned int tegra186_gpio_child_offset_to_irq(struct gpio_chip *chip,
static const struct of_device_id tegra186_pmc_of_match[] = {
{ .compatible = "nvidia,tegra186-pmc" },
{ .compatible = "nvidia,tegra194-pmc" },
+ { .compatible = "nvidia,tegra234-pmc" },
{ /* sentinel */ }
};
static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio)
{
struct device *dev = gpio->gpio.parent;
- unsigned int i, j;
+ unsigned int i;
u32 value;
for (i = 0; i < gpio->soc->num_ports; i++) {
@@ -698,27 +699,23 @@ static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio)
* On Tegra194 and later, each pin can be routed to one or more
* interrupts.
*/
- for (j = 0; j < gpio->num_irqs_per_bank; j++) {
- dev_dbg(dev, "programming default interrupt routing for port %s\n",
- port->name);
-
- offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, j);
-
- /*
- * By default we only want to route GPIO pins to IRQ 0. This works
- * only under the assumption that we're running as the host kernel
- * and hence all GPIO pins are owned by Linux.
- *
- * For cases where Linux is the guest OS, the hypervisor will have
- * to configure the interrupt routing and pass only the valid
- * interrupts via device tree.
- */
- if (j == 0) {
- value = readl(base + offset);
- value = BIT(port->pins) - 1;
- writel(value, base + offset);
- }
- }
+ dev_dbg(dev, "programming default interrupt routing for port %s\n",
+ port->name);
+
+ offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, 0);
+
+ /*
+ * By default we only want to route GPIO pins to IRQ 0. This works
+ * only under the assumption that we're running as the host kernel
+ * and hence all GPIO pins are owned by Linux.
+ *
+ * For cases where Linux is the guest OS, the hypervisor will have
+ * to configure the interrupt routing and pass only the valid
+ * interrupts via device tree.
+ */
+ value = readl(base + offset);
+ value = BIT(port->pins) - 1;
+ writel(value, base + offset);
}
}
}
@@ -1137,6 +1134,7 @@ static const struct tegra_gpio_soc tegra234_aon_soc = {
.name = "tegra234-gpio-aon",
.instance = 1,
.num_irqs_per_bank = 8,
+ .has_gte = true,
};
#define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index cc62c6e64103..8521c6aacace 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -354,16 +354,22 @@ static int thunderx_gpio_irq_set_type(struct irq_data *d,
return IRQ_SET_MASK_OK;
}
-static void thunderx_gpio_irq_enable(struct irq_data *data)
+static void thunderx_gpio_irq_enable(struct irq_data *d)
{
- irq_chip_enable_parent(data);
- thunderx_gpio_irq_unmask(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+ irq_chip_enable_parent(d);
+ thunderx_gpio_irq_unmask(d);
}
-static void thunderx_gpio_irq_disable(struct irq_data *data)
+static void thunderx_gpio_irq_disable(struct irq_data *d)
{
- thunderx_gpio_irq_mask(data);
- irq_chip_disable_parent(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ thunderx_gpio_irq_mask(d);
+ irq_chip_disable_parent(d);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
/*
@@ -372,7 +378,7 @@ static void thunderx_gpio_irq_disable(struct irq_data *data)
* semantics and other acknowledgment tasks associated with the GPIO
* mechanism.
*/
-static struct irq_chip thunderx_gpio_irq_chip = {
+static const struct irq_chip thunderx_gpio_irq_chip = {
.name = "GPIO",
.irq_enable = thunderx_gpio_irq_enable,
.irq_disable = thunderx_gpio_irq_disable,
@@ -383,8 +389,8 @@ static struct irq_chip thunderx_gpio_irq_chip = {
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_type = thunderx_gpio_irq_set_type,
-
- .flags = IRQCHIP_SET_TYPE_MASKED
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -526,7 +532,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->set_multiple = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
girq = &chip->irq;
- girq->chip = &thunderx_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
girq->fwnode = of_node_to_fwnode(dev->of_node);
girq->parent_domain =
irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
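
Several conversions in this series (stmpe, thunderx, tqmx86, vf610, visconti, xilinx, xlp) follow the same immutable-irqchip pattern: the per-instance struct irq_chip becomes a shared const one, the mask/unmask (or enable/disable) callbacks gain gpiochip_disable_irq()/gpiochip_enable_irq() calls, and the chip is installed with gpio_irq_chip_set_chip(). A condensed sketch of that pattern, with hypothetical foo_* names standing in for a driver's own callbacks:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void foo_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* ... mask the interrupt in hardware ... */
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
	/* ... unmask the interrupt in hardware ... */
}

static const struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* In probe, instead of assigning girq->chip directly: */
static void foo_setup_irqchip(struct gpio_chip *gc)
{
	gpio_irq_chip_set_chip(&gc->irq, &foo_irq_chip);
}
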
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index e739dcea61b2..6f8bd1155db7 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#define TQMX86_NGPIO 8
@@ -34,7 +35,6 @@
struct tqmx86_gpio_data {
struct gpio_chip chip;
- struct irq_chip irq_chip;
void __iomem *io_base;
int irq;
raw_spinlock_t spinlock;
@@ -122,6 +122,7 @@ static void tqmx86_gpio_irq_mask(struct irq_data *data)
gpiic &= ~mask;
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
static void tqmx86_gpio_irq_unmask(struct irq_data *data)
@@ -134,6 +135,7 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
+ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~mask;
@@ -226,6 +228,22 @@ static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
clear_bit(3, valid_mask);
}
+static void tqmx86_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, gc->label);
+}
+
+static const struct irq_chip tqmx86_gpio_irq_chip = {
+ .irq_mask = tqmx86_gpio_irq_mask,
+ .irq_unmask = tqmx86_gpio_irq_unmask,
+ .irq_set_type = tqmx86_gpio_irq_set_type,
+ .irq_print_chip = tqmx86_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int tqmx86_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -277,14 +295,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (irq > 0) {
- struct irq_chip *irq_chip = &gpio->irq_chip;
u8 irq_status;
- irq_chip->name = chip->label;
- irq_chip->irq_mask = tqmx86_gpio_irq_mask;
- irq_chip->irq_unmask = tqmx86_gpio_irq_unmask;
- irq_chip->irq_set_type = tqmx86_gpio_irq_set_type;
-
/* Mask all interrupts */
tqmx86_gpio_write(gpio, 0, TQMX86_GPIIC);
@@ -293,7 +305,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
girq = &chip->irq;
- girq->chip = irq_chip;
+ gpio_irq_chip_set_chip(girq, &tqmx86_gpio_irq_chip);
girq->parent_handler = tqmx86_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
deleted file mode 100644
index 676adf1f198a..000000000000
--- a/drivers/gpio/gpio-ucb1400.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Philips UCB1400 GPIO driver
- *
- * Author: Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/ucb1400.h>
-#include <linux/gpio/driver.h>
-
-static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off)
-{
- struct ucb1400_gpio *gpio;
- gpio = gpiochip_get_data(gc);
- ucb1400_gpio_set_direction(gpio->ac97, off, 0);
- return 0;
-}
-
-static int ucb1400_gpio_dir_out(struct gpio_chip *gc, unsigned off, int val)
-{
- struct ucb1400_gpio *gpio;
- gpio = gpiochip_get_data(gc);
- ucb1400_gpio_set_direction(gpio->ac97, off, 1);
- ucb1400_gpio_set_value(gpio->ac97, off, val);
- return 0;
-}
-
-static int ucb1400_gpio_get(struct gpio_chip *gc, unsigned off)
-{
- struct ucb1400_gpio *gpio;
-
- gpio = gpiochip_get_data(gc);
- return !!ucb1400_gpio_get_value(gpio->ac97, off);
-}
-
-static void ucb1400_gpio_set(struct gpio_chip *gc, unsigned off, int val)
-{
- struct ucb1400_gpio *gpio;
- gpio = gpiochip_get_data(gc);
- ucb1400_gpio_set_value(gpio->ac97, off, val);
-}
-
-static int ucb1400_gpio_probe(struct platform_device *dev)
-{
- struct ucb1400_gpio *ucb = dev_get_platdata(&dev->dev);
- int err = 0;
-
- if (!(ucb && ucb->gpio_offset)) {
- err = -EINVAL;
- goto err;
- }
-
- platform_set_drvdata(dev, ucb);
-
- ucb->gc.label = "ucb1400_gpio";
- ucb->gc.base = ucb->gpio_offset;
- ucb->gc.ngpio = 10;
- ucb->gc.owner = THIS_MODULE;
-
- ucb->gc.direction_input = ucb1400_gpio_dir_in;
- ucb->gc.direction_output = ucb1400_gpio_dir_out;
- ucb->gc.get = ucb1400_gpio_get;
- ucb->gc.set = ucb1400_gpio_set;
- ucb->gc.can_sleep = true;
-
- err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
-
-err:
- return err;
-
-}
-
-static struct platform_driver ucb1400_gpio_driver = {
- .probe = ucb1400_gpio_probe,
- .driver = {
- .name = "ucb1400_gpio"
- },
-};
-
-module_platform_driver(ucb1400_gpio_driver);
-
-MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ucb1400_gpio");
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 9db42f6a2043..d3f3a69d4907 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -30,7 +30,6 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
- struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
static void vf610_gpio_irq_mask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
vf610_gpio_writel(0, pcr_base);
+ gpiochip_disable_irq(gc, gpio_num);
}
static void vf610_gpio_irq_unmask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
- vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
+ gpiochip_enable_irq(gc, gpio_num);
+ vf610_gpio_writel(port->irqc[gpio_num] << PORT_PCR_IRQC_OFFSET,
pcr_base);
}
@@ -237,6 +240,17 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
+static const struct irq_chip vf610_irqchip = {
+ .name = "gpio-vf610",
+ .irq_ack = vf610_gpio_irq_ack,
+ .irq_mask = vf610_gpio_irq_mask,
+ .irq_unmask = vf610_gpio_irq_unmask,
+ .irq_set_type = vf610_gpio_irq_set_type,
+ .irq_set_wake = vf610_gpio_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void vf610_gpio_disable_clk(void *data)
{
clk_disable_unprepare(data);
@@ -249,7 +263,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
- struct irq_chip *ic;
int i;
int ret;
@@ -304,7 +317,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc = &port->gc;
gc->parent = dev;
- gc->label = "vf610-gpio";
+ gc->label = dev_name(dev);
gc->ngpio = VF610_GPIO_PER_PORT;
gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
@@ -315,14 +328,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ic = &port->ic;
- ic->name = "gpio-vf610";
- ic->irq_ack = vf610_gpio_irq_ack;
- ic->irq_mask = vf610_gpio_irq_mask;
- ic->irq_unmask = vf610_gpio_irq_unmask;
- ic->irq_set_type = vf610_gpio_irq_set_type;
- ic->irq_set_wake = vf610_gpio_irq_set_wake;
-
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
@@ -331,7 +336,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
vf610_gpio_writel(~0, port->base + PORT_ISFR);
girq = &gc->irq;
- girq->chip = ic;
+ gpio_irq_chip_set_chip(girq, &vf610_irqchip);
girq->parent_handler = vf610_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 5e108ba9956a..6734e7e1e2a4 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/bitops.h>
/* register offset */
@@ -31,7 +32,7 @@ struct visconti_gpio {
void __iomem *base;
spinlock_t lock; /* protect gpio register */
struct gpio_chip gpio_chip;
- struct irq_chip irq_chip;
+ struct device *dev;
};
static int visconti_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -119,11 +120,45 @@ static int visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip,
return 0;
}
+static void visconti_gpio_mask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+
+static void visconti_gpio_unmask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+ irq_chip_unmask_parent(d);
+}
+
+static void visconti_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct visconti_gpio *priv = gpiochip_get_data(gc);
+
+ seq_printf(p, dev_name(priv->dev));
+}
+
+static const struct irq_chip visconti_gpio_irq_chip = {
+ .irq_mask = visconti_gpio_mask_irq,
+ .irq_unmask = visconti_gpio_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = visconti_gpio_irq_set_type,
+ .irq_print_chip = visconti_gpio_irq_print_chip,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int visconti_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct visconti_gpio *priv;
- struct irq_chip *irq_chip;
struct gpio_irq_chip *girq;
struct irq_domain *parent;
struct device_node *irq_parent;
@@ -134,6 +169,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->lock);
+ priv->dev = dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -164,16 +200,8 @@ static int visconti_gpio_probe(struct platform_device *pdev)
return ret;
}
- irq_chip = &priv->irq_chip;
- irq_chip->name = dev_name(dev);
- irq_chip->irq_mask = irq_chip_mask_parent;
- irq_chip->irq_unmask = irq_chip_unmask_parent;
- irq_chip->irq_eoi = irq_chip_eoi_parent;
- irq_chip->irq_set_type = visconti_gpio_irq_set_type;
- irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
-
girq = &priv->gpio_chip.irq;
- girq->chip = irq_chip;
+ gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip);
girq->fwnode = of_node_to_fwnode(dev->of_node);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
@@ -194,7 +222,7 @@ static struct platform_driver visconti_gpio_driver = {
.probe = visconti_gpio_probe,
.driver = {
.name = "visconti_gpio",
- .of_match_table = of_match_ptr(visconti_gpio_of_match),
+ .of_match_table = visconti_gpio_of_match,
}
};
module_platform_driver(visconti_gpio_driver);
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 97e6caedf1f3..817750e4e033 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -98,7 +98,6 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->of_gpio_n_cells = 2;
chip->can_sleep = false;
return devm_gpiochip_add_data(dev, chip, data);
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index fd88500399c6..2d23b27d55af 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/spinlock.h>
#define IPROC_CCA_INT_F_GPIOINT BIT(0)
@@ -27,7 +28,6 @@
#define IPROC_GPIO_CCA_INT_EDGE 0x24
struct iproc_gpio_chip {
- struct irq_chip irqchip;
struct gpio_chip gc;
spinlock_t lock;
struct device *dev;
@@ -69,6 +69,7 @@ static void iproc_gpio_irq_unmask(struct irq_data *d)
u32 irq = d->irq;
u32 int_mask, irq_type, event_mask;
+ gpiochip_enable_irq(gc, pin);
spin_lock_irqsave(&chip->lock, flags);
irq_type = irq_get_trigger_type(irq);
event_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
@@ -110,6 +111,7 @@ static void iproc_gpio_irq_mask(struct irq_data *d)
chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
}
spin_unlock_irqrestore(&chip->lock, flags);
+ gpiochip_disable_irq(gc, pin);
}
static int iproc_gpio_irq_set_type(struct irq_data *d, u32 type)
@@ -191,6 +193,24 @@ static irqreturn_t iproc_gpio_irq_handler(int irq, void *data)
return int_bits ? IRQ_HANDLED : IRQ_NONE;
}
+static void iproc_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+
+ seq_printf(p, dev_name(chip->dev));
+}
+
+static const struct irq_chip iproc_gpio_irq_chip = {
+ .irq_ack = iproc_gpio_irq_ack,
+ .irq_mask = iproc_gpio_irq_mask,
+ .irq_unmask = iproc_gpio_irq_unmask,
+ .irq_set_type = iproc_gpio_irq_set_type,
+ .irq_print_chip = iproc_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int iproc_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -230,16 +250,8 @@ static int iproc_gpio_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
struct gpio_irq_chip *girq;
- struct irq_chip *irqc;
u32 val;
- irqc = &chip->irqchip;
- irqc->name = dev_name(dev);
- irqc->irq_ack = iproc_gpio_irq_ack;
- irqc->irq_mask = iproc_gpio_irq_mask;
- irqc->irq_unmask = iproc_gpio_irq_unmask;
- irqc->irq_set_type = iproc_gpio_irq_set_type;
-
chip->intr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->intr))
return PTR_ERR(chip->intr);
@@ -261,7 +273,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
}
girq = &chip->gc.irq;
- girq->chip = irqc;
+ gpio_irq_chip_set_chip(girq, &iproc_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 2fc6b6ff7f16..1fa66f2a667f 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -68,7 +68,6 @@ struct xgpio_instance {
DECLARE_BITMAP(dir, 64);
spinlock_t gpio_lock; /* For serializing operations */
int irq;
- struct irq_chip irqchip;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
DECLARE_BITMAP(falling_edge, 64);
@@ -416,6 +415,8 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ gpiochip_disable_irq(&chip->gc, irq_offset);
}
/**
@@ -431,6 +432,8 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
u32 old_enable = xgpio_get_value32(chip->enable, bit);
u32 mask = BIT(bit / 32), val;
+ gpiochip_enable_irq(&chip->gc, irq_offset);
+
spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -544,6 +547,16 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
+static const struct irq_chip xgpio_irq_chip = {
+ .name = "gpio-xilinx",
+ .irq_ack = xgpio_irq_ack,
+ .irq_mask = xgpio_irq_mask,
+ .irq_unmask = xgpio_irq_unmask,
+ .irq_set_type = xgpio_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
/**
* xgpio_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
@@ -558,7 +571,6 @@ static int xgpio_probe(struct platform_device *pdev)
int status = 0;
struct device_node *np = pdev->dev.of_node;
u32 is_dual = 0;
- u32 cells = 2;
u32 width[2];
u32 state[2];
u32 dir[2];
@@ -591,15 +603,6 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_from_arr32(chip->dir, dir, 64);
- /* Update cells with gpio-cells value */
- if (of_property_read_u32(np, "#gpio-cells", &cells))
- dev_dbg(&pdev->dev, "Missing gpio-cells property\n");
-
- if (cells != 2) {
- dev_err(&pdev->dev, "#gpio-cells mismatch\n");
- return -EINVAL;
- }
-
/*
* Check device node and parent device node for device width
* and assume default width of 32
@@ -630,7 +633,6 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
- chip->gc.of_gpio_n_cells = cells;
chip->gc.get = xgpio_get;
chip->gc.set = xgpio_set;
chip->gc.request = xgpio_request;
@@ -664,12 +666,6 @@ static int xgpio_probe(struct platform_device *pdev)
if (chip->irq <= 0)
goto skip_irq;
- chip->irqchip.name = "gpio-xilinx";
- chip->irqchip.irq_ack = xgpio_irq_ack;
- chip->irqchip.irq_mask = xgpio_irq_mask;
- chip->irqchip.irq_unmask = xgpio_irq_unmask;
- chip->irqchip.irq_set_type = xgpio_set_irq_type;
-
/* Disable per-channel interrupts */
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, 0);
/* Clear any existing per-channel interrupts */
@@ -679,7 +675,7 @@ static int xgpio_probe(struct platform_device *pdev)
xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE);
girq = &chip->gc.irq;
- girq->chip = &chip->irqchip;
+ gpio_irq_chip_set_chip(girq, &xgpio_irq_chip);
girq->parent_handler = xgpio_irqhandler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 0199f545335f..b4b52213bcd9 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -90,6 +90,13 @@ static void xlp_gpio_set_reg(void __iomem *addr, unsigned gpio, int state)
writel(value, addr + regset);
}
+static void xlp_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+}
+
static void xlp_gpio_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -100,6 +107,7 @@ static void xlp_gpio_irq_disable(struct irq_data *d)
xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x0);
__clear_bit(d->hwirq, priv->gpio_enabled_mask);
spin_unlock_irqrestore(&priv->lock, flags);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void xlp_gpio_irq_mask_ack(struct irq_data *d)
@@ -163,10 +171,12 @@ static int xlp_gpio_set_irq_type(struct irq_data *d, unsigned int type)
static struct irq_chip xlp_gpio_irq_chip = {
.name = "XLP-GPIO",
.irq_mask_ack = xlp_gpio_irq_mask_ack,
+ .irq_enable = xlp_gpio_irq_enable,
.irq_disable = xlp_gpio_irq_disable,
.irq_set_type = xlp_gpio_set_irq_type,
.irq_unmask = xlp_gpio_irq_unmask,
- .flags = IRQCHIP_ONESHOT_SAFE,
+ .flags = IRQCHIP_ONESHOT_SAFE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void xlp_gpio_generic_handler(struct irq_desc *desc)
@@ -272,7 +282,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
girq = &gc->irq;
- girq->chip = &xlp_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &xlp_gpio_irq_chip);
girq->parent_handler = xlp_gpio_generic_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 49c878cfd5c6..51d6119e1bb4 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -195,7 +195,7 @@ static const struct spi_device_id xra1403_ids[] = {
};
MODULE_DEVICE_TABLE(spi, xra1403_ids);
-static const struct of_device_id xra1403_spi_of_match[] = {
+static const struct of_device_id xra1403_spi_of_match[] __maybe_unused = {
{ .compatible = "exar,xra1403" },
{},
};
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index ce9d1282165c..f0f571b323f2 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -5,13 +5,15 @@
* Author: Fabian Vogt <fabian@ritter-vogt.de>
*/
-#include <linux/spinlock.h>
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/bitops.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+
#include <linux/gpio/driver.h>
/*
@@ -162,7 +164,6 @@ static const struct gpio_chip zevio_gpio_chip = {
.base = 0,
.owner = THIS_MODULE,
.ngpio = 32,
- .of_gpio_n_cells = 2,
};
/* Initialization */
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index bed0380c5136..97496c0f9133 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -7,17 +7,19 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/errno.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/machine.h>
#include <linux/export.h>
-#include <linux/acpi.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+
#include "gpiolib.h"
#include "gpiolib-acpi.h"
@@ -126,7 +128,7 @@ static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
- return gc->parent && device_match_acpi_handle(gc->parent, data);
+ return ACPI_HANDLE_FWNODE(gc->fwnode) == data;
}
/**
@@ -385,7 +387,7 @@ err:
}
static bool acpi_gpio_irq_is_wake(struct device *parent,
- struct acpi_resource_gpio *agpio)
+ const struct acpi_resource_gpio *agpio)
{
unsigned int pin = agpio->pin_table[0];
@@ -536,6 +538,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
+ if (acpi_quirk_skip_gpio_event_handlers())
+ return;
+
acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);
@@ -643,7 +648,7 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
{
const struct acpi_gpio_mapping *gm;
- if (!adev->driver_gpios)
+ if (!adev || !adev->driver_gpios)
return false;
for (gm = adev->driver_gpios; gm->name; gm++)
@@ -778,7 +783,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.pin_config = agpio->pin_config;
lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
- lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
/*
* Polarity and triggering are only specified for GpioInt
@@ -837,13 +842,10 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
&args);
if (ret) {
- struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_device *adev;
- if (!adev)
- return ret;
-
- if (!acpi_get_driver_gpio_data(adev, propname, index, &args,
- &quirks))
+ adev = to_acpi_device_node(fwnode);
+ if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks))
return ret;
}
/*
@@ -1104,7 +1106,8 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in
dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
}
- if (wake_capable)
+ /* avoid suspend issues with GPIOs when systems are using S3 */
+ if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
*wake_capable = info.wake_capable;
return irq;
@@ -1387,16 +1390,6 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
kfree(acpi_gpio);
}
-void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
-{
- /* Set default fwnode to parent's one if present */
- if (gc->parent)
- ACPI_COMPANION_SET(&gdev->dev, ACPI_COMPANION(gc->parent));
-
- if (gc->fwnode)
- device_set_node(&gdev->dev, gc->fwnode);
-}
-
static int acpi_gpio_package_count(const union acpi_object *obj)
{
const union acpi_object *element = obj->package.elements;
@@ -1623,6 +1616,44 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@18",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 9475f99a9694..0fcd7e14d7f9 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -9,12 +9,10 @@
#define GPIOLIB_ACPI_H
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/types.h>
#include <linux/gpio/consumer.h>
-struct acpi_device;
struct device;
struct fwnode_handle;
@@ -26,8 +24,6 @@ struct gpio_device;
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
-void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
-
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
@@ -42,8 +38,6 @@ int acpi_gpio_count(struct device *dev, const char *con_id);
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
-static inline void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { }
-
static inline void
acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e878e3f22b0e..0a33971c964c 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -321,7 +321,7 @@ static void linehandle_free(struct linehandle_state *lh)
if (lh->descs[i])
gpiod_free(lh->descs[i]);
kfree(lh->label);
- put_device(&lh->gdev->dev);
+ gpio_device_put(lh->gdev);
kfree(lh);
}
@@ -363,8 +363,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
- lh->gdev = gdev;
- get_device(&gdev->dev);
+ lh->gdev = gpio_device_get(gdev);
if (handlereq.consumer_label[0] != '\0') {
/* label is only initialized if consumer_label is set */
@@ -1576,7 +1575,7 @@ static void linereq_free(struct linereq *lr)
}
kfifo_free(&lr->events);
kfree(lr->label);
- put_device(&lr->gdev->dev);
+ gpio_device_put(lr->gdev);
kfree(lr);
}
@@ -1646,8 +1645,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (!lr)
return -ENOMEM;
- lr->gdev = gdev;
- get_device(&gdev->dev);
+ lr->gdev = gpio_device_get(gdev);
for (i = 0; i < ulr.num_lines; i++) {
lr->lines[i].req = lr;
@@ -1916,7 +1914,7 @@ static void lineevent_free(struct lineevent_state *le)
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
- put_device(&le->gdev->dev);
+ gpio_device_put(le->gdev);
kfree(le);
}
@@ -2094,8 +2092,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
le = kzalloc(sizeof(*le), GFP_KERNEL);
if (!le)
return -ENOMEM;
- le->gdev = gdev;
- get_device(&gdev->dev);
+ le->gdev = gpio_device_get(gdev);
if (eventreq.consumer_label[0] != '\0') {
/* label is only initialized if consumer_label is set */
@@ -2671,7 +2668,7 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
init_waitqueue_head(&cdev->wait);
INIT_KFIFO(cdev->events);
- cdev->gdev = gdev;
+ cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
ret = blocking_notifier_chain_register(&gdev->notifier,
@@ -2679,7 +2676,6 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
if (ret)
goto out_free_bitmap;
- get_device(&gdev->dev);
file->private_data = cdev;
ret = nonseekable_open(inode, file);
@@ -2694,6 +2690,7 @@ out_unregister_notifier:
blocking_notifier_chain_unregister(&gdev->notifier,
&cdev->lineinfo_changed_nb);
out_free_bitmap:
+ gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
out_free_cdev:
kfree(cdev);
@@ -2716,7 +2713,7 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
bitmap_free(cdev->watched_lines);
blocking_notifier_chain_unregister(&gdev->notifier,
&cdev->lineinfo_changed_nb);
- put_device(&gdev->dev);
+ gpio_device_put(gdev);
kfree(cdev);
return 0;
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 16a696249229..fe9ce6b19f15 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -130,61 +130,6 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
/**
- * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
- * @dev: device for lifecycle management
- * @node: handle of the OF node
- * @propname: name of the DT property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- struct gpio_desc **dr;
- struct gpio_desc *desc;
-
- desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
- if (IS_ERR(desc))
- return desc;
-
- /*
- * For non-exclusive GPIO descriptors, check if this descriptor is
- * already under resource management by this device.
- */
- if (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
- struct devres *dres;
-
- dres = devres_find(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
- if (dres)
- return desc;
- }
-
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr) {
- gpiod_put(desc);
- return ERR_PTR(-ENOMEM);
- }
-
- *dr = desc;
- devres_add(dev, dr);
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
-
-/**
* devm_fwnode_gpiod_get_index - get a GPIO descriptor from a given node
* @dev: GPIO consumer
* @fwnode: firmware node containing GPIO reference
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 4fff7258ee41..1436cdb5fa26 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -10,19 +10,62 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/io.h>
-#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include "gpiolib.h"
#include "gpiolib-of.h"
+/*
+ * These are Linux-specific flags. By default the controllers' and Linux's
+ * mappings match, but GPIO controllers are free to translate their own flags
+ * to the Linux-specific ones in their .xlate callback, though a 1:1 mapping
+ * is recommended.
+ */
+enum of_gpio_flags {
+ OF_GPIO_ACTIVE_LOW = 0x1,
+ OF_GPIO_SINGLE_ENDED = 0x2,
+ OF_GPIO_OPEN_DRAIN = 0x4,
+ OF_GPIO_TRANSITORY = 0x8,
+ OF_GPIO_PULL_UP = 0x10,
+ OF_GPIO_PULL_DOWN = 0x20,
+ OF_GPIO_PULL_DISABLE = 0x40,
+};
+
+/**
+ * of_gpio_named_count() - Count GPIOs for a device
+ * @np: device node to count GPIOs for
+ * @propname: property name containing gpio specifier(s)
+ *
+ * The function returns the count of GPIOs specified for a node.
+ * Note that empty GPIO specifiers count too. Returns either
+ * the number of GPIOs defined in the property,
+ * -EINVAL for an incorrectly formed gpios property, or
+ * -ENOENT for a missing gpios property.
+ *
+ * Example:
+ * gpios = <0
+ * &gpio1 1 2
+ * 0
+ * &gpio2 3 4>;
+ *
+ * The above example defines four GPIOs, two of which are not specified.
+ * This function will return '4'.
+ */
+static int of_gpio_named_count(const struct device_node *np,
+ const char *propname)
+{
+ return of_count_phandle_with_args(np, propname, "#gpio-cells");
+}
+
/**
* of_gpio_spi_cs_get_count() - special GPIO counting for SPI
* @dev: Consuming device
@@ -50,12 +93,6 @@ static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
return of_gpio_named_count(np, "gpios");
}
-/*
- * This is used by external users of of_gpio_count() from <linux/of_gpio.h>
- *
- * FIXME: get rid of those external users by converting them to GPIO
- * descriptors and let them all use gpiod_count()
- */
int of_gpio_get_count(struct device *dev, const char *con_id)
{
int ret;
@@ -345,19 +382,28 @@ out:
return desc;
}
-int of_get_named_gpio_flags(const struct device_node *np, const char *list_name,
- int index, enum of_gpio_flags *flags)
+/**
+ * of_get_named_gpio() - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @propname: Name of property containing gpio specifier(s)
+ * @index: index of the GPIO
+ *
+ * Returns the GPIO number to use with the Linux generic GPIO API, or a
+ * negative errno value on error.
+ */
+int of_get_named_gpio(const struct device_node *np, const char *propname,
+ int index)
{
struct gpio_desc *desc;
- desc = of_get_named_gpiod_flags(np, list_name, index, flags);
+ desc = of_get_named_gpiod_flags(np, propname, index, NULL);
if (IS_ERR(desc))
return PTR_ERR(desc);
else
return desc_to_gpio(desc);
}
-EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
+EXPORT_SYMBOL_GPL(of_get_named_gpio);
/* Converts gpio_lookup_flags into bitmask of GPIO_* values */
static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags)
@@ -389,52 +435,6 @@ static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags)
return lflags;
}
-/**
- * gpiod_get_from_of_node() - obtain a GPIO from an OF node
- * @node: handle of the OF node
- * @propname: name of the DT property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- unsigned long lflags;
- struct gpio_desc *desc;
- enum of_gpio_flags of_flags;
- int ret;
-
- desc = of_get_named_gpiod_flags(node, propname, index, &of_flags);
- if (!desc || IS_ERR(desc))
- return desc;
-
- ret = gpiod_request(desc, label);
- if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
- return desc;
- if (ret)
- return ERR_PTR(ret);
-
- lflags = of_convert_gpio_flags(of_flags);
-
- ret = gpiod_configure_flags(desc, propname, lflags, dflags);
- if (ret < 0) {
- gpiod_put(desc);
- return ERR_PTR(ret);
- }
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(gpiod_get_from_of_node);
-
static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
const char *con_id,
unsigned int idx,
@@ -668,7 +668,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
u32 tmp;
int ret;
- chip_np = chip->of_node;
+ chip_np = dev_of_node(&chip->gpiodev->dev);
if (!chip_np)
return ERR_PTR(-EINVAL);
@@ -760,7 +760,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
struct device_node *np;
int ret;
- for_each_available_child_of_node(chip->of_node, np) {
+ for_each_available_child_of_node(dev_of_node(&chip->gpiodev->dev), np) {
if (!of_property_read_bool(np, "gpio-hog"))
continue;
@@ -894,6 +894,8 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
return gpiospec->args[0];
}
+#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
+#include <linux/gpio/legacy-of-mm-gpiochip.h>
/**
* of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
* @np: device node of the GPIO chip
@@ -966,26 +968,23 @@ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
kfree(gc->label);
}
EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove);
+#endif
#ifdef CONFIG_PINCTRL
static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
- struct device_node *np = chip->of_node;
struct of_phandle_args pinspec;
struct pinctrl_dev *pctldev;
+ struct device_node *np;
int index = 0, ret;
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
struct property *group_names;
+ np = dev_of_node(&chip->gpiodev->dev);
if (!np)
return 0;
- if (!of_property_read_bool(np, "gpio-ranges") &&
- chip->of_gpio_ranges_fallback) {
- return chip->of_gpio_ranges_fallback(chip, np);
- }
-
group_names = of_find_property(np, group_names_propname, NULL);
for (;; index++) {
@@ -1063,7 +1062,7 @@ int of_gpiochip_add(struct gpio_chip *chip)
struct device_node *np;
int ret;
- np = to_of_node(dev_fwnode(&chip->gpiodev->dev));
+ np = dev_of_node(&chip->gpiodev->dev);
if (!np)
return 0;
@@ -1092,19 +1091,3 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
fwnode_handle_put(chip->fwnode);
}
-
-void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
-{
- /* Set default OF node to parent's one if present */
- if (gc->parent)
- gdev->dev.of_node = gc->parent->of_node;
-
- if (gc->fwnode)
- gc->of_node = to_of_node(gc->fwnode);
-
- /* If the gpiochip has an assigned OF node this takes precedence */
- if (gc->of_node)
- gdev->dev.of_node = gc->of_node;
- else
- gc->of_node = gdev->dev.of_node;
-}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index a6c593e6766c..6b3a5347c5d9 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -4,7 +4,6 @@
#define GPIOLIB_OF_H
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/types.h>
#include <linux/notifier.h>
@@ -23,7 +22,6 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
int of_gpio_get_count(struct device *dev, const char *con_id);
-void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
const char *con_id,
@@ -38,10 +36,6 @@ static inline int of_gpio_get_count(struct device *dev, const char *con_id)
{
return 0;
}
-static inline void of_gpio_dev_init(struct gpio_chip *gc,
- struct gpio_device *gdev)
-{
-}
#endif /* CONFIG_OF_GPIO */
extern struct notifier_block gpio_of_notifier;
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index dd9ccac214d1..b5a6eaf3729b 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -6,13 +6,14 @@
*/
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/property.h>
#include <linux/string.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+
#include "gpiolib.h"
#include "gpiolib-swnode.h"
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index cd27bf173dec..530dfd19d7b5 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -1,18 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/device.h>
#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kdev_t.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
+
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
#include "gpiolib.h"
#include "gpiolib-sysfs.h"
+struct kernfs_node;
+
#define GPIO_IRQF_TRIGGER_NONE 0
#define GPIO_IRQF_TRIGGER_FALLING BIT(0)
#define GPIO_IRQF_TRIGGER_RISING BIT(1)
@@ -426,8 +437,8 @@ ATTRIBUTE_GROUPS(gpiochip);
* /sys/class/gpio/unexport ... write-only
* integer N ... number of GPIO to unexport
*/
-static ssize_t export_store(struct class *class,
- struct class_attribute *attr,
+static ssize_t export_store(const struct class *class,
+ const struct class_attribute *attr,
const char *buf, size_t len)
{
long gpio;
@@ -478,8 +489,8 @@ done:
}
static CLASS_ATTR_WO(export);
-static ssize_t unexport_store(struct class *class,
- struct class_attribute *attr,
+static ssize_t unexport_store(const struct class *class,
+ const struct class_attribute *attr,
const char *buf, size_t len)
{
long gpio;
@@ -491,7 +502,7 @@ static ssize_t unexport_store(struct class *class,
goto done;
desc = gpio_to_desc(gpio);
- /* reject bogus commands (gpio_unexport ignores them) */
+ /* reject bogus commands (gpiod_unexport() ignores them) */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
@@ -523,8 +534,6 @@ ATTRIBUTE_GROUPS(gpio_class);
static struct class gpio_class = {
.name = "gpio",
- .owner = THIS_MODULE,
-
.class_groups = gpio_class_groups,
};
@@ -556,7 +565,7 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
int offset;
/* can't export until sysfs is available ... */
- if (!gpio_class.p) {
+ if (!class_is_registered(&gpio_class)) {
pr_debug("%s: called too early!\n", __func__);
return -ENOENT;
}
@@ -730,7 +739,7 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
* register later, in gpiolib_sysfs_init() ... here we just
* verify that _some_ field of gpio_class got initialized.
*/
- if (!gpio_class.p)
+ if (!class_is_registered(&gpio_class))
return 0;
/*
@@ -790,7 +799,7 @@ static int __init gpiolib_sysfs_init(void)
* early (e.g. before the class_register above was called).
*
* We run before arch_initcall() so chip->dev nodes can have
- * registered, and so arch_initcall() can always gpio_export().
+ * registered, and so arch_initcall() can always gpiod_export().
*/
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5a66d9616d7c..04fb05df805b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,34 +1,38 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
#include <linux/bitmap.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/compat.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/spinlock.h>
+#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/seq_file.h>
-#include <linux/gpio.h>
-#include <linux/idr.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
+#include <linux/spinlock.h>
+
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-#include <linux/file.h>
+
#include <uapi/linux/gpio.h>
-#include "gpiolib.h"
-#include "gpiolib-of.h"
#include "gpiolib-acpi.h"
-#include "gpiolib-swnode.h"
#include "gpiolib-cdev.h"
+#include "gpiolib-of.h"
+#include "gpiolib-swnode.h"
#include "gpiolib-sysfs.h"
+#include "gpiolib.h"
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
@@ -57,7 +61,20 @@
static DEFINE_IDA(gpio_ida);
static dev_t gpio_devt;
#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
-static int gpio_bus_match(struct device *dev, struct device_driver *drv);
+
+static int gpio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ /*
+ * Only match if the fwnode doesn't already have a proper struct device
+ * created for it.
+ */
+ if (fwnode && fwnode->dev != dev)
+ return 0;
+ return 1;
+}
+
static struct bus_type gpio_bus_type = {
.name = "gpio",
.match = gpio_bus_match,
@@ -356,7 +373,7 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
}
/*
- * devprop_gpiochip_set_names - Set GPIO line names using device properties
+ * gpiochip_set_names - Set GPIO line names using device properties
* @chip: GPIO chip whose lines should be named, if possible
*
* Looks for device property "gpio-line-names" and if it exists assigns
@@ -364,7 +381,7 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
* names belong to the underlying firmware node and should not be released
* by the caller.
*/
-static int devprop_gpiochip_set_names(struct gpio_chip *chip)
+static int gpiochip_set_names(struct gpio_chip *chip)
{
struct gpio_device *gdev = chip->gpiodev;
struct device *dev = &gdev->dev;
@@ -531,6 +548,14 @@ static void gpiochip_free_valid_mask(struct gpio_chip *gc)
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
{
+ /*
+ * Device Tree platforms are supposed to use the "gpio-ranges"
+ * property. This check ensures that ->add_pin_ranges()
+ * won't be called for them.
+ */
+ if (device_property_present(&gc->gpiodev->dev, "gpio-ranges"))
+ return 0;
+
if (gc->add_pin_ranges)
return gc->add_pin_ranges(gc);
@@ -547,7 +572,7 @@ bool gpiochip_line_is_valid(const struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
-static void gpiodevice_release(struct device *dev)
+static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
unsigned long flags;
@@ -576,14 +601,22 @@ static void gpiodevice_release(struct device *dev)
static int gpiochip_setup_dev(struct gpio_device *gdev)
{
+ struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev);
int ret;
+ /*
+ * If fwnode doesn't belong to another device, it's safe to clear its
+ * initialized flag.
+ */
+ if (fwnode && !fwnode->dev)
+ fwnode_dev_initialized(fwnode, false);
+
ret = gcdev_register(gdev, gpio_devt);
if (ret)
return ret;
/* From this point, the .release() function cleans up gpio_device */
- gdev->dev.release = gpiodevice_release;
+ gdev->dev.release = gpiodev_release;
ret = gpiochip_sysfs_register(gdev);
if (ret)
@@ -647,11 +680,28 @@ static void gpiochip_setup_devs(void)
}
}
+static void gpiochip_set_data(struct gpio_chip *gc, void *data)
+{
+ gc->gpiodev->data = data;
+}
+
+/**
+ * gpiochip_get_data() - get per-subdriver data for the chip
+ * @gc: GPIO chip
+ *
+ * Returns:
+ * The per-subdriver data for the chip.
+ */
+void *gpiochip_get_data(struct gpio_chip *gc)
+{
+ return gc->gpiodev->data;
+}
+EXPORT_SYMBOL_GPL(gpiochip_get_data);
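The data pointer passed to gpiochip_add_data() is now stored in the owning gpio_device by gpiochip_set_data() and read back with gpiochip_get_data(); a typical sub-driver pattern looks roughly like the sketch below (hypothetical names, assuming devm_gpiochip_add_data() as the registration entry point).

/* Sketch: per-chip driver state retrieved via gpiochip_get_data(). */
struct foo_gpio {
	struct gpio_chip chip;
	void __iomem *base;		/* hypothetical register base */
};

static int foo_get(struct gpio_chip *gc, unsigned int offset)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return !!(readl(foo->base) & BIT(offset));
}

static int foo_register(struct device *dev, struct foo_gpio *foo)
{
	/* 'foo' becomes the per-subdriver data returned by gpiochip_get_data() */
	return devm_gpiochip_add_data(dev, &foo->chip, foo);
}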
+
int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
- struct fwnode_handle *fwnode = NULL;
struct gpio_device *gdev;
unsigned long flags;
unsigned int i;
@@ -659,10 +709,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret = 0;
- if (gc->fwnode)
- fwnode = gc->fwnode;
- else if (gc->parent)
- fwnode = dev_fwnode(gc->parent);
+ /*
+ * If the calling driver did not initialize the firmware node, do it here
+ * using the parent device, if any.
+ */
+ if (!gc->fwnode && gc->parent)
+ gc->fwnode = dev_fwnode(gc->parent);
/*
* First: allocate and populate the internal stat container, and
@@ -674,16 +726,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->dev.bus = &gpio_bus_type;
gdev->dev.parent = gc->parent;
gdev->chip = gc;
- gc->gpiodev = gdev;
- of_gpio_dev_init(gc, gdev);
- acpi_gpio_dev_init(gc, gdev);
+ gc->gpiodev = gdev;
+ gpiochip_set_data(gc, data);
- /*
- * Assign fwnode depending on the result of the previous calls,
- * if none of them succeed, assign it to the parent's one.
- */
- gc->fwnode = gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
+ device_set_node(&gdev->dev, gc->fwnode);
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
@@ -748,7 +795,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
gdev->ngpio = gc->ngpio;
- gdev->data = data;
spin_lock_irqsave(&gpio_lock, flags);
@@ -805,7 +851,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (ret)
goto err_remove_from_list;
}
- ret = devprop_gpiochip_set_names(gc);
+ ret = gpiochip_set_names(gc);
if (ret)
goto err_remove_from_list;
@@ -882,7 +928,7 @@ err_free_gpiochip_mask:
gpiochip_free_valid_mask(gc);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
- put_device(&gdev->dev);
+ gpio_device_put(gdev);
goto err_print_message;
}
err_remove_from_list:
@@ -911,19 +957,6 @@ err_print_message:
EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
/**
- * gpiochip_get_data() - get per-subdriver data for the chip
- * @gc: GPIO chip
- *
- * Returns:
- * The per-subdriver data for the chip.
- */
-void *gpiochip_get_data(struct gpio_chip *gc)
-{
- return gc->gpiodev->data;
-}
-EXPORT_SYMBOL_GPL(gpiochip_get_data);
-
-/**
* gpiochip_remove() - unregister a gpio_chip
* @gc: the chip to unregister
*
@@ -949,9 +982,9 @@ void gpiochip_remove(struct gpio_chip *gc)
gpiochip_free_valid_mask(gc);
/*
* We accept no more calls into the driver from this point, so
- * NULL the driver data pointer
+ * NULL the driver data pointer.
*/
- gdev->data = NULL;
+ gpiochip_set_data(gc, NULL);
spin_lock_irqsave(&gpio_lock, flags);
for (i = 0; i < gdev->ngpio; i++) {
@@ -972,7 +1005,7 @@ void gpiochip_remove(struct gpio_chip *gc)
*/
gcdev_unregister(gdev);
up_write(&gdev->sem);
- put_device(&gdev->dev);
+ gpio_device_put(gdev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -1126,14 +1159,8 @@ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
/* Just pick something */
fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
fwspec.param_count = 2;
- ret = __irq_domain_alloc_irqs(gc->irq.domain,
- /* just pick something */
- -1,
- 1,
- NUMA_NO_NODE,
- &fwspec,
- false,
- NULL);
+ ret = irq_domain_alloc_irqs(gc->irq.domain, 1,
+ NUMA_NO_NODE, &fwspec);
if (ret < 0) {
chip_err(gc,
"can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
@@ -1196,7 +1223,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+ chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
ret = girq->child_to_parent_hwirq(gc, hwirq, type,
&parent_hwirq, &parent_type);
@@ -1364,8 +1391,7 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
* gpiochip by assigning the gpiochip as chip data, and using the irqchip
* stored inside the gpiochip.
*/
-int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
+int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)
{
struct gpio_chip *gc = d->host_data;
int ret = 0;
@@ -1441,8 +1467,9 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain,
struct irq_data *data, bool reserve)
{
struct gpio_chip *gc = domain->host_data;
+ unsigned int hwirq = irqd_to_hwirq(data);
- return gpiochip_lock_as_irq(gc, data->hwirq);
+ return gpiochip_lock_as_irq(gc, hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);
@@ -1459,8 +1486,9 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data)
{
struct gpio_chip *gc = domain->host_data;
+ unsigned int hwirq = irqd_to_hwirq(data);
- return gpiochip_unlock_as_irq(gc, data->hwirq);
+ return gpiochip_unlock_as_irq(gc, hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
@@ -1500,33 +1528,37 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
- return gpiochip_reqres_irq(gc, d->hwirq);
+ return gpiochip_reqres_irq(gc, hwirq);
}
EXPORT_SYMBOL(gpiochip_irq_reqres);
void gpiochip_irq_relres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
- gpiochip_relres_irq(gc, d->hwirq);
+ gpiochip_relres_irq(gc, hwirq);
}
EXPORT_SYMBOL(gpiochip_irq_relres);
static void gpiochip_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
if (gc->irq.irq_mask)
gc->irq.irq_mask(d);
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void gpiochip_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
if (gc->irq.irq_unmask)
gc->irq.irq_unmask(d);
}
@@ -1534,17 +1566,19 @@ static void gpiochip_irq_unmask(struct irq_data *d)
static void gpiochip_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
gc->irq.irq_enable(d);
}
static void gpiochip_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
gc->irq.irq_disable(d);
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
@@ -2063,17 +2097,15 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int ret = -EPROBE_DEFER;
- struct gpio_device *gdev;
VALIDATE_DESC(desc);
- gdev = desc->gdev;
- if (try_module_get(gdev->owner)) {
+ if (try_module_get(desc->gdev->owner)) {
ret = gpiod_request_commit(desc, label);
if (ret)
- module_put(gdev->owner);
+ module_put(desc->gdev->owner);
else
- get_device(&gdev->dev);
+ gpio_device_get(desc->gdev);
}
if (ret)
@@ -2134,7 +2166,7 @@ void gpiod_free(struct gpio_desc *desc)
{
if (desc && desc->gdev && gpiod_free_commit(desc)) {
module_put(desc->gdev->owner);
- put_device(&desc->gdev->dev);
+ gpio_device_put(desc->gdev);
} else {
WARN_ON(extra_checks);
}
@@ -3905,14 +3937,11 @@ static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
const char *label,
bool platform_lookup_allowed)
{
- struct gpio_desc *desc = ERR_PTR(-ENOENT);
- unsigned long lookupflags;
+ unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ struct gpio_desc *desc;
int ret;
- if (!IS_ERR_OR_NULL(fwnode))
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
- &flags, &lookupflags);
-
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, &flags, &lookupflags);
if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
* Either we are not using DT or ACPI, or their lookup did not
@@ -4253,16 +4282,18 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
struct gpio_array *array_info = NULL;
struct gpio_chip *gc;
int count, bitmap_size;
+ size_t descs_size;
count = gpiod_count(dev, con_id);
if (count < 0)
return ERR_PTR(count);
- descs = kzalloc(struct_size(descs, desc, count), GFP_KERNEL);
+ descs_size = struct_size(descs, desc, count);
+ descs = kzalloc(descs_size, GFP_KERNEL);
if (!descs)
return ERR_PTR(-ENOMEM);
- for (descs->ndescs = 0; descs->ndescs < count; ) {
+ for (descs->ndescs = 0; descs->ndescs < count; descs->ndescs++) {
desc = gpiod_get_index(dev, con_id, descs->ndescs, flags);
if (IS_ERR(desc)) {
gpiod_put_array(descs);
@@ -4282,20 +4313,17 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
gc->ngpio : count);
- array = kzalloc(struct_size(descs, desc, count) +
- struct_size(array_info, invert_mask,
- 3 * bitmap_size), GFP_KERNEL);
+ array = krealloc(descs, descs_size +
+ struct_size(array_info, invert_mask, 3 * bitmap_size),
+ GFP_KERNEL | __GFP_ZERO);
if (!array) {
gpiod_put_array(descs);
return ERR_PTR(-ENOMEM);
}
- memcpy(array, descs,
- struct_size(descs, desc, descs->ndescs + 1));
- kfree(descs);
-
descs = array;
- array_info = (void *)(descs->desc + count);
+
+ array_info = (void *)descs + descs_size;
array_info->get_mask = array_info->invert_mask +
bitmap_size;
array_info->set_mask = array_info->get_mask +
@@ -4310,8 +4338,13 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
count - descs->ndescs);
descs->info = array_info;
}
+
+ /* If there is no cache for the fast bitmap processing path, continue */
+ if (!array_info)
+ continue;
+
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info && array_info->chip != gc) {
+ if (array_info->chip != gc) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -4319,8 +4352,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
* Detect array members which belong to the 'fast' chip
* but their pins are not in hardware order.
*/
- else if (array_info &&
- gpio_chip_hwgpio(desc) != descs->ndescs) {
+ else if (gpio_chip_hwgpio(desc) != descs->ndescs) {
/*
* Don't use fast path if all array members processed so
* far belong to the same chip as this one but its pin
@@ -4334,7 +4366,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
__clear_bit(descs->ndescs,
array_info->set_mask);
}
- } else if (array_info) {
+ } else {
/* Exclude open drain or open source from fast output */
if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
gpiochip_line_is_open_source(gc, descs->ndescs))
@@ -4345,8 +4377,6 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
__set_bit(descs->ndescs,
array_info->invert_mask);
}
-
- descs->ndescs++;
}
if (array_info)
dev_dbg(dev,
@@ -4410,20 +4440,6 @@ void gpiod_put_array(struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(gpiod_put_array);
-
-static int gpio_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
-
- /*
- * Only match if the fwnode doesn't already have a proper struct device
- * created for it.
- */
- if (fwnode && fwnode->dev != dev)
- return 0;
- return 1;
-}
-
static int gpio_stub_drv_probe(struct device *dev)
{
/*
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index b3c2db6eba80..cca81375f127 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -82,6 +82,16 @@ static inline struct gpio_device *to_gpio_device(struct device *dev)
return container_of(dev, struct gpio_device, dev);
}
+static inline struct gpio_device *gpio_device_get(struct gpio_device *gdev)
+{
+ return to_gpio_device(get_device(&gdev->dev));
+}
+
+static inline void gpio_device_put(struct gpio_device *gdev)
+{
+ put_device(&gdev->dev);
+}
+
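These helpers are thin wrappers around get_device()/put_device() on the embedded struct device; the reference-counting pattern they enable mirrors the gpiod_request()/gpiod_free() hunks above (sketch only, not part of the patch):

/* Sketch only: take a reference when a descriptor is requested ... */
gdev = gpio_device_get(desc->gdev);
/* ... and drop it again when the descriptor is freed. */
gpio_device_put(gdev);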
/* gpio suffixes used for ACPI and device tree lookup */
static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 315cbdf61979..ba3fb04bb691 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -10,14 +10,13 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
- select FB_CMDLINE
select I2C
- select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
# device and dmabuf fd. Let's make sure that is available for our userspace.
select KCMP
+ select VIDEO_CMDLINE
select VIDEO_NOMODESET
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
@@ -53,7 +52,8 @@ config DRM_DEBUG_MM
config DRM_USE_DYNAMIC_DEBUG
bool "use dynamic debug to implement drm.debug"
- default y
+ default n
+ depends on BROKEN
depends on DRM
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
depends on JUMP_LABEL
@@ -63,6 +63,12 @@ config DRM_USE_DYNAMIC_DEBUG
bytes per callsite, the .data costs can be substantial, and
are therefore configurable.
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ help
+ KUnit Helpers for KMS drivers.
+
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT
@@ -73,6 +79,7 @@ config DRM_KUNIT_TEST
select DRM_KMS_HELPER
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -225,6 +232,10 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
+config DRM_SUBALLOC_HELPER
+ tristate
+ depends on DRM
+
config DRM_SCHED
tristate
depends on DRM
@@ -391,64 +402,7 @@ menuconfig DRM_LEGACY
Unless you have strong reasons to go rogue, say "N".
if DRM_LEGACY
-
-config DRM_TDFX
- tristate "3dfx Banshee/Voodoo3+"
- depends on DRM && PCI
- help
- Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
- graphics card. If M is selected, the module will be called tdfx.
-
-config DRM_R128
- tristate "ATI Rage 128"
- depends on DRM && PCI
- select FW_LOADER
- help
- Choose this option if you have an ATI Rage 128 graphics card. If M
- is selected, the module will be called r128. AGP support for
- this card is strongly suggested (unless you have a PCI version).
-
-config DRM_I810
- tristate "Intel I810"
- # !PREEMPTION because of missing ioctl locking
- depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
- help
- Choose this option if you have an Intel I810 graphics card. If M is
- selected, the module will be called i810. AGP support is required
- for this driver to work.
-
-config DRM_MGA
- tristate "Matrox g200/g400"
- depends on DRM && PCI
- select FW_LOADER
- help
- Choose this option if you have a Matrox G200, G400 or G450 graphics
- card. If M is selected, the module will be called mga. AGP
- support is required for this driver to work.
-
-config DRM_SIS
- tristate "SiS video cards"
- depends on DRM && AGP
- depends on FB_SIS || FB_SIS=n
- help
- Choose this option if you have a SiS 630 or compatible video
- chipset. If M is selected the module will be called sis. AGP
- support is required for this driver to work.
-
-config DRM_VIA
- tristate "Via unichrome video cards"
- depends on DRM && PCI
- help
- Choose this option if you have a Via unichrome or compatible video
- chipset. If M is selected the module will be called via.
-
-config DRM_SAVAGE
- tristate "Savage video cards"
- depends on DRM && PCI
- help
- Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
- chipset. If M is selected the module will be called savage.
-
+# leave here to list legacy drivers
endif # DRM_LEGACY
config DRM_EXPORT_FOR_TESTS
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index cc637343d87b..a33257d2bc7f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -82,12 +82,16 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
drm_dma_helper-y := drm_gem_dma_helper.o
+drm_dma_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_dma.o
drm_dma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
+drm_suballoc_helper-y := drm_suballoc.o
+obj-$(CONFIG_DRM_SUBALLOC_HELPER) += drm_suballoc_helper.o
+
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
@@ -126,7 +130,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
# Drivers and the rest
#
-obj-$(CONFIG_DRM_KUNIT_TEST) += tests/
+obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
@@ -134,21 +138,14 @@ obj-y += arm/
obj-y += display/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
-obj-$(CONFIG_DRM_TDFX) += tdfx/
-obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
-obj-$(CONFIG_DRM_MGA) += mga/
-obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
obj-$(CONFIG_DRM_VC4) += vc4/
-obj-$(CONFIG_DRM_SIS) += sis/
-obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
-obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 5fcd510f1abb..12adca8c7819 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -3,9 +3,11 @@
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
+ depends on !UML
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
@@ -13,9 +15,12 @@ config DRM_AMDGPU
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
+ select I2C
+ select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
+ select DRM_SUBALLOC_HELPER
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 798d0e9a60b7..415a7fa395c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -34,6 +34,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-I$(FULL_AMD_PATH)/amdkfd
@@ -53,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
- amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \
+ amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o amdgpu_hdp.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
@@ -76,12 +77,14 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
- sienna_cichlid.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
+ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
+ nbio_v7_9.o
# add DF block
amdgpu-y += \
df_v1_7.o \
- df_v3_6.o
+ df_v3_6.o \
+ df_v4_3.o
# add GMC block
amdgpu-y += \
@@ -90,7 +93,7 @@ amdgpu-y += \
gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
- mmhub_v3_0_1.o gfxhub_v3_0_3.o
+ mmhub_v3_0_1.o gfxhub_v3_0_3.o gfxhub_v1_2.o mmhub_v1_8.o
# add UMC block
amdgpu-y += \
@@ -133,9 +136,11 @@ amdgpu-y += \
gfx_v9_0.o \
gfx_v9_4.o \
gfx_v9_4_2.o \
+ gfx_v9_4_3.o \
gfx_v10_0.o \
imu_v11_0.o \
gfx_v11_0.o \
+ gfx_v11_0_3.o \
imu_v11_0_3.o
# add async DMA block
@@ -145,6 +150,7 @@ amdgpu-y += \
sdma_v3_0.o \
sdma_v4_0.o \
sdma_v4_4.o \
+ sdma_v4_4_2.o \
sdma_v5_0.o \
sdma_v5_2.o \
sdma_v6_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6b74df446694..02b827785e39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -50,10 +50,8 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>
-#include <linux/aer.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -150,7 +148,7 @@ struct amdgpu_watchdog_timer
* Modules parameters.
*/
extern int amdgpu_modeset;
-extern int amdgpu_vram_limit;
+extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
@@ -187,7 +185,6 @@ extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
-extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
@@ -195,6 +192,7 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
@@ -242,6 +240,7 @@ extern int amdgpu_num_kcq;
#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -423,29 +422,11 @@ struct amdgpu_clock {
* alignment).
*/
-#define AMDGPU_SA_NUM_FENCE_LISTS 32
-
struct amdgpu_sa_manager {
- wait_queue_head_t wq;
- struct amdgpu_bo *bo;
- struct list_head *hole;
- struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
- struct list_head olist;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
- uint32_t align;
-};
-
-/* sub-allocation buffer */
-struct amdgpu_sa_bo {
- struct list_head olist;
- struct list_head flist;
- struct amdgpu_sa_manager *manager;
- unsigned soffset;
- unsigned eoffset;
- struct dma_fence *fence;
+ struct drm_suballoc_manager base;
+ struct amdgpu_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
};
int amdgpu_fence_slab_init(void);
@@ -489,7 +470,7 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -608,7 +589,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
/* VRAM scratch page for HDP bug, default vram page */
-struct amdgpu_vram_scratch {
+struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
volatile uint32_t *ptr;
u64 gpu_addr;
@@ -755,6 +736,11 @@ struct amdgpu_mqd {
#define AMDGPU_PRODUCT_NAME_LEN 64
struct amdgpu_reset_domain;
+/*
+ * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
+ */
+#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
+
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -848,7 +834,7 @@ struct amdgpu_device {
/* memory management */
struct amdgpu_mman mman;
- struct amdgpu_vram_scratch vram_scratch;
+ struct amdgpu_mem_scratch mem_scratch;
struct amdgpu_wb wb;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
@@ -870,7 +856,7 @@ struct amdgpu_device {
struct amdgpu_vkms_output *amdgpu_vkms_output;
struct amdgpu_mode_info mode_info;
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src vline0_irq;
struct amdgpu_irq_src vupdate_irq;
@@ -1017,7 +1003,6 @@ struct amdgpu_device {
bool in_runpm;
bool has_pr3;
- bool pm_sysfs_en;
bool ucode_sysfs_en;
bool psp_sysfs_en;
@@ -1107,18 +1092,14 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data);
-
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1240,7 +1221,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
#define amdgpu_asic_invalidate_hdp(adev, r) \
((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
- ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : 0))
+ ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1266,6 +1247,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
@@ -1385,10 +1367,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1399,11 +1383,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57b5e11446c6..aeeec211861c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/backlight.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
@@ -31,7 +32,6 @@
#include <acpi/video.h>
#include <acpi/actbl.h>
-#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_display.h"
@@ -971,6 +971,34 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
return true;
}
+
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * returns true if the GPU should be reset, false otherwise
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+ if ((adev->flags & AMD_IS_APU) &&
+ adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU-enabled APUs */
+ return false;
+
+ if ((adev->flags & AMD_IS_APU) &&
+ amdgpu_acpi_is_s3_active(adev))
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+ return true;
+#endif
+}
+
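A hedged sketch of how a suspend path might consult the new helper (the caller name is hypothetical; the actual call site is not shown in this hunk):

/* Hypothetical freeze/suspend decision: only reset when ACPI state requires it. */
static int example_freeze(struct amdgpu_device *adev)
{
	if (amdgpu_acpi_should_gpu_reset(adev))
		return amdgpu_asic_reset(adev);	/* existing amdgpu reset entry point */

	return 0;
}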
/*
* amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
*
@@ -1043,24 +1071,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
}
/**
- * amdgpu_acpi_should_gpu_reset
- *
- * @adev: amdgpu_device_pointer
- *
- * returns true if should reset GPU, false if not
- */
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- if (amdgpu_sriov_vf(adev))
- return false;
-
- return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
-}
-
-/**
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device_pointer
@@ -1073,26 +1083,25 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
+ if (adev->asic_type < CHIP_RAVEN)
+ return false;
+
/*
* If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
* risky to do any special firmware-related preparations for entering
* S0ix even though the system is suspending to idle, so return false
* in that case.
*/
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
dev_warn_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
- return false;
- }
#if !IS_ENABLED(CONFIG_AMD_PMC)
dev_warn_once(adev->dev,
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
- return false;
-#else
- return true;
#endif /* CONFIG_AMD_PMC */
+ return true;
}
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f99d4873bf22..0385f7f69278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -96,7 +96,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
size_t *start_offset)
{
/*
- * The first num_doorbells are used by amdgpu.
+ * The first num_kernel_doorbells are used by amdgpu.
* amdkfd takes whatever's left in the aperture.
*/
if (adev->enable_mes) {
@@ -109,11 +109,11 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
*aperture_base = adev->doorbell.base;
*aperture_size = 0;
*start_offset = 0;
- } else if (adev->doorbell.size > adev->doorbell.num_doorbells *
+ } else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
sizeof(u32)) {
*aperture_base = adev->doorbell.base;
*aperture_size = adev->doorbell.size;
- *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
+ *start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
} else {
*aperture_base = 0;
*aperture_size = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0040deaf8a83..01ba3589b60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -97,7 +97,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
- uint64_t vram_used;
+ int64_t vram_used;
uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
@@ -271,9 +271,9 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
((struct drm_file *)(drm_priv))->driver_priv)->vm)
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct file *filp, u32 pasid);
+ struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp,
+ struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
@@ -308,6 +308,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dmabuf);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b15091d8310d..83a83ced2439 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
@@ -81,6 +82,25 @@ static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
return false;
}
+/**
+ * reuse_dmamap() - Check whether adev can share the original
+ * userptr BO
+ *
+ * If both adev and bo_adev are in direct mapping or
+ * in the same iommu group, they can share the original BO.
+ *
+ * @adev: Device that may or may not share the original BO
+ * @bo_adev: Device to which the allocated BO belongs
+ *
+ * Return: returns true if adev can share the original userptr BO,
+ * false otherwise.
+ */
+static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
+{
+ return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
+ (adev->dev->iommu_group == bo_adev->dev->iommu_group);
+}
+
/* Set memory usage limits. Current, limits are
* System (TTM + userptr) memory - 15/16th System RAM
* TTM memory - 3/8th System RAM
@@ -252,15 +272,19 @@ create_dmamap_sg_bo(struct amdgpu_device *adev,
struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
struct drm_gem_object *gem_obj;
- int ret, align;
+ int ret;
+ uint64_t flags = 0;
ret = amdgpu_bo_reserve(mem->bo, false);
if (ret)
return ret;
- align = 1;
- ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align,
- AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE,
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
+ flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED);
+
+ ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
+ AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
amdgpu_bo_unreserve(mem->bo);
@@ -480,9 +504,6 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem,
if (unlikely(ret))
goto release_sg;
- drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
- ttm->num_pages);
-
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -710,6 +731,21 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
}
}
+static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
+{
+ if (!mem->dmabuf) {
+ struct dma_buf *ret = amdgpu_gem_prime_export(
+ &mem->bo->tbo.base,
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DRM_RDWR : 0);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ mem->dmabuf = ret;
+ }
+
+ return 0;
+}
+
static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo **bo)
@@ -717,16 +753,9 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct drm_gem_object *gobj;
int ret;
- if (!mem->dmabuf) {
- mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
- mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0);
- if (IS_ERR(mem->dmabuf)) {
- ret = PTR_ERR(mem->dmabuf);
- mem->dmabuf = NULL;
- return ret;
- }
- }
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ return ret;
gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
if (IS_ERR(gobj))
@@ -796,11 +825,11 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
va + bo_size, vm);
if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
- (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
- same_hive) {
+ (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
+ same_hive) {
/* Mappings on the local GPU, or VRAM mappings in the
- * local hive, or userptr mapping IOMMU direct map mode
- * share the original BO
+ * local hive, or userptr mappings that can reuse the DMA
+ * map address space, share the original BO
*/
attachment[i]->type = KFD_MEM_ATT_SHARED;
bo[i] = mem->bo;
@@ -1430,18 +1459,11 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
}
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct file *filp, u32 pasid)
+ struct amdgpu_vm *avm, u32 pasid)
{
- struct amdgpu_fpriv *drv_priv;
- struct amdgpu_vm *avm;
int ret;
- ret = amdgpu_file_to_fpriv(filp, &drv_priv);
- if (ret)
- return ret;
- avm = &drv_priv->vm;
-
/* Free the original amdgpu allocated pasid,
* will be replaced with kfd allocated pasid.
*/
@@ -1458,19 +1480,12 @@ int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp,
+ struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef)
{
- struct amdgpu_fpriv *drv_priv;
- struct amdgpu_vm *avm;
int ret;
- ret = amdgpu_file_to_fpriv(filp, &drv_priv);
- if (ret)
- return ret;
- avm = &drv_priv->vm;
-
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
@@ -1588,7 +1603,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
- size_t available;
+ ssize_t available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
@@ -1597,6 +1612,9 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ if (available < 0)
+ available = 0;
+
return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
@@ -1612,6 +1630,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_bo *bo;
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
+ uint64_t aligned_size;
u64 alloc_flags;
int ret;
@@ -1667,22 +1686,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
* the memory.
*/
if ((*mem)->aql_queue)
- size = size >> 1;
+ size >>= 1;
+ aligned_size = PAGE_ALIGN(size);
(*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
- va, size, domain_string(alloc_domain));
+ va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
- ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
@@ -1739,7 +1759,7 @@ err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
@@ -2099,7 +2119,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
}
amdgpu_amdkfd_remove_eviction_fence(
- bo, bo->kfd_bo->process_info->eviction_fence);
+ bo, bo->vm_bo->vm->process_info->eviction_fence);
amdgpu_bo_unreserve(bo);
@@ -2221,30 +2241,27 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
int ret;
- if (dma_buf->ops != &amdgpu_dmabuf_ops)
- /* Can't handle non-graphics buffers */
- return -EINVAL;
-
- obj = dma_buf->priv;
- if (drm_to_adev(obj->dev) != adev)
- /* Can't handle buffers from other devices */
- return -EINVAL;
+ obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT)))
+ AMDGPU_GEM_DOMAIN_GTT))) {
/* Only VRAM and GTT BOs are supported */
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_obj;
+ }
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err_put_obj;
+ }
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
- if (ret) {
- kfree(*mem);
- return ret;
- }
+ if (ret)
+ goto err_free_mem;
if (size)
*size = amdgpu_bo_size(bo);
@@ -2261,7 +2278,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
- drm_gem_object_get(&bo->tbo.base);
+ get_dma_buf(dma_buf);
+ (*mem)->dmabuf = dma_buf;
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
@@ -2273,6 +2291,29 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
(*mem)->is_imported = true;
return 0;
+
+err_free_mem:
+ kfree(*mem);
+err_put_obj:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dma_buf)
+{
+ int ret;
+
+ mutex_lock(&mem->lock);
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ goto out;
+
+ get_dma_buf(mem->dmabuf);
+ *dma_buf = mem->dmabuf;
+out:
+ mutex_unlock(&mem->lock);
+ return ret;
}
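A possible KFD-side consumer of the new export helper could look like this sketch (the caller name and fd handling are hypothetical, not taken from this patch):

/* Sketch: turn a kgd_mem allocation into a dma-buf file descriptor. */
static int example_export_fd(struct kgd_mem *mem)
{
	struct dma_buf *dmabuf;
	int r;

	r = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	if (r)
		return r;

	/* the caller now owns an extra dma-buf reference; convert it to an fd */
	return dma_buf_fd(dmabuf, O_CLOEXEC);
}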
/* Evict a userptr BO by stopping the queues if necessary
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index e4d78491bcc7..ededdc01ca28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -28,6 +28,8 @@
struct hmm_range;
+struct drm_file;
+
struct amdgpu_device;
struct amdgpu_bo;
struct amdgpu_bo_va;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f1a050379190..456e385333b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -411,17 +411,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err) {
- DRM_ERROR("Failed to request firmware\n");
- return err;
- }
-
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2ebbc6382a06..6be30dcb029d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -25,7 +25,9 @@
*/
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -996,13 +998,33 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
+ if (amdgpu_connector->detected_hpd_without_ddc) {
+ force = true;
+ amdgpu_connector->detected_hpd_without_ddc = false;
+ }
+
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
+
+ /* Sometimes the pins required for the DDC probe on DVI
+ * connectors don't make contact at the same time that the ones
+ * for HPD do. If the DDC probe fails even though we had an HPD
+ * signal, try again later
+ */
+ if (!dret && !force &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+ DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+ amdgpu_connector->detected_hpd_without_ddc = true;
+ schedule_delayed_work(&adev->hotplug_work,
+ msecs_to_jiffies(1000));
+ goto exit;
+ }
+ }
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8516c814bc9b..2eb2c66843a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -32,6 +32,8 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
+#include <drm/ttm/ttm_tt.h>
+
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -61,6 +63,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
amdgpu_ctx_put(p->ctx);
return -ECANCELED;
}
+
+ amdgpu_sync_create(&p->sync);
return 0;
}
@@ -452,18 +456,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
}
r = amdgpu_sync_fence(&p->sync, fence);
- if (r)
- goto error;
-
- /*
- * When we have an explicit dependency it might be necessary to insert a
- * pipeline sync to make sure that all caches etc are flushed and the
- * next job actually sees the results from the previous one.
- */
- if (fence->context == p->gang_leader->base.entity->fence_context)
- r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
-
-error:
dma_fence_put(fence);
return r;
}
@@ -1188,10 +1180,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct drm_gpu_scheduler *sched;
struct amdgpu_bo_list_entry *e;
+ struct dma_fence *fence;
unsigned int i;
int r;
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ return r;
+ }
+
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
@@ -1211,10 +1212,27 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
- if (r && r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- return r;
+ sched = p->gang_leader->base.entity->rq->sched;
+ while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+
+ /*
+ * When we have a dependency it might be necessary to insert a
+ * pipeline sync to make sure that all caches etc. are flushed and the
+ * next job actually sees the results from the previous one
+ * before we start executing on the same scheduler ring.
+ */
+ if (!s_fence || s_fence->sched != sched) {
+ dma_fence_put(fence);
+ continue;
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ }
+ return 0;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1254,9 +1272,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
continue;
fence = &p->jobs[i]->base.s_fence->scheduled;
+ dma_fence_get(fence);
r = drm_sched_job_add_dependency(&leader->base, fence);
- if (r)
- goto error_cleanup;
+ if (r) {
+ dma_fence_put(fence);
+ return r;
+ }
}
if (p->gang_size > 1) {
@@ -1282,7 +1303,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
if (r) {
r = -EAGAIN;
- goto error_unlock;
+ mutex_unlock(&p->adev->notifier_lock);
+ return r;
}
p->fence = dma_fence_get(&leader->base.s_fence->finished);
@@ -1329,14 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
mutex_unlock(&p->adev->notifier_lock);
mutex_unlock(&p->bo_list->bo_list_mutex);
return 0;
-
-error_unlock:
- mutex_unlock(&p->adev->notifier_lock);
-
-error_cleanup:
- for (i = 0; i < p->gang_size; ++i)
- drm_sched_job_cleanup(&p->jobs[i]->base);
- return r;
}
/* Cleanup the parser structure */
@@ -1344,6 +1358,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
unsigned i;
+ amdgpu_sync_free(&parser->sync);
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
kfree(parser->post_deps[i].chain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index 113f39510a72..fb3e3d56d427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_CS_H__
#define __AMDGPU_CS_H__
+#include <linux/ww_mutex.h>
+
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_ring.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0f16d3c09309..f60753f97ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1717,7 +1717,7 @@ no_preempt:
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
- int r, resched, length;
+ int r, length;
struct amdgpu_ring *ring;
struct dma_fence **fences = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)data;
@@ -1747,8 +1747,6 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
/* stop the scheduler */
kthread_park(ring->sched.thread);
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
if (r) {
@@ -1785,8 +1783,6 @@ failure:
up_read(&adev->reset_domain->sem);
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-
pro_end:
kfree(fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index afe6af9c0138..5c7d40873ee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -35,8 +35,11 @@
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
+#include <linux/apple-gmux.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
@@ -78,6 +81,10 @@
#include <drm/drm_drv.h>
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -90,6 +97,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_MAX_RETRY_LIMIT 2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
+static const struct drm_driver amdgpu_kms_driver;
+
const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
@@ -160,7 +169,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
*
* The amdgpu driver provides a sysfs API for reporting the product name
* for the device
- * The file serial_number is used for this and returns the product name
+ * The file product_name is used for this and returns the product name
* as returned from the FRU.
* NOTE: This is only available for certain server cards
*/
@@ -182,7 +191,7 @@ static DEVICE_ATTR(product_name, S_IRUGO,
*
* The amdgpu driver provides a sysfs API for reporting the part number
* for the device
- * The file serial_number is used for this and returns the part number
+ * The file product_number is used for this and returns the part number
* as returned from the FRU.
* NOTE: This is only available for certain server cards
*/
@@ -593,7 +602,7 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (amdgpu_device_skip_hw_access(adev))
return 0;
- if (index < adev->doorbell.num_doorbells) {
+ if (index < adev->doorbell.num_kernel_doorbells) {
return readl(adev->doorbell.ptr + index);
} else {
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
@@ -616,7 +625,7 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (amdgpu_device_skip_hw_access(adev))
return;
- if (index < adev->doorbell.num_doorbells) {
+ if (index < adev->doorbell.num_kernel_doorbells) {
writel(v, adev->doorbell.ptr + index);
} else {
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
@@ -637,7 +646,7 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (amdgpu_device_skip_hw_access(adev))
return 0;
- if (index < adev->doorbell.num_doorbells) {
+ if (index < adev->doorbell.num_kernel_doorbells) {
return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
} else {
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
@@ -660,7 +669,7 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (amdgpu_device_skip_hw_access(adev))
return;
- if (index < adev->doorbell.num_doorbells) {
+ if (index < adev->doorbell.num_kernel_doorbells) {
atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
} else {
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
@@ -671,20 +680,20 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
* amdgpu_device_indirect_rreg - read an indirect register
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
- unsigned long flags;
- u32 r;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ u32 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -702,20 +711,20 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
* amdgpu_device_indirect_rreg64 - read a 64bits indirect register
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
- unsigned long flags;
- u64 r;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ u64 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -745,13 +754,15 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
*
*/
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data)
{
- unsigned long flags;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -774,13 +785,15 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
*
*/
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data)
{
- unsigned long flags;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -799,6 +812,18 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_get_rev_id - query device rev_id
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return device rev_id
+ */
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
+{
+ return adev->nbio.funcs->get_rev_id(adev);
+}
+
+/**
* amdgpu_invalid_rreg - dummy reg read function
*
* @adev: amdgpu_device pointer
@@ -924,32 +949,33 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
+ * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
*/
-static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
+static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
- return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->vram_scratch.robj,
- &adev->vram_scratch.gpu_addr,
- (void **)&adev->vram_scratch.ptr);
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mem_scratch.robj,
+ &adev->mem_scratch.gpu_addr,
+ (void **)&adev->mem_scratch.ptr);
}
/**
- * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
+ * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
-static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
+static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}
/**
@@ -1034,7 +1060,7 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
adev->doorbell.size = 0;
- adev->doorbell.num_doorbells = 0;
+ adev->doorbell.num_kernel_doorbells = 0;
adev->doorbell.ptr = NULL;
return 0;
}
@@ -1049,27 +1075,27 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
if (adev->enable_mes) {
- adev->doorbell.num_doorbells =
+ adev->doorbell.num_kernel_doorbells =
adev->doorbell.size / sizeof(u32);
} else {
- adev->doorbell.num_doorbells =
+ adev->doorbell.num_kernel_doorbells =
min_t(u32, adev->doorbell.size / sizeof(u32),
adev->doorbell_index.max_assignment+1);
- if (adev->doorbell.num_doorbells == 0)
+ if (adev->doorbell.num_kernel_doorbells == 0)
return -EINVAL;
/* For Vega, reserve and map two pages on doorbell BAR since SDMA
* paging queue doorbell use the second page. The
* AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
* doorbells are in the first page. So with paging queue enabled,
- * the max num_doorbells should + 1 page (0x400 in dword)
+ * the max num_kernel_doorbells should be increased by 1 page (0x400 in dword)
*/
if (adev->asic_type >= CHIP_VEGA10)
- adev->doorbell.num_doorbells += 0x400;
+ adev->doorbell.num_kernel_doorbells += 0x400;
}
adev->doorbell.ptr = ioremap(adev->doorbell.base,
- adev->doorbell.num_doorbells *
+ adev->doorbell.num_kernel_doorbells *
sizeof(u32));
if (adev->doorbell.ptr == NULL)
return -ENOMEM;
@@ -1351,6 +1377,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
return pcie_aspm_enabled(adev->pdev);
}
+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+ return true;
+#endif
+}
+
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
@@ -1981,17 +2018,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
- err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
if (err) {
dev_err(adev->dev,
- "Failed to load gpu_info firmware \"%s\"\n",
- fw_name);
- goto out;
- }
- err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
- if (err) {
- dev_err(adev->dev,
- "Failed to validate gpu_info firmware \"%s\"\n",
+ "Failed to get gpu_info firmware \"%s\"\n",
fw_name);
goto out;
}
@@ -2078,6 +2108,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent;
int i, r;
+ bool total;
amdgpu_device_enable_virtual_display(adev);
@@ -2153,7 +2184,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
- amdgpu_amdkfd_device_probe(adev);
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
@@ -2161,6 +2191,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
+ total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -2174,7 +2205,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
} else if (r) {
DRM_ERROR("early_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
- return r;
+ total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
@@ -2205,7 +2236,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
}
+ if (!total)
+ return -ENODEV;
+ amdgpu_amdkfd_device_probe(adev);
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -2331,7 +2365,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
}
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
- ring->num_hw_submission, amdgpu_job_hang_limit,
+ ring->num_hw_submission, 0,
timeout, adev->reset_domain->wq,
ring->sched_score, ring->name,
adev->dev);
@@ -2390,9 +2424,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
- r = amdgpu_device_vram_scratch_init(adev);
+ r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
+ DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
@@ -2410,8 +2444,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* right after GMC hw init, we create CSA */
if (amdgpu_mcbp) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_CSA_SIZE);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
goto init_failed;
@@ -2504,8 +2539,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_fru_get_product_info(adev);
init_failed:
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, true);
return r;
}
@@ -2581,9 +2614,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
- /* skip CG for GFX on S0ix */
+ /* skip CG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2617,9 +2651,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
- /* skip PG for GFX on S0ix */
+ /* skip PG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2871,7 +2906,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
- amdgpu_device_vram_scratch_fini(adev);
+ amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
}
@@ -3027,6 +3062,24 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
continue;
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (adev->in_s0ix &&
+ (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ /* Once swPSP provides the IMU and RLC FW binaries to TOS during cold boot,
+ * they reside in TMR and are expected to be reused by PSP-TOS to reload
+ * from this location, and RLC autoload also gets loaded automatically
+ * from here based on the PMFW -> PSP message during the re-init sequence.
+ * Therefore, PSP suspend & resume should be skipped to avoid destroying
+ * the TMR and reloading the FWs again for IMU-enabled APU ASICs.
+ */
+ if (amdgpu_in_reset(adev) &&
+ (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
+ continue;
+
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -3126,9 +3179,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
+ AMD_IP_BLOCK_TYPE_MES,
AMD_IP_BLOCK_TYPE_UVD,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_IP_BLOCK_TYPE_VCN
+ AMD_IP_BLOCK_TYPE_VCN,
+ AMD_IP_BLOCK_TYPE_JPEG
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -3227,15 +3282,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return r;
}
adev->ip_blocks[i].status.hw = true;
-
- if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
- * amdgpu_device_resume() after IP resume.
- */
- amdgpu_gfx_off_ctrl(adev, false);
- DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
- }
-
}
return 0;
@@ -3257,9 +3303,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_amdkfd_resume_iommu(adev);
- if (r)
- return r;
+ if (!adev->in_s0ix) {
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ return r;
+ }
r = amdgpu_device_ip_resume_phase1(adev);
if (r)
@@ -3530,6 +3578,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
int r, i;
bool px = false;
u32 max_MBps;
+ int tmp;
adev->shutdown = false;
adev->flags = flags;
@@ -3687,6 +3736,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* Get rid of things like offb */
+ r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+ if (r)
+ return r;
+
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -3703,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ /* APUs with gfx9 onwards don't rely on PCIe atomics; rather, the
+ * internal path natively supports atomics, so set have_atomics_support to true.
+ */
+ else if ((adev->flags & AMD_IS_APU) &&
+ (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+ adev->have_atomics_support = true;
else
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
@@ -3746,7 +3806,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@@ -3754,8 +3820,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- pci_enable_pcie_error_reporting(adev->pdev);
-
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
@@ -3806,18 +3870,6 @@ fence_driver_init:
r = amdgpu_device_ip_init(adev);
if (r) {
- /* failed in exclusive mode due to timeout */
- if (amdgpu_sriov_vf(adev) &&
- !amdgpu_sriov_runtime(adev) &&
- amdgpu_virt_mmio_blocked(adev) &&
- !amdgpu_virt_wait_reset(adev)) {
- dev_err(adev->dev, "VF exclusive mode timeout\n");
- /* Don't send request since VF is inactive. */
- adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
- adev->virt.ops = NULL;
- r = -EAGAIN;
- goto release_ras_con;
- }
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
goto release_ras_con;
@@ -3845,11 +3897,8 @@ fence_driver_init:
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
r = amdgpu_pm_sysfs_init(adev);
- if (r) {
- adev->pm_sysfs_en = false;
- DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- } else
- adev->pm_sysfs_en = true;
+ if (r)
+ DRM_ERROR("registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
@@ -3889,8 +3938,10 @@ fence_driver_init:
msecs_to_jiffies(AMDGPU_RESUME_MS));
}
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_release_full_gpu(adev, true);
flush_delayed_work(&adev->delayed_init_work);
+ }
r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (r)
@@ -3911,12 +3962,15 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- if (amdgpu_device_supports_px(ddev)) {
- px = true;
+ px = amdgpu_device_supports_px(ddev);
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px);
+
+ if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- }
if (adev->gmc.xgmi.pending_reset)
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
@@ -3927,6 +3981,20 @@ fence_driver_init:
return 0;
release_ras_con:
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, true);
+
+ /* failed in exclusive mode due to timeout */
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_mmio_blocked(adev) &&
+ !amdgpu_virt_wait_reset(adev)) {
+ dev_err(adev->dev, "VF exclusive mode timeout\n");
+ /* Don't send request since VF is inactive. */
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ adev->virt.ops = NULL;
+ r = -EAGAIN;
+ }
amdgpu_release_ras_context(adev);
failed:
@@ -3989,12 +4057,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized) {
- flush_delayed_work(&adev->mman.bdev.wq);
- ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- }
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
- if (adev->pm_sysfs_en)
+ if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
@@ -4014,18 +4080,19 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_gart_dummy_page_fini(adev);
- amdgpu_device_unmap_mmio(adev);
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_device_unmap_mmio(adev);
}
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
int idx;
+ bool px;
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
- release_firmware(adev->firmware.gpu_info_fw);
- adev->firmware.gpu_info_fw = NULL;
+ amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@@ -4040,10 +4107,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (amdgpu_device_supports_px(adev_to_drm(adev))) {
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
+
+ if (px)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
- }
+
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_unregister(adev->pdev);
@@ -4128,8 +4201,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
- drm_kms_helper_poll_disable(dev);
-
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
@@ -4223,18 +4294,9 @@ exit:
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- if (adev->in_s0ix) {
- /* re-enable gfxoff after IP resume. This re-enables gfxoff after
- * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
- */
- amdgpu_gfx_off_ctrl(adev, true);
- DRM_DEBUG("will enable gfxoff for the mission mode\n");
- }
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
- drm_kms_helper_poll_enable(dev);
-
amdgpu_ras_resume(adev);
if (adev->mode_info.num_crtc) {
@@ -4260,6 +4322,9 @@ exit:
}
adev->in_suspend = false;
+ if (adev->enable_mes)
+ amdgpu_mes_self_test(adev);
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
@@ -4447,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
- shadow = &vmbo->bo;
+ /* If vm is compute context or adev is APU, shadow will be NULL */
+ if (!vmbo->shadow)
+ continue;
+ shadow = vmbo->shadow;
+
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
@@ -4610,11 +4679,6 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (!amdgpu_ras_is_poison_mode_supported(adev))
return true;
- if (!amdgpu_device_ip_check_soft_reset(adev)) {
- dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
- return false;
- }
-
if (amdgpu_sriov_vf(adev))
return true;
@@ -4739,7 +4803,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!need_full_reset)
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- if (!need_full_reset && amdgpu_gpu_recovery) {
+ if (!need_full_reset && amdgpu_gpu_recovery &&
+ amdgpu_device_ip_check_soft_reset(adev)) {
amdgpu_device_ip_pre_soft_reset(adev);
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
@@ -5134,6 +5199,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
* @job: which job trigger hang
+ * @reset_context: amdgpu reset context pointer
*
* Attempt to reset the GPU if it has hung (all asics).
* Attempt to do soft-reset or full-reset and reinitialize Asic
@@ -5303,8 +5369,9 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
- /* Aldebaran supports ras in SRIOV, so need resume ras during reset */
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so we need to resume ras during reset */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
@@ -5573,7 +5640,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_baco(dev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -5589,7 +5656,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_baco(dev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -5865,8 +5932,8 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
int amdgpu_in_reset(struct amdgpu_device *adev)
{
return atomic_read(&adev->reset_domain->in_gpu_reset);
- }
-
+}
+
/**
* amdgpu_device_halt() - bring hardware to some kind of halt state
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1bbd56029a4f..0ecce0b92b82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -33,13 +33,16 @@
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+#include "nbio_v7_9.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
@@ -542,6 +545,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
struct harvest_table *harvest_info;
u16 offset;
int i;
+ uint32_t umc_harvest_config = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
@@ -569,12 +573,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
(*umc_harvest_count)++;
break;
default:
break;
}
}
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
}
/* ================================================== */
@@ -704,7 +713,7 @@ static void ip_hw_instance_release(struct kobject *kobj)
kfree(ip_hw_instance);
}
-static struct kobj_type ip_hw_instance_ktype = {
+static const struct kobj_type ip_hw_instance_ktype = {
.release = ip_hw_instance_release,
.sysfs_ops = &ip_hw_instance_sysfs_ops,
.default_groups = ip_hw_instance_groups,
@@ -723,7 +732,7 @@ static void ip_hw_id_release(struct kobject *kobj)
kfree(ip_hw_id);
}
-static struct kobj_type ip_hw_id_ktype = {
+static const struct kobj_type ip_hw_id_ktype = {
.release = ip_hw_id_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -786,18 +795,18 @@ static const struct sysfs_ops ip_die_entry_sysfs_ops = {
.show = ip_die_entry_attr_show,
};
-static struct kobj_type ip_die_entry_ktype = {
+static const struct kobj_type ip_die_entry_ktype = {
.release = ip_die_entry_release,
.sysfs_ops = &ip_die_entry_sysfs_ops,
.default_groups = ip_die_entry_groups,
};
-static struct kobj_type die_kobj_ktype = {
+static const struct kobj_type die_kobj_ktype = {
.release = die_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
-static struct kobj_type ip_discovery_ktype = {
+static const struct kobj_type ip_discovery_ktype = {
.release = ip_disc_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -1155,8 +1164,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
AMDGPU_MAX_SDMA_INSTANCES);
}
- if (le16_to_cpu(ip->hw_id) == UMC_HWID)
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
for (k = 0; k < num_base_address; k++) {
/*
@@ -1491,6 +1502,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1536,6 +1548,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1582,6 +1595,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -1640,6 +1654,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
@@ -1833,6 +1848,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 4, 0):
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
break;
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
@@ -1933,9 +1951,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
- break;
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ return 0;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2166,6 +2183,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
@@ -2250,6 +2268,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v7_4_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
break;
+ case IP_VERSION(7, 9, 0):
+ adev->nbio.funcs = &nbio_v7_9_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
+ break;
case IP_VERSION(7, 2, 0):
case IP_VERSION(7, 2, 1):
case IP_VERSION(7, 3, 0):
@@ -2295,6 +2317,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
adev->hdp.funcs = &hdp_v4_0_funcs;
break;
case IP_VERSION(5, 0, 0):
@@ -2329,6 +2352,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 2):
adev->df.funcs = &df_v1_7_funcs;
break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b22471b3bd63..d60fe7eb5579 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -42,6 +42,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_vblank.h>
/**
@@ -63,7 +64,7 @@
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- hotplug_work);
+ hotplug_work.work);
struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -1617,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int r;
+ drm_kms_helper_poll_disable(dev);
+
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
@@ -1693,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
drm_modeset_unlock_all(dev);
+ drm_kms_helper_poll_enable(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 271e30e34d93..0c001bb8fc2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -37,6 +37,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
+#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index 7199b6b0be81..8fd11497faba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -21,6 +21,9 @@
*
*/
+#ifndef AMDGPU_DOORBELL_H
+#define AMDGPU_DOORBELL_H
+
/*
* GPU doorbell structures, functions & helpers
*/
@@ -29,7 +32,9 @@ struct amdgpu_doorbell {
resource_size_t base;
resource_size_t size;
u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+
+ /* Number of doorbells reserved for amdgpu kernel driver */
+ u32 num_kernel_doorbells;
};
/* Reserved doorbells for amdgpu (including multimedia).
@@ -306,3 +311,4 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b4f2d61ea0d5..b1ca1ab6d6ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,7 +23,6 @@
*/
#include <drm/amdgpu_drm.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
@@ -39,7 +38,6 @@
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
#include <linux/cc_platform.h>
-#include <linux/fb.h>
#include <linux/dynamic_debug.h>
#include "amdgpu.h"
@@ -105,13 +103,19 @@
* - 3.46.0 - To enable hot plug amdgpu tests in libdrm
* - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
* - 3.48.0 - Add IP discovery version info to HW INFO
- * 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
+ * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
+ * - 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
+ * - 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
+ * tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
+ * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 49
+#define KMS_DRIVER_MINOR 52
#define KMS_DRIVER_PATCHLEVEL 0
-int amdgpu_vram_limit;
+unsigned int amdgpu_vram_limit = UINT_MAX;
int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
@@ -144,8 +148,8 @@ uint amdgpu_pcie_lane_cap;
u64 amdgpu_cg_mask = 0xffffffffffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
-char *amdgpu_disable_cu = NULL;
-char *amdgpu_virtual_display = NULL;
+char *amdgpu_disable_cu;
+char *amdgpu_virtual_display;
/*
* OverDrive(bit 14) disabled by default
@@ -153,7 +157,6 @@ char *amdgpu_virtual_display = NULL;
*/
uint amdgpu_pp_feature_mask = 0xfff7bfff;
uint amdgpu_force_long_training;
-int amdgpu_job_hang_limit;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
@@ -181,11 +184,13 @@ int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -515,13 +520,6 @@ MODULE_PARM_DESC(virtual_display,
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
/**
- * DOC: job_hang_limit (int)
- * Set how much time allow a job hang and not drop it. The default is 0.
- */
-MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
-module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
-
-/**
* DOC: lbpw (int)
* Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
*/
@@ -816,7 +814,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa
* DOC: no_queue_eviction_on_vm_fault (int)
* If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
*/
-int amdgpu_no_queue_eviction_on_vm_fault = 0;
+int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
@@ -880,6 +878,32 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing to achieve a seamless
+ * mode change experience when setting a FreeSync-supported mode for which a
+ * full modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and VRR range of the connected display, when users enable
+ * this feature. From the userspace perspective, users see a seamless mode
+ * change experience when switching between different refresh rates at the
+ * same resolution. Additionally, userspace applications such as video playback
+ * can read this mode list and change the refresh rate based on the video
+ * frame rate. Finally, userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value is 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
+/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
@@ -892,7 +916,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -906,6 +930,16 @@ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = e
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
* id > 0 use the soft pptable with specified id.
@@ -2095,11 +2129,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
}
#endif
- /* Get rid of things like offb */
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
- if (ret)
- return ret;
-
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
return PTR_ERR(adev);
@@ -2204,6 +2233,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ drm_dev_unplug(dev);
+
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -2243,8 +2274,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
- drm_dev_unplug(dev);
-
/*
* Flush any in flight DMA operations from device.
* Clear the Bus Master Enable bit and then wait on the PCIe Device
@@ -2380,8 +2409,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
- else
+ else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
+ if (!adev->in_s0ix && !adev->in_s3)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2402,6 +2433,9 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
+ if (!adev->in_s0ix && !adev->in_s3)
+ return 0;
+
/* Avoids registers access if device is physically gone */
if (!pci_device_is_present(adev->pdev))
adev->no_hw_access = true;
@@ -2425,7 +2459,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
adev->in_s4 = false;
if (r)
return r;
- return amdgpu_asic_reset(adev);
+
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+ return 0;
}
static int amdgpu_pmops_thaw(struct device *dev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index c96e458ed088..27a782a9dc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 99a7855ab1bc..c57252f004e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -60,12 +60,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ struct amdgpu_mem_stats stats;
ktime_t usage[AMDGPU_HW_IP_NUM];
uint32_t bus, dev, fn, domain;
unsigned int hw_ip;
int ret;
+ memset(&stats, 0, sizeof(stats));
bus = adev->pdev->bus->number;
domain = pci_domain_nr(adev->pdev->bus);
dev = PCI_SLOT(adev->pdev->devfn);
@@ -75,7 +76,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
if (ret)
return;
- amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_vm_get_memory(vm, &stats);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
@@ -90,9 +91,22 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
- seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
- seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
- seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+ seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
+ seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
+ seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
+ seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n",
+ stats.visible_vram/1024UL);
+ seq_printf(m, "amd-evicted-vram:\t%llu KiB\n",
+ stats.evicted_vram/1024UL);
+ seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n",
+ stats.evicted_visible_vram/1024UL);
+ seq_printf(m, "amd-requested-vram:\t%llu KiB\n",
+ stats.requested_vram/1024UL);
+ seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n",
+ stats.requested_visible_vram/1024UL);
+ seq_printf(m, "amd-requested-gtt:\t%llu KiB\n",
+ stats.requested_gtt/1024UL);
+
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
index 41a4c7056729..e86834bfea1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
@@ -30,7 +30,6 @@
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 00444203220d..f52d0ba91a77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
- if (!ring->no_scheduler)
+ /*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+ if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
@@ -672,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops) {
+ struct amdgpu_job *job;
+
+ /* For non-scheduler bad jobs, i.e. failed ib tests, we need to signal
+ * them right here or we won't be able to track them in fence_drv
+ * and they will remain unsignaled during sa_bo free.
+ */
+ job = container_of(old, struct amdgpu_job, hw_fence);
+ if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bb7350ea1d75..863cb668e000 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -34,6 +34,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -61,10 +62,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
goto unlock;
}
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
- drm_dev_exit(idx);
+ drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
@@ -257,7 +258,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & VM_ACCESS_FLAGS))
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
return drm_gem_ttm_mmap(obj, vma);
}
@@ -968,7 +969,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
+ task = pid_task(file->pid, PIDTYPE_TGID);
seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 23692e5d4d13..f3f541ba0aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
return amdgpu_compute_multipipe == 1;
}
+ if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+ return true;
+
/* FIXME: spreading the queues across pipes causes perf regressions
* on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
@@ -302,6 +305,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.kiq;
+ ring->vm_hub = AMDGPU_GFXHUB_0;
r = amdgpu_gfx_kiq_acquire(adev, ring);
if (r)
@@ -372,8 +376,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
* KIQ MQD no matter SRIOV or Bare-metal
*/
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
- &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
@@ -680,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
- if (r)
- goto late_fini;
+ if (adev->gfx.cp_ecc_error_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ }
} else {
amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
}
@@ -693,6 +702,50 @@ late_fini:
return r;
}
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err = 0;
+ struct amdgpu_gfx_ras *ras = NULL;
+
+ /* If adev->gfx.ras is NULL, gfx does not support the
+ * ras function, so do nothing here.
+ */
+ if (!adev->gfx.ras)
+ return 0;
+
+ ras = adev->gfx.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register gfx ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "gfx");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if = &ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the gfx default ras_late_init */
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
+
+ /* If no special ras_cb function is defined, use the default ras_cb */
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
+
+ return 0;
+}
+
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
+ return adev->gfx.ras->poison_consumption_handler(adev, entry);
+
+ return 0;
+}
+
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry)
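
Note on the hunk above: amdgpu_gfx_ras_sw_init() introduces the registration shape that later hunks in this series repeat for the hdp, mmhub, mca, nbio and jpeg blocks -- return early when the per-block ras pointer is NULL, register the ras_block, fill in ras_comm (name, block id, error type), publish ras_if, and fall back to default callbacks. A minimal sketch of that shared shape follows; "my_block" and the helper name are illustrative, only the amdgpu_ras_* calls and ras_comm fields come from the hunks themselves.

static int my_block_ras_sw_init(struct amdgpu_device *adev,
				struct amdgpu_ras_block_object *obj,
				const char *name,
				enum amdgpu_ras_block block_id)
{
	int err;

	if (!obj)	/* block has no RAS support, nothing to register */
		return 0;

	err = amdgpu_ras_register_ras_block(adev, obj);
	if (err) {
		dev_err(adev->dev, "Failed to register %s ras block!\n", name);
		return err;
	}

	strcpy(obj->ras_comm.name, name);
	obj->ras_comm.block = block_id;
	obj->ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	return 0;
}
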
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b3df4787877e..bfabea76d166 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -42,6 +42,8 @@
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+#define AMDGPU_MAX_GC_INSTANCES 8
+
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -53,6 +55,15 @@ enum amdgpu_gfx_pipe_priority {
#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+enum amdgpu_gfx_partition {
+ AMDGPU_SPX_PARTITION_MODE = 0,
+ AMDGPU_DPX_PARTITION_MODE = 1,
+ AMDGPU_TPX_PARTITION_MODE = 2,
+ AMDGPU_QPX_PARTITION_MODE = 3,
+ AMDGPU_CPX_PARTITION_MODE = 4,
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE,
+};
+
struct amdgpu_mec {
struct amdgpu_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
@@ -178,6 +189,8 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
+ /* Whether texture coordinate truncation is conformant. */
+ bool ta_cntl2_truncate_coord_mode;
uint64_t tcc_disabled_mask;
uint32_t gc_num_tcp_per_sa;
uint32_t gc_num_sdp_interface;
@@ -210,6 +223,11 @@ struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
+ int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+ int (*poison_consumption_handler)(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
};
struct amdgpu_gfx_funcs {
@@ -316,13 +334,14 @@ struct amdgpu_gfx {
bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
unsigned num_gfx_rings;
- struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
struct amdgpu_irq_src cp_ecc_error_irq;
struct amdgpu_irq_src sq_irq;
+ struct amdgpu_irq_src rlc_gc_fed_irq;
struct sq_work sq_work;
/* gfx status */
@@ -356,6 +375,10 @@ struct amdgpu_gfx {
struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
struct amdgpu_ring_mux muxer;
+
+ enum amdgpu_gfx_partition partition_mode;
+ uint32_t num_xcd;
+ uint32_t num_xcc_per_xcp;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -432,4 +455,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 02a4c93673ce..4e2531758866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -35,6 +35,7 @@
#include "amdgpu_xgmi.h"
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
@@ -201,13 +202,20 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
u64 base)
{
+ uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
mc->vram_start = base;
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (limit && limit < mc->real_vram_size)
+ if (limit < mc->real_vram_size)
mc->real_vram_size = limit;
+ if (vis_limit && vis_limit < mc->visible_vram_size)
+ mc->visible_vram_size = vis_limit;
+
+ if (mc->real_vram_size < mc->visible_vram_size)
+ mc->visible_vram_size = mc->real_vram_size;
+
if (mc->xgmi.num_physical_nodes == 0) {
mc->fb_start = mc->vram_start;
mc->fb_end = mc->vram_end;
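
The clamping above applies the module parameters (given in MiB) in a fixed order: the overall VRAM limit first, then the visible-VRAM limit, and finally visible VRAM is capped to the (possibly clamped) real VRAM size. A small worked sketch with illustrative parameter values:

	/* illustrative: amdgpu_vram_limit=4096, amdgpu_vis_vram_limit=256 (MiB) */
	uint64_t limit = (uint64_t)4096 << 20;
	uint64_t vis_limit = (uint64_t)256 << 20;

	if (limit < real_vram_size)
		real_vram_size = limit;		/* e.g. 16 GiB board -> 4 GiB usable */
	if (vis_limit && vis_limit < visible_vram_size)
		visible_vram_size = vis_limit;	/* shrink the CPU-visible window */
	if (real_vram_size < visible_vram_size)
		visible_vram_size = real_vram_size; /* visible never exceeds real */
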
@@ -387,8 +395,21 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
while (fault->timestamp >= stamp) {
uint64_t tmp;
- if (atomic64_read(&fault->key) == key)
- return true;
+ if (atomic64_read(&fault->key) == key) {
+ /*
+ * If we get a fault that is already present in the fault_ring
+ * but its timestamp is after that entry's expiry timestamp,
+ * then this is a new fault that needs to be added
+ * into the fault ring.
+ */
+ if (fault->timestamp_expiry != 0 &&
+ amdgpu_ih_ts_after(fault->timestamp_expiry,
+ timestamp))
+ break;
+ else
+ return true;
+ }
tmp = fault->timestamp;
fault = &gmc->fault_ring[fault->next];
@@ -424,28 +445,74 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
{
struct amdgpu_gmc *gmc = &adev->gmc;
uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
+ struct amdgpu_ih_ring *ih;
struct amdgpu_gmc_fault *fault;
+ uint32_t last_wptr;
+ uint64_t last_ts;
uint32_t hash;
uint64_t tmp;
+ ih = adev->irq.retry_cam_enabled ? &adev->irq.ih_soft : &adev->irq.ih1;
+ /* Get the WPTR of the last entry in IH ring */
+ last_wptr = amdgpu_ih_get_wptr(adev, ih);
+ /* Order wptr with ring data. */
+ rmb();
+ /* Get the timestamp of the last entry in IH ring */
+ last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1);
+
hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
do {
- if (atomic64_cmpxchg(&fault->key, key, 0) == key)
+ if (atomic64_read(&fault->key) == key) {
+ /*
+ * Update the timestamp when this fault
+ * expired.
+ */
+ fault->timestamp_expiry = last_ts;
break;
+ }
tmp = fault->timestamp;
fault = &gmc->fault_ring[fault->next];
} while (fault->timestamp < tmp);
}
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
{
- if (!adev->gmc.xgmi.connected_to_cpu) {
- adev->gmc.xgmi.ras = &xgmi_ras;
- amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
- adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
- }
+ int r;
+
+ /* umc ras block */
+ r = amdgpu_umc_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mmhub ras block */
+ r = amdgpu_mmhub_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* hdp ras block */
+ r = amdgpu_hdp_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mca.x ras block */
+ r = amdgpu_mca_mp0_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mp1_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mpio_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* xgmi ras block */
+ r = amdgpu_xgmi_ras_sw_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -487,7 +554,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
for (i = 0; i < adev->num_rings; ++i) {
ring = adev->rings[i];
- vmhub = ring->funcs->vmhub;
+ vmhub = ring->vm_hub;
if (ring == &adev->mes.ring)
continue;
@@ -503,7 +570,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
- ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+ ring->name, ring->vm_inv_eng, ring->vm_hub);
}
return 0;
@@ -544,6 +611,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
/* VANGOGH */
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
@@ -586,6 +654,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
gc_ver == IP_VERSION(9, 4, 0) ||
gc_ver == IP_VERSION(9, 4, 1) ||
gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver == IP_VERSION(9, 4, 3) ||
gc_ver >= IP_VERSION(10, 3, 0));
gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 0305b660cd17..6d105d7fb98b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -70,6 +70,7 @@ struct amdgpu_gmc_fault {
uint64_t timestamp:48;
uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
atomic64_t key;
+ uint64_t timestamp_expiry:48;
};
/*
@@ -104,6 +105,8 @@ struct amdgpu_vmhub {
uint32_t vm_cntx_cntl_vm_fault;
uint32_t vm_l2_bank_select_reserved_cid2;
+ uint32_t vm_contexts_disable;
+
const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
@@ -351,7 +354,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
new file mode 100644
index 000000000000..b6cf801939aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_hdp_ras *ras;
+
+ if (!adev->hdp.ras)
+ return 0;
+
+ ras = adev->hdp.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register hdp ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "hdp");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__HDP;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->hdp.ras_if = &ras->ras_block.ras_comm;
+
+ /* hdp ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index ac5c61d3de2b..7b8a6152dc8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -43,5 +43,5 @@ struct amdgpu_hdp {
struct amdgpu_hdp_ras *ras;
};
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bcccc348dbe2..4ff348e10e4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -69,7 +69,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (size) {
r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
- &ib->sa_bo, size, 256);
+ &ib->sa_bo, size);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -267,7 +267,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
- amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
+ amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
amdgpu_ring_undo(ring);
return r;
}
@@ -309,8 +309,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
- AMDGPU_IB_POOL_SIZE,
- AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_SIZE, 256,
AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index fcb711a11a5b..c991ca0b7a1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -202,7 +202,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
+ unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence **fences;
unsigned i;
@@ -277,7 +277,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
+ unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
@@ -338,7 +338,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
+ unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
@@ -398,7 +398,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
+ unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *idle = NULL;
struct amdgpu_vmid *id = NULL;
@@ -497,6 +497,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
!--id_mgr->reserved_use_count) {
/* give the reserved ID back to normal round robin */
list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
+ id_mgr->reserved = NULL;
}
vm->reserved_vmid[vmhub] = false;
mutex_unlock(&id_mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index a6aef488a822..fafebec5b7b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -45,7 +45,6 @@
#include <linux/irq.h>
#include <linux/pci.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@@ -597,6 +596,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
if (!src->enabled_types || !src->funcs->set)
return -EINVAL;
+ if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
+ return -EINVAL;
+
if (atomic_dec_and_test(&src->enabled_types[type]))
return amdgpu_irq_update(adev, src, type);
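
The WARN_ON added above guards against unbalanced puts: enabled_types[type] is an atomic refcount, so the hardware enable/disable only happens on the 0->1 and 1->0 transitions, and every amdgpu_irq_put() must pair with an earlier amdgpu_irq_get(). A hedged usage sketch (hypothetical IP-block hooks, error handling trimmed):

/* hypothetical late_init/hw_fini pair for an IP block interrupt source */
static int my_ip_late_init(struct amdgpu_device *adev)
{
	/* 0 -> 1 transition programs the source via src->funcs->set() */
	return amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
}

static int my_ip_hw_fini(struct amdgpu_device *adev)
{
	/* balances the get above; a second put would now trip the WARN_ON */
	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	return 0;
}
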
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index e9f2c11ea416..be243adf3e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -98,6 +98,8 @@ struct amdgpu_irq {
struct irq_domain *domain; /* GPU irq controller domain */
unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
uint32_t srbm_soft_reset;
+ u32 retry_cam_doorbell_index;
+ bool retry_cam_enabled;
};
void amdgpu_irq_disable_all(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9e549923622b..c3d9d75143f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -161,8 +161,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
- /* use sched fence if available */
- f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
+ /* Check if any fences were initialized */
+ if (job->base.s_fence && job->base.s_fence->finished.ops)
+ f = &job->base.s_fence->finished;
+ else if (job->hw_fence.ops)
+ f = &job->hw_fence;
+ else
+ f = NULL;
+
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6f81ed4fb0d9..b07c000fc8ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -118,6 +118,10 @@ int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ /* JPEG in SRIOV does not support direct register read/write */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
@@ -202,17 +206,18 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else {
r = 0;
}
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
- if (tmp == 0xDEADBEEF)
- break;
- udelay(1);
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
}
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
-
dma_fence_put(fence);
error:
return r;
@@ -236,19 +241,28 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
-void jpeg_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
+ int err;
+ struct amdgpu_jpeg_ras *ras;
+
if (!adev->jpeg.ras)
- return;
+ return 0;
+
+ ras = adev->jpeg.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register jpeg ras block!\n");
+ return err;
+ }
- amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
+ strcpy(ras->ras_block.ras_comm.name, "jpeg");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->jpeg.ras_if = &ras->ras_block.ras_comm;
- strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
- adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
- adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->jpeg.ras->ras_block.ras_late_init)
- adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index e8ca3e32ad52..0ca76f0f23e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -72,6 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void jpeg_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7aa7e52ca784..0efb38539d70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,6 +43,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
+#include "amd_pcie.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -767,6 +768,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
+ uint32_t pcie_gen_mask;
int ret;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -785,15 +787,20 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (adev->pm.dpm_enabled) {
dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
+ dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
} else {
- dev_info->max_engine_clock = adev->clock.default_sclk * 10;
- dev_info->max_memory_clock = adev->clock.default_mclk * 10;
+ dev_info->max_engine_clock =
+ dev_info->min_engine_clock =
+ adev->clock.default_sclk * 10;
+ dev_info->max_memory_clock =
+ dev_info->min_memory_clock =
+ adev->clock.default_mclk * 10;
}
dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines;
dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
- dev_info->_pad = 0;
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
@@ -801,6 +808,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
+ if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -847,6 +856,26 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+ /* Combine the chip gen mask with the platform (CPU/mobo) mask. */
+ pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ dev_info->pcie_gen = fls(pcie_gen_mask);
+ dev_info->pcie_num_lanes =
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+
+ dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
+ dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
+ dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+ dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+ dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
+ adev->gfx.config.gc_gl1c_per_sa;
+ dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
+ dev_info->mall_size = adev->gmc.mall_size;
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
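
On the dev_info additions above: one 16-bit half of pcie_gen_mask carries the ASIC-supported link speeds and the other half the platform-supported ones, so ANDing the mask with its own 16-bit shift keeps only the generations both sides can do, and fls() then picks the highest common one; pcie_num_lanes is a widest-wins decode of the CAIL width flags. The same decode as a standalone sketch (helper name illustrative, mask macros taken from the hunk):

static unsigned int pcie_lanes_from_mlw_mask(uint32_t mlw_mask)
{
	/* widest supported link wins, mirroring the ternary chain above */
	if (mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32) return 32;
	if (mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) return 16;
	if (mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) return 12;
	if (mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)  return 8;
	if (mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)  return 4;
	if (mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)  return 2;
	return 1;
}

/* gen: intersect the two halves, then take the highest set bit */
uint32_t pcie_gen = fls(adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16));
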
@@ -1014,6 +1043,24 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
ui32 /= 100;
break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
+ /* get peak pstate sclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
+ /* get peak pstate mclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 51c2a82e2fa4..8d9ff9e151de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -70,3 +70,75 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
+
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp0.ras)
+ return 0;
+
+ ras = adev->mca.mp0.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp1.ras)
+ return 0;
+
+ ras = adev->mca.mp1.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mpio.ras)
+ return 0;
+
+ ras = adev->mca.mpio.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index 7ce16d16e34b..997a073e2409 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -30,12 +30,7 @@ struct amdgpu_mca_ras {
struct amdgpu_mca_ras_block *ras;
};
-struct amdgpu_mca_funcs {
- void (*init)(struct amdgpu_device *adev);
-};
-
struct amdgpu_mca {
- const struct amdgpu_mca_funcs *funcs;
struct amdgpu_mca_ras mp0;
struct amdgpu_mca_ras mp1;
struct amdgpu_mca_ras mpio;
@@ -55,5 +50,7 @@ void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
void *ras_error_status);
-
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 0c546245793b..f0f00466b59f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -21,6 +21,8 @@
*
*/
+#include <linux/firmware.h>
+
#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
@@ -1102,6 +1104,11 @@ int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
&ctx_data->meta_data_obj,
&ctx_data->meta_data_mc_addr,
&ctx_data->meta_data_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
+ return r;
+ }
+
if (!ctx_data->meta_data_obj)
return -ENOMEM;
@@ -1326,12 +1333,9 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
struct amdgpu_mes_ctx_data ctx_data = {0};
struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
- AMDGPU_MES_CTX_MAX_GFX_RINGS},
- { AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
- { AMDGPU_RING_TYPE_SDMA,
- AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
+ int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
+ { AMDGPU_RING_TYPE_COMPUTE, 1 },
+ { AMDGPU_RING_TYPE_SDMA, 1} };
int i, r, pasid, k = 0;
pasid = amdgpu_pasid_alloc(16);
@@ -1423,3 +1427,78 @@ error_pasid:
kfree(vm);
return 0;
}
+
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
+{
+ const struct mes_firmware_header_v1_0 *mes_hdr;
+ struct amdgpu_firmware_info *info;
+ char ucode_prefix[30];
+ char fw_name[40];
+ bool need_retry = false;
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
+ sizeof(ucode_prefix));
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+ ucode_prefix,
+ pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
+ need_retry = true;
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+ ucode_prefix,
+ pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+ if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
+ ucode_prefix);
+ DRM_INFO("try to fall back to %s\n", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
+ fw_name);
+ }
+
+ if (r)
+ goto out;
+
+ mes_hdr = (const struct mes_firmware_header_v1_0 *)
+ adev->mes.fw[pipe]->data;
+ adev->mes.uc_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+ adev->mes.data_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ int ucode, ucode_data;
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE) {
+ ucode = AMDGPU_UCODE_ID_CP_MES;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+ } else {
+ ucode = AMDGPU_UCODE_ID_CP_MES1;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ }
+
+ info = &adev->firmware.ucode[ucode];
+ info->ucode_id = ucode;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+ PAGE_SIZE);
+
+ info = &adev->firmware.ucode[ucode_data];
+ info->ucode_id = ucode_data;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+ PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ return r;
+}
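
amdgpu_mes_init_microcode() above encodes the firmware naming scheme: the prefix is derived from the GC IP version, GC 11+ parts use the split "<prefix>_mes_2.bin" / "<prefix>_mes1.bin" pair, older parts use "<prefix>_mes.bin" / "<prefix>_mes1.bin", and the scheduler pipe can fall back to the legacy single "<prefix>_mes.bin" name if the split file is missing. A trimmed sketch of just the name selection; the prefix string and the sched_pipe flag are illustrative stand-ins:

	char fw_name[40];
	const char *ucode_prefix = "gc_11_0_0";	/* illustrative prefix */
	bool new_scheme = true;			/* GC IP version >= 11.0.0 */
	bool sched_pipe = true;			/* pipe == AMDGPU_MES_SCHED_PIPE */

	if (new_scheme)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix, sched_pipe ? "_2" : "1");
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix, sched_pipe ? "" : "1");
	/* on failure for the sched pipe, retry with "amdgpu/%s_mes.bin" */
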
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 97c05d08a551..547ec35691fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -306,6 +306,7 @@ struct amdgpu_mes_funcs {
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..0f6b1021fef3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mmhub_ras *ras;
+
+ if (!adev->mmhub.ras)
+ return 0;
+
+ ras = adev->mmhub.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mmhub ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mmhub");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if = &ras->ras_block.ras_comm;
+
+ /* mmhub ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 93430d3823c9..d21bb6dae56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -48,5 +48,7 @@ struct amdgpu_mmhub {
struct amdgpu_mmhub_ras *ras;
};
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8a39300b1a84..32fe05c810c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -35,7 +35,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
@@ -534,6 +533,7 @@ struct amdgpu_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
+ bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct amdgpu_hpd hpd;
struct amdgpu_router router;
@@ -549,8 +549,8 @@ struct amdgpu_mst_connector {
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
- struct drm_dp_mst_port *port;
- struct amdgpu_connector *mst_port;
+ struct drm_dp_mst_port *mst_output_port;
+ struct amdgpu_connector *mst_root;
bool is_mst_connector;
struct amdgpu_encoder *mst_encoder;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 37d779b8e4a6..a3bc00577a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -22,6 +22,29 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_nbio_ras *ras;
+
+ if (!adev->nbio.ras)
+ return 0;
+
+ ras = adev->nbio.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index a240336bbc6b..c686ff4bcc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -106,5 +106,6 @@ struct amdgpu_nbio {
struct amdgpu_nbio_ras *ras;
};
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4e684c2afc70..2bd1a54ee866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -139,7 +139,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
- else
+ else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
@@ -470,8 +470,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
return true;
fail:
- DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
- man->size);
+ if (man)
+ DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+ man->size);
return false;
}
@@ -599,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1264,24 +1265,41 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats)
{
unsigned int domain;
+ uint64_t size = amdgpu_bo_size(bo);
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- *vram_mem += amdgpu_bo_size(bo);
+ stats->vram += size;
+ if (amdgpu_bo_in_cpu_visible_vram(bo))
+ stats->visible_vram += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
- *gtt_mem += amdgpu_bo_size(bo);
+ stats->gtt += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
- *cpu_mem += amdgpu_bo_size(bo);
+ stats->cpu += size;
break;
}
+
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
+ stats->requested_vram += size;
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ stats->requested_visible_vram += size;
+
+ if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+ stats->evicted_vram += size;
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ stats->evicted_visible_vram += size;
+ }
+ } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
+ stats->requested_gtt += size;
+ }
}
/**
@@ -1314,7 +1332,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
- adev->in_suspend || adev->shutdown)
+ adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
return;
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
@@ -1345,7 +1363,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
- unsigned long offset;
int r;
/* Remember that this BO was accessed by the CPU */
@@ -1354,8 +1371,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- offset = bo->resource->start << PAGE_SHIFT;
- if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
+ if (amdgpu_bo_in_cpu_visible_vram(abo))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1377,10 +1393,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- (offset + bo->base.size) > adev->gmc.visible_vram_size)
+ !amdgpu_bo_in_cpu_visible_vram(abo))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1573,9 +1588,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
- seq_printf(m, " imported from %p", dma_buf);
+ seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
else if (dma_buf)
- seq_printf(m, " exported as %p", dma_buf);
+ seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
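
The amdgpu_bo_get_memory() rework above replaces the three loose vram/gtt/cpu counters with the amdgpu_mem_stats bundle declared in amdgpu_object.h below, so a caller zeroes one struct and accumulates it across BOs. A hedged usage sketch; bo_a/bo_b stand in for whatever BO collection the real caller (the VM/fdinfo code) walks:

	struct amdgpu_mem_stats stats = {0};

	amdgpu_bo_get_memory(bo_a, &stats);	/* accumulate per BO */
	amdgpu_bo_get_memory(bo_b, &stats);

	pr_debug("vram %llu (visible %llu), evicted %llu of %llu requested\n",
		 stats.vram, stats.visible_vram,
		 stats.evicted_vram, stats.requested_vram);
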
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 93207badf83f..35b8106816a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -126,6 +126,27 @@ struct amdgpu_bo_vm {
struct amdgpu_vm_bo_base entries[];
};
+struct amdgpu_mem_stats {
+ /* current VRAM usage, includes visible VRAM */
+ uint64_t vram;
+ /* current visible VRAM usage */
+ uint64_t visible_vram;
+ /* current GTT usage */
+ uint64_t gtt;
+ /* current system memory usage */
+ uint64_t cpu;
+ /* sum of evicted buffers, includes visible VRAM */
+ uint64_t evicted_vram;
+ /* sum of evicted buffers due to CPU access */
+ uint64_t evicted_visible_vram;
+ /* how much userspace asked for, includes vis.VRAM */
+ uint64_t requested_vram;
+ /* how much userspace asked for */
+ uint64_t requested_visible_vram;
+ /* how much userspace asked for */
+ uint64_t requested_gtt;
+};
+
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -325,8 +346,8 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
@@ -336,15 +357,22 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
/*
* sub allocation
*/
+static inline struct amdgpu_sa_manager *
+to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct amdgpu_sa_manager, base);
+}
-static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
@@ -355,11 +383,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align);
+ struct drm_suballoc **sa_bo,
+ unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct amdgpu_sa_bo **sa_bo,
- struct dma_fence *fence);
+ struct drm_suballoc **sa_bo,
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7a2fc920739b..9d7e6e0e73ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -66,7 +66,8 @@ static int psp_ring_init(struct psp_context *psp,
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -122,6 +123,39 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp
}
}
+static int psp_init_sriov_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char ucode_prefix[30];
+ int ret = 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ ret &= psp_init_ta_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -158,6 +192,7 @@ static int psp_early_init(void *handle)
psp_v12_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 1):
@@ -192,7 +227,10 @@ static int psp_early_init(void *handle)
psp_check_pmfw_centralized_cstate_management(psp);
- return 0;
+ if (amdgpu_sriov_vf(adev))
+ return psp_init_sriov_microcode(psp);
+ else
+ return psp_init_microcode(psp);
}
void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
@@ -300,7 +338,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
/* runtime db doesn't exist, exit */
- dev_warn(adev->dev, "PSP runtime database doesn't exist\n");
+ dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
return false;
}
@@ -350,42 +388,6 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
-static int psp_init_sriov_microcode(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- int ret = 0;
-
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(9, 0, 0):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "vega10");
- break;
- case IP_VERSION(11, 0, 9):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "navi12");
- break;
- case IP_VERSION(11, 0, 7):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "sienna_cichlid");
- break;
- case IP_VERSION(13, 0, 2):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "aldebaran");
- ret &= psp_init_ta_microcode(psp, "aldebaran");
- break;
- case IP_VERSION(13, 0, 0):
- adev->virt.autoload_ucode_id = 0;
- break;
- case IP_VERSION(13, 0, 10):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
- break;
- default:
- BUG();
- break;
- }
- return ret;
-}
-
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -401,15 +403,6 @@ static int psp_sw_init(void *handle)
ret = -ENOMEM;
}
- if (amdgpu_sriov_vf(adev))
- ret = psp_init_sriov_microcode(psp);
- else
- ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
- }
-
adev->psp.xgmi_context.supports_extended_data =
!adev->gmc.xgmi.connected_to_cpu &&
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
@@ -514,20 +507,11 @@ static int psp_sw_fini(void *handle)
psp_memory_training_fini(psp);
- release_firmware(psp->sos_fw);
- psp->sos_fw = NULL;
-
- release_firmware(psp->asd_fw);
- psp->asd_fw = NULL;
-
- release_firmware(psp->ta_fw);
- psp->ta_fw = NULL;
-
- release_firmware(psp->cap_fw);
- psp->cap_fw = NULL;
-
- release_firmware(psp->toc_fw);
- psp->toc_fw = NULL;
+ amdgpu_ucode_release(&psp->sos_fw);
+ amdgpu_ucode_release(&psp->asd_fw);
+ amdgpu_ucode_release(&psp->ta_fw);
+ amdgpu_ucode_release(&psp->cap_fw);
+ amdgpu_ucode_release(&psp->toc_fw);
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
@@ -620,7 +604,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
- int index, idx;
+ int index;
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -628,9 +612,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (psp->adev->no_hw_access)
return 0;
- if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
- return 0;
-
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -694,7 +675,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
}
exit:
- drm_dev_exit(idx);
return ret;
}
@@ -797,9 +777,13 @@ static int psp_tmr_init(struct psp_context *psp)
if (!psp->tmr_bo) {
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
+ PSP_TMR_ALIGNMENT,
+ AMDGPU_HAS_VRAM(psp->adev) ?
+ AMDGPU_GEM_DOMAIN_VRAM :
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->tmr_bo, &psp->tmr_mc_addr,
+ pptr);
}
return ret;
@@ -855,7 +839,15 @@ static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
static int psp_tmr_unload(struct psp_context *psp)
{
int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+ struct psp_gfx_cmd_resp *cmd;
+
+ /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
+ * as TMR is not loaded at all
+ */
+ if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_unload_cmd_buf(psp, cmd);
dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
@@ -1092,7 +1084,8 @@ int psp_ta_init_shared_buf(struct psp_context *psp,
* physical) for ta to host memory
*/
return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
@@ -1685,7 +1678,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->hdcp_context.context.initialized) {
+ if (!psp->hdcp_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
if (ret)
return ret;
@@ -1752,7 +1745,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->dtm_context.context.initialized) {
+ if (!psp->dtm_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
if (ret)
return ret;
@@ -1820,7 +1813,7 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->rap_context.context.initialized) {
+ if (!psp->rap_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
if (ret)
return ret;
@@ -1901,7 +1894,7 @@ out_unlock:
static int psp_securedisplay_initialize(struct psp_context *psp)
{
int ret;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
/*
* TODO: bypass the initialize in sriov for now
@@ -2908,25 +2901,15 @@ int psp_ring_cmd_submit(struct psp_context *psp,
return 0;
}
-int psp_init_asd_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for asd microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
if (err)
goto out;
@@ -2938,31 +2921,19 @@ int psp_init_asd_microcode(struct psp_context *psp,
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
- dev_err(adev->dev, "fail to initialize asd microcode\n");
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.asd_fw);
return err;
}
-int psp_init_toc_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for toc microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
- err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.toc_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
if (err)
goto out;
@@ -2974,9 +2945,7 @@ int psp_init_toc_microcode(struct psp_context *psp,
le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
return 0;
out:
- dev_err(adev->dev, "fail to request/validate toc microcode\n");
- release_firmware(adev->psp.toc_fw);
- adev->psp.toc_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.toc_fw);
return err;
}
@@ -3107,8 +3076,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
return 0;
}
-int psp_init_sos_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
@@ -3121,17 +3089,8 @@ int psp_init_sos_microcode(struct psp_context *psp,
uint8_t *ucode_array_start_addr;
int fw_index = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for sos microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
if (err)
goto out;
@@ -3203,10 +3162,7 @@ int psp_init_sos_microcode(struct psp_context *psp,
return 0;
out:
- dev_err(adev->dev,
- "failed to init sos firmware\n");
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.sos_fw);
return err;
}
@@ -3272,41 +3228,76 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
return 0;
}
-int psp_init_ta_microcode(struct psp_context *psp,
- const char *chip_name)
+static int parse_ta_v1_microcode(struct psp_context *psp)
{
+ const struct ta_firmware_header_v1_0 *ta_hdr;
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
- const struct ta_firmware_header_v2_0 *ta_hdr;
- int err = 0;
- int ta_index = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for ta microcode\n");
+ ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
+
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
return -EINVAL;
- }
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ adev->psp.xgmi_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ras_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->ras.offset_bytes);
+
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.dtm_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out;
+ return 0;
+}
+
+static int parse_ta_v2_microcode(struct psp_context *psp)
+{
+ const struct ta_firmware_header_v2_0 *ta_hdr;
+ struct amdgpu_device *adev = psp->adev;
+ int err = 0;
+ int ta_index = 0;
ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
- if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
- dev_err(adev->dev, "unsupported TA header version\n");
- err = -EINVAL;
- goto out;
- }
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
+ return -EINVAL;
if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
@@ -3314,19 +3305,44 @@ int psp_init_ta_microcode(struct psp_context *psp,
&ta_hdr->ta_fw_bin[ta_index],
ta_hdr);
if (err)
- goto out;
+ return err;
}
return 0;
-out:
- dev_err(adev->dev, "fail to initialize ta microcode\n");
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
+}
+
+int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
+{
+ const struct common_firmware_header *hdr;
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[PSP_FW_NAME_LEN];
+ int err;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
+ if (err)
+ return err;
+
+ hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
+ switch (le16_to_cpu(hdr->header_version_major)) {
+ case 1:
+ err = parse_ta_v1_microcode(psp);
+ break;
+ case 2:
+ err = parse_ta_v2_microcode(psp);
+ break;
+ default:
+ dev_err(adev->dev, "unsupported TA header version\n");
+ err = -EINVAL;
+ }
+
+ if (err)
+ amdgpu_ucode_release(&adev->psp.ta_fw);
+
return err;
}
-int psp_init_cap_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
@@ -3334,28 +3350,20 @@ int psp_init_cap_microcode(struct psp_context *psp,
struct amdgpu_firmware_info *info = NULL;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for cap microcode\n");
- return -EINVAL;
- }
-
if (!amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
- err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
- if (err) {
- dev_warn(adev->dev, "cap microcode does not exist, skip\n");
- err = 0;
- goto out;
- }
-
- err = amdgpu_ucode_validate(adev->psp.cap_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
if (err) {
+ if (err == -ENODEV) {
+ dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ err = 0;
+ goto out;
+ }
dev_err(adev->dev, "fail to initialize cap microcode\n");
- goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
@@ -3372,8 +3380,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
return 0;
out:
- release_firmware(adev->psp.cap_fw);
- adev->psp.cap_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.cap_fw);
return err;
}
@@ -3444,10 +3451,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
/* LFB address which is aligned to 1MB boundary per PSP request */
ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
- AMDGPU_GEM_DOMAIN_VRAM,
- &fw_buf_bo,
- &fw_pri_mc_addr,
- &fw_pri_cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &fw_buf_bo, &fw_pri_mc_addr,
+ &fw_pri_cpu_addr);
if (ret)
goto rel_buf;
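A minimal sketch of the request/release pattern the PSP conversions above follow; the firmware member and function name here are illustrative only and not part of this patch:

static int example_init_microcode(struct psp_context *psp, const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	char fw_name[PSP_FW_NAME_LEN];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_example.bin", chip_name);
	/* amdgpu_ucode_request() requests and validates the firmware in one call */
	err = amdgpu_ucode_request(adev, &adev->psp.example_fw, fw_name);
	if (err)
		goto out;

	/* ... parse headers and fill ucode descriptors here ... */
	return 0;
out:
	/* amdgpu_ucode_release() frees the firmware and clears the pointer */
	amdgpu_ucode_release(&adev->psp.example_fw);
	return err;
}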
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ad490c1e2f57..3ab8a88789c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -34,6 +34,7 @@
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+#include "nbio_v4_3.h"
#include "atom.h"
#include "amdgpu_reset.h"
@@ -176,7 +177,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_save_bad_pages(adev, NULL);
}
dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
@@ -706,13 +707,23 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ if (amdgpu_ras_is_feature_allowed(adev, head) ||
+ amdgpu_ras_is_poison_mode_supported(adev))
+ return 1;
+ else
+ return 0;
+}
+
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
- int ret;
+ int ret = 0;
if (!con)
return -EINVAL;
@@ -736,7 +747,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
}
/* Do not enable if it is not allowed. */
- WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+ if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
+ goto out;
/* Only enable ras feature operation handle on host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -754,7 +766,6 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
- ret = 0;
out:
if (head->block == AMDGPU_RAS_BLOCK__GFX)
kfree(info);
@@ -910,9 +921,6 @@ static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_de
if (block >= AMDGPU_RAS_BLOCK__LAST)
return NULL;
- if (!amdgpu_ras_is_supported(adev, block))
- return NULL;
-
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
@@ -1087,6 +1095,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ /* inject on guest isn't allowed, return success directly */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (!obj)
return -EINVAL;
@@ -1122,11 +1134,54 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
}
/**
- * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
+ * @query_info: pointer to ras_query_if
+ *
+ * Return 0 on query success or if there is nothing to do; otherwise
+ * return an error on failure.
+ */
+static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
+{
+ int ret;
+
+ if (!query_info)
+ /* do nothing if query_info is not specified */
+ return 0;
+
+ ret = amdgpu_ras_query_error_status(adev, query_info);
+ if (ret)
+ return ret;
+
+ *ce_count += query_info->ce_count;
+ *ue_count += query_info->ue_count;
+
+ /* some hardware/IP supports read-to-clear, so there is no need to
+ * explicitly reset the error status after the query call */
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
+ dev_warn(adev->dev,
+ "Failed to reset error counter and error status\n");
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
* @adev: pointer to AMD GPU device
* @ce_count: pointer to an integer to be set to the count of correctible errors.
* @ue_count: pointer to an integer to be set to the count of uncorrectible
* errors.
+ * @query_info: pointer to ras_query_if if the query request is only for a
+ * specific ip block; if info is NULL, then the query request is for
+ * all the ip blocks that support querying ras error counters/status
*
* If set, @ce_count or @ue_count, count and return the corresponding
* error counts in those integer pointers. Return 0 if the device
@@ -1134,11 +1189,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
*/
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
- unsigned long *ue_count)
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
unsigned long ce, ue;
+ int ret;
if (!adev->ras_enabled || !con)
return -EOPNOTSUPP;
@@ -1150,26 +1207,23 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
ce = 0;
ue = 0;
- list_for_each_entry(obj, &con->head, node) {
- struct ras_query_if info = {
- .head = obj->head,
- };
- int res;
-
- res = amdgpu_ras_query_error_status(adev, &info);
- if (res)
- return res;
+ if (!query_info) {
+ /* query all the ip blocks that support ras query interface */
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
- if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
- if (amdgpu_ras_reset_error_status(adev, info.head.block))
- dev_warn(adev->dev, "Failed to reset error counter and error status");
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
}
-
- ce += info.ce_count;
- ue += info.ue_count;
+ } else {
+ /* query specific ip block */
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
}
+ if (ret)
+ return ret;
+
if (ce_count)
*ce_count = ce;
@@ -1564,14 +1618,14 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
struct amdgpu_ras_block_object *block_obj =
amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
- if (!block_obj || !block_obj->hw_ops)
+ if (!block_obj)
return;
/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
*/
- if (block_obj->hw_ops->query_poison_status) {
+ if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
poison_stat = block_obj->hw_ops->query_poison_status(adev);
if (!poison_stat) {
/* Not poison consumption interrupt, no need to handle it */
@@ -1585,7 +1639,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (!adev->gmc.xgmi.connected_to_cpu)
amdgpu_umc_poison_handler(adev, false);
- if (block_obj->hw_ops->handle_poison_consumption)
+ if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
/* gpu reset is fallback for failed and default cases */
@@ -1593,6 +1647,8 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
block_obj->ras_comm.name);
amdgpu_ras_reset_gpu(adev);
+ } else {
+ amdgpu_gfx_poison_consumption_handler(adev, entry);
}
}
@@ -2029,22 +2085,32 @@ out:
/*
* write error record array to eeprom, the function should be
* protected by recovery_lock
+ * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
*/
-int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
+ unsigned long *new_cnt)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
int save_count;
- if (!con || !con->eh_data)
+ if (!con || !con->eh_data) {
+ if (new_cnt)
+ *new_cnt = 0;
+
return 0;
+ }
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
save_count = data->count - control->ras_num_recs;
mutex_unlock(&con->recovery_lock);
+
+ if (new_cnt)
+ *new_cnt = save_count / adev->umc.retire_unit;
+
/* only new entries are saved */
if (save_count > 0) {
if (amdgpu_ras_eeprom_append(control,
@@ -2131,11 +2197,12 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
/*
* Justification of value bad_page_cnt_threshold in ras structure
*
- * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
- * in eeprom, and introduce two scenarios accordingly.
+ * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
+ * in eeprom, or amdgpu_bad_page_threshold == -2, which introduces
+ * two scenarios accordingly.
*
* Bad page retirement enablement:
- * - If amdgpu_bad_page_threshold = -1,
+ * - If amdgpu_bad_page_threshold = -2,
* bad_page_cnt_threshold = typical value by formula.
*
* - When the value from user is 0 < amdgpu_bad_page_threshold <
@@ -2344,22 +2411,31 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
dev_info(adev->dev, "SRAM ECC is active.\n");
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev))
adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
-
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
- adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- } else {
+ else
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
1 << AMDGPU_RAS_BLOCK__SDMA |
1 << AMDGPU_RAS_BLOCK__GFX);
- }
+
+ /* VCN/JPEG RAS can be supported on both bare metal and
+ * SRIOV environments
+ */
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
+ adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+
+ /*
+ * XGMI RAS is not supported if xgmi num physical nodes
+ * is zero
+ */
+ if (!adev->gmc.xgmi.num_physical_nodes)
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
} else {
dev_info(adev->dev, "SRAM ECC is not presented.\n");
}
@@ -2395,7 +2471,7 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
/* Cache new values.
*/
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
}
@@ -2405,11 +2481,42 @@ Out:
pm_runtime_put_autosuspend(dev->dev);
}
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool df_poison, umc_poison;
+
+ /* poison setting is useless on SRIOV guest */
+ if (amdgpu_sriov_vf(adev) || !con)
+ return;
+
+ /* Init poison supported flag, the default value is false */
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ } else if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras->query_ras_poison_mode(adev);
+
+ /* Only if poison is set in both DF and UMC can we support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev,
+ "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+}
+
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int r;
- bool df_poison, umc_poison;
if (con)
return 0;
@@ -2455,21 +2562,34 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* initialize nbio ras function ahead of any other
* ras functions so hardware fatal error interrupt
* can be enabled as early as possible */
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ if (!adev->gmc.xgmi.connected_to_cpu)
adev->nbio.ras = &nbio_v7_4_ras;
- amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
- adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
- }
+ break;
+ case IP_VERSION(4, 3, 0):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+ /* unlike other generations of nbio ras,
+ * nbio v4_3 only supports the fatal error interrupt
+ * to inform software that DF is frozen due to a
+ * system fatal error event. The driver should not
+ * enable nbio ras in such a case. Instead,
+ * check DF RAS */
+ adev->nbio.ras = &nbio_v4_3_ras;
break;
default:
/* nbio ras is not available */
break;
}
+ /* the nbio ras block needs to be enabled ahead of other ras blocks
+ * to handle fatal errors */
+ r = amdgpu_nbio_ras_sw_init(adev);
+ if (r)
+ return r;
+
if (adev->nbio.ras &&
adev->nbio.ras->init_ras_controller_interrupt) {
r = adev->nbio.ras->init_ras_controller_interrupt(adev);
@@ -2484,26 +2604,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
- /* Init poison supported flag, the default value is false */
- if (adev->gmc.xgmi.connected_to_cpu) {
- /* enabled by default when GPU is connected to CPU */
- con->poison_supported = true;
- }
- else if (adev->df.funcs &&
- adev->df.funcs->query_ras_poison_mode &&
- adev->umc.ras &&
- adev->umc.ras->query_ras_poison_mode) {
- df_poison =
- adev->df.funcs->query_ras_poison_mode(adev);
- umc_poison =
- adev->umc.ras->query_ras_poison_mode(adev);
- /* Only poison is set in both DF and UMC, we can support it */
- if (df_poison && umc_poison)
- con->poison_supported = true;
- else if (df_poison != umc_poison)
- dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
- df_poison, umc_poison);
- }
+ amdgpu_ras_query_poison_mode(adev);
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
@@ -2564,6 +2665,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
{
struct amdgpu_ras_block_object *ras_obj = NULL;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_query_if *query_info;
unsigned long ue_count, ce_count;
int r;
@@ -2605,11 +2707,17 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
+ if (!query_info)
+ return -ENOMEM;
+ memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
+
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
}
+ kfree(query_info);
return 0;
interrupt:
@@ -2946,11 +3054,26 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
unsigned int block)
{
+ int ret = 0;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (block >= AMDGPU_RAS_BLOCK_COUNT)
return 0;
- return ras && (adev->ras_enabled & (1 << block));
+
+ ret = ras && (adev->ras_enabled & (1 << block));
+
+ /* For the special asic with mem ecc enabled but sram ecc
+ * not enabled, even if the ras block is not marked in
+ * .ras_enabled, the ras block can still be considered to
+ * support ras functionality if the asic supports poison
+ * mode and the ras block has a ras configuration.
+ */
+ if (!ret &&
+ amdgpu_ras_is_poison_mode_supported(adev) &&
+ amdgpu_ras_get_ras_block(adev, block, 0))
+ ret = 1;
+
+ return ret;
}
int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
@@ -2971,9 +3094,6 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
if (!adev || !ras_block_obj)
return -EINVAL;
- if (!amdgpu_ras_asic_supported(adev))
- return 0;
-
ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
if (!ras_node)
return -ENOMEM;
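A minimal usage sketch for the extended query interface above; the calling context is illustrative. Passing NULL queries every registered block (as amdgpu_ras_counte_dw() does), while a filled ras_query_if restricts the query to one block (as amdgpu_ras_block_late_init() does):

static void example_query_counts(struct amdgpu_device *adev,
				 struct ras_common_if *ras_block)
{
	struct ras_query_if query_info = { .head = *ras_block };
	unsigned long ce_count, ue_count;

	/* all ip blocks that support ras counters */
	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL))
		dev_info(adev->dev, "total ce:%lu ue:%lu\n", ce_count, ue_count);

	/* only the block described by query_info */
	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count,
					  &query_info))
		dev_info(adev->dev, "block ce:%lu ue:%lu\n", ce_count, ue_count);
}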
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index bf5a95104ec1..17b3d1992e80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -540,13 +540,15 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
- unsigned long *ue_count);
+ unsigned long *ue_count,
+ struct ras_query_if *query_info);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
-int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
+int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
+ unsigned long *new_cnt);
static inline enum ta_ras_block
amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
@@ -581,6 +583,10 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
return TA_RAS_BLOCK__FUSE;
case AMDGPU_RAS_BLOCK__MCA:
return TA_RAS_BLOCK__MCA;
+ case AMDGPU_RAS_BLOCK__VCN:
+ return TA_RAS_BLOCK__VCN;
+ case AMDGPU_RAS_BLOCK__JPEG:
+ return TA_RAS_BLOCK__JPEG;
default:
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
return TA_RAS_BLOCK__UMC;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2d9f3f4cd79e..c2c2a7718613 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -107,47 +107,12 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_IP_DISCOVERY) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(13, 0, 0):
- case IP_VERSION(13, 0, 10):
- return true;
- default:
- return false;
- }
- }
-
- return adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_ALDEBARAN;
-}
-
-static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
- struct amdgpu_ras_eeprom_control *control)
-{
- struct atom_context *atom_ctx = adev->mode_info.atom_context;
-
- if (!control || !atom_ctx)
- return false;
-
- if (strnstr(atom_ctx->vbios_version,
- "D342",
- sizeof(atom_ctx->vbios_version)))
- control->i2c_address = EEPROM_I2C_MADDR_0;
- else
- control->i2c_address = EEPROM_I2C_MADDR_4;
-
- return true;
-}
-
-static bool __get_eeprom_i2c_addr_ip_discovery(struct amdgpu_device *adev,
- struct amdgpu_ras_eeprom_control *control)
-{
switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */
+ case IP_VERSION(11, 0, 7): /* Sienna cichlid */
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 2): /* Aldebaran */
case IP_VERSION(13, 0, 10):
- control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
default:
return false;
@@ -178,43 +143,35 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return true;
}
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- control->i2c_address = EEPROM_I2C_MADDR_0;
- break;
-
- case CHIP_ARCTURUS:
- return __get_eeprom_i2c_addr_arct(adev, control);
-
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 2):
+ /* VEGA20 and ARCTURUS */
+ if (adev->asic_type == CHIP_VEGA20)
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ else if (strnstr(atom_ctx->vbios_version,
+ "D342",
+ sizeof(atom_ctx->vbios_version)))
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ else
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return true;
+ case IP_VERSION(11, 0, 7):
control->i2c_address = EEPROM_I2C_MADDR_0;
- break;
-
- case CHIP_ALDEBARAN:
+ return true;
+ case IP_VERSION(13, 0, 2):
if (strnstr(atom_ctx->vbios_version, "D673",
sizeof(atom_ctx->vbios_version)))
control->i2c_address = EEPROM_I2C_MADDR_4;
else
control->i2c_address = EEPROM_I2C_MADDR_0;
- break;
-
- case CHIP_IP_DISCOVERY:
- return __get_eeprom_i2c_addr_ip_discovery(adev, control);
-
- default:
- return false;
- }
-
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ return true;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 10):
control->i2c_address = EEPROM_I2C_MADDR_4;
- break;
-
+ return true;
default:
- break;
+ return false;
}
-
- return true;
}
static void
@@ -417,7 +374,8 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!__is_ras_eeprom_supported(adev))
+ if (!__is_ras_eeprom_supported(adev) ||
+ !amdgpu_bad_page_threshold)
return false;
/* skip check eeprom table for VEGA20 Gaming */
@@ -428,10 +386,18 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
return false;
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
- dev_warn(adev->dev, "This GPU is in BAD status.");
- dev_warn(adev->dev, "Please retire it or set a larger "
- "threshold value when reloading driver.\n");
- return true;
+ if (amdgpu_bad_page_threshold == -1) {
+ dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
+ con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
+ dev_warn(adev->dev,
+ "But GPU can be operated due to bad_page_threshold = -1.\n");
+ return false;
+ } else {
+ dev_warn(adev->dev, "This GPU is in BAD status.");
+ dev_warn(adev->dev, "Please retire it or set a larger "
+ "threshold value when reloading driver.\n");
+ return true;
+ }
}
return false;
@@ -1191,8 +1157,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
} else {
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
control->ras_num_recs, ras->bad_page_cnt_threshold);
- if (amdgpu_bad_page_threshold == -2) {
- dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
+ if (amdgpu_bad_page_threshold == -1) {
+ dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
res = 0;
} else {
*exceed_err_limit = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index f778466bb9db..6437ead87e5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -24,6 +24,7 @@
#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
+#include "smu_v13_0_10.h"
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_handler *handler)
@@ -44,6 +45,9 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7):
ret = sienna_cichlid_reset_init(adev);
break;
+ case IP_VERSION(13, 0, 10):
+ ret = smu_v13_0_10_reset_init(adev);
+ break;
default:
break;
}
@@ -62,6 +66,9 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7):
ret = sienna_cichlid_reset_fini(adev);
break;
+ case IP_VERSION(13, 0, 10):
+ ret = smu_v13_0_10_reset_fini(adev);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f752c7ae7f60..d8749444b689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
+#include <drm/drm_suballoc.h>
struct amdgpu_device;
struct amdgpu_ring;
@@ -92,7 +93,7 @@ enum amdgpu_ib_pool_type {
};
struct amdgpu_ib {
- struct amdgpu_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
@@ -164,7 +165,6 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned vmhub;
unsigned extra_dw;
/* ring read/write ptr handling */
@@ -249,6 +249,7 @@ struct amdgpu_ring {
uint64_t ptr_mask;
uint32_t buf_mask;
u32 idx;
+ u32 xcc_id;
u32 me;
u32 pipe;
u32 queue;
@@ -274,6 +275,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
+ unsigned vm_hub;
unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
@@ -295,7 +297,7 @@ struct amdgpu_ring {
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 012b72d00e04..85fb730d9fc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -93,7 +93,8 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
/* allocate save restore block */
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.save_restore_obj,
&adev->gfx.rlc.save_restore_gpu_addr,
(void **)&adev->gfx.rlc.sr_ptr);
@@ -130,7 +131,8 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
@@ -156,7 +158,8 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
int r;
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 524d10b21041..c6b4337eb20c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -44,327 +44,63 @@
#include "amdgpu.h"
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
-
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned int size, u32 suballoc_align, u32 domain)
{
- int i, r;
-
- init_waitqueue_head(&sa_manager->wq);
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- sa_manager->align = align;
- sa_manager->hole = &sa_manager->olist;
- INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- INIT_LIST_HEAD(&sa_manager->flist[i]);
+ int r;
- r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
- &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
+ r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain,
+ &sa_manager->bo, &sa_manager->gpu_addr,
+ &sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
- memset(sa_manager->cpu_ptr, 0, sa_manager->size);
+ memset(sa_manager->cpu_ptr, 0, size);
+ drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align);
return r;
}
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
-
if (sa_manager->bo == NULL) {
dev_err(adev->dev, "no bo for sa manager\n");
return;
}
- if (!list_empty(&sa_manager->olist)) {
- sa_manager->hole = &sa_manager->olist,
- amdgpu_sa_bo_try_free(sa_manager);
- if (!list_empty(&sa_manager->olist)) {
- dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
- amdgpu_sa_bo_remove_locked(sa_bo);
- }
+ drm_suballoc_manager_fini(&sa_manager->base);
amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
- sa_manager->size = 0;
}
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
-{
- struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
- if (sa_manager->hole == &sa_bo->olist) {
- sa_manager->hole = sa_bo->olist.prev;
- }
- list_del_init(&sa_bo->olist);
- list_del_init(&sa_bo->flist);
- dma_fence_put(sa_bo->fence);
- kfree(sa_bo);
-}
-
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
+ struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+ GFP_KERNEL, true, 0);
- if (sa_manager->hole->next == &sa_manager->olist)
- return;
+ if (IS_ERR(sa)) {
+ *sa_bo = NULL;
- sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
- list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL ||
- !dma_fence_is_signaled(sa_bo->fence)) {
- return;
- }
- amdgpu_sa_bo_remove_locked(sa_bo);
+ return PTR_ERR(sa);
}
-}
-static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole != &sa_manager->olist) {
- return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
- }
+ *sa_bo = sa;
return 0;
}
-static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole->next != &sa_manager->olist) {
- return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
- }
- return sa_manager->size;
-}
-
-static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- soffset += wasted;
-
- sa_bo->manager = sa_manager;
- sa_bo->soffset = soffset;
- sa_bo->eoffset = soffset + size;
- list_add(&sa_bo->olist, sa_manager->hole);
- INIT_LIST_HEAD(&sa_bo->flist);
- sa_manager->hole = &sa_bo->olist;
- return true;
- }
- return false;
-}
-
-/**
- * amdgpu_sa_event - Check if we can stop waiting
- *
- * @sa_manager: pointer to the sa_manager
- * @size: number of bytes we want to allocate
- * @align: alignment we need to match
- *
- * Check if either there is a fence we can wait for or
- * enough free memory to satisfy the allocation directly
- */
-static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
- int i;
-
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (!list_empty(&sa_manager->flist[i]))
- return true;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- return true;
- }
-
- return false;
-}
-
-static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct dma_fence **fences,
- unsigned *tries)
-{
- struct amdgpu_sa_bo *best_bo = NULL;
- unsigned i, soffset, best, tmp;
-
- /* if hole points to the end of the buffer */
- if (sa_manager->hole->next == &sa_manager->olist) {
- /* try again with its beginning */
- sa_manager->hole = &sa_manager->olist;
- return true;
- }
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- /* to handle wrap around we add sa_manager->size */
- best = sa_manager->size * 2;
- /* go over all fence list and try to find the closest sa_bo
- * of the current last
- */
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
- struct amdgpu_sa_bo *sa_bo;
-
- fences[i] = NULL;
-
- if (list_empty(&sa_manager->flist[i]))
- continue;
-
- sa_bo = list_first_entry(&sa_manager->flist[i],
- struct amdgpu_sa_bo, flist);
-
- if (!dma_fence_is_signaled(sa_bo->fence)) {
- fences[i] = sa_bo->fence;
- continue;
- }
-
- /* limit the number of tries each ring gets */
- if (tries[i] > 2) {
- continue;
- }
-
- tmp = sa_bo->soffset;
- if (tmp < soffset) {
- /* wrap around, pretend it's after */
- tmp += sa_manager->size;
- }
- tmp -= soffset;
- if (tmp < best) {
- /* this sa bo is the closest one */
- best = tmp;
- best_bo = sa_bo;
- }
- }
-
- if (best_bo) {
- uint32_t idx = best_bo->fence->context;
-
- idx %= AMDGPU_SA_NUM_FENCE_LISTS;
- ++tries[idx];
- sa_manager->hole = best_bo->olist.prev;
-
- /* we knew that this one is signaled,
- so it's save to remote it */
- amdgpu_sa_bo_remove_locked(best_bo);
- return true;
- }
- return false;
-}
-
-int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align)
-{
- struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned count;
- int i, r;
- signed long t;
-
- if (WARN_ON_ONCE(align > sa_manager->align))
- return -EINVAL;
-
- if (WARN_ON_ONCE(size > sa_manager->size))
- return -EINVAL;
-
- *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
- if (!(*sa_bo))
- return -ENOMEM;
- (*sa_bo)->manager = sa_manager;
- (*sa_bo)->fence = NULL;
- INIT_LIST_HEAD(&(*sa_bo)->olist);
- INIT_LIST_HEAD(&(*sa_bo)->flist);
-
- spin_lock(&sa_manager->wq.lock);
- do {
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- tries[i] = 0;
-
- do {
- amdgpu_sa_bo_try_free(sa_manager);
-
- if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
- size, align)) {
- spin_unlock(&sa_manager->wq.lock);
- return 0;
- }
-
- /* see if we can skip over some allocations */
- } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
-
- for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (fences[i])
- fences[count++] = dma_fence_get(fences[i]);
-
- if (count) {
- spin_unlock(&sa_manager->wq.lock);
- t = dma_fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- for (i = 0; i < count; ++i)
- dma_fence_put(fences[i]);
-
- r = (t > 0) ? 0 : t;
- spin_lock(&sa_manager->wq.lock);
- } else {
- /* if we have nothing to wait for block */
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- amdgpu_sa_event(sa_manager, size, align)
- );
- }
-
- } while (!r);
-
- spin_unlock(&sa_manager->wq.lock);
- kfree(*sa_bo);
- *sa_bo = NULL;
- return r;
-}
-
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
+void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
struct dma_fence *fence)
{
- struct amdgpu_sa_manager *sa_manager;
-
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
- sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq.lock);
- if (fence && !dma_fence_is_signaled(fence)) {
- uint32_t idx;
-
- (*sa_bo)->fence = dma_fence_get(fence);
- idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
- list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
- } else {
- amdgpu_sa_bo_remove_locked(*sa_bo);
- }
- wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_free(*sa_bo, fence);
*sa_bo = NULL;
}
@@ -373,26 +109,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
- struct amdgpu_sa_bo *i;
-
- spin_lock(&sa_manager->wq.lock);
- list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
- if (&i->olist == sa_manager->hole) {
- seq_printf(m, ">");
- } else {
- seq_printf(m, " ");
- }
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
+ struct drm_printer p = drm_seq_file_printer(m);
- if (i->fence)
- seq_printf(m, " protected by 0x%016llx on context %llu",
- i->fence->seqno, i->fence->context);
-
- seq_printf(m, "\n");
- }
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
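A minimal usage sketch for the drm_suballoc-backed helpers above; the calling context and size are illustrative. Allocation now returns a struct drm_suballoc, and the fence passed to amdgpu_sa_bo_free() defers reuse of the range until the GPU work using it has signaled:

static int example_sa_usage(struct amdgpu_device *adev,
			    struct amdgpu_sa_manager *sa_manager,
			    struct dma_fence *fence)
{
	struct drm_suballoc *sa_bo;
	int r;

	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256);
	if (r)
		return r;

	/* ... fill the sub-allocation and submit work producing @fence ... */

	amdgpu_sa_bo_free(adev, &sa_bo, fence);
	return 0;
}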
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index e9b45089a28a..863b2a34b2d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
@@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return r;
}
- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
+ mutex_unlock(&mgr->lock);
fdput(f);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index ea5278f094c0..231ca06bc9c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -154,16 +154,11 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
- int err = 0;
uint16_t version_major;
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
const struct sdma_firmware_header_v2_0 *hdr_v2;
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
header = (const struct common_firmware_header *)
sdma_inst->fw->data;
version_major = le16_to_cpu(header->header_version_major);
@@ -195,7 +190,7 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
if (duplicate)
break;
}
@@ -205,16 +200,22 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
}
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
- char *fw_name, u32 instance,
- bool duplicate)
+ u32 instance, bool duplicate)
{
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
- int err = 0, i;
+ int err, i;
const struct sdma_firmware_header_v2_0 *sdma_hdr;
uint16_t version_major;
-
- err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ char ucode_prefix[30];
+ char fw_name[40];
+
+ amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ if (instance == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name);
if (err)
goto out;
@@ -279,10 +280,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
}
out:
- if (err) {
- DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ if (err)
amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
- }
return err;
}
@@ -306,3 +305,38 @@ void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
}
}
}
+
+int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err = 0;
+ struct amdgpu_sdma_ras *ras = NULL;
+
+ /* adev->sdma.ras is NULL, which means sdma does not
+ * support ras functions, so do nothing here.
+ */
+ if (!adev->sdma.ras)
+ return 0;
+
+ ras = adev->sdma.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register sdma ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "sdma");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if = &ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the default ras_late_init */
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
+
+ /* If no special ras_cb function is defined, use the default ras_cb */
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
+
+ return 0;
+}
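A minimal sketch of a caller of the reworked init above; the loop and the duplicate choice are illustrative. The firmware name is now derived from the SDMA IP version inside the helper, so callers only pass the instance index:

static int example_sdma_fw_init(struct amdgpu_device *adev, bool duplicate)
{
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_sdma_init_microcode(adev, i, duplicate);
		if (r)
			return r;
		/* with duplicate firmware only instance 0 needs to be loaded */
		if (duplicate)
			break;
	}
	return 0;
}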
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 7d99205c2e01..fc8528812598 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,10 +124,11 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
- char *fw_name, u32 instance, bool duplicate);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
+ bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 2c1d82fc4c34..8ed0e073656f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -77,11 +77,11 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
}
}
-void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id)
{
- *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
- memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+ *cmd = (struct ta_securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
+ memset(*cmd, 0, sizeof(struct ta_securedisplay_cmd));
(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
(*cmd)->cmd_id = command_id;
}
@@ -93,7 +93,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct psp_context *psp = &adev->psp;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_device *dev = adev_to_drm(adev);
uint32_t phy_id;
uint32_t op;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
index fe98574748f4..456ad68ed4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -30,7 +30,7 @@
void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
void psp_securedisplay_parse_resp_status(struct psp_context *psp,
enum ta_securedisplay_status status);
-void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index bac7976975bd..dcd8c066bc1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
dma_fence_get(f);
r = drm_sched_job_add_dependency(&job->base, f);
- if (r)
+ if (r) {
+ dma_fence_put(f);
return r;
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 677ad2016976..525dffbe046a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+ __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
__entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- to_amdgpu_ring(job->base.sched));
+ to_amdgpu_ring(job->base.entity->rq->sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -233,7 +233,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
__entry->pasid = vm->pasid;
__assign_str(ring, ring->name);
__entry->vmid = job->vmid;
- __entry->vm_hub = ring->funcs->vmhub,
+ __entry->vm_hub = ring->vm_hub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
@@ -427,7 +427,7 @@ TRACE_EVENT(amdgpu_vm_flush,
TP_fast_assign(
__assign_str(ring, ring->name);
__entry->vmid = vmid;
- __entry->vm_hub = ring->funcs->vmhub;
+ __entry->vm_hub = ring->vm_hub;
__entry->pd_addr = pd_addr;
),
TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 55e0284b2bdd..2cd081cbf706 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -44,10 +44,10 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@@ -466,11 +466,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- /* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
- if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
- return -EINVAL;
-
adev = amdgpu_ttm_adev(bo->bdev);
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
@@ -1679,10 +1675,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
/* reserve vram for mem train according to TMR location */
amdgpu_ttm_training_data_block_init(adev);
ret = amdgpu_bo_create_kernel_at(adev,
- ctx->c2p_train_data_offset,
- ctx->train_data_size,
- &ctx->c2p_bo,
- NULL);
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ &ctx->c2p_bo,
+ NULL);
if (ret) {
DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
@@ -1692,10 +1688,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
- adev->mman.discovery_tmr_size,
- &adev->mman.discovery_memory,
- NULL);
+ adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+ adev->mman.discovery_tmr_size,
+ &adev->mman.discovery_memory,
+ NULL);
if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
@@ -1718,7 +1714,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
int r;
- u64 vis_vram_limit;
mutex_init(&adev->mman.gtt_window_lock);
@@ -1741,12 +1736,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Reduce size of CPU-visible VRAM if requested */
- vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
- if (amdgpu_vis_vram_limit > 0 &&
- vis_vram_limit <= adev->gmc.visible_vram_size)
- adev->gmc.visible_vram_size = vis_vram_limit;
-
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cb62e6249c2..f76b1cb8baf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -126,19 +126,6 @@ void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
}
}
-void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr)
-{
- uint16_t version_major = le16_to_cpu(hdr->header_version_major);
- uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
-
- DRM_DEBUG("IMU\n");
- amdgpu_ucode_print_common_hdr(hdr);
-
- if (version_major != 1) {
- DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
- }
-}
-
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
@@ -472,6 +459,12 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
DRM_DEBUG("psp_dbg_drv_size_bytes: %u\n",
le32_to_cpu(desc->size_bytes));
break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ DRM_DEBUG("psp_ras_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_ras_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
default:
DRM_DEBUG("Unsupported PSP fw type: %d\n", desc->fw_type);
break;
@@ -504,7 +497,7 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
}
}
-int amdgpu_ucode_validate(const struct firmware *fw)
+static int amdgpu_ucode_validate(const struct firmware *fw)
{
const struct common_firmware_header *hdr =
(const struct common_firmware_header *)fw->data;
@@ -669,6 +662,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "VCN1_RAM";
case AMDGPU_UCODE_ID_DMCUB:
return "DMCUB";
+ case AMDGPU_UCODE_ID_CAP:
+ return "CAP";
default:
return "UNKNOWN UCODE";
}
@@ -1059,12 +1054,229 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
+static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
+{
+ if (block_type == MP0_HWIP) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "vega10";
+ case CHIP_VEGA12:
+ return "vega12";
+ default:
+ return NULL;
+ }
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ return "navi10";
+ case IP_VERSION(11, 0, 2):
+ return "vega20";
+ case IP_VERSION(11, 0, 3):
+ return "renoir";
+ case IP_VERSION(11, 0, 4):
+ return "arcturus";
+ case IP_VERSION(11, 0, 5):
+ return "navi14";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid";
+ case IP_VERSION(11, 0, 9):
+ return "navi12";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby";
+ case IP_VERSION(11, 5, 0):
+ return "vangogh";
+ case IP_VERSION(12, 0, 1):
+ return "green_sardine";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran";
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
+ return "yellow_carp";
+ }
+ } else if (block_type == MP1_HWIP) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return "arcturus_smc";
+ return NULL;
+ case IP_VERSION(11, 0, 0):
+ return "navi10_smc";
+ case IP_VERSION(11, 0, 5):
+ return "navi14_smc";
+ case IP_VERSION(11, 0, 9):
+ return "navi12_smc";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid_smc";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder_smc";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish_smc";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby_smc";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran_smc";
+ }
+ } else if (block_type == SDMA0_HWIP) {
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ return "vega10_sdma";
+ case IP_VERSION(4, 0, 1):
+ return "vega12_sdma";
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_sdma";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_sdma";
+ return "raven_sdma";
+ case IP_VERSION(4, 1, 2):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_sdma";
+ return "green_sardine_sdma";
+ case IP_VERSION(4, 2, 0):
+ return "vega20_sdma";
+ case IP_VERSION(4, 2, 2):
+ return "arcturus_sdma";
+ case IP_VERSION(4, 4, 0):
+ return "aldebaran_sdma";
+ case IP_VERSION(5, 0, 0):
+ return "navi10_sdma";
+ case IP_VERSION(5, 0, 1):
+ return "cyan_skillfish2_sdma";
+ case IP_VERSION(5, 0, 2):
+ return "navi14_sdma";
+ case IP_VERSION(5, 0, 5):
+ return "navi12_sdma";
+ case IP_VERSION(5, 2, 0):
+ return "sienna_cichlid_sdma";
+ case IP_VERSION(5, 2, 2):
+ return "navy_flounder_sdma";
+ case IP_VERSION(5, 2, 4):
+ return "dimgrey_cavefish_sdma";
+ case IP_VERSION(5, 2, 5):
+ return "beige_goby_sdma";
+ case IP_VERSION(5, 2, 3):
+ return "yellow_carp_sdma";
+ case IP_VERSION(5, 2, 1):
+ return "vangogh_sdma";
+ }
+ } else if (block_type == UVD_HWIP) {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_vcn";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_vcn";
+ return "raven_vcn";
+ case IP_VERSION(2, 5, 0):
+ return "arcturus_vcn";
+ case IP_VERSION(2, 2, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_vcn";
+ return "green_sardine_vcn";
+ case IP_VERSION(2, 6, 0):
+ return "aldebaran_vcn";
+ case IP_VERSION(2, 0, 0):
+ return "navi10_vcn";
+ case IP_VERSION(2, 0, 2):
+ if (adev->asic_type == CHIP_NAVI12)
+ return "navi12_vcn";
+ return "navi14_vcn";
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ return "sienna_cichlid_vcn";
+ return "navy_flounder_vcn";
+ case IP_VERSION(3, 0, 2):
+ return "vangogh_vcn";
+ case IP_VERSION(3, 0, 16):
+ return "dimgrey_cavefish_vcn";
+ case IP_VERSION(3, 0, 33):
+ return "beige_goby_vcn";
+ case IP_VERSION(3, 1, 1):
+ return "yellow_carp_vcn";
+ }
+ } else if (block_type == GC_HWIP) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ return "vega10";
+ case IP_VERSION(9, 2, 1):
+ return "vega12";
+ case IP_VERSION(9, 4, 0):
+ return "vega20";
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ case IP_VERSION(9, 4, 1):
+ return "arcturus";
+ case IP_VERSION(9, 3, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir";
+ return "green_sardine";
+ case IP_VERSION(9, 4, 2):
+ return "aldebaran";
+ case IP_VERSION(10, 1, 10):
+ return "navi10";
+ case IP_VERSION(10, 1, 1):
+ return "navi14";
+ case IP_VERSION(10, 1, 2):
+ return "navi12";
+ case IP_VERSION(10, 3, 0):
+ return "sienna_cichlid";
+ case IP_VERSION(10, 3, 2):
+ return "navy_flounder";
+ case IP_VERSION(10, 3, 1):
+ return "vangogh";
+ case IP_VERSION(10, 3, 4):
+ return "dimgrey_cavefish";
+ case IP_VERSION(10, 3, 5):
+ return "beige_goby";
+ case IP_VERSION(10, 3, 3):
+ return "yellow_carp";
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ return "cyan_skillfish2";
+ }
+ }
+ return NULL;
+}
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
char *ip_name;
+ const char *legacy;
uint32_t version = adev->ip_versions[block_type][0];
+ legacy = amdgpu_ucode_legacy_naming(adev, block_type);
+ if (legacy) {
+ snprintf(ucode_prefix, len, "%s", legacy);
+ return;
+ }
+
switch (block_type) {
case GC_HWIP:
ip_name = "gc";
@@ -1091,3 +1303,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
}
+
+/*
+ * amdgpu_ucode_request - Fetch and validate amdgpu microcode
+ *
+ * @adev: amdgpu device
+ * @fw: pointer to load firmware to
+ * @fw_name: firmware to load
+ *
+ * This is a helper that will use request_firmware and amdgpu_ucode_validate
+ * to load and run basic validation on firmware. If the load fails, remap
+ * the error code to -ENODEV, so that early_init functions will fail to load.
+ */
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ const char *fw_name)
+{
+ int err = request_firmware(fw, fw_name, adev->dev);
+
+ if (err)
+ return -ENODEV;
+ err = amdgpu_ucode_validate(*fw);
+ if (err)
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+
+ return err;
+}
+
+/*
+ * amdgpu_ucode_release - Release firmware microcode
+ *
+ * @fw: pointer to firmware to release
+ */
+void amdgpu_ucode_release(const struct firmware **fw)
+{
+ release_firmware(*fw);
+ *fw = NULL;
+}
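The request/release pair above becomes the single entry point for microcode handling in the rest of this series (UVD, VCE, VCN, SDMA all switch over below). A minimal sketch of the intended calling pattern, assuming the usual amdgpu driver context; the firmware name and fw field used here are placeholders, not values from this patch:

/* Sketch only: illustrative caller of the new helpers; "amdgpu/example.bin" is a placeholder. */
static int example_init_microcode(struct amdgpu_device *adev)
{
	int r;

	/* Fetch and validate in one step; a missing file is reported as -ENODEV. */
	r = amdgpu_ucode_request(adev, &adev->vcn.fw, "amdgpu/example.bin");
	if (r)
		/* Safe even when the request itself failed; the pointer is reset to NULL. */
		amdgpu_ucode_release(&adev->vcn.fw);

	return r;
}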
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 552e06929229..b03321e7d2d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -538,12 +538,15 @@ struct amdgpu_firmware {
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-int amdgpu_ucode_validate(const struct firmware *fw);
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ const char *fw_name);
+void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f76c19fc0392..1edf8e6aeb16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -68,7 +68,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_save_bad_pages(adev, NULL);
}
out:
@@ -147,7 +147,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
@@ -169,25 +169,33 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
- if (!adev->gmc.xgmi.connected_to_cpu) {
- struct ras_err_data err_data = {0, 0, 0, NULL};
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
+ } else if (reset) {
+ /* MCA poison handler is only responsible for GPU reset,
+ * let MCA notifier do page retirement.
+ */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev);
}
- } else if (reset) {
- /* MCA poison handler is only responsible for GPU reset,
- * let MCA notifier do page retirement.
- */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV!\n");
}
return ret;
@@ -200,6 +208,36 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_umc_ras *ras;
+
+ if (!adev->umc.ras)
+ return 0;
+
+ ras = adev->umc.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register umc ras block!\n");
+ return err;
+ }
+
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &ras->ras_block.ras_comm;
+
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+
+ return 0;
+}
+
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
@@ -264,3 +302,34 @@ void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
err_data->err_addr_cnt++;
}
+
+int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
+ umc_func func, void *data)
+{
+ uint32_t node_inst = 0;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ int ret = 0;
+
+ if (adev->umc.node_inst_num) {
+ LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ } else {
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ ret = func(adev, 0, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
+ umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
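amdgpu_umc_loop_channels() above hides the choice between the node-aware and legacy iteration macros behind a single callback interface. A minimal sketch of a caller, assuming the amdgpu driver context; the callback and its bookkeeping are purely illustrative:

/* Sketch only: hypothetical per-channel callback matching the umc_func typedef. */
static int example_count_channel(struct amdgpu_device *adev, uint32_t node_inst,
				 uint32_t umc_inst, uint32_t ch_inst, void *data)
{
	uint32_t *visited = data;

	dev_dbg(adev->dev, "node %u umc %u ch %u\n", node_inst, umc_inst, ch_inst);
	(*visited)++;
	return 0;	/* a non-zero return aborts the walk and is propagated */
}

static void example_walk_umc(struct amdgpu_device *adev)
{
	uint32_t visited = 0;

	amdgpu_umc_loop_channels(adev, example_count_channel, &visited);
	dev_dbg(adev->dev, "visited %u channels\n", visited);
}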
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a6951160f13a..86133f77a9a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -42,11 +42,15 @@
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
#define LOOP_UMC_NODE_INST(node_inst) \
- for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)
+ for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)
#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
+
+typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
+ uint32_t umc_inst, uint32_t ch_inst, void *data);
+
struct amdgpu_umc_ras {
struct amdgpu_ras_block_object ras_block;
void (*err_cnt_init)(struct amdgpu_device *adev);
@@ -69,19 +73,25 @@ struct amdgpu_umc {
/* number of umc instance with memory map register access */
uint32_t umc_inst_num;
- /*number of umc node instance with memory map register access*/
+ /* Total number of umc node instances, including harvested ones */
uint32_t node_inst_num;
/* UMC regiser per channel offset */
uint32_t channel_offs;
+ /* how many pages are retired in one UE */
+ uint32_t retire_unit;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
struct amdgpu_umc_ras *ras;
+
+ /* active mask for umc node instance */
+ unsigned long active_mask;
};
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
@@ -98,4 +108,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst);
+
+int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
+ umc_func func, void *data);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e00bb654e24b..6887109abb13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,19 +260,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->uvd.fw);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->uvd.fw);
- adev->uvd.fw = NULL;
+ amdgpu_ucode_release(&adev->uvd.fw);
return r;
}
@@ -331,8 +323,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (adev->uvd.harvest_config & (1 << j))
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
- &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+ &adev->uvd.inst[j].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -394,7 +389,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
- release_firmware(adev->uvd.fw);
+ amdgpu_ucode_release(&adev->uvd.fw);
return 0;
}
@@ -1123,14 +1118,11 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
+ uint32_t offset, data[4];
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint32_t data[4];
uint64_t addr;
- long r;
- int i;
- unsigned offset_idx = 0;
- unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
+ int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
@@ -1139,16 +1131,15 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (adev->asic_type >= CHIP_VEGA10) {
- offset_idx = 1 + ring->me;
- offset[1] = adev->reg_offset[UVD_HWIP][0][1];
- offset[2] = adev->reg_offset[UVD_HWIP][1][1];
- }
+ if (adev->asic_type >= CHIP_VEGA10)
+ offset = adev->reg_offset[UVD_HWIP][ring->me][1];
+ else
+ offset = UVD_BASE_SI;
- data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
- data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
- data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
- data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+ data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
+ data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
+ data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
+ data[3] = PACKET0(offset + UVD_NO_OP, 0);
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
@@ -1165,14 +1156,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout(bo->tbo.base.resv,
- DMA_RESV_USAGE_KERNEL, false,
- msecs_to_jiffies(10));
- if (r == 0)
- r = -ETIMEDOUT;
- if (r < 0)
- goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b239e874f2d5..e2b7324a70cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,19 +158,11 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->vce.fw);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->vce.fw);
- adev->vce.fw = NULL;
+ amdgpu_ucode_release(&adev->vce.fw);
return r;
}
@@ -186,7 +178,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
(binary_id << 8));
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vce.vcpu_bo,
&adev->vce.gpu_addr, &adev->vce.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -226,7 +220,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
- release_firmware(adev->vce.fw);
+ amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
return 0;
@@ -591,6 +585,7 @@ err:
/**
* amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
*
+ * @p: cs parser
* @ib: indirect buffer to use
* @lo: address of lower dword
* @hi: address of higher dword
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b1622ac9949f..e63fcc58e8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -26,6 +26,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>
@@ -36,26 +37,26 @@
#include "soc15d.h"
/* Firmware Names */
-#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
-#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
-#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
-#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
-#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
-#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
-#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
-#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
-#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
-#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
-#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
-#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
+#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
+#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
+#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
+#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
+#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
+#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
+#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
+#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
+#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
+#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
+#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
-#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
-#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
-#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
-#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
-#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
-#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
+#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
+#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
+#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
+#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
+#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
+#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -80,10 +81,24 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+{
+ char ucode_prefix[30];
+ char fw_name[40];
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.fw);
+
+ return r;
+}
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
unsigned long bo_size;
- const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
@@ -96,130 +111,26 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
- switch (adev->ip_versions[UVD_HWIP][0]) {
- case IP_VERSION(1, 0, 0):
- case IP_VERSION(1, 0, 1):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- fw_name = FIRMWARE_RAVEN2;
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- fw_name = FIRMWARE_PICASSO;
- else
- fw_name = FIRMWARE_RAVEN;
- break;
- case IP_VERSION(2, 5, 0):
- fw_name = FIRMWARE_ARCTURUS;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 2, 0):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- fw_name = FIRMWARE_RENOIR;
- else
- fw_name = FIRMWARE_GREEN_SARDINE;
-
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 6, 0):
- fw_name = FIRMWARE_ALDEBARAN;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 0, 0):
- fw_name = FIRMWARE_NAVI10;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 0, 2):
- if (adev->asic_type == CHIP_NAVI12)
- fw_name = FIRMWARE_NAVI12;
- else
- fw_name = FIRMWARE_NAVI14;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 0):
- case IP_VERSION(3, 0, 64):
- case IP_VERSION(3, 0, 192):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
- fw_name = FIRMWARE_SIENNA_CICHLID;
- else
- fw_name = FIRMWARE_NAVY_FLOUNDER;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 2):
- fw_name = FIRMWARE_VANGOGH;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 16):
- fw_name = FIRMWARE_DIMGREY_CAVEFISH;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 33):
- fw_name = FIRMWARE_BEIGE_GOBY;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 1, 1):
- fw_name = FIRMWARE_YELLOW_CARP;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 1, 2):
- fw_name = FIRMWARE_VCN_3_1_2;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(4, 0, 0):
- fw_name = FIRMWARE_VCN4_0_0;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(4, 0, 2):
- fw_name = FIRMWARE_VCN4_0_2;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(4, 0, 4):
- fw_name = FIRMWARE_VCN4_0_4;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- default:
- return -EINVAL;
- }
-
- r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
- r = amdgpu_ucode_validate(adev->vcn.fw);
- if (r) {
- dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->vcn.fw);
- adev->vcn.fw = NULL;
- return r;
+ /*
+ * Some Steam Deck BIOS versions are incompatible with the
+ * indirect SRAM mode, leading to amdgpu being unable to get
+ * properly probed (and even potentially crashing the kernel).
+ * Hence, check for these versions here - notice this is
+ * restricted to Vangogh (Deck's APU).
+ */
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.indirect_sram = false;
+ dev_info(adev->dev,
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ }
}
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -274,8 +185,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
@@ -296,8 +210,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->vcn.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
@@ -333,7 +250,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
}
- release_firmware(adev->vcn.fw);
+ amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -1250,25 +1167,42 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
if (!ras_if)
return 0;
- ih_data.head = *ras_if;
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ if (!amdgpu_sriov_vf(adev)) {
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV for VCN!\n");
+ }
return 0;
}
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
+ int err;
+ struct amdgpu_vcn_ras *ras;
+
if (!adev->vcn.ras)
- return;
+ return 0;
- amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
+ ras = adev->vcn.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register vcn ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "vcn");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->vcn.ras_if = &ras->ras_block.ras_comm;
- strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
- adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
- adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->vcn.ras->ras_block.ras_late_init)
- adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dbb8d68a30c6..c730949ece7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -369,6 +369,7 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
+int amdgpu_vcn_early_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
@@ -399,6 +400,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
#endif
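With the firmware name now derived from the IP version, individual VCN blocks can fetch their microcode already in early_init and register RAS from sw_init through the new amdgpu_vcn_ras_sw_init(). A minimal sketch of how an IP block might wire these up, assuming the amdgpu driver context; the function names are illustrative stand-ins, not code from this patch:

/* Sketch only: illustrative IP-block hooks using the reworked VCN helpers. */
static int example_vcn_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Requests "amdgpu/<ucode_prefix>.bin" via amdgpu_ucode_request(). */
	return amdgpu_vcn_early_init(adev);
}

static int example_vcn_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	/* Replaces the old void amdgpu_vcn_set_ras_funcs(); errors now propagate. */
	return amdgpu_vcn_ras_sw_init(adev);
}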
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2994b9db196f..f2e2cbaa7fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -232,7 +232,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
return 0;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
@@ -982,11 +983,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
if (offset == reg_access_ctrl->grbm_cntl) {
/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
writel(v, scratch_reg2);
- writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else if (offset == reg_access_ctrl->grbm_idx) {
/* if the target reg offset is grbm_idx, write to scratch_reg3 */
writel(v, scratch_reg3);
- writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else {
/*
* SCRATCH_REG0 = read/write value
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 2b9d806e23af..4f7bab52282a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -88,6 +88,7 @@ struct amdgpu_virt_ops {
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
u32 data1, u32 data2, u32 data3);
+ void (*ras_poison_handler)(struct amdgpu_device *adev);
};
/*
@@ -123,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
/* Indirect Reg Access enabled */
AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+ /* AV1 support mode */
+ AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -321,6 +324,8 @@ static inline bool is_virtual_machine(void)
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dc379dc22c77..3c0310576b3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
@@ -482,7 +483,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
+ unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
if (job->vmid == 0)
@@ -516,7 +517,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
+ unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
bool spm_update_needed = job->spm_update_needed;
@@ -866,6 +867,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pages_addr[idx - 1] + PAGE_SIZE))
break;
}
+ if (!contiguous)
+ count--;
num_entries = count *
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
@@ -917,8 +920,8 @@ error_unlock:
return r;
}
-void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats *stats)
{
struct amdgpu_bo_va *bo_va, *tmp;
@@ -926,41 +929,36 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
spin_unlock(&vm->status_lock);
}
+
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 094bb4807303..6f085f0b4ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -29,7 +29,7 @@
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
@@ -40,6 +40,7 @@ struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
+struct amdgpu_mem_stats;
/*
* GPUVM handling
@@ -457,8 +458,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats *stats);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b5f3bba851db..df63dc3bca18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -673,6 +673,7 @@ void amdgpu_vm_pt_free_work(struct work_struct *work)
* @adev: amdgpu device structure
* @vm: amdgpu vm structure
* @start: optional cursor where to start freeing PDs/PTs
+ * @unlocked: vm resv unlock status
*
* Free the page directory or page table level and all sub levels.
*/
@@ -974,7 +975,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
- vm->task_info.pid,
+ vm->task_info.tgid,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 535cd6569bcc..349416e176a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -171,7 +171,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
- pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo));
+ pe += amdgpu_bo_gpu_offset_no_check(bo);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -198,7 +198,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
- pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo));
+ pe += amdgpu_bo_gpu_offset_no_check(bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index faa12146635c..43d6a9d6a538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -453,7 +453,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* Limit maximum size to 2GiB due to SG table limitations */
size = min(remaining_size, 2ULL << 30);
- if (size >= (u64)pages_per_block << PAGE_SHIFT)
+ if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
+ !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
cur_size = size;
@@ -882,7 +883,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
kfree(rsv);
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
- drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+ drm_buddy_free_list(&mgr->mm, &rsv->allocated);
kfree(rsv);
}
drm_buddy_fini(&mgr->mm);
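The new condition in amdgpu_vram_mgr_new() only bumps min_block_size to the large block size when the requested size is an exact multiple of it; otherwise the allocation falls back to page granularity so the buddy allocator is not forced to over-align the tail. A small standalone check of the same arithmetic, using 2 MiB purely as an example block size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t block = 2ULL << 20;	/* stand-in for pages_per_block << PAGE_SHIFT */
	const uint64_t sizes[] = { 4ULL << 20, 5ULL << 20 };

	for (int i = 0; i < 2; i++) {
		uint64_t size = sizes[i];
		/* same test as the patch: large enough AND an exact multiple of block */
		int use_large = (size >= block) && !(size & (block - 1));

		printf("size=%llu MiB -> min_block_size = %s\n",
		       (unsigned long long)(size >> 20), use_large ? "block" : "page");
	}
	return 0;
}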
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 4b9e7b050ccd..439925477fb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -29,13 +29,16 @@
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"
#include "amdgpu_reset.h"
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
+#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
+#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218
static DEFINE_MUTEX(xgmi_mutex);
@@ -79,11 +82,27 @@ static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};
+static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
+};
+
static const int walf_pcs_err_status_reg_aldebaran[] = {
smnPCS_GOPX1_PCS_ERROR_STATUS,
smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};
+static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
+ smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
+};
+
static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
{"XGMI PCS DataLossErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
@@ -162,6 +181,67 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};
+static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
+ {"XGMI3X16 PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI3X16 PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI3X16 PCS FlowCtrlAckErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
+ {"XGMI3X16 PCS RxFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
+ {"XGMI3X16 PCS RxFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
+ {"XGMI3X16 PCS CRCErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI3X16 PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI3X16 PCS TxVcidDataErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
+ {"XGMI3X16 PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI3X16 PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI3X16 PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI3X16 PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI3X16 PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI3X16 PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI3X16 PCS FlowCtrlCRCErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
+ {"XGMI3X16 PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI3X16 PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI3X16 PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI3X16 PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI3X16 PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI3X16 PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI3X16 PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+ {"XGMI3X16 PCS ReplayAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
+ {"XGMI3X16 PCS SyncHdrErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
+ {"XGMI3X16 PCS TxReplayTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
+ {"XGMI3X16 PCS RxReplayTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
+ {"XGMI3X16 PCS LinkSubTxTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
+ {"XGMI3X16 PCS LinkSubRxTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
+ {"XGMI3X16 PCS RxCMDPktErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
+};
+
/**
* DOC: AMDGPU XGMI Support
*
@@ -228,7 +308,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
.show = amdgpu_xgmi_show_attrs,
};
-struct kobj_type amdgpu_xgmi_hive_type = {
+static const struct kobj_type amdgpu_xgmi_hive_type = {
.release = amdgpu_xgmi_hive_release,
.sysfs_ops = &amdgpu_xgmi_hive_ops,
.default_groups = amdgpu_xgmi_hive_groups,
@@ -809,39 +889,47 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
uint32_t value,
+ uint32_t mask_value,
uint32_t *ue_count,
uint32_t *ce_count,
- bool is_xgmi_pcs)
+ bool is_xgmi_pcs,
+ bool check_mask)
{
int i;
- int ue_cnt;
+ int ue_cnt = 0;
+ const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
+ uint32_t field_array_size = 0;
if (is_xgmi_pcs) {
- /* query xgmi pcs error status,
- * only ue is supported */
- for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) {
- ue_cnt = (value &
- xgmi_pcs_ras_fields[i].pcs_err_mask) >>
- xgmi_pcs_ras_fields[i].pcs_err_shift;
- if (ue_cnt) {
- dev_info(adev->dev, "%s detected\n",
- xgmi_pcs_ras_fields[i].err_name);
- *ue_count += ue_cnt;
- }
+ if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
+ pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
+ } else {
+ pcs_ras_fields = &xgmi_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
}
} else {
- /* query wafl pcs error status,
- * only ue is supported */
- for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
- ue_cnt = (value &
- wafl_pcs_ras_fields[i].pcs_err_mask) >>
- wafl_pcs_ras_fields[i].pcs_err_shift;
- if (ue_cnt) {
- dev_info(adev->dev, "%s detected\n",
- wafl_pcs_ras_fields[i].err_name);
- *ue_count += ue_cnt;
- }
+ pcs_ras_fields = &wafl_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
+ }
+
+ if (check_mask)
+ value = value & ~mask_value;
+
+ /* query xgmi/wafl pcs error status,
+ * only ue is supported */
+ for (i = 0; value && i < field_array_size; i++) {
+ ue_cnt = (value &
+ pcs_ras_fields[i].pcs_err_mask) >>
+ pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
}
+
+ /* reset bit value if the bit is checked */
+ value &= ~(pcs_ras_fields[i].pcs_err_mask);
}
return 0;
@@ -852,7 +940,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
- uint32_t data;
+ uint32_t data, mask_data = 0;
uint32_t ue_cnt = 0, ce_cnt = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
@@ -867,15 +955,15 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, false);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_VEGA20:
@@ -883,31 +971,35 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, false);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_ALDEBARAN:
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+ mask_data =
+ RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, true);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
+ mask_data =
+ RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, false);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, true);
}
break;
default:
@@ -956,12 +1048,30 @@ struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
struct amdgpu_xgmi_ras xgmi_ras = {
.ras_block = {
- .ras_comm = {
- .name = "xgmi_wafl",
- .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- },
.hw_ops = &xgmi_ras_hw_ops,
.ras_late_init = amdgpu_xgmi_ras_late_init,
},
};
+
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_xgmi_ras *ras;
+
+ if (!adev->gmc.xgmi.ras)
+ return 0;
+
+ ras = adev->gmc.xgmi.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
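The reworked amdgpu_xgmi_query_pcs_error_status() above first drops any bits flagged in the per-link noncorrectable mask register, then walks a single field table, clearing each field once it has been counted and exiting early when no bits remain. A small standalone sketch of that filter-and-count shape, with made-up field layout and register values:

#include <stdint.h>
#include <stdio.h>

struct field { const char *name; uint32_t mask; int shift; };

int main(void)
{
	/* made-up 2-bit counters standing in for PCS_ERROR_STATUS fields */
	static const struct field fields[] = {
		{ "DataLossErr", 0x3 << 0, 0 },
		{ "CRCErr",      0x3 << 2, 2 },
	};
	uint32_t value = 0x0000000d;	/* example status: DataLossErr=1, CRCErr=3 */
	uint32_t mask = 0x0000000c;	/* example: CRC field masked off */
	uint32_t ue_count = 0;

	value &= ~mask;			/* masked bits are ignored, as in the patch */
	for (unsigned int i = 0; value && i < 2; i++) {
		uint32_t cnt = (value & fields[i].mask) >> fields[i].shift;

		if (cnt) {
			printf("%s detected (%u)\n", fields[i].name, cnt);
			ue_count += cnt;
		}
		value &= ~fields[i].mask;	/* clear the field once counted */
	}
	printf("total ue_count = %u\n", ue_count);
	return 0;
}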
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 30dcc1681b4e..86fbf56938f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -73,5 +73,6 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
adev->gmc.xgmi.hive_id &&
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 6c97148ca0ed..24d42d24e6a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
- uint32_t reserved : 26;
+ uint32_t av1_support : 1;
+ uint32_t reserved : 25;
} flags;
uint32_t all;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index afad094f84c2..10098fdd33fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_fixed.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 18ae9433e463..d95b2dc78063 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -28,7 +28,6 @@
#include <acpi/video.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cbca9866645c..67d16236b216 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -73,10 +73,9 @@ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/*
@@ -137,18 +136,15 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
}
out:
if (err) {
pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 248f1a4e915f..9a24ed463abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -21,8 +21,9 @@
*
*/
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2837,7 +2838,7 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2902,7 +2903,7 @@ static int dce_v10_0_hw_fini(void *handle)
dce_v10_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3302,7 +3303,7 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v10_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index cd9c19060d89..c14b70350a51 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -21,8 +21,9 @@
*
*/
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2956,7 +2957,7 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -3032,7 +3033,7 @@ static int dce_v11_0_hw_fini(void *handle)
dce_v11_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3426,7 +3427,7 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 76323deecc58..7f85ba5b726f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -23,8 +23,9 @@
#include <linux/pci.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2715,7 +2716,7 @@ static int dce_v6_0_sw_init(void *handle)
return r;
/* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2776,7 +2777,7 @@ static int dce_v6_0_hw_fini(void *handle)
dce_v6_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3103,7 +3104,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 01cf3ab111cb..d421a268c9ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -21,8 +21,9 @@
*
*/
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2739,7 +2740,7 @@ static int dce_v8_0_sw_init(void *handle)
return r;
/* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2802,7 +2803,7 @@ static int dce_v8_0_hw_fini(void *handle)
dce_v8_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3195,7 +3196,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
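
The four DCE hunks above make the same change: the hotplug handler moves from a plain work item to a delayed work item, so an immediate schedule still uses a delay of 0 while later callers may pass a real delay. A generic sketch of that workqueue pattern, with placeholder names (hotplug_dwork and hotplug_fn are not the driver's symbols):

#include <linux/workqueue.h>

static struct delayed_work hotplug_dwork;

static void hotplug_fn(struct work_struct *work)
{
	/* react to the hotplug event (detect connectors, send uevent, ...) */
}

static void example_init(void)
{
	INIT_DELAYED_WORK(&hotplug_dwork, hotplug_fn);
}

static void example_irq_path(void)
{
	/* a delay of 0 behaves like schedule_work() */
	schedule_delayed_work(&hotplug_dwork, 0);
}

static void example_fini(void)
{
	/* wait for any queued or running instance before teardown */
	flush_delayed_work(&hotplug_dwork);
}
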
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index b991609f46c1..5dfab80ffff2 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -94,7 +94,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
}
- /* Exit boradcast mode */
+ /* Exit broadcast mode */
adev->df.funcs->enable_broadcast_mode(adev, false);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_3.c b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c
new file mode 100644
index 000000000000..e8b9e19ede2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v4_3.h"
+
+#include "df/df_4_3_offset.h"
+#include "df/df_4_3_sh_mask.h"
+
+static bool df_v4_3_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t hw_assert_msklo, hw_assert_mskhi;
+ uint32_t v0, v1, v28, v31;
+
+ hw_assert_msklo = RREG32_SOC15(DF, 0,
+ regDF_CS_UMC_AON0_HardwareAssertMaskLow);
+ hw_assert_mskhi = RREG32_SOC15(DF, 0,
+ regDF_NCS_PG0_HardwareAssertMaskHigh);
+
+ v0 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
+ v1 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
+ v28 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
+ v31 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);
+
+ if (v0 && v1 && v28 && v31)
+ return true;
+ else if (!v0 && !v1 && !v28 && !v31)
+ return false;
+ else {
+ dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
+ v0, v1, v28, v31);
+ return false;
+ }
+}
+
+const struct amdgpu_df_funcs df_v4_3_funcs = {
+ .query_ras_poison_mode = df_v4_3_query_ras_poison_mode,
+};
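
The new query helper treats the four hardware-assert mask bits as one logical setting: all set means RAS poison mode is enabled, all clear means disabled, and any mix is warned about and treated as disabled. A plain-C sketch of that three-way consistency check (check_consistent_flags is an illustrative name):

#include <linux/printk.h>
#include <linux/types.h>

/* Sketch only: true when every flag agrees on "enabled"; disagreement is
 * reported and treated the same as "disabled". */
static bool check_consistent_flags(u32 a, u32 b, u32 c, u32 d)
{
	if (a && b && c && d)
		return true;
	if (!a && !b && !c && !d)
		return false;
	pr_warn("poison setting is inconsistent (%u:%u:%u:%u)\n", a, b, c, d);
	return false;
}
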
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h
index 801a95b34e8c..06ef0724edd3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h
@@ -1,4 +1,3 @@
-
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
@@ -20,9 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-/*********************************************************************/
-// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
-/*********************************************************************/
+
+#ifndef __DF_V4_3_H__
+#define __DF_V4_3_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_df_funcs df_v4_3_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 49d34c7bbf20..f5b5ce1051a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3891,18 +3891,12 @@ err1:
static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -3974,9 +3968,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
char fw_name[40];
- char *wks = "";
+ char ucode_prefix[30];
+ const char *wks = "";
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -3984,90 +3978,40 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 1, 10):
- chip_name = "navi10";
- break;
- case IP_VERSION(10, 1, 1):
- chip_name = "navi14";
- if (!(adev->pdev->device == 0x7340 &&
- adev->pdev->revision != 0x00))
- wks = "_wks";
- break;
- case IP_VERSION(10, 1, 2):
- chip_name = "navi12";
- break;
- case IP_VERSION(10, 3, 0):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(10, 3, 2):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(10, 3, 1):
- chip_name = "vangogh";
- break;
- case IP_VERSION(10, 3, 4):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(10, 3, 5):
- chip_name = "beige_goby";
- break;
- case IP_VERSION(10, 3, 3):
- chip_name = "yellow_carp";
- break;
- case IP_VERSION(10, 3, 6):
- chip_name = "gc_10_3_6";
- break;
- case IP_VERSION(10, 1, 3):
- case IP_VERSION(10, 1, 4):
- chip_name = "cyan_skillfish2";
- break;
- case IP_VERSION(10, 3, 7):
- chip_name = "gc_10_3_7";
- break;
- default:
- BUG();
- }
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) &&
+ (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)))
+ wks = "_wks";
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
/* don't check this. There are apparently firmwares in the wild with
* incorrect size in the header
*/
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err == -ENODEV)
+ goto out;
if (err)
dev_dbg(adev->dev,
- "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
@@ -4077,47 +4021,34 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
- dev_err(adev->dev,
- "gfx10: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
gfx_v10_0_check_gfxoff_flag(adev);
@@ -4270,19 +4201,11 @@ static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
-static int gfx_v10_0_me_init(struct amdgpu_device *adev)
+static void gfx_v10_0_me_init(struct amdgpu_device *adev)
{
- int r;
-
bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
amdgpu_gfx_graphics_queue_acquire(adev);
-
- r = gfx_v10_0_init_microcode(adev);
- if (r)
- DRM_ERROR("Failed to load gfx firmware!\n");
-
- return r;
}
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
@@ -4538,6 +4461,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
else
ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
@@ -4566,6 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX10_MEC_HPD_SIZE);
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -4650,9 +4575,7 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- r = gfx_v10_0_me_init(adev);
- if (r)
- return r;
+ gfx_v10_0_me_init(adev);
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
@@ -7345,7 +7268,6 @@ static int gfx_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -7364,17 +7286,9 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
- /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
- } else {
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- }
-
+ /* Remove the steps of clearing KIQ position.
+ * It causes GFX hang when another Win guest is rendering.
+ */
return 0;
}
gfx_v10_0_cp_enable(adev, false);
@@ -7630,7 +7544,7 @@ static int gfx_v10_0_early_init(void *handle)
/* init rlcg reg access ctrl */
gfx_v10_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v10_0_init_microcode(adev);
}
static int gfx_v10_0_late_init(void *handle)
@@ -9337,7 +9251,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
.get_wptr = gfx_v10_0_ring_get_wptr_gfx,
.set_wptr = gfx_v10_0_ring_set_wptr_gfx,
@@ -9392,7 +9305,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_compute,
.get_wptr = gfx_v10_0_ring_get_wptr_compute,
.set_wptr = gfx_v10_0_ring_set_wptr_compute,
@@ -9428,7 +9340,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_compute,
.get_wptr = gfx_v10_0_ring_get_wptr_compute,
.set_wptr = gfx_v10_0_ring_set_wptr_compute,
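
One detail in the MEC2 hunk of the gfx10 conversion above: the second MEC firmware is optional, so a failed amdgpu_ucode_request() is deliberately not fatal; the pointer is cleared and the error dropped. Reduced to its shape, assuming the driver's usual headers are in scope (load_optional_fw is an illustrative name):

/* Sketch only: optional firmware blob, absence is not an error. */
static bool load_optional_fw(struct amdgpu_device *adev,
			     const struct firmware **fw, const char *name)
{
	if (amdgpu_ucode_request(adev, fw, name)) {
		*fw = NULL;	/* forget the optional blob and carry on */
		return false;
	}
	return true;	/* caller may now parse headers and set up ucode entries */
}
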
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index a56c6e106d00..f5c376276984 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -46,6 +46,7 @@
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
+#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
@@ -431,18 +432,37 @@ err1:
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
kfree(adev->gfx.rlc.register_list_format);
}
+static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
+{
+ const struct psp_firmware_header_v1_0 *toc_hdr;
+ int err = 0;
+ char fw_name[40];
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ if (err)
+ goto out;
+
+ toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
+ adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
+ adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
+ adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
+ adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
+ le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->psp.toc_fw);
+ return err;
+}
+
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -457,10 +477,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
@@ -477,10 +494,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -493,10 +507,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -508,10 +519,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -525,59 +533,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
+ err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);
+
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
out:
if (err) {
- dev_err(adev->dev,
- "gfx11: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
}
return err;
}
-static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev)
-{
- const struct psp_firmware_header_v1_0 *toc_hdr;
- int err = 0;
- char fw_name[40];
- char ucode_prefix[30];
-
- amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
- err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.toc_fw);
- if (err)
- goto out;
-
- toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
- adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
- adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
- adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
- adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
- le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
- return 0;
-out:
- dev_err(adev->dev, "Failed to load TOC microcode\n");
- release_firmware(adev->psp.toc_fw);
- adev->psp.toc_fw = NULL;
- return err;
-}
-
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
@@ -714,19 +686,11 @@ static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}
-static int gfx_v11_0_me_init(struct amdgpu_device *adev)
+static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
- int r;
-
bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
amdgpu_gfx_graphics_queue_acquire(adev);
-
- r = gfx_v11_0_init_microcode(adev);
- if (r)
- DRM_ERROR("Failed to load gfx firmware!\n");
-
- return r;
}
static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
@@ -790,8 +754,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
* zero here */
WARN_ON(simd != 0);
- /* type 2 wave data */
- dst[(*no_fields)++] = 2;
+ /* type 3 wave data */
+ dst[(*no_fields)++] = 3;
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
@@ -852,7 +816,14 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ break;
case IP_VERSION(11, 0, 3):
+ adev->gfx.ras = &gfx_v11_0_3_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -895,6 +866,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
else
ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
@@ -925,6 +897,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX11_MEC_HPD_SIZE);
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -987,10 +960,11 @@ static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
total_size = gfx_v11_0_calc_toc_total_size(adev);
r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.rlc_autoload_bo,
- &adev->gfx.rlc.rlc_autoload_gpu_addr,
- (void **)&adev->gfx.rlc.rlc_autoload_ptr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.rlc.rlc_autoload_bo,
+ &adev->gfx.rlc.rlc_autoload_gpu_addr,
+ (void **)&adev->gfx.rlc.rlc_autoload_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
@@ -1287,10 +1261,8 @@ static int gfx_v11_0_sw_init(void *handle)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- case IP_VERSION(11, 0, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -1298,6 +1270,15 @@ static int gfx_v11_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.me.num_me = 1;
+ adev->gfx.me.num_pipe_per_me = 1;
+ adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.mec.num_mec = 1;
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 4;
+ break;
default:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
@@ -1308,6 +1289,11 @@ static int gfx_v11_0_sw_init(void *handle)
break;
}
+ /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
+ amdgpu_sriov_is_pp_one_vf(adev))
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
+
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
@@ -1329,6 +1315,13 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ /* FED error */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
+ &adev->gfx.rlc_gc_fed_irq);
+ if (r)
+ return r;
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
if (adev->gfx.imu.funcs) {
@@ -1339,9 +1332,7 @@ static int gfx_v11_0_sw_init(void *handle)
}
}
- r = gfx_v11_0_me_init(adev);
- if (r)
- return r;
+ gfx_v11_0_me_init(adev);
r = gfx_v11_0_rlc_init(adev);
if (r) {
@@ -1409,9 +1400,6 @@ static int gfx_v11_0_sw_init(void *handle)
/* allocate visible FB for rlc auto-loading fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
- r = gfx_v11_0_init_toc_microcode(adev);
- if (r)
- dev_err(adev->dev, "Failed to load toc firmware!\n");
r = gfx_v11_0_rlc_autoload_buffer_init(adev);
if (r)
return r;
@@ -1421,6 +1409,11 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_gfx_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1510,44 +1503,70 @@ static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
-static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
+static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
- u32 data, mask;
+ u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
+
+ gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
+ gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
+ CC_GC_SA_UNIT_DISABLE,
+ SA_DISABLE);
+ gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
+ gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
+ GC_USER_SA_UNIT_DISABLE,
+ SA_DISABLE);
+ sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines);
- data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
- data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+ return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
+}
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
+{
+ u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
+ u32 rb_mask;
- mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
- adev->gfx.config.max_sh_per_se);
+ gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
+ gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
+ CC_RB_BACKEND_DISABLE,
+ BACKEND_DISABLE);
+ gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+ gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
+ GC_USER_RB_BACKEND_DISABLE,
+ BACKEND_DISABLE);
+ rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines);
- return (~data) & mask;
+ return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
- int i, j;
- u32 data;
- u32 active_rbs = 0;
- u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
- adev->gfx.config.max_sh_per_se;
+ u32 rb_bitmap_width_per_sa;
+ u32 max_sa;
+ u32 active_sa_bitmap;
+ u32 global_active_rb_bitmap;
+ u32 active_rb_bitmap = 0;
+ u32 i;
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
- data = gfx_v11_0_get_rb_active_bitmap(adev);
- active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
- rb_bitmap_width_per_sh);
- }
+ /* query sa bitmap from SA_UNIT_DISABLE registers */
+ active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
+ /* query rb bitmap from RB_BACKEND_DISABLE registers */
+ global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
+
+ /* generate active rb bitmap according to active sa bitmap */
+ max_sa = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_sh_per_se;
+ rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
+ for (i = 0; i < max_sa; i++) {
+ if (active_sa_bitmap & (1 << i))
+ active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.config.backend_enable_mask = active_rbs;
- adev->gfx.config.num_rbs = hweight32(active_rbs);
+ active_rb_bitmap |= global_active_rb_bitmap;
+ adev->gfx.config.backend_enable_mask = active_rb_bitmap;
+ adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
#define DEFAULT_SH_MEM_BASES (0x6000)
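
The reworked gfx_v11_0_setup_rb() above derives the render-backend bitmap from the shader-array bitmap instead of walking SE/SH indices under grbm_idx_mutex: every active SA contributes a contiguous group of RB bits (the 0x3 in the hunk is the two-RBs-per-SA case). A generalized sketch of that expansion (expand_sa_to_rb is an illustrative name):

#include <linux/types.h>

/* Sketch only: expand an active-SA bitmap into an active-RB bitmap,
 * giving each SA rbs_per_sa consecutive RB bits. */
static u32 expand_sa_to_rb(u32 active_sa_bitmap, u32 num_sa, u32 rbs_per_sa)
{
	u32 group = (1u << rbs_per_sa) - 1;	/* e.g. 0x3 for two RBs per SA */
	u32 active_rb_bitmap = 0;
	u32 i;

	for (i = 0; i < num_sa; i++)
		if (active_sa_bitmap & (1u << i))
			active_rb_bitmap |= group << (i * rbs_per_sa);

	return active_rb_bitmap;
}
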
@@ -1640,6 +1659,11 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
gfx_v11_0_get_tcc_info(adev);
adev->gfx.config.pa_sc_tile_steering_override = 0;
+ /* Set whether texture coordinate truncation is conformant. */
+ tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
+ adev->gfx.config.ta_cntl2_truncate_coord_mode =
+ REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
+
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
@@ -2649,7 +2673,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
/* 64kb align */
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.pfp.pfp_fw_obj,
&adev->gfx.pfp.pfp_fw_gpu_addr,
(void **)&adev->gfx.pfp.pfp_fw_ptr);
@@ -2660,7 +2686,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.pfp.pfp_fw_data_obj,
&adev->gfx.pfp.pfp_fw_data_gpu_addr,
(void **)&adev->gfx.pfp.pfp_fw_data_ptr);
@@ -2863,7 +2891,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
/* 64kb align*/
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.me.me_fw_obj,
&adev->gfx.me.me_fw_gpu_addr,
(void **)&adev->gfx.me.me_fw_ptr);
@@ -2874,7 +2904,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.me.me_fw_data_obj,
&adev->gfx.me.me_fw_data_gpu_addr,
(void **)&adev->gfx.me.me_fw_data_ptr);
@@ -3380,7 +3412,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_obj,
&adev->gfx.mec.mec_fw_gpu_addr,
(void **)&fw_ucode_ptr);
@@ -3391,7 +3425,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_data_obj,
&adev->gfx.mec.mec_fw_data_gpu_addr,
(void **)&fw_data_ptr);
@@ -4618,14 +4654,35 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
return false;
}
+static int gfx_v11_0_post_soft_reset(void *handle)
+{
+ /**
+	 * GFX soft reset impacts MES, so MES must be resumed after a GFX soft reset
+ */
+ return amdgpu_mes_resume((struct amdgpu_device *)handle);
+}
+
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ if (amdgpu_sriov_vf(adev)) {
+ clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
+ clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+ clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
+ if (clock_counter_hi_pre != clock_counter_hi_after)
+ clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+ } else {
+ clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
+ clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
+ if (clock_counter_hi_pre != clock_counter_hi_after)
+ clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ }
+ clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
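
Both branches of the new gfx_v11_0_get_gpu_clock_counter() above use the standard sequence for reading a 64-bit counter exposed as two 32-bit registers: read high, read low, read high again, and re-read low if the high word changed so the pair is taken after the rollover. As a standalone sketch (read_hi/read_lo stand in for the register reads):

#include <linux/types.h>

/* Sketch only: compose a 64-bit counter from split hi/lo registers that may
 * roll over between the two reads. */
static u64 read_counter64(u32 (*read_hi)(void), u32 (*read_lo)(void))
{
	u32 hi_pre = read_hi();
	u32 lo = read_lo();
	u32 hi_post = read_hi();

	if (hi_pre != hi_post)
		lo = read_lo();		/* low word wrapped: take the newer value */

	return ((u64)hi_post << 32) | lo;
}
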
@@ -4680,7 +4737,7 @@ static int gfx_v11_0_early_init(void *handle)
gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v11_0_init_microcode(adev);
}
static int gfx_v11_0_ras_late_init(void *handle)
@@ -6008,6 +6065,16 @@ static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
+ return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);
+
+ return 0;
+}
+
#if 0
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
@@ -6089,6 +6156,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.wait_for_idle = gfx_v11_0_wait_for_idle,
.soft_reset = gfx_v11_0_soft_reset,
.check_soft_reset = gfx_v11_0_check_soft_reset,
+ .post_soft_reset = gfx_v11_0_post_soft_reset,
.set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state,
@@ -6100,7 +6168,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
@@ -6148,7 +6215,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v11_0_ring_get_rptr_compute,
.get_wptr = gfx_v11_0_ring_get_wptr_compute,
.set_wptr = gfx_v11_0_ring_set_wptr_compute,
@@ -6184,7 +6250,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v11_0_ring_get_rptr_compute,
.get_wptr = gfx_v11_0_ring_get_wptr_compute,
.set_wptr = gfx_v11_0_ring_set_wptr_compute,
@@ -6238,6 +6303,10 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.process = gfx_v11_0_priv_inst_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
+ .process = gfx_v11_0_rlc_gc_fed_irq,
+};
+
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -6248,6 +6317,10 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
+
+ adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
+ adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
+
}
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
new file mode 100644
index 000000000000..068b9586a223
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "soc21.h"
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "gfx_v11_0.h"
+
+
+static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t rlc_status0 = 0, rlc_status1 = 0;
+ struct ras_common_if *ras_if = NULL;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ rlc_status0 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_0));
+ rlc_status1 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_1));
+
+ if (!rlc_status0 && !rlc_status1) {
+ dev_warn(adev->dev, "RLC_GC_FED irq is generated, but rlc_status0 and rlc_status1 are empty!\n");
+ return 0;
+ }
+
+ /* Use RLC_RLCS_FED_STATUS_0/1 to distinguish FED error block. */
+ if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) ||
+ REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR))
+ ras_if = adev->sdma.ras_if;
+ else
+ ras_if = adev->gfx.ras_if;
+
+ if (!ras_if) {
+ dev_err(adev->dev, "Gfx or sdma ras block not initialized, rlc_status0:0x%x.\n",
+ rlc_status0);
+ return -EINVAL;
+ }
+
+ dev_warn(adev->dev, "RLC %s FED IRQ\n", ras_if->name);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV for %s!\n", ras_if->name);
+ }
+
+ return 0;
+}
+
+static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ /* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. */
+ if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) &&
+ (entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) &&
+ !entry->vmid && !entry->pasid)
+ amdgpu_ras_reset_gpu(adev);
+
+ return 0;
+}
+
+struct amdgpu_gfx_ras gfx_v11_0_3_ras = {
+ .rlc_gc_fed_irq = gfx_v11_0_3_rlc_gc_fed_irq,
+ .poison_consumption_handler = gfx_v11_0_3_poison_consumption_handler,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h
new file mode 100644
index 000000000000..672c7920b3d0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V11_0_3_H__
+#define __GFX_V11_0_3_H__
+
+extern struct amdgpu_gfx_ras gfx_v11_0_3_ras;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 204b246f0e3f..c41219e23151 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -338,10 +338,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
@@ -349,10 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
@@ -360,10 +354,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
@@ -371,10 +362,9 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -382,14 +372,10 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("gfx6: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
}
return err;
}
@@ -2375,7 +2361,8 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0f2976507e48..9d5c1e29b4a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -887,6 +887,16 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *bu
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+}
+
/*
* Core functions
*/
@@ -927,88 +937,44 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (err)
goto out;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
-
out:
if (err) {
pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
+ gfx_v7_0_free_microcode(adev);
}
return err;
}
-static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
-{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
-}
-
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@@ -2772,7 +2738,8 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
* GFX7_MEC_HPD_SIZE * 2;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d47135606e3e..b1f2684d854a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -924,20 +924,14 @@ err1:
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ))
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -989,40 +983,34 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
@@ -1030,20 +1018,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
@@ -1060,10 +1045,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->virt.chained_ib_support = false;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -1110,20 +1094,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
@@ -1132,19 +1113,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.mec2_fw->data;
adev->gfx.mec2_fw_version =
@@ -1219,18 +1197,12 @@ out:
dev_err(adev->dev,
"gfx8: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
return err;
}
@@ -1340,7 +1312,8 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45c413c..f46d4b18a3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -149,6 +149,16 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a
+#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b
+#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0
+
+#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068
+#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069
+#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -1078,18 +1088,12 @@ err1:
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -1251,55 +1255,40 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
}
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
}
return err;
}
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
@@ -1328,10 +1317,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -1340,13 +1326,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
- if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+
return err;
}
@@ -1361,7 +1343,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
}
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
@@ -1371,10 +1353,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
@@ -1386,91 +1365,49 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+		/* MEC2 firmware is optional; ignore failures to load it */
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
} else {
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
-out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
- if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- }
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
return err;
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
+ char ucode_prefix[30];
int r;
DRM_DEBUG("\n");
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- chip_name = "vega10";
- break;
- case IP_VERSION(9, 2, 1):
- chip_name = "vega12";
- break;
- case IP_VERSION(9, 4, 0):
- chip_name = "vega20";
- break;
- case IP_VERSION(9, 2, 2):
- case IP_VERSION(9, 1, 0):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- case IP_VERSION(9, 4, 1):
- chip_name = "arcturus";
- break;
- case IP_VERSION(9, 3, 0):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case IP_VERSION(9, 4, 2):
- chip_name = "aldebaran";
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
/* No CPG in Arcturus */
if (adev->gfx.num_gfx_rings) {
- r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
+ r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
if (r)
return r;
}
- r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
+ r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
- r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
+ r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
@@ -1783,7 +1720,8 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -2008,27 +1946,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
- if (adev->gfx.ras) {
- err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
- if (err) {
- DRM_ERROR("Failed to register gfx ras block!\n");
- return err;
- }
-
- strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx");
- adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
- adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm;
-
- /* If not define special ras_late_init function, use gfx default ras_late_init */
- if (!adev->gfx.ras->ras_block.ras_late_init)
- adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->gfx.ras->ras_block.ras_cb)
- adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
- }
-
adev->gfx.config.gb_addr_config = gb_addr_config;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
@@ -2088,6 +2005,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -2158,12 +2076,6 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- r = gfx_v9_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load gfx firmware!\n");
- return r;
- }
-
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
r = adev->gfx.rlc.funcs->init(adev);
@@ -2193,6 +2105,7 @@ static int gfx_v9_0_sw_init(void *handle)
/* disable scheduler on the real ring */
ring->no_scheduler = true;
+ ring->vm_hub = AMDGPU_GFXHUB_0;
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -2210,6 +2123,7 @@ static int gfx_v9_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring->is_sw_ring = true;
hw_prio = amdgpu_sw_ring_priority(i);
+ ring->vm_hub = AMDGPU_GFXHUB_0;
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
NULL);
@@ -2276,6 +2190,11 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_gfx_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3845,7 +3764,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4082,6 +4002,36 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
+ case IP_VERSION(9, 1, 0):
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
+ clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
+ hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
+		/* The PWR TSC runs at 100 MHz, so the 32-bit counter carries
+		 * over roughly every 42 seconds (2^32 / 100 MHz).
+ */
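+		/* Re-read the low word if the high word rolled over between the two samples. */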
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
+ break;
+ case IP_VERSION(9, 2, 2):
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
+ clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
+ hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
+		/* The PWR TSC runs at 100 MHz, so the 32-bit counter carries
+		 * over roughly every 42 seconds (2^32 / 100 MHz).
+ */
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
+ break;
default:
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
@@ -4605,7 +4555,7 @@ static int gfx_v9_0_early_init(void *handle)
/* init rlcg reg access ctrl */
gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v9_0_init_microcode(adev);
}
static int gfx_v9_0_ecc_late_init(void *handle)
@@ -6844,7 +6794,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
@@ -6877,7 +6826,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
@@ -6899,7 +6847,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
@@ -6953,7 +6900,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -6992,7 +6938,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
new file mode 100644
index 000000000000..5f8500577c02
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15_common.h"
+#include "vega10_enum.h"
+
+#include "gc/gc_9_4_3_offset.h"
+#include "gc/gc_9_4_3_sh_mask.h"
+
+#include "gfx_v9_4_3.h"
+
+#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+
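+/* Latch and read the 64-bit GPU clock counter; GFXOFF is disabled and the
+ * clock mutex held while the RLC count registers are accessed.
+ */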
+static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ uint64_t clock;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ WREG32_SOC15(GC, 0, regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
+
+ return clock;
+}
+
+static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num,
+ u32 sh_num,
+ u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_INDEX, instance);
+
+ if (se_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SH_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+ WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
+}
+
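+/* Read one per-wave SQ register through the SQ_IND_INDEX / SQ_IND_DATA indirect interface. */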
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
+}
+
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
+{
+ WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
+}
+
+static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
+ uint32_t simd, uint32_t wave,
+ uint32_t *dst, int *no_fields)
+{
+ /* type 1 wave data */
+ dst[(*no_fields)++] = 1;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
+}
+
+static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
+{
+ wave_read_regs(adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
+}
+
+static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
+
+static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q, u32 vm)
+{
+ soc15_grbm_select(adev, me, pipe, q, vm);
+}
+
+static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
+{
+ uint32_t rlc_setting;
+
+ /* if RLC is not enabled, do nothing */
+ rlc_setting = RREG32_SOC15(GC, 0, regRLC_CNTL);
+ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+ return false;
+
+ return true;
+}
+
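+/* Request RLC safe mode and poll until the RLC acknowledges by clearing the CMD field. */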
+static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ unsigned i;
+
+ data = RLC_SAFE_MODE__CMD_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
+
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
+ }
+}
+
+static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RLC_SAFE_MODE__CMD_MASK;
+ WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
+}
+
+static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
+{
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
+ return 0;
+}
+
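+/* Wait for the RLC serdes CU masters on every SE/SH, then the non-CU masters, to go idle. */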
+static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+ u32 i, j, k;
+ u32 mask;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff);
+ for (k = 0; k < adev->usec_timeout; k++) {
+ if (RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0)
+ break;
+ udelay(1);
+ }
+ if (k == adev->usec_timeout) {
+ gfx_v9_4_3_select_se_sh(adev, 0xffffffff,
+ 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+ i, j);
+ return;
+ }
+ }
+ }
+ gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
+ RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
+ RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
+ RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
+ for (k = 0; k < adev->usec_timeout; k++) {
+ if ((RREG32_SOC15(GC, 0, regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp;
+
+ /* These interrupts should be enabled to drive DS clock */
+
+ tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
+
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
+ if (adev->gfx.num_gfx_rings)
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
+
+ WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+}
+
+static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
+{
+ WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
+ gfx_v9_4_3_enable_gui_idle_interrupt(adev, false);
+ gfx_v9_4_3_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
+{
+ WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+ udelay(50);
+ WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
+ udelay(50);
+}
+
+static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
+{
+#ifdef AMDGPU_RLC_DEBUG_RETRY
+ u32 rlc_ucode_ver;
+#endif
+
+ WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+ udelay(50);
+
+	/* APUs (e.g. carrizo) enable the cp interrupt only after cp is initialized */
+ if (!(adev->flags & AMD_IS_APU)) {
+ gfx_v9_4_3_enable_gui_idle_interrupt(adev, true);
+ udelay(50);
+ }
+
+#ifdef AMDGPU_RLC_DEBUG_RETRY
+ /* RLC_GPM_GENERAL_6 : RLC Ucode version */
+ rlc_ucode_ver = RREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_6);
+ if (rlc_ucode_ver == 0x108) {
+ dev_info(adev->dev,
+			 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
+ rlc_ucode_ver, adev->gfx.rlc_fw_version);
+ /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
+ * default is 0x9C4 to create a 100us interval */
+ WREG32_SOC15(GC, 0, regRLC_GPM_TIMER_INT_3, 0x9C4);
+ /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
+ * to disable the page fault retry interrupts, default is
+ * 0x100 (256) */
+ WREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_12, 0x100);
+ }
+#endif
+}
+
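+/* Legacy (non-PSP) RLC firmware load: write the ucode image dword by dword
+ * through RLC_GPM_UCODE_ADDR/DATA, then write the firmware version to
+ * RLC_GPM_UCODE_ADDR.
+ */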
+static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_0 *hdr;
+ const __le32 *fw_data;
+ unsigned i, fw_size;
+
+ if (!adev->gfx.rlc_fw)
+ return -EINVAL;
+
+ hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+ fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+
+ WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
+ RLCG_UCODE_LOADING_START_ADDRESS);
+ for (i = 0; i < fw_size; i++) {
+ if (amdgpu_emu_mode == 1 && i % 100 == 0) {
+ dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
+ msleep(1);
+ }
+ WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->gfx.rlc.funcs->stop(adev);
+
+ /* disable CG */
+ WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
+
+ /* TODO: revisit pg function */
+ /* gfx_v9_4_3_init_pg(adev);*/
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ /* legacy rlc firmware loading */
+ r = gfx_v9_4_3_rlc_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ adev->gfx.rlc.funcs->start(adev);
+
+ return 0;
+}
+
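+/* Route RLC SPM traffic to the given VMID; under SR-IOV one-VF mode the
+ * register is accessed without going through the KIQ.
+ */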
+static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 reg, data;
+
+ reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+}
+
+static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
+ {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
+ {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
+};
+
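+/* Return true if @offset matches one of the registers that must be accessed through the RLCG interface. */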
+static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v9_4_3_check_rlcg_range(adev, offset,
+ (void *)rlcg_access_gc_9_4_3,
+ ARRAY_SIZE(rlcg_access_gc_9_4_3));
+}
+
+const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_4_3_select_se_sh,
+ .read_wave_data = &gfx_v9_4_3_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
+};
+
+const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
+ .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
+ .set_safe_mode = gfx_v9_4_3_set_safe_mode,
+ .unset_safe_mode = gfx_v9_4_3_unset_safe_mode,
+ .init = gfx_v9_4_3_rlc_init,
+ .resume = gfx_v9_4_3_rlc_resume,
+ .stop = gfx_v9_4_3_rlc_stop,
+ .reset = gfx_v9_4_3_rlc_reset,
+ .start = gfx_v9_4_3_rlc_start,
+ .update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
+ .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
+};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
index ea8d9760132f..84e69701b81a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
@@ -19,16 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef __LINK_HWSS_HPO_FRL_H__
-#define __LINK_HWSS_HPO_FRL_H__
-#include "link_hwss.h"
+#ifndef __GFX_V9_4_3_H__
+#define __GFX_V9_4_3_H__
-bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
- const struct link_resource *link_res);
-const struct link_hwss *get_hpo_frl_link_hwss(void);
+extern const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs;
+extern const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs;
-#endif /* __LINK_HWSS_HPO_FRL_H__ */
+#endif /* __GFX_V9_4_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ec4d5e15b766..ab2325f6c7ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -120,7 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
new file mode 100644
index 000000000000..c59c6c85fbff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "gfxhub_v1_2.h"
+#include "gfxhub_v1_1.h"
+
+#include "gc/gc_9_4_3_offset.h"
+#include "gc/gc_9_4_3_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+
+#define regVM_L2_CNTL3_DEFAULT 0x80100007
+#define regVM_L2_CNTL4_DEFAULT 0x000000c1
+
+static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
+{
+ return (u64)RREG32_SOC15(GC, 0, regMC_VM_FB_OFFSET) << 24;
+}
+
+static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+}
+
+static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base;
+
+ if (adev->gmc.pdb0_bo)
+ pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
+ else
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v1_2_setup_vm_pt_regs(adev, 0, pt_base);
+
+	/* If GART is used for FB translation, the vmid0 page table covers
+	 * both vram and system memory (gart)
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ } else {
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
+}
+
+static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+ uint32_t tmp;
+
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+			 * Raven2 has a HW issue that prevents it from using
+			 * vram above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
+			 * workaround, raise the system aperture high address
+			 * by 1 to avoid the VM fault and hardware hang.
+ */
+ WREG32_SOC15_RLC(GC, 0,
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max((adev->gmc.fb_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
+ else
+ WREG32_SOC15_RLC(GC, 0,
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
+ WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+ }
+
+	/* When squeezing vram into the GART aperture, the FB and AGP
+	 * apertures are not used. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
+}
+
+static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+	/* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL4, tmp);
+}
+
+static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL, tmp);
+}
+
+static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0XFFFFFFFF);
+ WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+
+}
+
+static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ unsigned num_level, block_size;
+ uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
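+	/* Program page-table depth, block size and fault behaviour for the
+	 * 15 user VM contexts (VM_CONTEXT1..15).
+	 */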
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* Send no-retry XNACK on fault to suppress VM fault storm.
+ * On Aldebaran, XNACK can be enabled in the SQ per-process.
+ * Retry faults need to be enabled for that to work.
+ */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !adev->gmc.noretry ||
+ adev->asic_type == CHIP_ALDEBARAN);
+ WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(GC, 0,
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0,
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0,
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(GC, 0,
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+}
+
+static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ unsigned i;
+
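+	/* Open the full address range on all 18 VM invalidation engines. */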
+ for (i = 0 ; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
+ /*
+		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+		 * VF copy registers and vbios post doesn't program them, so
+		 * the SRIOV driver needs to program them
+ */
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ gfxhub_v1_2_init_gart_aperture_regs(adev);
+ gfxhub_v1_2_init_system_aperture_regs(adev);
+ gfxhub_v1_2_init_tlb_regs(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_2_init_cache_regs(adev);
+
+ gfxhub_v1_2_enable_system_domain(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_2_disable_identity_aperture(adev);
+ gfxhub_v1_2_setup_vmid_config(adev);
+ gfxhub_v1_2_program_invalidation(adev);
+
+ return 0;
+}
+
+static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp,
+ MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL,
+ 0);
+ WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, regVM_L2_CNTL, tmp);
+ WREG32_SOC15(GC, 0, regVM_L2_CNTL3, 0);
+}
+
+/**
+ * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value)
+{
+ u32 tmp;
+ tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp,
+ VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+ WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
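+/* Cache the GC hub register offsets and per-context/per-engine strides used
+ * by the common GMC code.
+ */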
+static void gfxhub_v1_2_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_SEM);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(GC, 0, regVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+}
+
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
+ .get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v1_2_gart_enable,
+ .gart_disable = gfxhub_v1_2_gart_disable,
+ .set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
+ .init = gfxhub_v1_2_init,
+ .get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h
new file mode 100644
index 000000000000..e2d508f5a7ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFXHUB_V1_2_H__
+#define __GFXHUB_V1_2_H__
+
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 34513e8e1519..9b3a02527318 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -165,7 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 3f8676d23a5e..4aacbbec31e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -167,7 +167,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index 0e13370c2057..13712640fa46 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -151,19 +151,20 @@ static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Disable AGP. */
+ /* Program the AGP BAR */
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
@@ -416,34 +417,12 @@ static void gfxhub_v3_0_set_fault_enable_default(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, CP_DEBUG, CPG_UTCL1_ERROR_HALT_DISABLE, 1);
WREG32_SOC15(GC, 0, regCP_DEBUG, tmp);
- /**
- * Set GRBM_GFX_INDEX in broad cast mode
- * before programming GL1C_UTCL0_CNTL1 and SQG_CONFIG
- */
- WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, regGRBM_GFX_INDEX_DEFAULT);
-
- /**
- * Retry respond mode: RETRY
- * Error (no retry) respond mode: SUCCESS
- */
- tmp = RREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1);
- tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_MODE, 0);
- tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_FAULT_MODE, 0x2);
- WREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1, tmp);
-
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
- /* Disable SQ XNACK interrupt for all VMIDs */
- tmp = RREG32_SOC15(GC, 0, regSQG_CONFIG);
- tmp = REG_SET_FIELD(tmp, SQG_CONFIG, XNACK_INTR_MASK,
- SQG_CONFIG__XNACK_INTR_MASK_MASK >>
- SQG_CONFIG__XNACK_INTR_MASK__SHIFT);
- WREG32_SOC15(GC, 0, regSQG_CONFIG, tmp);
-
tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
index 080ff11ca305..6e0bd628c889 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -159,17 +159,17 @@ static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev)
/* Disable AGP. */
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 21e46817d82d..b213dcf8ca06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -78,13 +78,25 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -467,8 +479,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
@@ -522,7 +534,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
if (ring->is_mes_queue)
return;
- if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB_0)
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -680,31 +692,15 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v8_7_ras;
break;
default:
break;
}
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
-
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MMHUB_HWIP][0]) {
@@ -741,7 +737,6 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static int gmc_v10_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_set_mmhub_funcs(adev);
@@ -757,10 +752,6 @@ static int gmc_v10_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -835,10 +826,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1014,6 +1002,10 @@ static int gmc_v10_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1061,9 +1053,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
@@ -1077,10 +1072,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
- gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+ if (!adev->in_s0ix)
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
@@ -1101,7 +1098,7 @@ static int gmc_v10_0_hw_init(void *handle)
* harvestable groups in gc_utcl2 need to be programmed before any GFX block
* register setup within GMC, or else system hang when harvesting SA.
*/
- if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
+ if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
adev->gfxhub.funcs->utcl2_harvest(adev);
r = gmc_v10_0_gart_enable(adev);
@@ -1129,7 +1126,8 @@ static int gmc_v10_0_hw_init(void *handle)
*/
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
@@ -1145,7 +1143,6 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 4326078689cd..d95f9fe8f1c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -64,13 +64,25 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -262,6 +274,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
+ * @vmhub: which hub to flush
+ * @flush_type: the flush type
*
* Flush the TLB for the requested page table.
*/
@@ -301,6 +315,8 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*
* @adev: amdgpu_device pointer
* @pasid: pasid to be flush
+ * @flush_type: the flush type
+ * @all_hub: flush all hubs
*
* Flush the TLB for the requested pasid.
*/
@@ -362,8 +378,8 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
@@ -417,7 +433,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
if (ring->is_mes_queue)
return;
- if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB_0)
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
@@ -555,9 +571,9 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
case IP_VERSION(8, 10, 0):
adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
- adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
+ adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
if (adev->umc.node_inst_num == 4)
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
else
@@ -569,23 +585,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not define special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
@@ -661,6 +660,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
+ amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
if (amdgpu_sriov_vf(adev))
@@ -833,6 +833,10 @@ static int gmc_v11_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -862,6 +866,12 @@ static int gmc_v11_0_sw_fini(void *handle)
static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ WREG32(hub->vm_contexts_disable, 0);
+ return;
+ }
}
/**
@@ -941,7 +951,6 @@ static int gmc_v11_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v11_0_gart_disable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ec291d28edff..b7dad4e67813 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -131,19 +131,12 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -258,7 +251,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -894,8 +887,7 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 979da6f510e8..402960b0174e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -156,16 +156,10 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -292,7 +286,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -389,10 +383,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1081,8 +1072,7 @@ static int gmc_v7_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 382dde1ce74c..504c1b34dab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -264,16 +264,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -474,7 +468,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
if (amdgpu_sriov_vf(adev)) {
tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
@@ -587,10 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1203,8 +1194,7 @@ static int gmc_v8_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 08d6cf79fb15..2fe21cefd772 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -49,8 +49,10 @@
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
+#include "gfxhub_v1_2.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
+#include "mmhub_v1_8.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
@@ -484,6 +486,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ continue;
+
if (j == AMDGPU_GFXHUB_0)
tmp = RREG32_SOC15_IP(GC, reg);
else
@@ -504,6 +514,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ continue;
+
if (j == AMDGPU_GFXHUB_0)
tmp = RREG32_SOC15_IP(GC, reg);
else
@@ -537,32 +555,49 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
const char *mmhub_cid;
const char *hub_name;
u64 addr;
+ uint32_t cam_index = 0;
+ int ret;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (retry_fault) {
- /* Returning 1 here also prevents sending the IV to the KFD */
+ if (adev->irq.retry_cam_enabled) {
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
- /* Process it onyl if it's the first fault for this address */
- if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ cam_index = entry->src_data[2] & 0x3ff;
+
+ ret = amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault);
+ WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
+ if (ret)
+ return 1;
+ } else {
+			/* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
entry->timestamp))
- return 1;
+ return 1;
- /* Delegate it to a different ring if the hardware hasn't
- * already done it.
- */
- if (entry->ih == &adev->irq.ih) {
- amdgpu_irq_delegate(adev, entry, 8);
- return 1;
- }
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
- /* Try to handle the recoverable page faults by filling page
- * tables
- */
- if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
- return 1;
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
+ return 1;
+ }
}
if (!printk_ratelimit())
@@ -641,6 +676,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
case IP_VERSION(2, 4, 0):
mmhub_cid = mmhub_client_ids_renoir[cid][rw];
break;
+ case IP_VERSION(1, 8, 0):
case IP_VERSION(9, 4, 2):
mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
break;
@@ -719,7 +755,8 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
return false;
return ((vmhub == AMDGPU_MMHUB_0 ||
@@ -970,9 +1007,9 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
+ bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
@@ -1023,10 +1060,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
uint32_t reg;
/* Do nothing because there's no lut register for mmhub1. */
- if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ if (ring->vm_hub == AMDGPU_MMHUB_1)
return;
- if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB_0)
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -1128,6 +1165,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
if (is_vram) {
if (bo_adev == adev) {
if (uncached)
@@ -1139,8 +1177,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
- if (adev->ip_versions[GC_HWIP][0] ==
- IP_VERSION(9, 4, 2) &&
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
@@ -1272,6 +1310,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v6_1_ras;
break;
@@ -1280,6 +1319,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v6_1_ras;
break;
@@ -1289,6 +1329,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
+ adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
if (!adev->gmc.xgmi.connected_to_cpu)
adev->umc.ras = &umc_v6_7_ras;
if (1 & adev->smuio.funcs->get_die_id(adev))
@@ -1299,23 +1340,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
@@ -1327,6 +1351,9 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
adev->mmhub.funcs = &mmhub_v1_7_funcs;
break;
+ case IP_VERSION(1, 8, 0):
+ adev->mmhub.funcs = &mmhub_v1_8_funcs;
+ break;
default:
adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
@@ -1349,45 +1376,47 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
/* mmhub ras is not available */
break;
}
-
- if (adev->mmhub.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
-
- strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
- adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
- adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm;
- }
}
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
+ else
+ adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}
static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
adev->hdp.ras = &hdp_v4_0_ras;
- amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
- adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm;
}
-static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
+ struct amdgpu_mca *mca = &adev->mca;
+
/* is UMC the right IP to check for MCA? Maybe DF? */
switch (adev->ip_versions[UMC_HWIP][0]) {
case IP_VERSION(6, 7, 0):
- if (!adev->gmc.xgmi.connected_to_cpu)
- adev->mca.funcs = &mca_v3_0_funcs;
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ mca->mp0.ras = &mca_v3_0_mp0_ras;
+ mca->mp1.ras = &mca_v3_0_mp1_ras;
+ mca->mpio.ras = &mca_v3_0_mpio_ras;
+ }
break;
default:
break;
}
}
+static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->gmc.xgmi.ras = &xgmi_ras;
+}
+
static int gmc_v9_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
@@ -1408,7 +1437,8 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_mmhub_ras_funcs(adev);
gmc_v9_0_set_gfxhub_funcs(adev);
gmc_v9_0_set_hdp_ras_funcs(adev);
- gmc_v9_0_set_mca_funcs(adev);
+ gmc_v9_0_set_mca_ras_funcs(adev);
+ gmc_v9_0_set_xgmi_ras_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -1417,10 +1447,6 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1536,10 +1562,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1549,6 +1572,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -1628,8 +1652,6 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gfxhub.funcs->init(adev);
adev->mmhub.funcs->init(adev);
- if (adev->mca.funcs)
- adev->mca.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -1680,6 +1702,7 @@ static int gmc_v9_0_sw_init(void *handle)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
adev->num_vmhubs = 2;
@@ -1776,12 +1799,17 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->vm_manager.first_kfd_vmid =
(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;
amdgpu_vm_manager_init(adev);
gmc_v9_0_save_registers(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1862,9 +1890,12 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
@@ -1911,11 +1942,15 @@ static int gmc_v9_0_hw_init(void *handle)
value = true;
if (!amdgpu_sriov_vf(adev)) {
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
}
- for (i = 0; i < adev->num_vmhubs; ++i)
+ for (i = 0; i < adev->num_vmhubs; ++i) {
+ if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0))
+ continue;
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
+ }
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
@@ -1939,7 +1974,8 @@ static int gmc_v9_0_hw_init(void *handle)
*/
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
@@ -1963,7 +1999,6 @@ static int gmc_v9_0_hw_fini(void *handle)
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, false);
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index adf89680f53e..71d1a2e3bac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,7 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2))
return;
if (!ring || !ring->funcs->emit_wreg)
@@ -160,11 +161,6 @@ struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
struct amdgpu_hdp_ras hdp_v4_0_ras = {
.ras_block = {
- .ras_comm = {
- .name = "hdp",
- .block = AMDGPU_RAS_BLOCK__HDP,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- },
.hw_ops = &hdp_v4_0_ras_hw_ops,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 7cd79a3844b2..b02e1cef78a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -119,7 +119,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
* ih_v6_0_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
- * @ih: amdgpu_ih_ring pointet
+ * @ih: amdgpu_ih_ring pointer
* @enable: true - enable the interrupts, false - disable the interrupts
*
* Toggle the interrupt ring buffer (IH_V6_0)
@@ -381,6 +381,7 @@ static void ih_v6_0_irq_disable(struct amdgpu_device *adev)
* ih_v6_0_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer. Also check for
@@ -425,6 +426,7 @@ out:
* ih_v6_0_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
*
*/
static void ih_v6_0_irq_rearm(struct amdgpu_device *adev,
@@ -450,6 +452,7 @@ static void ih_v6_0_irq_rearm(struct amdgpu_device *adev,
* ih_v6_0_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
*
* Set the IH ring buffer rptr.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 95548c512f4f..4ab90c7852c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -35,6 +35,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
@@ -49,10 +50,7 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.imu_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.imu_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, fw_name);
if (err)
goto out;
imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
@@ -77,7 +75,7 @@ out:
dev_err(adev->dev,
"gfx11: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.imu_fw);
+ amdgpu_ucode_release(&adev->gfx.imu_fw);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 9360204da7fb..a3076eb8af6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -376,7 +376,7 @@ static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring,
static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -485,6 +485,7 @@ int jpeg_v1_0_sw_init(void *handle)
return r;
ring = &adev->jpeg.inst->ring_dec;
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -548,7 +549,6 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB_0,
.extra_dw = 64,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index f3c1af5130ab..0eddf7c824a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -86,6 +86,7 @@ static int jpeg_v2_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -613,7 +614,7 @@ void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -762,7 +763,6 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index f2b743a93915..b040f51d9aa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -127,6 +127,10 @@ static int jpeg_v2_5_sw_init(void *handle)
ring = &adev->jpeg.inst[i].ring_dec;
ring->use_doorbell = true;
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+ ring->vm_hub = AMDGPU_MMHUB_1;
+ else
+ ring->vm_hub = AMDGPU_MMHUB_0;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
@@ -138,6 +142,10 @@ static int jpeg_v2_5_sw_init(void *handle)
adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
}
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -641,7 +649,6 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_1,
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
@@ -671,7 +678,6 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
@@ -806,6 +812,4 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index a1b751d9ac06..1c2292cc5f2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
break;
default:
harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
@@ -100,6 +101,7 @@ static int jpeg_v3_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -559,7 +561,6 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 3beb731b2ce5..77e1e64aa1d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -28,6 +28,7 @@
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0.h"
+#include "mmsch_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@@ -35,12 +36,15 @@
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
+static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
+
/**
* jpeg_v4_0_early_init - set function pointers
*
@@ -103,7 +107,9 @@ static int jpeg_v4_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ ring->doorbell_index = amdgpu_sriov_vf(adev) ? (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -113,6 +119,10 @@ static int jpeg_v4_0_sw_init(void *handle)
adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -149,16 +159,26 @@ static int jpeg_v4_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
int r;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+ if (amdgpu_sriov_vf(adev)) {
+ r = jpeg_v4_0_start_sriov(adev);
+ if (r)
+ return r;
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ jpeg_v4_0_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ } else {
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
@@ -177,11 +197,11 @@ static int jpeg_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
-
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
+ jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
return 0;
@@ -386,6 +406,120 @@ static int jpeg_v4_0_start(struct amdgpu_device *adev)
return 0;
}
+static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+
+ struct mmsch_v4_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v4_0_cmd_end end = { {0} };
+ struct mmsch_v4_0_init_header header;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ header.version = MMSCH_VERSION;
+ header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);
+
+ header.jpegdec.init_status = 0;
+ header.jpegdec.table_offset = 0;
+ header.jpegdec.table_size = 0;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
+ ring = &adev->jpeg.inst->ring_dec;
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
+ regUVD_JRBC_RB_SIZE), ring->ring_size / 4);
+
+ /* add end packet */
+ MMSCH_V4_0_INSERT_END();
+
+ /* refine header */
+ header.jpegdec.init_status = 0;
+ header.jpegdec.table_offset = header.total_size;
+ header.jpegdec.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Update init table header in memory */
+ size = sizeof(struct mmsch_v4_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ /* message MMSCH (in VCN[0]) to initialize this client
+ * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ /* 2, update vmid of descriptor */
+ tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ size = header.total_size;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ /* 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ param = 0x00000001;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->jpegdec.init_status;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n", resp, init_status);
+
+ return 0;
+
+}
+
/**
* jpeg_v4_0_stop - stop JPEG block
*
@@ -509,6 +643,11 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->jpeg.cur_state)
return 0;
@@ -577,7 +716,6 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
@@ -685,6 +823,4 @@ static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
index d4bd7d1d2649..6dae4a2e2767 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -51,19 +51,13 @@ static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
return -EINVAL;
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mp0",
- },
.hw_ops = &mca_v3_0_mp0_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
@@ -77,19 +71,13 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mp1",
- },
.hw_ops = &mca_v3_0_mp1_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
@@ -103,40 +91,14 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mpio",
- },
.hw_ops = &mca_v3_0_mpio_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
};
-
-
-static void mca_v3_0_init(struct amdgpu_device *adev)
-{
- struct amdgpu_mca *mca = &adev->mca;
-
- mca->mp0.ras = &mca_v3_0_mp0_ras;
- mca->mp1.ras = &mca_v3_0_mp1_ras;
- mca->mpio.ras = &mca_v3_0_mpio_ras;
- amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
- amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
- amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
- mca->mp0.ras_if = &mca->mp0.ras->ras_block.ras_comm;
- mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm;
- mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm;
-}
-
-const struct amdgpu_mca_funcs mca_v3_0_funcs = {
- .init = mca_v3_0_init,
-};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
index b899b86194c2..d3eaef0d7f2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
@@ -21,6 +21,8 @@
#ifndef __MCA_V3_0_H__
#define __MCA_V3_0_H__
-extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
+extern struct amdgpu_mca_ras_block mca_v3_0_mp0_ras;
+extern struct amdgpu_mca_ras_block mca_v3_0_mp1_ras;
+extern struct amdgpu_mca_ras_block mca_v3_0_mpio_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 614394118a53..2e2062636d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -379,89 +379,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
.resume_gang = mes_v10_1_resume_gang,
};
-static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
- const struct mes_firmware_header_v1_0 *mes_hdr;
- struct amdgpu_firmware_info *info;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 1, 10):
- chip_name = "navi10";
- break;
- case IP_VERSION(10, 3, 0):
- chip_name = "sienna_cichlid";
- break;
- default:
- BUG();
- }
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
- chip_name);
-
- err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
- if (err)
- return err;
-
- err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
- if (err) {
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
- return err;
- }
-
- mes_hdr = (const struct mes_firmware_header_v1_0 *)
- adev->mes.fw[pipe]->data;
- adev->mes.uc_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
- adev->mes.data_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- int ucode, ucode_data;
-
- if (pipe == AMDGPU_MES_SCHED_PIPE) {
- ucode = AMDGPU_UCODE_ID_CP_MES;
- ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
- } else {
- ucode = AMDGPU_UCODE_ID_CP_MES1;
- ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
- }
-
- info = &adev->firmware.ucode[ucode];
- info->ucode_id = ucode;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
- PAGE_SIZE);
-
- info = &adev->firmware.ucode[ucode_data];
- info->ucode_id = ucode_data;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
- PAGE_SIZE);
- }
-
- return 0;
-}
-
-static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
-}
-
static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@@ -1007,7 +924,6 @@ static int mes_v10_1_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- adev->mes.adev = adev;
adev->mes.funcs = &mes_v10_1_funcs;
adev->mes.kiq_hw_init = &mes_v10_1_kiq_hw_init;
@@ -1019,10 +935,6 @@ static int mes_v10_1_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
- r = mes_v10_1_init_microcode(adev, pipe);
- if (r)
- return r;
-
r = mes_v10_1_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1059,8 +971,7 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
-
- mes_v10_1_free_microcode(adev, pipe);
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@@ -1229,6 +1140,22 @@ static int mes_v10_1_resume(void *handle)
return amdgpu_mes_resume(adev);
}
+static int mes_v10_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int pipe, r;
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ continue;
+ r = amdgpu_mes_init_microcode(adev, pipe);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mes_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1241,6 +1168,7 @@ static int mes_v10_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
.name = "mes_v10_1",
+ .early_init = mes_v10_0_early_init,
.late_init = mes_v10_0_late_init,
.sw_init = mes_v10_1_sw_init,
.sw_fini = mes_v10_1_sw_fini,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 970b066b37bb..45280f047180 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -33,13 +33,20 @@
#include "mes_v11_api_def.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -196,7 +203,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
- mes_add_queue_pkt.trap_en = 1;
/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
@@ -459,80 +465,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.misc_op = mes_v11_0_misc_op,
};
-static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- char fw_name[30];
- char ucode_prefix[30];
- int err;
- const struct mes_firmware_header_v1_0 *mes_hdr;
- struct amdgpu_firmware_info *info;
-
- amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- ucode_prefix);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
- ucode_prefix);
-
- err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
- if (err)
- return err;
-
- err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
- if (err) {
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
- return err;
- }
-
- mes_hdr = (const struct mes_firmware_header_v1_0 *)
- adev->mes.fw[pipe]->data;
- adev->mes.uc_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
- adev->mes.data_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- int ucode, ucode_data;
-
- if (pipe == AMDGPU_MES_SCHED_PIPE) {
- ucode = AMDGPU_UCODE_ID_CP_MES;
- ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
- } else {
- ucode = AMDGPU_UCODE_ID_CP_MES1;
- ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
- }
-
- info = &adev->firmware.ucode[ucode];
- info->ucode_id = ucode;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
- PAGE_SIZE);
-
- info = &adev->firmware.ucode[ucode_data];
- info->ucode_id = ucode_data;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
- PAGE_SIZE);
- }
-
- return 0;
-}
-
-static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
-}
-
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@@ -549,7 +481,9 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.ucode_fw_obj[pipe],
&adev->mes.ucode_fw_gpu_addr[pipe],
(void **)&adev->mes.ucode_fw_ptr[pipe]);
@@ -582,7 +516,9 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
(void **)&adev->mes.data_fw_ptr[pipe]);
@@ -1087,7 +1023,6 @@ static int mes_v11_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- adev->mes.adev = adev;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
@@ -1100,10 +1035,6 @@ static int mes_v11_0_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
- r = mes_v11_0_init_microcode(adev, pipe);
- if (r)
- return r;
-
r = mes_v11_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1140,8 +1071,7 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
-
- mes_v11_0_free_microcode(adev, pipe);
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@@ -1164,13 +1094,14 @@ static int mes_v11_0_sw_fini(void *handle)
return 0;
}
-static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev)
+static void mes_v11_0_kiq_dequeue(struct amdgpu_ring *ring)
{
uint32_t data;
int i;
+ struct amdgpu_device *adev = ring->adev;
mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);
+ soc21_grbm_select(adev, 3, ring->pipe, 0, 0);
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
@@ -1196,8 +1127,6 @@ static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
-
- adev->mes.ring.sched.ready = false;
}
static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
@@ -1214,6 +1143,16 @@ static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}
+static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+	/* tell RLC that the KIQ has been dequeued */
+ tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
+ tmp &= ~RLC_CP_SCHEDULERS__scheduler0_MASK;
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+}
+
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
@@ -1251,11 +1190,17 @@ failure:
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
- if (adev->mes.ring.sched.ready)
- mes_v11_0_kiq_dequeue_sched(adev);
+ if (adev->mes.ring.sched.ready) {
+ mes_v11_0_kiq_dequeue(&adev->mes.ring);
+ adev->mes.ring.sched.ready = false;
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring);
+ mes_v11_0_kiq_clear(adev);
+ }
- if (!amdgpu_sriov_vf(adev))
- mes_v11_0_enable(adev, false);
+ mes_v11_0_enable(adev, false);
return 0;
}
@@ -1338,12 +1283,28 @@ static int mes_v11_0_resume(void *handle)
return amdgpu_mes_resume(adev);
}
+static int mes_v11_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int pipe, r;
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ continue;
+ r = amdgpu_mes_init_microcode(adev, pipe);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
@@ -1352,6 +1313,7 @@ static int mes_v11_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
+ .early_init = mes_v11_0_early_init,
.late_init = mes_v11_0_late_init,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
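The mes_v11_0.c hunks above retire the per-IP loader in favour of the shared amdgpu_mes_init_microcode()/amdgpu_ucode_release() helpers, move the firmware request to early_init, and advertise the new *_mes_2.bin scheduler images. Below is a minimal sketch of the filename selection those MODULE_FIRMWARE entries imply; the fallback order and the helper name are assumptions for illustration, not taken from this patch:

/* Illustration only: pick the MES firmware name for a pipe. The real logic
 * lives in amdgpu_mes_init_microcode() in amdgpu_mes.c and may differ.
 */
static void mes_fw_name_sketch(char *buf, size_t len, const char *ucode_prefix,
			       bool sched_pipe, bool has_mes_2_ucode)
{
	if (!sched_pipe)		/* KIQ pipe keeps the *_mes1.bin image */
		snprintf(buf, len, "amdgpu/%s_mes1.bin", ucode_prefix);
	else if (has_mes_2_ucode)	/* new scheduler image */
		snprintf(buf, len, "amdgpu/%s_mes_2.bin", ucode_prefix);
	else				/* legacy scheduler image */
		snprintf(buf, len, "amdgpu/%s_mes.bin", ucode_prefix);
}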
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3e51e773f92b..15e7cbeae75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -114,7 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
return;
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 6fa7090bc6cb..73afbf2facc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -134,7 +134,7 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
new file mode 100644
index 000000000000..342d1702104c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "mmhub_v1_8.h"
+
+#include "mmhub/mmhub_1_8_0_offset.h"
+#include "mmhub/mmhub_1_8_0_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+#include "soc15.h"
+
+#define regVM_L2_CNTL3_DEFAULT 0x80100007
+#define regVM_L2_CNTL4_DEFAULT 0x000000c1
+
+static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
+{
+ u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
+ u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
+
+ base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+ top <<= 24;
+
+ adev->gmc.fb_start = base;
+ adev->gmc.fb_end = top;
+
+ return base;
+}
+
+static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
+}
+
+static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base;
+
+ if (adev->gmc.pdb0_bo)
+ pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
+ else
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);
+
+	/* If GART is used for FB translation, the vmid0 page table covers both
+	 * vram and system memory (gart)
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+
+ } else {
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
+}
+
+static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+ uint32_t tmp;
+
+ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+	/* In the case of squeezing vram into the GART aperture, we don't use the
+	 * FB aperture and AGP aperture. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Set default page address. */
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+}
+
+static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+ /* XXX for emulation, Refer to closed source code.*/
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp);
+}
+
+static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp);
+}
+
+static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, 0x0000000F);
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+}
+
+static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned num_level, block_size;
+ uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* On Aldebaran, XNACK can be enabled in the SQ per-process.
+ * Retry faults need to be enabled for that to work.
+ */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ 1);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+}
+
+static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned i;
+
+ for (i = 0; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ /*
+		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+		 * VF copy registers and vbios post doesn't program them; for
+		 * SRIOV the driver needs to program them
+ */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ mmhub_v1_8_init_gart_aperture_regs(adev);
+ mmhub_v1_8_init_system_aperture_regs(adev);
+ mmhub_v1_8_init_tlb_regs(adev);
+ mmhub_v1_8_init_cache_regs(adev);
+
+ mmhub_v1_8_enable_system_domain(adev);
+ mmhub_v1_8_disable_identity_aperture(adev);
+ mmhub_v1_8_setup_vmid_config(adev);
+ mmhub_v1_8_program_invalidation(adev);
+
+ return 0;
+}
+
+static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0);
+ }
+}
+
+/**
+ * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+{
+ u32 tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+static void mmhub_v1_8_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+}
+
+static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
+ .get_fb_location = mmhub_v1_8_get_fb_location,
+ .init = mmhub_v1_8_init,
+ .gart_enable = mmhub_v1_8_gart_enable,
+ .set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
+ .gart_disable = mmhub_v1_8_gart_disable,
+ .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
+ .set_clockgating = mmhub_v1_8_set_clockgating,
+ .get_clockgating = mmhub_v1_8_get_clockgating,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h
new file mode 100644
index 000000000000..0bb36200e4e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __MMHUB_V1_8_H__
+#define __MMHUB_V1_8_H__
+
+extern const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs;
+
+#endif
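The new mmhub_v1_8 block only takes effect once its function table is registered elsewhere. A hedged sketch of the assumed wiring follows (the helper name and the IP-version check are illustrative assumptions, not part of this patch):

/* Illustration only: assumed registration of the MMHUB 1.8 callbacks,
 * typically done by the GMC code when the MMHUB IP version is discovered.
 */
static void mmhub_v1_8_hookup_sketch(struct amdgpu_device *adev)
{
	if (adev->ip_versions[MMHUB_HWIP][0] == IP_VERSION(1, 8, 0))
		adev->mmhub.funcs = &mmhub_v1_8_funcs;
}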
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 0e664d0cc8d5..278e32db878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -234,7 +234,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 4638ea7c2eec..fcf2813e70db 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -164,7 +164,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 16cc82215e2e..17a792616979 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -169,26 +169,27 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- if (!amdgpu_sriov_vf(adev)) {
- /*
- * the new L1 policy will block SRIOV guest from writing
- * these regs, and they will be programed at host.
- * so skip programing these regs.
- */
- /* Disable AGP. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
-
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
- }
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /*
+	 * the new L1 policy will block the SRIOV guest from writing
+	 * these regs, and they will be programmed at the host,
+	 * so skip programming these regs.
+ */
+ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
@@ -516,6 +517,9 @@ static void mmhub_v3_0_init(struct amdgpu_device *adev)
hub->vm_l2_bank_select_reserved_cid2 =
SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_BANK_SELECT_RESERVED_CID2);
+ hub->vm_contexts_disable =
+ SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXTS_DISABLE);
+
hub->vmhub_funcs = &mmhub_v3_0_vmhub_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 6bdf2ef0298d..26509b6b8c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -183,12 +183,12 @@ static void mmhub_v3_0_1_init_system_aperture_regs(struct amdgpu_device *adev)
*/
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
index 45465acaa943..26abbc6a47ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
@@ -162,10 +162,10 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- /* Disable AGP. */
+ /* Program the AGP BAR */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
if (!amdgpu_sriov_vf(adev)) {
/*
@@ -175,13 +175,13 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev)
*/
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 445cb06b9d26..72083e96222f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -136,7 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
index 0312c71c3af9..83653a50a1a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
@@ -38,6 +38,11 @@
#define MMSCH_VF_MAILBOX_RESP__OK 0x1
#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+
enum mmsch_v4_0_command_type {
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 12906ba74462..63725b2ebc03 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -404,6 +404,11 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
+static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
+{
+ xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
@@ -411,4 +416,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.req_init_data = xgpu_ai_request_init_data,
+ .ras_poison_handler = xgpu_ai_ras_poison_handler,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index fa7e13e0459e..af1a784696bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -39,6 +39,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
+ IDH_RAS_POISON = 202,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e07757eea7ad..cae1aaa4ddb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -426,6 +426,11 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
+static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
+{
+ xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -433,4 +438,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
+ .ras_poison_handler = xgpu_nv_ras_poison_handler,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 73887b0aa1d6..d0221ce08769 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -39,6 +39,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
+ IDH_RAS_POISON = 202,
};
enum idh_event {
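Both mailbox backends now export a ras_poison_handler that forwards IDH_RAS_POISON to the host. A hedged sketch of the assumed guest-side dispatch point follows; only the virt_ops member itself comes from this patch, the wrapper below is illustrative:

/* Illustration only: how the RAS code is expected to reach the new op. */
static void virt_ras_poison_dispatch_sketch(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && adev->virt.ops &&
	    adev->virt.ops->ras_poison_handler)
		adev->virt.ops->ras_poison_handler(adev);
}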
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 15eb3658d70e..d5ed9e0e1a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -26,6 +26,7 @@
#include "nbio/nbio_4_3_0_offset.h"
#include "nbio/nbio_4_3_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
static void nbio_v4_3_remap_hdp_registers(struct amdgpu_device *adev)
@@ -337,7 +338,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
- return;
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
+ uint32_t data;
+
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+ data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+ }
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
@@ -532,3 +539,81 @@ const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = {
.remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
.get_rom_offset = nbio_v4_3_get_rom_offset,
};
+
+static int nbio_v4_3_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+	/* The ras_controller_irq enablement should be done in the PSP BL when it
+	 * tries to enable the RAS feature. The driver only needs to set the correct
+	 * interrupt vector for the bare-metal and SRIOV use cases respectively.
+ */
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+
+ return 0;
+}
+
+static int nbio_v4_3_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the IH cookie for err_event_athub_irq should be written
+	 * to the BIF ring. Since the BIF ring is not enabled, just leave the
+	 * process callback as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v4_3_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v4_3_set_ras_err_event_athub_irq_state,
+ .process = nbio_v4_3_process_err_event_athub_irq,
+};
+
+static void nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_int_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static int nbio_v4_3_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v4_3_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt
+ * nbio v4_3 uses the same irq source as nbio v7_4 */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+struct amdgpu_nbio_ras nbio_v4_3_ras = {
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_err_event_athub_interrupt = nbio_v4_3_init_ras_err_event_athub_interrupt,
+};
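As with the other per-IP RAS blocks, the exported nbio_v4_3_ras object only becomes reachable once SoC setup code points adev->nbio.ras at it so the common RAS core can invoke the athub handlers above. A hedged sketch of that assumed registration (the helper name and call site are illustrative):

/* Illustration only: assumed registration of the NBIO 4.3 RAS callbacks. */
static void nbio_v4_3_ras_hookup_sketch(struct amdgpu_device *adev)
{
	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0))
		adev->nbio.ras = &nbio_v4_3_ras;
}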
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
index 711999ceedf4..399037cdf4fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
@@ -29,5 +29,6 @@
extern const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v4_3_funcs;
extern const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs;
+extern struct amdgpu_nbio_ras nbio_v4_3_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 31776b12e4c4..4ef1fa4603c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -394,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
break;
}
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 1):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+ data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+ break;
+ }
+
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 19455a725939..685abf57ffdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -238,7 +238,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 8);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
new file mode 100644
index 000000000000..24d12075ca3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "nbio_v7_9.h"
+#include "amdgpu_ras.h"
+
+#include "nbio/nbio_7_9_0_offset.h"
+#include "nbio/nbio_7_9_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
+static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0);
+
+ return tmp;
+}
+
+static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
+ BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
+ else
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
+}
+
+static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev)
+{
+ return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
+}
+
+static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size)
+{
+ u32 doorbell_range = 0, doorbell_ctrl = 0;
+
+ doorbell_range =
+ REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_OFFSET_ENTRY, doorbell_index);
+ doorbell_range =
+ REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_SIZE_ENTRY, doorbell_size);
+ doorbell_ctrl =
+ REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_ENABLE, 1);
+ doorbell_ctrl =
+ REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size);
+
+ switch (instance) {
+ case 0:
+ WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1, doorbell_range);
+
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID, 0xe);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xe);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
+ 0x1);
+ WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL, doorbell_ctrl);
+ break;
+ case 1:
+ WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2, doorbell_range);
+
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID, 0x8);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x8);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
+ 0x2);
+ WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_ctrl);
+ break;
+ case 2:
+ WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3, doorbell_range);
+
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID, 0x9);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x9);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
+ 0x8);
+ WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_ctrl);
+ break;
+ case 3:
+ WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4, doorbell_range);
+
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID, 0xa);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xa);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
+ 0x9);
+ WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_6_CTRL, doorbell_ctrl);
+ break;
+ default:
+ break;
+ };
+
+ return;
+}
+
+static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+ u32 doorbell_range = 0, doorbell_ctrl = 0;
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
+ doorbell_index);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_SIZE_ENTRY,
+ 0x8);
+
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_ENABLE, 1);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID, 0x4);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
+ } else {
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0);
+ doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE, 0);
+ }
+
+ WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, doorbell_range);
+ WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_ctrl);
+}
+
+static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+ /* Enable to allow doorbell pass thru on pre-silicon bare-metal */
+ WREG32_SOC15(NBIO, 0, regBIFC_DOORBELL_ACCESS_EN_PF, 0xfffff);
+ WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
+ BIF_DOORBELL_APER_EN, enable ? 1 : 0);
+}
+
+static void nbio_v7_9_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp = 0;
+
+ if (enable) {
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+ DOORBELL_SELFRING_GPA_APER_EN, 1) |
+ REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+ DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+ REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+ DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+ WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
+ lower_32_bits(adev->doorbell.base));
+ WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+ upper_32_bits(adev->doorbell.base));
+ }
+
+ WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
+}
+
+static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
+{
+ u32 ih_doorbell_range = 0, ih_doorbell_ctrl = 0;
+
+ if (use_doorbell) {
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
+ doorbell_index);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_SIZE_ENTRY,
+ 0x4);
+
+ ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_ENABLE, 1);
+ ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID, 0);
+ ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET, 0);
+ ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE, 0x4);
+ ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0);
+ } else {
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0);
+ ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
+ S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE, 0);
+ }
+
+ WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_0, ih_doorbell_range);
+ WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, ih_doorbell_ctrl);
+}
+
+
+static void nbio_v7_9_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void nbio_v7_9_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void nbio_v7_9_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+}
+
+static void nbio_v7_9_ih_control(struct amdgpu_device *adev)
+{
+ u32 interrupt_cntl;
+
+ /* setup interrupt control */
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
+ interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
+ /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+ * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl =
+ REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+ /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl =
+ REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
+}
+
+static u32 nbio_v7_9_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v7_9_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v7_9_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2);
+}
+
+static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
+}
+
+const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = {
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+ .ref_and_mask_sdma2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
+ .ref_and_mask_sdma3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
+ .ref_and_mask_sdma4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
+ .ref_and_mask_sdma5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
+ .ref_and_mask_sdma6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
+ .ref_and_mask_sdma7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+};
+
+static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15_PREREG(NBIO, 0, BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
+}
+
+const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
+ .get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v7_9_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v7_9_get_pcie_data_offset,
+ .get_rev_id = nbio_v7_9_get_rev_id,
+ .mc_access_enable = nbio_v7_9_mc_access_enable,
+ .get_memsize = nbio_v7_9_get_memsize,
+ .sdma_doorbell_range = nbio_v7_9_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v7_9_vcn_doorbell_range,
+ .enable_doorbell_aperture = nbio_v7_9_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v7_9_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v7_9_ih_doorbell_range,
+ .enable_doorbell_interrupt = nbio_v7_9_enable_doorbell_interrupt,
+ .update_medium_grain_clock_gating = nbio_v7_9_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_9_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_9_get_clockgating_state,
+ .ih_control = nbio_v7_9_ih_control,
+ .remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
new file mode 100644
index 000000000000..8e04eb484328
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NBIO_V7_9_H__
+#define __NBIO_V7_9_H__
+
+#include "soc15_common.h"
+
+extern const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg;
+extern const struct amdgpu_nbio_funcs nbio_v7_9_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6853b93ac82e..0fb6013441f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -98,7 +98,17 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
};
/* Sienna Cichlid */
-static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
+static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_encode = {
+ .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
+ .codec_array = sc_video_codecs_encode_array,
+};
+
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
@@ -110,20 +120,37 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs sc_video_codecs_decode =
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] =
+{
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 =
+{
+ .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0),
+ .codec_array = sc_video_codecs_decode_array_vcn0,
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
{
- .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
- .codec_array = sc_video_codecs_decode_array,
+ .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1),
+ .codec_array = sc_video_codecs_decode_array_vcn1,
};
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
-static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
@@ -135,16 +162,33 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] =
+{
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
.codec_array = sriov_sc_video_codecs_encode_array,
};
-static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
+static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 =
{
- .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
- .codec_array = sriov_sc_video_codecs_decode_array,
+ .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0),
+ .codec_array = sriov_sc_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 =
+{
+ .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1),
+ .codec_array = sriov_sc_video_codecs_decode_array_vcn1,
};
/* Beige Goby*/
@@ -181,33 +225,50 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = {
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
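+	/* all VCN instances harvested: no video codecs to report */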
+ if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+ return -EINVAL;
+
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
if (amdgpu_sriov_vf(adev)) {
- if (encode)
- *codecs = &sriov_sc_video_codecs_encode;
- else
- *codecs = &sriov_sc_video_codecs_decode;
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ if (encode)
+ *codecs = &sriov_sc_video_codecs_encode;
+ else
+ *codecs = &sriov_sc_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sriov_sc_video_codecs_encode;
+ else
+ *codecs = &sriov_sc_video_codecs_decode_vcn0;
+ }
} else {
- if (encode)
- *codecs = &nv_video_codecs_encode;
- else
- *codecs = &sc_video_codecs_decode;
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ if (encode)
+ *codecs = &sc_video_codecs_encode;
+ else
+ *codecs = &sc_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sc_video_codecs_encode;
+ else
+ *codecs = &sc_video_codecs_decode_vcn0;
+ }
}
return 0;
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 0, 2):
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
- *codecs = &sc_video_codecs_decode;
+ *codecs = &sc_video_codecs_decode_vcn0;
return 0;
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;
@@ -229,47 +290,6 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
-/*
- * Indirect registers accessor
- */
-static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
-
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -393,9 +413,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
en = &nv_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = nv_get_register_value(adev,
@@ -509,24 +530,9 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
-static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void nv_program_aspm(struct amdgpu_device *adev)
{
- if (!amdgpu_device_should_use_aspm(adev))
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;
if (!(adev->flags & AMD_IS_APU) &&
@@ -535,13 +541,6 @@ static void nv_program_aspm(struct amdgpu_device *adev)
}
-static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
-{
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
const struct amdgpu_ip_block_version nv_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -556,11 +555,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
-static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
return true;
@@ -686,10 +680,10 @@ static int nv_common_early_init(void *handle)
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &nv_pcie_rreg;
- adev->pcie_wreg = &nv_pcie_wreg;
- adev->pcie_rreg64 = &nv_pcie_rreg64;
- adev->pcie_wreg64 = &nv_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
@@ -702,7 +696,7 @@ static int nv_common_early_init(void *handle)
adev->asic_funcs = &nv_asic_funcs;
- adev->rev_id = nv_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xff;
/* TODO: split the GC and PG flags based on the relevant IP version for which
* they are relevant.
@@ -993,11 +987,26 @@ static int nv_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
- amdgpu_virt_update_sriov_video_codec(adev,
- sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
- sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_sc_video_codecs_encode_array,
+ ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
+ sriov_sc_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+ } else {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_sc_video_codecs_encode_array,
+ ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
+ sriov_sc_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
+ }
}
+	/* Enable the selfring doorbell aperture late because the doorbell BAR
+	 * aperture will change if the BAR is resized successfully in gmc sw_init.
+	 */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
return 0;
}
@@ -1026,8 +1035,6 @@ static int nv_common_hw_init(void *handle)
if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
- /* enable pcie gen2/3 link */
- nv_pcie_gen3_enable(adev);
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
@@ -1039,7 +1046,7 @@ static int nv_common_hw_init(void *handle)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
- nv_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0;
}
@@ -1048,8 +1055,13 @@ static int nv_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- nv_enable_doorbell_aperture(adev, false);
+	/* Disable the doorbell aperture and the selfring doorbell aperture
+	 * separately in hw_fini because nv_enable_doorbell_aperture
+	 * has been removed and there is no need to delay disabling the
+	 * selfring doorbell aperture.
+	 */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 9de46fa8f46c..e1b7fca09666 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -47,83 +47,17 @@ MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[30];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- default: BUG();
- }
-
- err = psp_init_asd_microcode(psp, chip_name);
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
- goto out;
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v10.0: Failed to load firmware \"%s\"\n",
- fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)
- adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
-
- adev->psp.securedisplay_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- }
-
- return 0;
-
-out2:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
-out:
- if (err) {
- dev_err(adev->dev,
- "psp v10.0: Failed to load firmware \"%s\"\n",
- fw_name);
- }
-
- return err;
+ return err;
+
+ return psp_init_ta_microcode(psp, ucode_prefix);
}
static int psp_v10_0_ring_create(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd3e3e23a939..8f84fe40abbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -88,159 +88,56 @@ MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin");
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[PSP_FW_NAME_LEN];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(11, 0, 2):
- chip_name = "vega20";
- break;
- case IP_VERSION(11, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(11, 0, 5):
- chip_name = "navi14";
- break;
- case IP_VERSION(11, 0, 9):
- chip_name = "navi12";
- break;
- case IP_VERSION(11, 0, 4):
- chip_name = "arcturus";
- break;
- case IP_VERSION(11, 0, 7):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(11, 0, 11):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(11, 5, 0):
- chip_name = "vangogh";
- break;
- case IP_VERSION(11, 0, 12):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(11, 0, 13):
- chip_name = "beige_goby";
- break;
- default:
- BUG();
- }
-
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.xgmi_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->xgmi.fw_version);
- adev->psp.xgmi_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->xgmi.size_bytes);
- adev->psp.xgmi_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ras_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->ras.fw_version);
- adev->psp.ras_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->ras.size_bytes);
- adev->psp.ras_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->ras.offset_bytes);
- }
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(
- ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context
- .bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
- }
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
break;
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- err = psp_init_sos_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(11, 5, 0):
- err = psp_init_asd_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_toc_microcode(psp, ucode_prefix);
break;
default:
BUG();
}
- return 0;
-
-out2:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 8ed2281b6557..fcd708eae75c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -48,83 +48,25 @@ MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");
static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[30];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v12.0: Failed to load firmware \"%s\"\n",
- fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)
- adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
-
- if (adev->apu_flags & AMD_APU_IS_RENOIR) {
- adev->psp.securedisplay_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
- }
- }
-
- return 0;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
-out:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- if (err) {
- dev_err(adev->dev,
- "psp v12.0: Failed to load firmware \"%s\"\n",
- fw_name);
- }
+ /* only supported on renoir */
+ if (!(adev->apu_flags & AMD_APU_IS_RENOIR))
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
- return err;
+ return 0;
}
static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index e6a26a7e5e5e..caee76ab7110 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -70,32 +71,19 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
char ucode_prefix[30];
int err = 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- chip_name = "aldebaran";
- break;
- case IP_VERSION(13, 0, 1):
- case IP_VERSION(13, 0, 3):
- chip_name = "yellow_carp";
- break;
- default:
- amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- break;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
/* It's not necessary to load ras ta on Guest side */
if (!amdgpu_sriov_vf(adev)) {
- err = psp_init_ta_microcode(&adev->psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
}
@@ -105,21 +93,22 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
/* It's not necessary to load ras ta on Guest side */
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index 9d4e24e518e8..d5ba58eba3e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -35,25 +35,17 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_4_ta.bin");
static int psp_v13_0_4_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
char ucode_prefix[30];
int err = 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 4):
- amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 4):
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 157147c6c94e..f6b75e3e47ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -57,26 +57,18 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
+ char ucode_prefix[30];
int err = 0;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- chip_name = "vega10";
- break;
- case CHIP_VEGA12:
- chip_name = "vega12";
- break;
- default: BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c52d246a1d96..fd2a7b66ac56 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -113,10 +113,9 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
@@ -151,10 +150,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -176,10 +172,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 486d9b5c1b9e..e572389089d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -250,10 +250,9 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
@@ -309,10 +308,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -332,10 +328,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4d780e4430e7..9295ac7edd56 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -575,60 +575,17 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
// vega10 real chip need to use PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
- char fw_name[30];
int ret, i;
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(4, 0, 0):
- chip_name = "vega10";
- break;
- case IP_VERSION(4, 0, 1):
- chip_name = "vega12";
- break;
- case IP_VERSION(4, 2, 0):
- chip_name = "vega20";
- break;
- case IP_VERSION(4, 1, 0):
- case IP_VERSION(4, 1, 1):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- case IP_VERSION(4, 2, 2):
- chip_name = "arcturus";
- break;
- case IP_VERSION(4, 1, 2):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case IP_VERSION(4, 4, 0):
- chip_name = "aldebaran";
- break;
- default:
- BUG();
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
 /* Arcturus & Aldebaran will leverage the same FW memory
 for every SDMA instance */
- ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
} else {
- ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
@@ -1866,6 +1823,15 @@ static int sdma_v4_0_sw_init(void *handle)
/* doorbell size is 2 dwords, get DWORD offset */
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+	/*
+	 * On Arcturus, SDMA instances 5~7 have a different vmhub
+	 * type (AMDGPU_MMHUB_1).
+	 */
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+ ring->vm_hub = AMDGPU_MMHUB_1;
+ else
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1884,6 +1850,11 @@ static int sdma_v4_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
ring->doorbell_index += 0x400;
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+ ring->vm_hub = AMDGPU_MMHUB_1;
+ else
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
@@ -1894,6 +1865,11 @@ static int sdma_v4_0_sw_init(void *handle)
}
}
+ if (amdgpu_sdma_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
+ return -EINVAL;
+ }
+
return r;
}
@@ -1908,7 +1884,7 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
amdgpu_sdma_destroy_inst_ctx(adev, true);
else
@@ -1941,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle)
return 0;
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
sdma_v4_0_ctx_switch_enable(adev, false);
@@ -2332,44 +2310,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
- .get_rptr = sdma_v4_0_ring_get_rptr,
- .get_wptr = sdma_v4_0_ring_get_wptr,
- .set_wptr = sdma_v4_0_ring_set_wptr,
- .emit_frame_size =
- 6 + /* sdma_v4_0_ring_emit_hdp_flush */
- 3 + /* hdp invalidate */
- 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
- /* sdma_v4_0_ring_emit_vm_flush */
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
- 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
- .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
- .emit_ib = sdma_v4_0_ring_emit_ib,
- .emit_fence = sdma_v4_0_ring_emit_fence,
- .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
- .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
- .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
- .test_ring = sdma_v4_0_ring_test_ring,
- .test_ib = sdma_v4_0_ring_test_ib,
- .insert_nop = sdma_v4_0_ring_insert_nop,
- .pad_ib = sdma_v4_0_ring_pad_ib,
- .emit_wreg = sdma_v4_0_ring_emit_wreg,
- .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-/*
- * On Arcturus, SDMA instance 5~7 has a different vmhub type(AMDGPU_MMHUB_1).
- * So create a individual constant ring_funcs for those instances.
- */
-static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
- .type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
- .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
- .support_64bit_ptrs = true,
- .secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
.set_wptr = sdma_v4_0_ring_set_wptr,
@@ -2402,40 +2342,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
- .get_rptr = sdma_v4_0_ring_get_rptr,
- .get_wptr = sdma_v4_0_page_ring_get_wptr,
- .set_wptr = sdma_v4_0_page_ring_set_wptr,
- .emit_frame_size =
- 6 + /* sdma_v4_0_ring_emit_hdp_flush */
- 3 + /* hdp invalidate */
- 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
- /* sdma_v4_0_ring_emit_vm_flush */
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
- 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
- .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
- .emit_ib = sdma_v4_0_ring_emit_ib,
- .emit_fence = sdma_v4_0_ring_emit_fence,
- .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
- .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
- .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
- .test_ring = sdma_v4_0_ring_test_ring,
- .test_ib = sdma_v4_0_ring_test_ib,
- .insert_nop = sdma_v4_0_ring_insert_nop,
- .pad_ib = sdma_v4_0_ring_pad_ib,
- .emit_wreg = sdma_v4_0_ring_emit_wreg,
- .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
- .type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
- .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
- .support_64bit_ptrs = true,
- .secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,
.set_wptr = sdma_v4_0_page_ring_set_wptr,
@@ -2467,19 +2373,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
- adev->sdma.instance[i].ring.funcs =
- &sdma_v4_0_ring_funcs_2nd_mmhub;
- else
- adev->sdma.instance[i].ring.funcs =
- &sdma_v4_0_ring_funcs;
+ adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
if (adev->sdma.has_page_queue) {
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
- adev->sdma.instance[i].page.funcs =
- &sdma_v4_0_page_ring_funcs_2nd_mmhub;
- else
- adev->sdma.instance[i].page.funcs =
+ adev->sdma.instance[i].page.funcs =
&sdma_v4_0_page_ring_funcs;
adev->sdma.instance[i].page.me = i;
}
@@ -2731,22 +2628,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
break;
}
- if (adev->sdma.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
-
- strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma");
- adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
- adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->sdma.ras->ras_block.ras_late_init)
- adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->sdma.ras->ras_block.ras_cb)
- adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
- }
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
new file mode 100644
index 000000000000..64dcaa2670dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -0,0 +1,1967 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+
+#include "sdma/sdma_4_4_2_offset.h"
+#include "sdma/sdma_4_4_2_sh_mask.h"
+
+#include "soc15_common.h"
+#include "soc15.h"
+#include "vega10_sdma_pkt_open.h"
+
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
+#include "amdgpu_ras.h"
+
+MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+
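+/* access SDMA registers of a given instance via its per-instance register base */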
+#define WREG32_SDMA(instance, offset, value) \
+ WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+ RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
+
+static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
+{
+ return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset);
+}
+
+static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
+{
+ switch (seq_num) {
+ case 0:
+ return SOC15_IH_CLIENTID_SDMA0;
+ case 1:
+ return SOC15_IH_CLIENTID_SDMA1;
+ case 2:
+ return SOC15_IH_CLIENTID_SDMA2;
+ case 3:
+ return SOC15_IH_CLIENTID_SDMA3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
+{
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ return 0;
+ case SOC15_IH_CLIENTID_SDMA1:
+ return 1;
+ case SOC15_IH_CLIENTID_SDMA2:
+ return 2;
+ case SOC15_IH_CLIENTID_SDMA3:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 4, 2):
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * sdma_v4_4_2_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) {
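+			/* all SDMA 4.4.2 instances share one firmware image, so load it only once */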
+ ret = amdgpu_sdma_init_microcode(adev, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdma_v4_4_2_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ u64 *rptr;
+
+ /* XXX check if swapping is necessary on BE */
+ rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
+
+ DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
+ return ((*rptr) >> 2);
+}
+
+/**
+ * sdma_v4_4_2_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
+ } else {
+ wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+ ring->me, wptr);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_4_2_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware.
+ */
+static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ DRM_DEBUG("Setting write pointer\n");
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
+ upper_32_bits(ring->wptr << 2));
+ }
+}
+
+/**
+ * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ } else {
+ wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware.
+ */
+static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ uint64_t wptr = ring->wptr << 2;
+
+ WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
+ lower_32_bits(wptr));
+ WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
+ upper_32_bits(wptr));
+ }
+}
+
+static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ amdgpu_ring_write(ring, ring->funcs->nop |
+ SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
+ * @ib: IB object to schedule
+ * @flags: unused
+ *
+ * Schedule an IB in the DMA ring.
+ */
+static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+	/* IB packet must end on an 8 DW boundary */
+ sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+
+}
+
+static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
+ int mem_space, int hdp,
+ uint32_t addr0, uint32_t addr1,
+ uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
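+	/* emit a POLL_REGMEM packet: poll a memory location or register pair
+	 * until (value & mask) == ref, retrying at the given poll interval
+	 */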
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ if (mem_space) {
+ /* memory */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ } else {
+ /* registers */
+ amdgpu_ring_write(ring, addr0 << 2);
+ amdgpu_ring_write(ring, addr1 << 2);
+ }
+ amdgpu_ring_write(ring, ref); /* reference */
+ amdgpu_ring_write(ring, mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ ref_and_mask, ref_and_mask, 10);
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed.
+ */
+static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ /* write the fence */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ /* zero in first two bits */
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ /* optionally write high bits as well */
+ if (write64bit) {
+ addr += 4;
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ /* zero in first two bits */
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ }
+
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
+}
+
+
+/**
+ * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers.
+ */
+static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ u32 rb_cntl, ib_cntl;
+ int i, unset = 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].ring;
+
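+		/* disable the TTM buffer funcs only once, when the ring backing them is stopped */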
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = 1;
+ }
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ }
+}
+
+/**
+ * sdma_v4_4_2_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues.
+ */
+static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+}
+
+/**
+ * sdma_v4_4_2_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers.
+ */
+static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ u32 rb_cntl, ib_cntl;
+ int i;
+ bool unset = false;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].page;
+
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
+ (!unset)) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = true;
+ }
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
+ RB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
+ IB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
+ }
+}
+
+/**
+ * sdma_v4_4_2_ctx_switch_enable - enable or disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch.
+ */
+static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl, phase_quantum = 0;
+ int i;
+
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
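+		/* split the quantum into VALUE * 2^UNIT so it fits the PHASE*_QUANTUM register fields */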
+ while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (enable && amdgpu_sdma_phase_quantum) {
+ WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
+ }
+ WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);
+
+ /* Extend page fault timeout to avoid interrupt storm */
+ WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
+ }
+
+}
+
+/**
+ * sdma_v4_4_2_enable - halt or unhalt the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines.
+ */
+static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl;
+ int i;
+
+ if (!enable) {
+ sdma_v4_4_2_gfx_stop(adev);
+ sdma_v4_4_2_rlc_stop(adev);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_stop(adev);
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
+ }
+}
+
+/*
+ * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
+ */
+static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+ /* Set ring buffer size in dwords */
+ uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+ barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+ return rb_cntl;
+}
+
+/**
+ * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the gfx DMA ring buffer and enable it.
+ */
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
+
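+	/* rptr writeback slot offset in bytes within the wb buffer */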
+ wb_offset = (ring->rptr_offs * 4);
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
+ rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+	/* before programming wptr to a smaller value, set minor_ptr_update first */
+ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
+
+ doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
+
+ doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
+ WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
+
+ sdma_v4_4_2_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+
+ ring->sched.ready = true;
+}
+
+/**
+ * sdma_v4_4_2_page_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffer and enable it.
+ */
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
+
+ wb_offset = (ring->rptr_offs * 4);
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
+ rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+	/* before programming wptr to a smaller value, set minor_ptr_update first */
+ WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
+
+ doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);
+
+ doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA_PAGE_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
+ WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+	/* paging queue doorbell range is set up in sdma_v4_4_2_gfx_resume */
+ sdma_v4_4_2_page_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA_PAGE_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
+
+ ring->sched.ready = true;
+}
+
+static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
+{
+
+}
+
+/**
+ * sdma_v4_4_2_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev)
+{
+ sdma_v4_4_2_init_pg(adev);
+
+ return 0;
+}
+
+/**
+ * sdma_v4_4_2_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
+{
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+ int i, j;
+
+ /* halt the MEs */
+ sdma_v4_4_2_enable(adev, false);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!adev->sdma.instance[i].fw)
+ return -EINVAL;
+
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+ amdgpu_ucode_print_sdma_hdr(&hdr->header);
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+
+ fw_data = (const __le32 *)
+ (adev->sdma.instance[i].fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);
+
+ for (j = 0; j < fw_size; j++)
+ WREG32_SDMA(i, regSDMA_UCODE_DATA,
+ le32_to_cpup(fw_data++));
+
+ WREG32_SDMA(i, regSDMA_UCODE_ADDR,
+ adev->sdma.instance[i].fw_version);
+ }
+
+ return 0;
+}
+
+/**
+ * sdma_v4_4_2_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+ } else {
+ /* bypass sdma microcode loading on Gopher */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) {
+ r = sdma_v4_4_2_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_4_2_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ }
+
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_4_2_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_resume(adev, i);
+
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32_SDMA(i, regSDMA_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ sdma_v4_4_2_enable(adev, true);
+ } else {
+ r = sdma_v4_4_2_rlc_resume(adev);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to memory.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp;
+ u64 gpu_addr;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ring_alloc(ring, 5);
+ if (r)
+ goto error_free_wb;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+ amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
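+	/* poll the writeback slot until the engine writes the test pattern or we time out */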
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Test a simple IB in the DMA ring.
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+ unsigned index;
+ long r;
+ u32 tmp = 0;
+ u64 gpu_addr;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err0;
+
+ ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr);
+ ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.length_dw = 8;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err1;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto err1;
+ } else if (r < 0) {
+ goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
+
+err1:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+err0:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+
+/**
+ * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA.
+ */
+static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
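+	/* each PTE is 8 bytes */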
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes - 1;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+}
+
+/**
+ * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using sDMA.
+ */
+static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
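+	/* each 64-bit PTE value occupies two dwords in the write packet */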
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw - 1;
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
+ }
+}
+
+/**
+ * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA.
+ */
+static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
+ ib->ptr[ib->length_dw++] = upper_32_bits(flags);
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
+}
+
+/**
+ * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with padding
+ */
+static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+ u32 pad_count;
+ int i;
+
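+	/* pad the IB to a multiple of 8 dwords, using one burst NOP when the engine supports it */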
+ pad_count = (-ib->length_dw) & 7;
+ for (i = 0; i < pad_count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+ SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+ else
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed.
+ */
+static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
+ addr & 0xfffffffc,
+ upper_32_bits(addr) & 0xffffffff,
+ seq, 0xffffffff, 4);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: page directory base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA.
+ */
+static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
+static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
+}
+
+static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 4, 2):
+ return false;
+ default:
+ return false;
+ }
+}
+
+static int sdma_v4_4_2_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = sdma_v4_4_2_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if (sdma_v4_4_2_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
+ sdma_v4_4_2_set_ring_funcs(adev);
+ sdma_v4_4_2_set_buffer_funcs(adev);
+ sdma_v4_4_2_set_vm_pte_funcs(adev);
+ sdma_v4_4_2_set_irq_funcs(adev);
+
+ return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+#endif
+
+static int sdma_v4_4_2_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+#if 0
+ struct ras_ih_if ih_info = {
+ .cb = sdma_v4_4_2_process_ras_data_cb,
+ };
+#endif
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+ }
+
+ return 0;
+}
+
+static int sdma_v4_4_2_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+ }
+
+ /* SDMA SRAM ECC event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+ }
+
+	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_VM_HOLE,
+ &adev->sdma.vm_hole_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
+ &adev->sdma.doorbell_invalid_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
+ &adev->sdma.pool_timeout_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
+ &adev->sdma.srbm_write_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
+ ring->use_doorbell?"true":"false");
+
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
+ sprintf(ring->name, "sdma%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ ring = &adev->sdma.instance[i].page;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ /* paging queue use same doorbell index/routing as gfx queue
+ * with 0x400 (4096 dwords) offset on second doorbell page
+ */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+ ring->doorbell_index += 0x400;
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
+ sprintf(ring->name, "page%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int sdma_v4_4_2_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (adev->sdma.has_page_queue)
+ amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ }
+
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->flags & AMD_IS_APU)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
+ if (!amdgpu_sriov_vf(adev))
+ sdma_v4_4_2_init_golden_registers(adev);
+
+ r = sdma_v4_4_2_start(adev);
+
+ return r;
+}
+
+static int sdma_v4_4_2_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
+
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return sdma_v4_4_2_hw_fini(adev);
+}
+
+static int sdma_v4_4_2_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return sdma_v4_4_2_hw_init(adev);
+}
+
+static bool sdma_v4_4_2_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
+
+ if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
+ return false;
+ }
+
+ return true;
+}
+
+static int sdma_v4_4_2_wait_for_idle(void *handle)
+{
+ unsigned i, j;
+ u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
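+	/* poll all instances until every status register reports idle or we time out */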
+ for (i = 0; i < adev->usec_timeout; i++) {
+ for (j = 0; j < adev->sdma.num_instances; j++) {
+ sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
+ if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
+ break;
+ }
+ if (j == adev->sdma.num_instances)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int sdma_v4_4_2_soft_reset(void *handle)
+{
+ /* todo */
+
+ return 0;
+}
+
+static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_cntl;
+
+ sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t instance;
+
+ DRM_DEBUG("IH: SDMA trap\n");
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+
+	/* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ goto out;
+
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+
+out:
+ return AMDGPU_RAS_SUCCESS;
+}
+#endif
+
+static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+
+ DRM_ERROR("Illegal instruction in SDMA command stream\n");
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ return 0;
+
+ switch (entry->ring_id) {
+ case 0:
+ drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+ break;
+ }
+ return 0;
+}
+
+static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_edc_config;
+
+ sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG);
+ /*
+	 * FIXME: This was inherited from Aldebaran, but there is no such field
+	 * definition in the regspec of either Aldebaran or SDMA 4.4.2
+ */
+ sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? (1 << 2) : 0;
+ WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+ struct amdgpu_task_info task_info;
+ u64 addr;
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0 || instance >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance invalid %d\n", instance);
+ return -EINVAL;
+ }
+
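+	/* reassemble the page-aligned fault address from the IV source data */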
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_dbg_ratelimited(adev->dev,
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
+ "pasid:%u, for process %s pid %d thread %s pid %d\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+
+	dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev,
+ "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev,
+		"SDMA received an SRBM_WRITE register write command in a non-privileged command buffer\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static void sdma_v4_4_2_update_medium_grain_clock_gating(
+ struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def;
+ int i;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+ data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+ data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+ }
+ }
+}
+
+
+static void sdma_v4_4_2_update_medium_grain_light_sleep(
+ struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def;
+ int i;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+			/* 1 = do not override: enable sdma mem light sleep */
+			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+			data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+			if (def != data)
+				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+			/* 0 = override: disable sdma mem light sleep */
+			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+			data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+			if (def != data)
+				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ }
+}
+
+static int sdma_v4_4_2_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ sdma_v4_4_2_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ sdma_v4_4_2_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+}
+
+static int sdma_v4_4_2_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_SDMA_MGCG */
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL));
+ if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK))
+ *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
+
+ /* AMD_CG_SUPPORT_SDMA_LS */
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL));
+ if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
+ *flags |= AMD_CG_SUPPORT_SDMA_LS;
+}
+
+const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
+ .name = "sdma_v4_4_2",
+ .early_init = sdma_v4_4_2_early_init,
+ .late_init = sdma_v4_4_2_late_init,
+ .sw_init = sdma_v4_4_2_sw_init,
+ .sw_fini = sdma_v4_4_2_sw_fini,
+ .hw_init = sdma_v4_4_2_hw_init,
+ .hw_fini = sdma_v4_4_2_hw_fini,
+ .suspend = sdma_v4_4_2_suspend,
+ .resume = sdma_v4_4_2_resume,
+ .is_idle = sdma_v4_4_2_is_idle,
+ .wait_for_idle = sdma_v4_4_2_wait_for_idle,
+ .soft_reset = sdma_v4_4_2_soft_reset,
+ .set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
+ .set_powergating_state = sdma_v4_4_2_set_powergating_state,
+ .get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .get_rptr = sdma_v4_4_2_ring_get_rptr,
+ .get_wptr = sdma_v4_4_2_ring_get_wptr,
+ .set_wptr = sdma_v4_4_2_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+ /* sdma_v4_4_2_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+ .emit_ib = sdma_v4_4_2_ring_emit_ib,
+ .emit_fence = sdma_v4_4_2_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_4_2_ring_test_ring,
+ .test_ib = sdma_v4_4_2_ring_test_ib,
+ .insert_nop = sdma_v4_4_2_ring_insert_nop,
+ .pad_ib = sdma_v4_4_2_ring_pad_ib,
+ .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .get_rptr = sdma_v4_4_2_ring_get_rptr,
+ .get_wptr = sdma_v4_4_2_page_ring_get_wptr,
+ .set_wptr = sdma_v4_4_2_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+ /* sdma_v4_4_2_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+ .emit_ib = sdma_v4_4_2_ring_emit_ib,
+ .emit_fence = sdma_v4_4_2_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_4_2_ring_test_ring,
+ .test_ib = sdma_v4_4_2_ring_test_ib,
+ .insert_nop = sdma_v4_4_2_ring_insert_nop,
+ .pad_ib = sdma_v4_4_2_ring_pad_ib,
+ .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ if (adev->sdma.has_page_queue) {
+ adev->sdma.instance[i].page.funcs =
+ &sdma_v4_4_2_page_ring_funcs;
+ adev->sdma.instance[i].page.me = i;
+ }
+ }
+}
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
+ .set = sdma_v4_4_2_set_trap_irq_state,
+ .process = sdma_v4_4_2_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
+ .process = sdma_v4_4_2_process_illegal_inst_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
+ .set = sdma_v4_4_2_set_ecc_irq_state,
+ .process = amdgpu_sdma_process_ecc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
+ .process = sdma_v4_4_2_process_vm_hole_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
+ .process = sdma_v4_4_2_process_doorbell_invalid_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
+ .process = sdma_v4_4_2_process_pool_timeout_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
+ .process = sdma_v4_4_2_process_srbm_write_irq,
+};
+
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+
+ adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
+ adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
+ adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
+ adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
+ adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
+ adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
+ adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+}
+
+/**
+ * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to copy to
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
+ *
+ * Copy GPU buffers using the DMA engine.
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count,
+ bool tmz)
+{
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ ib->ptr[ib->length_dw++] = byte_count - 1;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+}
+
+/**
+ * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine.
+ */
+static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
+ uint32_t src_data,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = byte_count - 1;
+}
+
+static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
+ .copy_max_bytes = 0x400000,
+ .copy_num_dw = 7,
+ .emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
+
+ .fill_max_bytes = 0x400000,
+ .fill_num_dw = 5,
+ .emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
+};
+
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
+{
+ adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ else
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+}
+
+static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
+ .copy_pte = sdma_v4_4_2_vm_copy_pte,
+
+ .write_pte = sdma_v4_4_2_vm_write_pte,
+ .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
+};
+
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+ struct drm_gpu_scheduler *sched;
+ unsigned i;
+
+ adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_scheds[i] = sched;
+ }
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 4,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v4_4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
new file mode 100644
index 000000000000..4814e8a074d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SDMA_V4_4_2_H__
+#define __SDMA_V4_4_2_H__
+
+extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d4d9f196db83..92e1299be021 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -237,39 +237,13 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
// emulation only, won't work on real chip
// navi10 real chip need to use PSP to load firmware
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[40];
- int ret, i;
+{
+	int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(5, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(5, 0, 2):
- chip_name = "navi14";
- break;
- case IP_VERSION(5, 0, 5):
- chip_name = "navi12";
- break;
- case IP_VERSION(5, 0, 1):
- chip_name = "cyan_skillfish2";
- break;
- default:
- BUG();
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
@@ -1415,6 +1389,7 @@ static int sdma_v5_0_sw_init(void *handle)
(adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
@@ -1791,7 +1766,6 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_0_ring_get_rptr,
.get_wptr = sdma_v5_0_ring_get_wptr,
.set_wptr = sdma_v5_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 809eca54fc61..ca7e8757d78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,59 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-/**
- * sdma_v5_2_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-
-// emulation only, won't work on real chip
-// navi10 real chip need to use PSP to load firmware
-static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[40];
-
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(5, 2, 0):
- chip_name = "sienna_cichlid_sdma";
- break;
- case IP_VERSION(5, 2, 2):
- chip_name = "navy_flounder_sdma";
- break;
- case IP_VERSION(5, 2, 1):
- chip_name = "vangogh_sdma";
- break;
- case IP_VERSION(5, 2, 4):
- chip_name = "dimgrey_cavefish_sdma";
- break;
- case IP_VERSION(5, 2, 5):
- chip_name = "beige_goby_sdma";
- break;
- case IP_VERSION(5, 2, 3):
- chip_name = "yellow_carp_sdma";
- break;
- case IP_VERSION(5, 2, 6):
- chip_name = "sdma_5_2_6";
- break;
- case IP_VERSION(5, 2, 7):
- chip_name = "sdma_5_2_7";
- break;
- default:
- BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
-
- return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
-}
-
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -809,12 +756,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
- /* TODO: check whether can submit a doorbell request to raise
- * a doorbell fence to exit gfxoff.
- */
- if (adev->in_s0ix)
- amdgpu_gfx_off_ctrl(adev, false);
-
sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
@@ -823,8 +764,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
/* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
- if (adev->in_s0ix)
- amdgpu_gfx_off_ctrl(adev, true);
if (r)
return r;
r = sdma_v5_2_rlc_resume(adev);
@@ -1296,7 +1235,7 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
- r = sdma_v5_2_init_microcode(adev);
+ r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
@@ -1314,6 +1253,7 @@ static int sdma_v5_2_sw_init(void *handle)
ring->doorbell_index =
(adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1714,7 +1654,6 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_2_ring_get_rptr,
.get_wptr = sdma_v5_2_ring_get_wptr,
.set_wptr = sdma_v5_2_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 049c26a45d85..3d9a80511a45 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -78,29 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-/**
- * sdma_v6_0_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30];
- char ucode_prefix[30];
-
- DRM_DEBUG("\n");
-
- amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
-}
-
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -296,8 +273,6 @@ static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
- * @job: job to retrieve vmid from
- * @ib: IB object to schedule
*
* flush the IB by graphics cache rinse.
*/
@@ -349,7 +324,9 @@ static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
* sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address
+ * @seq: fence seq number
+ * @flags: fence flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
@@ -426,15 +403,26 @@ static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
*
* @adev: amdgpu_device pointer
- * @enable: enable/disable the DMA MEs context switch.
+ * @enable: enable/disable context switching due to queue empty conditions
*
- * Halt or unhalt the async dma engines context switch.
+ * Enable or disable the async dma engines queue empty context switch.
*/
-static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
{
+ u32 f32_cntl;
+ int i;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
+ WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl);
+ }
+ }
}
/**
@@ -522,10 +510,7 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- if (amdgpu_sriov_vf(adev))
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
- else
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
@@ -602,10 +587,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
ring->sched.ready = true;
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v6_0_ctx_switch_enable(adev, true);
+ if (amdgpu_sriov_vf(adev))
sdma_v6_0_enable(adev, true);
- }
r = amdgpu_ring_test_helper(ring);
if (r) {
@@ -801,7 +784,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
int r = 0;
if (amdgpu_sriov_vf(adev)) {
- sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
/* set RB registers */
@@ -822,7 +804,7 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
/* unhalt the MEs */
sdma_v6_0_enable(adev, true);
/* enable sdma ring preemption */
- sdma_v6_0_ctx_switch_enable(adev, true);
+ sdma_v6_0_ctxempty_int_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = sdma_v6_0_gfx_resume(adev);
@@ -1083,10 +1065,9 @@ static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA.
*/
@@ -1190,7 +1171,6 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
*
* Update the page table base and flush the VM TLB
* using sDMA.
@@ -1198,7 +1178,28 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+ uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
+
+ /* Update the PD address for this VMID. */
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
+ (hub->ctx_addr_distance * vmid),
+ lower_32_bits(pd_addr));
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
+ (hub->ctx_addr_distance * vmid),
+ upper_32_bits(pd_addr));
+
+ /* Trigger invalidation. */
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f));
+ amdgpu_ring_write(ring, req);
+ amdgpu_ring_write(ring, 0xFFFFFFFF);
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) |
+ SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F));
}
static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
@@ -1234,6 +1235,24 @@ static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static struct amdgpu_sdma_ras sdma_v6_0_3_ras = {
+ .ras_block = {
+ .ras_late_init = amdgpu_ras_block_late_init,
+ },
+};
+
+static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(6, 0, 3):
+ adev->sdma.ras = &sdma_v6_0_3_ras;
+ break;
+ default:
+ break;
+ }
+
+}
+
static int sdma_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1243,6 +1262,7 @@ static int sdma_v6_0_early_init(void *handle)
sdma_v6_0_set_vm_pte_funcs(adev);
sdma_v6_0_set_irq_funcs(adev);
sdma_v6_0_set_mqd_funcs(adev);
+ sdma_v6_0_set_ras_funcs(adev);
return 0;
}
@@ -1260,7 +1280,7 @@ static int sdma_v6_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v6_0_init_microcode(adev);
+ r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
@@ -1278,6 +1298,7 @@ static int sdma_v6_0_sw_init(void *handle)
ring->doorbell_index =
(adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
+ ring->vm_hub = AMDGPU_GFXHUB_0;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
@@ -1287,6 +1308,11 @@ static int sdma_v6_0_sw_init(void *handle)
return r;
}
+ if (amdgpu_sdma_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
+ return -EINVAL;
+ }
+
return r;
}
@@ -1320,7 +1346,7 @@ static int sdma_v6_0_hw_fini(void *handle)
return 0;
}
- sdma_v6_0_ctx_switch_enable(adev, false);
+ sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
return 0;
@@ -1426,10 +1452,12 @@ static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ if (!amdgpu_sriov_vf(adev)) {
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
@@ -1527,7 +1555,6 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v6_0_ring_get_rptr,
.get_wptr = sdma_v6_0_ring_get_wptr,
.set_wptr = sdma_v6_0_ring_set_wptr,
@@ -1588,10 +1615,11 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
/**
* sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with commands
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1617,7 +1645,7 @@ static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
/**
* sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 81a6d5b94987..8b8086d5c864 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -40,7 +40,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c
adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
return true;
#endif
- return false;
+ return amdgpu_reset_method == AMD_RESET_METHOD_MODE2;
}
static struct amdgpu_reset_handler *
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
new file mode 100644
index 000000000000..ae29620b1ea4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smu_v13_0_10.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+
+static bool smu_v13_0_10_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ if (adev->pm.fw_version >= 0x00502005 && !amdgpu_sriov_vf(adev))
+ return true;
+
+ return false;
+}
+
+static struct amdgpu_reset_handler *
+smu_v13_0_10_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ }
+
+ if (smu_v13_0_10_is_mode2_default(reset_ctl) &&
+ amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_MODE2) {
+ list_for_each_entry (handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2)
+ return handler;
+ }
+ }
+
+ return NULL;
+}
+
+static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_MES))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+smu_v13_0_10_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (!amdgpu_sriov_vf(adev))
+ r = smu_v13_0_10_mode2_suspend_ip(adev);
+
+ return r;
+}
+
+static int smu_v13_0_10_mode2_reset(struct amdgpu_device *adev)
+{
+ return amdgpu_dpm_mode2_reset(adev);
+}
+
+static void smu_v13_0_10_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+static int
+smu_v13_0_10_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r;
+
+ r = smu_v13_0_10_mode2_reset(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "ASIC reset failed with error, %d ", r);
+ }
+ return r;
+}
+
+static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ int i, r;
+ struct psp_context *psp = &adev->psp;
+ struct amdgpu_firmware_info *ucode;
+ struct amdgpu_firmware_info *ucode_list[2];
+ int ucode_count = 0;
+
+ for (i = 0; i < adev->firmware.max_ucodes; i++) {
+ ucode = &adev->firmware.ucode[i];
+
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_IMU_I:
+ case AMDGPU_UCODE_ID_IMU_D:
+ ucode_list[ucode_count++] = ucode;
+ break;
+ default:
+ break;
+ }
+ }
+
+ r = psp_load_fw_list(psp, ucode_list, ucode_count);
+ if (r) {
+ dev_err(adev->dev, "IMU ucode load failed after mode2 reset\n");
+ return r;
+ }
+
+ r = psp_rlc_autoload_start(psp);
+ if (r) {
+ DRM_ERROR("Failed to start rlc autoload after mode2 reset\n");
+ return r;
+ }
+
+ amdgpu_dpm_enable_gfx_features(adev);
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_MES ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_MES ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = smu_v13_0_10_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ goto end;
+ }
+
+end:
+ if (r)
+ return -EAGAIN;
+ else
+ return r;
+}
+
+static struct amdgpu_reset_handler smu_v13_0_10_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = smu_v13_0_10_mode2_prepare_hwcontext,
+ .perform_reset = smu_v13_0_10_mode2_perform_reset,
+ .restore_hwcontext = smu_v13_0_10_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = smu_v13_0_10_mode2_reset,
+};
+
+int smu_v13_0_10_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = smu_v13_0_10_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = smu_v13_0_10_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &smu_v13_0_10_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int smu_v13_0_10_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h
new file mode 100644
index 000000000000..e0cb72a0eec6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V13_0_10_H__
+#define __SMU_V13_0_10_H__
+
+#include "amdgpu.h"
+
+int smu_v13_0_10_reset_init(struct amdgpu_device *adev);
+int smu_v13_0_10_reset_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 7cd17dda32ce..6d15d5cd9e07 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -191,47 +191,6 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
-/*
- * Indirect registers accessor
- */
-static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
-
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -342,11 +301,10 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
- return 10000;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
- return reference_clock / 4;
+ return 10000;
return reference_clock;
}
@@ -439,8 +397,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
en = &soc15_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;
@@ -650,24 +609,6 @@ static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk
return 0;
}
-static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void soc15_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -678,13 +619,6 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
-static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
-{
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
const struct amdgpu_ip_block_version vega10_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -694,11 +628,6 @@ const struct amdgpu_ip_block_version vega10_common_ip_block =
.funcs = &soc15_common_ip_funcs,
};
-static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
/* Set IP register base before any HW register access */
@@ -935,10 +864,10 @@ static int soc15_common_early_init(void *handle)
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &soc15_pcie_rreg;
- adev->pcie_wreg = &soc15_pcie_wreg;
- adev->pcie_rreg64 = &soc15_pcie_rreg64;
- adev->pcie_wreg64 = &soc15_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
@@ -948,7 +877,7 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->rev_id = soc15_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xFF;
/* TODO: split the GC and PG flags based on the relevant IP version for which
* they are relevant.
@@ -1164,6 +1093,11 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
+ case IP_VERSION(9, 4, 3):
+ adev->asic_funcs = &vega20_asic_funcs;
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -1184,6 +1118,11 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
+ /* Enable the selfring doorbell aperture late because the doorbell BAR
+ * aperture will change if BAR resizing succeeds in gmc sw_init.
+ */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
return 0;
}
@@ -1229,8 +1168,6 @@ static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* enable pcie gen2/3 link */
- soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
@@ -1243,7 +1180,8 @@ static int soc15_common_hw_init(void *handle)
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
- soc15_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
+
/* HW doorbell routing policy: doorbell writing not
* in SDMA/IH/MM/ACV range will be routed to CP. So
* we need to init SDMA doorbell range prior
@@ -1259,8 +1197,14 @@ static int soc15_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- soc15_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+ * separately in hw_fini because soc15_enable_doorbell_aperture
+ * has been removed and there is no need to delay disabling
+ * selfring doorbell.
+ */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
+
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 5562670b7b52..d77162536514 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -48,19 +48,32 @@
static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
{
- .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
- .codec_array = vcn_4_0_0_video_codecs_encode_array,
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
+{
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
+ .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
+{
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
+ .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
@@ -69,67 +82,119 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
{
- .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
- .codec_array = vcn_4_0_0_video_codecs_decode_array,
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
-static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
- const struct amdgpu_video_codecs **codecs)
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
{
- switch (adev->ip_versions[UVD_HWIP][0]) {
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
+ .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
- case IP_VERSION(4, 0, 0):
- case IP_VERSION(4, 0, 2):
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode;
- return 0;
- default:
- return -EINVAL;
- }
-}
-/*
- * Indirect registers accessor
- */
-static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
+ .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
+/* SRIOV SOC21, not const since data is controlled by host */
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
-static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+};
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
-static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
-static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
+static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+ return -EINVAL;
+
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 2):
+ case IP_VERSION(4, 0, 4):
+ if (amdgpu_sriov_vf(adev)) {
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
+ }
+ } else {
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ }
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
@@ -255,9 +320,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
en = &soc21_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = soc21_get_register_value(adev,
@@ -374,21 +440,6 @@ static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk
return 0;
}
-static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void soc21_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -399,13 +450,6 @@ static void soc21_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
-static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
-{
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
const struct amdgpu_ip_block_version soc21_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -415,11 +459,6 @@ const struct amdgpu_ip_block_version soc21_common_ip_block =
.funcs = &soc21_common_ip_funcs,
};
-static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
@@ -544,10 +583,10 @@ static int soc21_common_early_init(void *handle)
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &soc21_pcie_rreg;
- adev->pcie_wreg = &soc21_pcie_wreg;
- adev->pcie_rreg64 = &soc21_pcie_rreg64;
- adev->pcie_wreg64 = &soc21_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
@@ -560,7 +599,7 @@ static int soc21_common_early_init(void *handle)
adev->asic_funcs = &soc21_asic_funcs;
- adev->rev_id = soc21_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xff;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
@@ -640,7 +679,10 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
- AMD_CG_SUPPORT_GFX_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
@@ -669,7 +711,7 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ adev->external_rev_id = adev->rev_id + 0x80;
break;
default:
@@ -689,8 +731,36 @@ static int soc21_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
+ } else {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
+ }
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ /* don't need to fail gpu late init
+ * if enabling the athub_err_event interrupt failed;
+ * nbio v4_3 only supports fatal error handling,
+ * so just enable the interrupt directly */
+ amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
+
+ /* Enable the selfring doorbell aperture late because the doorbell BAR
+ * aperture will change if BAR resizing succeeds in gmc sw_init.
+ */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
return 0;
}
@@ -714,8 +784,6 @@ static int soc21_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* enable pcie gen2/3 link */
- soc21_pcie_gen3_enable(adev);
/* enable aspm */
soc21_program_aspm(adev);
/* setup nbio registers */
@@ -727,7 +795,7 @@ static int soc21_common_hw_init(void *handle)
if (adev->nbio.funcs->remap_hdp_registers)
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
- soc21_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0;
}
@@ -736,11 +804,21 @@ static int soc21_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- soc21_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+ * separately in hw_fini because soc21_enable_doorbell_aperture
+ * has been removed and there is no need to delay disabling
+ * selfring doorbell.
+ */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_put_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 509d8a1945eb..30d0482ac466 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -84,6 +84,8 @@ enum ta_ras_block {
TA_RAS_BLOCK__MP1,
TA_RAS_BLOCK__FUSE,
TA_RAS_BLOCK__MCA,
+ TA_RAS_BLOCK__VCN,
+ TA_RAS_BLOCK__JPEG,
TA_NUM_BLOCK_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index cf8ff064dc72..00d8bdb8254f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -55,10 +55,10 @@ enum ta_securedisplay_status {
TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/
};
-/** @enum ta_securedisplay_max_phy
+/** @enum ta_securedisplay_phy_ID
* Physical ID number to use for reading corresponding DIO Scratch register for ROI
*/
-enum ta_securedisplay_max_phy {
+enum ta_securedisplay_phy_ID {
TA_SECUREDISPLAY_PHY0 = 0,
TA_SECUREDISPLAY_PHY1 = 1,
TA_SECUREDISPLAY_PHY2 = 2,
@@ -139,16 +139,16 @@ union ta_securedisplay_cmd_output {
uint32_t reserved[4];
};
-/** @struct securedisplay_cmd
- * Secure Display Command which is shared buffer memory
- */
-struct securedisplay_cmd {
- uint32_t cmd_id; /* +0 Bytes Command ID */
- enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */
- uint32_t reserved[2]; /* +8 Bytes Reserved */
- union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */
- union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */
- /**@note Total 48 Bytes */
+/** @struct ta_securedisplay_cmd
+* Secure display command which is shared buffer memory
+*/
+struct ta_securedisplay_cmd {
+ uint32_t cmd_id; /**< +0 Bytes Command ID */
+ enum ta_securedisplay_status status; /**< +4 Bytes Status code returned by the secure display TA */
+ uint32_t reserved[2]; /**< +8 Bytes Reserved */
+ union ta_securedisplay_cmd_input securedisplay_in_message; /**< +16 Bytes Command input buffer */
+ union ta_securedisplay_cmd_output securedisplay_out_message; /**< +32 Bytes Command output buffer */
+ /**@note Total 48 Bytes */
};
#endif //_TA_SECUREDISPLAY_IF_H
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 72fd963f178b..530549314ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -57,13 +57,6 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
-static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
- uint32_t umc_inst,
- uint32_t ch_inst)
-{
- return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-}
-
static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
uint64_t mc_umc_status, uint32_t umc_reg_offset)
{
@@ -167,24 +160,28 @@ static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_dev
}
}
+static int umc_v6_7_ecc_info_querry_ecc_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+
+ umc_v6_7_ecc_info_query_correctable_error_count(adev,
+ umc_inst, ch_inst,
+ &(err_data->ce_count));
+
+ umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
+ umc_inst, ch_inst,
+ &(err_data->ue_count));
+
+ return 0;
+}
+
static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
-
- /*TODO: driver needs to toggle DF Cstate to ensure
- * safe access of UMC registers. Will add the protection */
- LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_v6_7_ecc_info_query_correctable_error_count(adev,
- umc_inst, ch_inst,
- &(err_data->ce_count));
- umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
- umc_inst, ch_inst,
- &(err_data->ue_count));
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v6_7_ecc_info_querry_ecc_error_count, ras_error_status);
}
void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
@@ -222,23 +219,23 @@ void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
}
}
-static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t ch_inst,
- uint32_t umc_inst)
+static int umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
{
uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
- return;
+ return 0;
if (!err_data->err_addr)
- return;
+ return 0;
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -250,25 +247,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
umc_v6_7_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst);
}
+
+ return 0;
}
static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
-
- /*TODO: driver needs to toggle DF Cstate to ensure
- * safe access of UMC resgisters. Will add the protection
- * when firmware interface is ready */
- LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_v6_7_ecc_info_query_error_address(adev,
- err_data,
- ch_inst,
- umc_inst);
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v6_7_ecc_info_query_error_address, ras_error_status);
}
static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
@@ -371,11 +358,14 @@ static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev
}
}
-static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
- uint32_t umc_reg_offset)
+static int umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
{
uint32_t ecc_err_cnt_addr;
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst);
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0,
@@ -409,58 +399,54 @@ static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
/* clear higher chip error count */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
UMC_V6_7_CE_CNT_INIT);
+
+ return 0;
}
static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
{
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
+ amdgpu_umc_loop_channels(adev,
+ umc_v6_7_reset_error_count_per_channel, NULL);
+}
- LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v6_7_reg_offset(adev,
- umc_inst,
- ch_inst);
+static int umc_v6_7_query_ecc_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst);
- umc_v6_7_reset_error_count_per_channel(adev,
- umc_reg_offset);
- }
+ umc_v6_7_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count),
+ ch_inst, umc_inst);
+
+ umc_v6_7_querry_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+
+ return 0;
}
static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
-
- /*TODO: driver needs to toggle DF Cstate to ensure
- * safe access of UMC registers. Will add the protection */
- LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v6_7_reg_offset(adev,
- umc_inst,
- ch_inst);
- umc_v6_7_query_correctable_error_count(adev,
- umc_reg_offset,
- &(err_data->ce_count),
- ch_inst, umc_inst);
- umc_v6_7_querry_uncorrectable_error_count(adev,
- umc_reg_offset,
- &(err_data->ue_count));
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v6_7_query_ecc_error_count, ras_error_status);
umc_v6_7_reset_error_count(adev);
}
-static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst)
+static int umc_v6_7_query_error_address(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
{
uint32_t mc_umc_status_addr;
uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -470,12 +456,12 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
- return;
+ return 0;
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
+ return 0;
}
/* calculate error address if ue error is detected */
@@ -491,29 +477,15 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+
+ return 0;
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
-
- /*TODO: driver needs to toggle DF Cstate to ensure
- * safe access of UMC resgisters. Will add the protection
- * when firmware interface is ready */
- LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v6_7_reg_offset(adev,
- umc_inst,
- ch_inst);
- umc_v6_7_query_error_address(adev,
- err_data,
- umc_reg_offset, ch_inst,
- umc_inst);
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v6_7_query_error_address, ras_error_status);
}
static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index b7da4528cf0a..d51ae0bc36f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -76,10 +76,13 @@ static inline uint32_t get_umc_v8_10_reg_offset(struct amdgpu_device *adev,
UMC_8_NODE_DIST * node_inst;
}
-static void umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev,
- uint32_t umc_reg_offset)
+static int umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
{
uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
ecc_err_cnt_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
@@ -87,24 +90,14 @@ static void umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev,
/* clear error count */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
UMC_V8_10_CE_CNT_INIT);
+
+ return 0;
}
static void umc_v8_10_clear_error_count(struct amdgpu_device *adev)
{
- uint32_t node_inst = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
-
- LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v8_10_reg_offset(adev,
- node_inst,
- umc_inst,
- ch_inst);
-
- umc_v8_10_clear_error_count_per_channel(adev,
- umc_reg_offset);
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_10_clear_error_count_per_channel, NULL);
}
static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
@@ -147,29 +140,29 @@ static void umc_v8_10_query_uncorrectable_error_count(struct amdgpu_device *adev
*error_count += 1;
}
+static int umc_v8_10_query_ecc_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
+
+ umc_v8_10_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v8_10_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+
+ return 0;
+}
+
static void umc_v8_10_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
- uint32_t node_inst = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
-
- LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v8_10_reg_offset(adev,
- node_inst,
- umc_inst,
- ch_inst);
-
- umc_v8_10_query_correctable_error_count(adev,
- umc_reg_offset,
- &(err_data->ce_count));
- umc_v8_10_query_uncorrectable_error_count(adev,
- umc_reg_offset,
- &(err_data->ue_count));
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_10_query_ecc_error_count, ras_error_status);
umc_v8_10_clear_error_count(adev);
}
@@ -209,39 +202,69 @@ static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
return 0;
}
-static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset,
- uint32_t node_inst,
- uint32_t ch_inst,
- uint32_t umc_inst)
+static void umc_v8_10_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst,
+ uint32_t node_inst, uint64_t mc_umc_status)
{
- uint64_t mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr;
- uint64_t mc_umc_addrt0, na_err_addr_base;
+ uint64_t na_err_addr_base;
uint64_t na_err_addr, retired_page_addr;
uint32_t channel_index, addr_lsb, col = 0;
int ret = 0;
+ channel_index =
+ adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst];
+
+ /* the lowest lsb bits should be ignored */
+ addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
+ err_addr &= ~((0x1ULL << addr_lsb) - 1);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Mapping normal error address to retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
+ }
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
+ }
+}
+
+static int umc_v8_10_query_error_address(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint64_t mc_umc_status_addr;
+ uint64_t mc_umc_status, err_addr;
+ uint64_t mc_umc_addrt0;
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
+
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
- return;
+ return 0;
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
+ return 0;
}
- channel_index =
- adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
- adev->umc.channel_inst_num +
- umc_inst * adev->umc.channel_inst_num +
- ch_inst];
-
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
@@ -251,62 +274,31 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* the lowest lsb bits should be ignored */
- addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
- err_addr &= ~((0x1ULL << addr_lsb) - 1);
- na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
- }
+ umc_v8_10_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst, node_inst, mc_umc_status);
}
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+
+ return 0;
}
static void umc_v8_10_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t node_inst = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
-
- LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v8_10_reg_offset(adev,
- node_inst,
- umc_inst,
- ch_inst);
-
- umc_v8_10_query_error_address(adev,
- err_data,
- umc_reg_offset,
- node_inst,
- ch_inst,
- umc_inst);
- }
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_10_query_error_address, ras_error_status);
}
-static void umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev,
- uint32_t umc_reg_offset)
+static int umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
{
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCntSel);
@@ -321,48 +313,135 @@ static void umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev,
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
/* set error count to initial value */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_10_CE_CNT_INIT);
+
+ return 0;
}
static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
{
- uint32_t node_inst = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
-
- LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v8_10_reg_offset(adev,
- node_inst,
- umc_inst,
- ch_inst);
-
- umc_v8_10_err_cnt_init_per_channel(adev, umc_reg_offset);
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_10_err_cnt_init_per_channel, NULL);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ /*
+ * Force return true, because UMCCH0_0_GeccCtrl
+ * is not accessible from host side
+ */
+ return true;
+}
+
+static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst;
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
+ *error_count += 1;
}
}
-static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
- struct amdgpu_device *adev,
- uint32_t umc_reg_offset)
+static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
+ unsigned long *error_count)
{
- uint32_t ecc_ctrl_addr, ecc_ctrl;
+ uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- ecc_ctrl_addr =
- SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
- ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
- umc_reg_offset) * 4);
+ eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst;
- return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
+ *error_count += 1;
+ }
}
-static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+static int umc_v8_10_ecc_info_query_ecc_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
{
- uint32_t umc_reg_offset = 0;
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+
+ umc_v8_10_ecc_info_query_correctable_error_count(adev,
+ node_inst, umc_inst, ch_inst,
+ &(err_data->ce_count));
+ umc_v8_10_ecc_info_query_uncorrectable_error_count(adev,
+ node_inst, umc_inst, ch_inst,
+ &(err_data->ue_count));
+ return 0;
+}
- /* Enabling fatal error in umc node0 instance0 channel0 will be
- * considered as fatal error mode
- */
- umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
- return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_10_ecc_info_query_ecc_error_count, ras_error_status);
+}
+
+static int umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t eccinfo_table_idx;
+ uint64_t mc_umc_status, err_addr;
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst;
+
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+
+ if (mc_umc_status == 0)
+ return 0;
+
+ if (!err_data->err_addr)
+ return 0;
+
+ /* calculate error address if ue error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1)) {
+
+ err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ umc_v8_10_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst, node_inst, mc_umc_status);
+ }
+
+ return 0;
+}
+
+static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_10_ecc_info_query_error_address, ras_error_status);
}
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
@@ -376,4 +455,6 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
},
.err_cnt_init = umc_v8_10_err_cnt_init,
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
+ .ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
};
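Both ecc_info query paths above use the same row-major flattening of (node, umc, channel) into an index for ras->umc_ecc.ecc[]. A minimal sketch of that computation, with the helper name chosen here for illustration and the instance counts being the same fields read from adev->umc in the hunks above:

	/* Row-major flattening of (node, umc, channel) into the ecc[] table index. */
	static inline uint32_t umc_ecc_table_index(uint32_t node_inst, uint32_t umc_inst,
						   uint32_t ch_inst, uint32_t umc_inst_num,
						   uint32_t channel_inst_num)
	{
		return node_inst * umc_inst_num * channel_inst_num +
		       umc_inst * channel_inst_num +
		       ch_inst;
	}

For example, with two UMC instances per node and two channels per instance, (node 1, umc 0, ch 1) lands at index 1*2*2 + 0*2 + 1 = 5.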
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
index 25eaf4af5fcf..c6dfd433fec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
@@ -31,9 +31,9 @@
/* number of umc instance with memory map register access */
#define UMC_V8_10_UMC_INSTANCE_NUM 2
-/* Total channel instances for all umc nodes */
+/* Total channel instances for all available umc nodes */
#define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
- (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num)
+ (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)
/* UMC register per channel offset */
#define UMC_V8_10_PER_CHANNEL_OFFSET 0x400
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e407be6cb63c..e32b656b3dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -444,6 +444,7 @@ static int uvd_v7_0_sw_init(void *handle)
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
@@ -454,6 +455,7 @@ static int uvd_v7_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
@@ -1397,7 +1399,7 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1440,7 +1442,7 @@ static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1802,7 +1804,6 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
@@ -1835,7 +1836,6 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = uvd_v7_0_enc_ring_get_rptr,
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
.set_wptr = uvd_v7_0_enc_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 66cd3d11aa4b..57b85bb6a1e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,6 +466,7 @@ static int vce_v4_0_sw_init(void *handle)
enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
ring = &adev->vce.ring[i];
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vce%d", i);
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
@@ -1021,7 +1022,7 @@ static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1103,7 +1104,6 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c b/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c
index 1ceda3d0cd5b..2b9ddb3d2fe1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c
@@ -65,7 +65,7 @@ void vcn_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
void vcn_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
uint32_t vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index f0fbcda76f5e..761c28fa6ec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -57,11 +57,12 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
- * vcn_v1_0_early_init - set function pointers
+ * vcn_v1_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v1_0_early_init(void *handle)
{
@@ -75,7 +76,7 @@ static int vcn_v1_0_early_init(void *handle)
jpeg_v1_0_early_init(handle);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -119,6 +120,7 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
ring = &adev->vcn.inst->ring_dec;
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -140,6 +142,7 @@ static int vcn_v1_0_sw_init(void *handle)
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
hw_prio, NULL);
@@ -1547,7 +1550,7 @@ static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1692,7 +1695,7 @@ static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1976,7 +1979,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.support_64bit_ptrs = false,
.no_user_fence = true,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
.set_wptr = vcn_v1_0_dec_ring_set_wptr,
@@ -2011,7 +2013,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.nop = VCN_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v1_0_enc_ring_get_rptr,
.get_wptr = vcn_v1_0_enc_ring_get_wptr,
.set_wptr = vcn_v1_0_enc_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 08871bad9994..7c2b3aa48083 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -62,11 +62,12 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
- * vcn_v2_0_early_init - set function pointers
+ * vcn_v2_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v2_0_early_init(void *handle)
{
@@ -81,7 +82,7 @@ static int vcn_v2_0_early_init(void *handle)
vcn_v2_0_set_enc_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -128,6 +129,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
@@ -158,6 +160,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
+ ring->vm_hub = AMDGPU_MMHUB_0;
if (!amdgpu_sriov_vf(adev))
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
else
@@ -1510,7 +1513,7 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -1670,7 +1673,7 @@ void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
@@ -2013,7 +2016,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_0_dec_ring_get_rptr,
.get_wptr = vcn_v2_0_dec_ring_get_wptr,
.set_wptr = vcn_v2_0_dec_ring_set_wptr,
@@ -2044,7 +2046,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_0_enc_ring_get_rptr,
.get_wptr = vcn_v2_0_enc_ring_get_wptr,
.set_wptr = vcn_v2_0_enc_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ec87b00f2e05..ab0b45d0ead1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -71,11 +71,12 @@ static int amdgpu_ih_clientid_vcns[] = {
};
/**
- * vcn_v2_5_early_init - set function pointers
+ * vcn_v2_5_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v2_5_early_init(void *handle)
{
@@ -107,7 +108,7 @@ static int vcn_v2_5_early_init(void *handle)
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -185,6 +186,12 @@ static int vcn_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
+
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+ ring->vm_hub = AMDGPU_MMHUB_1;
+ else
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -200,6 +207,11 @@ static int vcn_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+ ring->vm_hub = AMDGPU_MMHUB_1;
+ else
+ ring->vm_hub = AMDGPU_MMHUB_0;
+
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->vcn.inst[j].irq, 0,
@@ -224,6 +236,10 @@ static int vcn_v2_5_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1557,38 +1573,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_1,
- .get_rptr = vcn_v2_5_dec_ring_get_rptr,
- .get_wptr = vcn_v2_5_dec_ring_get_wptr,
- .set_wptr = vcn_v2_5_dec_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
- 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
- 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
- 6,
- .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
- .emit_ib = vcn_v2_0_dec_ring_emit_ib,
- .emit_fence = vcn_v2_0_dec_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
- .test_ring = vcn_v2_0_dec_ring_test_ring,
- .test_ib = amdgpu_vcn_dec_ring_test_ib,
- .insert_nop = vcn_v2_0_dec_ring_insert_nop,
- .insert_start = vcn_v2_0_dec_ring_insert_start,
- .insert_end = vcn_v2_0_dec_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_DEC,
- .align_mask = 0xf,
- .secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_5_dec_ring_get_rptr,
.get_wptr = vcn_v2_5_dec_ring_get_wptr,
.set_wptr = vcn_v2_5_dec_ring_set_wptr,
@@ -1688,7 +1672,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .vmhub = AMDGPU_MMHUB_1,
.get_rptr = vcn_v2_5_enc_ring_get_rptr,
.get_wptr = vcn_v2_5_enc_ring_get_wptr,
.set_wptr = vcn_v2_5_enc_ring_set_wptr,
@@ -1714,36 +1697,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_ENC,
- .align_mask = 0x3f,
- .nop = VCN_ENC_CMD_NO_OP,
- .vmhub = AMDGPU_MMHUB_0,
- .get_rptr = vcn_v2_5_enc_ring_get_rptr,
- .get_wptr = vcn_v2_5_enc_ring_get_wptr,
- .set_wptr = vcn_v2_5_enc_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
- 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
- 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
- 1, /* vcn_v2_0_enc_ring_insert_end */
- .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
- .emit_ib = vcn_v2_0_enc_ring_emit_ib,
- .emit_fence = vcn_v2_0_enc_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_enc_ring_test_ring,
- .test_ib = amdgpu_vcn_enc_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
- .insert_end = vcn_v2_0_enc_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1751,10 +1704,7 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
- adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
- else /* CHIP_ALDEBARAN */
- adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
+ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
}
@@ -1768,10 +1718,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
- adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
- else /* CHIP_ALDEBARAN */
- adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
+ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
@@ -2030,6 +1977,4 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- amdgpu_vcn_set_ras_funcs(adev);
}
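The vmhub selection moves from the shared amdgpu_ring_funcs tables into the per-ring vm_hub field, which is what allows the duplicated vcn_v2_6_* funcs tables above to be dropped: VCN 2.5 and 2.6 now share one table and sw_init picks the hub per ring. A minimal sketch of the pattern, with the helper name invented here purely for illustration:

	/* sw_init: VCN 2.5.0 sits behind MMHUB1, Aldebaran (VCN 2.6) behind MMHUB0. */
	static void vcn_v2_5_pick_vm_hub(struct amdgpu_device *adev, struct amdgpu_ring *ring)
	{
		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
			ring->vm_hub = AMDGPU_MMHUB_1;
		else
			ring->vm_hub = AMDGPU_MMHUB_0;
	}

The emit paths then resolve the hub through the ring itself, &ring->adev->vmhub[ring->vm_hub], instead of through ring->funcs->vmhub.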
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 9c8b5fd99037..3eab186261aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -78,11 +78,12 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
/**
- * vcn_v3_0_early_init - set function pointers
+ * vcn_v3_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v3_0_early_init(void *handle)
{
@@ -109,7 +110,7 @@ static int vcn_v3_0_early_init(void *handle)
vcn_v3_0_set_enc_ring_funcs(adev);
vcn_v3_0_set_irq_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -188,6 +189,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
@@ -211,6 +213,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
hw_prio, &adev->vcn.inst[i].sched_score);
@@ -1737,7 +1740,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = VCN_DEC_SW_CMD_NO_OP,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
.set_wptr = vcn_v3_0_dec_ring_set_wptr,
@@ -1770,6 +1772,10 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
+ /* if VCN0 is harvested, we can't support AV1 */
+ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ return -EINVAL;
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1894,7 +1900,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
.set_wptr = vcn_v3_0_dec_ring_set_wptr,
@@ -1995,7 +2000,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_enc_ring_get_rptr,
.get_wptr = vcn_v3_0_enc_ring_get_wptr,
.set_wptr = vcn_v3_0_enc_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1e2b22299975..bf0674039598 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -68,18 +68,27 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
- * vcn_v4_0_early_init - set function pointers
+ * vcn_v4_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
+ adev->vcn.harvest_config |= 1 << i;
+ dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
+ }
+ }
+ }
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -88,7 +97,7 @@ static int vcn_v4_0_early_init(void *handle)
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -140,7 +149,7 @@ static int vcn_v4_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
-
+ ring->vm_hub = AMDGPU_MMHUB_0;
sprintf(ring->name, "vcn_unified_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -172,6 +181,10 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -237,16 +250,11 @@ static int vcn_v4_0_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[i].ring_enc[0];
- if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
- ring->sched.ready = false;
- ring->no_scheduler = true;
- dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
- } else {
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v4_0_unified_ring_set_wptr(ring);
- ring->sched.ready = true;
- }
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1631,6 +1639,10 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
+ /* if VCN0 is harvested, we can't support AV1 */
+ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ return -EINVAL;
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1705,7 +1717,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
create = ptr + addr + offset - start;
- /* H246, HEVC and VP9 can run on any instance */
+ /* H264, HEVC and VP9 can run on any instance */
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
@@ -1719,7 +1731,29 @@ out:
return r;
}
-#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
+#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
+#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
+
+#define RADEON_VCN_ENGINE_INFO (0x30000001)
+#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
+
+#define RENCODE_ENCODE_STANDARD_AV1 2
+#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
+#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
+
+/* Return the offset in ib if id is found, -1 otherwise.
+ * To speed up the search, we only scan up to max_offset.
+ */
+static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
+{
+ int i;
+
+ for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
+ if (ib->ptr[i + 1] == id)
+ return i;
+ }
+ return -1;
+}
static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
@@ -1729,34 +1763,41 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_vcn_decode_buffer *decode_buffer;
uint64_t addr;
uint32_t val;
+ int idx;
/* The first instance can decode anything */
if (!ring->me)
return 0;
- /* unified queue ib header has 8 double words. */
- if (ib->length_dw < 8)
- return 0;
-
- val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE
- if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
+ /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
+ idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+ RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+ if (idx < 0) /* engine info is missing */
return 0;
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_dec_msg(p, job, addr);
+ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
+ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+ idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+ RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+ if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+ return vcn_v4_0_limit_sched(p, job);
+ }
+ return 0;
}
static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2085,6 +2126,4 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- amdgpu_vcn_set_ras_funcs(adev);
}
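The unified-queue IB begins with a sequence of { size-in-bytes, param-id, payload... } packets, which is why vcn_v4_0_enc_find_ib_param() advances by ib->ptr[i]/4 dwords per step and stops once a packet claims fewer than 8 bytes. A standalone sketch of the same walk over a plain dword array, assuming that packet layout (names here are illustrative):

	#include <stdint.h>

	/* Return the dword offset of the packet whose id matches, or -1. */
	static int find_ib_param(const uint32_t *ptr, int length_dw,
				 uint32_t id, int max_offset)
	{
		int i;

		for (i = 0; i < length_dw && i < max_offset && ptr[i] >= 8; i += ptr[i] / 4) {
			if (ptr[i + 1] == id)
				return i;
		}
		return -1;
	}

The patched cs handler first locates RADEON_VCN_ENGINE_INFO this way, reads the engine type two dwords past it, and for an AV1 encode session (RENCODE_ENCODE_STANDARD_AV1 in the session-init packet) restricts scheduling to the first VCN instance via vcn_v4_0_limit_sched().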
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 1706081d054d..536128447b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -38,6 +38,11 @@
#define mmIH_CHICKEN_ALDEBARAN 0x18d
#define mmIH_CHICKEN_ALDEBARAN_BASE_IDX 0
+#define mmIH_RETRY_INT_CAM_CNTL_ALDEBARAN 0x00ea
+#define mmIH_RETRY_INT_CAM_CNTL_ALDEBARAN_BASE_IDX 0
+#define IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE__SHIFT 0x10
+#define IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE_MASK 0x00010000L
+
static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
@@ -251,36 +256,14 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
return 0;
}
-/**
- * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
- *
- * @adev: amdgpu_device pointer
- *
- * Reroute VMC and UMC interrupts on primary ih ring to
- * ih ring 1 so they won't lose when bunches of page faults
- * interrupts overwhelms the interrupt handler(VEGA20)
- */
-static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+static uint32_t vega20_setup_retry_doorbell(u32 doorbell_index)
{
- uint32_t tmp;
+ u32 val = 0;
- /* vega20 ih reroute will go through psp this
- * function is used for newer asics starting arcturus
- */
- if (adev->ip_versions[OSSSYS_HWIP][0] >= IP_VERSION(4, 2, 1)) {
- /* Reroute to IH ring 1 for VMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
- /* Reroute IH ring 1 for UTCL2 */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
- }
+ val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, OFFSET, doorbell_index);
+ val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, ENABLE, 1);
+
+ return val;
}
/**
@@ -321,7 +304,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
/* psp firmware won't program IH_CHICKEN for aldebaran
* driver needs to program it properly according to
* MC_SPACE type in IH_RB_CNTL */
- if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) {
+ if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) ||
+ (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
if (adev->irq.ih.use_bus_addr) {
ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
@@ -332,8 +316,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
- if (i == 1)
- vega20_ih_reroute_ih(adev);
ret = vega20_ih_enable_ring(adev, ih[i]);
if (ret)
return ret;
@@ -346,6 +328,20 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
+ /* Allocate the doorbell for IH Retry CAM */
+ adev->irq.retry_cam_doorbell_index = (adev->doorbell_index.ih + 3) << 1;
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RETRY_CAM,
+ vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index));
+
+ /* Enable IH Retry CAM */
+ if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0))
+ WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
+ ENABLE, 1);
+ else
+ WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL, ENABLE, 1);
+
+ adev->irq.retry_cam_enabled = true;
+
/* enable interrupts */
ret = vega20_ih_toggle_interrupts(adev, true);
if (ret)
@@ -551,12 +547,14 @@ static int vega20_ih_sw_init(void *handle)
adev->irq.ih1.use_doorbell = true;
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
/* initialize ih control registers offset */
vega20_ih_init_register_offset(adev);
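With the reroute helper gone, retried page-fault interrupts are drained through the IH retry CAM instead: the driver reserves a doorbell slot three entries past the main IH doorbell, programs it using the IH_DOORBELL_RPTR layout, and switches the CAM on. A minimal sketch of composing that doorbell value, assuming as in the hunk above that doorbell_index entries are 64-bit slots converted to a 32-bit index by the << 1:

	u32 doorbell_index = (adev->doorbell_index.ih + 3) << 1;
	u32 val = 0;

	val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, OFFSET, doorbell_index);
	val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RETRY_CAM, val);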
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 12ef782eb478..531f173ade2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,10 +81,6 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
-#if IS_ENABLED(CONFIG_X86)
-#include <asm/intel-family.h>
-#endif
-
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1105,24 +1101,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
-static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void vi_enable_aspm(struct amdgpu_device *adev)
{
u32 data, orig;
@@ -1138,24 +1116,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
-static bool aspm_support_quirk_check(void)
-{
-#if IS_ENABLED(CONFIG_X86)
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
- return true;
-#endif
-}
-
static void vi_program_aspm(struct amdgpu_device *adev)
{
u32 data, data1, orig;
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;
if (adev->flags & AMD_IS_APU ||
@@ -1743,8 +1710,6 @@ static int vi_common_hw_init(void *handle)
/* move the golden regs per IP block */
vi_init_golden_registers(adev);
- /* enable pcie gen2/3 link */
- vi_pcie_gen3_enable(adev);
/* enable aspm */
vi_program_aspm(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 0c4c5499bb5c..73ca9aebf086 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -2936,3 +2936,490 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+
+static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
+ 0xbf820001, 0xbf8202d6,
+ 0xb8f8f802, 0x89788678,
+ 0xb8fbf803, 0x866eff78,
+ 0x00002000, 0xbf840009,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf85001a, 0x866eff7b,
+ 0x00000400, 0xbf85004d,
+ 0xbf8e0010, 0xb8fbf803,
+ 0xbf82fffa, 0x866eff7b,
+ 0x03c00900, 0xbf850011,
+ 0x866eff7b, 0x000071ff,
+ 0xbf840008, 0x866fff7b,
+ 0x00007080, 0xbf840001,
+ 0xbeee1a87, 0xb8eff801,
+ 0x8e6e8c6e, 0x866e6f6e,
+ 0xbf850006, 0x866eff6d,
+ 0x00ff0000, 0xbf850003,
+ 0x866eff7b, 0x00000400,
+ 0xbf850036, 0xb8faf807,
+ 0x867aff7a, 0x001f8000,
+ 0x8e7a8b7a, 0x8979ff79,
+ 0xfc000000, 0x87797a79,
+ 0xba7ff807, 0x00000000,
+ 0xb8faf812, 0xb8fbf813,
+ 0x8efa887a, 0xc0031bbd,
+ 0x00000010, 0xbf8cc07f,
+ 0x8e6e976e, 0x8979ff79,
+ 0x00800000, 0x87796e79,
+ 0xc0071bbd, 0x00000000,
+ 0xbf8cc07f, 0xc0071ebd,
+ 0x00000008, 0xbf8cc07f,
+ 0x86ee6e6e, 0xbf840001,
+ 0xbe801d6e, 0x866eff6d,
+ 0x01ff0000, 0xbf850005,
+ 0x8778ff78, 0x00002000,
+ 0x80ec886c, 0x82ed806d,
+ 0xbf820005, 0x866eff6d,
+ 0x01000000, 0xbf850002,
+ 0x806c846c, 0x826d806d,
+ 0x866dff6d, 0x0000ffff,
+ 0x8f7a8b79, 0x867aff7a,
+ 0x001f8000, 0xb97af807,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e8378, 0xb96ee0c2,
+ 0xbf800002, 0xb9780002,
+ 0xbe801f6c, 0x866dff6d,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xb8faf807,
+ 0x867aff7a, 0x001f8000,
+ 0x8e7a8b7a, 0x8979ff79,
+ 0xfc000000, 0x87797a79,
+ 0xba7ff807, 0x00000000,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
+ 0x877a8478, 0xb97af802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8fa2985, 0x807a817a,
+ 0x8e7a8a7a, 0x8e7a817a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0xbef1007c,
+ 0xbef00080, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611b3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611bba,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bfa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8fbf803, 0xbefe007c,
+ 0xbefc0070, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8f1f801, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbef600ff, 0x01000000,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
+ 0x00000078, 0x80788478,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
+ 0x00000078, 0x80788478,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
+ 0x00000078, 0x80788478,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
+ 0x00000078, 0x80788478,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
+ 0x00000078, 0x80788478,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf810000, 0x00000000,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 6770cbe3250a..f2087cc2e89d 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -33,15 +33,20 @@
* aldebaran:
* cpp -DASIC_FAMILY=CHIP_ALDEBARAN cwsr_trap_handler_gfx9.asm -P -o aldebaran.sp3
* sp3 aldebaran.sp3 -hex aldebaran.hex
+ *
+ * gc_9_4_3:
+ * cpp -DASIC_FAMILY=CHIP_GC_9_4_3 cwsr_trap_handler_gfx9.asm -P -o gc_9_4_3.sp3
+ * sp3 gc_9_4_3.sp3 -hex gc_9_4_3.hex
*/
#define CHIP_VEGAM 18
#define CHIP_ARCTURUS 23
#define CHIP_ALDEBARAN 25
+#define CHIP_GC_9_4_3 26
var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
-var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+var SINGLE_STEP_MISSED_WORKAROUND = (ASIC_FAMILY <= CHIP_ALDEBARAN) //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
/**************************************************************************/
/* variables */
@@ -77,6 +82,10 @@ var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK = 0x80
var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
+var SQ_WAVE_TRAPSTS_HOST_TRAP_MASK = 0x400000
+var SQ_WAVE_TRAPSTS_WAVE_BEGIN_MASK = 0x800000
+var SQ_WAVE_TRAPSTS_WAVE_END_MASK = 0x1000000
+var SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK = 0x2000000
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
@@ -95,10 +104,10 @@ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
-var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
-var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
-var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
-var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+var TTMP_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
+var TTMP_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
+var TTMP_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP_DEBUG_TRAP_ENABLED_MASK = 0x800000
/* Save */
var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
@@ -129,6 +138,11 @@ var s_save_alloc_size = s_save_trapsts //conflict
var s_save_m0 = ttmp5
var s_save_ttmps_lo = s_save_tmp //no conflict
var s_save_ttmps_hi = s_save_trapsts //no conflict
+#if ASIC_FAMILY >= CHIP_GC_9_4_3
+var s_save_ib_sts = ttmp13
+#else
+var s_save_ib_sts = ttmp11
+#endif
/* Restore */
var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
@@ -215,9 +229,15 @@ L_NOT_HALTED:
// Any concurrent SAVECTX will be handled upon re-entry once halted.
// Check non-maskable exceptions. memory_violation, illegal_instruction
- // and xnack_error exceptions always cause the wave to enter the trap
- // handler.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ // and debugger (host trap, wave start/end, trap after instruction)
+ // exceptions always cause the wave to enter the trap handler.
+ s_and_b32 ttmp2, s_save_trapsts, \
+ SQ_WAVE_TRAPSTS_MEM_VIOL_MASK | \
+ SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK | \
+ SQ_WAVE_TRAPSTS_HOST_TRAP_MASK | \
+ SQ_WAVE_TRAPSTS_WAVE_BEGIN_MASK | \
+ SQ_WAVE_TRAPSTS_WAVE_END_MASK | \
+ SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK
s_cbranch_scc1 L_FETCH_2ND_TRAP
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
@@ -265,9 +285,9 @@ L_FETCH_2ND_TRAP:
s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
s_waitcnt lgkmcnt(0)
- s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
- s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
- s_or_b32 ttmp11, ttmp11, ttmp2
+ s_lshl_b32 ttmp2, ttmp2, TTMP_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 s_save_ib_sts, s_save_ib_sts, TTMP_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 s_save_ib_sts, s_save_ib_sts, ttmp2
s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
@@ -1058,17 +1078,17 @@ function set_status_without_spi_prio(status, tmp)
end
function save_and_clear_ib_sts(tmp)
- // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
+ // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space s_save_ib_sts[31:26].
s_getreg_b32 tmp, hwreg(HW_REG_IB_STS)
s_and_b32 tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshl_b32 tmp, tmp, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
- s_or_b32 ttmp11, ttmp11, tmp
+ s_lshl_b32 tmp, tmp, (TTMP_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_andn2_b32 s_save_ib_sts, s_save_ib_sts, TTMP_SAVE_RCNT_FIRST_REPLAY_MASK
+ s_or_b32 s_save_ib_sts, s_save_ib_sts, tmp
s_setreg_imm32_b32 hwreg(HW_REG_IB_STS), 0x0
end
function restore_ib_sts(tmp)
- s_lshr_b32 tmp, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_lshr_b32 tmp, s_save_ib_sts, (TTMP_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
s_and_b32 tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
s_setreg_b32 hwreg(HW_REG_IB_STS), tmp
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6d291aa6386b..1b54a9aaae70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -93,7 +93,7 @@ int kfd_chardev_init(void)
if (err < 0)
goto err_register_chrdev;
- kfd_class = class_create(THIS_MODULE, kfd_dev_name);
+ kfd_class = class_create(kfd_dev_name);
err = PTR_ERR(kfd_class);
if (IS_ERR(kfd_class))
goto err_class_create;
@@ -1065,6 +1065,20 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
mutex_unlock(&p->svms.lock);
return -EADDRINUSE;
}
+
+	/* When registering a user buffer, check whether it has already been
+	 * registered by SVM via the buffer's CPU virtual address.
+	 */
+ if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
+ interval_tree_iter_first(&p->svms.objects,
+ args->mmap_offset >> PAGE_SHIFT,
+ (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
+ pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
+ args->mmap_offset);
+ mutex_unlock(&p->svms.lock);
+ return -EADDRINUSE;
+ }
+
mutex_unlock(&p->svms.lock);
#endif
mutex_lock(&p->mutex);
@@ -1127,8 +1141,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
/* Update the VRAM usage count */
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ uint64_t size = args->size;
+
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
+ size >>= 1;
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ }
mutex_unlock(&p->mutex);
@@ -1293,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
-
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ mutex_unlock(&p->mutex);
+
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1316,9 +1335,9 @@ get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
@@ -1332,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
+ bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
@@ -1384,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
- if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+ flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+ if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ }
+ mutex_unlock(&p->mutex);
+ if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1409,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
}
@@ -1567,6 +1590,58 @@ err_unlock:
return r;
}
+static int kfd_ioctl_export_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_export_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ void *mem;
+ int ret = 0;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->handle));
+ if (!mem) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
+ mutex_unlock(&p->mutex);
+ if (ret)
+ goto err_out;
+
+ ret = dma_buf_fd(dmabuf, args->flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ goto err_out;
+ }
+ /* dma_buf_fd assigns the reference count to the fd, no need to
+ * put the reference here.
+ */
+ args->dmabuf_fd = ret;
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&p->mutex);
+err_out:
+ return ret;
+}
+
/* Handle requests for watching SMI events */
static int kfd_ioctl_smi_events(struct file *filep,
struct kfd_process *p, void *data)
@@ -2749,6 +2824,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
kfd_ioctl_get_available_memory, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
+ kfd_ioctl_export_dmabuf, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -2879,8 +2957,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = dev->adev->rmmio_remap.bus_addr;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 3251f4783ba1..475e47027354 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1462,6 +1462,7 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca
num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
break;
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
*pcache_info = aldebaran_cache_info;
num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b8936340742b..00f528eb9812 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
@@ -203,6 +204,14 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
/* Navi1x+ */
if (gc_version >= IP_VERSION(10, 1, 1))
kfd->device_info.needs_pci_atomics = true;
+ } else if (gc_version < IP_VERSION(12, 0, 0)) {
+ /*
+ * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
+ * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
+ * PCIe atomics support.
+ */
+ kfd->device_info.needs_pci_atomics = true;
+ kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
}
} else {
kfd->device_info.doorbell_size = 4;
@@ -262,23 +271,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_FIJI:
- gfx_target_version = 80003;
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_POLARIS10:
gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS11:
- gfx_target_version = 80003;
- if (!vf)
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_POLARIS12:
- gfx_target_version = 80003;
- if (!vf)
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_VEGAM:
gfx_target_version = 80003;
if (!vf)
@@ -328,6 +326,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90010;
f2g = &aldebaran_kfd2kgd;
break;
+ case IP_VERSION(9, 4, 3):
+ gfx_target_version = 90400;
+ f2g = &aldebaran_kfd2kgd;
+ break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
gfx_target_version = 100100;
@@ -460,6 +462,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
@@ -635,7 +641,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
svm_migrate_init(kfd->adev);
- if (kgd2kfd_resume_iommu(kfd))
+ if (kfd_resume_iommu(kfd))
goto device_iommu_error;
if (kfd_resume(kfd))
@@ -784,6 +790,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
+ if (!kfd->init_complete)
+ return 0;
+
+ return kfd_resume_iommu(kfd);
+}
+
+static int kfd_resume_iommu(struct kfd_dev *kfd)
+{
int err = 0;
err = kfd_iommu_resume(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ecb4c3abc629..7a95698d83f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
if (q->wptr_bo) {
- wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
+ wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
}
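
The write-pointer fix above stops deriving the offset from the buffer's KFD VA and instead masks the user VA with the page size, which yields the byte offset within the page backing the write pointer. A tiny illustration of that arithmetic (user-space sketch, 4 KiB page assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* assumed page size for illustration */

int main(void)
{
	uint64_t write_ptr = 0x7f12345678ULL;		/* hypothetical user VA */
	uint64_t wptr_addr_off = write_ptr & (PAGE_SIZE - 1);

	/* 0x678: the in-page offset added to the pinned page's MC address */
	printf("wptr_addr_off = 0x%llx\n", (unsigned long long)wptr_addr_off);
	return 0;
}
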
@@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (init_mqd_managers(dqm))
goto out_free;
- if (allocate_hiq_sdma_mqd(dqm)) {
+ if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
@@ -2397,7 +2397,8 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.uninitialize(dqm);
- deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
+ if (!dqm->dev->shared_resources.enable_mes)
+ deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index d119070956fb..8b2dd2670ab7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -59,30 +59,27 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) {
- /* Aldebaran can safely support different XNACK modes
- * per process
- */
- if (!pdd->process->xnack_enabled)
- qpd->sh_mem_config |=
- 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
- } else if (dqm->dev->noretry &&
- !dqm->dev->use_iommu_v2) {
- qpd->sh_mem_config |=
- 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
- }
+ if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
+ if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {
+ if (!pdd->process->xnack_enabled)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+ else
+ qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT);
+ }
+
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
- pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+ qpd->sh_mem_config);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cd4e61bf0493..38c9e1ca6691 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
if (!pdd->doorbell_index) {
int r = kfd_alloc_process_doorbells(pdd->dev,
&pdd->doorbell_index);
- if (r)
+ if (r < 0)
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 729d26d648af..c894cf8f7c50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
struct kfd_event_waiter *event_waiters;
uint32_t i;
- event_waiters = kmalloc_array(num_events,
- sizeof(struct kfd_event_waiter),
- GFP_KERNEL);
+ event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
+ GFP_KERNEL);
if (!event_waiters)
return NULL;
- for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+ for (i = 0; i < num_events; i++)
init_wait(&event_waiters[i].wait);
- event_waiters[i].activated = false;
- }
return event_waiters;
}
@@ -1052,8 +1049,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
- | VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+ | VM_DONTDUMP | VM_PFNMAP);
pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 10048ce16aea..54933903bcb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch)
+ dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
@@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t i, j;
int r;
- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
- prange->last);
+ pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+ prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
- r = svm_range_vram_node_new(adev, prange, true);
- if (r) {
- dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
- goto out;
- }
-
- amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+ amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
@@ -391,14 +385,14 @@ out_free_vram_pages:
migrate->dst[i + 3] = 0;
}
#endif
-out:
+
return r;
}
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger)
+ uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
+ uint64_t ttm_res_offset;
unsigned long cpages = 0;
long r = 0;
@@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
+ r = svm_range_vram_node_new(adev, prange, true);
+ if (r) {
+ dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+ return r;
+ }
+ ttm_res_offset = prange->offset << PAGE_SHIFT;
+
for (addr = start; addr < end;) {
unsigned long next;
@@ -528,18 +530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
+ ttm_res_offset += next - addr;
addr = next;
}
if (cpages)
prange->actual_loc = best_loc;
+ else
+ svm_range_vram_node_free(prange);
return r < 0 ? r : 0;
}
@@ -1027,8 +1032,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
/* Disable SVM support capability */
pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
- devm_release_mem_region(adev->dev, res->start,
- res->end - res->start + 1);
+ devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 09b966dc3768..aee2212e52f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -77,6 +77,7 @@ err_ioctl:
static void kfd_exit(void)
{
+ kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 4f6390f3236e..5aa75f72caa1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -143,6 +143,13 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+ /*
+ * GFX11 RS64 CPFW version >= 509 supports PCIe atomics support
+ * acknowledgment.
+ */
+ if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
+ m->cp_hqd_hq_status0 |= 1 << 29;
+
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
@@ -308,11 +315,16 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct queue_properties *q)
{
struct v11_sdma_mqd *m;
+ int size;
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
- memset(m, 0, sizeof(struct v11_sdma_mqd));
+ if (mm->dev->shared_resources.enable_mes)
+ size = PAGE_SIZE;
+ else
+ size = sizeof(struct v11_sdma_mqd);
+ memset(m, 0, size);
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
@@ -345,6 +357,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+ m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
+ << SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
+ & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;
+
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
@@ -443,6 +459,14 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
+		/*
+		 * Use the generic MQD allocation and free functions for SDMA
+		 * MQDs when MES is enabled.
+		 */
+ if (dev->shared_resources.enable_mes) {
+ mqd->allocate_mqd = allocate_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
+ }
pr_debug("%s@%i\n", __func__, __LINE__);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 0778e587a2d6..fdbfd725841f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -135,6 +135,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
{
uint64_t addr;
struct v9_mqd *m;
+ struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev;
m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
@@ -167,6 +168,20 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ /* On GC 9.4.3, DW 41 is re-purposed as
+ * compute_tg_chunk_size.
+ * TODO: review this setting when active CUs in the
+ * partition play a role
+ */
+ m->compute_static_thread_mgmt_se6 = 1;
+ }
+ } else {
+ /* PM4 queue */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ m->compute_static_thread_mgmt_se6 = 0;
+ /* TODO: program pm4_target_xcc */
+ }
}
if (q->tba_addr) {
@@ -209,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev;
struct v9_mqd *m;
m = get_mqd(mqd);
@@ -254,10 +270,13 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+ m->cp_hqd_pq_control |=
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+ m->cp_hqd_pq_control |=
+ CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index ed02b6d8bf63..f612325241aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -238,7 +238,8 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
pm->pmf = &kfd_vi_pm_funcs;
break;
default:
- if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2))
+ if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))
pm->pmf = &kfd_aldebaran_pm_funcs;
else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 552c3ac85a13..94a438956868 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -206,6 +206,9 @@ enum cache_policy {
#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
+#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
+ ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
@@ -926,6 +929,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
+void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 51b1683ac5c1..07a9eaf9b7d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -344,7 +344,7 @@ static const struct sysfs_ops kfd_procfs_ops = {
.show = kfd_procfs_show,
};
-static struct kobj_type procfs_type = {
+static const struct kobj_type procfs_type = {
.release = kfd_procfs_kobj_release,
.sysfs_ops = &kfd_procfs_ops,
};
@@ -469,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
};
-static struct kobj_type procfs_queue_type = {
+static const struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
.default_groups = procfs_queue_groups,
};
@@ -478,7 +478,7 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct kobj_type procfs_stats_type = {
+static const struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
.release = kfd_procfs_kobj_release,
};
@@ -487,7 +487,7 @@ static const struct sysfs_ops sysfs_counters_ops = {
.show = kfd_sysfs_counters_show,
};
-static struct kobj_type sysfs_counters_type = {
+static const struct kobj_type sysfs_counters_type = {
.sysfs_ops = &sysfs_counters_ops,
.release = kfd_procfs_kobj_release,
};
@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+
+ mmu_notifier_put(&p->mmu_notifier);
+}
+
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
return;
mutex_lock(&kfd_processes_mutex);
+ /*
+ * Do early return if table is empty.
+ *
+ * This could potentially happen if this function is called concurrently
+	 * by mmu_notifier and by kfd_cleanup_processes.
+ *
+ */
+ if (hash_empty(kfd_processes_table)) {
+ mutex_unlock(&kfd_processes_mutex);
+ return;
+ }
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
- cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
-
- /* Indicate to other users that MM is no longer valid */
- p->mm = NULL;
-
- mmu_notifier_put(&p->mmu_notifier);
+ kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
+/*
+ * This code handles the case where the driver is unloaded before all
+ * mm_structs are released. We need to free the remaining kfd_process
+ * structures safely and avoid racing with mmu_notifier callbacks that
+ * might also try to free them.
+ */
+void kfd_cleanup_processes(void)
+{
+ struct kfd_process *p;
+ struct hlist_node *p_temp;
+ unsigned int temp;
+ HLIST_HEAD(cleanup_list);
+
+	/*
+	 * Move all remaining kfd_process entries from the process table to a
+	 * temporary list for processing. Once that is done, the mmu_notifier
+	 * release callback will no longer find them in the table and will
+	 * return early, avoiding double-free issues.
+	 */
+ mutex_lock(&kfd_processes_mutex);
+ hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+ hash_del_rcu(&p->kfd_processes);
+ synchronize_srcu(&kfd_processes_srcu);
+ hlist_add_head(&p->kfd_processes, &cleanup_list);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+
+ hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+ kfd_process_notifier_release_internal(p);
+
+	/*
+	 * Ensure that all outstanding free_notifier callbacks are invoked,
+	 * triggering the release of the kfd_process structures.
+	 */
+ mmu_notifier_synchronize();
+}
+
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
@@ -1330,7 +1383,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
- if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
+ if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
continue;
/* GFXv10 and later GPUs do not support shader preemption
@@ -1563,6 +1616,8 @@ err_free_pdd:
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct file *drm_file)
{
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
struct kfd_process *p;
struct kfd_dev *dev;
int ret;
@@ -1573,10 +1628,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (pdd->drm_priv)
return -EBUSY;
+ ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
p = pdd->process;
dev = pdd->dev;
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
&p->ef);
if (ret) {
@@ -1593,7 +1653,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
+ ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
if (ret)
goto err_set_pasid;
@@ -1607,6 +1667,7 @@ err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
pdd->drm_priv = NULL;
+ amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
return ret;
}
@@ -1978,8 +2039,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
return -ENOMEM;
}
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
- | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 5137476ec18e..4236539d9f93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
cleanup:
- if (dev->shared_resources.enable_mes)
- uninit_queue(*q);
+ uninit_queue(*q);
+ *q = NULL;
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 814f99888ab1..96a138a39515 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -570,6 +571,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
goto reserve_bo_failed;
}
+ if (clear) {
+ r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+ if (r) {
+ pr_debug("failed %d to sync bo\n", r);
+ amdgpu_bo_unreserve(bo);
+ goto reserve_bo_failed;
+ }
+ }
+
r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
@@ -2162,7 +2172,15 @@ restart:
pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
- &pdd->dev->adev->irq.ih1);
+ pdd->dev->adev->irq.retry_cam_enabled ?
+ &pdd->dev->adev->irq.ih :
+ &pdd->dev->adev->irq.ih1);
+
+ if (pdd->dev->adev->irq.retry_cam_enabled)
+ amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
+ &pdd->dev->adev->irq.ih_soft);
+
+
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bceb1a5b2518..8e4124dcb6e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -278,7 +278,7 @@ static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
-static struct kobj_type sysprops_type = {
+static const struct kobj_type sysprops_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -318,7 +318,7 @@ static const struct sysfs_ops iolink_ops = {
.show = iolink_show,
};
-static struct kobj_type iolink_type = {
+static const struct kobj_type iolink_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -350,7 +350,7 @@ static const struct sysfs_ops mem_ops = {
.show = mem_show,
};
-static struct kobj_type mem_type = {
+static const struct kobj_type mem_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -395,7 +395,7 @@ static const struct sysfs_ops cache_ops = {
.show = kfd_cache_show,
};
-static struct kobj_type cache_type = {
+static const struct kobj_type cache_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
@@ -566,7 +566,7 @@ static const struct sysfs_ops node_ops = {
.show = node_show,
};
-static struct kobj_type node_type = {
+static const struct kobj_type node_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
p2plink->attr.name = "properties";
p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&iolink->attr);
+ sysfs_attr_init(&p2plink->attr);
ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 2efe93f74f84..2d8e55e29637 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
+ select DRM_AMD_DC_FP if (X86 || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@@ -20,17 +20,10 @@ config DRM_AMD_DC
panic on most architectures. We'll revert this when the following bug report
has been resolved: https://github.com/llvm/llvm-project/issues/41896.
-config DRM_AMD_DC_DCN
+config DRM_AMD_DC_FP
def_bool n
help
- Raven, Navi, and newer family support for display engine
-
-config DRM_AMD_DC_HDCP
- bool "Enable HDCP support in DC"
- depends on DRM_AMD_DC
- select DRM_DISPLAY_HDCP_HELPER
- help
- Choose this option if you want to support HDCP authentication.
+ Floating point support, required for DCN-based SoCs
config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
@@ -51,7 +44,7 @@ config DEBUG_KERNEL_DC
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
depends on DEBUG_FS
- depends on DRM_AMD_DC_DCN
+ depends on DRM_AMD_DC_FP
help
Choose this option if you want to
support secure display
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 2633de77de5e..0d610cb376bb 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -36,18 +36,14 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc
-ifdef CONFIG_DRM_AMD_DC_HDCP
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
-endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src
-ifdef CONFIG_DRM_AMD_DC_HDCP
DAL_LIBS += modules/hdcp
-endif
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 90fb0f3cdb6f..249b073f6a23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -33,7 +33,7 @@ AMDGPUDM = \
amdgpu_dm_mst_types.o \
amdgpu_dm_color.o
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
endif
@@ -41,9 +41,7 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
endif
-ifdef CONFIG_DRM_AMD_DC_HDCP
AMDGPUDM += amdgpu_dm_hdcp.o
-endif
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 50c783e19f5a..8b4b186c57f5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -28,7 +28,6 @@
#include "dm_services_types.h"
#include "dc.h"
-#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
@@ -39,6 +38,11 @@
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
#include "vid.h"
#include "amdgpu.h"
@@ -48,10 +52,8 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
-#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
@@ -66,7 +68,7 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-#include "i2caux_interface.h"
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -104,7 +106,6 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
-#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
@@ -210,7 +211,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
@@ -262,7 +263,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- uint32_t v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start, v_blank_end, h_position, v_position;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -341,12 +342,52 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
{
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
- else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
return true;
else
return false;
}
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and streams via the
+ * dc_update_planes_and_stream() function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for dc_update_planes_and_stream() that does any required configuration
+ * before passing control to DC.
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
/**
* dm_pflip_high_irq() - Handle pageflip interrupt
* @interrupt_params: ignored
@@ -361,7 +402,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
- uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -391,7 +432,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
WARN_ON(!e);
- vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
/* Fixed refresh rate, or VRR scanout position outside front-porch? */
if (!vrr_active ||
@@ -465,7 +506,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
if (acrtc) {
- vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dev = acrtc->base.dev;
vblank = &drm_dev->vblank[acrtc->base.index];
previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
@@ -489,7 +530,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* if a pageflip happened inside front-porch.
*/
if (vrr_active) {
- dm_crtc_handle_vblank(acrtc);
+ amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
if (acrtc->dm_irq_params.stream &&
@@ -529,7 +570,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (!acrtc)
return;
- vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
vrr_active, acrtc->dm_irq_params.active_planes);
@@ -541,7 +582,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!vrr_active)
- dm_crtc_handle_vblank(acrtc);
+ amdgpu_dm_crtc_handle_vblank(acrtc);
/**
* Following stuff must happen at start of vblank, for crc
@@ -648,7 +689,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
- uint8_t link_index = 0;
+ u8 link_index = 0;
struct drm_device *dev;
if (adev == NULL)
@@ -672,7 +713,14 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
- DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
hpd_aconnector = aconnector;
break;
}
@@ -749,7 +797,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
- uint32_t count = 0;
+ u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
struct dc_link *plink = NULL;
@@ -772,15 +820,14 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
DRM_ERROR("Failed to allocate dmub_hpd_wrk");
return;
}
- dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
+ dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
+ GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
- if (dmub_hpd_wrk->dmub_notify)
- memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
dmub_hpd_wrk->adev = adev;
if (notify.type == DMUB_NOTIFICATION_HPD) {
plink = adev->dm.dc->links[notify.link_index];
@@ -1015,7 +1062,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
- uint32_t i, fw_inst_const_size, fw_bss_data_size;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
if (!dmub_srv)
@@ -1176,32 +1223,46 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
- uint64_t pt_base;
- uint32_t logical_addr_low;
- uint32_t logical_addr_high;
- uint32_t agp_base, agp_bot, agp_top;
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
memset(pa_config, 0, sizeof(*pa_config));
- logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
- pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
- else
- logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
+ /* AGP aperture is disabled */
+ if (agp_bot == agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
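
For orientation, the aperture low/high values computed above are byte addresses shifted right by 18 bits, i.e. expressed in 256 KiB units (inferred from the shift; the register granularity is an assumption here, not taken from hardware docs). A small sketch of the conversion with made-up framebuffer bounds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fb_start = 0x008000000000ULL;	/* hypothetical FB base  */
	uint64_t fb_end   = 0x0080ffffffffULL;	/* hypothetical FB limit */

	/* >> 18 converts byte addresses into 256 KiB granules */
	printf("logical_addr_low  = 0x%llx\n", (unsigned long long)(fb_start >> 18));
	printf("logical_addr_high = 0x%llx\n", (unsigned long long)(fb_end   >> 18));
	return 0;
}
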
@@ -1225,10 +1286,25 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
- pa_config->is_hvm_enabled = 0;
+ pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}
+static void force_connector_state(
+ struct amdgpu_dm_connector *aconnector,
+ enum drm_connector_force force_state)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ aconnector->base.force = force_state;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ mutex_lock(&aconnector->hpd_lock);
+ drm_kms_helper_connector_hotplug_event(connector);
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
struct hpd_rx_irq_offload_work *offload_work;
@@ -1237,6 +1313,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
struct amdgpu_device *adev;
enum dc_connection_type new_connection_type = dc_connection_none;
unsigned long flags;
+ union test_response test_response;
+
+ memset(&test_response, 0, sizeof(test_response));
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
@@ -1250,7 +1329,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
- if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
@@ -1261,15 +1340,49 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
goto skip;
mutex_lock(&adev->dm.dc_lock);
- if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
+
+ if (aconnector->timing_changed) {
+ /* force connector disconnect and reconnect */
+ force_connector_state(aconnector, DRM_FORCE_OFF);
+ msleep(100);
+ force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+ }
+
+ test_response.bits.ACK = 1;
+
+ core_link_write_dpcd(
+ dc_link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ }
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
- hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
- dc_link_dp_handle_link_loss(dc_link);
+		/* offload_work->data comes from handle_hpd_rx_irq ->
+		 * schedule_hpd_rx_offload_work. This is the deferred handling of
+		 * an HPD short pulse. By the time we get here the link status may
+		 * have changed, so read the latest link status from the DPCD
+		 * registers. If the link status is good, skip re-running link
+		 * training.
+		 */
+ union hpd_irq_data irq_data;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+
+		/* Before dc_link_dp_handle_link_loss(), clear the flag so that a
+		 * new link-loss work request can be queued if the link is lost
+		 * again at the end of dc_link_dp_handle_link_loss().
+		 */
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_link_loss = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+ if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+ dc_link_check_link_loss_status(dc_link, &irq_data))
+ dc_link_dp_handle_link_loss(dc_link);
}
mutex_unlock(&adev->dm.dc_lock);
@@ -1419,9 +1532,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dc_callback_init init_params;
-#endif
int r;
adev->dm.ddev = adev_to_drm(adev);
@@ -1429,9 +1540,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&init_params, 0, sizeof(init_params));
-#endif
mutex_init(&adev->dm.dpia_aux_lock);
mutex_init(&adev->dm.dc_lock);
@@ -1513,6 +1622,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -1534,6 +1646,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+ /* Disable SubVP + DRR config by default */
+ init_data.flags.disable_subvp_drr = true;
+ if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
+ init_data.flags.disable_subvp_drr = false;
+
init_data.flags.seamless_boot_edp_requested = false;
if (check_seamless_boot_capability(adev)) {
@@ -1589,6 +1706,26 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
+	/* TODO: A recent drm MST change moved ownership of the
+	 * vc_next_start_slot update from the driver into drm. This forces us
+	 * to make sure vc_next_start_slot is updated through the drm function
+	 * every time, regardless of whether mst_state is active; otherwise the
+	 * next hotplug reports a wrong start_slot number. As a temporary
+	 * solution we also notify drm of MST payload deallocation when the
+	 * link is no longer of MST type at stream uncommit, buying time to
+	 * work on a proper fix. Ideally, when dm_helpers_dp_mst_stop_top_mgr
+	 * is triggered, we should tell drm to do a complete "reset" of its
+	 * state and stop calling further drm MST functions once the link is no
+	 * longer MST. This can happen when an MST hub/display is unplugged;
+	 * when the stream uncommit comes later, after the unplug, we should
+	 * only reset hardware state.
+	 */
+ adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
+
+ if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+ DRM_INFO("DP-HDMI FRL PCON supported\n");
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1629,7 +1766,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1640,9 +1776,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
-#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs) {
+ DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+ }
#endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1730,20 +1868,20 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue = NULL;
}
- for (i = 0; i < adev->dm.display_indexes_num; i++) {
- drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
- }
-
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- kfree(adev->dm.crc_rd_wrk);
- adev->dm.crc_rd_wrk = NULL;
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
}
#endif
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
adev->dm.hdcp_workqueue = NULL;
@@ -1751,9 +1889,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (adev->dm.dc)
dc_deinit_callbacks(adev->dm.dc);
-#endif
- dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ if (adev->dm.dc)
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
@@ -1875,25 +2013,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
- if (r == -ENOENT) {
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
adev->dm.fw_dmcu = NULL;
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
- fw_name_dmcu);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
- if (r) {
dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
}
@@ -1939,7 +2069,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
- const char *fw_name_dmub;
enum dmub_asic dmub_asic;
enum dmub_status status;
int r;
@@ -1947,73 +2076,43 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(2, 1, 0):
dmub_asic = DMUB_ASIC_DCN21;
- fw_name_dmub = FIRMWARE_RENOIR_DMUB;
- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
- fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
case IP_VERSION(3, 0, 0):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
- } else {
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
- }
+ dmub_asic = DMUB_ASIC_DCN30;
break;
case IP_VERSION(3, 0, 1):
dmub_asic = DMUB_ASIC_DCN301;
- fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
break;
case IP_VERSION(3, 0, 2):
dmub_asic = DMUB_ASIC_DCN302;
- fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
case IP_VERSION(3, 0, 3):
dmub_asic = DMUB_ASIC_DCN303;
- fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
- fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
case IP_VERSION(3, 1, 4):
dmub_asic = DMUB_ASIC_DCN314;
- fw_name_dmub = FIRMWARE_DCN_314_DMUB;
break;
case IP_VERSION(3, 1, 5):
dmub_asic = DMUB_ASIC_DCN315;
- fw_name_dmub = FIRMWARE_DCN_315_DMUB;
break;
case IP_VERSION(3, 1, 6):
dmub_asic = DMUB_ASIC_DCN316;
- fw_name_dmub = FIRMWARE_DCN316_DMUB;
break;
case IP_VERSION(3, 2, 0):
dmub_asic = DMUB_ASIC_DCN32;
- fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
break;
case IP_VERSION(3, 2, 1):
dmub_asic = DMUB_ASIC_DCN321;
- fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
default:
/* ASIC doesn't support DMUB. */
return 0;
}
- r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
- if (r) {
- DRM_ERROR("DMUB firmware loading failed: %d\n", r);
- return 0;
- }
-
- r = amdgpu_ucode_validate(adev->dm.dmub_fw);
- if (r) {
- DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
@@ -2080,7 +2179,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
* TODO: Move this into GART.
*/
r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
if (r)
@@ -2135,11 +2236,8 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
-
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return 0;
}
@@ -2165,6 +2263,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
DRM_ERROR("DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
break;
}
}
@@ -2200,9 +2300,9 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
/* In the case where abm is implemented on dmcub,
- * dmcu object will be null.
- * ABM 2.4 and up are implemented on dmcub.
- */
+ * dmcu object will be null.
+ * ABM 2.4 and up are implemented on dmcub.
+ */
if (dmcu) {
if (!dmcu_load_iram(dmcu, params))
return -EINVAL;
@@ -2210,7 +2310,7 @@ static int dm_late_init(void *handle)
struct dc_link *edp_links[MAX_NUM_EDP];
int edp_num;
- get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
for (i = 0; i < edp_num; i++) {
if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
return -EINVAL;
@@ -2233,7 +2333,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
- aconnector->mst_port)
+ aconnector->mst_root)
continue;
mgr = &aconnector->mst_mgr;
@@ -2241,6 +2341,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
+ /* if extended timeout is supported in hardware,
+ * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+ * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+ */
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ if (!dp_is_lttpr_present(aconnector->dc_link))
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -2378,11 +2486,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
enable ? "enable" : "disable");
if (enable) {
- rc = dm_enable_vblank(&acrtc->base);
+ rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
if (rc)
DRM_WARN("Failed to enable vblank interrupts\n");
} else {
- dm_disable_vblank(&acrtc->base);
+ amdgpu_dm_crtc_disable_vblank(&acrtc->base);
}
}
@@ -2425,7 +2533,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
- res = dc_commit_state(dc, context);
+ res = dc_commit_streams(dc, context->streams, context->stream_count);
fail:
dc_release_state(context);
@@ -2486,7 +2594,7 @@ struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- uint32_t i;
+ u32 i;
struct drm_connector_state *new_con_state;
struct drm_connector *connector;
struct drm_crtc *crtc_from_state;
@@ -2611,10 +2719,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
bundle->surface_updates[m].surface->force_full_update =
true;
}
- dc_commit_updates_for_stream(
- dm->dc, bundle->surface_updates,
- dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update, dc_state);
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
}
cleanup:
@@ -2684,7 +2795,7 @@ static int dm_resume(void *handle)
dc_enable_dmub_outbox(adev->dm.dc);
}
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2734,16 +2845,18 @@ static int dm_resume(void *handle)
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->dc_link)
+ continue;
+
/*
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link &&
- aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
- if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -2850,7 +2963,7 @@ const struct amdgpu_ip_block_version dm_ip_block =
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
- .get_format_info = amd_get_format_info,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -2863,30 +2976,18 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
struct amdgpu_dm_backlight_caps *caps;
- struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
- struct dc_link *link = NULL;
struct drm_luminance_range_info *luminance_range;
- int i;
- if (!aconnector || !aconnector->dc_link)
- return;
-
- link = aconnector->dc_link;
- if (link->connector_signal != SIGNAL_TYPE_EDP)
+ if (aconnector->bl_idx == -1 ||
+ aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
adev = drm_to_adev(conn_base->dev);
- dm = &adev->dm;
- for (i = 0; i < dm->num_of_edps; i++) {
- if (link == dm->backlight_link[i])
- break;
- }
- if (i >= dm->num_of_edps)
- return;
- caps = &dm->backlight_caps[i];
+
+ caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
@@ -2901,8 +3002,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = true;
luminance_range = &conn_base->display_info.luminance_range;
- caps->aux_min_input_signal = luminance_range->min_luminance;
- caps->aux_max_input_signal = luminance_range->max_luminance;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
}
void amdgpu_dm_update_connector_after_detect(
@@ -3021,6 +3128,13 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid);
}
+ if (!aconnector->timing_requested) {
+ aconnector->timing_requested =
+ kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ dm_error("failed to create aconnector->requested_timing\n");
+ }
+
drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
@@ -3032,11 +3146,11 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
+ kfree(aconnector->timing_requested);
+ aconnector->timing_requested = NULL;
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -3053,9 +3167,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
bool ret = false;
if (adev->dm.disable_hpd_irq)
@@ -3067,16 +3179,16 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
*/
mutex_lock(&aconnector->hpd_lock);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
dm_con_state->update_hdcp = true;
}
-#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
- if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ aconnector->timing_changed = false;
+
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3117,8 +3229,8 @@ static void handle_hpd_irq(void *param)
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
- uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- uint8_t dret;
+ u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ u8 dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;
@@ -3146,7 +3258,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
- uint8_t retry;
+ u8 retry;
dret = 0;
process_count++;
@@ -3165,7 +3277,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
dpcd_bytes_to_read - 1;
for (retry = 0; retry < 3; retry++) {
- uint8_t wret;
+ u8 wret;
wret = drm_dp_dpcd_write(
&aconnector->dm_dp_aux.aux,
@@ -3225,7 +3337,7 @@ static void handle_hpd_rx_irq(void *param)
union hpd_irq_data hpd_irq_data;
bool link_loss = false;
bool has_left_work = false;
- int idx = aconnector->base.index;
+ int idx = dc_link->link_index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3279,7 +3391,7 @@ static void handle_hpd_rx_irq(void *param)
out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
- if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3317,12 +3429,10 @@ out:
}
}
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
if (adev->dm.hdcp_workqueue)
hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
}
-#endif
if (dc_link->type != dc_connection_mst_branch)
drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
@@ -3367,7 +3477,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
aconnector;
}
}
@@ -4071,16 +4181,18 @@ static const struct backlight_ops amdgpu_dm_backlight_ops = {
};
static void
-amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
{
- char bl_name[16];
+ struct drm_device *drm = aconnector->base.dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
+ char bl_name[16];
- amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
- dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
+ if (aconnector->bl_idx == -1)
+ return;
if (!acpi_video_backlight_use_native()) {
- drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
+ drm_info(drm, "Skipping amdgpu DM backlight registration\n");
/* Try registering an ACPI video backlight device instead. */
acpi_video_register_backlight();
return;
@@ -4091,17 +4203,16 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
- adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
+ drm->primary->index + aconnector->bl_idx);
- dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
- adev_to_drm(dm->adev)->dev,
- dm,
- &amdgpu_dm_backlight_ops,
- &props);
+ dm->backlight_dev[aconnector->bl_idx] =
+ backlight_device_register(bl_name, aconnector->base.kdev, dm,
+ &amdgpu_dm_backlight_ops, &props);
- if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
+ if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
DRM_ERROR("DM: Backlight registration failed!\n");
- else
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ } else
DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
@@ -4146,24 +4257,29 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
}
-static void register_backlight_device(struct amdgpu_display_manager *dm,
- struct dc_link *link)
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
{
- if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
- link->type != dc_connection_none) {
- /*
- * Event if registration failed, we should continue with
- * DM initialization because not having a backlight control
- * is better then a black screen.
- */
- if (!dm->backlight_dev[dm->num_of_edps])
- amdgpu_dm_register_backlight_device(dm);
+ struct dc_link *link = aconnector->dc_link;
+ int bl_idx = dm->num_of_edps;
- if (dm->backlight_dev[dm->num_of_edps]) {
- dm->backlight_link[dm->num_of_edps] = link;
- dm->num_of_edps++;
- }
+ if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+ link->type == dc_connection_none)
+ return;
+
+ if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+ drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
+ return;
}
+
+ aconnector->bl_idx = bl_idx;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+ dm->backlight_link[bl_idx] = link;
+ dm->num_of_edps++;
+
+ update_connector_ext_caps(aconnector);
}
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -4179,20 +4295,23 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
- int32_t i;
+ s32 i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- uint32_t link_cnt;
- int32_t primary_planes;
+ u32 link_cnt;
+ s32 primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
/* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+ amdgpu_dm_set_irq_funcs(adev);
+
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -4236,20 +4355,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
continue;
- if (!plane->blends_with_above || !plane->blends_with_below)
- continue;
-
if (!plane->pixel_format_support.argb8888)
continue;
+ if (max_overlay-- == 0)
+ break;
+
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
-
- /* Only create one overlay plane. */
- break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
@@ -4328,7 +4444,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link = dc_get_link_at_index(dm->dc, i);
- if (!dc_link_detect_sink(link, &new_connection_type))
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -4343,10 +4459,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (ret) {
amdgpu_dm_update_connector_after_detect(aconnector);
- register_backlight_device(dm, link);
-
- if (dm->num_of_edps)
- update_connector_ext_caps(aconnector);
+ setup_backlight_device(dm, aconnector);
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -4500,9 +4613,75 @@ DEVICE_ATTR_WO(s3_debug);
#endif
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ if (r)
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+ return r;
+}
+
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -4615,8 +4794,6 @@ static int dm_early_init(void *handle)
break;
}
- amdgpu_dm_set_irq_funcs(adev);
-
if (adev->mode_info.funcs == NULL)
adev->mode_info.funcs = &dm_display_funcs;
@@ -4632,7 +4809,7 @@ static int dm_early_init(void *handle)
#endif
adev->dc_enabled = true;
- return 0;
+ return dm_init_microcode(adev);
}
static bool modereset_required(struct drm_crtc_state *crtc_state)
@@ -4697,7 +4874,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
- const uint64_t tiling_flags,
+ const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
bool tmz_surface,
@@ -4795,7 +4972,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
@@ -4804,7 +4981,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
if (ret)
return ret;
- fill_blending_from_plane_state(
+ amdgpu_dm_plane_fill_blending_from_plane_state(
plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
@@ -4823,7 +5000,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
int ret;
bool force_disable_dcc = false;
- ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
return ret;
@@ -4872,7 +5049,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
struct rect *dirty_rect, int32_t x,
- int32_t y, int32_t width, int32_t height,
+ s32 y, s32 width, s32 height,
int *i, bool ffu)
{
if (*i > DC_MAX_DIRTY_RECTS)
@@ -4908,6 +5085,7 @@ out:
* @new_plane_state: New state of @plane
* @crtc_state: New state of CRTC connected to the @plane
* @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @dirty_regions_changed: dirty regions changed
*
* For PSR SU, DC informs the DMUB uController of dirty rectangle regions
* (referred to as "damage clips" in DRM nomenclature) that require updating on
@@ -4924,15 +5102,17 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *old_plane_state,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
- struct dc_flip_addrs *flip_addrs)
+ struct dc_flip_addrs *flip_addrs,
+ bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
struct rect *dirty_rects = flip_addrs->dirty_rects;
- uint32_t num_clips;
+ u32 num_clips;
struct drm_mode_rect *clips;
bool bb_changed;
bool fb_changed;
- uint32_t i = 0;
+ u32 i = 0;
+ *dirty_regions_changed = false;
/*
* Cursor plane has its own dirty rect update interface. See
@@ -4950,9 +5130,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
- &dirty_rects[i], clips->x1,
- clips->y1, clips->x2 - clips->x1,
- clips->y2 - clips->y1,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
&flip_addrs->dirty_rect_count,
false);
return;
@@ -4977,6 +5157,8 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
new_plane_state->plane->base.id,
bb_changed, fb_changed, num_clips);
+ *dirty_regions_changed = bb_changed;
+
if (bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
@@ -5078,7 +5260,7 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
bool is_y420, int requested_bpc)
{
- uint8_t bpc;
+ u8 bpc;
if (is_y420) {
bpc = 8;
@@ -5307,8 +5489,6 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- stream->output_color_space = get_output_color_space(timing_out);
-
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
@@ -5319,6 +5499,8 @@ static void fill_stream_properties_from_drm_display_mode(
adjust_colour_depth_from_display_info(timing_out, info);
}
}
+
+ stream->output_color_space = get_output_color_space(timing_out);
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -5596,7 +5778,6 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode,
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
@@ -5622,11 +5803,15 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
uint32_t max_dsc_target_bpp_limit_override)
{
const struct dc_link_settings *verified_link_cap = NULL;
- uint32_t link_bw_in_kbps;
- uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
verified_link_cap = dc_link_get_link_cap(stream->link);
link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
@@ -5649,8 +5834,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
if (bw_range.max_kbps < link_bw_in_kbps) {
if (dc_dsc_compute_config(dc->res_pool->dscs[0],
dsc_caps,
- dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
0,
&stream->timing,
&dsc_cfg)) {
@@ -5664,8 +5848,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(dc->res_pool->dscs[0],
dsc_caps,
- dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
link_bw_in_kbps,
&stream->timing,
&dsc_cfg)) {
@@ -5680,12 +5863,16 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
struct drm_connector *drm_connector = &aconnector->base;
- uint32_t link_bandwidth_kbps;
+ u32 link_bandwidth_kbps;
struct dc *dc = sink->ctx->dc;
- uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
- uint32_t dsc_max_supported_bw_in_kbps;
- uint32_t max_dsc_target_bpp_limit_override =
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -5704,8 +5891,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5722,8 +5908,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dsc_max_supported_bw_in_kbps > 0)
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
dsc_max_supported_bw_in_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5747,7 +5932,6 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
@@ -5770,9 +5954,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int mode_refresh;
int preferred_refresh = 0;
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
-#endif
struct dc_sink *sink = NULL;
@@ -5831,7 +6013,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -5862,12 +6045,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream, &mode, &aconnector->base, con_state, old_stream,
requested_bpc);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (aconnector->timing_changed) {
+ DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
+ __func__,
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
-#endif
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -6042,10 +6231,8 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- const struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_display_manager *dm = &adev->dm;
- int i;
/*
* Call only if mst_mgr was initialized before since it's not done
@@ -6054,15 +6241,10 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- for (i = 0; i < dm->num_of_edps; i++) {
- if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
- backlight_device_unregister(dm->backlight_dev[i]);
- dm->backlight_dev[i] = NULL;
- }
+ if (aconnector->bl_idx != -1) {
+ backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
}
-#endif
if (aconnector->dc_em_sink)
dc_sink_release(aconnector->dc_em_sink);
@@ -6143,6 +6325,8 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
+ amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
+
if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
@@ -6256,7 +6440,6 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_plane_state->plane_size.surface_size.width = stream->src.width;
dc_plane_state->plane_size.chroma_size.height = stream->src.height;
dc_plane_state->plane_size.chroma_size.width = stream->src.width;
- dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
dc_plane_state->rotation = ROTATION_ANGLE_0;
@@ -6554,11 +6737,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
- if (!aconnector->port || !aconnector->dc_sink)
+ if (!aconnector->mst_output_port || !aconnector->dc_sink)
return 0;
- mst_port = aconnector->port;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_port = aconnector->mst_output_port;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
return 0;
@@ -6568,7 +6751,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
return PTR_ERR(mst_state);
if (!mst_state->pbn_div)
- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -6597,7 +6780,6 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
@@ -6614,7 +6796,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
aconnector = to_amdgpu_dm_connector(connector);
- if (!aconnector->port)
+ if (!aconnector->mst_output_port)
continue;
if (!new_con_state || !new_con_state->crtc)
@@ -6654,7 +6836,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
- ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
dm_conn_state->pbn, false);
if (ret < 0)
return ret;
@@ -6662,7 +6844,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
continue;
}
- vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
if (vcpi < 0)
return vcpi;
@@ -6671,7 +6853,6 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
return 0;
}
-#endif
static int to_drm_connector_type(enum signal_type st)
{
@@ -6905,21 +7086,21 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
const struct drm_display_mode *m;
struct drm_display_mode *new_mode;
uint i;
- uint32_t new_modes_count = 0;
+ u32 new_modes_count = 0;
/* Standard FPS values
*
* 23.976 - TV/NTSC
- * 24 - Cinema
- * 25 - TV/PAL
+ * 24 - Cinema
+ * 25 - TV/PAL
* 29.97 - TV/NTSC
- * 30 - TV/NTSC
- * 48 - Cinema HFR
- * 50 - TV/PAL
- * 60 - Commonly used
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
* 48,72,96,120 - Multiples of 24
*/
- static const uint32_t common_rates[] = {
+ static const u32 common_rates[] = {
23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000, 120000
};
@@ -6935,8 +7116,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
return 0;
for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
- uint64_t target_vtotal, target_vtotal_diff;
- uint64_t num, den;
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
continue;
@@ -6982,7 +7163,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!edid)
+ if (!(amdgpu_freesync_vid_mode && edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -6996,12 +7177,18 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!drm_edid_is_valid(edid)) {
amdgpu_dm_connector->num_modes =
drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
amdgpu_dm_connector_add_common_modes(encoder, connector);
@@ -7028,6 +7215,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.funcs->reset(&aconnector->base);
aconnector->connector_id = link_index;
+ aconnector->bl_idx = -1;
aconnector->dc_link = link;
aconnector->base.interlace_allowed = false;
aconnector->base.doublescan_allowed = false;
@@ -7035,6 +7223,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
/*
@@ -7076,11 +7267,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.underscan_vborder_property,
0);
- if (!aconnector->mst_port)
+ if (!aconnector->mst_root)
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
- /* This defaults to the max in the range, but we want 8bpc for non-edp. */
- aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+ aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -7094,13 +7284,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
connector_type == DRM_MODE_CONNECTOR_eDP) {
drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
- if (!aconnector->mst_port)
+ if (!aconnector->mst_root)
drm_connector_attach_vrr_capable_property(&aconnector->base);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
-#endif
}
}
@@ -7178,7 +7366,7 @@ create_i2c(struct ddc_service *ddc_service,
*/
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *aencoder)
{
int res = 0;
@@ -7362,28 +7550,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
- const struct drm_connector_state *old_state,
- const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
- /* Handle: Type0/1 change */
- if (old_state->hdcp_content_type != state->hdcp_content_type &&
- state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
return true;
}
- /* CP is being re enabled, ignore this
- *
- * Handles: ENABLED -> DESIRED
- */
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ /* CP is being re enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
return false;
}
@@ -7391,9 +7606,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: UNDESIRED -> ENABLED
*/
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Stream removed and re-enabled
*
@@ -7403,10 +7618,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (!(old_state->crtc && old_state->crtc->enabled) &&
- state->crtc && state->crtc->enabled &&
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
return true;
}
@@ -7418,35 +7635,41 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
return true;
}
- /*
- * Handles: UNDESIRED -> UNDESIRED
- * DESIRED -> DESIRED
- * ENABLED -> ENABLED
- */
- if (old_state->content_protection == state->content_protection)
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
return false;
+ }
- /*
- * Handles: UNDESIRED -> DESIRED
- * DESIRED -> UNDESIRED
- * ENABLED -> UNDESIRED
- */
- if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
return true;
+ }
- /*
- * Handles: DESIRED -> ENABLED
- */
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
return false;
}
-#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -7488,6 +7711,8 @@ static void update_freesync_state_on_stream(
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
unsigned long flags;
bool pack_sdp_v1_3 = false;
+ struct amdgpu_dm_connector *aconn;
+ enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
if (!new_stream)
return;
@@ -7501,7 +7726,7 @@ static void update_freesync_state_on_stream(
return;
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- vrr_params = acrtc->dm_irq_params.vrr_params;
+ vrr_params = acrtc->dm_irq_params.vrr_params;
if (surface) {
mod_freesync_handle_preflip(
@@ -7512,7 +7737,7 @@ static void update_freesync_state_on_stream(
&vrr_params);
if (adev->family < AMDGPU_FAMILY_AI &&
- amdgpu_dm_vrr_active(new_crtc_state)) {
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
mod_freesync_handle_v_update(dm->freesync_module,
new_stream, &vrr_params);
@@ -7523,11 +7748,27 @@ static void update_freesync_state_on_stream(
}
}
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
+ if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+ if (aconn->vsdb_info.amd_vsdb_version == 1)
+ packet_type = PACKET_TYPE_FS_V1;
+ else if (aconn->vsdb_info.amd_vsdb_version == 2)
+ packet_type = PACKET_TYPE_FS_V2;
+ else if (aconn->vsdb_info.amd_vsdb_version == 3)
+ packet_type = PACKET_TYPE_FS_V3;
+
+ mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+ &new_stream->adaptive_sync_infopacket);
+ }
+
mod_freesync_build_vrr_infopacket(
dm->freesync_module,
new_stream,
&vrr_params,
- PACKET_TYPE_VRR,
+ packet_type,
TRANSFER_FUNC_UNKNOWN,
&vrr_infopacket,
pack_sdp_v1_3);
@@ -7541,6 +7782,7 @@ static void update_freesync_state_on_stream(
new_crtc_state->vrr_infopacket = vrr_infopacket;
new_stream->vrr_infopacket = vrr_infopacket;
+ new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
if (new_crtc_state->freesync_vrr_info_changed)
DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
@@ -7613,8 +7855,8 @@ static void update_stream_irq_parameters(
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
- bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
- bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
if (!old_vrr_active && new_vrr_active) {
/* Transition VRR inactive -> active:
@@ -7625,7 +7867,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -7633,7 +7875,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -7652,7 +7894,14 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
*/
for_each_old_plane_in_state(state, plane, old_plane_state, i)
if (plane->type == DRM_PLANE_TYPE_CURSOR)
- handle_cursor_update(plane, old_plane_state);
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
+}
+
+static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+{
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+
+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -7662,8 +7911,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
- uint64_t timestamp_ns;
+ u32 i;
+ u64 timestamp_ns = ktime_get_ns();
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@@ -7674,10 +7923,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
unsigned long flags;
- uint32_t target_vblank, last_flip_vblank;
- bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ u32 target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
+ bool dirty_rects_changed = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -7728,6 +7978,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
continue;
dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
@@ -7736,7 +7988,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
- fill_dc_scaling_info(dm->adev, new_plane_state,
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
&bundle->scaling_infos[planes_count]);
bundle->surface_updates[planes_count].scaling_info =
@@ -7765,18 +8017,42 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
- if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
- &bundle->flip_addrs[planes_count]);
+ &bundle->flip_addrs[planes_count],
+ &dirty_rects_changed);
+
+ /*
+ * If the dirty regions changed, PSR-SU needs to be disabled temporarily
+ * and enabled again after the dirty regions are stable, to avoid video glitches.
+ * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses
+ * the video while PSR-SU is disabled.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
/*
* Only allow immediate flips for fast updates that don't
- * change FB pitch, DCC state, rotation or mirroing.
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
*/
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
- acrtc_state->update_type == UPDATE_TYPE_FAST;
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
timestamp_ns = ktime_get_ns();
bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -7934,12 +8210,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
- dc_commit_updates_for_stream(dm->dc,
- bundle->surface_updates,
- planes_count,
- acrtc_state->stream,
- &bundle->stream_update,
- dc_state);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
/**
* Enable or disable the interrupts on the backend.
@@ -7987,7 +8263,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
- !acrtc_state->stream->link->psr_settings.psr_allow_active)
+ !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ (timestamp_ns -
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+ 500000000)
amdgpu_dm_psr_enable(acrtc_state->stream);
} else {
acrtc_attach->dm_irq_params.allow_psr_entry = false;
@@ -8039,7 +8318,7 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- notify:
+notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -8112,7 +8391,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- uint32_t i, j;
+ u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
unsigned long flags;
@@ -8199,7 +8478,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* aconnector as needed
*/
- if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
@@ -8254,7 +8533,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
@@ -8280,16 +8559,72 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
new_crtc_state = NULL;
+ old_crtc_state = NULL;
- if (acrtc)
+ if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8301,13 +8636,45 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
continue;
}
- if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+ /* When a display is unplugged from the MST hub, the connector will
+ * be destroyed within dm_dp_mst_connector_destroy. The connector's
+ * hdcp properties, like type, undesired, desired, enabled,
+ * will be lost. So, save the hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. If the same display is
+ * plugged back with the same display index, its hdcp properties
+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+ */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
hdcp_update_display(
adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
- new_con_state->hdcp_content_type,
- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
}
-#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -8384,12 +8751,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(dm->dc,
- dummy_updates,
- status->plane_count,
- dm_new_crtc_state->stream,
- &stream_update,
- dc_state);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
mutex_unlock(&dm->dc_lock);
}
@@ -8403,9 +8769,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk;
-#endif
#endif
/* Count number of newly disabled CRTCs for dropping PM refs later. */
if (old_crtc_state->active && !new_crtc_state->active)
@@ -8418,9 +8781,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
update_stream_irq_parameters(dm, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- crc_rd_wrk = dm->crc_rd_wrk;
-#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8449,10 +8809,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
acrtc->dm_irq_params.window_param.update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt to 2.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crc_rd_wrk->crtc = crtc;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -8743,7 +9105,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
}
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
- uint64_t num, den, res;
+ u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@ -8846,7 +9208,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -8881,7 +9244,14 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto skip_modeset;
- if (dm_new_crtc_state->stream &&
+ /* Unset freesync video if it was active before */
+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+ }
+
+ /* Now check if we should set freesync video mode */
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
@@ -8893,7 +9263,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (aconnector &&
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
@@ -8939,7 +9309,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (modereset_required(new_crtc_state))
goto skip_modeset;
- if (modeset_required(new_crtc_state, new_stream,
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
dm_old_crtc_state->stream)) {
WARN_ON(dm_new_crtc_state->stream);
@@ -8970,7 +9340,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
skip_modeset:
/* Release extra reference */
if (new_stream)
- dc_stream_release(new_stream);
+ dc_stream_release(new_stream);
/*
* We want to do dc stream updates that do not require a
@@ -9191,7 +9561,8 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_plane_state *old_plane_state,
struct drm_plane_state *new_plane_state,
bool enable,
- bool *lock_and_validation_needed)
+ bool *lock_and_validation_needed,
+ bool *is_top_most_overlay)
{
struct dm_atomic_state *dm_state = NULL;
@@ -9265,8 +9636,9 @@ static int dm_update_plane_state(struct dc *dc,
return -EINVAL;
}
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
- dc_plane_state_release(dm_old_plane_state->dc_state);
dm_new_plane_state->dc_state = NULL;
*lock_and_validation_needed = true;
@@ -9289,7 +9661,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!needs_reset)
return 0;
- ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
@@ -9299,6 +9671,14 @@ static int dm_update_plane_state(struct dc *dc,
if (!dc_new_plane_state)
return -ENOMEM;
+ /* Block top most plane from being a video plane */
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
+ return -EINVAL;
+ else
+ *is_top_most_overlay = false;
+ }
+
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
@@ -9427,7 +9807,6 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -9442,7 +9821,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!aconnector->port || !aconnector->mst_port)
+ if (!aconnector->mst_output_port || !aconnector->mst_root)
aconnector = NULL;
else
break;
@@ -9451,9 +9830,8 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (!aconnector)
return 0;
- return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}
-#endif
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -9495,10 +9873,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
+ bool is_top_most_overlay = true;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
struct dsc_mst_fairness_vars vars[MAX_PIPES];
-#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -9524,12 +9903,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- if (dm_old_con_state->abm_level !=
- dm_new_con_state->abm_level)
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
new_crtc_state->connectors_changed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -9541,7 +9919,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
-#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -9619,7 +9996,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
* atomic state, so call drm helper to normalize zpos.
*/
- drm_atomic_normalize_zpos(dev, state);
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9627,7 +10008,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_plane_state,
new_plane_state,
false,
- &lock_and_validation_needed);
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
if (ret) {
DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
@@ -9666,20 +10048,19 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_plane_state,
new_plane_state,
true,
- &lock_and_validation_needed);
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
if (ret) {
DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = pre_validate_dsc(state, &dm_state, vars);
if (ret != 0)
goto fail;
}
-#endif
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
@@ -9745,6 +10126,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->index == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+
/**
* Streams and planes are reset when there are changes that affect
* bandwidth. Anything that affects bandwidth needs to go through
@@ -9771,10 +10172,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
goto fail;
}
@@ -9783,7 +10184,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
}
-#endif
/*
* Perform validation of MST topology in the state:
@@ -9879,7 +10279,7 @@ fail:
static bool is_dp_capable_without_timing_msa(struct dc *dc,
struct amdgpu_dm_connector *amdgpu_dm_connector)
{
- uint8_t dpcd_data;
+ u8 dpcd_data;
bool capable = false;
if (amdgpu_dm_connector->dc_link &&
@@ -9898,7 +10298,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
- uint8_t *data,
+ u8 *data,
unsigned int length,
struct amdgpu_hdmi_vsdb_info *vsdb)
{
@@ -9953,7 +10353,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -9994,7 +10394,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -10010,21 +10410,25 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ bool ret;
+ mutex_lock(&adev->dm.dc_lock);
if (adev->dm.dmub_srv)
- return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+ ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
else
- return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ mutex_unlock(&adev->dm.dc_lock);
+ return ret;
}
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
- uint8_t *edid_ext = NULL;
+ u8 *edid_ext = NULL;
int i;
bool valid_vsdb_found = false;
@@ -10079,6 +10483,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
bool freesync_capable = false;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
@@ -10171,6 +10576,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
}
}
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+ if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+
+ amdgpu_dm_connector->pack_sdp_v1_3 = true;
+ amdgpu_dm_connector->as_type = as_type;
+ amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
update:
if (dm_con_state)
dm_con_state->freesync_capable = freesync_capable;
@@ -10200,7 +10625,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
- uint32_t value, const char *func_name)
+ u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
@@ -10215,7 +10640,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
const char *func_name)
{
- uint32_t value;
+ u32 value;
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
DC_ERR("invalid register read; address = 0\n");
@@ -10251,7 +10676,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
goto out;
- }
+ }
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
DRM_ERROR("wait_for_completion_timeout timeout!");
@@ -10294,6 +10719,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
ret = p_notify->aux_reply.length;
*operation_result = p_notify->result;
out:
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
mutex_unlock(&adev->dm.dpia_aux_lock);
return ret;
}
@@ -10321,6 +10747,8 @@ int amdgpu_dm_process_dmub_set_config_sync(
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
+ if (!is_cmd_complete)
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
mutex_unlock(&adev->dm.dpia_aux_lock);
return ret;
}
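Note on the two hunks above: re-arming the completion with reinit_completion() before releasing dpia_aux_lock ensures a reply that arrives after a timeout cannot satisfy the next transfer. A minimal sketch of that one-shot request/reply pattern, with hypothetical names (send_cmd_to_dmub, payload) standing in for the driver's real helpers:

#include <linux/completion.h>
#include <linux/mutex.h>

extern bool send_cmd_to_dmub(void *payload);	/* hypothetical; reply handler calls complete() */

static DEFINE_MUTEX(req_lock);
static DECLARE_COMPLETION(reply_done);

static int do_request_sync(void *payload)
{
	int ret = 0;

	mutex_lock(&req_lock);
	if (!send_cmd_to_dmub(payload)) {
		ret = -EIO;
		goto out;
	}
	if (!wait_for_completion_timeout(&reply_done, 10 * HZ))
		ret = -ETIMEDOUT;
out:
	/* Re-arm so a stale, late complete() cannot leak into the next request. */
	reinit_completion(&reply_done);
	mutex_unlock(&req_lock);
	return ret;
}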
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index df3c25e32c65..2e2413fd73a4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -31,6 +31,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
+#include "link_service_types.h"
/*
* This file contains the definition for amdgpu_display_manager
@@ -58,6 +59,7 @@
#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
+#include "mod_info_packet.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
@@ -459,9 +461,7 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];
struct mod_freesync *freesync_module;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct hdcp_workqueue *hdcp_workqueue;
-#endif
/**
* @vblank_control_workqueue:
@@ -494,11 +494,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @crc_rd_wrk:
+ * @secure_display_ctxs:
*
- * Work to be executed in a separate thread to communicate with PSP.
+ * Stores the ROI information and the work_structs used to command
+ * DMUB and PSP for all CRTCs.
*/
- struct crc_rd_work *crc_rd_wrk;
+ struct secure_display_context *secure_display_ctxs;
#endif
/**
* @hpd_rx_offload_wq:
@@ -575,10 +576,41 @@ enum mst_progress_status {
MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};
+/**
+ * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
+ *
+ * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
+ * struct is useful to keep track of the display-specific information about
+ * FreeSync.
+ */
+struct amdgpu_hdmi_vsdb_info {
+ /**
+ * @amd_vsdb_version: Vendor Specific Data Block Version, should be
+ * used to determine which Vendor Specific InfoFrame (VSIF) to send.
+ */
+ unsigned int amd_vsdb_version;
+
+ /**
+ * @freesync_supported: FreeSync Supported.
+ */
+ bool freesync_supported;
+
+ /**
+ * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
+ */
+ unsigned int min_refresh_rate_hz;
+
+ /**
+ * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
+ */
+ unsigned int max_refresh_rate_hz;
+};
+
struct amdgpu_dm_connector {
struct drm_connector base;
uint32_t connector_id;
+ int bl_idx;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
@@ -603,8 +635,8 @@ struct amdgpu_dm_connector {
/* DM only */
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
- struct drm_dp_mst_port *port;
- struct amdgpu_dm_connector *mst_port;
+ struct drm_dp_mst_port *mst_output_port;
+ struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -643,6 +675,15 @@ struct amdgpu_dm_connector {
/* Record progress status of mst*/
uint8_t mst_status;
+
+ /* Automated testing */
+ bool timing_changed;
+ struct dc_crtc_timing *timing_requested;
+
+ /* Adaptive Sync */
+ bool pack_sdp_v1_3;
+ enum adaptive_sync_type as_type;
+ struct amdgpu_hdmi_vsdb_info vsdb_info;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -705,45 +746,12 @@ struct dm_connector_state {
uint8_t underscan_hborder;
bool underscan_enable;
bool freesync_capable;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
bool update_hdcp;
-#endif
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
};
-/**
- * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
- *
- * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
- * struct is useful to keep track of the display-specific information about
- * FreeSync.
- */
-struct amdgpu_hdmi_vsdb_info {
- /**
- * @amd_vsdb_version: Vendor Specific Data Block Version, should be
- * used to determine which Vendor Specific InfoFrame (VSIF) to send.
- */
- unsigned int amd_vsdb_version;
-
- /**
- * @freesync_supported: FreeSync Supported.
- */
- bool freesync_supported;
-
- /**
- * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
- */
- unsigned int min_refresh_rate_hz;
-
- /**
- * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
- */
- unsigned int max_refresh_rate_hz;
-};
-
-
#define to_dm_connector_state(x)\
container_of((x), struct dm_connector_state, base)
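The to_dm_connector_state() macro above is the usual container_of() downcast from the embedded DRM base state to the driver-private wrapper. A small self-contained illustration of the same pattern, with made-up structure names:

#include <linux/container_of.h>

struct base_state { int id; };

struct priv_state {
	struct base_state base;		/* embedded base object */
	bool freesync_capable;		/* driver-private field */
};

#define to_priv_state(x) container_of((x), struct priv_state, base)

static bool priv_is_freesync_capable(struct base_state *s)
{
	/* Recover the enclosing priv_state from a pointer to its member. */
	return to_priv_state(s)->freesync_capable;
}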
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 66df2394d7e4..27711743c22c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -101,35 +101,44 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct crc_rd_work *crc_rd_wrk;
- struct amdgpu_device *adev;
+ struct secure_display_context *secure_display_ctx;
struct psp_context *psp;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
- uint8_t phy_id;
+ struct dc_stream_state *stream;
+ uint8_t phy_inst;
int ret;
- crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work);
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crtc = crc_rd_wrk->crtc;
+ secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
+ crtc = secure_display_ctx->crtc;
if (!crtc) {
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
return;
}
- adev = drm_to_adev(crtc->dev);
- psp = &adev->psp;
- phy_id = crc_rd_wrk->phy_inst;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ psp = &drm_to_adev(crtc->dev)->psp;
+
+ if (!psp->securedisplay_context.context.initialized) {
+ DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
+ return;
+ }
+
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ phy_inst = stream->link->link_enc_hw_inst;
+ /* need lock for multiple crtcs to use the command buffer */
mutex_lock(&psp->securedisplay_context.mutex);
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id =
- phy_id;
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ /* PSP TA is expected to finish data transmission over I2C within the
+ * current frame, even if up to 4 CRTCs request to send in this frame.
+ */
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
@@ -142,17 +151,23 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct crc_fw_work *crc_fw_wrk;
+ struct secure_display_context *secure_display_ctx;
struct amdgpu_display_manager *dm;
+ struct drm_crtc *crtc;
+ struct dc_stream_state *stream;
- crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work);
- dm = crc_fw_wrk->dm;
+ secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
+ crtc = secure_display_ctx->crtc;
+
+ if (!crtc)
+ return;
+
+ dm = &drm_to_adev(crtc->dev)->dm;
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->rect, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd);
+ dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
mutex_unlock(&dm->dc_lock);
-
- kfree(crc_fw_wrk);
}
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
@@ -189,6 +204,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
{
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ int i;
+#endif
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
@@ -200,21 +218,18 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
- /* Enable CRTC CRC generation if necessary. */
+ /* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Disable secure_display if it was enabled */
if (!enable) {
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
-
- if (adev->dm.crc_rd_wrk->crtc == crtc) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
/* stop ROI update on this crtc */
- dc_stream_forward_crc_window(stream_state->ctx->dc,
- NULL, stream_state, true);
- adev->dm.crc_rd_wrk->crtc = NULL;
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ dc_stream_forward_crc_window(stream_state, NULL, true);
}
- spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
}
}
#endif
@@ -329,7 +344,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux;
+ aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;
if (!aux) {
DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
@@ -347,6 +362,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc);
#endif
@@ -456,14 +472,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
- struct dc_stream_state *stream_state;
struct drm_device *drm_dev = NULL;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct crc_rd_work *crc_rd_wrk;
- struct crc_fw_work *crc_fw_wrk;
- unsigned long flags1, flags2;
+ struct secure_display_context *secure_display_ctx = NULL;
+ unsigned long flags1;
if (crtc == NULL)
return;
@@ -473,75 +487,76 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
drm_dev = crtc->dev;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
- stream_state = acrtc->dm_irq_params.stream;
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
- if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
+ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
+ !dm_is_crc_source_crtc(cur_crc_src))
goto cleanup;
- if (!dm_is_crc_source_crtc(cur_crc_src))
+ if (!acrtc->dm_irq_params.window_param.activated)
goto cleanup;
- if (!acrtc->dm_irq_params.window_param.activated)
+ if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
goto cleanup;
+ }
- if (acrtc->dm_irq_params.window_param.update_win) {
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
- }
+ secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
+ if (WARN_ON(secure_display_ctx->crtc != crtc)) {
+ /* The crtc was set when the secure_display_context was created;
+ * it is not expected to change here.
+ */
+ secure_display_ctx->crtc = crtc;
+ }
+ if (acrtc->dm_irq_params.window_param.update_win) {
/* prepare work for dmub to update ROI */
- crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC);
- if (!crc_fw_wrk)
- goto cleanup;
-
- INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window);
- crc_fw_wrk->dm = &adev->dm;
- crc_fw_wrk->stream = stream_state;
- crc_fw_wrk->rect.x = acrtc->dm_irq_params.window_param.x_start;
- crc_fw_wrk->rect.y = acrtc->dm_irq_params.window_param.y_start;
- crc_fw_wrk->rect.width = acrtc->dm_irq_params.window_param.x_end -
+ secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
+ secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
+ secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
acrtc->dm_irq_params.window_param.x_start;
- crc_fw_wrk->rect.height = acrtc->dm_irq_params.window_param.y_end -
+ secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&crc_fw_wrk->forward_roi_work);
+ schedule_work(&secure_display_ctx->forward_roi_work);
acrtc->dm_irq_params.window_param.update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait for the
+ * following before sending the ROI to dmub:
+ * 1. The work is deferred to the system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
} else {
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
- }
-
- if (adev->dm.crc_rd_wrk) {
- crc_rd_wrk = adev->dm.crc_rd_wrk;
- spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
- crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
- spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
- schedule_work(&crc_rd_wrk->notify_ta_work);
- }
+ /* prepare work for psp to read ROI/CRC and send to I2C */
+ schedule_work(&secure_display_ctx->notify_ta_work);
}
cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
}
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void)
+struct secure_display_context *
+amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct crc_rd_work *crc_rd_wrk = NULL;
+ struct secure_display_context *secure_display_ctxs = NULL;
+ int i;
- crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL);
+ secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct secure_display_context),
+ GFP_KERNEL);
- if (!crc_rd_wrk)
+ if (!secure_display_ctxs)
return NULL;
- spin_lock_init(&crc_rd_wrk->crc_rd_work_lock);
- INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ }
- return crc_rd_wrk;
+ return secure_display_ctxs;
}
#endif
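amdgpu_dm_crtc_secure_display_create_contexts() above allocates one context per CRTC, and each work handler recovers its own context via container_of() on the embedded work_struct, which is what lets the old crc_rd_work_lock and crtc hand-off go away. A hedged sketch of that per-instance deferred-work pattern, with invented names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct per_crtc_ctx {
	struct work_struct work;
	int crtc_id;			/* fixed at allocation time */
};

static void per_crtc_handler(struct work_struct *work)
{
	struct per_crtc_ctx *ctx = container_of(work, struct per_crtc_ctx, work);

	pr_debug("servicing crtc %d\n", ctx->crtc_id);
}

static struct per_crtc_ctx *create_contexts(int num_crtc)
{
	struct per_crtc_ctx *ctxs = kcalloc(num_crtc, sizeof(*ctxs), GFP_KERNEL);
	int i;

	if (!ctxs)
		return NULL;

	for (i = 0; i < num_crtc; i++) {
		INIT_WORK(&ctxs[i].work, per_crtc_handler);
		ctxs[i].crtc_id = i;
	}
	return ctxs;
}

/* An interrupt path would then simply do: schedule_work(&ctxs[crtc_id].work); */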
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 71bce608d751..935adca6f048 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -45,7 +45,7 @@ struct crc_window_param {
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
- /* CRC windwo is activated or not*/
+ /* CRC window is activated or not */
bool activated;
/* Update crc window during vertical blank or not */
bool update_win;
@@ -53,22 +53,17 @@ struct crc_window_param {
int skip_frame_cnt;
};
-/* read_work for driver to call PSP to read */
-struct crc_rd_work {
+struct secure_display_context {
+ /* work to notify PSP TA */
struct work_struct notify_ta_work;
- /* To protect crc_rd_work carried fields*/
- spinlock_t crc_rd_work_lock;
- struct drm_crtc *crtc;
- uint8_t phy_inst;
-};
-/* forward_work for driver to forward ROI to dmu */
-struct crc_fw_work {
+ /* work to forward ROI to dmcu/dmub */
struct work_struct forward_roi_work;
- struct amdgpu_display_manager *dm;
- struct dc_stream_state *stream;
+
+ struct drm_crtc *crtc;
+
+ /* Region of Interest (ROI) */
struct rect rect;
- bool is_stop_cmd;
};
#endif
@@ -100,11 +95,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void);
+struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
+ struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
-#define amdgpu_dm_crtc_secure_display_create_work()
+#define amdgpu_dm_crtc_secure_display_create_contexts()
#endif
#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
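The #else branch above keeps callers free of CONFIG_DRM_AMD_SECURE_DISPLAY checks by providing no-op macro stubs. A hedged header-style sketch of the same pattern (hypothetical CONFIG_FOO and foo_* names), here using static inline stubs, which additionally preserve argument type checking when the option is off:

struct foo_ctx;
struct foo_dev;

#ifdef CONFIG_FOO
bool foo_window_is_activated(struct foo_dev *dev);
struct foo_ctx *foo_create_contexts(struct foo_dev *dev);
#else
/* Stubs keep callers free of #ifdefs when the option is disabled. */
static inline bool foo_window_is_activated(struct foo_dev *dev)
{
	return false;
}

static inline struct foo_ctx *foo_create_contexts(struct foo_dev *dev)
{
	return NULL;
}
#endif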
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 22125daf9dcf..e3762e806617 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -34,7 +34,7 @@
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"
-void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
struct drm_crtc *crtc = &acrtc->base;
struct drm_device *dev = crtc->dev;
@@ -54,14 +54,14 @@ void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-bool modeset_required(struct drm_crtc_state *crtc_state,
+bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream)
{
return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}
-bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
+bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
return acrtc->dm_irq_params.freesync_config.state ==
@@ -70,13 +70,16 @@ bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
VRR_STATE_ACTIVE_FIXED;
}
-int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int rc;
+ if (acrtc->otg_inst == -1)
+ return 0;
+
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -86,7 +89,7 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -105,8 +108,7 @@ static void vblank_control_worker(struct work_struct *work)
else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
- dc_allow_idle_optimizations(
- dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
+ dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
@@ -152,23 +154,38 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct vblank_control_work *work;
int rc = 0;
+ if (acrtc->otg_inst == -1)
+ goto skip;
+
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_vrr_active(acrtc_state))
- rc = dm_set_vupdate_irq(crtc, true);
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
} else {
/* vblank irq off -> vupdate irq off */
- rc = dm_set_vupdate_irq(crtc, false);
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
}
if (rc)
return rc;
- irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ if (amdgpu_in_reset(adev)) {
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ rc = -EBUSY;
+ } else {
+ rc = (enable)
+ ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+ : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
+ }
- if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- return -EBUSY;
+ if (rc)
+ return rc;
+skip:
if (amdgpu_in_reset(adev))
return 0;
@@ -193,12 +210,12 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return 0;
}
-int dm_enable_vblank(struct drm_crtc *crtc)
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
{
return dm_set_vblank(crtc, true);
}
-void dm_disable_vblank(struct drm_crtc *crtc)
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
{
dm_set_vblank(crtc, false);
}
@@ -294,8 +311,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
.get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = dm_enable_vblank,
- .disable_vblank = dm_disable_vblank,
+ .enable_vblank = amdgpu_dm_crtc_enable_vblank,
+ .disable_vblank = amdgpu_dm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
@@ -375,7 +392,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
dm_update_crtc_active_planes(crtc, crtc_state);
if (WARN_ON(unlikely(!dm_crtc_state->stream &&
- modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
+ amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
return ret;
}
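dm_set_vblank() above now takes two routes: during GPU reset it programs the DC interrupt source directly so the IRQ refcount is left untouched, otherwise it goes through amdgpu_irq_get()/amdgpu_irq_put() to keep the vblank source reference counted. A rough sketch of that split, with hypothetical helpers (hw_irq_set, irq_refcount_get/put) in place of the real calls:

static int set_vblank_irq(struct my_dev *dev, bool enable, bool in_reset)
{
	if (in_reset) {
		/* Reset path toggles the line directly; the refcount the rest
		 * of the driver relies on must not change here.
		 */
		return hw_irq_set(dev, enable) ? 0 : -EBUSY;
	}

	/* Normal path: balanced get/put keeps the source reference counted. */
	return enable ? irq_refcount_get(dev) : irq_refcount_put(dev);
}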
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 1ac8692354cf..17e948753f59 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -27,21 +27,21 @@
#ifndef __AMDGPU_DM_CRTC_H__
#define __AMDGPU_DM_CRTC_H__
-void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
+void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
-bool modeset_required(struct drm_crtc_state *crtc_state,
+bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream);
-int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
+int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
-bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc);
+bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
-int dm_enable_vblank(struct drm_crtc *crtc);
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
-void dm_disable_vblank(struct drm_crtc *crtc);
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc);
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 461037a3dd75..827fcb4fb3b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -34,9 +34,9 @@
#include "dmub/dmub_srv.h"
#include "resource.h"
#include "dsc.h"
-#include "dc_link_dp.h"
#include "link_hwss.h"
#include "dc/dc_dmub_srv.h"
+#include "link/protocols/link_dp_capability.h"
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
#include "amdgpu_dm_psr.h"
@@ -419,67 +419,38 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
return result;
}
-static int dp_lttpr_status_show(struct seq_file *m, void *d)
+static int dp_lttpr_status_show(struct seq_file *m, void *unused)
{
- char *data;
- struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private;
- struct dc_link *link = connector->dc_link;
- uint32_t read_size = 1;
- uint8_t repeater_count = 0;
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(connector);
+ struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps;
- data = kzalloc(read_size, GFP_KERNEL);
- if (!data)
- return 0;
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
- dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size);
+ seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n",
+ dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt),
+ caps.phy_repeater_cnt);
- switch ((uint8_t)*data) {
- case 0x80:
- repeater_count = 1;
- break;
- case 0x40:
- repeater_count = 2;
- break;
- case 0x20:
- repeater_count = 3;
- break;
- case 0x10:
- repeater_count = 4;
- break;
- case 0x8:
- repeater_count = 5;
- break;
- case 0x4:
- repeater_count = 6;
- break;
- case 0x2:
- repeater_count = 7;
+ seq_puts(m, "phy repeater mode: ");
+
+ switch (caps.mode) {
+ case DP_PHY_REPEATER_MODE_TRANSPARENT:
+ seq_puts(m, "transparent");
break;
- case 0x1:
- repeater_count = 8;
+ case DP_PHY_REPEATER_MODE_NON_TRANSPARENT:
+ seq_puts(m, "non-transparent");
break;
- case 0x0:
- repeater_count = 0;
+ case 0x00:
+ seq_puts(m, "non lttpr");
break;
default:
- repeater_count = (uint8_t)*data;
+ seq_printf(m, "read error (raw: 0x%x)", caps.mode);
break;
}
- seq_printf(m, "phy repeater count: %d\n", repeater_count);
-
- dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size);
-
- if ((uint8_t)*data == 0x55)
- seq_printf(m, "phy repeater mode: transparent\n");
- else if ((uint8_t)*data == 0xAA)
- seq_printf(m, "phy repeater mode: non-transparent\n");
- else if ((uint8_t)*data == 0x00)
- seq_printf(m, "phy repeater mode: non lttpr\n");
- else
- seq_printf(m, "phy repeater mode: read error\n");
-
- kfree(data);
+ seq_puts(m, "\n");
return 0;
}
@@ -753,7 +724,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
- dc_link_set_test_pattern(
+ dc_link_dp_set_test_pattern(
link,
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_RGB,
@@ -976,7 +947,6 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
return 0;
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
* Returns the HDCP capability of the Display (1.4 for now).
*
@@ -1013,7 +983,6 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data)
return 0;
}
-#endif
/*
* Returns whether the connected display is internal and not hotpluggable.
@@ -1192,7 +1161,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
break;
}
dpcd_caps = aconnector->dc_link->dpcd_caps;
- if (aconnector->port) {
+ if (aconnector->mst_output_port) {
/* aconnector sets dsc_aux during get_modes call
* if MST connector has it means it can either
* enable DSC on the sink device or on MST branch
@@ -1279,14 +1248,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
mutex_lock(&aconnector->hpd_lock);
/* Don't support for mst end device*/
- if (aconnector->mst_port) {
+ if (aconnector->mst_root) {
mutex_unlock(&aconnector->hpd_lock);
return -EINVAL;
}
if (param[0] == 1) {
- if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) &&
new_connection_type != dc_connection_none)
goto unlock;
@@ -1323,7 +1292,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
/* If the aconnector is the root node in mst topology */
if (aconnector->mst_mgr.mst_state == true)
- reset_cur_dp_mst_topology(link);
+ dc_link_reset_cur_dp_mst_topology(link);
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
@@ -1375,16 +1344,11 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1481,12 +1445,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -1566,16 +1530,11 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1670,12 +1629,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Safely get CRTC state
@@ -1755,16 +1714,11 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1859,12 +1813,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -1940,16 +1894,11 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2041,12 +1990,12 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -2120,16 +2069,11 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2181,16 +2125,11 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2257,16 +2196,11 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2333,16 +2267,11 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2578,13 +2507,13 @@ static int dp_is_mst_connector_show(struct seq_file *m, void *unused)
if (aconnector->mst_mgr.mst_state) {
role = "root";
- } else if (aconnector->mst_port &&
- aconnector->mst_port->mst_mgr.mst_state) {
+ } else if (aconnector->mst_root &&
+ aconnector->mst_root->mst_mgr.mst_state) {
role = "end";
- mgr = &aconnector->mst_port->mst_mgr;
- port = aconnector->port;
+ mgr = &aconnector->mst_root->mst_mgr;
+ port = aconnector->mst_output_port;
drm_modeset_lock(&mgr->base.lock, NULL);
if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
@@ -2662,9 +2591,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
-#endif
DEFINE_SHOW_ATTRIBUTE(internal_display);
DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
@@ -2795,9 +2722,7 @@ static const struct {
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"lttpr_status", &dp_lttpr_status_fops},
{"test_pattern", &dp_phy_test_pattern_fops},
-#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
-#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
@@ -2818,14 +2743,13 @@ static const struct {
{"is_dpia_link", &is_dpia_link_fops}
};
-#ifdef CONFIG_DRM_AMD_DC_HDCP
static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
{"hdcp_sink_capability", &hdcp_sink_capability_fops}
};
-#endif
+
/*
* Force YUV420 output if available from the given mode
*/
@@ -2870,6 +2794,22 @@ static int psr_get(void *data, u64 *val)
}
/*
+ * Read PSR state residency
+ */
+static int psr_read_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency;
+
+ link->dc->link_srv->edp_get_psr_residency(link, &residency);
+
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Set dmcub trace event IRQ enable or disable.
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
* Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
@@ -2904,6 +2844,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
+ "%llu\n");
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -3067,6 +3009,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+ debugfs_create_file_unsafe("psr_residency", 0444, dir,
+ connector, &psr_residency_fops);
debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
&current_backlight_fops);
debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
@@ -3084,7 +3028,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
debugfs_create_file(hdmi_debugfs_entries[i].name,
@@ -3092,7 +3035,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
hdmi_debugfs_entries[i].fops);
}
}
-#endif
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
@@ -3245,46 +3187,24 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
*/
static int crc_win_update_set(void *data, u64 val)
{
- struct drm_crtc *new_crtc = data;
- struct drm_crtc *old_crtc = NULL;
- struct amdgpu_crtc *new_acrtc, *old_acrtc;
- struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
- struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
-
- if (!crc_rd_wrk)
- return 0;
+ struct drm_crtc *crtc = data;
+ struct amdgpu_crtc *acrtc;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
if (val) {
- new_acrtc = to_amdgpu_crtc(new_crtc);
+ acrtc = to_amdgpu_crtc(crtc);
mutex_lock(&adev->dm.dc_lock);
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- if (crc_rd_wrk->crtc) {
- old_crtc = crc_rd_wrk->crtc;
- old_acrtc = to_amdgpu_crtc(old_crtc);
- }
- if (old_crtc && old_crtc != new_crtc) {
- old_acrtc->dm_irq_params.window_param.activated = false;
- old_acrtc->dm_irq_params.window_param.update_win = false;
- old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param.activated = true;
+ acrtc->dm_irq_params.window_param.update_win = true;
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- new_acrtc->dm_irq_params.window_param.activated = true;
- new_acrtc->dm_irq_params.window_param.update_win = true;
- new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- crc_rd_wrk->crtc = new_crtc;
- } else {
- new_acrtc->dm_irq_params.window_param.activated = true;
- new_acrtc->dm_irq_params.window_param.update_win = true;
- new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- crc_rd_wrk->crtc = new_crtc;
- }
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
}
@@ -3453,12 +3373,12 @@ static int trigger_hpd_mst_set(void *data, u64 val)
if (!aconnector->dc_link)
continue;
- if (!aconnector->mst_port)
+ if (!aconnector->mst_root)
continue;
link = aconnector->dc_link;
- dp_receiver_power_ctrl(link, false);
- drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
+ dc_link_dp_receiver_power_ctrl(link, false);
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false);
link->mst_stream_alloc_table.stream_count = 0;
memset(link->mst_stream_alloc_table.stream_allocations, 0,
sizeof(link->mst_stream_alloc_table.stream_allocations));
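The new psr_residency entry added earlier in this file follows the standard debugfs attribute recipe: a getter that fills a u64, wrapped by DEFINE_DEBUGFS_ATTRIBUTE() and registered with debugfs_create_file_unsafe(). A compact sketch of that recipe with a made-up counter:

#include <linux/debugfs.h>

static u64 demo_counter;

static int demo_get(void *data, u64 *val)
{
	*val = demo_counter;		/* report the current value */
	return 0;
}

/* Read-only attribute: no set callback, value printed as a decimal u64. */
DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, NULL, "%llu\n");

static void demo_debugfs_init(struct dentry *parent)
{
	debugfs_create_file_unsafe("demo_counter", 0444, parent, NULL, &demo_fops);
}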
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index a7fd98f57f94..5536d17306d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct mod_hdcp_display_query query;
+ unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector = aconnector;
+ hdcp_w->aconnector[conn_index] = aconnector;
query.display = NULL;
mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
@@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
} else {
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
cancel_delayed_work(&hdcp_w->property_validate_dwork);
}
@@ -223,9 +224,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct drm_connector_state *conn_state = aconnector->base.state;
+ unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector = aconnector;
+ hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ unsigned int conn_index;
mutex_lock(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
cancel_delayed_work(&hdcp_w->property_validate_dwork);
- hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
+ hdcp_w->encryption_status[conn_index] =
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
process_output(hdcp_w);
@@ -290,49 +297,80 @@ static void event_callback(struct work_struct *work)
}
+
static void event_property_update(struct work_struct *work)
{
-
struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
- struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
- struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_device *dev;
long ret;
+ unsigned int conn_index;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
+ aconnector = hdcp_work->aconnector[conn_index];
+ if (!aconnector)
+ continue;
- if (aconnector->base.state && aconnector->base.state->commit) {
- ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+ connector = &aconnector->base;
- if (ret == 0) {
- DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
- hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
- }
- }
+ /* check if display connected */
+ if (connector->status != connector_status_connected)
+ continue;
- if (aconnector->base.state) {
- if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
- if (aconnector->base.state->hdcp_content_type ==
+ conn_state = aconnector->base.state;
+
+ if (!conn_state)
+ continue;
+
+ dev = connector->dev;
+
+ if (!dev)
+ continue;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+ if (conn_state->commit) {
+ ret = wait_for_completion_interruptible_timeout(
+ &conn_state->commit->hw_done, 10 * HZ);
+ if (ret == 0) {
+ DRM_ERROR(
+ "HDCP state unknown! Setting it to DESIRED");
+ hdcp_work->encryption_status[conn_index] =
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+ if (hdcp_work->encryption_status[conn_index] !=
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
+ if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE0 &&
- hdcp_work->encryption_status <=
- MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
- drm_hdcp_update_content_protection(&aconnector->base,
+ hdcp_work->encryption_status[conn_index] <=
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
+
+ DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
+ drm_hdcp_update_content_protection(
+ connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED);
- else if (aconnector->base.state->hdcp_content_type ==
+ } else if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE1 &&
- hdcp_work->encryption_status ==
- MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
- drm_hdcp_update_content_protection(&aconnector->base,
+ hdcp_work->encryption_status[conn_index] ==
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
+ drm_hdcp_update_content_protection(
+ connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ }
} else {
- drm_hdcp_update_content_protection(&aconnector->base,
- DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
+ drm_hdcp_update_content_protection(
+ connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+
}
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-
- mutex_unlock(&hdcp_work->mutex);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static void event_property_validate(struct work_struct *work)
@@ -340,19 +378,47 @@ static void event_property_validate(struct work_struct *work)
struct hdcp_workqueue *hdcp_work =
container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
struct mod_hdcp_display_query query;
- struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
-
- if (!aconnector)
- return;
+ struct amdgpu_dm_connector *aconnector;
+ unsigned int conn_index;
mutex_lock(&hdcp_work->mutex);
- query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
- mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+ for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
+ conn_index++) {
+ aconnector = hdcp_work->aconnector[conn_index];
+
+ if (!aconnector)
+ continue;
+
+ /* check if display connected */
+ if (aconnector->base.status != connector_status_connected)
+ continue;
- if (query.encryption_status != hdcp_work->encryption_status) {
- hdcp_work->encryption_status = query.encryption_status;
- schedule_work(&hdcp_work->property_update_work);
+ if (!aconnector->base.state)
+ continue;
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index,
+ &query);
+
+ DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
+ aconnector->base.index,
+ aconnector->base.state->content_protection,
+ query.encryption_status,
+ hdcp_work->encryption_status[conn_index]);
+
+ if (query.encryption_status !=
+ hdcp_work->encryption_status[conn_index]) {
+ DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
+ hdcp_work->encryption_status[conn_index], query.encryption_status);
+
+ hdcp_work->encryption_status[conn_index] =
+ query.encryption_status;
+
+ DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n");
+
+ schedule_work(&hdcp_work->property_update_work);
+ }
}
mutex_unlock(&hdcp_work->mutex);
@@ -493,9 +559,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
+ link->dp.dp2_enabled = config->dp2_enabled;
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- link->adjust.auth_delay = 0;
+ link->adjust.auth_delay = 2;
link->adjust.hdcp1.disable = 0;
conn_state = aconnector->base.state;
@@ -686,6 +753,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+
+ memset(hdcp_work[i].aconnector, 0,
+ sizeof(struct amdgpu_dm_connector *) *
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ memset(hdcp_work[i].encryption_status, 0,
+ sizeof(enum mod_hdcp_encryption_status) *
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
}
cp_psp->funcs.update_stream_config = update_config;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 09294ff122fe..69b445b011c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -43,7 +43,7 @@ struct hdcp_workqueue {
struct delayed_work callback_dwork;
struct delayed_work watchdog_timer_dwork;
struct delayed_work property_validate_dwork;
- struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX];
struct mutex mutex;
struct mod_hdcp hdcp;
@@ -51,7 +51,20 @@ struct hdcp_workqueue {
struct mod_hdcp_display display;
struct mod_hdcp_link link;
- enum mod_hdcp_encryption_status encryption_status;
+ enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX];
+	/* When a display is unplugged from an MST hub, its connector is
+	 * destroyed in dm_dp_mst_connector_destroy and the connector's
+	 * HDCP properties (type, undesired, desired, enabled) are lost.
+	 * So, save the HDCP properties into hdcp_work within
+	 * amdgpu_dm_atomic_commit_tail. If the same display is plugged
+	 * back with the same display index, its HDCP properties will be
+	 * retrieved from hdcp_work within dm_dp_mst_get_modes.
+	 */
+ /* un-desired, desired, enabled */
+ unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
+ /* hdcp1.x, hdcp2.x */
+ unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];
+
uint8_t max_link;
uint8_t *srm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6994c9a1ed85..c6ce2b7123b7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -38,10 +38,15 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"
+#include "dpcd_defs.h"
+#include "dc/inc/core_types.h"
#include "dm_helpers.h"
#include "ddc_service_types.h"
+/* MST Dock */
+static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
+
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
@@ -120,23 +125,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
}
static void
-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
- struct amdgpu_dm_connector *aconnector,
+fill_dc_mst_payload_table_from_drm(struct dc_link *link,
+ bool enable,
+ struct drm_dp_mst_atomic_payload *target_payload,
struct dc_dp_mst_stream_allocation_table *table)
{
struct dc_dp_mst_stream_allocation_table new_table = { 0 };
struct dc_dp_mst_stream_allocation *sa;
- struct drm_dp_mst_atomic_payload *payload;
+ struct link_mst_stream_allocation_table copy_of_link_table =
+ link->mst_stream_alloc_table;
- /* Fill payload info*/
- list_for_each_entry(payload, &mst_state->payloads, next) {
- if (payload->delete)
- continue;
+ int i;
+ int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
+ struct link_mst_stream_allocation *dc_alloc;
- sa = &new_table.stream_allocations[new_table.stream_count];
- sa->slot_count = payload->time_slots;
- sa->vcp_id = payload->vcpi;
- new_table.stream_count++;
+	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
+ if (enable) {
+ dc_alloc =
+ &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
+ dc_alloc->vcp_id = target_payload->vcpi;
+ dc_alloc->slot_count = target_payload->time_slots;
+ } else {
+ for (i = 0; i < copy_of_link_table.stream_count; i++) {
+ dc_alloc =
+ &copy_of_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == target_payload->vcpi) {
+ dc_alloc->vcp_id = 0;
+ dc_alloc->slot_count = 0;
+ break;
+ }
+ }
+ ASSERT(i != copy_of_link_table.stream_count);
+ }
+
+	/* Fill payload info */
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dc_alloc =
+ &copy_of_link_table.stream_allocations[i];
+ if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
+ sa = &new_table.stream_allocations[new_table.stream_count];
+ sa->slot_count = dc_alloc->slot_count;
+ sa->vcp_id = dc_alloc->vcp_id;
+ new_table.stream_count++;
+ }
}
/* Overwrite the old table */
@@ -148,6 +180,40 @@ void dm_helpers_dp_update_branch_info(
const struct dc_link *link)
{}
+static void dm_helpers_construct_old_payload(
+ struct dc_link *link,
+ int pbn_per_slot,
+ struct drm_dp_mst_atomic_payload *new_payload,
+ struct drm_dp_mst_atomic_payload *old_payload)
+{
+ struct link_mst_stream_allocation_table current_link_table =
+ link->mst_stream_alloc_table;
+ struct link_mst_stream_allocation *dc_alloc;
+ int i;
+
+ *old_payload = *new_payload;
+
+	/* Set the correct time_slots/PBN of the old payload.
+	 * The other fields (delete & dsc_enabled) in
+	 * struct drm_dp_mst_atomic_payload are don't-care fields
+	 * when calling drm_dp_remove_payload().
+	 */
+ for (i = 0; i < current_link_table.stream_count; i++) {
+ dc_alloc =
+ &current_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == new_payload->vcpi) {
+ old_payload->time_slots = dc_alloc->slot_count;
+ old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+ break;
+ }
+ }
+
+	/* make sure there is an old payload */
+ ASSERT(i != current_link_table.stream_count);
+
+}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -159,7 +225,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -168,24 +234,33 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
* that blocks before commit guaranteeing that the state
* is not gonna be swapped while still in use in commit tail */
- if (!aconnector || !aconnector->mst_port)
+ if (!aconnector || !aconnector->mst_root)
return false;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
- if (enable)
- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
- else
- drm_dp_remove_payload(mst_mgr, mst_state, payload);
+ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+ if (enable) {
+ target_payload = new_payload;
+
+ drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+ } else {
+ /* construct old payload by VCPI*/
+ dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+ new_payload, &old_payload);
+ target_payload = &old_payload;
+
+ drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+ }
	/* mst_mgr->payloads are the VC payloads; notify the MST branch using DPCD or
	 * AUX message. The sequence is the slot 1-63 allocation sequence for each
	 * stream. AMD ASIC stream slot allocation should follow the same
	 * sequence. Copy the DRM MST allocation to dc */
- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
+ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
}
@@ -220,10 +295,10 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->mst_port)
+ if (!aconnector || !aconnector->mst_root)
return ACT_FAILED;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
if (!mst_mgr->mst_state)
return ACT_FAILED;
@@ -247,22 +322,27 @@ bool dm_helpers_dp_mst_send_payload_allocation(
struct drm_dp_mst_atomic_payload *payload;
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
+ int ret = 0;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->mst_port)
+ if (!aconnector || !aconnector->mst_root)
return false;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
if (!enable) {
set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
}
- if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
+ if (enable)
+ ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload);
+
+ if (ret) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
set_flag, false);
} else {
@@ -369,6 +449,7 @@ bool dm_helpers_dp_mst_start_top_mgr(
bool boot)
{
struct amdgpu_dm_connector *aconnector = link->priv;
+ int ret;
if (!aconnector) {
DRM_ERROR("Failed to find connector for link!");
@@ -384,7 +465,16 @@ bool dm_helpers_dp_mst_start_top_mgr(
DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
- return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
+ return false;
+ }
+
+ DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
+ aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+
+ return true;
}
bool dm_helpers_dp_mst_stop_top_mgr(
@@ -424,8 +514,8 @@ bool dm_helpers_dp_read_dpcd(
return false;
}
- return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
- data, size) > 0;
+ return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
+ size) == size;
}
bool dm_helpers_dp_write_dpcd(
@@ -481,7 +571,6 @@ bool dm_helpers_submit_i2c(
return result;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -598,7 +687,6 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
return;
data[0] |= (1 << 1); // set bit 1 to 1
- return;
if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
return;
@@ -649,7 +737,6 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
return ret;
}
-#endif
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
@@ -675,15 +762,13 @@ bool dm_helpers_dp_write_dsc_enable(
if (!aconnector->dsc_aux)
return false;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
// apply w/a to synaptics
if (needs_dsc_aux_workaround(aconnector->dc_link) &&
(aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
aconnector->dsc_aux, stream, enable_dsc);
-#endif
- port = aconnector->port;
+ port = aconnector->mst_output_port;
if (enable) {
if (port->passthrough_aux) {
@@ -719,17 +804,13 @@ bool dm_helpers_dp_write_dsc_enable(
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
-#endif
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
-#if defined(CONFIG_DRM_AMD_DC_DCN)
} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
}
-#endif
}
return ret;
@@ -960,6 +1041,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
sizeof(new_downspread));
}
+bool dm_helpers_dp_handle_test_pattern_request(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ union link_test_pattern dpcd_test_pattern,
+ union test_misc dpcd_test_params)
+{
+ enum dp_test_pattern test_pattern;
+ enum dp_test_pattern_color_space test_pattern_color_space =
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
+ enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
+ enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
+ if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
+ !pipes[i].prev_odm_pipe) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ if (pipe_ctx == NULL)
+ return false;
+
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case LINK_TEST_PATTERN_COLOR_RAMP:
+ test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+ break;
+ case LINK_TEST_PATTERN_VERTICAL_BARS:
+ test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+ break; /* black and white */
+ case LINK_TEST_PATTERN_COLOR_SQUARES:
+ test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+ TEST_DYN_RANGE_VESA ?
+ DP_TEST_PATTERN_COLOR_SQUARES :
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (dpcd_test_params.bits.CLR_FORMAT == 0)
+ test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+ else
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+
+ switch (dpcd_test_params.bits.BPC) {
+ case 0: // 6 bits
+ requestColorDepth = COLOR_DEPTH_666;
+ break;
+ case 1: // 8 bits
+ requestColorDepth = COLOR_DEPTH_888;
+ break;
+ case 2: // 10 bits
+ requestColorDepth = COLOR_DEPTH_101010;
+ break;
+ case 3: // 12 bits
+ requestColorDepth = COLOR_DEPTH_121212;
+ break;
+ default:
+ break;
+ }
+
+ switch (dpcd_test_params.bits.CLR_FORMAT) {
+ case 0:
+ requestPixelEncoding = PIXEL_ENCODING_RGB;
+ break;
+ case 1:
+ requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case 2:
+ requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ default:
+ requestPixelEncoding = PIXEL_ENCODING_RGB;
+ break;
+ }
+
+ if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
+ && pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
+ || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
+ && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
+ DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n",
+ __func__,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->timing.pixel_encoding,
+ requestColorDepth,
+ requestPixelEncoding);
+ pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
+ pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;
+
+ dc_link_update_dsc_config(pipe_ctx);
+
+ aconnector->timing_changed = true;
+ /* store current timing */
+ if (aconnector->timing_requested)
+ *aconnector->timing_requested = pipe_ctx->stream->timing;
+ else
+ DC_LOG_ERROR("%s: timing storage failed\n", __func__);
+
+ }
+
+ dc_link_dp_set_test_pattern(
+ (struct dc_link *) link,
+ test_pattern,
+ test_pattern_color_space,
+ NULL,
+ NULL,
+ 0);
+
+ return false;
+}
+
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
// TODO
@@ -977,3 +1180,38 @@ void dm_helpers_dp_mst_update_branch_bandwidth(
// TODO
}
+static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
+{
+ bool ret_val = false;
+
+ switch (branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_0060AD:
+ case DP_BRANCH_DEVICE_ID_00E04C:
+ case DP_BRANCH_DEVICE_ID_90CC24:
+ ret_val = true;
+ break;
+ default:
+ break;
+ }
+
+ return ret_val;
+}
+
+enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
+{
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
+
+ switch (dpcd_caps->dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
+ dpcd_caps->allow_invalid_MSA_timing_param == true &&
+ dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
+ as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
+ break;
+ default:
+ break;
+ }
+
+ return as_type;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 1edf7385f8d8..810ab682f424 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -31,16 +31,14 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
+#include "amdgpu_dm_hdcp.h"
#include "dc.h"
#include "dm_helpers.h"
-#include "dc_link_ddc.h"
-#include "dc_link_dp.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"
-#include "i2caux_interface.h"
#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
@@ -49,7 +47,7 @@
#include "dc/dcn20/dcn20_resource.h"
bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
-
+#define PEAK_FACTOR_X1000 1006
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
@@ -132,7 +130,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(aconnector->edid);
drm_connector_cleanup(connector);
- drm_dp_mst_put_port_malloc(aconnector->port);
+ drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
kfree(aconnector);
}
@@ -144,7 +142,7 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
int r;
r = drm_dp_mst_connector_late_register(connector,
- amdgpu_dm_connector->port);
+ amdgpu_dm_connector->mst_output_port);
if (r < 0)
return r;
@@ -160,8 +158,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_port *port = aconnector->port;
- struct amdgpu_dm_connector *root = aconnector->mst_port;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct amdgpu_dm_connector *root = aconnector->mst_root;
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -176,6 +174,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
if (dc_link->sink_count)
dc_link_remove_remote_sink(dc_link, dc_sink);
+ DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
+ dc_sink, dc_link->sink_count);
+
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -197,7 +198,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool needs_dsc_aux_workaround(struct dc_link *link)
{
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
@@ -208,10 +208,25 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false;
}
+static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
+{
+ u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
+
+ if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
+ DRM_INFO("Synaptics Cascaded MST hub\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
- struct drm_dp_mst_port *port = aconnector->port;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
u8 dsc_caps[16] = { 0 };
u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
u8 *dsc_branch_dec_caps = NULL;
@@ -229,7 +244,11 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
*/
if (!aconnector->dsc_aux && !port->parent->port_parent &&
needs_dsc_aux_workaround(aconnector->dc_link))
- aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+ aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
+
+ /* synaptics cascaded MST hub case */
+ if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)
return false;
@@ -267,7 +286,6 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
-#endif
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
@@ -279,7 +297,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector->edid) {
struct edid *edid;
- edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
if (!edid) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
@@ -307,6 +325,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return 0;
}
+ DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
+ dc_sink, aconnector->dc_link->sink_count);
+
dc_sink->priv = aconnector;
aconnector->dc_sink = dc_sink;
}
@@ -340,15 +361,41 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return 0;
}
+ DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
+ dc_sink, aconnector->dc_link->sink_count);
+
dc_sink->priv = aconnector;
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
+	/* When a display is unplugged from an MST hub, its connector is
+	 * destroyed in dm_dp_mst_connector_destroy and the connector's
+	 * HDCP properties (type, undesired, desired, enabled) are lost.
+	 * So, save the HDCP properties into hdcp_work within
+	 * amdgpu_dm_atomic_commit_tail. If the same display is plugged
+	 * back with the same display index, its HDCP properties will be
+	 * retrieved from hdcp_work within dm_dp_mst_get_modes.
+	 */
+ if (aconnector->dc_sink && connector->state) {
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ if (adev->dm.hdcp_workqueue) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ connector->state->hdcp_content_type =
+ hdcp_w->hdcp_content_type[connector->index];
+ connector->state->content_protection =
+ hdcp_w->content_protection[connector->index];
+ }
+ }
+
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
memset(&aconnector->dc_sink->dsc_caps,
0, sizeof(aconnector->dc_sink->dsc_caps));
@@ -356,7 +403,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!retrieve_downstream_port_device(aconnector))
memset(&aconnector->mst_downstream_port_present,
0, sizeof(aconnector->mst_downstream_port_present));
-#endif
}
}
@@ -386,15 +432,15 @@ dm_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
- struct drm_dp_mst_port *port = aconnector->port;
+ struct amdgpu_dm_connector *master = aconnector->mst_root;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
int connection_status;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
- aconnector->port);
+ aconnector->mst_output_port);
if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {
uint8_t dpcd_rev;
@@ -435,6 +481,9 @@ dm_dp_mst_detect(struct drm_connector *connector,
if (aconnector->dc_link->sink_count)
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+ DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
+		aconnector->dc_sink, aconnector->dc_link->sink_count);
+
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -451,8 +500,8 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr;
- struct drm_dp_mst_port *mst_port = aconnector->port;
+ struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr;
+ struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;
return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
}
@@ -468,7 +517,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
- kfree(encoder);
}
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
@@ -515,8 +563,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return NULL;
connector = &aconnector->base;
- aconnector->port = port;
- aconnector->mst_port = master;
+ aconnector->mst_output_port = port;
+ aconnector->mst_root = master;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
@@ -613,8 +661,6 @@ int dm_mst_get_pbn_divider(struct dc_link *link)
dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
struct dsc_mst_fairness_params {
struct dc_crtc_timing *timing;
struct dc_sink *sink;
@@ -628,12 +674,25 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+ u8 link_coding_cap;
+ uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+ if (link_coding_cap == DP_128b_132b_ENCODING)
+ fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+ return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps = div_u64(peak_kbps, 1000);
+ peak_kbps *= fec_overhead_multiplier_x1000;
+ peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
@@ -644,16 +703,19 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
{
struct drm_connector *drm_connector;
int i;
+ struct dc_dsc_config_options dsc_options = {0};
for (i = 0; i < count; i++) {
drm_connector = &params[i].aconnector->base;
+ dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
+
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
- params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
- drm_connector->display_info.max_dsc_bpp,
+ &dsc_options,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@@ -696,15 +758,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
u64 kbps;
struct drm_connector *drm_connector = &param.aconnector->base;
- uint32_t max_dsc_target_bpp_limit_override =
- drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
- param.sink->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;
@@ -727,11 +790,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -827,6 +891,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -856,7 +921,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -869,7 +934,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -898,17 +963,13 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
-#endif
-
/* Set up params */
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -922,7 +983,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!aconnector)
continue;
- if (!aconnector->port)
+ if (!aconnector->mst_output_port)
continue;
stream->timing.flags.DSC = 0;
@@ -930,7 +991,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].timing = &stream->timing;
params[count].sink = stream->sink;
params[count].aconnector = aconnector;
- params[count].port = aconnector->port;
+ params[count].port = aconnector->mst_output_port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
debugfs_overwrite = true;
@@ -964,7 +1025,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -983,7 +1044,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -991,7 +1052,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1125,6 +1186,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct resource_pool *res_pool;
int link_vars_start_index = 0;
int ret = 0;
@@ -1133,13 +1195,14 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
for (i = 0; i < dc_state->stream_count; i++) {
stream = dc_state->streams[i];
+ res_pool = stream->ctx->dc->res_pool;
if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
continue;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink || !aconnector->port)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1148,13 +1211,14 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
- if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ if (!res_pool->funcs->remove_stream_from_ctx ||
+ res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return -EINVAL;
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mst_mgr = aconnector->port->mgr;
+ mst_mgr = aconnector->mst_output_port->mgr;
ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
&link_vars_start_index);
if (ret != 0)
@@ -1200,7 +1264,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink || !aconnector->port)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1212,7 +1276,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mst_mgr = aconnector->port->mgr;
+ mst_mgr = aconnector->mst_output_port->mgr;
ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
&link_vars_start_index);
if (ret != 0)
@@ -1346,6 +1410,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
goto clean_exit;
}
@@ -1406,14 +1471,12 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
int bpp, pbn, branch_max_throughput_mps = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
@@ -1427,8 +1490,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
* with DSC enabled.
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
- aconnector->port->passthrough_aux) {
- mst_mgr = aconnector->port->mgr;
+ aconnector->mst_output_port->passthrough_aux) {
+ mst_mgr = aconnector->mst_output_port->mgr;
mutex_lock(&mst_mgr->lock);
cur_link_settings = stream->link->verified_link_cap;
@@ -1436,7 +1499,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
&cur_link_settings
);
- down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn);
+ down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
/* pick the bottleneck */
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
@@ -1455,16 +1518,13 @@ enum dc_status dm_dp_mst_is_port_support_mode(
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
-#endif
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
- if (pbn > aconnector->port->full_pbn)
+ if (pbn > aconnector->mst_output_port->full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
}
-#endif
/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 97fd70df531b..1e4ede1e57ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -34,6 +34,21 @@
#define SYNAPTICS_RC_OFFSET 0x4BC
#define SYNAPTICS_RC_DATA 0x4C0
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+
+/**
+ * Panamera MST Hub detection
+ * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
+ * Check from beginning of branch device vendor specific field (050Ch)
+ */
+#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
+#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
+#define SYNAPTICS_CASCADED_HUB_ID 0x5A
+#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3c50b3ff7954..322668973747 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -67,7 +67,16 @@ static const uint32_t overlay_formats[] = {
DRM_FORMAT_RGBA8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
- DRM_FORMAT_RGB565
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010
+};
+
+static const uint32_t video_formats[] = {
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010
};
static const u32 cursor_formats[] = {
@@ -81,12 +90,12 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
-void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value)
{
@@ -732,25 +741,7 @@ static int get_plane_formats(const struct drm_plane *plane,
return num_formats;
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
-static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane)
-{
- drm_object_attach_property(&plane->base,
- dm->degamma_lut_property,
- 0);
- drm_object_attach_property(&plane->base,
- dm->degamma_lut_size_property,
- MAX_COLOR_LUT_ENTRIES);
- drm_object_attach_property(&plane->base, dm->ctm_property,
- 0);
- drm_object_attach_property(&plane->base, dm->sdr_boost_property,
- DEFAULT_SDR_BOOST);
-
- return 0;
-}
-#endif
-
-int fill_plane_buffer_attributes(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
@@ -909,7 +900,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_new->dc_state;
bool force_disable_dcc = !plane_state->dcc.enable;
- fill_plane_buffer_attributes(
+ amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
@@ -990,7 +981,7 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev,
*min_downscale = 1000;
}
-int dm_plane_helper_check_state(struct drm_plane_state *state,
+int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state)
{
struct drm_framebuffer *fb = state->fb;
@@ -1044,7 +1035,7 @@ int dm_plane_helper_check_state(struct drm_plane_state *state,
state, new_crtc_state, min_scale, max_scale, true, true);
}
-int fill_dc_scaling_info(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
@@ -1152,11 +1143,11 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!new_crtc_state)
return -EINVAL;
- ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
- ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -1220,7 +1211,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
return 0;
}
-void handle_cursor_update(struct drm_plane *plane,
+void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
@@ -1305,7 +1296,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
plane->state->crtc_w = new_state->crtc_w;
plane->state->crtc_h = new_state->crtc_h;
- handle_cursor_update(plane, old_state);
+ amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
@@ -1328,10 +1319,6 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
if (amdgpu_state)
__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- if (amdgpu_state)
- amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST;
-#endif
}
static struct drm_plane_state *
@@ -1351,15 +1338,6 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
dc_plane_state_retain(dm_plane_state->dc_state);
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
- if (dm_plane_state->degamma_lut)
- drm_property_blob_get(dm_plane_state->degamma_lut);
- if (dm_plane_state->ctm)
- drm_property_blob_get(dm_plane_state->ctm);
-
- dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost;
-#endif
-
return &dm_plane_state->base;
}
@@ -1427,103 +1405,12 @@ static void dm_drm_plane_destroy_state(struct drm_plane *plane,
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- drm_property_blob_put(dm_plane_state->degamma_lut);
- drm_property_blob_put(dm_plane_state->ctm);
-#endif
if (dm_plane_state->dc_state)
dc_plane_state_release(dm_plane_state->dc_state);
drm_atomic_helper_plane_destroy_state(plane, state);
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
-/* copied from drm_atomic_uapi.c */
-static int atomic_replace_property_blob_from_id(struct drm_device *dev,
- struct drm_property_blob **blob,
- uint64_t blob_id,
- ssize_t expected_size,
- ssize_t expected_elem_size,
- bool *replaced)
-{
- struct drm_property_blob *new_blob = NULL;
-
- if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL)
- return -EINVAL;
-
- if (expected_size > 0 &&
- new_blob->length != expected_size) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- if (expected_elem_size > 0 &&
- new_blob->length % expected_elem_size != 0) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- }
-
- *replaced |= drm_property_replace_blob(blob, new_blob);
- drm_property_blob_put(new_blob);
-
- return 0;
-}
-
-int dm_drm_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct amdgpu_device *adev = drm_to_adev(plane->dev);
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- int ret = 0;
- bool replaced;
-
- if (property == adev->dm.degamma_lut_property) {
- ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
- &dm_plane_state->degamma_lut,
- val, -1, sizeof(struct drm_color_lut),
- &replaced);
- } else if (property == adev->dm.ctm_property) {
- ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
- &dm_plane_state->ctm,
- val,
- sizeof(struct drm_color_ctm), -1,
- &replaced);
- } else if (property == adev->dm.sdr_boost_property) {
- dm_plane_state->sdr_boost = val;
- } else {
- return -EINVAL;
- }
-
- return ret;
-}
-
-int dm_drm_plane_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- struct amdgpu_device *adev = drm_to_adev(plane->dev);
-
- if (property == adev->dm.degamma_lut_property) {
- *val = (dm_plane_state->degamma_lut) ?
- dm_plane_state->degamma_lut->base.id : 0;
- } else if (property == adev->dm.ctm_property) {
- *val = (dm_plane_state->ctm) ? dm_plane_state->ctm->base.id : 0;
- } else if (property == adev->dm.sdr_boost_property) {
- *val = dm_plane_state->sdr_boost;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1532,10 +1419,6 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
.format_mod_supported = dm_plane_format_mod_supported,
-#ifdef CONFIG_DRM_AMD_DC_HDR
- .atomic_set_property = dm_drm_plane_set_property,
- .atomic_get_property = dm_drm_plane_get_property,
-#endif
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@@ -1606,9 +1489,6 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- attach_color_mgmt_properties(dm, plane);
-#endif
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
@@ -1616,3 +1496,14 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
+bool is_video_format(uint32_t format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(video_formats); i++)
+ if (format == video_formats[i])
+ return true;
+
+ return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 286981a2dd40..930f1572f898 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -29,17 +29,17 @@
#include "dc.h"
-void handle_cursor_update(struct drm_plane *plane,
+void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-int fill_dc_scaling_info(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-int dm_plane_helper_check_state(struct drm_plane_state *state,
+int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-int fill_plane_buffer_attributes(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
@@ -56,10 +56,11 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
-void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value);
+bool is_video_format(uint32_t format);
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 26291db0a3cf..d647f68fd563 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index 1743ca0a3641..c42aa947c969 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -89,6 +89,7 @@ void dc_fpu_begin(const char *function_name, const int line)
if (*pcpu == 1) {
#if defined(CONFIG_X86)
+ migrate_disable();
kernel_fpu_begin();
#elif defined(CONFIG_PPC64)
if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
@@ -129,6 +130,7 @@ void dc_fpu_end(const char *function_name, const int line)
if (*pcpu <= 0) {
#if defined(CONFIG_X86)
kernel_fpu_end();
+ migrate_enable();
#elif defined(CONFIG_PPC64)
if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
disable_kernel_vsx();
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index b9effadfc4bb..69ffd4424dc7 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,14 +22,13 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual
+DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual dsc
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
KCOV_INSTRUMENT := n
DC_LIBS += dcn20
-DC_LIBS += dsc
DC_LIBS += dcn10
DC_LIBS += dcn21
DC_LIBS += dcn201
@@ -56,17 +55,14 @@ ifdef CONFIG_DRM_AMD_DC_SI
DC_LIBS += dce60
endif
-ifdef CONFIG_DRM_AMD_DC_HDCP
DC_LIBS += hdcp
-endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
-dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
+DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index a1a00f432168..27af9d3c2b73 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -33,7 +33,6 @@
#include "include/gpio_service_interface.h"
#include "include/grph_object_ctrl_defs.h"
#include "include/bios_parser_interface.h"
-#include "include/i2caux_interface.h"
#include "include/logger_interface.h"
#include "command_table.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 074e70a5c458..cce47d3f1a13 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -32,7 +32,6 @@
#include "dc_bios_types.h"
#include "include/grph_object_ctrl_defs.h"
#include "include/bios_parser_interface.h"
-#include "include/i2caux_interface.h"
#include "include/logger_interface.h"
#include "command_table2.h"
@@ -516,11 +515,8 @@ static enum bp_result get_gpio_i2c_info(
info->i2c_slave_address = record->i2c_slave_addr;
/* TODO: check how to get register offset for en, Y, etc. */
- info->gpio_info.clk_a_register_index =
- le16_to_cpu(
- header->gpio_pin[table_index].data_a_reg_index);
- info->gpio_info.clk_a_shift =
- header->gpio_pin[table_index].gpio_bitshift;
+ info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index);
+ info->gpio_info.clk_a_shift = pin->gpio_bitshift;
return BP_RESULT_OK;
}
@@ -1698,14 +1694,15 @@ static enum bp_result bios_parser_enable_disp_power_gating(
static enum bp_result bios_parser_enable_lvtma_control(
struct dc_bios *dcb,
uint8_t uc_pwr_on,
- uint8_t panel_instance)
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
if (!bp->cmd_tbl.enable_lvtma_control)
return BP_RESULT_FAILURE;
- return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance);
+ return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);
}
static bool bios_parser_is_accelerated_mode(
@@ -2064,7 +2061,7 @@ static enum bp_result bios_parser_get_encoder_cap_info(
if (!info)
return BP_RESULT_BADINPUT;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
/* encoder cap record not available in v1_5 */
if (bp->object_info_tbl.revision.minor == 5)
return BP_RESULT_NORECORD;
@@ -2929,7 +2926,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -3032,14 +3028,8 @@ static enum bp_result construct_integrated_info(
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
for (j = i; j > 0; --j) {
if (info->disp_clk_voltage[j].max_supported_clk <
- info->disp_clk_voltage[j-1].max_supported_clk
- ) {
- /* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
- }
+ info->disp_clk_voltage[j-1].max_supported_clk)
+ swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index f52f7ff7ead4..1ef9e4053bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -986,7 +986,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance);
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait);
static void init_enable_lvtma_control(struct bios_parser *bp)
{
@@ -998,7 +999,8 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
static void enable_lvtma_control_dmcub(
struct dc_dmub_srv *dmcub,
uint8_t uc_pwr_on,
- uint8_t panel_instance)
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait)
{
union dmub_rb_cmd cmd;
@@ -1012,6 +1014,8 @@ static void enable_lvtma_control_dmcub(
uc_pwr_on;
cmd.lvtma_control.data.panel_inst =
panel_instance;
+ cmd.lvtma_control.data.bypass_panel_control_wait =
+ bypass_panel_control_wait;
dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
@@ -1021,7 +1025,8 @@ static void enable_lvtma_control_dmcub(
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance)
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait)
{
enum bp_result result = BP_RESULT_FAILURE;
@@ -1029,7 +1034,8 @@ static enum bp_result enable_lvtma_control(
bp->base.ctx->dc->debug.dmub_command_table) {
enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
uc_pwr_on,
- panel_instance);
+ panel_instance,
+ bypass_panel_control_wait);
return BP_RESULT_OK;
}
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index be060b4b87db..b6d09bf6cf72 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,8 @@ struct cmd_tbl {
struct bios_parser *bp, uint8_t id);
enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance);
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait);
};
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 271d8e573181..ad390e4cd0a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -74,7 +74,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN10
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f276abb63bcd..6127d6045336 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -29,6 +29,7 @@
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
+#include "link.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
@@ -103,7 +104,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
int edp_num;
unsigned int panel_inst;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (dc->hwss.exit_optimized_pwr_state)
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
@@ -115,7 +116,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
- dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
+ dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -128,13 +129,13 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
int edp_num;
unsigned int panel_inst;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
- dc_link_set_psr_allow_active(edp_link,
+ dc->link_srv->edp_set_psr_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
@@ -220,7 +221,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dce120_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
case FAMILY_RV: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
@@ -350,7 +351,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */
default:
ASSERT(0); /* Unknown Asic */
break;
@@ -363,7 +364,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
switch (clk_mgr_base->ctx->asic_id.chip_family) {
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
@@ -404,7 +405,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
default:
break;
}
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
kfree(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index f0577dcd1af6..811720749faf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -162,7 +162,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
-struct clk_mgr_funcs dcn201_funcs = {
+static struct clk_mgr_funcs dcn201_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn201_update_clocks,
.init_clocks = dcn201_init_clocks,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ca6dfd2d7561..bd9fd0b54f46 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -706,7 +706,7 @@ void rn_clk_mgr_construct(
enum pp_smu_status status = 0;
int is_green_sardine = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 3ce0ee0d012f..694a9d3d92ae 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -577,8 +577,7 @@ void dcn3_clk_mgr_construct(
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 24715ca2fa94..01383aac6b41 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
};
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+ uint32_t max = 0;
+ int i;
+
+ for (i = 0; i < num_clocks; ++i) {
+ if (clocks[i] > max)
+ max = clocks[i];
+ }
+
+ return max;
+}
+
static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
unsigned int voltage)
{
@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.num_entries = j + 1;
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
}
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 1c0569b1dc8f..f9e2e0c3095e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,6 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
+#include "link.h"
#include "logger_types.h"
#undef DC_LOGGER
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 20a06c04e4a1..5cb44f838bde 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "dcn314_smu.h"
@@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
+
+ /* Checking stream / link detection ensuring that PHY is active*/
+ /* Check stream / link detection to ensure that the PHY is active */
+ display_count++;
+
}
for (i = 0; i < dc->link_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index f47cfe6b42bd..0765334f0825 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
+ msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
+ DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
else
ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 07edd9777edf..b737cbc468f5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,7 +46,7 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "dc_link_dp.h"
+#include "link.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
@@ -87,6 +87,16 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
+static bool should_disable_otg(struct pipe_ctx *pipe)
+{
+ bool ret = true;
+
+ if (pipe->stream->link->link_enc && pipe->stream->link->link_enc->funcs->is_dig_enabled &&
+ pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc))
+ ret = false;
+ return ret;
+}
+
static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
@@ -98,12 +108,16 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- reset_sync_context_for_pipe(dc, context, i);
- } else
- pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ dc_is_virtual_signal(pipe->stream->signal))) {
+
+ /* This w/a should not trigger when we have a dig active */
+ if (should_disable_otg(pipe)) {
+ if (disable) {
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
}
}
}
@@ -508,6 +522,11 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[0];
bw_params->clk_table.entries[i].wck_ratio = 1;
i++;
+ } else if (clock_table->NumDcfClkLevelsEnabled != clock_table->NumSocClkLevelsEnabled) {
+ bw_params->clk_table.entries[i-1].voltage = clock_table->SocVoltage[clock_table->NumSocClkLevelsEnabled - 1];
+ bw_params->clk_table.entries[i-1].socclk_mhz = clock_table->SocClocks[clock_table->NumSocClkLevelsEnabled - 1];
+ bw_params->clk_table.entries[i-1].dispclk_mhz = clock_table->DispClocks[clock_table->NumDispClkLevelsEnabled - 1];
+ bw_params->clk_table.entries[i-1].dppclk_mhz = clock_table->DppClocks[clock_table->NumDispClkLevelsEnabled - 1];
}
bw_params->clk_table.num_entries = i;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 3edc81e2d417..93db4dbee713 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "dc_link_dp.h"
+#include "link.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 200fcec19186..8d9444db092a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "atomfirmware.h"
#include "smu13_driver_if.h"
@@ -255,6 +255,167 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s
}
}
+void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower)
+{
+ int i;
+
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst, dppclk_khz, prev_dppclk_khz;
+
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
+ /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
+ * In this case, just continue the loop
+ */
+ continue;
+ } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
+ /* The software state is not valid if dpp resource is NULL and
+ * dppclk_khz > 0.
+ */
+ ASSERT(false);
+ continue;
+ }
+
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ }
+}
+
+static void dcn32_update_clocks_update_dentist(
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context)
+{
+ uint32_t new_disp_divider = 0;
+ uint32_t new_dispclk_wdivider = 0;
+ uint32_t old_dispclk_wdivider = 0;
+ uint32_t i;
+ uint32_t dentist_dispclk_wdivider_readback = 0;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+
+ if (clk_mgr->base.clks.dispclk_khz == 0)
+ return;
+
+ new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+
+ new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider);
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &old_dispclk_wdivider);
+
+ /* When changing divider to or from 127, some extra programming is required to prevent corruption */
+ if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) {
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ uint32_t fifo_level;
+ struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ int32_t N;
+ int32_t j;
+
+ if (!pipe_ctx->stream)
+ continue;
+ /* Virtual encoders don't have this function */
+ if (!stream_enc->funcs->get_fifo_cal_average_level)
+ continue;
+ fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
+ stream_enc);
+ N = fifo_level / 4;
+ dccg->funcs->set_fifo_errdet_ovr_en(
+ dccg,
+ true);
+ for (j = 0; j < N - 4; j++)
+ dccg->funcs->otg_drop_pixel(
+ dccg,
+ pipe_ctx->stream_res.tg->inst);
+ dccg->funcs->set_fifo_errdet_ovr_en(
+ dccg,
+ false);
+ }
+ } else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) {
+ /* request clock with 126 divider first */
+ uint32_t temp_disp_divider = dentist_get_divider_from_did(126);
+ uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
+
+ if (clk_mgr->smu_present)
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+
+ if (dc->debug.override_dispclk_programming) {
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
+
+ if (dentist_dispclk_wdivider_readback != 126) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, 126);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ }
+ }
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ uint32_t fifo_level;
+ int32_t N;
+ int32_t j;
+
+ if (!pipe_ctx->stream)
+ continue;
+ /* Virtual encoders don't have this function */
+ if (!stream_enc->funcs->get_fifo_cal_average_level)
+ continue;
+ fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
+ stream_enc);
+ N = fifo_level / 4;
+ dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
+ for (j = 0; j < 12 - N; j++)
+ dccg->funcs->otg_add_pixel(dccg,
+ pipe_ctx->stream_res.tg->inst);
+ dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
+ }
+ }
+
+ /* do requested DISPCLK updates */
+ if (clk_mgr->smu_present)
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+
+ if (dc->debug.override_dispclk_programming) {
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
+
+ if (dentist_dispclk_wdivider_readback > new_dispclk_wdivider) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, new_dispclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ }
+ }
+
+}
+
+static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dispclk_wdivider;
+ int disp_divider;
+
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+ disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+
+ /* Return DISPCLK freq in kHz */
+ if (disp_divider)
+ return (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+ return 0;
+}
+
+
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -396,9 +557,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
-
update_dispclk = true;
}
@@ -417,19 +575,19 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
- dcn20_update_clocks_update_dentist(clk_mgr, context);
+ dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
- dcn20_update_clocks_update_dentist(clk_mgr, context);
+ dcn32_update_clocks_update_dentist(clk_mgr, context);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.
*/
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
@@ -711,6 +869,7 @@ static struct clk_mgr_funcs dcn32_funcs = {
.are_clock_states_equal = dcn32_are_clock_states_equal,
.enable_pme_wa = dcn32_enable_pme_wa,
.is_smu_present = dcn32_is_smu_present,
+ .get_dispclk_from_dentist = dcn32_get_dispclk_from_dentist,
};
void dcn32_clk_mgr_construct(
@@ -719,6 +878,8 @@ void dcn32_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
+ struct clk_log_info log_info = {0};
+
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn32_funcs;
if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
@@ -752,6 +913,7 @@ void dcn32_clk_mgr_construct(
clk_mgr->base.clks.ref_dtbclk_khz = 268750;
}
+
/* integer part is now VCO frequency in kHz */
clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
@@ -759,6 +921,8 @@ void dcn32_clk_mgr_construct(
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */
+ dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+
if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;
@@ -783,8 +947,7 @@ void dcn32_clk_mgr_construct(
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
index 57e09c7c95f5..186daada7b03 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
@@ -32,6 +32,9 @@ void dcn32_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower);
+
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
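
The new dcn32_update_clocks_update_dentist() and dcn32_get_dispclk_from_dentist() above are two directions of the same DENTIST relationship: the divider is computed from the VCO frequency and the requested DISPCLK, and the readback path inverts it. A minimal sketch of that arithmetic, assuming integer kHz values and taking the scale factor and the divider/DID conversion as given by the hunks above (the helper names here are illustrative only):

	/* divider selection: divider = scale * vco_khz / dispclk_khz */
	static uint32_t dentist_divider_for_dispclk(uint32_t scale,
			uint32_t vco_khz, uint32_t dispclk_khz)
	{
		return dispclk_khz ? scale * vco_khz / dispclk_khz : 0;
	}

	/* readback: dispclk_khz = scale * vco_khz / divider */
	static uint32_t dentist_dispclk_from_divider(uint32_t scale,
			uint32_t vco_khz, uint32_t divider)
	{
		return divider ? scale * vco_khz / divider : 0;
	}
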
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0cb8d1f934d1..52564b93f7eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -33,6 +33,7 @@
#include "resource.h"
+#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"
@@ -52,12 +53,10 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "dc_link.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"
-#include "dc_link_dp.h"
#include "dc_dmub_srv.h"
#include "dsc.h"
@@ -68,14 +67,14 @@
#include "dmub/dmub_srv.h"
-#include "i2caux_interface.h"
-
#include "dce/dmub_psr.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
+#include "hw_sequencer_private.h"
+
#include "dce/dmub_outbox.h"
#define CTX \
@@ -149,7 +148,7 @@ static void destroy_links(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
if (NULL != dc->links[i])
- link_destroy(&dc->links[i]);
+ dc->link_srv->destroy_link(&dc->links[i]);
}
}
@@ -218,7 +217,7 @@ static bool create_links(
link_init_params.connector_index = i;
link_init_params.link_index = dc->link_count;
link_init_params.dc = dc;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
@@ -240,7 +239,7 @@ static bool create_links(
link_init_params.dc = dc;
link_init_params.is_dpia_link = true;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
link->dc = dc;
@@ -382,16 +381,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
- * dc_stream_adjust_vmin_vmax:
+ * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
*
* Looks up the pipe context of dc_stream_state and updates the
* vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
* Rate, which is a power-saving feature that targets reducing panel
* refresh rate while the screen is static
*
- * @dc: dc reference
- * @stream: Initial dc stream state
- * @adjust: Updated parameters for vertical_total_min and vertical_total_max
+ * Return: %true if the pipe context is found and adjusted;
+ * %false if the pipe context is not found.
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@@ -399,6 +400,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ /*
+ * Don't adjust DRR while there are bandwidth optimizations pending, to
+ * avoid conflicting with firmware updates.
+ */
+ if (dc->ctx->dce_version > DCE_VERSION_MAX)
+ if (dc->optimized_required || dc->wm_optimized_required)
+ return false;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -419,14 +428,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
}
/**
- * dc_stream_get_last_used_drr_vtotal - dc_stream_get_last_vrr_vtotal
+ * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
+ * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
*
* @dc: [in] dc reference
* @stream: [in] Initial dc stream state
- * @adjust: [in] Updated parameters for vertical_total_min and
+ * @refresh_rate: [in] new refresh_rate
*
- * Looks up the pipe context of dc_stream_state and gets the last VTOTAL used
- * by DRR (Dynamic Refresh Rate)
+ * Return: %true if the pipe context is found and there is an associated
+ * timing_generator for the DC;
+ * %false if the pipe context is not found or there is no
+ * timing_generator for the DC.
*/
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
@@ -518,14 +530,15 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
}
bool
-dc_stream_forward_crc_window(struct dc *dc,
- struct rect *rect, struct dc_stream_state *stream, bool is_stop)
+dc_stream_forward_crc_window(struct dc_stream_state *stream,
+ struct rect *rect, bool is_stop)
{
struct dmcu *dmcu;
struct dc_dmub_srv *dmub_srv;
struct otg_phy_mux mux_mapping;
struct pipe_ctx *pipe;
int i;
+ struct dc *dc = stream->ctx->dc;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -566,7 +579,10 @@ dc_stream_forward_crc_window(struct dc *dc,
* once.
*
* By default, only CRC0 is configured, and the entire frame is used to
- * calculate the crc.
+ * calculate the CRC.
+ *
+ * Return: %false if the stream is not found or CRC capture is not supported;
+ * %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
@@ -635,7 +651,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
*
* Return:
- * false if stream is not found, or if CRCs are not enabled.
+ * %false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
@@ -807,6 +823,9 @@ static void dc_destruct(struct dc *dc)
dc_destroy_resource_pool(dc);
+ if (dc->link_srv)
+ link_destroy_link_service(&dc->link_srv);
+
if (dc->ctx->gpio_service)
dal_gpio_service_destroy(&dc->ctx->gpio_service);
@@ -862,12 +881,17 @@ static bool dc_construct_ctx(struct dc *dc,
dc_ctx->perf_trace = dc_perf_trace_create();
if (!dc_ctx->perf_trace) {
+ kfree(dc_ctx);
ASSERT_CRITICAL(false);
return false;
}
dc->ctx = dc_ctx;
+ dc->link_srv = link_create_link_service();
+ if (!dc->link_srv)
+ return false;
+
return true;
}
@@ -976,7 +1000,7 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
if (dc->res_pool->funcs->update_bw_bounding_box) {
@@ -1049,6 +1073,53 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
}
}
+static void phantom_pipe_blank(
+ struct dc *dc,
+ struct timing_generator *tg,
+ int width,
+ int height)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ otg_active_width = width;
+ otg_active_height = height;
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (tg->funcs->is_tg_enabled(tg))
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -1107,8 +1178,14 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* again for different use.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (tg->funcs->enable_crtc)
+ if (tg->funcs->enable_crtc) {
+ int main_pipe_width, main_pipe_height;
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
+ }
}
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1191,7 +1268,7 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- core_link_disable_stream(pipe);
+ dc->link_srv->set_dpms_off(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -1290,7 +1367,7 @@ static void detect_edp_presence(struct dc *dc)
int i;
int edp_num;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (!edp_num)
return;
@@ -1299,7 +1376,7 @@ static void detect_edp_presence(struct dc *dc)
if (dc->config.edp_not_connected) {
edp_link->edp_sink_present = false;
} else {
- dc_link_detect_sink(edp_link, &type);
+ dc_link_detect_connection_type(edp_link, &type);
edp_link->edp_sink_present = (type != dc_connection_none);
}
}
@@ -1316,16 +1393,12 @@ void dc_hardware_init(struct dc *dc)
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
dc->ctx->cp_psp = init_params->cp_psp;
-#endif
}
void dc_deinit_callbacks(struct dc *dc)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
-#endif
}
void dc_destroy(struct dc **dc)
@@ -1650,7 +1723,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (is_edp_ilr_optimization_required(link, crtc_timing)) {
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
}
@@ -1740,6 +1813,8 @@ void dc_z10_save_init(struct dc *dc)
*
* Applies given context to the hardware and copy it into current context.
* It's up to the user to release the src context afterwards.
+ *
+ * Return: an enum dc_status result code for the operation
*/
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
@@ -1991,52 +2066,6 @@ context_alloc_fail:
return res;
}
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
- int i;
-
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs. */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- result = dc_commit_streams(dc, context->streams, context->stream_count);
- return result == DC_OK;
- }
-
- if (!streams_changed(dc, context->streams, context->stream_count))
- return DC_OK;
-
- DC_LOG_DC("%s: %d streams\n",
- __func__, context->stream_count);
-
- for (i = 0; i < context->stream_count; i++) {
- struct dc_stream_state *stream = context->streams[i];
-
- dc_stream_log(dc, stream);
- }
-
- /*
- * Previous validation was perfomred with fast_validation = true and
- * the full DML state required for hardware programming was skipped.
- *
- * Re-validate here to calculate these parameters / watermarks.
- */
- result = dc_validate_global_state(dc, context, false);
- if (result != DC_OK) {
- DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
- dc_status_to_str(result), result);
- return result;
- }
-
- result = dc_commit_state_no_check(dc, context);
-
- return (result == DC_OK);
-}
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
@@ -2123,27 +2152,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
- if (dc->ctx->dce_version >= DCE_VERSION_MAX)
- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- else
+ /*
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+
+ if (dc->ctx->dce_version < DCE_VERSION_MAX)
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ else {
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- if (is_flip_pending_in_pipes(dc, context))
- return;
+ if (is_flip_pending_in_pipes(dc, context))
+ return;
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == NULL ||
- context->res_ctx.pipe_ctx[i].plane_state == NULL) {
- context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
- }
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ }
- process_deferred_updates(dc);
+ process_deferred_updates(dc);
- dc->hwss.optimize_bandwidth(dc, context);
+ dc->hwss.optimize_bandwidth(dc, context);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
- dc->hwss.update_dsc_pg(dc, context, true);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
dc->optimized_required = false;
dc->wm_optimized_required = false;
@@ -2948,6 +2983,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vsp_infopacket)
stream->vsp_infopacket = *update->vsp_infopacket;
+ if (update->adaptive_sync_infopacket)
+ stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3153,12 +3191,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsc_infopacket ||
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
stream_update->vtem_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
}
if (stream_update->hdr_static_metadata &&
@@ -3194,14 +3235,16 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dsc_config)
- dp_update_dsc_config(pipe_ctx);
+ dc->link_srv->update_dsc_config(pipe_ctx);
if (stream_update->mst_bw_update) {
if (stream_update->mst_bw_update->is_increase)
- dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
- else
- dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
- }
+ dc->link_srv->increase_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
+ else
+ dc->link_srv->reduce_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
+ }
if (stream_update->pending_test_pattern) {
dc_link_dp_set_test_pattern(stream->link,
@@ -3214,7 +3257,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- core_link_disable_stream(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3224,7 +3267,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
- core_link_enable_stream(dc->current_state, pipe_ctx);
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
}
@@ -3325,6 +3368,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
+ bool subvp_curr_use = false;
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
@@ -3334,6 +3378,21 @@ static void commit_planes_for_stream(struct dc *dc,
dc_z10_restore(dc);
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* wait for all double-buffer activity to clear on all pipes */
+ int pipe_idx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+ }
+ }
+
if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
@@ -3381,6 +3440,15 @@ static void commit_planes_for_stream(struct dc *dc,
break;
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ subvp_curr_use = true;
+ break;
+ }
+ }
+
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -3426,22 +3494,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
- if (update_type != UPDATE_TYPE_FAST) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
- subvp_prev_use) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
- }
- }
- }
-
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -3470,14 +3522,9 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
-
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
+ NULL, subvp_prev_use);
return;
}
@@ -3652,54 +3699,30 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- /* For phantom pipe OTG enable, it has to be done after any previous pipe
- * that was in use has already been programmed at gotten its double buffer
- * update for "disable".
- */
- if (update_type != UPDATE_TYPE_FAST) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- /* If an active, non-phantom pipe is being transitioned into a phantom
- * pipe, wait for the double buffer update to complete first before we do
- * ANY phantom pipe programming.
- */
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- }
- }
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
- subvp_prev_use) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
- }
+ if (subvp_curr_use) {
+ /* If enabling subvp or transitioning from subvp->subvp, enable the
+ * phantom streams before we program front end for the phantom pipes.
+ */
+ if (update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.enable_phantom_streams)
+ dc->hwss.enable_phantom_streams(dc, context);
}
}
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
+
+ if (subvp_prev_use && !subvp_curr_use) {
+ /* If disabling subvp, disable phantom streams after front end
+ * programming has completed (we turn on phantom OTG in order
+ * to complete the plane disable for phantom pipes).
+ */
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ }
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
-
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
@@ -4063,24 +4086,30 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
int i, j;
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
dc_update_planes_and_stream(dc, srf_updates,
surface_count, stream,
stream_update);
return;
}
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
-
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -4103,12 +4132,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
- } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ } else if (update_type == UPDATE_TYPE_FAST) {
/*
* Previous frame finished and HW is ready for optimization.
- *
- * Only relevant for DCN behavior where we can guarantee the optimization
- * is safe to apply - retain the legacy behavior for DCE.
*/
dc_post_update_surfaces_to_stream(dc);
}
@@ -4285,7 +4311,7 @@ void dc_resume(struct dc *dc)
uint32_t i;
for (i = 0; i < dc->link_count; i++)
- core_link_resume(dc->links[i]);
+ dc->link_srv->resume(dc->links[i]);
}
bool dc_is_dmcu_initialized(struct dc *dc)
@@ -4297,157 +4323,6 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address)
-{
- if (dc->res_pool->oem_device)
- return dce_i2c_oem_device_present(
- dc->res_pool,
- dc->res_pool->oem_device,
- slave_address);
-
- return false;
-}
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd)
-{
-
- struct dc_link *link = dc->links[link_index];
- struct ddc_service *ddc = link->ddc;
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-}
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd)
-{
- struct ddc_service *ddc = dc->res_pool->oem_device;
- if (ddc)
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-
- return false;
-}
-
-static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
-{
- if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- dc_sink_retain(sink);
-
- dc_link->remote_sinks[dc_link->sink_count] = sink;
- dc_link->sink_count++;
-
- return true;
-}
-
-/*
- * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
- *
- * EDID length is in bytes
- */
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data)
-{
- struct dc_sink *dc_sink;
- enum dc_edid_status edid_status;
-
- if (len > DC_MAX_EDID_BUFFER_SIZE) {
- dm_error("Max EDID buffer size breached!\n");
- return NULL;
- }
-
- if (!init_data) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- if (!init_data->link) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- dc_sink = dc_sink_create(init_data);
-
- if (!dc_sink)
- return NULL;
-
- memmove(dc_sink->dc_edid.raw_edid, edid, len);
- dc_sink->dc_edid.length = len;
-
- if (!link_add_remote_sink_helper(
- link,
- dc_sink))
- goto fail_add_sink;
-
- edid_status = dm_helpers_parse_edid_caps(
- link,
- &dc_sink->dc_edid,
- &dc_sink->edid_caps);
-
- /*
- * Treat device as no EDID device if EDID
- * parsing fails
- */
- if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
- dc_sink->dc_edid.length = 0;
- dm_error("Bad EDID, status%d!\n", edid_status);
- }
-
- return dc_sink;
-
-fail_add_sink:
- dc_sink_release(dc_sink);
- return NULL;
-}
-
-/*
- * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
- *
- * Note that this just removes the struct dc_sink - it doesn't
- * program hardware or alter other members of dc_link
- */
-void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
-{
- int i;
-
- if (!link->sink_count) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- for (i = 0; i < link->sink_count; i++) {
- if (link->remote_sinks[i] == sink) {
- dc_sink_release(sink);
- link->remote_sinks[i] = NULL;
-
- /* shrink array to remove empty place */
- while (i < link->sink_count - 1) {
- link->remote_sinks[i] = link->remote_sinks[i+1];
- i++;
- }
- link->remote_sinks[i] = NULL;
- link->sink_count--;
- return;
- }
- }
-}
-
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -4704,7 +4579,7 @@ bool dc_enable_dmub_notifications(struct dc *dc)
/**
* dc_enable_dmub_outbox - Enables DMUB unsolicited notification
*
- * dc: [in] dc structure
+ * @dc: [in] dc structure
*
* Enables DMUB unsolicited notifications to x86 via outbox.
*/
@@ -4905,8 +4780,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
/**
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption
*
- * @dc [in]: dc structure
- * @hpd_int_enable [in]: 1 for hpd int enable, 0 to disable
+ * @dc: [in] dc structure
+ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
*
* Submits dpia hpd int enable command to dmub via inbox message
*/
@@ -4970,7 +4845,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
return;
}
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
/* Determine panel inst */
for (i = 0; i < edp_num; i++) {
@@ -4987,7 +4862,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
}
/**
- * dc_extended_blank_supported 0 Decide whether extended blank is supported
+ * dc_extended_blank_supported - Decide whether extended blank is supported
*
* @dc: [in] Current DC state
*
@@ -4996,7 +4871,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
* ability to enter z9/z10.
*
* Return:
- * Indicate whether extended blank is supported (true or false)
+ * Indicate whether extended blank is supported (%true or %false)
*/
bool dc_extended_blank_supported(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 471078fc3900..2acbf692193f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -73,28 +73,38 @@ struct out_csc_color_matrix_type {
static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_RGB_TYPE,
- { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0} },
{ COLOR_SPACE_RGB_LIMITED_TYPE,
- { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
+ { 0x1B67, 0, 0, 0x201,
+ 0, 0x1B67, 0, 0x201,
+ 0, 0, 0x1B67, 0x201} },
{ COLOR_SPACE_YCBCR601_TYPE,
- { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
- 0xF6B7, 0xE04, 0x1004} },
+ { 0xE04, 0xF444, 0xFDB9, 0x1004,
+ 0x831, 0x1016, 0x320, 0x201,
+ 0xFB45, 0xF6B7, 0xE04, 0x1004} },
{ COLOR_SPACE_YCBCR709_TYPE,
- { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
- 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+ { 0xE04, 0xF345, 0xFEB7, 0x1004,
+ 0x5D3, 0x1399, 0x1FA, 0x201,
+ 0xFCCA, 0xF533, 0xE04, 0x1004} },
/* TODO: correct values below */
{ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
- { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
- 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+ { 0xE00, 0xF447, 0xFDB9, 0x1000,
+ 0x991, 0x12C9, 0x3A6, 0x200,
+ 0xFB47, 0xF6B9, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
- { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
- 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+ { 0xE00, 0xF349, 0xFEB7, 0x1000,
+ 0x6CE, 0x16E3, 0x24F, 0x200,
+ 0xFCCB, 0xF535, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR2020_TYPE,
- { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
- 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+ { 0x1000, 0xF149, 0xFEB7, 0x1004,
+ 0x0868, 0x15B2, 0x01E6, 0x201,
+ 0xFB88, 0xF478, 0x1000, 0x1004} },
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
- { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
- 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
+ { 0x0000, 0x0000, 0x0000, 0x1000,
+ 0x0000, 0x0000, 0x0000, 0x0200,
+ 0x0000, 0x0000, 0x0000, 0x1000} },
};
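The re-wrapped initializers above are easier to read with the layout in mind: each entry is a 3x4 output color-space-conversion matrix stored as twelve fixed-point register values, and each of the three new source lines holds one row, i.e. three multiplier coefficients followed by one offset for that output channel. Schematically (a reading aid under that assumption, not code from this patch):

  out[0] = m00*in[0] + m01*in[1] + m02*in[2] + off0
  out[1] = m10*in[0] + m11*in[1] + m12*in[2] + off1
  out[2] = m20*in[0] + m21*in[1] + m22*in[2] + off2

so a row written as { 0x1B67, 0, 0, 0x201 } applies a single scale factor plus a limited-range offset to one channel.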
static bool is_rgb_type(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
deleted file mode 100644
index 342e906ae26e..000000000000
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ /dev/null
@@ -1,4961 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include <linux/slab.h>
-
-#include "dm_services.h"
-#include "atomfirmware.h"
-#include "dm_helpers.h"
-#include "dc.h"
-#include "grph_object_id.h"
-#include "gpio_service_interface.h"
-#include "core_status.h"
-#include "dc_link_dp.h"
-#include "dc_link_dpia.h"
-#include "dc_link_ddc.h"
-#include "link_hwss.h"
-#include "opp.h"
-
-#include "link_encoder.h"
-#include "hw_sequencer.h"
-#include "resource.h"
-#include "abm.h"
-#include "fixed31_32.h"
-#include "dpcd_defs.h"
-#include "dmcu.h"
-#include "hw/clk_mgr.h"
-#include "dce/dmub_psr.h"
-#include "dmub/dmub_srv.h"
-#include "inc/hw/panel_cntl.h"
-#include "inc/link_enc_cfg.h"
-#include "inc/link_dpcd.h"
-#include "link/link_dp_trace.h"
-
-#include "dc/dcn30/dcn30_vpg.h"
-
-#define DC_LOGGER_INIT(logger)
-
-#define LINK_INFO(...) \
- DC_LOG_HW_HOTPLUG( \
- __VA_ARGS__)
-
-#define RETIMER_REDRIVER_INFO(...) \
- DC_LOG_RETIMER_REDRIVER( \
- __VA_ARGS__)
-
-/*******************************************************************************
- * Private functions
- ******************************************************************************/
-static void dc_link_destruct(struct dc_link *link)
-{
- int i;
-
- if (link->hpd_gpio) {
- dal_gpio_destroy_irq(&link->hpd_gpio);
- link->hpd_gpio = NULL;
- }
-
- if (link->ddc)
- dal_ddc_service_destroy(&link->ddc);
-
- if (link->panel_cntl)
- link->panel_cntl->funcs->destroy(&link->panel_cntl);
-
- if (link->link_enc) {
- /* Update link encoder resource tracking variables. These are used for
- * the dynamic assignment of link encoders to streams. Virtual links
- * are not assigned encoder resources on creation.
- */
- if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
- link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
- link->dc->res_pool->dig_link_enc_count--;
- }
- link->link_enc->funcs->destroy(&link->link_enc);
- }
-
- if (link->local_sink)
- dc_sink_release(link->local_sink);
-
- for (i = 0; i < link->sink_count; ++i)
- dc_sink_release(link->remote_sinks[i]);
-}
-
-struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service)
-{
- enum bp_result bp_result;
- struct graphics_object_hpd_info hpd_info;
- struct gpio_pin_info pin_info;
-
- if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
- return NULL;
-
- bp_result = dcb->funcs->get_gpio_pin_info(dcb,
- hpd_info.hpd_int_gpio_uid, &pin_info);
-
- if (bp_result != BP_RESULT_OK) {
- ASSERT(bp_result == BP_RESULT_NORECORD);
- return NULL;
- }
-
- return dal_gpio_service_create_irq(gpio_service,
- pin_info.offset,
- pin_info.mask);
-}
-
-/*
- * program_hpd_filter() - Program the HPD filter on the associated HPD line
- *
- * The connect and disconnect filter timeouts are chosen based on the
- * connector signal type.
- *
- * Return: true on success, false otherwise
- */
-static bool program_hpd_filter(const struct dc_link *link)
-{
- bool result = false;
- struct gpio *hpd;
- int delay_on_connect_in_ms = 0;
- int delay_on_disconnect_in_ms = 0;
-
- if (link->is_hpd_filter_disabled)
- return false;
- /* Verify feature is supported */
- switch (link->connector_signal) {
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- /* Program hpd filter */
- delay_on_connect_in_ms = 500;
- delay_on_disconnect_in_ms = 100;
- break;
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- /* Program hpd filter to allow DP signal to settle */
- /* 500: not able to detect MST <-> SST switch as HPD is low for
- * only 100ms on DELL U2413
- * 0: some passive dongle still show aux mode instead of i2c
- * 20-50: not enough to hide bouncing HPD with passive dongle.
- * also see intermittent i2c read issues.
- */
- delay_on_connect_in_ms = 80;
- delay_on_disconnect_in_ms = 0;
- break;
- case SIGNAL_TYPE_LVDS:
- case SIGNAL_TYPE_EDP:
- default:
- /* Don't program hpd filter */
- return false;
- }
-
- /* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
-
- if (!hpd)
- return result;
-
- /* Setup HPD filtering */
- if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
- struct gpio_hpd_config config;
-
- config.delay_on_connect = delay_on_connect_in_ms;
- config.delay_on_disconnect = delay_on_disconnect_in_ms;
-
- dal_irq_setup_hpd_filter(hpd, &config);
-
- dal_gpio_close(hpd);
-
- result = true;
- } else {
- ASSERT_CRITICAL(false);
- }
-
- /* Release HPD handle */
- dal_gpio_destroy_irq(&hpd);
-
- return result;
-}
-
-bool dc_link_wait_for_t12(struct dc_link *link)
-{
- if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
- link->dc->hwss.edp_wait_for_T12(link);
-
- return true;
- }
-
- return false;
-}
-
-/**
- * dc_link_detect_sink() - Determine if there is a sink connected
- *
- * @link: pointer to the dc link
- * @type: Returned connection type
- *
- * Does not detect downstream devices, such as MST sinks
- * or displays connected through active dongles.
- */
-bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
-{
- uint32_t is_hpd_high = 0;
- struct gpio *hpd_pin;
-
- if (link->connector_signal == SIGNAL_TYPE_LVDS) {
- *type = dc_connection_single;
- return true;
- }
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /*in case it is not on*/
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- /* Link may not have physical HPD pin. */
- if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
- *type = dc_connection_none;
- else
- *type = dc_connection_single;
-
- return true;
- }
-
- /* todo: may need to lock gpio access */
- hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
- if (!hpd_pin)
- goto hpd_gpio_failure;
-
- dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
- dal_gpio_get_value(hpd_pin, &is_hpd_high);
- dal_gpio_close(hpd_pin);
- dal_gpio_destroy_irq(&hpd_pin);
-
- if (is_hpd_high) {
- *type = dc_connection_single;
- /* TODO: need to do the actual detection */
- } else {
- *type = dc_connection_none;
- }
-
- return true;
-
-hpd_gpio_failure:
- return false;
-}
-
-static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
-{
- enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
-
- switch (sink_signal) {
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- case SIGNAL_TYPE_LVDS:
- case SIGNAL_TYPE_RGB:
- transaction_type = DDC_TRANSACTION_TYPE_I2C;
- break;
-
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_EDP:
- transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
- break;
-
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- /* MST does not use I2COverAux, but there is the
- * SPECIAL use case for "immediate dwnstrm device
- * access" (EPR#370830).
- */
- transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
- break;
-
- default:
- break;
- }
-
- return transaction_type;
-}
-
-static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
- struct graphics_object_id downstream)
-{
- if (downstream.type == OBJECT_TYPE_CONNECTOR) {
- switch (downstream.id) {
- case CONNECTOR_ID_SINGLE_LINK_DVII:
- switch (encoder.id) {
- case ENCODER_ID_INTERNAL_DAC1:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_ID_INTERNAL_DAC2:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
- return SIGNAL_TYPE_RGB;
- default:
- return SIGNAL_TYPE_DVI_SINGLE_LINK;
- }
- break;
- case CONNECTOR_ID_DUAL_LINK_DVII:
- {
- switch (encoder.id) {
- case ENCODER_ID_INTERNAL_DAC1:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_ID_INTERNAL_DAC2:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
- return SIGNAL_TYPE_RGB;
- default:
- return SIGNAL_TYPE_DVI_DUAL_LINK;
- }
- }
- break;
- case CONNECTOR_ID_SINGLE_LINK_DVID:
- return SIGNAL_TYPE_DVI_SINGLE_LINK;
- case CONNECTOR_ID_DUAL_LINK_DVID:
- return SIGNAL_TYPE_DVI_DUAL_LINK;
- case CONNECTOR_ID_VGA:
- return SIGNAL_TYPE_RGB;
- case CONNECTOR_ID_HDMI_TYPE_A:
- return SIGNAL_TYPE_HDMI_TYPE_A;
- case CONNECTOR_ID_LVDS:
- return SIGNAL_TYPE_LVDS;
- case CONNECTOR_ID_DISPLAY_PORT:
- case CONNECTOR_ID_USBC:
- return SIGNAL_TYPE_DISPLAY_PORT;
- case CONNECTOR_ID_EDP:
- return SIGNAL_TYPE_EDP;
- default:
- return SIGNAL_TYPE_NONE;
- }
- } else if (downstream.type == OBJECT_TYPE_ENCODER) {
- switch (downstream.id) {
- case ENCODER_ID_EXTERNAL_NUTMEG:
- case ENCODER_ID_EXTERNAL_TRAVIS:
- return SIGNAL_TYPE_DISPLAY_PORT;
- default:
- return SIGNAL_TYPE_NONE;
- }
- }
-
- return SIGNAL_TYPE_NONE;
-}
-
-/*
- * dc_link_is_dp_sink_present() - Check if there is a native DP
- * or passive DP-HDMI dongle connected
- */
-bool dc_link_is_dp_sink_present(struct dc_link *link)
-{
- enum gpio_result gpio_result;
- uint32_t clock_pin = 0;
- uint8_t retry = 0;
- struct ddc *ddc;
-
- enum connector_id connector_id =
- dal_graphics_object_id_get_connector_id(link->link_id);
-
- bool present =
- ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP) ||
- (connector_id == CONNECTOR_ID_USBC));
-
- ddc = dal_ddc_service_get_ddc_pin(link->ddc);
-
- if (!ddc) {
- BREAK_TO_DEBUGGER();
- return present;
- }
-
- /* Open GPIO and set it to I2C mode */
- /* Note: this GpioMode_Input will be converted
- * to GpioConfigType_I2cAuxDualMode in GPIO component,
- * which indicates we need additional delay
- */
-
- if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
- GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
- dal_ddc_close(ddc);
-
- return present;
- }
-
- /*
- * Read GPIO: DP sink is present if both clock and data pins are zero
- *
- * [W/A] On plug-unplug of the DP cable, some customer boards show
- * one short pulse on clk_pin (1V, < 1ms). DP would then be configured as
- * HDMI/DVI and the monitor can't light up, so retry 3 times.
- * A real passive dongle, however, needs an additional 3ms to detect.
- */
- do {
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
- ASSERT(gpio_result == GPIO_RESULT_OK);
- if (clock_pin)
- udelay(1000);
- else
- break;
- } while (retry++ < 3);
-
- present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
-
- dal_ddc_close(ddc);
-
- return present;
-}
-
-/* Detect output sink type */
-static enum signal_type link_detect_sink(struct dc_link *link,
- enum dc_detect_reason reason)
-{
- enum signal_type result;
- struct graphics_object_id enc_id;
-
- if (link->is_dig_mapping_flexible)
- enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN};
- else
- enc_id = link->link_enc->id;
- result = get_basic_signal_type(enc_id, link->link_id);
-
- /* Use basic signal type for link without physical connector. */
- if (link->ep_type != DISPLAY_ENDPOINT_PHY)
- return result;
-
- /* Internal digital encoder will detect only dongles
- * that require digital signal
- */
-
- /* Detection mechanism is different
- * for different native connectors.
- * LVDS connector supports only LVDS signal;
- * PCIE is a bus slot, the actual connector needs to be detected first;
- * eDP connector supports only eDP signal;
- * HDMI should check straps for audio
- */
-
- /* PCIE detects the actual connector on add-on board */
- if (link->link_id.id == CONNECTOR_ID_PCIE) {
- /* ZAZTODO implement PCIE add-on card detection */
- }
-
- switch (link->link_id.id) {
- case CONNECTOR_ID_HDMI_TYPE_A: {
- /* check audio support:
- * if native HDMI is not supported, switch to DVI
- */
- struct audio_support *aud_support =
- &link->dc->res_pool->audio_support;
-
- if (!aud_support->hdmi_audio_native)
- if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
- result = SIGNAL_TYPE_DVI_SINGLE_LINK;
- }
- break;
- case CONNECTOR_ID_DISPLAY_PORT:
- case CONNECTOR_ID_USBC: {
- /* DP HPD short pulse. Passive DP dongle will not
- * have short pulse
- */
- if (reason != DETECT_REASON_HPDRX) {
- /* Check whether DP signal detected: if not -
- * we assume signal is DVI; it could be corrected
- * to HDMI after dongle detection
- */
- if (!dm_helpers_is_dp_sink_present(link))
- result = SIGNAL_TYPE_DVI_SINGLE_LINK;
- }
- }
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
- struct audio_support *audio_support)
-{
- enum signal_type signal = SIGNAL_TYPE_NONE;
-
- switch (dongle_type) {
- case DISPLAY_DONGLE_DP_HDMI_DONGLE:
- if (audio_support->hdmi_audio_on_dongle)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
- else
- signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- case DISPLAY_DONGLE_DP_DVI_DONGLE:
- signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
- if (audio_support->hdmi_audio_native)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
- else
- signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- default:
- signal = SIGNAL_TYPE_NONE;
- break;
- }
-
- return signal;
-}
-
-static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
- struct display_sink_capability *sink_cap,
- struct audio_support *audio_support)
-{
- dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
-
- return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
- audio_support);
-}
-
-static void link_disconnect_sink(struct dc_link *link)
-{
- if (link->local_sink) {
- dc_sink_release(link->local_sink);
- link->local_sink = NULL;
- }
-
- link->dpcd_sink_count = 0;
- //link->dpcd_caps.dpcd_rev.raw = 0;
-}
-
-static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
-{
- dc_sink_release(link->local_sink);
- link->local_sink = prev_sink;
-}
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
-{
- bool ret = false;
-
- switch (signal) {
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
- break;
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- /* HDMI doesn't tell us its HDCP(1.4) capability, so assume it is always capable.
- * We could poll for bksv, but some displays have an issue with this. Since it's so rare
- * for a display to not be 1.4 capable, this assumption is ok.
- */
- ret = true;
- break;
- default:
- break;
- }
- return ret;
-}
-
-bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
-{
- bool ret = false;
-
- switch (signal) {
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
- link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
- (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
- break;
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
-{
- struct hdcp_protection_message msg22;
- struct hdcp_protection_message msg14;
-
- memset(&msg22, 0, sizeof(struct hdcp_protection_message));
- memset(&msg14, 0, sizeof(struct hdcp_protection_message));
- memset(link->hdcp_caps.rx_caps.raw, 0,
- sizeof(link->hdcp_caps.rx_caps.raw));
-
- if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->ddc->transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
- link->connector_signal == SIGNAL_TYPE_EDP) {
- msg22.data = link->hdcp_caps.rx_caps.raw;
- msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
- msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
- } else {
- msg22.data = &link->hdcp_caps.rx_caps.fields.version;
- msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
- msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
- }
- msg22.version = HDCP_VERSION_22;
- msg22.link = HDCP_LINK_PRIMARY;
- msg22.max_retries = 5;
- dc_process_hdcp_msg(signal, link, &msg22);
-
- if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- msg14.data = &link->hdcp_caps.bcaps.raw;
- msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
- msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
- msg14.version = HDCP_VERSION_14;
- msg14.link = HDCP_LINK_PRIMARY;
- msg14.max_retries = 5;
-
- dc_process_hdcp_msg(signal, link, &msg14);
- }
-
-}
-#endif
-
-static void read_current_link_settings_on_detect(struct dc_link *link)
-{
- union lane_count_set lane_count_set = {0};
- uint8_t link_bw_set;
- uint8_t link_rate_set;
- uint32_t read_dpcd_retry_cnt = 10;
- enum dc_status status = DC_ERROR_UNEXPECTED;
- int i;
- union max_down_spread max_down_spread = {0};
-
- // Read DPCD 00101h to find out the number of lanes currently set
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
- /* First DPCD read after VDD ON can fail if the particular board
- * does not have the HPD pin wired correctly. So if the DPCD read fails,
- * which should never happen, retry a few times, targeting a worst-case
- * scenario of 80 ms.
- */
- if (status == DC_OK) {
- link->cur_link_settings.lane_count =
- lane_count_set.bits.LANE_COUNT_SET;
- break;
- }
-
- msleep(8);
- }
-
- // Read DPCD 00100h to find if standard link rates are set
- core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
-
- if (link_bw_set == 0) {
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the edp link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- // edp_supported_link_rates_count = 0 for DP
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
- }
- } else {
- // Link Rate not found. Seamless boot may not work.
- ASSERT(false);
- }
- } else {
- link->cur_link_settings.link_rate = link_bw_set;
- link->cur_link_settings.use_link_rate_set = false;
- }
- // Read DPCD 00003h to find the max down spread.
- core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
- &max_down_spread.raw, sizeof(max_down_spread));
- link->cur_link_settings.link_spread =
- max_down_spread.bits.MAX_DOWN_SPREAD ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
-}
-
-static bool detect_dp(struct dc_link *link,
- struct display_sink_capability *sink_caps,
- enum dc_detect_reason reason)
-{
- struct audio_support *audio_support = &link->dc->res_pool->audio_support;
-
- sink_caps->signal = link_detect_sink(link, reason);
- sink_caps->transaction_type =
- get_ddc_transaction_type(sink_caps->signal);
-
- if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
- return false;
-
- if (is_dp_branch_device(link))
- /* DP SST branch */
- link->type = dc_connection_sst_branch;
- } else {
- /* DP passive dongles */
- sink_caps->signal = dp_passive_dongle_detection(link->ddc,
- sink_caps,
- audio_support);
- link->dpcd_caps.dongle_type = sink_caps->dongle_type;
- link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
- link->dpcd_caps.dpcd_rev.raw = 0;
- }
-
- return true;
-}
-
-static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
-{
- if (old_edid->length != new_edid->length)
- return false;
-
- if (new_edid->length == 0)
- return false;
-
- return (memcmp(old_edid->raw_edid,
- new_edid->raw_edid, new_edid->length) == 0);
-}
-
-static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
-{
- /**
- * something is terribly wrong if the timeout is > 200 ms (5 Hz):
- * 500 microseconds * 400 tries gives us 200 ms
- **/
- unsigned int sleep_time_in_microseconds = 500;
- unsigned int tries_allowed = 400;
- bool is_in_alt_mode;
- unsigned long long enter_timestamp;
- unsigned long long finish_timestamp;
- unsigned long long time_taken_in_ns;
- int tries_taken;
-
- DC_LOGGER_INIT(link->ctx->logger);
-
- if (!link->link_enc->funcs->is_in_alt_mode)
- return true;
-
- is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
-
- if (is_in_alt_mode)
- return true;
-
- enter_timestamp = dm_get_timestamp(link->ctx);
-
- for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
- udelay(sleep_time_in_microseconds);
- /* ask the link if alt mode is enabled, if so return ok */
- if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns =
- dm_get_elapse_time_in_ns(link->ctx,
- finish_timestamp,
- enter_timestamp);
- DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
- return true;
- }
- }
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
- enter_timestamp);
- DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
- return false;
-}
-
-static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
-{
- /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
- * reports DSC support.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->type == dc_connection_mst_branch &&
- link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
- link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 &&
- link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
- !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
- link->wa_flags.dpia_mst_dsc_always_on = true;
-}
-
-static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
-{
- /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- link->wa_flags.dpia_mst_dsc_always_on = false;
-}
-
-static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
-{
- DC_LOGGER_INIT(link->ctx->logger);
-
- LINK_INFO("link=%d, mst branch is now Connected\n",
- link->link_index);
-
- link->type = dc_connection_mst_branch;
- apply_dpia_mst_dsc_always_on_wa(link);
-
- dm_helpers_dp_update_branch_info(link->ctx, link);
- if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
- link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
- link_disconnect_sink(link);
- } else {
- link->type = dc_connection_sst_branch;
- }
-
- return link->type == dc_connection_mst_branch;
-}
-
-bool reset_cur_dp_mst_topology(struct dc_link *link)
-{
- DC_LOGGER_INIT(link->ctx->logger);
-
- LINK_INFO("link=%d, mst branch is now Disconnected\n",
- link->link_index);
-
- revert_dpia_mst_dsc_always_on_wa(link);
- return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-}
-
-static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
- enum dc_detect_reason reason)
-{
- int i;
- bool can_apply_seamless_boot = false;
-
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->streams[i]->apply_seamless_boot_optimization) {
- can_apply_seamless_boot = true;
- break;
- }
- }
-
- return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT;
-}
-
-static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc)
-{
- dc_z10_restore(dc);
- clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
-}
-
-static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc)
-{
- clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
-}
-
-static void set_all_streams_dpms_off_for_link(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
- struct dc_stream_update stream_update;
- bool dpms_off = true;
- struct link_resource link_res = {0};
-
- memset(&stream_update, 0, sizeof(stream_update));
- stream_update.dpms_off = &dpms_off;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- stream_update.stream = pipe_ctx->stream;
- dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipe_ctx->stream, &stream_update,
- link->ctx->dc->current_state);
- }
- }
-
- /* The link can also be enabled by vbios. In this case it is not recorded
- * in pipe_ctx. Disable link phy here to make sure it is completely off
- */
- dp_disable_link_phy(link, &link_res, link->connector_signal);
-}
-
-static void verify_link_capability_destructive(struct dc_link *link,
- struct dc_sink *sink,
- enum dc_detect_reason reason)
-{
- bool should_prepare_phy_clocks =
- should_prepare_phy_clocks_for_link_verification(link->dc, reason);
-
- if (should_prepare_phy_clocks)
- prepare_phy_clocks_for_destructive_link_verification(link->dc);
-
- if (dc_is_dp_signal(link->local_sink->sink_signal)) {
- struct dc_link_settings known_limit_link_setting =
- dp_get_max_link_cap(link);
- set_all_streams_dpms_off_for_link(link);
- dp_verify_link_cap_with_retries(
- link, &known_limit_link_setting,
- LINK_TRAINING_MAX_VERIFY_RETRY);
- } else {
- ASSERT(0);
- }
-
- if (should_prepare_phy_clocks)
- restore_phy_clocks_for_destructive_link_verification(link->dc);
-}
-
-static void verify_link_capability_non_destructive(struct dc_link *link)
-{
- if (dc_is_dp_signal(link->local_sink->sink_signal)) {
- if (dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- /* TODO - should we check link encoder's max link caps here?
- * How do we know which link encoder to check from?
- */
- link->verified_link_cap = link->reported_link_cap;
- else
- link->verified_link_cap = dp_get_max_link_cap(link);
- }
-}
-
-static bool should_verify_link_capability_destructively(struct dc_link *link,
- enum dc_detect_reason reason)
-{
- bool destructive = false;
- struct dc_link_settings max_link_cap;
- bool is_link_enc_unavailable = link->link_enc &&
- link->dc->res_pool->funcs->link_encs_assign &&
- !link_enc_cfg_is_link_enc_avail(
- link->ctx->dc,
- link->link_enc->preferred_engine,
- link);
-
- if (dc_is_dp_signal(link->local_sink->sink_signal)) {
- max_link_cap = dp_get_max_link_cap(link);
- destructive = true;
-
- if (link->dc->debug.skip_detection_link_training ||
- dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- destructive = false;
- } else if (dp_get_link_encoding_format(&max_link_cap) ==
- DP_8b_10b_ENCODING) {
- if (link->dpcd_caps.is_mst_capable ||
- is_link_enc_unavailable) {
- destructive = false;
- }
- }
- }
-
- return destructive;
-}
-
-static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
- enum dc_detect_reason reason)
-{
- if (should_verify_link_capability_destructively(link, reason))
- verify_link_capability_destructive(link, sink, reason);
- else
- verify_link_capability_non_destructive(link);
-}
-
-
-/**
- * detect_link_and_local_sink() - Detect if a sink is attached to a given link
- *
- * link->local_sink is created or destroyed as needed.
- *
- * This does not create remote sinks.
- */
-static bool detect_link_and_local_sink(struct dc_link *link,
- enum dc_detect_reason reason)
-{
- struct dc_sink_init_data sink_init_data = { 0 };
- struct display_sink_capability sink_caps = { 0 };
- uint32_t i;
- bool converter_disable_audio = false;
- struct audio_support *aud_support = &link->dc->res_pool->audio_support;
- bool same_edid = false;
- enum dc_edid_status edid_status;
- struct dc_context *dc_ctx = link->ctx;
- struct dc *dc = dc_ctx->dc;
- struct dc_sink *sink = NULL;
- struct dc_sink *prev_sink = NULL;
- struct dpcd_caps prev_dpcd_caps;
- enum dc_connection_type new_connection_type = dc_connection_none;
- const uint32_t post_oui_delay = 30; // 30ms
-
- DC_LOGGER_INIT(link->ctx->logger);
-
- if (dc_is_virtual_signal(link->connector_signal))
- return false;
-
- if (((link->connector_signal == SIGNAL_TYPE_LVDS ||
- link->connector_signal == SIGNAL_TYPE_EDP) &&
- (!link->dc->config.allow_edp_hotplug_detection)) &&
- link->local_sink) {
- // need to re-write OUI and brightness in resume case
- if (link->connector_signal == SIGNAL_TYPE_EDP &&
- (link->dpcd_sink_ext_caps.bits.oled == 1)) {
- dpcd_set_source_specific_data(link);
- msleep(post_oui_delay);
- dc_link_set_default_brightness_aux(link);
- //TODO: use cached
- }
-
- return true;
- }
-
- if (!dc_link_detect_sink(link, &new_connection_type)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- prev_sink = link->local_sink;
- if (prev_sink) {
- dc_sink_retain(prev_sink);
- memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
- }
-
- link_disconnect_sink(link);
- if (new_connection_type != dc_connection_none) {
- link->type = new_connection_type;
- link->link_state_valid = false;
-
- /* From Disconnected-to-Connected. */
- switch (link->connector_signal) {
- case SIGNAL_TYPE_HDMI_TYPE_A: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- if (aud_support->hdmi_audio_native)
- sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
- else
- sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- }
-
- case SIGNAL_TYPE_DVI_SINGLE_LINK: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- }
-
- case SIGNAL_TYPE_DVI_DUAL_LINK: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
- break;
- }
-
- case SIGNAL_TYPE_LVDS: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- sink_caps.signal = SIGNAL_TYPE_LVDS;
- break;
- }
-
- case SIGNAL_TYPE_EDP: {
- read_current_link_settings_on_detect(link);
-
- detect_edp_sink_caps(link);
- read_current_link_settings_on_detect(link);
-
- /* Disable power sequence on MIPI panel + converter
- */
- if (dc->config.enable_mipi_converter_optimization &&
- dc_ctx->dce_version == DCN_VERSION_3_01 &&
- link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 &&
- memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580,
- sizeof(link->dpcd_caps.branch_dev_name)) == 0) {
- dc->config.edp_no_power_sequencing = true;
-
- if (!link->dpcd_caps.set_power_state_capable_edp)
- link->wa_flags.dp_keep_receiver_powered = true;
- }
-
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
- sink_caps.signal = SIGNAL_TYPE_EDP;
- break;
- }
-
- case SIGNAL_TYPE_DISPLAY_PORT: {
- /* wa HPD high coming too early*/
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
- /* if alt mode times out, return false */
- if (!wait_for_entering_dp_alt_mode(link))
- return false;
- }
-
- if (!detect_dp(link, &sink_caps, reason)) {
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- }
-
- /* Active SST downstream branch device unplug*/
- if (link->type == dc_connection_sst_branch &&
- link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink)
- /* Downstream unplug */
- dc_sink_release(prev_sink);
- return true;
- }
-
- /* disable audio for non DP to HDMI active sst converter */
- if (link->type == dc_connection_sst_branch &&
- is_dp_active_dongle(link) &&
- (link->dpcd_caps.dongle_type !=
- DISPLAY_DONGLE_DP_HDMI_CONVERTER))
- converter_disable_audio = true;
- break;
- }
-
- default:
- DC_ERROR("Invalid connector type! signal:%d\n",
- link->connector_signal);
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- } /* switch() */
-
- if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
- link->dpcd_sink_count =
- link->dpcd_caps.sink_count.bits.SINK_COUNT;
- else
- link->dpcd_sink_count = 1;
-
- dal_ddc_service_set_transaction_type(link->ddc,
- sink_caps.transaction_type);
-
- link->aux_mode =
- dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
-
- sink_init_data.link = link;
- sink_init_data.sink_signal = sink_caps.signal;
-
- sink = dc_sink_create(&sink_init_data);
- if (!sink) {
- DC_ERROR("Failed to create sink!\n");
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- }
-
- sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
- sink->converter_disable_audio = converter_disable_audio;
-
- /* dc_sink_create returns a new reference */
- link->local_sink = sink;
-
- edid_status = dm_helpers_read_local_edid(link->ctx,
- link, sink);
-
- switch (edid_status) {
- case EDID_BAD_CHECKSUM:
- DC_LOG_ERROR("EDID checksum invalid.\n");
- break;
- case EDID_PARTIAL_VALID:
- DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
- break;
- case EDID_NO_RESPONSE:
- DC_LOG_ERROR("No EDID read.\n");
- /*
- * Abort detection for non-DP connectors if we have
- * no EDID
- *
- * DP needs to report as connected if HPD is high
- * even if we have no EDID in order to go to
- * fail-safe mode
- */
- if (dc_is_hdmi_signal(link->connector_signal) ||
- dc_is_dvi_signal(link->connector_signal)) {
- if (prev_sink)
- dc_sink_release(prev_sink);
-
- return false;
- }
-
- if (link->type == dc_connection_sst_branch &&
- link->dpcd_caps.dongle_type ==
- DISPLAY_DONGLE_DP_VGA_CONVERTER &&
- reason == DETECT_REASON_HPDRX) {
- /* Abort detection for DP-VGA adapters when EDID
- * can't be read and detection reason is VGA-side
- * hotplug
- */
- if (prev_sink)
- dc_sink_release(prev_sink);
- link_disconnect_sink(link);
-
- return true;
- }
-
- break;
- default:
- break;
- }
-
- // Check if edid is the same
- if ((prev_sink) &&
- (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
- same_edid = is_same_edid(&prev_sink->dc_edid,
- &sink->dc_edid);
-
- if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
- link->ctx->dc->debug.hdmi20_disable = true;
-
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- /*
- * TODO debug why Dell 2413 doesn't like
- * two link trainings
- */
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- query_hdcp_capability(sink->sink_signal, link);
-#endif
- } else {
- // If edid is the same, then discard new sink and revert back to original sink
- if (same_edid) {
- link_disconnect_remap(prev_sink, link);
- sink = prev_sink;
- prev_sink = NULL;
- }
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- query_hdcp_capability(sink->sink_signal, link);
-#endif
- }
-
- /* HDMI-DVI Dongle */
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
-
- if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
- dp_trace_init(link);
-
- /* Connectivity log: detection */
- for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
- CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
- DC_EDID_BLOCK_SIZE,
- "%s: [Block %d] ", sink->edid_caps.display_name, i);
- }
-
- DC_LOG_DETECTION_EDID_PARSER("%s: "
- "manufacturer_id = %X, "
- "product_id = %X, "
- "serial_number = %X, "
- "manufacture_week = %d, "
- "manufacture_year = %d, "
- "display_name = %s, "
- "speaker_flag = %d, "
- "audio_mode_count = %d\n",
- __func__,
- sink->edid_caps.manufacturer_id,
- sink->edid_caps.product_id,
- sink->edid_caps.serial_number,
- sink->edid_caps.manufacture_week,
- sink->edid_caps.manufacture_year,
- sink->edid_caps.display_name,
- sink->edid_caps.speaker_flags,
- sink->edid_caps.audio_mode_count);
-
- for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
- DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
- "format_code = %d, "
- "channel_count = %d, "
- "sample_rate = %d, "
- "sample_size = %d\n",
- __func__,
- i,
- sink->edid_caps.audio_modes[i].format_code,
- sink->edid_caps.audio_modes[i].channel_count,
- sink->edid_caps.audio_modes[i].sample_rate,
- sink->edid_caps.audio_modes[i].sample_size);
- }
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* Init dc_panel_config by HW config */
- if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
- dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
- /* Pickup base DM settings */
- dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
- // Override dc_panel_config if system has specific settings
- dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
- }
-
- } else {
- /* From Connected-to-Disconnected. */
- link->type = dc_connection_none;
- sink_caps.signal = SIGNAL_TYPE_NONE;
- /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
- * is not cleared. If we emulate a DP signal on this connection, it thinks
- * the dongle is still there and limits the number of modes we can emulate.
- * Clear dongle_max_pix_clk on disconnect to fix this
- */
- link->dongle_max_pix_clk = 0;
-
- dc_link_clear_dprx_states(link);
- dp_trace_reset(link);
- }
-
- LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
- link->link_index, sink,
- (sink_caps.signal ==
- SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
- prev_sink, same_edid);
-
- if (prev_sink)
- dc_sink_release(prev_sink);
-
- return true;
-}
-
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
-{
- bool is_local_sink_detect_success;
- bool is_delegated_to_mst_top_mgr = false;
- enum dc_connection_type pre_link_type = link->type;
-
- is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
-
- if (is_local_sink_detect_success && link->local_sink)
- verify_link_capability(link, link->local_sink, reason);
-
- if (is_local_sink_detect_success && link->local_sink &&
- dc_is_dp_signal(link->local_sink->sink_signal) &&
- link->dpcd_caps.is_mst_capable)
- is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
-
- if (is_local_sink_detect_success &&
- pre_link_type == dc_connection_mst_branch &&
- link->type != dc_connection_mst_branch)
- is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link);
-
- return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr;
-}
-
-bool dc_link_get_hpd_state(struct dc_link *dc_link)
-{
- uint32_t state;
-
- dal_gpio_lock_pin(dc_link->hpd_gpio);
- dal_gpio_get_value(dc_link->hpd_gpio, &state);
- dal_gpio_unlock_pin(dc_link->hpd_gpio);
-
- return state;
-}
-
-static enum hpd_source_id get_hpd_line(struct dc_link *link)
-{
- struct gpio *hpd;
- enum hpd_source_id hpd_id;
-
- hpd_id = HPD_SOURCEID_UNKNOWN;
-
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
-
- if (hpd) {
- switch (dal_irq_get_source(hpd)) {
- case DC_IRQ_SOURCE_HPD1:
- hpd_id = HPD_SOURCEID1;
- break;
- case DC_IRQ_SOURCE_HPD2:
- hpd_id = HPD_SOURCEID2;
- break;
- case DC_IRQ_SOURCE_HPD3:
- hpd_id = HPD_SOURCEID3;
- break;
- case DC_IRQ_SOURCE_HPD4:
- hpd_id = HPD_SOURCEID4;
- break;
- case DC_IRQ_SOURCE_HPD5:
- hpd_id = HPD_SOURCEID5;
- break;
- case DC_IRQ_SOURCE_HPD6:
- hpd_id = HPD_SOURCEID6;
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
-
- dal_gpio_destroy_irq(&hpd);
- }
-
- return hpd_id;
-}
-
-static enum channel_id get_ddc_line(struct dc_link *link)
-{
- struct ddc *ddc;
- enum channel_id channel;
-
- channel = CHANNEL_ID_UNKNOWN;
-
- ddc = dal_ddc_service_get_ddc_pin(link->ddc);
-
- if (ddc) {
- switch (dal_ddc_get_line(ddc)) {
- case GPIO_DDC_LINE_DDC1:
- channel = CHANNEL_ID_DDC1;
- break;
- case GPIO_DDC_LINE_DDC2:
- channel = CHANNEL_ID_DDC2;
- break;
- case GPIO_DDC_LINE_DDC3:
- channel = CHANNEL_ID_DDC3;
- break;
- case GPIO_DDC_LINE_DDC4:
- channel = CHANNEL_ID_DDC4;
- break;
- case GPIO_DDC_LINE_DDC5:
- channel = CHANNEL_ID_DDC5;
- break;
- case GPIO_DDC_LINE_DDC6:
- channel = CHANNEL_ID_DDC6;
- break;
- case GPIO_DDC_LINE_DDC_VGA:
- channel = CHANNEL_ID_DDC_VGA;
- break;
- case GPIO_DDC_LINE_I2C_PAD:
- channel = CHANNEL_ID_I2C_PAD;
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
- }
-
- return channel;
-}
-
-static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
-{
- switch (encoder.id) {
- case ENCODER_ID_INTERNAL_UNIPHY:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_A;
- case ENUM_ID_2:
- return TRANSMITTER_UNIPHY_B;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_INTERNAL_UNIPHY1:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_C;
- case ENUM_ID_2:
- return TRANSMITTER_UNIPHY_D;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_INTERNAL_UNIPHY2:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_E;
- case ENUM_ID_2:
- return TRANSMITTER_UNIPHY_F;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_INTERNAL_UNIPHY3:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_G;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_EXTERNAL_NUTMEG:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_NUTMEG_CRT;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_EXTERNAL_TRAVIS:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_TRAVIS_CRT;
- case ENUM_ID_2:
- return TRANSMITTER_TRAVIS_LCD;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- default:
- return TRANSMITTER_UNKNOWN;
- }
-}
-
-static bool dc_link_construct_legacy(struct dc_link *link,
- const struct link_init_data *init_params)
-{
- uint8_t i;
- struct ddc_service_init_data ddc_service_init_data = { 0 };
- struct dc_context *dc_ctx = init_params->ctx;
- struct encoder_init_data enc_init_data = { 0 };
- struct panel_cntl_init_data panel_cntl_init_data = { 0 };
- struct integrated_info *info;
- struct dc_bios *bios = init_params->dc->ctx->dc_bios;
- const struct dc_vbios_funcs *bp_funcs = bios->funcs;
- struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto create_fail;
-
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
-
- link->link_status.dpcd_caps = &link->dpcd_caps;
-
- link->dc = init_params->dc;
- link->ctx = dc_ctx;
- link->link_index = init_params->link_index;
-
- memset(&link->preferred_training_settings, 0,
- sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0,
- sizeof(struct dc_link_settings));
-
- link->link_id =
- bios->funcs->get_connector_id(bios, init_params->connector_index);
-
- link->ep_type = DISPLAY_ENDPOINT_PHY;
-
- DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
-
- if (bios->funcs->get_disp_connector_caps_info) {
- bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
- link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
- DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
- }
-
- if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
- dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
- __func__, init_params->connector_index,
- link->link_id.type, OBJECT_TYPE_CONNECTOR);
- goto create_fail;
- }
-
- if (link->dc->res_pool->funcs->link_init)
- link->dc->res_pool->funcs->link_init(link);
-
- link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
-
- if (link->hpd_gpio) {
- dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
- dal_gpio_unlock_pin(link->hpd_gpio);
- link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
-
- DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
- DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
- }
-
- switch (link->link_id.id) {
- case CONNECTOR_ID_HDMI_TYPE_A:
- link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
-
- break;
- case CONNECTOR_ID_SINGLE_LINK_DVID:
- case CONNECTOR_ID_SINGLE_LINK_DVII:
- link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- case CONNECTOR_ID_DUAL_LINK_DVID:
- case CONNECTOR_ID_DUAL_LINK_DVII:
- link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
- break;
- case CONNECTOR_ID_DISPLAY_PORT:
- case CONNECTOR_ID_USBC:
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
-
- if (link->hpd_gpio)
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
-
- break;
- case CONNECTOR_ID_EDP:
- link->connector_signal = SIGNAL_TYPE_EDP;
-
- if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
-
- switch (link->dc->config.allow_edp_hotplug_detection) {
- case 1: // only the 1st eDP handles hotplug
- if (link->link_index == 0)
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
- else
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- break;
- case 2: // only the 2nd eDP handles hotplug
- if (link->link_index == 1)
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
- else
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- break;
- default:
- break;
- }
- }
-
- break;
- case CONNECTOR_ID_LVDS:
- link->connector_signal = SIGNAL_TYPE_LVDS;
- break;
- default:
- DC_LOG_WARNING("Unsupported Connector type:%d!\n",
- link->link_id.id);
- goto create_fail;
- }
-
- /* TODO: #DAL3 Implement id to str function.*/
- LINK_INFO("Connector[%d] description:"
- "signal %d\n",
- init_params->connector_index,
- link->connector_signal);
-
- ddc_service_init_data.ctx = link->ctx;
- ddc_service_init_data.id = link->link_id;
- ddc_service_init_data.link = link;
- link->ddc = dal_ddc_service_create(&ddc_service_init_data);
-
- if (!link->ddc) {
- DC_ERROR("Failed to create ddc_service!\n");
- goto ddc_create_fail;
- }
-
- if (!link->ddc->ddc_pin) {
- DC_ERROR("Failed to get I2C info for connector!\n");
- goto ddc_create_fail;
- }
-
- link->ddc_hw_inst =
- dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
-
-
- if (link->dc->res_pool->funcs->panel_cntl_create &&
- (link->link_id.id == CONNECTOR_ID_EDP ||
- link->link_id.id == CONNECTOR_ID_LVDS)) {
- panel_cntl_init_data.ctx = dc_ctx;
- panel_cntl_init_data.inst =
- panel_cntl_init_data.ctx->dc_edp_id_count;
- link->panel_cntl =
- link->dc->res_pool->funcs->panel_cntl_create(
- &panel_cntl_init_data);
- panel_cntl_init_data.ctx->dc_edp_id_count++;
-
- if (link->panel_cntl == NULL) {
- DC_ERROR("Failed to create link panel_cntl!\n");
- goto panel_cntl_create_fail;
- }
- }
-
- enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
- enc_init_data.connector = link->link_id;
- enc_init_data.channel = get_ddc_line(link);
- enc_init_data.hpd_source = get_hpd_line(link);
-
- link->hpd_src = enc_init_data.hpd_source;
-
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
- link->link_enc =
- link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
-
- if (!link->link_enc) {
- DC_ERROR("Failed to create link encoder!\n");
- goto link_enc_create_fail;
- }
-
- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
-
- /* Update link encoder tracking variables. These are used for the dynamic
- * assignment of link encoders to streams.
- */
- link->eng_id = link->link_enc->preferred_engine;
- link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
- link->dc->res_pool->dig_link_enc_count++;
-
- link->link_enc_hw_inst = link->link_enc->transmitter;
-
- for (i = 0; i < 4; i++) {
- if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
- link->link_id, i,
- &link->device_tag) != BP_RESULT_OK) {
- DC_ERROR("Failed to find device tag!\n");
- goto device_tag_fail;
- }
-
- /* Look for device tag that matches connector signal,
- * CRT for RGB, LCD for other supported signal types
- */
- if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
- link->device_tag.dev_id))
- continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
- link->connector_signal != SIGNAL_TYPE_RGB)
- continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
- link->connector_signal == SIGNAL_TYPE_RGB)
- continue;
-
- DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
- DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
- DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
- break;
- }
-
- if (bios->integrated_info)
- memcpy(info, bios->integrated_info, sizeof(*info));
-
- /* Look for channel mapping corresponding to connector and device tag */
- for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
- struct external_display_path *path =
- &info->ext_disp_conn_info.path[i];
-
- if (path->device_connector_id.enum_id == link->link_id.enum_id &&
- path->device_connector_id.id == link->link_id.id &&
- path->device_connector_id.type == link->link_id.type) {
- if (link->device_tag.acpi_device != 0 &&
- path->device_acpi_enum == link->device_tag.acpi_device) {
- link->ddi_channel_mapping = path->channel_mapping;
- link->chip_caps = path->caps;
- DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
- DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
- } else if (path->device_tag ==
- link->device_tag.dev_id.raw_device_tag) {
- link->ddi_channel_mapping = path->channel_mapping;
- link->chip_caps = path->caps;
- DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
- DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
- }
-
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
- link->bios_forced_drive_settings.VOLTAGE_SWING =
- (info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
- link->bios_forced_drive_settings.PRE_EMPHASIS =
- ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
- }
-
- break;
- }
- }
-
- if (bios->funcs->get_atom_dc_golden_table)
- bios->funcs->get_atom_dc_golden_table(bios);
-
- /*
- * TODO check if GPIO programmed correctly
- *
- * If GPIO isn't programmed correctly HPD might not rise or drain
- * fast enough, leading to bounces.
- */
- program_hpd_filter(link);
-
- link->psr_settings.psr_vtotal_control_support = false;
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
- kfree(info);
- return true;
-device_tag_fail:
- link->link_enc->funcs->destroy(&link->link_enc);
-link_enc_create_fail:
- if (link->panel_cntl != NULL)
- link->panel_cntl->funcs->destroy(&link->panel_cntl);
-panel_cntl_create_fail:
- dal_ddc_service_destroy(&link->ddc);
-ddc_create_fail:
-create_fail:
-
- if (link->hpd_gpio) {
- dal_gpio_destroy_irq(&link->hpd_gpio);
- link->hpd_gpio = NULL;
- }
-
- DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
- kfree(info);
-
- return false;
-}
-
-static bool dc_link_construct_dpia(struct dc_link *link,
- const struct link_init_data *init_params)
-{
- struct ddc_service_init_data ddc_service_init_data = { 0 };
- struct dc_context *dc_ctx = init_params->ctx;
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- /* Initialize irq sources for hpd and hpd rx */
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
- link->link_status.dpcd_caps = &link->dpcd_caps;
-
- link->dc = init_params->dc;
- link->ctx = dc_ctx;
- link->link_index = init_params->link_index;
-
- memset(&link->preferred_training_settings, 0,
- sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0,
- sizeof(struct dc_link_settings));
-
- /* Dummy Init for linkid */
- link->link_id.type = OBJECT_TYPE_CONNECTOR;
- link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
- link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
- link->is_internal_display = false;
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
- LINK_INFO("Connector[%d] description:signal %d\n",
- init_params->connector_index,
- link->connector_signal);
-
- link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
- link->is_dig_mapping_flexible = true;
-
- /* TODO: Initialize link : funcs->link_init */
-
- ddc_service_init_data.ctx = link->ctx;
- ddc_service_init_data.id = link->link_id;
- ddc_service_init_data.link = link;
- /* Set indicator for dpia link so that ddc won't be created */
- ddc_service_init_data.is_dpia_link = true;
-
- link->ddc = dal_ddc_service_create(&ddc_service_init_data);
- if (!link->ddc) {
- DC_ERROR("Failed to create ddc_service!\n");
- goto ddc_create_fail;
- }
-
- /* Set dpia port index : 0 to number of dpia ports */
- link->ddc_hw_inst = init_params->connector_index;
-
- /* TODO: Create link encoder */
-
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
-
- return true;
-
-ddc_create_fail:
- return false;
-}
-
-static bool dc_link_construct(struct dc_link *link,
- const struct link_init_data *init_params)
-{
- /* Handle dpia case */
- if (init_params->is_dpia_link)
- return dc_link_construct_dpia(link, init_params);
- else
- return dc_link_construct_legacy(link, init_params);
-}
-/*******************************************************************************
- * Public functions
- ******************************************************************************/
-struct dc_link *link_create(const struct link_init_data *init_params)
-{
- struct dc_link *link =
- kzalloc(sizeof(*link), GFP_KERNEL);
-
- if (NULL == link)
- goto alloc_fail;
-
- if (false == dc_link_construct(link, init_params))
- goto construct_fail;
-
- /*
- * Must use preferred_link_setting, not reported_link_cap or verified_link_cap,
- * since struct preferred_link_setting won't be reset after S3.
- */
- link->preferred_link_setting.dpcd_source_device_specific_field_support = true;
-
- return link;
-
-construct_fail:
- kfree(link);
-
-alloc_fail:
- return NULL;
-}
-
-void link_destroy(struct dc_link **link)
-{
- dc_link_destruct(*link);
- kfree(*link);
- *link = NULL;
-}
-
-static void enable_stream_features(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
-
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
- struct dc_link *link = stream->link;
- union down_spread_ctrl old_downspread;
- union down_spread_ctrl new_downspread;
-
- memset(&old_downspread, 0, sizeof(old_downspread));
-
- core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
- &old_downspread.raw, sizeof(old_downspread));
-
- new_downspread.raw = old_downspread.raw;
-
- new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
- (stream->ignore_msa_timing_param) ? 1 : 0;
-
- if (new_downspread.raw != old_downspread.raw) {
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &new_downspread.raw, sizeof(new_downspread));
- }
-
- } else {
- dm_helpers_mst_enable_stream_features(stream);
- }
-}
-
-static enum dc_status enable_link_dp(struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- enum dc_status status;
- bool skip_video_pattern;
- struct dc_link *link = stream->link;
- const struct dc_link_settings *link_settings =
- &pipe_ctx->link_config.dp_link_settings;
- bool fec_enable;
- int i;
- bool apply_seamless_boot_optimization = false;
- uint32_t bl_oled_enable_delay = 50; // in ms
- uint32_t post_oui_delay = 30; // 30ms
- /* Reduce link bandwidth between failed link training attempts. */
- bool do_fallback = false;
-
- // check for seamless boot
- for (i = 0; i < state->stream_count; i++) {
- if (state->streams[i]->apply_seamless_boot_optimization) {
- apply_seamless_boot_optimization = true;
- break;
- }
- }
-
- /* Train with fallback when enabling DPIA link. Conventional links are
- * trained with fallback during sink detection.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- do_fallback = true;
-
- /*
- * Temporary w/a to get DP2.0 link rates to work with SST.
- * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
- */
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->dc->debug.set_mst_en_for_sst) {
- dp_enable_mst_on_sink(link, true);
- }
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
- /*in case it is not on*/
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
- } else {
- pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
- link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- if (state->clk_mgr && !apply_seamless_boot_optimization)
- state->clk_mgr->funcs->update_clocks(state->clk_mgr,
- state, false);
- }
-
- // during mode switch we do DP_SET_POWER off then on, and OUI is lost
- dpcd_set_source_specific_data(link);
- if (link->dpcd_sink_ext_caps.raw != 0) {
- post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
- msleep(post_oui_delay);
- }
-
- // similarly, mode switch can cause loss of cable ID
- dpcd_write_cable_id_to_dprx(link);
-
- skip_video_pattern = true;
-
- if (link_settings->link_rate == LINK_RATE_LOW)
- skip_video_pattern = false;
-
- if (perform_link_training_with_retries(link_settings,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal,
- do_fallback)) {
- status = DC_OK;
- } else {
- status = DC_FAIL_DP_LINK_TRAINING;
- }
-
- if (link->preferred_training_settings.fec_enable)
- fec_enable = *link->preferred_training_settings.fec_enable;
- else
- fec_enable = true;
-
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_enable(link, fec_enable);
-
- // during mode set we do DP_SET_POWER off then on, aux writes are lost
- if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
- link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
- link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- dc_link_set_default_brightness_aux(link); // TODO: use cached if known
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- msleep(bl_oled_enable_delay);
- dc_link_backlight_enable_aux(link, true);
- }
-
- return status;
-}
-
-static enum dc_status enable_link_edp(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- return enable_link_dp(state, pipe_ctx);
-}
-
-static enum dc_status enable_link_dp_mst(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- struct dc_link *link = pipe_ctx->stream->link;
-
- /* sink signal type after MST branch is MST. Multiple MST sinks
- * share one link. Link DP PHY is enabled or trained only once.
- */
- if (link->link_status.link_active)
- return DC_OK;
-
- /* clear payload table */
- dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
- /* to make sure the pending down rep can be processed
- * before enabling the link
- */
- dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
-
- /* set the sink to MST mode before enabling the link */
- dp_enable_mst_on_sink(link, true);
-
- return enable_link_dp(state, pipe_ctx);
-}
-
-void dc_link_blank_all_dp_displays(struct dc *dc)
-{
- unsigned int i;
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
- (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
- continue;
-
- /* DP 2.0 spec requires that we read LTTPR caps first */
- dp_retrieve_lttpr_cap(dc->links[i]);
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dc_link_blank_dp_stream(dc->links[i], true);
- }
-
-}
-
-void dc_link_blank_all_edp_displays(struct dc *dc)
-{
- unsigned int i;
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) ||
- (!dc->links[i]->edp_sink_present))
- continue;
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dc_link_blank_dp_stream(dc->links[i], true);
- }
-}
-
-void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
-{
- unsigned int j;
- struct dc *dc = link->ctx->dc;
- enum signal_type signal = link->connector_signal;
-
- if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- link->link_enc->funcs->get_dig_frontend &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
- unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
-
- if (fe != ENGINE_ID_UNKNOWN)
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
-
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
- dp_receiver_power_ctrl(link, false);
- }
-}
-
-static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
- enum engine_id eng_id,
- struct ext_hdmi_settings *settings)
-{
- bool result = false;
- int i = 0;
- struct integrated_info *integrated_info =
- pipe_ctx->stream->ctx->dc_bios->integrated_info;
-
- if (integrated_info == NULL)
- return false;
-
- /*
- * Get retimer settings from sbios for passing SI eye test for DCE11.
- * The setting values vary based on board revision and port id,
- * therefore the setting values for each port are passed by sbios.
- */
-
- // Check if current bios contains ext Hdmi settings
- if (integrated_info->gpu_cap_info & 0x20) {
- switch (eng_id) {
- case ENGINE_ID_DIGA:
- settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp0_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp0_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- case ENGINE_ID_DIGB:
- settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp1_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp1_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- case ENGINE_ID_DIGC:
- settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp2_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp2_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- case ENGINE_ID_DIGD:
- settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp3_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp3_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- default:
- break;
- }
-
- if (result == true) {
- // Validate settings from bios integrated info table
- if (settings->slv_addr == 0)
- return false;
- if (settings->reg_num > 9)
- return false;
- if (settings->reg_num_6g > 3)
- return false;
-
- for (i = 0; i < settings->reg_num; i++) {
- if (settings->reg_settings[i].i2c_reg_index > 0x20)
- return false;
- }
-
- for (i = 0; i < settings->reg_num_6g; i++) {
- if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
- return false;
- }
- }
- }
-
- return result;
-}
-
-static bool i2c_write(struct pipe_ctx *pipe_ctx,
- uint8_t address, uint8_t *buffer, uint32_t length)
-{
- struct i2c_command cmd = {0};
- struct i2c_payload payload = {0};
-
- memset(&payload, 0, sizeof(payload));
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.number_of_payloads = 1;
- cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
- cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
-
- payload.address = address;
- payload.data = buffer;
- payload.length = length;
- payload.write = true;
- cmd.payloads = &payload;
-
- if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
- pipe_ctx->stream->link, &cmd))
- return true;
-
- return false;
-}
-
-static void write_i2c_retimer_setting(
- struct pipe_ctx *pipe_ctx,
- bool is_vga_mode,
- bool is_over_340mhz,
- struct ext_hdmi_settings *settings)
-{
- uint8_t slave_address = (settings->slv_addr >> 1);
- uint8_t buffer[2];
- const uint8_t apply_rx_tx_change = 0x4;
- uint8_t offset = 0xA;
- uint8_t value = 0;
- int i = 0;
- bool i2c_success = false;
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- memset(&buffer, 0, sizeof(buffer));
-
- /* Start Ext-Hdmi programming*/
-
- for (i = 0; i < settings->reg_num; i++) {
- /* Apply 3G settings */
- if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
-
- buffer[0] = settings->reg_settings[i].i2c_reg_index;
- buffer[1] = settings->reg_settings[i].i2c_reg_val;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
-
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
- * needs to be set to 1 on every 0xA-0xC write.
- */
- if (settings->reg_settings[i].i2c_reg_index == 0xA ||
- settings->reg_settings[i].i2c_reg_index == 0xB ||
- settings->reg_settings[i].i2c_reg_index == 0xC) {
-
- /* Query current value from offset 0xA */
- if (settings->reg_settings[i].i2c_reg_index == 0xA)
- value = settings->reg_settings[i].i2c_reg_val;
- else {
- i2c_success =
- dal_ddc_service_query_ddc_data(
- pipe_ctx->stream->link->ddc,
- slave_address, &offset, 1, &value, 1);
- if (!i2c_success)
- goto i2c_write_fail;
- }
-
- buffer[0] = offset;
- /* Set APPLY_RX_TX_CHANGE bit to 1 */
- buffer[1] = value | apply_rx_tx_change;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
- }
- }
- }
-
- /* Apply 6G settings when pixel clock is over 340MHz */
- if (is_over_340mhz) {
- for (i = 0; i < settings->reg_num_6g; i++) {
- /* Apply 6G settings */
- if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
-
- buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
- buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
-
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
- * needs to be set to 1 on every 0xA-0xC write.
- */
- if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
- settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
- settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
-
- /* Query current value from offset 0xA */
- if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
- value = settings->reg_settings_6g[i].i2c_reg_val;
- else {
- i2c_success =
- dal_ddc_service_query_ddc_data(
- pipe_ctx->stream->link->ddc,
- slave_address, &offset, 1, &value, 1);
- if (!i2c_success)
- goto i2c_write_fail;
- }
-
- buffer[0] = offset;
- /* Set APPLY_RX_TX_CHANGE bit to 1 */
- buffer[1] = value | apply_rx_tx_change;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
- }
- }
- }
- }
-
- if (is_vga_mode) {
- /* Program additional settings if using 640x480 resolution */
-
- /* Write offset 0xFF to 0x01 */
- buffer[0] = 0xff;
- buffer[1] = 0x01;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x00 to 0x23 */
- buffer[0] = 0x00;
- buffer[1] = 0x23;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0xff to 0x00 */
- buffer[0] = 0xff;
- buffer[1] = 0x00;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- }
-
- return;
-
-i2c_write_fail:
- DC_LOG_DEBUG("Set retimer failed");
-}
-
-static void write_i2c_default_retimer_setting(
- struct pipe_ctx *pipe_ctx,
- bool is_vga_mode,
- bool is_over_340mhz)
-{
- uint8_t slave_address = (0xBA >> 1);
- uint8_t buffer[2];
- bool i2c_success = false;
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- memset(&buffer, 0, sizeof(buffer));
-
- /* Program Slave Address for tuning signal integrity */
- /* Write offset 0x0A to 0x13 */
- buffer[0] = 0x0A;
- buffer[1] = 0x13;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0A to 0x17 */
- buffer[0] = 0x0A;
- buffer[1] = 0x17;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0B to 0xDA or 0xD8 */
- buffer[0] = 0x0B;
- buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0A to 0x17 */
- buffer[0] = 0x0A;
- buffer[1] = 0x17;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0C to 0x1D or 0x91 */
- buffer[0] = 0x0C;
- buffer[1] = is_over_340mhz ? 0x1D : 0x91;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0A to 0x17 */
- buffer[0] = 0x0A;
- buffer[1] = 0x17;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
-
- if (is_vga_mode) {
- /* Program additional settings if using 640x480 resolution */
-
- /* Write offset 0xFF to 0x01 */
- buffer[0] = 0xff;
- buffer[1] = 0x01;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x00 to 0x23 */
- buffer[0] = 0x00;
- buffer[1] = 0x23;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0xff to 0x00 */
- buffer[0] = 0xff;
- buffer[1] = 0x00;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
- }
-
- return;
-
-i2c_write_fail:
- DC_LOG_DEBUG("Set default retimer failed");
-}
-
-static void write_i2c_redriver_setting(
- struct pipe_ctx *pipe_ctx,
- bool is_over_340mhz)
-{
- uint8_t slave_address = (0xF0 >> 1);
- uint8_t buffer[16];
- bool i2c_success = false;
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- memset(&buffer, 0, sizeof(buffer));
-
- // Program Slave Address for tuning signal integrity
- buffer[3] = 0x4E;
- buffer[4] = 0x4E;
- buffer[5] = 0x4E;
- buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
-
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\
- \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\
- offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\
- i2c_success = %d\n",
- slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
-
- if (!i2c_success)
- DC_LOG_DEBUG("Set redriver failed");
-}
-
-static void disable_link(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal)
-{
- /*
- * TODO: implement call for dp_set_hw_test_pattern
- * it is needed for compliance testing
- */
-
- /* Here we need to specify that encoder output settings
- * need to be calculated as for the set mode;
- * this leads to querying dynamic link capabilities,
- * which should be done before enabling output
- */
-
- if (dc_is_dp_signal(signal)) {
- /* SST DP, eDP */
- struct dc_link_settings link_settings = link->cur_link_settings;
- if (dc_is_dp_sst_signal(signal))
- dp_disable_link_phy(link, link_res, signal);
- else
- dp_disable_link_phy_mst(link, link_res, signal);
-
- if (dc_is_dp_sst_signal(signal) ||
- link->mst_stream_alloc_table.stream_count == 0) {
- if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
- dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, link_res, false);
- }
- }
- } else if (signal != SIGNAL_TYPE_VIRTUAL) {
- link->dc->hwss.disable_link_output(link, link_res, signal);
- }
-
- if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* MST: disable link only when no stream uses the link */
- if (link->mst_stream_alloc_table.stream_count <= 0)
- link->link_status.link_active = false;
- } else {
- link->link_status.link_active = false;
- }
-}
-
-static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- enum dc_color_depth display_color_depth;
- enum engine_id eng_id;
- struct ext_hdmi_settings settings = {0};
- bool is_over_340mhz = false;
- bool is_vga_mode = (stream->timing.h_addressable == 640)
- && (stream->timing.v_addressable == 480);
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (stream->phy_pix_clk == 0)
- stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
- if (stream->phy_pix_clk > 340000)
- is_over_340mhz = true;
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
- unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
- /* DP159, Retimer settings */
- eng_id = pipe_ctx->stream_res.stream_enc->id;
-
- if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
- write_i2c_retimer_setting(pipe_ctx,
- is_vga_mode, is_over_340mhz, &settings);
- } else {
- write_i2c_default_retimer_setting(pipe_ctx,
- is_vga_mode, is_over_340mhz);
- }
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
- /* PI3EQX1204, Redriver settings */
- write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
- }
- }
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- dal_ddc_service_write_scdc_data(
- stream->link->ddc,
- stream->phy_pix_clk,
- stream->timing.flags.LTE_340MCSC_SCRAMBLE);
-
- memset(&stream->link->cur_link_settings, 0,
- sizeof(struct dc_link_settings));
-
- display_color_depth = stream->timing.display_color_depth;
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
- display_color_depth = COLOR_DEPTH_888;
-
- dc->hwss.enable_tmds_link_output(
- link,
- &pipe_ctx->link_res,
- pipe_ctx->stream->signal,
- pipe_ctx->clock_source->id,
- display_color_depth,
- stream->phy_pix_clk);
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- dal_ddc_service_read_scdc_data(link->ddc);
-}
-
-static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc *dc = stream->ctx->dc;
-
- if (stream->phy_pix_clk == 0)
- stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
-
- memset(&stream->link->cur_link_settings, 0,
- sizeof(struct dc_link_settings));
- dc->hwss.enable_lvds_link_output(
- link,
- &pipe_ctx->link_res,
- pipe_ctx->clock_source->id,
- stream->phy_pix_clk);
-
-}
-
-bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
-{
- bool ret = false;
- union dpcd_alpm_configuration alpm_config;
-
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- memset(&alpm_config, 0, sizeof(alpm_config));
-
- alpm_config.bits.ENABLE = (enable ? true : false);
- ret = dm_helpers_dp_write_dpcd(link->ctx, link,
- DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw,
- sizeof(alpm_config.raw));
- }
- return ret;
-}
-
-/****************************enable_link***********************************/
-static enum dc_status enable_link(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- enum dc_status status = DC_ERROR_UNEXPECTED;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
-
- /* There are some scenarios where the driver is unloaded with a display
- * still enabled. When the driver is reloaded, a display may fail to
- * light up if there is a mismatch between the old and new link
- * settings. Disable must be called first before enabling with the
- * new link settings.
- */
- if (link->link_status.link_active) {
- disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- }
-
- switch (pipe_ctx->stream->signal) {
- case SIGNAL_TYPE_DISPLAY_PORT:
- status = enable_link_dp(state, pipe_ctx);
- break;
- case SIGNAL_TYPE_EDP:
- status = enable_link_edp(state, pipe_ctx);
- break;
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- status = enable_link_dp_mst(state, pipe_ctx);
- msleep(200);
- break;
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- enable_link_hdmi(pipe_ctx);
- status = DC_OK;
- break;
- case SIGNAL_TYPE_LVDS:
- enable_link_lvds(pipe_ctx);
- status = DC_OK;
- break;
- case SIGNAL_TYPE_VIRTUAL:
- status = DC_OK;
- break;
- default:
- break;
- }
-
- if (status == DC_OK)
- pipe_ctx->stream->link->link_status.link_active = true;
-
- return status;
-}
-
-static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
-{
-
- uint32_t pxl_clk = timing->pix_clk_100hz;
-
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- pxl_clk /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- pxl_clk = pxl_clk * 2 / 3;
-
- if (timing->display_color_depth == COLOR_DEPTH_101010)
- pxl_clk = pxl_clk * 10 / 8;
- else if (timing->display_color_depth == COLOR_DEPTH_121212)
- pxl_clk = pxl_clk * 12 / 8;
-
- return pxl_clk;
-}
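As a worked example (illustrative numbers only, not taken from this change): a YCbCr 4:2:0 timing with pix_clk_100hz = 5940000 (594 MHz) at 10-bit color is scaled to 5940000 / 2 = 2970000 and then 2970000 * 10 / 8 = 3712500, i.e. an effective pixel clock of about 371 MHz is used for the dongle limit checks below.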
-
-static bool dp_active_dongle_validate_timing(
- const struct dc_crtc_timing *timing,
- const struct dpcd_caps *dpcd_caps)
-{
- const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
-
- switch (dpcd_caps->dongle_type) {
- case DISPLAY_DONGLE_DP_VGA_CONVERTER:
- case DISPLAY_DONGLE_DP_DVI_CONVERTER:
- case DISPLAY_DONGLE_DP_DVI_DONGLE:
- if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
- return true;
- else
- return false;
- default:
- break;
- }
-
- if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
- dongle_caps->extendedCapValid == true) {
- /* Check Pixel Encoding */
- switch (timing->pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- break;
- case PIXEL_ENCODING_YCBCR422:
- if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
- return false;
- break;
- case PIXEL_ENCODING_YCBCR420:
- if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
- return false;
- break;
- default:
- /* Invalid Pixel Encoding*/
- return false;
- }
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- case COLOR_DEPTH_888:
- /*888 and 666 should always be supported*/
- break;
- case COLOR_DEPTH_101010:
- if (dongle_caps->dp_hdmi_max_bpc < 10)
- return false;
- break;
- case COLOR_DEPTH_121212:
- if (dongle_caps->dp_hdmi_max_bpc < 12)
- return false;
- break;
- case COLOR_DEPTH_141414:
- case COLOR_DEPTH_161616:
- default:
- /* These color depths are currently not supported */
- return false;
- }
-
- /* Check 3D format */
- switch (timing->timing_3d_format) {
- case TIMING_3D_FORMAT_NONE:
- case TIMING_3D_FORMAT_FRAME_ALTERNATE:
- /*Only frame alternate 3D is supported on active dongle*/
- break;
- default:
- /*other 3D formats are not supported due to bad infoframe translation */
- return false;
- }
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
- struct dc_crtc_timing outputTiming = *timing;
-
- if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
- /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
- outputTiming.flags.DSC = 0;
- if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
- return false;
- } else { // DP to HDMI TMDS converter
- if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
- return false;
- }
-#else
- if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
- return false;
-#endif
- }
-
- if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
- dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
- dongle_caps->dfp_cap_ext.supported) {
-
- if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
- return false;
-
- if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
- return false;
-
- if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
- return false;
-
- if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_666 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
- return false;
- } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr444)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
- return false;
- } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr422)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
- return false;
- } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr420)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
- return false;
- }
- }
-
- return true;
-}
-
-enum dc_status dc_link_validate_mode_timing(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- const struct dc_crtc_timing *timing)
-{
- uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
- struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
-
- /* A hack to avoid failing any modes for the EDID override feature on
- * topology changes such as a lower quality DP cable or a different dongle
- */
- if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
- return DC_OK;
-
- /* Passive Dongle */
- if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
- return DC_EXCEED_DONGLE_CAP;
-
- /* Active Dongle*/
- if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
- return DC_EXCEED_DONGLE_CAP;
-
- switch (stream->signal) {
- case SIGNAL_TYPE_EDP:
- case SIGNAL_TYPE_DISPLAY_PORT:
- if (!dp_validate_mode_timing(
- link,
- timing))
- return DC_NO_DP_LINK_BANDWIDTH;
- break;
-
- default:
- break;
- }
-
- return DC_OK;
-}
-
-static struct abm *get_abm_from_stream_res(const struct dc_link *link)
-{
- int i;
- struct dc *dc = NULL;
- struct abm *abm = NULL;
-
- if (!link || !link->ctx)
- return NULL;
-
- dc = link->ctx->dc;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
-
- if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
- break;
- }
- }
- return abm;
-}
-
-int dc_link_get_backlight_level(const struct dc_link *link)
-{
- struct abm *abm = get_abm_from_stream_res(link);
- struct panel_cntl *panel_cntl = link->panel_cntl;
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- bool fw_set_brightness = true;
-
- if (dmcu)
- fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
-
- if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
- return panel_cntl->funcs->get_current_backlight(panel_cntl);
- else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
- return (int) abm->funcs->get_current_backlight(abm);
- else
- return DC_ERROR_UNEXPECTED;
-}
-
-int dc_link_get_target_backlight_pwm(const struct dc_link *link)
-{
- struct abm *abm = get_abm_from_stream_res(link);
-
- if (abm == NULL || abm->funcs->get_target_backlight == NULL)
- return DC_ERROR_UNEXPECTED;
-
- return (int) abm->funcs->get_target_backlight(abm);
-}
-
-static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
-{
- int i;
- struct dc *dc = link->ctx->dc;
- struct pipe_ctx *pipe_ctx = NULL;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
- pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- break;
- }
- }
- }
-
- return pipe_ctx;
-}
-
-bool dc_link_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
-{
- struct dc *dc = link->ctx->dc;
-
- DC_LOGGER_INIT(link->ctx->logger);
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_pwm_u16_16, backlight_pwm_u16_16);
-
- if (dc_is_embedded_signal(link->connector_signal)) {
- struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
-
- if (pipe_ctx) {
- /* Disable brightness ramping when the display is blanked
- * as it can hang the DMCU
- */
- if (pipe_ctx->plane_state == NULL)
- frame_ramp = 0;
- } else {
- return false;
- }
-
- dc->hwss.set_backlight_level(
- pipe_ctx,
- backlight_pwm_u16_16,
- frame_ramp);
- }
- return true;
-}
-
-bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
- bool wait, bool force_static, const unsigned int *power_opts)
-{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct dmub_psr *psr = dc->res_pool->psr;
- unsigned int panel_inst;
-
- if (psr == NULL && force_static)
- return false;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return false;
-
- if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
- // Don't enter PSR if panel is not connected
- return false;
- }
-
- /* Set power optimization flag */
- if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
- link->psr_settings.psr_power_opt = *power_opts;
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
- psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
- }
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled &&
- force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
-
- /* Enable or Disable PSR */
- if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
- link->psr_settings.psr_allow_active = *allow_active;
-
- if (!link->psr_settings.psr_allow_active)
- dc_z10_restore(dc);
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
- } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
- link->psr_settings.psr_feature_enabled)
- dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
- else
- return false;
- }
-
- return true;
-}
-
-bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
-{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct dmub_psr *psr = dc->res_pool->psr;
- unsigned int panel_inst;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return false;
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled)
- psr->funcs->psr_get_state(psr, state, panel_inst);
- else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
- dmcu->funcs->get_psr_state(dmcu, state);
-
- return true;
-}
-
-static inline enum physical_phy_id
-transmitter_to_phy_id(enum transmitter transmitter_value)
-{
- switch (transmitter_value) {
- case TRANSMITTER_UNIPHY_A:
- return PHYLD_0;
- case TRANSMITTER_UNIPHY_B:
- return PHYLD_1;
- case TRANSMITTER_UNIPHY_C:
- return PHYLD_2;
- case TRANSMITTER_UNIPHY_D:
- return PHYLD_3;
- case TRANSMITTER_UNIPHY_E:
- return PHYLD_4;
- case TRANSMITTER_UNIPHY_F:
- return PHYLD_5;
- case TRANSMITTER_NUTMEG_CRT:
- return PHYLD_6;
- case TRANSMITTER_TRAVIS_CRT:
- return PHYLD_7;
- case TRANSMITTER_TRAVIS_LCD:
- return PHYLD_8;
- case TRANSMITTER_UNIPHY_G:
- return PHYLD_9;
- case TRANSMITTER_COUNT:
- return PHYLD_COUNT;
- case TRANSMITTER_UNKNOWN:
- return PHYLD_UNKNOWN;
- default:
- WARN_ONCE(1, "Unknown transmitter value %d\n",
- transmitter_value);
- return PHYLD_UNKNOWN;
- }
-}
-
-bool dc_link_setup_psr(struct dc_link *link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context)
-{
- struct dc *dc;
- struct dmcu *dmcu;
- struct dmub_psr *psr;
- int i;
- unsigned int panel_inst;
- /* updateSinkPsrDpcdConfig*/
- union dpcd_psr_configuration psr_configuration;
- union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};
-
- psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
-
- if (!link)
- return false;
-
- dc = link->ctx->dc;
- dmcu = dc->res_pool->dmcu;
- psr = dc->res_pool->psr;
-
- if (!dmcu && !psr)
- return false;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return false;
-
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
- psr_configuration.bits.ENABLE = 1;
- psr_configuration.bits.CRC_VERIFICATION = 1;
- psr_configuration.bits.FRAME_CAPTURE_INDICATION =
- psr_config->psr_frame_capture_indication_req;
-
- /* Check for PSR v2*/
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- /* For PSR v2 selective update.
- * Indicates whether sink should start capturing
- * immediately following active scan line,
- * or starting with the 2nd active scan line.
- */
- psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
- /*For PSR v2, determines whether Sink should generate
- * IRQ_HPD when CRC mismatch is detected.
- */
- psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
- /* For PSR v2, set the bit when the Source device will
- * be enabling PSR2 operation.
- */
- psr_configuration.bits.ENABLE_PSR2 = 1;
- /* For PSR v2, the Sink device must be able to receive
- * SU region updates early in the frame time.
- */
- psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1;
- }
-
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 368,
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- dc_power_alpm_dpcd_enable(link, true);
- psr_context->su_granularity_required =
- psr_config->su_granularity_required;
- psr_context->su_y_granularity =
- psr_config->su_y_granularity;
- psr_context->line_time_in_us =
- psr_config->line_time_in_us;
-
- if (link->psr_settings.psr_vtotal_control_support) {
- psr_context->rate_control_caps = psr_config->rate_control_caps;
- vtotal_control.bits.ENABLE = true;
- core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
- &vtotal_control.raw, sizeof(vtotal_control.raw));
- }
- }
-
- psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
- psr_context->transmitterId = link->link_enc->transmitter;
- psr_context->engineId = link->link_enc->preferred_engine;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream
- == stream) {
- /* dmcu -1 for all controller id values,
- * therefore +1 here
- */
- psr_context->controllerId =
- dc->current_state->res_ctx.
- pipe_ctx[i].stream_res.tg->inst + 1;
- break;
- }
- }
-
- /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
- psr_context->phyType = PHY_TYPE_UNIPHY;
- /*PhyId is associated with the transmitter id*/
- psr_context->smuPhyId =
- transmitter_to_phy_id(link->link_enc->transmitter);
-
- psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
- timing.pix_clk_100hz * 100),
- stream->timing.v_total),
- stream->timing.h_total);
-
- psr_context->psrSupportedDisplayConfig = true;
- psr_context->psrExitLinkTrainingRequired =
- psr_config->psr_exit_link_training_required;
- psr_context->sdpTransmitLineNumDeadline =
- psr_config->psr_sdp_transmit_line_num_deadline;
- psr_context->psrFrameCaptureIndicationReq =
- psr_config->psr_frame_capture_indication_req;
-
- psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
-
- psr_context->numberOfControllers =
- link->dc->res_pool->timing_generator_count;
-
- psr_context->rfb_update_auto_en = true;
-
- /* 2 frames before entering PSR. */
- psr_context->timehyst_frames = 2;
- /* half a frame
- * (units in 100 lines, i.e. a value of 1 represents 100 lines)
- */
- psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
- psr_context->aux_repeats = 10;
-
- psr_context->psr_level.u32all = 0;
-
- /* skip powering down the single pipe since it blocks the cstate */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
- switch(link->ctx->asic_id.chip_family) {
- case FAMILY_YELLOW_CARP:
- case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_1:
- if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
- break;
- default:
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
- break;
- }
- }
-#else
- if (link->ctx->asic_id.chip_family >= FAMILY_RV)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
-
- /* SMU will perform additional powerdown sequence.
- * For unsupported ASICs, set psr_level flag to skip PSR
- * static screen notification to SMU.
- * (Always set for DAL2, did not check ASIC)
- */
- psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
- psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations;
-
- /* Complete PSR entry before aborting to prevent intermittent
- * freezes on certain eDPs
- */
- psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
-
- /* enable ALPM */
- psr_context->psr_level.bits.DISABLE_ALPM = 0;
- psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1;
-
- /* Controls additional delay after remote frame capture before
- * continuing power down, default = 0
- */
- psr_context->frame_delay = 0;
-
- if (psr) {
- link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
- link, psr_context, panel_inst);
- link->psr_settings.psr_power_opt = 0;
- link->psr_settings.psr_allow_active = 0;
- }
- else
- link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
-
- /* psr_enabled == 0 indicates setup_psr did not succeed, but this
- * should not happen since firmware should be running at this point
- */
- if (link->psr_settings.psr_feature_enabled == 0)
- ASSERT(0);
-
- return true;
-
-}
-
-void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
-{
- struct dc *dc = link->ctx->dc;
- struct dmub_psr *psr = dc->res_pool->psr;
- unsigned int panel_inst;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return;
-
- /* PSR residency measurements only supported on DMCUB */
- if (psr != NULL && link->psr_settings.psr_feature_enabled)
- psr->funcs->psr_get_residency(psr, residency, panel_inst);
- else
- *residency = 0;
-}
-
-bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
-{
- struct dc *dc = link->ctx->dc;
- struct dmub_psr *psr = dc->res_pool->psr;
-
- if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support)
- return false;
-
- psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su);
-
- return true;
-}
-
-const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
-{
- return &link->link_status;
-}
-
-void core_link_resume(struct dc_link *link)
-{
- if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
- program_hpd_filter(link);
-}
-
-static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
-{
- struct fixed31_32 mbytes_per_sec;
- uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
- &stream->link->cur_link_settings);
- link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
-
- mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
-
- return dc_fixpt_div_int(mbytes_per_sec, 54);
-}
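For instance (an illustrative figure, assuming dc_link_bandwidth_kbps already reports payload bandwidth after channel-coding overhead): a 4-lane HBR2 link carries roughly 17,280,000 kbps; 17,280,000 / 8000 = 2160 Mbytes/sec, and 2160 / 54 gives 40 PBN per time slot.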
-
-static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
-{
- struct fixed31_32 peak_kbps;
- uint32_t numerator = 0;
- uint32_t denominator = 1;
-
- /*
- * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
- * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
- * common multiplier to render an integer PBN for all link rate/lane
- * count combinations.
- * The calculation is:
- * peak_kbps *= (1006/1000)  apply the 0.6% margin
- * peak_kbps *= (64/54)      convert to PBN units of 54/64 Mbytes/sec
- * peak_kbps /= (8 * 1000)   convert kbits/sec to Mbytes/sec
- */
-
- numerator = 64 * PEAK_FACTOR_X1000;
- denominator = 54 * 8 * 1000 * 1000;
- kbps *= numerator;
- peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
-
- return peak_kbps;
-}
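The computation above reduces to the closed form below (a sketch of the same arithmetic; the 1006 factor is assumed to be the value of PEAK_FACTOR_X1000 referenced by the 1.006 margin in the comment):

\( \mathrm{PBN} = \mathrm{kbps} \times \frac{1006}{1000} \times \frac{64}{54} \times \frac{1}{8000} \)

so that, for example, a 2,000,000 kbps stream needs about 2,000,000 * 64 * 1006 / 432,000,000 ≈ 298 PBN.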
-
-static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
-{
- uint64_t kbps;
-
- kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
- return get_pbn_from_bw_in_kbps(kbps);
-}
-
-static void update_mst_stream_alloc_table(
- struct dc_link *link,
- struct stream_encoder *stream_enc,
- struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
- const struct dc_dp_mst_stream_allocation_table *proposed_table)
-{
- struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
- struct link_mst_stream_allocation *dc_alloc;
-
- int i;
- int j;
-
- /* the DRM proposed_table should not carry more than one new payload */
- ASSERT(proposed_table->stream_count -
- link->mst_stream_alloc_table.stream_count < 2);
-
- /* copy proposed_table to link, add stream encoder */
- for (i = 0; i < proposed_table->stream_count; i++) {
-
- for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
- dc_alloc =
- &link->mst_stream_alloc_table.stream_allocations[j];
-
- if (dc_alloc->vcp_id ==
- proposed_table->stream_allocations[i].vcp_id) {
-
- work_table[i] = *dc_alloc;
- work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count;
- break; /* exit j loop */
- }
- }
-
- /* new vcp_id */
- if (j == link->mst_stream_alloc_table.stream_count) {
- work_table[i].vcp_id =
- proposed_table->stream_allocations[i].vcp_id;
- work_table[i].slot_count =
- proposed_table->stream_allocations[i].slot_count;
- work_table[i].stream_enc = stream_enc;
- work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
- }
- }
-
- /* update link->mst_stream_alloc_table with work_table */
- link->mst_stream_alloc_table.stream_count =
- proposed_table->stream_count;
- for (i = 0; i < MAX_CONTROLLER_NUM; i++)
- link->mst_stream_alloc_table.stream_allocations[i] =
- work_table[i];
-}
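To illustrate with hypothetical values: if the link table already holds vcp_id 1 with 20 slots and the proposed table reports vcp_id 1 with 15 slots plus a new vcp_id 2 with 10 slots, the work table keeps the existing encoder pointers for vcp_id 1 while updating its slot_count to 15, attaches the passed-in stream_enc/hpo_dp_stream_enc to the new vcp_id 2 entry, and is then copied back over link->mst_stream_alloc_table.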
-
-static void remove_stream_from_alloc_table(
- struct dc_link *link,
- struct stream_encoder *dio_stream_enc,
- struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
-{
- int i = 0;
- struct link_mst_stream_allocation_table *table =
- &link->mst_stream_alloc_table;
-
- if (hpo_dp_stream_enc) {
- for (; i < table->stream_count; i++)
- if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
- break;
- } else {
- for (; i < table->stream_count; i++)
- if (dio_stream_enc == table->stream_allocations[i].stream_enc)
- break;
- }
-
- if (i < table->stream_count) {
- i++;
- for (; i < table->stream_count; i++)
- table->stream_allocations[i-1] = table->stream_allocations[i];
- memset(&table->stream_allocations[table->stream_count-1], 0,
- sizeof(struct link_mst_stream_allocation));
- table->stream_count--;
- }
-}
-
-static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
-{
- const uint32_t VCP_Y_PRECISION = 1000;
- uint64_t vcp_x, vcp_y;
-
- // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision
- avg_time_slots_per_mtp = dc_fixpt_add(
- avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION));
-
- vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp);
- vcp_y = dc_fixpt_floor(
- dc_fixpt_mul_int(
- dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)),
- VCP_Y_PRECISION));
-
- if (link->type == dc_connection_mst_branch)
- DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
- "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
- else
- DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
- "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
-}
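For example (illustrative only): avg_time_slots_per_mtp = 12.345 is logged as X: 12 Y: 345/1000; the added 1/(2 * VCP_Y_PRECISION) term makes the fractional part round to the nearest thousandth instead of truncating.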
-
-/*
- * Payload allocation/deallocation for SST introduced in DP2.0
- */
-static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
- bool allocate)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct link_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp;
- const struct dc_link_settings empty_link_settings = {0};
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* slot X.Y for SST payload deallocate */
- if (!allocate) {
- avg_time_slots_per_mtp = dc_fixpt_from_int(0);
-
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
- avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &empty_link_settings,
- avg_time_slots_per_mtp);
- }
-
- /* calculate VC payload and update branch with new payload allocation table*/
- if (!dpcd_write_128b_132b_sst_payload_allocation_table(
- stream,
- link,
- &proposed_table,
- allocate)) {
- DC_LOG_ERROR("SST Update Payload: Failed to update "
- "allocation table for "
- "pipe idx: %d\n",
- pipe_ctx->pipe_idx);
- return DC_FAIL_DP_PAYLOAD_ALLOCATION;
- }
-
- proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
-
- ASSERT(proposed_table.stream_count == 1);
-
- //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
- DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
- "vcp_id: %d "
- "slot_count: %d\n",
- (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
- proposed_table.stream_allocations[0].vcp_id,
- proposed_table.stream_allocations[0].slot_count);
-
- /* program DP source TX for payload */
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &proposed_table);
-
- /* poll for ACT handled */
- if (!dpcd_poll_for_allocation_change_trigger(link)) {
- // Failures will result in blackscreen and errors logged
- BREAK_TO_DEBUGGER();
- }
-
- /* slot X.Y for SST payload allocate */
- if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_128b_132b_ENCODING) {
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
-
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
- avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
- }
-
- /* Always return DC_OK.
- * If part of sequence fails, log failure(s) and show blackscreen
- */
- return DC_OK;
-}
-
-/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
- * because stream_encoder is not exposed to dm
- */
-enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp;
- struct fixed31_32 pbn;
- struct fixed31_32 pbn_per_slot;
- int i;
- enum act_return_status ret;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* enable_link_dp_mst already checks link->enabled_stream_count
- * and that stream is in link->stream[]. This is called during set mode,
- * so stream_enc is available.
- */
-
- /* calculate VC payload for stream: stream_alloc */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- true))
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- else
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
-
- DC_LOG_MST("%s "
- "stream_count: %d: \n ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- ASSERT(proposed_table.stream_count > 0);
-
- /* program DP source TX for payload */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_ERROR("Failure: unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link,
- &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- /* send down message */
- ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- if (ret != ACT_LINK_LOST) {
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
- }
-
- /* slot X.Y for only current stream */
- pbn_per_slot = get_pbn_per_slot(stream);
- if (pbn_per_slot.value == 0) {
- DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. Cannot continue, returning DC_UNSUPPORTED_VALUE.\n");
- return DC_UNSUPPORTED_VALUE;
- }
- pbn = get_pbn_from_timing(pipe_ctx);
- avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
-
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
-
- return DC_OK;
-
-}
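For reference, the payload sizing above boils down to a single fixed-point division: the stream's payload bandwidth number (PBN) divided by the PBN one MTP time slot can carry gives avg_time_slots_per_mtp, and the slot count requested from the branch device is that value rounded up. A minimal standalone sketch of the same arithmetic, using doubles in place of the driver's fixed31_32 type and made-up PBN figures purely for illustration:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only; they are not taken from the driver. */
	double pbn = 690.0;          /* stream payload bandwidth number */
	double pbn_per_slot = 60.0;  /* PBN carried by one MTP time slot */

	/* dc_fixpt_div() equivalent */
	double avg_time_slots_per_mtp = pbn / pbn_per_slot;
	/* dc_fixpt_ceil() equivalent: whole slots requested from the branch */
	int req_slot_count = (int)ceil(avg_time_slots_per_mtp);

	printf("avg time slots per MTP = %.2f, slots allocated = %d\n",
	       avg_time_slots_per_mtp, req_slot_count);
	return 0;
}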
-
-enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct fixed31_32 avg_time_slots_per_mtp;
- struct fixed31_32 pbn;
- struct fixed31_32 pbn_per_slot;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- uint8_t i;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* decrease throttled vcp size */
- pbn_per_slot = get_pbn_per_slot(stream);
- pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
- avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
-
- /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
-
- /* notify immediate branch device table update */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- true)) {
- /* update mst stream allocation table software state */
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- } else {
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- }
-
- DC_LOG_MST("%s "
- "stream_count: %d: \n ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- ASSERT(proposed_table.stream_count > 0);
-
- /* update mst stream allocation table hardware state */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_ERROR("Failure: unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- /* poll for immediate branch device ACT handled */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- return DC_OK;
-}
-
-enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct fixed31_32 avg_time_slots_per_mtp;
- struct fixed31_32 pbn;
- struct fixed31_32 pbn_per_slot;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- uint8_t i;
- enum act_return_status ret;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* notify immediate branch device table update */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- true)) {
- /* update mst stream allocation table software state */
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- }
-
- DC_LOG_MST("%s "
- "stream_count: %d: \n ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- ASSERT(proposed_table.stream_count > 0);
-
- /* update mst stream allocation table hardware state */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_ERROR("Failure: unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- /* poll for immediate branch device ACT handled */
- ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- if (ret != ACT_LINK_LOST) {
- /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
- }
-
- /* increase throttled vcp size */
- pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
- pbn_per_slot = get_pbn_per_slot(stream);
- avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
-
- return DC_OK;
-}
-
-static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- int i;
- bool mst_mode = (link->type == dc_connection_mst_branch);
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- const struct dc_link_settings empty_link_settings = {0};
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* deallocate_mst_payload is called before the link is disabled. On a
- * mode change or monitor disable/enable, a new stream is created that
- * is not in link->stream[] yet; its payload has not been allocated, so
- * it must not be deallocated here. For a new mode set, map_resources
- * will get an engine for the new stream, so stream_enc->id is still
- * valid at this point.
- */
-
- /* slot X.Y */
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &empty_link_settings,
- avg_time_slots_per_mtp);
-
- if (mst_mode) {
- /* when link is in mst mode, rely on the mst manager to remove
- * the payload
- */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- false))
-
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- else
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- } else {
- /* when link is no longer in mst mode (mst hub unplugged),
- * remove payload with default dc logic
- */
- remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc);
- }
-
- DC_LOG_MST("%s"
- "stream_count: %d: ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- /* update mst stream allocation table hardware state */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_DEBUG("Unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- if (mst_mode) {
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
- }
-
- return DC_OK;
-}
-
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
-{
- struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
- struct link_encoder *link_enc = NULL;
- struct cp_psp_stream_config config = {0};
- enum dp_panel_mode panel_mode =
- dp_get_panel_mode(pipe_ctx->stream->link);
-
- if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
- return;
-
- link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
- ASSERT(link_enc);
- if (link_enc == NULL)
- return;
-
- /* otg instance */
- config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
-
- /* dig front end */
- config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
-
- /* stream encoder index */
- config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- if (is_dp_128b_132b_signal(pipe_ctx))
- config.stream_enc_idx =
- pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
-
- /* dig back end */
- config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
-
- /* link encoder index */
- config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- if (is_dp_128b_132b_signal(pipe_ctx))
- config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-
- /* dio output index is dpia index for DPIA endpoint & dcio index by default */
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
- else
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-
-
- /* phy index */
- config.phy_idx = resource_transmitter_to_phy_idx(
- pipe_ctx->stream->link->dc, link_enc->transmitter);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
- config.phy_idx = 0;
-
- /* stream properties */
- config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
- config.mst_enabled = (pipe_ctx->stream->signal ==
- SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
- config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
- config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
- 1 : 0;
- config.dpms_off = dpms_off;
-
- /* dm stream context */
- config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
-
- cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
-}
-#endif
-
-static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct link_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp;
- uint8_t req_slot_count = 0;
- uint8_t vc_id = 1; /// VC ID always 1 for SST
- struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings;
- const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- stream->link->cur_link_settings = link_settings;
-
- if (link_hwss->ext.enable_dp_link_output)
- link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res,
- stream->signal, pipe_ctx->clock_source->id,
- &link_settings);
-
-#ifdef DIAGS_BUILD
- /* Workaround for FPGA HPO capture DP link data:
- * HPO capture will set link to active mode
- * This workaround is required to get a capture from start of frame
- */
- if (!dc->debug.fpga_hpo_capture_en) {
- struct encoder_set_dp_phy_pattern_param params = {0};
- params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE;
-
- /* Set link active */
- stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern(
- stream->link->hpo_dp_link_enc,
- &params);
- }
-#endif
-
- /* Enable DP_STREAM_ENC */
- dc->hwss.enable_stream(pipe_ctx);
-
- /* Set DSC PPS SDP (AKA "info frames") */
- if (pipe_ctx->stream->timing.flags.DSC) {
- dp_set_dsc_pps_sdp(pipe_ctx, true, true);
- }
-
- /* Allocate Payload */
- if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
- // MST case
- uint8_t i;
-
- proposed_table.stream_count = state->stream_count;
- for (i = 0; i < state->stream_count; i++) {
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
- req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
- proposed_table.stream_allocations[i].slot_count = req_slot_count;
- proposed_table.stream_allocations[i].vcp_id = i+1;
- /* NOTE: this assumes the pipe_ctx index is the same as the stream index */
- proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
- }
- } else {
- // SST case
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
- req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
- proposed_table.stream_count = 1; /// Always 1 stream for SST
- proposed_table.stream_allocations[0].slot_count = req_slot_count;
- proposed_table.stream_allocations[0].vcp_id = vc_id;
- proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
- }
-
- link_hwss->ext.update_stream_allocation_table(stream->link,
- &pipe_ctx->link_res,
- &proposed_table);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
-
- dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
- dc->hwss.enable_audio_stream(pipe_ctx);
-}
-
-void core_link_enable_stream(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- enum dc_status status;
- struct link_encoder *link_enc;
- enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
- struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
-
- if (is_dp_128b_132b_signal(pipe_ctx))
- vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
-
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- if (pipe_ctx->stream->sink) {
- if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
- pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
- pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
- }
- }
-
- if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- return;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
- && !is_dp_128b_132b_signal(pipe_ctx)) {
- if (link_enc)
- link_enc->funcs->setup(
- link_enc,
- pipe_ctx->stream->signal);
- }
-
- pipe_ctx->stream->link->link_state_valid = true;
-
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
- if (is_dp_128b_132b_signal(pipe_ctx))
- otg_out_dest = OUT_MUX_HPO_DP;
- else
- otg_out_dest = OUT_MUX_DIO;
- pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
- }
-
- link_hwss->setup_stream_attribute(pipe_ctx);
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- bool apply_edp_fast_boot_optimization =
- pipe_ctx->stream->apply_edp_fast_boot_optimization;
-
- pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
-
- // Enable VPG before building infoframe
- if (vpg && vpg->funcs->vpg_poweron)
- vpg->funcs->vpg_poweron(vpg);
-
- resource_build_info_frame(pipe_ctx);
- dc->hwss.update_info_frame(pipe_ctx);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
-
- /* Do not touch link on seamless boot optimization. */
- if (pipe_ctx->stream->apply_seamless_boot_optimization) {
- pipe_ctx->stream->dpms_off = false;
-
- /* Still enable stream features & audio on seamless boot for DP external displays */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
- enable_stream_features(pipe_ctx);
- dc->hwss.enable_audio_stream(pipe_ctx);
- }
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, false);
-#endif
- return;
- }
-
- /* eDP lit up by bios already, no need to enable again. */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- apply_edp_fast_boot_optimization &&
- !pipe_ctx->stream->timing.flags.DSC &&
- !pipe_ctx->next_odm_pipe) {
- pipe_ctx->stream->dpms_off = false;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, false);
-#endif
- return;
- }
-
- if (pipe_ctx->stream->dpms_off)
- return;
-
- /* Have to setup DSC before DIG FE and BE are connected (which happens before the
- * link training). This is to make sure the bandwidth sent to DIG BE won't be
- * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
- * will be automatically set at a later time when the video is enabled
- * (DP_VID_STREAM_EN = 1).
- */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
-
- }
-
- status = enable_link(state, pipe_ctx);
-
- if (status != DC_OK) {
- DC_LOG_WARNING("enabling link %u failed: %d\n",
- pipe_ctx->stream->link->link_index,
- status);
-
- /* Abort stream enable *unless* the failure was due to
- * DP link training - some DP monitors will recover and
- * show the stream anyway. But MST displays can't proceed
- * without link training.
- */
- if (status != DC_FAIL_DP_LINK_TRAINING ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- if (false == stream->link->link_status.link_active)
- disable_link(stream->link, &pipe_ctx->link_res,
- pipe_ctx->stream->signal);
- BREAK_TO_DEBUGGER();
- return;
- }
- }
-
- /* turn off otg test pattern if enabled */
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
-
- /* This second call is needed to reconfigure the DIG
- * as a workaround for the incorrect value being applied
- * from transmitter control.
- */
- if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
- is_dp_128b_132b_signal(pipe_ctx)))
- if (link_enc)
- link_enc->funcs->setup(
- link_enc,
- pipe_ctx->stream->signal);
-
- dc->hwss.enable_stream(pipe_ctx);
-
- /* Set DSC PPS SDP (AKA "info frames") */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal)) {
- dp_set_dsc_on_rx(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true, true);
- }
- }
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- dc_link_allocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- is_dp_128b_132b_signal(pipe_ctx))
- dc_link_update_sst_payload(pipe_ctx, true);
-
- dc->hwss.unblank_stream(pipe_ctx,
- &pipe_ctx->stream->link->cur_link_settings);
-
- if (stream->sink_patches.delay_ignore_msa > 0)
- msleep(stream->sink_patches.delay_ignore_msa);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- enable_stream_features(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, false);
-#endif
-
- dc->hwss.enable_audio_stream(pipe_ctx);
-
- } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (is_dp_128b_132b_signal(pipe_ctx))
- fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
- }
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
- core_link_set_avmute(pipe_ctx, false);
- }
-}
-
-void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
-
- if (is_dp_128b_132b_signal(pipe_ctx))
- vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
-
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- if (pipe_ctx->stream->sink) {
- if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
- pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
- pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
- }
- }
-
- if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- return;
-
- if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) {
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- core_link_set_avmute(pipe_ctx, true);
- }
-
- dc->hwss.disable_audio_stream(pipe_ctx);
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, true);
-#endif
- dc->hwss.blank_stream(pipe_ctx);
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- deallocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- is_dp_128b_132b_signal(pipe_ctx))
- dc_link_update_sst_payload(pipe_ctx, false);
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
- struct ext_hdmi_settings settings = {0};
- enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
-
- unsigned short masked_chip_caps = link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- //Need to inform the sink that it is going to use legacy HDMI mode.
- dal_ddc_service_write_scdc_data(
- link->ddc,
- 165000, //vbios only handles 165 MHz.
- false);
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
- /* DP159, Retimer settings */
- if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
- write_i2c_retimer_setting(pipe_ctx,
- false, false, &settings);
- else
- write_i2c_default_retimer_setting(pipe_ctx,
- false, false);
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
- /* PI3EQX1204, Redriver settings */
- write_i2c_redriver_setting(pipe_ctx, false);
- }
- }
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- !is_dp_128b_132b_signal(pipe_ctx)) {
-
- /* In DP1.x SST mode, our encoder will go to TPS1
- * when link is on but stream is off.
- * Disabling the link before the stream avoids exposing the TPS1
- * pattern during the disable sequence, which would confuse some
- * receivers' state machines.
- * In DP2 or MST mode, our encoder will stay video active
- */
- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- dc->hwss.disable_stream(pipe_ctx);
- } else {
- dc->hwss.disable_stream(pipe_ctx);
- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- }
-
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, false);
- }
- if (is_dp_128b_132b_signal(pipe_ctx)) {
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
- pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
- }
-
- if (vpg && vpg->funcs->vpg_powerdown)
- vpg->funcs->vpg_powerdown(vpg);
-}
-
-void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
- return;
-
- dc->hwss.set_avmute(pipe_ctx, enable);
-}
-
-/**
- * dc_link_enable_hpd_filter:
- * If enable is true, programs HPD filter on associated HPD line using
- * delay_on_disconnect/delay_on_connect values dependent on
- * link->connector_signal
- *
- * If enable is false, programs HPD filter on associated HPD line with no
- * delays on connect or disconnect
- *
- * @link: pointer to the dc link
- * @enable: boolean specifying whether to enable the hpd filter
- */
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
-{
- struct gpio *hpd;
-
- if (enable) {
- link->is_hpd_filter_disabled = false;
- program_hpd_filter(link);
- } else {
- link->is_hpd_filter_disabled = true;
- /* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
-
- if (!hpd)
- return;
-
- /* Setup HPD filtering */
- if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
- struct gpio_hpd_config config;
-
- config.delay_on_connect = 0;
- config.delay_on_disconnect = 0;
-
- dal_irq_setup_hpd_filter(hpd, &config);
-
- dal_gpio_close(hpd);
- } else {
- ASSERT_CRITICAL(false);
- }
- /* Release HPD handle */
- dal_gpio_destroy_irq(&hpd);
- }
-}
-
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
-{
-
- int i;
- struct link_resource link_res;
-
- for (i = 0; i < dc->link_count; i++)
- if (dc->links[i] == link)
- break;
-
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- dc_link_get_cur_link_res(link, &link_res);
- dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings);
-}
-
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
- struct dc_link_settings store_settings = *link_setting;
-
- link->preferred_link_setting = store_settings;
-
- /* Retrain with preferred link settings only relevant for
- * DP signal type
- * Check for non-DP signal or if passive dongle present
- */
- if (!dc_is_dp_signal(link->connector_signal) ||
- link->dongle_max_pix_clk > 0)
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->link) {
- if (pipe->stream->link == link) {
- link_stream = pipe->stream;
- break;
- }
- }
- }
-
- /* Stream not found */
- if (i == MAX_PIPES)
- return;
-
- /* Cannot retrain link if backend is off */
- if (link_stream->dpms_off)
- return;
-
- if (decide_link_settings(link_stream, &store_settings))
- dp_retrain_link_dp_test(link, &store_settings, false);
-}
-
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain)
-{
- if (lt_overrides != NULL)
- link->preferred_training_settings = *lt_overrides;
- else
- memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings));
-
- if (link_setting != NULL) {
- link->preferred_link_setting = *link_setting;
- if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
- /* TODO: add dc update for acquiring link res */
- skip_immediate_retrain = true;
- } else {
- link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
- link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
- }
-
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->type == dc_connection_mst_branch)
- dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);
-
- /* Retrain now, or wait until next stream update to apply */
- if (skip_immediate_retrain == false)
- dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
-}
-
-void dc_link_enable_hpd(const struct dc_link *link)
-{
- dc_link_dp_enable_hpd(link);
-}
-
-void dc_link_disable_hpd(const struct dc_link *link)
-{
- dc_link_dp_disable_hpd(link);
-}
-
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- test_pattern_color_space,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
-}
-
-uint32_t dc_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_setting)
-{
- uint32_t total_data_bw_efficiency_x10000 = 0;
- uint32_t link_rate_per_lane_kbps = 0;
-
- switch (dp_get_link_encoding_format(link_setting)) {
- case DP_8b_10b_ENCODING:
- /* For 8b/10b encoding:
- * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
- * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported.
- */
- link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
- total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
- if (dc_link_should_enable_fec(link)) {
- total_data_bw_efficiency_x10000 /= 100;
- total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
- }
- break;
- case DP_128b_132b_ENCODING:
- /* For 128b/132b encoding:
- * link rate is defined in the unit of 10mbps per lane.
- * total data bandwidth efficiency is always 96.71%.
- */
- link_rate_per_lane_kbps = link_setting->link_rate * 10000;
- total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
- break;
- default:
- break;
- }
-
- /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
- return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
-}
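As a sanity check on the efficiency comments above, the same arithmetic can be run standalone for 4-lane HBR3 (DPCD link-rate code 30) with FEC enabled. The constant values below mirror what the driver's macros are assumed to be (27 MHz link-rate reference, 10 bits per DP byte, 80.00% 8b/10b efficiency, 97% FEC factor); treat them as illustrative rather than authoritative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed macro values, for illustration only. */
	const uint32_t link_rate_ref_khz = 27000;  /* LINK_RATE_REF_FREQ_IN_KHZ */
	const uint32_t bits_per_dp_byte  = 10;     /* BITS_PER_DP_BYTE */
	const uint32_t eff_8b10b_x10000  = 8000;   /* DATA_EFFICIENCY_8b_10b_x10000 */
	const uint32_t fec_eff_x100      = 97;     /* DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 */

	uint32_t link_rate = 30;   /* HBR3: 30 * 0.27 Gbps = 8.1 Gbps per lane */
	uint32_t lane_count = 4;

	uint32_t rate_per_lane_kbps = link_rate * link_rate_ref_khz * bits_per_dp_byte;
	uint32_t eff_x10000 = eff_8b10b_x10000 / 100 * fec_eff_x100;

	/* Same operation order as dc_link_bandwidth_kbps() above. */
	uint32_t bw_kbps = rate_per_lane_kbps * lane_count / 10000 * eff_x10000;

	printf("effective bandwidth = %u kbps (~%.2f Gbps)\n",
	       bw_kbps, bw_kbps / 1000000.0);
	return 0;
}

With these numbers the raw 32.4 Gbps link yields roughly 25.14 Gbps of effective bandwidth after the 8b/10b and FEC overheads.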
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link)
-{
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- return &link->preferred_link_setting;
- return &link->verified_link_cap;
-}
-
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link)
-{
- dp_overwrite_extended_receiver_cap(link);
-}
-
-bool dc_link_is_fec_supported(const struct dc_link *link)
-{
- /* TODO - use asic cap instead of link_enc->features
- * we no longer know which link enc to use for this link before commit
- */
- struct link_encoder *link_enc = NULL;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- return (dc_is_dp_signal(link->connector_signal) && link_enc &&
- link_enc->features.fec_supported &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
- !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
-}
-
-bool dc_link_should_enable_fec(const struct dc_link *link)
-{
- bool force_disable = false;
-
- if (link->fec_state == dc_link_fec_enabled)
- force_disable = false;
- else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
- link->local_sink &&
- link->local_sink->edid_caps.panel_patch.disable_fec)
- force_disable = true;
- else if (link->connector_signal == SIGNAL_TYPE_EDP
- && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
- dsc_support.DSC_SUPPORT == false
- || link->panel_config.dsc.disable_dsc_edp
- || !link->dc->caps.edp_dsc_support))
- force_disable = true;
-
- return !force_disable && dc_link_is_fec_supported(link);
-}
-
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (timing->flags.DSC)
- return dc_dsc_stream_bandwidth_in_kbps(timing,
- timing->dsc_cfg.bits_per_pixel,
- timing->dsc_cfg.num_slices_h,
- timing->dsc_cfg.is_dp);
-#endif /* CONFIG_DRM_AMD_DC_DCN */
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- ASSERT(bits_per_channel != 0);
- bits_per_channel = 8;
- break;
- }
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /* only Y-only formats need 1/3 the bandwidth of RGB; all others carry three components */
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- return kbps;
-
-}
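The uncompressed branch above is simply pixel clock times bits per component times component count, with chroma subsampling applied afterwards. A hedged standalone restatement of that arithmetic (the 594 MHz pixel clock is only an example figure, not tied to any particular mode in the driver):

#include <stdint.h>
#include <stdio.h>

static uint32_t bw_kbps_from_timing(uint32_t pix_clk_100hz, uint32_t bpc,
				    int ycbcr420, int ycbcr422, int y_only)
{
	uint32_t kbps = pix_clk_100hz / 10;   /* 100 Hz units -> kHz pixel clock */

	kbps *= bpc;                          /* bits per component */
	if (!y_only) {
		kbps *= 3;                    /* three components for RGB / 4:4:4 */
		if (ycbcr420)
			kbps /= 2;            /* 4:2:0 is half the 4:4:4 rate */
		else if (ycbcr422)
			kbps = kbps * 2 / 3;  /* 4:2:2 carries 2/3 of 4:4:4 */
	}
	return kbps;
}

int main(void)
{
	/* Example: 594 MHz pixel clock, 8 bpc, RGB (no subsampling). */
	printf("%u kbps\n", bw_kbps_from_timing(5940000, 8, 0, 0, 0));
	return 0;
}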
-
-void dc_link_get_cur_link_res(const struct dc_link *link,
- struct link_resource *link_res)
-{
- int i;
- struct pipe_ctx *pipe = NULL;
-
- memset(link_res, 0, sizeof(*link_res));
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
- if (pipe->stream->link == link) {
- *link_res = pipe->link_res;
- break;
- }
- }
- }
-
-}
-
-/**
- * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state
- * @dc: pointer to dc of the dm calling this
- * @map: a dc link resource snapshot defined internally to dc.
- *
- * DM needs to capture a snapshot of current link resource allocation mapping
- * and store it in its persistent storage.
- *
- * Some link resources are allocated on a first-come, first-served basis.
- * The allocation mapping therefore depends on the original hotplug order,
- * and this information is lost the next time the driver is loaded. The
- * snapshot is used to restore link resources to their previous state so
- * the user gets consistent link capability allocation across reboots.
- *
- * Return: none (void function)
- *
- */
-void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
-{
- struct dc_link *link;
- uint32_t i;
- uint32_t hpo_dp_recycle_map = 0;
-
- *map = 0;
-
- if (dc->caps.dp_hpo) {
- for (i = 0; i < dc->caps.max_links; i++) {
- link = dc->links[i];
- if (link->link_status.link_active &&
- dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
- dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
- /* hpo dp link encoder is considered recycled when RX reports 128b/132b
- * encoding capability but the current link doesn't use it.
- */
- hpo_dp_recycle_map |= (1 << i);
- }
- *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
- }
-}
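A DM-side usage sketch for this snapshot/restore pair follows. The persistence helpers and wrapper names are hypothetical; only dc_get_cur_link_res_map() and dc_restore_link_res_map() (defined below) come from this file:

#include <stdint.h>

struct dc;  /* opaque to the DM in this sketch */

/* Provided by dc (this file). */
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);

/* Hypothetical DM persistence helpers, e.g. backed by a registry key. */
void dm_store_persistent_u32(const char *key, uint32_t val);
uint32_t dm_load_persistent_u32(const char *key);

/* Capture the allocation snapshot before the driver is torn down. */
static void dm_save_link_res_snapshot(const struct dc *dc)
{
	uint32_t map = 0;

	dc_get_cur_link_res_map(dc, &map);
	dm_store_persistent_u32("dc_link_res_map", map);
}

/* Restore it after initial link detection, before the first stream commit. */
static void dm_restore_link_res_snapshot(const struct dc *dc)
{
	uint32_t map = dm_load_persistent_u32("dc_link_res_map");

	dc_restore_link_res_map(dc, &map);
}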
-
-/**
- * dc_restore_link_res_map() - restore link resource allocation state from a snapshot
- * @dc: pointer to dc of the dm calling this
- * @map: a dc link resource snapshot defined internally to dc.
- *
- * DM needs to call this function after initial link detection on boot and
- * before first commit streams to restore link resource allocation state
- * from previous boot session.
- *
- * Some link resources are allocated on a first-come, first-served basis.
- * The allocation mapping therefore depends on the original hotplug order,
- * and this information is lost the next time the driver is loaded. The
- * snapshot is used to restore link resources to their previous state so
- * the user gets consistent link capability allocation across reboots.
- *
- * Return: none (void function)
- *
- */
-void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
-{
- struct dc_link *link;
- uint32_t i;
- unsigned int available_hpo_dp_count;
- uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
- >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
-
- if (dc->caps.dp_hpo) {
- available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
- /* remove excess 128b/132b encoding support for not recycled links */
- for (i = 0; i < dc->caps.max_links; i++) {
- if ((hpo_dp_recycle_map & (1 << i)) == 0) {
- link = dc->links[i];
- if (link->type != dc_connection_none &&
- dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
- if (available_hpo_dp_count > 0)
- available_hpo_dp_count--;
- else
- /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
- link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
- }
- }
- }
- /* remove excess 128b/132b encoding support for recycled links */
- for (i = 0; i < dc->caps.max_links; i++) {
- if ((hpo_dp_recycle_map & (1 << i)) != 0) {
- link = dc->links[i];
- if (link->type != dc_connection_none &&
- dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
- if (available_hpo_dp_count > 0)
- available_hpo_dp_count--;
- else
- /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
- link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
- }
- }
- }
- }
-}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
deleted file mode 100644
index dedd1246ce58..000000000000
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ /dev/null
@@ -1,7553 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- */
-#include "dm_services.h"
-#include "dc.h"
-#include "dc_link_dp.h"
-#include "dm_helpers.h"
-#include "opp.h"
-#include "dsc.h"
-#include "clk_mgr.h"
-#include "resource.h"
-
-#include "inc/core_types.h"
-#include "link_hwss.h"
-#include "dc_link_ddc.h"
-#include "core_status.h"
-#include "dpcd_defs.h"
-#include "dc_dmub_srv.h"
-#include "dce/dmub_hw_lock_mgr.h"
-#include "inc/dc_link_dpia.h"
-#include "inc/link_enc_cfg.h"
-#include "link/link_dp_trace.h"
-
-/*Travis*/
-static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
-/*Nutmeg*/
-static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
-
-#define DC_LOGGER \
- link->ctx->logger
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
-
-#include "link_dpcd.h"
-
-#ifndef MAX
-#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
-#endif
-#ifndef MIN
-#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-#endif
-
- /* maximum pre emphasis level allowed for each voltage swing level*/
- static const enum dc_pre_emphasis
- voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3,
- PRE_EMPHASIS_LEVEL2,
- PRE_EMPHASIS_LEVEL1,
- PRE_EMPHASIS_DISABLED };
-
-enum {
- POST_LT_ADJ_REQ_LIMIT = 6,
- POST_LT_ADJ_REQ_TIMEOUT = 200
-};
-
-struct dp_lt_fallback_entry {
- enum dc_lane_count lane_count;
- enum dc_link_rate link_rate;
-};
-
-static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
- /* This link training fallback array is ordered by
- * link bandwidth from highest to lowest.
- * The DP spec makes it a normative policy to always
- * choose the next highest link bandwidth during
- * link training fallback.
- */
- {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
- {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
- {LANE_COUNT_TWO, LINK_RATE_UHBR20},
- {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
- {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
- {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
- {LANE_COUNT_ONE, LINK_RATE_UHBR20},
- {LANE_COUNT_TWO, LINK_RATE_UHBR10},
- {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
- {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
- {LANE_COUNT_TWO, LINK_RATE_HIGH3},
- {LANE_COUNT_ONE, LINK_RATE_UHBR10},
- {LANE_COUNT_TWO, LINK_RATE_HIGH2},
- {LANE_COUNT_FOUR, LINK_RATE_HIGH},
- {LANE_COUNT_ONE, LINK_RATE_HIGH3},
- {LANE_COUNT_FOUR, LINK_RATE_LOW},
- {LANE_COUNT_ONE, LINK_RATE_HIGH2},
- {LANE_COUNT_TWO, LINK_RATE_HIGH},
- {LANE_COUNT_TWO, LINK_RATE_LOW},
- {LANE_COUNT_ONE, LINK_RATE_HIGH},
- {LANE_COUNT_ONE, LINK_RATE_LOW},
-};
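The comment above implies the fallback policy is essentially "walk this table downwards": locate the entry that just failed and try the next one. A standalone sketch of that lookup, using an abbreviated copy of the 8b/10b portion of the table with plain integers in place of the enums (rate codes: 30 = HBR3, 20 = HBR2, 10 = HBR, 6 = RBR); this is an illustration of the ordering, not the driver's actual fallback routine:

#include <stddef.h>
#include <stdio.h>

struct fallback { int lanes; int rate; };

/* Abbreviated, illustrative copy of the ordering above (UHBR rows omitted). */
static const struct fallback fallbacks[] = {
	{4, 30}, {4, 20}, {2, 30}, {2, 20}, {4, 10}, {1, 30},
	{4, 6}, {1, 20}, {2, 10}, {2, 6}, {1, 10}, {1, 6},
};

/* Return the next (lower-bandwidth) entry after the failed one, or NULL. */
static const struct fallback *next_fallback(int lanes, int rate)
{
	size_t i;

	for (i = 0; i + 1 < sizeof(fallbacks) / sizeof(fallbacks[0]); i++)
		if (fallbacks[i].lanes == lanes && fallbacks[i].rate == rate)
			return &fallbacks[i + 1];
	return NULL;
}

int main(void)
{
	const struct fallback *next = next_fallback(4, 30); /* 4-lane HBR3 failed */

	if (next)
		printf("next attempt: %d lane(s), rate code %d\n",
		       next->lanes, next->rate);
	return 0;
}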
-
-static const struct dc_link_settings fail_safe_link_settings = {
- .lane_count = LANE_COUNT_ONE,
- .link_rate = LINK_RATE_LOW,
- .link_spread = LINK_SPREAD_DISABLED,
-};
-
-static bool decide_fallback_link_setting(
- struct dc_link *link,
- struct dc_link_settings *max,
- struct dc_link_settings *cur,
- enum link_training_result training_result);
-static void maximize_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
-static void override_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
-
-static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
- const struct dc_link_settings *link_settings)
-{
- union training_aux_rd_interval training_rd_interval;
- uint32_t wait_in_micro_secs = 100;
-
- memset(&training_rd_interval, 0, sizeof(training_rd_interval));
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
- }
-
- return wait_in_micro_secs;
-}
-
-static uint32_t get_eq_training_aux_rd_interval(
- struct dc_link *link,
- const struct dc_link_settings *link_settings)
-{
- union training_aux_rd_interval training_rd_interval;
-
- memset(&training_rd_interval, 0, sizeof(training_rd_interval));
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- core_link_read_dpcd(
- link,
- DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- }
-
- switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
- case 0: return 400;
- case 1: return 4000;
- case 2: return 8000;
- case 3: return 12000;
- case 4: return 16000;
- case 5: return 32000;
- case 6: return 64000;
- default: return 400;
- }
-}
-
-void dp_wait_for_training_aux_rd_interval(
- struct dc_link *link,
- uint32_t wait_in_micro_secs)
-{
- if (wait_in_micro_secs > 1000)
- msleep(wait_in_micro_secs/1000);
- else
- udelay(wait_in_micro_secs);
-
- DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
- __func__,
- wait_in_micro_secs);
-}
-
-enum dpcd_training_patterns
- dc_dp_training_pattern_to_dpcd_training_pattern(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern)
-{
- enum dpcd_training_patterns dpcd_tr_pattern =
- DPCD_TRAINING_PATTERN_VIDEOIDLE;
-
- switch (pattern) {
- case DP_TRAINING_PATTERN_SEQUENCE_1:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_2:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_3:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_4:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
- break;
- case DP_128b_132b_TPS1:
- dpcd_tr_pattern = DPCD_128b_132b_TPS1;
- break;
- case DP_128b_132b_TPS2:
- dpcd_tr_pattern = DPCD_128b_132b_TPS2;
- break;
- case DP_128b_132b_TPS2_CDS:
- dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
- break;
- case DP_TRAINING_PATTERN_VIDEOIDLE:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
- break;
- default:
- ASSERT(0);
- DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
- __func__, pattern);
- break;
- }
-
- return dpcd_tr_pattern;
-}
-
-static void dpcd_set_training_pattern(
- struct dc_link *link,
- enum dc_dp_training_pattern training_pattern)
-{
- union dpcd_training_pattern dpcd_pattern = {0};
-
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- dc_dp_training_pattern_to_dpcd_training_pattern(
- link, training_pattern);
-
- core_link_write_dpcd(
- link,
- DP_TRAINING_PATTERN_SET,
- &dpcd_pattern.raw,
- 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
- __func__,
- DP_TRAINING_PATTERN_SET,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-}
-
-static enum dc_dp_training_pattern decide_cr_training_pattern(
- const struct dc_link_settings *link_settings)
-{
- switch (dp_get_link_encoding_format(link_settings)) {
- case DP_8b_10b_ENCODING:
- default:
- return DP_TRAINING_PATTERN_SEQUENCE_1;
- case DP_128b_132b_ENCODING:
- return DP_128b_132b_TPS1;
- }
-}
-
-static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
- const struct dc_link_settings *link_settings)
-{
- struct link_encoder *link_enc;
- struct encoder_feature_support *enc_caps;
- struct dpcd_caps *rx_caps = &link->dpcd_caps;
- enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- enc_caps = &link_enc->features;
-
- switch (dp_get_link_encoding_format(link_settings)) {
- case DP_8b_10b_ENCODING:
- if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
- rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
- pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
- else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
- rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
- pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
- else
- pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- break;
- case DP_128b_132b_ENCODING:
- pattern = DP_128b_132b_TPS2;
- break;
- default:
- pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- break;
- }
- return pattern;
-}
-
-static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
-{
- uint8_t link_rate = 0;
- enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings);
-
- if (encoding == DP_128b_132b_ENCODING)
- switch (link_settings->link_rate) {
- case LINK_RATE_UHBR10:
- link_rate = 0x1;
- break;
- case LINK_RATE_UHBR20:
- link_rate = 0x2;
- break;
- case LINK_RATE_UHBR13_5:
- link_rate = 0x4;
- break;
- default:
- link_rate = 0;
- break;
- }
- else if (encoding == DP_8b_10b_ENCODING)
- link_rate = (uint8_t) link_settings->link_rate;
- else
- link_rate = 0;
-
- return link_rate;
-}
-
-static void dp_fixed_vs_pe_read_lane_adjust(
- struct dc_link *link,
- union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
-{
- const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
- const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint32_t vendor_lttpr_read_address = 0xF0053;
- uint8_t dprx_vs = 0;
- uint8_t dprx_pe = 0;
- uint8_t lane;
-
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- vendor_lttpr_read_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
- /* W/A to read lane settings requested by DPRX */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_vs,
- 1);
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_pe,
- 1);
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
- dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3;
- }
-}
-
-static void dp_fixed_vs_pe_set_retimer_lane_settings(
- struct dc_link *link,
- const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
- uint8_t lane_count)
-{
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint8_t lane = 0;
-
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Force LTTPR to output desired VS and PE */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-}
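Both fixed-VS/PE helpers above shuttle per-lane two-bit values (voltage swing or pre-emphasis, levels 0..3) through a single byte, with lane 0 in the low bits. A standalone sketch of that packing and unpacking, with purely illustrative lane values:

#include <stdint.h>
#include <stdio.h>

#define MAX_LANES 4

int main(void)
{
	uint8_t vs[MAX_LANES] = {1, 2, 0, 3};  /* illustrative per-lane levels (0..3) */
	uint8_t unpacked[MAX_LANES];
	uint8_t packed = 0;
	int lane;

	/* Pack: two bits per lane, lane 0 in bits [1:0]. */
	for (lane = 0; lane < MAX_LANES; lane++)
		packed |= (vs[lane] & 0x3) << (2 * lane);

	/* Unpack: mirrors the read helper above. */
	for (lane = 0; lane < MAX_LANES; lane++)
		unpacked[lane] = (packed >> (2 * lane)) & 0x3;

	printf("packed byte = 0x%02X, lane0..3 = %u %u %u %u\n",
	       packed, unpacked[0], unpacked[1], unpacked[2], unpacked[3]);
	return 0;
}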
-
-enum dc_status dpcd_set_link_settings(
- struct dc_link *link,
- const struct link_training_settings *lt_settings)
-{
- uint8_t rate;
- enum dc_status status;
-
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
-
- downspread.raw = (uint8_t)
- (lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- lt_settings->link_settings.use_link_rate_set == true) {
- rate = 0;
- /* WA for some MUX chips that will power down with eDP and lose supported
- * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure
- * MUX chip gets link rate set back before link training.
- */
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- uint8_t supported_link_rates[16];
-
- core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
- supported_link_rates, sizeof(supported_link_rates));
- }
- status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
- status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
- &lt_settings->link_settings.link_rate_set, 1);
- } else {
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
- }
-
- if (rate) {
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_RATE_SET,
- lt_settings->link_settings.link_rate_set,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
- }
-
- return status;
-}
-
-uint8_t dc_dp_initialize_scrambling_data_symbols(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern)
-{
- uint8_t disable_scrabled_data_symbols = 0;
-
- switch (pattern) {
- case DP_TRAINING_PATTERN_SEQUENCE_1:
- case DP_TRAINING_PATTERN_SEQUENCE_2:
- case DP_TRAINING_PATTERN_SEQUENCE_3:
- disable_scrabled_data_symbols = 1;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_4:
- case DP_128b_132b_TPS1:
- case DP_128b_132b_TPS2:
- disable_scrabled_data_symbols = 0;
- break;
- default:
- ASSERT(0);
- DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
- __func__, pattern);
- break;
- }
- return disable_scrabled_data_symbols;
-}
-
-static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
-{
- return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
-}
-
-static void dpcd_set_lt_pattern_and_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *lt_settings,
- enum dc_dp_training_pattern pattern,
- uint32_t offset)
-{
- uint32_t dpcd_base_lt_offset;
-
- uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = {0};
- uint32_t size_in_bytes;
- bool edp_workaround = false; /* TODO link_prop.INTERNAL */
- dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
-
- if (is_repeater(lt_settings, offset))
- dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- /*****************************************************************
- * DpcdAddress_TrainingPatternSet
- *****************************************************************/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
-
- dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
- dc_dp_initialize_scrambling_data_symbols(link, pattern);
-
- dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
- = dpcd_pattern.raw;
-
- if (is_repeater(lt_settings, offset)) {
- DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
- __func__,
- dpcd_base_lt_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
- }
-
- /* concatenate everything into one buffer*/
- size_in_bytes = lt_settings->link_settings.lane_count *
- sizeof(lt_settings->dpcd_lane_settings[0]);
-
- // 0x00103 - 0x00102
- memmove(
- &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
- lt_settings->dpcd_lane_settings,
- size_in_bytes);
-
- if (is_repeater(lt_settings, offset)) {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
- } else {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
- }
- if (edp_workaround) {
- /* for eDP write in 2 parts because the 5-byte burst is
- * causing issues on some eDP panels (EPR#366724)
- */
- core_link_write_dpcd(
- link,
- DP_TRAINING_PATTERN_SET,
- &dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw));
-
- core_link_write_dpcd(
- link,
- DP_TRAINING_LANE0_SET,
- (uint8_t *)(lt_settings->dpcd_lane_settings),
- size_in_bytes);
-
- } else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING) {
- core_link_write_dpcd(
- link,
- dpcd_base_lt_offset,
- dpcd_lt_buffer,
- sizeof(dpcd_lt_buffer));
- } else
- /* write it all in (1 + number-of-lanes)-byte burst*/
- core_link_write_dpcd(
- link,
- dpcd_base_lt_offset,
- dpcd_lt_buffer,
- size_in_bytes + sizeof(dpcd_pattern.raw));
-}
-
-bool dp_is_cr_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- uint32_t lane;
- /* are the LANEx_CR_DONE bits set on all lanes? */
- for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
- if (!dpcd_lane_status[lane].bits.CR_DONE_0)
- return false;
- }
- return true;
-}
-
-bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- bool done = true;
- uint32_t lane;
- for (lane = 0; lane < (uint32_t)(ln_count); lane++)
- if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
- done = false;
- return done;
-}
-
-bool dp_is_symbol_locked(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- bool locked = true;
- uint32_t lane;
- for (lane = 0; lane < (uint32_t)(ln_count); lane++)
- if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0)
- locked = false;
- return locked;
-}
-
-bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
-{
- return align_status.bits.INTERLANE_ALIGN_DONE == 1;
-}
-
-void dp_hw_to_dpcd_lane_settings(
- const struct link_training_settings *lt_settings,
- const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[])
-{
- uint8_t lane = 0;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING) {
- dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
- dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
- (hw_lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (hw_lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING) {
- dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
- hw_lane_settings[lane].FFE_PRESET.settings.level;
- }
- }
-}
-
-void dp_decide_lane_settings(
- const struct link_training_settings *lt_settings,
- const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[])
-{
- uint32_t lane;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING) {
- hw_lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)(ln_adjust[lane].bits.
- VOLTAGE_SWING_LANE);
- hw_lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)(ln_adjust[lane].bits.
- PRE_EMPHASIS_LANE);
- }
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING) {
- hw_lane_settings[lane].FFE_PRESET.raw =
- ln_adjust[lane].tx_ffe.PRESET_VALUE;
- }
- }
- dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
-
- if (lt_settings->disallow_per_lane_settings) {
- /* find the maximum of the requested settings across all lanes
-  * and apply that maximum to every lane
-  */
- maximize_lane_settings(lt_settings, hw_lane_settings);
- override_lane_settings(lt_settings, hw_lane_settings);
-
- if (lt_settings->always_match_dpcd_with_hw_lane_settings)
- dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
- }
-
-}
-
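- /* Lane status/adjust fields are packed two lanes per DPCD byte:
-  * even lanes in the low nibble, odd lanes in the high nibble.
-  */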
-static uint8_t get_nibble_at_index(const uint8_t *buf,
- uint32_t index)
-{
- uint8_t nibble;
- nibble = buf[index / 2];
-
- if (index % 2)
- nibble >>= 4;
- else
- nibble &= 0x0F;
-
- return nibble;
-}
-
-static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
- enum dc_voltage_swing voltage)
-{
- enum dc_pre_emphasis pre_emphasis;
- pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
-
- if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
- pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
-
- return pre_emphasis;
-
-}
-
-static void maximize_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
-{
- uint32_t lane;
- struct dc_lane_settings max_requested;
-
- max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
- max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
- max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
-
- /* Determine the maximum of the requested settings */
- for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
- if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
- max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
-
- if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
- max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
- if (lane_settings[lane].FFE_PRESET.settings.level >
- max_requested.FFE_PRESET.settings.level)
- max_requested.FFE_PRESET.settings.level =
- lane_settings[lane].FFE_PRESET.settings.level;
- }
-
- /* make sure the requested settings are
-  * not higher than the maximum settings
-  */
- if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
- max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
-
- if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
- max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
- if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
- max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
-
- /* make sure the pre-emphasis matches the voltage swing*/
- if (max_requested.PRE_EMPHASIS >
- get_max_pre_emphasis_for_voltage_swing(
- max_requested.VOLTAGE_SWING))
- max_requested.PRE_EMPHASIS =
- get_max_pre_emphasis_for_voltage_swing(
- max_requested.VOLTAGE_SWING);
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
- lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
- lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
- }
-}
-
-static void override_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
-{
- uint32_t lane;
-
- if (lt_settings->voltage_swing == NULL &&
- lt_settings->pre_emphasis == NULL &&
- lt_settings->ffe_preset == NULL &&
- lt_settings->post_cursor2 == NULL)
- return;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- if (lt_settings->voltage_swing)
- lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
- if (lt_settings->pre_emphasis)
- lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
- if (lt_settings->post_cursor2)
- lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
- if (lt_settings->ffe_preset)
- lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
- }
-}
-
-enum dc_status dp_get_lane_status_and_lane_adjust(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- union lane_status ln_status[LANE_COUNT_DP_MAX],
- union lane_align_status_updated *ln_align,
- union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- uint32_t offset)
-{
- unsigned int lane01_status_address = DP_LANE0_1_STATUS;
- uint8_t lane_adjust_offset = 4;
- unsigned int lane01_adjust_address;
- uint8_t dpcd_buf[6] = {0};
- uint32_t lane;
- enum dc_status status;
-
- if (is_repeater(link_training_setting, offset)) {
- lane01_status_address =
- DP_LANE0_1_STATUS_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- lane_adjust_offset = 3;
- }
-
- status = core_link_read_dpcd(
- link,
- lane01_status_address,
- (uint8_t *)(dpcd_buf),
- sizeof(dpcd_buf));
-
- if (status != DC_OK) {
- DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X,"
- " keep current lane status and lane adjust unchanged",
- __func__,
- lane01_status_address);
- return status;
- }
-
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->link_settings.lane_count);
- lane++) {
-
- ln_status[lane].raw =
- get_nibble_at_index(&dpcd_buf[0], lane);
- ln_adjust[lane].raw =
- get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
- }
-
- ln_align->raw = dpcd_buf[2];
-
- if (is_repeater(link_training_setting, offset)) {
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
- __func__,
- offset,
- lane01_status_address, dpcd_buf[0],
- lane01_status_address + 1, dpcd_buf[1]);
-
- lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
- __func__,
- offset,
- lane01_adjust_address,
- dpcd_buf[lane_adjust_offset],
- lane01_adjust_address + 1,
- dpcd_buf[lane_adjust_offset + 1]);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
- __func__,
- lane01_status_address, dpcd_buf[0],
- lane01_status_address + 1, dpcd_buf[1]);
-
- lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
-
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
- __func__,
- lane01_adjust_address,
- dpcd_buf[lane_adjust_offset],
- lane01_adjust_address + 1,
- dpcd_buf[lane_adjust_offset + 1]);
- }
-
- return status;
-}
-
-static enum dc_status dpcd_128b_132b_set_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting)
-{
- enum dc_status status = core_link_write_dpcd(link,
- DP_TRAINING_LANE0_SET,
- (uint8_t *)(link_training_setting->dpcd_lane_settings),
- sizeof(link_training_setting->dpcd_lane_settings));
-
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- DP_TRAINING_LANE0_SET,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- return status;
-}
-
-
-enum dc_status dpcd_set_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- uint32_t offset)
-{
- unsigned int lane0_set_address;
- enum dc_status status;
-
- lane0_set_address = DP_TRAINING_LANE0_SET;
-
- if (is_repeater(link_training_setting, offset))
- lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- status = core_link_write_dpcd(link,
- lane0_set_address,
- (uint8_t *)(link_training_setting->dpcd_lane_settings),
- link_training_setting->link_settings.lane_count);
-
- if (is_repeater(link_training_setting, offset)) {
- DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
- " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
-
- } else {
- DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
- }
-
- return status;
-}
-
-bool dp_is_max_vs_reached(
- const struct link_training_settings *lt_settings)
-{
- uint32_t lane;
- for (lane = 0; lane <
- (uint32_t)(lt_settings->link_settings.lane_count);
- lane++) {
- if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
- == VOLTAGE_SWING_MAX_LEVEL)
- return true;
- }
- return false;
-
-}
-
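- /* After training succeeds, keep honoring the sink's drive setting
-  * requests while POST_LT_ADJ_REQ_IN_PROGRESS is set, bounded by
-  * POST_LT_ADJ_REQ_LIMIT rounds of POST_LT_ADJ_REQ_TIMEOUT polls.
-  */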
-static bool perform_post_lt_adj_req_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum dc_lane_count lane_count =
- lt_settings->link_settings.lane_count;
-
- uint32_t adj_req_count;
- uint32_t adj_req_timer;
- bool req_drv_setting_changed;
- uint32_t lane;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- req_drv_setting_changed = false;
- for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
- adj_req_count++) {
-
- req_drv_setting_changed = false;
-
- for (adj_req_timer = 0;
- adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
- adj_req_timer++) {
-
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- DPRX);
-
- if (dpcd_lane_status_updated.bits.
- POST_LT_ADJ_REQ_IN_PROGRESS == 0)
- return true;
-
- if (!dp_is_cr_done(lane_count, dpcd_lane_status))
- return false;
-
- if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) ||
- !dp_is_symbol_locked(lane_count, dpcd_lane_status) ||
- !dp_is_interlane_aligned(dpcd_lane_status_updated))
- return false;
-
- for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
-
- if (lt_settings->
- dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
- dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
- dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
-
- req_drv_setting_changed = true;
- break;
- }
- }
-
- if (req_drv_setting_changed) {
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
- dc_link_dp_set_drive_settings(link,
- link_res,
- lt_settings);
- break;
- }
-
- msleep(1);
- }
-
- if (!req_drv_setting_changed) {
- DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
- __func__);
-
- ASSERT(0);
- return true;
- }
- }
- DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
- __func__);
-
- ASSERT(0);
- return true;
-
-}
-
-/* Only used for channel equalization */
-uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
-{
- unsigned int aux_rd_interval_us = 400;
-
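- /* DPCD value 0 keeps the 400 us default; values 0x01-0x06 select
-  * 4/8/12/16/32/64 ms respectively, as mapped below.
-  */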
- switch (dpcd_aux_read_interval) {
- case 0x01:
- aux_rd_interval_us = 4000;
- break;
- case 0x02:
- aux_rd_interval_us = 8000;
- break;
- case 0x03:
- aux_rd_interval_us = 12000;
- break;
- case 0x04:
- aux_rd_interval_us = 16000;
- break;
- case 0x05:
- aux_rd_interval_us = 32000;
- break;
- case 0x06:
- aux_rd_interval_us = 64000;
- break;
- default:
- break;
- }
-
- return aux_rd_interval_us;
-}
-
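- /* Report which lane(s) failed clock recovery, checking lane 0 first. */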
-enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- enum link_training_result result = LINK_TRAINING_SUCCESS;
-
- if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE0;
- else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE1;
- else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE23;
- else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE23;
- return result;
-}
-
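- /* Channel EQ: transmit the EQ pattern and keep applying the sink's
-  * requested drive settings until CHANNEL_EQ_DONE, SYMBOL_LOCKED and
-  * INTERLANE_ALIGN_DONE are all reported, while CR must stay locked.
-  */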
-static enum link_training_result perform_channel_equalization_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- uint32_t offset)
-{
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
- tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
-  * but only for the first iteration
-  */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, offset);
- else
- dpcd_set_lane_settings(link, lt_settings, offset);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- if (is_repeater(lt_settings, offset))
- wait_time_microsec =
- dp_translate_training_aux_read_interval(
- link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink*/
-
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- offset);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status))
- return dpcd_lane_status[0].bits.CR_DONE_0 ?
- LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
- LINK_TRAINING_EQ_FAIL_CR;
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated))
- return LINK_TRAINING_SUCCESS;
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
-
- return LINK_TRAINING_EQ_FAIL_EQ;
-
-}
-
-static void start_clock_recovery_pattern_early(struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- uint32_t offset)
-{
- DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
- __func__);
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
- dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
- udelay(400);
-}
-
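- /* Clock recovery: transmit the CR pattern and follow the sink's
-  * ADJUST_REQUEST values until CR_DONE is set on every lane, giving up
-  * once max voltage swing is reached or the same settings are requested
-  * too many times in a row.
-  */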
-static enum link_training_result perform_clock_recovery_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- uint32_t offset)
-{
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
-
- /* The Synaptics MST hub can put link training into an infinite loop
-  * by continuously switching the VS between level 0 and level 1, so
-  * CR lock is only attempted for LINK_TRAINING_MAX_CR_RETRY iterations.
-  */
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings*/
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- offset);
-
- /* 2. update DPCD of the receiver*/
- if (!retry_count)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the first iteration. */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- offset);
- else
- dpcd_set_lane_settings(
- link,
- lt_settings,
- offset);
-
- /* 3. wait for the receiver to lock on */
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- offset);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status))
- return LINK_TRAINING_SUCCESS;
-
- /* 6. max VS reached*/
- if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING) &&
- dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings*/
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient*/
- if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
- lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
- lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
- dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- return dp_get_cr_failure(lane_count, dpcd_lane_status);
-}
-
-static inline enum link_training_result dp_transition_to_video_idle(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- enum link_training_result status)
-{
- union lane_count_set lane_count_set = {0};
-
- /* 4. mainlink output idle pattern*/
- dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- /*
- * 5. post training adjust if required
- * If the upstream DPTX and downstream DPRX both support TPS4,
- * TPS4 must be used instead of POST_LT_ADJ_REQ.
- */
- if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
- lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
- /* delay 5ms after Main Link output idle pattern and then check
- * DPCD 0202h.
- */
- if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
- msleep(5);
- status = dp_check_link_loss_status(link, lt_settings);
- }
- return status;
- }
-
- if (status == LINK_TRAINING_SUCCESS &&
- perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
- status = LINK_TRAINING_LQA_FAIL;
-
- lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
- core_link_write_dpcd(
- link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
-
- return status;
-}
-
-enum link_training_result dp_check_link_loss_status(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- union lane_status lane_status;
- uint8_t dpcd_buf[6] = {0};
- uint32_t lane;
-
- core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- (uint8_t *)(dpcd_buf),
- sizeof(dpcd_buf));
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /*
- * check lanes status
- */
- lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if channel equalization, clock recovery or symbol lock
-  * is lost on any lane, treat the link as dropped: the
-  * DP sink status has changed
-  */
- status = LINK_TRAINING_LINK_LOSS;
- break;
- }
- }
-
- return status;
-}
-
-static inline void decide_8b_10b_training_settings(
- struct dc_link *link,
- const struct dc_link_settings *link_setting,
- struct link_training_settings *lt_settings)
-{
- memset(lt_settings, '\0', sizeof(struct link_training_settings));
-
- /* Initialize link settings */
- lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;
- lt_settings->link_settings.link_rate_set = link_setting->link_rate_set;
- lt_settings->link_settings.link_rate = link_setting->link_rate;
- lt_settings->link_settings.lane_count = link_setting->lane_count;
- /* TODO hard coded to SS for now
- * lt_settings.link_settings.link_spread =
- * dal_display_path_is_ss_supported(
- * path_mode->display_path) ?
- * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
- * LINK_SPREAD_DISABLED;
- */
- lt_settings->link_settings.link_spread = link->dp_ss_off ?
- LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
- lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
- lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
- lt_settings->enhanced_framing = 1;
- lt_settings->should_set_fec_ready = true;
- lt_settings->disallow_per_lane_settings = true;
- lt_settings->always_match_dpcd_with_hw_lane_settings = true;
- lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
- dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-}
-
-static inline void decide_128b_132b_training_settings(struct dc_link *link,
- const struct dc_link_settings *link_settings,
- struct link_training_settings *lt_settings)
-{
- memset(lt_settings, 0, sizeof(*lt_settings));
-
- lt_settings->link_settings = *link_settings;
- /* TODO: should decide link spread when populating link_settings */
- lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
- LINK_SPREAD_05_DOWNSPREAD_30KHZ;
-
- lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
- lt_settings->eq_pattern_time = 2500;
- lt_settings->eq_wait_time_limit = 400000;
- lt_settings->eq_loop_count_limit = 20;
- lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
- lt_settings->cds_pattern_time = 2500;
- lt_settings->cds_wait_time_limit = (dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
- lt_settings->disallow_per_lane_settings = true;
- lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-}
-
-void dp_decide_training_settings(
- struct dc_link *link,
- const struct dc_link_settings *link_settings,
- struct link_training_settings *lt_settings)
-{
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- decide_8b_10b_training_settings(link, link_settings, lt_settings);
- else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING)
- decide_128b_132b_training_settings(link, link_settings, lt_settings);
-}
-
-static void override_training_settings(
- struct dc_link *link,
- const struct dc_link_training_overrides *overrides,
- struct link_training_settings *lt_settings)
-{
- uint32_t lane;
-
- /* Override link spread */
- if (!link->dp_ss_off && overrides->downspread != NULL)
- lt_settings->link_settings.link_spread = *overrides->downspread ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ
- : LINK_SPREAD_DISABLED;
-
- /* Override lane settings */
- if (overrides->voltage_swing != NULL)
- lt_settings->voltage_swing = overrides->voltage_swing;
- if (overrides->pre_emphasis != NULL)
- lt_settings->pre_emphasis = overrides->pre_emphasis;
- if (overrides->post_cursor2 != NULL)
- lt_settings->post_cursor2 = overrides->post_cursor2;
- if (overrides->ffe_preset != NULL)
- lt_settings->ffe_preset = overrides->ffe_preset;
- /* Override HW lane settings with BIOS forced values if present */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
- lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
- lt_settings->always_match_dpcd_with_hw_lane_settings = false;
- }
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->hw_lane_settings[lane].VOLTAGE_SWING =
- lt_settings->voltage_swing != NULL ?
- *lt_settings->voltage_swing :
- VOLTAGE_SWING_LEVEL0;
- lt_settings->hw_lane_settings[lane].PRE_EMPHASIS =
- lt_settings->pre_emphasis != NULL ?
- *lt_settings->pre_emphasis
- : PRE_EMPHASIS_DISABLED;
- lt_settings->hw_lane_settings[lane].POST_CURSOR2 =
- lt_settings->post_cursor2 != NULL ?
- *lt_settings->post_cursor2
- : POST_CURSOR2_DISABLED;
- }
-
- if (lt_settings->always_match_dpcd_with_hw_lane_settings)
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
- /* Initialize training timings */
- if (overrides->cr_pattern_time != NULL)
- lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
-
- if (overrides->eq_pattern_time != NULL)
- lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
-
- if (overrides->pattern_for_cr != NULL)
- lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
- if (overrides->pattern_for_eq != NULL)
- lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
-
- if (overrides->enhanced_framing != NULL)
- lt_settings->enhanced_framing = *overrides->enhanced_framing;
-
- if (link->preferred_training_settings.fec_enable != NULL)
- lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Check DP tunnel LTTPR mode debug option. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
- lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
- dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
-}
-
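- /* PHY_REPEATER_CNT is one-hot encoded: 0x80 means one repeater and
-  * each right shift adds one, down to 0x01 for eight repeaters.
-  */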
-uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
-{
- switch (lttpr_repeater_count) {
- case 0x80: // 1 lttpr repeater
- return 1;
- case 0x40: // 2 lttpr repeaters
- return 2;
- case 0x20: // 3 lttpr repeaters
- return 3;
- case 0x10: // 4 lttpr repeaters
- return 4;
- case 0x08: // 5 lttpr repeaters
- return 5;
- case 0x04: // 6 lttpr repeaters
- return 6;
- case 0x02: // 7 lttpr repeaters
- return 7;
- case 0x01: // 8 lttpr repeaters
- return 8;
- default:
- break;
- }
- return 0; // invalid value
-}
-
-static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
-{
- uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
-
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- return core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
-}
-
-static enum dc_status configure_lttpr_mode_non_transparent(
- struct dc_link *link,
- const struct link_training_settings *lt_settings)
-{
- /* AUX timeout is already set to extended.
-  * Reset/set the LTTPR mode to enable non-transparent mode.
-  */
- uint8_t repeater_cnt;
- uint32_t aux_interval_address;
- uint8_t repeater_id;
- enum dc_status result = DC_ERROR_UNEXPECTED;
- uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
-
- enum dp_link_encoding encoding = dp_get_link_encoding_format(&lt_settings->link_settings);
-
- if (encoding == DP_8b_10b_ENCODING) {
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- result = core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
-
- }
-
- if (result == DC_OK) {
- link->dpcd_caps.lttpr_caps.mode = repeater_mode;
- }
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
-
- repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- result = core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
-
- if (result == DC_OK) {
- link->dpcd_caps.lttpr_caps.mode = repeater_mode;
- }
-
- if (encoding == DP_8b_10b_ENCODING) {
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- /* Driver does not need to train the first hop. Skip DPCD read and clear
- * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
-
- for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
- aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
- core_link_read_dpcd(
- link,
- aux_interval_address,
- (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
- sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
- link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
- }
- }
- }
-
- return result;
-}
-
-static void repeater_training_done(struct dc_link *link, uint32_t offset)
-{
- union dpcd_training_pattern dpcd_pattern = {0};
-
- const uint32_t dpcd_base_lt_offset =
- DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- /* Set training not in progress*/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
-
- core_link_write_dpcd(
- link,
- dpcd_base_lt_offset,
- &dpcd_pattern.raw,
- 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-}
-
-static void print_status_message(
- struct dc_link *link,
- const struct link_training_settings *lt_settings,
- enum link_training_result status)
-{
- char *link_rate = "Unknown";
- char *lt_result = "Unknown";
- char *lt_spread = "Disabled";
-
- switch (lt_settings->link_settings.link_rate) {
- case LINK_RATE_LOW:
- link_rate = "RBR";
- break;
- case LINK_RATE_RATE_2:
- link_rate = "R2";
- break;
- case LINK_RATE_RATE_3:
- link_rate = "R3";
- break;
- case LINK_RATE_HIGH:
- link_rate = "HBR";
- break;
- case LINK_RATE_RBR2:
- link_rate = "RBR2";
- break;
- case LINK_RATE_RATE_6:
- link_rate = "R6";
- break;
- case LINK_RATE_HIGH2:
- link_rate = "HBR2";
- break;
- case LINK_RATE_HIGH3:
- link_rate = "HBR3";
- break;
- case LINK_RATE_UHBR10:
- link_rate = "UHBR10";
- break;
- case LINK_RATE_UHBR13_5:
- link_rate = "UHBR13.5";
- break;
- case LINK_RATE_UHBR20:
- link_rate = "UHBR20";
- break;
- default:
- break;
- }
-
- switch (status) {
- case LINK_TRAINING_SUCCESS:
- lt_result = "pass";
- break;
- case LINK_TRAINING_CR_FAIL_LANE0:
- lt_result = "CR failed lane0";
- break;
- case LINK_TRAINING_CR_FAIL_LANE1:
- lt_result = "CR failed lane1";
- break;
- case LINK_TRAINING_CR_FAIL_LANE23:
- lt_result = "CR failed lane23";
- break;
- case LINK_TRAINING_EQ_FAIL_CR:
- lt_result = "CR failed in EQ";
- break;
- case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
- lt_result = "CR failed in EQ partially";
- break;
- case LINK_TRAINING_EQ_FAIL_EQ:
- lt_result = "EQ failed";
- break;
- case LINK_TRAINING_LQA_FAIL:
- lt_result = "LQA failed";
- break;
- case LINK_TRAINING_LINK_LOSS:
- lt_result = "Link loss";
- break;
- case DP_128b_132b_LT_FAILED:
- lt_result = "LT_FAILED received";
- break;
- case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
- lt_result = "max loop count reached";
- break;
- case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
- lt_result = "channel EQ timeout";
- break;
- case DP_128b_132b_CDS_DONE_TIMEOUT:
- lt_result = "CDS timeout";
- break;
- default:
- break;
- }
-
- switch (lt_settings->link_settings.link_spread) {
- case LINK_SPREAD_DISABLED:
- lt_spread = "Disabled";
- break;
- case LINK_SPREAD_05_DOWNSPREAD_30KHZ:
- lt_spread = "0.5% 30KHz";
- break;
- case LINK_SPREAD_05_DOWNSPREAD_33KHZ:
- lt_spread = "0.5% 33KHz";
- break;
- default:
- break;
- }
-
- /* Connectivity log: link training */
-
- /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
-
- CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
- link_rate,
- lt_settings->link_settings.lane_count,
- lt_result,
- lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
- lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
- lt_spread);
-}
-
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- /* program ASIC PHY settings*/
- dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
-
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
- /* Notify DP sink the PHY settings from source */
- dpcd_set_lane_settings(link, lt_settings, DPRX);
-}
-
-bool dc_link_dp_perform_link_training_skip_aux(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_setting)
-{
- struct link_training_settings lt_settings = {0};
-
- dp_decide_training_settings(
- link,
- link_setting,
- &lt_settings);
- override_training_settings(
- link,
- &link->preferred_training_settings,
- &lt_settings);
-
- /* 1. Perform_clock_recovery_sequence. */
-
- /* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
-
- /* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
-
- /* wait for the receiver to lock on */
- dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
-
- /* 2. Perform_channel_equalization_sequence. */
-
- /* transmit training pattern for channel equalization. */
- dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
-
- /* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
-
- /* wait for the receiver to lock on. */
- dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
-
- /* 3. Perform_link_training_int. */
-
- /* Mainlink output idle pattern. */
- dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS);
-
- return true;
-}
-
-enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
-{
- enum dc_status status = DC_OK;
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
- status = configure_lttpr_mode_transparent(link);
-
- else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- status = configure_lttpr_mode_non_transparent(link, lt_settings);
-
- return status;
-}
-
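- /* Clear TRAINING_PATTERN_SET; for 128b/132b also poll until the
-  * intra-hop AUX reply indication in SINK_STATUS deasserts.
-  */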
-static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
-{
- uint8_t sink_status = 0;
- uint8_t i;
-
- /* clear training pattern set */
- dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
-
- if (encoding == DP_128b_132b_ENCODING) {
- /* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
- if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
- (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
- break;
- udelay(1000);
- }
- }
-}
-
-enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
- struct link_training_settings *lt_settings)
-{
- enum dp_link_encoding encoding =
- dp_get_link_encoding_format(
- &lt_settings->link_settings);
- enum dc_status status;
-
- status = core_link_write_dpcd(
- link,
- DP_MAIN_LINK_CHANNEL_CODING_SET,
- (uint8_t *) &encoding,
- 1);
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
- __func__,
- DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
-
- return status;
-}
-
-static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
- uint32_t *interval_in_us)
-{
- union dp_128b_132b_training_aux_rd_interval dpcd_interval;
- uint32_t interval_unit = 0;
-
- dpcd_interval.raw = 0;
- core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
- &dpcd_interval.raw, sizeof(dpcd_interval.raw));
- interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
- /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
- * INTERVAL_UNIT. The maximum is 256 ms
- */
- *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
-}
-
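- /* 128b/132b channel EQ: transmit TPS1, then TPS2 with updated FFE
-  * presets, and poll until channel EQ and interlane alignment are done
-  * or a loop count / wait time limit is exceeded.
-  */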
-static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- uint8_t loop_count;
- uint32_t aux_rd_interval = 0;
- uint32_t wait_time = 0;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- enum dc_status status = DC_OK;
- enum link_training_result result = LINK_TRAINING_SUCCESS;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Transmit 128b/132b_TPS1 over Main-Link */
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
- /* Set TRAINING_PATTERN_SET to 01h */
- dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
-
- /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
- dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
- dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
-
- /* Set loop counter to start from 1 */
- loop_count = 1;
-
- /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
- dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
- lt_settings->pattern_for_eq, DPRX);
-
- /* poll for channel EQ done */
- while (result == LINK_TRAINING_SUCCESS) {
- dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
- wait_time += aux_rd_interval;
- status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
- if (status != DC_OK) {
- result = LINK_TRAINING_ABORT;
- } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
- dpcd_lane_status)) {
- /* pass */
- break;
- } else if (loop_count >= lt_settings->eq_loop_count_limit) {
- result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
- } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- result = DP_128b_132b_LT_FAILED;
- } else {
- dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_128b_132b_set_lane_settings(link, lt_settings);
- }
- loop_count++;
- }
-
- /* poll for EQ interlane align done */
- while (result == LINK_TRAINING_SUCCESS) {
- if (status != DC_OK) {
- result = LINK_TRAINING_ABORT;
- } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
- /* pass */
- break;
- } else if (wait_time >= lt_settings->eq_wait_time_limit) {
- result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
- } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- result = DP_128b_132b_LT_FAILED;
- } else {
- dp_wait_for_training_aux_rd_interval(link,
- lt_settings->eq_pattern_time);
- wait_time += lt_settings->eq_pattern_time;
- status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- }
- }
-
- return result;
-}
-
-static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- /* Assumption: hardware has already transmitted the EQ pattern */
- enum dc_status status = DC_OK;
- enum link_training_result result = LINK_TRAINING_SUCCESS;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- uint32_t wait_time = 0;
-
- /* initiate CDS done sequence */
- dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
-
- /* poll for CDS interlane align done and symbol lock */
- while (result == LINK_TRAINING_SUCCESS) {
- dp_wait_for_training_aux_rd_interval(link,
- lt_settings->cds_pattern_time);
- wait_time += lt_settings->cds_pattern_time;
- status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- if (status != DC_OK) {
- result = LINK_TRAINING_ABORT;
- } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
- dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
- /* pass */
- break;
- } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- result = DP_128b_132b_LT_FAILED;
- } else if (wait_time >= lt_settings->cds_wait_time_limit) {
- result = DP_128b_132b_CDS_DONE_TIMEOUT;
- }
- }
-
- return result;
-}
-
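- /* 8b/10b training: with LTTPRs in non-transparent mode each repeater
-  * hop is trained (clock recovery, then channel EQ) before the DPRX
-  * itself is trained with the same two-phase sequence.
-  */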
-static enum link_training_result dp_perform_8b_10b_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
-
- uint8_t repeater_cnt;
- uint8_t repeater_id;
- uint8_t lane = 0;
-
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
-
- /* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-
- /* 2. perform link training (the link-training-done state
-  * is cleared as well)
-  */
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
- repeater_id--) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS) {
- repeater_training_done(link, repeater_id);
- break;
- }
-
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- repeater_id);
-
- repeater_training_done(link, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS)
- break;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->dpcd_lane_settings[lane].raw = 0;
- lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
- lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
- }
- }
- }
-
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- DPRX);
- }
- }
-
- return status;
-}
-
-static enum link_training_result dp_perform_128b_132b_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum link_training_result result = LINK_TRAINING_SUCCESS;
-
- /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
- if (link->dc->debug.legacy_dp2_lt) {
- struct link_training_settings legacy_settings;
-
- decide_8b_10b_training_settings(link,
- &lt_settings->link_settings,
- &legacy_settings);
- return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
- }
-
- dpcd_set_link_settings(link, lt_settings);
-
- if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
-
- if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
-
- return result;
-}
-
-static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- uint8_t toggle_rate = 0x6;
- uint8_t target_rate = 0x6;
- bool apply_toggle_rate_wa = false;
- uint8_t repeater_cnt;
- uint8_t repeater_id;
-
- /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */
- if (lt_settings->cr_pattern_time < 16000)
- lt_settings->cr_pattern_time = 16000;
-
- /* Fixed VS/PE specific: Toggle link rate */
- apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
- target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
- toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
-
- if (apply_toggle_rate_wa)
- lt_settings->link_settings.link_rate = toggle_rate;
-
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
-
- /* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
-
- /* Fixed VS/PE specific: Toggle link rate back*/
- if (apply_toggle_rate_wa) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &target_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = target_rate;
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-
- /* 2. perform link training (the link-training-done state
-  * is cleared as well)
-  */
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
- repeater_id--) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS) {
- repeater_training_done(link, repeater_id);
- break;
- }
-
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- repeater_id);
-
- repeater_training_done(link, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS)
- break;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->dpcd_lane_settings[lane].raw = 0;
- lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
- lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
- }
- }
- }
-
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- DPRX);
- }
- }
-
- return status;
-}
-
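- /* Fixed VS/PE path: drive settings are also programmed through
-  * vendor-specific LTTPR DPCD writes (base 0xF004F) alongside the
-  * standard training registers; otherwise this follows the 8b/10b
-  * CR + EQ sequence.
-  */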
-static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint32_t vendor_lttpr_write_address = 0xF004F;
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
- uint8_t toggle_rate;
- uint8_t rate;
-
- /* Only 8b/10b is supported */
- ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
- return status;
- }
-
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- /* Certain display and cable configurations require an extra delay */
- if (offset > 2)
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
- }
-
- /* Vendor specific: Reset lane settings */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-
- /* Vendor specific: Enable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
-
- /* 1. set link rate, lane count and spread. */
-
- downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = rate;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
-
- /* 2. Perform link training */
-
- /* Perform Clock Recovery Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- const uint8_t max_vendor_dpcd_retries = 10;
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- enum dc_status dpcd_status = DC_OK;
- uint8_t i = 0;
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings */
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- 0);
-
- /* 2. update DPCD of the receiver */
- if (!retry_count) {
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the first iteration.
- */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- 0);
- /* Vendor specific: Disable intercept */
- for (i = 0; i < max_vendor_dpcd_retries; i++) {
- msleep(pre_disable_intercept_delay_ms);
- dpcd_status = core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
-
- if (dpcd_status == DC_OK)
- break;
-
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
- }
- } else {
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-
- dpcd_set_lane_settings(
- link,
- lt_settings,
- 0);
- }
-
-			/* 3. wait for receiver to lock-on */
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 6. max VS reached*/
- if (dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings */
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient
- */
- if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- status = dp_get_cr_failure(lane_count, dpcd_lane_status);
- }
-
- /* Perform Channel EQ Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
-
- status = LINK_TRAINING_EQ_FAIL_EQ;
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
-
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
-				 * but only for the first iteration
- */
-
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, 0);
- else
- dpcd_set_lane_settings(link, lt_settings, 0);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_EQ_FAIL_CR;
- break;
- }
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
- }
-
- return status;
-}
-
-
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_settings,
- bool skip_video_pattern)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings = {0};
- enum dp_link_encoding encoding =
- dp_get_link_encoding_format(link_settings);
-
- /* decide training settings */
- dp_decide_training_settings(
- link,
- link_settings,
- &lt_settings);
-
- override_training_settings(
- link,
- &link->preferred_training_settings,
- &lt_settings);
-
- /* reset previous training states */
- dpcd_exit_training_mode(link, encoding);
-
- /* configure link prior to entering training mode */
- dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
- dpcd_configure_channel_coding(link, &lt_settings);
-
- /* enter training mode:
-	 * Per the DP spec, starting from here the DPTX device shall not
-	 * issue non-LT AUX transactions while inside training mode.
- */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING)
- status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
- else if (encoding == DP_8b_10b_ENCODING)
- status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
- else if (encoding == DP_128b_132b_ENCODING)
- status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
- else
- ASSERT(0);
-
- /* exit training mode */
- dpcd_exit_training_mode(link, encoding);
-
- /* switch to video idle */
- if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
- status = dp_transition_to_video_idle(link,
- link_res,
- &lt_settings,
- status);
-
- /* dump debug data */
- print_status_message(link, &lt_settings, status);
- if (status != LINK_TRAINING_SUCCESS)
- link->ctx->dc->debug_data.ltFailCount++;
- return status;
-}
-
-bool perform_link_training_with_retries(
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern,
- int attempts,
- struct pipe_ctx *pipe_ctx,
- enum signal_type signal,
- bool do_fallback)
-{
- int j;
- uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
- enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings cur_link_settings = *link_setting;
- struct dc_link_settings max_link_settings = *link_setting;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- int fail_count = 0;
- bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
- bool is_link_bw_min = /* RBR x 1 */
- (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE);
-
- dp_trace_commit_lt_init(link);
-
- if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
- /* We need to do this before the link training to ensure the idle
- * pattern in SST mode will be sent right after the link training
- */
- link_hwss->setup_stream_encoder(pipe_ctx);
-
- dp_trace_set_lt_start_timestamp(link, false);
- j = 0;
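-	/* Allow up to 10 failures per requested attempt before giving up entirely. */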
- while (j < attempts && fail_count < (attempts * 10)) {
-
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n",
- __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
- cur_link_settings.lane_count);
-
- dp_enable_link_phy(
- link,
- &pipe_ctx->link_res,
- signal,
- pipe_ctx->clock_source->id,
- &cur_link_settings);
-
- if (stream->sink_patches.dppowerup_delay > 0) {
- int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
-
- msleep(delay_dp_power_up_in_ms);
- }
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (panel_mode == DP_PANEL_MODE_EDP) {
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
- if (cp_psp && cp_psp->funcs.enable_assr)
-			/* ASSR is bound to fail with an unsigned PSP
-			 * verstage used during the development phase.
-			 * Report and continue with eDP panel mode to
-			 * perform eDP link training with the right settings.
-			 */
- cp_psp->funcs.enable_assr(cp_psp->handle, link);
- }
-#endif
-
- dp_set_panel_mode(link, panel_mode);
-
- if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
- return true;
- } else {
- /** @todo Consolidate USB4 DP and DPx.x training. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dc_link_dpia_perform_link_training(link,
- &pipe_ctx->link_res,
- &cur_link_settings,
- skip_video_pattern);
-
- /* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
- dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-					/* Update the verified link settings to the current ones,
-					 * because DPIA LT might fall back to a lower link setting.
-					 */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
- link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
- dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
- }
- }
- } else {
- status = dc_link_dp_perform_link_training(link,
- &pipe_ctx->link_res,
- &cur_link_settings,
- skip_video_pattern);
- }
-
- dp_trace_lt_total_count_increment(link, false);
- dp_trace_lt_result_update(link, status, false);
- dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
- return true;
- }
-
- fail_count++;
- dp_trace_lt_fail_count_update(link, fail_count, false);
- if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
-			/* Latest link training attempt failed or link training was
-			 * aborted; skip the delay and keep the PHY on.
-			 */
- if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
- break;
- }
-
- DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
- __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
- cur_link_settings.lane_count, status);
-
- dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
-
- /* Abort link training if failure due to sink being unplugged. */
- if (status == LINK_TRAINING_ABORT) {
- enum dc_connection_type type = dc_connection_none;
-
- dc_link_detect_sink(link, &type);
- if (type == dc_connection_none) {
- DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
- break;
- }
- }
-
- /* Try to train again at original settings if:
- * - not falling back between training attempts;
- * - aborted previous attempt due to reasons other than sink unplug;
- * - successfully trained but at a link rate lower than that required by stream;
- * - reached minimum link bandwidth.
- */
- if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
- (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
- is_link_bw_min) {
- j++;
- cur_link_settings = *link_setting;
- delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
- is_link_bw_low = false;
- is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE);
-
- } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
- uint32_t req_bw;
- uint32_t link_bw;
-
- decide_fallback_link_setting(link, &max_link_settings,
- &cur_link_settings, status);
- /* Fail link training if reduced link bandwidth no longer meets
- * stream requirements.
- */
- req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
- is_link_bw_low = (req_bw > link_bw);
- is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low)
- DC_LOG_WARNING(
- "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
- __func__, link->link_index, req_bw, link_bw);
- }
-
- msleep(delay_between_attempts);
- }
- return false;
-}
-
-static enum clock_source_id get_clock_source_id(struct dc_link *link)
-{
- enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
- struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;
-
- if (dp_cs != NULL) {
- dp_cs_id = dp_cs->id;
- } else {
- /*
- * dp clock source is not initialized for some reason.
- * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
- */
- ASSERT(dp_cs);
- }
-
- return dp_cs_id;
-}
-
-static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res,
- bool mst_enable)
-{
- if (mst_enable == false &&
- link->type == dc_connection_mst_branch) {
- /* Disable MST on link. Use only local sink. */
- dp_disable_link_phy_mst(link, link_res, link->connector_signal);
-
- link->type = dc_connection_single;
- link->local_sink = link->remote_sinks[0];
- link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
- dc_sink_retain(link->local_sink);
- dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
- } else if (mst_enable == true &&
- link->type == dc_connection_single &&
- link->remote_sinks[0] != NULL) {
- /* Re-enable MST on link. */
- dp_disable_link_phy(link, link_res, link->connector_signal);
- dp_enable_mst_on_sink(link, true);
-
- link->type = dc_connection_mst_branch;
- link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
- }
-}
-
-bool dc_link_dp_sync_lt_begin(struct dc_link *link)
-{
- /* Begin Sync LT. During this time,
- * DPCD:600h must not be powered down.
- */
- link->sync_lt_in_progress = true;
-
- /*Clear any existing preferred settings.*/
- memset(&link->preferred_training_settings, 0,
- sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0,
- sizeof(struct dc_link_settings));
-
- return true;
-}
-
-enum link_training_result dc_link_dp_sync_lt_attempt(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct dc_link_settings *link_settings,
- struct dc_link_training_overrides *lt_overrides)
-{
- struct link_training_settings lt_settings = {0};
- enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
- enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
- enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
- bool fec_enable = false;
-
- dp_decide_training_settings(
- link,
- link_settings,
- &lt_settings);
- override_training_settings(
- link,
- lt_overrides,
- &lt_settings);
- /* Setup MST Mode */
- if (lt_overrides->mst_enable)
- set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable);
-
- /* Disable link */
- dp_disable_link_phy(link, link_res, link->connector_signal);
-
- /* Enable link */
- dp_cs_id = get_clock_source_id(link);
- dp_enable_link_phy(link, link_res, link->connector_signal,
- dp_cs_id, link_settings);
-
- /* Set FEC enable */
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
- fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, NULL, fec_enable);
- }
-
- if (lt_overrides->alternate_scrambler_reset) {
- if (*lt_overrides->alternate_scrambler_reset)
- panel_mode = DP_PANEL_MODE_EDP;
- else
- panel_mode = DP_PANEL_MODE_DEFAULT;
- } else
- panel_mode = dp_get_panel_mode(link);
-
- dp_set_panel_mode(link, panel_mode);
-
- /* Attempt to train with given link training settings */
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX);
-
- /* Set link rate, lane count and spread. */
- dpcd_set_link_settings(link, &lt_settings);
-
- /* 2. perform link training (set link training done
- * to false is done as well)
- */
- lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX);
- if (lt_status == LINK_TRAINING_SUCCESS) {
- lt_status = perform_channel_equalization_sequence(link,
- link_res,
- &lt_settings,
- DPRX);
- }
-
- /* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/
- /* 4. print status message*/
- print_status_message(link, &lt_settings, lt_status);
-
- return lt_status;
-}
-
-bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
-{
- /* If input parameter is set, shut down phy.
- * Still shouldn't turn off dp_receiver (DPCD:600h)
- */
- if (link_down == true) {
- struct dc_link_settings link_settings = link->cur_link_settings;
- dp_disable_link_phy(link, NULL, link->connector_signal);
- if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_ready(link, NULL, false);
- }
-
- link->sync_lt_in_progress = false;
- return true;
-}
-
-static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
-{
- enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
-
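-	/* 128b/132b (UHBR) rates advertised in the LTTPR caps take precedence over
-	 * the 8b/10b max_link_rate field.
-	 */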
- if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
- lttpr_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
- lttpr_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
- lttpr_max_link_rate = LINK_RATE_UHBR10;
-
- return lttpr_max_link_rate;
-}
-
-static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
-{
- enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
-
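-	/* Cable ID advertises certified UHBR support; pick the highest rate flagged. */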
- if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
- cable_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
- cable_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
- cable_max_link_rate = LINK_RATE_UHBR10;
-
- return cable_max_link_rate;
-}
-
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
-{
- struct link_encoder *link_enc = NULL;
-
- if (!max_link_enc_cap) {
- DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
- return false;
- }
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (link_enc && link_enc->funcs->get_max_link_cap) {
- link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
- return true;
- }
-
- DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
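-	/* Fall back to the most conservative setting: one lane at raw rate 6, i.e.
-	 * RBR (1.62 Gbps) in the dc 270 Mbps-per-unit link rate encoding.
-	 */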
- max_link_enc_cap->lane_count = 1;
- max_link_enc_cap->link_rate = 6;
- return false;
-}
-
-
-struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
-{
- struct dc_link_settings max_link_cap = {0};
- enum dc_link_rate lttpr_max_link_rate;
- enum dc_link_rate cable_max_link_rate;
- struct link_encoder *link_enc = NULL;
-
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- /* get max link encoder capability */
- if (link_enc)
- link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
-
- /* Lower link settings based on sink's link cap */
- if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count =
- link->reported_link_cap.lane_count;
- if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate =
- link->reported_link_cap.link_rate;
- if (link->reported_link_cap.link_spread <
- max_link_cap.link_spread)
- max_link_cap.link_spread =
- link->reported_link_cap.link_spread;
-
-	/* Lower link settings based on cable attributes.
-	 * Cable ID is a DP2 feature used to identify the max certified link rate
-	 * that a cable can carry. The cable identification method requires both
-	 * cable and display hardware support. Since the spec came late, it is
-	 * anticipated that the first round of DP2 cables and displays may not
-	 * be fully compatible and may not reliably return cable ID data. Our
-	 * cable ID policy is therefore: if the cable returns non-zero cable ID
-	 * data, we take the cable's link rate capability into account; if we get
-	 * zero data, the cable's link rate capability is considered inconclusive
-	 * and is not taken into account, to avoid over-limiting the hardware
-	 * capability exposed to users. The max overall link rate capability is
-	 * still determined after actual DP pre-training. Cable ID is considered
-	 * an auxiliary method of determining max link bandwidth capability.
-	 */
- cable_max_link_rate = get_cable_max_link_rate(link);
-
- if (!link->dc->debug.ignore_cable_id &&
- cable_max_link_rate != LINK_RATE_UNKNOWN &&
- cable_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = cable_max_link_rate;
-
- /* account for lttpr repeaters cap
- * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
- */
- if (dp_is_lttpr_present(link)) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
- lttpr_max_link_rate = get_lttpr_max_link_rate(link);
-
- if (lttpr_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = lttpr_max_link_rate;
-
- DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
- __func__,
- max_link_cap.lane_count,
- max_link_cap.link_rate);
- }
-
- if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
- link->dc->debug.disable_uhbr)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
-
- return max_link_cap;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data)
-{
- static enum dc_status retval;
-
- /* The HW reads 16 bytes from 200h on HPD,
- * but if we get an AUX_DEFER, the HW cannot retry
- * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
-	 * fail, so we now explicitly read 6 bytes, which is
-	 * what the above-mentioned test cases require.
- *
- * For DP 1.4 we need to read those from 2002h range.
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
- /* Read 14 bytes in a single read and then copy only the required fields.
- * This is more efficient than doing it in two separate AUX reads. */
-
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT_ESI,
- tmp,
- sizeof(tmp));
-
- if (retval != DC_OK)
- return retval;
-
- irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
- }
-
- return retval;
-}
-
-bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data)
-{
- uint8_t irq_reg_rx_power_state = 0;
- enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
- union lane_status lane_status;
- uint32_t lane;
- bool sink_status_changed;
- bool return_code;
-
- sink_status_changed = false;
- return_code = false;
-
- if (link->cur_link_settings.lane_count == 0)
- return return_code;
-
- /*1. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)
- */
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed
- */
- sink_status_changed = true;
- break;
- }
- }
-
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
-
- DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
-
- return_code = true;
-
- /*2. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
-
- if (dpcd_result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
- __func__);
- } else {
- if (irq_reg_rx_power_state != DP_SET_POWER_D0)
- return_code = false;
- }
- }
-
- return return_code;
-}
-
-static bool dp_verify_link_cap(
- struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting,
- int *fail_count)
-{
- struct dc_link_settings cur_link_settings = {0};
- struct dc_link_settings max_link_settings = *known_limit_link_setting;
- bool success = false;
- bool skip_video_pattern;
- enum clock_source_id dp_cs_id = get_clock_source_id(link);
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- union hpd_irq_data irq_data;
- struct link_resource link_res;
-
- memset(&irq_data, 0, sizeof(irq_data));
- cur_link_settings = max_link_settings;
-
- /* Grant extended timeout request */
- if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
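-		/* Only the top bit of the reported max extended timeout is echoed back
-		 * as the grant.
-		 */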
- uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
-
- core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
- }
-
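-	/* Try training at progressively lower settings until one succeeds or the
-	 * fallback options are exhausted; the last attempted setting becomes the
-	 * verified cap on success, otherwise the fail-safe setting is used.
-	 */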
- do {
- if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings))
- continue;
-
- skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW;
- dp_enable_link_phy(
- link,
- &link_res,
- link->connector_signal,
- dp_cs_id,
- &cur_link_settings);
-
- status = dc_link_dp_perform_link_training(
- link,
- &link_res,
- &cur_link_settings,
- skip_video_pattern);
-
- if (status == LINK_TRAINING_SUCCESS) {
- success = true;
- udelay(1000);
- if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
- hpd_rx_irq_check_link_loss_status(
- link,
- &irq_data))
- (*fail_count)++;
-
- } else {
- (*fail_count)++;
- }
- dp_trace_lt_total_count_increment(link, true);
- dp_trace_lt_result_update(link, status, true);
- dp_disable_link_phy(link, &link_res, link->connector_signal);
- } while (!success && decide_fallback_link_setting(link,
- &max_link_settings, &cur_link_settings, status));
-
- link->verified_link_cap = success ?
- cur_link_settings : fail_safe_link_settings;
- return success;
-}
-
-static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
- struct dc_link_settings *link_settings)
-{
- /* Temporary Renoir-specific workaround PHY will sometimes be in bad
- * state on hotplugging display from certain USB-C dongle, so add extra
- * cycle of enabling and disabling the PHY before first link training.
- */
- struct link_resource link_res = {0};
- enum clock_source_id dp_cs_id = get_clock_source_id(link);
-
- dp_enable_link_phy(link, &link_res, link->connector_signal,
- dp_cs_id, link_settings);
- dp_disable_link_phy(link, &link_res, link->connector_signal);
-}
-
-bool dp_verify_link_cap_with_retries(
- struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting,
- int attempts)
-{
- int i = 0;
- bool success = false;
- int fail_count = 0;
-
- dp_trace_detect_lt_init(link);
-
- if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
- link->dc->debug.usbc_combo_phy_reset_wa)
- apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
-
- dp_trace_set_lt_start_timestamp(link, false);
- for (i = 0; i < attempts; i++) {
- enum dc_connection_type type = dc_connection_none;
-
- memset(&link->verified_link_cap, 0,
- sizeof(struct dc_link_settings));
- if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
- link->verified_link_cap = fail_safe_link_settings;
- break;
- } else if (dp_verify_link_cap(link, known_limit_link_setting,
- &fail_count) && fail_count == 0) {
- success = true;
- break;
- }
- msleep(10);
- }
-
- dp_trace_lt_fail_count_update(link, fail_count, true);
- dp_trace_set_lt_end_timestamp(link, true);
-
- return success;
-}
-
-/* In DP compliance tests, the DPR-120 may report
- * a random value in its MAX_LINK_BW DPCD field.
- * In this case we map it to the maximum supported link rate
- * that is not greater than MAX_LINK_BW.
- */
-static enum dc_link_rate get_link_rate_from_max_link_bw(
- uint8_t max_link_bw)
-{
- enum dc_link_rate link_rate;
-
- if (max_link_bw >= LINK_RATE_HIGH3) {
- link_rate = LINK_RATE_HIGH3;
- } else if (max_link_bw < LINK_RATE_HIGH3
- && max_link_bw >= LINK_RATE_HIGH2) {
- link_rate = LINK_RATE_HIGH2;
- } else if (max_link_bw < LINK_RATE_HIGH2
- && max_link_bw >= LINK_RATE_HIGH) {
- link_rate = LINK_RATE_HIGH;
- } else if (max_link_bw < LINK_RATE_HIGH
- && max_link_bw >= LINK_RATE_LOW) {
- link_rate = LINK_RATE_LOW;
- } else {
- link_rate = LINK_RATE_UNKNOWN;
- }
-
- return link_rate;
-}
-
-static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
-{
- return lane_count <= LANE_COUNT_ONE;
-}
-
-static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
-{
- return link_rate <= LINK_RATE_LOW;
-}
-
-static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
-{
- switch (lane_count) {
- case LANE_COUNT_FOUR:
- return LANE_COUNT_TWO;
- case LANE_COUNT_TWO:
- return LANE_COUNT_ONE;
- case LANE_COUNT_ONE:
- return LANE_COUNT_UNKNOWN;
- default:
- return LANE_COUNT_UNKNOWN;
- }
-}
-
-static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
-{
- switch (link_rate) {
- case LINK_RATE_UHBR20:
- return LINK_RATE_UHBR13_5;
- case LINK_RATE_UHBR13_5:
- return LINK_RATE_UHBR10;
- case LINK_RATE_UHBR10:
- return LINK_RATE_HIGH3;
- case LINK_RATE_HIGH3:
- return LINK_RATE_HIGH2;
- case LINK_RATE_HIGH2:
- return LINK_RATE_HIGH;
- case LINK_RATE_HIGH:
- return LINK_RATE_LOW;
- case LINK_RATE_LOW:
- return LINK_RATE_UNKNOWN;
- default:
- return LINK_RATE_UNKNOWN;
- }
-}
-
-static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
-{
- switch (lane_count) {
- case LANE_COUNT_ONE:
- return LANE_COUNT_TWO;
- case LANE_COUNT_TWO:
- return LANE_COUNT_FOUR;
- default:
- return LANE_COUNT_UNKNOWN;
- }
-}
-
-static enum dc_link_rate increase_link_rate(struct dc_link *link,
- enum dc_link_rate link_rate)
-{
- switch (link_rate) {
- case LINK_RATE_LOW:
- return LINK_RATE_HIGH;
- case LINK_RATE_HIGH:
- return LINK_RATE_HIGH2;
- case LINK_RATE_HIGH2:
- return LINK_RATE_HIGH3;
- case LINK_RATE_HIGH3:
- return LINK_RATE_UHBR10;
- case LINK_RATE_UHBR10:
-		/* Up to the DP2.x spec, UHBR13.5 is the only link rate that may
-		 * be unsupported by the DPRX even when a higher link rate is
-		 * supported, so we treat it as a special case for code simplicity.
-		 * When we have new specs with more link rates like this, we should
-		 * consider a more generic solution to handle discrete link
-		 * rate capabilities.
-		 */
- return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
- LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
- case LINK_RATE_UHBR13_5:
- return LINK_RATE_UHBR20;
- default:
- return LINK_RATE_UNKNOWN;
- }
-}
-
-static bool decide_fallback_link_setting_max_bw_policy(
- struct dc_link *link,
- const struct dc_link_settings *max,
- struct dc_link_settings *cur,
- enum link_training_result training_result)
-{
- uint8_t cur_idx = 0, next_idx;
- bool found = false;
-
- if (training_result == LINK_TRAINING_ABORT)
- return false;
-
- while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
- /* find current index */
- if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
- dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
- break;
- else
- cur_idx++;
-
- next_idx = cur_idx + 1;
-
- while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
- /* find next index */
- if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
- dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
- next_idx++;
- else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
- link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
-			/* Up to the DP2.x spec, UHBR13.5 is the only link rate
-			 * that may be unsupported by the DPRX even when a higher
-			 * link rate is supported, so we treat it as a special
-			 * case for code simplicity. When we have new specs with
-			 * more link rates like this, we should consider a more
-			 * generic solution to handle discrete link rate
-			 * capabilities.
-			 */
- next_idx++;
- else
- break;
-
- if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
- cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
- cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
- found = true;
- }
-
- return found;
-}
-
-/*
- * function: set link rate and lane count fallback based
- * on current link setting and last link training result
- * return value:
- * true - link setting could be set
- * false - has reached minimum setting
- * and no further fallback could be done
- */
-static bool decide_fallback_link_setting(
- struct dc_link *link,
- struct dc_link_settings *max,
- struct dc_link_settings *cur,
- enum link_training_result training_result)
-{
- if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
- link->dc->debug.force_dp2_lt_fallback_method)
- return decide_fallback_link_setting_max_bw_policy(link, max, cur,
- training_result);
-
- switch (training_result) {
- case LINK_TRAINING_CR_FAIL_LANE0:
- case LINK_TRAINING_CR_FAIL_LANE1:
- case LINK_TRAINING_CR_FAIL_LANE23:
- case LINK_TRAINING_LQA_FAIL:
- {
- if (!reached_minimum_link_rate(cur->link_rate)) {
- cur->link_rate = reduce_link_rate(cur->link_rate);
- } else if (!reached_minimum_lane_count(cur->lane_count)) {
- cur->link_rate = max->link_rate;
- if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
- return false;
- else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
- cur->lane_count = LANE_COUNT_ONE;
- else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
- cur->lane_count = LANE_COUNT_TWO;
- else
- cur->lane_count = reduce_lane_count(cur->lane_count);
- } else {
- return false;
- }
- break;
- }
- case LINK_TRAINING_EQ_FAIL_EQ:
- case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
- {
- if (!reached_minimum_lane_count(cur->lane_count)) {
- cur->lane_count = reduce_lane_count(cur->lane_count);
- } else if (!reached_minimum_link_rate(cur->link_rate)) {
- cur->link_rate = reduce_link_rate(cur->link_rate);
- /* Reduce max link rate to avoid potential infinite loop.
- * Needed so that any subsequent CR_FAIL fallback can't
- * re-set the link rate higher than the link rate from
- * the latest EQ_FAIL fallback.
- */
- max->link_rate = cur->link_rate;
- cur->lane_count = max->lane_count;
- } else {
- return false;
- }
- break;
- }
- case LINK_TRAINING_EQ_FAIL_CR:
- {
- if (!reached_minimum_link_rate(cur->link_rate)) {
- cur->link_rate = reduce_link_rate(cur->link_rate);
- /* Reduce max link rate to avoid potential infinite loop.
- * Needed so that any subsequent CR_FAIL fallback can't
- * re-set the link rate higher than the link rate from
- * the latest EQ_FAIL fallback.
- */
- max->link_rate = cur->link_rate;
- cur->lane_count = max->lane_count;
- } else {
- return false;
- }
- break;
- }
- default:
- return false;
- }
- return true;
-}
-
-bool dp_validate_mode_timing(
- struct dc_link *link,
- const struct dc_crtc_timing *timing)
-{
- uint32_t req_bw;
- uint32_t max_bw;
-
- const struct dc_link_settings *link_setting;
-
- /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 &&
- !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
- dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL)
- return false;
-
-	/* Always allow the DP fail-safe mode (640x480 @ 25.175 MHz). */
- if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 &&
- timing->h_addressable == (uint32_t) 640 &&
- timing->v_addressable == (uint32_t) 480)
- return true;
-
- link_setting = dc_link_get_link_cap(link);
-
- /* TODO: DYNAMIC_VALIDATION needs to be implemented */
- /*if (flags.DYNAMIC_VALIDATION == 1 &&
- link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
- link_setting = &link->verified_link_cap;
- */
-
- req_bw = dc_bandwidth_in_kbps_from_timing(timing);
- max_bw = dc_link_bandwidth_kbps(link, link_setting);
-
- if (req_bw <= max_bw) {
-		/* Remember the biggest mode here. During initial link
-		 * training (to get verified_link_cap), LS sends an event
-		 * to the upper layer that it cannot train at the reported
-		 * cap, and the upper layer then re-enumerates modes.
-		 * This is not necessary if the lower verified_link_cap
-		 * is enough to drive all the modes. */
-
- /* TODO: DYNAMIC_VALIDATION needs to be implemented */
- /* if (flags.DYNAMIC_VALIDATION == 1)
- dpsst->max_req_bw_for_verified_linkcap = dal_max(
- dpsst->max_req_bw_for_verified_linkcap, req_bw); */
- return true;
- } else
- return false;
-}
-
-static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
-{
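-	/* Positional initializer below sets lane_count, link_rate, link_spread,
-	 * use_link_rate_set and link_rate_set in that order.
-	 */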
- struct dc_link_settings initial_link_setting = {
- LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
- struct dc_link_settings current_link_setting =
- initial_link_setting;
- uint32_t link_bw;
-
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
- return false;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
-
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- }
- }
-
- return false;
-}
-
-bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
-{
- struct dc_link_settings initial_link_setting;
- struct dc_link_settings current_link_setting;
- uint32_t link_bw;
-
- /*
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
- link->dpcd_caps.edp_supported_link_rates_count == 0) {
- *link_setting = link->verified_link_cap;
- return true;
- }
-
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
- initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
- initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
- initial_link_setting.use_link_rate_set = true;
- initial_link_setting.link_rate_set = 0;
- current_link_setting = initial_link_setting;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
-
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
- break;
- }
- }
- return false;
-}
-
-static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
- struct dc_link_settings *link_setting,
- uint32_t req_bw,
- enum dc_link_rate max_link_rate)
-{
- struct dc_link_settings initial_link_setting;
- struct dc_link_settings current_link_setting;
- uint32_t link_bw;
-
- unsigned int policy = 0;
-
- policy = link->panel_config.dsc.force_dsc_edp_policy;
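-	/* Non-zero policy: minimize lane count by preferring to raise the link rate
-	 * first; zero policy: minimize link rate by raising lane count first.
-	 */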
- if (max_link_rate == LINK_RATE_UNKNOWN)
- max_link_rate = link->verified_link_cap.link_rate;
- /*
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
- if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
- link->dpcd_caps.edp_supported_link_rates_count == 0)) {
- /* for DSC enabled case, we search for minimum lane count */
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
- initial_link_setting.link_rate = LINK_RATE_LOW;
- initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
- initial_link_setting.use_link_rate_set = false;
- initial_link_setting.link_rate_set = 0;
- current_link_setting = initial_link_setting;
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
- return false;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
- if (policy) {
- /* minimize lane */
- if (current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
- } else {
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- current_link_setting.link_rate = initial_link_setting.link_rate;
- } else
- break;
- }
- } else {
- /* minimize link rate */
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- }
- }
- }
- return false;
- }
-
-	/* if optimized eDP link rates are supported */
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
- initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
- initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
- initial_link_setting.use_link_rate_set = true;
- initial_link_setting.link_rate_set = 0;
- current_link_setting = initial_link_setting;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
- if (policy) {
- /* minimize lane */
- if (current_link_setting.link_rate_set <
- link->dpcd_caps.edp_supported_link_rates_count
- && current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- } else {
- if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- } else
- break;
- }
- } else {
- /* minimize link rate */
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
- break;
- }
- }
- }
- return false;
-}
-
-static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
-{
- *link_setting = link->verified_link_cap;
- return true;
-}
-
-bool decide_link_settings(struct dc_stream_state *stream,
- struct dc_link_settings *link_setting)
-{
- struct dc_link *link = stream->link;
- uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
-
- memset(link_setting, 0, sizeof(*link_setting));
-
-	/* If a preferred setting is specified through AMDDP, use it if it is
-	 * enough to drive the mode.
-	 */
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- *link_setting = link->preferred_link_setting;
- return true;
- }
-
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- decide_mst_link_settings(link, link_setting);
- } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* enable edp link optimization for DSC eDP case */
- if (stream->timing.flags.DSC) {
- enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
-
- if (link->panel_config.dsc.force_dsc_edp_policy) {
- /* calculate link max link rate cap*/
- struct dc_link_settings tmp_link_setting;
- struct dc_crtc_timing tmp_timing = stream->timing;
- uint32_t orig_req_bw;
-
- tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
- tmp_timing.flags.DSC = 0;
- orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
- decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
- max_link_rate = tmp_link_setting.link_rate;
- }
- decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
- } else {
- decide_edp_link_settings(link, link_setting, req_bw);
- }
- } else {
- decide_dp_link_settings(link, link_setting, req_bw);
- }
-
- return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
- link_setting->link_rate != LINK_RATE_UNKNOWN;
-}
-
-/*************************Short Pulse IRQ***************************/
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
-{
- /*
-	 * Don't handle RX IRQ unless one of the following is met:
- * 1) The link is established (cur_link_settings != unknown)
- * 2) We know we're dealing with a branch device, SST or MST
- */
-
- if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- is_dp_branch_device(link))
- return true;
-
- return false;
-}
-
-static bool handle_hpd_irq_psr_sink(struct dc_link *link)
-{
- union dpcd_psr_configuration psr_configuration;
-
- if (!link->psr_settings.psr_feature_enabled)
- return false;
-
- dm_helpers_dp_read_dpcd(
- link->ctx,
- link,
- 368,/*DpcdAddress_PSR_Enable_Cfg*/
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- if (psr_configuration.bits.ENABLE) {
- unsigned char dpcdbuf[3] = {0};
- union psr_error_status psr_error_status;
- union psr_sink_psr_status psr_sink_psr_status;
-
- dm_helpers_dp_read_dpcd(
- link->ctx,
- link,
- 0x2006, /*DpcdAddress_PSR_Error_Status*/
- (unsigned char *) dpcdbuf,
- sizeof(dpcdbuf));
-
- /*DPCD 2006h ERROR STATUS*/
- psr_error_status.raw = dpcdbuf[0];
- /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
- psr_sink_psr_status.raw = dpcdbuf[2];
-
- if (psr_error_status.bits.LINK_CRC_ERROR ||
- psr_error_status.bits.RFB_STORAGE_ERROR ||
- psr_error_status.bits.VSC_SDP_ERROR) {
- bool allow_active;
-
- /* Acknowledge and clear error bits */
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 8198,/*DpcdAddress_PSR_Error_Status*/
- &psr_error_status.raw,
- sizeof(psr_error_status.raw));
-
- /* PSR error, disable and re-enable PSR */
- if (link->psr_settings.psr_allow_active) {
- allow_active = false;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
- allow_active = true;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
- }
-
- return true;
- } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
-			PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB) {
-			/* No error is detected and PSR is active.
-			 * Return with the IRQ_HPD handled without checking
-			 * for loss of sync, since PSR would have powered
-			 * down the main link.
-			 */
- return true;
- }
- }
- return false;
-}
-
-static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
-{
- switch (test_rate) {
- case DP_TEST_LINK_RATE_RBR:
- return LINK_RATE_LOW;
- case DP_TEST_LINK_RATE_HBR:
- return LINK_RATE_HIGH;
- case DP_TEST_LINK_RATE_HBR2:
- return LINK_RATE_HIGH2;
- case DP_TEST_LINK_RATE_HBR3:
- return LINK_RATE_HIGH3;
- case DP_TEST_LINK_RATE_UHBR10:
- return LINK_RATE_UHBR10;
- case DP_TEST_LINK_RATE_UHBR20:
- return LINK_RATE_UHBR20;
- case DP_TEST_LINK_RATE_UHBR13_5:
- return LINK_RATE_UHBR13_5;
- default:
- return LINK_RATE_UNKNOWN;
- }
-}
-
-static void dp_test_send_link_training(struct dc_link *link)
-{
- struct dc_link_settings link_settings = {0};
- uint8_t test_rate = 0;
-
- core_link_read_dpcd(
- link,
- DP_TEST_LANE_COUNT,
- (unsigned char *)(&link_settings.lane_count),
- 1);
- core_link_read_dpcd(
- link,
- DP_TEST_LINK_RATE,
- &test_rate,
- 1);
- link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
-
- /* Set preferred link settings */
- link->verified_link_cap.lane_count = link_settings.lane_count;
- link->verified_link_cap.link_rate = link_settings.link_rate;
-
- dp_retrain_link_dp_test(link, &link_settings, false);
-}
-
-/* TODO: Raven HBR2 compliance eye output is unstable
- * (toggling on and off) with a debugger break.
- * This causes intermittent PHY automation failure.
- * Need to look into the root cause. */
-static void dp_test_send_phy_test_pattern(struct dc_link *link)
-{
- union phy_test_pattern dpcd_test_pattern;
- union lane_adjust dpcd_lane_adjustment[2];
- unsigned char dpcd_post_cursor_2_adjustment = 0;
- unsigned char test_pattern_buffer[
- (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
- DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
- unsigned int test_pattern_size = 0;
- enum dp_test_pattern test_pattern;
- union lane_adjust dpcd_lane_adjust;
- unsigned int lane;
- struct link_training_settings link_training_settings;
-
- dpcd_test_pattern.raw = 0;
- memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
- memset(&link_training_settings, 0, sizeof(link_training_settings));
-
- /* get phy test pattern and pattern parameters from DP receiver */
- core_link_read_dpcd(
- link,
- DP_PHY_TEST_PATTERN,
- &dpcd_test_pattern.raw,
- sizeof(dpcd_test_pattern));
- core_link_read_dpcd(
- link,
- DP_ADJUST_REQUEST_LANE0_1,
- &dpcd_lane_adjustment[0].raw,
- sizeof(dpcd_lane_adjustment));
-
- /* prepare link training settings */
- link_training_settings.link_settings = link->cur_link_settings;
-
- link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
-
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
- dp_fixed_vs_pe_read_lane_adjust(
- link,
- link_training_settings.dpcd_lane_settings);
-
-	/* get post cursor 2 parameters
-	 * For DP 1.1a or earlier, this DPCD register's value is 0
- * For DP 1.2 or later:
- * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
- * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
- */
- core_link_read_dpcd(
- link,
- DP_ADJUST_REQUEST_POST_CURSOR2,
- &dpcd_post_cursor_2_adjustment,
- sizeof(dpcd_post_cursor_2_adjustment));
-
- /* translate request */
- switch (dpcd_test_pattern.bits.PATTERN) {
- case PHY_TEST_PATTERN_D10_2:
- test_pattern = DP_TEST_PATTERN_D102;
- break;
- case PHY_TEST_PATTERN_SYMBOL_ERROR:
- test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
- break;
- case PHY_TEST_PATTERN_PRBS7:
- test_pattern = DP_TEST_PATTERN_PRBS7;
- break;
- case PHY_TEST_PATTERN_80BIT_CUSTOM:
- test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
- break;
- case PHY_TEST_PATTERN_CP2520_1:
- /* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
- DP_TEST_PATTERN_TRAINING_PATTERN4 :
- DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
- break;
- case PHY_TEST_PATTERN_CP2520_2:
- /* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
- DP_TEST_PATTERN_TRAINING_PATTERN4 :
- DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
- break;
- case PHY_TEST_PATTERN_CP2520_3:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
- break;
- case PHY_TEST_PATTERN_128b_132b_TPS1:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
- break;
- case PHY_TEST_PATTERN_128b_132b_TPS2:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
- break;
- case PHY_TEST_PATTERN_PRBS9:
- test_pattern = DP_TEST_PATTERN_PRBS9;
- break;
- case PHY_TEST_PATTERN_PRBS11:
- test_pattern = DP_TEST_PATTERN_PRBS11;
- break;
- case PHY_TEST_PATTERN_PRBS15:
- test_pattern = DP_TEST_PATTERN_PRBS15;
- break;
- case PHY_TEST_PATTERN_PRBS23:
- test_pattern = DP_TEST_PATTERN_PRBS23;
- break;
- case PHY_TEST_PATTERN_PRBS31:
- test_pattern = DP_TEST_PATTERN_PRBS31;
- break;
- case PHY_TEST_PATTERN_264BIT_CUSTOM:
- test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
- break;
- case PHY_TEST_PATTERN_SQUARE_PULSE:
- test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
- break;
- default:
- test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
- break;
- }
-
- if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
- test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
- DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
- core_link_read_dpcd(
- link,
- DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- test_pattern_buffer,
- test_pattern_size);
- }
-
- if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
- test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
- core_link_read_dpcd(
- link,
- DP_PHY_SQUARE_PATTERN,
- test_pattern_buffer,
- test_pattern_size);
- }
-
- if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
- test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
- DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
- core_link_read_dpcd(
- link,
- DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
- test_pattern_buffer,
- test_pattern_size);
- }
-
- for (lane = 0; lane <
- (unsigned int)(link->cur_link_settings.lane_count);
- lane++) {
- dpcd_lane_adjust.raw =
- get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
- if (dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_8b_10b_ENCODING) {
- link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)
- (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
- link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)
- (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
- link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
- (enum dc_post_cursor2)
- ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
- } else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_128b_132b_ENCODING) {
- link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
- dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
- }
- }
-
- dp_hw_to_dpcd_lane_settings(&link_training_settings,
- link_training_settings.hw_lane_settings,
- link_training_settings.dpcd_lane_settings);
-	/* Usage: measure the DP physical lane signal with
-	 * DP SI test equipment automatically.
-	 * The PHY test pattern request is generated by the equipment via HPD interrupt.
-	 * HPD needs to be active all the time; do not touch it.
-	 * Forward the request to DS.
-	 */
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
- &link_training_settings,
- test_pattern_buffer,
- test_pattern_size);
-}
-
-static void dp_test_send_link_test_pattern(struct dc_link *link)
-{
- union link_test_pattern dpcd_test_pattern;
- union test_misc dpcd_test_params;
- enum dp_test_pattern test_pattern;
- enum dp_test_pattern_color_space test_pattern_color_space =
- DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
- enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
- struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = NULL;
- int i;
-
- memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
- memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream == NULL)
- continue;
-
- if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
- pipe_ctx = &pipes[i];
- break;
- }
- }
-
- if (pipe_ctx == NULL)
- return;
-
- /* get link test pattern and pattern parameters */
- core_link_read_dpcd(
- link,
- DP_TEST_PATTERN,
- &dpcd_test_pattern.raw,
- sizeof(dpcd_test_pattern));
- core_link_read_dpcd(
- link,
- DP_TEST_MISC0,
- &dpcd_test_params.raw,
- sizeof(dpcd_test_params));
-
- switch (dpcd_test_pattern.bits.PATTERN) {
- case LINK_TEST_PATTERN_COLOR_RAMP:
- test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
- break;
- case LINK_TEST_PATTERN_VERTICAL_BARS:
- test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
- break; /* black and white */
- case LINK_TEST_PATTERN_COLOR_SQUARES:
- test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
- TEST_DYN_RANGE_VESA ?
- DP_TEST_PATTERN_COLOR_SQUARES :
- DP_TEST_PATTERN_COLOR_SQUARES_CEA);
- break;
- default:
- test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
- break;
- }
-
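-	/* MISC0 CLR_FORMAT == 0 selects RGB; otherwise the YCBCR_COEFS bit
-	 * picks between BT.709 and BT.601 YCbCr coefficients.
-	 */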
- if (dpcd_test_params.bits.CLR_FORMAT == 0)
- test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
- else
- test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
-
- switch (dpcd_test_params.bits.BPC) {
- case 0: // 6 bits
- requestColorDepth = COLOR_DEPTH_666;
- break;
- case 1: // 8 bits
- requestColorDepth = COLOR_DEPTH_888;
- break;
- case 2: // 10 bits
- requestColorDepth = COLOR_DEPTH_101010;
- break;
- case 3: // 12 bits
- requestColorDepth = COLOR_DEPTH_121212;
- break;
- default:
- break;
- }
-
- switch (dpcd_test_params.bits.CLR_FORMAT) {
- case 0:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
- break;
- case 1:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422;
- break;
- case 2:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444;
- break;
- default:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
- break;
- }
-
-
- if (requestColorDepth != COLOR_DEPTH_UNDEFINED
- && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
- DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
- __func__,
- pipe_ctx->stream->timing.display_color_depth,
- requestColorDepth);
- pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
- }
-
- dp_update_dsc_config(pipe_ctx);
-
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- test_pattern_color_space,
- NULL,
- NULL,
- 0);
-}
-
-static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
-{
- union audio_test_mode dpcd_test_mode = {0};
- struct audio_test_pattern_type dpcd_pattern_type = {0};
- union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
- enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
-
- struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = &pipes[0];
- unsigned int channel_count;
- unsigned int channel = 0;
- unsigned int modes = 0;
- unsigned int sampling_rate_in_hz = 0;
-
- // get audio test mode and test pattern parameters
- core_link_read_dpcd(
- link,
- DP_TEST_AUDIO_MODE,
- &dpcd_test_mode.raw,
- sizeof(dpcd_test_mode));
-
- core_link_read_dpcd(
- link,
- DP_TEST_AUDIO_PATTERN_TYPE,
- &dpcd_pattern_type.value,
- sizeof(dpcd_pattern_type));
-
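-	/* The DPCD field encodes the channel count minus one; clamp it to the
-	 * number of audio channels the driver supports.
-	 */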
- channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
-
-	// read pattern periods for the requested channels when a sawtooth or operator-defined pattern is requested
- if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
- dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
-
- test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
- DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
- // read period for each channel
- for (channel = 0; channel < channel_count; channel++) {
- core_link_read_dpcd(
- link,
- DP_TEST_AUDIO_PERIOD_CH1 + channel,
- &dpcd_pattern_period[channel].raw,
- sizeof(dpcd_pattern_period[channel]));
- }
- }
-
- // translate sampling rate
- switch (dpcd_test_mode.bits.sampling_rate) {
- case AUDIO_SAMPLING_RATE_32KHZ:
- sampling_rate_in_hz = 32000;
- break;
- case AUDIO_SAMPLING_RATE_44_1KHZ:
- sampling_rate_in_hz = 44100;
- break;
- case AUDIO_SAMPLING_RATE_48KHZ:
- sampling_rate_in_hz = 48000;
- break;
- case AUDIO_SAMPLING_RATE_88_2KHZ:
- sampling_rate_in_hz = 88200;
- break;
- case AUDIO_SAMPLING_RATE_96KHZ:
- sampling_rate_in_hz = 96000;
- break;
- case AUDIO_SAMPLING_RATE_176_4KHZ:
- sampling_rate_in_hz = 176400;
- break;
- case AUDIO_SAMPLING_RATE_192KHZ:
- sampling_rate_in_hz = 192000;
- break;
- default:
- sampling_rate_in_hz = 0;
- break;
- }
-
- link->audio_test_data.flags.test_requested = 1;
- link->audio_test_data.flags.disable_video = disable_video;
- link->audio_test_data.sampling_rate = sampling_rate_in_hz;
- link->audio_test_data.channel_count = channel_count;
- link->audio_test_data.pattern_type = test_pattern;
-
- if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
- for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
- link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
- }
- }
-}
-
-void dc_link_dp_handle_automated_test(struct dc_link *link)
-{
- union test_request test_request;
- union test_response test_response;
-
- memset(&test_request, 0, sizeof(test_request));
- memset(&test_response, 0, sizeof(test_response));
-
- core_link_read_dpcd(
- link,
- DP_TEST_REQUEST,
- &test_request.raw,
- sizeof(union test_request));
- if (test_request.bits.LINK_TRAINING) {
- /* ACK first to let DP RX test box monitor LT sequence */
- test_response.bits.ACK = 1;
- core_link_write_dpcd(
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
- dp_test_send_link_training(link);
-		/* no further acknowledgement is needed */
- test_response.bits.ACK = 0;
- }
- if (test_request.bits.LINK_TEST_PATTRN) {
- dp_test_send_link_test_pattern(link);
- test_response.bits.ACK = 1;
- }
-
- if (test_request.bits.AUDIO_TEST_PATTERN) {
- dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
- test_response.bits.ACK = 1;
- }
-
- if (test_request.bits.PHY_TEST_PATTERN) {
- dp_test_send_phy_test_pattern(link);
- test_response.bits.ACK = 1;
- }
-
- /* send request acknowledgment */
- if (test_response.bits.ACK)
- core_link_write_dpcd(
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
-}
-
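-/* On link loss, disable every active stream on this link and then re-enable
- * it. When the link is under automated (CTS) testing, the re-enable uses the
- * maximum verified link settings.
- */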
-void dc_link_dp_handle_link_loss(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- break;
- }
-
- if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_disable_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off
- && pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- // Always use max settings here for DP 1.4a LL Compliance CTS
- if (link->is_automated) {
- pipe_ctx->link_config.dp_link_settings.lane_count =
- link->verified_link_cap.lane_count;
- pipe_ctx->link_config.dp_link_settings.link_rate =
- link->verified_link_cap.link_rate;
- pipe_ctx->link_config.dp_link_settings.link_spread =
- link->verified_link_cap.link_spread;
- }
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
- }
-}
-
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work)
-{
- union hpd_irq_data hpd_irq_dpcd_data = {0};
- union device_service_irq device_service_clear = {0};
- enum dc_status result;
- bool status = false;
-
- if (out_link_loss)
- *out_link_loss = false;
-
- if (has_left_work)
- *has_left_work = false;
-	/* For use cases related to downstream connection status change,
-	 * PSR and device auto test, refer to function handle_sst_hpd_irq
-	 * in DAL2.1 */
-
- DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
- __func__, link->link_index);
-
-
-	/* All the "handle_hpd_irq_xxx()" methods
-	 * should be called only after dal_dpsst_ls_read_hpd_irq_data.
-	 * The order of the calls matters as well.
-	 */
- result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
- if (out_hpd_irq_dpcd_data)
- *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
-
- if (result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
- __func__);
- return false;
- }
-
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
-		// Workaround for DP 1.4a LL Compliance CTS, as USB4 has to share encoders, unlike DP and USB-C
- link->is_automated = true;
- device_service_clear.bits.AUTOMATED_TEST = 1;
- core_link_write_dpcd(
- link,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- &device_service_clear.raw,
- sizeof(device_service_clear.raw));
- device_service_clear.raw = 0;
- if (defer_handling && has_left_work)
- *has_left_work = true;
- else
- dc_link_dp_handle_automated_test(link);
- return false;
- }
-
- if (!dc_link_dp_allow_hpd_rx_irq(link)) {
- DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
- __func__, link->link_index);
- return false;
- }
-
- if (handle_hpd_irq_psr_sink(link))
- /* PSR-related error was detected and handled */
- return true;
-
-	/* If a PSR-related error was handled, the main link may be off,
-	 * so do not treat this as a normal sink status change interrupt.
-	 */
-
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
- if (defer_handling && has_left_work)
- *has_left_work = true;
- return true;
- }
-
- /* check if we have MST msg and return since we poll for it */
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- if (defer_handling && has_left_work)
- *has_left_work = true;
- return false;
- }
-
-	/* For now we only handle the 'Downstream port status' case.
-	 * A sink count change means the downstream port status changed,
-	 * so DM should call DC to do the detection.
-	 * NOTE: Do not handle link loss on eDP since it is an internal link. */
- if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
- hpd_rx_irq_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
- /* Connectivity log: link loss */
- CONN_DATA_LINK_LOSS(link,
- hpd_irq_dpcd_data.raw,
- sizeof(hpd_irq_dpcd_data),
- "Status: ");
-
- if (defer_handling && has_left_work)
- *has_left_work = true;
- else
- dc_link_dp_handle_link_loss(link);
-
- status = false;
- if (out_link_loss)
- *out_link_loss = true;
-
- dp_trace_link_loss_increment(link);
- }
-
- if (link->type == dc_connection_sst_branch &&
- hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
- != link->dpcd_sink_count)
- status = true;
-
-	/* reasons for HPD RX:
-	 * 1. Link Loss - i.e. re-train the link
-	 * 2. MST sideband message
-	 * 3. Automated Test - i.e. internal commit
-	 * 4. CP (copy protection) - (not interesting for DM?)
-	 * 5. DRR
-	 * 6. Downstream Port status changed
-	 *    - i.e. Detect - this is the only one
-	 *      which is interesting for DM because
-	 *      it must call dc_link_detect.
-	 */
- return status;
-}
-
-/*query dpcd for version and mst cap addresses*/
-bool is_mst_supported(struct dc_link *link)
-{
- bool mst = false;
- enum dc_status st = DC_OK;
- union dpcd_rev rev;
- union mstm_cap cap;
-
- if (link->preferred_training_settings.mst_enable &&
- *link->preferred_training_settings.mst_enable == false) {
- return false;
- }
-
- rev.raw = 0;
- cap.raw = 0;
-
- st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
- sizeof(rev));
-
- if (st == DC_OK && rev.raw >= DPCD_REV_12) {
-
- st = core_link_read_dpcd(link, DP_MSTM_CAP,
- &cap.raw, sizeof(cap));
- if (st == DC_OK && cap.bits.MST_CAP == 1)
- mst = true;
- }
- return mst;
-
-}
-
-bool is_dp_active_dongle(const struct dc_link *link)
-{
- return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) &&
- (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER);
-}
-
-bool is_dp_branch_device(const struct dc_link *link)
-{
- return link->dpcd_caps.is_branch_dev;
-}
-
-static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
-{
- switch (bpc) {
- case DOWN_STREAM_MAX_8BPC:
- return 8;
- case DOWN_STREAM_MAX_10BPC:
- return 10;
- case DOWN_STREAM_MAX_12BPC:
- return 12;
- case DOWN_STREAM_MAX_16BPC:
- return 16;
- default:
- break;
- }
-
- return -1;
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
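-/* Translate the raw 3-bit encoded HDMI FRL bandwidth field into a link
- * bandwidth in kbps (9/18/24/32/40/48 Gbps); unrecognized values map to 0.
- */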
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
-{
- switch (bw) {
- case 0b001:
- return 9000000;
- case 0b010:
- return 18000000;
- case 0b011:
- return 24000000;
- case 0b100:
- return 32000000;
- case 0b101:
- return 40000000;
- case 0b110:
- return 48000000;
- }
-
- return 0;
-}
-
-/*
- * Return the PCON's post-FRL-link-training supported BW if it is non-zero,
- * otherwise return max_supported_frl_bw_in_kbps.
- */
-static uint32_t intersect_frl_link_bw_support(
- const uint32_t max_supported_frl_bw_in_kbps,
- const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
-{
- uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
-
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
- supported_bw_in_kbps = 48000000;
- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
- supported_bw_in_kbps = 40000000;
- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
- supported_bw_in_kbps = 32000000;
- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
- supported_bw_in_kbps = 24000000;
- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
- supported_bw_in_kbps = 18000000;
- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
- supported_bw_in_kbps = 9000000;
- }
-
- return supported_bw_in_kbps;
-}
-#endif
-
-static void read_dp_device_vendor_id(struct dc_link *link)
-{
- struct dp_device_vendor_id dp_id;
-
- /* read IEEE branch device id */
- core_link_read_dpcd(
- link,
- DP_BRANCH_OUI,
- (uint8_t *)&dp_id,
- sizeof(dp_id));
-
- link->dpcd_caps.branch_dev_id =
- (dp_id.ieee_oui[0] << 16) +
- (dp_id.ieee_oui[1] << 8) +
- dp_id.ieee_oui[2];
-
- memmove(
- link->dpcd_caps.branch_dev_name,
- dp_id.ieee_device_id,
- sizeof(dp_id.ieee_device_id));
-}
-
-
-
-static void get_active_converter_info(
- uint8_t data, struct dc_link *link)
-{
- union dp_downstream_port_present ds_port = { .byte = data };
- memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
-
- /* decode converter info*/
- if (!ds_port.fields.PORT_PRESENT) {
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
- ddc_service_set_dongle_type(link->ddc,
- link->dpcd_caps.dongle_type);
- link->dpcd_caps.is_branch_dev = false;
- return;
- }
-
-	/* DPCD 0x5 bit 0 = 1 indicates this is a branch device */
- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
-
- switch (ds_port.fields.PORT_TYPE) {
- case DOWNSTREAM_VGA:
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
- break;
- case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
-		/* At this point we don't know whether it is DVI, HDMI or DP++;
-		 * assume DVI. */
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
- break;
- default:
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
- break;
- }
-
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
-		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects the source to read Detailed Capabilities Info: 00080h-0008Fh. */
- union dwnstream_port_caps_byte0 *port_caps =
- (union dwnstream_port_caps_byte0 *)det_caps;
- if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
- det_caps, sizeof(det_caps)) == DC_OK) {
-
- switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
- /*Handle DP case as DONGLE_NONE*/
- case DOWN_STREAM_DETAILED_DP:
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
- break;
- case DOWN_STREAM_DETAILED_VGA:
- link->dpcd_caps.dongle_type =
- DISPLAY_DONGLE_DP_VGA_CONVERTER;
- break;
- case DOWN_STREAM_DETAILED_DVI:
- link->dpcd_caps.dongle_type =
- DISPLAY_DONGLE_DP_DVI_CONVERTER;
- break;
- case DOWN_STREAM_DETAILED_HDMI:
- case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
-			/* Handle the DP++ active converter case; process DP++ as HDMI per the DP 1.4 spec. */
- link->dpcd_caps.dongle_type =
- DISPLAY_DONGLE_DP_HDMI_CONVERTER;
-
- link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
- if (ds_port.fields.DETAILED_CAPS) {
-
- union dwnstream_port_caps_byte3_hdmi
- hdmi_caps = {.raw = det_caps[3] };
- union dwnstream_port_caps_byte2
- hdmi_color_caps = {.raw = det_caps[2] };
- link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
- det_caps[1] * 2500;
-
- link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
- hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
- /*YCBCR capability only for HDMI case*/
- if (port_caps->bits.DWN_STRM_PORTX_TYPE
- == DOWN_STREAM_DETAILED_HDMI) {
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
- hdmi_caps.bits.YCrCr422_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
- hdmi_caps.bits.YCrCr420_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
- hdmi_caps.bits.YCrCr422_CONVERSION;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
- hdmi_caps.bits.YCrCr420_CONVERSION;
- }
-
- link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
- translate_dpcd_max_bpc(
- hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link->dc->caps.dp_hdmi21_pcon_support) {
- union hdmi_encoded_link_bw hdmi_encoded_link_bw;
-
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
- dc_link_bw_kbps_from_raw_frl_link_rate_data(
- hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
-
- // Intersect reported max link bw support with the supported link rate post FRL link training
- if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
- &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
- hdmi_encoded_link_bw);
- }
-
- if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
- }
-#endif
-
- if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
- }
-
- break;
- }
- }
- }
-
- ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
-
- {
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
-
- core_link_read_dpcd(
- link,
- DP_BRANCH_REVISION_START,
- (uint8_t *)&dp_hw_fw_revision,
- sizeof(dp_hw_fw_revision));
-
- link->dpcd_caps.branch_hw_revision =
- dp_hw_fw_revision.ieee_hw_rev;
-
- memmove(
- link->dpcd_caps.branch_fw_revision,
- dp_hw_fw_revision.ieee_fw_rev,
- sizeof(dp_hw_fw_revision.ieee_fw_rev));
- }
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
- union dp_dfp_cap_ext dfp_cap_ext;
- memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
- core_link_read_dpcd(
- link,
- DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
- dfp_cap_ext.raw,
- sizeof(dfp_cap_ext.raw));
- link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
- dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
- (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
- link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
- dfp_cap_ext.fields.max_video_h_active_width[0] +
- (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
- link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
- dfp_cap_ext.fields.max_video_v_active_height[0] +
- (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
- link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
- dfp_cap_ext.fields.encoding_format_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
- dfp_cap_ext.fields.rgb_color_depth_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
- dfp_cap_ext.fields.ycbcr444_color_depth_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
- dfp_cap_ext.fields.ycbcr422_color_depth_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
- dfp_cap_ext.fields.ycbcr420_color_depth_caps;
- DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
- DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
- DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
- DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
- DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
- }
-}
-
-static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
- int length)
-{
- int retry = 0;
-
- if (!link->dpcd_caps.dpcd_rev.raw) {
- do {
- dp_receiver_power_ctrl(link, true);
- core_link_read_dpcd(link, DP_DPCD_REV,
- dpcd_data, length);
- link->dpcd_caps.dpcd_rev.raw = dpcd_data[
- DP_DPCD_REV -
- DP_DPCD_REV];
- } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
- }
-
- if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
- switch (link->dpcd_caps.branch_dev_id) {
-		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
-		 * all internal circuits, including AUX communication, which
-		 * prevents reading the DPCD table and EDID (a spec violation).
-		 * The encoder will skip DP RX power down on disable_output to
-		 * keep the receiver powered all the time. */
- case DP_BRANCH_DEVICE_ID_0010FA:
- case DP_BRANCH_DEVICE_ID_0080E1:
- case DP_BRANCH_DEVICE_ID_00E04C:
- link->wa_flags.dp_keep_receiver_powered = true;
- break;
-
- /* TODO: May need work around for other dongles. */
- default:
- link->wa_flags.dp_keep_receiver_powered = false;
- break;
- }
- } else
- link->wa_flags.dp_keep_receiver_powered = false;
-}
-
-/* Read additional sink caps defined in the source-specific DPCD area.
- * This function currently only reads from the SinkCapability address (DP_SOURCE_SINK_CAP).
- */
-static bool dpcd_read_sink_ext_caps(struct dc_link *link)
-{
- uint8_t dpcd_data;
-
- if (!link)
- return false;
-
- if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
- return false;
-
- link->dpcd_sink_ext_caps.raw = dpcd_data;
- return true;
-}
-
-enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
-{
- uint8_t lttpr_dpcd_data[8];
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
-
- /* Logic to determine LTTPR support*/
- bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
-
- if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
- return false;
-
-	/* By reading the LTTPR capability registers, the RX assumes that we
-	 * will enable the LTTPR extended AUX timeout if an LTTPR is present.
-	 */
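-	/* The LTTPR capability registers are read as a single 8-byte block
-	 * starting at the field data structure revision register; each field
-	 * below is indexed relative to that base address.
-	 */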
- status = core_link_read_dpcd(link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- lttpr_dpcd_data,
- sizeof(lttpr_dpcd_data));
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-	/* If this chip cap is set, at least one retimer must exist in the chain.
-	 * Override the count to 1 if we receive a known bad count (0 or an
-	 * invalid value).
-	 */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
- ASSERT(0);
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
- DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- }
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = dp_is_lttpr_present(link);
-
- if (is_lttpr_present)
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
-
- DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
- return status;
-}
-
-bool dp_is_lttpr_present(struct dc_link *link)
-{
- return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
-}
-
-enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting)
-{
- enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting);
-
- if (encoding == DP_8b_10b_ENCODING)
- return dp_decide_8b_10b_lttpr_mode(link);
- else if (encoding == DP_128b_132b_ENCODING)
- return dp_decide_128b_132b_lttpr_mode(link);
-
- ASSERT(0);
- return LTTPR_MODE_NON_LTTPR;
-}
-
-void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
-{
- if (!dp_is_lttpr_present(link))
- return;
-
- if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
- *override = LTTPR_MODE_TRANSPARENT;
- } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
- *override = LTTPR_MODE_NON_TRANSPARENT;
- } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
- *override = LTTPR_MODE_NON_LTTPR;
- }
- DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
-}
-
-enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
-{
- bool is_lttpr_present = dp_is_lttpr_present(link);
- bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
- bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
-
- if (!is_lttpr_present)
- return LTTPR_MODE_NON_LTTPR;
-
- if (vbios_lttpr_aware) {
- if (vbios_lttpr_force_non_transparent) {
- DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
- return LTTPR_MODE_NON_TRANSPARENT;
- } else {
-			DC_LOG_DC("chose LTTPR_MODE_TRANSPARENT by default because VBIOS did not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE to 1.\n");
- return LTTPR_MODE_TRANSPARENT;
- }
- }
-
- if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- link->dc->caps.extended_aux_timeout_support) {
-		DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT because dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A is set and extended AUX timeout is supported.\n");
- return LTTPR_MODE_NON_TRANSPARENT;
- }
-
- DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
- return LTTPR_MODE_NON_LTTPR;
-}
-
-enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
-{
- enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
-
- if (dp_is_lttpr_present(link))
- mode = LTTPR_MODE_NON_TRANSPARENT;
-
- DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
- return mode;
-}
-
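-/* Query DMUB for the USB-C cable ID. Only applicable to physical PHY
- * endpoints whose link encoder is routed over USB-C; returns true only when
- * DMUB reports a successful query.
- */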
-static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
-{
- union dmub_rb_cmd cmd;
-
- if (!link->ctx->dmub_srv ||
- link->ep_type != DISPLAY_ENDPOINT_PHY ||
- link->link_enc->features.flags.bits.DP_IS_USB_C == 0)
- return false;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID;
- cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
- cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
- link->dc, link->link_enc->transmitter);
- if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1) {
- cable_id->raw = cmd.cable_id.data.output_raw;
- DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
- }
- return cmd.cable_id.header.ret_status == 1;
-}
-
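-/* Combine two cable ID reports by taking the minimum of each UHBR capability
- * bit and the maximum of the CABLE_TYPE field.
- */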
-static union dp_cable_id intersect_cable_id(
- union dp_cable_id *a, union dp_cable_id *b)
-{
- union dp_cable_id out;
-
- out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY,
- b->bits.UHBR10_20_CAPABILITY);
- out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY,
- b->bits.UHBR13_5_CAPABILITY);
- out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE);
-
- return out;
-}
-
-static void retrieve_cable_id(struct dc_link *link)
-{
- union dp_cable_id usbc_cable_id;
-
- link->dpcd_caps.cable_id.raw = 0;
- core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
- &link->dpcd_caps.cable_id.raw, sizeof(uint8_t));
-
- if (get_usbc_cable_id(link, &usbc_cable_id))
- link->dpcd_caps.cable_id = intersect_cable_id(
- &link->dpcd_caps.cable_id, &usbc_cable_id);
-}
-
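-/* Poll DP_SET_POWER until the sink's AUX channel replies, waiting 1 ms
- * between attempts (up to 10 tries). If it never replies, toggle the sink
- * through D0 and back to D3 and report an error.
- */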
-static enum dc_status wake_up_aux_channel(struct dc_link *link)
-{
- enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t aux_channel_retry_cnt = 0;
- uint8_t dpcd_power_state = '\0';
-
- while (status != DC_OK && aux_channel_retry_cnt < 10) {
- status = core_link_read_dpcd(link, DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
-		/* Delay 1 ms if the AUX CH is in a power down state. Per spec
-		 * section 2.3.1.2, the AUX CH may be powered down due to a
-		 * write of 2 to DPCD 600h. The sink AUX CH monitors the
-		 * differential signal and may need up to 1 ms before it can reply.
-		 */
- if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
- udelay(1000);
- aux_channel_retry_cnt++;
- }
- }
-
- if (status != DC_OK) {
- dpcd_power_state = DP_SET_POWER_D0;
- status = core_link_write_dpcd(
- link,
- DP_SET_POWER,
- &dpcd_power_state,
- sizeof(dpcd_power_state));
-
- dpcd_power_state = DP_SET_POWER_D3;
- status = core_link_write_dpcd(
- link,
- DP_SET_POWER,
- &dpcd_power_state,
- sizeof(dpcd_power_state));
- return DC_ERROR_UNEXPECTED;
- }
-
- return DC_OK;
-}
-
-static bool retrieve_link_cap(struct dc_link *link)
-{
-	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
-	 * which means a size of 16 is sufficient for both of those DPCD register block reads
-	 */
- uint8_t dpcd_data[16];
- /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
- */
- uint8_t dpcd_dprx_data = '\0';
-
- struct dp_device_vendor_id sink_id;
- union down_stream_port_count down_strm_port_count;
- union edp_configuration_cap edp_config_cap;
- union dp_downstream_port_present ds_port = { 0 };
- enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
- int i;
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
- const uint32_t post_oui_delay = 30; // 30ms
-
- memset(dpcd_data, '\0', sizeof(dpcd_data));
- memset(&down_strm_port_count,
- '\0', sizeof(union down_stream_port_count));
- memset(&edp_config_cap, '\0',
- sizeof(union edp_configuration_cap));
-
- /* if extended timeout is supported in hardware,
- * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
- * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
- */
- dc_link_aux_try_to_configure_timeout(link->ddc,
- LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
-
- status = dp_retrieve_lttpr_cap(link);
-
- if (status != DC_OK) {
- status = wake_up_aux_channel(link);
- if (status == DC_OK)
- dp_retrieve_lttpr_cap(link);
- else
- return false;
- }
-
- if (dp_is_lttpr_present(link))
- configure_lttpr_mode_transparent(link);
-
- /* Read DP tunneling information. */
- status = dpcd_get_tunneling_device_data(link);
-
- dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
-
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
- if (status == DC_OK)
- break;
- }
-
- if (status != DC_OK) {
- dm_error("%s: Read receiver caps dpcd data failed.\n", __func__);
- return false;
- }
-
- if (!dp_is_lttpr_present(link))
- dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
-
- {
- union training_aux_rd_interval aux_rd_interval;
-
- aux_rd_interval.raw =
- dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
-
- link->dpcd_caps.ext_receiver_cap_field_present =
- aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
-
- if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
- uint8_t ext_cap_data[16];
-
- memset(ext_cap_data, '\0', sizeof(ext_cap_data));
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DP13_DPCD_REV,
- ext_cap_data,
- sizeof(ext_cap_data));
- if (status == DC_OK) {
- memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
- break;
- }
- }
- if (status != DC_OK)
- dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
- }
- }
-
- link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
-
- if (link->dpcd_caps.ext_receiver_cap_field_present) {
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dpcd_dprx_data,
- sizeof(dpcd_dprx_data));
- if (status == DC_OK)
- break;
- }
-
- link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
-
- if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed.\n", __func__);
-	} else {
- link->dpcd_caps.dprx_feature.raw = 0;
- }
-
- /* Error condition checking...
- * It is impossible for Sink to report Max Lane Count = 0.
- * It is possible for Sink to report Max Link Rate = 0, if it is
- * an eDP device that is reporting specialized link rates in the
- * SUPPORTED_LINK_RATE table.
- */
- if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
- return false;
-
- ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
- DP_DPCD_REV];
-
- read_dp_device_vendor_id(link);
-
- /* TODO - decouple raw mst capability from policy decision */
- link->dpcd_caps.is_mst_capable = is_mst_supported(link);
-
- get_active_converter_info(ds_port.byte, link);
-
- dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
-
- down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
- DP_DPCD_REV];
-
- link->dpcd_caps.allow_invalid_MSA_timing_param =
- down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
-
- link->dpcd_caps.max_ln_count.raw = dpcd_data[
- DP_MAX_LANE_COUNT - DP_DPCD_REV];
-
- link->dpcd_caps.max_down_spread.raw = dpcd_data[
- DP_MAX_DOWNSPREAD - DP_DPCD_REV];
-
- link->reported_link_cap.lane_count =
- link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
- link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw(
- dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]);
- link->reported_link_cap.link_spread =
- link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
-
- edp_config_cap.raw = dpcd_data[
- DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
- link->dpcd_caps.panel_mode_edp =
- edp_config_cap.bits.ALT_SCRAMBLER_RESET;
- link->dpcd_caps.dpcd_display_control_capable =
- edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
- link->dpcd_caps.channel_coding_cap.raw =
- dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV];
- link->test_pattern_enabled = false;
- link->compliance_test_state.raw = 0;
-
- /* read sink count */
- core_link_read_dpcd(link,
- DP_SINK_COUNT,
- &link->dpcd_caps.sink_count.raw,
- sizeof(link->dpcd_caps.sink_count.raw));
-
- /* read sink ieee oui */
- core_link_read_dpcd(link,
- DP_SINK_OUI,
- (uint8_t *)(&sink_id),
- sizeof(sink_id));
-
- link->dpcd_caps.sink_dev_id =
- (sink_id.ieee_oui[0] << 16) +
- (sink_id.ieee_oui[1] << 8) +
- (sink_id.ieee_oui[2]);
-
- memmove(
- link->dpcd_caps.sink_dev_id_str,
- sink_id.ieee_device_id,
- sizeof(sink_id.ieee_device_id));
-
- /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
- {
- uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
-
- if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
- !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
- sizeof(str_mbp_2017))) {
-			link->reported_link_cap.link_rate = 0x0c;	/* 3.24 Gbps, i.e. LINK_RATE_RBR2 */
- }
- }
-
- core_link_read_dpcd(
- link,
- DP_SINK_HW_REVISION_START,
- (uint8_t *)&dp_hw_fw_revision,
- sizeof(dp_hw_fw_revision));
-
- link->dpcd_caps.sink_hw_revision =
- dp_hw_fw_revision.ieee_hw_rev;
-
- memmove(
- link->dpcd_caps.sink_fw_revision,
- dp_hw_fw_revision.ieee_fw_rev,
- sizeof(dp_hw_fw_revision.ieee_fw_rev));
-
- /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
- {
- uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
- uint8_t fwrev_mbp_2018[] = { 7, 4 };
- uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
-
- /* We also check for the firmware revision as 16,1 models have an
- * identical device id and are incorrectly quirked otherwise.
- */
- if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
- !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
- sizeof(str_mbp_2018)) &&
- (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
- sizeof(fwrev_mbp_2018)) ||
- !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
- sizeof(fwrev_mbp_2018_vega)))) {
- link->reported_link_cap.link_rate = LINK_RATE_RBR2;
- }
- }
-
- memset(&link->dpcd_caps.dsc_caps, '\0',
- sizeof(link->dpcd_caps.dsc_caps));
- memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
- /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
- status = core_link_read_dpcd(
- link,
- DP_FEC_CAPABILITY,
- &link->dpcd_caps.fec_cap.raw,
- sizeof(link->dpcd_caps.fec_cap.raw));
- status = core_link_read_dpcd(
- link,
- DP_DSC_SUPPORT,
- link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
- if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
- status = core_link_read_dpcd(
- link,
- DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
- DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
- DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
- DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
- DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
- }
-
- /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode
- * only if required.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around &&
- link->dpcd_caps.is_branch_dev &&
- link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
- link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
- (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
- link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) {
- /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA.
- * Clear FEC and DSC capabilities as a work around if that is not the case.
- */
- link->wa_flags.dpia_forced_tbt3_mode = true;
- memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps));
- memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
- DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index);
- } else
- link->wa_flags.dpia_forced_tbt3_mode = false;
- }
-
- if (!dpcd_read_sink_ext_caps(link))
- link->dpcd_sink_ext_caps.raw = 0;
-
- if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
-
- core_link_read_dpcd(link,
- DP_128b_132b_SUPPORTED_LINK_RATES,
- &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
- sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
- if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
- link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
- link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
- link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
- else
- dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
- DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index);
- DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
- link->reported_link_cap.link_rate / 100,
- link->reported_link_cap.link_rate % 100);
-
- core_link_read_dpcd(link,
- DP_SINK_VIDEO_FALLBACK_FORMATS,
- &link->dpcd_caps.fallback_formats.raw,
- sizeof(link->dpcd_caps.fallback_formats.raw));
- DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
- if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
- DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
- if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
- DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
- if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
- DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
- if (link->dpcd_caps.fallback_formats.raw == 0) {
- DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
- link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
- }
-
- core_link_read_dpcd(link,
- DP_FEC_CAPABILITY_1,
- &link->dpcd_caps.fec_cap1.raw,
- sizeof(link->dpcd_caps.fec_cap1.raw));
- DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
- if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
- DC_LOG_DP2("\tFEC aggregated error counters are supported");
- }
-
- retrieve_cable_id(link);
- dpcd_write_cable_id_to_dprx(link);
-
- /* Connectivity log: detection */
- CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
-
- return true;
-}
-
-bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
-{
- uint8_t dpcd_data[16];
- uint32_t read_dpcd_retry_cnt = 3;
- enum dc_status status = DC_ERROR_UNEXPECTED;
- union dp_downstream_port_present ds_port = { 0 };
- union down_stream_port_count down_strm_port_count;
- union edp_configuration_cap edp_config_cap;
-
- int i;
-
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
- if (status == DC_OK)
- break;
- }
-
- link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
-
- if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
- return false;
-
- ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
- DP_DPCD_REV];
-
- get_active_converter_info(ds_port.byte, link);
-
- down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
- DP_DPCD_REV];
-
- link->dpcd_caps.allow_invalid_MSA_timing_param =
- down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
-
- link->dpcd_caps.max_ln_count.raw = dpcd_data[
- DP_MAX_LANE_COUNT - DP_DPCD_REV];
-
- link->dpcd_caps.max_down_spread.raw = dpcd_data[
- DP_MAX_DOWNSPREAD - DP_DPCD_REV];
-
- link->reported_link_cap.lane_count =
- link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
- link->reported_link_cap.link_rate = dpcd_data[
- DP_MAX_LINK_RATE - DP_DPCD_REV];
- link->reported_link_cap.link_spread =
- link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
-
- edp_config_cap.raw = dpcd_data[
- DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
- link->dpcd_caps.panel_mode_edp =
- edp_config_cap.bits.ALT_SCRAMBLER_RESET;
- link->dpcd_caps.dpcd_display_control_capable =
- edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
-
- return true;
-}
-
-bool detect_dp_sink_caps(struct dc_link *link)
-{
- return retrieve_link_cap(link);
-}
-
-static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
-{
- enum dc_link_rate link_rate;
- // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
- switch (link_rate_in_khz) {
- case 1620000:
- link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane
- break;
- case 2160000:
- link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane
- break;
- case 2430000:
- link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane
- break;
- case 2700000:
- link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane
- break;
- case 3240000:
- link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2) - 3.24 Gbps/Lane
- break;
- case 4320000:
- link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane
- break;
- case 5400000:
- link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2) - 5.40 Gbps/Lane
- break;
- case 8100000:
- link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3) - 8.10 Gbps/Lane
- break;
- default:
- link_rate = LINK_RATE_UNKNOWN;
- break;
- }
- return link_rate;
-}
-
-void detect_edp_sink_caps(struct dc_link *link)
-{
- uint8_t supported_link_rates[16];
- uint32_t entry;
- uint32_t link_rate_in_khz;
- enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
- uint8_t backlight_adj_cap;
- uint8_t general_edp_cap;
-
- retrieve_link_cap(link);
- link->dpcd_caps.edp_supported_link_rates_count = 0;
- memset(supported_link_rates, 0, sizeof(supported_link_rates));
-
- /*
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->panel_config.ilr.optimize_edp_link_rate ||
- link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
-		// Read DPCD 00010h - 0001Fh (16 bytes) in one shot
- core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
- supported_link_rates, sizeof(supported_link_rates));
-
- for (entry = 0; entry < 16; entry += 2) {
-			// Each DPCD entry reports the per-lane link rate as a 16-bit
-			// capability value in units of 200 kHz; multiply to get the link rate in kHz.
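-			// e.g. a table entry of 10800 (0x2A30) gives 10800 * 200 kHz = 2,160,000 kHz (2.16 Gbps per lane)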
- link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
- supported_link_rates[entry]) * 200;
-
- if (link_rate_in_khz != 0) {
- link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
- link->dpcd_caps.edp_supported_link_rates_count++;
-
- if (link->reported_link_cap.link_rate < link_rate)
- link->reported_link_cap.link_rate = link_rate;
- }
- }
- }
- core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
- &backlight_adj_cap, sizeof(backlight_adj_cap));
-
- link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
-
- core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
- &general_edp_cap, sizeof(general_edp_cap));
-
- link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
-
- dc_link_set_default_brightness_aux(link);
-
- core_link_read_dpcd(link, DP_EDP_DPCD_REV,
- &link->dpcd_caps.edp_rev,
- sizeof(link->dpcd_caps.edp_rev));
- /*
- * PSR is only valid for eDP v1.3 or higher.
- */
- if (link->dpcd_caps.edp_rev >= DP_EDP_13) {
- core_link_read_dpcd(link, DP_PSR_SUPPORT,
- &link->dpcd_caps.psr_info.psr_version,
- sizeof(link->dpcd_caps.psr_info.psr_version));
- if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
- core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY,
- &link->dpcd_caps.psr_info.force_psrsu_cap,
- sizeof(link->dpcd_caps.psr_info.force_psrsu_cap));
- core_link_read_dpcd(link, DP_PSR_CAPS,
- &link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw));
- if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) {
- core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY,
- &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap,
- sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap));
- }
- }
-
- /*
- * ALPM is only valid for eDP v1.4 or higher.
- */
- if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
- core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP,
- &link->dpcd_caps.alpm_caps.raw,
- sizeof(link->dpcd_caps.alpm_caps.raw));
-}
-
-void dc_link_dp_enable_hpd(const struct dc_link *link)
-{
- struct link_encoder *encoder = link->link_enc;
-
- if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
- encoder->funcs->enable_hpd(encoder);
-}
-
-void dc_link_dp_disable_hpd(const struct dc_link *link)
-{
- struct link_encoder *encoder = link->link_enc;
-
-	if (encoder != NULL && encoder->funcs->disable_hpd != NULL)
- encoder->funcs->disable_hpd(encoder);
-}
-
-static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
-{
- if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
- test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
- return true;
- else
- return false;
-}
-
-static void set_crtc_test_pattern(struct dc_link *link,
- struct pipe_ctx *pipe_ctx,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space)
-{
- enum controller_dp_test_pattern controller_test_pattern;
- enum dc_color_depth color_depth = pipe_ctx->
- stream->timing.display_color_depth;
- struct bit_depth_reduction_params params;
- struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
- int width = pipe_ctx->stream->timing.h_addressable +
- pipe_ctx->stream->timing.h_border_left +
- pipe_ctx->stream->timing.h_border_right;
- int height = pipe_ctx->stream->timing.v_addressable +
- pipe_ctx->stream->timing.v_border_bottom +
- pipe_ctx->stream->timing.v_border_top;
-
- memset(&params, 0, sizeof(params));
-
- switch (test_pattern) {
- case DP_TEST_PATTERN_COLOR_SQUARES:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
- break;
- case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
- break;
- case DP_TEST_PATTERN_VERTICAL_BARS:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
- break;
- case DP_TEST_PATTERN_HORIZONTAL_BARS:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
- break;
- case DP_TEST_PATTERN_COLOR_RAMP:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
- break;
- default:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
- break;
- }
-
- switch (test_pattern) {
- case DP_TEST_PATTERN_COLOR_SQUARES:
- case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
- case DP_TEST_PATTERN_VERTICAL_BARS:
- case DP_TEST_PATTERN_HORIZONTAL_BARS:
- case DP_TEST_PATTERN_COLOR_RAMP:
- {
- /* disable bit depth reduction */
- pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- controller_test_pattern, color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- enum controller_dp_color_space controller_color_space;
- int opp_cnt = 1;
- int offset = 0;
- int dpg_width = width;
-
- switch (test_pattern_color_space) {
- case DP_TEST_PATTERN_COLOR_SPACE_RGB:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
- default:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
- DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
- ASSERT(0);
- break;
- }
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
- dpg_width = width / opp_cnt;
- offset = dpg_width;
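-			/* Each ODM pipe's pattern generator drives dpg_width pixels of
-			 * the overall pattern; subsequent pipes start at increasing
-			 * horizontal offsets.
-			 */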
-
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
- odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- odm_pipe,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- offset);
- offset += offset;
- }
- }
- }
- break;
- case DP_TEST_PATTERN_VIDEO_MODE:
- {
- /* restore bitdepth reduction */
- resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
- pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
- int dpg_width;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- dpg_width = width / opp_cnt;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
- odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- odm_pipe,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
- }
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
- }
- }
- break;
-
- default:
- break;
- }
-}
-
-bool dc_link_dp_set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = NULL;
- unsigned int lane;
- unsigned int i;
- unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
- union dpcd_training_pattern training_pattern;
- enum dpcd_phy_test_patterns pattern;
-
- memset(&training_pattern, 0, sizeof(training_pattern));
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream == NULL)
- continue;
-
- if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
- pipe_ctx = &pipes[i];
- break;
- }
- }
-
- if (pipe_ctx == NULL)
- return false;
-
- /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
- if (link->test_pattern_enabled && test_pattern ==
- DP_TEST_PATTERN_VIDEO_MODE) {
- /* Set CRTC Test Pattern */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
- dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
- (uint8_t *)p_custom_pattern,
- (uint32_t)cust_pattern_size);
-
- /* Unblank Stream */
- link->dc->hwss.unblank_stream(
- pipe_ctx,
- &link->verified_link_cap);
- /* TODO:m_pHwss->MuteAudioEndpoint
- * (pPathMode->pDisplayPath, false);
- */
-
- /* Reset Test Pattern state */
- link->test_pattern_enabled = false;
-
- return true;
- }
-
- /* Check for PHY Test Patterns */
- if (is_dp_phy_pattern(test_pattern)) {
- /* Set DPCD Lane Settings before running test pattern */
- if (p_link_settings != NULL) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- dp_fixed_vs_pe_set_retimer_lane_settings(
- link,
- p_link_settings->dpcd_lane_settings,
- p_link_settings->link_settings.lane_count);
- } else {
- dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
- }
- dpcd_set_lane_settings(link, p_link_settings, DPRX);
- }
-
- /* Blank stream if running test pattern */
- if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
- /*TODO:
- * m_pHwss->
- * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
- */
- /* Blank stream */
- pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
- }
-
- dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
- (uint8_t *)p_custom_pattern,
- (uint32_t)cust_pattern_size);
-
- if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
- /* Set Test Pattern state */
- link->test_pattern_enabled = true;
- if (p_link_settings != NULL)
- dpcd_set_link_settings(link,
- p_link_settings);
- }
-
- switch (test_pattern) {
- case DP_TEST_PATTERN_VIDEO_MODE:
- pattern = PHY_TEST_PATTERN_NONE;
- break;
- case DP_TEST_PATTERN_D102:
- pattern = PHY_TEST_PATTERN_D10_2;
- break;
- case DP_TEST_PATTERN_SYMBOL_ERROR:
- pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
- break;
- case DP_TEST_PATTERN_PRBS7:
- pattern = PHY_TEST_PATTERN_PRBS7;
- break;
- case DP_TEST_PATTERN_80BIT_CUSTOM:
- pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
- break;
- case DP_TEST_PATTERN_CP2520_1:
- pattern = PHY_TEST_PATTERN_CP2520_1;
- break;
- case DP_TEST_PATTERN_CP2520_2:
- pattern = PHY_TEST_PATTERN_CP2520_2;
- break;
- case DP_TEST_PATTERN_CP2520_3:
- pattern = PHY_TEST_PATTERN_CP2520_3;
- break;
- case DP_TEST_PATTERN_128b_132b_TPS1:
- pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
- break;
- case DP_TEST_PATTERN_128b_132b_TPS2:
- pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
- break;
- case DP_TEST_PATTERN_PRBS9:
- pattern = PHY_TEST_PATTERN_PRBS9;
- break;
- case DP_TEST_PATTERN_PRBS11:
- pattern = PHY_TEST_PATTERN_PRBS11;
- break;
- case DP_TEST_PATTERN_PRBS15:
- pattern = PHY_TEST_PATTERN_PRBS15;
- break;
- case DP_TEST_PATTERN_PRBS23:
- pattern = PHY_TEST_PATTERN_PRBS23;
- break;
- case DP_TEST_PATTERN_PRBS31:
- pattern = PHY_TEST_PATTERN_PRBS31;
- break;
- case DP_TEST_PATTERN_264BIT_CUSTOM:
- pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
- break;
- case DP_TEST_PATTERN_SQUARE_PULSE:
- pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
- break;
- default:
- return false;
- }
-
- if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
- /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
- return false;
-
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE)
- core_link_write_dpcd(link,
- DP_LINK_SQUARE_PATTERN,
- p_custom_pattern,
- 1);
-
-#endif
- /* tell receiver that we are sending qualification
- * pattern DP 1.2 or later - DP receiver's link quality
- * pattern is set using DPCD LINK_QUAL_LANEx_SET
- * register (0x10B~0x10E)
- */
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
- link_qual_pattern[lane] =
- (unsigned char)(pattern);
-
- core_link_write_dpcd(link,
- DP_LINK_QUAL_LANE0_SET,
- link_qual_pattern,
- sizeof(link_qual_pattern));
- } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
- link->dpcd_caps.dpcd_rev.raw == 0) {
- /* tell receiver that we are sending qualification
- * pattern DP 1.1a or earlier - DP receiver's link
- * quality pattern is set using
- * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
- * register (0x102). We will use v_1.3 when we are
- * setting test pattern for DP 1.1.
- */
- core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
- &training_pattern.raw,
- sizeof(training_pattern));
- training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
- core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
- &training_pattern.raw,
- sizeof(training_pattern));
- }
- } else {
- enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
-
- switch (test_pattern_color_space) {
- case DP_TEST_PATTERN_COLOR_SPACE_RGB:
- color_space = COLOR_SPACE_SRGB;
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- color_space = COLOR_SPACE_SRGB_LIMITED;
- break;
-
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
- color_space = COLOR_SPACE_YCBCR601;
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- color_space = COLOR_SPACE_YCBCR601_LIMITED;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
- color_space = COLOR_SPACE_YCBCR709;
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- color_space = COLOR_SPACE_YCBCR709_LIMITED;
- break;
- default:
- break;
- }
-
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
- union dmub_hw_lock_flags hw_locks = { 0 };
- struct dmub_hw_lock_inst_flags inst_flags = { 0 };
-
- hw_locks.bits.lock_dig = 1;
- inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
-
- dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
- true,
- &hw_locks,
- &inst_flags);
- } else
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe_ctx->stream_res.tg);
- }
-
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
- /* update MSA to requested color space */
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
- &pipe_ctx->stream->timing,
- color_space,
- pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
- link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
-
- if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
- else
- pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
- resource_build_info_frame(pipe_ctx);
- link->dc->hwss.update_info_frame(pipe_ctx);
- }
-
- /* CRTC Patterns */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
- pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
- CRTC_STATE_VACTIVE);
-
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
- union dmub_hw_lock_flags hw_locks = { 0 };
- struct dmub_hw_lock_inst_flags inst_flags = { 0 };
-
- hw_locks.bits.lock_dig = 1;
- inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
-
- dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
- false,
- &hw_locks,
- &inst_flags);
- } else
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe_ctx->stream_res.tg);
- }
-
- /* Set Test Pattern state */
- link->test_pattern_enabled = true;
- }
-
- return true;
-}
-
-void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
-{
- unsigned char mstmCntl;
-
- core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
- if (enable)
- mstmCntl |= DP_MST_EN;
- else
- mstmCntl &= (~DP_MST_EN);
-
- core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
-}
-
-void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
-{
- union dpcd_edp_config edp_config_set;
- bool panel_mode_edp = false;
-
- memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
-
- if (panel_mode != DP_PANEL_MODE_DEFAULT) {
-
- switch (panel_mode) {
- case DP_PANEL_MODE_EDP:
- case DP_PANEL_MODE_SPECIAL:
- panel_mode_edp = true;
- break;
-
- default:
- break;
- }
-
- /*set edp panel mode in receiver*/
- core_link_read_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- if (edp_config_set.bits.PANEL_MODE_EDP
- != panel_mode_edp) {
- enum dc_status result;
-
- edp_config_set.bits.PANEL_MODE_EDP =
- panel_mode_edp;
- result = core_link_write_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- ASSERT(result == DC_OK);
- }
- }
- DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
- "eDP panel mode enabled: %d \n",
- link->link_index,
- link->dpcd_caps.panel_mode_edp,
- panel_mode_edp);
-}
-
-enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
-{
- /* We need to explicitly check that the connector
- * is not DP. Some Travis_VGA devices get reported
- * by the video BIOS as DP.
- */
- if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
-
- switch (link->dpcd_caps.branch_dev_id) {
- case DP_BRANCH_DEVICE_ID_0022B9:
- /* An alternate scrambler reset is required for Travis.
- * For the case when the external chip does not
- * provide a sink device id, the alternate scrambler
- * scheme will be overridden later by querying
- * Encoder features
- */
- if (strncmp(
- link->dpcd_caps.branch_dev_name,
- DP_VGA_LVDS_CONVERTER_ID_2,
- sizeof(
- link->dpcd_caps.
- branch_dev_name)) == 0) {
- return DP_PANEL_MODE_SPECIAL;
- }
- break;
- case DP_BRANCH_DEVICE_ID_00001A:
- /* An alternate scrambler reset is required for Travis.
- * For the case when the external chip does not provide
- * a sink device id, the alternate scrambler scheme will
- * be overridden later by querying Encoder features
- */
- if (strncmp(link->dpcd_caps.branch_dev_name,
- DP_VGA_LVDS_CONVERTER_ID_3,
- sizeof(
- link->dpcd_caps.
- branch_dev_name)) == 0) {
- return DP_PANEL_MODE_SPECIAL;
- }
- break;
- default:
- break;
- }
- }
-
- if (link->dpcd_caps.panel_mode_edp &&
- (link->connector_signal == SIGNAL_TYPE_EDP ||
- (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->is_internal_display))) {
- return DP_PANEL_MODE_EDP;
- }
-
- return DP_PANEL_MODE_DEFAULT;
-}
-
-enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
-{
- /* FEC has to be "set ready" before the link training.
- * The policy is to always train with FEC
- * if the sink supports it and leave it enabled on link.
- * If FEC is not supported, disable it.
- */
- struct link_encoder *link_enc = NULL;
- enum dc_status status = DC_OK;
- uint8_t fec_config = 0;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (!dc_link_should_enable_fec(link))
- return status;
-
- if (link_enc->funcs->fec_set_ready &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
- if (ready) {
- fec_config = 1;
- status = core_link_write_dpcd(link,
- DP_FEC_CONFIGURATION,
- &fec_config,
- sizeof(fec_config));
- if (status == DC_OK) {
- link_enc->funcs->fec_set_ready(link_enc, true);
- link->fec_state = dc_link_fec_ready;
- } else {
- link_enc->funcs->fec_set_ready(link_enc, false);
- link->fec_state = dc_link_fec_not_ready;
- dm_error("dpcd write failed to set fec_ready");
- }
- } else if (link->fec_state == dc_link_fec_ready) {
- fec_config = 0;
- status = core_link_write_dpcd(link,
- DP_FEC_CONFIGURATION,
- &fec_config,
- sizeof(fec_config));
- link_enc->funcs->fec_set_ready(link_enc, false);
- link->fec_state = dc_link_fec_not_ready;
- }
- }
-
- return status;
-}
-
-void dp_set_fec_enable(struct dc_link *link, bool enable)
-{
- struct link_encoder *link_enc = NULL;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (!dc_link_should_enable_fec(link))
- return;
-
- if (link_enc->funcs->fec_set_enable &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
- if (link->fec_state == dc_link_fec_ready && enable) {
- /* According to the DP spec, the FEC enable sequence can first
- * be transmitted anytime after 1000 LL codes have
- * been transmitted on the link after link training
- * completion. Using 1-lane RBR gives the maximum
- * time for transmitting 1000 LL codes, which is 6.173 us.
- * So use a 7 microsecond delay instead.
- */
- udelay(7);
- link_enc->funcs->fec_set_enable(link_enc, true);
- link->fec_state = dc_link_fec_enabled;
- } else if (link->fec_state == dc_link_fec_enabled && !enable) {
- link_enc->funcs->fec_set_enable(link_enc, false);
- link->fec_state = dc_link_fec_ready;
- }
- }
-}
-
-void dpcd_set_source_specific_data(struct dc_link *link)
-{
- if (!link->dc->vendor_signature.is_valid) {
- enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
- struct dpcd_amd_signature amd_signature = {0};
- struct dpcd_amd_device_id amd_device_id = {0};
-
- amd_device_id.device_id_byte1 =
- (uint8_t)(link->ctx->asic_id.chip_id);
- amd_device_id.device_id_byte2 =
- (uint8_t)(link->ctx->asic_id.chip_id >> 8);
- amd_device_id.dce_version =
- (uint8_t)(link->ctx->dce_version);
- amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
- amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
-
- core_link_read_dpcd(link, DP_SOURCE_OUI,
- (uint8_t *)(&amd_signature),
- sizeof(amd_signature));
-
- if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
- (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
- (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
-
- amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
-
- core_link_write_dpcd(link, DP_SOURCE_OUI,
- (uint8_t *)(&amd_signature),
- sizeof(amd_signature));
- }
-
- core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
- (uint8_t *)(&amd_device_id),
- sizeof(amd_device_id));
-
- if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
- link->dc->caps.min_horizontal_blanking_period != 0) {
-
- uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
-
- if (link->preferred_link_setting.dpcd_source_device_specific_field_support) {
- result_write_min_hblank = core_link_write_dpcd(link,
- DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
- sizeof(hblank_size));
-
- if (result_write_min_hblank == DC_ERROR_UNEXPECTED)
- link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
- } else {
- DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n");
- }
- }
-
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
- "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
- result_write_min_hblank,
- link->link_index,
- link->ctx->dce_version,
- DP_SOURCE_MINIMUM_HBLANK_SUPPORTED,
- link->dc->caps.min_horizontal_blanking_period,
- link->dpcd_caps.branch_dev_id,
- link->dpcd_caps.branch_dev_name[0],
- link->dpcd_caps.branch_dev_name[1],
- link->dpcd_caps.branch_dev_name[2],
- link->dpcd_caps.branch_dev_name[3],
- link->dpcd_caps.branch_dev_name[4],
- link->dpcd_caps.branch_dev_name[5]);
- } else {
- core_link_write_dpcd(link, DP_SOURCE_OUI,
- link->dc->vendor_signature.data.raw,
- sizeof(link->dc->vendor_signature.data.raw));
- }
-}
-
-void dpcd_write_cable_id_to_dprx(struct dc_link *link)
-{
- if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED ||
- link->dpcd_caps.cable_id.raw == 0 ||
- link->dprx_states.cable_id_written)
- return;
-
- core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX,
- &link->dpcd_caps.cable_id.raw,
- sizeof(link->dpcd_caps.cable_id.raw));
-
- link->dprx_states.cable_id_written = 1;
-}
-
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
- bool isHDR,
- uint32_t backlight_millinits,
- uint32_t transition_time_in_ms)
-{
- struct dpcd_source_backlight_set dpcd_backlight_set;
- uint8_t backlight_control = isHDR ? 1 : 0;
-
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- // OLEDs have no PWM, they can only use AUX
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- backlight_control = 1;
-
- *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
- *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
-
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *)(&dpcd_backlight_set),
- sizeof(dpcd_backlight_set)) != DC_OK)
- return false;
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
- &backlight_control, 1) != DC_OK)
- return false;
-
- return true;
-}
-
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
- uint32_t *backlight_millinits_avg,
- uint32_t *backlight_millinits_peak)
-{
- union dpcd_source_backlight_get dpcd_backlight_get;
-
- memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
-
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
- dpcd_backlight_get.raw,
- sizeof(union dpcd_source_backlight_get)) != DC_OK)
- return false;
-
- *backlight_millinits_avg =
- dpcd_backlight_get.bytes.backlight_millinits_avg;
- *backlight_millinits_peak =
- dpcd_backlight_get.bytes.backlight_millinits_peak;
-
- /* On non-supported panels dpcd_read usually succeeds with 0 returned */
- if (*backlight_millinits_avg == 0 ||
- *backlight_millinits_avg > *backlight_millinits_peak)
- return false;
-
- return true;
-}
-
-bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
-{
- uint8_t backlight_enable = enable ? 1 : 0;
-
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
- &backlight_enable, 1) != DC_OK)
- return false;
-
- return true;
-}
-
-// We read the default from 0x320 because we expect the BIOS wrote it there;
-// the regular get_backlight_nit path reads the value the panel set at 0x326.
-bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
-{
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *) backlight_millinits,
- sizeof(uint32_t)) != DC_OK)
- return false;
-
- return true;
-}
-
-bool dc_link_set_default_brightness_aux(struct dc_link *link)
-{
- uint32_t default_backlight;
-
- if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
- if (!dc_link_read_default_bl_aux(link, &default_backlight))
- default_backlight = 150000;
- // if < 5 nits or > 5000 nits, it might be a wrong readback
- if (default_backlight < 5000 || default_backlight > 5000000)
- default_backlight = 150000; //
-
- return dc_link_set_backlight_level_nits(link, true,
- default_backlight, 0);
- }
- return false;
-}
-
-bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing)
-{
- struct dc_link_settings link_setting;
- uint8_t link_bw_set;
- uint8_t link_rate_set;
- uint32_t req_bw;
- union lane_count_set lane_count_set = {0};
-
- ASSERT(link || crtc_timing); // invalid input
-
- if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->panel_config.ilr.optimize_edp_link_rate)
- return false;
-
-
- // Read DPCD 00100h to find if standard link rates are set
- core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
-
- if (link_bw_set) {
- DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
- return true;
- }
-
- // Read DPCD 00115h to find the edp link rate set used
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- // Read DPCD 00101h to find out the number of lanes currently set
- core_link_read_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, sizeof(lane_count_set));
-
- req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
-
- if (!crtc_timing->flags.DSC)
- decide_edp_link_settings(link, &link_setting, req_bw);
- else
- decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
-
- if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
- lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
- DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
- return true;
- }
-
- DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
- return false;
-}
-
-enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings)
-{
- if ((link_settings->link_rate >= LINK_RATE_LOW) &&
- (link_settings->link_rate <= LINK_RATE_HIGH3))
- return DP_8b_10b_ENCODING;
- else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
- (link_settings->link_rate <= LINK_RATE_UHBR20))
- return DP_128b_132b_ENCODING;
- return DP_UNKNOWN_ENCODING;
-}
-
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
-{
- struct dc_link_settings link_settings = {0};
-
- if (!dc_is_dp_signal(link->connector_signal))
- return DP_UNKNOWN_ENCODING;
-
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- link_settings = link->preferred_link_setting;
- } else {
- decide_mst_link_settings(link, &link_settings);
- }
-
- return dp_get_link_encoding_format(&link_settings);
-}
-
-// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
-static void get_lane_status(
- struct dc_link *link,
- uint32_t lane_count,
- union lane_status *status,
- union lane_align_status_updated *status_updated)
-{
- unsigned int lane;
- uint8_t dpcd_buf[3] = {0};
-
- if (status == NULL || status_updated == NULL) {
- return;
- }
-
- core_link_read_dpcd(
- link,
- DP_LANE0_1_STATUS,
- dpcd_buf,
- sizeof(dpcd_buf));
-
- for (lane = 0; lane < lane_count; lane++) {
- status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane);
- }
-
- status_updated->raw = dpcd_buf[2];
-}
-
-bool dpcd_write_128b_132b_sst_payload_allocation_table(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- struct link_mst_stream_allocation_table *proposed_table,
- bool allocate)
-{
- const uint8_t vc_id = 1; /// VC ID always 1 for SST
- const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
- bool result = false;
- uint8_t req_slot_count = 0;
- struct fixed31_32 avg_time_slots_per_mtp = { 0 };
- union payload_table_update_status update_status = { 0 };
- const uint32_t max_retries = 30;
- uint32_t retries = 0;
-
- if (allocate) {
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
- req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
- /// Validation should filter out modes that exceed link BW
- ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT);
- if (req_slot_count > MAX_MTP_SLOT_COUNT)
- return false;
- } else {
- /// Leave req_slot_count = 0 if allocate is false.
- }
-
- proposed_table->stream_count = 1; /// Always 1 stream for SST
- proposed_table->stream_allocations[0].slot_count = req_slot_count;
- proposed_table->stream_allocations[0].vcp_id = vc_id;
-
- if (link->aux_access_disabled)
- return true;
-
- /// Write DPCD 2C0 = 1 to start updating
- update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_TABLE_UPDATE_STATUS,
- &update_status.raw,
- 1);
-
- /// Program the changes in DPCD 1C0 - 1C2
- ASSERT(vc_id == 1);
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_ALLOCATE_SET,
- &vc_id,
- 1);
-
- ASSERT(start_time_slot == 0);
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
- &start_time_slot,
- 1);
-
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
- &req_slot_count,
- 1);
-
- /// Poll till DPCD 2C0 read 1
- /// Try for up to 150ms (30 retries, with a 5ms delay after each attempt)
-
- while (retries < max_retries) {
- if (core_link_read_dpcd(
- link,
- DP_PAYLOAD_TABLE_UPDATE_STATUS,
- &update_status.raw,
- 1) == DC_OK) {
- if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
- DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
- result = true;
- break;
- }
- } else {
- union dpcd_rev dpcdRev;
-
- if (core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- &dpcdRev.raw,
- 1) != DC_OK) {
- DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
- "of sink while polling payload table "
- "updated status bit.");
- break;
- }
- }
- retries++;
- msleep(5);
- }
-
- if (!result && retries == max_retries) {
- DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
- "continue on. Something is wrong with the branch.");
- // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
- }
-
- return result;
-}
-
-bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
-{
- /*
- * wait for ACT handled
- */
- int i;
- const int act_retries = 30;
- enum act_return_status result = ACT_FAILED;
- union payload_table_update_status update_status = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated lane_status_updated;
-
- if (link->aux_access_disabled)
- return true;
- for (i = 0; i < act_retries; i++) {
- get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
-
- if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
- !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
- !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
- !dp_is_interlane_aligned(lane_status_updated)) {
- DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
- "polling for ACT handled.");
- result = ACT_LINK_LOST;
- break;
- }
- core_link_read_dpcd(
- link,
- DP_PAYLOAD_TABLE_UPDATE_STATUS,
- &update_status.raw,
- 1);
-
- if (update_status.bits.ACT_HANDLED == 1) {
- DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
- result = ACT_SUCCESS;
- break;
- }
-
- msleep(5);
- }
-
- if (result == ACT_FAILED) {
- DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
- "continue on. Something is wrong with the branch.");
- }
-
- return (result == ACT_SUCCESS);
-}
-
-struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
- const struct dc_stream_state *stream,
- const struct dc_link *link)
-{
- struct fixed31_32 link_bw_effective =
- dc_fixpt_from_int(
- dc_link_bandwidth_kbps(link, &link->cur_link_settings));
- struct fixed31_32 timeslot_bw_effective =
- dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
- struct fixed31_32 timing_bw =
- dc_fixpt_from_int(
- dc_bandwidth_in_kbps_from_timing(&stream->timing));
- struct fixed31_32 avg_time_slots_per_mtp =
- dc_fixpt_div(timing_bw, timeslot_bw_effective);
-
- return avg_time_slots_per_mtp;
-}
-
-bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
-{
- /* If this assert is hit then we have a link encoder dynamic management issue */
- ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal));
-}
-
-void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
-{
- if (link->connector_signal != SIGNAL_TYPE_EDP)
- return;
-
- link->dc->hwss.edp_power_control(link, true);
- if (wait_for_hpd)
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- if (link->dc->hwss.edp_backlight_control)
- link->dc->hwss.edp_backlight_control(link, true);
-}
-
-void dc_link_clear_dprx_states(struct dc_link *link)
-{
- memset(&link->dprx_states, 0, sizeof(link->dprx_states));
-}
-
-void dp_receiver_power_ctrl(struct dc_link *link, bool on)
-{
- uint8_t state;
-
- state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
-
- if (link->sync_lt_in_progress)
- return;
-
- core_link_write_dpcd(link, DP_SET_POWER, &state,
- sizeof(state));
-
-}
-
-void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
-{
- if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
- core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
- &dp_test_mode, sizeof(dp_test_mode));
-}
-
-
-static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
-{
- switch (lttpr_repeater_count) {
- case 0x80: // 1 lttpr repeater
- return 1;
- case 0x40: // 2 lttpr repeaters
- return 2;
- case 0x20: // 3 lttpr repeaters
- return 3;
- case 0x10: // 4 lttpr repeaters
- return 4;
- case 0x08: // 5 lttpr repeaters
- return 5;
- case 0x04: // 6 lttpr repeaters
- return 6;
- case 0x02: // 7 lttpr repeaters
- return 7;
- case 0x01: // 8 lttpr repeaters
- return 8;
- default:
- break;
- }
- return 0; // invalid value
-}
-
-static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
-{
- return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
-}
-
-void dp_enable_link_phy(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal,
- enum clock_source_id clock_source,
- const struct dc_link_settings *link_settings)
-{
- link->cur_link_settings = *link_settings;
- link->dc->hwss.enable_dp_link_output(link, link_res, signal,
- clock_source, link_settings);
- dp_receiver_power_ctrl(link, true);
-}
-
-void edp_add_delay_for_T9(struct dc_link *link)
-{
- if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
- udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
-}
-
-bool edp_receiver_ready_T9(struct dc_link *link)
-{
- unsigned int tries = 0;
- unsigned char sinkstatus = 0;
- unsigned char edpRev = 0;
- enum dc_status result = DC_OK;
-
- result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-
- /* Starting with eDP version 1.2, SINK_STATUS indicates the sink is ready. */
- if (result == DC_OK && edpRev >= DP_EDP_12) {
- do {
- sinkstatus = 1;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 0)
- break;
- if (result != DC_OK)
- break;
- udelay(100); //Max T9
- } while (++tries < 50);
- }
-
- return result;
-}
-bool edp_receiver_ready_T7(struct dc_link *link)
-{
- unsigned char sinkstatus = 0;
- unsigned char edpRev = 0;
- enum dc_status result = DC_OK;
-
- /* use absolute time stamp to constrain max T7 */
- unsigned long long enter_timestamp = 0;
- unsigned long long finish_timestamp = 0;
- unsigned long long time_taken_in_ns = 0;
-
- result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-
- if (result == DC_OK && edpRev >= DP_EDP_12) {
- /* Starting with eDP version 1.2, SINK_STATUS indicates the sink is ready. */
- enter_timestamp = dm_get_timestamp(link->ctx);
- do {
- sinkstatus = 0;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 1)
- break;
- if (result != DC_OK)
- break;
- udelay(25);
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
- } while (time_taken_in_ns < 50 * 1000000); //Max T7 is 50ms
- }
-
- if (link && link->panel_config.pps.extra_t7_ms > 0)
- udelay(link->panel_config.pps.extra_t7_ms * 1000);
-
- return result;
-}
-
-void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal)
-{
- struct dc *dc = link->ctx->dc;
-
- if (!link->wa_flags.dp_keep_receiver_powered)
- dp_receiver_power_ctrl(link, false);
-
- dc->hwss.disable_link_output(link, link_res, signal);
- /* Clear current link setting.*/
- memset(&link->cur_link_settings, 0,
- sizeof(link->cur_link_settings));
-
- if (dc->clk_mgr->funcs->notify_link_rate_change)
- dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-}
-
-void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal)
-{
- /* For MST, disable the link only when no stream uses the link */
- if (link->mst_stream_alloc_table.stream_count > 0)
- return;
-
- dp_disable_link_phy(link, link_res, signal);
-
- /* set the sink to SST mode after disabling the link */
- dp_enable_mst_on_sink(link, false);
-}
-
-bool dp_set_hw_training_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dc_dp_training_pattern pattern,
- uint32_t offset)
-{
- enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
-
- switch (pattern) {
- case DP_TRAINING_PATTERN_SEQUENCE_1:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_2:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_3:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_4:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
- break;
- case DP_128b_132b_TPS1:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
- break;
- case DP_128b_132b_TPS2:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
- break;
- default:
- break;
- }
-
- dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
-
- return true;
-}
-
-void dp_set_hw_lane_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct link_training_settings *link_settings,
- uint32_t offset)
-{
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
-
- if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
- return;
-
- if (link_hwss->ext.set_dp_lane_settings)
- link_hwss->ext.set_dp_lane_settings(link, link_res,
- &link_settings->link_settings,
- link_settings->hw_lane_settings);
-
- memmove(link->cur_lane_setting,
- link_settings->hw_lane_settings,
- sizeof(link->cur_lane_setting));
-}
-
-void dp_set_hw_test_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dp_test_pattern test_pattern,
- uint8_t *custom_pattern,
- uint32_t custom_pattern_size)
-{
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- struct encoder_set_dp_phy_pattern_param pattern_param = {0};
-
- pattern_param.dp_phy_pattern = test_pattern;
- pattern_param.custom_pattern = custom_pattern;
- pattern_param.custom_pattern_size = custom_pattern_size;
- pattern_param.dp_panel_mode = dp_get_panel_mode(link);
-
- if (link_hwss->ext.set_dp_link_test_pattern)
- link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param);
-}
-
-void dp_retrain_link_dp_test(struct dc_link *link,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern)
-{
- struct pipe_ctx *pipes =
- &link->dc->current_state->res_ctx.pipe_ctx[0];
- unsigned int i;
- bool do_fallback = false;
-
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream != NULL &&
- !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
- pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL &&
- pipes[i].stream->link == link) {
- udelay(100);
-
- pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
- pipes[i].stream_res.stream_enc);
-
- /* disable any test pattern that might be active */
- dp_set_hw_test_pattern(link, &pipes[i].link_res,
- DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- dp_receiver_power_ctrl(link, false);
-
- link->dc->hwss.disable_stream(&pipes[i]);
- if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
- (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
-
- if (link->link_enc)
- link->link_enc->funcs->disable_output(
- link->link_enc,
- SIGNAL_TYPE_DISPLAY_PORT);
-
- /* Clear current link setting. */
- memset(&link->cur_link_settings, 0,
- sizeof(link->cur_link_settings));
-
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- do_fallback = true;
-
- perform_link_training_with_retries(
- link_setting,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- &pipes[i],
- SIGNAL_TYPE_DISPLAY_PORT,
- do_fallback);
-
- link->dc->hwss.enable_stream(&pipes[i]);
-
- link->dc->hwss.unblank_stream(&pipes[i],
- link_setting);
-
- link->dc->hwss.enable_audio_stream(&pipes[i]);
- }
- }
-}
-
-#undef DC_LOGGER
-#define DC_LOGGER \
- dsc->ctx->logger
-static void dsc_optc_config_log(struct display_stream_compressor *dsc,
- struct dsc_optc_config *config)
-{
- uint32_t precision = 1 << 28;
- uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
- uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
- uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
-
- /* 7 fractional decimal digits of bytes-per-pixel precision is enough because DSC
- * bits-per-pixel precision is 1/16th of a bit, which means bytes-per-pixel precision is
- * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
- */
- ll_bytes_per_pix_fraq *= 10000000;
- ll_bytes_per_pix_fraq /= precision;
-
- DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
- config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
- DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
- DC_LOG_DSC("\tslice_width %d", config->slice_width);
-}
-
-bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- bool result = false;
-
- if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- result = true;
- else
- result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
- return result;
-}
-
-/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
- * i.e. after dp_enable_dsc_on_rx() had been called
- */
-void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- if (enable) {
- struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
- enum optc_dsc_mode optc_dsc_mode;
-
- /* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
- ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
- dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
-
- dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
- dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
-
- odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
- odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
- }
- dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
- dsc_cfg.pic_width *= opp_cnt;
-
- optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
-
- /* Enable DSC in encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
- && !is_dp_128b_132b_signal(pipe_ctx)) {
- DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
- dsc_optc_config_log(dsc, &dsc_optc_cfg);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
- optc_dsc_mode,
- dsc_optc_cfg.bytes_per_pixel,
- dsc_optc_cfg.slice_width);
-
- /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
- }
-
- /* Enable DSC in OPTC */
- DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
- dsc_optc_config_log(dsc, &dsc_optc_cfg);
- pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
- optc_dsc_mode,
- dsc_optc_cfg.bytes_per_pixel,
- dsc_optc_cfg.slice_width);
- } else {
- /* disable DSC in OPTC */
- pipe_ctx->stream_res.tg->funcs->set_dsc_config(
- pipe_ctx->stream_res.tg,
- OPTC_DSC_DISABLED, 0, 0);
-
- /* disable DSC in stream encoder */
- if (dc_is_dp_signal(stream->signal)) {
- if (is_dp_128b_132b_signal(pipe_ctx))
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- false,
- NULL,
- true);
- else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
- pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL, true);
- }
- }
-
- /* disable DSC block */
- pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
- }
-}
-
-bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- bool result = false;
-
- if (!pipe_ctx->stream->timing.flags.DSC)
- goto out;
- if (!dsc)
- goto out;
-
- if (enable) {
- {
- dp_set_dsc_on_stream(pipe_ctx, true);
- result = true;
- }
- } else {
- dp_set_dsc_on_rx(pipe_ctx, false);
- dp_set_dsc_on_stream(pipe_ctx, false);
- result = true;
- }
-out:
- return result;
-}
-
-/*
- * For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled;
- * hence the PPS info packet update needs to use a frame update instead of an immediate update.
- * The parameter immediate_update was added for this purpose.
- * The decision to use a frame update is hard-coded in function dp_update_dsc_config(),
- * which is the only place where "false" is passed in for param immediate_update.
- *
- * immediate_update is only applicable when DSC is enabled.
- */
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc_stream_state *stream = pipe_ctx->stream;
-
- if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
- return false;
-
- if (enable) {
- struct dsc_config dsc_cfg;
- uint8_t dsc_packed_pps[128];
-
- memset(&dsc_cfg, 0, sizeof(dsc_cfg));
- memset(dsc_packed_pps, 0, 128);
-
- /* Enable DSC hw block */
- dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
-
- DC_LOG_DSC(" ");
- dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
- memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
- if (dc_is_dp_signal(stream->signal)) {
- DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- if (is_dp_128b_132b_signal(pipe_ctx))
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- true,
- &dsc_packed_pps[0],
- immediate_update);
- else
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc,
- true,
- &dsc_packed_pps[0],
- immediate_update);
- }
- } else {
- /* disable DSC PPS in stream encoder */
- memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
- if (dc_is_dp_signal(stream->signal)) {
- if (is_dp_128b_132b_signal(pipe_ctx))
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- false,
- NULL,
- true);
- else
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL, true);
- }
- }
-
- return true;
-}
-
-
-bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-
- if (!pipe_ctx->stream->timing.flags.DSC)
- return false;
- if (!dsc)
- return false;
-
- dp_set_dsc_on_stream(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true, false);
- return true;
-}
-
-#undef DC_LOGGER
-#define DC_LOGGER \
- link->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 614f022d1cff..30c0644d4418 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "dc_link_dp.h"
+#include "link.h"
#define DC_LOGGER dc->ctx->logger
@@ -48,7 +48,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
struct dc_link_settings link_settings = {0};
- decide_link_settings(stream, &link_settings);
+ stream->ctx->dc->link_srv->dp_decide_link_settings(stream, &link_settings);
if ((link_settings.link_rate >= LINK_RATE_LOW) &&
link_settings.link_rate <= LINK_RATE_HIGH3) {
is_dig_stream = true;
@@ -305,15 +305,17 @@ void link_enc_cfg_link_encs_assign(
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
+ /* Skip it if the link is a mappable endpoint. */
+ if (stream->link->is_dig_mapping_flexible)
+ continue;
+
/* Skip stream if not supported by DIG link encoder. */
if (!is_dig_link_enc_stream(stream))
continue;
/* Physical endpoints have a fixed mapping to DIG link encoders. */
- if (!stream->link->is_dig_mapping_flexible) {
- eng_id = stream->link->eng_id;
- add_link_enc_assignment(state, stream, eng_id);
- }
+ eng_id = stream->link->eng_id;
+ add_link_enc_assignment(state, stream, eng_id);
}
/* (b) Retain previous assignments for mappable endpoints if encoders still available. */
@@ -325,11 +327,12 @@ void link_enc_cfg_link_encs_assign(
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = state->streams[i];
- /* Skip stream if not supported by DIG link encoder. */
- if (!is_dig_link_enc_stream(stream))
+ /* Skip it if the link is NOT a mappable endpoint. */
+ if (!stream->link->is_dig_mapping_flexible)
continue;
- if (!stream->link->is_dig_mapping_flexible)
+ /* Skip stream if not supported by DIG link encoder. */
+ if (!is_dig_link_enc_stream(stream))
continue;
for (j = 0; j < prev_state->stream_count; j++) {
@@ -338,6 +341,7 @@ void link_enc_cfg_link_encs_assign(
if (stream == prev_stream && stream->link == prev_stream->link &&
prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {
eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id;
+
if (is_avail_link_enc(state, eng_id, stream))
add_link_enc_assignment(state, stream, eng_id);
}
@@ -350,6 +354,15 @@ void link_enc_cfg_link_encs_assign(
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
+ struct link_encoder *link_enc = NULL;
+
+ /* Skip it if the link is NOT a mappable endpoint. */
+ if (!stream->link->is_dig_mapping_flexible)
+ continue;
+
+ /* Skip if encoder assignment retained in step (b) above. */
+ if (stream->link_enc)
+ continue;
/* Skip stream if not supported by DIG link encoder. */
if (!is_dig_link_enc_stream(stream)) {
@@ -358,24 +371,18 @@ void link_enc_cfg_link_encs_assign(
}
/* Mappable endpoints have a flexible mapping to DIG link encoders. */
- if (stream->link->is_dig_mapping_flexible) {
- struct link_encoder *link_enc = NULL;
- /* Skip if encoder assignment retained in step (b) above. */
- if (stream->link_enc)
- continue;
+ /* For MST, multiple streams will share the same link / display
+ * endpoint. These streams should use the same link encoder
+ * assigned to that endpoint.
+ */
+ link_enc = get_link_enc_used_by_link(state, stream->link);
+ if (link_enc == NULL)
+ eng_id = find_first_avail_link_enc(stream->ctx, state);
+ else
+ eng_id = link_enc->preferred_engine;
- /* For MST, multiple streams will share the same link / display
- * endpoint. These streams should use the same link encoder
- * assigned to that endpoint.
- */
- link_enc = get_link_enc_used_by_link(state, stream->link);
- if (link_enc == NULL)
- eng_id = find_first_avail_link_enc(stream->ctx, state);
- else
- eng_id = link_enc->preferred_engine;
- add_link_enc_assignment(state, stream, eng_id);
- }
+ add_link_enc_assignment(state, stream, eng_id);
}
link_enc_cfg_validate(dc, state);
@@ -420,10 +427,6 @@ void link_enc_cfg_link_enc_unassign(
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
- /* Only DIG link encoders. */
- if (!is_dig_link_enc_stream(stream))
- return;
-
if (stream->link_enc)
eng_id = stream->link_enc->preferred_engine;
@@ -619,7 +622,6 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
int i, j;
uint8_t valid_count = 0;
uint8_t dig_stream_count = 0;
- int matching_stream_ptrs = 0;
int eng_ids_per_ep_id[MAX_PIPES] = {0};
int ep_ids_per_eng_id[MAX_PIPES] = {0};
int valid_bitmap = 0;
@@ -642,9 +644,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid) {
- if (assignment.stream == state->streams[i])
- matching_stream_ptrs++;
- else
+ if (assignment.stream != state->streams[i])
valid_stream_ptrs = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
new file mode 100644
index 000000000000..18e098568cb4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file provides a single entry point to link functionality declared in
+ * dc public headers. The file is intended to be used as a thin translation
+ * layer that directly calls link internal functions without adding new
+ * functional behavior.
+ *
+ * When exporting a new link-related dc function, add the function declaration
+ * in dc.h with detailed interface documentation, then add the function
+ * implementation in this file, which calls the corresponding link functions.
+ */
+#include "link.h"
+#include "dce/dce_i2c.h"
+struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
+
+void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num)
+{
+ int i;
+
+ *edp_num = 0;
+ for (i = 0; i < dc->link_count; i++) {
+ // report any eDP links, even unconnected DDIs
+ if (!dc->links[i])
+ continue;
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
+ edp_links[*edp_num] = dc->links[i];
+ if (++(*edp_num) == MAX_NUM_EDP)
+ return;
+ }
+ }
+}
+
+bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num, i;
+
+ *inst_out = 0;
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return false;
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (link == edp_links[i])
+ break;
+ (*inst_out)++;
+ }
+ return true;
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ return link->dc->link_srv->detect_link(link, reason);
+}
+
+bool dc_link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type)
+{
+ return link->dc->link_srv->detect_connection_type(link, type);
+}
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
+{
+ return link->dc->link_srv->get_status(link);
+}
+
+/* return true if the connected receiver supports the hdcp version */
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
+{
+ return link->dc->link_srv->is_hdcp1x_supported(link, signal);
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
+{
+ return link->dc->link_srv->is_hdcp2x_supported(link, signal);
+}
+
+void dc_link_clear_dprx_states(struct dc_link *link)
+{
+ link->dc->link_srv->clear_dprx_states(link);
+}
+
+bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link)
+{
+ return link->dc->link_srv->reset_cur_dp_mst_topology(link);
+}
+
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
+}
+
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
+{
+ dc->link_srv->get_cur_res_map(dc, map);
+}
+
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
+{
+ dc->link_srv->restore_res_map(dc, map);
+}
+
+bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+
+ return link->dc->link_srv->update_dsc_config(pipe_ctx);
+}
+
+bool dc_is_oem_i2c_device_present(
+ struct dc *dc,
+ size_t slave_address)
+{
+ if (dc->res_pool->oem_device)
+ return dce_i2c_oem_device_present(
+ dc->res_pool,
+ dc->res_pool->oem_device,
+ slave_address);
+
+ return false;
+}
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd)
+{
+
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+}
+
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd)
+{
+ struct ddc_service *ddc = dc->res_pool->oem_device;
+
+ if (ddc)
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+
+ return false;
+}
+
+void dc_link_dp_handle_automated_test(struct dc_link *link)
+{
+ link->dc->link_srv->dp_handle_automated_test(link);
+}
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ return link->dc->link_srv->dp_set_test_pattern(link, test_pattern,
+ test_pattern_color_space, p_link_settings,
+ p_custom_pattern, cust_pattern_size);
+}
+
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ struct dc_link *link)
+{
+ struct link_resource link_res;
+
+ dc->link_srv->get_cur_link_res(link, &link_res);
+ dc->link_srv->dp_set_drive_settings(link, &link_res, lt_settings);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ dc->link_srv->dp_set_preferred_link_settings(dc, link_setting, link);
+}
+
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
+{
+ dc->link_srv->dp_set_preferred_training_settings(dc, link_setting,
+ lt_overrides, link, skip_immediate_retrain);
+}
+
+bool dc_dp_trace_is_initialized(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_trace_is_initialized(link);
+}
+
+void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+ bool in_detection,
+ bool is_logged)
+{
+ link->dc->link_srv->dp_trace_set_is_logged_flag(link, in_detection, is_logged);
+}
+
+bool dc_dp_trace_is_logged(struct dc_link *link, bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_is_logged(link, in_detection);
+}
+
+unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+ bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_get_lt_end_timestamp(link, in_detection);
+}
+
+const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+ bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_get_lt_counts(link, in_detection);
+}
+
+unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_trace_get_link_loss_count(link);
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ return link->dc->link_srv->add_remote_sink(link, edid, len, init_data);
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ link->dc->link_srv->remove_remote_sink(link, sink);
+}
+
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ const struct dc *dc = ddc->link->dc;
+
+ return dc->link_srv->aux_transfer_raw(
+ ddc, payload, operation_result);
+}
+
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(const struct dc *dc, uint8_t bw)
+{
+ return dc->link_srv->bw_kbps_from_raw_frl_link_rate_data(bw);
+}
+
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ return link->dc->link_srv->edp_decide_link_settings(link, link_setting, req_bw);
+}
+
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap)
+{
+ return link->dc->link_srv->dp_get_max_link_enc_cap(link, max_link_enc_cap);
+}
+
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
+ const struct dc_link *link)
+{
+ return link->dc->link_srv->mst_decide_link_encoding_format(link);
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_get_verified_link_cap(link);
+}
+
+bool dc_link_is_dp_sink_present(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_is_sink_present(link);
+}
+
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_is_fec_supported(link);
+}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ link->dc->link_srv->dp_overwrite_extended_receiver_cap(link);
+}
+
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_should_enable_fec(link);
+}
+
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ struct dc_link *link, int peak_bw)
+{
+ return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
+}
+
+void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+{
+ link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result);
+}
+
+bool dc_link_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ return link->dc->link_srv->dp_parse_link_loss_status(link, hpd_irq_dpcd_data);
+}
+
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_should_allow_hpd_rx_irq(link);
+}
+
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+ link->dc->link_srv->dp_handle_link_loss(link);
+}
+
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ return link->dc->link_srv->dp_read_hpd_rx_irq_data(link, irq_data);
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ return link->dc->link_srv->dp_handle_hpd_rx_irq(link, out_hpd_irq_dpcd_data,
+ out_link_loss, defer_handling, has_left_work);
+}
+
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ link->dc->link_srv->dpcd_write_rx_power_ctrl(link, on);
+}
+
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting)
+{
+ return link->dc->link_srv->dp_decide_lttpr_mode(link, link_setting);
+}
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+{
+ link->dc->link_srv->edp_panel_backlight_power_on(link, wait_for_hpd);
+}
+
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ return link->dc->link_srv->edp_get_backlight_level(link);
+}
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ return link->dc->link_srv->edp_get_backlight_level_nits(link,
+ backlight_millinits_avg,
+ backlight_millinits_peak);
+}
+
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ return link->dc->link_srv->edp_set_backlight_level(link,
+ backlight_pwm_u16_16, frame_ramp);
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ return link->dc->link_srv->edp_set_backlight_level_nits(link, isHDR,
+ backlight_millinits, transition_time_in_ms);
+}
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+{
+ return link->dc->link_srv->edp_get_target_backlight_pwm(link);
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+{
+ return link->dc->link_srv->edp_get_psr_state(link, state);
+}
+
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ return link->dc->link_srv->edp_set_psr_allow_active(link, allow_active, wait,
+ force_static, power_opts);
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
+}
+
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ return link->dc->link_srv->edp_wait_for_t12(link);
+}
+
+bool dc_link_get_hpd_state(struct dc_link *link)
+{
+ return link->dc->link_srv->get_hpd_state(link);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ link->dc->link_srv->enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ link->dc->link_srv->disable_hpd(link);
+}
+
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+{
+ link->dc->link_srv->enable_hpd_filter(link, enable);
+}
+
+bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+{
+ return dc->link_srv->validate_dpia_bandwidth(streams, count);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index da164685547d..117d80cb36fb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,11 +40,11 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "virtual/virtual_link_hwss.h"
-#include "link/link_hwss_dio.h"
-#include "link/link_hwss_dpia.h"
-#include "link/link_hwss_hpo_dp.h"
+#include "link/hwss/link_hwss_dio.h"
+#include "link/hwss/link_hwss_dpia.h"
+#include "link/hwss/link_hwss_hpo_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -232,7 +232,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
init_data->num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
res_pool = dcn10_create_resource_pool(init_data, dc);
@@ -276,7 +276,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_21:
res_pool = dcn321_create_resource_pool(init_data, dc);
break;
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
break;
}
@@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context(
struct dc_stream_status *stream_status = NULL;
struct resource_pool *pool = dc->res_pool;
+ if (!plane_state)
+ return true;
+
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
stream_status = &context->stream_status[i];
@@ -2213,7 +2216,7 @@ enum dc_status dc_remove_stream_from_ctx(
del_pipe->stream_res.stream_enc,
false);
- if (is_dp_128b_132b_signal(del_pipe)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
@@ -2513,9 +2516,10 @@ enum dc_status resource_map_pool_resources(
* and link settings
*/
if (dc_is_dp_signal(stream->signal)) {
- if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
+ if (!dc->link_srv->dp_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
- if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
&context->res_ctx, pool, stream);
@@ -3269,6 +3273,50 @@ static void set_hfvs_info_packet(
*info_packet = stream->hfvsif_infopacket;
}
+static void adaptive_sync_override_dp_info_packets_sdp_line_num(
+ const struct dc_crtc_timing *timing,
+ struct enc_sdp_line_num *sdp_line_num,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+{
+ uint32_t asic_blank_start = 0;
+ uint32_t asic_blank_end = 0;
+ uint32_t v_update = 0;
+
+ const struct dc_crtc_timing *tg = timing;
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = tg->v_total - tg->v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = (asic_blank_start - tg->v_border_bottom -
+ tg->v_addressable - tg->v_border_top);
+
+ if (pipe_dlg_param->vstartup_start > asic_blank_end) {
+ v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end));
+ sdp_line_num->adaptive_sync_line_num_valid = true;
+ sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
+ } else {
+ sdp_line_num->adaptive_sync_line_num_valid = false;
+ sdp_line_num->adaptive_sync_line_num = 0;
+ }
+}
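+
+/* Worked example of the line math above (illustrative numbers only): with
+ * v_total = 1125, v_front_porch = 4, v_addressable = 1080 and zero borders,
+ * asic_blank_start = 1121 and asic_blank_end = 41. If vstartup_start = 100,
+ * which is greater than 41, then v_update = 1125 - (100 - 41) = 1066 and the
+ * adaptive sync SDP line number is 1125 - 1066 - 1 = 58.
+ */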
+
+static void set_adaptive_sync_info_packet(
+ struct dc_info_packet *info_packet,
+ const struct dc_stream_state *stream,
+ struct encoder_info_frame *info_frame,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+{
+ if (!stream->adaptive_sync_infopacket.valid)
+ return;
+
+ adaptive_sync_override_dp_info_packets_sdp_line_num(
+ &stream->timing,
+ &info_frame->sdp_line_num,
+ pipe_dlg_param);
+
+ *info_packet = stream->adaptive_sync_infopacket;
+}
static void set_vtem_info_packet(
struct dc_info_packet *info_packet,
@@ -3361,6 +3409,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->vsc.valid = false;
info->hfvsif.valid = false;
info->vtem.valid = false;
+ info->adaptive_sync.valid = false;
signal = pipe_ctx->stream->signal;
/* HDMi and DP have different info packets*/
@@ -3381,6 +3430,10 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
+ set_adaptive_sync_info_packet(&info->adaptive_sync,
+ pipe_ctx->stream,
+ info,
+ &pipe_ctx->pipe_dlg_param);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -3636,7 +3689,7 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
/* TODO: validate audio ASIC caps, encoder */
if (res == DC_OK)
- res = dc_link_validate_mode_timing(stream,
+ res = dc->link_srv->validate_mode_timing(stream,
link,
&stream->timing);
@@ -3763,7 +3816,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
memset(link_res, 0, sizeof(*link_res));
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
dc->res_pool, link);
if (!link_res->hpo_dp_link_enc)
@@ -3820,9 +3873,20 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
- IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) {
+ struct pipe_ctx *first_pipe = pipe_ctx_check;
+
+ while (first_pipe->prev_odm_pipe)
+ first_pipe = first_pipe->prev_odm_pipe;
+ /* When ODM combine is enabled, this case is expected. If the disabled pipe
+ * is part of the ODM tree, then we should not print an error.
+ */
+ if (first_pipe->pipe_idx == disabled_master_pipe_idx)
+ continue;
+
DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
- i, disabled_master_pipe_idx);
+ i, disabled_master_pipe_idx);
+ }
}
}
@@ -3967,17 +4031,56 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
else
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
#endif
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
#endif
}
return true;
}
+
+enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc =
+ find_first_free_match_hpo_dp_stream_enc_for_link(
+ &context->res_ctx, dc->res_pool, pipe_ctx->stream);
+
+ if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
+ return DC_NO_STREAM_ENC_RESOURCE;
+
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true);
+ }
+
+ if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) {
+ if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+ }
+ } else {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc) {
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false);
+ pipe_ctx->stream_res.hpo_dp_stream_enc = NULL;
+ }
+ if (pipe_ctx->link_res.hpo_dp_link_enc)
+ remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream);
+ }
+
+ return DC_OK;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 4b372aa52801..5f6392ae31a6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -35,19 +35,15 @@
*/
/**
- *****************************************************************************
- * Function: dc_stat_get_dmub_notification
+ * dc_stat_get_dmub_notification
*
- * @brief
- * Calls dmub layer to retrieve dmub notification
+ * Calls dmub layer to retrieve dmub notification
*
- * @param
- * [in] dc: dc structure
- * [in] notify: dmub notification structure
+ * @dc: dc structure
+ * @notify: dmub notification structure
*
- * @return
+ * Returns
* None
- *****************************************************************************
*/
void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify)
{
@@ -65,6 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, notify->link_index);
@@ -72,19 +69,15 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
}
/**
- *****************************************************************************
- * Function: dc_stat_get_dmub_dataout
+ * dc_stat_get_dmub_dataout
*
- * @brief
- * Calls dmub layer to retrieve dmub gpint dataout
+ * Calls dmub layer to retrieve dmub gpint dataout
*
- * @param
- * [in] dc: dc structure
- * [in] dataout: dmub gpint dataout
+ * @dc: dc structure
+ * @dataout: dmub gpint dataout
*
- * @return
+ * Returns
* None
- *****************************************************************************
*/
void dc_stat_get_dmub_dataout(const struct dc *dc, uint32_t *dataout)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 20e534f73513..72b261ad9587 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -408,7 +408,7 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
- struct dc *dc = stream->ctx->dc;
+ struct dc *dc;
bool reset_idle_optimizations = false;
if (NULL == stream) {
@@ -481,6 +481,7 @@ bool dc_stream_add_writeback(struct dc *dc,
}
if (!isDrc) {
+ ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES);
stream->writeback_info[stream->num_wb_info++] = *wb_info;
}
@@ -526,6 +527,11 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
+ if (stream->num_wb_info > MAX_DWB_PIPES) {
+ dm_error("DC: num_wb_info is invalid!\n");
+ return false;
+ }
+
// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
for (i = 0; i < stream->num_wb_info; i++) {
/*dynamic update*/
@@ -540,7 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
if (stream->writeback_info[i].wb_enabled) {
if (j < i)
/* trim the array */
- stream->writeback_info[j] = stream->writeback_info[i];
+ memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
+ sizeof(struct dc_writeback_info));
j++;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index cde8ed2560b3..eda2152dcd1f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,9 +47,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_save_init(dc);
-#endif
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 85ebeaa2de18..30f0ba05a6e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,9 +29,7 @@
#include "dc_types.h"
#include "grph_object_defs.h"
#include "logger_types.h"
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#include "hdcp_types.h"
-#endif
+#include "hdcp_msg_types.h"
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
@@ -47,12 +45,11 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.215"
+#define DC_VER "3.2.230"
#define MAX_SURFACES 3
#define MAX_PLANES 6
#define MAX_STREAMS 6
-#define MAX_SINKS_PER_LINK 4
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
@@ -85,8 +82,6 @@ enum det_size {
struct dc_plane_cap {
enum dc_plane_type type;
- uint32_t blends_with_above : 1;
- uint32_t blends_with_below : 1;
uint32_t per_pixel_alpha : 1;
struct {
uint32_t argb8888 : 1;
@@ -410,7 +405,8 @@ struct dc_config {
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
-
+ bool use_old_fixed_vs_sequence;
+ bool disable_subvp_drr;
};
enum visual_confirm {
@@ -717,6 +713,7 @@ struct dc_bounding_box_overrides {
struct dc_state;
struct resource_pool;
struct dce_hwseq;
+struct link_service;
/**
* struct dc_debug_options - DC debug struct
@@ -796,6 +793,7 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
+ int minimum_z8_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@@ -872,6 +870,15 @@ struct dc_debug_options {
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
+ bool disable_unbounded_requesting;
+ bool dig_fifo_off_in_blank;
+ bool temp_mst_deallocation_sequence;
+ bool override_dispclk_programming;
+ bool disable_fpo_optimizations;
+ bool support_eDP1_5;
+ uint32_t fpo_vactive_margin_us;
+ bool disable_fpo_vactive;
+ bool disable_boot_optimizations;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -888,6 +895,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_PIPES * 2];
+ struct link_service *link_srv;
struct dc_state *current_state;
struct resource_pool *res_pool;
@@ -989,11 +997,7 @@ struct dc_init_data {
};
struct dc_callback_init {
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
-#else
- uint8_t reserved;
-#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
@@ -1360,116 +1364,689 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context);
-
struct dc_state *dc_create_state(struct dc *dc);
struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
+struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
+ struct dc_stream_state *stream,
+ int mpcc_inst);
+
+
+uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+
+/* The function returns the minimum bandwidth required to drive a given timing.
+ * return - minimum required timing bandwidth in kbps.
+ */
+uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
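+
+/* Illustrative sketch (not part of this interface set): a caller could check
+ * whether a timing fits within a link's bandwidth by combining this helper
+ * with dc_link_bandwidth_kbps() and dc_link_get_link_cap() declared below.
+ * The stream and link variables are assumed to come from the caller.
+ *
+ *	uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ *	uint32_t link_bw = dc_link_bandwidth_kbps(link,
+ *			dc_link_get_link_cap(link));
+ *	bool timing_fits = req_bw <= link_bw;
+ */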
+
/* Link Interfaces */
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
-struct dpcd_caps {
- union dpcd_rev dpcd_rev;
- union max_lane_count max_ln_count;
- union max_down_spread max_down_spread;
- union dprx_feature dprx_feature;
-
- /* valid only for eDP v1.4 or higher*/
- uint8_t edp_supported_link_rates_count;
- enum dc_link_rate edp_supported_link_rates[8];
-
- /* dongle type (DP converter, CV smart dongle) */
- enum display_dongle_type dongle_type;
- bool is_dongle_type_one;
- /* branch device or sink device */
- bool is_branch_dev;
- /* Dongle's downstream count. */
- union sink_count sink_count;
- bool is_mst_capable;
- /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
- indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
- struct dc_dongle_caps dongle_caps;
-
- uint32_t sink_dev_id;
- int8_t sink_dev_id_str[6];
- int8_t sink_hw_revision;
- int8_t sink_fw_revision[2];
-
- uint32_t branch_dev_id;
- int8_t branch_dev_name[6];
- int8_t branch_hw_revision;
- int8_t branch_fw_revision[2];
-
- bool allow_invalid_MSA_timing_param;
- bool panel_mode_edp;
- bool dpcd_display_control_capable;
- bool ext_receiver_cap_field_present;
- bool set_power_state_capable_edp;
- bool dynamic_backlight_capable_edp;
- union dpcd_fec_capability fec_cap;
- struct dpcd_dsc_capabilities dsc_caps;
- struct dc_lttpr_caps lttpr_caps;
- struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
-
- union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
- union dp_main_line_channel_coding_cap channel_coding_cap;
- union dp_sink_video_fallback_formats fallback_formats;
- union dp_fec_capability1 fec_cap1;
- union dp_cable_id cable_id;
- uint8_t edp_rev;
- union edp_alpm_caps alpm_caps;
- struct edp_psr_info psr_info;
-};
-
-union dpcd_sink_ext_caps {
- struct {
- /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
- * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
- */
- uint8_t sdr_aux_backlight_control : 1;
- uint8_t hdr_aux_backlight_control : 1;
- uint8_t reserved_1 : 2;
- uint8_t oled : 1;
- uint8_t reserved : 3;
- } bits;
- uint8_t raw;
-};
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+ bool is_automated; /* Indicates automated testing */
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-union hdcp_rx_caps {
- struct {
- uint8_t version;
- uint8_t reserved;
- struct {
- uint8_t repeater : 1;
- uint8_t hdcp_capable : 1;
- uint8_t reserved : 6;
- } byte0;
- } fields;
- uint8_t raw[3];
-};
+ uint8_t ddc_hw_inst;
-union hdcp_bcaps {
- struct {
- uint8_t HDCP_CAPABLE:1;
- uint8_t REPEATER:1;
- uint8_t RESERVED:6;
- } bits;
- uint8_t raw;
-};
+ uint8_t hpd_src;
-struct hdcp_caps {
- union hdcp_rx_caps rx_caps;
- union hdcp_bcaps bcaps;
-};
-#endif
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
-#include "dc_link.h"
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
-uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ enum dp_panel_mode panel_mode;
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+};
+
+/* Return an enumerated dc_link.
+ * dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index);
+
+/* Return instance id of the edp link. Inst 0 is primary edp link. */
+bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out);
+
+/* Return an array of link pointers to edp links. */
+void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num);
+
+/* The function initiates detection handshake over the given link. It first
+ * determines if there are display connections over the link. If so it initiates
+ * detection protocols supported by the connected receiver device. The function
+ * contains protocol specific handshake sequences which are sometimes mandatory
+ * to establish a proper connection between TX and RX. So it is always
+ * recommended to call this function as the first link operation upon HPD event
+ * or power up event. Upon completion, the function will update link structure
+ * in place based on latest RX capabilities. The function may also cause dpms
+ * to be reset to off for all currently enabled streams to the link. It is DM's
+ * responsibility to serialize detection and DPMS updates.
+ *
+ * @reason - Indicate which event triggers this detection. dc may customize
+ * detection flow depending on the triggering events.
+ * return false - if detection is not fully completed. This could happen when
+ * there is an unrecoverable error during detection or detection is partially
+ * completed (detection has been delegated to dm mst manager, i.e.
+ * link->connection_type == dc_connection_mst_branch when returning false).
+ * return true - detection is completed, link has been fully updated with latest
+ * detection result.
+ */
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
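+
+/* Minimal DM-side sketch (dm_update_connector is a hypothetical helper, and
+ * DETECT_REASON_HPD is assumed to be a dc_detect_reason enumerator): run full
+ * detection on a long HPD pulse and consume the updated link state on success.
+ *
+ *	if (dc_link_detect(link, DETECT_REASON_HPD))
+ *		dm_update_connector(link);
+ */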
+
+struct dc_sink_init_data;
+
+/* When link connection type is dc_connection_mst_branch, remote sink can be
+ * added to the link. The interface creates a remote sink and associates it with
+ * current link. The sink will be retained by link until remove remote sink is
+ * called.
+ *
+ * @dc_link - link the remote sink will be added to.
+ * @edid - byte array of EDID raw data.
+ * @len - size of the edid in bytes
+ * @init_data -
+ */
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+/* Remove remote sink from a link with dc_connection_mst_branch connection type.
+ * @link - link the sink should be removed from
+ * @sink - sink to be removed.
+ */
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+/* Enable HPD interrupt handler for a given link */
+void dc_link_enable_hpd(const struct dc_link *link);
+
+/* Disable HPD interrupt handler for a given link */
+void dc_link_disable_hpd(const struct dc_link *link);
+
+/* determine if there is a sink connected to the link
+ *
+ * @type - dc_connection_single if connected, dc_connection_none otherwise.
+ * return - false if an unexpected error occurs, true otherwise.
+ *
+ * NOTE: This function doesn't detect downstream sink connections, i.e.
+ * dc_connection_mst_branch, dc_connection_sst_branch. In this case, it will
+ * return dc_connection_single if the branch device is connected regardless of
+ * the downstream sink's connection status.
+ */
+bool dc_link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type);
+
+/* query current hpd pin value
+ * return - true if HPD is asserted (HPD high), false otherwise (HPD low)
+ *
+ */
+bool dc_link_get_hpd_state(struct dc_link *link);
+
+/* Getter for cached link status from given link */
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link);
+
+/* enable/disable hardware HPD filter.
+ *
+ * @link - The link the HPD pin is associated with.
+ * @enable = true - enable hardware HPD filter. An HPD event will only be queued
+ * to the irq handler once no HPD change has been detected within dc's default
+ * HPD filtering interval since the last HPD event, i.e. if the display keeps
+ * toggling hpd pulses within the default HPD interval, no HPD event will be
+ * received until the HPD toggles have stopped. Then an HPD event will be queued
+ * to the irq handler once the dc default HPD filtering interval has elapsed
+ * since the last HPD event.
+ *
+ * @enable = false - disable hardware HPD filter. An HPD event will be queued
+ * to the irq handler immediately after no HPD change has been detected within
+ * the IRQ_HPD (aka HPD short pulse) interval (i.e. 2ms).
+ */
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
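+
+/* Minimal sketch: a DM could coalesce hpd noise from a flaky connector by
+ * enabling the hardware filter, and turn it back off when immediate hpd
+ * delivery is needed again (e.g. for debugging).
+ *
+ *	dc_link_enable_hpd_filter(link, true);
+ *	...
+ *	dc_link_enable_hpd_filter(link, false);
+ */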
+
+/* submit i2c read/write payloads through ddc channel
+ * @link_index - index to a link with ddc in i2c mode
+ * @cmd - i2c command structure
+ * return - true on success, false otherwise.
+ */
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
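+
+/* Illustrative sketch, assuming link_index refers to a link with ddc in i2c
+ * mode and that the field names follow struct i2c_payload/i2c_command in
+ * dc_ddc_types.h: write one byte to i2c slave 0x37 over the link's ddc.
+ *
+ *	uint8_t byte = 0x01;
+ *	struct i2c_payload payload = { .write = true, .address = 0x37,
+ *				       .length = 1, .data = &byte };
+ *	struct i2c_command command = { .payloads = &payload,
+ *				       .number_of_payloads = 1,
+ *				       .engine = DDC_I2C_COMMAND_ENGINE,
+ *				       .speed = 100 };
+ *	bool ok = dc_submit_i2c(dc, link_index, &command);
+ */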
+
+/* submit i2c read/write payloads through oem channel
+ * @link_index - index to a link with ddc in i2c mode
+ * @cmd - i2c command structure
+ * return - true on success, false otherwise.
+ */
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd);
+
+enum aux_return_code_type;
+/* Attempt to transfer the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned in the payload->reply
+ * and the result through operation_result. Returns the number of bytes
+ * transferred, or -1 on failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
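+
+/* Sketch of a single raw DPCD read (no retries are performed by this call);
+ * address 0x000 is DPCD_REV and the ddc service is assumed to be valid:
+ *
+ *	uint8_t dpcd_rev;
+ *	enum aux_return_code_type op_result;
+ *	struct aux_payload payload = { .i2c_over_aux = false, .write = false,
+ *				       .address = 0x000, .length = 1,
+ *				       .data = &dpcd_rev };
+ *	int read = dc_link_aux_transfer_raw(ddc, &payload, &op_result);
+ *	if (read < 0)
+ *		...inspect op_result...
+ */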
+
+bool dc_is_oem_i2c_device_present(
+ struct dc *dc,
+ size_t slave_address
+);
+
+/* return true if the connected receiver supports the hdcp version */
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
+
+/* Notify DC about DP RX Interrupt (aka DP IRQ_HPD).
+ *
+ * TODO - When defer_handling is true the function will have a different purpose.
+ * It no longer does complete hpd rx irq handling. We should create a separate
+ * interface specifically for this case.
+ *
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM.
+ */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
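+
+/* Rough DM-side short pulse handler built on this interface, with
+ * dm_schedule_detection standing in for a hypothetical DM helper:
+ *
+ *	union hpd_irq_data irq_data;
+ *	bool link_loss = false, left_work = false;
+ *
+ *	if (dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss,
+ *			false, &left_work))
+ *		dm_schedule_detection(link);
+ */
+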
+/* Handle the test automation sequence defined in the DP specs. */
+void dc_link_dp_handle_automated_test(struct dc_link *link);
+
+/* handle DP Link loss sequence and try to recover RX link loss with best
+ * effort
+ */
+void dc_link_dp_handle_link_loss(struct dc_link *link);
+
+/* Determine if hpd rx irq should be handled or ignored
+ * return true - hpd rx irq should be handled.
+ * return false - it is safe to ignore hpd rx irq event
+ */
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
+
+/* Determine if link loss is indicated with a given hpd_irq_dpcd_data.
+ * @link - link the hpd irq data is associated with
+ * @hpd_irq_dpcd_data - input hpd irq data
+ * return - true if the hpd irq data indicates link loss
+ */
+bool dc_link_check_link_loss_status(struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+
+/* Read hpd rx irq data from a given link
+ * @link - link where the hpd irq data should be read from
+ * @irq_data - output hpd irq data
+ * return - DC_OK if hpd irq data is read successfully, otherwise the hpd irq
+ * data read has failed.
+ */
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+
+/* The function clears recorded DP RX states in the link. DM should call this
+ * function on previously connected links when resuming from the S3 power state.
+ *
+ * TODO - in the future we should consider expanding the link resume interface
+ * to support clearing previous rx states, so we don't have to rely on dm to
+ * call this interface explicitly.
+ */
+void dc_link_clear_dprx_states(struct dc_link *link);
+
+/* Destruct the mst topology of the link and reset the allocated payload table
+ *
+ * NOTE: this should only be called if DM chooses not to call dc_link_detect but
+ * still wants to reset MST topology on an unplug event */
+bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link);
+
+/* The function calculates effective DP link bandwidth when a given link is
+ * using the given link settings.
+ *
+ * return - total effective link bandwidth in kbps.
+ */
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
+/* The function takes a snapshot of current link resource allocation state
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * DM needs to capture a snapshot of current link resource allocation mapping
+ * and store it in its persistent storage.
+ *
+ * Some of the link resources use a first come, first served policy.
+ * The allocation mapping depends on the original hotplug order. This information
+ * is lost after the driver is loaded the next time. The snapshot is used to
+ * restore the link resources to their previous state so the user gets consistent
+ * link capability allocation across reboots.
+ *
+ */
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
+
+/* This function restores link resource allocation state from a snapshot
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * DM needs to call this function after initial link detection on boot and
+ * before first commit streams to restore link resource allocation state
+ * from previous boot session.
+ *
+ * Some of the link resources use a first come, first served policy.
+ * The allocation mapping depends on the original hotplug order. This information
+ * is lost after the driver is loaded the next time. The snapshot is used to
+ * restore the link resources to their previous state so the user gets consistent
+ * link capability allocation across reboots.
+ *
+ */
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
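+
+/* Assumed usage across a boot cycle (the persistent-storage helpers are
+ * hypothetical DM functions):
+ *
+ *	uint32_t map;
+ *
+ *	dc_get_cur_link_res_map(dc, &map);
+ *	dm_store_u32("link_res_map", map);
+ *
+ *	...next boot, after initial link detection and before first commit...
+ *	map = dm_load_u32("link_res_map");
+ *	dc_restore_link_res_map(dc, &map);
+ */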
+
+/* TODO: this is not meant to be exposed to DM. Should switch to the stream
+ * update interface, i.e. stream_update->dsc_config
+ */
+bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
+
+/* translate a raw link rate data to bandwidth in kbps */
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(const struct dc *dc, uint8_t bw);
+
+/* determine the optimal bandwidth given a link and the required bw.
+ * @link - current detected link
+ * @req_bw - requested bandwidth in kbps
+ * @link_settings - returned most optimal link settings that can fit the
+ * requested bandwidth
+ * return - false if the link can't support the requested bandwidth, true if
+ * link settings are found.
+ */
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_settings,
+ uint32_t req_bw);
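+
+/* Sketch: pick the most optimal eDP link settings for a stream's timing,
+ * assuming an eDP link and a stream owned by the caller.
+ *
+ *	struct dc_link_settings ls = {0};
+ *	uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ *
+ *	if (dc_link_decide_edp_link_settings(link, &ls, req_bw))
+ *		...ls now holds the minimum settings that fit req_bw...
+ */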
+
+/* return the max dp link settings that can be driven by the link without
+ * considering the connected RX device and its capability
+ */
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+
+/* determine, when the link is driving MST mode, what DP link channel coding
+ * format will be used. The decision will remain unchanged until the next HPD
+ * event.
+ *
+ * @link - a link with a DP RX connection
+ * return - the type of channel coding format dc will choose, if a stream is
+ * committed to this link with the MST signal type.
+ */
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
+ const struct dc_link *link);
+
+/* get the max dp link settings the link can enable with all things considered
+ * (i.e. TX/RX/cable capabilities and dp override policies).
+ *
+ * @link - a link with DP RX connection
+ * return - max dp link settings the link can enable.
+ *
+ */
+const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
+
+/* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected
+ * to a link with dp connector signal type.
+ * @link - a link with dp connector signal type
+ * return - true if connected, false otherwise
+ */
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
+/* Force DP lane settings update to main-link video signal and notify the change
+ * to DP RX via DPCD. This is a debug interface used for video signal integrity
+ * tuning purpose. The interface assumes link has already been enabled with DP
+ * signal.
+ *
+ * @lt_settings - a container structure with desired hw_lane_settings
+ */
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ struct dc_link *link);
+
+/* Enable a test pattern in Link or PHY layer in an active link for compliance
+ * test or debugging purpose. The test pattern will remain until next un-plug.
+ *
+ * @link - active link with DP signal output enabled.
+ * @test_pattern - desired test pattern to output.
+ * NOTE: set to DP_TEST_PATTERN_VIDEO_MODE to disable previous test pattern.
+ * @test_pattern_color_space - for video test pattern choose a desired color
+ * space.
+ * @p_link_settings - For PHY pattern choose a desired link settings
+ * @p_custom_pattern - some test pattern will require a custom input to
+ * customize some pattern details. Otherwise keep it to NULL.
+ * @cust_pattern_size - size of the custom pattern input.
+ *
+ */
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
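+
+/* Sketch: clear any previously set pattern by requesting the video mode
+ * pattern again, as noted above. The color space enumerator is assumed from
+ * dc's dp_test_pattern_color_space definition.
+ *
+ *	dc_link_dp_set_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
+ *			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, NULL, NULL, 0);
+ */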
+
+/* Force a specific link to always use specific DP link settings until reboot.
+ * If the link has already been enabled, the interface will also switch to the
+ * desired link settings immediately. This is a debug interface for generic dp
+ * issue troubleshooting.
+ */
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+
+/* Force a DP link to customize a specific link training behavior by overriding
+ * the standard protocol defined in the DP specs. This is a debug interface to
+ * troubleshoot display-specific link training issues or to apply a
+ * display-specific workaround in link training.
+ *
+ * @link_settings - if not NULL, force preferred link settings to the link.
+ * @lt_override - a set of override pointers. If any pointer is non-NULL, dc
+ * will apply this particular override in future link training. If NULL is
+ * passed in, dc resets previous overrides.
+ * NOTE: DM must keep the memory from override pointers until DM resets preferred
+ * training settings.
+ */
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+
+/* return - true if FEC is supported with connected DP RX, false otherwise */
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
+/* query FEC enablement policy to determine if FEC will be enabled by dc during
+ * link enablement.
+ * return - true if FEC should be enabled, false otherwise.
+ */
+bool dc_link_should_enable_fec(const struct dc_link *link);
+
+/* determine the lttpr mode the current link should be enabled with, for a
+ * specific link setting.
+ */
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+/* Force DP RX to update its power state.
+ * NOTE: this interface doesn't update dp main-link. Calling this function will
+ * cause the DP TX main-link and DP RX power states to go out of sync. DM has
+ * to restore the RX power state once it finishes the DM-specific execution
+ * that requires DP RX to be in a specific power state.
+ * @on - true to set DP RX in D0 power state, false to set DP RX in D3 power
+ * state.
+ */
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+/* Force link to read base dp receiver caps from dpcd 000h - 00Fh and overwrite
+ * current value read from extended receiver cap from 02200h - 0220Fh.
+ * Some DP RXes have problems providing accurate DP receiver caps from the
+ * extended field; this interface is a workaround to revert the link back to
+ * using the base caps.
+ */
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link);
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
+ bool wait_for_hpd);
+
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is an unsigned 32-bit value with a 16-bit integer part
+ * and a 16-bit fractional part, where 1.0 is the max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
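+
+/* Worked example of the 16.16 format: full brightness is 1.0 = 0x00010000 and
+ * 50% is 0.5 * 65536 = 0x8000. A level in [0..max] can be converted with
+ *
+ *	uint32_t bl_u16_16 = (uint32_t)(((uint64_t)level << 16) / max);
+ *	dc_link_set_backlight_level(link, bl_u16_16, frame_ramp);
+ *
+ * where frame_ramp is the DM-chosen ramping duration (0 is assumed to mean no
+ * ramping).
+ */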
+
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
+
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in power off state, this function will return
+ * immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
+/* Determine if the dp trace has been initialized to reflect an up-to-date result.
+ * return - true if the trace is initialized and has valid data, false if the
+ * dp trace doesn't have a valid result.
+ */
+bool dc_dp_trace_is_initialized(struct dc_link *link);
+
+/* Query a dp trace flag to indicate if the current dp trace data has been
+ * logged before
+ */
+bool dc_dp_trace_is_logged(struct dc_link *link,
+ bool in_detection);
+
+/* Set dp trace flag to indicate whether DM has already logged the current dp
+ * trace data. DM can set is_logged to true upon logging and check
+ * dc_dp_trace_is_logged before logging to avoid logging the same result twice.
+ */
+void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+ bool in_detection,
+ bool is_logged);
+
+/* Obtain driver time stamp for last dp link training end. The time stamp is
+ * formatted based on dm_get_timestamp DM function.
+ * @in_detection - true to get link training end time stamp of last link
+ * training in detection sequence. false to get link training end time stamp
+ * of last link training in commit (dpms) sequence
+ */
+unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+ bool in_detection);
+
+/* Get how many link training attempts dc has done with latest sequence.
+ * @in_detection - true to get link training count of last link
+ * training in detection sequence. false to get link training count of last link
+ * training in commit (dpms) sequence
+ */
+const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+ bool in_detection);
+
+/* Get how many link losses have happened since the last link training attempt */
+unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+
+/*
+ * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
+ */
+/*
+ * Send a request from DP-Tx requesting to allocate BW remotely after
+ * allocating it locally. This will get processed by CM and a CB function
+ * will be called.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: The requested bw in Kbyte to be allocated
+ *
+ * return: none
+ */
+void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
+
+/*
+ * Handle function for when the status of the Request above is complete.
+ * We will find out the result of allocating on CM and update structs.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
+ uint8_t bw, uint8_t result);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ struct dc_link *link, int peak_bw);
+
+/*
+ * Validate the BW of all the valid DPIA links to make sure it doesn't exceed
+ * available BW for each host router
+ *
+ * @dc: pointer to dc struct
+ * @streams: pointer to all possible streams
+ * @count: number of valid DPIA streams
+ *
+ * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ */
+bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+ const unsigned int count);
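+
+/* Sketch: a DM could gate a new configuration on the DPIA bandwidth check,
+ * where streams/stream_count describe the proposed state:
+ *
+ *	if (!dc_link_validate(dc, streams, stream_count))
+ *		...reject the configuration, host router BW would be exceeded...
+ */
+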
/* Sink Interfaces - A sink corresponds to a display output device */
@@ -1489,7 +2066,7 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
@@ -1502,6 +2079,11 @@ struct dc_sink_fec_caps {
bool is_topology_fec_supported;
};
+struct scdc_caps {
+ union hdmi_scdc_manufacturer_OUI_data manufacturer_OUI;
+ union hdmi_scdc_device_id_data device_id;
+};
+
/*
* The sink structure contains EDID and other display device properties
*/
@@ -1515,6 +2097,7 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
+ struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
@@ -1575,7 +2158,6 @@ void dc_resume(struct dc *dc);
void dc_power_down_on_boot(struct dc *dc);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
/*
* HDCP Interfaces
*/
@@ -1583,7 +2165,6 @@ enum hdcp_message_status dc_process_hdcp_msg(
enum signal_type signal,
struct dc_link *link,
struct hdcp_protection_message *message_info);
-#endif
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 260ac4458870..be9aa1a71847 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -140,7 +140,8 @@ struct dc_vbios_funcs {
enum bp_result (*enable_lvtma_control)(
struct dc_bios *bios,
uint8_t uc_pwr_on,
- uint8_t panel_instance);
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait);
enum bp_result (*get_soc_bb_info)(
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 7769bd099a5a..428e3a9ab65a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -77,6 +77,32 @@ struct aux_reply_transaction_data {
uint8_t *data;
};
+struct aux_payload {
+ /* set following flag to read/write I2C data,
+ * reset it to read/write DPCD data */
+ bool i2c_over_aux;
+ /* set following flag to write data,
+ * reset it to read data */
+ bool write;
+ bool mot;
+ bool write_status_update;
+
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+ /*
+ * used to return the reply type of the transaction
+ * ignored if NULL
+ */
+ uint8_t *reply;
+ /* expressed in milliseconds
+ * zero means "use default value"
+ */
+ uint32_t defer_delay;
+
+};
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+
struct i2c_payload {
bool write;
uint8_t address;
@@ -90,6 +116,8 @@ enum i2c_command_engine {
I2C_COMMAND_ENGINE_HW
};
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
struct i2c_command {
struct i2c_payload *payloads;
uint8_t number_of_payloads;
@@ -150,6 +178,9 @@ enum display_dongle_type {
DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
};
+#define DC_MAX_EDID_BUFFER_SIZE 2048
+#define DC_EDID_BLOCK_SIZE 128
+
struct ddc_service {
struct ddc *ddc_pin;
struct ddc_flags flags;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 6ccf477d1c4d..a9b9490a532c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -302,29 +302,32 @@ static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_
return pipes;
}
-static int dc_dmub_srv_get_timing_generator_offset(struct dc *dc, struct dc_stream_state *stream)
+static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *head_pipe,
+ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
- int tg_inst = 0;
- int i = 0;
+ int j;
+ int pipe_idx = 0;
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];
- if (pipe->stream == stream && pipe->stream_res.tg) {
- tg_inst = pipe->stream_res.tg->inst;
- break;
+ if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
+ fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
}
}
- return tg_inst;
+ fams_pipe_data->pipe_count = pipe_idx;
}
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
union dmub_rb_cmd cmd = { 0 };
struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
- int i = 0;
+ int i = 0, k = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
+ int pipe_idx = 0;
if (dc == NULL)
return false;
@@ -337,17 +340,40 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
- for (i = 0; context && i < context->stream_count; i++) {
- struct dc_stream_state *stream = context->streams[i];
- uint8_t min_refresh_in_hz = (stream->timing.min_refresh_in_uhz + 999999) / 1000000;
- int tg_inst = dc_dmub_srv_get_timing_generator_offset(dc, stream);
+ if (should_manage_pstate) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
- config_data->pipe_data[tg_inst].pix_clk_100hz = stream->timing.pix_clk_100hz;
- config_data->pipe_data[tg_inst].min_refresh_in_hz = min_refresh_in_hz;
- config_data->pipe_data[tg_inst].max_ramp_step = ramp_up_num_steps;
- config_data->pipe_data[tg_inst].pipes = dc_dmub_srv_get_pipes_for_stream(dc, stream);
+ /* If FAMS is being used to support P-State and there is a stream
+ * that does not use FAMS, we are in an FPO + VActive scenario.
+ * Assign vactive stretch margin in this case.
+ */
+ if (!pipe->stream->fpo_in_use) {
+ cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
+ break;
+ }
+ pipe_idx++;
+ }
}
+ for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->stream && pipe->stream->fpo_in_use) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
+
+ config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
+ config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
+ config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
+ config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
+ dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
+ k++;
+ }
+ }
cmd.fw_assisted_mclk_switch.header.payload_bytes =
sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
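Note: the min_refresh_in_hz computation in the loop above is a ceiling conversion from micro-Hertz to whole Hertz. A minimal illustrative sketch of the same arithmetic (not part of the patch):

	/* round a refresh rate given in uHz up to whole Hz, e.g. 47952000 uHz -> 48 Hz */
	static inline uint32_t uhz_to_hz_ceil(uint32_t uhz)
	{
		return (uhz + 999999) / 1000000;
	}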
@@ -421,7 +447,6 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
}
}
-#ifdef CONFIG_DRM_AMD_DC_DCN
/**
* populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
*
@@ -638,7 +663,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
pipe_data->pipe_config.subvp_data.main_vblank_end =
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
- pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
/* Calculate the scaling factor from the src and dst height.
@@ -680,11 +705,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
- pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
} else if (phantom_pipe->next_odm_pipe) {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
} else {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
}
@@ -698,7 +723,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
*
* @dc: [in] current dc state
* @context: [in] new dc state
- * @cmd: [in] DMUB cmd to be populated with SubVP info
+ * @enable: [in] if true, enables populating the pipes
*
* This function loops through each pipe and populates the DMUB SubVP CMD info
* based on the pipe (e.g. SubVP, VBLANK).
@@ -750,7 +775,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
!pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
- } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -775,7 +801,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
-#endif
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 2c54b6e0498b..49aab1924665 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -27,6 +27,7 @@
#define DC_DP_TYPES_H
#include "os_types.h"
+#include "dc_ddc_types.h"
enum dc_lane_count {
LANE_COUNT_UNKNOWN = 0,
@@ -46,14 +47,15 @@ enum dc_lane_count {
*/
enum dc_link_rate {
LINK_RATE_UNKNOWN = 0,
- LINK_RATE_LOW = 0x06, // Rate_1 (RBR) - 1.62 Gbps/Lane
- LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane
- LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane
- LINK_RATE_HIGH = 0x0A, // Rate_4 (HBR) - 2.70 Gbps/Lane
- LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane
- LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
- LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane
- LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane
+ LINK_RATE_LOW = 0x06, // Rate_1 (RBR) - 1.62 Gbps/Lane
+ LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane
+ LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane
+ LINK_RATE_HIGH = 0x0A, // Rate_4 (HBR) - 2.70 Gbps/Lane
+ LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane
+ LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
+ LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2) - 5.40 Gbps/Lane
+ LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 Gbps/Lane
+ LINK_RATE_HIGH3 = 0x1E, // Rate_9 (HBR3) - 8.10 Gbps/Lane
/* Starting from DP2.0 link rate enum directly represents actual
* link rate value in unit of 10 mbps
*/
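Note: for the 8b/10b rates above, the raw value is the per-lane rate in units of 0.27 Gbps (for example the new LINK_RATE_RATE_8 is 0x19 = 25, and 25 x 0.27 = 6.75 Gbps), whereas from DP 2.0 onward the enumerator directly encodes the link rate in units of 10 Mbps.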
@@ -149,7 +151,6 @@ struct dc_link_settings {
enum dc_link_spread link_spread;
bool use_link_rate_set;
uint8_t link_rate_set;
- bool dpcd_source_device_specific_field_support;
};
union dc_dp_ffe_preset {
@@ -362,14 +363,10 @@ enum dpcd_downstream_port_detailed_type {
union dwnstream_port_caps_byte2 {
struct {
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3;
uint8_t SOURCE_CONTROL_MODE_SUPPORT:1;
uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1;
uint8_t RESERVED:1;
-#else
- uint8_t RESERVED:6;
-#endif
} bits;
uint8_t raw;
};
@@ -407,7 +404,6 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
union hdmi_sink_encoded_link_bw_support {
struct {
uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
@@ -429,7 +425,6 @@ union hdmi_encoded_link_bw {
} bits;
uint8_t raw;
};
-#endif
/*4-byte structure for detailed capabilities of a down-stream port
(DP-to-TMDS converter).*/
@@ -509,7 +504,11 @@ union down_spread_ctrl {
1 = Main link signal is downspread <= 0.5%
with frequency in the range of 30kHz ~ 33kHz*/
uint8_t SPREAD_AMP:1;
- uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
+ uint8_t RESERVED2:1;/*Bit 5 = RESERVED. Read all 0s*/
+ /* Bit 6 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE.
+ 0 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is not enabled by the Source device (default)
+ 1 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is enabled by Source device */
+ uint8_t FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE:1;
/*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
0 = Source device will send valid data for the MSA Timing Params
1 = Source device may send invalid data for these MSA Timing Params*/
@@ -865,6 +864,21 @@ struct psr_caps {
unsigned int psr_power_opt_flag;
};
+union dpcd_dprx_feature_enumeration_list_cont_1 {
+ struct {
+ uint8_t ADAPTIVE_SYNC_SDP_SUPPORT:1;
+ uint8_t AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED: 1;
+ uint8_t RESERVED0: 2;
+ uint8_t VSC_EXT_SDP_VER1_SUPPORT: 1;
+ uint8_t RESERVED1: 3;
+ } bits;
+ uint8_t raw;
+};
+
+struct adaptive_sync_caps {
+ union dpcd_dprx_feature_enumeration_list_cont_1 dp_adap_sync_caps;
+};
+
/* Length of router topology ID read from DPCD in bytes. */
#define DPCD_USB4_TOPOLOGY_ID_LEN 5
@@ -908,12 +922,6 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
#endif
-#ifndef DP_LINK_SQUARE_PATTERN
-#define DP_LINK_SQUARE_PATTERN 0x10F
-#endif
-#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
-#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
-#endif
#ifndef DP_DSC_CONFIGURATION
#define DP_DSC_CONFIGURATION 0x161
#endif
@@ -926,9 +934,6 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
#endif
-#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
-#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
-#endif
#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
#endif
@@ -972,7 +977,6 @@ struct dpcd_usb4_dp_tunneling_info {
#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3)
/* TODO - Use DRM header to replace above once available */
#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION
-
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1107,4 +1111,296 @@ struct edp_psr_info {
uint8_t force_psrsu_cap;
};
+struct dprx_states {
+ bool cable_id_written;
+};
+
+enum dpcd_downstream_port_max_bpc {
+ DOWN_STREAM_MAX_8BPC = 0,
+ DOWN_STREAM_MAX_10BPC,
+ DOWN_STREAM_MAX_12BPC,
+ DOWN_STREAM_MAX_16BPC
+};
+
+enum link_training_offset {
+ DPRX = 0,
+ LTTPR_PHY_REPEATER1 = 1,
+ LTTPR_PHY_REPEATER2 = 2,
+ LTTPR_PHY_REPEATER3 = 3,
+ LTTPR_PHY_REPEATER4 = 4,
+ LTTPR_PHY_REPEATER5 = 5,
+ LTTPR_PHY_REPEATER6 = 6,
+ LTTPR_PHY_REPEATER7 = 7,
+ LTTPR_PHY_REPEATER8 = 8
+};
+
+#define MAX_REPEATER_CNT 8
+
+struct dc_lttpr_caps {
+ union dpcd_rev revision;
+ uint8_t mode;
+ uint8_t max_lane_count;
+ uint8_t max_link_rate;
+ uint8_t phy_repeater_cnt;
+ uint8_t max_ext_timeout;
+ union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
+ union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+ uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+};
+
+struct dc_dongle_dfp_cap_ext {
+ bool supported;
+ uint16_t max_pixel_rate_in_mps;
+ uint16_t max_video_h_active_width;
+ uint16_t max_video_v_active_height;
+ struct dp_encoding_format_caps encoding_format_caps;
+ struct dp_color_depth_caps rgb_color_depth_caps;
+ struct dp_color_depth_caps ycbcr444_color_depth_caps;
+ struct dp_color_depth_caps ycbcr422_color_depth_caps;
+ struct dp_color_depth_caps ycbcr420_color_depth_caps;
+};
+
+struct dc_dongle_caps {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool extendedCapValid;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
+ bool is_dp_hdmi_s3d_converter;
+ bool is_dp_hdmi_ycbcr422_pass_through;
+ bool is_dp_hdmi_ycbcr420_pass_through;
+ bool is_dp_hdmi_ycbcr422_converter;
+ bool is_dp_hdmi_ycbcr420_converter;
+ uint32_t dp_hdmi_max_bpc;
+ uint32_t dp_hdmi_max_pixel_clk_in_khz;
+ uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
+ struct dc_dongle_dfp_cap_ext dfp_cap_ext;
+};
+
+struct dpcd_caps {
+ union dpcd_rev dpcd_rev;
+ union max_lane_count max_ln_count;
+ union max_down_spread max_down_spread;
+ union dprx_feature dprx_feature;
+
+ /* valid only for eDP v1.4 or higher*/
+ uint8_t edp_supported_link_rates_count;
+ enum dc_link_rate edp_supported_link_rates[8];
+
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool is_dongle_type_one;
+ /* branch device or sink device */
+ bool is_branch_dev;
+ /* Dongle's downstream count. */
+ union sink_count sink_count;
+ bool is_mst_capable;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
+ struct dc_dongle_caps dongle_caps;
+
+ uint32_t sink_dev_id;
+ int8_t sink_dev_id_str[6];
+ int8_t sink_hw_revision;
+ int8_t sink_fw_revision[2];
+
+ uint32_t branch_dev_id;
+ int8_t branch_dev_name[6];
+ int8_t branch_hw_revision;
+ int8_t branch_fw_revision[2];
+
+ bool allow_invalid_MSA_timing_param;
+ bool panel_mode_edp;
+ bool dpcd_display_control_capable;
+ bool ext_receiver_cap_field_present;
+ bool set_power_state_capable_edp;
+ bool dynamic_backlight_capable_edp;
+ union dpcd_fec_capability fec_cap;
+ struct dpcd_dsc_capabilities dsc_caps;
+ struct dc_lttpr_caps lttpr_caps;
+ struct adaptive_sync_caps adaptive_sync_caps;
+ struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
+
+ union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
+ union dp_main_line_channel_coding_cap channel_coding_cap;
+ union dp_sink_video_fallback_formats fallback_formats;
+ union dp_fec_capability1 fec_cap1;
+ union dp_cable_id cable_id;
+ uint8_t edp_rev;
+ union edp_alpm_caps alpm_caps;
+ struct edp_psr_info psr_info;
+};
+
+union dpcd_sink_ext_caps {
+ struct {
+ /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+ * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+ */
+ uint8_t sdr_aux_backlight_control : 1;
+ uint8_t hdr_aux_backlight_control : 1;
+ uint8_t reserved_1 : 2;
+ uint8_t oled : 1;
+ uint8_t reserved_2 : 1;
+ uint8_t miniled : 1;
+ uint8_t reserved : 1;
+ } bits;
+ uint8_t raw;
+};
+
+enum dc_link_fec_state {
+ dc_link_fec_not_ready,
+ dc_link_fec_ready,
+ dc_link_fec_enabled
+};
+
+union dpcd_psr_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
+ unsigned char CRC_VERIFICATION : 1;
+ unsigned char FRAME_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char LINE_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
+ unsigned char ENABLE_PSR2 : 1;
+ unsigned char EARLY_TRANSPORT_ENABLE : 1;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_alpm_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char IRQ_HPD_ENABLE : 1;
+ unsigned char RESERVED : 6;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_sink_active_vtotal_control_mode {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char RESERVED : 7;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_error_status {
+ struct {
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char RFB_STORAGE_ERROR :1;
+ unsigned char VSC_SDP_ERROR :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_sink_psr_status {
+ struct {
+ unsigned char SINK_SELF_REFRESH_STATUS :3;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct edp_trace_power_timestamps {
+ uint64_t poweroff;
+ uint64_t poweron;
+};
+
+struct dp_trace_lt_counts {
+ unsigned int total;
+ unsigned int fail;
+};
+
+enum link_training_result {
+ LINK_TRAINING_SUCCESS,
+ LINK_TRAINING_CR_FAIL_LANE0,
+ LINK_TRAINING_CR_FAIL_LANE1,
+ LINK_TRAINING_CR_FAIL_LANE23,
+ /* CR DONE bit is cleared during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR,
+ /* CR DONE bit is cleared but LANE0_CR_DONE is set during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR_PARTIAL,
+ /* other failure during EQ step */
+ LINK_TRAINING_EQ_FAIL_EQ,
+ LINK_TRAINING_LQA_FAIL,
+ /* one of the CR,EQ or symbol lock is dropped */
+ LINK_TRAINING_LINK_LOSS,
+ /* Abort link training (because sink unplugged) */
+ LINK_TRAINING_ABORT,
+ DP_128b_132b_LT_FAILED,
+ DP_128b_132b_MAX_LOOP_COUNT_REACHED,
+ DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
+ DP_128b_132b_CDS_DONE_TIMEOUT,
+};
+
+struct dp_trace_lt {
+ struct dp_trace_lt_counts counts;
+ struct dp_trace_timestamps {
+ unsigned long long start;
+ unsigned long long end;
+ } timestamps;
+ enum link_training_result result;
+ bool is_logged;
+};
+
+struct dp_trace {
+ struct dp_trace_lt detect_lt_trace;
+ struct dp_trace_lt commit_lt_trace;
+ unsigned int link_loss_count;
+ bool is_initialized;
+ struct edp_trace_power_timestamps edp_trace_power_timestamps;
+};
+
+/* TODO - This is a temporary location for any new DPCD definitions.
+ * We should move these to drm_dp header.
+ */
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
+#endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
+#endif
+#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+#endif
+#ifndef DP_TUNNELING_IRQ
+#define DP_TUNNELING_IRQ (1 << 5)
+#endif
+/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
+#ifndef DP_TUNNELING_CAPABILITIES
+#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
+#endif
+#ifndef USB4_DRIVER_ID
+#define USB4_DRIVER_ID 0xE000F /* 1.4a */
+#endif
+#ifndef USB4_DRIVER_BW_CAPABILITY
+#define USB4_DRIVER_BW_CAPABILITY 0xE0020 /* 1.4a */
+#endif
+#ifndef DP_IN_ADAPTER_TUNNEL_INFO
+#define DP_IN_ADAPTER_TUNNEL_INFO 0xE0021 /* 1.4a */
+#endif
+#ifndef DP_BW_GRANULALITY
+#define DP_BW_GRANULALITY 0xE0022 /* 1.4a */
+#endif
+#ifndef ESTIMATED_BW
+#define ESTIMATED_BW 0xE0023 /* 1.4a */
+#endif
+#ifndef ALLOCATED_BW
+#define ALLOCATED_BW 0xE0024 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_STATUS
+#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
+#endif
+#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
+#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
+#endif
+#ifndef REQUESTED_BW
+#define REQUESTED_BW 0xE0031 /* 1.4a */
+#endif
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 684713b2cff7..0e92a322c2ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -54,6 +54,12 @@ struct dc_dsc_policy {
bool enable_dsc_when_not_needed;
};
+struct dc_dsc_config_options {
+ uint32_t dsc_min_slice_height_override;
+ uint32_t max_target_bpp_limit_override_x16;
+ uint32_t slice_height_granularity;
+};
+
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
@@ -71,8 +77,7 @@ bool dc_dsc_compute_bandwidth_range(
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
- uint32_t dsc_min_slice_height_override,
- uint32_t max_target_bpp_limit_override,
+ const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
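Note: with this signature change a caller first seeds the options struct from the driver defaults (via dc_dsc_get_default_config_option(), declared just below) and then passes it by pointer. A minimal usage sketch; the variable names are illustrative, not from the patch:

	struct dc_dsc_config_options options = {0};
	struct dc_dsc_config dsc_cfg = {0};

	dc_dsc_get_default_config_option(dc, &options);
	options.dsc_min_slice_height_override = min_slice_height; /* optional override */
	if (!dc_dsc_compute_config(dsc, &dsc_sink_caps, &options,
				   target_bandwidth_kbps, &timing, &dsc_cfg))
		; /* DSC cannot be enabled for this timing */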
@@ -100,4 +105,6 @@ void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable);
void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable);
+void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
new file mode 100644
index 000000000000..b015e80672ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HDMI_TYPES_H
+#define DC_HDMI_TYPES_H
+
+#include "os_types.h"
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHz*/
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHz*/
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+struct dp_hdmi_dongle_signature_data {
+ int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+ uint8_t eot;/* end of transmission '\x4' */
+};
+
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS 0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_CONFIG_1 0x31
+#define HDMI_SCDC_SOURCE_TEST_REQ 0x35
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+#define HDMI_SCDC_MANUFACTURER_OUI 0xD0
+#define HDMI_SCDC_DEVICE_ID 0xDB
+
+union hdmi_scdc_update_read_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t STATUS_UPDATE:1;
+ uint8_t CED_UPDATE:1;
+ uint8_t RR_TEST:1;
+ uint8_t RESERVED:5;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_status_flags_data {
+ uint8_t byte;
+ struct {
+ uint8_t CLOCK_DETECTED:1;
+ uint8_t CH0_LOCKED:1;
+ uint8_t CH1_LOCKED:1;
+ uint8_t CH2_LOCKED:1;
+ uint8_t RESERVED:4;
+ } fields;
+};
+
+union hdmi_scdc_ced_data {
+ uint8_t byte[11];
+ struct {
+ uint8_t CH0_8LOW:8;
+ uint8_t CH0_7HIGH:7;
+ uint8_t CH0_VALID:1;
+ uint8_t CH1_8LOW:8;
+ uint8_t CH1_7HIGH:7;
+ uint8_t CH1_VALID:1;
+ uint8_t CH2_8LOW:8;
+ uint8_t CH2_7HIGH:7;
+ uint8_t CH2_VALID:1;
+ uint8_t CHECKSUM:8;
+ uint8_t RESERVED:8;
+ uint8_t RESERVED2:8;
+ uint8_t RESERVED3:8;
+ uint8_t RESERVED4:4;
+ } fields;
+};
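Note: each SCDC character-error-detection counter is split into an 8-bit low part, a 7-bit high part and a valid flag. A sketch of reassembling the channel 0 count from the fields above (the SCDC read itself is omitted):

	union hdmi_scdc_ced_data ced = {0};
	/* ...read bytes starting at HDMI_SCDC_ERR_DETECT into ced.byte[]... */
	uint16_t ch0_errors = ced.fields.CH0_8LOW |
			      ((uint16_t)ced.fields.CH0_7HIGH << 8);
	bool ch0_count_valid = ced.fields.CH0_VALID;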
+
+union hdmi_scdc_manufacturer_OUI_data {
+ uint8_t byte[3];
+ struct {
+ uint8_t Manufacturer_OUI_1:8;
+ uint8_t Manufacturer_OUI_2:8;
+ uint8_t Manufacturer_OUI_3:8;
+ } fields;
+};
+
+union hdmi_scdc_device_id_data {
+ uint8_t byte;
+ struct {
+ uint8_t Hardware_Minor_Rev:4;
+ uint8_t Hardware_Major_Rev:4;
+ } fields;
+};
+
+#endif /* DC_HDMI_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 848db8676adf..100d62162b71 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -797,6 +797,29 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
+#define DC_DSC_QP_SET_SIZE 15
+#define DC_DSC_RC_BUF_THRESH_SIZE 14
+struct dc_dsc_rc_params_override {
+ int32_t rc_model_size;
+ int32_t rc_buf_thresh[DC_DSC_RC_BUF_THRESH_SIZE];
+ int32_t rc_minqp[DC_DSC_QP_SET_SIZE];
+ int32_t rc_maxqp[DC_DSC_QP_SET_SIZE];
+ int32_t rc_offset[DC_DSC_QP_SET_SIZE];
+
+ int32_t rc_tgt_offset_hi;
+ int32_t rc_tgt_offset_lo;
+ int32_t rc_edge_factor;
+ int32_t rc_quant_incr_limit0;
+ int32_t rc_quant_incr_limit1;
+
+ int32_t initial_fullness_offset;
+ int32_t initial_delay;
+
+ int32_t flatness_min_qp;
+ int32_t flatness_max_qp;
+ int32_t flatness_det_thresh;
+};
+
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
uint32_t num_slices_v; /* Number of DSC slices - vertical */
@@ -806,11 +829,12 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
+ const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
};
/**
@@ -1061,5 +1085,19 @@ struct tg_color {
uint16_t color_b_cb;
};
+enum symclk_state {
+ SYMCLK_OFF_TX_OFF,
+ SYMCLK_ON_TX_ON,
+ SYMCLK_ON_TX_OFF,
+};
+
+struct phy_state {
+ struct {
+ uint8_t otg : 1;
+ uint8_t reserved : 7;
+ } symclk_ref_cnts;
+ enum symclk_state symclk_state;
+};
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
deleted file mode 100644
index 2e18bcf6b11a..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * Copyright 2012-14 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DC_LINK_H_
-#define DC_LINK_H_
-
-#include "dc.h"
-#include "dc_types.h"
-#include "grph_object_defs.h"
-
-struct link_resource;
-
-enum dc_link_fec_state {
- dc_link_fec_not_ready,
- dc_link_fec_ready,
- dc_link_fec_enabled
-};
-
-struct dc_link_status {
- bool link_active;
- struct dpcd_caps *dpcd_caps;
-};
-
-struct dprx_states {
- bool cable_id_written;
-};
-
-/* DP MST stream allocation (payload bandwidth number) */
-struct link_mst_stream_allocation {
- /* DIG front */
- const struct stream_encoder *stream_enc;
- /* HPO DP Stream Encoder */
- const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
- /* associate DRM payload table with DC stream encoder */
- uint8_t vcp_id;
- /* number of slots required for the DP stream in transport packet */
- uint8_t slot_count;
-};
-
-/* DP MST stream allocation table */
-struct link_mst_stream_allocation_table {
- /* number of DP video streams */
- int stream_count;
- /* array of stream allocations */
- struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
-};
-
-struct edp_trace_power_timestamps {
- uint64_t poweroff;
- uint64_t poweron;
-};
-
-struct dp_trace_lt_counts {
- unsigned int total;
- unsigned int fail;
-};
-
-struct dp_trace_lt {
- struct dp_trace_lt_counts counts;
- struct dp_trace_timestamps {
- unsigned long long start;
- unsigned long long end;
- } timestamps;
- enum link_training_result result;
- bool is_logged;
-};
-
-struct dp_trace {
- struct dp_trace_lt detect_lt_trace;
- struct dp_trace_lt commit_lt_trace;
- unsigned int link_loss_count;
- bool is_initialized;
- struct edp_trace_power_timestamps edp_trace_power_timestamps;
-};
-
-/* PSR feature flags */
-struct psr_settings {
- bool psr_feature_enabled; // PSR is supported by sink
- bool psr_allow_active; // PSR is currently active
- enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
- bool psr_vtotal_control_support; // Vtotal control is supported by sink
-
- /* These parameters are calculated in Driver,
- * based on display timing and Sink capabilities.
- * If VBLANK region is too small and Sink takes a long time
- * to set up RFB, it may take an extra frame to enter PSR state.
- */
- bool psr_frame_capture_indication_req;
- unsigned int psr_sdp_transmit_line_num_deadline;
- uint8_t force_ffu_mode;
- unsigned int psr_power_opt;
-};
-
-/* To split out "global" and "per-panel" config settings.
- * Add a struct dc_panel_config under dc_link
- */
-struct dc_panel_config {
- /* extra panel power sequence parameters */
- struct pps {
- unsigned int extra_t3_ms;
- unsigned int extra_t7_ms;
- unsigned int extra_delay_backlight_off;
- unsigned int extra_post_t7_ms;
- unsigned int extra_pre_t11_ms;
- unsigned int extra_t12_ms;
- unsigned int extra_post_OUI_ms;
- } pps;
- /* PSR */
- struct psr {
- bool disable_psr;
- bool disallow_psrsu;
- bool rc_disable;
- bool rc_allow_static_screen;
- bool rc_allow_fullscreen_VPB;
- } psr;
- /* ABM */
- struct varib {
- unsigned int varibright_feature_enable;
- unsigned int def_varibright_level;
- unsigned int abm_config_setting;
- } varib;
- /* edp DSC */
- struct dsc {
- bool disable_dsc_edp;
- unsigned int force_dsc_edp_policy;
- } dsc;
- /* eDP ILR */
- struct ilr {
- bool optimize_edp_link_rate; /* eDP ILR */
- } ilr;
-};
-
-/*
- * USB4 DPIA BW ALLOCATION STRUCTS
- */
-struct dc_dpia_bw_alloc {
- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
- int padding_bw; // The Padding "Un-used" BW allocated by CM for padding reasons
- int sink_max_bw; // The Max BW that sink can require/support
- int estimated_bw; // The estimated available BW for this DPIA
- int bw_granularity; // BW Granularity
- bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
-};
-
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool is_internal_display;
-
- /* TODO: Rename. Flag an endpoint as having a programmable mapping to a
- * DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
- bool is_automated; /* Indicates automated testing */
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
-
- bool test_pattern_enabled;
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- struct hdcp_caps hdcp_caps;
-#endif
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- } wa_flags;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
-};
-
-const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
-
-/**
- * dc_get_link_at_index() - Return an enumerated dc_link.
- *
- * dc_link order is constant and determined at
- * boot time. They cannot be created or destroyed.
- * Use dc_get_caps() to get number of links.
- */
-static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
-{
- return dc->links[link_index];
-}
-
-static inline void get_edp_links(const struct dc *dc,
- struct dc_link **edp_links,
- int *edp_num)
-{
- int i;
-
- *edp_num = 0;
- for (i = 0; i < dc->link_count; i++) {
- // report any eDP links, even unconnected DDI's
- if (!dc->links[i])
- continue;
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
- edp_links[*edp_num] = dc->links[i];
- if (++(*edp_num) == MAX_NUM_EDP)
- return;
- }
- }
-}
-
-static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
- const struct dc_link *link,
- unsigned int *inst_out)
-{
- struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
-
- if (link->connector_signal != SIGNAL_TYPE_EDP)
- return false;
- get_edp_links(dc, edp_links, &edp_num);
- if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index))
- *inst_out = 1;
- else
- *inst_out = 0;
- return true;
-}
-
-/* Set backlight level of an embedded panel (eDP, LVDS).
- * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
- * and 16 bit fractional, where 1.0 is max backlight value.
- */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
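Note: backlight_pwm_u16_16 is U16.16 fixed point, so 1.0 (full brightness) is 0x10000 = 65536 and 50% is 0x8000; a percentage converts as brightness_pct * 65536 / 100 (illustrative arithmetic, not taken from this header).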
-
-/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
- bool isHDR,
- uint32_t backlight_millinits,
- uint32_t transition_time_in_ms);
-
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
- uint32_t *backlight_millinits,
- uint32_t *backlight_millinits_peak);
-
-bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
-
-bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
-bool dc_link_set_default_brightness_aux(struct dc_link *link);
-
-int dc_link_get_backlight_level(const struct dc_link *dc_link);
-
-int dc_link_get_target_backlight_pwm(const struct dc_link *link);
-
-bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
- bool wait, bool force_static, const unsigned int *power_opts);
-
-bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
-
-bool dc_link_setup_psr(struct dc_link *dc_link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context);
-
-bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
-
-void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
-
-void dc_link_blank_all_dp_displays(struct dc *dc);
-void dc_link_blank_all_edp_displays(struct dc *dc);
-
-void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
-bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link,
- uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
-
-/* Request DC to detect if there is a Panel connected.
- * boot - If this call is during initial boot.
- * Return false for any type of detection failure or MST detection
- * true otherwise. True meaning further action is required (status update
- * and OS notification).
- */
-enum dc_detect_reason {
- DETECT_REASON_BOOT,
- DETECT_REASON_RESUMEFROMS3S4,
- DETECT_REASON_HPD,
- DETECT_REASON_HPDRX,
- DETECT_REASON_FALLBACK,
- DETECT_REASON_RETRAIN,
- DETECT_REASON_TDR,
-};
-
-bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
-bool dc_link_get_hpd_state(struct dc_link *dc_link);
-enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
-enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-
-/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
- * Return:
- * true - Downstream port status changed. DM should call DC to do the
- * detection.
- * false - no change in Downstream port status. No further action required
- * from DM. */
-bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work);
-
-/*
- * On eDP links this function call will stall until T12 has elapsed.
- * If the panel is not in power off state, this function will return
- * immediately.
- */
-bool dc_link_wait_for_t12(struct dc_link *link);
-
-void dc_link_dp_handle_automated_test(struct dc_link *link);
-void dc_link_dp_handle_link_loss(struct dc_link *link);
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
-
-struct dc_sink_init_data;
-
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *dc_link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data);
-
-void dc_link_remove_remote_sink(
- struct dc_link *link,
- struct dc_sink *sink);
-
-/* Used by diagnostics for virtual link at the moment */
-
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings);
-
-bool dc_link_dp_perform_link_training_skip_aux(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_setting);
-
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_settings,
- bool skip_video_pattern);
-
-bool dc_link_dp_sync_lt_begin(struct dc_link *link);
-
-enum link_training_result dc_link_dp_sync_lt_attempt(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_settings);
-
-bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down);
-
-void dc_link_dp_enable_hpd(const struct dc_link *link);
-
-void dc_link_dp_disable_hpd(const struct dc_link *link);
-
-bool dc_link_dp_set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap);
-
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
-
-bool dc_link_is_dp_sink_present(struct dc_link *link);
-
-bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
-/*
- * DPCD access interfaces
- */
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
-bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
-#endif
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link);
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link);
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain);
-void dc_link_enable_hpd(const struct dc_link *link);
-void dc_link_disable_hpd(const struct dc_link *link);
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-uint32_t dc_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_setting);
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link);
-
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link);
-
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address
-);
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd);
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd);
-
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing);
-
-bool dc_link_is_fec_supported(const struct dc_link *link);
-bool dc_link_should_enable_fec(const struct dc_link *link);
-
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
-
-void dc_link_get_cur_link_res(const struct dc_link *link,
- struct link_resource *link_res);
-/* take a snapshot of current link resource allocation state */
-void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
-/* restore link resource allocation state from a snapshot */
-void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
-void dc_link_clear_dprx_states(struct dc_link *link);
-struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service);
-void dp_trace_reset(struct dc_link *link);
-bool dc_dp_trace_is_initialized(struct dc_link *link);
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
- bool in_detection);
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
- bool in_detection,
- bool is_logged);
-bool dc_dp_trace_is_logged(struct dc_link *link,
- bool in_detection);
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
- bool in_detection);
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
-
-/* Destruct the mst topology of the link and reset the allocated payload table */
-bool reset_cur_dp_mst_topology(struct dc_link *link);
-#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index dfd3df1d2f7e..25284006019c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -144,7 +144,7 @@ struct test_pattern {
unsigned int cust_pattern_size;
};
-#define SUBVP_DRR_MARGIN_US 600 // 600us for DRR margin (SubVP + DRR)
+#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
enum mall_stream_type {
SUBVP_NONE, // subvp not in use
@@ -190,6 +190,7 @@ struct dc_stream_state {
struct dc_info_packet vsp_infopacket;
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
+ struct dc_info_packet adaptive_sync_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -292,6 +293,7 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
+ bool fpo_in_use;
struct mall_stream_config mall_stream_config;
};
@@ -313,6 +315,7 @@ struct dc_stream_update {
struct dc_info_packet *vsp_infopacket;
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
+ struct dc_info_packet *adaptive_sync_infopacket;
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
@@ -543,9 +546,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *nom_v_pos);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-bool dc_stream_forward_crc_window(struct dc *dc,
+bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
- struct dc_stream_state *stream,
bool is_stop);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index dc78e2404b48..45ab48fe5d00 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -32,14 +32,15 @@
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
+#include "dc_ddc_types.h"
#include "dc_dp_types.h"
+#include "dc_hdmi_types.h"
#include "dc_hw_types.h"
#include "dal_types.h"
#include "grph_object_defs.h"
+#include "grph_object_ctrl_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "dm_cp_psp.h"
-#endif
/* forward declarations */
struct dc_plane_state;
@@ -82,13 +83,8 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define DC_MAX_EDID_BUFFER_SIZE 2048
-#define DC_EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
-#define MAX_REPEATER_CNT 8
-
-#include "dc_ddc_types.h"
enum tiling_mode {
TILING_MODE_INVALID,
@@ -374,66 +370,6 @@ struct dc_csc_adjustments {
struct fixed31_32 hue;
};
-enum dpcd_downstream_port_max_bpc {
- DOWN_STREAM_MAX_8BPC = 0,
- DOWN_STREAM_MAX_10BPC,
- DOWN_STREAM_MAX_12BPC,
- DOWN_STREAM_MAX_16BPC
-};
-
-
-enum link_training_offset {
- DPRX = 0,
- LTTPR_PHY_REPEATER1 = 1,
- LTTPR_PHY_REPEATER2 = 2,
- LTTPR_PHY_REPEATER3 = 3,
- LTTPR_PHY_REPEATER4 = 4,
- LTTPR_PHY_REPEATER5 = 5,
- LTTPR_PHY_REPEATER6 = 6,
- LTTPR_PHY_REPEATER7 = 7,
- LTTPR_PHY_REPEATER8 = 8
-};
-
-struct dc_lttpr_caps {
- union dpcd_rev revision;
- uint8_t mode;
- uint8_t max_lane_count;
- uint8_t max_link_rate;
- uint8_t phy_repeater_cnt;
- uint8_t max_ext_timeout;
- union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
- union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
- uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
-};
-
-struct dc_dongle_dfp_cap_ext {
- bool supported;
- uint16_t max_pixel_rate_in_mps;
- uint16_t max_video_h_active_width;
- uint16_t max_video_v_active_height;
- struct dp_encoding_format_caps encoding_format_caps;
- struct dp_color_depth_caps rgb_color_depth_caps;
- struct dp_color_depth_caps ycbcr444_color_depth_caps;
- struct dp_color_depth_caps ycbcr422_color_depth_caps;
- struct dp_color_depth_caps ycbcr420_color_depth_caps;
-};
-
-struct dc_dongle_caps {
- /* dongle type (DP converter, CV smart dongle) */
- enum display_dongle_type dongle_type;
- bool extendedCapValid;
- /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
- indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
- bool is_dp_hdmi_s3d_converter;
- bool is_dp_hdmi_ycbcr422_pass_through;
- bool is_dp_hdmi_ycbcr420_pass_through;
- bool is_dp_hdmi_ycbcr422_converter;
- bool is_dp_hdmi_ycbcr420_converter;
- uint32_t dp_hdmi_max_bpc;
- uint32_t dp_hdmi_max_pixel_clk_in_khz;
- uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
- struct dc_dongle_dfp_cap_ext dfp_cap_ext;
-};
/* Scaling format */
enum scaling_transformation {
SCALING_TRANSFORMATION_UNINITIALIZED,
@@ -690,6 +626,7 @@ struct psr_config {
uint8_t su_y_granularity;
unsigned int line_time_in_us;
uint8_t rate_control_caps;
+ uint16_t dsc_slice_height;
};
union dmcu_psr_level {
@@ -801,6 +738,7 @@ struct psr_context {
uint8_t su_y_granularity;
unsigned int line_time_in_us;
uint8_t rate_control_caps;
+ uint16_t dsc_slice_height;
};
struct colorspace_transform {
@@ -873,9 +811,7 @@ struct dc_context {
uint32_t dc_edp_id_count;
uint64_t fbc_gpu_addr;
struct dc_dmub_srv *dmub_srv;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
-#endif
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
};
@@ -1000,4 +936,155 @@ struct otg_phy_mux {
};
#endif
+enum dc_detect_reason {
+ DETECT_REASON_BOOT,
+ DETECT_REASON_RESUMEFROMS3S4,
+ DETECT_REASON_HPD,
+ DETECT_REASON_HPDRX,
+ DETECT_REASON_FALLBACK,
+ DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
+};
+
+struct dc_link_status {
+ bool link_active;
+ struct dpcd_caps *dpcd_caps;
+};
+
+union hdcp_rx_caps {
+ struct {
+ uint8_t version;
+ uint8_t reserved;
+ struct {
+ uint8_t repeater : 1;
+ uint8_t hdcp_capable : 1;
+ uint8_t reserved : 6;
+ } byte0;
+ } fields;
+ uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+ struct {
+ uint8_t HDCP_CAPABLE:1;
+ uint8_t REPEATER:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+struct hdcp_caps {
+ union hdcp_rx_caps rx_caps;
+ union hdcp_bcaps bcaps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* HPO DP Stream Encoder */
+ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+#define MAX_CONTROLLER_NUM 6
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/* PSR feature flags */
+struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+ bool psr_allow_active; // PSR is currently active
+ enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
+ bool psr_vtotal_control_support; // Vtotal control is supported by sink
+ unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU
+
+ /* These parameters are calculated in Driver,
+ * based on display timing and Sink capabilities.
+ * If VBLANK region is too small and Sink takes a long time
+ * to set up RFB, it may take an extra frame to enter PSR state.
+ */
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+ uint8_t force_ffu_mode;
+ unsigned int psr_power_opt;
+};
+
+/* To split out "global" and "per-panel" config settings.
+ * Add a struct dc_panel_config under dc_link
+ */
+struct dc_panel_config {
+ /* extra panel power sequence parameters */
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+ unsigned int extra_delay_backlight_off;
+ unsigned int extra_post_t7_ms;
+ unsigned int extra_pre_t11_ms;
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+ /* nit brightness */
+ struct nits_brightness {
+ unsigned int peak; /* nits */
+ unsigned int max_avg; /* nits */
+ unsigned int min; /* 1/10000 nits */
+ unsigned int max_nonboost_brightness_millinits;
+ unsigned int min_brightness_millinits;
+ } nits_brightness;
+ /* PSR */
+ struct psr {
+ bool disable_psr;
+ bool disallow_psrsu;
+ bool rc_disable;
+ bool rc_allow_static_screen;
+ bool rc_allow_fullscreen_VPB;
+ } psr;
+ /* ABM */
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
+ /* edp DSC */
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+ } dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
+};
+
+/*
+ * USB4 DPIA BW ALLOCATION STRUCTS
+ */
+struct dc_dpia_bw_alloc {
+ int sink_verified_bw; // The Verified BW that sink can allocate and use that has been verified already
+ int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
+ int sink_max_bw; // The Max BW that sink can require/support
+ int estimated_bw; // The estimated available BW for this DPIA
+ int bw_granularity; // BW Granularity
+ bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
+ bool response_ready; // Response ready from the CM side
+};
+
+#define MAX_SINKS_PER_LINK 4
+
+enum dc_hpd_enable_select {
+ HPD_EN_FOR_ALL_EDP = 0,
+ HPD_EN_FOR_PRIMARY_EDP_ONLY,
+ HPD_EN_FOR_SECONDARY_EDP_ONLY,
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 140297c8ff55..739298d2dff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -832,13 +832,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: payload->defer_delay=%u",
payload->defer_delay);
- if (payload->defer_delay > 1) {
- msleep(payload->defer_delay);
- defer_time_in_ms += payload->defer_delay;
- } else if (payload->defer_delay <= 1) {
- udelay(payload->defer_delay * 1000);
- defer_time_in_ms += payload->defer_delay;
- }
+ fsleep(payload->defer_delay * 1000);
+ defer_time_in_ms += payload->defer_delay;
}
}
break;
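Note: fsleep() takes a delay in microseconds and internally chooses between udelay(), usleep_range() and msleep() depending on the duration, which is why the explicit msleep()/udelay() branching above could be collapsed; defer_delay itself is still in milliseconds, hence the multiplication by 1000.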
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index e69f1899fbf0..c850ed49281f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -26,7 +26,7 @@
#ifndef __DAL_AUX_ENGINE_DCE110_H__
#define __DAL_AUX_ENGINE_DCE110_H__
-#include "i2caux_interface.h"
+#include "gpio_service_interface.h"
#include "inc/hw/aux_engine.h"
enum aux_return_code_type;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 165392380842..462c7a3ec3cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -930,7 +930,13 @@ static bool dce112_program_pix_clk(
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
+ REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1,
+ PIPE0_DTO_SRC_SEL, 1);
+ else
+ REG_UPDATE(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1);
return true;
}
/* First disable SS
@@ -995,7 +1001,6 @@ static bool dcn31_program_pix_clk(
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Enable DTO */
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
if (encoding == DP_128b_132b_ENCODING)
@@ -1009,9 +1014,6 @@ static bool dcn31_program_pix_clk(
else
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
-#else
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
-#endif
} else {
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
@@ -1023,7 +1025,6 @@ static bool dcn31_program_pix_clk(
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
- #if defined(CONFIG_DRM_AMD_DC_DCN)
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1,
@@ -1031,17 +1032,12 @@ static bool dcn31_program_pix_clk(
else
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
- #else
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
- #endif
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE(PIXEL_RATE_CNTL[inst],
PIPE0_DTO_SRC_SEL, 0);
-#endif
/*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/
bp_pc_params.controller_id = pix_clk_params->controller_id;
@@ -1161,6 +1157,7 @@ const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
{25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17
{59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340
{74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758
+ {89910, 90000, 90000, 1000, 1001}, //90Mhz -> 89.91
{125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87
{148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516
{167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83
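
Each row of this table pairs a nominal pixel clock in kHz with its 1000/1001 "video optimized" variant, and the new 90 MHz entry follows the same arithmetic: 90000 * 1000 / 1001 = 89910 kHz, which is where the bracket's lower bound comes from. A standalone check of that computation (the helper name is illustrative, not from the driver):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: the 1000/1001 adjustment used by the table above. */
static uint32_t video_optimized_khz(uint32_t nominal_khz)
{
	return (uint32_t)(((uint64_t)nominal_khz * 1000) / 1001);
}

int main(void)
{
	assert(video_optimized_khz(90000) == 89910);   /* new table entry */
	assert(video_optimized_khz(148500) == 148351); /* 148.5 MHz row */
	return 0;
}
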
@@ -1274,7 +1271,14 @@ static bool dcn3_program_pix_clk(
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ /* Enable DTO */
+ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
+ REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1,
+ PIPE0_DTO_SRC_SEL, 1);
+ else
+ REG_UPDATE(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1);
} else
// For other signal types(HDMI_TYPE_A, DVI) Driver still to call VBIOS Command table
dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings);
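
The dce112, dcn31 and dcn3 variants above all use the same guard: a nonzero PIPE0_DTO_SRC_SEL entry in the clock source's field mask means the ASIC actually defines that register field, so the DTO source select is programmed together with the enable bit; otherwise only DP_DTO0_ENABLE is touched. A minimal sketch of the idea, with simplified stand-ins for the DC register helpers:

/* Illustrative only; the commented REG_UPDATE calls stand for the DC macros. */
struct clk_src_mask_sketch {
	unsigned int PIPE0_DTO_SRC_SEL; /* 0 when the field does not exist */
};

static void enable_dp_dto_sketch(const struct clk_src_mask_sketch *cs_mask)
{
	if (cs_mask->PIPE0_DTO_SRC_SEL) {
		/* REG_UPDATE_2(PIXEL_RATE_CNTL, DP_DTO0_ENABLE, 1,
		 *              PIPE0_DTO_SRC_SEL, 1); */
	} else {
		/* REG_UPDATE(PIXEL_RATE_CNTL, DP_DTO0_ENABLE, 1); */
	}
}
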
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index aaf33c79b09b..f600b7431e23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -204,23 +204,17 @@
type DP_DTO0_MODULO; \
type DP_DTO0_ENABLE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_REG_FIELD_LIST_DCN32(type) \
type PIPE0_DTO_SRC_SEL;
-#endif
struct dce110_clk_src_shift {
CS_REG_FIELD_LIST(uint8_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
CS_REG_FIELD_LIST_DCN32(uint8_t)
-#endif
};
struct dce110_clk_src_mask{
CS_REG_FIELD_LIST(uint32_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
CS_REG_FIELD_LIST_DCN32(uint32_t)
-#endif
};
struct dce110_clk_src_regs {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index d3cc5ec46956..e74266cc0098 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -586,7 +586,7 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
if (state == PSR_STATE0)
break;
}
- udelay(500);
+ fsleep(500);
}
/* assert if max retry hit */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 09260c23c3bd..fa314493ffc5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dce_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index d9fd4ec60588..670d5ab9d998 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1009,7 +1009,7 @@ static void dce_transform_set_pixel_storage_depth(
color_depth = COLOR_DEPTH_101010;
pixel_depth = 0;
expan_mode = 1;
- BREAK_TO_DEBUGGER();
+ DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
break;
}
@@ -1023,8 +1023,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
- DC_LOG_WARNING("%s: Capability not supported",
- __func__);
+ DC_LOG_DC("%s: Capability not supported", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index fb0dec4ed3a6..9fc48208c2e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -148,7 +148,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
int edp_num;
uint8_t panel_mask = 0;
- get_edp_links(dc->dc, edp_links, &edp_num);
+ dc_get_edp_links(dc->dc, edp_links, &edp_num);
for (i = 0; i < edp_num; i++) {
if (edp_links[i]->link_status.link_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 2d3201b77d6a..9705d8f88382 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -33,6 +33,9 @@
#define MAX_PIPES 6
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
+
/*
* Convert dmcub psr state to dmcu psr state.
*/
@@ -215,7 +218,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
break;
}
- udelay(500);
+ fsleep(500);
}
/* assert if max retry hit */
@@ -250,7 +253,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Set PSR vtotal requirement for FreeSync PSR.
*/
static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
@@ -417,6 +420,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 0;
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
copy_settings_data->relock_delay_frame_cnt = 2;
+ copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 74005b9d352a..289e42070ece 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -26,8 +26,9 @@
#ifndef _DMUB_PSR_H_
#define _DMUB_PSR_H_
-#include "os_types.h"
-#include "dc_link.h"
+#include "dc_types.h"
+struct dc_link;
+struct dmub_psr_funcs;
struct dmub_psr {
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 913a1fe6b3da..8d2460d06bce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -46,7 +46,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -54,7 +54,6 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
-#include "inc/link_dpcd.h"
#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -65,7 +64,6 @@
#include "dcn10/dcn10_hw_sequencer.h"
-#include "link/link_dp_trace.h"
#include "dce110_hw_sequencer.h"
#define GAMMA_HW_POINTS_NUM 256
@@ -653,10 +651,16 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else
+ else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
+ }
}
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
@@ -737,7 +741,7 @@ void dce110_edp_wait_for_hpd_ready(
/* obtain HPD */
/* TODO what to do with this? */
- hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+ hpd = ctx->dc->link_srv->get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
if (!hpd) {
BREAK_TO_DEBUGGER();
@@ -775,10 +779,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
- if (false == edp_hpd_high) {
- DC_LOG_WARNING(
- "%s: wait timed out!\n", __func__);
- }
+ /* ensure that the panel is detected */
+ ASSERT(edp_hpd_high);
}
void dce110_edp_power_control(
@@ -807,19 +809,19 @@ void dce110_edp_power_control(
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
unsigned long long time_since_edp_poweron_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- dp_trace_get_edp_poweron_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link)), 1000000);
DC_LOG_HW_RESUME_S3(
"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
__func__,
power_up,
current_ts,
- dp_trace_get_edp_poweroff_timestamp(link),
- dp_trace_get_edp_poweron_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link),
time_since_edp_poweroff_ms,
time_since_edp_poweron_ms);
@@ -834,7 +836,7 @@ void dce110_edp_power_control(
link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
- if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ if (ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
remaining_min_edp_poweroff_time_ms =
remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
@@ -875,14 +877,16 @@ void dce110_edp_power_control(
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
- if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
+
+ if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_ON,
- panel_instance);
- else
+ panel_instance, link->link_powered_externally);
+ } else {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_OFF,
- panel_instance);
+ panel_instance, link->link_powered_externally);
+ }
}
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
@@ -892,13 +896,13 @@ void dce110_edp_power_control(
__func__, (power_up ? "On":"Off"),
bp_result);
- dp_trace_set_edp_power_timestamp(link, power_up);
+ ctx->dc->link_srv->dp_trace_set_edp_power_timestamp(link, power_up);
DC_LOG_HW_RESUME_S3(
"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
__func__,
- dp_trace_get_edp_poweroff_timestamp(link),
- dp_trace_get_edp_poweron_timestamp(link));
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link));
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
@@ -926,14 +930,14 @@ void dce110_edp_wait_for_T12(
return;
if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
- dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
unsigned int t12_duration = 500; // Default T12 as per spec
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long time_since_edp_poweroff_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
@@ -941,7 +945,6 @@ void dce110_edp_wait_for_T12(
msleep(t12_duration - time_since_edp_poweroff_ms);
}
}
-
/*todo: cloned in stream enc, fix*/
/*
* @brief
@@ -1015,21 +1018,25 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T7-ready.
*/
- edp_receiver_ready_T7(link);
+ ctx->dc->link_srv->edp_receiver_ready_T7(link);
else
DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
}
+ /* Setting link_powered_externally will bypass delays in the backlight
+ * as they are not required if the link is being powered by a different
+ * source.
+ */
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLON,
- panel_instance);
+ panel_instance, link->link_powered_externally);
else
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLOFF,
- panel_instance);
+ panel_instance, link->link_powered_externally);
}
link_transmitter_control(ctx->dc_bios, &cntl);
@@ -1042,7 +1049,7 @@ void dce110_edp_backlight_control(
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
- dc_link_backlight_enable_aux(link, enable);
+ ctx->dc->link_srv->edp_backlight_enable_aux(link, enable);
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
@@ -1054,7 +1061,7 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- edp_add_delay_for_T9(link);
+ ctx->dc->link_srv->edp_add_delay_for_T9(link);
else
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
@@ -1142,6 +1149,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+ int dp_hpo_inst;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1150,7 +1161,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1161,7 +1172,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_hwss->reset_stream_encoder(pipe_ctx);
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ dto_params.otg_inst = tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ }
+
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
* we are just disabling a single HPO stream. Shouldn't we disable HPO
* HW control only when HPOs for all streams are disabled?
@@ -1203,7 +1223,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
pipe_ctx->stream_res.hpo_dp_stream_enc);
@@ -1225,7 +1245,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- edp_receiver_ready_T9(link);
+ link->dc->link_srv->edp_receiver_ready_T9(link);
}
}
}
@@ -1408,7 +1428,7 @@ static enum dc_status dce110_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1512,7 +1532,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
+ if (!(hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)))
/* */
/* Do not touch stream timing on seamless boot optimization. */
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
@@ -1544,17 +1564,17 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->inst);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
if (!stream->dpms_off)
- core_link_enable_stream(context, pipe_ctx);
+ dc->link_srv->set_dpms_on(context, pipe_ctx);
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) {
+ if (hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
}
@@ -1580,7 +1600,7 @@ static void power_down_encoders(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- dc_link_blank_dp_stream(dc->links[i], false);
+ dc->link_srv->blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -1719,7 +1739,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (hws->funcs.init_pipes)
hws->funcs.init_pipes(dc, context);
@@ -2063,7 +2083,7 @@ static void dce110_reset_hw_ctx_wrap(
* disabled already, no need to disable again.
*/
if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) {
- core_link_disable_stream(pipe_ctx_old);
+ dc->link_srv->set_dpms_off(pipe_ctx_old);
/* free acquired resources*/
if (pipe_ctx_old->stream_res.audio) {
@@ -3011,10 +3031,12 @@ void dce110_enable_dp_link_output(
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
unsigned int i;
-
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
}
@@ -3034,13 +3056,13 @@ void dce110_enable_dp_link_output(
pipes[i].clock_source->funcs->program_pix_clk(
pipes[i].clock_source,
&pipes[i].stream_res.pix_clk_params,
- dp_get_link_encoding_format(link_settings),
+ dc->link_srv->dp_get_encoding_format(link_settings),
&pipes[i].pll_settings);
}
}
}
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
@@ -3057,7 +3079,7 @@ void dce110_enable_dp_link_output(
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void dce110_disable_link_output(struct dc_link *link,
@@ -3076,13 +3098,14 @@ void dce110_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_backlight_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
static const struct hw_sequencer_funcs dce110_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 758f4b3b0087..08028a1779ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -71,8 +71,6 @@ void dce110_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dp_receiver_power_ctrl(struct dc_link *link, bool on);
-
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index f808315b2835..a4a45a6ce61e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -401,8 +401,6 @@ static const struct resource_caps stoney_resource_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
- .blends_with_below = true,
- .blends_with_above = true,
.per_pixel_alpha = 1,
.pixel_format_support = {
@@ -428,7 +426,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_plane_cap underlay_plane_cap = {
.type = DC_PLANE_TYPE_DCE_UNDERLAY,
- .blends_with_above = true,
.per_pixel_alpha = 1,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index dda596fa1cd7..fee331accc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 71b3a6949001..c9e045666dcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -59,6 +59,7 @@
SRI(LB_DATA_FORMAT, DSCL, id), \
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_BLACK_OFFSET, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
@@ -209,6 +210,7 @@
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
@@ -495,6 +497,7 @@
type AUTOCAL_MODE; \
type AUTOCAL_NUM_PIPE; \
type AUTOCAL_PIPE_ID; \
+ type SCL_BOUNDARY_MODE; \
type SCL_BLACK_OFFSET_RGB_Y; \
type SCL_BLACK_OFFSET_CBCR; \
type SCL_V_NUM_TAPS; \
@@ -1108,6 +1111,7 @@ struct dcn_dpp_mask {
uint32_t LB_DATA_FORMAT; \
uint32_t LB_MEMORY_CTRL; \
uint32_t DSCL_AUTOCAL; \
+ uint32_t DSCL_CONTROL; \
uint32_t SCL_BLACK_OFFSET; \
uint32_t SCL_TAP_CONTROL; \
uint32_t SCL_COEF_RAM_TAP_SELECT; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f607a0e28f14..b33955928bd0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -581,7 +581,7 @@ static void dpp1_dscl_set_manual_ratio_init(
* dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area
*
* @dpp: DPP data struct
- * @recount: Rectangle information
+ * @recout: Rectangle information
*
* This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on
the values specified in the recout parameter.
@@ -655,6 +655,10 @@ void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
AUTOCAL_NUM_PIPE, 0,
AUTOCAL_PIPE_ID, 0);
+ /*clean scaler boundary mode when Autocal off*/
+ REG_SET(DSCL_CONTROL, 0,
+ SCL_BOUNDARY_MODE, 0);
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index b6391a5ead78..365a3215f6d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -23,8 +23,6 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
#include "reg_helper.h"
#include "resource.h"
#include "dwb.h"
@@ -129,6 +127,3 @@ void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
dwbc10->dwbc_shift = dwbc_shift;
dwbc10->dwbc_mask = dwbc_mask;
}
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
index d56ea7c8171e..5268c46ae907 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
@@ -24,8 +24,6 @@
#ifndef __DC_DWBC_DCN10_H__
#define __DC_DWBC_DCN10_H__
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
/* DCN */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -267,5 +265,3 @@ void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
int inst);
#endif
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index ba1c0621f0f8..e8752077571a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -172,6 +172,10 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;
uint32_t SDPIF_REQUEST_RATE_LIMIT;
+ uint32_t DCHUBBUB_SDPIF_CFG0;
+ uint32_t DCHUBBUB_SDPIF_CFG1;
+ uint32_t DCHUBBUB_CLOCK_CNTL;
+ uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL;
};
#define HUBBUB_REG_FIELD_LIST_DCN32(type) \
@@ -362,7 +366,13 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\
- type SDPIF_REQUEST_RATE_LIMIT
+ type SDPIF_REQUEST_RATE_LIMIT;\
+ type DISPCLK_R_DCHUBBUB_GATE_DIS;\
+ type DCFCLK_R_DCHUBBUB_GATE_DIS;\
+ type SDPIF_MAX_NUM_OUTSTANDING;\
+ type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\
+ type SDPIF_PORT_CONTROL;\
+ type DET_MEM_PWR_LS_MODE
struct dcn_hubbub_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index a142a00bc432..bf399819ca80 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -755,8 +755,8 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
return false;
}
-uint32_t aperture_default_system = 1;
-uint32_t context0_default_system; /* = 0;*/
+static uint32_t aperture_default_system = 1;
+static uint32_t context0_default_system; /* = 0;*/
static void hubp1_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fe2023f18b7d..1c3b6f25a782 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,7 +45,6 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
-#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "link_hwss.h"
@@ -56,8 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "inc/dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -728,11 +726,15 @@ void dcn10_hubp_pg_control(
}
}
-static void power_on_plane(
+static void power_on_plane_resources(
struct dce_hwseq *hws,
int plane_id)
{
DC_LOGGER_INIT(hws->ctx->logger);
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, plane_id, true);
+
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -921,7 +923,7 @@ enum dc_status dcn10_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1019,7 +1021,7 @@ static void dcn10_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1239,11 +1241,15 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
"Power gated front end %d\n", hubp->inst);
}
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
/* disable HW used by plane.
@@ -1566,7 +1572,7 @@ void dcn10_init_hw(struct dc *dc)
}
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -1640,7 +1646,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
int edp_num;
int i = 0;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
@@ -2464,7 +2470,7 @@ static void dcn10_enable_plane(
undo_DEGVIDCN10_253_wa(dc);
- power_on_plane(dc->hwseq,
+ power_on_plane_resources(dc->hwseq,
pipe_ctx->plane_res.hubp->inst);
/* enable DCFCLK current DCHUB */
@@ -2901,7 +2907,7 @@ void dcn10_blank_pixel_data(
dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
- } else if (blank) {
+ } else {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank) {
stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
@@ -3225,12 +3231,16 @@ static void dcn10_config_stereo_parameters(
timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
- enum display_dongle_type dongle = \
- stream->link->ddc->dongle_type;
- if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
- dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
- dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
- flags->DISABLE_STEREO_DP_SYNC = 1;
+
+ if (stream->link && stream->link->ddc) {
+ enum display_dongle_type dongle = \
+ stream->link->ddc->dongle_type;
+
+ if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ flags->DISABLE_STEREO_DP_SYNC = 1;
+ }
}
flags->RIGHT_EYE_POLARITY =\
stream->timing.flags.RIGHT_EYE_3D_POLARITY;
@@ -3383,7 +3393,9 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
// Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
+ if (!test_pipe->plane_state ||
+ !test_pipe->plane_state->visible ||
+ test_pipe->plane_state->layer_index == cur_layer)
continue;
r2 = test_pipe->plane_res.scl_data.recout;
@@ -3626,7 +3638,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index fbccb7263ad2..ee08b545aaea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn10_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
@@ -1220,7 +1219,6 @@ void dcn10_link_encoder_update_mst_stream_allocation_table(
const struct link_mst_stream_allocation_table *table)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value0 = 0;
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
@@ -1322,7 +1320,7 @@ void dcn10_link_encoder_update_mst_stream_allocation_table(
do {
udelay(10);
- value0 = REG_READ(DP_MSE_SAT_UPDATE);
+ REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 88ac5f6f4c96..db766689af58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -161,10 +161,20 @@ struct dcn_optc_registers {
uint32_t OTG_CRC_CNTL2;
uint32_t OTG_CRC0_DATA_RG;
uint32_t OTG_CRC0_DATA_B;
+ uint32_t OTG_CRC1_DATA_B;
+ uint32_t OTG_CRC2_DATA_B;
+ uint32_t OTG_CRC3_DATA_B;
+ uint32_t OTG_CRC1_DATA_RG;
+ uint32_t OTG_CRC2_DATA_RG;
+ uint32_t OTG_CRC3_DATA_RG;
uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL;
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL;
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL;
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL;
uint32_t GSL_SOURCE_SELECT;
uint32_t DWB_SOURCE_SELECT;
uint32_t OTG_DSC_START_POSITION;
@@ -464,6 +474,15 @@ struct dcn_optc_registers {
type CRC0_R_CR;\
type CRC0_G_Y;\
type CRC0_B_CB;\
+ type CRC1_R_CR;\
+ type CRC1_G_Y;\
+ type CRC1_B_CB;\
+ type CRC2_R_CR;\
+ type CRC2_G_Y;\
+ type CRC2_B_CB;\
+ type CRC3_R_CR;\
+ type CRC3_G_Y;\
+ type CRC3_B_CB;\
type OTG_CRC0_WINDOWA_X_START;\
type OTG_CRC0_WINDOWA_X_END;\
type OTG_CRC0_WINDOWA_Y_START;\
@@ -472,6 +491,15 @@ struct dcn_optc_registers {
type OTG_CRC0_WINDOWB_X_END;\
type OTG_CRC0_WINDOWB_Y_START;\
type OTG_CRC0_WINDOWB_Y_END;\
+ type OTG_CRC_WINDOW_DB_EN;\
+ type OTG_CRC1_WINDOWA_X_START;\
+ type OTG_CRC1_WINDOWA_X_END;\
+ type OTG_CRC1_WINDOWA_Y_START;\
+ type OTG_CRC1_WINDOWA_Y_END;\
+ type OTG_CRC1_WINDOWB_X_START;\
+ type OTG_CRC1_WINDOWB_X_END;\
+ type OTG_CRC1_WINDOWB_Y_START;\
+ type OTG_CRC1_WINDOWB_Y_END;\
type GSL0_READY_SOURCE_SEL;\
type GSL1_READY_SOURCE_SEL;\
type GSL2_READY_SOURCE_SEL;\
@@ -519,11 +547,13 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
- type OTG_V_TOTAL_LAST_USED_BY_DRR;
+ type OTG_V_TOTAL_LAST_USED_BY_DRR;\
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
+
struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
TG_REG_FIELD_LIST_DCN3_2(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 6bfac8088ab0..21ec1ba5ed75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -504,8 +504,6 @@ static const struct resource_caps rv2_res_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -544,8 +542,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_AVOID,
- .force_single_disp_pipe_split = false,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = true,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 484e7cdf00b8..f496e952ceec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
@@ -753,12 +753,19 @@ void enc1_stream_encoder_update_dp_info_packets(
* use other packetIndex (such as 5,6) for other info packet
*/
+ if (info_frame->adaptive_sync.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -926,7 +933,7 @@ void enc1_stream_encoder_dp_blank(
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
@@ -945,7 +952,7 @@ void enc1_stream_encoder_dp_blank(
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
@@ -1018,7 +1025,8 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
@@ -1463,10 +1471,9 @@ void enc1_se_hdmi_audio_setup(
void enc1_se_hdmi_audio_disable(
struct stream_encoder *enc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
enc->afmt->funcs->afmt_powerdown(enc->afmt);
-#endif
+
enc1_se_enable_audio_clock(enc, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 915a20461c77..7bdc146f7cb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -205,6 +205,11 @@
type PHYDSYMCLK_GATE_DISABLE; \
type PHYESYMCLK_GATE_DISABLE;
+#define DCCG314_REG_FIELD_LIST(type) \
+ type DSCCLK3_DTO_PHASE;\
+ type DSCCLK3_DTO_MODULO;\
+ type DSCCLK3_DTO_ENABLE;
+
#define DCCG32_REG_FIELD_LIST(type) \
type DPSTREAMCLK0_EN;\
type DPSTREAMCLK1_EN;\
@@ -230,12 +235,14 @@
type DTBCLK_P2_SRC_SEL;\
type DTBCLK_P2_EN;\
type DTBCLK_P3_SRC_SEL;\
- type DTBCLK_P3_EN;
+ type DTBCLK_P3_EN;\
+ type DENTIST_DISPCLK_CHG_DONE;
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
DCCG3_REG_FIELD_LIST(uint8_t)
DCCG31_REG_FIELD_LIST(uint8_t)
+ DCCG314_REG_FIELD_LIST(uint8_t)
DCCG32_REG_FIELD_LIST(uint8_t)
};
@@ -243,6 +250,7 @@ struct dccg_mask {
DCCG_REG_FIELD_LIST(uint32_t)
DCCG3_REG_FIELD_LIST(uint32_t)
DCCG31_REG_FIELD_LIST(uint32_t)
+ DCCG314_REG_FIELD_LIST(uint32_t)
DCCG32_REG_FIELD_LIST(uint32_t)
};
@@ -272,6 +280,7 @@ struct dccg_registers {
uint32_t DSCCLK0_DTO_PARAM;
uint32_t DSCCLK1_DTO_PARAM;
uint32_t DSCCLK2_DTO_PARAM;
+ uint32_t DSCCLK3_DTO_PARAM;
uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
uint32_t DPSTREAMCLK_GATE_DISABLE;
uint32_t DCCG_GATE_DISABLE_CNTL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 784a8b6f360d..5bd698cd6d20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -28,6 +28,7 @@
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
+#include "dsc/rc_calc.h"
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);
static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
@@ -49,7 +50,7 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
static void dsc2_disconnect(struct display_stream_compressor *dsc);
-const struct dsc_funcs dcn20_dsc_funcs = {
+static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
.dsc_validate_stream = dsc2_validate_stream,
@@ -200,7 +201,6 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct
bool is_config_ok;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
- DC_LOG_DSC(" ");
DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
dsc_config_log(dsc, dsc_cfg);
is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
@@ -345,10 +345,38 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
}
}
+static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
+{
+ uint8_t i;
+
+ rc->rc_model_size = override->rc_model_size;
+ for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++)
+ rc->rc_buf_thresh[i] = override->rc_buf_thresh[i];
+ for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) {
+ rc->qp_min[i] = override->rc_minqp[i];
+ rc->qp_max[i] = override->rc_maxqp[i];
+ rc->ofs[i] = override->rc_offset[i];
+ }
+
+ rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi;
+ rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo;
+ rc->rc_edge_factor = override->rc_edge_factor;
+ rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0;
+ rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1;
+
+ rc->initial_fullness_offset = override->initial_fullness_offset;
+ rc->initial_xmit_delay = override->initial_delay;
+
+ rc->flatness_min_qp = override->flatness_min_qp;
+ rc->flatness_max_qp = override->flatness_max_qp;
+ rc->flatness_det_thresh = override->flatness_det_thresh;
+}
+
static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
+ struct rc_params rc;
/* Validate input parameters */
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
@@ -413,7 +441,12 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
- if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) {
+ calc_rc_params(&rc, &dsc_reg_vals->pps);
+
+ if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd)
+ dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd);
+
+ if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {
dm_output_to_console("%s: DSC config failed\n", __func__);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index f1490e97b6ce..f8667be57046 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -301,7 +301,7 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
}
-const struct dwbc_funcs dcn20_dwbc_funcs = {
+static const struct dwbc_funcs dcn20_dwbc_funcs = {
.get_caps = dwb2_get_caps,
.enable = dwb2_enable,
.disable = dwb2_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6291a241158a..422fbf79da64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -46,16 +46,15 @@
#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
-#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "hw_sequencer.h"
-#include "inc/link_dpcd.h"
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -191,10 +190,15 @@ void dcn20_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
@@ -225,6 +229,10 @@ void dcn20_enable_power_gating_plane(
REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
if (REG(DOMAIN21_PG_CONFIG))
REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
}
void dcn20_dccg_init(struct dce_hwseq *hws)
@@ -582,6 +590,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.gsl_group != 0)
dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
+ if (hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
+
dc->hwss.set_flip_control_gsl(pipe_ctx, false);
hubp->funcs->hubp_clk_cntl(hubp, false);
@@ -605,6 +616,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
@@ -612,6 +626,12 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn20_plane_atomic_disable(dc, pipe_ctx);
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled
+ */
+ if (is_phantom)
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}
@@ -700,7 +720,7 @@ enum dc_status dcn20_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1110,11 +1130,15 @@ void dcn20_blank_pixel_data(
}
-static void dcn20_power_on_plane(
+static void dcn20_power_on_plane_resources(
struct dce_hwseq *hws,
struct pipe_ctx *pipe_ctx)
{
DC_LOGGER_INIT(hws->ctx->logger);
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -1138,7 +1162,7 @@ static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
//}
- dcn20_power_on_plane(dc->hwseq, pipe_ctx);
+ dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
@@ -1700,10 +1724,8 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
- }
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
@@ -1766,6 +1788,15 @@ static void dcn20_program_pipe(
&pipe_ctx->stream->bit_depth_params,
&pipe_ctx->stream->clamping);
}
+
+ /* Set ABM pipe after other pipe configurations done */
+ if (pipe_ctx->plane_state->visible) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
}
void dcn20_program_front_end_for_ctx(
@@ -1803,6 +1834,20 @@ void dcn20_program_front_end_for_ctx(
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+ }
+ }
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -1948,6 +1993,16 @@ void dcn20_post_unlock_program_front_end(
}
}
+ /* P-State support transitions:
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ */
+ if (hwseq && hwseq->funcs.update_force_pstate)
+ dc->hwseq->funcs.update_force_pstate(dc, context);
+
/* Only program the MALL registers after all the main and phantom pipes
* are done programming.
*/
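
The transition table above reduces to a simple rule: force-disallow is held whenever FPO or SubVP is in use and can be released once neither is (for FPO, once firmware-based switching has been disabled), since P-State then asserts or stays blocked naturally. A standalone illustration of that rule, not the driver's update_force_pstate implementation:

/* Illustrative only; the real policy lives behind hwseq->funcs.update_force_pstate. */
enum mclk_switch_mode_sketch {
	MODE_NATURAL,     /* P-State supported without firmware help */
	MODE_UNSUPPORTED, /* P-State not supported for this config */
	MODE_FPO,         /* firmware-paced optimization */
	MODE_SUBVP        /* sub-viewport MCLK switching */
};

static int pstate_force_disallow_sketch(enum mclk_switch_mode_sketch mode)
{
	return mode == MODE_FPO || mode == MODE_SUBVP;
}
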
@@ -1999,8 +2054,11 @@ void dcn20_prepare_bandwidth(
}
}
- /* program dchubbub watermarks */
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ /* program dchubbub watermarks:
+ * For assigning wm_optimized_required, use |= operator since we don't want
+ * to clear the value if the optimize has not happened yet
+ */
+ dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
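
The |= in the hunk above makes wm_optimized_required sticky across prepare_bandwidth() calls: a pending optimization request from an earlier prepare is not cleared by a later prepare that happens not to change the watermarks, and is only reset once the optimize step runs. A small standalone sketch of the pattern (names are illustrative):

#include <stdbool.h>

static bool wm_optimized_required_sketch;

/* prepare: accumulate, never clear */
static void prepare_bandwidth_sketch(bool watermarks_changed)
{
	wm_optimized_required_sketch |= watermarks_changed;
}

/* optimize: consume and reset */
static void optimize_bandwidth_sketch(void)
{
	/* ...apply optimized watermarks... */
	wm_optimized_required_sketch = false;
}
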
@@ -2055,6 +2113,15 @@ void dcn20_optimize_bandwidth(
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ dc_dmub_srv_p_state_delegate(dc,
+ true, context);
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->clks.fw_based_mclk_switching = true;
+ } else {
+ dc->clk_mgr->clks.fw_based_mclk_switching = false;
+ }
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2359,7 +2426,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -2412,7 +2479,7 @@ static void dcn20_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -2432,7 +2499,7 @@ static void dcn20_reset_back_end_for_pipe(
}
}
else if (pipe_ctx->stream_res.dsc) {
- dp_set_dsc_enable(pipe_ctx, false);
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -2615,6 +2682,37 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->mpcc_id = mpcc_id;
}
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ int count = 1;
+
+ while (odm_pipe != NULL) {
+ count++;
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ return count;
+}
+
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -2628,12 +2726,43 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dtbclk_dto_params dto_params = {0};
+ struct dccg *dccg = dc->res_pool->dccg;
+ enum phyd32clk_clock_source phyd32clk;
+ int dp_hpo_inst;
+ struct dce_hwseq *hws = dc->hwseq;
+ unsigned int k1_div = PIXEL_RATE_DIV_NA;
+ unsigned int k2_div = PIXEL_RATE_DIV_NA;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+
+ if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+ hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ k1_div, k2_div);
+ }
+
link_hwss->setup_stream_encoder(pipe_ctx);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
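
In the DTO programming added above, dto_params.pixclk_khz is derived by dividing pix_clk_100hz by 10, i.e. converting from units of 100 Hz to kHz; a 594 MHz pixel clock stored as 5940000 (100 Hz units) becomes 594000 kHz. A trivial standalone check of the conversion:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pix_clk_100hz = 5940000;         /* 594.0 MHz in 100 Hz units */
	uint32_t pixclk_khz = pix_clk_100hz / 10; /* matches dto_params.pixclk_khz */

	assert(pixclk_khz == 594000);
	return 0;
}
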
@@ -2644,7 +2773,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index 2f9bfaeaba8d..51a57dae1811 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn20_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
index ccd91792991b..259a98e4ee2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -297,7 +297,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch);
}
-const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
+static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
.config_mcif_buf = mmhubbub2_config_mcif_buf,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 116f67a0b989..5da6e44f284a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -542,7 +542,7 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
-const struct mpc_funcs dcn20_mpc_funcs = {
+static const struct mpc_funcs dcn20_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 8a0dd0d7134b..1d8c5805ef20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -62,7 +62,6 @@
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
-#include "dc_link_ddc.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -90,6 +89,7 @@
#include "amdgpu_socbb.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -670,8 +670,6 @@ static const struct resource_caps res_cap_nv10 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -714,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1213,8 +1211,11 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
- if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
struct hubp *dcn20_hubp_create(
@@ -1389,6 +1390,9 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->top_pipe)
+ continue;
+
if (pipe_ctx->stream != dc_stream)
continue;
@@ -2222,14 +2226,10 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
- enum swizzle_mode_values swizzle = DC_SW_LINEAR;
-
+ plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;
if (bpp == 64)
- swizzle = DC_SW_64KB_D;
- else
- swizzle = DC_SW_64KB_S;
+ plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D;
- plane_state->tiling_info.gfx9.swizzle = swizzle;
return DC_OK;
}
@@ -2766,7 +2766,7 @@ static bool dcn20_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
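The dcn20_patch_unknown_plane_state() rework above keeps the same behaviour with less code: default to the standard 64KB swizzle and switch to the display variant only for 64bpp surfaces. A small sketch of that selection, with hypothetical enum names standing in for the driver's swizzle_mode_values:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the driver's swizzle enum values. */
enum swizzle_mode { SW_64KB_S, SW_64KB_D };

/* Default to the standard 64KB swizzle; only 64bpp surfaces get the
 * display variant, as in the rewritten function above. */
static enum swizzle_mode pick_swizzle(unsigned int bpp)
{
	enum swizzle_mode swizzle = SW_64KB_S;

	if (bpp == 64)
		swizzle = SW_64KB_D;
	return swizzle;
}

int main(void)
{
	printf("32bpp -> %s\n", pick_swizzle(32) == SW_64KB_S ? "64KB_S" : "64KB_D");
	printf("64bpp -> %s\n", pick_swizzle(64) == SW_64KB_S ? "64KB_S" : "64KB_D");
	return 0;
}
```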
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index b40489e678f9..0b47aeb60e79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -423,6 +423,22 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc,
}
}
+static void enc2_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1);
+
+ REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
static void enc2_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame)
@@ -530,7 +546,8 @@ void enc2_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
static void enc2_dp_set_odm_combine(
@@ -587,6 +604,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc2_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc2_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc2_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
.send_immediate_sdp_message =
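The new enc2_stream_encoder_update_dp_info_packets_sdp_line_num() above only touches the GSP5 line-reference registers when both the adaptive-sync packet and its line number are flagged valid. A sketch of that guard with a simplified, hypothetical SDP descriptor (the real encoder_info_frame carries many more fields):

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the adaptive-sync fields consulted above. */
struct adaptive_sync_sdp {
	bool packet_valid;
	bool line_num_valid;
	unsigned int line_num;
};

/* Only reprogram the GSP5 line reference when both the packet and its
 * line number are valid, mirroring the guard in the encoder hook above. */
static void program_gsp5_line(const struct adaptive_sync_sdp *sdp)
{
	if (sdp->packet_valid && sdp->line_num_valid) {
		/* 0: line counted from DP SOF, 1: from OTG SOF */
		printf("GSP5 line reference -> OTG SOF, line %u\n", sdp->line_num);
	} else {
		printf("GSP5 line programming skipped\n");
	}
}

int main(void)
{
	struct adaptive_sync_sdp sdp = { .packet_valid = true,
					 .line_num_valid = true,
					 .line_num = 42 };

	program_gsp5_line(&sdp);
	return 0;
}
```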
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
index f50ab961bc17..a7268027a472 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -185,13 +185,6 @@ static bool dpp201_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 61bcfa03c4e7..1aeb04fbd89d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -541,8 +541,6 @@ void dcn201_pipe_control_lock(
bool lock)
{
struct dce_hwseq *hws = dc->hwseq;
- struct hubp *hubp = NULL;
- hubp = dc->res_pool->hubps[pipe->pipe_idx];
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
index 7f9ec59ef443..8d31fa131cd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn201_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
index 95c4c55f067c..1af03a86ec9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
@@ -76,7 +76,7 @@ static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
mpcc->shared_bottom = false;
}
-const struct mpc_funcs dcn201_mpc_funcs = {
+static const struct mpc_funcs dcn201_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index 407d995bfa99..6ea70da28aaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -74,7 +74,7 @@
#define MIN_DISP_CLK_KHZ 100000
#define MIN_DPP_CLK_KHZ 100000
-struct _vcs_dpi_ip_params_st dcn201_ip = {
+static struct _vcs_dpi_ip_params_st dcn201_ip = {
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -136,7 +136,7 @@ struct _vcs_dpi_ip_params_st dcn201_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.clock_limits = {
{
.state = 0,
@@ -571,8 +571,6 @@ static const struct resource_caps res_cap_dnc201 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 69cc192a7e71..2a182c2f57d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -132,8 +132,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
return;
pipe_ctx->stream->dpms_off = false;
- core_link_enable_stream(context, pipe_ctx);
- core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->ctx->dc->link_srv->set_dpms_on(context, pipe_ctx);
+ pipe_ctx->stream->ctx->dc->link_srv->set_dpms_off(pipe_ctx);
pipe_ctx->stream->dpms_off = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index 0a1ba6e7081c..eb9abb9f9698 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -31,7 +31,6 @@
#include "dcn21_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index fbcf0afeae0d..19aaa557b2db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -609,8 +609,6 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -642,7 +640,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1393,15 +1391,13 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
- enum dc_status result = DC_OK;
-
if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
plane_state->dcc.enable = 1;
/* align to our worst case block width */
plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
}
- result = dcn20_patch_unknown_plane_state(plane_state);
- return result;
+
+ return dcn20_patch_unknown_plane_state(plane_state);
}
static const struct resource_funcs dcn21_res_pool_funcs = {
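dcn21_patch_unknown_plane_state() above aligns the DCC metadata pitch to the worst-case 1024-pixel block before delegating to the DCN20 version. The integer rounding it uses is worth seeing on its own; a short stand-alone sketch:

```c
#include <stdio.h>

/* Round a surface width up to the 1024-pixel DCC metadata block, the same
 * ((w + 1023) / 1024) * 1024 alignment used above. */
static unsigned int align_meta_pitch(unsigned int width)
{
	return ((width + 1023) / 1024) * 1024;
}

int main(void)
{
	unsigned int widths[] = { 1024, 1920, 2560, 3840 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("%4u -> %u\n", widths[i], align_meta_pitch(widths[i]));
	return 0;
}
```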
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
index 95528e5ef89e..55e388c4c98b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -123,7 +123,6 @@ void afmt3_se_audio_setup(
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -131,7 +130,6 @@ void afmt3_se_audio_setup(
if (audio_info == NULL)
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 6f3c2fb60790..1fb8fd7afc95 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn30_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
/* #include "dcn3ag/dcn3ag_phy_fw.h" */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 17df53793c92..9d08127d209b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -28,6 +28,7 @@
#include "dcn30_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "dc.h"
#include "core_types.h"
#include <linux/delay.h>
@@ -404,6 +405,22 @@ static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
+void enc3_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1);
+
+ REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
void enc3_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame)
@@ -452,12 +469,20 @@ void enc3_stream_encoder_update_dp_info_packets(
* use other packetIndex (such as 5,6) for other info packet
*/
+ if (info_frame->adaptive_sync.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync,
+ true);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -803,6 +828,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
index 54ee230e7f98..06310973ded2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
@@ -292,6 +292,10 @@ void enc3_stream_encoder_update_hdmi_info_packets(
void enc3_stream_encoder_stop_hdmi_info_packets(
struct stream_encoder *enc);
+void enc3_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void enc3_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index 6263408d71fc..2082372d69ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -102,6 +102,7 @@
SRI(LB_DATA_FORMAT, DSCL, id), \
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
@@ -237,6 +238,7 @@
TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index f14f69616692..0d98918bf0fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -220,7 +220,7 @@ void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params)
}
-const struct dwbc_funcs dcn30_dwbc_funcs = {
+static const struct dwbc_funcs dcn30_dwbc_funcs = {
.get_caps = dwb3_get_caps,
.enable = dwb3_enable,
.disable = dwb3_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index dc3e8df706b3..e46bbe7ddcc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -47,13 +47,9 @@ void hubp3_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 8c5045711264..8263a07f265f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -50,8 +50,7 @@
#include "dpcd_defs.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dcn30_resource.h"
-#include "inc/dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
@@ -91,8 +90,8 @@ bool dcn30_set_blend_lut(
return result;
}
-static bool dcn30_set_mpc_shaper_3dlut(
- struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
+static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -104,19 +103,18 @@ static bool dcn30_set_mpc_shaper_3dlut(
const struct pwl_params *shaper_lut = NULL;
//get the shaper lut params
if (stream->func_shaper) {
- if (stream->func_shaper->type == TF_TYPE_HWPWL)
+ if (stream->func_shaper->type == TF_TYPE_HWPWL) {
shaper_lut = &stream->func_shaper->pwl;
- else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
- stream->func_shaper,
- &dpp_base->shaper_params, true);
+ } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(stream->func_shaper,
+ &dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
}
if (stream->lut3d_func &&
- stream->lut3d_func->state.bits.initialized == 1 &&
- stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
+ stream->lut3d_func->state.bits.initialized == 1 &&
+ stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
@@ -125,20 +123,22 @@ static bool dcn30_set_mpc_shaper_3dlut(
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
if (mpcc_id_projected != mpcc_id)
BREAK_TO_DEBUGGER();
- /*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/
+ /* find the reason why logical layer assigned a different
+ * mpcc_id into acquire_post_bldn_3dlut
+ */
acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
- stream->lut3d_func->state.bits.rmu_mux_num);
+ stream->lut3d_func->state.bits.rmu_mux_num);
if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
BREAK_TO_DEBUGGER();
- result = mpc->funcs->program_3dlut(mpc,
- &stream->lut3d_func->lut_3d,
- stream->lut3d_func->state.bits.rmu_mux_num);
+
+ result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
+ stream->lut3d_func->state.bits.rmu_mux_num);
result = mpc->funcs->program_shaper(mpc, shaper_lut,
- stream->lut3d_func->state.bits.rmu_mux_num);
- } else
- /*loop through the available mux and release the requested mpcc_id*/
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ } else {
+ // loop through the available mux and release the requested mpcc_id
mpc->funcs->release_rmu(mpc, mpcc_id);
-
+ }
return result;
}
@@ -323,13 +323,10 @@ void dcn30_enable_writeback(
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
- struct timing_generator *optc;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
- /* set the OPTC source mux */
- optc = dc->res_pool->timing_generators[dwb->otg_inst];
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
@@ -534,13 +531,8 @@ void dcn30_init_hw(struct dc *dc)
}
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -567,7 +559,7 @@ void dcn30_init_hw(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
@@ -629,7 +621,8 @@ void dcn30_init_hw(struct dc *dc)
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ //if softmax is enabled then hardmax will be set by a different call
+ if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
@@ -675,10 +668,16 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else
+ else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
+ }
}
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
@@ -984,16 +983,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
}
void dcn30_prepare_bandwidth(struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context)
{
+ bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ /* Any transition into an FPO config should disable MCLK switching first to avoid
+ * driver and FW P-State synchronization issues.
+ */
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ dc->optimized_required = true;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ }
+
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
+ /*
+ * enabled -> enabled: do not disable
+ * enabled -> disabled: disable
+ * disabled -> enabled: don't care
+ * disabled -> disabled: don't care
+ */
+ if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ dc_dmub_srv_p_state_delegate(dc, false, context);
- dc_dmub_srv_p_state_delegate(dc,
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ /* After disabling P-State, restore the original value to ensure we get the correct P-State
+ * on the next optimize. */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
+ }
}
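The rewritten dcn30_prepare_bandwidth() above saves p_state_change_support, forces it off for any transition into or out of an FPO (firmware-based MCLK switching) configuration, and restores the saved value afterwards so the next optimize sees the caller's original setting. A stripped-down sketch of that pattern, using a hypothetical clk_state struct in place of the real bw_ctx/clk_mgr state:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, trimmed-down clock state used only for this sketch. */
struct clk_state {
	bool fw_based_mclk_switching;
	bool p_state_change_support;
};

/* Save/disable/restore, as in dcn30_prepare_bandwidth(): any transition
 * into or out of an FPO config first forces P-State switching off, then
 * restores the caller's original value after programming. */
static void prepare_bandwidth(struct clk_state *new_ctx, const struct clk_state *cur)
{
	bool saved_p_state = new_ctx->p_state_change_support;
	bool fpo_transition = new_ctx->fw_based_mclk_switching ||
			      cur->fw_based_mclk_switching;

	if (fpo_transition)
		new_ctx->p_state_change_support = false;

	/* ... program watermarks / clocks with P-State disabled ... */
	printf("programming with p_state_change_support=%d\n",
	       new_ctx->p_state_change_support);

	if (fpo_transition)
		new_ctx->p_state_change_support = saved_p_state;
}

int main(void)
{
	struct clk_state cur = { .fw_based_mclk_switching = false };
	struct clk_state next = { .fw_based_mclk_switching = true,
				  .p_state_change_support = true };

	prepare_bandwidth(&next, &cur);
	printf("restored p_state_change_support=%d\n", next.p_state_change_support);
	return 0;
}
```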
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
index 7a93eff183d9..6f2a0d5d963b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -211,7 +211,7 @@ static void mmhubbub3_config_mcif_arb(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
}
-const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
+static const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
.warmup_mcif = mmhubbub3_warmup_mcif,
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index ad1c1b703874..6cf40c1332bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1399,7 +1399,7 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
}
}
-const struct mpc_funcs dcn30_mpc_funcs = {
+static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 867d60151aeb..c95f000b63b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -291,6 +291,14 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
}
+static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5 Hz */
+}
+
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
@@ -360,6 +368,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
+ .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};
void dcn30_timing_generator_init(struct optc *optc1)
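optc3_wait_drr_doublebuffer_pending_clear() above is a bounded poll: REG_WAIT re-reads OTG_DRR_TIMING_DBUF_UPDATE_PENDING until it reads back 0 or the retry budget (2 us x 100000 tries, roughly one vupdate at 5 Hz) runs out. A toy model of that polling loop, with a fake register bit standing in for the hardware:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy register model: the DRR double-buffer "pending" bit clears after a
 * few polls, standing in for hardware completing the update. */
static unsigned int polls_until_clear = 3;

static unsigned int read_pending_bit(void)
{
	if (polls_until_clear > 0) {
		polls_until_clear--;
		return 1;
	}
	return 0;
}

/* Poll up to max_tries times, conceptually what REG_WAIT does above:
 * wait for the pending bit to read back 0. */
static bool wait_pending_clear(unsigned int max_tries)
{
	for (unsigned int i = 0; i < max_tries; i++) {
		if (read_pending_bit() == 0)
			return true;
		/* in the driver this is a short microsecond delay per try */
	}
	return false;
}

int main(void)
{
	printf("pending cleared: %s\n", wait_pending_clear(100000) ? "yes" : "no");
	return 0;
}
```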
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index dd45a5499b07..fb06dc9a4893 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -279,6 +279,7 @@
SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh)
@@ -317,6 +318,7 @@
SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
void dcn30_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index c18c52a60100..67a34cda3774 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -60,7 +60,7 @@
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -680,8 +680,6 @@ static const struct resource_caps res_cap_dcn3 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -703,7 +701,9 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = 167,
.nv12 = 167,
.fp16 = 167
- }
+ },
+ 16,
+ 16
};
static const struct dc_debug_options debug_defaults_drv = {
@@ -1207,8 +1207,11 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
static struct hubp *dcn30_hubp_create(
@@ -1477,8 +1480,8 @@ bool dcn30_acquire_post_bldn_3dlut(
state->bits.mpc_rmu2_mux = mpcc_id;
ret = true;
break;
- }
}
+ }
return ret;
}
@@ -1648,7 +1651,8 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ bool fast_validate,
+ bool allow_self_refresh_only)
{
bool out = false;
bool repopulate_pipes = false;
@@ -1675,7 +1679,7 @@ noinline bool dcn30_internal_validate_bw(
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (!fast_validate) {
+ if (!fast_validate || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1688,11 +1692,12 @@ noinline bool dcn30_internal_validate_bw(
if (vlevel < context->bw_ctx.dml.soc.num_states)
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (allow_self_refresh_only &&
+ (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If mode is unsupported or there's still no p-state support
+ * then fall back to favoring voltage.
*
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
@@ -2013,6 +2018,8 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc,
if (context->streams[0]->vrr_active_variable)
return false;
+ context->streams[0]->fpo_in_use = true;
+
return true;
}
@@ -2063,7 +2070,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
if (pipe_cnt == 0)
@@ -2590,7 +2597,7 @@ static bool dcn30_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
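dcn30_internal_validate_bw() above gains an allow_self_refresh_only parameter; the voltage-favoring fallback now fires only when that flag is set and the pass is a fast validate, the mode is unsupported, or DRAM clock change is unsupported. The condition as a small stand-alone sketch (all inputs here are made-up examples):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the reworked fallback condition above; names are shortened
 * for readability. */
static bool should_fall_back_to_voltage(bool allow_self_refresh_only,
					bool fast_validate,
					bool mode_unsupported,
					bool dram_clock_change_unsupported)
{
	return allow_self_refresh_only &&
	       (fast_validate || mode_unsupported || dram_clock_change_unsupported);
}

int main(void)
{
	printf("%d\n", should_fall_back_to_voltage(true, false, false, true));
	printf("%d\n", should_fall_back_to_voltage(false, true, false, true));
	return 0;
}
```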
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index 7d063c7d6a4b..8e6b8b7368fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ bool fast_validate,
+ bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
index c9fbaed23965..1b39a6e8a1ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn301_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 8cf10351f271..5ac2a272c380 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -651,8 +651,6 @@ static struct resource_caps res_cap_dcn301 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -689,7 +687,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_AVOID,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1414,7 +1412,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn301_update_bw_bounding_box
+ .update_bw_bounding_box = dcn301_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
};
static bool dcn301_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 47cffd0e6830..9f93c43115ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -47,6 +47,7 @@
#include "dcn10/dcn10_resource.h"
+#include "link.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
#include "dce/dce_aux.h"
@@ -146,8 +147,6 @@ static const struct resource_caps res_cap_dcn302 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
@@ -1125,6 +1124,12 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
+
+ if (pool->oem_device != NULL) {
+ struct dc *dc = pool->oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->oem_device);
+ }
}
static void dcn302_destroy_resource_pool(struct resource_pool **pool)
@@ -1216,6 +1221,7 @@ static bool dcn302_resource_construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1497,6 +1503,17 @@ static bool dcn302_resource_construct(
dc->cap_funcs = cap_funcs;
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
+ } else {
+ pool->oem_device = NULL;
+ }
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index c14d35894b2e..7f72ef882ca4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -29,7 +29,7 @@
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
@@ -126,8 +126,6 @@ static const struct resource_caps res_cap_dcn303 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
@@ -1053,8 +1051,11 @@ static void dcn303_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
- if (pool->oem_device != NULL)
- dal_ddc_service_destroy(&pool->oem_device);
+ if (pool->oem_device != NULL) {
+ struct dc *dc = pool->oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->oem_device);
+ }
}
static void dcn303_destroy_resource_pool(struct resource_pool **pool)
@@ -1163,7 +1164,6 @@ static bool dcn303_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc->caps.mall_size_per_mem_channel = 4;
/* total size = mall per channel * num channels * 1024 * 1024 */
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel *
@@ -1171,7 +1171,6 @@ static bool dcn303_resource_construct(
1024 * 1024;
dc->caps.cursor_cache_size =
dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
-#endif
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1421,7 +1420,7 @@ static bool dcn303_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
index 24e9ff65434d..05aac3e444b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -72,40 +72,6 @@ static void apg31_disable(
REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0);
}
-static union audio_cea_channels speakers_to_channels(
- struct audio_speaker_flags speaker_flags)
-{
- union audio_cea_channels cea_channels = {0};
-
- /* these are one to one */
- cea_channels.channels.FL = speaker_flags.FL_FR;
- cea_channels.channels.FR = speaker_flags.FL_FR;
- cea_channels.channels.LFE = speaker_flags.LFE;
- cea_channels.channels.FC = speaker_flags.FC;
-
- /* if Rear Left and Right exist move RC speaker to channel 7
- * otherwise to channel 5
- */
- if (speaker_flags.RL_RR) {
- cea_channels.channels.RL_RC = speaker_flags.RL_RR;
- cea_channels.channels.RR = speaker_flags.RL_RR;
- cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
- } else {
- cea_channels.channels.RL_RC = speaker_flags.RC;
- }
-
- /* FRONT Left Right Center and REAR Left Right Center are exclusive */
- if (speaker_flags.FLC_FRC) {
- cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
- cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
- } else {
- cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
- cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
- }
-
- return cea_channels;
-}
-
static void apg31_se_audio_setup(
struct apg *apg,
unsigned int az_inst,
@@ -113,24 +79,17 @@ static void apg31_se_audio_setup(
{
struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
- uint32_t speakers = 0;
- uint32_t channels = 0;
-
ASSERT(audio_info);
	/* This should not happen, but it does, so guard it to avoid a BSOD */
if (audio_info == NULL)
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
- channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
-
/* DisplayPort only allows for one audio stream with stream ID 0 */
REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0);
/* When running in "pair mode", pairs of audio channels have their own enable
* this is for really old audio drivers */
REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
- // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels);
/* Disable forced mem power off */
REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0);
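For context on the speakers_to_channels() helper removed from dcn31_apg.c above (the APG now always enables all eight channel pairs): it folded speaker-presence flags into a CEA channel-allocation mask, with rear-center placement depending on whether rear left/right are present. A simplified, hypothetical sketch of that idea; the bit positions below are illustrative only, not the driver's real union layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified speaker-flag layout used only for this sketch. */
struct speaker_flags {
	unsigned int FL_FR : 1;
	unsigned int LFE   : 1;
	unsigned int FC    : 1;
	unsigned int RL_RR : 1;
	unsigned int RC    : 1;
};

/* One-to-one channels map directly; the rear-center speaker lands on a
 * higher channel slot only when rear left/right are also present, the
 * same idea as the removed speakers_to_channels() helper. */
static uint8_t speakers_to_channel_mask(struct speaker_flags f)
{
	uint8_t mask = 0;

	if (f.FL_FR)
		mask |= 0x03;                  /* FL + FR */
	if (f.LFE)
		mask |= 0x04;                  /* LFE     */
	if (f.FC)
		mask |= 0x08;                  /* FC      */
	if (f.RL_RR)
		mask |= 0x30;                  /* RL + RR */
	if (f.RC)
		mask |= f.RL_RR ? 0x40 : 0x10; /* RC placement depends on RL/RR */

	return mask;
}

int main(void)
{
	struct speaker_flags stereo = { .FL_FR = 1 };
	struct speaker_flags five_one = { .FL_FR = 1, .LFE = 1, .FC = 1, .RL_RR = 1 };

	printf("stereo mask: 0x%02x\n", speakers_to_channel_mask(stereo));
	printf("5.1 mask:    0x%02x\n", speakers_to_channel_mask(five_one));
	return 0;
}
```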
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 7f34418e6308..4c2fdfea162f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -66,17 +66,8 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
- //DTO must be enabled to generate a 0Hz clock output
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
- REG_UPDATE(DPPCLK_DTO_CTRL,
- DPPCLK_DTO_ENABLE[dpp_inst], 1);
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, 0,
- DPPCLK0_DTO_MODULO, 1);
- } else {
- REG_UPDATE(DPPCLK_DTO_CTRL,
- DPPCLK_DTO_ENABLE[dpp_inst], 0);
- }
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -369,6 +360,15 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst)
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 1);
break;
+ case 3:
+ if (REG(DSCCLK3_DTO_PARAM)) {
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK3_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+ DSCCLK3_DTO_PHASE, 0,
+ DSCCLK3_DTO_MODULO, 1);
+ }
+ break;
default:
BREAK_TO_DEBUGGER();
return;
@@ -404,6 +404,15 @@ void dccg31_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL,
DSCCLK2_DTO_ENABLE, 0);
break;
+ case 3:
+ if (REG(DSCCLK3_DTO_PARAM)) {
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK3_DTO_ENABLE, 0);
+ REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+ DSCCLK3_DTO_PHASE, 0,
+ DSCCLK3_DTO_MODULO, 0);
+ }
+ break;
default:
BREAK_TO_DEBUGGER();
return;
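The DSCCLK3 cases added to dccg31_disable_dscclk()/dccg31_enable_dscclk() above follow the existing instances: programming a DTO with phase 0 and a non-zero modulo yields a 0 Hz output, which is how the clock is gated. A tiny sketch of that relationship, assuming the usual output = ref * phase / modulo DTO model (a simplification of the real hardware):

```c
#include <stdio.h>

/* A DTO scales a reference clock by phase/modulo; phase = 0 with modulo = 1
 * therefore gates the output to 0 Hz. */
static unsigned int dto_output_khz(unsigned int ref_khz,
				   unsigned int phase,
				   unsigned int modulo)
{
	if (modulo == 0)
		return 0; /* avoid divide-by-zero; treat as gated here */
	return (unsigned int)(((unsigned long long)ref_khz * phase) / modulo);
}

int main(void)
{
	printf("enabled:  %u kHz\n", dto_output_khz(600000, 1, 1));
	printf("disabled: %u kHz\n", dto_output_khz(600000, 0, 1));
	return 0;
}
```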
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index ab70ebd8f223..745a5d187a98 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -30,7 +30,6 @@
#include "link_encoder.h"
#include "dcn31_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
@@ -38,6 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
+#include "link.h"
#define CTX \
enc10->base.ctx
@@ -486,7 +486,7 @@ void dcn31_link_encoder_enable_dp_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
@@ -533,7 +533,7 @@ void dcn31_link_encoder_enable_dp_mst_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 80dfaa4d4d81..5b7ad38f85e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -26,7 +26,6 @@
#include "dc_bios_types.h"
#include "dcn31_hpo_dp_link_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
#include "stream_encoder.h"
#define DC_LOGGER \
@@ -242,7 +241,10 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(
REG_UPDATE(DP_DPHY_SYM32_CONTROL,
MODE, DP2_TEST_PATTERN);
break;
- case DP_TEST_PATTERN_SQUARE_PULSE:
+ case DP_TEST_PATTERN_SQUARE:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,
TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 16639bd03adf..0278bae50a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -26,7 +26,7 @@
#include "dc_bios_types.h"
#include "dcn31_hpo_dp_stream_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
+#include "dc.h"
#define DC_LOGGER \
enc3->base.ctx->logger
@@ -430,6 +430,22 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
MSA_DATA_LANE_3, 0);
}
+static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num(
+ struct hpo_dp_stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1);
+
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
struct hpo_dp_stream_encoder *enc,
const struct encoder_info_frame *info_frame)
@@ -458,12 +474,20 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
&info_frame->hdrsmd,
true);
+ if (info_frame->adaptive_sync.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync,
+ true);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid);
/* check if dynamic metadata packet transmission is enabled */
REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL,
@@ -714,6 +738,7 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {
.dp_blank = dcn31_hpo_dp_stream_enc_dp_blank,
.disable = dcn31_hpo_dp_stream_enc_disable,
.set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute,
+ .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets,
.stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets,
.dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 6360dc9502e7..7e7cd5b64e6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -1008,6 +1008,24 @@ static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
return false;
}
+void hubbub31_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ /*Enable clock gate*/
+ if (hubbub->ctx->dc->debug.disable_clock_gate) {
+ /*done in hwseq*/
+ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ }
+
+	/* only the DCN will determine when to connect the SDP port */
+	REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1);
+}
+
static const struct hubbub_funcs hubbub31_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
index 70c60de448ac..89d6208287b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
@@ -42,6 +42,10 @@
SR(DCHUBBUB_COMPBUF_CTRL),\
SR(COMPBUF_RESERVED_SPACE),\
SR(DCHUBBUB_DEBUG_CTRL_0),\
+ SR(DCHUBBUB_CLOCK_CNTL),\
+ SR(DCHUBBUB_SDPIF_CFG0),\
+ SR(DCHUBBUB_SDPIF_CFG1),\
+ SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\
@@ -120,11 +124,17 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
- HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
+void hubbub31_init(struct hubbub *hubbub);
+
void hubbub31_construct(struct dcn20_hubbub *hubbub3,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4226a051df41..62ce36c75c4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -45,8 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -98,7 +97,7 @@ static void enable_memory_low_power(struct dc *dc)
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
@@ -203,7 +202,7 @@ void dcn31_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc->ctx->dmub_srv);
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -231,7 +230,7 @@ void dcn31_init_hw(struct dc *dc)
}
if (num_opps > 1) {
- dc_link_blank_all_edp_displays(dc);
+ dc->link_srv->blank_all_edp_displays(dc);
break;
}
}
@@ -286,16 +285,20 @@ void dcn31_init_hw(struct dc *dc)
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
#endif
+
+ // Get DMCUB capabilities
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
}
void dcn31_dsc_pg_control(
@@ -415,7 +418,17 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else {
+ else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ return;
+ } else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -556,7 +569,7 @@ static void dcn31_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -575,7 +588,7 @@ static void dcn31_reset_back_end_for_pipe(
}
}
} else if (pipe_ctx->stream_res.dsc) {
- dp_set_dsc_enable(pipe_ctx, false);
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
pipe_ctx->stream = NULL;
@@ -623,43 +636,3 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
if (hws->ctx->dc->debug.hpo_optimization)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
-void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, struct dc_crtc_timing_adjust adjust)
-{
- int i = 0;
- struct drr_params params = {0};
- unsigned int event_triggers = 0x2;/*Bit[1]: OTG_TRIG_A*/
- unsigned int num_frames = 2;
- params.vertical_total_max = adjust.v_total_max;
- params.vertical_total_min = adjust.v_total_min;
- params.vertical_total_mid = adjust.v_total_mid;
- params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
- for (i = 0; i < num_pipes; i++) {
- if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
- if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
- }
- }
-}
-void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params)
-{
- unsigned int i;
- unsigned int triggers = 0;
- if (params->triggers.surface_update)
- triggers |= 0x600;/*bit 9 and bit10 : 110 0000 0000*/
- if (params->triggers.cursor_update)
- triggers |= 0x10;/*bit4*/
- if (params->triggers.force_trigger)
- triggers |= 0x1;
- for (i = 0; i < num_pipes; i++)
- pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg,
- triggers, params->num_frames);
-}
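For reference, the trigger-flag packing that the removed dcn31_set_static_screen_control() performed can be summarized in a standalone sketch; the struct and helper names below are illustrative stand-ins, not DC API, and the bit positions come from the removed code above.

/* Illustrative sketch of the static-screen trigger packing removed above. */
struct static_screen_triggers_sketch {
	int surface_update;
	int cursor_update;
	int force_trigger;
};

static unsigned int pack_static_screen_triggers(const struct static_screen_triggers_sketch *t)
{
	unsigned int mask = 0;

	if (t->surface_update)
		mask |= 0x600;	/* bits 9 and 10 */
	if (t->cursor_update)
		mask |= 0x10;	/* bit 4 */
	if (t->force_trigger)
		mask |= 0x1;	/* bit 0 */
	return mask;
}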
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index e7e03a8722e0..edfc01d6ad73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -56,8 +56,4 @@ bool dcn31_is_abm_supported(struct dc *dc,
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
-void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params);
-void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, struct dc_crtc_timing_adjust adjust);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 7c2da70ffe21..3a32810bbe38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -64,9 +64,9 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn31_set_drr,
+ .set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn31_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index fe449f7aa771..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -40,7 +40,6 @@
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
-#define STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN 0x2000 /*bit 13*/
static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
@@ -232,32 +231,6 @@ void optc3_init_odm(struct timing_generator *optc)
OPTC_MEM_SEL, 0);
optc1->opp_count = 1;
}
-void optc31_set_static_screen_control(
- struct timing_generator *optc,
- uint32_t event_triggers,
- uint32_t num_frames)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t framecount;
- uint32_t events;
-
- if (num_frames > 0xFF)
- num_frames = 0xFF;
- REG_GET_2(OTG_STATIC_SCREEN_CONTROL,
- OTG_STATIC_SCREEN_EVENT_MASK, &events,
- OTG_STATIC_SCREEN_FRAME_COUNT, &framecount);
-
- if (events == event_triggers && num_frames == framecount)
- return;
- if ((event_triggers & STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN)
- != 0)
- event_triggers = event_triggers &
- ~STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN;
-
- REG_UPDATE_2(OTG_STATIC_SCREEN_CONTROL,
- OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
- OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
-}
static struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
@@ -293,7 +266,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
- .set_static_screen_control = optc31_set_static_screen_control,
+ .set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
index 5fc6c63580d7..30b81a448ce2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
@@ -263,8 +263,5 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc);
void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
void optc3_init_odm(struct timing_generator *optc);
-void optc31_set_static_screen_control(
- struct timing_generator *optc,
- uint32_t event_triggers,
- uint32_t num_frames);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 3ca517dcc82d..ff8cd5076434 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -827,8 +827,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -1795,7 +1793,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
@@ -1967,6 +1965,8 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->config.use_old_fixed_vs_sequence = true;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 0b769ee71405..de7bfba2c179 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -274,6 +274,32 @@ static void dccg314_set_dpstreamclk(
}
}
+static void dccg314_init(struct dccg *dccg)
+{
+ int otg_inst;
+
+ /* Set HPO stream encoder to use refclk to avoid case where PHY is
+ * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which
+ * will cause DCN to hang.
+ */
+ for (otg_inst = 0; otg_inst < 4; otg_inst++)
+ dccg31_disable_symclk32_se(dccg, otg_inst);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ for (otg_inst = 0; otg_inst < 2; otg_inst++)
+ dccg31_disable_symclk32_le(dccg, otg_inst);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ for (otg_inst = 0; otg_inst < 4; otg_inst++)
+ dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst,
+ otg_inst);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ for (otg_inst = 0; otg_inst < 5; otg_inst++)
+ dccg31_set_physymclk(dccg, otg_inst,
+ PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+}
+
static void dccg314_set_valid_pixel_rate(
struct dccg *dccg,
int ref_dtbclk_khz,
@@ -289,10 +315,33 @@ static void dccg314_set_valid_pixel_rate(
dccg314_set_dtbclk_dto(dccg, &dto_params);
}
+static void dccg314_dpp_root_clock_control(
+ struct dccg *dccg,
+ unsigned int dpp_inst,
+ bool clock_on)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (clock_on) {
+ /* turn off the DTO and leave phase/modulo at max */
+ REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, 0xFF,
+ DPPCLK0_DTO_MODULO, 0xFF);
+ } else {
+ /* turn on the DTO to generate a 0hz clock */
+ REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ }
+}
+
static const struct dccg_funcs dccg314_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
+ .dpp_root_clock_control = dccg314_dpp_root_clock_control,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
- .dccg_init = dccg31_init,
+ .dccg_init = dccg314_init,
.set_dpstreamclk = dccg314_set_dpstreamclk,
.enable_symclk32_se = dccg31_enable_symclk32_se,
.disable_symclk32_se = dccg31_disable_symclk32_se,
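The dccg314_dpp_root_clock_control() hunk above gates the DPP root clock by reprogramming the DPPCLK DTO rather than toggling a dedicated gate bit: enabling the DTO with phase 0 yields a 0 Hz output, while disabling the DTO with phase/modulo at maximum restores the undivided clock. As a rough, hardware-agnostic model (hypothetical helper, not DC code):

#include <stdint.h>

/* Rough model of a phase/modulo DTO output rate: ref * phase / modulo.
 * phase = 0 gives 0 Hz (clock effectively gated); phase == modulo passes
 * the reference clock through unchanged.
 */
static uint32_t dto_output_khz_sketch(uint32_t ref_clk_khz, uint32_t phase, uint32_t modulo)
{
	if (modulo == 0)
		return 0;	/* treat an unprogrammed DTO as gated */
	return (uint32_t)(((uint64_t)ref_clk_khz * phase) / modulo);
}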
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
index 6a35986307af..90687a9e8fdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
@@ -68,6 +68,7 @@
SR(DSCCLK0_DTO_PARAM),\
SR(DSCCLK1_DTO_PARAM),\
SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK3_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
@@ -149,12 +150,20 @@
DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_MODULO, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\
DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
@@ -178,6 +187,7 @@
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_DTO_ENABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 38842f938bed..467509a65fa7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -278,10 +278,11 @@ static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
- /* New to DCN314 - disable the FIFO before VID stream disable. */
- enc314_disable_fifo(enc);
-
enc1_stream_encoder_dp_blank(link, enc);
+
+ /* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
+ if (enc->ctx->dc->debug.dig_fifo_off_in_blank)
+ enc314_disable_fifo(enc);
}
static void enc314_stream_encoder_dp_unblank(
@@ -295,12 +296,14 @@ static void enc314_stream_encoder_dp_unblank(
uint32_t n_vid = 0x8000;
uint32_t m_vid;
uint32_t n_multiply = 0;
+ uint32_t pix_per_cycle = 0;
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
+ pix_per_cycle = 1;
}
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
@@ -328,6 +331,10 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE_2(DP_VID_TIMING,
DP_VID_M_N_GEN_EN, 1,
DP_VID_N_MUL, n_multiply);
+
+ REG_UPDATE(DP_PIXEL_FORMAT,
+ DP_PIXEL_PER_CYCLE_PROCESSING_MODE,
+ pix_per_cycle);
}
/* make sure stream is disabled before resetting steer fifo */
@@ -365,7 +372,7 @@ static void enc314_stream_encoder_dp_unblank(
*/
enc314_enable_fifo(enc);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
@@ -428,6 +435,8 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
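The enc314_stream_encoder_dp_unblank() changes above program the DP_VID_M/N generator from the relation m_vid / n_vid = pixel rate / link rate, with n_vid fixed at 0x8000; the two-pixels-per-container case is signalled separately through DP_VID_N_MUL (and now DP_PIXEL_PER_CYCLE_PROCESSING_MODE) rather than folded into M. A standalone sketch of the M computation, using a hypothetical helper name and a 64-bit intermediate as in the driver:

#include <stdint.h>

/* Illustrative M-value computation for the DP_VID_M/N generator; not DC API. */
static uint32_t compute_dp_m_vid_sketch(uint64_t pixel_rate_khz, uint64_t link_rate_khz)
{
	const uint32_t n_vid = 0x8000;
	uint64_t m_vid_l = n_vid;

	if (link_rate_khz == 0)
		return 0;
	m_vid_l *= pixel_rate_khz;
	m_vid_l /= link_rate_khz;	/* m_vid / n_vid = Fstream / Flink */
	return (uint32_t)m_vid_l;
}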
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
index 33dfdf8b4100..ed0772387903 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
@@ -280,6 +280,10 @@ void enc3_stream_encoder_update_hdmi_info_packets(
void enc3_stream_encoder_stop_hdmi_info_packets(
struct stream_encoder *enc);
+void enc3_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void enc3_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index a0741794db62..cc3fe9cac5b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -46,9 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "dc_link_dp.h"
-#include "inc/dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -348,7 +346,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -391,3 +389,102 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}
+
+void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
+{
+ if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ return;
+
+ if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control)
+ hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
+ hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
+}
+
+void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+{
+ struct dc_context *ctx = hws->ctx;
+ union dmub_rb_cmd cmd;
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+
+ PERF_TRACE();
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.domain_control.header.type = DMUB_CMD__VBIOS;
+ cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
+ cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);
+ cmd.domain_control.data.inst = hubp_inst;
+ cmd.domain_control.data.power_gate = !power_on;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(ctx->dmub_srv);
+
+ PERF_TRACE();
+}
+static void apply_symclk_on_tx_off_wa(struct dc_link *link)
+{
+ /* There are use cases where SYMCLK is referenced by OTG. For instance
+	 * for TMDS signal, OTG relies on SYMCLK even if TX video output is off.
+	 * However, the current link interface will power off the PHY when disabling link
+ * output. This will turn off SYMCLK generated by PHY. The workaround is
+ * to identify such case where SYMCLK is still in use by OTG when we
+ * power off PHY. When this is detected, we will temporarily power PHY
+ * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
+ * program_pix_clk interface. When OTG is disabled, we will then power
+ * off PHY by calling disable link output again.
+ *
+	 * In future dcn generations, we plan to rework the transmitter control
+	 * interface so that we have an option to set the SYMCLK ON TX OFF
+	 * state in one step, without this workaround.
+ */
+
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ if (link->phy_state.symclk_ref_cnts.otg > 0) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ break;
+ }
+ }
+ }
+}
+
+void dcn314_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+ apply_symclk_on_tx_off_wa(link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index 244280298212..6d0b62503caa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -41,4 +41,10 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
+
+void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
+
+void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 31feb4b0edee..a588f46b166f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -66,9 +66,9 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn31_set_drr,
+ .set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn31_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
@@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
- .disable_link_output = dce110_disable_link_output,
+ .disable_link_output = dcn314_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
@@ -137,7 +137,8 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
- .hubp_pg_control = dcn31_hubp_pg_control,
+ .dpp_root_clock_control = dcn314_dpp_root_clock_control,
+ .hubp_pg_control = dcn314_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 41edbd64ea21..0086cafb0f7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -228,7 +228,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
- .set_static_screen_control = optc31_set_static_screen_control,
+ .set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
@@ -241,7 +241,6 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
- .set_odm_combine = optc314_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index f9ea1e86707f..abeeede38fb3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -855,8 +855,6 @@ static const struct resource_caps res_cap_dcn314 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -874,8 +872,9 @@ static const struct dc_plane_cap plane_cap = {
},
// 6:1 downscaling ratio: 1000/6 = 166.666
+	// 4:1 downscaling ratio for ARGB8888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -886,11 +885,14 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
+ .minimum_z8_residency_time = 2000,
.psr_skip_crtc_disable = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
@@ -900,7 +902,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 4096,/*upto true 4k*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
- .sanity_checks = false,
+ .sanity_checks = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
@@ -1694,6 +1696,81 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
+static bool filter_modes_for_single_channel_workaround(struct dc *dc,
+ struct dc_state *context)
+{
+	// Filter out combined timings at or above 2K@240Hz + 8K@24fps when memory is single-DIMM LPDDR
+ if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
+ int total_phy_pix_clk = 0;
+
+ for (int i = 0; i < context->stream_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream)
+ total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+
+ if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
+ return true;
+ }
+ return false;
+}
+
+bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ if (filter_modes_for_single_channel_workaround(dc, context))
+ goto validate_fail;
+
+ DC_FP_START();
+ // do not support self refresh only
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ DC_FP_END();
+
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ if (pipe_cnt == 0)
+ fast_validate = false;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
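For reference, the cutoff used by filter_modes_for_single_channel_workaround() above works out to:

    1148928 kHz + 826260 kHz = 1975188 kHz (about 1.98 GHz of combined phy_pix_clk)

which, per the in-line comment, corresponds to a 2K@240Hz stream plus an 8K@24fps stream; only single-channel LPDDR configurations at or above that combined pixel clock are rejected.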
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1701,7 +1778,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
- .validate_bandwidth = dcn31_validate_bandwidth,
+ .validate_bandwidth = dcn314_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
@@ -1763,7 +1840,7 @@ static bool dcn314_resource_construct(
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
- dc->caps.max_downscale_ratio = 600;
+ dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index 0dd3153aa5c1..49ffe71018df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -39,6 +39,10 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
+bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 7887078c5f64..41c972c8eb19 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -824,8 +824,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index b4d5076e124c..9ead347a33e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -824,8 +824,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -1776,7 +1774,7 @@ static bool dcn316_resource_construct(
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index e4472c6be6c3..ffbb739d85b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,20 @@
#define DC_LOGGER \
dccg->ctx->logger
+/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV
+ * without risking a DIG FIFO error.
+ */
+static void dccg32_wait_for_dentist_change_done(
+ struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+
+ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+}
+
static void dccg32_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -110,21 +124,29 @@ static void dccg32_set_pixel_rate_div(
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG0_PIXEL_RATE_DIVK1, k1,
OTG0_PIXEL_RATE_DIVK2, k2);
+
+ dccg32_wait_for_dentist_change_done(dccg);
break;
case 1:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG1_PIXEL_RATE_DIVK1, k1,
OTG1_PIXEL_RATE_DIVK2, k2);
+
+ dccg32_wait_for_dentist_change_done(dccg);
break;
case 2:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG2_PIXEL_RATE_DIVK1, k1,
OTG2_PIXEL_RATE_DIVK2, k2);
+
+ dccg32_wait_for_dentist_change_done(dccg);
break;
case 3:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG3_PIXEL_RATE_DIVK1, k1,
OTG3_PIXEL_RATE_DIVK2, k2);
+
+ dccg32_wait_for_dentist_change_done(dccg);
break;
default:
BREAK_TO_DEBUGGER();
@@ -271,8 +293,7 @@ static void dccg32_set_dpstreamclk(
dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
/* enabled to select one of the DTBCLKs for pipe */
- switch (otg_inst)
- {
+ switch (dp_hpo_inst) {
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
index 1c46fad0977b..8071ab98d708 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
@@ -31,42 +31,6 @@
#define DCCG_SFII(block, reg_name, field_prefix, field_name, inst, post_fix)\
.field_prefix ## _ ## field_name[inst] = block ## inst ## _ ## reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
-
-#define DCCG_REG_LIST_DCN32() \
- SR(DPPCLK_DTO_CTRL),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
- SR(PHYASYMCLK_CLOCK_CNTL),\
- SR(PHYBSYMCLK_CLOCK_CNTL),\
- SR(PHYCSYMCLK_CLOCK_CNTL),\
- SR(PHYDSYMCLK_CLOCK_CNTL),\
- SR(PHYESYMCLK_CLOCK_CNTL),\
- SR(DPSTREAMCLK_CNTL),\
- SR(HDMISTREAMCLK_CNTL),\
- SR(SYMCLK32_SE_CNTL),\
- SR(SYMCLK32_LE_CNTL),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 0),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 1),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 2),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 3),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 0),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 1),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 2),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
- SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
- SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
- SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
-
-
#define DCCG_MASK_SH_LIST_DCN32(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -147,7 +111,8 @@
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
- DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh)
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
struct dccg *dccg32_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index 076969d928af..501388014855 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -31,7 +31,6 @@
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index d19fc93dbc75..2fef1419ae91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -211,10 +211,8 @@ static void enc32_stream_encoder_hdmi_set_stream_attribute(
HDMI_GC_SEND, 1,
HDMI_NULL_SEND, 1);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
/* Disable Audio Content Protection packet transmission */
REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);
-#endif
/* following belongs to audio */
/* Enable Audio InfoFrame packet transmission. */
@@ -276,10 +274,10 @@ static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_
dc->debug.enable_dp_dig_pixel_rate_div_policy;
}
-static void enc32_stream_encoder_dp_unblank(
- struct dc_link *link,
- struct stream_encoder *enc,
- const struct encoder_unblank_param *param)
+void enc32_stream_encoder_dp_unblank(
+ struct dc_link *link,
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct dc *dc = enc->ctx->dc;
@@ -288,6 +286,7 @@ static void enc32_stream_encoder_dp_unblank(
uint32_t n_vid = 0x8000;
uint32_t m_vid;
uint32_t n_multiply = 0;
+ uint32_t pix_per_cycle = 0;
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
@@ -295,6 +294,7 @@ static void enc32_stream_encoder_dp_unblank(
|| is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
+ pix_per_cycle = 1;
}
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
@@ -322,6 +322,10 @@ static void enc32_stream_encoder_dp_unblank(
REG_UPDATE_2(DP_VID_TIMING,
DP_VID_M_N_GEN_EN, 1,
DP_VID_N_MUL, n_multiply);
+
+ REG_UPDATE(DP_PIXEL_FORMAT,
+ DP_PIXEL_PER_CYCLE_PROCESSING_MODE,
+ pix_per_cycle);
}
/* make sure stream is disabled before resetting steer fifo */
@@ -373,7 +377,7 @@ static void enc32_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
@@ -421,6 +425,33 @@ static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pi
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0);
}
+static void enc32_reset_fifo(struct stream_encoder *enc, bool reset)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val = reset ? 1 : 0;
+ uint32_t is_symclk_on;
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
+ REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);
+
+ if (is_symclk_on)
+ REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
+ else
+ udelay(10);
+}
+
+void enc32_enable_fifo(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
+ enc32_reset_fifo(enc, true);
+ enc32_reset_fifo(enc, false);
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
+}
+
static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
.dp_set_odm_combine =
enc32_dp_set_odm_combine,
@@ -436,6 +467,8 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
@@ -466,6 +499,7 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.set_input_mode = enc32_set_dig_input_mode,
+ .enable_fifo = enc32_enable_fifo,
};
void dcn32_dio_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index ecd041a446d2..1be5410cce97 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -31,70 +31,6 @@
#include "stream_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
-#define SE_DCN32_REG_LIST(id)\
- SRI(AFMT_CNTL, DIG, id), \
- SRI(DIG_FE_CNTL, DIG, id), \
- SRI(HDMI_CONTROL, DIG, id), \
- SRI(HDMI_DB_CONTROL, DIG, id), \
- SRI(HDMI_GC, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
- SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
- SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
- SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
- SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
- SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
- SRI(HDMI_ACR_32_0, DIG, id),\
- SRI(HDMI_ACR_32_1, DIG, id),\
- SRI(HDMI_ACR_44_0, DIG, id),\
- SRI(HDMI_ACR_44_1, DIG, id),\
- SRI(HDMI_ACR_48_0, DIG, id),\
- SRI(HDMI_ACR_48_1, DIG, id),\
- SRI(DP_DB_CNTL, DP, id), \
- SRI(DP_MSA_MISC, DP, id), \
- SRI(DP_MSA_VBID_MISC, DP, id), \
- SRI(DP_MSA_COLORIMETRY, DP, id), \
- SRI(DP_MSA_TIMING_PARAM1, DP, id), \
- SRI(DP_MSA_TIMING_PARAM2, DP, id), \
- SRI(DP_MSA_TIMING_PARAM3, DP, id), \
- SRI(DP_MSA_TIMING_PARAM4, DP, id), \
- SRI(DP_MSE_RATE_CNTL, DP, id), \
- SRI(DP_MSE_RATE_UPDATE, DP, id), \
- SRI(DP_PIXEL_FORMAT, DP, id), \
- SRI(DP_SEC_CNTL, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id), \
- SRI(DP_SEC_CNTL2, DP, id), \
- SRI(DP_SEC_CNTL5, DP, id), \
- SRI(DP_SEC_CNTL6, DP, id), \
- SRI(DP_STEER_FIFO, DP, id), \
- SRI(DP_VID_M, DP, id), \
- SRI(DP_VID_N, DP, id), \
- SRI(DP_VID_STREAM_CNTL, DP, id), \
- SRI(DP_VID_TIMING, DP, id), \
- SRI(DP_SEC_AUD_N, DP, id), \
- SRI(DP_SEC_TIMESTAMP, DP, id), \
- SRI(DP_DSC_CNTL, DP, id), \
- SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
- SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
- SRI(DP_SEC_FRAMING4, DP, id), \
- SRI(DP_GSP11_CNTL, DP, id), \
- SRI(DME_CONTROL, DME, id),\
- SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
- SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
- SRI(DIG_FE_CNTL, DIG, id), \
- SRI(DIG_CLOCK_PATTERN, DIG, id), \
- SRI(DIG_FIFO_CTRL0, DIG, id)
-
-
#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
@@ -258,4 +194,12 @@ void dcn32_dio_stream_encoder_construct(
const struct dcn10_stream_encoder_shift *se_shift,
const struct dcn10_stream_encoder_mask *se_mask);
+
+void enc32_enable_fifo(struct stream_encoder *enc);
+
+void enc32_stream_encoder_dp_unblank(
+ struct dc_link *link,
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+
#endif /* __DC_DIO_STREAM_ENCODER_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
index 4dbad8d4b4fc..8af01f579690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
@@ -26,7 +26,6 @@
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32_hpo_dp_link_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
#include "stream_encoder.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 9501403a48a9..eb08ccc38e79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -945,6 +945,35 @@ void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
+void hubbub32_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ /* Enable clock gate*/
+ if (hubbub->ctx->dc->debug.disable_clock_gate) {
+ /*done in hwseq*/
+ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+
+ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ }
+	/*
+	 * Ignore the "df_pre_cstate_req" from the SDP port control.
+	 * Only the DCN will determine when to connect the SDP port.
+	 */
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, 1);
+	/* Set SDP's max outstanding request to 512;
+	 * the register must be set back to 0 (max outstanding = 256) in zero frame buffer mode.
+	 */
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
+ SDPIF_MAX_NUM_OUTSTANDING, 1);
+	/* Must set the registers back to 256 in zero frame buffer mode. */
+ REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512);
+}
+
static const struct hubbub_funcs hubbub32_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index 786f9ce07f92..ad33427192c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -28,63 +28,6 @@
#include "dcn21/dcn21_hubbub.h"
-#define HUBBUB_REG_LIST_DCN32(id)\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
- SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
- SR(DCHUBBUB_ARB_SAT_LEVEL),\
- SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
- SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
- SR(DCHUBBUB_SOFT_RESET),\
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE),\
- HUBBUB_SR_WATERMARK_REG_LIST(), \
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
- SR(DCHUBBUB_DET0_CTRL),\
- SR(DCHUBBUB_DET1_CTRL),\
- SR(DCHUBBUB_DET2_CTRL),\
- SR(DCHUBBUB_DET3_CTRL),\
- SR(DCHUBBUB_COMPBUF_CTRL),\
- SR(COMPBUF_RESERVED_SPACE),\
- SR(DCHUBBUB_DEBUG_CTRL_0),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D),\
- SR(DCN_VM_FAULT_ADDR_MSB),\
- SR(DCN_VM_FAULT_ADDR_LSB),\
- SR(DCN_VM_FAULT_CNTL),\
- SR(DCN_VM_FAULT_STATUS),\
- SR(SDPIF_REQUEST_RATE_LIMIT)
-
#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
@@ -96,6 +39,7 @@
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \
@@ -161,7 +105,14 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
- HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh)
+ HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
+
+
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
@@ -191,6 +142,8 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+void hubbub32_init(struct hubbub *hubbub);
+
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index ac1c6458dd55..2d604f7ee782 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -47,6 +47,15 @@ void hubp32_update_force_pstate_disallow(struct hubp *hubp, bool pstate_disallow
DATA_UCLK_PSTATE_FORCE_VALUE, 0);
}
+void hubp32_update_force_cursor_pstate_disallow(struct hubp *hubp, bool pstate_disallow)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE_2(UCLK_PSTATE_FORCE,
+ CURSOR_UCLK_PSTATE_FORCE_EN, pstate_disallow,
+ CURSOR_UCLK_PSTATE_FORCE_VALUE, 0);
+}
+
void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -155,7 +164,11 @@ void hubp32_cursor_set_attributes(
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
}
-
+void hubp32_init(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -184,6 +197,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
+ .hubp_update_force_cursor_pstate_disallow = hubp32_update_force_cursor_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
index 56ef71151536..d2acbc129609 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
@@ -31,12 +31,6 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
-#define HUBP_REG_LIST_DCN32(id)\
- HUBP_REG_LIST_DCN30(id),\
- SRI(DCHUBP_MALL_CONFIG, HUBP, id),\
- SRI(DCHUBP_VMPG_CONFIG, HUBP, id),\
- SRI(UCLK_PSTATE_FORCE, HUBPREQ, id)
-
#define HUBP_MASK_SH_LIST_DCN32(mask_sh)\
HUBP_MASK_SH_LIST_DCN31(mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, USE_MALL_SEL, mask_sh),\
@@ -52,6 +46,8 @@
void hubp32_update_force_pstate_disallow(struct hubp *hubp, bool pstate_disallow);
+void hubp32_update_force_cursor_pstate_disallow(struct hubp *hubp, bool pstate_disallow);
+
void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
void hubp32_prepare_subvp_buffering(struct hubp *hubp, bool enable);
@@ -61,6 +57,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp);
void hubp32_cursor_set_attributes(struct hubp *hubp,
const struct dc_cursor_attributes *attr);
+void hubp32_init(struct hubp *hubp);
+
bool hubp32_construct(
struct dcn20_hubp *hubp2,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index b8767be1e4c5..1f5ee5cde6e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -50,7 +50,7 @@
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32_resource.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "dmub/inc/dmub_subvp_state.h"
#define DC_LOGGER_INIT(logger)
@@ -131,10 +131,15 @@ void dcn32_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
@@ -146,6 +151,9 @@ void dcn32_enable_power_gating_plane(
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn32_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
@@ -188,7 +196,8 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
/* First, check no-memory-request case */
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->stream_status[i].plane_count)
+ if ((dc->current_state->stream_status[i].plane_count) &&
+ (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
/* Fail eligibility on a visible stream */
break;
}
@@ -206,151 +215,31 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- int i, j;
- struct dc_stream_state *stream = NULL;
- struct dc_plane_state *plane = NULL;
- uint32_t cursor_size = 0;
- uint32_t total_lines = 0;
- uint32_t lines_per_way = 0;
+ int i;
uint8_t num_ways = 0;
- uint8_t bytes_per_pixel = 0;
- uint8_t cursor_bpp = 0;
- uint16_t mblk_width = 0;
- uint16_t mblk_height = 0;
- uint16_t mall_alloc_width_blk_aligned = 0;
- uint16_t mall_alloc_height_blk_aligned = 0;
- uint16_t num_mblks = 0;
- uint32_t bytes_in_mall = 0;
- uint32_t cache_lines_used = 0;
- uint32_t cache_lines_per_plane = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- /* If PSR is supported on an eDP panel that's connected, but that panel is
- * not in PSR at the time of trying to enter MALL SS, we have to include it
- * in the static screen CAB calculation
- */
- if (!pipe->stream || !pipe->plane_state ||
- (pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- pipe->stream->link->psr_settings.psr_allow_active) ||
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
- continue;
-
- bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mblk_width = DCN3_2_MBLK_WIDTH;
- mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+ uint32_t mall_ss_size_bytes = 0;
- /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
- * FLOOR(vp_x_start, blk_width)
- *
- * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
- */
- mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
- pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
- (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
-
- /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
- * FLOOR(vp_y_start, blk_height)
- *
- * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
- */
- mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
- pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
- (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
-
- num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
- ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
-
- /*For DCC:
- * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
- */
- if (pipe->plane_state->dcc.enable)
- num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
- (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
-
- bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
-
- /* (cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
- * (MALL is 64-byte aligned)
- */
- cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
- cache_lines_used += cache_lines_per_plane;
- }
+ mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+ // TODO add additional logic for PSR active stream exclusion optimization
+ // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
// Include cursor size for CAB allocation
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
- struct hubp *hubp = pipe->plane_res.hubp;
-
- if (pipe->stream && pipe->plane_state && hubp)
- /* Find the cursor plane and use the exact size instead of
- using the max for calculation */
-
- if (hubp->curs_attr.width > 0) {
- cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
-
- switch (pipe->stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- cursor_bpp = 4;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- cursor_bpp = 4;
- break;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- cursor_bpp = 8;
- break;
- }
+ if (!pipe->stream || !pipe->plane_state)
+ continue;
- if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
- cursor_size > 16384) {
- /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
- */
- cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
- DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
- dc->caps.cache_line_size + 2;
- break;
- }
- }
+ mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
}
// Convert number of cache lines required to number of ways
- total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
-
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- for (i = 0; i < ctx->stream_count; i++) {
- stream = ctx->streams[i];
- for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
- plane = ctx->stream_status[i].plane_states[j];
-
- if (stream->cursor_position.enable && plane &&
- dc->debug.alloc_extra_way_for_cursor &&
- cursor_size > 16384) {
- /* Cursor caching is not supported since it won't be on the same line.
- * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
- * this case triggers corruption. If we're at the edge, then dont trigger display refresh
- * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp.
- */
- num_ways++;
- /* We only expect one cursor plane */
- break;
- }
- }
- }
if (dc->debug.force_mall_ss_num_ways > 0) {
num_ways = dc->debug.force_mall_ss_num_ways;
+ } else {
+ num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);
}
+
return num_ways;
}
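
For reference, the way count produced by this new path can be reproduced on its own. The sketch below is illustrative only, assuming hypothetical cap values (64-byte cache lines, 16 ways, 64 MiB of CAB) in place of what the driver reads from dc->caps; the +2 worst-case-alignment lines and the final round-up mirror the dcn32_helper_mall_bytes_to_ways() logic added later in this patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cache parameters; the driver takes these from dc->caps. */
#define CACHE_LINE_SIZE     64u
#define CACHE_NUM_WAYS      16u
#define MAX_CAB_ALLOC_BYTES (64u * 1024u * 1024u)

/* Mirror of the bytes-to-ways rounding used by the helper. */
static uint32_t mall_bytes_to_ways(uint32_t total_size_in_mall_bytes)
{
	uint32_t cache_lines_used, total_cache_lines, lines_per_way, num_ways;

	/* +2 lines for worst-case 64-byte alignment of the allocation */
	cache_lines_used = total_size_in_mall_bytes / CACHE_LINE_SIZE + 2;

	total_cache_lines = MAX_CAB_ALLOC_BYTES / CACHE_LINE_SIZE;
	lines_per_way = total_cache_lines / CACHE_NUM_WAYS;

	num_ways = cache_lines_used / lines_per_way;
	if (cache_lines_used % lines_per_way > 0)
		num_ways++;

	return num_ways;
}

int main(void)
{
	/* e.g. 2 MiB of static-screen surface data plus a 16 KiB cursor */
	uint32_t mall_ss_size_bytes = 2u * 1024u * 1024u + 16u * 1024u;

	printf("ways needed: %u\n", mall_bytes_to_ways(mall_ss_size_bytes));
	return 0;
}

With these example numbers a single way is enough; dc->debug.force_mall_ss_num_ways still overrides the result, as in the function above.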
@@ -365,6 +254,13 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
if (!dc->ctx->dmub_srv)
return false;
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ /* MALL SS messaging is not supported with PSR at this time */
+ if (dc->current_state->streams[i] != NULL &&
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+ }
+
if (enable) {
if (dc->current_state) {
@@ -518,7 +414,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
}
-static bool dcn32_set_mpc_shaper_3dlut(
+bool dcn32_set_mpc_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -675,39 +571,56 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
return ret;
}
-/* Program P-State force value according to if pipe is using SubVP or not:
+/* Program P-State force value according to whether the pipe is using SubVP / FPO or not:
* 1. Reset P-State force on all pipes first
* 2. For each main pipe, force P-State disallow (P-State allow moderated by DMUB)
*/
-void dcn32_subvp_update_force_pstate(struct dc *dc, struct dc_state *context)
+void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
{
int i;
- int num_subvp = 0;
- /* Unforce p-state for each pipe
+
+ /* Unforce p-state for each pipe if it is not FPO or SubVP.
+ * For FPO and SubVP, if it's already forced disallow, leave
+ * it as disallow.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
- hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
- num_subvp++;
- }
+ if (!pipe->stream || (pipe->stream && !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ pipe->stream->fpo_in_use))) {
+ if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
+ hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
+ }
- if (num_subvp == 0)
- return;
+ /* Today only FPO uses cursor P-State force. Only clear cursor P-State force
+ * if it's not FPO.
+ */
+ if (!pipe->stream || (pipe->stream && !pipe->stream->fpo_in_use)) {
+ if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
+ hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false);
+ }
+ }
/* Loop through each pipe -- for each subvp main pipe force p-state allow equal to false.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
- if (pipe->stream && pipe->plane_state && (pipe->stream->mall_stream_config.type == SUBVP_MAIN)) {
- struct hubp *hubp = pipe->plane_res.hubp;
+ if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
+ hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
+ }
+ if (pipe->stream && pipe->stream->fpo_in_use) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
+ /* For now only force cursor p-state disallow for FPO
+ * Needs to be added for subvp once FW side gets updated
+ */
+ if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
+ hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, true);
}
}
}
@@ -781,10 +694,6 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
if (hws && hws->funcs.update_mall_sel)
hws->funcs.update_mall_sel(dc, context);
- //update subvp force pstate
- if (hws && hws->funcs.subvp_update_force_pstate)
- dc->hwseq->funcs.subvp_update_force_pstate(dc, context);
-
// Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -803,6 +712,35 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
}
}
+static void dcn32_initialize_min_clocks(struct dc *dc)
+{
+ struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
+
+ clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
+ clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
+ clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
+ clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
+ clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
+ if (dc->debug.disable_boot_optimizations) {
+ clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
+ } else {
+ /* Even though DPG_EN = 1 for the connected display, it still requires the
+ * correct timing so we cannot set DISPCLK to min freq or it could cause
+ * audio corruption. Read current DISPCLK from DENTIST and request the same
+ * freq to ensure that the timing is valid and unchanged.
+ */
+ clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
+ clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+ clocks->fclk_p_state_change_support = true;
+ clocks->p_state_change_support = true;
+ }
+
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ dc->current_state,
+ true);
+}
+
void dcn32_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -878,13 +816,14 @@ void dcn32_init_hw(struct dc *dc)
}
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* Call enable_power_gating_plane before dsc_pg_control because
+ * FORCEON = 1 is the hw default value on bootup and resume from s3
+ */
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -893,10 +832,29 @@ void dcn32_init_hw(struct dc *dc)
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
- hws->funcs.init_pipes(dc, dc->current_state);
+ /* Disabling boot optimizations means powering down everything, including PHY, DIG,
+ * and OTG (i.e. the boot is not optimized because we do a full power down).
+ */
+ if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
+ dc->hwss.enable_accelerated_mode(dc, dc->current_state);
+ else
+ hws->funcs.init_pipes(dc, dc->current_state);
+
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+
+ dcn32_initialize_min_clocks(dc);
+
+ /* On HW init, allow idle optimizations after pipes have been turned off.
+ *
+ * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
+ * is reset (i.e. not in idle at the time hw init is called), but software state
+ * still has idle_optimizations = true, so we must disable idle optimizations first
+ * (i.e. set false), then re-enable (set true).
+ */
+ dc_allow_idle_optimizations(dc, false);
+ dc_allow_idle_optimizations(dc, true);
}
/* In headless boot cases, DIG may be turned
@@ -908,7 +866,7 @@ void dcn32_init_hw(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
@@ -966,8 +924,6 @@ void dcn32_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -975,7 +931,7 @@ void dcn32_init_hw(struct dc *dc)
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
@@ -992,6 +948,7 @@ void dcn32_init_hw(struct dc *dc)
if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
}
@@ -1175,16 +1132,16 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
- } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
+ } else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
- } else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
@@ -1239,7 +1196,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -1266,7 +1223,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
return false;
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
return false;
@@ -1300,7 +1257,8 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
@@ -1332,7 +1290,7 @@ void dcn32_disable_link_output(struct dc_link *link,
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}
@@ -1450,3 +1408,122 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
}
+
+void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ unsigned int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* If an active, non-phantom pipe is being transitioned into a phantom
+ * pipe, wait for the double buffer update to complete first before we do
+ * ANY phantom pipe programming.
+ */
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
+ old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ }
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ // If old context or new context has phantom pipes, apply
+ // the phantom timings now. We can't change the phantom
+ // pipe configuration safely without driver acquiring
+ // the DMCUB lock first.
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ break;
+ }
+ }
+}
+
+/* Blank pixel data during initialization */
+void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ struct output_pixel_processor *bottom_opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ /* get the OTG active size */
+ tg->funcs->get_otg_active_size(tg,
+ &otg_active_width,
+ &otg_active_height);
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (num_opps == 2) {
+ otg_active_width = otg_active_width / 2;
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src1) {
+ bottom_opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (num_opps == 2) {
+ if (bottom_opp && bottom_opp->funcs->opp_set_disp_pattern_generator) {
+ bottom_opp->funcs->opp_set_disp_pattern_generator(
+ bottom_opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+ hws->funcs.wait_for_blank_complete(bottom_opp);
+ }
+ }
+
+ if (opp)
+ hws->funcs.wait_for_blank_complete(opp);
+}
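
As a small illustration of the two-OPP branch above: under ODM 2:1 combine each OPP pattern generator blanks half of the OTG active width. The snippet is a toy calculation with made-up timing numbers, not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t otg_active_width = 3840;  /* hypothetical 4k timing */
	uint32_t otg_active_height = 2160;
	uint32_t num_opps = 2;             /* ODM 2:1 combine */

	/* Mirrors the halving done in dcn32_init_blank() for two OPPs */
	if (num_opps == 2)
		otg_active_width = otg_active_width / 2;

	printf("each OPP blanks a %ux%u region\n",
	       otg_active_width, otg_active_height);
	return 0;
}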
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 7de36529cf99..6694c1d14aa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -54,6 +54,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
+bool dcn32_set_mpc_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream);
+
bool dcn32_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream);
@@ -64,7 +67,7 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context);
void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context);
-void dcn32_subvp_update_force_pstate(struct dc *dc, struct dc_state *context);
+void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context);
void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
@@ -102,4 +105,10 @@ void dcn32_update_dsc_pg(struct dc *dc,
struct dc_state *context,
bool safe_to_disable);
+void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+
+void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index dc4649458567..8085f2acb1a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -30,6 +30,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32_hwseq.h"
+#include "dcn32_init.h"
static const struct hw_sequencer_funcs dcn32_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
@@ -94,7 +95,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
- .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
+ .does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
@@ -106,6 +107,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
+ .enable_phantom_streams = dcn32_enable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
@@ -130,7 +132,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
- .init_blank = dcn20_init_blank,
+ .init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
@@ -147,7 +149,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn32_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
- .subvp_update_force_pstate = dcn32_subvp_update_force_pstate,
+ .update_force_pstate = dcn32_update_force_pstate,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 206a5ddbaf6d..c8041cfd594d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -42,7 +42,7 @@
mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name
-static void mpc32_mpc_init(struct mpc *mpc)
+void mpc32_mpc_init(struct mpc *mpc)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
@@ -254,7 +254,7 @@ static void mpc32_program_post1dlut_pwl(
}
}
-static bool mpc32_program_post1dlut(
+bool mpc32_program_post1dlut(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -701,7 +701,7 @@ static void mpc32_power_on_shaper_3dlut(
}
-static bool mpc32_program_shaper(
+bool mpc32_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -897,7 +897,7 @@ static void mpc32_set_3dlut_mode(
}
-static bool mpc32_program_3dlut(
+bool mpc32_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
index 61f33c0d8e59..2c2ecd053806 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
@@ -310,6 +310,19 @@ struct dcn32_mpc_registers {
MPC_REG_VARIABLE_LIST_DCN3_0;
MPC_REG_VARIABLE_LIST_DCN32;
};
+void mpc32_mpc_init(struct mpc *mpc);
+bool mpc32_program_3dlut(
+ struct mpc *mpc,
+ const struct tetrahedral_params *params,
+ int mpcc_id);
+bool mpc32_program_post1dlut(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+bool mpc32_program_shaper(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
void dcn32_mpc_construct(struct dcn30_mpc *mpc30,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
index 5e57c39235fa..b92ba8c75694 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
@@ -28,77 +28,6 @@
#include "dcn10/dcn10_optc.h"
-#define OPTC_COMMON_REG_LIST_DCN3_2(inst) \
- SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
- SRI(OTG_VUPDATE_PARAM, OTG, inst),\
- SRI(OTG_VREADY_PARAM, OTG, inst),\
- SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\
- SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
- SRI(OTG_H_TOTAL, OTG, inst),\
- SRI(OTG_H_BLANK_START_END, OTG, inst),\
- SRI(OTG_H_SYNC_A, OTG, inst),\
- SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
- SRI(OTG_H_TIMING_CNTL, OTG, inst),\
- SRI(OTG_V_TOTAL, OTG, inst),\
- SRI(OTG_V_BLANK_START_END, OTG, inst),\
- SRI(OTG_V_SYNC_A, OTG, inst),\
- SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
- SRI(OTG_CONTROL, OTG, inst),\
- SRI(OTG_STEREO_CONTROL, OTG, inst),\
- SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
- SRI(OTG_STEREO_STATUS, OTG, inst),\
- SRI(OTG_V_TOTAL_MAX, OTG, inst),\
- SRI(OTG_V_TOTAL_MIN, OTG, inst),\
- SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
- SRI(OTG_TRIGA_CNTL, OTG, inst),\
- SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
- SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
- SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
- SRI(OTG_STATUS, OTG, inst),\
- SRI(OTG_STATUS_POSITION, OTG, inst),\
- SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
- SRI(OTG_M_CONST_DTO0, OTG, inst),\
- SRI(OTG_M_CONST_DTO1, OTG, inst),\
- SRI(OTG_CLOCK_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
- SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
- SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
- SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
- SRI(CONTROL, VTG, inst),\
- SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
- SRI(OTG_GSL_CONTROL, OTG, inst),\
- SRI(OTG_CRC_CNTL, OTG, inst),\
- SRI(OTG_CRC0_DATA_RG, OTG, inst),\
- SRI(OTG_CRC0_DATA_B, OTG, inst),\
- SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
- SR(GSL_SOURCE_SELECT),\
- SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
- SRI(OTG_GSL_WINDOW_X, OTG, inst),\
- SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
- SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
- SRI(OTG_DSC_START_POSITION, OTG, inst),\
- SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\
- SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\
- SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
- SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
- SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
- SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
-
#define OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index e4dbc8353ea3..22dd1ebea618 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -57,7 +57,6 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "dc_link_dp.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
@@ -69,7 +68,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -325,7 +324,6 @@ static const struct dcn10_link_enc_shift le_shift = {
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
-
//DPCS_DCN31_MASK_SH_LIST(_MASK)
};
@@ -658,8 +656,6 @@ static const struct resource_caps res_cap_dcn32 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -726,6 +722,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
+ .disable_unbounded_requesting = false,
+ .override_dispclk_programming = true,
+ .disable_fpo_optimizations = false,
+ .fpo_vactive_margin_us = 2000, // 2000us
+ .disable_fpo_vactive = true,
+ .disable_boot_optimizations = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1506,8 +1508,11 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
@@ -1611,7 +1616,6 @@ bool dcn32_acquire_post_bldn_3dlut(
struct dc_transfer_func **shaper)
{
bool ret = false;
- union dc_3dlut_state *state;
ASSERT(*lut == NULL && *shaper == NULL);
*lut = NULL;
@@ -1620,7 +1624,6 @@ bool dcn32_acquire_post_bldn_3dlut(
if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) {
*lut = pool->mpc_lut[mpcc_id];
*shaper = pool->mpc_shaper[mpcc_id];
- state = &pool->mpc_lut[mpcc_id]->state;
res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true;
ret = true;
}
@@ -1913,8 +1916,8 @@ int dcn32_populate_dml_pipes_from_context(
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool subvp_in_use = false;
- uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
+ bool vsr_odm_support = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@@ -1932,12 +1935,15 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
+ res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
if (context->stream_count == 1 &&
context->stream_status[0].plane_count == 1 &&
!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
- dc->debug.enable_single_display_2to1_odm_policy) {
+ dc->debug.enable_single_display_2to1_odm_policy &&
+ !vsr_odm_support) { // exclude 2to1 ODM combine on >= 5k VSR
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;
@@ -2002,7 +2008,7 @@ int dcn32_populate_dml_pipes_from_context(
}
DC_FP_START();
- is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
+ dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
@@ -2017,7 +2023,7 @@ int dcn32_populate_dml_pipes_from_context(
// In general cases we want to keep the dram clock change requirement
// (prefer configs that support MCLK switch). Only override to false
// for SubVP
- if (subvp_in_use)
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
else
context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
@@ -2073,6 +2079,14 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.restore_mall_state = dcn32_restore_mall_state,
};
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+ /* DCN32 support max 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
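
The masked fuse value returned here is consumed further down in dcn32_resource_construct(), where each set bit removes one pipe from the pool. A minimal standalone sketch of that accounting, with a made-up fuse value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pipe_fuses = 0x2; /* hypothetical: pipe 1 fused off */
	uint32_t num_pipes = 4;    /* DCN32 cap before applying fuses */
	uint32_t i;

	/* Only the low 4 bits are meaningful, as in read_pipe_fuses() */
	pipe_fuses &= 0xf;

	/* Each set bit disables one pipe, mirroring the construct loop */
	for (i = 0; i < 4; i++)
		if (pipe_fuses & (1u << i))
			num_pipes--;

	printf("usable pipes: %u\n", num_pipes);
	return 0;
}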
static bool dcn32_resource_construct(
uint8_t num_virtual_links,
@@ -2086,27 +2100,28 @@ static bool dcn32_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
- #undef REG_STRUCT
- #define REG_STRUCT bios_regs
- bios_regs_init();
-
- #undef REG_STRUCT
- #define REG_STRUCT clk_src_regs
- clk_src_regs_init(0, A),
- clk_src_regs_init(1, B),
- clk_src_regs_init(2, C),
- clk_src_regs_init(3, D),
- clk_src_regs_init(4, E);
- #undef REG_STRUCT
- #define REG_STRUCT abm_regs
- abm_regs_init(0),
- abm_regs_init(1),
- abm_regs_init(2),
- abm_regs_init(3);
-
- #undef REG_STRUCT
- #define REG_STRUCT dccg_regs
- dccg_regs_init();
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
DC_FP_START();
@@ -2115,7 +2130,7 @@ static bool dcn32_resource_construct(
pool->base.res_cap = &res_cap_dcn32;
/* max number of pipes for ASIC before checking for pipe fuses */
num_pipes = pool->base.res_cap->num_timing_generator;
- pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
+ pipe_fuses = read_pipe_fuses(ctx);
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
if (pipe_fuses & 1 << i)
@@ -2149,13 +2164,19 @@ static bool dcn32_resource_construct(
dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.mall_size_per_mem_channel = 0;
+ dc->caps.mall_size_per_mem_channel = 4;
dc->caps.mall_size_total = 0;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
- dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
+
+ /* Calculate the available MALL space */
+ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+ dc, dc->ctx->dc_bios->vram_info.num_chans) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
@@ -2176,6 +2197,7 @@ static bool dcn32_resource_construct(
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.seamless_odm = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -2449,7 +2471,7 @@ static bool dcn32_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
@@ -2592,3 +2614,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
return idle_pipe;
}
+
+unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
+{
+ /*
+ * DCN32 and DCN321 SKUs may have different sizes for MALL
+ * but we may not be able to access all the MALL space.
+ * If the num_chans is power of 2, then we can access all
+ * of the available MALL space. Otherwise, we can only
+ * access:
+ *
+ * max_cab_size_in_bytes = total_cache_size_in_bytes *
+ * ((2^floor(log2(num_chans)))/num_chans)
+ *
+ * Calculating the MALL sizes for all available SKUs, we
+ * have come up with the following simplified check.
+ * - we have max_chans which provides the max MALL size.
+ *   Each channel supports 4 MB of MALL, so:
+ *
+ * total_cache_size_in_bytes = max_chans * 4 MB
+ *
+ * - we have avail_chans which shows the number of channels
+ * we can use if we can't access the entire MALL space.
+ * It is generally half of max_chans
+ * - so we use the following checks:
+ *
+ * if (num_chans == max_chans), return max_chans
+ * if (num_chans < max_chans), return avail_chans
+ *
+ * - exception is GC_11_0_0 where we can't access max_chans,
+ * so we define max_avail_chans as the maximum available
+ * MALL space
+ *
+ */
+ int gc_11_0_0_max_chans = 48;
+ int gc_11_0_0_max_avail_chans = 32;
+ int gc_11_0_0_avail_chans = 16;
+ int gc_11_0_3_max_chans = 16;
+ int gc_11_0_3_avail_chans = 8;
+ int gc_11_0_2_max_chans = 8;
+ int gc_11_0_2_avail_chans = 4;
+
+ if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
+ return (num_chans == gc_11_0_0_max_chans) ?
+ gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
+ } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
+ return (num_chans == gc_11_0_2_max_chans) ?
+ gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
+ } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
+ return (num_chans == gc_11_0_3_max_chans) ?
+ gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
+ }
+}
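
The per-ASIC table above follows from the general rule in the comment: when num_chans is not a power of two, only the largest power-of-two subset of channels is addressable, and each channel contributes 4 MiB of MALL (dc->caps.mall_size_per_mem_channel in this patch). A standalone sketch of that rule, ignoring the GC_11_0_0 exception and using made-up channel counts:

#include <stdint.h>
#include <stdio.h>

/* Largest power of two <= num_chans, i.e. 2^floor(log2(num_chans)) */
static unsigned int accessible_chans(unsigned int num_chans)
{
	unsigned int p = 1;

	while (p * 2 <= num_chans)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int counts[] = { 16, 12, 8, 6 }; /* hypothetical configs */
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned int chans = accessible_chans(counts[i]);
		uint64_t cab_bytes = (uint64_t)chans * 4 * 1024 * 1024;

		printf("%u channels -> %u usable -> %llu bytes of CAB\n",
		       counts[i], chans, (unsigned long long)cab_bytes);
	}
	return 0;
}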
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 13fbc574910b..3937dbc1e552 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -38,6 +38,8 @@
#define DCN3_2_MBLK_HEIGHT_4BPE 128
#define DCN3_2_MBLK_HEIGHT_8BPE 64
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10 MHz for the initial DCFCLK DS freq
+#define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -96,8 +98,17 @@ void dcn32_calculate_wm_and_dlg(
int pipe_cnt,
int vlevel);
-uint32_t dcn32_helper_calculate_num_ways_for_subvp
- (struct dc *dc,
+uint32_t dcn32_helper_mall_bytes_to_ways(
+ struct dc *dc,
+ uint32_t total_size_in_mall_bytes);
+
+uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool ignore_cursor_buf);
+
+uint32_t dcn32_helper_calculate_num_ways_for_subvp(
+ struct dc *dc,
struct dc_state *context);
void dcn32_merge_pipes_for_subvp(struct dc *dc,
@@ -112,6 +123,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+bool dcn32_is_center_timing(struct pipe_ctx *pipe);
+bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
@@ -134,6 +147,14 @@ void dcn32_restore_mall_state(struct dc *dc,
struct dc_state *context,
struct mall_temp_config *temp_config);
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context);
+
+bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
+
+unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
+
+double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
@@ -454,6 +475,7 @@ void dcn32_restore_mall_state(struct dc *dc,
SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \
SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \
SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \
+ SRI_ARR(DSCL_CONTROL, DSCL, id), \
SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \
SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
@@ -1258,7 +1280,8 @@ void dcn32_restore_mall_state(struct dc *dc,
DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \
DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
- SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE) \
+ SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \
+ SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL) \
)
/* VMID */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 783935c4e664..eeca16faf31a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -27,19 +27,78 @@
#include "dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
+#include "dml/dcn32/dcn32_fpu.h"
static bool is_dual_plane(enum surface_pixel_format format)
{
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+
+uint32_t dcn32_helper_mall_bytes_to_ways(
+ struct dc *dc,
+ uint32_t total_size_in_mall_bytes)
+{
+ uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
+
+ /* add 2 lines for worst case alignment */
+ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
+
+ total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
+ lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
+ num_ways = cache_lines_used / lines_per_way;
+ if (cache_lines_used % lines_per_way > 0)
+ num_ways++;
+
+ return num_ways;
+}
+
+uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool ignore_cursor_buf)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+ uint32_t cursor_mall_size_bytes = 0;
+
+ switch (pipe_ctx->stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ break;
+ }
+
+ /* only count if cursor is enabled, and if additional allocation needed outside of the
+ * DCN cursor buffer
+ */
+ if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
+ cursor_size > 16384)) {
+ /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
+ * Note: add 1 mblk in case of cursor misalignment
+ */
+ cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+ DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
+ }
+
+ return cursor_mall_size_bytes;
+}
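
A worked example of the sizing above for a hypothetical 256x256 32-bit cursor. The MALL block size below is a placeholder; the real DCN3_2_MALL_MBLK_SIZE_BYTES is defined in dcn32_resource.h and is not visible in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Placeholder; substitute the real DCN3_2_MALL_MBLK_SIZE_BYTES */
#define MBLK_SIZE_BYTES 65536u

int main(void)
{
	uint32_t pitch = 256, height = 256;
	uint32_t cursor_size = pitch * height; /* pixel count before Bpe scaling */
	uint32_t bytes = 0;

	/* 32-bit ARGB cursor: 4 bytes per pixel, as in the switch above */
	cursor_size *= 4;

	/* Larger than the 16 KiB cursor buffer, so MALL space is needed:
	 * round up to whole MALL blocks and add one for misalignment.
	 */
	if (cursor_size > 16384)
		bytes = ((cursor_size + MBLK_SIZE_BYTES - 1) / MBLK_SIZE_BYTES + 1) *
			MBLK_SIZE_BYTES;

	printf("cursor MALL allocation: %u bytes\n", bytes);
	return 0;
}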
+
/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
*
- * This function first checks the bytes required per pixel on the SubVP pipe, then calculates
- * the total number of pixels required in the SubVP MALL region. These are used to calculate
- * the number of cache lines used (then number of ways required) for SubVP MCLK switching.
+ * Gets the total allocation required for the phantom viewport, calculated by DML in bytes, and
+ * converts it to a number of cache ways.
*
* @param [in] dc: current dc state
* @param [in] context: new dc state
@@ -48,106 +107,19 @@ static bool is_dual_plane(enum surface_pixel_format format)
*
* ********************************************************************************************
*/
-uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
+uint32_t dcn32_helper_calculate_num_ways_for_subvp(
+ struct dc *dc,
+ struct dc_state *context)
{
- uint32_t num_ways = 0;
- uint32_t bytes_per_pixel = 0;
- uint32_t cache_lines_used = 0;
- uint32_t lines_per_way = 0;
- uint32_t total_cache_lines = 0;
- uint32_t bytes_in_mall = 0;
- uint32_t num_mblks = 0;
- uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0, j = 0;
- uint16_t mblk_width = 0;
- uint16_t mblk_height = 0;
- uint32_t full_vp_width_blk_aligned = 0;
- uint32_t full_vp_height_blk_aligned = 0;
- uint32_t mall_alloc_width_blk_aligned = 0;
- uint32_t mall_alloc_height_blk_aligned = 0;
- uint16_t full_vp_height = 0;
- bool subvp_in_use = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /* Find the phantom pipes.
- * - For pipe split case we need to loop through the bottom and next ODM
- * pipes or only half the viewport size is counted
- */
- if (pipe->stream && pipe->plane_state &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- struct pipe_ctx *main_pipe = NULL;
-
- subvp_in_use = true;
- /* Get full viewport height from main pipe (required for MBLK calculation) */
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- main_pipe = &context->res_ctx.pipe_ctx[j];
- if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
- full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
- break;
- }
- }
-
- bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mblk_width = DCN3_2_MBLK_WIDTH;
- mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
-
- /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
- * FLOOR(vp_x_start, blk_width)
- */
- full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
- pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
- (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
-
- /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
- * FLOOR(vp_y_start, blk_height)
- */
- full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
- full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
- (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
-
- /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
- mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
-
- /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
- mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
- mblk_height * mblk_height + mblk_height;
-
- /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
- * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
- * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
- * (Should be divisible, but round up if not)
- */
- num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
- ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
-
- /*For DCC:
- * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
- */
- if (pipe->plane_state->dcc.enable)
- num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
- (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
-
- bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- // cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
- // (MALL is 64-byte aligned)
- cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
-
- cache_lines_used += cache_lines_per_plane;
+ if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
+ if (dc->debug.force_subvp_num_ways) {
+ return dc->debug.force_subvp_num_ways;
+ } else {
+ return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
}
+ } else {
+ return 0;
}
-
- total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
- num_ways = dc->debug.force_subvp_num_ways;
-
- return num_ways;
}
void dcn32_merge_pipes_for_subvp(struct dc *dc,
@@ -255,6 +227,39 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
return false;
}
+bool dcn32_is_center_timing(struct pipe_ctx *pipe)
+{
+ bool is_center_timing = false;
+
+ if (pipe->stream) {
+ if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
+ pipe->stream->timing.v_addressable != pipe->stream->src.height) {
+ is_center_timing = true;
+ }
+ }
+
+ if (pipe->plane_state) {
+ if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
+ pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
+ is_center_timing = true;
+ }
+ }
+
+ return is_center_timing;
+}
+
+bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
+{
+ bool psr_capable = false;
+
+ if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
+ psr_capable = true;
+ }
+ return psr_capable;
+}
+
+#define DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER 7
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -266,7 +271,6 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
* If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
* number of DET for that given plane will be split among the pipes driving that plane.
*
- *
* High level algorithm:
* 1. Split total DET among number of streams
* 2. For each stream, split DET among the planes
@@ -274,6 +278,18 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
* among those pipes.
* 4. Assign the DET override to the DML pipes.
*
+ * Special cases:
+ *
+ * For two displays that have a large difference in pixel rate, we may experience
+ * underflow on the larger display when we divide the DET equally. For this, we
+ * will implement a modified algorithm to assign more DET to larger display.
+ *
+ * 1. Calculate difference in pixel rates ( multiplier ) between two displays
+ * 2. If the multiplier exceeds DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER, then
+ * implement the modified DET override algorithm.
+ * 3. Assign smaller DET size for lower pixel display and higher DET size for
+ * higher pixel display
+ *
* @param [in]: dc: Current DC state
* @param [in]: context: New DC state to be programmed
* @param [in]: pipes: Array of DML pipes
@@ -293,18 +309,46 @@ void dcn32_determine_det_override(struct dc *dc,
struct dc_plane_state *current_plane = NULL;
uint8_t stream_count = 0;
+ int phy_pix_clk_mult, lower_mode_stream_index;
+ int phy_pix_clk[MAX_PIPES] = {0};
+ bool use_new_det_override_algorithm = false;
+
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+ phy_pix_clk[i] = context->streams[i]->phy_pix_clk;
stream_count++;
}
}
+ /* Check for special case with two displays, one with much higher pixel rate */
+ if (stream_count == 2) {
+ ASSERT((phy_pix_clk[0] > 0) && (phy_pix_clk[1] > 0));
+ if (phy_pix_clk[0] < phy_pix_clk[1]) {
+ lower_mode_stream_index = 0;
+ phy_pix_clk_mult = phy_pix_clk[1] / phy_pix_clk[0];
+ } else {
+ lower_mode_stream_index = 1;
+ phy_pix_clk_mult = phy_pix_clk[0] / phy_pix_clk[1];
+ }
+
+ if (phy_pix_clk_mult >= DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER)
+ use_new_det_override_algorithm = true;
+ }
+
if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
continue;
+
+ if (use_new_det_override_algorithm) {
+ if (i == lower_mode_stream_index)
+ stream_segments = 4;
+ else
+ stream_segments = 14;
+ }
+
if (context->stream_status[i].plane_count > 0)
plane_segments = stream_segments / context->stream_status[i].plane_count;
else
@@ -357,6 +401,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
+ bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -373,7 +418,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
*/
if (pipe_cnt == 1) {
pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
- if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
if (!is_dual_plane(pipe->plane_state->format)) {
pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
pipes[0].pipe.src.unbounded_req_mode = true;
@@ -456,3 +501,158 @@ void dcn32_restore_mall_state(struct dc *dc,
pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
}
}
+
+#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must match the value in FW)
+/*
+ * Scaling factor for v_blank stretch calculations considering timing in
+ * micro-seconds and pixel clock in 100hz.
+ * Note: the parentheses are necessary to ensure the correct order of
+ * operations where V_SCALE is used.
+ */
+#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
+
+static int get_frame_rate_at_max_stretch_100hz(
+ struct dc_stream_state *fpo_candidate_stream,
+ uint32_t fpo_vactive_margin_us)
+{
+ struct dc_crtc_timing *timing = NULL;
+ uint32_t sec_per_100_lines;
+ uint32_t max_v_blank;
+ uint32_t curr_v_blank;
+ uint32_t v_stretch_max;
+ uint32_t stretched_frame_pix_cnt;
+ uint32_t scaled_stretched_frame_pix_cnt;
+ uint32_t scaled_refresh_rate;
+ uint32_t v_scale;
+
+ if (fpo_candidate_stream == NULL)
+ return 0;
+
+ /* check if refresh rate at least 120hz */
+ timing = &fpo_candidate_stream->timing;
+ if (timing == NULL)
+ return 0;
+
+ v_scale = 10000 / (MAX_STRETCHED_V_BLANK + fpo_vactive_margin_us);
+
+ sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
+ max_v_blank = sec_per_100_lines / v_scale + 1;
+ curr_v_blank = timing->v_total - timing->v_addressable;
+ v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
+ stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
+ scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
+ scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;
+
+ return scaled_refresh_rate;
+
+}
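
The arithmetic above can be restated with explicit parameters to see what refresh rate a given timing drops to when its vblank is stretched to the full budget. The sketch below mirrors the integer math of get_frame_rate_at_max_stretch_100hz(); the timing values in main() are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MAX_STRETCHED_V_BLANK_US 1000 /* total vblank budget, matches the define above */

static uint32_t stretched_refresh_100hz(uint32_t pix_clk_100hz, uint32_t h_total,
					 uint32_t v_total, uint32_t v_addressable,
					 uint32_t vactive_margin_us)
{
	uint32_t v_scale = 10000 / (MAX_STRETCHED_V_BLANK_US + vactive_margin_us);
	uint32_t sec_per_100_lines = pix_clk_100hz / h_total + 1;
	uint32_t max_v_blank = sec_per_100_lines / v_scale + 1;
	uint32_t curr_v_blank = v_total - v_addressable;
	uint32_t v_stretch_max = max_v_blank > curr_v_blank ? max_v_blank - curr_v_blank : 0;
	uint32_t stretched_pix_cnt = (v_stretch_max + v_total) * h_total;
	uint32_t scaled_pix_cnt = stretched_pix_cnt / 10000;

	return scaled_pix_cnt ? pix_clk_100hz / scaled_pix_cnt + 1 : 0;
}

int main(void)
{
	/* hypothetical 2560x1440@165 timing: h_total 2720, v_total 1525 */
	uint32_t r = stretched_refresh_100hz(6844200, 2720, 1525, 1440, 0);

	printf("refresh at max stretch: %u.%02u Hz\n", r / 100, r % 100);
	return 0;
}

The FPO check then compares this stretched rate against the stream's minimum freesync refresh, as in is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch() above.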
+
+static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(
+ struct dc_stream_state *fpo_candidate_stream, uint32_t fpo_vactive_margin_us)
+{
+ int refresh_rate_max_stretch_100hz;
+ int min_refresh_100hz;
+
+ if (fpo_candidate_stream == NULL)
+ return false;
+
+ refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(fpo_candidate_stream, fpo_vactive_margin_us);
+ min_refresh_100hz = fpo_candidate_stream->timing.min_refresh_in_uhz / 10000;
+
+ if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
+ return false;
+
+ return true;
+}
+
+static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
+{
+ int refresh_rate = 0;
+ int h_v_total = 0;
+ struct dc_crtc_timing *timing = NULL;
+
+ if (fpo_candidate_stream == NULL)
+ return 0;
+
+ /* check if refresh rate at least 120hz */
+ timing = &fpo_candidate_stream->timing;
+ if (timing == NULL)
+ return 0;
+
+ h_v_total = timing->h_total * timing->v_total;
+ if (h_v_total == 0)
+ return 0;
+
+ refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
+ return refresh_rate;
+}
+
+/**
+ * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch - Determines if config can support FPO
+ *
+ * @param [in]: dc - current dc state
+ * @param [in]: context - new dc state
+ *
+ * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
+ */
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
+{
+ int refresh_rate = 0;
+ const int minimum_refreshrate_supported = 120;
+ struct dc_stream_state *fpo_candidate_stream = NULL;
+ bool is_fpo_vactive = false;
+ uint32_t fpo_vactive_margin_us = 0;
+
+ if (context == NULL)
+ return NULL;
+
+ if (dc->debug.disable_fams)
+ return NULL;
+
+ if (!dc->caps.dmub_caps.mclk_sw)
+ return NULL;
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
+ return NULL;
+
+ /* For FPO we can support up to 2 display configs if:
+ * - the first display uses FPO
+ * - the second display switches in VACTIVE */
+ if (context->stream_count > 2)
+ return NULL;
+ else if (context->stream_count == 2) {
+ DC_FP_START();
+ dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
+ DC_FP_END();
+
+ DC_FP_START();
+ is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US);
+ DC_FP_END();
+ if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
+ return NULL;
+ } else
+ fpo_candidate_stream = context->streams[0];
+
+ if (!fpo_candidate_stream)
+ return NULL;
+
+ if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
+ return NULL;
+
+ refresh_rate = get_refresh_rate(fpo_candidate_stream);
+ if (refresh_rate < minimum_refreshrate_supported)
+ return NULL;
+
+ fpo_vactive_margin_us = is_fpo_vactive ? dc->debug.fpo_vactive_margin_us : 0; // For now hardcode the FPO + Vactive stretch margin to be 2000us
+ if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us))
+ return NULL;
+
+ // check if freesync enabled
+ if (!fpo_candidate_stream->allow_freesync)
+ return NULL;
+
+ if (fpo_candidate_stream->vrr_active_variable)
+ return NULL;
+
+ return fpo_candidate_stream;
+}
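
A minimal standalone sketch of the max-stretch arithmetic in get_frame_rate_at_max_stretch_100hz() above. The MAX_STRETCHED_V_BLANK value of 1000us and the 4k144 timing are illustrative assumptions; only the formula itself is taken from the code above.

#include <stdio.h>
#include <stdint.h>

#define MAX_STRETCHED_V_BLANK 1000	/* assumed v_blank budget, in us */

struct timing_example {			/* mirrors the dc_crtc_timing fields used above */
	uint32_t pix_clk_100hz;
	uint32_t h_total;
	uint32_t v_total;
	uint32_t v_addressable;
};

static int frame_rate_at_max_stretch_100hz(const struct timing_example *t,
					    uint32_t margin_us)
{
	uint32_t v_scale = 10000 / (MAX_STRETCHED_V_BLANK + margin_us);
	uint32_t sec_per_100_lines = t->pix_clk_100hz / t->h_total + 1;
	uint32_t max_v_blank = sec_per_100_lines / v_scale + 1;
	uint32_t curr_v_blank = t->v_total - t->v_addressable;
	uint32_t v_stretch_max = max_v_blank > curr_v_blank ? max_v_blank - curr_v_blank : 0;
	uint32_t stretched_pix = (v_stretch_max + t->v_total) * t->h_total;

	/* refresh rate of the maximally stretched frame, in 0.01 Hz units */
	return t->pix_clk_100hz / (stretched_pix / 10000) + 1;
}

int main(void)
{
	/* hypothetical 4k144 timing: ~1425.6 MHz pixel clock */
	struct timing_example t = { 14256000, 4400, 2250, 2160 };

	printf("refresh at max stretch: %d x 0.01 Hz\n",
	       frame_rate_at_max_stretch_100hz(&t, 0));
	return 0;
}
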
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index fa9b6603cfd3..13be5f06d987 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -31,7 +31,6 @@
#include "dcn321_dio_link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index d1f36df03c2e..a60ddb343d13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -60,7 +60,6 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "dc_link_dp.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
@@ -73,7 +72,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -656,8 +655,6 @@ static const struct resource_caps res_cap_dcn321 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -724,6 +721,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
+ .disable_unbounded_requesting = false,
+ .override_dispclk_programming = true,
+ .disable_fpo_optimizations = false,
+ .fpo_vactive_margin_us = 2000, // 2000us
+ .disable_fpo_vactive = true,
+ .disable_boot_optimizations = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1491,8 +1494,11 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
@@ -1626,6 +1632,14 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.restore_mall_state = dcn32_restore_mall_state,
};
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+ /* DCN321 supports a maximum of 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
static bool dcn321_resource_construct(
uint8_t num_virtual_links,
@@ -1668,7 +1682,7 @@ static bool dcn321_resource_construct(
pool->base.res_cap = &res_cap_dcn321;
/* max number of pipes for ASIC before checking for pipe fuses */
num_pipes = pool->base.res_cap->num_timing_generator;
- pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
+ pipe_fuses = read_pipe_fuses(ctx);
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
if (pipe_fuses & 1 << i)
@@ -1702,12 +1716,18 @@ static bool dcn321_resource_construct(
dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.mall_size_per_mem_channel = 0;
+ dc->caps.mall_size_per_mem_channel = 4;
dc->caps.mall_size_total = 0;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
- dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
+
+ /* Calculate the available MALL space */
+ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+ dc, dc->ctx->dc_bios->vram_info.num_chans) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
@@ -1990,7 +2010,7 @@ static bool dcn321_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
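
The MALL sizing above replaces the hardcoded 32MB cap with a per-channel calculation. The sketch below is just the arithmetic; the 16-channel count stands in for whatever dcn32_calc_num_avail_chans_for_mall() returns and is illustrative only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mall_size_per_mem_channel = 4;	/* MB, as set in dcn321_resource_construct() above */
	uint32_t num_avail_chans = 16;		/* illustrative; the driver derives this from vram_info */
	uint64_t max_cab_allocation_bytes =
		(uint64_t)num_avail_chans * mall_size_per_mem_channel * 1024 * 1024;

	/* 16 channels x 4 MB per channel = 64 MiB of CAB/MALL space */
	printf("%llu bytes\n", (unsigned long long)max_cab_allocation_bytes);
	return 0;
}
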
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index af1c50ed905a..7ce9a5b6c33b 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -161,6 +161,12 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_link *link,
struct dc_sink *sink);
+bool dm_helpers_dp_handle_test_pattern_request(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ union link_test_pattern dpcd_test_pattern,
+ union test_misc dpcd_test_params);
+
void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);
@@ -193,6 +199,7 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
const struct dc_link *link,
struct set_config_cmd_payload *payload,
enum set_config_status *operation_result);
+enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link);
enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 0ecea87cf48f..01db035589c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -26,7 +26,8 @@
# subcomponents.
ifdef CONFIG_X86
-dml_ccflags := -mhard-float -msse
+dml_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float
+dml_ccflags := $(dml_ccflags-y) -msse
endif
ifdef CONFIG_PPC64
@@ -128,7 +129,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
DML += dcn20/dcn20_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index c26da3bb2892..f1c1a4b5fcac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -26,12 +26,12 @@
#include "resource.h"
#include "clk_mgr.h"
-#include "dc_link_dp.h"
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
+#include "link.h"
#include "dcn20_fpu.h"
#define DC_LOGGER_INIT(logger)
@@ -917,19 +917,19 @@ void dcn20_populate_dml_writeback_from_context(struct dc *dc,
}
void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt, int i)
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt, int i)
{
- int k;
+ int k;
- dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
- for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
- wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
- wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
+ for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
+ wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+ wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
}
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
@@ -938,7 +938,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
@@ -949,7 +949,6 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
int plane_count;
int i;
unsigned int optimized_min_dst_y_next_start_us;
- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
plane_count = 0;
optimized_min_dst_y_next_start_us = 0;
@@ -974,6 +973,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ int minimum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
+ bool is_pwrseq0 = link->link_index == 0;
if (dc_extended_blank_supported(dc)) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -986,28 +988,60 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
}
}
}
- /* zstate only supported on PWRSEQ0 and when there's <2 planes*/
- if (link->link_index != 0 || stream_status->plane_count > 1)
+
+ /* Don't support multi-plane configurations */
+ if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
+ if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
- else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
- } else if (allow_z8) {
- return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
} else {
return DCN_ZSTATE_SUPPORT_DISALLOW;
}
}
-void dcn20_calculate_dlg_params(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
+static void dcn20_adjust_freesync_v_startup(
+ const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
+void dcn20_calculate_dlg_params(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
{
int i, pipe_idx;
@@ -1049,6 +1083,7 @@ void dcn20_calculate_dlg_params(
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
@@ -1057,11 +1092,17 @@ void dcn20_calculate_dlg_params(
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
}
+
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
pipe_idx++;
}
/*save a original dppclock copy*/
@@ -1079,6 +1120,7 @@ void dcn20_calculate_dlg_params(
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ /* cstate disabled on 201 */
if (dc->ctx->dce_version == DCN_VERSION_2_01)
cstate_en = false;
@@ -1162,11 +1204,10 @@ static void swizzle_to_dml_params(
}
}
-int dcn20_populate_dml_pipes_from_context(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+int dcn20_populate_dml_pipes_from_context(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
@@ -1218,6 +1259,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+ pipes[pipe_cnt].pipe.dest.use_maximum_vstartup = dc->ctx->dce_version == DCN_VERSION_2_01;
+
pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
/* todo: rotation?*/
pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
@@ -1257,8 +1300,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
pipes[pipe_cnt].dout.dp_lanes = 4;
- if (res_ctx->pipe_ctx[i].stream->link)
- pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_na;
+ pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_na;
pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
@@ -1302,7 +1344,7 @@ int dcn20_populate_dml_pipes_from_context(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
pipes[pipe_cnt].dout.output_type = dm_dp;
- if (is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&res_ctx->pipe_ctx[i]))
pipes[pipe_cnt].dout.output_type = dm_dp2p0;
break;
case SIGNAL_TYPE_EDP:
@@ -1318,7 +1360,6 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.is_virtual = 1;
pipes[pipe_cnt].dout.output_type = dm_dp;
pipes[pipe_cnt].dout.dp_lanes = 4;
- pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_hbr2;
}
switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
@@ -1468,6 +1509,7 @@ int dcn20_populate_dml_pipes_from_context(
default:
break;
}
+
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
pipes[pipe_cnt].pipe.src.viewport_x_y = scl->viewport.x;
@@ -1576,13 +1618,12 @@ int dcn20_populate_dml_pipes_from_context(
return pipe_cnt;
}
-void dcn20_calculate_wm(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *out_pipe_cnt,
- int *pipe_split_from,
- int vlevel,
- bool fast_validate)
+void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *out_pipe_cnt,
+ int *pipe_split_from,
+ int vlevel,
+ bool fast_validate)
{
int pipe_cnt, i, pipe_idx;
@@ -1694,8 +1735,11 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
-void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
- struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
+void dcn20_update_bounding_box(struct dc *dc,
+ struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table *max_clocks,
+ unsigned int *uclk_states,
+ unsigned int num_states)
{
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -1757,9 +1801,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
bb->clock_limits[num_calculated_states].state = bb->num_states;
}
-void dcn20_cap_soc_clocks(
- struct _vcs_dpi_soc_bounding_box_st *bb,
- struct pp_smu_nv_clock_table max_clocks)
+void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table max_clocks)
{
int i;
@@ -1915,80 +1958,80 @@ validate_out:
}
bool dcn20_validate_bandwidth_fp(struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
+ struct dc_state *context,
+ bool fast_validate)
{
- bool voltage_supported = false;
- bool full_pstate_supported = false;
- bool dummy_pstate_supported = false;
- double p_state_latency_us;
+ bool voltage_supported = false;
+ bool full_pstate_supported = false;
+ bool dummy_pstate_supported = false;
+ double p_state_latency_us;
- dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
- p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
- context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
- dc->debug.disable_dram_clock_change_vactive_support;
- context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
- dc->debug.enable_dram_clock_change_one_display_vactive;
+ p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+ dc->debug.disable_dram_clock_change_vactive_support;
+ context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+ dc->debug.enable_dram_clock_change_one_display_vactive;
- /*Unsafe due to current pipe merge and split logic*/
- ASSERT(context != dc->current_state);
+ /*Unsafe due to current pipe merge and split logic*/
+ ASSERT(context != dc->current_state);
- if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true);
- }
+ if (fast_validate) {
+ return dcn20_validate_bandwidth_internal(dc, context, true);
+ }
- // Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
- full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ // Best case, we support full UCLK switch latency
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
- (voltage_supported && full_pstate_supported)) {
- context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
- goto restore_dml_state;
- }
+ if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
+ (voltage_supported && full_pstate_supported)) {
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
+ goto restore_dml_state;
+ }
- // Fallback: Try to only support G6 temperature read latency
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+ // Fallback: Try to only support G6 temperature read latency
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
- dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
- context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
- goto restore_dml_state;
- }
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ goto restore_dml_state;
+ }
- // ERROR: fallback is supposed to always work.
- ASSERT(false);
+ // ERROR: fallback is supposed to always work.
+ ASSERT(false);
restore_dml_state:
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
- return voltage_supported;
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+ return voltage_supported;
}
void dcn20_fpu_set_wm_ranges(int i,
- struct pp_smu_wm_range_sets *ranges,
- struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{
- dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
- ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
- ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
+ ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
+ ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
}
void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
- int vlevel,
- int max_mpc_comb,
- int pipe_idx,
- bool is_validating_bw)
+ int vlevel,
+ int max_mpc_comb,
+ int pipe_idx,
+ bool is_validating_bw)
{
- dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
- if (is_validating_bw)
- v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
- else
- v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
+ if (is_validating_bw)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
+ else
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
}
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
@@ -2290,7 +2333,7 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
k++;
}
- memcpy(dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
+ memcpy(&dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
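
The reindented dcn20_validate_bandwidth_fp() above keeps its two-pass structure: validate with the real DRAM clock-change latency first, and only if full p-state switching does not fit, retry with the shorter dummy (G6 temperature read) latency before restoring the original value. Below is a control-flow sketch with a stubbed validate() standing in for dcn20_validate_bandwidth_internal(); the 20us threshold and the context fields are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the DML context fields used by the fallback above. */
struct ctx_example {
	double dram_clock_change_latency_us;
	double dummy_pstate_latency_us;
	bool p_state_change_support;
	int stream_count;
};

/* Hypothetical validator: p-state switching fits if latency <= 20us. */
static bool validate(struct ctx_example *c)
{
	c->p_state_change_support = c->dram_clock_change_latency_us <= 20.0;
	return true;	/* voltage always supported in this toy model */
}

static bool validate_with_fallback(struct ctx_example *c)
{
	double saved = c->dram_clock_change_latency_us;
	bool voltage = validate(c);
	bool full_pstate = c->p_state_change_support;

	if (c->dummy_pstate_latency_us == 0 || (voltage && full_pstate))
		goto restore;

	/* Fallback: only guarantee the dummy (G6 temp read) latency. */
	c->dram_clock_change_latency_us = c->dummy_pstate_latency_us;
	voltage = validate(c);
	if (voltage && (c->p_state_change_support || !c->stream_count))
		c->p_state_change_support = false;

restore:
	c->dram_clock_change_latency_us = saved;
	return voltage;
}

int main(void)
{
	struct ctx_example c = { 35.0, 10.0, false, 1 };

	printf("voltage ok: %d, full p-state: %d\n",
	       validate_with_fallback(&c), c.p_state_change_support);
	return 0;
}
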
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index d3b5b6fedf04..6266b0788387 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
@@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index edd098c7eb92..989d83ee3842 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
@@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 1d84ae50311d..b7c2844d0cbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -4102,17 +4102,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
@@ -4165,7 +4165,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
@@ -5230,7 +5230,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
- mode_lib->vba.ODMCombineEnabled[k] = false;
+ mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index d4c0f9cdac8e..a352c703e258 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -368,7 +368,9 @@ void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
dc_assert_fp_enabled();
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
}
@@ -384,9 +386,38 @@ void dcn30_fpu_calculate_wm_and_dlg(
int i, pipe_idx;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+ unsigned int dummy_latency_index = 0;
dc_assert_fp_enabled();
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i])
+ context->streams[i]->fpo_in_use = false;
+ }
+
+ if (!pstate_en) {
+ /* fw based vblank stretch is only attempted when a natural mclk switch is not possible */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching =
+ dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+ context, pipes, pipe_cnt, vlevel);
+
+ /* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
+ * we reinstate the original dram_clock_change_latency_us on the context
+ * and all variables that may have changed up to this point, except the
+ * newly found dummy_latency_index
+ */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+ }
+ }
+
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -449,15 +480,29 @@ void dcn30_fpu_calculate_wm_and_dlg(
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
unsigned int min_dram_speed_mts_margin = 160;
- if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
+ dm_dram_clock_change_unsupported) {
+ int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1;
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ min_dram_speed_mts =
+ dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
+ }
+
+ if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ /* find the largest table entry that is lower than the dram speed;
+ * anything lower than DPM0 still uses DPM0
+ */
+ for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin >
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts)
+ break;
+ }
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
@@ -520,6 +565,20 @@ void dcn30_fpu_calculate_wm_and_dlg(
pipe_idx++;
}
+ // WA: restrict FPO to use first non-strobe mode (NV24 BW issue)
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching &&
+ dc->dml.soc.num_chans <= 4 &&
+ context->bw_ctx.dml.vba.DRAMSpeed <= 1700 &&
+ context->bw_ctx.dml.vba.DRAMSpeed >= 1500) {
+
+ for (i = 0; i < dc->dml.soc.num_states; i++) {
+ if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) {
+ context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts;
+ break;
+ }
+ }
+ }
+
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en)
@@ -634,7 +693,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
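
dcn30_find_dummy_latency_index_for_fw_based_mclk_switch() above walks the dummy p-state latency table and revalidates until it finds a latency that still allows self refresh or an mclk switch. A minimal sketch of that search follows; the latency values and the 10us threshold in validates_with() are illustrative stand-ins for the table from clk_mgr and for dcn30_internal_validate_bw().

#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 4

/* Illustrative dummy p-state latency candidates, in us. */
static const double dummy_latency_us[MAX_ENTRIES] = { 38.0, 9.0, 5.0, 3.0 };

/* Hypothetical validator: the config is assumed to tolerate mclk-switch
 * latencies of 10us or less. */
static bool validates_with(double latency_us)
{
	return latency_us <= 10.0;
}

static int find_dummy_latency_index(void)
{
	int i;

	for (i = 0; i < MAX_ENTRIES; i++) {
		if (validates_with(dummy_latency_us[i]))
			break;
	}
	return i;	/* MAX_ENTRIES means no entry validated */
}

int main(void)
{
	printf("dummy latency index: %d\n", find_dummy_latency_index());
	return 0;
}
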
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 379729b02847..7d0626e42ea6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -23,9 +23,7 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_30.h"
#include "../dml_inline_defs.h"
@@ -1802,7 +1800,10 @@ static unsigned int CalculateVMAndRowBytes(
}
if (SurfaceTiling == dm_sw_linear) {
- *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
+ if (PTEBufferSizeInRequests == 0)
+ *dpte_row_height = 1;
+ else
+ *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection != dm_vert) {
@@ -4863,7 +4864,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
- &v->UrgentBurstFactorChroma[k],
+ &v->UrgentBurstFactorChromaPre[k],
&v->NoUrgentLatencyHidingPre[k]);
}
@@ -6632,4 +6633,3 @@ static noinline_for_stack void UseMinimumDCFCLK(
}
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8179be1f34bb..cd3cfcb2a2b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -23,8 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
-
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
@@ -1792,4 +1790,3 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b37d14369a62..59836570603a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.maximum_dsc_bits_per_component = 10,
.dsc422_native_support = false,
.is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 49,
+ .line_buffer_fixed_bpp = 48,
.line_buffer_size_bits = 789504,
.max_line_buffer_lines = 12,
.writeback_interface_buffer_size_kbytes = 90,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ec351c8418cb..bd674dc30df3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -24,7 +24,6 @@
*/
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
@@ -878,7 +877,9 @@ static bool CalculatePrefetchSchedule(
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
+#ifdef __DML_VBA_DEBUG__
double Tsw_oto;
+#endif
double prefetch_bw_oto;
double prefetch_bw_pr;
double Tvm_oto;
@@ -1060,7 +1061,9 @@ static bool CalculatePrefetchSchedule(
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
+#ifdef __DML_VBA_DEBUG__
Tsw_oto = Lsw_oto * LineTime;
+#endif
#ifdef __DML_VBA_DEBUG__
@@ -4304,11 +4307,11 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
- } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
- if (v->Output[k] == dm_dp) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
@@ -4316,107 +4319,201 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
- v->RequiresFEC[i][k] = false;
- }
-
- v->Outbpp = BPP_INVALID;
- if (v->PHYCLKPerState[i] >= 270.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 2700,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 5400,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 8100,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 10000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
}
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- 12000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->Outbpp = BPP_INVALID;
+ if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
+ v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
+ v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
+ v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
+ v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
+ }
+ } else {
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
}
}
} else {
@@ -5094,7 +5191,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
- &v->UrgentBurstFactorChroma[k],
+ &v->UrgentBurstFactorChromaPre[k],
&v->NotUrgentLatencyHidingPre[k]);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 35d10b4d018b..2244e4fb8c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -902,7 +902,6 @@ static void dml_rq_dlg_get_dlg_params(
double hratio_c;
double vratio_l;
double vratio_c;
- bool scl_enable;
unsigned int swath_width_ub_l;
unsigned int dpte_groups_per_row_ub_l;
@@ -1020,7 +1019,6 @@ static void dml_rq_dlg_get_dlg_params(
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
- scl_enable = scl->scl_enable;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 6a1cf6adea77..9e54e3d0eb78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -97,7 +97,7 @@ struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -149,8 +149,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
.num_states = 5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
- .sr_exit_z8_time_us = 280.0,
- .sr_enter_plus_exit_z8_time_us = 350.0,
+ .sr_exit_z8_time_us = 268.0,
+ .sr_enter_plus_exit_z8_time_us = 393.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -308,6 +308,10 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
upscaled = true;
+ /* Apply HostVM policy: enable it when the hypervisor is globally enabled or the rIOMMU is active */
+ if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
+ pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled || dc->res_pool->hubbub->riommu_active;
+
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
@@ -346,7 +350,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (pipe_cnt == 1 && pipe->plane_state
+ && pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
dc->config.enable_4to1MPC = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 950669f2c10d..7eb2173b7691 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -27,7 +27,6 @@
#define UNIT_TEST 0
#if !UNIT_TEST
#include "dc.h"
-#include "dc_link.h"
#endif
#include "../display_mode_lib.h"
#include "display_mode_vba_314.h"
@@ -900,7 +899,9 @@ static bool CalculatePrefetchSchedule(
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
+#ifdef __DML_VBA_DEBUG__
double Tsw_oto;
+#endif
double prefetch_bw_oto;
double prefetch_bw_pr;
double Tvm_oto;
@@ -1082,7 +1083,9 @@ static bool CalculatePrefetchSchedule(
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
+#ifdef __DML_VBA_DEBUG__
Tsw_oto = Lsw_oto * LineTime;
+#endif
#ifdef __DML_VBA_DEBUG__
@@ -3183,7 +3186,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
- v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
+ v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
@@ -4402,11 +4405,11 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
- } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
- if (v->Output[k] == dm_dp) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
@@ -4414,107 +4417,201 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
- v->RequiresFEC[i][k] = false;
- }
-
- v->Outbpp = BPP_INVALID;
- if (v->PHYCLKPerState[i] >= 270.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 2700,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 5400,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 8100,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 10000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
}
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- 12000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->Outbpp = BPP_INVALID;
+ if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
+ v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
+ v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
+ v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
+ v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
+ }
+ } else {
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
}
}
} else {
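
[Editor's note] For context on the UHBR branches added above: going by the thresholds, PHYCLKD18PerState appears to be the DP 2.x (128b/132b) PHY clock expressed as the per-lane link rate in Mbps divided by 18, so the gates work out to 10000/18 ~= 555.6 MHz for UHBR10, 13500/18 = 750 MHz for UHBR13.5 and 20000/18 ~= 1111.1 MHz for UHBR20. A small standalone sketch (names and the selection order are illustrative, not the driver's; the hunk itself tries UHBR10 first and only escalates when TruncToValidBPP returns BPP_INVALID) maps a clock to the fastest UHBR rate it can drive:

	#include <stdio.h>

	/* Illustrative helper, not a driver function: report the fastest DP 2.x rate a
	 * given PHYCLKD18 value (in MHz) can sustain, using the same /18 thresholds. */
	static const char *fastest_uhbr_rate(double phyclk_d18_mhz)
	{
		if (phyclk_d18_mhz >= 20000.0 / 18.0)
			return "UHBR20";
		if (phyclk_d18_mhz >= 13500.0 / 18.0)
			return "UHBR13.5";
		if (phyclk_d18_mhz >= 10000.0 / 18.0)
			return "UHBR10";
		return "none";
	}

	int main(void)
	{
		printf("625 MHz  -> %s\n", fastest_uhbr_rate(625.0));   /* UHBR10   */
		printf("750 MHz  -> %s\n", fastest_uhbr_rate(750.0));   /* UHBR13.5 */
		printf("1200 MHz -> %s\n", fastest_uhbr_rate(1200.0));  /* UHBR20   */
		return 0;
	}
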
@@ -5191,7 +5288,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
- &v->UrgentBurstFactorChroma[k],
+ &v->UrgentBurstFactorChromaPre[k],
&v->NotUrgentLatencyHidingPre[k]);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index 61ee9ba063a7..ea4eb66066c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -51,7 +51,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
- } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+ } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
@@ -988,7 +988,6 @@ static void dml_rq_dlg_get_dlg_params(
double hratio_c;
double vratio_l;
double vratio_c;
- bool scl_enable;
unsigned int swath_width_ub_l;
unsigned int dpte_groups_per_row_ub_l;
@@ -1001,7 +1000,6 @@ static void dml_rq_dlg_get_dlg_params(
unsigned int vupdate_width;
unsigned int vready_offset;
- unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int vstartup_start;
@@ -1118,7 +1116,6 @@ static void dml_rq_dlg_get_dlg_params(
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
- scl_enable = scl->scl_enable;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
@@ -1130,17 +1127,8 @@ static void dml_rq_dlg_get_dlg_params(
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
-
- dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
- if (scl_enable)
- dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
- else
- dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
-
- dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
-
if (dout->dsc_enable) {
double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // FROM VBA
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f94abd124021..47beb4ea779d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -24,13 +24,14 @@
*
*/
#include "dcn32_fpu.h"
-#include "dc_link_dp.h"
#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "display_mode_vba_util_32.h"
+#include "dml/dcn32/display_mode_vba_32.h"
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -108,7 +109,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
{
.state = 0,
.dcfclk_mhz = 1564.0,
- .fabricclk_mhz = 400.0,
+ .fabricclk_mhz = 2500.0,
.dispclk_mhz = 2150.0,
.dppclk_mhz = 2150.0,
.phyclk_mhz = 810.0,
@@ -116,7 +117,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.phyclk_d32_mhz = 625.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 716.667,
- .dram_speed_mts = 16000.0,
+ .dram_speed_mts = 18000.0,
.dtbclk_mhz = 1564.0,
},
},
@@ -130,7 +131,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .fclk_change_latency_us = 20,
+ .fclk_change_latency_us = 25,
.usr_retraining_latency_us = 2,
.smn_latency_us = 2,
.mall_allocated_for_dcn_mbytes = 64,
@@ -147,7 +148,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.max_avg_fabric_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_strobe_percent = 50.0,
.max_avg_dram_bw_use_normal_percent = 15.0,
- .num_chans = 8,
+ .num_chans = 24,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.return_bus_width_bytes = 64,
@@ -691,9 +692,11 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe &&
+ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
- vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
+ (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
+ (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
+ dcn32_allow_subvp_with_active_margin(pipe)))) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
@@ -877,6 +880,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
+ const struct dc_config *config = &dc->config;
+
+ if (config->disable_subvp_drr)
+ return false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -977,10 +984,12 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipe = pipe;
}
- // Use ignore_msa_timing_param flag to identify as DRR
- if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
- // SUBVP + DRR case
- schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]);
+ // Use ignore_msa_timing_param together with VRR active, or the Freesync flag, to identify DRR as on

+ if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param &&
+ (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync ||
+ context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) {
+ // SUBVP + DRR case -- only allowed if run through DRR validation path
+ schedulable = false;
} else if (found) {
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
@@ -1084,12 +1093,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
{
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
unsigned int dc_pipe_idx = 0;
+ int i = 0;
bool found_supported_config = false;
struct pipe_ctx *pipe = NULL;
uint32_t non_subvp_pipes = 0;
bool drr_pipe_found = false;
uint32_t drr_pipe_index = 0;
- uint32_t i = 0;
dc_assert_fp_enabled();
@@ -1169,15 +1178,25 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* Check whether the requested vlevel supports p-state;
+ * if not, select the lowest vlevel that does
+ */
+ for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
+ if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
+ *vlevel = i;
+ break;
+ }
+ }
+
if (*vlevel < context->bw_ctx.dml.soc.num_states &&
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported
&& subvp_validate_static_schedulability(dc, context, *vlevel)) {
found_supported_config = true;
- } else if (*vlevel < context->bw_ctx.dml.soc.num_states &&
- vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- /* Case where 1 SubVP is added, and DML reports MCLK unsupported. This handles
- * the case for SubVP + DRR, where the DRR display does not support MCLK switch
- * at it's native refresh rate / timing.
+ } else if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ /* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed.
+ * This handles the case for SubVP + DRR, where the DRR display does not support MCLK
+ * switch at its native refresh rate / timing, or DRR is allowed for the non-subvp
+ * display.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1185,7 +1204,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
// Use ignore_msa_timing_param flag to identify as DRR
- if (pipe->stream->ignore_msa_timing_param) {
+ if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {
drr_pipe_found = true;
drr_pipe_index = i;
}
@@ -1194,6 +1213,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
// If there is only 1 remaining non SubVP pipe that is DRR, check static
// schedulability for SubVP + DRR.
if (non_subvp_pipes == 1 && drr_pipe_found) {
+ /* find lowest vlevel that supports the config */
+ for (i = *vlevel; i >= 0; i--) {
+ if (vba->ModeSupport[i][vba->maxMpcComb]) {
+ *vlevel = i;
+ } else {
+ break;
+ }
+ }
+
found_supported_config = subvp_drr_schedulable(dc, context,
&context->res_ctx.pipe_ctx[drr_pipe_index]);
}
@@ -1242,12 +1270,44 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
}
+static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
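
[Editor's note] The dcn20_adjust_freesync_v_startup() helper added in the hunk above clamps VSTARTUP to the vertical blank that remains after the (possibly stretched) front porch. A minimal worked example with assumed 1080p-style timings, not values taken from this patch, traces the same arithmetic:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed progressive timing: 1080 active, v_total 1125, front porch 4, no borders */
		unsigned int v_total = 1125, v_front_porch = 4, v_addressable = 1080;
		unsigned int v_border_top = 0, v_border_bottom = 0;
		unsigned int vstartup_start = 20;                  /* value DML happened to pick */

		/* Same steps as the helper: blank_start = frame end - front porch,
		 * blank_end = blank_start - active region. */
		unsigned int asic_blank_start = v_total - v_front_porch;                   /* 1121 */
		unsigned int asic_blank_end = asic_blank_start - v_border_bottom -
					      v_addressable - v_border_top;                 /* 41   */
		unsigned int new_vstartup = asic_blank_end + (v_total - asic_blank_start); /* 45   */

		/* The helper keeps the larger of the two, so vstartup becomes 45 here */
		if (new_vstartup > vstartup_start)
			vstartup_start = new_vstartup;
		printf("vstartup_start = %u\n", vstartup_start);
		return 0;
	}
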
@@ -1270,7 +1330,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
- context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
+
+ /* Pstate change might not be supported by hardware, but it might be
+ * possible with firmware driven vertical blank stretching.
+ */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
@@ -1294,6 +1358,10 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
unbounded_req_enabled = false;
}
+ context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
+ context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
+ context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
+
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -1325,6 +1393,34 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
else
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+
+ context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ /* MALL Allocation Sizes */
+ /* count from active, top pipes per plane only */
+ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state &&
+ (context->res_ctx.pipe_ctx[i].top_pipe == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
+ context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+ /* SS: all active surfaces stored in MALL */
+ if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
+
+ if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
+ /* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */
+ context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
+ }
+ } else {
+ /* SUBVP: phantom surfaces only stored in MALL */
+ context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
+ }
+ }
+
+ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
@@ -1345,6 +1441,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz
* 1000;
+ context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
+
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1530,6 +1628,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
}
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
if (!fast_validate)
dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
@@ -1549,16 +1648,12 @@ bool dcn32_internal_validate_bw(struct dc *dc,
* to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_none;
+ context->bw_ctx.dml.validate_max_state = fast_validate;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- /* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */
- if (vlevel == context->bw_ctx.dml.soc.num_states) {
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_stutter;
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- }
+ context->bw_ctx.dml.validate_max_state = false;
if (vlevel < context->bw_ctx.dml.soc.num_states) {
memset(split, 0, sizeof(split));
@@ -1645,6 +1740,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
@@ -1660,6 +1756,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->stream = NULL;
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
@@ -1834,7 +1931,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
bool subvp_in_use = dcn32_subvp_in_use(dc, context);
unsigned int min_dram_speed_mts_margin;
bool need_fclk_lat_as_dummy = false;
- bool is_subvp_p_drr = true;
+ bool is_subvp_p_drr = false;
+ struct dc_stream_state *fpo_candidate_stream = NULL;
dc_assert_fp_enabled();
@@ -1842,7 +1940,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
if (subvp_in_use) {
/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */
if (!pstate_en) {
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter;
pstate_en = true;
is_subvp_p_drr = true;
}
@@ -1860,17 +1959,26 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
}
}
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i])
+ context->streams[i]->fpo_in_use = false;
+ }
- if (!pstate_en) {
+ if (!pstate_en || (!dc->debug.disable_fpo_optimizations &&
+ pstate_en && vlevel != 0)) {
/* only when the mclk switch can not be natural, is the fw based vblank stretch attempted */
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching =
- dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+ fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+ if (fpo_candidate_stream) {
+ fpo_candidate_stream->fpo_in_use = true;
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
+ }
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
@@ -1891,11 +1999,25 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
- maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
- dm_dram_clock_change_unsupported;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
+ if (vlevel_temp < vlevel) {
+ vlevel = vlevel_temp;
+ maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ pstate_en = true;
+ } else {
+ /* Restore FCLK latency and re-run validation to go back to original validation
+ * output if we find that enabling FPO does not give us any benefit (i.e. lower
+ * voltage level)
+ */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i])
+ context->streams[i]->fpo_in_use = false;
+ }
+ context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ }
}
}
@@ -2038,13 +2160,23 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
*/
context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ /* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
+ * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
+ */
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
} else {
/* Set A:
* All clocks min.
* DCFCLK: Min, as reported by PM FW, when available
* UCLK: Min, as reported by PM FW, when available
*/
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+
+ /* For set A set the correct latency values (i.e. non-dummy values) unconditionally
+ */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
@@ -2090,10 +2222,6 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context);
- }
-
/* revert fclk lat changes if required */
if (need_fclk_lat_as_dummy)
context->bw_ctx.dml.soc.fclk_change_latency_us =
@@ -2217,6 +2345,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
num_dcfclk_dpms++;
}
+ if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
+ min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
+
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
return -1;
@@ -2325,7 +2456,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
- break;
}
}
}
@@ -2334,7 +2464,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
- break;
}
}
@@ -2443,8 +2572,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
}
/* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans)
+ if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+ dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+ }
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
@@ -2622,3 +2754,131 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
}
+
+bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
+{
+ bool allow = false;
+ uint32_t refresh_rate = 0;
+
+ /* Allow subvp only on 2560x1440@60hz displays that have active margin,
+ * for now. There must be no scaling as well.
+ *
+ * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs
+ * for p-state switching.
+ */
+ if (pipe->stream && pipe->plane_state) {
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
+ / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ if (pipe->stream->timing.v_addressable == 1440 &&
+ pipe->stream->timing.h_addressable == 2560 &&
+ refresh_rate >= 55 && refresh_rate <= 65 &&
+ pipe->plane_state->src_rect.height == 1440 &&
+ pipe->plane_state->src_rect.width == 2560 &&
+ pipe->plane_state->dst_rect.height == 1440 &&
+ pipe->plane_state->dst_rect.width == 2560)
+ allow = true;
+ }
+ return allow;
+}
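
[Editor's note] The refresh-rate computation in the helper above is a round-up division: pix_clk_100hz * 100 gives the pixel clock in Hz, and adding (v_total * h_total - 1) before dividing by the frame size rounds up to the next whole Hz. A quick worked example with assumed 2560x1440@60 numbers (not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed timing: 2560x1440, h_total 2720, v_total 1481, pixel clock 241.5 MHz */
		unsigned long long pix_clk_100hz = 2415000;        /* 241.5 MHz in 100 Hz units */
		unsigned long long h_total = 2720, v_total = 1481;
		unsigned long long frame = v_total * h_total;      /* 4,028,320 pixels per frame */

		/* Same rounding-up idea as the helper: (pix_clk_hz + frame - 1) / frame */
		unsigned long long refresh = (pix_clk_100hz * 100 + frame - 1) / frame;

		printf("refresh = %llu Hz\n", refresh);            /* 60 Hz, inside the 55-65 window */
		return 0;
	}
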
+
+/**
+ * *******************************************************************************************
+ * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ *
+ * @return: Max vratio for prefetch
+ *
+ * *******************************************************************************************
+ */
+double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
+{
+ double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
+ int i;
+
+ /* For single display MPO configs, allow the max vratio to be 8
+ * if any plane is YUV420 format
+ */
+ if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) {
+ for (i = 0; i < context->stream_status[0].plane_count; i++) {
+ if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr ||
+ context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) {
+ max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ }
+ }
+ }
+ return max_vratio_pre;
+}
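
[Editor's note] The policy above only relaxes the prefetch vratio cap for a single-stream MPO config when at least one plane is YUV420; everything else keeps the default of __DML_MAX_BW_RATIO_PRE__ (4). A hedged sketch of the same decision, with stand-in types instead of the real dc/dc_state structures:

	#include <stdio.h>

	/* Stand-in names for illustration only; the driver checks
	 * SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr / _YCrCb on dc_plane_state. */
	enum fmt { FMT_ARGB8888, FMT_VIDEO_420 };

	static double max_vratio_prefetch(int stream_count, int plane_count, const enum fmt *formats)
	{
		double max_vratio_pre = 4.0;    /* __DML_MAX_BW_RATIO_PRE__ default */
		int i;

		/* Single display MPO: allow up to 7.9 (__DML_MAX_VRATIO_PRE__) if any plane is YUV420 */
		if (stream_count == 1 && plane_count > 1)
			for (i = 0; i < plane_count; i++)
				if (formats[i] == FMT_VIDEO_420)
					max_vratio_pre = 7.9;
		return max_vratio_pre;
	}

	int main(void)
	{
		enum fmt planes[2] = { FMT_ARGB8888, FMT_VIDEO_420 };

		printf("%.1f\n", max_vratio_prefetch(1, 2, planes));   /* 7.9 */
		printf("%.1f\n", max_vratio_prefetch(2, 2, planes));   /* 4.0 */
		return 0;
	}
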
+
+/**
+ * dcn32_assign_fpo_vactive_candidate - Assign the FPO stream candidate for FPO + VActive case
+ *
+ * This function chooses the FPO candidate stream for FPO + VActive cases (2 stream config).
+ * For FPO + VActive cases, the assumption is that one display has ActiveMargin > 0, and the
+ * other display has ActiveMargin <= 0. This function will choose the pipe/stream that has
+ * ActiveMargin <= 0 to be the FPO stream candidate if found.
+ *
+ *
+ * @param [in]: dc - current dc state
+ * @param [in]: context - new dc state
+ * @param [out]: fpo_candidate_stream - pointer to FPO stream candidate if one is found
+ *
+ * Return: void
+ */
+void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream)
+{
+ unsigned int i, pipe_idx;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
+ *fpo_candidate_stream = pipe->stream;
+ break;
+ }
+ pipe_idx++;
+ }
+}
+
+/**
+ * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE
+ *
+ * @param [in]: dc - current dc state
+ * @param [in]: context - new dc state
+ * @param [in]: vactive_margin_req_us - The vactive margin required for a vactive pipe to be
+ * considered "found"
+ *
+ * Return: True if VACTIVE display is found, false otherwise
+ */
+bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req_us)
+{
+ unsigned int i, pipe_idx;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ bool vactive_found = false;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us) {
+ vactive_found = true;
+ break;
+ }
+ pipe_idx++;
+ }
+ return vactive_found;
+}
+
+void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
+{
+ dc_assert_fp_enabled();
+ dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index ab010e7e840b..dcf512cd3072 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -76,4 +76,10 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
+void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream);
+
+bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req);
+
+void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 4b8f5fa0f0ad..d75248b6cae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -24,7 +24,6 @@
*/
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_32.h"
#include "../dml_inline_defs.h"
@@ -387,6 +386,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.UseMALLForStaticScreen,
+ mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DCCEnable,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportXStartY,
@@ -411,6 +411,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->BlockWidthC,
v->BlockHeightY,
v->BlockHeightC,
+ mode_lib->vba.DCCMetaPitchY,
+ mode_lib->vba.DCCMetaPitchC,
/* Output */
v->SurfaceSizeInMALL,
@@ -687,7 +689,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
- mode_lib->vba.UsesMALLForPStateChange);
+ mode_lib->vba.UsesMALLForPStateChange,
+ mode_lib->vba.UseUnboundedRequesting);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] &&
@@ -807,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
- v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
+ (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+ v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
@@ -893,8 +897,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
if (v->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
- if (v->VRatioPrefetchY[k] > __DML_MAX_VRATIO_PRE__
- || v->VRatioPrefetchC[k] > __DML_MAX_VRATIO_PRE__)
+ if (v->VRatioPrefetchY[k] > v->MaxVRatioPre
+ || v->VRatioPrefetchC[k] > v->MaxVRatioPre)
VRatioPrefetchMoreThanMax = true;
//bool DestinationLinesToRequestVMInVBlankEqualOrMoreThan32 = false;
@@ -939,6 +943,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->UrgBurstFactorLumaPre,
v->UrgBurstFactorChromaPre,
v->UrgBurstFactorCursorPre,
+ v->PrefetchBandwidth,
+ v->VRatio,
+ v->MaxVRatioPre,
/* output */
&MaxTotalRDBandwidth,
@@ -969,6 +976,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
+ v->PrefetchBandwidth,
+ v->VRatio,
+ v->MaxVRatioPre,
/* output */
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0],
@@ -1636,9 +1646,14 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
static void mode_support_configuration(struct vba_vars_st *v,
struct display_mode_lib *mode_lib)
{
- int i, j;
+ int i, j, start_state;
+
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
- for (i = v->soc.num_states - 1; i >= 0; i--) {
+ for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (mode_lib->vba.ScaleRatioAndTapsSupport == true
&& mode_lib->vba.SourceFormatPixelAndScanSupport == true
@@ -1707,7 +1722,7 @@ static void mode_support_configuration(struct vba_vars_st *v,
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
- int i, j;
+ int i, j, start_state;
unsigned int k, m;
unsigned int MaximumMPCCombine;
unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth;
@@ -1720,6 +1735,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
#endif
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
/*Scale Ratio, taps Support Check*/
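
[Editor's note] The validate_max_state flag introduced above turns the usual descending voltage-state sweep into a single-state check: when it is set, the per-state loops in this file run only for the highest SoC state. A tiny hedged sketch of the loop-bound change (values are made up):

	#include <stdio.h>
	#include <stdbool.h>

	int main(void)
	{
		const int num_states = 4;
		bool validate_max_state = true;

		/* Same bound selection as the hunks above: sweep all states, or only the top one */
		int start_state = validate_max_state ? num_states - 1 : 0;
		int i;

		for (i = num_states - 1; i >= start_state; i--)
			printf("evaluating state %d\n", i);     /* prints only state 3 when clamped */
		return 0;
	}
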
@@ -2009,7 +2028,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage
&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible;
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;
mode_lib->vba.TotalAvailablePipesSupport[i][j] = true;
@@ -2286,7 +2305,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ExceededMultistreamSlots[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) {
@@ -2335,8 +2354,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.ForcedOutputLinkBPP[k] != 0)
mode_lib->vba.DSCOnlyIfNecessaryWithBPP = true;
- if ((mode_lib->vba.DSCEnable[k] || mode_lib->vba.DSCEnable[k])
- && mode_lib->vba.OutputFormat[k] == dm_n422
+ if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.OutputFormat[k] == dm_n422
&& !mode_lib->vba.DSC422NativeSupport)
mode_lib->vba.DSC422NativeNotSupported = true;
@@ -2386,7 +2404,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
@@ -2403,7 +2421,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true;
mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
@@ -2421,7 +2439,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2458,7 +2476,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Check DSC Unit and Slices Support */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.NotEnoughDSCUnits[i] = false;
mode_lib->vba.NotEnoughDSCSlices[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
@@ -2493,7 +2511,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement(
mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k],
@@ -2520,7 +2538,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k];
@@ -2626,6 +2644,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.UseMALLForStaticScreen,
+ mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DCCEnable,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportXStartY,
@@ -2650,12 +2669,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MacroTileWidthC,
mode_lib->vba.MacroTileHeightY,
mode_lib->vba.MacroTileHeightC,
+ mode_lib->vba.DCCMetaPitchY,
+ mode_lib->vba.DCCMetaPitchC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
&mode_lib->vba.ExceededMALLSize);
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.swath_width_luma_ub_this_state[k] =
@@ -2882,7 +2903,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
//Calculate Return BW
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2961,7 +2982,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.MinPrefetchMode,
&mode_lib->vba.MaxPrefetchMode);
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j)
mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i];
}
@@ -3083,7 +3104,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DCFCLKState);
} // UseMinimumRequiredDCFCLK == true
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i,
mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j],
@@ -3092,7 +3113,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
//Re-ordering Buffer Support Check
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024
/ mode_lib->vba.ReturnBWPerState[i][j]
@@ -3114,7 +3135,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ mode_lib->vba.ReadBandwidthChroma[k];
}
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] =
dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j]
@@ -3138,7 +3159,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Prefetch Check */
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
@@ -3196,7 +3217,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
- mode_lib->vba.UsesMALLForPStateChange);
+ mode_lib->vba.UsesMALLForPStateChange,
+ mode_lib->vba.UseUnboundedRequesting);
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i,
mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i],
@@ -3289,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
- v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
+ (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
@@ -3333,7 +3355,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Output */
&mode_lib->vba.UrgentBurstFactorCursorPre[k],
&mode_lib->vba.UrgentBurstFactorLumaPre[k],
- &mode_lib->vba.UrgentBurstFactorChroma[k],
+ &mode_lib->vba.UrgentBurstFactorChromaPre[k],
&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
}
@@ -3358,6 +3380,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentBurstFactorLumaPre,
mode_lib->vba.UrgentBurstFactorChromaPre,
mode_lib->vba.UrgentBurstFactorCursorPre,
+ v->PrefetchBW,
+ v->VRatio,
+ v->MaxVRatioPre,
/* output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // Single *PrefetchBandwidth
@@ -3382,8 +3407,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
- if (mode_lib->vba.VRatioPreY[i][j][k] > __DML_MAX_VRATIO_PRE__
- || mode_lib->vba.VRatioPreC[i][j][k] > __DML_MAX_VRATIO_PRE__
+ if (mode_lib->vba.VRatioPreY[i][j][k] > mode_lib->vba.MaxVRatioPre
+ || mode_lib->vba.VRatioPreC[i][j][k] > mode_lib->vba.MaxVRatioPre
|| mode_lib->vba.NoTimeForPrefetch[i][j][k] == true) {
mode_lib->vba.VRatioInPrefetchSupported[i][j] = false;
}
@@ -3639,7 +3664,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe) {
if (mode_lib->vba.ViewportWidthChroma[k] > mode_lib->vba.SurfaceWidthC[k]
@@ -3656,7 +3680,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaximumMPCCombine = 0;
- for (i = v->soc.num_states; i >= 0; i--) {
+ for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || mode_lib->vba.ModeSupport[i][0] == true ||
mode_lib->vba.ModeSupport[i][1] == true) {
mode_lib->vba.VoltageLevel = i;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index c8b28c83ddf4..d98e36a9a09c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -44,7 +44,8 @@
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
// Prefetch schedule max vratio
-#define __DML_MAX_VRATIO_PRE__ 4.0
+#define __DML_MAX_VRATIO_PRE__ 7.9
+#define __DML_MAX_BW_RATIO_PRE__ 4.0
#define __DML_VBA_MAX_DST_Y_PRE__ 63.75
@@ -52,6 +53,7 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600
+#define MIN_DCFCLK_FREQ_MHZ 200
#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib;
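
[Editor's note] The header change above splits the old single cap in two: __DML_MAX_VRATIO_PRE__ (7.9) now bounds the per-plane prefetch vratio, while __DML_MAX_BW_RATIO_PRE__ (4.0) bounds the aggregate prefetch-to-active bandwidth ratio checked in dml32_CalculatePrefetchBandwithSupport() later in this patch whenever the relaxed per-plane cap is in force. A small numeric sketch of that aggregate check (all numbers invented):

	#include <stdio.h>

	int main(void)
	{
		/* Two made-up planes: prefetch bandwidth, prefetch vratio and active bandwidth */
		double prefetch_bw[2] = { 9000.0, 3000.0 };
		double vratio[2]      = { 6.0, 1.0 };
		double active_bw[2]   = { 8000.0, 7000.0 };
		double total_prefetch = 0, total_active = 0;
		int k;

		for (k = 0; k < 2; k++) {
			total_prefetch += prefetch_bw[k] * vratio[k];   /* 57000 */
			total_active   += active_bw[k];                 /* 15000 */
		}

		/* Aggregate cap from __DML_MAX_BW_RATIO_PRE__: prefetch may not exceed 4x active */
		printf("supported = %d\n", total_prefetch <= 4.0 * total_active);  /* 1: 57000 <= 60000 */
		return 0;
	}
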
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 5af601cff1a0..61cc4904ade4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1772,6 +1772,7 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int NumberOfActiveSurfaces,
unsigned int MALLAllocatedForDCN,
enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[],
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
bool DCCEnable[],
bool ViewportStationary[],
unsigned int ViewportXStartY[],
@@ -1796,13 +1797,17 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int ReadBlockWidthC[],
unsigned int ReadBlockHeightY[],
unsigned int ReadBlockHeightC[],
+ unsigned int DCCMetaPitchY[],
+ unsigned int DCCMetaPitchC[],
/* Output */
unsigned int SurfaceSizeInMALL[],
bool *ExceededMALLSize)
{
- unsigned int TotalSurfaceSizeInMALL = 0;
unsigned int k;
+ unsigned int TotalSurfaceSizeInMALLForSS = 0;
+ unsigned int TotalSurfaceSizeInMALLForSubVP = 0;
+ unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (ViewportStationary[k]) {
@@ -1828,18 +1833,18 @@ void dml32_CalculateSurfaceSizeInMall(
}
if (DCCEnable[k] == true) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_min(dml_ceil(SurfaceWidthY[k], 8 * Read256BytesBlockWidthY[k]),
+ (dml_min(dml_ceil(DCCMetaPitchY[k], 8 * Read256BytesBlockWidthY[k]),
dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + 8 *
Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k])
- dml_floor(ViewportXStartY[k], 8 * Read256BytesBlockWidthY[k]))
* dml_min(dml_ceil(SurfaceHeightY[k], 8 *
Read256BytesBlockHeightY[k]), dml_floor(ViewportYStartY[k] +
ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1, 8 *
- Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8
- * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256;
+ Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 *
+ Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);
if (Read256BytesBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_min(dml_ceil(SurfaceWidthC[k], 8 *
+ dml_min(dml_ceil(DCCMetaPitchC[k], 8 *
Read256BytesBlockWidthC[k]),
dml_floor(ViewportXStartC[k] + ViewportWidthC[k] + 8
* Read256BytesBlockWidthC[k] - 1, 8 *
@@ -1872,16 +1877,16 @@ void dml32_CalculateSurfaceSizeInMall(
}
if (DCCEnable[k] == true) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_ceil(dml_min(SurfaceWidthY[k], ViewportWidthY[k] + 8 *
+ (dml_ceil(dml_min(DCCMetaPitchY[k], ViewportWidthY[k] + 8 *
Read256BytesBlockWidthY[k] - 1), 8 *
Read256BytesBlockWidthY[k]) *
dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] + 8 *
Read256BytesBlockHeightY[k] - 1), 8 *
- Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256;
+ Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);
if (Read256BytesBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_ceil(dml_min(SurfaceWidthC[k], ViewportWidthC[k] + 8 *
+ dml_ceil(dml_min(DCCMetaPitchC[k], ViewportWidthC[k] + 8 *
Read256BytesBlockWidthC[k] - 1), 8 *
Read256BytesBlockWidthC[k]) *
dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] + 8 *
@@ -1894,10 +1899,14 @@ void dml32_CalculateSurfaceSizeInMall(
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
- TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
+		/* SS and SubVP are counted separately as they are never used at the same time */
+ if (UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe)
+ TotalSurfaceSizeInMALLForSubVP = TotalSurfaceSizeInMALLForSubVP + SurfaceSizeInMALL[k];
+ else if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
+ TotalSurfaceSizeInMALLForSS = TotalSurfaceSizeInMALLForSS + SurfaceSizeInMALL[k];
}
- *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024);
+ *ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) ||
+ (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
@@ -3471,7 +3480,7 @@ bool dml32_CalculatePrefetchSchedule(
double prefetch_sw_bytes;
double bytes_pp;
double dep_bytes;
- unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ unsigned int max_vratio_pre = v->MaxVRatioPre;
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
@@ -6134,29 +6143,46 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[],
+ double PrefetchBW[],
+ double VRatio[],
+ double MaxVRatioPre,
/* output */
- double *PrefetchBandwidth,
+ double *MaxPrefetchBandwidth,
double *FractionOfUrgentBandwidth,
bool *PrefetchBandwidthSupport)
{
unsigned int k;
+ double ActiveBandwidthPerSurface;
bool NotEnoughUrgentLatencyHiding = false;
+ double TotalActiveBandwidth = 0;
+ double TotalPrefetchBandwidth = 0;
+
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (NotUrgentLatencyHiding[k]) {
NotEnoughUrgentLatencyHiding = true;
}
}
- *PrefetchBandwidth = 0;
+ *MaxPrefetchBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- *PrefetchBandwidth = *PrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
- ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]),
+ ActiveBandwidthPerSurface = ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]);
+
+ TotalActiveBandwidth += ActiveBandwidthPerSurface;
+
+ TotalPrefetchBandwidth = TotalPrefetchBandwidth + PrefetchBW[k] * VRatio[k];
+
+ *MaxPrefetchBandwidth = *MaxPrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
+ ActiveBandwidthPerSurface,
NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k] + PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k]) + cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);
}
- *PrefetchBandwidthSupport = (*PrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding;
- *FractionOfUrgentBandwidth = *PrefetchBandwidth / ReturnBW;
+ if (MaxVRatioPre == __DML_MAX_VRATIO_PRE__)
+ *PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && (TotalPrefetchBandwidth <= TotalActiveBandwidth * __DML_MAX_BW_RATIO_PRE__) && !NotEnoughUrgentLatencyHiding;
+ else
+ *PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding;
+
+ *FractionOfUrgentBandwidth = *MaxPrefetchBandwidth / ReturnBW;
}
double dml32_CalculateBandwidthAvailableForImmediateFlip(unsigned int NumberOfActiveSurfaces,
@@ -6245,7 +6271,8 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX])
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
+ enum unbounded_requesting_policy UseUnboundedRequesting)
{
int k;
double SwathSizeAllSurfaces = 0;
@@ -6257,12 +6284,15 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
bool NotEnoughDETSwathFillLatencyHiding = false;
- /* calculate sum of single swath size for all pipes in bytes*/
+ if (UseUnboundedRequesting == dm_unbounded_requesting)
+ return false;
+
+ /* calculate sum of single swath size for all pipes in bytes */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
- SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
+ SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
if (SwathHeightC[k] != 0)
- SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
+ SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
else
SwathSizePerSurfaceC[k] = 0;
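The reworked MALL accounting above keeps two independent running totals, one for static-screen (SS) surfaces and one for SubVP phantom pipes, and compares each against the full MALL budget, since the two uses are never active at the same time. A minimal standalone sketch of that bookkeeping, using hypothetical, simplified names (the real routine additionally folds in the DCC meta surface and a 64 KiB term per DCC-enabled surface):

/* Hypothetical, simplified model of the exceeded-MALL check above. */
static bool mall_budget_exceeded(const unsigned int surface_size_bytes[],
				 const bool is_subvp_phantom[],
				 const bool is_static_screen[],
				 unsigned int num_surfaces,
				 unsigned int mall_alloc_mbytes)
{
	unsigned int budget_bytes = mall_alloc_mbytes * 1024 * 1024;
	unsigned int ss_total = 0, subvp_total = 0, k;

	for (k = 0; k < num_surfaces; k++) {
		/* SS and SubVP are tallied separately; they never run together. */
		if (is_subvp_phantom[k])
			subvp_total += surface_size_bytes[k];
		else if (is_static_screen[k])
			ss_total += surface_size_bytes[k];
	}

	return ss_total > budget_bytes || subvp_total > budget_bytes;
}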
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 779c6805f599..592d174df6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -334,6 +334,7 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int NumberOfActiveSurfaces,
unsigned int MALLAllocatedForDCN,
enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[],
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
bool DCCEnable[],
bool ViewportStationary[],
unsigned int ViewportXStartY[],
@@ -358,6 +359,8 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int ReadBlockWidthC[],
unsigned int ReadBlockHeightY[],
unsigned int ReadBlockHeightC[],
+ unsigned int DCCMetaPitchY[],
+ unsigned int DCCMetaPitchC[],
/* Output */
unsigned int SurfaceSizeInMALL[],
@@ -1093,9 +1096,12 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[],
+ double PrefetchBW[],
+ double VRatio[],
+ double MaxVRatioPre,
/* output */
- double *PrefetchBandwidth,
+ double *MaxPrefetchBandwidth,
double *FractionOfUrgentBandwidth,
bool *PrefetchBandwidthSupport);
@@ -1157,6 +1163,7 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]);
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
+ enum unbounded_requesting_policy UseUnboundedRequesting);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index f4b176599be7..342a1bcb4927 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -106,16 +106,16 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.clock_limits = {
{
.state = 0,
- .dcfclk_mhz = 1564.0,
- .fabricclk_mhz = 400.0,
- .dispclk_mhz = 2150.0,
- .dppclk_mhz = 2150.0,
+ .dcfclk_mhz = 1434.0,
+ .fabricclk_mhz = 2250.0,
+ .dispclk_mhz = 1720.0,
+ .dppclk_mhz = 1720.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .phyclk_d32_mhz = 625.0,
+ .phyclk_d32_mhz = 313.0,
.socclk_mhz = 1200.0,
- .dscclk_mhz = 716.667,
- .dram_speed_mts = 1600.0,
+ .dscclk_mhz = 573.333,
+ .dram_speed_mts = 16000.0,
.dtbclk_mhz = 1564.0,
},
},
@@ -125,18 +125,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
- .round_trip_ping_latency_dcfclk_cycles = 263,
+ .round_trip_ping_latency_dcfclk_cycles = 207,
.urgent_latency_pixel_data_only_us = 4,
.urgent_latency_pixel_mixed_with_vm_data_us = 4,
.urgent_latency_vm_data_only_us = 4,
- .fclk_change_latency_us = 20,
- .usr_retraining_latency_us = 2,
- .smn_latency_us = 2,
- .mall_allocated_for_dcn_mbytes = 64,
+ .fclk_change_latency_us = 7,
+ .usr_retraining_latency_us = 0,
+ .smn_latency_us = 0,
+ .mall_allocated_for_dcn_mbytes = 32,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 100.0,
+ .pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
@@ -294,6 +294,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
num_dcfclk_dpms++;
}
+ if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
+ min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
+
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
return -1;
@@ -402,7 +405,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
- break;
}
}
}
@@ -411,7 +413,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
- break;
}
}
@@ -534,8 +535,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
}
/* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans)
+ if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+ dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+ }
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
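With the two early break statements removed above, the synthetic-state fixup now clamps every table entry that sits below the minimum fabric or DCF clock, not just the first one reached while walking the table backwards. A hypothetical standalone illustration of the resulting loop behaviour (plain doubles instead of the real state table):

/* Hypothetical sketch: raise every entry below the minimum, with no early exit. */
static void clamp_entries_to_min(double clk_mhz[], int num_entries, double min_mhz)
{
	int i;

	for (i = num_entries - 1; i >= 0; i--) {
		if (clk_mhz[i] < min_mhz)
			clk_mhz[i] = min_mhz;	/* previously the loop stopped after the first hit */
	}
}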
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 4125d3d111d1..bdf3ac6cadd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -41,51 +41,51 @@
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
-const struct dml_funcs dml20_funcs = {
+static const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull,
.recalculate = dml20_recalculate,
.rq_dlg_get_dlg_reg = dml20_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml20_rq_dlg_get_rq_reg
};
-const struct dml_funcs dml20v2_funcs = {
+static const struct dml_funcs dml20v2_funcs = {
.validate = dml20v2_ModeSupportAndSystemConfigurationFull,
.recalculate = dml20v2_recalculate,
.rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
};
-const struct dml_funcs dml21_funcs = {
- .validate = dml21_ModeSupportAndSystemConfigurationFull,
- .recalculate = dml21_recalculate,
- .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
- .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
+static const struct dml_funcs dml21_funcs = {
+ .validate = dml21_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml21_recalculate,
+ .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
};
-const struct dml_funcs dml30_funcs = {
+static const struct dml_funcs dml30_funcs = {
.validate = dml30_ModeSupportAndSystemConfigurationFull,
.recalculate = dml30_recalculate,
.rq_dlg_get_dlg_reg = dml30_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml30_rq_dlg_get_rq_reg
};
-const struct dml_funcs dml31_funcs = {
+static const struct dml_funcs dml31_funcs = {
.validate = dml31_ModeSupportAndSystemConfigurationFull,
.recalculate = dml31_recalculate,
.rq_dlg_get_dlg_reg = dml31_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
-const struct dml_funcs dml314_funcs = {
+static const struct dml_funcs dml314_funcs = {
.validate = dml314_ModeSupportAndSystemConfigurationFull,
.recalculate = dml314_recalculate,
.rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
};
-const struct dml_funcs dml32_funcs = {
+static const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
- .recalculate = dml32_recalculate,
+ .recalculate = dml32_recalculate,
.rq_dlg_get_dlg_reg_v2 = dml32_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg_v2 = dml32_rq_dlg_get_rq_reg
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3d643d50c3eb..a9d49ef58fb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -91,6 +91,7 @@ struct display_mode_lib {
struct dal_logger *logger;
struct dml_funcs funcs;
struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
+ bool validate_max_state;
};
void dml_init_instance(struct display_mode_lib *lib,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 64d602e6412f..3c077164f362 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -246,6 +246,7 @@ struct _vcs_dpi_soc_bounding_box_st {
bool disable_dram_clock_change_vactive_support;
bool allow_dram_clock_one_display_vactive;
enum self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank;
+ double max_vratio_pre;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 8e6585dab20e..f9653f511baa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -202,6 +202,7 @@ dml_get_pipe_attr_func(vm_group_size_in_bytes, mode_lib->vba.vm_group_bytes);
dml_get_pipe_attr_func(dpte_row_height_linear_l, mode_lib->vba.dpte_row_height_linear);
dml_get_pipe_attr_func(pte_buffer_mode, mode_lib->vba.PTE_BUFFER_MODE);
dml_get_pipe_attr_func(subviewport_lines_needed_in_mall, mode_lib->vba.SubViewportLinesNeededInMALL);
+dml_get_pipe_attr_func(surface_size_in_mall, mode_lib->vba.SurfaceSizeInMALL)
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
@@ -411,6 +412,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
soc->urgent_latency_adjustment_fabric_clock_component_us;
mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference =
soc->urgent_latency_adjustment_fabric_clock_reference_mhz;
+ mode_lib->vba.MaxVRatioPre = soc->max_vratio_pre;
}
static void fetch_ip_params(struct display_mode_lib *mode_lib)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 81e53e67cd0b..07993741f5e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -143,6 +143,7 @@ dml_get_pipe_attr_decl(vready_at_or_after_vsync);
dml_get_pipe_attr_decl(min_dst_y_next_start);
dml_get_pipe_attr_decl(vstartup_calculated);
dml_get_pipe_attr_decl(subviewport_lines_needed_in_mall);
+dml_get_pipe_attr_decl(surface_size_in_mall);
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
@@ -262,6 +263,7 @@ struct vba_vars_st {
int maxMpcComb;
bool UseMaximumVStartup;
+ double MaxVRatioPre;
double WritebackDISPCLK;
double DPPCLKUsingSingleDPPLuma;
double DPPCLKUsingSingleDPPChroma;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d52cbc0e9b67..2bdc47615543 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -47,6 +47,59 @@ static bool dsc_policy_disable_dsc_stream_overhead;
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC)
+ return dc_dsc_stream_bandwidth_in_kbps(timing,
+ timing->dsc_cfg.bits_per_pixel,
+ timing->dsc_cfg.num_slices_h,
+ timing->dsc_cfg.is_dp);
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ ASSERT(bits_per_channel != 0);
+ bits_per_channel = 8;
+ break;
+ }
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+		/* Only Y_ONLY reduces bandwidth to 1/3 compared to RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+}
+
+
+/* Forward Declarations */
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
@@ -79,8 +132,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
- int min_slice_height_override,
- int max_dsc_target_bpp_limit_override_x16,
+ const struct dc_dsc_config_options *options,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -352,6 +404,11 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
+ struct dc_dsc_config_options options = {0};
+
+ options.dsc_min_slice_height_override = dsc_min_slice_height_override;
+ options.max_target_bpp_limit_override_x16 = max_bpp_x16;
+ options.slice_height_granularity = 1;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
@@ -360,7 +417,7 @@ bool dc_dsc_compute_bandwidth_range(
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- dsc_min_slice_height_override, max_bpp_x16, &config);
+ &options, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -740,8 +797,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
- int min_slice_height_override,
- int max_dsc_target_bpp_limit_override_x16,
+ const struct dc_dsc_config_options *options,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -760,7 +816,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override_x16, &policy);
+ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -909,12 +965,13 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- if (min_slice_height_override == 0)
+ if (options->dsc_min_slice_height_override == 0)
slice_height = min(policy.min_slice_height, pic_height);
else
- slice_height = min(min_slice_height_override, pic_height);
+ slice_height = min((int)(options->dsc_min_slice_height_override), pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
+ slice_height % options->slice_height_granularity != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
slice_height++;
@@ -958,8 +1015,7 @@ done:
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
- uint32_t dsc_min_slice_height_override,
- uint32_t max_target_bpp_limit_override,
+ const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -971,8 +1027,7 @@ bool dc_dsc_compute_config(
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_min_slice_height_override,
- max_target_bpp_limit_override * 16, dsc_cfg);
+ timing, options, dsc_cfg);
return is_dsc_possible;
}
@@ -1104,3 +1159,10 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
{
dsc_policy_disable_dsc_stream_overhead = disable;
}
+
+void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
+{
+ options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
+ options->max_target_bpp_limit_override_x16 = 0;
+ options->slice_height_granularity = 1;
+}
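As a worked example of the dc_bandwidth_in_kbps_from_timing() arithmetic shown above (illustrative numbers, not taken from the patch): for an RGB 8 bpc timing with a 594 MHz pixel clock, pix_clk_100hz is 5,940,000, so kbps = 5,940,000 / 10 = 594,000; multiplying by 8 bits per channel gives 4,752,000, and since the stream is not Y_ONLY it is multiplied by 3 for the three colour channels, yielding 14,256,000 kbps, which matches the expected 594 MHz x 24 bpp = 14.256 Gbps. From there, 4:2:0 encoding would halve the result and 4:2:2 would scale it by 2/3, while a DSC-enabled timing bypasses this path entirely and returns dc_dsc_stream_bandwidth_in_kbps() instead.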
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
index ad80bde9bc0f..31574940ccc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
@@ -46,7 +46,10 @@ struct dsc_parameters {
uint32_t rc_buffer_model_size;
};
-int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params);
+struct rc_params;
+int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
+ const struct rc_params *rc,
+ struct dsc_parameters *dsc_params);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index e97cf09be9d5..64cee8c80110 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -39,6 +39,7 @@
*/
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
{
+#if defined(CONFIG_DRM_AMD_DC_FP)
enum colour_mode mode;
enum bits_per_comp bpc;
bool is_navite_422_or_420;
@@ -59,4 +60,5 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
slice_width, slice_height,
pps->dsc_version_minor);
DC_FP_END();
+#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index f0aea988fef0..36d6c1646a51 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -95,19 +95,19 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param
dsc_cfg->rc_buf_thresh[i] = rc->rc_buf_thresh[i];
}
-int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params)
+int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
+ const struct rc_params *rc,
+ struct dsc_parameters *dsc_params)
{
int ret;
- struct rc_params rc;
struct drm_dsc_config dsc_cfg;
unsigned long long tmp;
- calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
- dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset);
+ dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
copy_pps_fields(&dsc_cfg, &dsc_params->pps);
- copy_rc_to_cfg(&dsc_cfg, &rc);
+ copy_rc_to_cfg(&dsc_cfg, rc);
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
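After the interface change above, dscc_compute_dsc_parameters() no longer derives the rate-control parameters internally; the caller is now expected to run calc_rc_params() first and pass the result in. A hypothetical caller sketch (error handling and PPS population omitted):

/* Hypothetical caller of the updated interface. */
struct drm_dsc_config pps = { 0 };	/* filled in by the caller */
struct rc_params rc;
struct dsc_parameters dsc_params;
int ret;

calc_rc_params(&rc, &pps);		/* RC parameters are now computed up front */
ret = dscc_compute_dsc_parameters(&pps, &rc, &dsc_params);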
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index 9b63c6c0cc84..e0bd0c722e00 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
- DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
@@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
- DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
index 687d4f128480..36a5736c58c9 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
- DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
@@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
- DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
index 9fd8b269dd79..985f10b39750 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
@@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
- DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
@@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
- DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 308a543178a5..59884ef651b3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -113,6 +113,13 @@
(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
+#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \
+ {DDC_MASK_SH_LIST_COMMON(mask_sh),\
+ 0,\
+ 0,\
+ 0,\
+ 0}
+
struct ddc_registers {
struct gpio_registers gpio;
uint32_t ddc_setup;
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 4233955e3c47..25ffc052d53b 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -27,13 +27,12 @@
#include "dm_services.h"
#include "dm_helpers.h"
-#include "include/hdcp_types.h"
-#include "include/i2caux_interface.h"
+#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "link_hwss.h"
-#include "inc/link_dpcd.h"
+#include "link/protocols/link_dpcd.h"
#define DC_LOGGER \
link->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 525f8f0b8732..2eb597a24425 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -51,38 +51,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "dm_cp_psp.h"
-#endif
#include "link_hwss.h"
-/************ link *****************/
-struct link_init_data {
- const struct dc *dc;
- struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
- uint32_t connector_index; /* this will be mapped to the HPD pins */
- uint32_t link_index; /* this is mapped to DAL display_index
- TODO: remove it when DC is complete. */
- bool is_dpia_link;
-};
-
-struct dc_link *link_create(const struct link_init_data *init_params);
-void link_destroy(struct dc_link **link);
-
-enum dc_status dc_link_validate_mode_timing(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- const struct dc_crtc_timing *timing);
-
-void core_link_resume(struct dc_link *link);
-
-void core_link_enable_stream(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx);
-
-void core_link_disable_stream(struct pipe_ctx *pipe_ctx);
-
-void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
#include "transform.h"
#include "dpp.h"
@@ -450,10 +421,11 @@ struct pipe_ctx {
struct _vcs_dpi_display_e2e_pipe_params_st dml_input;
int det_buffer_size_kb;
bool unbounded_req;
+ unsigned int surface_size_in_mall_bytes;
- union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
+ union pipe_update_flags update_flags;
};
/* Data used for dynamic link encoder assignment.
@@ -507,6 +479,9 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int mall_ss_size_bytes;
+ unsigned int mall_ss_psr_active_size_bytes;
+ unsigned int mall_subvp_size_bytes;
unsigned int legacy_svp_drr_stream_index;
bool legacy_svp_drr_stream_index_valid;
};
@@ -547,15 +522,6 @@ struct dc_state {
struct resource_context res_ctx;
/**
- * @bw_ctx: The output from bandwidth and watermark calculations and the DML
- *
- * Each context must have its own instance of VBA, and in order to
- * initialize and obtain IP and SOC, the base DML instance from DC is
- * initially copied into every context.
- */
- struct bw_context bw_ctx;
-
- /**
* @pp_display_cfg: PowerPlay clocks and settings
* Note: this is a big struct, do *not* put on stack!
*/
@@ -570,6 +536,15 @@ struct dc_state {
struct clk_mgr *clk_mgr;
/**
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
+ *
+ * Each context must have its own instance of VBA, and in order to
+ * initialize and obtain IP and SOC, the base DML instance from DC is
+ * initially copied into every context.
+ */
+ struct bw_context bw_ctx;
+
+ /**
* @refcount: refcount reference
*
* Notice that dc_state is used around the code to capture the current
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
deleted file mode 100644
index 95fb61d62778..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DAL_DDC_SERVICE_H__
-#define __DAL_DDC_SERVICE_H__
-
-#include "include/ddc_service_types.h"
-#include "include/i2caux_interface.h"
-
-#define EDID_SEGMENT_SIZE 256
-
-/* Address range from 0x00 to 0x1F.*/
-#define DP_ADAPTOR_TYPE2_SIZE 0x20
-#define DP_ADAPTOR_TYPE2_REG_ID 0x10
-#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
-/* Identifies adaptor as Dual-mode adaptor */
-#define DP_ADAPTOR_TYPE2_ID 0xA0
-/* MHz*/
-#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
-/* MHz*/
-#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
-/* kHZ*/
-#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
-/* kHZ*/
-#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
-
-#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
-
-struct ddc_service;
-struct graphics_object_id;
-enum ddc_result;
-struct av_sync_data;
-struct dp_receiver_id_info;
-
-struct i2c_payloads;
-struct aux_payloads;
-enum aux_return_code_type;
-
-void dal_ddc_i2c_payloads_add(
- struct i2c_payloads *payloads,
- uint32_t address,
- uint32_t len,
- uint8_t *data,
- bool write);
-
-struct ddc_service_init_data {
- struct graphics_object_id id;
- struct dc_context *ctx;
- struct dc_link *link;
- bool is_dpia_link;
-};
-
-struct ddc_service *dal_ddc_service_create(
- struct ddc_service_init_data *ddc_init_data);
-
-void dal_ddc_service_destroy(struct ddc_service **ddc);
-
-enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc);
-
-void dal_ddc_service_set_transaction_type(
- struct ddc_service *ddc,
- enum ddc_transaction_type type);
-
-bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc);
-
-void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap);
-
-bool dal_ddc_service_query_ddc_data(
- struct ddc_service *ddc,
- uint32_t address,
- uint8_t *write_buf,
- uint32_t write_size,
- uint8_t *read_buf,
- uint32_t read_size);
-
-bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
- struct aux_payload *payload);
-
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
- struct aux_payload *payload,
- enum aux_return_code_type *operation_result);
-
-bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
- struct aux_payload *payload);
-
-bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
- uint32_t timeout);
-
-void dal_ddc_service_write_scdc_data(
- struct ddc_service *ddc_service,
- uint32_t pix_clk,
- bool lte_340_scramble);
-
-void dal_ddc_service_read_scdc_data(
- struct ddc_service *ddc_service);
-
-void ddc_service_set_dongle_type(struct ddc_service *ddc,
- enum display_dongle_type dongle_type);
-
-void dal_ddc_service_set_ddc_pin(
- struct ddc_service *ddc_service,
- struct ddc *ddc);
-
-struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service);
-
-uint32_t get_defer_delay(struct ddc_service *ddc);
-
-#endif /* __DAL_DDC_SERVICE_H__ */
-
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
deleted file mode 100644
index e8d8c5cb1309..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_LINK_DP_H__
-#define __DC_LINK_DP_H__
-
-#define LINK_TRAINING_ATTEMPTS 4
-#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
-#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
-#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
-#define MAX_MTP_SLOT_COUNT 64
-#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
-#define TRAINING_AUX_RD_INTERVAL 100 //us
-#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
-
-struct dc_link;
-struct dc_stream_state;
-struct dc_link_settings;
-
-enum {
- LINK_TRAINING_MAX_RETRY_COUNT = 5,
- /* to avoid infinite loop where-in the receiver
- * switches between different VS
- */
- LINK_TRAINING_MAX_CR_RETRY = 100,
- /*
- * Some receivers fail to train on first try and are good
- * on subsequent tries. 2 retries should be plenty. If we
- * don't have a successful training then we don't expect to
- * ever get one.
- */
- LINK_TRAINING_MAX_VERIFY_RETRY = 2,
- PEAK_FACTOR_X1000 = 1006,
-};
-
-struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
-
-bool dp_verify_link_cap_with_retries(
- struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting,
- int attempts);
-
-bool dp_validate_mode_timing(
- struct dc_link *link,
- const struct dc_crtc_timing *timing);
-
-bool decide_edp_link_settings(struct dc_link *link,
- struct dc_link_settings *link_setting,
- uint32_t req_bw);
-
-bool decide_link_settings(
- struct dc_stream_state *stream,
- struct dc_link_settings *link_setting);
-
-bool perform_link_training_with_retries(
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern,
- int attempts,
- struct pipe_ctx *pipe_ctx,
- enum signal_type signal,
- bool do_fallback);
-
-bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data);
-
-bool is_mst_supported(struct dc_link *link);
-
-bool detect_dp_sink_caps(struct dc_link *link);
-
-void detect_edp_sink_caps(struct dc_link *link);
-
-bool is_dp_active_dongle(const struct dc_link *link);
-
-bool is_dp_branch_device(const struct dc_link *link);
-
-bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing);
-
-void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
-
-enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
-void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
-
-bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
-
-void dpcd_set_source_specific_data(struct dc_link *link);
-
-void dpcd_write_cable_id_to_dprx(struct dc_link *link);
-
-/* Write DPCD link configuration data. */
-enum dc_status dpcd_set_link_settings(
- struct dc_link *link,
- const struct link_training_settings *lt_settings);
-/* Write DPCD drive settings. */
-enum dc_status dpcd_set_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- uint32_t offset);
-/* Read training status and adjustment requests from DPCD. */
-enum dc_status dp_get_lane_status_and_lane_adjust(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- union lane_status ln_status[LANE_COUNT_DP_MAX],
- union lane_align_status_updated *ln_align,
- union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- uint32_t offset);
-
-void dp_wait_for_training_aux_rd_interval(
- struct dc_link *link,
- uint32_t wait_in_micro_secs);
-
-bool dp_is_cr_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-
-enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-
-bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-bool dp_is_symbol_locked(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-bool dp_is_interlane_aligned(union lane_align_status_updated align_status);
-
-bool dp_is_max_vs_reached(
- const struct link_training_settings *lt_settings);
-void dp_hw_to_dpcd_lane_settings(
- const struct link_training_settings *lt_settings,
- const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[]);
-void dp_decide_lane_settings(
- const struct link_training_settings *lt_settings,
- const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[]);
-
-uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
-
-enum dpcd_training_patterns
- dc_dp_training_pattern_to_dpcd_training_pattern(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern);
-
-uint8_t dc_dp_initialize_scrambling_data_symbols(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern);
-
-enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);
-void dp_set_fec_enable(struct dc_link *link, bool enable);
-bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
-void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
-bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
-bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
-
-/* Initialize output parameter lt_settings. */
-void dp_decide_training_settings(
- struct dc_link *link,
- const struct dc_link_settings *link_setting,
- struct link_training_settings *lt_settings);
-
-/* Convert PHY repeater count read from DPCD uint8_t. */
-uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count);
-
-/* Check DPCD training status registers to detect link loss. */
-enum link_training_result dp_check_link_loss_status(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting);
-
-enum dc_status dpcd_configure_lttpr_mode(
- struct dc_link *link,
- struct link_training_settings *lt_settings);
-
-enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
-enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
-bool dp_is_lttpr_present(struct dc_link *link);
-enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting);
-void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override);
-enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
-enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
-bool dpcd_write_128b_132b_sst_payload_allocation_table(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- struct link_mst_stream_allocation_table *proposed_table,
- bool allocate);
-
-enum dc_status dpcd_configure_channel_coding(
- struct dc_link *link,
- struct link_training_settings *lt_settings);
-
-bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
-
-struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
- const struct dc_stream_state *stream,
- const struct dc_link *link);
-void enable_dp_hpo_output(struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_settings);
-void disable_dp_hpo_output(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal);
-
-void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
-bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
-void dp_receiver_power_ctrl(struct dc_link *link, bool on);
-void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
-void dp_enable_link_phy(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal,
- enum clock_source_id clock_source,
- const struct dc_link_settings *link_settings);
-void edp_add_delay_for_T9(struct dc_link *link);
-bool edp_receiver_ready_T9(struct dc_link *link);
-bool edp_receiver_ready_T7(struct dc_link *link);
-
-void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal);
-
-void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal);
-
-bool dp_set_hw_training_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dc_dp_training_pattern pattern,
- uint32_t offset);
-
-void dp_set_hw_lane_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct link_training_settings *link_settings,
- uint32_t offset);
-
-void dp_set_hw_test_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dp_test_pattern test_pattern,
- uint8_t *custom_pattern,
- uint32_t custom_pattern_size);
-
-void dp_retrain_link_dp_test(struct dc_link *link,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
deleted file mode 100644
index 39c1d1d07357..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_LINK_DPIA_H__
-#define __DC_LINK_DPIA_H__
-
-/* This module implements functionality for training DPIA links. */
-
-struct dc_link;
-struct dc_link_settings;
-
-/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
-#define DPIA_CLK_SYNC_DELAY 16000
-
-/* Extend interval between training status checks for manual testing. */
-#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
-
-/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */
-/* DPCD DP Tunneling over USB4 */
-#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
-#define DP_IN_ADAPTER_INFO 0xe000e
-#define DP_USB4_DRIVER_ID 0xe000f
-#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b
-
-/* SET_CONFIG message types sent by driver. */
-enum dpia_set_config_type {
- DPIA_SET_CFG_SET_LINK = 0x01,
- DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05,
- DPIA_SET_CFG_SET_TRAINING = 0x18,
- DPIA_SET_CFG_SET_VSPE = 0x19
-};
-
-/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */
-enum dpia_set_config_ts {
- DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */
- DPIA_TS_TPS1 = 0x01,
- DPIA_TS_TPS2 = 0x02,
- DPIA_TS_TPS3 = 0x03,
- DPIA_TS_TPS4 = 0x07,
- DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */
-};
-
-/* SET_CONFIG message data associated with messages sent by driver. */
-union dpia_set_config_data {
- struct {
- uint8_t mode : 1;
- uint8_t reserved : 7;
- } set_link;
- struct {
- uint8_t stage;
- } set_training;
- struct {
- uint8_t swing : 2;
- uint8_t max_swing_reached : 1;
- uint8_t pre_emph : 2;
- uint8_t max_pre_emph_reached : 1;
- uint8_t reserved : 2;
- } set_vspe;
- uint8_t raw;
-};
-
-/* Read tunneling device capability from DPCD and update link capability
- * accordingly.
- */
-enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
-
-/* Query hot plug status of USB4 DP tunnel.
- * Returns true if HPD high.
- */
-bool dc_link_dpia_query_hpd_status(struct dc_link *link);
-
-/* Train DP tunneling link for USB4 DPIA display endpoint.
- * DPIA equivalent of dc_link_dp_perfrorm_link_training.
- * Aborts link training upon detection of sink unplug.
- */
-enum link_training_result dc_link_dpia_perform_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-
-#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index 2ae630bf2aee..7254182b7c72 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -27,7 +27,6 @@
#define __DAL_AUX_ENGINE_H__
#include "dc_ddc_types.h"
-#include "include/i2caux_interface.h"
enum aux_return_code_type;
@@ -81,7 +80,12 @@ enum i2c_default_speed {
I2CAUX_DEFAULT_I2C_SW_SPEED = 50
};
-union aux_config;
+union aux_config {
+ struct {
+ uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
+ } bits;
+ uint32_t raw;
+};
struct aux_engine {
uint32_t inst;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 591ab1389e3b..bef843cc32a1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -293,6 +293,9 @@ struct clk_mgr_funcs {
/* Get SMU present */
bool (*is_smu_present)(struct clk_mgr *clk_mgr);
+
+ int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base);
+
};
struct clk_mgr {
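The get_dispclk_from_dentist() entry added above is an optional function pointer in clk_mgr_funcs, so a caller would presumably need to check for its presence before invoking it; a hypothetical usage sketch (return value assumed to be a clock reading, unit not stated in this hunk):

/* Hypothetical caller: the hook may be left unimplemented by a given clk_mgr. */
int dispclk = 0;

if (clk_mgr->funcs->get_dispclk_from_dentist)
	dispclk = clk_mgr->funcs->get_dispclk_from_dentist(clk_mgr);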
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index ce006762f257..ad6acd1b34e1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -148,18 +148,21 @@ struct dccg_funcs {
struct dccg *dccg,
int inst);
-void (*set_pixel_rate_div)(
- struct dccg *dccg,
- uint32_t otg_inst,
- enum pixel_rate_div k1,
- enum pixel_rate_div k2);
-
-void (*set_valid_pixel_rate)(
- struct dccg *dccg,
- int ref_dtbclk_khz,
- int otg_inst,
- int pixclk_khz);
+ void (*set_pixel_rate_div)(struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div k1,
+ enum pixel_rate_div k2);
+ void (*set_valid_pixel_rate)(
+ struct dccg *dccg,
+ int ref_dtbclk_khz,
+ int otg_inst,
+ int pixclk_khz);
+
+ void (*dpp_root_clock_control)(
+ struct dccg *dccg,
+ unsigned int dpp_inst,
+ bool clock_on);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 5b0265c0df61..beb26dc8a07f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -187,6 +187,7 @@ struct hubbub_funcs {
void (*init_crb)(struct hubbub *hubbub);
void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow);
void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel);
+ void (*dchubbub_init)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 131fcfa28bca..f4aa76e02518 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -70,28 +70,38 @@ struct dpp_input_csc_matrix {
};
static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] = {
- {COLOR_SPACE_SRGB,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_SRGB_LIMITED,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_YCBCR601,
- {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
- 0, 0x2000, 0x38b4, 0xe3a6} },
- {COLOR_SPACE_YCBCR601_LIMITED,
- {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
- 0, 0x2568, 0x40de, 0xdd3a} },
- {COLOR_SPACE_YCBCR709,
- {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
- 0x2000, 0x3b61, 0xe24f} },
- {COLOR_SPACE_YCBCR709_LIMITED,
- {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
- 0x2568, 0x43ee, 0xdbb2} },
- {COLOR_SPACE_2020_YCBCR,
- {0x2F30, 0x2000, 0, 0xE869, 0xEDB7, 0x2000, 0xFABC, 0xBC6, 0,
- 0x2000, 0x3C34, 0xE1E6} },
- {COLOR_SPACE_2020_RGB_LIMITEDRANGE,
- {0x35E0, 0x255F, 0, 0xE2B3, 0xEB20, 0x255F, 0xF9FD, 0xB1E, 0,
- 0x255F, 0x44BD, 0xDB43} }
+ { COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0 } },
+ { COLOR_SPACE_SRGB_LIMITED,
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0 } },
+ { COLOR_SPACE_YCBCR601,
+ { 0x2cdd, 0x2000, 0, 0xe991,
+ 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6 } },
+ { COLOR_SPACE_YCBCR601_LIMITED,
+ { 0x3353, 0x2568, 0, 0xe400,
+ 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a } },
+ { COLOR_SPACE_YCBCR709,
+ { 0x3265, 0x2000, 0, 0xe6ce,
+ 0xf105, 0x2000, 0xfa01, 0xa7d,
+ 0, 0x2000, 0x3b61, 0xe24f } },
+ { COLOR_SPACE_YCBCR709_LIMITED,
+ { 0x39a6, 0x2568, 0, 0xe0d6,
+ 0xeedd, 0x2568, 0xf925, 0x9a8,
+ 0, 0x2568, 0x43ee, 0xdbb2 } },
+ { COLOR_SPACE_2020_YCBCR,
+ { 0x2F30, 0x2000, 0, 0xE869,
+ 0xEDB7, 0x2000, 0xFABC, 0xBC6,
+ 0, 0x2000, 0x3C34, 0xE1E6 } },
+ { COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ { 0x35E0, 0x255F, 0, 0xE2B3,
+ 0xEB20, 0x255F, 0xF9FD, 0xB1E,
+ 0, 0x255F, 0x44BD, 0xDB43 } }
};
struct dpp_grph_csc_adjustment {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index b982be64c792..86b711dcc785 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -53,9 +53,7 @@ enum dwb_source {
/* DCN1.x, DCN2.x support 2 pipes */
enum dwb_pipe {
dwb_pipe0 = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dwb_pipe1,
-#endif
dwb_pipe_max_num,
};
@@ -72,14 +70,11 @@ enum wbscl_coef_filter_type_sel {
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dwb_boundary_mode {
DWBSCL_BOUNDARY_MODE_EDGE = 0,
DWBSCL_BOUNDARY_MODE_BLACK = 1
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dwb_output_csc_mode {
DWB_OUTPUT_CSC_DISABLE = 0,
DWB_OUTPUT_CSC_COEF_A = 1,
@@ -132,7 +127,6 @@ struct dwb_efc_display_settings {
unsigned int dwbOutputBlack; // 0 - Normal, 1 - Output Black
};
-#endif
struct dwb_warmup_params {
bool warmup_en; /* false: normal mode, true: enable pattern generator */
bool warmup_mode; /* false: 420, true: 444 */
@@ -208,7 +202,7 @@ struct dwbc_funcs {
struct dwb_warmup_params *warmup_params);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
void (*dwb_program_output_csc)(
struct dwbc *dwbc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index d5ea7545583e..7f3f9b69e903 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -146,7 +146,7 @@ struct hubp_funcs {
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_hubp_post_enable)(struct hubp *hubp);
#endif
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
@@ -203,6 +203,7 @@ struct hubp_funcs {
void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
void (*hubp_update_force_pstate_disallow)(struct hubp *hubp, bool allow);
+ void (*hubp_update_force_cursor_pstate_disallow)(struct hubp *hubp, bool allow);
void (*hubp_update_mall_sel)(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
void (*hubp_prepare_subvp_buffering)(struct hubp *hubp, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index a819f0f97c5f..b95ae9596c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -275,20 +275,6 @@ enum dc_lut_mode {
LUT_RAM_B
};
-enum symclk_state {
- SYMCLK_OFF_TX_OFF,
- SYMCLK_ON_TX_ON,
- SYMCLK_ON_TX_OFF,
-};
-
-struct phy_state {
- struct {
- uint8_t otg : 1;
- uint8_t reserved : 7;
- } symclk_ref_cnts;
- enum symclk_state symclk_state;
-};
-
/**
* speakersToChannels
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index ec572a9e4054..dbe7afa9d3a2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -75,58 +75,6 @@ struct encoder_feature_support {
bool fec_supported;
};
-union dpcd_psr_configuration {
- struct {
- unsigned char ENABLE : 1;
- unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
- unsigned char CRC_VERIFICATION : 1;
- unsigned char FRAME_CAPTURE_INDICATION : 1;
- /* For eDP 1.4, PSR v2*/
- unsigned char LINE_CAPTURE_INDICATION : 1;
- /* For eDP 1.4, PSR v2*/
- unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
- unsigned char ENABLE_PSR2 : 1;
- /* For eDP 1.5, PSR v2 w/ early transport */
- unsigned char EARLY_TRANSPORT_ENABLE : 1;
- } bits;
- unsigned char raw;
-};
-
-union dpcd_alpm_configuration {
- struct {
- unsigned char ENABLE : 1;
- unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
- } bits;
- unsigned char raw;
-};
-
-union dpcd_sink_active_vtotal_control_mode {
- struct {
- unsigned char ENABLE : 1;
- unsigned char RESERVED : 7;
- } bits;
- unsigned char raw;
-};
-
-union psr_error_status {
- struct {
- unsigned char LINK_CRC_ERROR :1;
- unsigned char RFB_STORAGE_ERROR :1;
- unsigned char VSC_SDP_ERROR :1;
- unsigned char RESERVED :5;
- } bits;
- unsigned char raw;
-};
-
-union psr_sink_psr_status {
- struct {
- unsigned char SINK_SELF_REFRESH_STATUS :3;
- unsigned char RESERVED :5;
- } bits;
- unsigned char raw;
-};
-
struct link_encoder {
const struct link_encoder_funcs *funcs;
int32_t aux_channel_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 42db4b7b79fd..c4fbbf08ef86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -30,7 +30,6 @@
#include "audio_types.h"
#include "hw_shared.h"
-#include "dc_link.h"
struct dc_bios;
struct dc_context;
@@ -72,6 +71,12 @@ enum dynamic_metadata_mode {
dmdata_dolby_vision
};
+struct enc_sdp_line_num {
+ /* Adaptive Sync SDP */
+ bool adaptive_sync_line_num_valid;
+ uint32_t adaptive_sync_line_num;
+};
+
struct encoder_info_frame {
/* auxiliary video information */
struct dc_info_packet avi;
@@ -85,6 +90,9 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
+ /* Adaptive Sync SDP*/
+ struct dc_info_packet adaptive_sync;
+ struct enc_sdp_line_num sdp_line_num;
};
struct encoder_unblank_param {
@@ -154,6 +162,10 @@ struct stream_encoder_funcs {
void (*stop_hdmi_info_packets)(
struct stream_encoder *enc);
+ void (*update_dp_info_packets_sdp_line_num)(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void (*update_dp_info_packets)(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
@@ -302,6 +314,10 @@ struct hpo_dp_stream_encoder_funcs {
bool compressed_format,
bool double_buffer_en);
+ void (*update_dp_info_packets_sdp_line_num)(
+ struct hpo_dp_stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void (*update_dp_info_packets)(
struct hpo_dp_stream_encoder *enc,
const struct encoder_info_frame *info_frame);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 0e42e721dd15..c21e7ffd5bd0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -182,7 +182,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
#endif
void (*disable_phantom_crtc)(struct timing_generator *tg);
@@ -331,6 +331,7 @@ struct timing_generator_funcs {
uint32_t vtotal_change_limit);
void (*init_odm)(struct timing_generator *tg);
+ void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index c43523f9ff6d..88ac723d10aa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -266,6 +266,7 @@ struct hw_sequencer_funcs {
void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index a4d61bb724b6..4513544559be 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -115,6 +115,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
+ void (*dpp_root_clock_control)(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
void (*dpp_pg_control)(struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on);
@@ -148,9 +152,9 @@ struct hwseq_private_funcs {
void (*PLAT_58856_wa)(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
- void (*subvp_update_force_pstate)(struct dc *dc, struct dc_state *context);
+ void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
unsigned int *k1_div,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
new file mode 100644
index 000000000000..f839494d59d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_H__
+#define __DC_LINK_H__
+
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This header defines the link component function interfaces, aka link_service.
+ * link_service provides the only entry point to link functions, exposed in
+ * function pointer style. This header is strictly private to dc and must never
+ * be included by DM, because it exposes too much dc detail, including all the
+ * dc private types defined in core_types.h. Including it from DM would break
+ * DM - DC encapsulation and turn DM into a maintenance nightmare.
+ *
+ * The following shows a link component relation map.
+ *
+ * DM to DC:
+ * DM includes dc.h
+ * dc_link_exports.c or other dc files implement dc.h
+ *
+ * DC to Link:
+ * dc_link_exports.c or other dc files include link.h
+ * link_factory.c implements link.h
+ *
+ * Link sub-component to Link sub-component:
+ * link_factory.c includes --> link_xxx.h
+ * link_xxx.c implements link_xxx.h
+ *
+ * As this relation map shows, adding a new dc link function and calling it
+ * from the DM/dc side is difficult because it requires layers of translation.
+ * The most appropriate approach to implement new requirements on DM/dc side is
+ * to extend or generalize the functionality of existing link function
+ * interfaces so minimal modification is needed outside link component to
+ * achieve your new requirements. This approach reduces or even eliminates the
+ * effort needed outside link component to support a new link feature. This also
+ * reduces code discrepancy among DMs to support the same link feature. If we
+ * test full code path on one version of DM, and there is no feature specific
+ * modification required on other DMs, then we can have higher confidence that
+ * the feature will run on other DMs and produce the same result. The following
+ * are some good examples to start with:
+ *
+ * - detect_link --> to add new link detection or capability retrieval routines
+ *
+ * - validate_mode_timing --> to add new timing validation conditions
+ *
+ * - set_dpms_on/set_dpms_off --> to include new link enablement sequences
+ *
+ * If you must add new link functions, you will need to:
+ * 1. Declare the function pointer here under the suitable commented category.
+ * 2. Implement your function in the suitable link_xxx.c file.
+ * 3. Assign the function to link_service in link_factory.c
+ * 4. NEVER include link_xxx.h headers outside link component.
+ * 5. NEVER include link.h on DM side.
+ */
+#include "core_types.h"
+
+struct link_service *link_create_link_service(void);
+void link_destroy_link_service(struct link_service **link_srv);
+
+struct link_init_data {
+ const struct dc *dc;
+ struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
+ uint32_t connector_index; /* this will be mapped to the HPD pins */
+ uint32_t link_index; /* this is mapped to DAL display_index
+ TODO: remove it when DC is complete. */
+ bool is_dpia_link;
+};
+
+struct ddc_service_init_data {
+ struct graphics_object_id id;
+ struct dc_context *ctx;
+ struct dc_link *link;
+ bool is_dpia_link;
+};
+
+struct link_service {
+ /************************** Factory ***********************************/
+ struct dc_link *(*create_link)(
+ const struct link_init_data *init_params);
+ void (*destroy_link)(struct dc_link **link);
+
+
+ /************************** Detection *********************************/
+ bool (*detect_link)(struct dc_link *link, enum dc_detect_reason reason);
+ bool (*detect_connection_type)(struct dc_link *link,
+ enum dc_connection_type *type);
+ struct dc_sink *(*add_remote_sink)(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+ void (*remove_remote_sink)(struct dc_link *link, struct dc_sink *sink);
+ bool (*get_hpd_state)(struct dc_link *link);
+ struct gpio *(*get_hpd_gpio)(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+ void (*enable_hpd)(const struct dc_link *link);
+ void (*disable_hpd)(const struct dc_link *link);
+ void (*enable_hpd_filter)(struct dc_link *link, bool enable);
+ bool (*reset_cur_dp_mst_topology)(struct dc_link *link);
+ const struct dc_link_status *(*get_status)(const struct dc_link *link);
+ bool (*is_hdcp1x_supported)(struct dc_link *link,
+ enum signal_type signal);
+ bool (*is_hdcp2x_supported)(struct dc_link *link,
+ enum signal_type signal);
+ void (*clear_dprx_states)(struct dc_link *link);
+
+
+ /*************************** Resource *********************************/
+ void (*get_cur_res_map)(const struct dc *dc, uint32_t *map);
+ void (*restore_res_map)(const struct dc *dc, uint32_t *map);
+ void (*get_cur_link_res)(const struct dc_link *link,
+ struct link_resource *link_res);
+
+
+ /*************************** Validation *******************************/
+ enum dc_status (*validate_mode_timing)(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+ uint32_t (*dp_link_bandwidth_kbps)(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
+ bool (*validate_dpia_bandwidth)(
+ const struct dc_stream_state *stream,
+ const unsigned int num_streams);
+
+
+ /*************************** DPMS *************************************/
+ void (*set_dpms_on)(struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*set_dpms_off)(struct pipe_ctx *pipe_ctx);
+ void (*resume)(struct dc_link *link);
+ void (*blank_all_dp_displays)(struct dc *dc);
+ void (*blank_all_edp_displays)(struct dc *dc);
+ void (*blank_dp_stream)(struct dc_link *link, bool hw_init);
+ enum dc_status (*increase_mst_payload)(
+ struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+ enum dc_status (*reduce_mst_payload)(
+ struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+ void (*set_dsc_on_stream)(struct pipe_ctx *pipe_ctx, bool enable);
+ bool (*set_dsc_enable)(struct pipe_ctx *pipe_ctx, bool enable);
+ bool (*update_dsc_config)(struct pipe_ctx *pipe_ctx);
+
+
+ /*************************** DDC **************************************/
+ struct ddc_service *(*create_ddc_service)(
+ struct ddc_service_init_data *ddc_init_data);
+ void (*destroy_ddc_service)(struct ddc_service **ddc);
+ bool (*query_ddc_data)(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+ int (*aux_transfer_raw)(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+ bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc,
+ struct aux_payload *payload);
+ bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc);
+ uint32_t (*get_aux_defer_delay)(struct ddc_service *ddc);
+
+
+ /*************************** DP Capability ****************************/
+ bool (*dp_is_sink_present)(struct dc_link *link);
+ bool (*dp_is_fec_supported)(const struct dc_link *link);
+ bool (*dp_is_128b_132b_signal)(struct pipe_ctx *pipe_ctx);
+ bool (*dp_get_max_link_enc_cap)(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+ const struct dc_link_settings *(*dp_get_verified_link_cap)(
+ const struct dc_link *link);
+ enum dp_link_encoding (*dp_get_encoding_format)(
+ const struct dc_link_settings *link_settings);
+ bool (*dp_should_enable_fec)(const struct dc_link *link);
+ bool (*dp_decide_link_settings)(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+ enum dp_link_encoding (*mst_decide_link_encoding_format)(
+ const struct dc_link *link);
+ bool (*edp_decide_link_settings)(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw);
+ uint32_t (*bw_kbps_from_raw_frl_link_rate_data)(uint8_t bw);
+ bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
+ enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+
+ /*************************** DP DPIA/PHY ******************************/
+ int (*dpia_handle_usb4_bandwidth_allocation_for_link)(
+ struct dc_link *link, int peak_bw);
+ void (*dpia_handle_bw_alloc_response)(
+ struct dc_link *link, uint8_t bw, uint8_t result);
+ void (*dp_set_drive_settings)(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+ void (*dpcd_write_rx_power_ctrl)(struct dc_link *link, bool on);
+
+
+ /*************************** DP IRQ Handler ***************************/
+ bool (*dp_parse_link_loss_status)(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+ bool (*dp_should_allow_hpd_rx_irq)(const struct dc_link *link);
+ void (*dp_handle_link_loss)(struct dc_link *link);
+ enum dc_status (*dp_read_hpd_rx_irq_data)(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+ bool (*dp_handle_hpd_rx_irq)(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data,
+ bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
+
+
+ /*************************** eDP Panel Control ************************/
+ void (*edp_panel_backlight_power_on)(
+ struct dc_link *link, bool wait_for_hpd);
+ int (*edp_get_backlight_level)(const struct dc_link *link);
+ bool (*edp_get_backlight_level_nits)(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak);
+ bool (*edp_set_backlight_level)(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+ bool (*edp_set_backlight_level_nits)(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+ int (*edp_get_target_backlight_pwm)(const struct dc_link *link);
+ bool (*edp_get_psr_state)(
+ const struct dc_link *link, enum dc_psr_state *state);
+ bool (*edp_set_psr_allow_active)(
+ struct dc_link *link,
+ const bool *allow_active,
+ bool wait,
+ bool force_static,
+ const unsigned int *power_opts);
+ bool (*edp_setup_psr)(struct dc_link *link,
+ const struct dc_stream_state *stream,
+ struct psr_config *psr_config,
+ struct psr_context *psr_context);
+ bool (*edp_set_sink_vtotal_in_psr_active)(
+ const struct dc_link *link,
+ uint16_t psr_vtotal_idle,
+ uint16_t psr_vtotal_su);
+ void (*edp_get_psr_residency)(
+ const struct dc_link *link, uint32_t *residency);
+ bool (*edp_wait_for_t12)(struct dc_link *link);
+ bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+ bool (*edp_backlight_enable_aux)(struct dc_link *link, bool enable);
+ void (*edp_add_delay_for_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T7)(struct dc_link *link);
+ bool (*edp_power_alpm_dpcd_enable)(struct dc_link *link, bool enable);
+
+
+ /*************************** DP CTS ************************************/
+ void (*dp_handle_automated_test)(struct dc_link *link);
+ bool (*dp_set_test_pattern)(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+ void (*dp_set_preferred_link_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+ void (*dp_set_preferred_training_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+
+
+ /*************************** DP Trace *********************************/
+ bool (*dp_trace_is_initialized)(struct dc_link *link);
+ void (*dp_trace_set_is_logged_flag)(struct dc_link *link,
+ bool in_detection,
+ bool is_logged);
+ bool (*dp_trace_is_logged)(struct dc_link *link, bool in_detection);
+ unsigned long long (*dp_trace_get_lt_end_timestamp)(
+ struct dc_link *link, bool in_detection);
+ const struct dp_trace_lt_counts *(*dp_trace_get_lt_counts)(
+ struct dc_link *link, bool in_detection);
+ unsigned int (*dp_trace_get_link_loss_count)(struct dc_link *link);
+ void (*dp_trace_set_edp_power_timestamp)(struct dc_link *link,
+ bool power_up);
+ uint64_t (*dp_trace_get_edp_poweron_timestamp)(struct dc_link *link);
+ uint64_t (*dp_trace_get_edp_poweroff_timestamp)(struct dc_link *link);
+ void (*dp_trace_source_sequence)(
+ struct dc_link *link, uint8_t dp_test_mode);
+};
+#endif /* __DC_LINK_H__ */
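For illustration only: the following standalone sketch mirrors the five-step procedure described in the file policy comment at the top of this header. The stand-in type name, the function edp_get_hypothetical_state, and the constructor helper are invented for this example; only the registration pattern itself (declare a pointer in link_service, implement it in a link_xxx.c file, assign it in link_factory.c) comes from the header above.

    /* Standalone illustration (not kernel code): stand-in types let the
     * registration pattern compile in isolation. */
    #include <stdbool.h>
    #include <stddef.h>

    struct dc_link;                                 /* opaque, as in dc */

    /* Step 1: declare the pointer under a suitable commented category. */
    struct link_service_sketch {
            bool (*edp_get_hypothetical_state)(struct dc_link *link);
    };

    /* Step 2: implement the function in the owning link_xxx.c file. */
    static bool edp_get_hypothetical_state(struct dc_link *link)
    {
            return link != NULL;                    /* placeholder body */
    }

    /* Step 3: link_factory.c assigns the pointer when building link_service. */
    static void construct_link_service_sketch(struct link_service_sketch *srv)
    {
            srv->edp_get_hypothetical_state = edp_get_hypothetical_state;
    }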
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5040836f404d..eaeb684c8a48 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -165,10 +165,6 @@ bool resource_validate_attach_surfaces(
struct dc_state *context,
const struct resource_pool *pool);
-void resource_validate_ctx_update_pointer_after_copy(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx);
-
enum dc_status resource_map_clock_resources(
const struct dc *dc,
struct dc_state *context,
@@ -205,7 +201,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
const struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -236,4 +232,13 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
struct pipe_ctx *pri_pipe,
struct pipe_ctx *sec_pipe,
bool odm);
+
+/* A test harness interface that modifies dp encoder resources in the given dc
+ * state and bypasses the need to revalidate. The interface assumes that the
+ * test harness interface is called with pre-validated link config stored in the
+ * pipe_ctx and updates dp encoder resources according to the link config.
+ */
+enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 5f4f6dd79511..3c7cb3dc046b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -37,7 +37,7 @@
#include "soc15_hw_ip.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn201(
+static enum dc_irq_source to_dal_irq_source_dcn201(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
@@ -136,11 +136,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.ack = NULL
};
-static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
- .set = NULL,
- .ack = NULL
-};
-
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
index 054c2a727eb2..a52b56e2859e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -23,8 +23,41 @@
# It abstracts the control and status of back end pipe such as DIO, HPO, DPIA,
# PHY, HPD, DDC, etc.).
-LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o
+LINK = link_detection.o link_dpms.o link_factory.o link_resource.o \
+link_validation.o
-AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK))
+AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/, \
+$(LINK))
AMD_DISPLAY_FILES += $(AMD_DAL_LINK)
+###############################################################################
+# accessories
+###############################################################################
+LINK_ACCESSORIES = link_dp_trace.o link_dp_cts.o link_fpga.o
+
+AMD_DAL_LINK_ACCESSORIES = $(addprefix $(AMDDALPATH)/dc/link/accessories/, \
+$(LINK_ACCESSORIES))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES)
+###############################################################################
+# hwss
+###############################################################################
+LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o
+
+AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \
+$(LINK_HWSS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_HWSS)
+###############################################################################
+# protocols
+###############################################################################
+LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \
+link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \
+link_dp_training_dpia.o link_dp_training_auxless.o \
+link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \
+link_edp_panel_control.o link_dp_irq_handler.o link_dp_dpia_bw.o
+
+AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \
+$(LINK_PROTOCOLS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_PROTOCOLS)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
new file mode 100644
index 000000000000..db9f1baa27e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -0,0 +1,1011 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_dp_cts.h"
+#include "link/link_resource.h"
+#include "link/protocols/link_dpcd.h"
+#include "link/protocols/link_dp_training.h"
+#include "link/protocols/link_dp_phy.h"
+#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/link_dpms.h"
+#include "resource.h"
+#include "dm_helpers.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
+{
+ switch (test_rate) {
+ case DP_TEST_LINK_RATE_RBR:
+ return LINK_RATE_LOW;
+ case DP_TEST_LINK_RATE_HBR:
+ return LINK_RATE_HIGH;
+ case DP_TEST_LINK_RATE_HBR2:
+ return LINK_RATE_HIGH2;
+ case DP_TEST_LINK_RATE_HBR3:
+ return LINK_RATE_HIGH3;
+ case DP_TEST_LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR10;
+ case DP_TEST_LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR20;
+ case DP_TEST_LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR13_5;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static bool is_dp_phy_square_pattern(enum dp_test_pattern test_pattern)
+{
+ return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END);
+}
+
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+ if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+ return true;
+ else
+ return false;
+}
+
+static void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_state *state = link->dc->current_state;
+ uint8_t count;
+ int i;
+
+ udelay(100);
+
+ link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+ for (i = 0; i < count; i++) {
+ link_set_dpms_off(pipes[i]);
+ pipes[i]->link_config.dp_link_settings = *link_setting;
+ update_dp_encoder_resources_for_test_harness(
+ link->dc,
+ state,
+ pipes[i]);
+ }
+
+ for (i = count-1; i >= 0; i--)
+ link_set_dpms_on(state, pipes[i]);
+}
+
+static void dp_test_send_link_training(struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+ uint8_t test_rate = 0;
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LANE_COUNT,
+ (unsigned char *)(&link_settings.lane_count),
+ 1);
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LINK_RATE,
+ &test_rate,
+ 1);
+ link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
+
+ /* Set preferred link settings */
+ link->verified_link_cap.lane_count = link_settings.lane_count;
+ link->verified_link_cap.link_rate = link_settings.link_rate;
+
+ dp_retrain_link_dp_test(link, &link_settings, false);
+}
+
+static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
+{
+ union audio_test_mode dpcd_test_mode = {0};
+ struct audio_test_pattern_type dpcd_pattern_type = {0};
+ union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int channel_count;
+ unsigned int channel = 0;
+ unsigned int modes = 0;
+ unsigned int sampling_rate_in_hz = 0;
+
+ // get audio test mode and test pattern parameters
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_MODE,
+ &dpcd_test_mode.raw,
+ sizeof(dpcd_test_mode));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PATTERN_TYPE,
+ &dpcd_pattern_type.value,
+ sizeof(dpcd_pattern_type));
+
+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
+
+ // read pattern periods for requested channels when sawTooth pattern is requested
+ if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
+ dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
+
+ test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+ // read period for each channel
+ for (channel = 0; channel < channel_count; channel++) {
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PERIOD_CH1 + channel,
+ &dpcd_pattern_period[channel].raw,
+ sizeof(dpcd_pattern_period[channel]));
+ }
+ }
+
+ // translate sampling rate
+ switch (dpcd_test_mode.bits.sampling_rate) {
+ case AUDIO_SAMPLING_RATE_32KHZ:
+ sampling_rate_in_hz = 32000;
+ break;
+ case AUDIO_SAMPLING_RATE_44_1KHZ:
+ sampling_rate_in_hz = 44100;
+ break;
+ case AUDIO_SAMPLING_RATE_48KHZ:
+ sampling_rate_in_hz = 48000;
+ break;
+ case AUDIO_SAMPLING_RATE_88_2KHZ:
+ sampling_rate_in_hz = 88200;
+ break;
+ case AUDIO_SAMPLING_RATE_96KHZ:
+ sampling_rate_in_hz = 96000;
+ break;
+ case AUDIO_SAMPLING_RATE_176_4KHZ:
+ sampling_rate_in_hz = 176400;
+ break;
+ case AUDIO_SAMPLING_RATE_192KHZ:
+ sampling_rate_in_hz = 192000;
+ break;
+ default:
+ sampling_rate_in_hz = 0;
+ break;
+ }
+
+ link->audio_test_data.flags.test_requested = 1;
+ link->audio_test_data.flags.disable_video = disable_video;
+ link->audio_test_data.sampling_rate = sampling_rate_in_hz;
+ link->audio_test_data.channel_count = channel_count;
+ link->audio_test_data.pattern_type = test_pattern;
+
+ if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
+ for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
+ link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
+ }
+ }
+}
+
+/* TODO Raven hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This causes intermittent PHY automation failure
+ * Need to look into the root cause */
+static void dp_test_send_phy_test_pattern(struct dc_link *link)
+{
+ union phy_test_pattern dpcd_test_pattern;
+ union lane_adjust dpcd_lane_adjustment[2];
+ unsigned char dpcd_post_cursor_2_adjustment = 0;
+ unsigned char test_pattern_buffer[
+ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ unsigned int test_pattern_size = 0;
+ enum dp_test_pattern test_pattern;
+ union lane_adjust dpcd_lane_adjust;
+ unsigned int lane;
+ struct link_training_settings link_training_settings;
+ unsigned char no_preshoot = 0;
+ unsigned char no_deemphasis = 0;
+
+ dpcd_test_pattern.raw = 0;
+ memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+ memset(&link_training_settings, 0, sizeof(link_training_settings));
+
+ /* get phy test pattern and pattern parameters from DP receiver */
+ core_link_read_dpcd(
+ link,
+ DP_PHY_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_LANE0_1,
+ &dpcd_lane_adjustment[0].raw,
+ sizeof(dpcd_lane_adjustment));
+
+ /* prepare link training settings */
+ link_training_settings.link_settings = link->cur_link_settings;
+
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
+
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ dp_fixed_vs_pe_read_lane_adjust(
+ link,
+ link_training_settings.dpcd_lane_settings);
+
+ /* get post cursor 2 parameters
+ * For DP 1.1a or earlier, this DPCD register's value is 0
+ * For DP 1.2 or later:
+ * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+ * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+ */
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_POST_CURSOR2,
+ &dpcd_post_cursor_2_adjustment,
+ sizeof(dpcd_post_cursor_2_adjustment));
+
+ /* translate request */
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case PHY_TEST_PATTERN_D10_2:
+ test_pattern = DP_TEST_PATTERN_D102;
+ break;
+ case PHY_TEST_PATTERN_SYMBOL_ERROR:
+ test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case PHY_TEST_PATTERN_PRBS7:
+ test_pattern = DP_TEST_PATTERN_PRBS7;
+ break;
+ case PHY_TEST_PATTERN_80BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_CP2520_1:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_2:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case PHY_TEST_PATTERN_PRBS9:
+ test_pattern = DP_TEST_PATTERN_PRBS9;
+ break;
+ case PHY_TEST_PATTERN_PRBS11:
+ test_pattern = DP_TEST_PATTERN_PRBS11;
+ break;
+ case PHY_TEST_PATTERN_PRBS15:
+ test_pattern = DP_TEST_PATTERN_PRBS15;
+ break;
+ case PHY_TEST_PATTERN_PRBS23:
+ test_pattern = DP_TEST_PATTERN_PRBS23;
+ break;
+ case PHY_TEST_PATTERN_PRBS31:
+ test_pattern = DP_TEST_PATTERN_PRBS31;
+ break;
+ case PHY_TEST_PATTERN_264BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_SQUARE:
+ test_pattern = DP_TEST_PATTERN_SQUARE;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+ no_preshoot = 1;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+ no_deemphasis = 1;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
+ no_preshoot = 1;
+ no_deemphasis = 1;
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (is_dp_phy_square_pattern(test_pattern)) {
+ test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
+ core_link_read_dpcd(
+ link,
+ DP_PHY_SQUARE_PATTERN,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ for (lane = 0; lane <
+ (unsigned int)(link->cur_link_settings.lane_count);
+ lane++) {
+ dpcd_lane_adjust.raw =
+ dp_get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+ if (link_dp_get_encoding_format(&link->cur_link_settings) ==
+ DP_8b_10b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ } else if (link_dp_get_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level =
+ dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot;
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis;
+ }
+ }
+
+ dp_hw_to_dpcd_lane_settings(&link_training_settings,
+ link_training_settings.hw_lane_settings,
+ link_training_settings.dpcd_lane_settings);
+ /* Usage: measure the DP physical lane signal automatically with DP SI test
+ * equipment. The PHY test pattern request is generated by the equipment via
+ * an HPD interrupt, so HPD must stay active at all times; do not touch it.
+ * Forward the request to the DS.
+ */
+ dp_set_test_pattern(
+ link,
+ test_pattern,
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
+ &link_training_settings,
+ test_pattern_buffer,
+ test_pattern_size);
+}
+
+static void set_crtc_test_pattern(struct dc_link *link,
+ struct pipe_ctx *pipe_ctx,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space)
+{
+ enum controller_dp_test_pattern controller_test_pattern;
+ enum dc_color_depth color_depth = pipe_ctx->
+ stream->timing.display_color_depth;
+ struct bit_depth_reduction_params params;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+ int width = pipe_ctx->stream->timing.h_addressable +
+ pipe_ctx->stream->timing.h_border_left +
+ pipe_ctx->stream->timing.h_border_right;
+ int height = pipe_ctx->stream->timing.v_addressable +
+ pipe_ctx->stream->timing.v_border_bottom +
+ pipe_ctx->stream->timing.v_border_top;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ break;
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+ break;
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+ break;
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+ break;
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+ break;
+ default:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ break;
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ {
+ /* disable bit depth reduction */
+ pipe_ctx->stream->bit_depth_params = params;
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ controller_test_pattern, color_depth);
+ else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct pipe_ctx *odm_pipe;
+ enum controller_dp_color_space controller_color_space;
+ int opp_cnt = 1;
+ int offset = 0;
+ int dpg_width = width;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+ default:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+ ASSERT(0);
+ break;
+ }
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+ dpg_width = width / opp_cnt;
+ offset = dpg_width;
+
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ pipe_ctx,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ offset);
+ offset += offset;
+ }
+ }
+ }
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ {
+ /* restore bitdepth reduction */
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
+ pipe_ctx->stream->bit_depth_params = params;
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int dpg_width;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ dpg_width = width / opp_cnt;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
+ }
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ pipe_ctx,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dp_handle_automated_test(struct dc_link *link)
+{
+ union test_request test_request;
+ union test_response test_response;
+
+ memset(&test_request, 0, sizeof(test_request));
+ memset(&test_response, 0, sizeof(test_response));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+ if (test_request.bits.LINK_TRAINING) {
+ /* ACK first to let DP RX test box monitor LT sequence */
+ test_response.bits.ACK = 1;
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ dp_test_send_link_training(link);
+ /* no acknowledge request is needed again */
+ test_response.bits.ACK = 0;
+ }
+ if (test_request.bits.LINK_TEST_PATTRN) {
+ union test_misc dpcd_test_params;
+ union link_test_pattern dpcd_test_pattern;
+
+ memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+ memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+ /* get link test pattern and pattern parameters */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_TEST_MISC0,
+ &dpcd_test_params.raw,
+ sizeof(dpcd_test_params));
+ test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link,
+ dpcd_test_pattern, dpcd_test_params) ? 1 : 0;
+ }
+
+ if (test_request.bits.AUDIO_TEST_PATTERN) {
+ dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
+ test_response.bits.ACK = 1;
+ }
+
+ if (test_request.bits.PHY_TEST_PATTERN) {
+ dp_test_send_phy_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+
+ /* send request acknowledgment */
+ if (test_response.bits.ACK)
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+}
+
+bool dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ unsigned int lane;
+ unsigned int i;
+ unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+ union dpcd_training_pattern training_pattern;
+ enum dpcd_phy_test_patterns pattern;
+
+ memset(&training_pattern, 0, sizeof(training_pattern));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
+ if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ if (pipe_ctx == NULL)
+ return false;
+
+ /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
+ if (link->test_pattern_enabled && test_pattern ==
+ DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set CRTC Test Pattern */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ /* Unblank Stream */
+ link->dc->hwss.unblank_stream(
+ pipe_ctx,
+ &link->verified_link_cap);
+ /* TODO:m_pHwss->MuteAudioEndpoint
+ * (pPathMode->pDisplayPath, false);
+ */
+
+ /* Reset Test Pattern state */
+ link->test_pattern_enabled = false;
+
+ return true;
+ }
+
+ /* Check for PHY Test Patterns */
+ if (is_dp_phy_pattern(test_pattern)) {
+ /* Set DPCD Lane Settings before running test pattern */
+ if (p_link_settings != NULL) {
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ dp_fixed_vs_pe_set_retimer_lane_settings(
+ link,
+ p_link_settings->dpcd_lane_settings,
+ p_link_settings->link_settings.lane_count);
+ } else {
+ dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
+ }
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ }
+
+ /* Blank stream if running test pattern */
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /*TODO:
+ * m_pHwss->
+ * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+ */
+ /* Blank stream */
+ link->dc->hwss.blank_stream(pipe_ctx);
+ }
+
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ if (p_link_settings != NULL)
+ dpcd_set_link_settings(link,
+ p_link_settings);
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ pattern = PHY_TEST_PATTERN_NONE;
+ break;
+ case DP_TEST_PATTERN_D102:
+ pattern = PHY_TEST_PATTERN_D10_2;
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ pattern = PHY_TEST_PATTERN_PRBS7;
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ pattern = PHY_TEST_PATTERN_CP2520_1;
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ pattern = PHY_TEST_PATTERN_CP2520_2;
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ pattern = PHY_TEST_PATTERN_CP2520_3;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ pattern = PHY_TEST_PATTERN_PRBS9;
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ pattern = PHY_TEST_PATTERN_PRBS11;
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ pattern = PHY_TEST_PATTERN_PRBS15;
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ pattern = PHY_TEST_PATTERN_PRBS23;
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ pattern = PHY_TEST_PATTERN_PRBS31;
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_SQUARE:
+ pattern = PHY_TEST_PATTERN_SQUARE;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+ break;
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
+ break;
+ default:
+ return false;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+ /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+ return false;
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ if (is_dp_phy_square_pattern(test_pattern))
+ core_link_write_dpcd(link,
+ DP_LINK_SQUARE_PATTERN,
+ p_custom_pattern,
+ 1);
+
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.2 or later - DP receiver's link quality
+ * pattern is set using DPCD LINK_QUAL_LANEx_SET
+ * register (0x10B~0x10E)
+ */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+ link_qual_pattern[lane] =
+ (unsigned char)(pattern);
+
+ core_link_write_dpcd(link,
+ DP_LINK_QUAL_LANE0_SET,
+ link_qual_pattern,
+ sizeof(link_qual_pattern));
+ } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+ link->dpcd_caps.dpcd_rev.raw == 0) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.1a or earlier - DP receiver's link
+ * quality pattern is set using
+ * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+ * register (0x102). We will use v_1.3 when we are
+ * setting test pattern for DP 1.1.
+ */
+ core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ }
+ } else {
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
+ break;
+
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ color_space = COLOR_SPACE_YCBCR601;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ color_space = COLOR_SPACE_YCBCR709;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+ default:
+ break;
+ }
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ /* update MSA to requested color space */
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream->timing,
+ color_space,
+ pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+ link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
+ if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+ else
+ pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+ resource_build_info_frame(pipe_ctx);
+ link->dc->hwss.update_info_frame(pipe_ctx);
+ }
+
+ /* CRTC Patterns */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
+ }
+
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ }
+
+ return true;
+}
+
+void dp_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe;
+ struct dc_stream_state *link_stream;
+ struct dc_link_settings store_settings = *link_setting;
+
+ link->preferred_link_setting = store_settings;
+
+ /* Retrain with preferred link settings only relevant for
+ * DP signal type
+ * Check for non-DP signal or if passive dongle present
+ */
+ if (!dc_is_dp_signal(link->connector_signal) ||
+ link->dongle_max_pix_clk > 0)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+ if (pipe->stream->link == link) {
+ link_stream = pipe->stream;
+ break;
+ }
+ }
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return;
+
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
+ if (link_decide_link_settings(link_stream, &store_settings))
+ dp_retrain_link_dp_test(link, &store_settings, false);
+}
+
+void dp_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
+{
+ if (lt_overrides != NULL)
+ link->preferred_training_settings = *lt_overrides;
+ else
+ memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings));
+
+ if (link_setting != NULL) {
+ link->preferred_link_setting = *link_setting;
+ } else {
+ link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
+ link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->type == dc_connection_mst_branch)
+ dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);
+
+ /* Retrain now, or wait until next stream update to apply */
+ if (skip_immediate_retrain == false)
+ dp_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
new file mode 100644
index 000000000000..eae23ea7f6ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_DP_CTS_H__
+#define __LINK_DP_CTS_H__
+#include "link.h"
+void dp_handle_automated_test(struct dc_link *link);
+bool dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+void dp_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+void dp_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+#endif /* __LINK_DP_CTS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index 2c1a3bfcdb50..fbcd8fb58ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -22,8 +22,9 @@
* Authors: AMD
*
*/
-#include "dc_link.h"
#include "link_dp_trace.h"
+#include "link/protocols/link_dpcd.h"
+#include "link.h"
void dp_trace_init(struct dc_link *link)
{
@@ -36,7 +37,7 @@ void dp_trace_reset(struct dc_link *link)
memset(&link->dp_trace, 0, sizeof(link->dp_trace));
}
-bool dc_dp_trace_is_initialized(struct dc_link *link)
+bool dp_trace_is_initialized(struct dc_link *link)
{
return link->dp_trace.is_initialized;
}
@@ -75,7 +76,7 @@ void dp_trace_lt_total_count_increment(struct dc_link *link,
link->dp_trace.commit_lt_trace.counts.total++;
}
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+void dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged)
{
@@ -85,8 +86,7 @@ void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
link->dp_trace.commit_lt_trace.is_logged = is_logged;
}
-bool dc_dp_trace_is_logged(struct dc_link *link,
- bool in_detection)
+bool dp_trace_is_logged(struct dc_link *link, bool in_detection)
{
if (in_detection)
return link->dp_trace.detect_lt_trace.is_logged;
@@ -122,7 +122,7 @@ void dp_trace_set_lt_end_timestamp(struct dc_link *link,
link->dp_trace.commit_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx);
}
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection)
{
if (in_detection)
@@ -131,7 +131,7 @@ unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
return link->dp_trace.commit_lt_trace.timestamps.end;
}
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection)
{
if (in_detection)
@@ -140,7 +140,7 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
return &link->dp_trace.commit_lt_trace.counts;
}
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
+unsigned int dp_trace_get_link_loss_count(struct dc_link *link)
{
return link->dp_trace.link_loss_count;
}
@@ -163,4 +163,11 @@ uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweroff;
-} \ No newline at end of file
+}
+
+void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode)
+{
+ if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
+ core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+ &dp_test_mode, sizeof(dp_test_mode));
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index 26700e3cd65e..ab437a0c9101 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,10 +24,11 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
+#include "link.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
-bool dc_dp_trace_is_initialized(struct dc_link *link);
+bool dp_trace_is_initialized(struct dc_link *link);
void dp_trace_detect_lt_init(struct dc_link *link);
void dp_trace_commit_lt_init(struct dc_link *link);
void dp_trace_link_loss_increment(struct dc_link *link);
@@ -36,10 +37,10 @@ void dp_trace_lt_fail_count_update(struct dc_link *link,
bool in_detection);
void dp_trace_lt_total_count_increment(struct dc_link *link,
bool in_detection);
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+void dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged);
-bool dc_dp_trace_is_logged(struct dc_link *link,
+bool dp_trace_is_logged(struct dc_link *link,
bool in_detection);
void dp_trace_lt_result_update(struct dc_link *link,
enum link_training_result result,
@@ -48,15 +49,15 @@ void dp_trace_set_lt_start_timestamp(struct dc_link *link,
bool in_detection);
void dp_trace_set_lt_end_timestamp(struct dc_link *link,
bool in_detection);
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection);
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
-
+unsigned int dp_trace_get_link_loss_count(struct dc_link *link);
void dp_trace_set_edp_power_timestamp(struct dc_link *link,
bool power_up);
uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
+void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode);
#endif /* __LINK_DP_TRACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c
new file mode 100644
index 000000000000..d3cc604eed67
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_fpga.h"
+#include "link/link_dpms.h"
+#include "dm_helpers.h"
+#include "link_hwss.h"
+#include "dccg.h"
+#include "resource.h"
+
+#define DC_LOGGER_INIT(logger)
+
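+/* FPGA bring-up path: enable the HPO DP link output and stream encoder, send
+ * the DSC PPS SDP if DSC is enabled, build the SST/MST payload allocation
+ * table, then unblank the stream and enable audio.
+ */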
+void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ uint8_t req_slot_count = 0;
+ uint8_t vc_id = 1; /// VC ID always 1 for SST
+ struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings;
+ const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ stream->link->cur_link_settings = link_settings;
+
+ if (link_hwss->ext.enable_dp_link_output)
+ link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res,
+ stream->signal, pipe_ctx->clock_source->id,
+ &link_settings);
+
+ /* Enable DP_STREAM_ENC */
+ dc->hwss.enable_stream(pipe_ctx);
+
+	/* Set DSC PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ link_set_dsc_pps_packet(pipe_ctx, true, true);
+ }
+
+ /* Allocate Payload */
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
+ // MST case
+ uint8_t i;
+
+ proposed_table.stream_count = state->stream_count;
+ for (i = 0; i < state->stream_count; i++) {
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
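+			/* Round the fractional time-slot average up to a whole slot count for this payload */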
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_allocations[i].slot_count = req_slot_count;
+ proposed_table.stream_allocations[i].vcp_id = i+1;
+			/* NOTE: This assumes the pipe_ctx index is the same as the stream index */
+ proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
+ }
+ } else {
+ // SST case
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_count = 1; /// Always 1 stream for SST
+ proposed_table.stream_allocations[0].slot_count = req_slot_count;
+ proposed_table.stream_allocations[0].vcp_id = vc_id;
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(stream->link,
+ &pipe_ctx->link_res,
+ &proposed_table);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+
+ dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
+ dc->hwss.enable_audio_stream(pipe_ctx);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h
new file mode 100644
index 000000000000..3a80f5595943
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_FPGA_H__
+#define __LINK_FPGA_H__
+#include "link.h"
+void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+#endif /* __LINK_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 33148b753c03..bebf9c4c8702 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -24,7 +24,6 @@
*/
#include "link_hwss_dio.h"
#include "core_types.h"
-#include "dc_link_dp.h"
#include "link_enc_cfg.h"
void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
@@ -45,7 +44,7 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
if (stream_enc->funcs->enable_fifo)
stream_enc->funcs->enable_fifo(stream_enc);
@@ -64,7 +63,8 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->id,
false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}
@@ -106,7 +106,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
&stream->timing);
if (dc_is_dp_signal(stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
void enable_dio_dp_link_output(struct dc_link *link,
@@ -127,7 +128,8 @@ void enable_dio_dp_link_output(struct dc_link *link,
link_enc,
link_settings,
clock_source);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void disable_dio_link_output(struct dc_link *link,
@@ -137,7 +139,8 @@ void disable_dio_link_output(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->disable_output(link_enc, signal);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
void set_dio_dp_link_test_pattern(struct dc_link *link,
@@ -147,7 +150,7 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
void set_dio_dp_lane_settings(struct dc_link *link,
@@ -196,7 +199,8 @@ void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc, false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
@@ -215,7 +219,8 @@ void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 9a108c3d7831..8b8a099feeb0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -26,6 +26,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
+#include "link.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 861f3cd5b356..861f3cd5b356 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
index ad16ec5d9bb7..ad16ec5d9bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index 2f46e1ac4ce0..edd7d026a762 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -26,7 +26,6 @@
#include "dm_helpers.h"
#include "core_types.h"
#include "dccg.h"
-#include "dc_link_dp.h"
#include "clk_mgr.h"
static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
@@ -69,7 +68,8 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank;
uint32_t link_bw_in_kbps =
- dc_link_bandwidth_kbps(pipe_ctx->stream->link, link_settings);
+ hpo_dp_stream_encoder->ctx->dc->link_srv->dp_link_bandwidth_kbps(
+ pipe_ctx->stream->link, link_settings);
uint16_t hblank_min_symbol_width = 0;
if (link_bw_in_kbps > 0) {
@@ -87,57 +87,20 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
hblank_min_symbol_width);
}
-static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
- int count = 1;
-
- while (odm_pipe != NULL) {
- count++;
- odm_pipe = odm_pipe->next_odm_pipe;
- }
-
- return count;
-}
-
static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
- enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
- dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
- dto_params.timing = &pipe_ctx->stream->timing;
- dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
-
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
- dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}
static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
-
- dto_params.otg_inst = tg->inst;
- dto_params.timing = &pipe_ctx->stream->timing;
stream_enc->funcs->disable(stream_enc);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
@@ -153,7 +116,8 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
stream->use_vsc_sdp_for_colorimetry,
stream->timing.flags.DSC,
false);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
@@ -239,7 +203,7 @@ static void set_hpo_dp_link_test_pattern(struct dc_link *link,
{
link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
link_res->hpo_dp_link_enc, tp_params);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
static void set_hpo_dp_lane_settings(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 57d447ec27b8..3cbb94b41a23 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,6 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
+#include "link.h"
bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
new file mode 100644
index 000000000000..a131e30fd7d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -0,0 +1,1427 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file manages link detection states and receiver states by using various
+ * link protocols. It also provides helper functions to interpret certain
+ * capabilities or status based on the states it manages or retrieve them
+ * directly from connected receivers.
+ */
+
+#include "link_dpms.h"
+#include "link_detection.h"
+#include "link_hwss.h"
+#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_ddc.h"
+#include "protocols/link_hpd.h"
+#include "protocols/link_dpcd.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_dpia.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_training.h"
+#include "accessories/link_dp_trace.h"
+
+#include "link_enc_cfg.h"
+#include "dm_helpers.h"
+#include "clk_mgr.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define LINK_INFO(...) \
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+/*
+ * Some receivers fail to train on first try and are good
+ * on subsequent tries. 2 retries should be plenty. If we
+ * don't have a successful training then we don't expect to
+ * ever get one.
+ */
+#define LINK_TRAINING_MAX_VERIFY_RETRY 2
+
+static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
+
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
+{
+ enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+ switch (sink_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_RGB:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* MST does not use I2COverAux, but there is the
+ * SPECIAL use case for "immediate dwnstrm device
+ * access" (EPR#370830).
+ */
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ default:
+ break;
+ }
+
+ return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
+{
+ if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+ switch (downstream.id) {
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ {
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ }
+ }
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ case CONNECTOR_ID_VGA:
+ return SIGNAL_TYPE_RGB;
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ return SIGNAL_TYPE_HDMI_TYPE_A;
+ case CONNECTOR_ID_LVDS:
+ return SIGNAL_TYPE_LVDS;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ case CONNECTOR_ID_EDP:
+ return SIGNAL_TYPE_EDP;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ } else if (downstream.type == OBJECT_TYPE_ENCODER) {
+ switch (downstream.id) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ }
+
+ return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink_signal_type(struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ enum signal_type result;
+ struct graphics_object_id enc_id;
+
+ if (link->is_dig_mapping_flexible)
+ enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN};
+ else
+ enc_id = link->link_enc->id;
+ result = get_basic_signal_type(enc_id, link->link_id);
+
+ /* Use basic signal type for link without physical connector. */
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ return result;
+
+ /* Internal digital encoder will detect only dongles
+ * that require digital signal
+ */
+
+ /* Detection mechanism is different
+ * for different native connectors.
+ * LVDS connector supports only LVDS signal;
+ * PCIE is a bus slot, the actual connector needs to be detected first;
+ * eDP connector supports only eDP signal;
+ * HDMI should check straps for audio
+ */
+
+ /* PCIE detects the actual connector on add-on board */
+ if (link->link_id.id == CONNECTOR_ID_PCIE) {
+ /* ZAZTODO implement PCIE add-on card detection */
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A: {
+ /* check audio support:
+ * if native HDMI is not supported, switch to DVI
+ */
+ struct audio_support *aud_support =
+ &link->dc->res_pool->audio_support;
+
+ if (!aud_support->hdmi_audio_native)
+ if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC: {
+ /* DP HPD short pulse. Passive DP dongle will not
+ * have short pulse
+ */
+ if (reason != DETECT_REASON_HPDRX) {
+ /* Check whether DP signal detected: if not -
+ * we assume signal is DVI; it could be corrected
+ * to HDMI after dongle detection
+ */
+ if (!dm_helpers_is_dp_sink_present(link))
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ switch (dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ if (audio_support->hdmi_audio_on_dongle)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ if (audio_support->hdmi_audio_native)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ default:
+ signal = SIGNAL_TYPE_NONE;
+ break;
+ }
+
+ return signal;
+}
+
+static void read_scdc_caps(struct ddc_service *ddc_service,
+ struct dc_sink *sink)
+{
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
+
+ link_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
+ sizeof(sink->scdc_caps.manufacturer_OUI.byte));
+
+ offset = HDMI_SCDC_DEVICE_ID;
+
+ link_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &(sink->scdc_caps.device_id.byte),
+ sizeof(sink->scdc_caps.device_id.byte));
+}
+
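+/* Two-payload I2C transaction: write a zero register offset to the slave, then
+ * read back 'len' bytes from the same address.
+ */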
+static bool i2c_read(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *buffer,
+ uint32_t len)
+{
+ uint8_t offs_data = 0;
+ struct i2c_payload payloads[2] = {
+ {
+ .write = true,
+ .address = address,
+ .length = 1,
+ .data = &offs_data },
+ {
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = buffer } };
+
+ struct i2c_command command = {
+ .payloads = payloads,
+ .number_of_payloads = 2,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ return dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+}
+
+enum {
+ DP_SINK_CAP_SIZE =
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
+};
+
+static void query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap)
+{
+ uint8_t i;
+ bool is_valid_hdmi_signature;
+ enum display_dongle_type *dongle = &sink_cap->dongle_type;
+ uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+ bool is_type2_dongle = false;
+ int retry_count = 2;
+ struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+ /* Assume we have no valid DP passive dongle connected */
+ *dongle = DISPLAY_DONGLE_NONE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+	/* Read DP-HDMI dongle over I2C (no response is interpreted as a DP-DVI dongle) */
+ if (!i2c_read(
+ ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf))) {
+		/* Passive HDMI dongles can sometimes fail here without retrying */
+ while (retry_count > 0) {
+ if (i2c_read(ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf)))
+ break;
+ retry_count--;
+ }
+ if (retry_count == 0) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
+ }
+
+ /* Check if Type 2 dongle.*/
+ if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+ is_type2_dongle = true;
+
+ dongle_signature =
+ (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+ is_valid_hdmi_signature = true;
+
+ /* Check EOT */
+ if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+ is_valid_hdmi_signature = false;
+ }
+
+ /* Check signature */
+ for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+		/* If it's not the right signature,
+		 * tolerate a mismatch only in the subversion byte (index 3). */
+ if (dongle_signature->id[i] !=
+ dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+ if (is_type2_dongle) {
+ is_valid_hdmi_signature = false;
+ break;
+ }
+
+ }
+ }
+
+ if (is_type2_dongle) {
+ uint32_t max_tmds_clk =
+ type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
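+		/* The TYPE2 MAX_TMDS_CLK register is in 2.5 MHz units; scale to MHz */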
+ max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
+
+ if (0 == max_tmds_clk ||
+ max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+ max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle %dMhz: ",
+ max_tmds_clk);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
+ max_tmds_clk);
+
+ }
+
+ /* Multiply by 1000 to convert to kHz. */
+ sink_cap->max_hdmi_pixel_clock =
+ max_tmds_clk * 1000;
+ }
+ sink_cap->is_dongle_type_one = false;
+
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ }
+ sink_cap->is_dongle_type_one = true;
+ }
+
+ return;
+}
+
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
+{
+ query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+ return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+ audio_support);
+}
+
+static void link_disconnect_sink(struct dc_link *link)
+{
+ if (link->local_sink) {
+ dc_sink_release(link->local_sink);
+ link->local_sink = NULL;
+ }
+
+ link->dpcd_sink_count = 0;
+ //link->dpcd_caps.dpcd_rev.raw = 0;
+}
+
+static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
+{
+ dc_sink_release(link->local_sink);
+ link->local_sink = prev_sink;
+}
+
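+/* Probe the sink's HDCP capabilities: RX caps over the HDCP 2.2 message path
+ * and, for DP signals, BCAPS for HDCP 1.4.
+ */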
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+ struct hdcp_protection_message msg22;
+ struct hdcp_protection_message msg14;
+
+ memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+ memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+ memset(link->hdcp_caps.rx_caps.raw, 0,
+ sizeof(link->hdcp_caps.rx_caps.raw));
+
+ if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->ddc->transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ msg22.data = link->hdcp_caps.rx_caps.raw;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+ msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+ } else {
+ msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+ msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+ }
+ msg22.version = HDCP_VERSION_22;
+ msg22.link = HDCP_LINK_PRIMARY;
+ msg22.max_retries = 5;
+ dc_process_hdcp_msg(signal, link, &msg22);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ msg14.data = &link->hdcp_caps.bcaps.raw;
+ msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+ msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+ msg14.version = HDCP_VERSION_14;
+ msg14.link = HDCP_LINK_PRIMARY;
+ msg14.max_retries = 5;
+
+ dc_process_hdcp_msg(signal, link, &msg14);
+ }
+
+}
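+
+/* On detection, read back the lane count, link rate (standard or eDP custom
+ * rate set) and downspread currently programmed in the sink, so
+ * cur_link_settings reflects what was left behind (e.g. by seamless boot).
+ */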
+static void read_current_link_settings_on_detect(struct dc_link *link)
+{
+ union lane_count_set lane_count_set = {0};
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t read_dpcd_retry_cnt = 10;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ int i;
+ union max_down_spread max_down_spread = {0};
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+ /* First DPCD read after VDD ON can fail if the particular board
+ * does not have HPD pin wired correctly. So if DPCD read fails,
+		 * which should never happen, retry a few times. Target worst
+ * case scenario of 80 ms.
+ */
+ if (status == DC_OK) {
+ link->cur_link_settings.lane_count =
+ lane_count_set.bits.LANE_COUNT_SET;
+ break;
+ }
+
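+		/* 10 retries x 8 ms sleep covers the ~80 ms worst case noted above */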
+ msleep(8);
+ }
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set == 0) {
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the edp link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
+ }
+ } else {
+ link->cur_link_settings.link_rate = link_bw_set;
+ link->cur_link_settings.use_link_rate_set = false;
+ }
+ // Read DPCD 00003h to find the max down spread.
+ core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
+ &max_down_spread.raw, sizeof(max_down_spread));
+ link->cur_link_settings.link_spread =
+ max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+}
+
+static bool detect_dp(struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ enum dc_detect_reason reason)
+{
+ struct audio_support *audio_support = &link->dc->res_pool->audio_support;
+
+ sink_caps->signal = link_detect_sink_signal_type(link, reason);
+ sink_caps->transaction_type =
+ get_ddc_transaction_type(sink_caps->signal);
+
+ if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ if (!detect_dp_sink_caps(link))
+ return false;
+
+ if (is_dp_branch_device(link))
+ /* DP SST branch */
+ link->type = dc_connection_sst_branch;
+ } else {
+ /* DP passive dongles */
+ sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+ sink_caps,
+ audio_support);
+ link->dpcd_caps.dongle_type = sink_caps->dongle_type;
+ link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
+ link->dpcd_caps.dpcd_rev.raw = 0;
+ }
+
+ return true;
+}
+
+static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
+{
+ if (old_edid->length != new_edid->length)
+ return false;
+
+ if (new_edid->length == 0)
+ return false;
+
+ return (memcmp(old_edid->raw_edid,
+ new_edid->raw_edid, new_edid->length) == 0);
+}
+
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
+{
+
+ /**
+	 * Something is terribly wrong if the timeout is > 200 ms (5 Hz):
+	 * 500 microseconds * 400 tries gives us 200 ms.
+ **/
+ unsigned int sleep_time_in_microseconds = 500;
+ unsigned int tries_allowed = 400;
+ bool is_in_alt_mode;
+ unsigned long long enter_timestamp;
+ unsigned long long finish_timestamp;
+ unsigned long long time_taken_in_ns;
+ int tries_taken;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /**
+	 * This hook only exists on DCN21 (is_in_alt_mode is a function
+	 * pointer, so checking it against NULL tells us whether the
+	 * encoder implements it).
+ **/
+ if (!link->link_enc->funcs->is_in_alt_mode)
+ return true;
+
+ is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
+ DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+
+ if (is_in_alt_mode)
+ return true;
+
+ enter_timestamp = dm_get_timestamp(link->ctx);
+
+ for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
+ udelay(sleep_time_in_microseconds);
+ /* ask the link if alt mode is enabled, if so return ok */
+ if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns =
+ dm_get_elapse_time_in_ns(link->ctx,
+ finish_timestamp,
+ enter_timestamp);
+ DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
+ div_u64(time_taken_in_ns, 1000000));
+ return true;
+ }
+ }
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
+ enter_timestamp);
+ DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
+ div_u64(time_taken_in_ns, 1000000));
+ return false;
+}
+
+static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+ /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+ * reports DSC support.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->type == dc_connection_mst_branch &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 &&
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+ link->wa_flags.dpia_mst_dsc_always_on = true;
+}
+
+static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+ /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->wa_flags.dpia_mst_dsc_always_on = false;
+}
+
+static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
+{
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+ link->link_index);
+
+ link->type = dc_connection_mst_branch;
+ apply_dpia_mst_dsc_always_on_wa(link);
+
+ dm_helpers_dp_update_branch_info(link->ctx, link);
+ if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
+ link_disconnect_sink(link);
+ } else {
+ link->type = dc_connection_sst_branch;
+ }
+
+ return link->type == dc_connection_mst_branch;
+}
+
+bool link_reset_cur_dp_mst_topology(struct dc_link *link)
+{
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ LINK_INFO("link=%d, mst branch is now Disconnected\n",
+ link->link_index);
+
+ revert_dpia_mst_dsc_always_on_wa(link);
+ return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+}
+
+static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
+ enum dc_detect_reason reason)
+{
+ int i;
+ bool can_apply_seamless_boot = false;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
+ }
+ }
+
+ return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT;
+}
+
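+/* Destructive verification retrains the link, so restore from Z10 and leave the
+ * optimized power state before touching the PHY clocks.
+ */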
+static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc)
+{
+ dc_z10_restore(dc);
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+}
+
+static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc)
+{
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+}
+
+static void verify_link_capability_destructive(struct dc_link *link,
+ struct dc_sink *sink,
+ enum dc_detect_reason reason)
+{
+ bool should_prepare_phy_clocks =
+ should_prepare_phy_clocks_for_link_verification(link->dc, reason);
+
+ if (should_prepare_phy_clocks)
+ prepare_phy_clocks_for_destructive_link_verification(link->dc);
+
+ if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+ struct dc_link_settings known_limit_link_setting =
+ dp_get_max_link_cap(link);
+ link_set_all_streams_dpms_off_for_link(link);
+ dp_verify_link_cap_with_retries(
+ link, &known_limit_link_setting,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
+ } else {
+ ASSERT(0);
+ }
+
+ if (should_prepare_phy_clocks)
+ restore_phy_clocks_for_destructive_link_verification(link->dc);
+}
+
+static void verify_link_capability_non_destructive(struct dc_link *link)
+{
+ if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+ if (dc_is_embedded_signal(link->local_sink->sink_signal) ||
+ link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ /* TODO - should we check link encoder's max link caps here?
+ * How do we know which link encoder to check from?
+ */
+ link->verified_link_cap = link->reported_link_cap;
+ else
+ link->verified_link_cap = dp_get_max_link_cap(link);
+ }
+}
+
+static bool should_verify_link_capability_destructively(struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+	bool destructive = false;
+ struct dc_link_settings max_link_cap;
+ bool is_link_enc_unavailable = link->link_enc &&
+ link->dc->res_pool->funcs->link_encs_assign &&
+ !link_enc_cfg_is_link_enc_avail(
+ link->ctx->dc,
+ link->link_enc->preferred_engine,
+ link);
+
+ if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+ max_link_cap = dp_get_max_link_cap(link);
+		destructive = true;
+
+ if (link->dc->debug.skip_detection_link_training ||
+ dc_is_embedded_signal(link->local_sink->sink_signal) ||
+ link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+			destructive = false;
+ } else if (link_dp_get_encoding_format(&max_link_cap) ==
+ DP_8b_10b_ENCODING) {
+ if (link->dpcd_caps.is_mst_capable ||
+ is_link_enc_unavailable) {
+				destructive = false;
+ }
+ }
+ }
+
+	return destructive;
+}
+
+static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
+ enum dc_detect_reason reason)
+{
+ if (should_verify_link_capability_destructively(link, reason))
+ verify_link_capability_destructive(link, sink, reason);
+ else
+ verify_link_capability_non_destructive(link);
+}
+
+/*
+ * detect_link_and_local_sink() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks.
+ */
+static bool detect_link_and_local_sink(struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ uint32_t i;
+ bool converter_disable_audio = false;
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ bool same_edid = false;
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc *dc = dc_ctx->dc;
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+ struct dpcd_caps prev_dpcd_caps;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ enum dc_connection_type pre_connection_type = link->type;
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (dc_is_virtual_signal(link->connector_signal))
+ return false;
+
+ if (((link->connector_signal == SIGNAL_TYPE_LVDS ||
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ (!link->dc->config.allow_edp_hotplug_detection)) &&
+ link->local_sink) {
+ // need to re-write OUI and brightness in resume case
+ if (link->connector_signal == SIGNAL_TYPE_EDP &&
+ (link->dpcd_sink_ext_caps.bits.oled == 1)) {
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ set_default_brightness_aux(link);
+ //TODO: use cached
+ }
+
+ return true;
+ }
+
+ if (!link_detect_connection_type(link, &new_connection_type)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ prev_sink = link->local_sink;
+ if (prev_sink) {
+ dc_sink_retain(prev_sink);
+ memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
+ }
+
+ link_disconnect_sink(link);
+ if (new_connection_type != dc_connection_none) {
+ link->type = new_connection_type;
+ link->link_state_valid = false;
+
+ /* From Disconnected-to-Connected. */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ if (aud_support->hdmi_audio_native)
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ detect_edp_sink_caps(link);
+ read_current_link_settings_on_detect(link);
+
+ /* Disable power sequence on MIPI panel + converter
+ */
+ if (dc->config.enable_mipi_converter_optimization &&
+ dc_ctx->dce_version == DCN_VERSION_3_01 &&
+ link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 &&
+ memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580,
+ sizeof(link->dpcd_caps.branch_dev_name)) == 0) {
+ dc->config.edp_no_power_sequencing = true;
+
+ if (!link->dpcd_caps.set_power_state_capable_edp)
+ link->wa_flags.dp_keep_receiver_powered = true;
+ }
+
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+
+ /* wa HPD high coming too early*/
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
+
+ /* if alt mode times out, return false */
+ if (!wait_for_entering_dp_alt_mode(link))
+ return false;
+ }
+
+ if (!detect_dp(link, &sink_caps, reason)) {
+ link->type = pre_connection_type;
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ }
+
+ /* Active SST downstream branch device unplug*/
+ if (link->type == dc_connection_sst_branch &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+ if (prev_sink)
+ /* Downstream unplug */
+ dc_sink_release(prev_sink);
+ return true;
+ }
+
+ /* disable audio for non DP to HDMI active sst converter */
+ if (link->type == dc_connection_sst_branch &&
+ is_dp_active_dongle(link) &&
+ (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER))
+ converter_disable_audio = true;
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ } /* switch() */
+
+ if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+ link->dpcd_sink_count =
+ link->dpcd_caps.sink_count.bits.SINK_COUNT;
+ else
+ link->dpcd_sink_count = 1;
+
+ set_ddc_transaction_type(link->ddc,
+ sink_caps.transaction_type);
+
+ link->aux_mode =
+ link_is_in_aux_transaction_mode(link->ddc);
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ }
+
+ sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+ sink->converter_disable_audio = converter_disable_audio;
+
+ /* dc_sink_create returns a new reference */
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(link->ctx,
+ link, sink);
+
+ switch (edid_status) {
+ case EDID_BAD_CHECKSUM:
+ DC_LOG_ERROR("EDID checksum invalid.\n");
+ break;
+ case EDID_PARTIAL_VALID:
+ DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
+ break;
+ case EDID_NO_RESPONSE:
+ DC_LOG_ERROR("No EDID read.\n");
+ /*
+ * Abort detection for non-DP connectors if we have
+ * no EDID
+ *
+			 * DP needs to report as connected if HPD is high
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+ dc_is_dvi_signal(link->connector_signal)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ return false;
+ }
+
+ if (link->type == dc_connection_sst_branch &&
+ link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_VGA_CONVERTER &&
+ reason == DETECT_REASON_HPDRX) {
+ /* Abort detection for DP-VGA adapters when EDID
+ * can't be read and detection reason is VGA-side
+ * hotplug
+ */
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+
+ return true;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ // Check if edid is the same
+ if ((prev_sink) &&
+ (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+ same_edid = is_same_edid(&prev_sink->dc_edid,
+ &sink->dc_edid);
+
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
+ if (dc_is_hdmi_signal(link->connector_signal))
+ read_scdc_caps(link->ddc, link->local_sink);
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ /*
+ * TODO debug why certain monitors don't like
+ * two link trainings
+ */
+ query_hdcp_capability(sink->sink_signal, link);
+ } else {
+ // If edid is the same, then discard new sink and revert back to original sink
+ if (same_edid) {
+ link_disconnect_remap(prev_sink, link);
+ sink = prev_sink;
+ prev_sink = NULL;
+ }
+ query_hdcp_capability(sink->sink_signal, link);
+ }
+
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+ if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
+ dp_trace_init(link);
+
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
+ CONN_DATA_DETECT(link,
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ }
+
+ DC_LOG_DETECTION_EDID_PARSER("%s: "
+ "manufacturer_id = %X, "
+ "product_id = %X, "
+ "serial_number = %X, "
+ "manufacture_week = %d, "
+ "manufacture_year = %d, "
+ "display_name = %s, "
+ "speaker_flag = %d, "
+ "audio_mode_count = %d\n",
+ __func__,
+ sink->edid_caps.manufacturer_id,
+ sink->edid_caps.product_id,
+ sink->edid_caps.serial_number,
+ sink->edid_caps.manufacture_week,
+ sink->edid_caps.manufacture_year,
+ sink->edid_caps.display_name,
+ sink->edid_caps.speaker_flags,
+ sink->edid_caps.audio_mode_count);
+
+ for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+ DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
+ "format_code = %d, "
+ "channel_count = %d, "
+ "sample_rate = %d, "
+ "sample_size = %d\n",
+ __func__,
+ i,
+ sink->edid_caps.audio_modes[i].format_code,
+ sink->edid_caps.audio_modes[i].channel_count,
+ sink->edid_caps.audio_modes[i].sample_rate,
+ sink->edid_caps.audio_modes[i].sample_size);
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ // Init dc_panel_config by HW config
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ // Pickup base DM settings
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
+ }
+
+ } else {
+ /* From Connected-to-Disconnected. */
+ link->type = dc_connection_none;
+ sink_caps.signal = SIGNAL_TYPE_NONE;
+ memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps));
+ /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
+ * is not cleared. If we emulate a DP signal on this connection, it thinks
+ * the dongle is still there and limits the number of modes we can emulate.
+		 * Clear dongle_max_pix_clk on disconnect to fix this.
+ */
+ link->dongle_max_pix_clk = 0;
+
+ dc_link_clear_dprx_states(link);
+ dp_trace_reset(link);
+ }
+
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
+ link->link_index, sink,
+ (sink_caps.signal ==
+ SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+ prev_sink, same_edid);
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ return true;
+}
+
+/*
+ * link_detect_connection_type() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ * Does not detect downstream devices, such as MST sinks
+ * or display connected through active dongles
+ */
+bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type)
+{
+ uint32_t is_hpd_high = 0;
+
+ if (link->connector_signal == SIGNAL_TYPE_LVDS) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+		/* in case it is not on */
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+ /* Link may not have physical HPD pin. */
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
+ if (link->is_hpd_pending || !dpia_query_hpd_status(link))
+ *type = dc_connection_none;
+ else
+ *type = dc_connection_single;
+
+ return true;
+ }
+
+ if (!query_hpd_status(link, &is_hpd_high))
+ goto hpd_gpio_failure;
+
+ if (is_hpd_high) {
+ *type = dc_connection_single;
+ /* TODO: need to do the actual detection */
+ } else {
+ *type = dc_connection_none;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* eDP is not connected, power it down */
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, false);
+ }
+ }
+
+ return true;
+
+hpd_gpio_failure:
+ return false;
+}
+
+bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ bool is_local_sink_detect_success;
+ bool is_delegated_to_mst_top_mgr = false;
+ enum dc_connection_type pre_link_type = link->type;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
+
+ if (is_local_sink_detect_success && link->local_sink)
+ verify_link_capability(link, link->local_sink, reason);
+
+ DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
+ link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
+
+ if (is_local_sink_detect_success && link->local_sink &&
+ dc_is_dp_signal(link->local_sink->sink_signal) &&
+ link->dpcd_caps.is_mst_capable)
+ is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
+
+ if (is_local_sink_detect_success &&
+ pre_link_type == dc_connection_mst_branch &&
+ link->type != dc_connection_mst_branch)
+ is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link);
+
+ return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr;
+}
+
+void link_clear_dprx_states(struct dc_link *link)
+{
+ memset(&link->dprx_states, 0, sizeof(link->dprx_states));
+}
+
+bool link_is_hdcp14(struct dc_link *link, enum signal_type signal)
+{
+ bool ret = false;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* HDMI doesn't report its HDCP (1.4) capability, so assume it is always capable.
+ * We could poll for BKSV, but some displays have an issue with this. Since it is
+ * so rare for a display not to be HDCP 1.4 capable, this assumption is OK.
+ */
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+bool link_is_hdcp22(struct dc_link *link, enum signal_type signal)
+{
+ bool ret = false;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+ link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+ (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+const struct dc_link_status *link_get_status(const struct dc_link *link)
+{
+ return &link->link_status;
+}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+ if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ dc_sink_retain(sink);
+
+ dc_link->remote_sinks[dc_link->sink_count] = sink;
+ dc_link->sink_count++;
+
+ return true;
+}
+
+struct dc_sink *link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+ if (len > DC_MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+
+ if (!init_data) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!init_data->link) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_sink = dc_sink_create(init_data);
+
+ if (!dc_sink)
+ return NULL;
+
+ memmove(dc_sink->dc_edid.raw_edid, edid, len);
+ dc_sink->dc_edid.length = len;
+
+ if (!link_add_remote_sink_helper(
+ link,
+ dc_sink))
+ goto fail_add_sink;
+
+ edid_status = dm_helpers_parse_edid_caps(
+ link,
+ &dc_sink->dc_edid,
+ &dc_sink->edid_caps);
+
+ /*
+ * Treat the device as having no EDID if EDID
+ * parsing fails
+ */
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
+ dc_sink->dc_edid.length = 0;
+ dm_error("Bad EDID, status%d!\n", edid_status);
+ }
+
+ return dc_sink;
+
+fail_add_sink:
+ dc_sink_release(dc_sink);
+ return NULL;
+}
+
+void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ int i;
+
+ if (!link->sink_count) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == sink) {
+ dc_sink_release(sink);
+ link->remote_sinks[i] = NULL;
+
+ /* shrink array to remove empty place */
+ while (i < link->sink_count - 1) {
+ link->remote_sinks[i] = link->remote_sinks[i+1];
+ i++;
+ }
+ link->remote_sinks[i] = NULL;
+ link->sink_count--;
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
new file mode 100644
index 000000000000..7da05078721e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DETECTION_H__
+#define __DC_LINK_DETECTION_H__
+#include "link.h"
+bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
+bool link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type);
+struct dc_sink *link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink);
+bool link_reset_cur_dp_mst_topology(struct dc_link *link);
+const struct dc_link_status *link_get_status(const struct dc_link *link);
+bool link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool link_is_hdcp22(struct dc_link *link, enum signal_type signal);
+void link_clear_dprx_states(struct dc_link *link);
+#endif /* __DC_LINK_DETECTION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h
deleted file mode 100644
index 669e995f825f..000000000000
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DC_INC_LINK_DP_DPIA_BW_H_
-#define DC_INC_LINK_DP_DPIA_BW_H_
-
-// XXX: TODO: Re-add for Phase 2
-/* Number of Host Routers per motherboard is 2 and 2 DPIA per host router */
-#define MAX_HR_NUM 2
-
-struct dc_host_router_bw_alloc {
- int max_bw[MAX_HR_NUM]; // The Max BW that each Host Router has available to be shared btw DPIAs
- int total_estimated_bw[MAX_HR_NUM]; // The Total Verified and available BW that Host Router has
-};
-
-/*
- * Enable BW Allocation Mode Support from the DP-Tx side
- *
- * @link: pointer to the dc_link struct instance
- *
- * return: SUCCESS or FAILURE
- */
-bool set_dptx_usb4_bw_alloc_support(struct dc_link *link);
-
-/*
- * Send a request from DP-Tx requesting to allocate BW remotely after
- * allocating it locally. This will get processed by CM and a CB function
- * will be called.
- *
- * @link: pointer to the dc_link struct instance
- * @req_bw: The requested bw in Kbyte to allocated
- *
- * return: none
- */
-void set_usb4_req_bw_req(struct dc_link *link, int req_bw);
-
-/*
- * CB function for when the status of the Req above is complete. We will
- * find out the result of allocating on CM and update structs accordingly
- *
- * @link: pointer to the dc_link struct instance
- *
- * return: none
- */
-void get_usb4_req_bw_resp(struct dc_link *link);
-
-#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
new file mode 100644
index 000000000000..2267fb097830
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -0,0 +1,2541 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file owns the programming sequence of stream's dpms state associated
+ * with the link and link's enable/disable sequences as result of the stream's
+ * dpms state change.
+ *
+ * TODO - The reason link owns stream's dpms programming sequence is
+ * because dpms programming sequence is highly dependent on underlying signal
+ * specific link protocols. This unfortunately causes link to own a portion of
+ * stream state programming sequence. This creates a gray area where the
+ * boundary between link and stream is not clearly defined.
+ */
+
+#include "link_dpms.h"
+#include "link_hwss.h"
+#include "link_validation.h"
+#include "accessories/link_fpga.h"
+#include "accessories/link_dp_trace.h"
+#include "protocols/link_dpcd.h"
+#include "protocols/link_ddc.h"
+#include "protocols/link_hpd.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_training.h"
+#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_dp_dpia_bw.h"
+
+#include "dm_helpers.h"
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dsc.h"
+#include "dccg.h"
+#include "clk_mgr.h"
+#include "atomfirmware.h"
+#define DC_LOGGER_INIT(logger)
+
+#define LINK_INFO(...) \
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+
+#define RETIMER_REDRIVER_INFO(...) \
+ DC_LOG_RETIMER_REDRIVER( \
+ __VA_ARGS__)
+#include "dc/dcn30/dcn30_vpg.h"
+
+#define MAX_MTP_SLOT_COUNT 64
+#define LINK_TRAINING_ATTEMPTS 4
+#define PEAK_FACTOR_X1000 1006
+
+void link_blank_all_dp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
+ (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
+ continue;
+
+ /* DP 2.0 spec requires that we read LTTPR caps first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ link_blank_dp_stream(dc->links[i], true);
+ }
+}
+
+void link_blank_all_edp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) ||
+ (!dc->links[i]->edp_sink_present))
+ continue;
+
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ link_blank_dp_stream(dc->links[i], true);
+ }
+}
+
+void link_blank_dp_stream(struct dc_link *link, bool hw_init)
+{
+ unsigned int j;
+ struct dc *dc = link->ctx->dc;
+ enum signal_type signal = link->connector_signal;
+
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->funcs->get_dig_frontend &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (fe != ENGINE_ID_UNKNOWN)
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
+ if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ dpcd_write_rx_power_ctrl(link, false);
+ }
+}
+
+void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
+{
+ struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_state *state = link->dc->current_state;
+ uint8_t count;
+ int i;
+ struct dc_stream_update stream_update;
+ bool dpms_off = true;
+ struct link_resource link_res = {0};
+
+ memset(&stream_update, 0, sizeof(stream_update));
+ stream_update.dpms_off = &dpms_off;
+
+ link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+ for (i = 0; i < count; i++) {
+ stream_update.stream = pipes[i]->stream;
+ dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
+ pipes[i]->stream, &stream_update,
+ state);
+ }
+
+ /* The link can also be enabled by the VBIOS. In that case it is not
+ * recorded in any pipe_ctx, so disable the link phy here to make sure
+ * it is completely off.
+ */
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
+}
+
+void link_resume(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+ program_hpd_filter(link);
+}
+
+/* This function returns true if the pipe is used to feed the video signal directly
+ * to the link.
+ */
+static bool is_master_pipe_for_link(const struct dc_link *link,
+ const struct pipe_ctx *pipe)
+{
+ return (pipe->stream &&
+ pipe->stream->link &&
+ pipe->stream->link == link &&
+ pipe->top_pipe == NULL &&
+ pipe->prev_odm_pipe == NULL);
+}
+
+/*
+ * This function finds all master pipes feeding a given link with dpms set
+ * to on in the given dc state.
+ */
+void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
+ struct dc_state *state,
+ uint8_t *count,
+ struct pipe_ctx *pipes[MAX_PIPES])
+{
+ int i;
+ struct pipe_ctx *pipe = NULL;
+
+ *count = 0;
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (is_master_pipe_for_link(link, pipe) &&
+ pipe->stream->dpms_off == false) {
+ pipes[(*count)++] = pipe;
+ }
+ }
+}
+
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+ enum engine_id eng_id,
+ struct ext_hdmi_settings *settings)
+{
+ bool result = false;
+ int i = 0;
+ struct integrated_info *integrated_info =
+ pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+ if (integrated_info == NULL)
+ return false;
+
+ /*
+ * Get retimer settings from SBIOS for passing the SI eye test on DCE11.
+ * The setting values vary based on board revision and port id, so the
+ * values for each port are passed in by SBIOS.
+ */
+
+ // Check if the current BIOS contains ext HDMI settings
+ if (integrated_info->gpu_cap_info & 0x20) {
+ switch (eng_id) {
+ case ENGINE_ID_DIGA:
+ settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp0_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp0_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp1_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp1_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp2_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp2_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp3_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp3_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ if (result == true) {
+ // Validate settings from bios integrated info table
+ if (settings->slv_addr == 0)
+ return false;
+ if (settings->reg_num > 9)
+ return false;
+ if (settings->reg_num_6g > 3)
+ return false;
+
+ for (i = 0; i < settings->reg_num; i++) {
+ if (settings->reg_settings[i].i2c_reg_index > 0x20)
+ return false;
+ }
+
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
+ return false;
+ }
+ }
+ }
+
+ return result;
+}
+
+static bool write_i2c(struct pipe_ctx *pipe_ctx,
+ uint8_t address, uint8_t *buffer, uint32_t length)
+{
+ struct i2c_command cmd = {0};
+ struct i2c_payload payload = {0};
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.number_of_payloads = 1;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
+
+ payload.address = address;
+ payload.data = buffer;
+ payload.length = length;
+ payload.write = true;
+ cmd.payloads = &payload;
+
+ if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
+ pipe_ctx->stream->link, &cmd))
+ return true;
+
+ return false;
+}
+
+static void write_i2c_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz,
+ struct ext_hdmi_settings *settings)
+{
+ uint8_t slave_address = (settings->slv_addr >> 1);
+ uint8_t buffer[2];
+ const uint8_t apply_rx_tx_change = 0x4;
+ uint8_t offset = 0xA;
+ uint8_t value = 0;
+ int i = 0;
+ bool i2c_success = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Start Ext-Hdmi programming*/
+
+ for (i = 0; i < settings->reg_num; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings[i].i2c_reg_val;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+ settings->reg_settings[i].i2c_reg_index == 0xB ||
+ settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings[i].i2c_reg_val;
+ else {
+ i2c_success =
+ link_query_ddc_data(
+ pipe_ctx->stream->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+ }
+ }
+
+ /* Apply 6G settings for pixel clocks above 340MHz */
+ if (is_over_340mhz) {
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ /* Apply 6G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings_6g[i].i2c_reg_val;
+ else {
+ i2c_success =
+ link_query_ddc_data(
+ pipe_ctx->stream->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+ }
+ }
+ }
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ }
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
+}
+
+static void write_i2c_default_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xBA >> 1);
+ uint8_t buffer[2];
+ bool i2c_success = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Program Slave Address for tuning signal integrity */
+ /* Write offset 0x0A to 0x13 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x13;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0B to 0xDA or 0xD8 */
+ buffer[0] = 0x0B;
+ buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0C to 0x1D or 0x91 */
+ buffer[0] = 0x0C;
+ buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
+}
+
+static void write_i2c_redriver_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xF0 >> 1);
+ uint8_t buffer[16];
+ bool i2c_success = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ // Program Slave Address for tuning signal integrity
+ buffer[3] = 0x4E;
+ buffer[4] = 0x4E;
+ buffer[5] = 0x4E;
+ buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
+
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\
+ \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\
+ offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\
+ i2c_success = %d\n",
+ slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
+
+ if (!i2c_success)
+ DC_LOG_DEBUG("Set redriver failed");
+}
+
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ struct link_encoder *link_enc = NULL;
+ struct cp_psp_stream_config config = {0};
+ enum dp_panel_mode panel_mode =
+ dp_get_panel_mode(pipe_ctx->stream->link);
+
+ if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
+ return;
+
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ ASSERT(link_enc);
+ if (link_enc == NULL)
+ return;
+
+ /* otg instance */
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+
+ /* dig front end */
+ config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
+
+ /* stream encoder index */
+ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ config.stream_enc_idx =
+ pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+
+ /* dig back end */
+ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
+
+ /* link encoder index */
+ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+
+ /* dio output index is dpia index for DPIA endpoint & dcio index by default */
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
+ else
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ /* phy index */
+ config.phy_idx = resource_transmitter_to_phy_idx(
+ pipe_ctx->stream->link->dc, link_enc->transmitter);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
+ config.phy_idx = 0;
+
+ /* stream properties */
+ config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
+ config.mst_enabled = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
+ config.dp2_enabled = dp_is_128b_132b_signal(pipe_ctx) ? 1 : 0;
+ config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
+ 1 : 0;
+ config.dpms_off = dpms_off;
+
+ /* dm stream context */
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+}
+
+static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ return;
+
+ dc->hwss.set_avmute(pipe_ctx, enable);
+}
+
+static void enable_mst_on_sink(struct dc_link *link, bool enable)
+{
+ unsigned char mstmCntl;
+
+ core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+ if (enable)
+ mstmCntl |= DP_MST_EN;
+ else
+ mstmCntl &= (~DP_MST_EN);
+
+ core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+}
+
+static void dsc_optc_config_log(struct display_stream_compressor *dsc,
+ struct dsc_optc_config *config)
+{
+ uint32_t precision = 1 << 28;
+ uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
+ uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
+ uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
+ /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC
+ * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is
+ * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
+ */
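+ /* Illustrative example (assumed value, not from any spec): with 2^28
+ * fractional precision, bytes_per_pixel = 0x18000000 is 0x18000000 / 2^28
+ * = 1.5 bytes per pixel; the fractional part 0x08000000 * 10^7 / 2^28 =
+ * 5000000, so the value is logged as "1.5000000".
+ */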
+ ll_bytes_per_pix_fraq *= 10000000;
+ ll_bytes_per_pix_fraq /= precision;
+
+ DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
+ config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
+ DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
+ DC_LOG_DSC("\tslice_width %d", config->slice_width);
+}
+
+static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
+ if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ result = true;
+ else
+ result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
+ return result;
+}
+
+/* The stream with these settings can be sent (unblanked) only after DSC has been
+ * enabled on the RX first, i.e. after dp_set_dsc_on_rx() has been called
+ */
+void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ struct dsc_optc_config dsc_optc_cfg;
+ enum optc_dsc_mode optc_dsc_mode;
+
+ /* Enable DSC hw block */
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
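+ /* Illustrative example (assumed figures): with opp_cnt == 2 (one ODM
+ * split) and 8 horizontal slices configured in total, each DSC instance
+ * is programmed with 4 slices over half of the original pic_width.
+ */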
+
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+
+ odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
+ odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+ }
+ dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
+ dsc_cfg.pic_width *= opp_cnt;
+
+ optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+
+ /* Enable DSC in encoder */
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+ && !dp_is_128b_132b_signal(pipe_ctx)) {
+ DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
+ optc_dsc_mode,
+ dsc_optc_cfg.bytes_per_pixel,
+ dsc_optc_cfg.slice_width);
+
+ /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
+ }
+
+ /* Enable DSC in OPTC */
+ DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
+ optc_dsc_mode,
+ dsc_optc_cfg.bytes_per_pixel,
+ dsc_optc_cfg.slice_width);
+ } else {
+ /* disable DSC in OPTC */
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(
+ pipe_ctx->stream_res.tg,
+ OPTC_DSC_DISABLED, 0, 0);
+
+ /* disable DSC in stream encoder */
+ if (dc_is_dp_signal(stream->signal)) {
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
+ }
+
+ /* disable DSC block */
+ pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
+ }
+}
+
+/*
+ * For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled;
+ * hence the PPS info packet update needs to use a frame update instead of an immediate
+ * update. The immediate_update parameter was added for this purpose.
+ * The decision to use a frame update is hard-coded in dp_update_dsc_config(),
+ * which is the only place that passes "false" for immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
+ */
+bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
+ if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+ return false;
+
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ uint8_t dsc_packed_pps[128];
+
+ memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+ memset(dsc_packed_pps, 0, 128);
+
+ /* Enable DSC hw block */
+ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+
+ dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
+ memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
+ if (dc_is_dp_signal(stream->signal)) {
+ DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ }
+ } else {
+ /* disable DSC PPS in stream encoder */
+ memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
+ if (dc_is_dp_signal(stream->signal)) {
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
+ }
+
+ return true;
+}
+
+bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ bool result = false;
+
+ if (!pipe_ctx->stream->timing.flags.DSC)
+ goto out;
+ if (!dsc)
+ goto out;
+
+ if (enable) {
+ link_set_dsc_on_stream(pipe_ctx, true);
+ result = true;
+ } else {
+ dp_set_dsc_on_rx(pipe_ctx, false);
+ link_set_dsc_on_stream(pipe_ctx, false);
+ result = true;
+ }
+out:
+ return result;
+}
+
+bool link_update_dsc_config(struct pipe_ctx *pipe_ctx)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (!pipe_ctx->stream->timing.flags.DSC)
+ return false;
+ if (!dsc)
+ return false;
+
+ link_set_dsc_on_stream(pipe_ctx, true);
+ link_set_dsc_pps_packet(pipe_ctx, true, false);
+ return true;
+}
+
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ memset(&old_downspread, 0, sizeof(old_downspread));
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw, sizeof(old_downspread));
+
+ new_downspread.raw = old_downspread.raw;
+
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw) {
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw, sizeof(new_downspread));
+ }
+
+ } else {
+ dm_helpers_mst_enable_stream_features(stream);
+ }
+}
+
+static void log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
+{
+ const uint32_t VCP_Y_PRECISION = 1000;
+ uint64_t vcp_x, vcp_y;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision
+ avg_time_slots_per_mtp = dc_fixpt_add(
+ avg_time_slots_per_mtp,
+ dc_fixpt_from_fraction(
+ 1,
+ 2*VCP_Y_PRECISION));
+
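+ /* Illustrative example (assumed value): avg_time_slots_per_mtp = 15.3337
+ * becomes 15.3342 after the rounding term, giving vcp_x = 15 and
+ * vcp_y = 334, logged as "X: 15 Y: 334/1000".
+ */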
+ vcp_x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+ vcp_y = dc_fixpt_floor(
+ dc_fixpt_mul_int(
+ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ dc_fixpt_floor(
+ avg_time_slots_per_mtp)),
+ VCP_Y_PRECISION));
+
+ if (link->type == dc_connection_mst_branch)
+ DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
+ "X: %llu "
+ "Y: %llu/%d",
+ vcp_x,
+ vcp_y,
+ VCP_Y_PRECISION);
+ else
+ DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
+ "X: %llu "
+ "Y: %llu/%d",
+ vcp_x,
+ vcp_y,
+ VCP_Y_PRECISION);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+{
+ struct fixed31_32 mbytes_per_sec;
+ uint32_t link_rate_in_mbytes_per_sec = dp_link_bandwidth_kbps(stream->link,
+ &stream->link->cur_link_settings);
+ link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
+
+ mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
+
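+ /* Illustrative example (assumed link config): HBR2 x4 provides roughly
+ * 17,280,000 kbps of payload bandwidth = 2160 MBytes/sec, which divided
+ * by 54 gives 40 PBN per MTP time slot.
+ */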
+ return dc_fixpt_div_int(mbytes_per_sec, 54);
+}
+
+static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
+{
+ struct fixed31_32 peak_kbps;
+ uint32_t numerator = 0;
+ uint32_t denominator = 1;
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+ * common multiplier to render an integer PBN for all link rate/lane
+ * counts combinations
+ * calculate
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps /= (8 * 1000) convert kilobits/sec to MBytes/sec
+ */
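+ /* Worked example (illustrative only): a ~3,564,000 kbps stream (1080p60
+ * at 24 bpp) yields 3564000 * 64 * 1006 / (54 * 8 * 1000 * 1000), which
+ * is about 531 PBN.
+ */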
+
+ numerator = 64 * PEAK_FACTOR_X1000;
+ denominator = 54 * 8 * 1000 * 1000;
+ kbps *= numerator;
+ peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
+
+ return peak_kbps;
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+ uint64_t kbps;
+
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
+ return get_pbn_from_bw_in_kbps(kbps);
+}
+
+// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
+static void get_lane_status(
+ struct dc_link *link,
+ uint32_t lane_count,
+ union lane_status *status,
+ union lane_align_status_updated *status_updated)
+{
+ unsigned int lane;
+ uint8_t dpcd_buf[3] = {0};
+
+ if (status == NULL || status_updated == NULL) {
+ return;
+ }
+
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ dpcd_buf,
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane < lane_count; lane++) {
+ status[lane].raw = dp_get_nibble_at_index(&dpcd_buf[0], lane);
+ }
+
+ status_updated->raw = dpcd_buf[2];
+}
+
+static bool poll_for_allocation_change_trigger(struct dc_link *link)
+{
+ /* wait for the ACT (allocation change trigger) to be handled */
+ int i;
+ const int act_retries = 30;
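+ /* Note (derived from the constants used below): 30 retries with a 5 ms
+ * sleep between polls gives the branch roughly 150 ms to report
+ * ACT_HANDLED.
+ */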
+ enum act_return_status result = ACT_FAILED;
+ union payload_table_update_status update_status = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated lane_status_updated;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link->aux_access_disabled)
+ return true;
+ for (i = 0; i < act_retries; i++) {
+ get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
+
+ if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(lane_status_updated)) {
+ DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
+ "polling for ACT handled.");
+ result = ACT_LINK_LOST;
+ break;
+ }
+ core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ if (update_status.bits.ACT_HANDLED == 1) {
+ DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
+ result = ACT_SUCCESS;
+ break;
+ }
+
+ fsleep(5000);
+ }
+
+ if (result == ACT_FAILED) {
+ DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
+ "continue on. Something is wrong with the branch.");
+ }
+
+ return (result == ACT_SUCCESS);
+}
+
+static void update_mst_stream_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
+ const struct dc_dp_mst_stream_allocation_table *proposed_table)
+{
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
+ struct link_mst_stream_allocation *dc_alloc;
+
+ int i;
+ int j;
+
+ /* if DRM proposed_table has more than one new payload */
+ ASSERT(proposed_table->stream_count -
+ link->mst_stream_alloc_table.stream_count < 2);
+
+ /* copy proposed_table to link, add stream encoder */
+ for (i = 0; i < proposed_table->stream_count; i++) {
+
+ for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+ dc_alloc =
+ &link->mst_stream_alloc_table.stream_allocations[j];
+
+ if (dc_alloc->vcp_id ==
+ proposed_table->stream_allocations[i].vcp_id) {
+
+ work_table[i] = *dc_alloc;
+ work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count;
+ break; /* exit j loop */
+ }
+ }
+
+ /* new vcp_id */
+ if (j == link->mst_stream_alloc_table.stream_count) {
+ work_table[i].vcp_id =
+ proposed_table->stream_allocations[i].vcp_id;
+ work_table[i].slot_count =
+ proposed_table->stream_allocations[i].slot_count;
+ work_table[i].stream_enc = stream_enc;
+ work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
+ }
+ }
+
+ /* update link->mst_stream_alloc_table with work_table */
+ link->mst_stream_alloc_table.stream_count =
+ proposed_table->stream_count;
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+ link->mst_stream_alloc_table.stream_allocations[i] =
+ work_table[i];
+}
+
+static void remove_stream_from_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *dio_stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
+{
+ int i = 0;
+ struct link_mst_stream_allocation_table *table =
+ &link->mst_stream_alloc_table;
+
+ if (hpo_dp_stream_enc) {
+ for (; i < table->stream_count; i++)
+ if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
+ break;
+ } else {
+ for (; i < table->stream_count; i++)
+ if (dio_stream_enc == table->stream_allocations[i].stream_enc)
+ break;
+ }
+
+ if (i < table->stream_count) {
+ i++;
+ for (; i < table->stream_count; i++)
+ table->stream_allocations[i-1] = table->stream_allocations[i];
+ memset(&table->stream_allocations[table->stream_count-1], 0,
+ sizeof(struct link_mst_stream_allocation));
+ table->stream_count--;
+ }
+}
+
+static enum dc_status deallocate_mst_payload_with_temp_drm_wa(
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+ /* adjust for drm changes */
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ const struct dc_link_settings empty_link_settings = {0};
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false))
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+ DC_LOG_MST("%s"
+ "stream_count: %d: ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_DEBUG("Unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+ }
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+
+ return DC_OK;
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ const struct dc_link_settings empty_link_settings = {0};
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link->dc->debug.temp_mst_deallocation_sequence)
+ return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx);
+
+ /* deallocate_mst_payload is called before the link is disabled. On a mode
+ * change or a monitor disable/enable, a new stream is created that is not
+ * in link->stream[] yet; no payload has been allocated for it, so no
+ * deallocation should be done for it. For a new mode set, map_resources
+ * will get an engine for the new stream, so stream_enc->id should still be
+ * valid at this point.
+ */
+
+ /* slot X.Y */
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+
+ if (mst_mode) {
+ /* when the link is in MST mode, rely on the MST manager to remove
+ * the payload
+ */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false))
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ } else {
+ /* when link is no longer in mst mode (mst hub unplugged),
+ * remove payload with default dc logic
+ */
+ remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ }
+
+ DC_LOG_MST("%s"
+ "stream_count: %d: ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_DEBUG("Unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+ }
+
+ return DC_OK;
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ int i;
+ enum act_return_status ret;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* enable_link_dp_mst has already checked link->enabled_stream_count
+ * and that the stream is in link->stream[]. This is called during set
+ * mode, so stream_enc is available.
+ */
+
+ /* calculate the VC payload for the stream: stream_alloc */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true))
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+	DC_LOG_MST("%s stream_count: %d:\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* program DP source TX for payload */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link,
+ &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ /* send down message */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* slot X.Y for only current stream */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ if (pbn_per_slot.value == 0) {
+		DC_LOG_ERROR("Failure: pbn_per_slot must not be 0; returning DC_UNSUPPORTED_VALUE\n");
+ return DC_UNSUPPORTED_VALUE;
+ }
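+	/* avg_time_slots_per_mtp is the "X.Y" value: the stream's PBN divided
+	 * by the PBN a single time slot can carry, i.e. the fraction of the
+	 * link's time slots this stream occupies. For example (illustrative
+	 * numbers only), a stream needing 1000 PBN on a link whose slots carry
+	 * 100 PBN each averages 10.0 time slots per MTP.
+	 */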
+ pbn = get_pbn_from_timing(pipe_ctx);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+
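+/* For 128b/132b SST the whole link forms a single MTP of MAX_MTP_SLOT_COUNT
+ * time slots, so the X.Y value is simply the stream bandwidth divided by the
+ * bandwidth a single time slot provides.
+ */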
+struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link)
+{
+ struct fixed31_32 link_bw_effective =
+ dc_fixpt_from_int(
+ dp_link_bandwidth_kbps(link, &link->cur_link_settings));
+ struct fixed31_32 timeslot_bw_effective =
+ dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
+ struct fixed31_32 timing_bw =
+ dc_fixpt_from_int(
+ dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ struct fixed31_32 avg_time_slots_per_mtp =
+ dc_fixpt_div(timing_bw, timeslot_bw_effective);
+
+ return avg_time_slots_per_mtp;
+}
+
+
+static bool write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate)
+{
+ const uint8_t vc_id = 1; /// VC ID always 1 for SST
+ const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
+ bool result = false;
+ uint8_t req_slot_count = 0;
+ struct fixed31_32 avg_time_slots_per_mtp = { 0 };
+ union payload_table_update_status update_status = { 0 };
+ const uint32_t max_retries = 30;
+ uint32_t retries = 0;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (allocate) {
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ /// Validation should filter out modes that exceed link BW
+ ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT);
+ if (req_slot_count > MAX_MTP_SLOT_COUNT)
+ return false;
+ } else {
+ /// Leave req_slot_count = 0 if allocate is false.
+ }
+
+ proposed_table->stream_count = 1; /// Always 1 stream for SST
+ proposed_table->stream_allocations[0].slot_count = req_slot_count;
+ proposed_table->stream_allocations[0].vcp_id = vc_id;
+
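+	/* Without AUX access there is no DPCD payload table to update on the
+	 * branch; report success so that source-side programming can still
+	 * proceed with the proposed table.
+	 */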
+ if (link->aux_access_disabled)
+ return true;
+
+ /// Write DPCD 2C0 = 1 to start updating
+ update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ /// Program the changes in DPCD 1C0 - 1C2
+ ASSERT(vc_id == 1);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_SET,
+ &vc_id,
+ 1);
+
+ ASSERT(start_time_slot == 0);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
+ &start_time_slot,
+ 1);
+
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
+ &req_slot_count,
+ 1);
+
+	/// Poll till DPCD 2C0 reads 1
+	/// Try for at least 150ms (30 retries, with a 5ms delay after each attempt)
+
+ while (retries < max_retries) {
+ if (core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1) == DC_OK) {
+ if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
+ DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
+ result = true;
+ break;
+ }
+ } else {
+ union dpcd_rev dpcdRev;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ &dpcdRev.raw,
+ 1) != DC_OK) {
+ DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
+ "of sink while polling payload table "
+ "updated status bit.");
+ break;
+ }
+ }
+ retries++;
+ fsleep(5000);
+ }
+
+ if (!result && retries == max_retries) {
+		DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
+				"continuing on. Something is wrong with the branch.");
+ // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
+ }
+
+ return result;
+}
+
+/*
+ * Payload allocation/deallocation for SST introduced in DP2.0
+ */
+static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx,
+ bool allocate)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ const struct dc_link_settings empty_link_settings = {0};
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
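+	/* Sequence: on deallocate, drop the throttled VCP size to 0 first;
+	 * then write the DPCD payload table to the branch, program the DP
+	 * source allocation table, poll for ACT, and finally, on allocate,
+	 * program the new VCP size.
+	 */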
+
+ /* slot X.Y for SST payload deallocate */
+ if (!allocate) {
+ avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+ avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+ }
+
+	/* calculate VC payload and update branch with new payload allocation table */
+ if (!write_128b_132b_sst_payload_allocation_table(
+ stream,
+ link,
+ &proposed_table,
+ allocate)) {
+ DC_LOG_ERROR("SST Update Payload: Failed to update "
+ "allocation table for "
+ "pipe idx: %d\n",
+ pipe_ctx->pipe_idx);
+ return DC_FAIL_DP_PAYLOAD_ALLOCATION;
+ }
+
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+
+ ASSERT(proposed_table.stream_count == 1);
+
+ //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
+ DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
+ "vcp_id: %d "
+ "slot_count: %d\n",
+ (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
+ proposed_table.stream_allocations[0].vcp_id,
+ proposed_table.stream_allocations[0].slot_count);
+
+ /* program DP source TX for payload */
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &proposed_table);
+
+ /* poll for ACT handled */
+ if (!poll_for_allocation_change_trigger(link)) {
+ // Failures will result in blackscreen and errors logged
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* slot X.Y for SST payload allocate */
+ if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
+
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+ avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+ }
+
+ /* Always return DC_OK.
+ * If part of sequence fails, log failure(s) and show blackscreen
+ */
+ return DC_OK;
+}
+
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
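+	/* Reduce sequence: shrink the source's throttled VCP size first, then
+	 * send the updated ALLOCATE_PAYLOAD, update the branch and source
+	 * payload tables, and poll for ACT.
+	 */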
+
+ /* decrease throttled vcp size */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ } else {
+		DC_LOG_WARNING("Failed to update MST allocation table for pipe idx: %d\n",
+				pipe_ctx->pipe_idx);
+ }
+
+	DC_LOG_MST("%s stream_count: %d:\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ return DC_OK;
+}
+
+enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ enum act_return_status ret;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
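+	/* Increase sequence is the reverse: update the branch and source
+	 * payload tables, poll for ACT and send ALLOCATE_PAYLOAD first, then
+	 * grow the source's throttled VCP size.
+	 */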
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ }
+
+	DC_LOG_MST("%s stream_count: %d:\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* increase throttled vcp size */
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ pbn_per_slot = get_pbn_per_slot(stream);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+
+static void disable_link_dp(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc_link_settings link_settings = link->cur_link_settings;
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->mst_stream_alloc_table.stream_count > 0)
+ /* disable MST link only when last vc payload is deallocated */
+ return;
+
+ dp_disable_link_phy(link, link_res, signal);
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, false);
+ }
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ /* set the sink to SST mode after disabling the link */
+ enable_mst_on_sink(link, false);
+
+ if (link_dp_get_encoding_format(&link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, link_res, false);
+ }
+}
+
+static void disable_link(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ if (dc_is_dp_signal(signal)) {
+ disable_link_dp(link, link_res, signal);
+ } else if (signal != SIGNAL_TYPE_VIRTUAL) {
+ link->dc->hwss.disable_link_output(link, link_res, signal);
+ }
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		/* MST: disable the link only when no stream uses the link */
+ if (link->mst_stream_alloc_table.stream_count <= 0)
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
+}
+
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ enum dc_color_depth display_color_depth;
+ enum engine_id eng_id;
+ struct ext_hdmi_settings settings = {0};
+ bool is_over_340mhz = false;
+ bool is_vga_mode = (stream->timing.h_addressable == 640)
+ && (stream->timing.v_addressable == 480);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
+ if (stream->phy_pix_clk > 340000)
+ is_over_340mhz = true;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
+ write_i2c_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz, &settings);
+ } else {
+ write_i2c_default_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz);
+ }
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
+ }
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ write_scdc_data(
+ stream->link->ddc,
+ stream->phy_pix_clk,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
+ memset(&stream->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+
+ display_color_depth = stream->timing.display_color_depth;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ display_color_depth = COLOR_DEPTH_888;
+
+ dc->hwss.enable_tmds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->stream->signal,
+ pipe_ctx->clock_source->id,
+ display_color_depth,
+ stream->phy_pix_clk);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ read_scdc_data(link->ddc);
+}
+
+static enum dc_status enable_link_dp(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_status status;
+ bool skip_video_pattern;
+ struct dc_link *link = stream->link;
+ const struct dc_link_settings *link_settings =
+ &pipe_ctx->link_config.dp_link_settings;
+ bool fec_enable;
+ int i;
+ bool apply_seamless_boot_optimization = false;
+ uint32_t bl_oled_enable_delay = 50; // in ms
+ uint32_t post_oui_delay = 30; // 30ms
+ /* Reduce link bandwidth between failed link training attempts. */
+ bool do_fallback = false;
+ int lt_attempts = LINK_TRAINING_ATTEMPTS;
+
+ // Increase retry count if attempting DP1.x on FIXED_VS link
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ lt_attempts = 10;
+
+ // check for seamless boot
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i]->apply_seamless_boot_optimization) {
+ apply_seamless_boot_optimization = true;
+ break;
+ }
+ }
+
+ /*
+ * If the link is DP-over-USB4 do the following:
+ * - Train with fallback when enabling DPIA link. Conventional links are
+ * trained with fallback during sink detection.
+ * - Allocate only what the stream needs for bw in Gbps. Inform the CM
+	 *   in case the stream needs more or less bw than was allocated
+	 *   earlier at plug time.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ do_fallback = true;
+ }
+
+ /*
+ * Temporary w/a to get DP2.0 link rates to work with SST.
+ * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
+ */
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->dc->debug.set_mst_en_for_sst) {
+ enable_mst_on_sink(link, true);
+ }
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
+ /*in case it is not on*/
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
+ } else {
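+		/* For 8b/10b links the symbol clock in kHz is the raw link_rate
+		 * value (a multiple of 0.27 Gbps) times LINK_RATE_REF_FREQ_IN_KHZ.
+		 */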
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+ link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+ }
+
+ // during mode switch we do DP_SET_POWER off then on, and OUI is lost
+ dpcd_set_source_specific_data(link);
+ if (link->dpcd_sink_ext_caps.raw != 0) {
+ post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
+ msleep(post_oui_delay);
+ }
+
+ // similarly, mode switch can cause loss of cable ID
+ dpcd_write_cable_id_to_dprx(link);
+
+ skip_video_pattern = true;
+
+ if (link_settings->link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(link_settings,
+ skip_video_pattern,
+ lt_attempts,
+ pipe_ctx,
+ pipe_ctx->stream->signal,
+ do_fallback)) {
+ status = DC_OK;
+ } else {
+ status = DC_FAIL_DP_LINK_TRAINING;
+ }
+
+ if (link->preferred_training_settings.fec_enable)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_enable(link, fec_enable);
+
+ // during mode set we do DP_SET_POWER off then on, aux writes are lost
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+ set_default_brightness_aux(link); // TODO: use cached if known
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ edp_backlight_enable_aux(link, true);
+ }
+
+ return status;
+}
+
+static enum dc_status enable_link_edp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc *dc = stream->ctx->dc;
+
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
+
+ memset(&stream->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+ dc->hwss.enable_lvds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->clock_source->id,
+ stream->phy_pix_clk);
+
+}
+
+static enum dc_status enable_link_dp_mst(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ unsigned char mstm_cntl;
+
+	/* The sink signal type after an MST branch is MST. Multiple MST sinks
+	 * share one link. The link DP PHY is enabled and trained only once.
+	 */
+ if (link->link_status.link_active)
+ return DC_OK;
+
+ /* clear payload table */
+ core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
+ if (mstm_cntl & DP_MST_EN)
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
+ /* to make sure the pending down rep can be processed
+ * before enabling the link
+ */
+ dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
+
+ /* set the sink to MST mode before enabling the link */
+ enable_mst_on_sink(link, true);
+
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static enum dc_status enable_link(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+	/* There are some scenarios where the driver is unloaded with a display
+	 * still enabled. When the driver is reloaded, the display may not
+	 * light up if there is a mismatch between the old and new link
+	 * settings. We need to call disable first before enabling with the
+	 * new link settings.
+	 */
+ if (link->link_status.link_active) {
+ disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
+
+ switch (pipe_ctx->stream->signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ status = enable_link_dp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_edp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ status = enable_link_dp_mst(state, pipe_ctx);
+ msleep(200);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ enable_link_hdmi(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ enable_link_lvds(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ status = DC_OK;
+ break;
+ default:
+ break;
+ }
+
+ if (status == DC_OK) {
+ pipe_ctx->stream->link->link_status.link_active = true;
+ }
+
+ return status;
+}
+
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+ ASSERT(is_master_pipe_for_link(link, pipe_ctx));
+
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ if (pipe_ctx->stream->sink) {
+ if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ pipe_ctx->stream->sink->edid_caps.display_name,
+ pipe_ctx->stream->signal);
+ }
+ }
+
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+ if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) {
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ set_avmute(pipe_ctx, true);
+ }
+
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ update_psp_stream_config(pipe_ctx, true);
+ dc->hwss.blank_stream(pipe_ctx);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ dp_is_128b_132b_signal(pipe_ctx))
+ update_sst_payload(pipe_ctx, false);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ struct ext_hdmi_settings settings = {0};
+ enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ unsigned short masked_chip_caps = link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+		// Need to inform the sink that it is going to use legacy HDMI mode.
+		write_scdc_data(
+			link->ddc,
+			165000, // vbios only handles 165 MHz.
+ false);
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
+ write_i2c_retimer_setting(pipe_ctx,
+ false, false, &settings);
+ else
+ write_i2c_default_retimer_setting(pipe_ctx,
+ false, false);
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, false);
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ !dp_is_128b_132b_signal(pipe_ctx)) {
+
+ /* In DP1.x SST mode, our encoder will go to TPS1
+ * when link is on but stream is off.
+ * Disabling link before stream will avoid exposing TPS1 pattern
+		 * during the disable sequence as it can confuse some receivers'
+		 * state machines.
+ * In DP2 or MST mode, our encoder will stay video active
+ */
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ dc->hwss.disable_stream(pipe_ctx);
+ } else {
+ dc->hwss.disable_stream(pipe_ctx);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
+
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, false);
+ }
+ if (dp_is_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+ }
+
+ if (vpg && vpg->funcs->vpg_powerdown)
+ vpg->funcs->vpg_powerdown(vpg);
+}
+
+void link_set_dpms_on(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ enum dc_status status;
+ struct link_encoder *link_enc;
+ enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
+ ASSERT(is_master_pipe_for_link(link, pipe_ctx));
+
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ if (pipe_ctx->stream->sink) {
+ if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ pipe_ctx->stream->sink->edid_caps.display_name,
+ pipe_ctx->stream->signal);
+ }
+ }
+
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
+ && !dp_is_128b_132b_signal(pipe_ctx)) {
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
+ }
+
+ pipe_ctx->stream->link->link_state_valid = true;
+
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ otg_out_dest = OUT_MUX_HPO_DP;
+ else
+ otg_out_dest = OUT_MUX_DIO;
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
+ }
+
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ bool apply_edp_fast_boot_optimization =
+ pipe_ctx->stream->apply_edp_fast_boot_optimization;
+
+ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+
+ // Enable VPG before building infoframe
+ if (vpg && vpg->funcs->vpg_poweron)
+ vpg->funcs->vpg_poweron(vpg);
+
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
+ /* Do not touch link on seamless boot optimization. */
+ if (pipe_ctx->stream->apply_seamless_boot_optimization) {
+ pipe_ctx->stream->dpms_off = false;
+
+ /* Still enable stream features & audio on seamless boot for DP external displays */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ enable_stream_features(pipe_ctx);
+ dc->hwss.enable_audio_stream(pipe_ctx);
+ }
+
+ update_psp_stream_config(pipe_ctx, false);
+ return;
+ }
+
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ apply_edp_fast_boot_optimization &&
+ !pipe_ctx->stream->timing.flags.DSC &&
+ !pipe_ctx->next_odm_pipe) {
+ pipe_ctx->stream->dpms_off = false;
+ update_psp_stream_config(pipe_ctx, false);
+ return;
+ }
+
+ if (pipe_ctx->stream->dpms_off)
+ return;
+
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, true);
+
+ }
+
+ status = enable_link(state, pipe_ctx);
+
+ if (status != DC_OK) {
+ DC_LOG_WARNING("enabling link %u failed: %d\n",
+ pipe_ctx->stream->link->link_index,
+ status);
+
+ /* Abort stream enable *unless* the failure was due to
+ * DP link training - some DP monitors will recover and
+ * show the stream anyway. But MST displays can't proceed
+ * without link training.
+ */
+ if (status != DC_FAIL_DP_LINK_TRAINING ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			if (!stream->link->link_status.link_active)
+ disable_link(stream->link, &pipe_ctx->link_res,
+ pipe_ctx->stream->signal);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ }
+
+		/* turn off otg test pattern if enabled */
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
+
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+ dp_is_128b_132b_signal(pipe_ctx))) {
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
+ }
+
+ dc->hwss.enable_stream(pipe_ctx);
+
+		/* Set DSC PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_on_rx(pipe_ctx, true);
+ link_set_dsc_pps_packet(pipe_ctx, true, true);
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ dp_is_128b_132b_signal(pipe_ctx))
+ update_sst_payload(pipe_ctx, true);
+
+ dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->link->cur_link_settings);
+
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ enable_stream_features(pipe_ctx);
+ update_psp_stream_config(pipe_ctx, false);
+
+ dc->hwss.enable_audio_stream(pipe_ctx);
+
+ } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, true);
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ set_avmute(pipe_ctx, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
new file mode 100644
index 000000000000..9398f9c1666a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DPMS_H__
+#define __DC_LINK_DPMS_H__
+
+#include "link.h"
+void link_set_dpms_on(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx);
+void link_resume(struct dc_link *link);
+void link_blank_all_dp_displays(struct dc *dc);
+void link_blank_all_edp_displays(struct dc *dc);
+void link_blank_dp_stream(struct dc_link *link, bool hw_init);
+void link_set_all_streams_dpms_off_for_link(struct dc_link *link);
+void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
+ struct dc_state *state,
+ uint8_t *count,
+ struct pipe_ctx *pipes[MAX_PIPES]);
+enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx,
+ bool enable, bool immediate_update);
+struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link);
+void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_update_dsc_config(struct pipe_ctx *pipe_ctx);
+#endif /* __DC_LINK_DPMS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
new file mode 100644
index 000000000000..1515c817f03b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -0,0 +1,836 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file owns the creation/destruction of link structure.
+ */
+#include "link_factory.h"
+#include "link_detection.h"
+#include "link_resource.h"
+#include "link_validation.h"
+#include "link_dpms.h"
+#include "accessories/link_dp_cts.h"
+#include "accessories/link_dp_trace.h"
+#include "accessories/link_fpga.h"
+#include "protocols/link_ddc.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_dpia_bw.h"
+#include "protocols/link_dp_dpia.h"
+#include "protocols/link_dp_irq_handler.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_training.h"
+#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_hpd.h"
+#include "gpio_service_interface.h"
+#include "atomfirmware.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define LINK_INFO(...) \
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+
+/* link factory owns the creation/destruction of link structures. */
+static void construct_link_service_factory(struct link_service *link_srv)
+{
+ link_srv->create_link = link_create;
+ link_srv->destroy_link = link_destroy;
+}
+
+/* link_detection manages link detection states and receiver states by using
+ * various link protocols. It also provides helper functions to interpret
+ * certain capabilities or status based on the states it manages or retrieve
+ * them directly from connected receivers.
+ */
+static void construct_link_service_detection(struct link_service *link_srv)
+{
+ link_srv->detect_link = link_detect;
+ link_srv->detect_connection_type = link_detect_connection_type;
+ link_srv->add_remote_sink = link_add_remote_sink;
+ link_srv->remove_remote_sink = link_remove_remote_sink;
+ link_srv->get_hpd_state = link_get_hpd_state;
+ link_srv->get_hpd_gpio = link_get_hpd_gpio;
+ link_srv->enable_hpd = link_enable_hpd;
+ link_srv->disable_hpd = link_disable_hpd;
+ link_srv->enable_hpd_filter = link_enable_hpd_filter;
+ link_srv->reset_cur_dp_mst_topology = link_reset_cur_dp_mst_topology;
+ link_srv->get_status = link_get_status;
+ link_srv->is_hdcp1x_supported = link_is_hdcp14;
+ link_srv->is_hdcp2x_supported = link_is_hdcp22;
+ link_srv->clear_dprx_states = link_clear_dprx_states;
+}
+
+/* link resource implements accessors to link resource. */
+static void construct_link_service_resource(struct link_service *link_srv)
+{
+ link_srv->get_cur_res_map = link_get_cur_res_map;
+ link_srv->restore_res_map = link_restore_res_map;
+ link_srv->get_cur_link_res = link_get_cur_link_res;
+}
+
+/* link validation owns timing validation against various link limitations
+ * (e.g. link bandwidth, receiver capability or our hardware capability). It
+ * also provides helper functions exposing the bandwidth formulas used in
+ * validation.
+ */
+static void construct_link_service_validation(struct link_service *link_srv)
+{
+ link_srv->validate_mode_timing = link_validate_mode_timing;
+ link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
+ link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+}
+
+/* link dpms owns the programming sequence of stream's dpms state associated
+ * with the link and link's enable/disable sequences as result of the stream's
+ * dpms state change.
+ */
+static void construct_link_service_dpms(struct link_service *link_srv)
+{
+ link_srv->set_dpms_on = link_set_dpms_on;
+ link_srv->set_dpms_off = link_set_dpms_off;
+ link_srv->resume = link_resume;
+ link_srv->blank_all_dp_displays = link_blank_all_dp_displays;
+ link_srv->blank_all_edp_displays = link_blank_all_edp_displays;
+ link_srv->blank_dp_stream = link_blank_dp_stream;
+ link_srv->increase_mst_payload = link_increase_mst_payload;
+ link_srv->reduce_mst_payload = link_reduce_mst_payload;
+ link_srv->set_dsc_on_stream = link_set_dsc_on_stream;
+ link_srv->set_dsc_enable = link_set_dsc_enable;
+ link_srv->update_dsc_config = link_update_dsc_config;
+}
+
+/* link ddc implements generic display communication protocols such as i2c, aux
+ * and scdc. It should not contain any specific applications of these
+ * protocols such as display capability query, detection, or handshaking such as
+ * link training.
+ */
+static void construct_link_service_ddc(struct link_service *link_srv)
+{
+ link_srv->create_ddc_service = link_create_ddc_service;
+ link_srv->destroy_ddc_service = link_destroy_ddc_service;
+ link_srv->query_ddc_data = link_query_ddc_data;
+ link_srv->aux_transfer_raw = link_aux_transfer_raw;
+ link_srv->aux_transfer_with_retries_no_mutex =
+ link_aux_transfer_with_retries_no_mutex;
+ link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode;
+ link_srv->get_aux_defer_delay = link_get_aux_defer_delay;
+}
+
+/* link dp capability implements the dp specific link capability retrieval
+ * sequence. It is responsible for retrieving, parsing, overriding and deciding
+ * the capability obtained from the dp link. Link capability consists of
+ * encoders, DPRXs, cables, retimers, usb and all other possible backend
+ * capabilities.
+ */
+static void construct_link_service_dp_capability(struct link_service *link_srv)
+{
+ link_srv->dp_is_sink_present = dp_is_sink_present;
+ link_srv->dp_is_fec_supported = dp_is_fec_supported;
+ link_srv->dp_is_128b_132b_signal = dp_is_128b_132b_signal;
+ link_srv->dp_get_max_link_enc_cap = dp_get_max_link_enc_cap;
+ link_srv->dp_get_verified_link_cap = dp_get_verified_link_cap;
+ link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
+ link_srv->dp_should_enable_fec = dp_should_enable_fec;
+ link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->mst_decide_link_encoding_format =
+ mst_decide_link_encoding_format;
+ link_srv->edp_decide_link_settings = edp_decide_link_settings;
+ link_srv->bw_kbps_from_raw_frl_link_rate_data =
+ link_bw_kbps_from_raw_frl_link_rate_data;
+ link_srv->dp_overwrite_extended_receiver_cap =
+ dp_overwrite_extended_receiver_cap;
+ link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+}
+
+/* link dp phy/dpia implements basic dp phy/dpia functionality such as
+ * enable/disable output and set lane/drive settings. It is responsible for
+ * maintaining and updating the software state representing the current
+ * phy/dpia status, such as the current link settings.
+ */
+static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv)
+{
+ link_srv->dpia_handle_usb4_bandwidth_allocation_for_link =
+ dpia_handle_usb4_bandwidth_allocation_for_link;
+ link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response;
+ link_srv->dp_set_drive_settings = dp_set_drive_settings;
+ link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl;
+}
+
+/* link dp irq handler implements DP HPD short pulse handling sequence according
+ * to DP specifications
+ */
+static void construct_link_service_dp_irq_handler(struct link_service *link_srv)
+{
+ link_srv->dp_parse_link_loss_status = dp_parse_link_loss_status;
+ link_srv->dp_should_allow_hpd_rx_irq = dp_should_allow_hpd_rx_irq;
+ link_srv->dp_handle_link_loss = dp_handle_link_loss;
+ link_srv->dp_read_hpd_rx_irq_data = dp_read_hpd_rx_irq_data;
+ link_srv->dp_handle_hpd_rx_irq = dp_handle_hpd_rx_irq;
+}
+
+/* link edp panel control implements retrieval and configuration of eDP panel
+ * features such as PSR and ABM and it also manages specs defined eDP panel
+ * power sequences.
+ */
+static void construct_link_service_edp_panel_control(struct link_service *link_srv)
+{
+ link_srv->edp_panel_backlight_power_on = edp_panel_backlight_power_on;
+ link_srv->edp_get_backlight_level = edp_get_backlight_level;
+ link_srv->edp_get_backlight_level_nits = edp_get_backlight_level_nits;
+ link_srv->edp_set_backlight_level = edp_set_backlight_level;
+ link_srv->edp_set_backlight_level_nits = edp_set_backlight_level_nits;
+ link_srv->edp_get_target_backlight_pwm = edp_get_target_backlight_pwm;
+ link_srv->edp_get_psr_state = edp_get_psr_state;
+ link_srv->edp_set_psr_allow_active = edp_set_psr_allow_active;
+ link_srv->edp_setup_psr = edp_setup_psr;
+ link_srv->edp_set_sink_vtotal_in_psr_active =
+ edp_set_sink_vtotal_in_psr_active;
+ link_srv->edp_get_psr_residency = edp_get_psr_residency;
+ link_srv->edp_wait_for_t12 = edp_wait_for_t12;
+ link_srv->edp_is_ilr_optimization_required =
+ edp_is_ilr_optimization_required;
+ link_srv->edp_backlight_enable_aux = edp_backlight_enable_aux;
+ link_srv->edp_add_delay_for_T9 = edp_add_delay_for_T9;
+ link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9;
+ link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7;
+ link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable;
+}
+
+/* link dp cts implements dp compliance test automation protocols and manual
+ * testing interfaces for debugging and certification purpose.
+ */
+static void construct_link_service_dp_cts(struct link_service *link_srv)
+{
+ link_srv->dp_handle_automated_test = dp_handle_automated_test;
+ link_srv->dp_set_test_pattern = dp_set_test_pattern;
+ link_srv->dp_set_preferred_link_settings =
+ dp_set_preferred_link_settings;
+ link_srv->dp_set_preferred_training_settings =
+ dp_set_preferred_training_settings;
+}
+
+/* link dp trace implements tracing interfaces for tracking major dp sequences
+ * including execution status and timestamps
+ */
+static void construct_link_service_dp_trace(struct link_service *link_srv)
+{
+ link_srv->dp_trace_is_initialized = dp_trace_is_initialized;
+ link_srv->dp_trace_set_is_logged_flag = dp_trace_set_is_logged_flag;
+ link_srv->dp_trace_is_logged = dp_trace_is_logged;
+ link_srv->dp_trace_get_lt_end_timestamp = dp_trace_get_lt_end_timestamp;
+ link_srv->dp_trace_get_lt_counts = dp_trace_get_lt_counts;
+ link_srv->dp_trace_get_link_loss_count = dp_trace_get_link_loss_count;
+ link_srv->dp_trace_set_edp_power_timestamp =
+ dp_trace_set_edp_power_timestamp;
+ link_srv->dp_trace_get_edp_poweron_timestamp =
+ dp_trace_get_edp_poweron_timestamp;
+ link_srv->dp_trace_get_edp_poweroff_timestamp =
+ dp_trace_get_edp_poweroff_timestamp;
+ link_srv->dp_trace_source_sequence = dp_trace_source_sequence;
+}
+
+static void construct_link_service(struct link_service *link_srv)
+{
+ /* All link service functions should fall under some sub categories.
+ * If a new function doesn't perfectly fall under an existing sub
+ * category, it must be that you are either adding a whole new aspect of
+ * responsibility to link service or something doesn't belong to link
+ * service. In that case please contact the arch owner to arrange a
+ * design review meeting.
+ */
+ construct_link_service_factory(link_srv);
+ construct_link_service_detection(link_srv);
+ construct_link_service_resource(link_srv);
+ construct_link_service_validation(link_srv);
+ construct_link_service_dpms(link_srv);
+ construct_link_service_ddc(link_srv);
+ construct_link_service_dp_capability(link_srv);
+ construct_link_service_dp_phy_or_dpia(link_srv);
+ construct_link_service_dp_irq_handler(link_srv);
+ construct_link_service_edp_panel_control(link_srv);
+ construct_link_service_dp_cts(link_srv);
+ construct_link_service_dp_trace(link_srv);
+}
+
+struct link_service *link_create_link_service(void)
+{
+ struct link_service *link_srv = kzalloc(sizeof(*link_srv), GFP_KERNEL);
+
+ if (link_srv == NULL)
+ goto fail;
+
+ construct_link_service(link_srv);
+
+ return link_srv;
+fail:
+ return NULL;
+}
+
+void link_destroy_link_service(struct link_service **link_srv)
+{
+ kfree(*link_srv);
+ *link_srv = NULL;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+ struct graphics_object_id encoder)
+{
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_A;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_B;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_C;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_D;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_E;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_F;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_G;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_NUTMEG_CRT;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_TRAVIS_CRT;
+ case ENUM_ID_2:
+ return TRANSMITTER_TRAVIS_LCD;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+}
+
+static void link_destruct(struct dc_link *link)
+{
+ int i;
+
+ if (link->hpd_gpio) {
+ dal_gpio_destroy_irq(&link->hpd_gpio);
+ link->hpd_gpio = NULL;
+ }
+
+ if (link->ddc)
+ link_destroy_ddc_service(&link->ddc);
+
+ if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+ if (link->link_enc) {
+ /* Update link encoder resource tracking variables. These are used for
+ * the dynamic assignment of link encoders to streams. Virtual links
+ * are not assigned encoder resources on creation.
+ */
+ if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
+ link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
+ link->dc->res_pool->dig_link_enc_count--;
+ }
+ link->link_enc->funcs->destroy(&link->link_enc);
+ }
+
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ for (i = 0; i < link->sink_count; ++i)
+ dc_sink_release(link->remote_sinks[i]);
+}
+
+static enum channel_id get_ddc_line(struct dc_link *link)
+{
+ struct ddc *ddc;
+ enum channel_id channel;
+
+ channel = CHANNEL_ID_UNKNOWN;
+
+ ddc = get_ddc_pin(link->ddc);
+
+ if (ddc) {
+ switch (dal_ddc_get_line(ddc)) {
+ case GPIO_DDC_LINE_DDC1:
+ channel = CHANNEL_ID_DDC1;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ channel = CHANNEL_ID_DDC2;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ channel = CHANNEL_ID_DDC3;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ channel = CHANNEL_ID_DDC4;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ channel = CHANNEL_ID_DDC5;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ channel = CHANNEL_ID_DDC6;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ channel = CHANNEL_ID_DDC_VGA;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ channel = CHANNEL_ID_I2C_PAD;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static bool construct_phy(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ uint8_t i;
+ struct ddc_service_init_data ddc_service_init_data = { 0 };
+ struct dc_context *dc_ctx = init_params->ctx;
+ struct encoder_init_data enc_init_data = { 0 };
+ struct panel_cntl_init_data panel_cntl_init_data = { 0 };
+ struct integrated_info info = { 0 };
+ struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+ const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+ struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+ link->link_id =
+ bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+ link->ep_type = DISPLAY_ENDPOINT_PHY;
+
+ DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
+
+ if (bios->funcs->get_disp_connector_caps_info) {
+ bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
+ link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
+ DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
+ }
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
+ goto create_fail;
+ }
+
+ if (link->dc->res_pool->funcs->link_init)
+ link->dc->res_pool->funcs->link_init(link);
+
+ link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+
+ if (link->hpd_gpio) {
+ dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
+ dal_gpio_unlock_pin(link->hpd_gpio);
+ link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
+
+ DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
+ DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ if (link->hpd_gpio)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+
+ break;
+ case CONNECTOR_ID_EDP:
+ link->connector_signal = SIGNAL_TYPE_EDP;
+
+ if (link->hpd_gpio) {
+ if (!link->dc->config.allow_edp_hotplug_detection)
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+
+ switch (link->dc->config.allow_edp_hotplug_detection) {
+ case HPD_EN_FOR_ALL_EDP:
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ break;
+ case HPD_EN_FOR_PRIMARY_EDP_ONLY:
+ if (link->link_index == 0)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ case HPD_EN_FOR_SECONDARY_EDP_ONLY:
+ if (link->link_index == 1)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ default:
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ }
+ }
+
+ break;
+ case CONNECTOR_ID_LVDS:
+ link->connector_signal = SIGNAL_TYPE_LVDS;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+ link->link_id.id);
+ goto create_fail;
+ }
+
+	/* TODO: #DAL3 Implement id to str function. */
+	LINK_INFO("Connector[%d] description: signal %d\n",
+			init_params->connector_index,
+			link->connector_signal);
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ link->ddc = link_create_ddc_service(&ddc_service_init_data);
+
+ if (!link->ddc) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ if (!link->ddc->ddc_pin) {
+ DC_ERROR("Failed to get I2C info for connector!\n");
+ goto ddc_create_fail;
+ }
+
+ link->ddc_hw_inst =
+ dal_ddc_get_line(get_ddc_pin(link->ddc));
+
+
+ if (link->dc->res_pool->funcs->panel_cntl_create &&
+ (link->link_id.id == CONNECTOR_ID_EDP ||
+ link->link_id.id == CONNECTOR_ID_LVDS)) {
+ panel_cntl_init_data.ctx = dc_ctx;
+ panel_cntl_init_data.inst =
+ panel_cntl_init_data.ctx->dc_edp_id_count;
+ link->panel_cntl =
+ link->dc->res_pool->funcs->panel_cntl_create(
+ &panel_cntl_init_data);
+ panel_cntl_init_data.ctx->dc_edp_id_count++;
+
+ if (link->panel_cntl == NULL) {
+ DC_ERROR("Failed to create link panel_cntl!\n");
+ goto panel_cntl_create_fail;
+ }
+ }
+
+ enc_init_data.ctx = dc_ctx;
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+ &enc_init_data.encoder);
+ enc_init_data.connector = link->link_id;
+ enc_init_data.channel = get_ddc_line(link);
+ enc_init_data.hpd_source = get_hpd_line(link);
+
+ link->hpd_src = enc_init_data.hpd_source;
+
+ enc_init_data.transmitter =
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
+
+ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
+ if (!link->link_enc) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
+ /* Update link encoder tracking variables. These are used for the dynamic
+ * assignment of link encoders to streams.
+ */
+ link->eng_id = link->link_enc->preferred_engine;
+ link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
+ link->dc->res_pool->dig_link_enc_count++;
+
+ link->link_enc_hw_inst = link->link_enc->transmitter;
+ for (i = 0; i < 4; i++) {
+ if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+ link->link_id, i,
+ &link->device_tag) != BP_RESULT_OK) {
+ DC_ERROR("Failed to find device tag!\n");
+ goto device_tag_fail;
+ }
+
+ /* Look for a device tag that matches the connector signal:
+ * CRT for RGB, LCD for all other supported signal types
+ */
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+ link->device_tag.dev_id))
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+ link->connector_signal != SIGNAL_TYPE_RGB)
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+ link->connector_signal == SIGNAL_TYPE_RGB)
+ continue;
+
+ DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
+ break;
+ }
+
+ if (bios->integrated_info)
+ info = *bios->integrated_info;
+
+ /* Look for channel mapping corresponding to connector and device tag */
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+ struct external_display_path *path =
+ &info.ext_disp_conn_info.path[i];
+
+ if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+ path->device_connector_id.id == link->link_id.id &&
+ path->device_connector_id.type == link->link_id.type) {
+ if (link->device_tag.acpi_device != 0 &&
+ path->device_acpi_enum == link->device_tag.acpi_device) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+ } else if (path->device_tag ==
+ link->device_tag.dev_id.raw_device_tag) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+ }
+
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ link->bios_forced_drive_settings.VOLTAGE_SWING =
+ (info.ext_disp_conn_info.fixdpvoltageswing & 0x3);
+ link->bios_forced_drive_settings.PRE_EMPHASIS =
+ ((info.ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+ }
+
+ break;
+ }
+ }
+
+ if (bios->funcs->get_atom_dc_golden_table)
+ bios->funcs->get_atom_dc_golden_table(bios);
+
+ /*
+ * TODO check if GPIO programmed correctly
+ *
+ * If GPIO isn't programmed correctly HPD might not rise or drain
+ * fast enough, leading to bounces.
+ */
+ program_hpd_filter(link);
+
+ link->psr_settings.psr_vtotal_control_support = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
+ return true;
+device_tag_fail:
+ link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+ if (link->panel_cntl != NULL)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
+ link_destroy_ddc_service(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+ if (link->hpd_gpio) {
+ dal_gpio_destroy_irq(&link->hpd_gpio);
+ link->hpd_gpio = NULL;
+ }
+
+ DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
+ return false;
+}
+
+static bool construct_dpia(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ struct ddc_service_init_data ddc_service_init_data = { 0 };
+ struct dc_context *dc_ctx = init_params->ctx;
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+ /* Initialize irq sources for hpd and hpd rx */
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+ /* Dummy Init for linkid */
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
+ link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
+ link->is_internal_display = false;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ LINK_INFO("Connector[%d] description:signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
+ link->is_dig_mapping_flexible = true;
+
+ /* TODO: Initialize link : funcs->link_init */
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ /* Set indicator for dpia link so that ddc won't be created */
+ ddc_service_init_data.is_dpia_link = true;
+
+ link->ddc = link_create_ddc_service(&ddc_service_init_data);
+ if (!link->ddc) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ /* Set dpia port index: 0 to (number of dpia ports - 1) */
+ link->ddc_hw_inst = init_params->connector_index;
+
+ /* TODO: Create link encoder */
+
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
+ link->wa_flags.dp_mot_reset_segment = true;
+
+ return true;
+
+ddc_create_fail:
+ return false;
+}
+
+static bool link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ /* Handle dpia case */
+ if (init_params->is_dpia_link == true)
+ return construct_dpia(link, init_params);
+ else
+ return construct_phy(link, init_params);
+}
+
+struct dc_link *link_create(const struct link_init_data *init_params)
+{
+ struct dc_link *link =
+ kzalloc(sizeof(*link), GFP_KERNEL);
+
+ if (NULL == link)
+ goto alloc_fail;
+
+ if (false == link_construct(link, init_params))
+ goto construct_fail;
+
+ return link;
+
+construct_fail:
+ kfree(link);
+
+alloc_fail:
+ return NULL;
+}
+
+void link_destroy(struct dc_link **link)
+{
+ link_destruct(*link);
+ kfree(*link);
+ *link = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
new file mode 100644
index 000000000000..e96220d48d03
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_FACTORY_H__
+#define __LINK_FACTORY_H__
+#include "link.h"
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
+
+#endif /* __LINK_FACTORY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.c b/drivers/gpu/drm/amd/display/dc/link/link_resource.c
new file mode 100644
index 000000000000..bd42bb273c0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements accessors to link resource.
+ */
+
+#include "link_resource.h"
+#include "protocols/link_dp_capability.h"
+
+void link_get_cur_link_res(const struct dc_link *link,
+ struct link_resource *link_res)
+{
+ int i;
+ struct pipe_ctx *pipe = NULL;
+
+ memset(link_res, 0, sizeof(*link_res));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
+ if (pipe->stream->link == link) {
+ *link_res = pipe->link_res;
+ break;
+ }
+ }
+ }
+
+}
+
+void link_get_cur_res_map(const struct dc *dc, uint32_t *map)
+{
+ struct dc_link *link;
+ uint32_t i;
+ uint32_t hpo_dp_recycle_map = 0;
+
+ *map = 0;
+
+ if (dc->caps.dp_hpo) {
+ for (i = 0; i < dc->caps.max_links; i++) {
+ link = dc->links[i];
+ if (link->link_status.link_active &&
+ link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
+ link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
+ /* An hpo dp link encoder is considered recycled when the RX reports
+ * 128b/132b encoding capability but the current link doesn't use it.
+ */
+ hpo_dp_recycle_map |= (1 << i);
+ }
+ *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
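+ /* Illustrative example (hypothetical values): if only link index 2 met
+ * the condition above, hpo_dp_recycle_map is 0x4 and *map now carries
+ * that bit at position (2 + LINK_RES_HPO_DP_REC_MAP__SHIFT) for
+ * link_restore_res_map() to consume later.
+ */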
+ }
+}
+
+void link_restore_res_map(const struct dc *dc, uint32_t *map)
+{
+ struct dc_link *link;
+ uint32_t i;
+ unsigned int available_hpo_dp_count;
+ uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
+ >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
+
+ if (dc->caps.dp_hpo) {
+ available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
+ /* remove excess 128b/132b encoding support for not recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) == 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ /* remove excess 128b/132b encoding support for recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) != 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
new file mode 100644
index 000000000000..1907bda3cb6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_RESOURCE_H__
+#define __LINK_RESOURCE_H__
+#include "link.h"
+void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
+void link_restore_res_map(const struct dc *dc, uint32_t *map);
+void link_get_cur_link_res(const struct dc_link *link,
+ struct link_resource *link_res);
+#endif /* __LINK_RESOURCE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
new file mode 100644
index 000000000000..d4b7da526f0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file owns timing validation against various link limitations. (ex.
+ * link bandwidth, receiver capability or our hardware capability) It also
+ * provides helper functions exposing bandwidth formulas used in validation.
+ */
+#include "link_validation.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_dpia_bw.h"
+#include "resource.h"
+
+#define DC_LOGGER_INIT(logger)
+
+static uint32_t get_tmds_output_pixel_clock_100hz(const struct dc_crtc_timing *timing)
+{
+
+ uint32_t pxl_clk = timing->pix_clk_100hz;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pxl_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pxl_clk = pxl_clk * 2 / 3;
+
+ if (timing->display_color_depth == COLOR_DEPTH_101010)
+ pxl_clk = pxl_clk * 10 / 8;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212)
+ pxl_clk = pxl_clk * 12 / 8;
+
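+ /* Worked example (hypothetical numbers): 594 MHz (5940000 in 100 Hz
+ * units) with YCbCr 4:2:0 and 12 bpc becomes 5940000 / 2 * 12 / 8 =
+ * 4455000, i.e. an effective 445.5 MHz TMDS clock.
+ */
+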
+ return pxl_clk;
+}
+
+static bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dpcd_caps *dpcd_caps)
+{
+ const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
+
+ switch (dpcd_caps->dongle_type) {
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+ else
+ return false;
+ default:
+ break;
+ }
+
+ if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
+ dongle_caps->extendedCapValid == true) {
+ /* Check Pixel Encoding */
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+ return false;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+ return false;
+ break;
+ default:
+ /* Invalid Pixel Encoding*/
+ return false;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ /*888 and 666 should always be supported*/
+ break;
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* These color depths are currently not supported */
+ return false;
+ }
+
+ /* Check 3D format */
+ switch (timing->timing_3d_format) {
+ case TIMING_3D_FORMAT_NONE:
+ case TIMING_3D_FORMAT_FRAME_ALTERNATE:
+ /*Only frame alternate 3D is supported on active dongle*/
+ break;
+ default:
+ /*other 3D formats are not supported due to bad infoframe translation */
+ return false;
+ }
+
+ if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
+ struct dc_crtc_timing outputTiming = *timing;
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
+ /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
+ outputTiming.flags.DSC = 0;
+#endif
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ return false;
+ } else { // DP to HDMI TMDS converter
+ if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
+ return false;
+ }
+ }
+
+ if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+ dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
+ dongle_caps->dfp_cap_ext.supported) {
+
+ if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
+ return false;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_666 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr444)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr422)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_ycbcr420)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+uint32_t dp_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ uint32_t total_data_bw_efficiency_x10000 = 0;
+ uint32_t link_rate_per_lane_kbps = 0;
+
+ switch (link_dp_get_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ /* For 8b/10b encoding:
+ * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
+ * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported.
+ */
+ link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+ if (dp_should_enable_fec(link)) {
+ total_data_bw_efficiency_x10000 /= 100;
+ total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+ }
+ break;
+ case DP_128b_132b_ENCODING:
+ /* For 128b/132b encoding:
+ * link rate is defined in the unit of 10mbps per lane.
+ * total data bandwidth efficiency is always 96.71%.
+ */
+ link_rate_per_lane_kbps = link_settings->link_rate * 10000;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+ break;
+ default:
+ break;
+ }
+
+ /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
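+ /* Worked example (hypothetical values, assuming LINK_RATE_REF_FREQ_IN_KHZ
+ * is 27000 and BITS_PER_DP_BYTE is 10): HBR2 (link_rate 0x14) over 4 lanes
+ * without FEC gives 0x14 * 27000 * 10 = 5400000 kbps per lane, and
+ * 5400000 * 4 / 10000 * 8000 = 17280000 kbps of effective bandwidth.
+ */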
+ return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
+}
+
+static bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t req_bw;
+ uint32_t max_bw;
+
+ const struct dc_link_settings *link_setting;
+
+ /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 &&
+ !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
+ dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL)
+ return false;
+
+ /* always allow the DP fail safe mode (640x480 @ 25.175 MHz pixel clock) */
+ if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 &&
+ timing->h_addressable == (uint32_t) 640 &&
+ timing->v_addressable == (uint32_t) 480)
+ return true;
+
+ link_setting = dp_get_verified_link_cap(link);
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /*if (flags.DYNAMIC_VALIDATION == 1 &&
+ link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+ link_setting = &link->verified_link_cap;
+ */
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = dp_link_bandwidth_kbps(link, link_setting);
+
+ if (req_bw <= max_bw) {
+ /* Remember the biggest mode here. During initial link
+ * training (to get verified_link_cap), LS sends an event
+ * to the upper layer that it cannot train at the reported
+ * cap, and the upper layer will re-enumerate modes. This
+ * is not necessary if the lower verified_link_cap is
+ * enough to drive all the modes. */
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /* if (flags.DYNAMIC_VALIDATION == 1)
+ dpsst->max_req_bw_for_verified_linkcap = dal_max(
+ dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+ return true;
+ } else
+ return false;
+}
+
+enum dc_status link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+ /* A hack to avoid failing any modes when the EDID override feature is
+ * used and the topology changes, e.g. a lower quality DP cable or a
+ * different dongle
+ */
+ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ return DC_OK;
+
+ /* Passive Dongle */
+ if (max_pix_clk != 0 && get_tmds_output_pixel_clock_100hz(timing) > max_pix_clk)
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle*/
+ if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
+ return DC_EXCEED_DONGLE_CAP;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (!dp_validate_mode_timing(
+ link,
+ timing))
+ return DC_NO_DP_LINK_BANDWIDTH;
+ break;
+
+ default:
+ break;
+ }
+
+ return DC_OK;
+}
+
+bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+{
+ bool ret = true;
+ int bw_needed[MAX_DPIA_NUM];
+ struct dc_link *link[MAX_DPIA_NUM];
+
+ if (!num_streams || num_streams > MAX_DPIA_NUM)
+ return ret;
+
+ for (uint8_t i = 0; i < num_streams; ++i) {
+ link[i] = stream[i].link;
+ bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
new file mode 100644
index 000000000000..4a954317d0da
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_VALIDATION_H__
+#define __LINK_VALIDATION_H__
+#include "link.h"
+enum dc_status link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+bool link_validate_dpia_bandwidth(
+ const struct dc_stream_state *stream,
+ const unsigned int num_streams);
+uint32_t dp_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
+
+#endif /* __LINK_VALIDATION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index ce8d6a54ca54..0fa1228bc178 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -23,20 +23,20 @@
*
*/
-#include "dm_services.h"
-#include "dm_helpers.h"
-#include "gpio_service_interface.h"
-#include "include/ddc_service_types.h"
-#include "include/grph_object_id.h"
-#include "include/dpcd_defs.h"
-#include "include/logger_interface.h"
-#include "include/vector.h"
-#include "core_types.h"
-#include "dc_link_ddc.h"
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This file implements generic display communication protocols such as i2c, aux
+ * and scdc. The file should not contain any specific applications of these
+ * protocols such as display capability query, detection, or handshaking such as
+ * link training.
+ */
+#include "link_ddc.h"
+#include "vector.h"
#include "dce/dce_aux.h"
-#include "dmub/inc/dmub_cmd.h"
+#include "dal_asic_id.h"
#include "link_dpcd.h"
-#include "include/dal_asic_id.h"
+#include "dm_helpers.h"
+#include "atomfirmware.h"
#define DC_LOGGER_INIT(logger)
@@ -45,87 +45,6 @@ static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga";
static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2";
-#define AUX_POWER_UP_WA_DELAY 500
-#define I2C_OVER_AUX_DEFER_WA_DELAY 70
-#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
-#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
-
-/* CV smart dongle slave address for retrieving supported HDTV modes*/
-#define CV_SMART_DONGLE_ADDRESS 0x20
-/* DVI-HDMI dongle slave address for retrieving dongle signature*/
-#define DVI_HDMI_DONGLE_ADDRESS 0x68
-struct dvi_hdmi_dongle_signature_data {
- int8_t vendor[3];/* "AMD" */
- uint8_t version[2];
- uint8_t size;
- int8_t id[11];/* "6140063500G"*/
-};
-/* DP-HDMI dongle slave address for retrieving dongle signature*/
-#define DP_HDMI_DONGLE_ADDRESS 0x40
-static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
-#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
-
-struct dp_hdmi_dongle_signature_data {
- int8_t id[15];/* "DP-HDMI ADAPTOR"*/
- uint8_t eot;/* end of transmition '\x4' */
-};
-
-/* SCDC Address defines (HDMI 2.0)*/
-#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
-#define HDMI_SCDC_ADDRESS 0x54
-#define HDMI_SCDC_SINK_VERSION 0x01
-#define HDMI_SCDC_SOURCE_VERSION 0x02
-#define HDMI_SCDC_UPDATE_0 0x10
-#define HDMI_SCDC_TMDS_CONFIG 0x20
-#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
-#define HDMI_SCDC_CONFIG_0 0x30
-#define HDMI_SCDC_STATUS_FLAGS 0x40
-#define HDMI_SCDC_ERR_DETECT 0x50
-#define HDMI_SCDC_TEST_CONFIG 0xC0
-#define HDMI_SCDC_DEVICE_ID 0xD3
-
-union hdmi_scdc_update_read_data {
- uint8_t byte[2];
- struct {
- uint8_t STATUS_UPDATE:1;
- uint8_t CED_UPDATE:1;
- uint8_t RR_TEST:1;
- uint8_t RESERVED:5;
- uint8_t RESERVED2:8;
- } fields;
-};
-
-union hdmi_scdc_status_flags_data {
- uint8_t byte;
- struct {
- uint8_t CLOCK_DETECTED:1;
- uint8_t CH0_LOCKED:1;
- uint8_t CH1_LOCKED:1;
- uint8_t CH2_LOCKED:1;
- uint8_t RESERVED:4;
- } fields;
-};
-
-union hdmi_scdc_ced_data {
- uint8_t byte[7];
- struct {
- uint8_t CH0_8LOW:8;
- uint8_t CH0_7HIGH:7;
- uint8_t CH0_VALID:1;
- uint8_t CH1_8LOW:8;
- uint8_t CH1_7HIGH:7;
- uint8_t CH1_VALID:1;
- uint8_t CH2_8LOW:8;
- uint8_t CH2_7HIGH:7;
- uint8_t CH2_VALID:1;
- uint8_t CHECKSUM:8;
- uint8_t RESERVED:8;
- uint8_t RESERVED2:8;
- uint8_t RESERVED3:8;
- uint8_t RESERVED4:4;
- } fields;
-};
-
struct i2c_payloads {
struct vector payloads;
};
@@ -134,7 +53,7 @@ struct aux_payloads {
struct vector payloads;
};
-static bool dal_ddc_i2c_payloads_create(
+static bool i2c_payloads_create(
struct dc_context *ctx,
struct i2c_payloads *payloads,
uint32_t count)
@@ -146,19 +65,27 @@ static bool dal_ddc_i2c_payloads_create(
return false;
}
-static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+static struct i2c_payload *i2c_payloads_get(struct i2c_payloads *p)
{
return (struct i2c_payload *)p->payloads.container;
}
-static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+static uint32_t i2c_payloads_get_count(struct i2c_payloads *p)
{
return p->payloads.count;
}
+static void i2c_payloads_destroy(struct i2c_payloads *p)
+{
+ if (!p)
+ return;
+
+ dal_vector_destruct(&p->payloads);
+}
+
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
-void dal_ddc_i2c_payloads_add(
+static void i2c_payloads_add(
struct i2c_payloads *payloads,
uint32_t address,
uint32_t len,
@@ -226,7 +153,7 @@ static void ddc_service_construct(
ddc_service->wa.raw = 0;
}
-struct ddc_service *dal_ddc_service_create(
+struct ddc_service *link_create_ddc_service(
struct ddc_service_init_data *init_data)
{
struct ddc_service *ddc_service;
@@ -246,7 +173,7 @@ static void ddc_service_destruct(struct ddc_service *ddc)
dal_gpio_destroy_ddc(&ddc->ddc_pin);
}
-void dal_ddc_service_destroy(struct ddc_service **ddc)
+void link_destroy_ddc_service(struct ddc_service **ddc)
{
if (!ddc || !*ddc) {
BREAK_TO_DEBUGGER();
@@ -257,19 +184,14 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)
*ddc = NULL;
}
-enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
-{
- return DDC_SERVICE_TYPE_CONNECTOR;
-}
-
-void dal_ddc_service_set_transaction_type(
+void set_ddc_transaction_type(
struct ddc_service *ddc,
enum ddc_transaction_type type)
{
ddc->transaction_type = type;
}
-bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+bool link_is_in_aux_transaction_mode(struct ddc_service *ddc)
{
switch (ddc->transaction_type) {
case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
@@ -282,7 +204,7 @@ bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
return false;
}
-void ddc_service_set_dongle_type(struct ddc_service *ddc,
+void set_dongle_type(struct ddc_service *ddc,
enum display_dongle_type dongle_type)
{
ddc->dongle_type = dongle_type;
@@ -324,7 +246,7 @@ static uint32_t defer_delay_converter_wa(
#define DP_TRANSLATOR_DELAY 5
-uint32_t get_defer_delay(struct ddc_service *ddc)
+uint32_t link_get_aux_defer_delay(struct ddc_service *ddc)
{
uint32_t defer_delay = 0;
@@ -352,175 +274,45 @@ uint32_t get_defer_delay(struct ddc_service *ddc)
return defer_delay;
}
-static bool i2c_read(
- struct ddc_service *ddc,
- uint32_t address,
- uint8_t *buffer,
- uint32_t len)
-{
- uint8_t offs_data = 0;
- struct i2c_payload payloads[2] = {
- {
- .write = true,
- .address = address,
- .length = 1,
- .data = &offs_data },
- {
- .write = false,
- .address = address,
- .length = len,
- .data = buffer } };
-
- struct i2c_command command = {
- .payloads = payloads,
- .number_of_payloads = 2,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
-
- return dm_helpers_submit_i2c(
- ddc->ctx,
- ddc->link,
- &command);
-}
-
-void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap)
+static bool submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
{
- uint8_t i;
- bool is_valid_hdmi_signature;
- enum display_dongle_type *dongle = &sink_cap->dongle_type;
- uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
- bool is_type2_dongle = false;
- int retry_count = 2;
- struct dp_hdmi_dongle_signature_data *dongle_signature;
-
- /* Assume we have no valid DP passive dongle connected */
- *dongle = DISPLAY_DONGLE_NONE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
-
- /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
- if (!i2c_read(
- ddc,
- DP_HDMI_DONGLE_ADDRESS,
- type2_dongle_buf,
- sizeof(type2_dongle_buf))) {
- /* Passive HDMI dongles can sometimes fail here without retrying*/
- while (retry_count > 0) {
- if (i2c_read(ddc,
- DP_HDMI_DONGLE_ADDRESS,
- type2_dongle_buf,
- sizeof(type2_dongle_buf)))
- break;
- retry_count--;
- }
- if (retry_count == 0) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- return;
- }
- }
-
- /* Check if Type 2 dongle.*/
- if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
- is_type2_dongle = true;
-
- dongle_signature =
- (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
-
- is_valid_hdmi_signature = true;
+ uint32_t retrieved = 0;
+ bool ret = false;
- /* Check EOT */
- if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
- is_valid_hdmi_signature = false;
- }
+ if (!ddc)
+ return false;
- /* Check signature */
- for (i = 0; i < sizeof(dongle_signature->id); ++i) {
- /* If its not the right signature,
- * skip mismatch in subversion byte.*/
- if (dongle_signature->id[i] !=
- dp_hdmi_dongle_signature_str[i] && i != 3) {
+ if (!payload)
+ return false;
- if (is_type2_dongle) {
- is_valid_hdmi_signature = false;
- break;
- }
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
+ payload->length;
+ uint32_t payload_length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
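+ /* Chunking example (illustrative, assuming DEFAULT_AUX_MAX_DATA_SIZE is
+ * 16): a 40 byte payload goes out as 16 + 16 + 8 byte transactions; mot
+ * stays true for all but the last chunk, which takes payload->mot.
+ */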
- }
- }
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = payload_length;
+ /* set mot (middle of transaction) to false if it is the last payload */
+ current_payload.mot = is_end_of_payload ? payload->mot:true;
+ current_payload.write_status_update = false;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
- if (is_type2_dongle) {
- uint32_t max_tmds_clk =
- type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
-
- max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
-
- if (0 == max_tmds_clk ||
- max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
- max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- } else {
- if (is_valid_hdmi_signature == true) {
- *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 2 DP-HDMI passive dongle %dMhz: ",
- max_tmds_clk);
- } else {
- *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
- max_tmds_clk);
-
- }
-
- /* Multiply by 1000 to convert to kHz. */
- sink_cap->max_hdmi_pixel_clock =
- max_tmds_clk * 1000;
- }
- sink_cap->is_dongle_type_one = false;
+ ret = link_aux_transfer_with_retries_no_mutex(ddc, &current_payload);
- } else {
- if (is_valid_hdmi_signature == true) {
- *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 1 DP-HDMI passive dongle %dMhz: ",
- sink_cap->max_hdmi_pixel_clock / 1000);
- } else {
- *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
- sink_cap->max_hdmi_pixel_clock / 1000);
- }
- sink_cap->is_dongle_type_one = true;
- }
+ retrieved += payload_length;
+ } while (retrieved < payload->length && ret == true);
- return;
+ return ret;
}
-enum {
- DP_SINK_CAP_SIZE =
- DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
-};
-
-bool dal_ddc_service_query_ddc_data(
+bool link_query_ddc_data(
struct ddc_service *ddc,
uint32_t address,
uint8_t *write_buf,
@@ -530,7 +322,7 @@ bool dal_ddc_service_query_ddc_data(
{
bool success = true;
uint32_t payload_size =
- dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+ link_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
uint32_t write_payloads =
@@ -544,13 +336,13 @@ bool dal_ddc_service_query_ddc_data(
if (!payloads_num)
return false;
- if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+ if (link_is_in_aux_transaction_mode(ddc)) {
struct aux_payload payload;
payload.i2c_over_aux = true;
payload.address = address;
payload.reply = NULL;
- payload.defer_delay = get_defer_delay(ddc);
+ payload.defer_delay = link_get_aux_defer_delay(ddc);
payload.write_status_update = false;
if (write_size != 0) {
@@ -562,7 +354,7 @@ bool dal_ddc_service_query_ddc_data(
payload.length = write_size;
payload.data = write_buf;
- success = dal_ddc_submit_aux_command(ddc, &payload);
+ success = submit_aux_command(ddc, &payload);
}
if (read_size != 0 && success) {
@@ -574,86 +366,41 @@ bool dal_ddc_service_query_ddc_data(
payload.length = read_size;
payload.data = read_buf;
- success = dal_ddc_submit_aux_command(ddc, &payload);
+ success = submit_aux_command(ddc, &payload);
}
} else {
struct i2c_command command = {0};
struct i2c_payloads payloads;
- if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ if (!i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
return false;
- command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.payloads = i2c_payloads_get(&payloads);
command.number_of_payloads = 0;
command.engine = DDC_I2C_COMMAND_ENGINE;
command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
- dal_ddc_i2c_payloads_add(
+ i2c_payloads_add(
&payloads, address, write_size, write_buf, true);
- dal_ddc_i2c_payloads_add(
+ i2c_payloads_add(
&payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(&payloads);
+ i2c_payloads_get_count(&payloads);
success = dm_helpers_submit_i2c(
ddc->ctx,
ddc->link,
&command);
- dal_vector_destruct(&payloads.payloads);
+ i2c_payloads_destroy(&payloads);
}
return success;
}
-bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
- struct aux_payload *payload)
-{
- uint32_t retrieved = 0;
- bool ret = false;
-
- if (!ddc)
- return false;
-
- if (!payload)
- return false;
-
- do {
- struct aux_payload current_payload;
- bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length;
- uint32_t payload_length = is_end_of_payload ?
- payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
-
- current_payload.address = payload->address;
- current_payload.data = &payload->data[retrieved];
- current_payload.defer_delay = payload->defer_delay;
- current_payload.i2c_over_aux = payload->i2c_over_aux;
- current_payload.length = payload_length;
- /* set mot (middle of transaction) to false if it is the last payload */
- current_payload.mot = is_end_of_payload ? payload->mot:true;
- current_payload.write_status_update = false;
- current_payload.reply = payload->reply;
- current_payload.write = payload->write;
-
- ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
-
- retrieved += payload_length;
- } while (retrieved < payload->length && ret == true);
-
- return ret;
-}
-
-/* dc_link_aux_transfer_raw() - Attempt to transfer
- * the given aux payload. This function does not perform
- * retries or handle error states. The reply is returned
- * in the payload->reply and the result through
- * *operation_result. Returns the number of bytes transferred,
- * or -1 on a failure.
- */
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+int link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
@@ -665,22 +412,14 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
}
}
-/* dc_link_aux_transfer_with_retries() - Attempt to submit an
- * aux payload, retrying on timeouts, defers, and busy states
- * as outlined in the DP spec. Returns true if the request
- * was successful.
- *
- * Unless you want to implement your own retry semantics, this
- * is probably the one you want.
- */
-bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
+bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
struct aux_payload *payload)
{
return dce_aux_transfer_with_retries(ddc, payload);
}
-bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
+bool try_to_configure_aux_timeout(struct ddc_service *ddc,
uint32_t timeout)
{
bool result = false;
@@ -713,20 +452,12 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
return result;
}
-/*test only function*/
-void dal_ddc_service_set_ddc_pin(
- struct ddc_service *ddc_service,
- struct ddc *ddc)
-{
- ddc_service->ddc_pin = ddc;
-}
-
-struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+struct ddc *get_ddc_pin(struct ddc_service *ddc_service)
{
return ddc_service->ddc_pin;
}
-void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+void write_scdc_data(struct ddc_service *ddc_service,
uint32_t pix_clk,
bool lte_340_scramble)
{
@@ -741,13 +472,13 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
return;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) {
/*Source Version = 1*/
write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
write_buffer[1] = 1;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ link_query_ddc_data(ddc_service, slave_address,
write_buffer, sizeof(write_buffer), NULL, 0);
/*Read Request from SCDC caps*/
}
@@ -760,11 +491,11 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
} else {
write_buffer[1] = 0;
}
- dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+ link_query_ddc_data(ddc_service, slave_address, write_buffer,
sizeof(write_buffer), NULL, 0);
}
-void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+void read_scdc_data(struct ddc_service *ddc_service)
{
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
@@ -774,20 +505,19 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
return;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
union hdmi_scdc_status_flags_data status_data = {0};
uint8_t scramble_status = 0;
offset = HDMI_SCDC_SCRAMBLER_STATUS;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ link_query_ddc_data(ddc_service, slave_address,
&offset, sizeof(offset), &scramble_status,
sizeof(scramble_status));
offset = HDMI_SCDC_STATUS_FLAGS;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ link_query_ddc_data(ddc_service, slave_address,
&offset, sizeof(offset), &status_data.byte,
sizeof(status_data.byte));
}
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
new file mode 100644
index 000000000000..860ef15d7f1b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DDC_SERVICE_H__
+#define __DAL_DDC_SERVICE_H__
+
+#include "link.h"
+
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
+#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
+#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+
+#define EDID_SEGMENT_SIZE 256
+
+struct ddc_service *link_create_ddc_service(
+ struct ddc_service_init_data *ddc_init_data);
+
+void link_destroy_ddc_service(struct ddc_service **ddc);
+
+void set_ddc_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type);
+
+uint32_t link_get_aux_defer_delay(struct ddc_service *ddc);
+
+bool link_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
+bool try_to_configure_aux_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
+
+bool link_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
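+
+/* Illustrative usage sketch (not taken from a specific caller): read one
+ * byte back, e.g. the SCDC sink version at offset 0x01 of slave 0x54:
+ *
+ * uint8_t offset = 0x01;
+ * uint8_t sink_version = 0;
+ *
+ * link_query_ddc_data(ddc, 0x54, &offset, sizeof(offset),
+ * &sink_version, sizeof(sink_version));
+ */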
+
+/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy
+ * states as outlined in the DP spec. Returns true if the request was
+ * successful.
+ *
+ * NOTE: The function requires explicit mutex on DM side in order to prevent
+ * potential race condition. DC components should call the dpcd read/write
+ * function in dm_helpers in order to access dpcd safely
+ */
+bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
+void write_scdc_data(
+ struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble);
+
+void read_scdc_data(
+ struct ddc_service *ddc_service);
+
+void set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type);
+
+struct ddc *get_ddc_pin(struct ddc_service *ddc_service);
+
+int link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+#endif /* __DAL_DDC_SERVICE_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
new file mode 100644
index 000000000000..ba98013fecd0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -0,0 +1,2259 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements dp specific link capability retrieval sequence. It is
+ * responsible for retrieving, parsing, overriding, deciding capability obtained
+ * from dp link. Link capability consists of encoders, DPRXs, cables, retimers,
+ * usb and all other possible backend capabilities. Other components should
+ * include this header file in order to access link capability. Accessing link
+ * capability by dereferencing dc_link outside dp_link_capability is not a
+ * recommended method as it makes the component dependent on the underlying data
+ * structure used to represent link capability instead of function interfaces.
+ */
+
+#include "link_dp_capability.h"
+#include "link_ddc.h"
+#include "link_dpcd.h"
+#include "link_dp_dpia.h"
+#include "link_dp_phy.h"
+#include "link_edp_panel_control.h"
+#include "link_dp_irq_handler.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link/link_detection.h"
+#include "link/link_validation.h"
+#include "link_dp_training.h"
+#include "atomfirmware.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
+#include "gpio_service_interface.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+struct dp_lt_fallback_entry {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+};
+
+static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
+ /* This link training fallback array is ordered by
+ * link bandwidth from highest to lowest.
+ * The DP spec makes it a normative policy to always
+ * choose the next highest link bandwidth during
+ * link training fallback.
+ */
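+ /* Per the first two entries below, for example, a failure at
+ * LANE_COUNT_FOUR/LINK_RATE_UHBR20 retries at
+ * LANE_COUNT_FOUR/LINK_RATE_UHBR13_5 before lane count is reduced.
+ */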
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR20},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR10},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH2},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH3},
+ {LANE_COUNT_FOUR, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH2},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH},
+ {LANE_COUNT_TWO, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_LOW},
+};
+
+static const struct dc_link_settings fail_safe_link_settings = {
+ .lane_count = LANE_COUNT_ONE,
+ .link_rate = LINK_RATE_LOW,
+ .link_spread = LINK_SPREAD_DISABLED,
+};
+
+bool is_dp_active_dongle(const struct dc_link *link)
+{
+ return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) &&
+ (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
+bool is_dp_branch_device(const struct dc_link *link)
+{
+ return link->dpcd_caps.is_branch_dev;
+}
+
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+ switch (bpc) {
+ case DOWN_STREAM_MAX_8BPC:
+ return 8;
+ case DOWN_STREAM_MAX_10BPC:
+ return 10;
+ case DOWN_STREAM_MAX_12BPC:
+ return 12;
+ case DOWN_STREAM_MAX_16BPC:
+ return 16;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
+{
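+	/* DPCD PHY_REPEATER_CNT uses a one-hot encoding: bit 7 set means one
+	 * repeater, down to bit 0 set meaning eight repeaters.
+	 */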
+ switch (lttpr_repeater_count) {
+ case 0x80: // 1 lttpr repeater
+ return 1;
+ case 0x40: // 2 lttpr repeaters
+ return 2;
+ case 0x20: // 3 lttpr repeaters
+ return 3;
+ case 0x10: // 4 lttpr repeaters
+ return 4;
+ case 0x08: // 5 lttpr repeaters
+ return 5;
+ case 0x04: // 6 lttpr repeaters
+ return 6;
+ case 0x02: // 7 lttpr repeaters
+ return 7;
+ case 0x01: // 8 lttpr repeaters
+ return 8;
+ default:
+ break;
+ }
+ return 0; // invalid value
+}
+
+uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+{
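+	/* 3-bit encoded PCON max FRL bandwidth (9/18/24/32/40/48 Gbps),
+	 * returned in kbps.
+	 */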
+ switch (bw) {
+ case 0b001:
+ return 9000000;
+ case 0b010:
+ return 18000000;
+ case 0b011:
+ return 24000000;
+ case 0b100:
+ return 32000000;
+ case 0b101:
+ return 40000000;
+ case 0b110:
+ return 48000000;
+ }
+
+ return 0;
+}
+
+static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
+{
+ enum dc_link_rate link_rate;
+ // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
+ switch (link_rate_in_khz) {
+ case 1620000:
+ link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane
+ break;
+ case 2160000:
+ link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane
+ break;
+ case 2430000:
+ link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane
+ break;
+ case 2700000:
+ link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane
+ break;
+ case 3240000:
+ link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2)- 3.24 Gbps/Lane
+ break;
+ case 4320000:
+ link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane
+ break;
+ case 5400000:
+ link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2)- 5.40 Gbps/Lane
+ break;
+ case 6750000:
+ link_rate = LINK_RATE_RATE_8; // Rate_8 - 6.75 Gbps/Lane
+ break;
+ case 8100000:
+ link_rate = LINK_RATE_HIGH3; // Rate_9 (HBR3)- 8.10 Gbps/Lane
+ break;
+ default:
+ link_rate = LINK_RATE_UNKNOWN;
+ break;
+ }
+ return link_rate;
+}
+
+static union dp_cable_id intersect_cable_id(
+ union dp_cable_id *a, union dp_cable_id *b)
+{
+ union dp_cable_id out;
+
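+	/* Combine the two cable ID reports: take the lesser of each UHBR
+	 * capability bit and the greater of the two CABLE_TYPE values.
+	 */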
+ out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY,
+ b->bits.UHBR10_20_CAPABILITY);
+ out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY,
+ b->bits.UHBR13_5_CAPABILITY);
+ out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE);
+
+ return out;
+}
+
+/*
+ * Return the PCON's post FRL link training supported BW if it is non-zero,
+ * otherwise return max_supported_frl_bw_in_kbps.
+ */
+static uint32_t intersect_frl_link_bw_support(
+ const uint32_t max_supported_frl_bw_in_kbps,
+ const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
+{
+ uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+
+ // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
+ if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
+ }
+
+ return supported_bw_in_kbps;
+}
+
+static enum clock_source_id get_clock_source_id(struct dc_link *link)
+{
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
+ struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;
+
+ if (dp_cs != NULL) {
+ dp_cs_id = dp_cs->id;
+ } else {
+		/*
+		 * DP clock source is not initialized for some reason.
+		 * Should not happen; CLOCK_SOURCE_ID_UNDEFINED will be returned.
+		 */
+ ASSERT(dp_cs);
+ }
+
+ return dp_cs_id;
+}
+
+static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+ int length)
+{
+ int retry = 0;
+
+ if (!link->dpcd_caps.dpcd_rev.raw) {
+ do {
+ dpcd_write_rx_power_ctrl(link, true);
+ core_link_read_dpcd(link, DP_DPCD_REV,
+ dpcd_data, length);
+ link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+ DP_DPCD_REV -
+ DP_DPCD_REV];
+ } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+ }
+
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+ switch (link->dpcd_caps.branch_dev_id) {
+		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
+		 * all internal circuits, including AUX communication, which
+		 * prevents reading the DPCD table and EDID (spec violation).
+		 * The encoder will skip DP RX power down on disable_output to
+		 * keep the receiver powered all the time.
+		 */
+ case DP_BRANCH_DEVICE_ID_0010FA:
+ case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
+ link->wa_flags.dp_keep_receiver_powered = true;
+ break;
+
+ /* TODO: May need work around for other dongles. */
+ default:
+ link->wa_flags.dp_keep_receiver_powered = false;
+ break;
+ }
+ } else
+ link->wa_flags.dp_keep_receiver_powered = false;
+}
+
+bool dp_is_fec_supported(const struct dc_link *link)
+{
+ /* TODO - use asic cap instead of link_enc->features
+ * we no longer know which link enc to use for this link before commit
+ */
+ struct link_encoder *link_enc = NULL;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ return (dc_is_dp_signal(link->connector_signal) && link_enc &&
+ link_enc->features.fec_supported &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
+
+bool dp_should_enable_fec(const struct dc_link *link)
+{
+ bool force_disable = false;
+
+ if (link->fec_state == dc_link_fec_enabled)
+ force_disable = false;
+ else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.disable_fec)
+ force_disable = true;
+ else if (link->connector_signal == SIGNAL_TYPE_EDP
+ && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
+ dsc_support.DSC_SUPPORT == false
+ || link->panel_config.dsc.disable_dsc_edp
+ || !link->dc->caps.edp_dsc_support))
+ force_disable = true;
+
+ return !force_disable && dp_is_fec_supported(link);
+}
+
+bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ /* If this assert is hit then we have a link encoder dynamic management issue */
+ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+
+bool dp_is_lttpr_present(struct dc_link *link)
+{
+ return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+}
+
+/* In DP compliance tests, the DPR-120 may report a random value in its
+ * MAX_LINK_BW DPCD field. In this case we map it to the largest standard
+ * link rate that does not exceed MAX_LINK_BW.
+ */
+static enum dc_link_rate get_link_rate_from_max_link_bw(
+ uint8_t max_link_bw)
+{
+ enum dc_link_rate link_rate;
+
+ if (max_link_bw >= LINK_RATE_HIGH3) {
+ link_rate = LINK_RATE_HIGH3;
+ } else if (max_link_bw < LINK_RATE_HIGH3
+ && max_link_bw >= LINK_RATE_HIGH2) {
+ link_rate = LINK_RATE_HIGH2;
+ } else if (max_link_bw < LINK_RATE_HIGH2
+ && max_link_bw >= LINK_RATE_HIGH) {
+ link_rate = LINK_RATE_HIGH;
+ } else if (max_link_bw < LINK_RATE_HIGH
+ && max_link_bw >= LINK_RATE_LOW) {
+ link_rate = LINK_RATE_LOW;
+ } else {
+ link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ return link_rate;
+}
+
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+ lttpr_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+ lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+ return lttpr_max_link_rate;
+}
+
+static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+ cable_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+ cable_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
+ cable_max_link_rate = LINK_RATE_UHBR10;
+
+ return cable_max_link_rate;
+}
+
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+ return lane_count <= LANE_COUNT_ONE;
+}
+
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+ return link_rate <= LINK_RATE_LOW;
+}
+
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_FOUR:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_ONE;
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_UNKNOWN;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate reduce_link_rate(const struct dc_link *link, enum dc_link_rate link_rate)
+{
+ // NEEDSWORK: provide some details about why this function never returns some of the
+ // obscure link rates such as 4.32 Gbps or 3.24 Gbps and if such behavior is intended.
+ //
+
+ switch (link_rate) {
+ case LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_HIGH3;
+ case LINK_RATE_HIGH3:
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->debug.support_eDP1_5)
+ return LINK_RATE_RATE_8;
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_RATE_8:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_RATE_6:
+ case LINK_RATE_RBR2:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_LOW;
+ case LINK_RATE_RATE_3:
+ case LINK_RATE_RATE_2:
+ return LINK_RATE_LOW;
+ case LINK_RATE_LOW:
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_FOUR;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate increase_link_rate(struct dc_link *link,
+ enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH3;
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+		/* Up to the DP 2.x specs, UHBR13.5 is the only link rate that
+		 * may be unsupported by the DPRX even when a higher link rate
+		 * is supported, so we treat it as a special case for code
+		 * simplicity. When new specs add more link rates like this, we
+		 * should consider a more generic solution to handle discrete
+		 * link rate capabilities.
+		 */
+ return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
+ LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR20;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static bool decide_fallback_link_setting_max_bw_policy(
+ struct dc_link *link,
+ const struct dc_link_settings *max,
+ struct dc_link_settings *cur,
+ enum link_training_result training_result)
+{
+ uint8_t cur_idx = 0, next_idx;
+ bool found = false;
+
+ if (training_result == LINK_TRAINING_ABORT)
+ return false;
+
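+	/* Walk the fallback table, which is ordered from highest to lowest
+	 * bandwidth: locate the entry matching the current setting, then pick
+	 * the next entry that still fits within the max capabilities.
+	 */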
+ while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find current index */
+ if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
+ dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
+ break;
+ else
+ cur_idx++;
+
+ next_idx = cur_idx + 1;
+
+ while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find next index */
+ if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
+ dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
+ next_idx++;
+ else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
+ link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
+			/* Up to the DP 2.x specs, UHBR13.5 is the only link
+			 * rate that may be unsupported by the DPRX even when a
+			 * higher link rate is supported, so we treat it as a
+			 * special case for code simplicity. When new specs add
+			 * more link rates like this, we should consider a more
+			 * generic solution to handle discrete link rate
+			 * capabilities.
+			 */
+ next_idx++;
+ else
+ break;
+
+ if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
+ cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
+ cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
+ found = true;
+ }
+
+ return found;
+}
+
+/*
+ * function: set the link rate and lane count fallback based on the current
+ * link setting and the last link training result
+ * return value:
+ * true - a fallback link setting could be set
+ * false - the minimum setting has been reached and no further fallback
+ * could be done
+ */
+bool decide_fallback_link_setting(
+ struct dc_link *link,
+ struct dc_link_settings *max,
+ struct dc_link_settings *cur,
+ enum link_training_result training_result)
+{
+ if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING ||
+ link->dc->debug.force_dp2_lt_fallback_method)
+ return decide_fallback_link_setting_max_bw_policy(link, max,
+ cur, training_result);
+
+ switch (training_result) {
+ case LINK_TRAINING_CR_FAIL_LANE0:
+ case LINK_TRAINING_CR_FAIL_LANE1:
+ case LINK_TRAINING_CR_FAIL_LANE23:
+ case LINK_TRAINING_LQA_FAIL:
+ {
+ if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(link, cur->link_rate);
+ } else if (!reached_minimum_lane_count(cur->lane_count)) {
+ cur->link_rate = max->link_rate;
+ if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
+ return false;
+ else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
+ cur->lane_count = LANE_COUNT_ONE;
+ else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
+ cur->lane_count = LANE_COUNT_TWO;
+ else
+ cur->lane_count = reduce_lane_count(cur->lane_count);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+ {
+ if (!reached_minimum_lane_count(cur->lane_count)) {
+ cur->lane_count = reduce_lane_count(cur->lane_count);
+ } else if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(link, cur->link_rate);
+ /* Reduce max link rate to avoid potential infinite loop.
+ * Needed so that any subsequent CR_FAIL fallback can't
+ * re-set the link rate higher than the link rate from
+ * the latest EQ_FAIL fallback.
+ */
+ max->link_rate = cur->link_rate;
+ cur->lane_count = max->lane_count;
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_CR:
+ {
+ if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(link, cur->link_rate);
+ /* Reduce max link rate to avoid potential infinite loop.
+ * Needed so that any subsequent CR_FAIL fallback can't
+ * re-set the link rate higher than the link rate from
+ * the latest EQ_FAIL fallback.
+ */
+ max->link_rate = cur->link_rate;
+ cur->lane_count = max->lane_count;
+ } else {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting = {
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
+ struct dc_link_settings current_link_setting =
+ initial_link_setting;
+ uint32_t link_bw;
+
+ if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
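+	/* Lane count is stepped up first at the current link rate; only when
+	 * the lane count is maxed out does the link rate increase, with the
+	 * lane count reset back to one.
+	 */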
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dp_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(link,
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+
+ return false;
+}
+
+bool edp_decide_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0) {
+ *link_setting = link->verified_link_cap;
+ return true;
+ }
+
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
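+	/* For eDP 1.4+ the link rate is stepped through the sink's
+	 * SUPPORTED_LINK_RATES table via link_rate_set rather than the fixed
+	 * DP link rates.
+	 */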
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dp_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ return false;
+}
+
+bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ unsigned int policy = 0;
+
+ policy = link->panel_config.dsc.force_dsc_edp_policy;
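+	/* A non-zero force_dsc_edp_policy means minimize lane count (raise the
+	 * link rate first); otherwise minimize link rate (raise the lane count
+	 * first).
+	 */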
+ if (max_link_rate == LINK_RATE_UNKNOWN)
+ max_link_rate = link->verified_link_cap.link_rate;
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = LINK_RATE_LOW;
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = false;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+ if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dp_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate =
+ increase_link_rate(link,
+ current_link_setting.link_rate);
+ } else {
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate = initial_link_setting.link_rate;
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(link,
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+ }
+ return false;
+ }
+
+ /* if optimize edp link is supported */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dp_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate_set <
+ link->dpcd_caps.edp_supported_link_rates_count
+ && current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else {
+ if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ *link_setting = link->verified_link_cap;
+ return true;
+}
+
+bool link_decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+ struct dc_link *link = stream->link;
+ uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ memset(link_setting, 0, sizeof(*link_setting));
+
+	/* If a preferred setting is specified through AMDDP, use it if it is
+	 * enough to drive the mode.
+	 */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return true;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ decide_mst_link_settings(link, link_setting);
+ } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* enable edp link optimization for DSC eDP case */
+ if (stream->timing.flags.DSC) {
+ enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->panel_config.dsc.force_dsc_edp_policy) {
+ /* calculate link max link rate cap*/
+ struct dc_link_settings tmp_link_setting;
+ struct dc_crtc_timing tmp_timing = stream->timing;
+ uint32_t orig_req_bw;
+
+ tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ tmp_timing.flags.DSC = 0;
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
+ max_link_rate = tmp_link_setting.link_rate;
+ }
+ decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
+ } else {
+ edp_decide_link_settings(link, link_setting, req_bw);
+ }
+ } else {
+ decide_dp_link_settings(link, link_setting, req_bw);
+ }
+
+ return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
+ link_setting->link_rate != LINK_RATE_UNKNOWN;
+}
+
+enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings)
+{
+ if ((link_settings->link_rate >= LINK_RATE_LOW) &&
+ (link_settings->link_rate <= LINK_RATE_HIGH3))
+ return DP_8b_10b_ENCODING;
+ else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
+ (link_settings->link_rate <= LINK_RATE_UHBR20))
+ return DP_128b_132b_ENCODING;
+ return DP_UNKNOWN_ENCODING;
+}
+
+enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ if (!dc_is_dp_signal(link->connector_signal))
+ return DP_UNKNOWN_ENCODING;
+
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ link_settings = link->preferred_link_setting;
+ } else {
+ decide_mst_link_settings(link, &link_settings);
+ }
+
+ return link_dp_get_encoding_format(&link_settings);
+}
+
+static void read_dp_device_vendor_id(struct dc_link *link)
+{
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+}
+
+static enum dc_status wake_up_aux_channel(struct dc_link *link)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t aux_channel_retry_cnt = 0;
+ uint8_t dpcd_power_state = '\0';
+
+ while (status != DC_OK && aux_channel_retry_cnt < 10) {
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+		/* Delay 1 ms if the AUX CH is in the power down state. Per spec
+		 * section 2.3.1.2, the AUX CH may be powered down due to a
+		 * write of 2 to DPCD 600h. The sink AUX CH is then monitoring
+		 * the differential signal and may need up to 1 ms before being
+		 * able to reply.
+		 */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
+ fsleep(1000);
+ aux_channel_retry_cnt++;
+ }
+ }
+
+ if (status != DC_OK) {
+ dpcd_power_state = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+
+ dpcd_power_state = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+ if (status < 0)
+ DC_LOG_DC("%s: Failed to power up sink: %s\n", __func__,
+ dpcd_power_state == DP_SET_POWER_D0 ? "D0" : "D3");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+static void get_active_converter_info(
+ uint8_t data, struct dc_link *link)
+{
+ union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
+
+ /* decode converter info*/
+ if (!ds_port.fields.PORT_PRESENT) {
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
+ link->dpcd_caps.is_branch_dev = false;
+ return;
+ }
+
+	/* DPCD 0x5 bit 0 = 1 indicates this is a branch device */
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
+		/* At this point we don't know whether it is DVI, HDMI, or DP++;
+		 * assume DVI.
+		 */
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ default:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ }
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info: 00080h-0008Fh. */
+ union dwnstream_port_caps_byte0 *port_caps =
+ (union dwnstream_port_caps_byte0 *)det_caps;
+ if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+ det_caps, sizeof(det_caps)) == DC_OK) {
+
+ switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+ /*Handle DP case as DONGLE_NONE*/
+ case DOWN_STREAM_DETAILED_DP:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ case DOWN_STREAM_DETAILED_VGA:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_DVI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_HDMI:
+ case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
+			/* Handle the DP++ active converter case; process DP++ as HDMI per the DP 1.4 spec */
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+ link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+ if (ds_port.fields.DETAILED_CAPS) {
+
+ union dwnstream_port_caps_byte3_hdmi
+ hdmi_caps = {.raw = det_caps[3] };
+ union dwnstream_port_caps_byte2
+ hdmi_color_caps = {.raw = det_caps[2] };
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+ det_caps[1] * 2500;
+
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+ hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+ /*YCBCR capability only for HDMI case*/
+ if (port_caps->bits.DWN_STRM_PORTX_TYPE
+ == DOWN_STREAM_DETAILED_HDMI) {
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+ }
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+
+ if (link->dc->caps.dp_hdmi21_pcon_support) {
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
+ link_bw_kbps_from_raw_frl_link_rate_data(
+ hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ }
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ break;
+ }
+ }
+ }
+
+ set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+ {
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.branch_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.branch_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ }
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ union dp_dfp_cap_ext dfp_cap_ext;
+ memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
+ core_link_read_dpcd(
+ link,
+ DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
+ dfp_cap_ext.raw,
+ sizeof(dfp_cap_ext.raw));
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
+ dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
+ (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
+ dfp_cap_ext.fields.max_video_h_active_width[0] +
+ (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
+ dfp_cap_ext.fields.max_video_v_active_height[0] +
+ (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
+ dfp_cap_ext.fields.encoding_format_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
+ dfp_cap_ext.fields.rgb_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr444_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr422_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr420_color_depth_caps;
+ DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
+ DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
+ DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
+ }
+}
+
+static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
+ struct dc_link_settings *link_settings)
+{
+	/* Temporary Renoir-specific workaround: the PHY will sometimes be in a
+	 * bad state on hotplugging a display from certain USB-C dongles, so add
+	 * an extra cycle of enabling and disabling the PHY before the first
+	 * link training.
+	 */
+ struct link_resource link_res = {0};
+ enum clock_source_id dp_cs_id = get_clock_source_id(link);
+
+ dp_enable_link_phy(link, &link_res, link->connector_signal,
+ dp_cs_id, link_settings);
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
+}
+
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[16];
+ uint32_t read_dpcd_retry_cnt = 3;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ union dp_downstream_port_present ds_port = { 0 };
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+
+ int i;
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ return true;
+}
+
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+ if (!link->dc->vendor_signature.is_valid) {
+ enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
+ struct dpcd_amd_signature amd_signature = {0};
+ struct dpcd_amd_device_id amd_device_id = {0};
+
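+		/* No board-specific vendor signature provided: program the
+		 * default AMD source OUI (00-00-1A) and a device ID derived
+		 * from the ASIC ID into the DPCD source-specific field.
+		 */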
+ amd_device_id.device_id_byte1 =
+ (uint8_t)(link->ctx->asic_id.chip_id);
+ amd_device_id.device_id_byte2 =
+ (uint8_t)(link->ctx->asic_id.chip_id >> 8);
+ amd_device_id.dce_version =
+ (uint8_t)(link->ctx->dce_version);
+ amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
+
+ core_link_read_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
+
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+ }
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
+ (uint8_t *)(&amd_device_id),
+ sizeof(amd_device_id));
+
+ if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
+ link->dc->caps.min_horizontal_blanking_period != 0) {
+
+ uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
+
+ result_write_min_hblank = core_link_write_dpcd(link,
+ DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
+ sizeof(hblank_size));
+ }
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
+ "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
+ result_write_min_hblank,
+ link->link_index,
+ link->ctx->dce_version,
+ DP_SOURCE_MINIMUM_HBLANK_SUPPORTED,
+ link->dc->caps.min_horizontal_blanking_period,
+ link->dpcd_caps.branch_dev_id,
+ link->dpcd_caps.branch_dev_name[0],
+ link->dpcd_caps.branch_dev_name[1],
+ link->dpcd_caps.branch_dev_name[2],
+ link->dpcd_caps.branch_dev_name[3],
+ link->dpcd_caps.branch_dev_name[4],
+ link->dpcd_caps.branch_dev_name[5]);
+ } else {
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ link->dc->vendor_signature.data.raw,
+ sizeof(link->dc->vendor_signature.data.raw));
+ }
+}
+
+void dpcd_write_cable_id_to_dprx(struct dc_link *link)
+{
+ if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED ||
+ link->dpcd_caps.cable_id.raw == 0 ||
+ link->dprx_states.cable_id_written)
+ return;
+
+ core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX,
+ &link->dpcd_caps.cable_id.raw,
+ sizeof(link->dpcd_caps.cable_id.raw));
+
+ link->dprx_states.cable_id_written = 1;
+}
+
+static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!link->ctx->dmub_srv ||
+ link->ep_type != DISPLAY_ENDPOINT_PHY ||
+ link->link_enc->features.flags.bits.DP_IS_USB_C == 0)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID;
+ cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
+ cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
+ link->dc, link->link_enc->transmitter);
+ if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
+ cmd.cable_id.header.ret_status == 1) {
+ cable_id->raw = cmd.cable_id.data.output_raw;
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
+ return cmd.cable_id.header.ret_status == 1;
+}
+
+static void retrieve_cable_id(struct dc_link *link)
+{
+ union dp_cable_id usbc_cable_id;
+
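+	/* Combine the cable ID reported by the DPRX with the one queried over
+	 * the USB-C path (via DMUB), keeping the more restrictive capabilities.
+	 */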
+ link->dpcd_caps.cable_id.raw = 0;
+ core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
+ &link->dpcd_caps.cable_id.raw, sizeof(uint8_t));
+
+ if (get_usbc_cable_id(link, &usbc_cable_id))
+ link->dpcd_caps.cable_id = intersect_cable_id(
+ &link->dpcd_caps.cable_id, &usbc_cable_id);
+}
+
+bool read_is_mst_supported(struct dc_link *link)
+{
+ bool mst = false;
+ enum dc_status st = DC_OK;
+ union dpcd_rev rev;
+ union mstm_cap cap;
+
+ if (link->preferred_training_settings.mst_enable &&
+ *link->preferred_training_settings.mst_enable == false) {
+ return false;
+ }
+
+ rev.raw = 0;
+ cap.raw = 0;
+
+ st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
+ sizeof(rev));
+
+ if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+ st = core_link_read_dpcd(link, DP_MSTM_CAP,
+ &cap.raw, sizeof(cap));
+ if (st == DC_OK && cap.bits.MST_CAP == 1)
+ mst = true;
+ }
+ return mst;
+
+}
+
+/* Read additional sink caps defined in source specific DPCD area
+ * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP)
+ * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data;
+
+ if (!link)
+ return false;
+
+ if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+ return false;
+
+ link->dpcd_sink_ext_caps.raw = dpcd_data;
+ return true;
+}
+
+enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
+{
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status;
+ bool is_lttpr_present;
+
+ /* Logic to determine LTTPR support*/
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
+
+ if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
+ return DC_NOT_SUPPORTED;
+
+ /* By reading LTTPR capability, RX assumes that we will enable
+ * LTTPR extended aux timeout if LTTPR is present.
+ */
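+	/* A single 8-byte read covers the LTTPR field data structure starting
+	 * at DPCD F0000h; the individual capability fields below are indexed
+	 * by their offset from that base address.
+	 */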
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+	/* If this chip cap is set, at least one retimer must exist in the chain.
+	 * Override the count to 1 if we receive a known bad count (0 or an invalid value).
+	 */
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ }
+
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = dp_is_lttpr_present(link);
+
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ return status;
+}
+
+static bool retrieve_link_cap(struct dc_link *link)
+{
+ /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
+ * which means size 16 will be good for both of those DPCD register block reads
+ */
+ uint8_t dpcd_data[16];
+	/* Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. */
+ uint8_t dpcd_dprx_data = '\0';
+
+ struct dp_device_vendor_id sink_id;
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t read_dpcd_retry_cnt = 3;
+ int i;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+ '\0', sizeof(union down_stream_port_count));
+ memset(&edp_config_cap, '\0',
+ sizeof(union edp_configuration_cap));
+
+ /* if extended timeout is supported in hardware,
+ * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+ * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+ */
+ try_to_configure_aux_timeout(link->ddc,
+ LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+
+ status = dp_retrieve_lttpr_cap(link);
+
+ if (status != DC_OK) {
+ status = wake_up_aux_channel(link);
+ if (status == DC_OK)
+ dp_retrieve_lttpr_cap(link);
+ else
+ return false;
+ }
+
+ if (dp_is_lttpr_present(link))
+ configure_lttpr_mode_transparent(link);
+
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
+
+ dpcd_set_source_specific_data(link);
+ /* Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ if (status != DC_OK) {
+ dm_error("%s: Read receiver caps dpcd data failed.\n", __func__);
+ return false;
+ }
+
+ if (!dp_is_lttpr_present(link))
+ try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+ {
+ union training_aux_rd_interval aux_rd_interval;
+
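+		/* EXT_RECEIVER_CAP_FIELD_PRESENT in TRAINING_AUX_RD_INTERVAL
+		 * indicates that an extended receiver capability field exists
+		 * at DPCD 02200h; when present it is read and used in place of
+		 * the legacy capability field read above.
+		 */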
+ aux_rd_interval.raw =
+ dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
+
+ link->dpcd_caps.ext_receiver_cap_field_present =
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
+
+ if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
+ uint8_t ext_cap_data[16];
+
+ memset(ext_cap_data, '\0', sizeof(ext_cap_data));
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DP13_DPCD_REV,
+ ext_cap_data,
+ sizeof(ext_cap_data));
+ if (status == DC_OK) {
+ memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+ break;
+ }
+ }
+ if (status != DC_OK)
+ dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
+ }
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (link->dpcd_caps.ext_receiver_cap_field_present) {
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dpcd_dprx_data,
+ sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed.\n", __func__);
+
+ /* AdaptiveSyncCapability */
+ dpcd_dprx_data = 0;
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &dpcd_dprx_data, sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed. Addr:%#x\n",
+ __func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1);
+	} else {
+		link->dpcd_caps.dprx_feature.raw = 0;
+	}
+
+ /* Error condition checking...
+ * It is impossible for Sink to report Max Lane Count = 0.
+ * It is possible for Sink to report Max Link Rate = 0, if it is
+ * an eDP device that is reporting specialized link rates in the
+ * SUPPORTED_LINK_RATE table.
+ */
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ read_dp_device_vendor_id(link);
+
+ /* TODO - decouple raw mst capability from policy decision */
+ link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
+
+ get_active_converter_info(ds_port.byte, link);
+
+ dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw(
+ dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]);
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+ link->dpcd_caps.channel_coding_cap.raw =
+ dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV];
+ link->test_pattern_enabled = false;
+ link->compliance_test_state.raw = 0;
+
+ /* read sink count */
+ core_link_read_dpcd(link,
+ DP_SINK_COUNT,
+ &link->dpcd_caps.sink_count.raw,
+ sizeof(link->dpcd_caps.sink_count.raw));
+
+ /* read sink ieee oui */
+ core_link_read_dpcd(link,
+ DP_SINK_OUI,
+ (uint8_t *)(&sink_id),
+ sizeof(sink_id));
+
+ link->dpcd_caps.sink_dev_id =
+ (sink_id.ieee_oui[0] << 16) +
+ (sink_id.ieee_oui[1] << 8) +
+ (sink_id.ieee_oui[2]);
+
+ memmove(
+ link->dpcd_caps.sink_dev_id_str,
+ sink_id.ieee_device_id,
+ sizeof(sink_id.ieee_device_id));
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_HW_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.sink_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.sink_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
+ /* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+ uint8_t fwrev_mbp_2018[] = { 7, 4 };
+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+ /* We also check for the firmware revision as 16,1 models have an
+ * identical device id and are incorrectly quirked otherwise.
+ */
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+ sizeof(str_mbp_2018)) &&
+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+ sizeof(fwrev_mbp_2018)) ||
+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+ sizeof(fwrev_mbp_2018_vega)))) {
+ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+ }
+ }
+
+ memset(&link->dpcd_caps.dsc_caps, '\0',
+ sizeof(link->dpcd_caps.dsc_caps));
+ memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
+ /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ status = core_link_read_dpcd(
+ link,
+ DP_FEC_CAPABILITY,
+ &link->dpcd_caps.fec_cap.raw,
+ sizeof(link->dpcd_caps.fec_cap.raw));
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_SUPPORT,
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
+ DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
+ }
+
+ /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode
+ * only if required.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around &&
+ link->dpcd_caps.is_branch_dev &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
+ (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) {
+ /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA.
+ * Clear FEC and DSC capabilities as a work around if that is not the case.
+ */
+ link->wa_flags.dpia_forced_tbt3_mode = true;
+ memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps));
+ memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
+ DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index);
+ } else
+ link->wa_flags.dpia_forced_tbt3_mode = false;
+ }
+
+ if (!dpcd_read_sink_ext_caps(link))
+ link->dpcd_sink_ext_caps.raw = 0;
+
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+
+ core_link_read_dpcd(link,
+ DP_128B132B_SUPPORTED_LINK_RATES,
+ &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
+ sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
+ if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
+ else
+ dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
+ DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index);
+ DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
+ link->reported_link_cap.link_rate / 100,
+ link->reported_link_cap.link_rate % 100);
+
+ core_link_read_dpcd(link,
+ DP_SINK_VIDEO_FALLBACK_FORMATS,
+ &link->dpcd_caps.fallback_formats.raw,
+ sizeof(link->dpcd_caps.fallback_formats.raw));
+ DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
+ if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.raw == 0) {
+ DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
+ link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
+ }
+
+ core_link_read_dpcd(link,
+ DP_FEC_CAPABILITY_1,
+ &link->dpcd_caps.fec_cap1.raw,
+ sizeof(link->dpcd_caps.fec_cap1.raw));
+ DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
+ if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
+ DC_LOG_DP2("\tFEC aggregated error counters are supported");
+ }
+
+ retrieve_cable_id(link);
+ dpcd_write_cable_id_to_dprx(link);
+
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+
+ return true;
+}
+
+bool detect_dp_sink_caps(struct dc_link *link)
+{
+ return retrieve_link_cap(link);
+}
+
+void detect_edp_sink_caps(struct dc_link *link)
+{
+ uint8_t supported_link_rates[16];
+ uint32_t entry;
+ uint32_t link_rate_in_khz;
+ enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
+ uint8_t backlight_adj_cap;
+ uint8_t general_edp_cap;
+
+ retrieve_link_cap(link);
+ link->dpcd_caps.edp_supported_link_rates_count = 0;
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
+
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
+ (link->panel_config.ilr.optimize_edp_link_rate ||
+ link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
+ // Read DPCD 00010h - 0001Fh 16 bytes at one shot
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+
+ for (entry = 0; entry < 16; entry += 2) {
+ // DPCD register reports per-lane link rate = 16-bit link rate capability
+ // value X 200 kHz. Need multiplier to find link rate in kHz.
+ link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
+ supported_link_rates[entry]) * 200;
+
+ if (link_rate_in_khz != 0) {
+ link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
+
+ if (link->reported_link_cap.link_rate < link_rate)
+ link->reported_link_cap.link_rate = link_rate;
+ }
+ }
+ }
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
+ &backlight_adj_cap, sizeof(backlight_adj_cap));
+
+ link->dpcd_caps.dynamic_backlight_capable_edp =
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
+ &general_edp_cap, sizeof(general_edp_cap));
+
+ link->dpcd_caps.set_power_state_capable_edp =
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+
+ set_default_brightness_aux(link);
+
+ core_link_read_dpcd(link, DP_EDP_DPCD_REV,
+ &link->dpcd_caps.edp_rev,
+ sizeof(link->dpcd_caps.edp_rev));
+ /*
+ * PSR is only valid for eDP v1.3 or higher.
+ */
+ if (link->dpcd_caps.edp_rev >= DP_EDP_13) {
+ core_link_read_dpcd(link, DP_PSR_SUPPORT,
+ &link->dpcd_caps.psr_info.psr_version,
+ sizeof(link->dpcd_caps.psr_info.psr_version));
+ if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
+ core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY,
+ &link->dpcd_caps.psr_info.force_psrsu_cap,
+ sizeof(link->dpcd_caps.psr_info.force_psrsu_cap));
+ core_link_read_dpcd(link, DP_PSR_CAPS,
+ &link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw));
+ if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) {
+ core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY,
+ &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap,
+ sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap));
+ }
+ }
+
+ /*
+ * ALPM is only valid for eDP v1.4 or higher.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
+ core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP,
+ &link->dpcd_caps.alpm_caps.raw,
+ sizeof(link->dpcd_caps.alpm_caps.raw));
+}
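
A minimal standalone sketch of the supported-link-rate decoding performed in detect_edp_sink_caps() above: each two-byte entry of the DPCD 00010h-0001Fh table is a little-endian 16-bit value in units of 200 kHz. The table contents below are hypothetical, not from any real panel.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical DPCD 00010h..0001Fh contents: 2.16, 2.7 and 5.4 Gbps
         * per-lane rates; unused entries are zero.
         */
        uint8_t tbl[16] = { 0x30, 0x2A, 0xBC, 0x34, 0x78, 0x69 };
        unsigned int entry;

        for (entry = 0; entry < 16; entry += 2) {
            /* little-endian 16-bit value, 200 kHz per unit */
            uint32_t rate_khz = (tbl[entry + 1] * 0x100 + tbl[entry]) * 200;

            if (rate_khz)
                printf("entry %u: %u kHz (%.2f Gbps per lane)\n",
                       entry / 2, rate_khz, rate_khz / 1000000.0);
        }
        return 0;
    }
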
+
+bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
+{
+ struct link_encoder *link_enc = NULL;
+
+ if (!max_link_enc_cap) {
+ DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
+ return false;
+ }
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (link_enc && link_enc->funcs->get_max_link_cap) {
+ link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
+ return true;
+ }
+
+ DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
+ max_link_enc_cap->lane_count = 1;
+ max_link_enc_cap->link_rate = 6;
+ return false;
+}
+
+const struct dc_link_settings *dp_get_verified_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
+struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+ enum dc_link_rate lttpr_max_link_rate;
+ enum dc_link_rate cable_max_link_rate;
+ struct link_encoder *link_enc = NULL;
+
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ /* get max link encoder capability */
+ if (link_enc)
+ link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
+
+ /* Lower link settings based on sink's link cap */
+ if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count =
+ link->reported_link_cap.lane_count;
+ if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate =
+ link->reported_link_cap.link_rate;
+ if (link->reported_link_cap.link_spread <
+ max_link_cap.link_spread)
+ max_link_cap.link_spread =
+ link->reported_link_cap.link_spread;
+
+ /* Lower link settings based on cable attributes
+ * Cable ID is a DP2 feature to identify max certified link rate that
+ * a cable can carry. The cable identification method requires both
+	 * cable and display hardware support. Since the spec came late, it is
+	 * anticipated that the first round of DP2 cables and displays may not
+	 * be fully compliant in reliably returning cable ID data. Therefore our
+	 * cable ID policy is: if the cable can return non-zero cable ID data,
+	 * we will take the cable's link rate capability into account. However,
+	 * if we get zero data, the cable's link rate capability is considered
+	 * inconclusive. In this case, we will not take the cable's capability
+	 * into account, to avoid over-limiting hardware capability for users.
+	 * The max overall link rate capability is still determined after actual
+	 * dp pre-training. Cable ID is considered an auxiliary
+ * method of determining max link bandwidth capability.
+ */
+ cable_max_link_rate = get_cable_max_link_rate(link);
+
+ if (!link->dc->debug.ignore_cable_id &&
+ cable_max_link_rate != LINK_RATE_UNKNOWN &&
+ cable_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = cable_max_link_rate;
+
+ /* account for lttpr repeaters cap
+ * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
+ */
+ if (dp_is_lttpr_present(link)) {
+ if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_link_rate = get_lttpr_max_link_rate(link);
+
+ if (lttpr_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = lttpr_max_link_rate;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
+ __func__,
+ max_link_cap.lane_count,
+ max_link_cap.link_rate);
+ }
+
+ if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
+ link->dc->debug.disable_uhbr)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ return max_link_cap;
+}
+
+static bool dp_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count)
+{
+ struct dc_link_settings cur_link_settings = {0};
+ struct dc_link_settings max_link_settings = *known_limit_link_setting;
+ bool success = false;
+ bool skip_video_pattern;
+ enum clock_source_id dp_cs_id = get_clock_source_id(link);
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union hpd_irq_data irq_data;
+ struct link_resource link_res;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+ cur_link_settings = max_link_settings;
+
+ /* Grant extended timeout request */
+ if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
+
+ core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
+ }
+
+ do {
+ if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings))
+ continue;
+
+ skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW;
+ dp_enable_link_phy(
+ link,
+ &link_res,
+ link->connector_signal,
+ dp_cs_id,
+ &cur_link_settings);
+
+ status = dp_perform_link_training(
+ link,
+ &link_res,
+ &cur_link_settings,
+ skip_video_pattern);
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ success = true;
+ fsleep(1000);
+ if (dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
+ dp_parse_link_loss_status(
+ link,
+ &irq_data))
+ (*fail_count)++;
+
+ } else {
+ (*fail_count)++;
+ }
+ dp_trace_lt_total_count_increment(link, true);
+ dp_trace_lt_result_update(link, status, true);
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(link,
+ &max_link_settings, &cur_link_settings, status));
+
+ link->verified_link_cap = success ?
+ cur_link_settings : fail_safe_link_settings;
+ return success;
+}
+
+bool dp_verify_link_cap_with_retries(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int attempts)
+{
+ int i = 0;
+ bool success = false;
+ int fail_count = 0;
+
+ dp_trace_detect_lt_init(link);
+
+ if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ link->dc->debug.usbc_combo_phy_reset_wa)
+ apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
+
+ dp_trace_set_lt_start_timestamp(link, false);
+ for (i = 0; i < attempts; i++) {
+ enum dc_connection_type type = dc_connection_none;
+
+ memset(&link->verified_link_cap, 0,
+ sizeof(struct dc_link_settings));
+ if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap = fail_safe_link_settings;
+ break;
+ } else if (dp_verify_link_cap(link, known_limit_link_setting,
+ &fail_count) && fail_count == 0) {
+ success = true;
+ break;
+ }
+ fsleep(10 * 1000);
+ }
+
+ dp_trace_lt_fail_count_update(link, fail_count, true);
+ dp_trace_set_lt_end_timestamp(link, true);
+
+ return success;
+}
+
+/*
+ * Check if there is a native DP or passive DP-HDMI dongle connected
+ */
+bool dp_is_sink_present(struct dc_link *link)
+{
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+ uint8_t retry = 0;
+ struct ddc *ddc;
+
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(link->link_id);
+
+ bool present =
+ ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP) ||
+ (connector_id == CONNECTOR_ID_USBC));
+
+ ddc = get_ddc_pin(link->ddc);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return present;
+ }
+
+ /* Open GPIO and set it to I2C mode */
+ /* Note: this GpioMode_Input will be converted
+ * to GpioConfigType_I2cAuxDualMode in GPIO component,
+ * which indicates we need additional delay
+ */
+
+ if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
+ dal_ddc_close(ddc);
+
+ return present;
+ }
+
+ /*
+ * Read GPIO: DP sink is present if both clock and data pins are zero
+ *
+ * [W/A] plug-unplug DP cable, sometimes customer board has
+	 * one short pulse on clk_pin (1V, < 1ms). DP will be configured to HDMI/DVI
+	 * and then the monitor can't be lit up. Add retry 3 times.
+	 * But a real passive dongle needs an additional 3ms to detect.
+ */
+ do {
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+ if (clock_pin)
+ fsleep(1000);
+ else
+ break;
+ } while (retry++ < 3);
+
+ present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+ dal_ddc_close(ddc);
+
+ return present;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
new file mode 100644
index 000000000000..8f0ce97f2362
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_CAPABILITY_H__
+#define __DC_LINK_DP_CAPABILITY_H__
+
+#include "link.h"
+
+bool detect_dp_sink_caps(struct dc_link *link);
+
+void detect_edp_sink_caps(struct dc_link *link);
+
+struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
+
+bool dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+
+const struct dc_link_settings *dp_get_verified_link_cap(
+ const struct dc_link *link);
+
+enum dp_link_encoding link_dp_get_encoding_format(
+ const struct dc_link_settings *link_settings);
+
+enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
+
+/* Convert PHY repeater count read from DPCD uint8_t. */
+uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+
+bool dp_is_sink_present(struct dc_link *link);
+
+bool dp_is_lttpr_present(struct dc_link *link);
+
+bool dp_is_fec_supported(const struct dc_link *link);
+
+bool is_dp_active_dongle(const struct dc_link *link);
+
+bool is_dp_branch_device(const struct dc_link *link);
+
+void dpcd_write_cable_id_to_dprx(struct dc_link *link);
+
+bool dp_should_enable_fec(const struct dc_link *link);
+
+bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx);
+
+/* Initialize output parameter lt_settings. */
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings);
+
+bool link_decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+bool edp_decide_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw);
+
+bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate);
+
+enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link);
+
+void dpcd_set_source_specific_data(struct dc_link *link);
+
+/*query dpcd for version and mst cap addresses*/
+bool read_is_mst_supported(struct dc_link *link);
+
+bool decide_fallback_link_setting(
+ struct dc_link *link,
+ struct dc_link_settings *max,
+ struct dc_link_settings *cur,
+ enum link_training_result training_result);
+
+bool dp_verify_link_cap_with_retries(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int attempts);
+
+uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
+
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
+#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
new file mode 100644
index 000000000000..4626fabc0a96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "inc/core_status.h"
+#include "dpcd_defs.h"
+
+#include "link_dp_dpia.h"
+#include "link_hwss.h"
+#include "dm_helpers.h"
+#include "dmub/inc/dmub_cmd.h"
+#include "link_dpcd.h"
+#include "link_dp_training.h"
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+/* Note: can be removed once the DP tunneling registers are defined in upstream include/drm/drm_dp_helper.h */
+/* DPCD DP Tunneling over USB4 */
+#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
+#define DP_IN_ADAPTER_INFO 0xe000e
+#define DP_USB4_DRIVER_ID 0xe000f
+#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b
+
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
+{
+ enum dc_status status = DC_OK;
+ uint8_t dpcd_dp_tun_data[3] = {0};
+ uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0};
+ uint8_t i = 0;
+
+ status = core_link_read_dpcd(
+ link,
+ DP_TUNNELING_CAPABILITIES_SUPPORT,
+ dpcd_dp_tun_data,
+ sizeof(dpcd_dp_tun_data));
+
+ status = core_link_read_dpcd(
+ link,
+ DP_USB4_ROUTER_TOPOLOGY_ID,
+ dpcd_topology_data,
+ sizeof(dpcd_topology_data));
+
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
+ link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
+
+ return status;
+}
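
dpcd_get_tunneling_device_data() above reads DPCD 0xE000D-0xE000F in a single transaction and then indexes the result buffer by register address minus the base address. A minimal standalone sketch of that offset arithmetic follows; the buffer contents are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    #define TUN_CAPS_BASE   0xe000d  /* DP_TUNNELING_CAPABILITIES_SUPPORT */
    #define IN_ADAPTER_INFO 0xe000e  /* DP_IN_ADAPTER_INFO */
    #define USB4_DRIVER_ID  0xe000f  /* DP_USB4_DRIVER_ID */

    int main(void)
    {
        /* hypothetical 3-byte DPCD read starting at TUN_CAPS_BASE */
        uint8_t buf[3] = { 0x81, 0x02, 0x07 };

        printf("tunneling caps : 0x%02x\n", buf[TUN_CAPS_BASE - TUN_CAPS_BASE]);
        printf("dp-in adapter  : 0x%02x\n", buf[IN_ADAPTER_INFO - TUN_CAPS_BASE]);
        printf("usb4 driver id : 0x%02x\n", buf[USB4_DRIVER_ID - TUN_CAPS_BASE]);
        return 0;
    }
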
+
+bool dpia_query_hpd_status(struct dc_link *link)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+ bool is_hpd_high = false;
+
+ /* prepare QUERY_HPD command */
+ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+ if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ cmd.query_hpd.data.status,
+ cmd.query_hpd.data.result);
+
+ return is_hpd_high;
+}
+
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 84204ec1b046..363f45a1a964 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -1,9 +1,6 @@
-/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
- * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
- */
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -12,36 +9,33 @@
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#ifndef __TDFX_H__
-#define __TDFX_H__
-
-/* General customization:
+ * Authors: AMD
+ *
*/
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
+#ifndef __DC_LINK_DPIA_H__
+#define __DC_LINK_DPIA_H__
-#define DRIVER_NAME "tdfx"
-#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
-#define DRIVER_DATE "20010216"
+#include "link.h"
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
+/* Read tunneling device capability from DPCD and update link capability
+ * accordingly.
+ */
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
-#endif
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dpia_query_hpd_status(struct dc_link *link);
+#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
new file mode 100644
index 000000000000..7581023daa47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -0,0 +1,492 @@
+
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+/*********************************************************************/
+// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
+/*********************************************************************/
+#include "link_dp_dpia_bw.h"
+#include "link_dpcd.h"
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+#define Kbps_TO_Gbps (1000 * 1000)
+
+// ------------------------------------------------------------------
+// PRIVATE FUNCTIONS
+// ------------------------------------------------------------------
+/*
+ * Always Check the following:
+ * - Is it USB4 link?
+ * - Is HPD HIGH?
+ * - Is BW Allocation Support Mode enabled on DP-Tx?
+ */
+static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+{
+ return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type
+ && tmp->hpd_status
+ && tmp->dpia_bw_alloc_config.bw_alloc_enabled);
+}
+static void reset_bw_alloc_struct(struct dc_link *link)
+{
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+ link->dpia_bw_alloc_config.sink_verified_bw = 0;
+ link->dpia_bw_alloc_config.sink_max_bw = 0;
+ link->dpia_bw_alloc_config.estimated_bw = 0;
+ link->dpia_bw_alloc_config.bw_granularity = 0;
+ link->dpia_bw_alloc_config.response_ready = false;
+}
+static uint8_t get_bw_granularity(struct dc_link *link)
+{
+ uint8_t bw_granularity = 0;
+
+ core_link_read_dpcd(
+ link,
+ DP_BW_GRANULALITY,
+ &bw_granularity,
+ sizeof(uint8_t));
+
+ switch (bw_granularity & 0x3) {
+ case 0:
+ bw_granularity = 4;
+ break;
+ case 1:
+ default:
+ bw_granularity = 2;
+ break;
+ }
+
+ return bw_granularity;
+}
+static int get_estimated_bw(struct dc_link *link)
+{
+ uint8_t bw_estimated_bw = 0;
+
+ core_link_read_dpcd(
+ link,
+ ESTIMATED_BW,
+ &bw_estimated_bw,
+ sizeof(uint8_t));
+
+ return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+}
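
The two helpers above establish the unit conversion used throughout this file: the granularity code selects how many BW units make up 1 Gbps (4 units of 0.25 Gbps for code 0, otherwise 2 units of 0.5 Gbps), and the ESTIMATED_BW byte is scaled by (1,000,000 Kbps / granularity). A minimal standalone sketch of that conversion, with hypothetical register values:

    #include <stdint.h>
    #include <stdio.h>

    #define KBPS_PER_GBPS (1000 * 1000)

    static unsigned int granularity_divisor(uint8_t dpcd_granularity)
    {
        /* code 0 -> 0.25 Gbps units (4 per Gbps), otherwise 0.5 Gbps units */
        return (dpcd_granularity & 0x3) == 0 ? 4 : 2;
    }

    int main(void)
    {
        uint8_t raw_granularity = 0x00;  /* hypothetical DPCD granularity code */
        uint8_t raw_estimated = 36;      /* hypothetical ESTIMATED_BW byte */
        unsigned int div = granularity_divisor(raw_granularity);
        unsigned int estimated_kbps = raw_estimated * (KBPS_PER_GBPS / div);

        /* 36 units of 0.25 Gbps -> 9,000,000 Kbps (9 Gbps) */
        printf("estimated bw: %u Kbps\n", estimated_kbps);
        return 0;
    }
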
+static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
+{
+ if (bw_needed > 0)
+ *stream_allocated_bw += bw_needed;
+
+ return true;
+}
+static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
+{
+ bool ret = false;
+
+ if (*stream_allocated_bw > 0) {
+ *stream_allocated_bw -= bw_to_dealloc;
+ ret = true;
+ } else {
+ //Do nothing for now
+ ret = true;
+ }
+
+ // Unplug so reset values
+ if (!link->hpd_status)
+ reset_bw_alloc_struct(link);
+
+ return ret;
+}
+/*
+ * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
+ * granularity, Driver_ID, CM_Group, & populate the BW allocation structs
+ * for host router and dpia
+ */
+static void init_usb4_bw_struct(struct dc_link *link)
+{
+ // Init the known values
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+}
+static uint8_t get_lowest_dpia_index(struct dc_link *link)
+{
+ const struct dc *dc_struct = link->dc;
+ uint8_t idx = 0xFF;
+ int i;
+
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
+
+ if (!dc_struct->links[i] ||
+ dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+ if (idx > dc_struct->links[i]->link_index)
+ idx = dc_struct->links[i]->link_index;
+ }
+
+ return idx;
+}
+/*
+ * Get the Max Available BW or Max Estimated BW for each Host Router
+ *
+ * @link: pointer to the dc_link struct instance
+ * @type: ESTIMATED BW or MAX AVAILABLE BW
+ *
+ * return: total estimated or allocated bandwidth for the DPIA's host router
+ */
+static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+{
+ const struct dc *dc_struct = link->dc;
+ uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+ uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+ struct dc_link *link_temp;
+ int total_bw = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
+
+ if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+ link_temp = dc_struct->links[i];
+ if (!link_temp || !link_temp->hpd_status)
+ continue;
+
+ idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+
+ if (idx_temp == idx) {
+
+ if (type == HOST_ROUTER_BW_ESTIMATED)
+ total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+ else if (type == HOST_ROUTER_BW_ALLOCATED)
+ total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+ }
+ }
+
+ return total_bw;
+}
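
get_host_router_total_bw() above groups DPIAs into host routers purely by index arithmetic: with two DPIAs per router, a link maps to router (link_index - lowest_dpia_index) / 2. A minimal standalone sketch of that grouping, using hypothetical link indices:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical DPIA link indices on one system; the lowest is 2 */
        unsigned int link_index[4] = { 2, 3, 4, 5 };
        unsigned int lowest = 2;
        unsigned int i;

        /* links 2,3 map to host router 0; links 4,5 map to host router 1 */
        for (i = 0; i < 4; i++)
            printf("link %u -> host router %u\n",
                   link_index[i], (link_index[i] - lowest) / 2);
        return 0;
    }
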
+/*
+ * Cleanup function for when the dpia is unplugged to reset struct
+ * and perform any required clean up
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: none
+ */
+static bool dpia_bw_alloc_unplug(struct dc_link *link)
+{
+ if (!link)
+ return true;
+
+ return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+ link->dpia_bw_alloc_config.sink_allocated_bw, link);
+}
+static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+{
+ uint8_t requested_bw;
+ uint32_t temp;
+
+ // 1. Add check for this corner case #1
+ if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ requested_bw = temp / Kbps_TO_Gbps;
+
+	// Round up so the request always covers req_bw (integer division truncates)
+ if (temp % Kbps_TO_Gbps)
+ ++requested_bw;
+
+ // 2. Add check for this corner case #2
+ req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
+ return;
+
+ if (core_link_write_dpcd(
+ link,
+ REQUESTED_BW,
+ &requested_bw,
+ sizeof(uint8_t)) == DC_OK)
+ link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+}
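
set_usb4_req_bw_req() above converts a request given in Kbps into DPCD BW units, rounding up so that the granted allocation always covers the requested rate. A minimal standalone sketch of that rounding, assuming the 0.25 Gbps granularity case and a hypothetical stream bandwidth:

    #include <stdint.h>
    #include <stdio.h>

    #define KBPS_PER_GBPS (1000 * 1000)

    int main(void)
    {
        unsigned int granularity = 4;        /* 0.25 Gbps units */
        unsigned int req_bw_kbps = 8100000;  /* hypothetical stream need: 8.1 Gbps */
        uint64_t temp = (uint64_t)req_bw_kbps * granularity;
        unsigned int units = (unsigned int)(temp / KBPS_PER_GBPS);

        if (temp % KBPS_PER_GBPS)
            units++;                         /* round up to the next BW unit */

        /* 8.1 Gbps -> 32.4 units -> request 33 units = 8.25 Gbps */
        printf("requested units: %u (%u Kbps)\n",
               units, units * (KBPS_PER_GBPS / granularity));
        return 0;
    }
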
+/*
+ * Return the response_ready flag from dc_link struct
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: response_ready flag from dc_link struct
+ */
+static bool get_cm_response_ready_flag(struct dc_link *link)
+{
+ return link->dpia_bw_alloc_config.response_ready;
+}
+// ------------------------------------------------------------------
+// PUBLIC FUNCTIONS
+// ------------------------------------------------------------------
+bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+{
+ bool ret = false;
+ uint8_t response = 0,
+ bw_support_dpia = 0,
+ bw_support_cm = 0;
+
+ if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
+ goto out;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_TUNNELING_CAPABILITIES,
+ &response,
+ sizeof(uint8_t)) == DC_OK)
+ bw_support_dpia = (response >> 7) & 1;
+
+ if (core_link_read_dpcd(
+ link,
+ USB4_DRIVER_BW_CAPABILITY,
+ &response,
+ sizeof(uint8_t)) == DC_OK)
+ bw_support_cm = (response >> 7) & 1;
+
+ /* Send request acknowledgment to Turn ON DPTX support */
+ if (bw_support_cm && bw_support_dpia) {
+
+ response = 0x80;
+ if (core_link_write_dpcd(
+ link,
+ DPTX_BW_ALLOCATION_MODE_CONTROL,
+ &response,
+ sizeof(uint8_t)) != DC_OK) {
+ DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__);
+ } else {
+ // SUCCESS Enabled DPtx BW Allocation Mode Support
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__);
+
+ ret = true;
+ init_usb4_bw_struct(link);
+ }
+ }
+
+out:
+ return ret;
+}
+void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+{
+ int bw_needed = 0;
+ int estimated = 0;
+ int host_router_total_estimated_bw = 0;
+
+ if (!get_bw_alloc_proceed_flag((link)))
+ return;
+
+ switch (result) {
+
+ case DPIA_BW_REQ_FAILED:
+
+ DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+
+ // Update the new Estimated BW value updated by CM
+ link->dpia_bw_alloc_config.estimated_bw =
+ bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
+ link->dpia_bw_alloc_config.response_ready = false;
+
+ /*
+ * If FAIL then it is either:
+ * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally
+ * => get estimated and allocate that
+		 * 2. Due to the fact that DP-Tx tried to allocate the ESTIMATED BW and failed; then
+ * CM will have to update 0xE0023 with new ESTIMATED BW value.
+ */
+ break;
+
+ case DPIA_BW_REQ_SUCCESS:
+
+ DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
+
+ // 1. SUCCESS 1st time before any Pruning is done
+ // 2. SUCCESS after prev. FAIL before any Pruning is done
+ // 3. SUCCESS after Pruning is done but before enabling link
+
+ bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ // 1.
+ if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
+
+ allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
+ link->dpia_bw_alloc_config.sink_verified_bw =
+ link->dpia_bw_alloc_config.sink_allocated_bw;
+
+ // SUCCESS from first attempt
+ if (link->dpia_bw_alloc_config.sink_allocated_bw >
+ link->dpia_bw_alloc_config.sink_max_bw)
+ link->dpia_bw_alloc_config.sink_verified_bw =
+ link->dpia_bw_alloc_config.sink_max_bw;
+ }
+ // 3.
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
+
+ // Find out how much do we need to de-alloc
+ if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
+ deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+ link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
+ else
+ allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+ bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
+ }
+
+ // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
+ // => check if estimated_bw changed
+
+ link->dpia_bw_alloc_config.response_ready = true;
+ break;
+
+ case DPIA_EST_BW_CHANGED:
+
+ DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
+
+ estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
+
+ // 1. If due to unplug of other sink
+ if (estimated == host_router_total_estimated_bw) {
+ // First update the estimated & max_bw fields
+ if (link->dpia_bw_alloc_config.estimated_bw < estimated)
+ link->dpia_bw_alloc_config.estimated_bw = estimated;
+ }
+ // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
+ else {
+ // We lost estimated bw usually due to plug event of other dpia
+ link->dpia_bw_alloc_config.estimated_bw = estimated;
+ }
+ break;
+
+ case DPIA_BW_ALLOC_CAPS_CHANGED:
+
+ DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+ break;
+ }
+}
+int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+{
+ int ret = 0;
+ uint8_t timeout = 10;
+
+ if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type
+ && link->dpia_bw_alloc_config.bw_alloc_enabled))
+ goto out;
+
+ //1. Hot Plug
+ if (link->hpd_status && peak_bw > 0) {
+
+ // If DP over USB4 then we need to check BW allocation
+ link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+
+ do {
+			if (timeout > 0)
+				timeout--;
+			else
+				break;
+ fsleep(10 * 1000);
+ } while (!get_cm_response_ready_flag(link));
+
+ if (!timeout)
+ ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+ ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ }
+ //2. Cold Unplug
+ else if (!link->hpd_status)
+ dpia_bw_alloc_unplug(link);
+
+out:
+ return ret;
+}
+int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+{
+ int ret = 0;
+ uint8_t timeout = 10;
+
+ if (!get_bw_alloc_proceed_flag(link))
+ goto out;
+
+ /*
+ * Sometimes stream uses same timing parameters as the already
+ * allocated max sink bw so no need to re-alloc
+ */
+ if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
+ set_usb4_req_bw_req(link, req_bw);
+ do {
+			if (timeout > 0)
+				timeout--;
+			else
+				break;
+ udelay(10 * 1000);
+ } while (!get_cm_response_ready_flag(link));
+
+ if (!timeout)
+ ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+ ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ }
+
+out:
+ return ret;
+}
+bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+{
+ bool ret = true;
+ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
+ uint8_t lowest_dpia_index = 0, dpia_index = 0;
+ uint8_t i;
+
+ if (!num_dpias || num_dpias > MAX_DPIA_NUM)
+ return ret;
+
+ //Get total Host Router BW & Validate against each Host Router max BW
+ for (i = 0; i < num_dpias; ++i) {
+
+ if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
+ continue;
+
+ lowest_dpia_index = get_lowest_dpia_index(link[i]);
+ if (link[i]->link_index < lowest_dpia_index)
+ continue;
+
+ dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
+ bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
+ if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
+
+ ret = false;
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
new file mode 100644
index 000000000000..7292690383ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INC_LINK_DP_DPIA_BW_H_
+#define DC_INC_LINK_DP_DPIA_BW_H_
+
+#include "link.h"
+
+/* Number of Host Routers per motherboard is 2 */
+#define MAX_HR_NUM 2
+/* Number of DPIA per host router is 2 */
+#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
+
+/*
+ * Host Router BW type
+ */
+enum bw_type {
+ HOST_ROUTER_BW_ESTIMATED,
+ HOST_ROUTER_BW_ALLOCATED,
+ HOST_ROUTER_BW_INVALID,
+};
+
+/*
+ * Enable BW Allocation Mode Support from the DP-Tx side
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: SUCCESS or FAILURE
+ */
+bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+
+/*
+ * Allocates only what the stream needs for bw, so if:
+ * If (stream_req_bw < or > already_allocated_bw_at_HPD)
+ * => Deallocate Max Bw & then allocate only what the stream needs
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: Bw requested by the stream
+ *
+ * return: allocated bw else return 0
+ */
+int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
+
+/*
+ * Handle function for when the status of the Request above is complete.
+ * We will find out the result of allocating on CM and update structs.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result);
+
+/*
+ * Handle the validation of total BW here and confirm that the bw used by each
+ * DPIA doesn't exceed available BW for each host router (HR)
+ *
+ * @link[]: array of link pointer to all possible DPIA links
+ * @bw_needed[]: bw needed for each DPIA link based on timing
+ * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
+ *
+ * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ */
+bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
+
+#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
new file mode 100644
index 000000000000..ba95facc4ee8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements DP HPD short pulse handling sequence according to DP
+ * specifications
+ *
+ */
+
+#include "link_dp_irq_handler.h"
+#include "link_dpcd.h"
+#include "link_dp_training.h"
+#include "link_dp_capability.h"
+#include "link_edp_panel_control.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link/link_dpms.h"
+#include "dm_helpers.h"
+
+#define DC_LOGGER_INIT(logger)
+
+bool dp_parse_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = dp_get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+			/* if channel equalization, clock recovery or
+			 * symbol lock is dropped, consider the link
+			 * dropped: the dp sink status has changed
+			 */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
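
dp_parse_link_loss_status() above checks per-lane status nibbles: each DPCD lane-status byte (0x202/0x203) packs two lanes with four status bits per lane, which is what dp_get_nibble_at_index() extracts. A minimal standalone sketch of that extraction, with hypothetical status bytes:

    #include <stdint.h>
    #include <stdio.h>

    /* Each lane-status byte packs two lanes, four status bits per lane
     * (CR_DONE, CHANNEL_EQ_DONE, SYMBOL_LOCKED, ...).
     */
    static uint8_t lane_nibble(const uint8_t *lane_status, unsigned int lane)
    {
        uint8_t byte = lane_status[lane / 2];

        return (lane & 1) ? (byte >> 4) & 0xF : byte & 0xF;
    }

    int main(void)
    {
        /* hypothetical 0x202-0x203 read: lanes 0-2 fully trained (0x7),
         * lane 3 has dropped CR/EQ/symbol lock (0x0)
         */
        uint8_t status[2] = { 0x77, 0x07 };
        unsigned int lane;

        for (lane = 0; lane < 4; lane++)
            printf("lane %u status nibble: 0x%x\n", lane, lane_nibble(status, lane));
        return 0;
    }
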
+
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
+{
+ union dpcd_psr_configuration psr_configuration;
+
+ if (!link->psr_settings.psr_feature_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 368,/*DpcdAddress_PSR_Enable_Cfg*/
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (psr_configuration.bits.ENABLE) {
+ unsigned char dpcdbuf[3] = {0};
+ union psr_error_status psr_error_status;
+ union psr_sink_psr_status psr_sink_psr_status;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 0x2006, /*DpcdAddress_PSR_Error_Status*/
+ (unsigned char *) dpcdbuf,
+ sizeof(dpcdbuf));
+
+ /*DPCD 2006h ERROR STATUS*/
+ psr_error_status.raw = dpcdbuf[0];
+ /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
+ psr_sink_psr_status.raw = dpcdbuf[2];
+
+ if (psr_error_status.bits.LINK_CRC_ERROR ||
+ psr_error_status.bits.RFB_STORAGE_ERROR ||
+ psr_error_status.bits.VSC_SDP_ERROR) {
+ bool allow_active;
+
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 8198,/*DpcdAddress_PSR_Error_Status*/
+ &psr_error_status.raw,
+ sizeof(psr_error_status.raw));
+
+ /* PSR error, disable and re-enable PSR */
+ if (link->psr_settings.psr_allow_active) {
+ allow_active = false;
+ edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ }
+
+ return true;
+ } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+			/* No error is detected, PSR is active.
+ * We should return with IRQ_HPD handled without
+ * checking for loss of sync since PSR would have
+ * powered down main link.
+ */
+ return true;
+ }
+ }
+ return false;
+}
+
+void dp_handle_link_loss(struct dc_link *link)
+{
+ struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_state *state = link->dc->current_state;
+ uint8_t count;
+ int i;
+
+ link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+ for (i = 0; i < count; i++)
+ link_set_dpms_off(pipes[i]);
+
+ for (i = count - 1; i >= 0; i--) {
+ // Always use max settings here for DP 1.4a LL Compliance CTS
+ if (link->is_automated) {
+ pipes[i]->link_config.dp_link_settings.lane_count =
+ link->verified_link_cap.lane_count;
+ pipes[i]->link_config.dp_link_settings.link_rate =
+ link->verified_link_cap.link_rate;
+ pipes[i]->link_config.dp_link_settings.link_spread =
+ link->verified_link_cap.link_spread;
+ }
+ link_set_dpms_on(link->dc->current_state, pipes[i]);
+ }
+}
+
+enum dc_status dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ static enum dc_status retval;
+
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes which is
+ * the req from the above mentioned test cases.
+ *
+ * For DP 1.4 we need to read those from 2002h range.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+ else {
+ /* Read 14 bytes in a single read and then copy only the required fields.
+ * This is more efficient than doing it in two separate AUX reads. */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT_ESI,
+ tmp,
+ sizeof(tmp));
+
+ if (retval != DC_OK)
+ return retval;
+
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ }
+
+ return retval;
+}
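
dp_read_hpd_rx_irq_data() above sizes its temporary buffer from the span of ESI registers (DP_SINK_COUNT_ESI 0x2002 through DP_SINK_STATUS_ESI 0x200F, 14 bytes) and then copies individual fields out by address offset. A minimal standalone sketch of that sizing and indexing, with a hypothetical lane-status value:

    #include <stdint.h>
    #include <stdio.h>

    #define SINK_COUNT_ESI     0x2002  /* base of the ESI block */
    #define LANE0_1_STATUS_ESI 0x200c
    #define SINK_STATUS_ESI    0x200f  /* last register of interest */

    int main(void)
    {
        /* one AUX read covers the whole span */
        uint8_t buf[SINK_STATUS_ESI - SINK_COUNT_ESI + 1] = { 0 };

        buf[LANE0_1_STATUS_ESI - SINK_COUNT_ESI] = 0x77;  /* hypothetical data */

        printf("buffer size: %zu bytes\n", sizeof(buf));  /* 14 */
        printf("lane0_1 status: 0x%02x\n", buf[LANE0_1_STATUS_ESI - SINK_COUNT_ESI]);
        return 0;
    }
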
+
+/*************************Short Pulse IRQ***************************/
+bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
+{
+ /*
+ * Don't handle RX IRQ unless one of following is met:
+ * 1) The link is established (cur_link_settings != unknown)
+ * 2) We know we're dealing with a branch device, SST or MST
+ */
+
+ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ is_dp_branch_device(link))
+ return true;
+
+ return false;
+}
+
+bool dp_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {0};
+ union device_service_irq device_service_clear = {0};
+ enum dc_status result;
+ bool status = false;
+
+ if (out_link_loss)
+ *out_link_loss = false;
+
+ if (has_left_work)
+ *has_left_work = false;
+ /* For use cases related to down stream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+ * in DAL2.1*/
+
+ DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
+ __func__, link->link_index);
+
+
+ /* All the "handle_hpd_irq_xxx()" methods
+ * should be called only after
+ * dal_dpsst_ls_read_hpd_irq_data
+ * Order of calls is important too
+ */
+ result = dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ if (out_hpd_irq_dpcd_data)
+ *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
+
+ if (result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
+ __func__);
+ return false;
+ }
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
+ link->is_automated = true;
+ device_service_clear.bits.AUTOMATED_TEST = 1;
+ core_link_write_dpcd(
+ link,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &device_service_clear.raw,
+ sizeof(device_service_clear.raw));
+ device_service_clear.raw = 0;
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_automated_test(link);
+ return false;
+ }
+
+ if (!dp_should_allow_hpd_rx_irq(link)) {
+ DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
+ __func__, link->link_index);
+ return false;
+ }
+
+ if (handle_hpd_irq_psr_sink(link))
+ /* PSR-related error was detected and handled */
+ return true;
+
+ /* If PSR-related error handled, Main link may be off,
+ * so do not handle as a normal sink status change interrupt.
+ */
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ return true;
+ }
+
+ /* check if we have MST msg and return since we poll for it */
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ return false;
+ }
+
+ /* For now we only handle 'Downstream port status' case.
+ * If we got sink count changed it means
+ * Downstream port status changed,
+ * then DM should call DC to do the detection.
+ * NOTE: Do not handle link loss on eDP since it is internal link*/
+ if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dp_parse_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
+ /* Connectivity log: link loss */
+ CONN_DATA_LINK_LOSS(link,
+ hpd_irq_dpcd_data.raw,
+ sizeof(hpd_irq_dpcd_data),
+ "Status: ");
+
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dp_handle_link_loss(link);
+
+ status = false;
+ if (out_link_loss)
+ *out_link_loss = true;
+
+ dp_trace_link_loss_increment(link);
+ }
+
+ if (link->type == dc_connection_sst_branch &&
+ hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+ != link->dpcd_sink_count)
+ status = true;
+
+ /* reasons for HPD RX:
+ * 1. Link Loss - ie Re-train the Link
+ * 2. MST sideband message
+ * 3. Automated Test - ie. Internal Commit
+ * 4. CP (copy protection) - (not interesting for DM???)
+ * 5. DRR
+ * 6. Downstream Port status changed
+	 *       -ie. Detect - this is the only one
+ * which is interesting for DM because
+ * it must call dc_link_detect.
+ */
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
new file mode 100644
index 000000000000..ac33730fedd4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_IRQ_HANDLER_H__
+#define __DC_LINK_DP_IRQ_HANDLER_H__
+
+#include "link.h"
+bool dp_parse_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+bool dp_should_allow_hpd_rx_irq(const struct dc_link *link);
+void dp_handle_link_loss(struct dc_link *link);
+enum dc_status dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+bool dp_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
+#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
new file mode 100644
index 000000000000..b7abba55bc2f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements basic dp phy functionality such as enable/disable phy
+ * output and set lane/drive settings. This file is responsible for maintaining
+ * and update software state representing current phy status such as current
+ * link settings.
+ */
+
+#include "link_dp_phy.h"
+#include "link_dpcd.h"
+#include "link_dp_training.h"
+#include "link_dp_capability.h"
+#include "clk_mgr.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+#define DC_LOGGER \
+ link->ctx->logger
+
+void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on)
+{
+ uint8_t state;
+
+ state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+ if (link->sync_lt_in_progress)
+ return;
+
+ core_link_write_dpcd(link, DP_SET_POWER, &state,
+ sizeof(state));
+
+}
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ link->cur_link_settings = *link_settings;
+ link->dc->hwss.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
+ dpcd_write_rx_power_ctrl(link, true);
+}
+
+void dp_disable_link_phy(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+
+ if (!link->wa_flags.dp_keep_receiver_powered)
+ dpcd_write_rx_power_ctrl(link, false);
+
+ dc->hwss.disable_link_output(link, link_res, signal);
+ /* Clear current link setting.*/
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+}
+
+static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
+{
+ return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ==
+ offset);
+}
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_training_settings *link_settings,
+ uint32_t offset)
+{
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+
+ if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
+ !is_immediate_downstream(link, offset))
+ return;
+
+ if (link_hwss->ext.set_dp_lane_settings)
+ link_hwss->ext.set_dp_lane_settings(link, link_res,
+ &link_settings->link_settings,
+ link_settings->hw_lane_settings);
+
+ memmove(link->cur_lane_setting,
+ link_settings->hw_lane_settings,
+ sizeof(link->cur_lane_setting));
+}
+
+void dp_set_drive_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings,
+ lt_settings->dpcd_lane_settings);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+}
+
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
+{
+ /* FEC has to be "set ready" before the link training.
+ * The policy is to always train with FEC
+ * if the sink supports it and leave it enabled on link.
+ * If FEC is not supported, disable it.
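+ *
+ * Note: dp_set_fec_ready() only arms FEC. It writes DP_FEC_CONFIGURATION
+ * and tells the link encoder to get ready; FEC is actually turned on later
+ * by dp_set_fec_enable() once training has completed and the state is
+ * dc_link_fec_ready.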
+ */
+ struct link_encoder *link_enc = NULL;
+ enum dc_status status = DC_OK;
+ uint8_t fec_config = 0;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (!dp_should_enable_fec(link))
+ return status;
+
+ if (link_enc->funcs->fec_set_ready &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (ready) {
+ fec_config = 1;
+ status = core_link_write_dpcd(link,
+ DP_FEC_CONFIGURATION,
+ &fec_config,
+ sizeof(fec_config));
+ if (status == DC_OK) {
+ link_enc->funcs->fec_set_ready(link_enc, true);
+ link->fec_state = dc_link_fec_ready;
+ } else {
+ link_enc->funcs->fec_set_ready(link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ dm_error("dpcd write failed to set fec_ready");
+ }
+ } else if (link->fec_state == dc_link_fec_ready) {
+ fec_config = 0;
+ status = core_link_write_dpcd(link,
+ DP_FEC_CONFIGURATION,
+ &fec_config,
+ sizeof(fec_config));
+ link_enc->funcs->fec_set_ready(link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ }
+ }
+
+ return status;
+}
+
+void dp_set_fec_enable(struct dc_link *link, bool enable)
+{
+ struct link_encoder *link_enc = NULL;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (!dp_should_enable_fec(link))
+ return;
+
+ if (link_enc->funcs->fec_set_enable &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (link->fec_state == dc_link_fec_ready && enable) {
+ /* According to the DP spec, the FEC enable sequence can first
+ * be transmitted anytime after 1000 LL codes have
+ * been transmitted on the link after link training
+ * completion. Using 1 lane RBR should have the maximum
+ * time for transmitting 1000 LL codes which is 6.173 us.
+ * So use 7 microseconds delay instead.
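+ * (Rough arithmetic behind the 6.173 us figure: RBR runs at
+ * 1.62 Gbit/s per lane, i.e. about 162 Msymbols/s after 8b/10b,
+ * so 1000 link symbols take roughly 1000 / 162e6 s ~= 6.17 us
+ * on a single RBR lane.)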
+ */
+ udelay(7);
+ link_enc->funcs->fec_set_enable(link_enc, true);
+ link->fec_state = dc_link_fec_enabled;
+ } else if (link->fec_state == dc_link_fec_enabled && !enable) {
+ link_enc->funcs->fec_set_enable(link_enc, false);
+ link->fec_state = dc_link_fec_ready;
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
new file mode 100644
index 000000000000..1eb0619d6710
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_PHY_H__
+#define __DC_LINK_DP_PHY_H__
+
+#include "link.h"
+void dp_enable_link_phy(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+
+void dp_disable_link_phy(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_training_settings *link_settings,
+ uint32_t offset);
+
+void dp_set_drive_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+enum dc_status dp_set_fec_ready(struct dc_link *link,
+ const struct link_resource *link_res, bool ready);
+
+void dp_set_fec_enable(struct dc_link *link, bool enable);
+
+void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on);
+
+#endif /* __DC_LINK_DP_PHY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
new file mode 100644
index 000000000000..579fa222810d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -0,0 +1,1716 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements all generic dp link training helper functions and top
+ * level generic training sequence. All variations of dp link training sequence
+ * should be called inside the top level training functions in this file to
+ * ensure the integrity of our overall training procedure across different types
+ * of link encoding and back end hardware.
+ */
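+
+/* A rough outline of the top level flow implemented by
+ * dp_perform_link_training() below: decide and override training settings,
+ * configure LTTPR mode, FEC readiness and channel coding over DPCD, run the
+ * encoding specific sequence (8b/10b, 128b/132b or the fixed VS/PE retimer
+ * variant), exit training mode and transition the main link to video idle.
+ */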
+#include "link_dp_training.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dp_training_128b_132b.h"
+#include "link_dp_training_auxless.h"
+#include "link_dp_training_dpia.h"
+#include "link_dp_training_fixed_vs_pe_retimer.h"
+#include "link_dpcd.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+#include "link_edp_panel_control.h"
+#include "link/link_detection.h"
+#include "link/link_validation.h"
+#include "atomfirmware.h"
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dm_helpers.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+#define POST_LT_ADJ_REQ_LIMIT 6
+#define POST_LT_ADJ_REQ_TIMEOUT 200
+#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+
+void dp_log_training_result(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum link_training_result status)
+{
+ char *link_rate = "Unknown";
+ char *lt_result = "Unknown";
+ char *lt_spread = "Disabled";
+
+ switch (lt_settings->link_settings.link_rate) {
+ case LINK_RATE_LOW:
+ link_rate = "RBR";
+ break;
+ case LINK_RATE_RATE_2:
+ link_rate = "R2";
+ break;
+ case LINK_RATE_RATE_3:
+ link_rate = "R3";
+ break;
+ case LINK_RATE_HIGH:
+ link_rate = "HBR";
+ break;
+ case LINK_RATE_RBR2:
+ link_rate = "RBR2";
+ break;
+ case LINK_RATE_RATE_6:
+ link_rate = "R6";
+ break;
+ case LINK_RATE_HIGH2:
+ link_rate = "HBR2";
+ break;
+ case LINK_RATE_RATE_8:
+ link_rate = "R8";
+ break;
+ case LINK_RATE_HIGH3:
+ link_rate = "HBR3";
+ break;
+ case LINK_RATE_UHBR10:
+ link_rate = "UHBR10";
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = "UHBR13.5";
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = "UHBR20";
+ break;
+ default:
+ break;
+ }
+
+ switch (status) {
+ case LINK_TRAINING_SUCCESS:
+ lt_result = "pass";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE0:
+ lt_result = "CR failed lane0";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE1:
+ lt_result = "CR failed lane1";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE23:
+ lt_result = "CR failed lane23";
+ break;
+ case LINK_TRAINING_EQ_FAIL_CR:
+ lt_result = "CR failed in EQ";
+ break;
+ case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+ lt_result = "CR failed in EQ partially";
+ break;
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ lt_result = "EQ failed";
+ break;
+ case LINK_TRAINING_LQA_FAIL:
+ lt_result = "LQA failed";
+ break;
+ case LINK_TRAINING_LINK_LOSS:
+ lt_result = "Link loss";
+ break;
+ case DP_128b_132b_LT_FAILED:
+ lt_result = "LT_FAILED received";
+ break;
+ case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
+ lt_result = "max loop count reached";
+ break;
+ case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
+ lt_result = "channel EQ timeout";
+ break;
+ case DP_128b_132b_CDS_DONE_TIMEOUT:
+ lt_result = "CDS timeout";
+ break;
+ default:
+ break;
+ }
+
+ switch (lt_settings->link_settings.link_spread) {
+ case LINK_SPREAD_DISABLED:
+ lt_spread = "Disabled";
+ break;
+ case LINK_SPREAD_05_DOWNSPREAD_30KHZ:
+ lt_spread = "0.5% 30KHz";
+ break;
+ case LINK_SPREAD_05_DOWNSPREAD_33KHZ:
+ lt_spread = "0.5% 33KHz";
+ break;
+ default:
+ break;
+ }
+
+ /* Connectivity log: link training */
+
+ /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
+
+ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
+ link_rate,
+ lt_settings->link_settings.lane_count,
+ lt_result,
+ lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
+ lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
+ lt_spread);
+}
+
+uint8_t dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+ uint8_t disable_scrambled_data_symbols = 0;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ disable_scrambled_data_symbols = 1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ case DP_128b_132b_TPS1:
+ case DP_128b_132b_TPS2:
+ disable_scrambled_data_symbols = 0;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+ return disable_scrambled_data_symbols;
+}
+
+enum dpcd_training_patterns
+ dp_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+ enum dpcd_training_patterns dpcd_tr_pattern =
+ DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS1\n", __func__);
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS2\n", __func__);
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS3\n", __func__);
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS4\n", __func__);
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+ break;
+ case DP_128b_132b_TPS1:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS1\n", __func__);
+ dpcd_tr_pattern = DPCD_128b_132b_TPS1;
+ break;
+ case DP_128b_132b_TPS2:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS2\n", __func__);
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2;
+ break;
+ case DP_128b_132b_TPS2_CDS:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS2 CDS\n",
+ __func__);
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
+ break;
+ case DP_TRAINING_PATTERN_VIDEOIDLE:
+ DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern videoidle\n", __func__);
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+
+ return dpcd_tr_pattern;
+}
+
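+/* The DPCD lane status and adjust request registers pack two lanes per byte:
+ * the even numbered lane sits in the low nibble and the odd numbered lane in
+ * the high nibble, which is what this helper unpacks.
+ */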
+uint8_t dp_get_nibble_at_index(const uint8_t *buf,
+ uint32_t index)
+{
+ uint8_t nibble;
+ nibble = buf[index / 2];
+
+ if (index % 2)
+ nibble >>= 4;
+ else
+ nibble &= 0x0F;
+
+ return nibble;
+}
+
+void dp_wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t wait_in_micro_secs)
+{
+ fsleep(wait_in_micro_secs);
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
+ __func__,
+ wait_in_micro_secs);
+}
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_DISABLED };
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+ enum dc_voltage_swing voltage)
+{
+ enum dc_pre_emphasis pre_emphasis;
+ pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+ if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+ pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+ return pre_emphasis;
+
+}
+
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+ struct dc_lane_settings max_requested;
+
+ max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
+ max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
+
+ /* Determine what the maximum of the requested settings are*/
+ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
+ if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
+ max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
+
+ if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
+ if (lane_settings[lane].FFE_PRESET.settings.level >
+ max_requested.FFE_PRESET.settings.level)
+ max_requested.FFE_PRESET.settings.level =
+ lane_settings[lane].FFE_PRESET.settings.level;
+ }
+
+ /* make sure the requested settings are
+ * not higher than maximum settings*/
+ if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+ max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+ if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+ max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+ if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
+ max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+
+ /* make sure the pre-emphasis matches the voltage swing*/
+ if (max_requested.PRE_EMPHASIS >
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING))
+ max_requested.PRE_EMPHASIS =
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
+ lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
+ lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
+ }
+}
+
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
+ (hw_lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (hw_lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ } else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
+ hw_lane_settings[lane].FFE_PRESET.settings.level;
+ }
+ }
+}
+
+uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
+{
+ uint8_t link_rate = 0;
+ enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings);
+
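+ /* Note: for 8b/10b the dc link_rate enum values already match the DPCD
+ * LINK_BW_SET encoding (the rate in 0.27 Gbps units, e.g. 0x0A for HBR),
+ * so a plain cast is enough. 128b/132b rates instead use the dedicated
+ * UHBR LINK_BW_SET codes selected below.
+ */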
+ if (encoding == DP_128b_132b_ENCODING)
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ link_rate = 0x1;
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = 0x2;
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = 0x4;
+ break;
+ default:
+ link_rate = 0;
+ break;
+ }
+ else if (encoding == DP_8b_10b_ENCODING)
+ link_rate = (uint8_t) link_settings->link_rate;
+ else
+ link_rate = 0;
+
+ return link_rate;
+}
+
+/* Only used for channel equalization */
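+/* Decode the raw TRAINING_AUX_RD_INTERVAL value read from DPCD into a wait
+ * time in microseconds: 0 keeps the default 400 us, non-zero values select
+ * the longer intervals handled below (4 ms up to 64 ms).
+ */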
+uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
+{
+ unsigned int aux_rd_interval_us = 400;
+
+ switch (dpcd_aux_read_interval) {
+ case 0x01:
+ aux_rd_interval_us = 4000;
+ break;
+ case 0x02:
+ aux_rd_interval_us = 8000;
+ break;
+ case 0x03:
+ aux_rd_interval_us = 12000;
+ break;
+ case 0x04:
+ aux_rd_interval_us = 16000;
+ break;
+ case 0x05:
+ aux_rd_interval_us = 32000;
+ break;
+ case 0x06:
+ aux_rd_interval_us = 64000;
+ break;
+ default:
+ break;
+ }
+
+ return aux_rd_interval_us;
+}
+
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE0;
+ else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE1;
+ else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE23;
+ else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE23;
+ return result;
+}
+
+bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
+{
+ return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
+}
+
+bool dp_is_max_vs_reached(
+ const struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count);
+ lane++) {
+ if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
+ == VOLTAGE_SWING_MAX_LEVEL)
+ return true;
+ }
+ return false;
+
+}
+
+bool dp_is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ /*LANEx_CR_DONE bits All 1's?*/
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+ done = false;
+ }
+ return done;
+
+}
+
+bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++)
+ if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+ done = false;
+ return done;
+}
+
+bool dp_is_symbol_locked(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool locked = true;
+ uint32_t lane;
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++)
+ if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0)
+ locked = false;
+ return locked;
+}
+
+bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
+{
+ return align_status.bits.INTERLANE_ALIGN_DONE == 1;
+}
+
+enum link_training_result dp_check_link_loss_status(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_status lane_status;
+ uint8_t dpcd_buf[6] = {0};
+ uint32_t lane;
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
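+ /* The 6 byte read starting at DP_SINK_COUNT (0x200) covers sink count,
+ * device service IRQ, LANE0_1_STATUS, LANE2_3_STATUS, lane align status
+ * and sink status, so the per lane status nibbles start at dpcd_buf[2].
+ */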
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /*
+ * check lanes status
+ */
+ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* If channel equalization, clock recovery or symbol lock
+ * is lost on any lane, treat it as link loss: the DP sink
+ * status has changed and the link needs to be retrained.
+ */
+ status = LINK_TRAINING_LINK_LOSS;
+ break;
+ }
+ }
+
+ return status;
+}
+
+enum dc_status dp_get_lane_status_and_lane_adjust(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ uint32_t offset)
+{
+ unsigned int lane01_status_address = DP_LANE0_1_STATUS;
+ uint8_t lane_adjust_offset = 4;
+ unsigned int lane01_adjust_address;
+ uint8_t dpcd_buf[6] = {0};
+ uint32_t lane;
+ enum dc_status status;
+
+ if (is_repeater(link_training_setting, offset)) {
+ lane01_status_address =
+ DP_LANE0_1_STATUS_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ lane_adjust_offset = 3;
+ }
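+ /* For a DPRX read starting at DP_LANE0_1_STATUS (0x202) the adjust
+ * request bytes sit at offset 4 within dpcd_buf; the LTTPR status and
+ * adjust registers are packed back to back without a sink status byte,
+ * so for repeaters the adjust requests start at offset 3 instead.
+ */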
+
+ status = core_link_read_dpcd(
+ link,
+ lane01_status_address,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ if (status != DC_OK) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X,"
+ " keep current lane status and lane adjust unchanged",
+ __func__,
+ lane01_status_address);
+ return status;
+ }
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ ln_status[lane].raw =
+ dp_get_nibble_at_index(&dpcd_buf[0], lane);
+ ln_adjust[lane].raw =
+ dp_get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
+ }
+
+ ln_align->raw = dpcd_buf[2];
+
+ if (is_repeater(link_training_setting, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ offset,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ offset,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ }
+
+ return status;
+}
+
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ if (lt_settings->voltage_swing == NULL &&
+ lt_settings->pre_emphasis == NULL &&
+ lt_settings->ffe_preset == NULL &&
+ lt_settings->post_cursor2 == NULL)
+
+ return;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (lt_settings->voltage_swing)
+ lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
+ if (lt_settings->pre_emphasis)
+ lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
+ if (lt_settings->post_cursor2)
+ lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
+ if (lt_settings->ffe_preset)
+ lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
+ }
+}
+
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
+{
+ if (!dp_is_lttpr_present(link))
+ return;
+
+ if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
+ *override = LTTPR_MODE_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
+ *override = LTTPR_MODE_NON_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
+ *override = LTTPR_MODE_NON_LTTPR;
+ }
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
+}
+
+void override_training_settings(
+ struct dc_link *link,
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+
+ /* Override link spread */
+ if (!link->dp_ss_off && overrides->downspread != NULL)
+ lt_settings->link_settings.link_spread = *overrides->downspread ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ
+ : LINK_SPREAD_DISABLED;
+
+ /* Override lane settings */
+ if (overrides->voltage_swing != NULL)
+ lt_settings->voltage_swing = overrides->voltage_swing;
+ if (overrides->pre_emphasis != NULL)
+ lt_settings->pre_emphasis = overrides->pre_emphasis;
+ if (overrides->post_cursor2 != NULL)
+ lt_settings->post_cursor2 = overrides->post_cursor2;
+ if (overrides->ffe_preset != NULL)
+ lt_settings->ffe_preset = overrides->ffe_preset;
+ /* Override HW lane settings with BIOS forced values if present */
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
+ lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = false;
+ }
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING =
+ lt_settings->voltage_swing != NULL ?
+ *lt_settings->voltage_swing :
+ VOLTAGE_SWING_LEVEL0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS =
+ lt_settings->pre_emphasis != NULL ?
+ *lt_settings->pre_emphasis
+ : PRE_EMPHASIS_DISABLED;
+ lt_settings->hw_lane_settings[lane].POST_CURSOR2 =
+ lt_settings->post_cursor2 != NULL ?
+ *lt_settings->post_cursor2
+ : POST_CURSOR2_DISABLED;
+ }
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ /* Override training timings */
+ if (overrides->cr_pattern_time != NULL)
+ lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
+ if (overrides->eq_pattern_time != NULL)
+ lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
+ if (overrides->pattern_for_cr != NULL)
+ lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
+ if (overrides->pattern_for_eq != NULL)
+ lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
+ if (overrides->enhanced_framing != NULL)
+ lt_settings->enhanced_framing = *overrides->enhanced_framing;
+ if (link->preferred_training_settings.fec_enable != NULL)
+ lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
+}
+
+enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings)
+{
+ switch (link_dp_get_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ default:
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
+ case DP_128b_132b_ENCODING:
+ return DP_128b_132b_TPS1;
+ }
+}
+
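+/* For 8b/10b channel equalization prefer TPS4 when both the link encoder and
+ * the sink advertise support for it, then TPS3, and fall back to TPS2
+ * otherwise; 128b/132b links always use TPS2.
+ */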
+enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc;
+ struct encoder_feature_support *enc_caps;
+ struct dpcd_caps *rx_caps = &link->dpcd_caps;
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+ enc_caps = &link_enc->features;
+
+ switch (link_dp_get_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
+ rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+ else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
+ rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
+ else
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ case DP_128b_132b_ENCODING:
+ pattern = DP_128b_132b_TPS2;
+ break;
+ default:
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ }
+ return pattern;
+}
+
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting)
+{
+ enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting);
+
+ if (encoding == DP_8b_10b_ENCODING)
+ return dp_decide_8b_10b_lttpr_mode(link);
+ else if (encoding == DP_128b_132b_ENCODING)
+ return dp_decide_128b_132b_lttpr_mode(link);
+
+ ASSERT(0);
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(ln_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(ln_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ } else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ hw_lane_settings[lane].FFE_PRESET.raw =
+ ln_adjust[lane].tx_ffe.PRESET_VALUE;
+ }
+ }
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
+
+ if (lt_settings->disallow_per_lane_settings) {
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ maximize_lane_settings(lt_settings, hw_lane_settings);
+ override_lane_settings(lt_settings, hw_lane_settings);
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
+ }
+
+}
+
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ decide_8b_10b_training_settings(link, link_settings, lt_settings);
+ else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING)
+ decide_128b_132b_training_settings(link, link_settings, lt_settings);
+}
+
+
+enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
+{
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ return core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+}
+
+static enum dc_status configure_lttpr_mode_non_transparent(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ /* aux timeout is already set to extended */
+ /* RESET/SET lttpr mode to enable non transparent mode */
+ uint8_t repeater_cnt;
+ uint32_t aux_interval_address;
+ uint8_t repeater_id;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+ const struct dc *dc = link->dc;
+
+ enum dp_link_encoding encoding = dc->link_srv->dp_get_encoding_format(&lt_settings->link_settings);
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ }
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
+
+ repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Driver does not need to train the first hop. Skip DPCD read and clear
+ * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+
+ for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+ aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ core_link_read_dpcd(
+ link,
+ aux_interval_address,
+ (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
+ sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+ }
+ }
+ }
+
+ return result;
+}
+
+enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
+{
+ enum dc_status status = DC_OK;
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ status = configure_lttpr_mode_transparent(link);
+
+ else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
+ return status;
+}
+
+void repeater_training_done(struct dc_link *link, uint32_t offset)
+{
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ /* Set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ &dpcd_pattern.raw,
+ 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
+{
+ uint8_t sink_status = 0;
+ uint8_t i;
+
+ /* clear training pattern set */
+ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+ if (encoding == DP_128b_132b_ENCODING) {
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ fsleep(1000);
+ }
+ }
+}
+
+enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum dp_link_encoding encoding =
+ link_dp_get_encoding_format(
+ &lt_settings->link_settings);
+ enum dc_status status;
+
+ status = core_link_write_dpcd(
+ link,
+ DP_MAIN_LINK_CHANNEL_CODING_SET,
+ (uint8_t *) &encoding,
+ 1);
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
+ __func__,
+ DP_MAIN_LINK_CHANNEL_CODING_SET,
+ encoding);
+
+ return status;
+}
+
+void dpcd_set_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern training_pattern)
+{
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ dp_training_pattern_to_dpcd_training_pattern(
+ link, training_pattern);
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+enum dc_status dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ uint8_t rate;
+ enum dc_status status;
+
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+
+ downspread.raw = (uint8_t)
+ (lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
+ lt_settings->link_settings.use_link_rate_set == true) {
+ rate = 0;
+ /* WA for some MUX chips that will power down with eDP and lose supported
+ * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure
+ * MUX chip gets link rate set back before link training.
+ */
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ uint8_t supported_link_rates[16];
+
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+ }
+ status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
+ &lt_settings->link_settings.link_rate_set, 1);
+ } else {
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+
+ status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ }
+
+ if (rate) {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_RATE_SET,
+ lt_settings->link_settings.link_rate_set,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ }
+
+ return status;
+}
+
+enum dc_status dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset)
+{
+ unsigned int lane0_set_address;
+ enum dc_status status;
+ lane0_set_address = DP_TRAINING_LANE0_SET;
+
+ if (is_repeater(link_training_setting, offset))
+ lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ status = core_link_write_dpcd(link,
+ lane0_set_address,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ link_training_setting->link_settings.lane_count);
+
+ if (is_repeater(link_training_setting, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
+
+ return status;
+}
+
+void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
+{
+ uint32_t dpcd_base_lt_offset;
+ uint8_t dpcd_lt_buffer[5] = {0};
+ union dpcd_training_pattern dpcd_pattern = {0};
+ uint32_t size_in_bytes;
+ bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
+
+ if (is_repeater(lt_settings, offset))
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /*****************************************************************
+ * DpcdAddress_TrainingPatternSet
+ *****************************************************************/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dp_initialize_scrambling_data_symbols(link, pattern);
+
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
+ = dpcd_pattern.raw;
+
+ if (is_repeater(lt_settings, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ }
+
+ /* concatenate everything into one buffer*/
+ size_in_bytes = lt_settings->link_settings.lane_count *
+ sizeof(lt_settings->dpcd_lane_settings[0]);
+
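+ /* dpcd_lt_buffer holds TRAINING_PATTERN_SET followed by the four
+ * TRAINING_LANEx_SET bytes (DPCD 0x102..0x106 in the DPRX case),
+ * hence the index arithmetic below.
+ */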
+ // 0x00103 - 0x00102
+ memmove(
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
+ lt_settings->dpcd_lane_settings,
+ size_in_bytes);
+
+ if (is_repeater(lt_settings, offset)) {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ } else {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
+ if (edp_workaround) {
+ /* for eDP write in 2 parts because the 5-byte burst is
+ * causing issues on some eDP panels (EPR#366724)
+ */
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(lt_settings->dpcd_lane_settings),
+ size_in_bytes);
+
+ } else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ sizeof(dpcd_lt_buffer));
+ } else
+ /* write it all in (1 + number-of-lanes)-byte burst*/
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ size_in_bytes + sizeof(dpcd_pattern.raw));
+}
+
+void start_clock_recovery_pattern_early(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
+ __func__);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
+ udelay(400);
+}
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size)
+{
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+
+ pattern_param.dp_phy_pattern = test_pattern;
+ pattern_param.custom_pattern = custom_pattern;
+ pattern_param.custom_pattern_size = custom_pattern_size;
+ pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+ if (link_hwss->ext.set_dp_link_test_pattern)
+ link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param);
+}
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
+{
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ case DP_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
+ break;
+ case DP_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
+ break;
+ default:
+ break;
+ }
+
+ dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
+
+ return true;
+}
+
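+/* Optional POST_LT_ADJ_REQ handling: after training succeeds the sink may
+ * keep requesting drive setting adjustments. Poll the lane status for up to
+ * POST_LT_ADJ_REQ_TIMEOUT ms per request and honor at most
+ * POST_LT_ADJ_REQ_LIMIT requests before giving up.
+ */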
+static bool perform_post_lt_adj_req_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+
+ uint32_t adj_req_count;
+ uint32_t adj_req_timer;
+ bool req_drv_setting_changed;
+ uint32_t lane;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ req_drv_setting_changed = false;
+ for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+ adj_req_count++) {
+
+ req_drv_setting_changed = false;
+
+ for (adj_req_timer = 0;
+ adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+ adj_req_timer++) {
+
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ DPRX);
+
+ if (dpcd_lane_status_updated.bits.
+ POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+ return true;
+
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status))
+ return false;
+
+ if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(dpcd_lane_status_updated))
+ return false;
+
+ for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+ if (lt_settings->
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
+
+ req_drv_setting_changed = true;
+ break;
+ }
+ }
+
+ if (req_drv_setting_changed) {
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ dp_set_drive_settings(link,
+ link_res,
+ lt_settings);
+ break;
+ }
+
+ msleep(1);
+ }
+
+ if (!req_drv_setting_changed) {
+ DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+ }
+ }
+ DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+
+}
+
+static enum link_training_result dp_transition_to_video_idle(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ enum link_training_result status)
+{
+ union lane_count_set lane_count_set = {0};
+
+ /* 4. mainlink output idle pattern*/
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ /*
+ * 5. post training adjust if required
+ * If the upstream DPTX and downstream DPRX both support TPS4,
+ * TPS4 must be used instead of POST_LT_ADJ_REQ.
+ */
+ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+ lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
+ /* delay 5ms after Main Link output idle pattern and then check
+ * DPCD 0202h.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ status = dp_check_link_loss_status(link, lt_settings);
+ }
+ return status;
+ }
+
+ if (status == LINK_TRAINING_SUCCESS &&
+ perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
+ status = LINK_TRAINING_LQA_FAIL;
+
+ lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+ core_link_write_dpcd(
+ link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+
+ return status;
+}
+
+enum link_training_result dp_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ struct link_training_settings lt_settings = {0};
+ enum dp_link_encoding encoding =
+ link_dp_get_encoding_format(link_settings);
+
+ /* decide training settings */
+ dp_decide_training_settings(
+ link,
+ link_settings,
+ &lt_settings);
+
+ override_training_settings(
+ link,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* reset previous training states */
+ dpcd_exit_training_mode(link, encoding);
+
+ /* configure link prior to entering training mode */
+ dpcd_configure_lttpr_mode(link, &lt_settings);
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
+ dpcd_configure_channel_coding(link, &lt_settings);
+
+ /* enter training mode:
+ * Per DP specs starting from here, DPTX device shall not issue
+ * Non-LT AUX transactions inside training mode.
+ */
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
+ if (link->dc->config.use_old_fixed_vs_sequence)
+ status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings);
+ else
+ status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ else if (encoding == DP_8b_10b_ENCODING)
+ status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
+ else if (encoding == DP_128b_132b_ENCODING)
+ status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
+ else
+ ASSERT(0);
+
+ /* exit training mode */
+ dpcd_exit_training_mode(link, encoding);
+
+ /* switch to video idle */
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
+ status = dp_transition_to_video_idle(link,
+ link_res,
+ &lt_settings,
+ status);
+
+ /* dump debug data */
+ dp_log_training_result(link, &lt_settings, status);
+ if (status != LINK_TRAINING_SUCCESS)
+ link->ctx->dc->debug_data.ltFailCount++;
+ return status;
+}
+
+bool perform_link_training_with_retries(
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal,
+ bool do_fallback)
+{
+ int j;
+ uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+ enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
+ struct dc_link_settings cur_link_settings = *link_setting;
+ struct dc_link_settings max_link_settings = *link_setting;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ int fail_count = 0;
+ bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+ bool is_link_bw_min = /* RBR x 1 */
+ (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
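+ /* Retry policy (as implemented below): try the requested settings up to
+ * 'attempts' times, optionally falling back to lower link bandwidth
+ * between tries, and abort early if the sink has been unplugged.
+ */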
+ dp_trace_commit_lt_init(link);
+
+
+ if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+ /* We need to do this before the link training to ensure the idle
+ * pattern in SST mode will be sent right after the link training
+ */
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
+ dp_trace_set_lt_start_timestamp(link, false);
+ j = 0;
+ while (j < attempts && fail_count < (attempts * 10)) {
+
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d) @ spread = %x\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts,
+ cur_link_settings.link_rate, cur_link_settings.lane_count,
+ cur_link_settings.link_spread);
+
+ dp_enable_link_phy(
+ link,
+ &pipe_ctx->link_res,
+ signal,
+ pipe_ctx->clock_source->id,
+ &cur_link_settings);
+
+ if (stream->sink_patches.dppowerup_delay > 0) {
+ int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+ msleep(delay_dp_power_up_in_ms);
+ }
+
+ if (panel_mode == DP_PANEL_MODE_EDP) {
+ struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+
+ if (cp_psp && cp_psp->funcs.enable_assr) {
+ /* ASSR is bound to fail with the unsigned PSP
+ * verstage used during the development phase.
+ * Report it and continue with eDP panel mode to
+ * perform eDP link training with the right settings.
+ */
+ bool result;
+ result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+ if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+ }
+
+ dp_set_panel_mode(link, panel_mode);
+
+ if (link->aux_access_disabled) {
+ dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
+ return true;
+ } else {
+ /** @todo Consolidate USB4 DP and DPx.x training. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dpia_perform_link_training(
+ link,
+ &pipe_ctx->link_res,
+ &cur_link_settings,
+ skip_video_pattern);
+
+ /* Transmit idle pattern once training successful. */
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ // Update verified link settings to the current ones,
+ // because DPIA LT might fall back to a lower link setting.
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
+ }
+ }
+ } else {
+ status = dp_perform_link_training(
+ link,
+ &pipe_ctx->link_res,
+ &cur_link_settings,
+ skip_video_pattern);
+ }
+
+ dp_trace_lt_total_count_increment(link, false);
+ dp_trace_lt_result_update(link, status, false);
+ dp_trace_set_lt_end_timestamp(link, false);
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ return true;
+ }
+
+ fail_count++;
+ dp_trace_lt_fail_count_update(link, fail_count, false);
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
+ /* If the latest link training attempt still fails or training
+ * is aborted, skip the delay and keep the PHY on.
+ */
+ if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
+ break;
+ }
+
+ DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts,
+ cur_link_settings.link_rate, cur_link_settings.lane_count,
+ cur_link_settings.link_spread, status);
+
+ dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
+
+ /* Abort link training if failure due to sink being unplugged. */
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+ link_detect_connection_type(link, &type);
+ if (type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
+ break;
+ }
+ }
+
+ /* Try to train again at original settings if:
+ * - not falling back between training attempts;
+ * - aborted previous attempt due to reasons other than sink unplug;
+ * - successfully trained but at a link rate lower than that required by the stream;
+ * - reached minimum link bandwidth.
+ */
+ if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+ (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+ is_link_bw_min) {
+ j++;
+ cur_link_settings = *link_setting;
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ is_link_bw_low = false;
+ is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
+ uint32_t req_bw;
+ uint32_t link_bw;
+
+ decide_fallback_link_setting(link, &max_link_settings,
+ &cur_link_settings, status);
+ /* Flag if the reduced link bandwidth no longer meets the stream requirements,
+ * or if we have fallen back to the minimum link bandwidth.
+ */
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+ if (is_link_bw_low)
+ DC_LOG_WARNING(
+ "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, link->link_index, req_bw, link_bw);
+ }
+
+ msleep(delay_between_attempts);
+ }
+
+ return false;
+}
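The fallback bookkeeping above hinges on two flags: the link is considered "low" when the stream needs more bandwidth than the trained link provides, and "min" once RBR x 1 lane is reached. A minimal sketch of the bandwidth comparison, with assumed kbps figures (both numbers are illustrative, not derived from a real timing):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t req_bw = 8910000;   /* assumed stream bandwidth in kbps */
	uint32_t link_bw = 6480000;  /* assumed trained link bandwidth in kbps */
	bool is_link_bw_low = req_bw > link_bw;

	/* When true, the loop either falls back further or reports failure. */
	printf("is_link_bw_low = %d\n", is_link_bw_low);
	return 0;
}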
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
new file mode 100644
index 000000000000..7d027bac8255
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_H__
+#define __DC_LINK_DP_TRAINING_H__
+#include "link.h"
+
+bool perform_link_training_with_retries(
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal,
+ bool do_fallback);
+
+enum link_training_result dp_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ bool skip_video_pattern);
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset);
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size);
+
+void dpcd_set_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern training_pattern);
+
+/* Write DPCD drive settings. */
+enum dc_status dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset);
+
+/* Write DPCD link configuration data. */
+enum dc_status dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings);
+
+void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset);
+
+/* Read training status and adjustment requests from DPCD. */
+enum dc_status dp_get_lane_status_and_lane_adjust(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ uint32_t offset);
+
+enum dc_status dpcd_configure_lttpr_mode(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+enum dc_status configure_lttpr_mode_transparent(struct dc_link *link);
+
+enum dc_status dpcd_configure_channel_coding(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+void repeater_training_done(struct dc_link *link, uint32_t offset);
+
+void start_clock_recovery_pattern_early(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset);
+
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings);
+
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+
+enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings);
+
+enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct dc_link_settings *link_settings);
+
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+void dp_get_lttpr_mode_override(struct dc_link *link,
+ enum lttpr_mode *override);
+
+void override_training_settings(
+ struct dc_link *link,
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings);
+
+/* Check DPCD training status registers to detect link loss. */
+enum link_training_result dp_check_link_loss_status(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting);
+
+bool dp_is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+
+bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+bool dp_is_symbol_locked(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+bool dp_is_interlane_aligned(union lane_align_status_updated align_status);
+
+bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset);
+
+bool dp_is_max_vs_reached(
+ const struct link_training_settings *lt_settings);
+
+uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings);
+
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+
+void dp_wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t wait_in_micro_secs);
+
+enum dpcd_training_patterns
+ dp_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern);
+
+uint8_t dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern);
+
+void dp_log_training_result(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum link_training_result status);
+
+uint32_t dp_translate_training_aux_read_interval(
+ uint32_t dpcd_aux_read_interval);
+
+uint8_t dp_get_nibble_at_index(const uint8_t *buf,
+ uint32_t index);
+#endif /* __DC_LINK_DP_TRAINING_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
new file mode 100644
index 000000000000..23d380f09a21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements dp 128b/132b link training software policies and
+ * sequences.
+ */
+#include "link_dp_training_128b_132b.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
+ uint32_t *interval_in_us)
+{
+ union dp_128b_132b_training_aux_rd_interval dpcd_interval;
+ uint32_t interval_unit = 0;
+
+ dpcd_interval.raw = 0;
+ core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL,
+ &dpcd_interval.raw, sizeof(dpcd_interval.raw));
+ interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
+ /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
+ * INTERVAL_UNIT. The maximum is 256 ms
+ */
+ *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
+}
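The helper above converts the DPCD-reported 128b/132b AUX read interval into microseconds. A minimal worked sketch of that formula, using assumed DPCD field values (VALUE and UNIT are illustrative, not read from a real sink):

#include <stdint.h>
#include <stdio.h>

/* interval_us = (VALUE + 1) * INTERVAL_UNIT_ms * 1000; UNIT = 0b selects 2 ms. */
int main(void)
{
	uint32_t value = 3;    /* assumed 128b/132b_TRAINING_AUX_RD_INTERVAL VALUE field */
	uint32_t unit_ms = 2;  /* assumed UNIT bit = 0b, i.e. 2 ms granularity */
	uint32_t interval_us = (value + 1) * unit_ms * 1000;

	printf("aux rd interval = %u us\n", interval_us); /* prints 8000 us */
	return 0;
}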
+
+static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ uint8_t loop_count;
+ uint32_t aux_rd_interval = 0;
+ uint32_t wait_time = 0;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ enum dc_status status = DC_OK;
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* Transmit 128b/132b_TPS1 over Main-Link */
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
+
+ /* Set TRAINING_PATTERN_SET to 01h */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
+
+ /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
+
+ /* Set loop counter to start from 1 */
+ loop_count = 1;
+
+ /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
+ dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
+ lt_settings->pattern_for_eq, DPRX);
+
+ /* poll for channel EQ done */
+ while (result == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
+ wait_time += aux_rd_interval;
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+ dpcd_lane_status)) {
+ /* pass */
+ break;
+ } else if (loop_count >= lt_settings->eq_loop_count_limit) {
+ result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ result = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
+ }
+ loop_count++;
+ }
+
+ /* poll for EQ interlane align done */
+ while (result == LINK_TRAINING_SUCCESS) {
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (wait_time >= lt_settings->eq_wait_time_limit) {
+ result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ result = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->eq_pattern_time);
+ wait_time += lt_settings->eq_pattern_time;
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ }
+ }
+
+ return result;
+}
+
+static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ /* Assumption: assume hardware has transmitted eq pattern */
+ enum dc_status status = DC_OK;
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ uint32_t wait_time = 0;
+
+ /* initiate CDS done sequence */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
+
+ /* poll for CDS interlane align done and symbol lock */
+ while (result == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->cds_pattern_time);
+ wait_time += lt_settings->cds_pattern_time;
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ result = DP_128b_132b_LT_FAILED;
+ } else if (wait_time >= lt_settings->cds_wait_time_limit) {
+ result = DP_128b_132b_CDS_DONE_TIMEOUT;
+ }
+ }
+
+ return result;
+}
+
+enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
+ if (link->dc->debug.legacy_dp2_lt) {
+ struct link_training_settings legacy_settings;
+
+ decide_8b_10b_training_settings(link,
+ &lt_settings->link_settings,
+ &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
+ }
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
+
+ return result;
+}
+
+void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, 0, sizeof(*lt_settings));
+
+ lt_settings->link_settings = *link_settings;
+ /* TODO: should decide link spread when populating link_settings */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->eq_pattern_time = 2500;
+ lt_settings->eq_wait_time_limit = 400000;
+ lt_settings->eq_loop_count_limit = 20;
+ lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
+ lt_settings->cds_pattern_time = 2500;
+ lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
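The CDS wait-time budget chosen above scales with the parsed LTTPR repeater count. A minimal sketch with an assumed repeater count (the value 2 is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* cds_wait_time_limit = (repeater count + 1) * 20000 us, per the settings above. */
int main(void)
{
	uint32_t repeater_cnt = 2; /* assumed LTTPR repeater count */
	uint32_t cds_wait_time_limit = (repeater_cnt + 1) * 20000;

	printf("cds wait time limit = %u us\n", cds_wait_time_limit); /* prints 60000 us */
	return 0;
}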
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
+{
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
new file mode 100644
index 000000000000..2147f24efc8b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_128B_132B_H__
+#define __DC_LINK_DP_TRAINING_128B_132B_H__
+#include "link_dp_training.h"
+
+enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings);
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
+
+#endif /* __DC_LINK_DP_TRAINING_128B_132B_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
new file mode 100644
index 000000000000..3889ebb2256b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements dp 8b/10b link training software policies and
+ * sequences.
+ */
+#include "link_dp_training_8b_10b.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ union training_aux_rd_interval training_rd_interval;
+ uint32_t wait_in_micro_secs = 100;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
+ return wait_in_micro_secs;
+}
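For clock recovery the wait interval defaults to 100 us and is overridden by a nonzero DPCD TRAINING_AUX_RD_INTERVAL value, scaled in units of 4 ms. A minimal worked sketch of that rule (the DPCD values passed in are assumed for illustration):

#include <stdint.h>
#include <stdio.h>

/* Default 100 us; a nonzero DPCD value selects value * 4000 us instead. */
static uint32_t cr_interval_us(uint32_t dpcd_value)
{
	return dpcd_value ? dpcd_value * 4000 : 100;
}

int main(void)
{
	printf("%u us\n", cr_interval_us(0)); /* 100 us default */
	printf("%u us\n", cr_interval_us(2)); /* 8000 us */
	return 0;
}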
+
+static uint32_t get_eq_training_aux_rd_interval(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ union training_aux_rd_interval training_rd_interval;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ core_link_read_dpcd(
+ link,
+ DP_128B132B_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ }
+
+ switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
+ case 0: return 400;
+ case 1: return 4000;
+ case 2: return 8000;
+ case 3: return 12000;
+ case 4: return 16000;
+ case 5: return 32000;
+ case 6: return 64000;
+ default: return 400;
+ }
+}
+
+void decide_8b_10b_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, '\0', sizeof(struct link_training_settings));
+
+ /* Initialize link settings */
+ lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings->link_settings.link_rate_set = link_setting->link_rate_set;
+ lt_settings->link_settings.link_rate = link_setting->link_rate;
+ lt_settings->link_settings.lane_count = link_setting->lane_count;
+ /* TODO hard coded to SS for now
+ * lt_settings.link_settings.link_spread =
+ * dal_display_path_is_ss_supported(
+ * path_mode->display_path) ?
+ * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ * LINK_SPREAD_DISABLED;
+ */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ?
+ LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
+ lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+ lt_settings->enhanced_framing = 1;
+ lt_settings->should_set_fec_ready = true;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_TRANSPARENT;
+ }
+ }
+
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ }
+
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+enum link_training_result perform_8b_10b_clock_recovery_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+
+ /* The Synaptics MST hub can put the LT into an infinite loop by
+ * switching the VS between level 0 and level 1 continuously.
+ * Here we try for CR lock for LINK_TRAINING_MAX_CR_RETRY attempts.
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+
+ /* 1. call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ offset);
+
+ /* 2. update DPCD of the receiver*/
+ if (!retry_count)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration. */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ offset);
+ else
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ offset);
+
+ /* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ offset);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
+ return LINK_TRAINING_SUCCESS;
+ }
+
+ /* 6. max VS reached*/
+ if ((link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) &&
+ dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings*/
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient*/
+ if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
+ dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ return dp_get_cr_failure(lane_count, dpcd_lane_status);
+}
+
+enum link_training_result perform_8b_10b_channel_equalization_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration.
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, offset);
+ else
+ dpcd_set_lane_settings(link, lt_settings, offset);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ if (is_repeater(lt_settings, offset))
+ wait_time_microsec =
+ dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink*/
+
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ offset);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status))
+ return dpcd_lane_status[0].bits.CR_DONE_0 ?
+ LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
+ LINK_TRAINING_EQ_FAIL_CR;
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ return LINK_TRAINING_EQ_FAIL_EQ;
+
+}
+
+enum link_training_result dp_perform_8b_10b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
+ uint8_t lane = 0;
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
+
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+ /* 2. perform link training for each repeater
+ * (setting link training done to false is done as well)
+ */
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS) {
+ repeater_training_done(link, repeater_id);
+ break;
+ }
+
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ repeater_id);
+
+ repeater_training_done(link, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
+ }
+ }
+ }
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ DPRX);
+ }
+ }
+
+ return status;
+}
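In non-transparent LTTPR mode the sequence above trains one hop at a time: CR then EQ at offsets repeater_cnt down to 1, and finally the DPRX at offset 0. A minimal sketch of that ordering with an assumed repeater count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t repeater_cnt = 2; /* assumed LTTPR repeater count */
	uint8_t offset;

	for (offset = repeater_cnt; offset > 0; offset--)
		printf("train hop %u: clock recovery, then channel equalization\n",
		       (unsigned int)offset);
	printf("train hop 0 (DPRX): clock recovery, then channel equalization\n");
	return 0;
}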
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
new file mode 100644
index 000000000000..d26de15ce954
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_8B_10B_H__
+#define __DC_LINK_DP_TRAINING_8B_10B_H__
+#include "link_dp_training.h"
+
+/* Retry caps to avoid an infinite loop wherein the receiver
+ * keeps switching between different VS levels.
+ */
+#define LINK_TRAINING_MAX_CR_RETRY 100
+#define LINK_TRAINING_MAX_RETRY_COUNT 5
+
+enum link_training_result dp_perform_8b_10b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result perform_8b_10b_clock_recovery_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset);
+
+enum link_training_result perform_8b_10b_channel_equalization_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset);
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
+
+void decide_8b_10b_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings);
+
+#endif /* __DC_LINK_DP_TRAINING_8B_10B_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
new file mode 100644
index 000000000000..4c6b886a9da8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements DP link training without AUX transactions,
+ * used when AUX access to the sink is disabled.
+ */
+#include "link_dp_training_auxless.h"
+#include "link_dp_phy.h"
+#define DC_LOGGER \
+ link->ctx->logger
+bool dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting)
+{
+ struct link_training_settings lt_settings = {0};
+
+ dp_decide_training_settings(
+ link,
+ link_setting,
+ &lt_settings);
+ override_training_settings(
+ link,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* 1. Perform_clock_recovery_sequence. */
+
+ /* transmit training pattern for clock recovery */
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
+
+ /* call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
+
+ /* wait receiver to lock-on*/
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
+
+ /* 2. Perform_channel_equalization_sequence. */
+
+ /* transmit training pattern for channel equalization. */
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
+
+ /* call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
+
+ /* wait receiver to lock-on. */
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
+
+ /* 3. Perform_link_training_int. */
+
+ /* Mainlink output idle pattern. */
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ dp_log_training_result(link, &lt_settings, LINK_TRAINING_SUCCESS);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
new file mode 100644
index 000000000000..546387a5f32d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_AUXLESS_H__
+#define __DC_LINK_DP_TRAINING_AUXLESS_H__
+#include "link_dp_training.h"
+
+bool dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting);
+#endif /* __DC_LINK_DP_TRAINING_AUXLESS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index d130d58ac08e..4f4e899e5c46 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -1,6 +1,5 @@
-// SPDX-License-Identifier: MIT
/*
- * Copyright 2021 Advanced Micro Devices, Inc.
+ * Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,76 +23,71 @@
*
*/
+/* FILE POLICY AND INTENDED USAGE:
+ * This module implements functionality for training DPIA links.
+ */
+#include "link_dp_training_dpia.h"
#include "dc.h"
-#include "dc_link_dpia.h"
#include "inc/core_status.h"
-#include "dc_link.h"
-#include "dc_link_dp.h"
#include "dpcd_defs.h"
+
+#include "link_dp_dpia.h"
#include "link_hwss.h"
#include "dm_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "inc/link_dpcd.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dp_capability.h"
#include "dc_dmub_srv.h"
-
#define DC_LOGGER \
link->ctx->logger
-enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
-{
- enum dc_status status = DC_OK;
- uint8_t dpcd_dp_tun_data[3] = {0};
- uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0};
- uint8_t i = 0;
-
- status = core_link_read_dpcd(link,
- DP_TUNNELING_CAPABILITIES_SUPPORT,
- dpcd_dp_tun_data,
- sizeof(dpcd_dp_tun_data));
-
- status = core_link_read_dpcd(link,
- DP_USB4_ROUTER_TOPOLOGY_ID,
- dpcd_topology_data,
- sizeof(dpcd_topology_data));
-
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT -
- DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
- for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
- link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
-
- return status;
-}
-
-bool dc_link_dpia_query_hpd_status(struct dc_link *link)
-{
- union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
- bool is_hpd_high = false;
-
- /* prepare QUERY_HPD command */
- cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
- cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
- cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
-
- /* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
- is_hpd_high = cmd.query_hpd.data.result;
+/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
+#define DPIA_CLK_SYNC_DELAY 16000
+
+/* Extend interval between training status checks for manual testing. */
+#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
+
+#define TRAINING_AUX_RD_INTERVAL 100 //us
+
+/* SET_CONFIG message types sent by driver. */
+enum dpia_set_config_type {
+ DPIA_SET_CFG_SET_LINK = 0x01,
+ DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05,
+ DPIA_SET_CFG_SET_TRAINING = 0x18,
+ DPIA_SET_CFG_SET_VSPE = 0x19
+};
+
+/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */
+enum dpia_set_config_ts {
+ DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */
+ DPIA_TS_TPS1 = 0x01,
+ DPIA_TS_TPS2 = 0x02,
+ DPIA_TS_TPS3 = 0x03,
+ DPIA_TS_TPS4 = 0x07,
+ DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */
+};
+
+/* SET_CONFIG message data associated with messages sent by driver. */
+union dpia_set_config_data {
+ struct {
+ uint8_t mode : 1;
+ uint8_t reserved : 7;
+ } set_link;
+ struct {
+ uint8_t stage;
+ } set_training;
+ struct {
+ uint8_t swing : 2;
+ uint8_t max_swing_reached : 1;
+ uint8_t pre_emph : 2;
+ uint8_t max_pre_emph_reached : 1;
+ uint8_t reserved : 2;
+ } set_vspe;
+ uint8_t raw;
+};
- DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
- __func__,
- link->link_index,
- link->link_id.enum_id - ENUM_ID_1,
- cmd.query_hpd.data.status,
- cmd.query_hpd.data.result);
-
- return is_hpd_high;
-}
/* Configure link as prescribed in link_setting; set LTTPR mode; and
* Initialize link training settings.
@@ -113,11 +107,12 @@ static enum link_training_result dpia_configure_link(
bool fec_enable;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
- __func__,
- link->link_id.enum_id - ENUM_ID_1,
- lt_settings->lttpr_mode);
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ lt_settings->lttpr_mode);
- dp_decide_training_settings(link,
+ dp_decide_training_settings(
+ link,
link_setting,
lt_settings);
@@ -137,7 +132,7 @@ static enum link_training_result dpia_configure_link(
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
- if (link->preferred_training_settings.fec_enable)
+ if (link->preferred_training_settings.fec_enable != NULL)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
@@ -148,7 +143,8 @@ static enum link_training_result dpia_configure_link(
return LINK_TRAINING_SUCCESS;
}
-static enum dc_status core_link_send_set_config(struct dc_link *link,
+static enum dc_status core_link_send_set_config(
+ struct dc_link *link,
uint8_t msg_type,
uint8_t msg_data)
{
@@ -160,8 +156,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link,
payload.msg_data = msg_data;
if (!link->ddc->ddc_pin && !link->aux_access_disabled &&
- (dm_helpers_dmub_set_config_sync(link->ctx, link,
- &payload, &set_config_result) == -1)) {
+ (dm_helpers_dmub_set_config_sync(link->ctx,
+ link, &payload, &set_config_result) == -1)) {
return DC_ERROR_UNEXPECTED;
}
@@ -170,7 +166,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link,
}
/* Build SET_CONFIG message data payload for specified message type. */
-static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
+static uint8_t dpia_build_set_config_data(
+ enum dpia_set_config_type type,
struct dc_link *link,
struct link_training_settings *lt_settings)
{
@@ -189,11 +186,9 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING;
data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS;
data.set_vspe.max_swing_reached =
- lt_settings->hw_lane_settings[0].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;
+ lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;
data.set_vspe.max_pre_emph_reached =
- lt_settings->hw_lane_settings[0].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;
+ lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;
break;
default:
ASSERT(false); /* Message type not supported by helper function. */
@@ -235,7 +230,8 @@ static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern t
}
/* Write training pattern to DPCD. */
-static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
+static enum dc_status dpcd_set_lt_pattern(
+ struct dc_link *link,
enum dc_dp_training_pattern pattern,
uint32_t hop)
{
@@ -249,28 +245,29 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
/* DpcdAddress_TrainingPatternSet */
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dp_training_pattern_to_dpcd_training_pattern(link, pattern);
dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
- dc_dp_initialize_scrambling_data_symbols(link, pattern);
+ dp_initialize_scrambling_data_symbols(link, pattern);
if (hop != DPRX) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
- __func__,
- hop,
- dpcd_tps_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ __func__,
+ hop,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
} else {
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
- __func__,
- dpcd_tps_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ __func__,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
- status = core_link_write_dpcd(link,
- dpcd_tps_offset,
- &dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw));
+ status = core_link_write_dpcd(
+ link,
+ dpcd_tps_offset,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
return status;
}
@@ -284,7 +281,7 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_cr_non_transparent(
struct dc_link *link,
@@ -297,8 +294,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
enum dc_status status;
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
uint32_t retry_count = 0;
- /* From DP spec, CR read interval is always 100us. */
- uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+ uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
union lane_align_status_updated dpcd_lane_status_updated = {0};
@@ -306,7 +302,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
uint8_t set_cfg_data;
enum dpia_set_config_ts ts;
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
* Fix inherited from perform_clock_recovery_sequence() -
@@ -316,17 +312,20 @@ static enum link_training_result dpia_training_cr_non_transparent(
* continuously.
*/
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
/* DPTX-to-DPIA */
if (hop == repeater_cnt) {
/* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that
* non-transparent link training has started.
* This also enables the transmission of clk_sync packets.
*/
- set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK,
+ set_cfg_data = dpia_build_set_config_data(
+ DPIA_SET_CFG_SET_LINK,
link,
lt_settings);
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_LINK,
set_cfg_data);
/* CR for this hop is considered successful as long as
@@ -347,6 +346,14 @@ static enum link_training_result dpia_training_cr_non_transparent(
result = LINK_TRAINING_ABORT;
break;
}
+ status = core_link_send_set_config(
+ link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
@@ -358,10 +365,12 @@ static enum link_training_result dpia_training_cr_non_transparent(
* drive settings for hops immediately downstream.
*/
if (hop == repeater_cnt - 1) {
- set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data = dpia_build_set_config_data(
+ DPIA_SET_CFG_SET_VSPE,
link,
lt_settings);
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_VSPE,
set_cfg_data);
if (status != DC_OK) {
@@ -392,6 +401,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
/* Check if clock recovery successful. */
if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -468,7 +478,8 @@ static enum link_training_result dpia_training_cr_transparent(
* continuously.
*/
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
/* Write TPS1 (not VS or PE) to DPCD to start CR phase.
* DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to
* start link training.
@@ -498,6 +509,7 @@ static enum link_training_result dpia_training_cr_transparent(
/* Check if clock recovery successful. */
if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -529,8 +541,7 @@ static enum link_training_result dpia_training_cr_transparent(
if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
- DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
- " -hop(%d)\n - result(%d)\n - retries(%d)\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
DPRX,
@@ -545,7 +556,7 @@ static enum link_training_result dpia_training_cr_transparent(
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_cr_phase(
struct dc_link *link,
@@ -564,7 +575,8 @@ static enum link_training_result dpia_training_cr_phase(
}
/* Return status read interval during equalization phase. */
-static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
+static uint32_t dpia_get_eq_aux_rd_interval(
+ const struct dc_link *link,
const struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -590,12 +602,11 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
* - TPSx is transmitted for any hops downstream of DPOA.
* - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.
* - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful.
- * - DPRX EQ only reported successful when both DPRX and DPIA requirements
- * (clk sync packets sent) fulfilled.
+ * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled.
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_eq_non_transparent(
struct dc_link *link,
@@ -624,9 +635,10 @@ static enum link_training_result dpia_training_eq_non_transparent(
else
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+
/* DPTX-to-DPIA equalization always successful. */
if (hop == repeater_cnt) {
result = LINK_TRAINING_SUCCESS;
@@ -640,7 +652,8 @@ static enum link_training_result dpia_training_eq_non_transparent(
result = LINK_TRAINING_ABORT;
break;
}
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_TRAINING,
ts);
if (status != DC_OK) {
@@ -658,12 +671,14 @@ static enum link_training_result dpia_training_eq_non_transparent(
* drive settings for hop immediately downstream.
*/
if (hop == repeater_cnt - 1) {
- set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
- link,
- lt_settings);
- status = core_link_send_set_config(link,
- DPIA_SET_CFG_SET_VSPE,
- set_cfg_data);
+ set_cfg_data = dpia_build_set_config_data(
+ DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(
+ link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
break;
@@ -679,7 +694,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
* ensure clock sync packets have been sent.
*/
if (hop == DPRX && retries_eq == 1)
- wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY);
+ wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY);
else
wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop);
@@ -705,8 +720,8 @@ static enum link_training_result dpia_training_eq_non_transparent(
}
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -741,7 +756,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_eq_transparent(
struct dc_link *link,
@@ -761,6 +776,7 @@ static enum link_training_result dpia_training_eq_transparent(
wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX);
for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+
if (retries_eq == 0) {
status = dpcd_set_lt_pattern(link, tr_pattern, DPRX);
if (status != DC_OK) {
@@ -810,8 +826,7 @@ static enum link_training_result dpia_training_eq_transparent(
if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
- DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
- " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
DPRX,
@@ -826,7 +841,7 @@ static enum link_training_result dpia_training_eq_transparent(
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_eq_phase(
struct dc_link *link,
@@ -845,7 +860,9 @@ static enum link_training_result dpia_training_eq_phase(
}
/* End training of specified hop in display path. */
-static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
+static enum dc_status dpcd_clear_lt_pattern(
+ struct dc_link *link,
+ uint32_t hop)
{
union dpcd_training_pattern dpcd_pattern = {0};
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
@@ -855,7 +872,8 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
- status = core_link_write_dpcd(link,
+ status = core_link_write_dpcd(
+ link,
dpcd_tps_offset,
&dpcd_pattern.raw,
sizeof(dpcd_pattern.raw));
@@ -873,9 +891,10 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
* (DPTX-to-DPIA) and last hop (DPRX).
*
* @param link DPIA link being trained.
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_end(struct dc_link *link,
+static enum link_training_result dpia_training_end(
+ struct dc_link *link,
struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -884,13 +903,15 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
enum dc_status status;
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
if (hop == repeater_cnt) { /* DPTX-to-DPIA */
/* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that
* DPTX-to-DPIA hop trained. No DPCD write needed for first hop.
*/
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_TRAINING,
DPIA_TS_UFP_DONE);
if (status != DC_OK)
@@ -904,7 +925,8 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
/* Notify DPOA that non-transparent link training of DPRX done. */
if (hop == DPRX && result != LINK_TRAINING_ABORT) {
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_TRAINING,
DPIA_TS_DPRX_DONE);
if (status != DC_OK)
@@ -912,18 +934,20 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
}
} else { /* non-LTTPR or transparent LTTPR. */
+
/* Write 0x0 to TRAINING_PATTERN_SET */
status = dpcd_clear_lt_pattern(link, hop);
if (status != DC_OK)
result = LINK_TRAINING_ABORT;
+
}
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n",
- __func__,
- link->link_id.enum_id - ENUM_ID_1,
- hop,
- result,
- lt_settings->lttpr_mode);
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ lt_settings->lttpr_mode);
return result;
}
@@ -933,20 +957,21 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
* - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
*
* @param link DPIA link being trained.
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(struct dc_link *link,
- struct link_training_settings *lt_settings,
- uint32_t hop)
+static void dpia_training_abort(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
{
uint8_t data = 0;
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
- __func__,
- link->link_id.enum_id - ENUM_ID_1,
- lt_settings->lttpr_mode,
- link->is_hpd_pending);
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ lt_settings->lttpr_mode,
+ link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
if (link->is_hpd_pending)
@@ -962,7 +987,7 @@ static void dpia_training_abort(struct dc_link *link,
core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}
-enum link_training_result dc_link_dpia_perform_link_training(
+enum link_training_result dpia_perform_link_training(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
@@ -983,7 +1008,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
return result;
if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Train each hop in turn starting with the one closest to DPTX.
* In transparent or non-LTTPR mode, train only the final hop (DPRX).
@@ -1011,13 +1036,13 @@ enum link_training_result dc_link_dpia_perform_link_training(
* falling back to lower bandwidth settings possible.
*/
if (result == LINK_TRAINING_SUCCESS) {
- msleep(5);
+ fsleep(5000);
if (!link->is_automated)
result = dp_check_link_loss_status(link, &lt_settings);
- } else if (result == LINK_TRAINING_ABORT) {
+ } else if (result == LINK_TRAINING_ABORT)
dpia_training_abort(link, &lt_settings, repeater_id);
- } else {
+ else
dpia_training_end(link, &lt_settings, repeater_id);
- }
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
new file mode 100644
index 000000000000..b39fb9faf1c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_DPIA_H__
+#define __DC_LINK_DP_TRAINING_DPIA_H__
+#include "link_dp_training.h"
+
+/* Train DP tunneling link for USB4 DPIA display endpoint.
+ * DPIA equivalent of dc_link_dp_perform_link_training.
+ * Aborts link training upon detection of sink unplug.
+ */
+enum link_training_result dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
new file mode 100644
index 000000000000..5731c4b61f9f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -0,0 +1,955 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements 8b/10b link training specially modified to support an
+ * embedded retimer chip, referred to as the fixed VS/PE retimer. Unlike a
+ * native DP connection, this chip requires a modified link training protocol
+ * based on 8b/10b link training. Since this is a non-standard sequence and
+ * this hardware must be supported, it is isolated in its own training
+ * sequence inside its own file.
+ */
+#include "link_dp_training_fixed_vs_pe_retimer.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+void dp_fixed_vs_pe_read_lane_adjust(
+ struct dc_link *link,
+ union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+ const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
+ const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint32_t vendor_lttpr_read_address = 0xF0053;
+ uint8_t dprx_vs = 0;
+ uint8_t dprx_pe = 0;
+ uint8_t lane;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ vendor_lttpr_read_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ /* W/A to read lane settings requested by DPRX */
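+	/* The two vendor register writes below appear to select whether the
+	 * following read at vendor_lttpr_read_address returns the requested
+	 * VS or the requested PE byte.
+	 */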
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_vs,
+ 1);
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_pe,
+ 1);
+
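+	/* The readback packs one 2-bit value per lane: lane N occupies
+	 * bits [2N+1:2N] of dprx_vs and dprx_pe respectively.
+	 */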
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3;
+ }
+}
+
+
+void dp_fixed_vs_pe_set_retimer_lane_settings(
+ struct dc_link *link,
+ const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
+ uint8_t lane_count)
+{
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint8_t lane = 0;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
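+	/* Pack one 2-bit VS and PE value per lane into the last data byte,
+	 * lane N in bits [2N+1:2N], mirroring the layout read back by
+	 * dp_fixed_vs_pe_read_lane_adjust().
+	 */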
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Force LTTPR to output desired VS and PE */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+}
+
+static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ uint8_t toggle_rate = 0x6;
+ uint8_t target_rate = 0x6;
+ bool apply_toggle_rate_wa = false;
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
+
+ /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */
+ if (lt_settings->cr_pattern_time < 16000)
+ lt_settings->cr_pattern_time = 16000;
+
+ /* Fixed VS/PE specific: Toggle link rate */
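+	/* DPCD link-rate codes: 0x6 = RBR (1.62 Gbps), 0xA = HBR (2.70 Gbps).
+	 * When the workaround applies, the alternate rate is programmed via
+	 * lt_settings first, and the intended rate is written back to
+	 * DP_LINK_BW_SET right after dpcd_set_link_settings().
+	 */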
+ apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
+ target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
+ toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
+
+ if (apply_toggle_rate_wa)
+ lt_settings->link_settings.link_rate = toggle_rate;
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
+
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, lt_settings);
+
+ /* Fixed VS/PE specific: Toggle link rate back*/
+ if (apply_toggle_rate_wa) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &target_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = target_rate;
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+		/* 2. Perform link training (this also clears the
+		 * link-training-done state)
+		 */
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
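+		/* Train each LTTPR hop in turn (IDs repeater_cnt down to 1);
+		 * the DPRX clock recovery and EQ sequences follow below.
+		 */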
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS) {
+ repeater_training_done(link, repeater_id);
+ break;
+ }
+
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ repeater_id);
+
+ repeater_training_done(link, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
+ }
+ }
+ }
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ DPRX);
+ }
+ }
+
+ return status;
+}
+
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+ uint8_t toggle_rate;
+ uint8_t rate;
+
+ /* Only 8b/10b is supported */
+ ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING);
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
+ return status;
+ }
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* Certain display and cable configuration require extra delay */
+ if (offset > 2)
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
+
+ /* Vendor specific: Reset lane settings */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* Vendor specific: Enable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+
+ /* 1. set link rate, lane count and spread. */
+
+ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+ /* 2. Perform link training */
+
+ /* Perform Clock Recovery Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ const uint8_t max_vendor_dpcd_retries = 10;
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ enum dc_status dpcd_status = DC_OK;
+ uint8_t i = 0;
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+
+ /* 1. call HWSS to set lane settings */
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ 0);
+
+ /* 2. update DPCD of the receiver */
+ if (!retry_count) {
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration.
+ */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ 0);
+ /* Vendor specific: Disable intercept */
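+				/* Retry the intercept-disable write up to
+				 * max_vendor_dpcd_retries times; on failure,
+				 * re-enable intercept before the next attempt.
+				 */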
+ for (i = 0; i < max_vendor_dpcd_retries; i++) {
+ msleep(pre_disable_intercept_delay_ms);
+ dpcd_status = core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_dis[0],
+ sizeof(vendor_lttpr_write_data_intercept_dis));
+
+ if (dpcd_status == DC_OK)
+ break;
+
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+ }
+ } else {
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ 0);
+ }
+
+ /* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 6. max VS reached*/
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings */
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient
+ */
+ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ status = dp_get_cr_failure(lane_count, dpcd_lane_status);
+ }
+
+ /* Perform Channel EQ Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
+
+ status = LINK_TRAINING_EQ_FAIL_EQ;
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
+
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, 0);
+ else
+ dpcd_set_lane_settings(link, lt_settings, 0);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+ }
+
+ return status;
+}
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x6E};
+ const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E};
+ const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01};
+ const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+ uint8_t toggle_rate;
+ uint8_t rate;
+
+ /* Only 8b/10b is supported */
+ ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING);
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
+ return status;
+ }
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* Certain display and cable configuration require extra delay */
+ if (offset > 2)
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
+
+ /* Vendor specific: Reset lane settings */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* Vendor specific: Enable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+
+ /* 1. set link rate, lane count and spread. */
+
+ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+ /* 2. Perform link training */
+
+ /* Perform Clock Recovery Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ const uint8_t max_vendor_dpcd_retries = 10;
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ enum dc_status dpcd_status = DC_OK;
+ uint8_t i = 0;
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+
+ /* 1. call HWSS to set lane settings */
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ 0);
+
+ /* 2. update DPCD of the receiver */
+ if (!retry_count) {
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration.
+ */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ 0);
+ /* Vendor specific: Disable intercept */
+ for (i = 0; i < max_vendor_dpcd_retries; i++) {
+ msleep(pre_disable_intercept_delay_ms);
+ dpcd_status = core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_dis[0],
+ sizeof(vendor_lttpr_write_data_intercept_dis));
+
+ if (dpcd_status == DC_OK)
+ break;
+
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+ }
+ } else {
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ 0);
+ }
+
+ /* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 6. max VS reached*/
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings */
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient
+ */
+ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ status = dp_get_cr_failure(lane_count, dpcd_lane_status);
+ }
+
+ /* Perform Channel EQ Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_adicora_eq1[0],
+ sizeof(vendor_lttpr_write_data_adicora_eq1));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_adicora_eq2[0],
+ sizeof(vendor_lttpr_write_data_adicora_eq2));
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
+
+ status = LINK_TRAINING_EQ_FAIL_EQ;
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
+
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq) {
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, 0);
+
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_adicora_eq3[0],
+ sizeof(vendor_lttpr_write_data_adicora_eq3));
+ } else
+ dpcd_set_lane_settings(link, lt_settings, 0);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
new file mode 100644
index 000000000000..c0d6ea329504
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
+#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
+#include "link_dp_training.h"
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+void dp_fixed_vs_pe_set_retimer_lane_settings(
+ struct dc_link *link,
+ const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
+ uint8_t lane_count);
+
+void dp_fixed_vs_pe_read_lane_adjust(
+ struct dc_link *link,
+ union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]);
+
+#endif /* __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index af110bf9470f..5c9a30211c10 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -23,11 +23,14 @@
*
*/
-#include <inc/core_status.h>
-#include <dc_link.h>
-#include <inc/link_hwss.h>
-#include <inc/link_dpcd.h>
-#include <dc_dp_types.h>
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This file implements basic DPCD read/write functionality. It also performs a
+ * basic DPCD range check to ensure that every DPCD request complies with the
+ * spec's range requirements.
+ */
+
+#include "link_dpcd.h"
#include <drm/display/drm_dp_helper.h>
#include "dm_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
index d561f86d503c..08d787a1e451 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
@@ -25,9 +25,8 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
-#include <inc/core_status.h>
-#include <dc_link.h>
-#include <dc_link_dp.h>
+#include "link.h"
+#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
new file mode 100644
index 000000000000..8d1df863659c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements retrieval and configuration of eDP panel features such
+ * as PSR and ABM, and it also manages the spec-defined eDP panel power sequences.
+ */
+
+#include "link_edp_panel_control.h"
+#include "link_dpcd.h"
+#include "link_dp_capability.h"
+#include "dm_helpers.h"
+#include "dal_asic_id.h"
+#include "dce/dmub_psr.h"
+#include "abm.h"
+#define DC_LOGGER_INIT(logger)
+
+/* Travis */
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
+/* Nutmeg */
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
+
+void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
+{
+ union dpcd_edp_config edp_config_set;
+ bool panel_mode_edp = false;
+
+ memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+ if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /*set edp panel mode in receiver*/
+ core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+ enum dc_status result;
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ ASSERT(result == DC_OK);
+ }
+ }
+ link->panel_mode = panel_mode;
+ DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
+ "eDP panel mode enabled: %d \n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+ /* We need to explicitly check that connector
+ * is not DP. Some Travis_VGA get reported
+ * by video bios as DP.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+ switch (link->dpcd_caps.branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_0022B9:
+			/* Alternate scrambler reset is required for Travis.
+			 * For the case when the external chip does not
+			 * provide a sink device id, the alternate scrambler
+			 * scheme will be overridden later by querying
+			 * Encoder features.
+			 */
+ if (strncmp(
+ link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_2,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ case DP_BRANCH_DEVICE_ID_00001A:
+			/* Alternate scrambler reset is required for Travis.
+			 * For the case when the external chip does not provide
+			 * a sink device id, the alternate scrambler scheme will
+			 * be overridden later by querying Encoder features.
+			 */
+ if (strncmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_3,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (link->dpcd_caps.panel_mode_edp &&
+ (link->connector_signal == SIGNAL_TYPE_EDP ||
+ (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->is_internal_display))) {
+ return DP_PANEL_MODE_EDP;
+ }
+
+ return DP_PANEL_MODE_DEFAULT;
+}
+
+bool edp_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+bool edp_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ union dpcd_source_backlight_get dpcd_backlight_get;
+
+ memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+ sizeof(union dpcd_source_backlight_get)))
+ return false;
+
+ *backlight_millinits_avg =
+ dpcd_backlight_get.bytes.backlight_millinits_avg;
+ *backlight_millinits_peak =
+ dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+ /* On non-supported panels dpcd_read usually succeeds with 0 returned */
+ if (*backlight_millinits_avg == 0 ||
+ *backlight_millinits_avg > *backlight_millinits_peak)
+ return false;
+
+ return true;
+}
+
+bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+ uint8_t backlight_enable = enable ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+ &backlight_enable, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+// we read default from 0x320 because we expect BIOS wrote it there
+// regular get_backlight_nit reads from panel set at 0x326
+static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+
+ return true;
+}
+
+bool set_default_brightness_aux(struct dc_link *link)
+{
+ uint32_t default_backlight;
+
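+	/* Backlight values below are in millinits: 150000 = 150 nits default,
+	 * 5000 = 5 nits lower bound, 5000000 = 5000 nits upper bound.
+	 */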
+ if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
+ if (!read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+		// if < 5 nits or > 5000 nits, it might be a wrong readback
+		if (default_backlight < 5000 || default_backlight > 5000000)
+			default_backlight = 150000;
+
+ return edp_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+ }
+ return false;
+}
+
+bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing)
+{
+ struct dc_link_settings link_setting;
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t req_bw;
+ union lane_count_set lane_count_set = {0};
+
+ ASSERT(link || crtc_timing); // invalid input
+
+ if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ !link->panel_config.ilr.optimize_edp_link_rate)
+ return false;
+
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
+ return true;
+ }
+
+ // Read DPCD 00115h to find the edp link rate set used
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+
+ if (!crtc_timing->flags.DSC)
+ edp_decide_link_settings(link, &link_setting, req_bw);
+ else
+ decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
+
+ if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
+ lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
+ return true;
+ }
+
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
+ return false;
+}
+
+void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+{
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ link->dc->hwss.edp_power_control(link, true);
+ if (wait_for_hpd)
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ if (link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, true);
+}
+
+bool edp_wait_for_t12(struct dc_link *link)
+{
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+ link->dc->hwss.edp_wait_for_T12(link);
+
+ return true;
+ }
+
+ return false;
+}
+
+void edp_add_delay_for_T9(struct dc_link *link)
+{
+ if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
+ fsleep(link->panel_config.pps.extra_delay_backlight_off * 1000);
+}
+
+bool edp_receiver_ready_T9(struct dc_link *link)
+{
+ unsigned int tries = 0;
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+
+	/* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+			udelay(100); // Max T9
+ } while (++tries < 50);
+ }
+
+ return result;
+}
+
+bool edp_receiver_ready_T7(struct dc_link *link)
+{
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+
+ /* use absolute time stamp to constrain max T7*/
+ unsigned long long enter_timestamp = 0;
+ unsigned long long finish_timestamp = 0;
+ unsigned long long time_taken_in_ns = 0;
+
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ /* Starting from eDP version 1.2, SINK_STATUS indicates whether the sink is ready. */
+ enter_timestamp = dm_get_timestamp(link->ctx);
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50 ms
+ }
+
+ if (link && link->panel_config.pps.extra_t7_ms > 0)
+ fsleep(link->panel_config.pps.extra_t7_ms * 1000);
+
+ return result;
+}
+
+bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
+{
+ bool ret = false;
+ union dpcd_alpm_configuration alpm_config;
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ memset(&alpm_config, 0, sizeof(alpm_config));
+
+ alpm_config.bits.ENABLE = (enable ? true : false);
+ ret = dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+ return ret;
+}
+
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+ }
+
+ return pipe_ctx;
+}
+
+bool edp_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ struct dc *dc = link->ctx->dc;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
+
+ if (pipe_ctx) {
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (pipe_ctx->plane_state == NULL)
+ frame_ramp = 0;
+ } else {
+ return false;
+ }
+
+ dc->hwss.set_backlight_level(
+ pipe_ctx,
+ backlight_pwm_u16_16,
+ frame_ramp);
+ }
+ return true;
+}
+
+bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
+ unsigned int panel_inst;
+
+ if (psr == NULL && force_static)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
+ // Don't enter PSR if panel is not connected
+ return false;
+ }
+
+ /* Set power optimization flag */
+ if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
+ link->psr_settings.psr_power_opt = *power_opts;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
+ }
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled &&
+ force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+
+ /* Enable or Disable PSR */
+ if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
+ link->psr_settings.psr_allow_active = *allow_active;
+
+ if (!link->psr_settings.psr_allow_active)
+ dc_z10_restore(dc);
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled) {
+ psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
+ } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
+ link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
+ else
+ return false;
+ }
+ return true;
+}
+
+bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
+ psr->funcs->psr_get_state(psr, state, panel_inst);
+ else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->get_psr_state(dmcu, state);
+
+ return true;
+}
+
+static inline enum physical_phy_id
+transmitter_to_phy_id(struct dc_link *link)
+{
+ struct dc_context *dc_ctx = link->ctx;
+ enum transmitter transmitter_value = link->link_enc->transmitter;
+
+ switch (transmitter_value) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYLD_0;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYLD_1;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYLD_2;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYLD_3;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYLD_4;
+ case TRANSMITTER_UNIPHY_F:
+ return PHYLD_5;
+ case TRANSMITTER_NUTMEG_CRT:
+ return PHYLD_6;
+ case TRANSMITTER_TRAVIS_CRT:
+ return PHYLD_7;
+ case TRANSMITTER_TRAVIS_LCD:
+ return PHYLD_8;
+ case TRANSMITTER_UNIPHY_G:
+ return PHYLD_9;
+ case TRANSMITTER_COUNT:
+ return PHYLD_COUNT;
+ case TRANSMITTER_UNKNOWN:
+ return PHYLD_UNKNOWN;
+ default:
+ DC_ERROR("Unknown transmitter value %d\n", transmitter_value);
+ return PHYLD_UNKNOWN;
+ }
+}
+
+bool edp_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *dc;
+ struct dmcu *dmcu;
+ struct dmub_psr *psr;
+ int i;
+ unsigned int panel_inst;
+ /* updateSinkPsrDpcdConfig */
+ union dpcd_psr_configuration psr_configuration;
+ union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ dc = link->ctx->dc;
+ dmcu = dc->res_pool->dmcu;
+ psr = dc->res_pool->psr;
+
+ if (!dmcu && !psr)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /* For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ /* For PSR v2, set the bit when the Source device will
+ * be enabling PSR2 operation.
+ */
+ psr_configuration.bits.ENABLE_PSR2 = 1;
+ /* For PSR v2, the Sink device must be able to receive
+ * SU region updates early in the frame time.
+ */
+ psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368, /* DP_PSR_EN_CFG (DPCD 0x170) */
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ edp_power_alpm_dpcd_enable(link, true);
+ psr_context->su_granularity_required =
+ psr_config->su_granularity_required;
+ psr_context->su_y_granularity =
+ psr_config->su_y_granularity;
+ psr_context->line_time_in_us = psr_config->line_time_in_us;
+
+ /* linux must be able to expose AMD Source DPCD definition
+ * in order to support FreeSync PSR
+ */
+ if (link->psr_settings.psr_vtotal_control_support) {
+ psr_context->rate_control_caps = psr_config->rate_control_caps;
+ vtotal_control.bits.ENABLE = true;
+ core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
+ &vtotal_control.raw, sizeof(vtotal_control.raw));
+ }
+ }
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* DMCU subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /* PhyId is associated with the transmitter id */
+ psr_context->smuPhyId = transmitter_to_phy_id(link);
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
+ timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
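+ /* Worked example with a hypothetical 1920x1080@60 timing
+ * (pix_clk_100hz = 1485000, v_total = 1125, h_total = 2200):
+ * (1485000 * 100) / 1125 / 2200 = 60 Hz.
+ */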
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->timing_generator_count;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before enter PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame, in units of 100 lines
+ * (i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
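+ /* e.g. with a hypothetical v_total of 1125 lines this yields
+ * 1125 / 2 / 100 = 5, i.e. roughly 500 lines of hysteresis.
+ */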
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+ /* skip power down of the single pipe since it blocks the cstate */
+ if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
+ switch (link->ctx->asic_id.chip_family) {
+ case FAMILY_YELLOW_CARP:
+ case AMDGPU_FAMILY_GC_10_3_6:
+ case AMDGPU_FAMILY_GC_11_0_1:
+ if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+ break;
+ default:
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+ break;
+ }
+ }
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
+ psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Keep ALPM enabled (DISABLE_ALPM = 0) and use the default ALPM power-down mode */
+ psr_context->psr_level.bits.DISABLE_ALPM = 0;
+ psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+
+ if (psr) {
+ link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
+ link, psr_context, panel_inst);
+ link->psr_settings.psr_power_opt = 0;
+ link->psr_settings.psr_allow_active = 0;
+ } else {
+ link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ }
+
+ /* psr_enabled == 0 indicates setup_psr did not succeed, but this
+ * should not happen since firmware should be running at this point
+ */
+ if (link->psr_settings.psr_feature_enabled == 0)
+ ASSERT(0);
+
+ return true;
+}
+
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_psr *psr = dc->res_pool->psr;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return;
+
+ // PSR residency measurements only supported on DMCUB
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
+ psr->funcs->psr_get_residency(psr, residency, panel_inst);
+ else
+ *residency = 0;
+}
+
+bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_psr *psr = dc->res_pool->psr;
+
+ if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support)
+ return false;
+
+ psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su);
+
+ return true;
+}
+
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx.stream;
+
+ if (stream && stream->link == link) {
+ abm = pipe_ctx.stream_res.abm;
+ break;
+ }
+ }
+ return abm;
+}
+
+int edp_get_backlight_level(const struct dc_link *link)
+{
+ struct abm *abm = get_abm_from_stream_res(link);
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
+
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+ if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+ return panel_cntl->funcs->get_current_backlight(panel_cntl);
+ else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+ return (int) abm->funcs->get_current_backlight(abm);
+ else
+ return DC_ERROR_UNEXPECTED;
+}
+
+int edp_get_target_backlight_pwm(const struct dc_link *link)
+{
+ struct abm *abm = get_abm_from_stream_res(link);
+
+ if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+ return DC_ERROR_UNEXPECTED;
+
+ return (int) abm->funcs->get_target_backlight(abm);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
new file mode 100644
index 000000000000..28f552080558
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__
+#define __DC_LINK_EDP_PANEL_CONTROL_H__
+#include "link.h"
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
+bool set_default_brightness_aux(struct dc_link *link);
+void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
+int edp_get_backlight_level(const struct dc_link *link);
+bool edp_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak);
+bool edp_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+bool edp_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+int edp_get_target_backlight_pwm(const struct dc_link *link);
+bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state);
+bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts);
+bool edp_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link,
+ uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+bool edp_wait_for_t12(struct dc_link *link);
+bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
+void edp_add_delay_for_T9(struct dc_link *link);
+bool edp_receiver_ready_T9(struct dc_link *link);
+bool edp_receiver_ready_T7(struct dc_link *link);
+bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
+#endif /* __DC_LINK_EDP_PANEL_CONTROL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
new file mode 100644
index 000000000000..e3d729ab5b9f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This file implements functions that manage basic HPD components such as gpio.
+ * It also provides wrapper functions to execute HPD related programming. This
+ * file only manages basic HPD functionality. It doesn't manage detection or
+ * feature or signal specific HPD behaviors.
+ */
+#include "link_hpd.h"
+#include "gpio_service_interface.h"
+
+bool link_get_hpd_state(struct dc_link *link)
+{
+ uint32_t state;
+
+ dal_gpio_lock_pin(link->hpd_gpio);
+ dal_gpio_get_value(link->hpd_gpio, &state);
+ dal_gpio_unlock_pin(link->hpd_gpio);
+
+ return state;
+}
+
+void link_enable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->enable_hpd(encoder);
+}
+
+void link_disable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->disable_hpd != NULL)
+ encoder->funcs->disable_hpd(encoder);
+}
+
+void link_enable_hpd_filter(struct dc_link *link, bool enable)
+{
+ struct gpio *hpd;
+
+ if (enable) {
+ link->is_hpd_filter_disabled = false;
+ program_hpd_filter(link);
+ } else {
+ link->is_hpd_filter_disabled = true;
+ /* Obtain HPD handle */
+ hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (!hpd)
+ return;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = 0;
+ config.delay_on_disconnect = 0;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+ }
+}
+
+struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
+{
+ enum bp_result bp_result;
+ struct graphics_object_hpd_info hpd_info;
+ struct gpio_pin_info pin_info;
+
+ if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
+ return NULL;
+
+ bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+ hpd_info.hpd_int_gpio_uid, &pin_info);
+
+ if (bp_result != BP_RESULT_OK) {
+ ASSERT(bp_result == BP_RESULT_NORECORD);
+ return NULL;
+ }
+
+ return dal_gpio_service_create_irq(gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+}
+
+bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high)
+{
+ struct gpio *hpd_pin = link_get_hpd_gpio(
+ link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (!hpd_pin)
+ return false;
+
+ dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
+ dal_gpio_get_value(hpd_pin, is_hpd_high);
+ dal_gpio_close(hpd_pin);
+ dal_gpio_destroy_irq(&hpd_pin);
+ return true;
+}
+
+enum hpd_source_id get_hpd_line(struct dc_link *link)
+{
+ struct gpio *hpd;
+ enum hpd_source_id hpd_id;
+
+ hpd_id = HPD_SOURCEID_UNKNOWN;
+
+ hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+
+ if (hpd) {
+ switch (dal_irq_get_source(hpd)) {
+ case DC_IRQ_SOURCE_HPD1:
+ hpd_id = HPD_SOURCEID1;
+ break;
+ case DC_IRQ_SOURCE_HPD2:
+ hpd_id = HPD_SOURCEID2;
+ break;
+ case DC_IRQ_SOURCE_HPD3:
+ hpd_id = HPD_SOURCEID3;
+ break;
+ case DC_IRQ_SOURCE_HPD4:
+ hpd_id = HPD_SOURCEID4;
+ break;
+ case DC_IRQ_SOURCE_HPD5:
+ hpd_id = HPD_SOURCEID5;
+ break;
+ case DC_IRQ_SOURCE_HPD6:
+ hpd_id = HPD_SOURCEID6;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ dal_gpio_destroy_irq(&hpd);
+ }
+
+ return hpd_id;
+}
+
+bool program_hpd_filter(const struct dc_link *link)
+{
+ bool result = false;
+ struct gpio *hpd;
+ int delay_on_connect_in_ms = 0;
+ int delay_on_disconnect_in_ms = 0;
+
+ if (link->is_hpd_filter_disabled)
+ return false;
+ /* Verify feature is supported */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* Program hpd filter */
+ delay_on_connect_in_ms = 500;
+ delay_on_disconnect_in_ms = 100;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* Program hpd filter to allow DP signal to settle */
+ /* 500: unable to detect an MST <-> SST switch because HPD stays low for
+ * only 100 ms on the DELL U2413
+ * 0: some passive dongles still show aux mode instead of i2c
+ * 20-50: not enough to hide HPD bouncing with passive dongles;
+ * intermittent i2c read issues are also seen.
+ */
+ delay_on_connect_in_ms = 80;
+ delay_on_disconnect_in_ms = 0;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ default:
+ /* Don't program hpd filter */
+ return false;
+ }
+
+ /* Obtain HPD handle */
+ hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+
+ if (!hpd)
+ return result;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = delay_on_connect_in_ms;
+ config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+
+ result = true;
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
new file mode 100644
index 000000000000..4fb526b264f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_HPD_H__
+#define __DC_LINK_HPD_H__
+#include "link.h"
+
+enum hpd_source_id get_hpd_line(struct dc_link *link);
+/*
+ * Function: program_hpd_filter
+ *
+ * @brief
+ * Programs HPD filter on associated HPD line to default values.
+ *
+ * @return
+ * true on success, false otherwise
+ */
+bool program_hpd_filter(const struct dc_link *link);
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dpia_query_hpd_status(struct dc_link *link);
+bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high);
+bool link_get_hpd_state(struct dc_link *link);
+struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+void link_enable_hpd(const struct dc_link *link);
+void link_disable_hpd(const struct dc_link *link);
+void link_enable_hpd_filter(struct dc_link *link, bool enable);
+#endif /* __DC_LINK_HPD_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 6b88ae14f1f9..aad8095660c9 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -53,11 +53,11 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
#include "amdgpu_dm/dc_fpu.h"
#define DC_FP_START() dc_fpu_begin(__func__, __LINE__)
#define DC_FP_END() dc_fpu_end(__func__, __LINE__)
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
/*
*
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index eb5b7eb292ef..ba1715e2d25a 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -126,10 +126,22 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_HPD,
DMUB_NOTIFICATION_HPD_IRQ,
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
+ DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_MAX
};
/**
+ * DPIA NOTIFICATION Response Type
+ */
+enum dpia_notify_bw_alloc_status {
+ DPIA_BW_REQ_FAILED = 0,
+ DPIA_BW_REQ_SUCCESS,
+ DPIA_EST_BW_CHANGED,
+ DPIA_BW_ALLOC_CAPS_CHANGED
+};
+
+/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
* @top: top address for region
@@ -249,6 +261,8 @@ struct dmub_srv_hw_params {
bool usb4_cm_version;
bool fw_in_system_memory;
bool dpia_hpd_int_enable_supported;
+ bool disable_clock_gate;
+ bool disallow_dispclk_dppclk_ds;
};
/**
@@ -453,6 +467,7 @@ struct dmub_srv {
* @pending_notification: Indicates there are other pending notifications
* @aux_reply: aux reply
* @hpd_status: hpd status
+ * @bw_alloc_reply: BW Allocation reply from CM/DPIA
*/
struct dmub_notification {
enum dmub_notification_type type;
@@ -463,6 +478,10 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
+ /**
+ * DPIA notification command.
+ */
+ struct dmub_rb_cmd_dpia_notification dpia_notification;
};
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 33907feefebb..598fa1de54ce 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -95,6 +95,13 @@
/* Maximum number of SubVP streams */
#define DMUB_MAX_SUBVP_STREAMS 2
+/* Define max FPO streams as 4 for now. Current implementation today
+ * only supports 1, but could be more in the future. Reduce array
+ * size to ensure the command size remains less than 64 bytes if
+ * adding new fields.
+ */
+#define DMUB_MAX_FPO_STREAMS 4
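+/* Rough size budget, assuming 1-byte packing and a 4-byte dmub_cmd_header:
+ * each dmub_cmd_fw_assisted_mclk_switch_pipe_data entry is 8 bytes, so a
+ * config with 4 entries is 36 bytes and the full command is about 40 bytes,
+ * comfortably below the 64-byte ring buffer command size.
+ */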
+
/* Maximum number of streams on any ASIC. */
#define DMUB_MAX_STREAMS 6
@@ -162,6 +169,7 @@ extern "C" {
#define dmub_udelay(microseconds) udelay(microseconds)
#endif
+#pragma pack(push, 1)
/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
@@ -172,6 +180,7 @@ union dmub_addr {
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+#pragma pack(pop)
/**
* Dirty rect definition.
@@ -353,7 +362,7 @@ union dmub_fw_boot_status {
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
uint32_t reserved : 1;
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
-
+ uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -368,6 +377,7 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */
DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/
+ DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */
};
/* Register bit definition for SCRATCH5 */
@@ -408,8 +418,8 @@ union dmub_fw_boot_options {
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
-
- uint32_t reserved : 15; /**< reserved */
+ uint32_t disable_clk_ds: 1; /* 1 to disallow dispclk_ds and dppclk_ds */
+ uint32_t reserved : 14; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -457,6 +467,10 @@ enum dmub_cmd_vbios_type {
* Query DP alt status on a transmitter.
*/
DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
+ /**
+ * Controls domain power gating
+ */
+ DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
};
//==============================================================================
@@ -770,6 +784,10 @@ enum dmub_out_cmd_type {
* Command type used for SET_CONFIG Reply notification
*/
DMUB_OUT_CMD__SET_CONFIG_REPLY = 3,
+ /**
+ * Command type used for USB4 DPIA notification
+ */
+ DMUB_OUT_CMD__DPIA_NOTIFICATION = 5,
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -779,6 +797,11 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
+/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */
+enum dmub_cmd_dpia_notification_type {
+ DPIA_NOTIFY__BW_ALLOCATION = 0,
+};
+
#pragma pack(push, 1)
/**
@@ -1082,7 +1105,12 @@ enum dmub_cmd_idle_opt_type {
/**
* DCN hardware save.
*/
- DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
+ DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1,
+
+ /**
+ * DCN hardware notify idle.
+ */
+ DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2
};
/**
@@ -1093,6 +1121,24 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
};
/**
+ * struct dmub_dcn_notify_idle_cntl_data - Data passed to FW in a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
+ */
+struct dmub_dcn_notify_idle_cntl_data {
+ uint8_t driver_idle;
+ uint8_t d3_entry;
+ uint8_t trigger;
+ uint8_t pad[1];
+};
+
+/**
+ * struct dmub_rb_cmd_idle_opt_dcn_notify_idle - Data passed to FW in a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
+ */
+struct dmub_rb_cmd_idle_opt_dcn_notify_idle {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_dcn_notify_idle_cntl_data cntl_data;
+};
+
+/**
* struct dmub_clocks - Clock update notification.
*/
struct dmub_clocks {
@@ -1205,6 +1251,23 @@ struct dmub_rb_cmd_dig1_transmitter_control {
};
/**
+ * struct dmub_rb_cmd_domain_control_data - Data for DOMAIN power control
+ */
+struct dmub_rb_cmd_domain_control_data {
+ uint8_t inst : 6; /**< DOMAIN instance to control */
+ uint8_t power_gate : 1; /**< 1=power gate, 0=power up */
+ uint8_t reserved[3]; /**< Reserved for future use */
+};
+
+/**
+ * struct dmub_rb_cmd_domain_control - Controls DOMAIN power gating
+ */
+struct dmub_rb_cmd_domain_control {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_domain_control_data data; /**< payload */
+};
+
+/**
* DPIA tunnel command parameters.
*/
struct dmub_cmd_dig_dpia_control_data {
@@ -1558,6 +1621,79 @@ struct dmub_rb_cmd_dp_set_config_reply {
};
/**
+ * Definition of a DPIA notification header
+ */
+struct dpia_notification_header {
+ uint8_t instance; /**< DPIA Instance */
+ uint8_t reserved[3];
+ enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */
+};
+
+/**
+ * Definition of the common data struct of DPIA notification
+ */
+struct dpia_notification_common {
+ uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)
+ - sizeof(struct dpia_notification_header)];
+};
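+
+/* With a 64-byte ring buffer command, a 4-byte dmub_cmd_header and an
+ * 8-byte dpia_notification_header, this buffer works out to 52 bytes
+ * (assuming 4-byte enums and 1-byte packing).
+ */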
+
+/**
+ * Definition of a DPIA notification data
+ */
+struct dpia_bw_allocation_notify_data {
+ union {
+ struct {
+ uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */
+ uint16_t bw_request_failed: 1; /**< BW_Request_Failed */
+ uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */
+ uint16_t est_bw_changed: 1; /**< Estimated_BW changed */
+ uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capability_Changed */
+ uint16_t reserved: 11; /**< Reserved */
+ } bits;
+
+ uint16_t flags;
+ };
+
+ uint8_t cm_id; /**< CM ID */
+ uint8_t group_id; /**< Group ID */
+ uint8_t granularity; /**< BW Allocation Granularity */
+ uint8_t estimated_bw; /**< Estimated_BW */
+ uint8_t allocated_bw; /**< Allocated_BW */
+ uint8_t reserved;
+};
+
+/**
+ * union dpia_notify_data_type - DPIA Notification in Outbox command
+ */
+union dpia_notification_data {
+ /**
+ * DPIA Notification for common data struct
+ */
+ struct dpia_notification_common common_data;
+
+ /**
+ * DPIA Notification for DP BW Allocation support
+ */
+ struct dpia_bw_allocation_notify_data dpia_bw_alloc;
+};
+
+/**
+ * Definition of a DPIA notification payload
+ */
+struct dpia_notification_payload {
+ struct dpia_notification_header header;
+ union dpia_notification_data data; /**< DPIA notification payload data */
+};
+
+/**
+ * Definition of a DMUB_OUT_CMD__DPIA_NOTIFICATION command.
+ */
+struct dmub_rb_cmd_dpia_notification {
+ struct dmub_cmd_header header; /**< DPIA notification header */
+ struct dpia_notification_payload payload; /**< DPIA notification payload */
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_cmd_hpd_state_query_data {
@@ -1866,7 +2002,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -1886,6 +2022,14 @@ struct dmub_cmd_psr_copy_settings_data {
* Explicit padding to 2 byte boundary.
*/
uint8_t pad3;
+ /**
+ * DSC Slice height.
+ */
+ uint16_t dsc_slice_height;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint16_t pad;
};
/**
@@ -1916,7 +2060,7 @@ struct dmub_cmd_psr_set_level_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -1943,7 +2087,7 @@ struct dmub_rb_cmd_psr_enable_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -1987,7 +2131,7 @@ struct dmub_cmd_psr_set_version_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2018,7 +2162,7 @@ struct dmub_cmd_psr_force_static_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2093,7 +2237,7 @@ struct dmub_cmd_update_dirty_rect_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2231,7 +2375,7 @@ struct dmub_cmd_update_cursor_payload0 {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2278,7 +2422,7 @@ struct dmub_cmd_psr_set_vtotal_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2316,7 +2460,7 @@ struct dmub_cmd_psr_set_power_opt_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2971,14 +3115,15 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data {
uint8_t max_ramp_step;
uint8_t pipes;
uint8_t min_refresh_in_hz;
- uint8_t padding[1];
+ uint8_t pipe_count;
+ uint8_t pipe_index[4];
};
struct dmub_cmd_fw_assisted_mclk_switch_config {
uint8_t fams_enabled;
uint8_t visual_confirm_enabled;
- uint8_t padding[2];
- struct dmub_cmd_fw_assisted_mclk_switch_pipe_data pipe_data[DMUB_MAX_STREAMS];
+ uint16_t vactive_stretch_margin_us; // Extra vblank stretch required when doing FPO + Vactive
+ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data pipe_data[DMUB_MAX_FPO_STREAMS];
};
struct dmub_rb_cmd_fw_assisted_mclk_switch {
@@ -3029,7 +3174,8 @@ struct dmub_rb_cmd_panel_cntl {
*/
struct dmub_cmd_lvtma_control_data {
uint8_t uc_pwr_action; /**< LVTMA_ACTION */
- uint8_t reserved_0[3]; /**< For future use */
+ uint8_t bypass_panel_control_wait;
+ uint8_t reserved_0[2]; /**< For future use */
uint8_t panel_inst; /**< LVTMA control instance */
uint8_t reserved_1[3]; /**< For future use */
};
@@ -3232,6 +3378,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
/**
+ * Definition of a DMUB_CMD__VBIOS_DOMAIN_CONTROL command.
+ */
+ struct dmub_rb_cmd_domain_control domain_control;
+ /**
* Definition of a DMUB_CMD__PSR_SET_VERSION command.
*/
struct dmub_rb_cmd_psr_set_version psr_set_version;
@@ -3422,6 +3572,10 @@ union dmub_rb_out_cmd {
* SET_CONFIG reply command.
*/
struct dmub_rb_cmd_dp_set_config_reply set_config_reply;
+ /**
+ * DPIA notification command.
+ */
+ struct dmub_rb_cmd_dpia_notification dpia_notification;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index a76da0131add..9c20516be066 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -130,12 +130,13 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
+ REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
+ REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
}
void dmub_dcn32_reset_release(struct dmub_srv *dmub)
{
- REG_WRITE(DMCUB_GPINT_DATAIN1, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 4a122925c3ae..92c18bfb98b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* reset the cache of the last wptr as well now that hw is reset */
+ dmub->inbox1_last_wptr = 0;
+
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* mailboxes have been reset in hw, so reset the sw state as well */
+ dmub->inbox1_last_wptr = 0;
+ dmub->inbox1_rb.wrpt = 0;
+ dmub->inbox1_rb.rptr = 0;
+ dmub->outbox0_rb.wrpt = 0;
+ dmub->outbox0_rb.rptr = 0;
+ dmub->outbox1_rb.wrpt = 0;
+ dmub->outbox1_rb.rptr = 0;
+
dmub->hw_init = false;
return DMUB_STATUS_OK;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 44502ec919a2..74189102eaec 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -92,6 +92,27 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
+ case DMUB_OUT_CMD__DPIA_NOTIFICATION:
+ notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
+ notify->link_index = cmd.dpia_notification.payload.header.instance;
+
+ if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
+
+ notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
+ cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
+ notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
+ cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
+
+ if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
+ notify->result = DPIA_BW_REQ_FAILED;
+ else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
+ notify->result = DPIA_BW_REQ_SUCCESS;
+ else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
+ notify->result = DPIA_EST_BW_CHANGED;
+ else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
+ notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
+ }
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index c3089c673975..e317089cf6ee 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -246,6 +246,7 @@ enum {
#define AMDGPU_FAMILY_GC_11_0_0 145
#define AMDGPU_FAMILY_GC_11_0_1 148
+#define AMDGPU_FAMILY_GC_11_5_0 150
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
#define GC_11_0_3_A0 0x20
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index a7ba5bd8dc16..f843fc497855 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -35,6 +35,7 @@
#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
#define DP_BRANCH_DEVICE_ID_006037 0x006037
#define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8
+#define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD
#define DP_BRANCH_HW_REV_10 0x10
#define DP_BRANCH_HW_REV_20 0x20
@@ -128,12 +129,4 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
-static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
-static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
-
-static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
-
-/*MST Dock*/
-static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
-
#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index b2df07f9e91c..c062a44db078 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -88,7 +88,10 @@ enum dpcd_phy_test_patterns {
PHY_TEST_PATTERN_PRBS23 = 0x30,
PHY_TEST_PATTERN_PRBS31 = 0x38,
PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40,
- PHY_TEST_PATTERN_SQUARE_PULSE = 0x48,
+ PHY_TEST_PATTERN_SQUARE = 0x48,
+ PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED = 0x49,
+ PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED = 0x4A,
+ PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED = 0x4B,
};
enum dpcd_test_dyn_range {
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
index 42229b4effdc..42229b4effdc 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
deleted file mode 100644
index 418fbf8c5c3a..000000000000
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DAL_I2CAUX_INTERFACE_H__
-#define __DAL_I2CAUX_INTERFACE_H__
-
-#include "dc_types.h"
-#include "gpio_service_interface.h"
-
-
-#define DEFAULT_AUX_MAX_DATA_SIZE 16
-#define AUX_MAX_DEFER_WRITE_RETRY 20
-
-struct aux_payload {
- /* set following flag to read/write I2C data,
- * reset it to read/write DPCD data */
- bool i2c_over_aux;
- /* set following flag to write data,
- * reset it to read data */
- bool write;
- bool mot;
- bool write_status_update;
-
- uint32_t address;
- uint32_t length;
- uint8_t *data;
- /*
- * used to return the reply type of the transaction
- * ignored if NULL
- */
- uint8_t *reply;
- /* expressed in milliseconds
- * zero means "use default value"
- */
- uint32_t defer_delay;
-
-};
-
-struct aux_command {
- struct aux_payload *payloads;
- uint8_t number_of_payloads;
-
- /* expressed in milliseconds
- * zero means "use default value" */
- uint32_t defer_delay;
-
- /* zero means "use default value" */
- uint32_t max_defer_write_retry;
-
- enum i2c_mot_mode mot;
-};
-
-union aux_config {
- struct {
- uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
- } bits;
- uint32_t raw;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index d1e91d31d151..cd870af5fd25 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -34,10 +34,6 @@
struct ddc;
struct irq_manager;
-enum {
- MAX_CONTROLLER_NUM = 6
-};
-
enum dp_power_state {
DP_POWER_STATE_D0 = 1,
DP_POWER_STATE_D3
@@ -60,28 +56,6 @@ enum {
DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
};
-enum link_training_result {
- LINK_TRAINING_SUCCESS,
- LINK_TRAINING_CR_FAIL_LANE0,
- LINK_TRAINING_CR_FAIL_LANE1,
- LINK_TRAINING_CR_FAIL_LANE23,
- /* CR DONE bit is cleared during EQ step */
- LINK_TRAINING_EQ_FAIL_CR,
- /* CR DONE bit is cleared but LANE0_CR_DONE is set during EQ step */
- LINK_TRAINING_EQ_FAIL_CR_PARTIAL,
- /* other failure during EQ step */
- LINK_TRAINING_EQ_FAIL_EQ,
- LINK_TRAINING_LQA_FAIL,
- /* one of the CR,EQ or symbol lock is dropped */
- LINK_TRAINING_LINK_LOSS,
- /* Abort link training (because sink unplugged) */
- LINK_TRAINING_ABORT,
- DP_128b_132b_LT_FAILED,
- DP_128b_132b_MAX_LOOP_COUNT_REACHED,
- DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
- DP_128b_132b_CDS_DONE_TIMEOUT,
-};
-
enum lttpr_mode {
LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
@@ -165,7 +139,12 @@ enum dp_test_pattern {
DP_TEST_PATTERN_PRBS23,
DP_TEST_PATTERN_PRBS31,
DP_TEST_PATTERN_264BIT_CUSTOM,
- DP_TEST_PATTERN_SQUARE_PULSE,
+ DP_TEST_PATTERN_SQUARE_BEGIN,
+ DP_TEST_PATTERN_SQUARE = DP_TEST_PATTERN_SQUARE_BEGIN,
+ DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED,
+ DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED,
+ DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,
+ DP_TEST_PATTERN_SQUARE_END = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,
/* Link Training Patterns */
DP_TEST_PATTERN_TRAINING_PATTERN1,
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index beed70179bb5..23a308c3eccb 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -104,6 +104,7 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ signal == SIGNAL_TYPE_VIRTUAL ||
dc_is_hdmi_signal(signal));
}
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index f6034213c700..67a062af3ab0 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1715,8 +1715,8 @@ static bool map_regamma_hw_to_x_user(
const struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
struct dc_transfer_func_distributed_points *tf_pts,
- bool mapUserRamp,
- bool doClamping)
+ bool map_user_ramp,
+ bool do_clamping)
{
/* setup to spare calculated ideal regamma values */
@@ -1724,7 +1724,7 @@ static bool map_regamma_hw_to_x_user(
struct hw_x_point *coords = coords_x;
const struct pwl_float_data_ex *regamma = rgb_regamma;
- if (ramp && mapUserRamp) {
+ if (ramp && map_user_ramp) {
copy_rgb_regamma_to_coordinates_x(coords,
hw_points_num,
rgb_regamma);
@@ -1744,7 +1744,7 @@ static bool map_regamma_hw_to_x_user(
}
}
- if (doClamping) {
+ if (do_clamping) {
/* this should be named differently, all it does is clamp to 0-1 */
build_new_custom_resulted_curve(hw_points_num, tf_pts);
}
@@ -1875,7 +1875,7 @@ rgb_user_alloc_fail:
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *input_tf,
- const struct dc_gamma *ramp, bool mapUserRamp)
+ const struct dc_gamma *ramp, bool map_user_ramp)
{
struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
struct dividers dividers;
@@ -1883,7 +1883,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct pwl_float_data_ex *curve = NULL;
struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
- enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ enum dc_transfer_func_predefined tf;
uint32_t i;
bool ret = false;
@@ -1891,12 +1891,12 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
return false;
/* we can use hardcoded curve for plain SRGB TF
- * If linear, it's bypass if on user ramp
+ * If linear, it's bypass if no user ramp
*/
if (input_tf->type == TF_TYPE_PREDEFINED) {
if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
- !mapUserRamp)
+ !map_user_ramp)
return true;
if (dc_caps != NULL &&
@@ -1919,7 +1919,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- if (mapUserRamp && ramp && ramp->type == GAMMA_RGB_256) {
+ if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
@@ -2007,7 +2007,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
- mapUserRamp && ramp && ramp->type == GAMMA_RGB_256,
+ map_user_ramp && ramp && ramp->type == GAMMA_RGB_256,
true);
}
@@ -2112,9 +2112,11 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
}
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct hdr_tm_params *fs_params,
- struct calculate_buffer *cal_buffer)
+ const struct dc_gamma *ramp,
+ bool map_user_ramp,
+ bool can_rom_be_used,
+ const struct hdr_tm_params *fs_params,
+ struct calculate_buffer *cal_buffer)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
@@ -2123,27 +2125,27 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
struct pwl_float_data_ex *rgb_regamma = NULL;
struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
- enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
- bool doClamping = true;
+ enum dc_transfer_func_predefined tf;
+ bool do_clamping = true;
bool ret = false;
if (output_tf->type == TF_TYPE_BYPASS)
return false;
/* we can use hardcoded curve for plain SRGB TF */
- if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+ if (output_tf->type == TF_TYPE_PREDEFINED && can_rom_be_used == true &&
output_tf->tf == TRANSFER_FUNCTION_SRGB) {
if (ramp == NULL)
return true;
if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ (!map_user_ramp && ramp->type == GAMMA_RGB_256))
return true;
}
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
- (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+ (map_user_ramp || ramp->type != GAMMA_RGB_256)) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
@@ -2164,7 +2166,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
ramp->num_entries,
dividers);
- if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+ if (ramp->type == GAMMA_RGB_256 && map_user_ramp)
scale_gamma(rgb_user, ramp, dividers);
else if (ramp->type == GAMMA_RGB_FLOAT_1024)
scale_gamma_dx(rgb_user, ramp, dividers);
@@ -2191,15 +2193,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
cal_buffer);
if (ret) {
- doClamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
- fs_params != NULL && fs_params->skip_tm == 0);
+ do_clamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+ fs_params != NULL && fs_params->skip_tm == 0);
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axis_x, rgb_regamma,
- MAX_HW_POINTS, tf_pts,
- (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
- (ramp && ramp->type != GAMMA_CS_TFM_1D),
- doClamping);
+ coordinates_x, axis_x, rgb_regamma,
+ MAX_HW_POINTS, tf_pts,
+ (map_user_ramp || (ramp && ramp->type != GAMMA_RGB_256)) &&
+ (ramp && ramp->type != GAMMA_CS_TFM_1D),
+ do_clamping);
if (ramp && ramp->type == GAMMA_CS_TFM_1D)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
@@ -2215,89 +2217,3 @@ axis_x_alloc_fail:
rgb_user_alloc_fail:
return ret;
}
-
-bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
- struct dc_transfer_func_distributed_points *points)
-{
- uint32_t i;
- bool ret = false;
- struct pwl_float_data_ex *rgb_degamma = NULL;
-
- if (trans == TRANSFER_FUNCTION_UNITY ||
- trans == TRANSFER_FUNCTION_LINEAR) {
-
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = coordinates_x[i].x;
- points->green[i] = coordinates_x[i].x;
- points->blue[i] = coordinates_x[i].x;
- }
- ret = true;
- } else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_degamma),
- GFP_KERNEL);
- if (!rgb_degamma)
- goto rgb_degamma_alloc_fail;
-
-
- build_de_pq(rgb_degamma,
- MAX_HW_POINTS,
- coordinates_x);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_degamma[i].r;
- points->green[i] = rgb_degamma[i].g;
- points->blue[i] = rgb_degamma[i].b;
- }
- ret = true;
-
- kvfree(rgb_degamma);
- } else if (trans == TRANSFER_FUNCTION_SRGB ||
- trans == TRANSFER_FUNCTION_BT709 ||
- trans == TRANSFER_FUNCTION_GAMMA22 ||
- trans == TRANSFER_FUNCTION_GAMMA24 ||
- trans == TRANSFER_FUNCTION_GAMMA26) {
- rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_degamma),
- GFP_KERNEL);
- if (!rgb_degamma)
- goto rgb_degamma_alloc_fail;
-
- build_degamma(rgb_degamma,
- MAX_HW_POINTS,
- coordinates_x,
- trans);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_degamma[i].r;
- points->green[i] = rgb_degamma[i].g;
- points->blue[i] = rgb_degamma[i].b;
- }
- ret = true;
-
- kvfree(rgb_degamma);
- } else if (trans == TRANSFER_FUNCTION_HLG) {
- rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_degamma),
- GFP_KERNEL);
- if (!rgb_degamma)
- goto rgb_degamma_alloc_fail;
-
- build_hlg_degamma(rgb_degamma,
- MAX_HW_POINTS,
- coordinates_x,
- 80, 1000);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_degamma[i].r;
- points->green[i] = rgb_degamma[i].g;
- points->blue[i] = rgb_degamma[i].b;
- }
- ret = true;
- kvfree(rgb_degamma);
- }
- points->end_exponent = 0;
- points->x_point_at_y1_red = 1;
- points->x_point_at_y1_green = 1;
- points->x_point_at_y1_blue = 1;
-
-rgb_degamma_alloc_fail:
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 2893abf48208..ee5c466613de 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -115,9 +115,6 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
-bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
- struct dc_transfer_func_distributed_points *points);
-
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
const struct regamma_lut *regamma,
struct calculate_buffer *cal_buffer,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index c2e00f7b8381..5c41a4751db4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -616,7 +616,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
unsigned int min_refresh;
unsigned int max_refresh;
@@ -649,9 +650,15 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x02;
/* PB6 = [Bit 2 = FreeSync Active] */
- if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ if (freesync_on_desktop) {
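+ /* With FreeSync on desktop, report active whenever VRR is supported and not disabled (assumed intent; comment added for clarity, matching the check below). */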
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x04;
+ } else {
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
vrr->state == VRR_STATE_ACTIVE_FIXED)
- infopacket->sb[6] |= 0x04;
+ infopacket->sb[6] |= 0x04;
+ }
min_refresh = (vrr->min_refresh_in_uhz + 500000) / 1000000;
max_refresh = (vrr->max_refresh_in_uhz + 500000) / 1000000;
@@ -898,52 +905,20 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
infopacket->valid = true;
}
-#ifndef TRIM_FSFT
-static void build_vrr_infopacket_fast_transport_data(
- bool ftActive,
- unsigned int ftOutputRate,
- struct dc_info_packet *infopacket)
-{
- /* PB9 : bit7 - fast transport Active*/
- unsigned char activeBit = (ftActive) ? 1 << 7 : 0;
-
- infopacket->sb[1] &= ~activeBit; //clear bit
- infopacket->sb[1] |= activeBit; //set bit
-
- /* PB13 : Target Output Pixel Rate [kHz] - bits 7:0 */
- infopacket->sb[13] = ftOutputRate & 0xFF;
-
- /* PB14 : Target Output Pixel Rate [kHz] - bits 15:8 */
- infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF;
-
- /* PB15 : Target Output Pixel Rate [kHz] - bits 23:16 */
- infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF;
-
-}
-#endif
static void build_vrr_infopacket_v3(enum signal_type signal,
const struct mod_vrr_params *vrr,
-#ifndef TRIM_FSFT
- bool ftActive, unsigned int ftOutputRate,
-#endif
enum color_transfer_func app_tf,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
unsigned int payload_size = 0;
build_vrr_infopacket_header_v3(signal, infopacket, &payload_size);
- build_vrr_infopacket_data_v3(vrr, infopacket);
+ build_vrr_infopacket_data_v3(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
-#ifndef TRIM_FSFT
- build_vrr_infopacket_fast_transport_data(
- ftActive,
- ftOutputRate,
- infopacket);
-#endif
-
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -985,18 +960,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
switch (packet_type) {
case PACKET_TYPE_FS_V3:
-#ifndef TRIM_FSFT
- // always populate with pixel rate.
- build_vrr_infopacket_v3(
- stream->signal, vrr,
- stream->timing.flags.FAST_TRANSPORT,
- (stream->timing.flags.FAST_TRANSPORT) ?
- stream->timing.fast_transport_output_rate_100hz :
- stream->timing.pix_clk_100hz,
- app_tf, infopacket);
-#else
- build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
-#endif
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_FS_V2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
@@ -1165,7 +1129,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
{
struct core_freesync *core_freesync = NULL;
unsigned int last_render_time_in_us = 0;
- unsigned int average_render_time_in_us = 0;
if (mod_freesync == NULL)
return;
@@ -1174,7 +1137,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
if (in_out_vrr->supported &&
in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
- unsigned int i = 0;
unsigned int oldest_index = plane->time.index + 1;
if (oldest_index >= DC_PLANE_UPDATE_TIMES_MAX)
@@ -1183,18 +1145,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
last_render_time_in_us = curr_time_stamp_in_us -
plane->time.prev_update_time_in_us;
- /* Sum off all entries except oldest one */
- for (i = 0; i < DC_PLANE_UPDATE_TIMES_MAX; i++) {
- average_render_time_in_us +=
- plane->time.time_elapsed_in_us[i];
- }
- average_render_time_in_us -=
- plane->time.time_elapsed_in_us[oldest_index];
-
- /* Add render time for current flip */
- average_render_time_in_us += last_render_time_in_us;
- average_render_time_in_us /= DC_PLANE_UPDATE_TIMES_MAX;
-
if (in_out_vrr->btr.btr_enabled) {
apply_below_the_range(core_freesync,
stream,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index eb6f9b9c504a..c62df3bcc7cb 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -26,13 +26,11 @@
#ifndef MOD_HDCP_LOG_H_
#define MOD_HDCP_LOG_H_
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
-#endif
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 3348bb97ef81..a4d344a4db9e 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t dp2_enabled;
uint8_t usb4_enabled;
};
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index edf5845f6a1f..66dc9a19aebe 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -41,4 +41,40 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
+enum adaptive_sync_type {
+ ADAPTIVE_SYNC_TYPE_NONE = 0,
+ ADAPTIVE_SYNC_TYPE_DP = 1,
+ FREESYNC_TYPE_PCON_IN_WHITELIST = 2,
+ FREESYNC_TYPE_PCON_NOT_IN_WHITELIST = 3,
+ ADAPTIVE_SYNC_TYPE_EDP = 4,
+};
+
+enum adaptive_sync_sdp_version {
+ AS_SDP_VER_0 = 0x0,
+ AS_SDP_VER_1 = 0x1,
+ AS_SDP_VER_2 = 0x2,
+};
+
+#define AS_DP_SDP_LENGTH (9)
+
+struct frame_duration_op {
+ bool support;
+ unsigned char frame_duration_hex;
+};
+
+struct AS_Df_params {
+ bool supportMode;
+ struct frame_duration_op increase;
+ struct frame_duration_op decrease;
+};
+
+void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
+ enum adaptive_sync_type asType, const struct AS_Df_params *param,
+ struct dc_info_packet *info_packet);
+
+void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream,
+ const struct AS_Df_params *param, struct dc_info_packet *info_packet);
+
+void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 69691058ab89..ec64f19e1786 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -519,3 +519,58 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
}
+void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
+ enum adaptive_sync_type asType,
+ const struct AS_Df_params *param,
+ struct dc_info_packet *info_packet)
+{
+ info_packet->valid = false;
+
+ memset(info_packet, 0, sizeof(struct dc_info_packet));
+
+ switch (asType) {
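+ /* Select the SDP layout by adaptive-sync type: version 2 for DP adaptive sync, version 1 for PCONs on the FreeSync whitelist (descriptive comment, mirrors the cases below). */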
+ case ADAPTIVE_SYNC_TYPE_DP:
+ if (stream != NULL)
+ mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
+ break;
+ case FREESYNC_TYPE_PCON_IN_WHITELIST:
+ mod_build_adaptive_sync_infopacket_v1(info_packet);
+ break;
+ case ADAPTIVE_SYNC_TYPE_NONE:
+ case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST:
+ default:
+ break;
+ }
+}
+
+void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet)
+{
+ info_packet->valid = true;
+ // HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length}
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x22;
+ info_packet->hb2 = AS_SDP_VER_1;
+ info_packet->hb3 = 0x00;
+}
+
+void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream,
+ const struct AS_Df_params *param,
+ struct dc_info_packet *info_packet)
+{
+ info_packet->valid = true;
+ // HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length}
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x22;
+ info_packet->hb2 = AS_SDP_VER_2;
+ info_packet->hb3 = AS_DP_SDP_LENGTH;
+
+ //Payload
+ info_packet->sb[0] = param->supportMode; //1: AVT; 0: FAVT
+ info_packet->sb[1] = (stream->timing.v_total & 0x00FF);
+ info_packet->sb[2] = (stream->timing.v_total & 0xFF00) >> 8;
+ //info_packet->sb[3] = 0x00; Target RR, not used for AVT
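+ /* sb[4]: bit 6 = frame duration increase supported, bit 7 = frame duration decrease supported */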
+ info_packet->sb[4] = (param->increase.support << 6 | param->decrease.support << 7);
+ info_packet->sb[5] = param->increase.frame_duration_hex;
+ info_packet->sb[6] = param->decrease.frame_duration_hex;
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 9b5d9b2c9a6a..51e76bce92ea 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -678,13 +678,8 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
bool result = false;
uint32_t i, j = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (res_pool->abm == NULL && res_pool->multiple_abms[inst] == NULL)
return false;
-#else
- if (res_pool->abm == NULL)
- return false;
-#endif
memset(&ram_table, 0, sizeof(ram_table));
memset(&config, 0, sizeof(config));
@@ -737,12 +732,10 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.min_abm_backlight = ram_table.min_abm_backlight;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (res_pool->multiple_abms[inst]) {
result = res_pool->multiple_abms[inst]->funcs->init_abm_config(
res_pool->multiple_abms[inst], (char *)(&config), sizeof(struct abm_config_table), inst);
} else
-#endif
result = res_pool->abm->funcs->init_abm_config(
res_pool->abm, (char *)(&config), sizeof(struct abm_config_table), 0);
@@ -765,8 +758,8 @@ bool dmcu_load_iram(struct dmcu *dmcu,
if (dmcu->dmcu_version.abm_version == 0x24) {
fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
- result = dmcu->funcs->load_iram(
- dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ result = dmcu->funcs->load_iram(dmcu, 0, (char *)(&ram_table),
+ IRAM_RESERVE_AREA_START_V2_2);
} else if (dmcu->dmcu_version.abm_version == 0x23) {
fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
@@ -916,3 +909,38 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
{
return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
}
+
+bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config)
+{
+ uint16_t pic_height;
+ uint16_t slice_height;
+
+ config->dsc_slice_height = 0;
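+ /* For eDP links where DSC is unsupported, disabled, or not in use, leave dsc_slice_height at 0 and report success (comment reflects the early return below). */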
+ if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
+ (!dc->caps.edp_dsc_support ||
+ link->panel_config.dsc.disable_dsc_edp ||
+ !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ !stream->timing.dsc_cfg.num_slices_v))
+ return true;
+
+ pic_height = stream->timing.v_addressable +
+ stream->timing.v_border_top + stream->timing.v_border_bottom;
+
+ if (stream->timing.dsc_cfg.num_slices_v == 0)
+ return false;
+
+ slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
+ config->dsc_slice_height = slice_height;
+
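+ /* The DSC slice height must be an exact multiple of the SU Y granularity; otherwise assert and return failure. */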
+ if (slice_height) {
+ if (config->su_y_granularity &&
+ (slice_height % config->su_y_granularity)) {
+ ASSERT(0);
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 316452e9dbc9..1d3079e56799 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
const struct dc_stream_state *stream);
bool mod_power_only_edp(const struct dc_state *context,
const struct dc_stream_state *stream);
+bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f175e65b853a..e4a22c68517d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_offset.h
new file mode 100644
index 000000000000..ff74a661f477
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_offset.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _athub_1_8_0_OFFSET_HEADER
+#define _athub_1_8_0_OFFSET_HEADER
+
+
+
+// addressBlock: aid_athub_atsdec
+// base address: 0x3080
+#define regATC_ATS_CNTL 0x0000
+#define regATC_ATS_CNTL_BASE_IDX 0
+#define regATC_ATS_CNTL2 0x0001
+#define regATC_ATS_CNTL2_BASE_IDX 0
+#define regATC_ATS_CNTL3 0x0002
+#define regATC_ATS_CNTL3_BASE_IDX 0
+#define regATC_ATS_CNTL4 0x0003
+#define regATC_ATS_CNTL4_BASE_IDX 0
+#define regATC_ATS_MISC_CNTL 0x0005
+#define regATC_ATS_MISC_CNTL_BASE_IDX 0
+#define regATC_ATS_STATUS 0x0009
+#define regATC_ATS_STATUS_BASE_IDX 0
+#define regATC_PERFCOUNTER0_CFG 0x000a
+#define regATC_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regATC_PERFCOUNTER1_CFG 0x000b
+#define regATC_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regATC_PERFCOUNTER2_CFG 0x000c
+#define regATC_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regATC_PERFCOUNTER3_CFG 0x000d
+#define regATC_PERFCOUNTER3_CFG_BASE_IDX 0
+#define regATC_PERFCOUNTER_RSLT_CNTL 0x000e
+#define regATC_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regATC_PERFCOUNTER_LO 0x000f
+#define regATC_PERFCOUNTER_LO_BASE_IDX 0
+#define regATC_PERFCOUNTER_HI 0x0010
+#define regATC_PERFCOUNTER_HI_BASE_IDX 0
+#define regATC_ATS_FAULT_CNTL 0x0012
+#define regATC_ATS_FAULT_CNTL_BASE_IDX 0
+#define regATC_ATS_FAULT_STATUS_INFO 0x0013
+#define regATC_ATS_FAULT_STATUS_INFO_BASE_IDX 0
+#define regATC_ATS_FAULT_STATUS_INFO2 0x0014
+#define regATC_ATS_FAULT_STATUS_INFO2_BASE_IDX 0
+#define regATC_ATS_FAULT_STATUS_INFO3 0x0015
+#define regATC_ATS_FAULT_STATUS_INFO3_BASE_IDX 0
+#define regATC_ATS_FAULT_STATUS_INFO4 0x0016
+#define regATC_ATS_FAULT_STATUS_INFO4_BASE_IDX 0
+#define regATC_ATS_FAULT_STATUS_ADDR 0x0017
+#define regATC_ATS_FAULT_STATUS_ADDR_BASE_IDX 0
+#define regATC_ATS_DEFAULT_PAGE_LOW 0x0018
+#define regATC_ATS_DEFAULT_PAGE_LOW_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL 0x001f
+#define regATHUB_PCIE_ATS_CNTL_BASE_IDX 0
+#define regATHUB_PCIE_PASID_CNTL 0x0020
+#define regATHUB_PCIE_PASID_CNTL_BASE_IDX 0
+#define regATHUB_PCIE_PAGE_REQ_CNTL 0x0021
+#define regATHUB_PCIE_PAGE_REQ_CNTL_BASE_IDX 0
+#define regATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x0022
+#define regATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC_BASE_IDX 0
+#define regATHUB_COMMAND 0x0023
+#define regATHUB_COMMAND_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_0 0x0024
+#define regATHUB_PCIE_ATS_CNTL_VF_0_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_1 0x0025
+#define regATHUB_PCIE_ATS_CNTL_VF_1_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_2 0x0026
+#define regATHUB_PCIE_ATS_CNTL_VF_2_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_3 0x0027
+#define regATHUB_PCIE_ATS_CNTL_VF_3_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_4 0x0028
+#define regATHUB_PCIE_ATS_CNTL_VF_4_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_5 0x0029
+#define regATHUB_PCIE_ATS_CNTL_VF_5_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_6 0x002a
+#define regATHUB_PCIE_ATS_CNTL_VF_6_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_7 0x002b
+#define regATHUB_PCIE_ATS_CNTL_VF_7_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_8 0x002c
+#define regATHUB_PCIE_ATS_CNTL_VF_8_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_9 0x002d
+#define regATHUB_PCIE_ATS_CNTL_VF_9_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_10 0x002e
+#define regATHUB_PCIE_ATS_CNTL_VF_10_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_11 0x002f
+#define regATHUB_PCIE_ATS_CNTL_VF_11_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_12 0x0030
+#define regATHUB_PCIE_ATS_CNTL_VF_12_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_13 0x0031
+#define regATHUB_PCIE_ATS_CNTL_VF_13_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_14 0x0032
+#define regATHUB_PCIE_ATS_CNTL_VF_14_BASE_IDX 0
+#define regATHUB_PCIE_ATS_CNTL_VF_15 0x0033
+#define regATHUB_PCIE_ATS_CNTL_VF_15_BASE_IDX 0
+#define regATHUB_SHARED_VIRT_RESET_REQ 0x0034
+#define regATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regATHUB_SHARED_ACTIVE_FCN_ID 0x0036
+#define regATHUB_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define regATC_ATS_SDPPORT_CNTL 0x0038
+#define regATC_ATS_SDPPORT_CNTL_BASE_IDX 0
+#define regATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x003e
+#define regATC_VMID_PASID_MAPPING_UPDATE_STATUS_BASE_IDX 0
+#define regATC_VMID0_PASID_MAPPING 0x003f
+#define regATC_VMID0_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID1_PASID_MAPPING 0x0040
+#define regATC_VMID1_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID2_PASID_MAPPING 0x0041
+#define regATC_VMID2_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID3_PASID_MAPPING 0x0042
+#define regATC_VMID3_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID4_PASID_MAPPING 0x0043
+#define regATC_VMID4_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID5_PASID_MAPPING 0x0044
+#define regATC_VMID5_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID6_PASID_MAPPING 0x0045
+#define regATC_VMID6_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID7_PASID_MAPPING 0x0046
+#define regATC_VMID7_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID8_PASID_MAPPING 0x0047
+#define regATC_VMID8_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID9_PASID_MAPPING 0x0048
+#define regATC_VMID9_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID10_PASID_MAPPING 0x0049
+#define regATC_VMID10_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID11_PASID_MAPPING 0x004a
+#define regATC_VMID11_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID12_PASID_MAPPING 0x004b
+#define regATC_VMID12_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID13_PASID_MAPPING 0x004c
+#define regATC_VMID13_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID14_PASID_MAPPING 0x004d
+#define regATC_VMID14_PASID_MAPPING_BASE_IDX 0
+#define regATC_VMID15_PASID_MAPPING 0x004e
+#define regATC_VMID15_PASID_MAPPING_BASE_IDX 0
+#define regATC_TRANS_FAULT_RSPCNTRL 0x0050
+#define regATC_TRANS_FAULT_RSPCNTRL_BASE_IDX 0
+#define regATC_ATS_VMID_STATUS 0x0051
+#define regATC_ATS_VMID_STATUS_BASE_IDX 0
+#define regATHUB_MISC_CNTL 0x0055
+#define regATHUB_MISC_CNTL_BASE_IDX 0
+#define regATHUB_MEM_POWER_LS 0x0056
+#define regATHUB_MEM_POWER_LS_BASE_IDX 0
+#define regATHUB_IH_CREDIT 0x0057
+#define regATHUB_IH_CREDIT_BASE_IDX 0
+
+
+// addressBlock: aid_athub_xpbdec
+// base address: 0x46000
+#define regXPB_RTR_SRC_APRTR0 0x0000
+#define regXPB_RTR_SRC_APRTR0_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR1 0x0001
+#define regXPB_RTR_SRC_APRTR1_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR2 0x0002
+#define regXPB_RTR_SRC_APRTR2_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR3 0x0003
+#define regXPB_RTR_SRC_APRTR3_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR4 0x0004
+#define regXPB_RTR_SRC_APRTR4_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR5 0x0005
+#define regXPB_RTR_SRC_APRTR5_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR6 0x0006
+#define regXPB_RTR_SRC_APRTR6_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR7 0x0007
+#define regXPB_RTR_SRC_APRTR7_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR8 0x0008
+#define regXPB_RTR_SRC_APRTR8_BASE_IDX 1
+#define regXPB_RTR_SRC_APRTR9 0x0009
+#define regXPB_RTR_SRC_APRTR9_BASE_IDX 1
+#define regXPB_XDMA_RTR_SRC_APRTR0 0x000a
+#define regXPB_XDMA_RTR_SRC_APRTR0_BASE_IDX 1
+#define regXPB_XDMA_RTR_SRC_APRTR1 0x000b
+#define regXPB_XDMA_RTR_SRC_APRTR1_BASE_IDX 1
+#define regXPB_XDMA_RTR_SRC_APRTR2 0x000c
+#define regXPB_XDMA_RTR_SRC_APRTR2_BASE_IDX 1
+#define regXPB_XDMA_RTR_SRC_APRTR3 0x000d
+#define regXPB_XDMA_RTR_SRC_APRTR3_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP0 0x000e
+#define regXPB_RTR_DEST_MAP0_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP1 0x000f
+#define regXPB_RTR_DEST_MAP1_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP2 0x0010
+#define regXPB_RTR_DEST_MAP2_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP3 0x0011
+#define regXPB_RTR_DEST_MAP3_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP4 0x0012
+#define regXPB_RTR_DEST_MAP4_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP5 0x0013
+#define regXPB_RTR_DEST_MAP5_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP6 0x0014
+#define regXPB_RTR_DEST_MAP6_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP7 0x0015
+#define regXPB_RTR_DEST_MAP7_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP8 0x0016
+#define regXPB_RTR_DEST_MAP8_BASE_IDX 1
+#define regXPB_RTR_DEST_MAP9 0x0017
+#define regXPB_RTR_DEST_MAP9_BASE_IDX 1
+#define regXPB_XDMA_RTR_DEST_MAP0 0x0018
+#define regXPB_XDMA_RTR_DEST_MAP0_BASE_IDX 1
+#define regXPB_XDMA_RTR_DEST_MAP1 0x0019
+#define regXPB_XDMA_RTR_DEST_MAP1_BASE_IDX 1
+#define regXPB_XDMA_RTR_DEST_MAP2 0x001a
+#define regXPB_XDMA_RTR_DEST_MAP2_BASE_IDX 1
+#define regXPB_XDMA_RTR_DEST_MAP3 0x001b
+#define regXPB_XDMA_RTR_DEST_MAP3_BASE_IDX 1
+#define regXPB_CLG_CFG0 0x001c
+#define regXPB_CLG_CFG0_BASE_IDX 1
+#define regXPB_CLG_CFG1 0x001d
+#define regXPB_CLG_CFG1_BASE_IDX 1
+#define regXPB_CLG_CFG2 0x001e
+#define regXPB_CLG_CFG2_BASE_IDX 1
+#define regXPB_CLG_CFG3 0x001f
+#define regXPB_CLG_CFG3_BASE_IDX 1
+#define regXPB_CLG_CFG4 0x0020
+#define regXPB_CLG_CFG4_BASE_IDX 1
+#define regXPB_CLG_CFG5 0x0021
+#define regXPB_CLG_CFG5_BASE_IDX 1
+#define regXPB_CLG_CFG6 0x0022
+#define regXPB_CLG_CFG6_BASE_IDX 1
+#define regXPB_CLG_CFG7 0x0023
+#define regXPB_CLG_CFG7_BASE_IDX 1
+#define regXPB_CLG_EXTRA0 0x0024
+#define regXPB_CLG_EXTRA0_BASE_IDX 1
+#define regXPB_CLG_EXTRA1 0x0025
+#define regXPB_CLG_EXTRA1_BASE_IDX 1
+#define regXPB_CLG_EXTRA_MSK 0x0026
+#define regXPB_CLG_EXTRA_MSK_BASE_IDX 1
+#define regXPB_LB_ADDR 0x0027
+#define regXPB_LB_ADDR_BASE_IDX 1
+#define regXPB_WCB_STS 0x0028
+#define regXPB_WCB_STS_BASE_IDX 1
+#define regXPB_HST_CFG 0x0029
+#define regXPB_HST_CFG_BASE_IDX 1
+#define regXPB_P2P_BAR_CFG 0x002a
+#define regXPB_P2P_BAR_CFG_BASE_IDX 1
+#define regXPB_P2P_BAR0 0x002b
+#define regXPB_P2P_BAR0_BASE_IDX 1
+#define regXPB_P2P_BAR1 0x002c
+#define regXPB_P2P_BAR1_BASE_IDX 1
+#define regXPB_P2P_BAR2 0x002d
+#define regXPB_P2P_BAR2_BASE_IDX 1
+#define regXPB_P2P_BAR3 0x002e
+#define regXPB_P2P_BAR3_BASE_IDX 1
+#define regXPB_P2P_BAR4 0x002f
+#define regXPB_P2P_BAR4_BASE_IDX 1
+#define regXPB_P2P_BAR5 0x0030
+#define regXPB_P2P_BAR5_BASE_IDX 1
+#define regXPB_P2P_BAR6 0x0031
+#define regXPB_P2P_BAR6_BASE_IDX 1
+#define regXPB_P2P_BAR7 0x0032
+#define regXPB_P2P_BAR7_BASE_IDX 1
+#define regXPB_P2P_BAR_SETUP 0x0033
+#define regXPB_P2P_BAR_SETUP_BASE_IDX 1
+#define regXPB_P2P_BAR_DELTA_ABOVE 0x0035
+#define regXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 1
+#define regXPB_P2P_BAR_DELTA_BELOW 0x0036
+#define regXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR0 0x0037
+#define regXPB_PEER_SYS_BAR0_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR1 0x0038
+#define regXPB_PEER_SYS_BAR1_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR2 0x0039
+#define regXPB_PEER_SYS_BAR2_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR3 0x003a
+#define regXPB_PEER_SYS_BAR3_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR4 0x003b
+#define regXPB_PEER_SYS_BAR4_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR5 0x003c
+#define regXPB_PEER_SYS_BAR5_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR6 0x003d
+#define regXPB_PEER_SYS_BAR6_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR7 0x003e
+#define regXPB_PEER_SYS_BAR7_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR8 0x003f
+#define regXPB_PEER_SYS_BAR8_BASE_IDX 1
+#define regXPB_PEER_SYS_BAR9 0x0040
+#define regXPB_PEER_SYS_BAR9_BASE_IDX 1
+#define regXPB_XDMA_PEER_SYS_BAR0 0x0041
+#define regXPB_XDMA_PEER_SYS_BAR0_BASE_IDX 1
+#define regXPB_XDMA_PEER_SYS_BAR1 0x0042
+#define regXPB_XDMA_PEER_SYS_BAR1_BASE_IDX 1
+#define regXPB_XDMA_PEER_SYS_BAR2 0x0043
+#define regXPB_XDMA_PEER_SYS_BAR2_BASE_IDX 1
+#define regXPB_XDMA_PEER_SYS_BAR3 0x0044
+#define regXPB_XDMA_PEER_SYS_BAR3_BASE_IDX 1
+#define regXPB_CLK_GAT 0x0045
+#define regXPB_CLK_GAT_BASE_IDX 1
+#define regXPB_INTF_CFG 0x0046
+#define regXPB_INTF_CFG_BASE_IDX 1
+#define regXPB_INTF_STS 0x0047
+#define regXPB_INTF_STS_BASE_IDX 1
+#define regXPB_PIPE_STS 0x0048
+#define regXPB_PIPE_STS_BASE_IDX 1
+#define regXPB_SUB_CTRL 0x0049
+#define regXPB_SUB_CTRL_BASE_IDX 1
+#define regXPB_MAP_INVERT_FLUSH_NUM_LSB 0x004a
+#define regXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 1
+#define regXPB_PERF_KNOBS 0x004b
+#define regXPB_PERF_KNOBS_BASE_IDX 1
+#define regXPB_STICKY 0x004c
+#define regXPB_STICKY_BASE_IDX 1
+#define regXPB_STICKY_W1C 0x004d
+#define regXPB_STICKY_W1C_BASE_IDX 1
+#define regXPB_MISC_CFG 0x004e
+#define regXPB_MISC_CFG_BASE_IDX 1
+#define regXPB_INTF_CFG2 0x004f
+#define regXPB_INTF_CFG2_BASE_IDX 1
+#define regXPB_CLG_EXTRA_RD 0x0050
+#define regXPB_CLG_EXTRA_RD_BASE_IDX 1
+#define regXPB_CLG_EXTRA_MSK_RD 0x0051
+#define regXPB_CLG_EXTRA_MSK_RD_BASE_IDX 1
+#define regXPB_CLG_GFX_MATCH 0x0052
+#define regXPB_CLG_GFX_MATCH_BASE_IDX 1
+#define regXPB_CLG_GFX_MATCH_VLD 0x0053
+#define regXPB_CLG_GFX_MATCH_VLD_BASE_IDX 1
+#define regXPB_CLG_GFX_MATCH_MSK 0x0054
+#define regXPB_CLG_GFX_MATCH_MSK_BASE_IDX 1
+#define regXPB_CLG_MM_MATCH 0x0055
+#define regXPB_CLG_MM_MATCH_BASE_IDX 1
+#define regXPB_CLG_MM_MATCH_VLD 0x0056
+#define regXPB_CLG_MM_MATCH_VLD_BASE_IDX 1
+#define regXPB_CLG_MM_MATCH_MSK 0x0057
+#define regXPB_CLG_MM_MATCH_MSK_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING0 0x0058
+#define regXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING1 0x0059
+#define regXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING2 0x005a
+#define regXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING3 0x005b
+#define regXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING4 0x005c
+#define regXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING5 0x005d
+#define regXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING6 0x005e
+#define regXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 1
+#define regXPB_CLG_GFX_UNITID_MAPPING7 0x005f
+#define regXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 1
+#define regXPB_CLG_MM_UNITID_MAPPING0 0x0060
+#define regXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 1
+#define regXPB_CLG_MM_UNITID_MAPPING1 0x0061
+#define regXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 1
+#define regXPB_CLG_MM_UNITID_MAPPING2 0x0062
+#define regXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 1
+#define regXPB_CLG_MM_UNITID_MAPPING3 0x0063
+#define regXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 1
+
+
+// addressBlock: aid_athub_rpbdec
+// base address: 0x46200
+#define regRPB_PASSPW_CONF 0x0080
+#define regRPB_PASSPW_CONF_BASE_IDX 1
+#define regRPB_BLOCKLEVEL_CONF 0x0081
+#define regRPB_BLOCKLEVEL_CONF_BASE_IDX 1
+#define regRPB_TAG_CONF 0x0082
+#define regRPB_TAG_CONF_BASE_IDX 1
+#define regRPB_TAG_CONF2 0x0083
+#define regRPB_TAG_CONF2_BASE_IDX 1
+#define regRPB_ARB_CNTL 0x0085
+#define regRPB_ARB_CNTL_BASE_IDX 1
+#define regRPB_ARB_CNTL2 0x0086
+#define regRPB_ARB_CNTL2_BASE_IDX 1
+#define regRPB_BIF_CNTL 0x0087
+#define regRPB_BIF_CNTL_BASE_IDX 1
+#define regRPB_BIF_CNTL2 0x0088
+#define regRPB_BIF_CNTL2_BASE_IDX 1
+#define regRPB_PERF_COUNTER_CNTL 0x008a
+#define regRPB_PERF_COUNTER_CNTL_BASE_IDX 1
+#define regRPB_DEINTRLV_COMBINE_CNTL 0x008c
+#define regRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 1
+#define regRPB_VC_SWITCH_RDWR 0x008d
+#define regRPB_VC_SWITCH_RDWR_BASE_IDX 1
+#define regRPB_PERFCOUNTER_LO 0x008e
+#define regRPB_PERFCOUNTER_LO_BASE_IDX 1
+#define regRPB_PERFCOUNTER_HI 0x008f
+#define regRPB_PERFCOUNTER_HI_BASE_IDX 1
+#define regRPB_PERFCOUNTER0_CFG 0x0090
+#define regRPB_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regRPB_PERFCOUNTER1_CFG 0x0091
+#define regRPB_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regRPB_PERFCOUNTER2_CFG 0x0092
+#define regRPB_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regRPB_PERFCOUNTER3_CFG 0x0093
+#define regRPB_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regRPB_PERFCOUNTER_RSLT_CNTL 0x0094
+#define regRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regRPB_ATS_CNTL 0x0096
+#define regRPB_ATS_CNTL_BASE_IDX 1
+#define regRPB_ATS_CNTL2 0x0097
+#define regRPB_ATS_CNTL2_BASE_IDX 1
+#define regRPB_SDPPORT_CNTL 0x0098
+#define regRPB_SDPPORT_CNTL_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_sh_mask.h
new file mode 100644
index 000000000000..91454c528ad7
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_sh_mask.h
@@ -0,0 +1,1807 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _athub_1_8_0_SH_MASK_HEADER
+#define _athub_1_8_0_SH_MASK_HEADER
+
+
+// addressBlock: aid_athub_atsdec
+//ATC_ATS_CNTL
+#define ATC_ATS_CNTL__DISABLE_ATC__SHIFT 0x0
+#define ATC_ATS_CNTL__DISABLE_PRI__SHIFT 0x1
+#define ATC_ATS_CNTL__DISABLE_PASID__SHIFT 0x2
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB__SHIFT 0x8
+#define ATC_ATS_CNTL__DEBUG_ECO__SHIFT 0x10
+#define ATC_ATS_CNTL__TRANS_EXE_RETURN__SHIFT 0x16
+#define ATC_ATS_CNTL__DISABLE_ATC_MASK 0x00000001L
+#define ATC_ATS_CNTL__DISABLE_PRI_MASK 0x00000002L
+#define ATC_ATS_CNTL__DISABLE_PASID_MASK 0x00000004L
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB_MASK 0x00003F00L
+#define ATC_ATS_CNTL__DEBUG_ECO_MASK 0x000F0000L
+#define ATC_ATS_CNTL__TRANS_EXE_RETURN_MASK 0x00C00000L
+//ATC_ATS_CNTL2
+#define ATC_ATS_CNTL2__CREDITS_ATS_RPB_VC5TR__SHIFT 0x0
+#define ATC_ATS_CNTL2__CREDITS_ATS_RPB_VC0TR__SHIFT 0x8
+#define ATC_ATS_CNTL2__CREDITS_ATS_RPB_PRINV__SHIFT 0x10
+#define ATC_ATS_CNTL2__TRANSLATION_STALL__SHIFT 0x18
+#define ATC_ATS_CNTL2__GC_TRANS_VC5_ENABLE__SHIFT 0x19
+#define ATC_ATS_CNTL2__MM_TRANS_VC5_ENABLE__SHIFT 0x1a
+#define ATC_ATS_CNTL2__RESERVED__SHIFT 0x1b
+#define ATC_ATS_CNTL2__CREDITS_ATS_RPB_VC5TR_MASK 0x000000FFL
+#define ATC_ATS_CNTL2__CREDITS_ATS_RPB_VC0TR_MASK 0x0000FF00L
+#define ATC_ATS_CNTL2__CREDITS_ATS_RPB_PRINV_MASK 0x00FF0000L
+#define ATC_ATS_CNTL2__TRANSLATION_STALL_MASK 0x01000000L
+#define ATC_ATS_CNTL2__GC_TRANS_VC5_ENABLE_MASK 0x02000000L
+#define ATC_ATS_CNTL2__MM_TRANS_VC5_ENABLE_MASK 0x04000000L
+#define ATC_ATS_CNTL2__RESERVED_MASK 0xF8000000L
+//ATC_ATS_CNTL3
+#define ATC_ATS_CNTL3__RESERVED__SHIFT 0x0
+#define ATC_ATS_CNTL3__RESERVED_MASK 0xFFFFFFFFL
+//ATC_ATS_CNTL4
+#define ATC_ATS_CNTL4__RESERVED__SHIFT 0x0
+#define ATC_ATS_CNTL4__RESERVED_MASK 0xFFFFFFFFL
+//ATC_ATS_MISC_CNTL
+#define ATC_ATS_MISC_CNTL__TRANS_RESP_IN_INV_COLLISION_HOST__SHIFT 0x10
+#define ATC_ATS_MISC_CNTL__TRANS_RESP_IN_INV_COLLISION_GUEST__SHIFT 0x11
+#define ATC_ATS_MISC_CNTL__DEBUG_COLLISION__SHIFT 0x12
+#define ATC_ATS_MISC_CNTL__EFFECTIVE_TRANS_WORK_QUEUE__SHIFT 0x13
+#define ATC_ATS_MISC_CNTL__TRANS_RESP_NULL_PASID_SEL__SHIFT 0x1d
+#define ATC_ATS_MISC_CNTL__RESERVED__SHIFT 0x1e
+#define ATC_ATS_MISC_CNTL__TRANS_RESP_IN_INV_COLLISION_HOST_MASK 0x00010000L
+#define ATC_ATS_MISC_CNTL__TRANS_RESP_IN_INV_COLLISION_GUEST_MASK 0x00020000L
+#define ATC_ATS_MISC_CNTL__DEBUG_COLLISION_MASK 0x00040000L
+#define ATC_ATS_MISC_CNTL__EFFECTIVE_TRANS_WORK_QUEUE_MASK 0x1FF80000L
+#define ATC_ATS_MISC_CNTL__TRANS_RESP_NULL_PASID_SEL_MASK 0x20000000L
+#define ATC_ATS_MISC_CNTL__RESERVED_MASK 0xC0000000L
+//ATC_ATS_STATUS
+#define ATC_ATS_STATUS__BUSY__SHIFT 0x0
+#define ATC_ATS_STATUS__CRASHED__SHIFT 0x1
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT 0x2
+#define ATC_ATS_STATUS__FED_IND__SHIFT 0x3
+#define ATC_ATS_STATUS__BUSY_MASK 0x00000001L
+#define ATC_ATS_STATUS__CRASHED_MASK 0x00000002L
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK 0x00000004L
+#define ATC_ATS_STATUS__FED_IND_MASK 0x00000008L
+//ATC_PERFCOUNTER0_CFG
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER1_CFG
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER2_CFG
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER3_CFG
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER_RSLT_CNTL
+#define ATC_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATC_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATC_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATC_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATC_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATC_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATC_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATC_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//ATC_PERFCOUNTER_LO
+#define ATC_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATC_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATC_PERFCOUNTER_HI
+#define ATC_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATC_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATC_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATC_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//ATC_ATS_FAULT_CNTL
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG__SHIFT 0x0
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE__SHIFT 0xa
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE__SHIFT 0x14
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG_MASK 0x000001FFL
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE_MASK 0x0007FC00L
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE_MASK 0x1FF00000L
+//ATC_ATS_FAULT_STATUS_INFO
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_INFO__VMID_ALL__SHIFT 0x9
+#define ATC_ATS_FAULT_STATUS_INFO__VMID__SHIFT 0xa
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO__SHIFT 0xf
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2__SHIFT 0x10
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION__SHIFT 0x11
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST__SHIFT 0x12
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS__SHIFT 0x13
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH__SHIFT 0x18
+#define ATC_ATS_FAULT_STATUS_INFO__PHYSICAL_ADDR_HIGH__SHIFT 0x1c
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE_MASK 0x000001FFL
+#define ATC_ATS_FAULT_STATUS_INFO__VMID_ALL_MASK 0x00000200L
+#define ATC_ATS_FAULT_STATUS_INFO__VMID_MASK 0x00007C00L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO_MASK 0x00008000L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2_MASK 0x00010000L
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION_MASK 0x00020000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST_MASK 0x00040000L
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS_MASK 0x00F80000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH_MASK 0x0F000000L
+#define ATC_ATS_FAULT_STATUS_INFO__PHYSICAL_ADDR_HIGH_MASK 0xF0000000L
+//ATC_ATS_FAULT_STATUS_INFO2
+#define ATC_ATS_FAULT_STATUS_INFO2__VF__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_INFO2__VFID__SHIFT 0x1
+#define ATC_ATS_FAULT_STATUS_INFO2__L2NUM__SHIFT 0x9
+#define ATC_ATS_FAULT_STATUS_INFO2__INV_VMID_GFX1__SHIFT 0xd
+#define ATC_ATS_FAULT_STATUS_INFO2__INV_VMID_GFX2__SHIFT 0x13
+#define ATC_ATS_FAULT_STATUS_INFO2__INV_VMID_GFX3__SHIFT 0x19
+#define ATC_ATS_FAULT_STATUS_INFO2__VF_MASK 0x00000001L
+#define ATC_ATS_FAULT_STATUS_INFO2__VFID_MASK 0x0000001EL
+#define ATC_ATS_FAULT_STATUS_INFO2__L2NUM_MASK 0x00001E00L
+#define ATC_ATS_FAULT_STATUS_INFO2__INV_VMID_GFX1_MASK 0x0007E000L
+#define ATC_ATS_FAULT_STATUS_INFO2__INV_VMID_GFX2_MASK 0x01F80000L
+#define ATC_ATS_FAULT_STATUS_INFO2__INV_VMID_GFX3_MASK 0x7E000000L
+//ATC_ATS_FAULT_STATUS_INFO3
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX4__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX5__SHIFT 0x6
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX6__SHIFT 0xc
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX7__SHIFT 0x12
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_MM0__SHIFT 0x18
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX4_MASK 0x0000003FL
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX5_MASK 0x00000FC0L
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX6_MASK 0x0003F000L
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_GFX7_MASK 0x00FC0000L
+#define ATC_ATS_FAULT_STATUS_INFO3__INV_VMID_MM0_MASK 0x3F000000L
+//ATC_ATS_FAULT_STATUS_INFO4
+#define ATC_ATS_FAULT_STATUS_INFO4__INV_VMID_MM1__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_INFO4__INV_VMID_MM2__SHIFT 0x6
+#define ATC_ATS_FAULT_STATUS_INFO4__INV_VMID_MM3__SHIFT 0xc
+#define ATC_ATS_FAULT_STATUS_INFO4__INV_VMID_MM1_MASK 0x0000003FL
+#define ATC_ATS_FAULT_STATUS_INFO4__INV_VMID_MM2_MASK 0x00000FC0L
+#define ATC_ATS_FAULT_STATUS_INFO4__INV_VMID_MM3_MASK 0x0003F000L
+//ATC_ATS_FAULT_STATUS_ADDR
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR_MASK 0xFFFFFFFFL
+//ATC_ATS_DEFAULT_PAGE_LOW
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE__SHIFT 0x0
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE_MASK 0xFFFFFFFFL
+//ATHUB_PCIE_ATS_CNTL
+#define ATHUB_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define ATHUB_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define ATHUB_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_PASID_CNTL
+#define ATHUB_PCIE_PASID_CNTL__PASID_EN__SHIFT 0x10
+#define ATHUB_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x11
+#define ATHUB_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x12
+#define ATHUB_PCIE_PASID_CNTL__PASID_EN_MASK 0x00010000L
+#define ATHUB_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x00020000L
+#define ATHUB_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x00040000L
+//ATHUB_PCIE_PAGE_REQ_CNTL
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x00000001L
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x00000002L
+//ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC
+#define ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
+#define ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
+//ATHUB_COMMAND
+#define ATHUB_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define ATHUB_COMMAND__BUS_MASTER_EN_MASK 0x00000004L
+//ATHUB_PCIE_ATS_CNTL_VF_0
+#define ATHUB_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_1
+#define ATHUB_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_2
+#define ATHUB_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_3
+#define ATHUB_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_4
+#define ATHUB_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_5
+#define ATHUB_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_6
+#define ATHUB_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_7
+#define ATHUB_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_8
+#define ATHUB_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_9
+#define ATHUB_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_10
+#define ATHUB_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_11
+#define ATHUB_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_12
+#define ATHUB_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_13
+#define ATHUB_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_14
+#define ATHUB_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_15
+#define ATHUB_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_SHARED_VIRT_RESET_REQ
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//ATHUB_SHARED_ACTIVE_FCN_ID
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//ATC_ATS_SDPPORT_CNTL
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_SELF_ACTIVATE__SHIFT 0x0
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_CFG_MODE__SHIFT 0x1
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_HALT_THRESHOLD__SHIFT 0x3
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_REISSUE_CREDIT__SHIFT 0x7
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_ENABLE_DISRUPT_FULLDIS__SHIFT 0x8
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_SELF_ACTIVATE__SHIFT 0x9
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_QUICK_COMACK__SHIFT 0xa
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_HALT_THRESHOLD__SHIFT 0xb
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_PASSIVE_MODE__SHIFT 0xf
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_RDY_MODE__SHIFT 0x10
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_REISSUE_CREDIT__SHIFT 0x11
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_ENABLE_DISRUPT_FULLDIS__SHIFT 0x12
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_INVALREQ_RDRSPSTATUS_CNTL__SHIFT 0x13
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKEN__SHIFT 0x16
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKEN__SHIFT 0x1a
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKEN__SHIFT 0x1c
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKENRCV__SHIFT 0x1d
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKEN__SHIFT 0x1e
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKENRCV__SHIFT 0x1f
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_SELF_ACTIVATE_MASK 0x00000001L
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_CFG_MODE_MASK 0x00000006L
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_HALT_THRESHOLD_MASK 0x00000078L
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_REISSUE_CREDIT_MASK 0x00000080L
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_ENABLE_DISRUPT_FULLDIS_MASK 0x00000100L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_SELF_ACTIVATE_MASK 0x00000200L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_QUICK_COMACK_MASK 0x00000400L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_HALT_THRESHOLD_MASK 0x00007800L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_PASSIVE_MODE_MASK 0x00008000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_RDY_MODE_MASK 0x00010000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_REISSUE_CREDIT_MASK 0x00020000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_ENABLE_DISRUPT_FULLDIS_MASK 0x00040000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_INVALREQ_RDRSPSTATUS_CNTL_MASK 0x00380000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKEN_MASK 0x00400000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKEN_MASK 0x04000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKEN_MASK 0x10000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKENRCV_MASK 0x20000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKEN_MASK 0x40000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKENRCV_MASK 0x80000000L
+//ATC_VMID_PASID_MAPPING_UPDATE_STATUS
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED__SHIFT 0x0
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED__SHIFT 0x1
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED__SHIFT 0x2
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED__SHIFT 0x3
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED__SHIFT 0x4
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED__SHIFT 0x5
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED__SHIFT 0x6
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED__SHIFT 0x7
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED__SHIFT 0x8
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED__SHIFT 0x9
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED__SHIFT 0xa
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED__SHIFT 0xb
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED__SHIFT 0xc
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED__SHIFT 0xd
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED__SHIFT 0xe
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED__SHIFT 0xf
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED_MASK 0x00000001L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED_MASK 0x00000002L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED_MASK 0x00000004L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED_MASK 0x00000008L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED_MASK 0x00000010L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED_MASK 0x00000020L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED_MASK 0x00000040L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED_MASK 0x00000080L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED_MASK 0x00000100L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED_MASK 0x00000200L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED_MASK 0x00000400L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED_MASK 0x00000800L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED_MASK 0x00001000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED_MASK 0x00002000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED_MASK 0x00004000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED_MASK 0x00008000L
+//ATC_VMID0_PASID_MAPPING
+#define ATC_VMID0_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID0_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID0_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID0_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID0_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID0_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID1_PASID_MAPPING
+#define ATC_VMID1_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID1_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID1_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID1_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID1_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID1_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID2_PASID_MAPPING
+#define ATC_VMID2_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID2_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID2_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID2_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID2_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID2_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID3_PASID_MAPPING
+#define ATC_VMID3_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID3_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID3_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID3_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID3_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID3_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID4_PASID_MAPPING
+#define ATC_VMID4_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID4_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID4_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID4_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID4_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID4_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID5_PASID_MAPPING
+#define ATC_VMID5_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID5_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID5_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID5_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID5_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID5_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID6_PASID_MAPPING
+#define ATC_VMID6_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID6_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID6_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID6_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID6_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID6_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID7_PASID_MAPPING
+#define ATC_VMID7_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID7_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID7_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID7_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID7_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID7_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID8_PASID_MAPPING
+#define ATC_VMID8_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID8_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID8_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID8_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID8_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID8_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID9_PASID_MAPPING
+#define ATC_VMID9_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID9_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID9_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID9_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID9_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID9_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID10_PASID_MAPPING
+#define ATC_VMID10_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID10_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID10_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID10_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID10_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID10_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID11_PASID_MAPPING
+#define ATC_VMID11_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID11_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID11_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID11_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID11_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID11_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID12_PASID_MAPPING
+#define ATC_VMID12_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID12_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID12_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID12_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID12_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID12_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID13_PASID_MAPPING
+#define ATC_VMID13_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID13_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID13_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID13_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID13_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID13_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID14_PASID_MAPPING
+#define ATC_VMID14_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID14_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID14_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID14_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID14_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID14_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID15_PASID_MAPPING
+#define ATC_VMID15_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID15_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID15_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID15_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID15_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID15_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_TRANS_FAULT_RSPCNTRL
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID0__SHIFT 0x0
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID1__SHIFT 0x1
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID2__SHIFT 0x2
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID3__SHIFT 0x3
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID4__SHIFT 0x4
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID5__SHIFT 0x5
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID6__SHIFT 0x6
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID7__SHIFT 0x7
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID8__SHIFT 0x8
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID9__SHIFT 0x9
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID10__SHIFT 0xa
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID11__SHIFT 0xb
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID12__SHIFT 0xc
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID13__SHIFT 0xd
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID14__SHIFT 0xe
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID15__SHIFT 0xf
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID0_MASK 0x00000001L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID1_MASK 0x00000002L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID2_MASK 0x00000004L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID3_MASK 0x00000008L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID4_MASK 0x00000010L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID5_MASK 0x00000020L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID6_MASK 0x00000040L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID7_MASK 0x00000080L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID8_MASK 0x00000100L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID9_MASK 0x00000200L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID10_MASK 0x00000400L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID11_MASK 0x00000800L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID12_MASK 0x00001000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID13_MASK 0x00002000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID14_MASK 0x00004000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID15_MASK 0x00008000L
+//ATC_ATS_VMID_STATUS
+#define ATC_ATS_VMID_STATUS__VMID0_OUTSTANDING__SHIFT 0x0
+#define ATC_ATS_VMID_STATUS__VMID1_OUTSTANDING__SHIFT 0x1
+#define ATC_ATS_VMID_STATUS__VMID2_OUTSTANDING__SHIFT 0x2
+#define ATC_ATS_VMID_STATUS__VMID3_OUTSTANDING__SHIFT 0x3
+#define ATC_ATS_VMID_STATUS__VMID4_OUTSTANDING__SHIFT 0x4
+#define ATC_ATS_VMID_STATUS__VMID5_OUTSTANDING__SHIFT 0x5
+#define ATC_ATS_VMID_STATUS__VMID6_OUTSTANDING__SHIFT 0x6
+#define ATC_ATS_VMID_STATUS__VMID7_OUTSTANDING__SHIFT 0x7
+#define ATC_ATS_VMID_STATUS__VMID8_OUTSTANDING__SHIFT 0x8
+#define ATC_ATS_VMID_STATUS__VMID9_OUTSTANDING__SHIFT 0x9
+#define ATC_ATS_VMID_STATUS__VMID10_OUTSTANDING__SHIFT 0xa
+#define ATC_ATS_VMID_STATUS__VMID11_OUTSTANDING__SHIFT 0xb
+#define ATC_ATS_VMID_STATUS__VMID12_OUTSTANDING__SHIFT 0xc
+#define ATC_ATS_VMID_STATUS__VMID13_OUTSTANDING__SHIFT 0xd
+#define ATC_ATS_VMID_STATUS__VMID14_OUTSTANDING__SHIFT 0xe
+#define ATC_ATS_VMID_STATUS__VMID15_OUTSTANDING__SHIFT 0xf
+#define ATC_ATS_VMID_STATUS__VMID0_OUTSTANDING_MASK 0x00000001L
+#define ATC_ATS_VMID_STATUS__VMID1_OUTSTANDING_MASK 0x00000002L
+#define ATC_ATS_VMID_STATUS__VMID2_OUTSTANDING_MASK 0x00000004L
+#define ATC_ATS_VMID_STATUS__VMID3_OUTSTANDING_MASK 0x00000008L
+#define ATC_ATS_VMID_STATUS__VMID4_OUTSTANDING_MASK 0x00000010L
+#define ATC_ATS_VMID_STATUS__VMID5_OUTSTANDING_MASK 0x00000020L
+#define ATC_ATS_VMID_STATUS__VMID6_OUTSTANDING_MASK 0x00000040L
+#define ATC_ATS_VMID_STATUS__VMID7_OUTSTANDING_MASK 0x00000080L
+#define ATC_ATS_VMID_STATUS__VMID8_OUTSTANDING_MASK 0x00000100L
+#define ATC_ATS_VMID_STATUS__VMID9_OUTSTANDING_MASK 0x00000200L
+#define ATC_ATS_VMID_STATUS__VMID10_OUTSTANDING_MASK 0x00000400L
+#define ATC_ATS_VMID_STATUS__VMID11_OUTSTANDING_MASK 0x00000800L
+#define ATC_ATS_VMID_STATUS__VMID12_OUTSTANDING_MASK 0x00001000L
+#define ATC_ATS_VMID_STATUS__VMID13_OUTSTANDING_MASK 0x00002000L
+#define ATC_ATS_VMID_STATUS__VMID14_OUTSTANDING_MASK 0x00004000L
+#define ATC_ATS_VMID_STATUS__VMID15_OUTSTANDING_MASK 0x00008000L
+//ATHUB_MISC_CNTL
+#define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x6
+#define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x12
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x13
+#define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x14
+#define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x15
+#define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x1b
+#define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x1c
+#define ATHUB_MISC_CNTL__SRAM_FGCG_ENABLE__SHIFT 0x1d
+#define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x00000FC0L
+#define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00040000L
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00080000L
+#define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00100000L
+#define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x07E00000L
+#define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x08000000L
+#define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x10000000L
+#define ATHUB_MISC_CNTL__SRAM_FGCG_ENABLE_MASK 0x20000000L
+//ATHUB_MEM_POWER_LS
+#define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATHUB_MEM_POWER_LS__LS_DELAY_ENABLE__SHIFT 0x13
+#define ATHUB_MEM_POWER_LS__LS_DELAY_TIME__SHIFT 0x14
+#define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x0007FFC0L
+#define ATHUB_MEM_POWER_LS__LS_DELAY_ENABLE_MASK 0x00080000L
+#define ATHUB_MEM_POWER_LS__LS_DELAY_TIME_MASK 0x03F00000L
+//ATHUB_IH_CREDIT
+#define ATHUB_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define ATHUB_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define ATHUB_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define ATHUB_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+
+
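[Editorial note, not part of the patch] The macros in this header follow the usual two-per-field convention: a `__SHIFT` giving the bit offset of a field and a `_MASK` giving its contiguous bit mask. A minimal sketch of how such pairs are typically composed and extracted is shown below; the FIELD_SET/FIELD_GET helper names and the example program are illustrative assumptions, not part of this patch or of the driver's own register-field helpers. Only the ATHUB_IH_CREDIT values are copied from the definitions above.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative helpers over the __SHIFT/_MASK pairs defined in the header.
 * Hypothetical names; the driver provides its own field accessors. */
#define FIELD_SET(reg_val, field, val) \
	(((reg_val) & ~field##_MASK) | (((uint32_t)(val) << field##__SHIFT) & field##_MASK))
#define FIELD_GET(reg_val, field) \
	(((reg_val) & field##_MASK) >> field##__SHIFT)

/* Example field, values copied from the ATHUB_IH_CREDIT definitions above. */
#define ATHUB_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
#define ATHUB_IH_CREDIT__IH_CLIENT_ID_MASK   0x00FF0000L

int main(void)
{
	uint32_t reg = 0;

	/* Program an 8-bit client ID into bits 23:16, then read it back. */
	reg = FIELD_SET(reg, ATHUB_IH_CREDIT__IH_CLIENT_ID, 0x12);
	printf("reg=0x%08x client_id=0x%02x\n",
	       (unsigned int)reg,
	       (unsigned int)FIELD_GET(reg, ATHUB_IH_CREDIT__IH_CLIENT_ID));
	return 0;
}
```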
+// addressBlock: aid_athub_xpbdec
+//XPB_RTR_SRC_APRTR0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR1
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR2
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR3
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR4
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR5
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR6
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR7
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR8
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR9
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR0
+#define XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR1
+#define XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR2
+#define XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR3
+#define XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_DEST_MAP0
+#define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP1
+#define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP2
+#define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP3
+#define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP4
+#define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP5
+#define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP6
+#define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP7
+#define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP8
+#define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP9
+#define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP0
+#define XPB_XDMA_RTR_DEST_MAP0__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x19
+#define XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP1
+#define XPB_XDMA_RTR_DEST_MAP1__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x19
+#define XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP2
+#define XPB_XDMA_RTR_DEST_MAP2__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x19
+#define XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP3
+#define XPB_XDMA_RTR_DEST_MAP3__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x19
+#define XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
+//XPB_CLG_CFG0
+#define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG0__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG1
+#define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG1__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG2
+#define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG2__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG3
+#define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG3__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG4
+#define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG4__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG5
+#define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG5__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG6
+#define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG6__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG7
+#define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG7__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_EXTRA0
+#define XPB_CLG_EXTRA0__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA0__CMP0_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA0__VLD0__SHIFT 0xd
+#define XPB_CLG_EXTRA0__CLG0_NUM__SHIFT 0xe
+#define XPB_CLG_EXTRA0__CMP0_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA0__CMP0_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA0__VLD0_MASK 0x00002000L
+#define XPB_CLG_EXTRA0__CLG0_NUM_MASK 0x0001C000L
+//XPB_CLG_EXTRA1
+#define XPB_CLG_EXTRA1__CMP1_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA1__CMP1_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA1__VLD1__SHIFT 0xd
+#define XPB_CLG_EXTRA1__CLG1_NUM__SHIFT 0xe
+#define XPB_CLG_EXTRA1__CMP1_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA1__CMP1_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA1__VLD1_MASK 0x00002000L
+#define XPB_CLG_EXTRA1__CLG1_NUM_MASK 0x0001C000L
+//XPB_CLG_EXTRA_MSK
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xd
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x001FE000L
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x03E00000L
+//XPB_LB_ADDR
+#define XPB_LB_ADDR__CMP0__SHIFT 0x0
+#define XPB_LB_ADDR__MASK0__SHIFT 0xa
+#define XPB_LB_ADDR__CMP1__SHIFT 0x14
+#define XPB_LB_ADDR__MASK1__SHIFT 0x1a
+#define XPB_LB_ADDR__CMP0_MASK 0x000003FFL
+#define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L
+#define XPB_LB_ADDR__CMP1_MASK 0x03F00000L
+#define XPB_LB_ADDR__MASK1_MASK 0xFC000000L
+//XPB_WCB_STS
+#define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17
+#define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L
+//XPB_HST_CFG
+#define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0
+#define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L
+//XPB_P2P_BAR_CFG
+#define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0
+#define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4
+#define XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6
+#define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8
+#define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa
+#define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc
+#define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL
+#define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
+#define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
+#define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
+#define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
+#define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
+//XPB_P2P_BAR0
+#define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR0__VALID__SHIFT 0xc
+#define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR0__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR0__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR0__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR1
+#define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR1__VALID__SHIFT 0xc
+#define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR1__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR1__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR1__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR2
+#define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR2__VALID__SHIFT 0xc
+#define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR2__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR2__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR2__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR3
+#define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR3__VALID__SHIFT 0xc
+#define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR3__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR3__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR3__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR4
+#define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR4__VALID__SHIFT 0xc
+#define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR4__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR4__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR4__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR5
+#define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR5__VALID__SHIFT 0xc
+#define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR5__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR5__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR5__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR6
+#define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR6__VALID__SHIFT 0xc
+#define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR6__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR6__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR6__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR7
+#define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR7__VALID__SHIFT 0xc
+#define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR7__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR7__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR7__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_SETUP
+#define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc
+#define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR_SETUP__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR_SETUP__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_DELTA_ABOVE
+#define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L
+//XPB_P2P_BAR_DELTA_BELOW
+#define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L
+//XPB_PEER_SYS_BAR0
+#define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR1
+#define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR2
+#define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR3
+#define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR4
+#define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR5
+#define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR6
+#define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR7
+#define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR8
+#define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR9
+#define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR0
+#define XPB_XDMA_PEER_SYS_BAR0__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR0__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR1
+#define XPB_XDMA_PEER_SYS_BAR1__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR1__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR2
+#define XPB_XDMA_PEER_SYS_BAR2__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR2__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR3
+#define XPB_XDMA_PEER_SYS_BAR3__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR3__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
+//XPB_CLK_GAT
+#define XPB_CLK_GAT__ONDLY__SHIFT 0x0
+#define XPB_CLK_GAT__OFFDLY__SHIFT 0x6
+#define XPB_CLK_GAT__RDYDLY__SHIFT 0xc
+#define XPB_CLK_GAT__ENABLE__SHIFT 0x12
+#define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13
+#define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL
+#define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L
+#define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L
+#define XPB_CLK_GAT__ENABLE_MASK 0x00040000L
+#define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
+//XPB_INTF_CFG
+#define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8
+#define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10
+#define XPB_INTF_CFG__BIF_REG_SNOOP_SEL__SHIFT 0x17
+#define XPB_INTF_CFG__BIF_REG_SNOOP_VAL__SHIFT 0x18
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_SEL__SHIFT 0x19
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_VAL__SHIFT 0x1a
+#define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b
+#define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d
+#define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e
+#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA__SHIFT 0x1f
+#define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L
+#define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L
+#define XPB_INTF_CFG__BIF_REG_SNOOP_SEL_MASK 0x00800000L
+#define XPB_INTF_CFG__BIF_REG_SNOOP_VAL_MASK 0x01000000L
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_SEL_MASK 0x02000000L
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_VAL_MASK 0x04000000L
+#define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
+#define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
+#define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
+#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA_MASK 0x80000000L
+//XPB_INTF_STS
+#define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10
+#define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11
+#define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12
+#define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13
+#define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
+#define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
+#define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
+#define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L
+//XPB_PIPE_STS
+#define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16
+#define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18
+#define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
+#define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L
+//XPB_SUB_CTRL
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9
+#define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa
+#define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb
+#define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc
+#define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd
+#define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe
+#define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf
+#define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10
+#define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11
+#define XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12
+#define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
+#define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
+#define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
+#define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
+#define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
+#define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
+#define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
+#define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
+#define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
+#define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
+#define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
+//XPB_MAP_INVERT_FLUSH_NUM_LSB
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL
+//XPB_PERF_KNOBS
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L
+//XPB_STICKY
+#define XPB_STICKY__BITS__SHIFT 0x0
+#define XPB_STICKY__BITS_MASK 0xFFFFFFFFL
+//XPB_STICKY_W1C
+#define XPB_STICKY_W1C__BITS__SHIFT 0x0
+#define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL
+//XPB_MISC_CFG
+#define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0
+#define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8
+#define XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10
+#define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18
+#define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f
+#define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL
+#define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L
+#define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L
+#define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L
+#define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
+//XPB_INTF_CFG2
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL
+//XPB_CLG_EXTRA_RD
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb
+#define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf
+#define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a
+#define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L
+#define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L
+#define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L
+#define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L
+#define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L
+//XPB_CLG_EXTRA_MSK_RD
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L
+//XPB_CLG_GFX_MATCH
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x8
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0x10
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x000000FFL
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x0000FF00L
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x00FF0000L
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0xFF000000L
+//XPB_CLG_GFX_MATCH_VLD
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L
+//XPB_CLG_GFX_MATCH_MSK
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L
+//XPB_CLG_MM_MATCH
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x8
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID__SHIFT 0x10
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID__SHIFT 0x18
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x000000FFL
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x0000FF00L
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID_MASK 0x00FF0000L
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID_MASK 0xFF000000L
+//XPB_CLG_MM_MATCH_VLD
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L
+//XPB_CLG_MM_MATCH_MSK
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L
+//XPB_CLG_GFX_UNITID_MAPPING0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING1
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING2
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING3
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING4
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING5
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING7
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING1
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING2
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING3
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+
+
+// addressBlock: aid_athub_rpbdec
+//RPB_PASSPW_CONF
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE__SHIFT 0x2
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0x3
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0x4
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x5
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0x6
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x7
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE__SHIFT 0x8
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x9
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0xa
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_EN__SHIFT 0xb
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xc
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0xe
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0xf
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x10
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x11
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_MASK 0x00000004L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000008L
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00000010L
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00000020L
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00000040L
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00000080L
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_MASK 0x00000100L
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00000200L
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00000400L
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_EN_MASK 0x00000800L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00001000L
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00004000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00008000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00010000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00020000L
+//RPB_BLOCKLEVEL_CONF
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0
+#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL__SHIFT 0x2
+#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL__SHIFT 0x4
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x6
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x8
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0xa
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xc
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0xe
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x11
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x12
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x13
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L
+#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL_MASK 0x0000000CL
+#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL_MASK 0x00000030L
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x000000C0L
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x00000300L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00000C00L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x00003000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x0000C000L
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00020000L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00040000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00080000L
+//RPB_TAG_CONF
+#define RPB_TAG_CONF__RPB_ATS_VC0_TR__SHIFT 0x0
+#define RPB_TAG_CONF__RPB_ATS_VC5_TR__SHIFT 0xa
+#define RPB_TAG_CONF__RPB_ATS_PR__SHIFT 0x14
+#define RPB_TAG_CONF__RPB_ATS_VC0_TR_MASK 0x000003FFL
+#define RPB_TAG_CONF__RPB_ATS_VC5_TR_MASK 0x000FFC00L
+#define RPB_TAG_CONF__RPB_ATS_PR_MASK 0x3FF00000L
+//RPB_TAG_CONF2
+#define RPB_TAG_CONF2__RPB_IO_WR__SHIFT 0x0
+#define RPB_TAG_CONF2__RPB_IO_MAX_LIMIT__SHIFT 0xa
+#define RPB_TAG_CONF2__RPB_IO_RD_MARGIN__SHIFT 0x15
+#define RPB_TAG_CONF2__RPB_IO_WR_MASK 0x000003FFL
+#define RPB_TAG_CONF2__RPB_IO_MAX_LIMIT_MASK 0x001FFC00L
+#define RPB_TAG_CONF2__RPB_IO_RD_MARGIN_MASK 0xFFE00000L
+//RPB_ARB_CNTL
+#define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19
+#define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L
+//RPB_ARB_CNTL2
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L
+//RPB_BIF_CNTL
+#define RPB_BIF_CNTL__ARB_MODE__SHIFT 0x0
+#define RPB_BIF_CNTL__DRAIN_VC_NUM__SHIFT 0x1
+#define RPB_BIF_CNTL__SWITCH_ENABLE__SHIFT 0x3
+#define RPB_BIF_CNTL__SWITCH_THRESHOLD__SHIFT 0x4
+#define RPB_BIF_CNTL__PAGE_PRI_EN__SHIFT 0xc
+#define RPB_BIF_CNTL__VC0TR_PRI_EN__SHIFT 0xd
+#define RPB_BIF_CNTL__VC5TR_PRI_EN__SHIFT 0xe
+#define RPB_BIF_CNTL__VC0_CHAINED_OVERRIDE__SHIFT 0xf
+#define RPB_BIF_CNTL__ARB_MODE_MASK 0x00000001L
+#define RPB_BIF_CNTL__DRAIN_VC_NUM_MASK 0x00000006L
+#define RPB_BIF_CNTL__SWITCH_ENABLE_MASK 0x00000008L
+#define RPB_BIF_CNTL__SWITCH_THRESHOLD_MASK 0x00000FF0L
+#define RPB_BIF_CNTL__PAGE_PRI_EN_MASK 0x00001000L
+#define RPB_BIF_CNTL__VC0TR_PRI_EN_MASK 0x00002000L
+#define RPB_BIF_CNTL__VC5TR_PRI_EN_MASK 0x00004000L
+#define RPB_BIF_CNTL__VC0_CHAINED_OVERRIDE_MASK 0x00008000L
+//RPB_BIF_CNTL2
+#define RPB_BIF_CNTL2__VC0_SWITCH_NUM__SHIFT 0x0
+#define RPB_BIF_CNTL2__VC1_SWITCH_NUM__SHIFT 0x8
+#define RPB_BIF_CNTL2__VC5_SWITCH_NUM__SHIFT 0x10
+#define RPB_BIF_CNTL2__VC0_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_BIF_CNTL2__VC1_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_BIF_CNTL2__VC5_SWITCH_NUM_MASK 0x00FF0000L
+//RPB_PERF_COUNTER_CNTL
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x2
+#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x3
+#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x4
+#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x5
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x9
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0xe
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x13
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x18
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L
+#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L
+#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L
+#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L
+#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001E0L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003E00L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007C000L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00F80000L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1F000000L
+//RPB_DEINTRLV_COMBINE_CNTL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN__SHIFT 0x6
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN_MASK 0x00000040L
+//RPB_VC_SWITCH_RDWR
+#define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0
+#define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2
+#define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa
+#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD__SHIFT 0x12
+#define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L
+#define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL
+#define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L
+#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD_MASK 0x03FC0000L
+//RPB_PERFCOUNTER_LO
+#define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//RPB_PERFCOUNTER_HI
+#define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//RPB_PERFCOUNTER0_CFG
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER1_CFG
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER2_CFG
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER3_CFG
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER_RSLT_CNTL
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//RPB_ATS_CNTL
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2
+#define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7
+#define RPB_ATS_CNTL__ATCTR_SWITCH_NUM__SHIFT 0xf
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13
+#define RPB_ATS_CNTL__WR_AT__SHIFT 0x17
+#define RPB_ATS_CNTL__INVAL_COM_CMD__SHIFT 0x19
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL
+#define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L
+#define RPB_ATS_CNTL__ATCTR_SWITCH_NUM_MASK 0x00078000L
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L
+#define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L
+#define RPB_ATS_CNTL__INVAL_COM_CMD_MASK 0x7E000000L
+//RPB_ATS_CNTL2
+#define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x0
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0x6
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0xc
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0xf
+#define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x12
+#define RPB_ATS_CNTL2__MM_TRANS_VC5_ENABLE__SHIFT 0x14
+#define RPB_ATS_CNTL2__GC_TRANS_VC5_ENABLE__SHIFT 0x15
+#define RPB_ATS_CNTL2__RPB_VC5_CRD__SHIFT 0x16
+#define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x0000003FL
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x00000FC0L
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x00007000L
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00038000L
+#define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x000C0000L
+#define RPB_ATS_CNTL2__MM_TRANS_VC5_ENABLE_MASK 0x00100000L
+#define RPB_ATS_CNTL2__GC_TRANS_VC5_ENABLE_MASK 0x00200000L
+#define RPB_ATS_CNTL2__RPB_VC5_CRD_MASK 0x07C00000L
+//RPB_SDPPORT_CNTL
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6
+#define RPB_SDPPORT_CNTL__NBIF_HST_SELF_ACTIVATE__SHIFT 0xa
+#define RPB_SDPPORT_CNTL__NBIF_HST_CFG_MODE__SHIFT 0xb
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_REISSUE_CREDIT__SHIFT 0xd
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_SATURATE_COUNTER__SHIFT 0xe
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_DISRUPT_FULLDIS__SHIFT 0xf
+#define RPB_SDPPORT_CNTL__NBIF_HST_HALT_THRESHOLD__SHIFT 0x10
+#define RPB_SDPPORT_CNTL__NBIF_HST_PASSIVE_MODE__SHIFT 0x14
+#define RPB_SDPPORT_CNTL__NBIF_HST_QUICK_COMACK__SHIFT 0x15
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b
+#define RPB_SDPPORT_CNTL__DF_HALT_THRESHOLD__SHIFT 0x1c
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L
+#define RPB_SDPPORT_CNTL__NBIF_HST_SELF_ACTIVATE_MASK 0x00000400L
+#define RPB_SDPPORT_CNTL__NBIF_HST_CFG_MODE_MASK 0x00001800L
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_REISSUE_CREDIT_MASK 0x00002000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_SATURATE_COUNTER_MASK 0x00004000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_DISRUPT_FULLDIS_MASK 0x00008000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_HALT_THRESHOLD_MASK 0x000F0000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_PASSIVE_MODE_MASK 0x00100000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_QUICK_COMACK_MASK 0x00200000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L
+#define RPB_SDPPORT_CNTL__DF_HALT_THRESHOLD_MASK 0xF0000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
new file mode 100644
index 000000000000..fbb18e44ec52
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_3_OFFSET_HEADER
+#define _df_4_3_OFFSET_HEADER
+
+#define regDF_CS_UMC_AON0_HardwareAssertMaskLow 0x0e3e
+#define regDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 4
+#define regDF_NCS_PG0_HardwareAssertMaskHigh 0x0e3f
+#define regDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 4
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
new file mode 100644
index 000000000000..9c8f19ded4eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_3_SH_MASK_HEADER
+#define _df_4_3_SH_MASK_HEADER
+
+//DF_CS_UMC_AON0_HardwareAssertMaskLow
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L
+
+//DF_NCS_PG0_HardwareAssertMaskHigh
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 18d34bbceebe..79c41004c0b6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -4868,6 +4868,10 @@
#define mmCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
#define mmCP_ME2_PIPE3_INT_STATUS 0x1e34
#define mmCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_INT_STAT_DEBUG 0x1e35
+#define mmCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define mmCP_ME2_INT_STAT_DEBUG 0x1e36
+#define mmCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
#define mmCP_GFX_QUEUE_INDEX 0x1e37
#define mmCP_GFX_QUEUE_INDEX_BASE_IDX 0
#define mmCC_GC_EDC_CONFIG 0x1e38
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index 4127896ffcdf..52043e143067 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -18680,6 +18680,60 @@
//CC_GC_EDC_CONFIG
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
//CP_ME1_PIPE_PRIORITY_CNTS
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index 3973110f149c..a734abaa91a5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -4531,6 +4531,10 @@
#define mmCP_GFX_QUEUE_INDEX_BASE_IDX 0
#define mmCC_GC_EDC_CONFIG 0x1e38
#define mmCC_GC_EDC_CONFIG_BASE_IDX 0
+#define mmCP_ME1_INT_STAT_DEBUG 0x1e35
+#define mmCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define mmCP_ME2_INT_STAT_DEBUG 0x1e36
+#define mmCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
#define mmCP_ME1_PIPE_PRIORITY_CNTS 0x1e39
#define mmCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
#define mmCP_ME1_PIPE0_PRIORITY 0x1e3a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index d4e8ff22ecb8..d7a17bae2584 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -17028,6 +17028,60 @@
//CC_GC_EDC_CONFIG
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
//CP_ME1_PIPE_PRIORITY_CNTS
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
index 3b95a59b196c..56e00252bff8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
@@ -3593,6 +3593,14 @@
#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1


+// addressBlock: gc_rlcsdec
+// base address: 0x3b980
+#define regRLC_RLCS_FED_STATUS_0 0x4eff
+#define regRLC_RLCS_FED_STATUS_0_BASE_IDX 1
+#define regRLC_RLCS_FED_STATUS_1 0x4f00
+#define regRLC_RLCS_FED_STATUS_1_BASE_IDX 1
+
+
// addressBlock: gc_gcvml2pspdec
// base address: 0x3f900
#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
index ae3ef8a9e702..658e88a8e2ac 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
@@ -37642,6 +37642,56 @@
#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCS_FED_STATUS_0
+#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR__SHIFT 0x0
+#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR__SHIFT 0x1
+#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR__SHIFT 0x2
+#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR__SHIFT 0x3
+#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR__SHIFT 0x4
+#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR__SHIFT 0x5
+#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR__SHIFT 0x6
+#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR__SHIFT 0x7
+#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR_MASK 0x00000001L
+#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR_MASK 0x00000002L
+#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR_MASK 0x00000004L
+#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR_MASK 0x00000008L
+#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR_MASK 0x00000010L
+#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR_MASK 0x00000020L
+#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR_MASK 0x00000040L
+#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR_MASK 0x00000080L
+//RLC_RLCS_FED_STATUS_1
+#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR__SHIFT 0x0
+#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR__SHIFT 0x1
+#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR__SHIFT 0x2
+#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR__SHIFT 0x3
+#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR__SHIFT 0x4
+#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR__SHIFT 0x5
+#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR__SHIFT 0x6
+#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR__SHIFT 0x7
+#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR__SHIFT 0x8
+#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR__SHIFT 0x9
+#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR__SHIFT 0xa
+#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR__SHIFT 0xb
+#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR__SHIFT 0xc
+#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR__SHIFT 0xd
+#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR__SHIFT 0xe
+#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR__SHIFT 0xf
+#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR_MASK 0x00000001L
+#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR_MASK 0x00000002L
+#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR_MASK 0x00000004L
+#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR_MASK 0x00000008L
+#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR_MASK 0x00000010L
+#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR_MASK 0x00000020L
+#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR_MASK 0x00000040L
+#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR_MASK 0x00000080L
+#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR_MASK 0x00000100L
+#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR_MASK 0x00000200L
+#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR_MASK 0x00000400L
+#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR_MASK 0x00000800L
+#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR_MASK 0x00001000L
+#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR_MASK 0x00002000L
+#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR_MASK 0x00004000L
+#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR_MASK 0x00008000L
//RLC_CGTT_MGCG_OVERRIDE
#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0
#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h
new file mode 100644
index 000000000000..3100de8b3881
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h
@@ -0,0 +1,7258 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_9_4_3_OFFSET_HEADER
+#define _gc_9_4_3_OFFSET_HEADER
+
+
+
+// addressBlock: xcd0_gc_grbmdec
+// base address: 0x8000
+#define regGRBM_CNTL 0x0000
+#define regGRBM_CNTL_BASE_IDX 0
+#define regGRBM_SKEW_CNTL 0x0001
+#define regGRBM_SKEW_CNTL_BASE_IDX 0
+#define regGRBM_STATUS2 0x0002
+#define regGRBM_STATUS2_BASE_IDX 0
+#define regGRBM_PWR_CNTL 0x0003
+#define regGRBM_PWR_CNTL_BASE_IDX 0
+#define regGRBM_STATUS 0x0004
+#define regGRBM_STATUS_BASE_IDX 0
+#define regGRBM_STATUS_SE0 0x0005
+#define regGRBM_STATUS_SE0_BASE_IDX 0
+#define regGRBM_STATUS_SE1 0x0006
+#define regGRBM_STATUS_SE1_BASE_IDX 0
+#define regGRBM_SOFT_RESET 0x0008
+#define regGRBM_SOFT_RESET_BASE_IDX 0
+#define regGRBM_GFX_CLKEN_CNTL 0x000c
+#define regGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define regGRBM_WAIT_IDLE_CLOCKS 0x000d
+#define regGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define regGRBM_STATUS_SE2 0x000e
+#define regGRBM_STATUS_SE2_BASE_IDX 0
+#define regGRBM_STATUS_SE3 0x000f
+#define regGRBM_STATUS_SE3_BASE_IDX 0
+#define regGRBM_READ_ERROR 0x0016
+#define regGRBM_READ_ERROR_BASE_IDX 0
+#define regGRBM_READ_ERROR2 0x0017
+#define regGRBM_READ_ERROR2_BASE_IDX 0
+#define regGRBM_INT_CNTL 0x0018
+#define regGRBM_INT_CNTL_BASE_IDX 0
+#define regGRBM_TRAP_OP 0x0019
+#define regGRBM_TRAP_OP_BASE_IDX 0
+#define regGRBM_TRAP_ADDR 0x001a
+#define regGRBM_TRAP_ADDR_BASE_IDX 0
+#define regGRBM_TRAP_ADDR_MSK 0x001b
+#define regGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define regGRBM_TRAP_WD 0x001c
+#define regGRBM_TRAP_WD_BASE_IDX 0
+#define regGRBM_TRAP_WD_MSK 0x001d
+#define regGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define regGRBM_WRITE_ERROR 0x001f
+#define regGRBM_WRITE_ERROR_BASE_IDX 0
+#define regGRBM_IOV_ERROR 0x0020
+#define regGRBM_IOV_ERROR_BASE_IDX 0
+#define regGRBM_CHIP_REVISION 0x0021
+#define regGRBM_CHIP_REVISION_BASE_IDX 0
+#define regGRBM_GFX_CNTL 0x0022
+#define regGRBM_GFX_CNTL_BASE_IDX 0
+#define regGRBM_RSMU_CFG 0x0023
+#define regGRBM_RSMU_CFG_BASE_IDX 0
+#define regGRBM_IH_CREDIT 0x0024
+#define regGRBM_IH_CREDIT_BASE_IDX 0
+#define regGRBM_PWR_CNTL2 0x0025
+#define regGRBM_PWR_CNTL2_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_START 0x0026
+#define regGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_END 0x0027
+#define regGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define regGRBM_RSMU_READ_ERROR 0x0028
+#define regGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define regGRBM_CHICKEN_BITS 0x0029
+#define regGRBM_CHICKEN_BITS_BASE_IDX 0
+#define regGRBM_FENCE_RANGE0 0x002a
+#define regGRBM_FENCE_RANGE0_BASE_IDX 0
+#define regGRBM_FENCE_RANGE1 0x002b
+#define regGRBM_FENCE_RANGE1_BASE_IDX 0
+#define regGRBM_IOV_READ_ERROR 0x002c
+#define regGRBM_IOV_READ_ERROR_BASE_IDX 0
+#define regGRBM_NOWHERE 0x003f
+#define regGRBM_NOWHERE_BASE_IDX 0
+#define regGRBM_SCRATCH_REG0 0x0040
+#define regGRBM_SCRATCH_REG0_BASE_IDX 0
+#define regGRBM_SCRATCH_REG1 0x0041
+#define regGRBM_SCRATCH_REG1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG2 0x0042
+#define regGRBM_SCRATCH_REG2_BASE_IDX 0
+#define regGRBM_SCRATCH_REG3 0x0043
+#define regGRBM_SCRATCH_REG3_BASE_IDX 0
+#define regGRBM_SCRATCH_REG4 0x0044
+#define regGRBM_SCRATCH_REG4_BASE_IDX 0
+#define regGRBM_SCRATCH_REG5 0x0045
+#define regGRBM_SCRATCH_REG5_BASE_IDX 0
+#define regGRBM_SCRATCH_REG6 0x0046
+#define regGRBM_SCRATCH_REG6_BASE_IDX 0
+#define regGRBM_SCRATCH_REG7 0x0047
+#define regGRBM_SCRATCH_REG7_BASE_IDX 0
+#define regVIOLATION_DATA_ASYNC_VF_PROG 0x0048
+#define regVIOLATION_DATA_ASYNC_VF_PROG_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_cpdec
+// base address: 0x8200
+#define regCP_CPC_DEBUG_CNTL 0x0080
+#define regCP_CPC_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPF_DEBUG_CNTL 0x0082
+#define regCP_CPF_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPC_STATUS 0x0084
+#define regCP_CPC_STATUS_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT 0x0085
+#define regCP_CPC_BUSY_STAT_BASE_IDX 0
+#define regCP_CPC_STALLED_STAT1 0x0086
+#define regCP_CPC_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPF_STATUS 0x0087
+#define regCP_CPF_STATUS_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT 0x0088
+#define regCP_CPF_BUSY_STAT_BASE_IDX 0
+#define regCP_CPF_STALLED_STAT1 0x0089
+#define regCP_CPF_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPC_GRBM_FREE_COUNT 0x008b
+#define regCP_CPC_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPC_PRIV_VIOLATION_ADDR 0x008c
+#define regCP_CPC_PRIV_VIOLATION_ADDR_BASE_IDX 0
+#define regCP_MEC_CNTL 0x008d
+#define regCP_MEC_CNTL_BASE_IDX 0
+#define regCP_MEC_ME1_HEADER_DUMP 0x008e
+#define regCP_MEC_ME1_HEADER_DUMP_BASE_IDX 0
+#define regCP_MEC_ME2_HEADER_DUMP 0x008f
+#define regCP_MEC_ME2_HEADER_DUMP_BASE_IDX 0
+#define regCP_CPC_SCRATCH_INDEX 0x0090
+#define regCP_CPC_SCRATCH_INDEX_BASE_IDX 0
+#define regCP_CPC_SCRATCH_DATA 0x0091
+#define regCP_CPC_SCRATCH_DATA_BASE_IDX 0
+#define regCP_CPF_GRBM_FREE_COUNT 0x0092
+#define regCP_CPF_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPC_HALT_HYST_COUNT 0x00a7
+#define regCP_CPC_HALT_HYST_COUNT_BASE_IDX 0
+#define regCP_CE_COMPARE_COUNT 0x00c0
+#define regCP_CE_COMPARE_COUNT_BASE_IDX 0
+#define regCP_CE_DE_COUNT 0x00c1
+#define regCP_CE_DE_COUNT_BASE_IDX 0
+#define regCP_DE_CE_COUNT 0x00c2
+#define regCP_DE_CE_COUNT_BASE_IDX 0
+#define regCP_DE_LAST_INVAL_COUNT 0x00c3
+#define regCP_DE_LAST_INVAL_COUNT_BASE_IDX 0
+#define regCP_DE_DE_COUNT 0x00c4
+#define regCP_DE_DE_COUNT_BASE_IDX 0
+#define regCP_STALLED_STAT3 0x019c
+#define regCP_STALLED_STAT3_BASE_IDX 0
+#define regCP_STALLED_STAT1 0x019d
+#define regCP_STALLED_STAT1_BASE_IDX 0
+#define regCP_STALLED_STAT2 0x019e
+#define regCP_STALLED_STAT2_BASE_IDX 0
+#define regCP_BUSY_STAT 0x019f
+#define regCP_BUSY_STAT_BASE_IDX 0
+#define regCP_STAT 0x01a0
+#define regCP_STAT_BASE_IDX 0
+#define regCP_ME_HEADER_DUMP 0x01a1
+#define regCP_ME_HEADER_DUMP_BASE_IDX 0
+#define regCP_PFP_HEADER_DUMP 0x01a2
+#define regCP_PFP_HEADER_DUMP_BASE_IDX 0
+#define regCP_GRBM_FREE_COUNT 0x01a3
+#define regCP_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CE_HEADER_DUMP 0x01a4
+#define regCP_CE_HEADER_DUMP_BASE_IDX 0
+#define regCP_PFP_INSTR_PNTR 0x01a5
+#define regCP_PFP_INSTR_PNTR_BASE_IDX 0
+#define regCP_ME_INSTR_PNTR 0x01a6
+#define regCP_ME_INSTR_PNTR_BASE_IDX 0
+#define regCP_CE_INSTR_PNTR 0x01a7
+#define regCP_CE_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC1_INSTR_PNTR 0x01a8
+#define regCP_MEC1_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC2_INSTR_PNTR 0x01a9
+#define regCP_MEC2_INSTR_PNTR_BASE_IDX 0
+#define regCP_CSF_STAT 0x01b4
+#define regCP_CSF_STAT_BASE_IDX 0
+#define regCP_ME_CNTL 0x01b6
+#define regCP_ME_CNTL_BASE_IDX 0
+#define regCP_CNTX_STAT 0x01b8
+#define regCP_CNTX_STAT_BASE_IDX 0
+#define regCP_ME_PREEMPTION 0x01b9
+#define regCP_ME_PREEMPTION_BASE_IDX 0
+#define regCP_ROQ_THRESHOLDS 0x01bc
+#define regCP_ROQ_THRESHOLDS_BASE_IDX 0
+#define regCP_MEQ_STQ_THRESHOLD 0x01bd
+#define regCP_MEQ_STQ_THRESHOLD_BASE_IDX 0
+#define regCP_RB2_RPTR 0x01be
+#define regCP_RB2_RPTR_BASE_IDX 0
+#define regCP_RB1_RPTR 0x01bf
+#define regCP_RB1_RPTR_BASE_IDX 0
+#define regCP_RB0_RPTR 0x01c0
+#define regCP_RB0_RPTR_BASE_IDX 0
+#define regCP_RB_RPTR 0x01c0
+#define regCP_RB_RPTR_BASE_IDX 0
+#define regCP_RB_WPTR_DELAY 0x01c1
+#define regCP_RB_WPTR_DELAY_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_CNTL 0x01c2
+#define regCP_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_ROQ1_THRESHOLDS 0x01d5
+#define regCP_ROQ1_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ2_THRESHOLDS 0x01d6
+#define regCP_ROQ2_THRESHOLDS_BASE_IDX 0
+#define regCP_STQ_THRESHOLDS 0x01d7
+#define regCP_STQ_THRESHOLDS_BASE_IDX 0
+#define regCP_QUEUE_THRESHOLDS 0x01d8
+#define regCP_QUEUE_THRESHOLDS_BASE_IDX 0
+#define regCP_MEQ_THRESHOLDS 0x01d9
+#define regCP_MEQ_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_AVAIL 0x01da
+#define regCP_ROQ_AVAIL_BASE_IDX 0
+#define regCP_STQ_AVAIL 0x01db
+#define regCP_STQ_AVAIL_BASE_IDX 0
+#define regCP_ROQ2_AVAIL 0x01dc
+#define regCP_ROQ2_AVAIL_BASE_IDX 0
+#define regCP_MEQ_AVAIL 0x01dd
+#define regCP_MEQ_AVAIL_BASE_IDX 0
+#define regCP_CMD_INDEX 0x01de
+#define regCP_CMD_INDEX_BASE_IDX 0
+#define regCP_CMD_DATA 0x01df
+#define regCP_CMD_DATA_BASE_IDX 0
+#define regCP_ROQ_RB_STAT 0x01e0
+#define regCP_ROQ_RB_STAT_BASE_IDX 0
+#define regCP_ROQ_IB1_STAT 0x01e1
+#define regCP_ROQ_IB1_STAT_BASE_IDX 0
+#define regCP_ROQ_IB2_STAT 0x01e2
+#define regCP_ROQ_IB2_STAT_BASE_IDX 0
+#define regCP_STQ_STAT 0x01e3
+#define regCP_STQ_STAT_BASE_IDX 0
+#define regCP_STQ_WR_STAT 0x01e4
+#define regCP_STQ_WR_STAT_BASE_IDX 0
+#define regCP_MEQ_STAT 0x01e5
+#define regCP_MEQ_STAT_BASE_IDX 0
+#define regCP_CEQ1_AVAIL 0x01e6
+#define regCP_CEQ1_AVAIL_BASE_IDX 0
+#define regCP_CEQ2_AVAIL 0x01e7
+#define regCP_CEQ2_AVAIL_BASE_IDX 0
+#define regCP_CE_ROQ_RB_STAT 0x01e8
+#define regCP_CE_ROQ_RB_STAT_BASE_IDX 0
+#define regCP_CE_ROQ_IB1_STAT 0x01e9
+#define regCP_CE_ROQ_IB1_STAT_BASE_IDX 0
+#define regCP_CE_ROQ_IB2_STAT 0x01ea
+#define regCP_CE_ROQ_IB2_STAT_BASE_IDX 0
+#define regCP_INT_STAT_DEBUG 0x01f7
+#define regCP_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_DEBUG_CNTL 0x01f8
+#define regCP_DEBUG_CNTL_BASE_IDX 0
+#define regCP_PRIV_VIOLATION_ADDR 0x01fa
+#define regCP_PRIV_VIOLATION_ADDR_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_padec
+// base address: 0x8800
+#define regVGT_VTX_VECT_EJECT_REG 0x022c
+#define regVGT_VTX_VECT_EJECT_REG_BASE_IDX 0
+#define regVGT_DMA_DATA_FIFO_DEPTH 0x022d
+#define regVGT_DMA_DATA_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DMA_REQ_FIFO_DEPTH 0x022e
+#define regVGT_DMA_REQ_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DRAW_INIT_FIFO_DEPTH 0x022f
+#define regVGT_DRAW_INIT_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_LAST_COPY_STATE 0x0230
+#define regVGT_LAST_COPY_STATE_BASE_IDX 0
+#define regVGT_CACHE_INVALIDATION 0x0231
+#define regVGT_CACHE_INVALIDATION_BASE_IDX 0
+#define regVGT_RESET_DEBUG 0x0232
+#define regVGT_RESET_DEBUG_BASE_IDX 0
+#define regVGT_STRMOUT_DELAY 0x0233
+#define regVGT_STRMOUT_DELAY_BASE_IDX 0
+#define regVGT_FIFO_DEPTHS 0x0234
+#define regVGT_FIFO_DEPTHS_BASE_IDX 0
+#define regVGT_GS_VERTEX_REUSE 0x0235
+#define regVGT_GS_VERTEX_REUSE_BASE_IDX 0
+#define regVGT_MC_LAT_CNTL 0x0236
+#define regVGT_MC_LAT_CNTL_BASE_IDX 0
+#define regIA_CNTL_STATUS 0x0237
+#define regIA_CNTL_STATUS_BASE_IDX 0
+#define regVGT_CNTL_STATUS 0x023c
+#define regVGT_CNTL_STATUS_BASE_IDX 0
+#define regWD_CNTL_STATUS 0x023f
+#define regWD_CNTL_STATUS_BASE_IDX 0
+#define regCC_GC_PRIM_CONFIG 0x0240
+#define regCC_GC_PRIM_CONFIG_BASE_IDX 0
+#define regGC_USER_PRIM_CONFIG 0x0241
+#define regGC_USER_PRIM_CONFIG_BASE_IDX 0
+#define regWD_QOS 0x0242
+#define regWD_QOS_BASE_IDX 0
+#define regWD_UTCL1_CNTL 0x0243
+#define regWD_UTCL1_CNTL_BASE_IDX 0
+#define regWD_UTCL1_STATUS 0x0244
+#define regWD_UTCL1_STATUS_BASE_IDX 0
+#define regIA_UTCL1_CNTL 0x0246
+#define regIA_UTCL1_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS 0x0247
+#define regIA_UTCL1_STATUS_BASE_IDX 0
+#define regVGT_SYS_CONFIG 0x0263
+#define regVGT_SYS_CONFIG_BASE_IDX 0
+#define regVGT_VS_MAX_WAVE_ID 0x0268
+#define regVGT_VS_MAX_WAVE_ID_BASE_IDX 0
+#define regVGT_GS_MAX_WAVE_ID 0x0269
+#define regVGT_GS_MAX_WAVE_ID_BASE_IDX 0
+#define regGFX_PIPE_CONTROL 0x026d
+#define regGFX_PIPE_CONTROL_BASE_IDX 0
+#define regCC_GC_SHADER_ARRAY_CONFIG 0x026f
+#define regCC_GC_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define regGC_USER_SHADER_ARRAY_CONFIG 0x0270
+#define regGC_USER_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define regVGT_DMA_PRIMITIVE_TYPE 0x0271
+#define regVGT_DMA_PRIMITIVE_TYPE_BASE_IDX 0
+#define regVGT_DMA_CONTROL 0x0272
+#define regVGT_DMA_CONTROL_BASE_IDX 0
+#define regVGT_DMA_LS_HS_CONFIG 0x0273
+#define regVGT_DMA_LS_HS_CONFIG_BASE_IDX 0
+#define regWD_BUF_RESOURCE_1 0x0276
+#define regWD_BUF_RESOURCE_1_BASE_IDX 0
+#define regWD_BUF_RESOURCE_2 0x0277
+#define regWD_BUF_RESOURCE_2_BASE_IDX 0
+#define regPA_CL_CNTL_STATUS 0x0284
+#define regPA_CL_CNTL_STATUS_BASE_IDX 0
+#define regPA_CL_ENHANCE 0x0285
+#define regPA_CL_ENHANCE_BASE_IDX 0
+#define regPA_CL_RESET_DEBUG 0x0286
+#define regPA_CL_RESET_DEBUG_BASE_IDX 0
+#define regPA_SU_CNTL_STATUS 0x0294
+#define regPA_SU_CNTL_STATUS_BASE_IDX 0
+#define regPA_SC_FIFO_DEPTH_CNTL 0x0295
+#define regPA_SC_FIFO_DEPTH_CNTL_BASE_IDX 0
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK 0x02c0
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 0
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK 0x02c1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 0
+#define regPA_SC_TRAP_SCREEN_HV_LOCK 0x02c2
+#define regPA_SC_TRAP_SCREEN_HV_LOCK_BASE_IDX 0
+#define regPA_SC_FORCE_EOV_MAX_CNTS 0x02c9
+#define regPA_SC_FORCE_EOV_MAX_CNTS_BASE_IDX 0
+#define regPA_SC_BINNER_EVENT_CNTL_0 0x02cc
+#define regPA_SC_BINNER_EVENT_CNTL_0_BASE_IDX 0
+#define regPA_SC_BINNER_EVENT_CNTL_1 0x02cd
+#define regPA_SC_BINNER_EVENT_CNTL_1_BASE_IDX 0
+#define regPA_SC_BINNER_EVENT_CNTL_2 0x02ce
+#define regPA_SC_BINNER_EVENT_CNTL_2_BASE_IDX 0
+#define regPA_SC_BINNER_EVENT_CNTL_3 0x02cf
+#define regPA_SC_BINNER_EVENT_CNTL_3_BASE_IDX 0
+#define regPA_SC_BINNER_TIMEOUT_COUNTER 0x02d0
+#define regPA_SC_BINNER_TIMEOUT_COUNTER_BASE_IDX 0
+#define regPA_SC_BINNER_PERF_CNTL_0 0x02d1
+#define regPA_SC_BINNER_PERF_CNTL_0_BASE_IDX 0
+#define regPA_SC_BINNER_PERF_CNTL_1 0x02d2
+#define regPA_SC_BINNER_PERF_CNTL_1_BASE_IDX 0
+#define regPA_SC_BINNER_PERF_CNTL_2 0x02d3
+#define regPA_SC_BINNER_PERF_CNTL_2_BASE_IDX 0
+#define regPA_SC_BINNER_PERF_CNTL_3 0x02d4
+#define regPA_SC_BINNER_PERF_CNTL_3_BASE_IDX 0
+#define regPA_SC_ENHANCE_2 0x02dc
+#define regPA_SC_ENHANCE_2_BASE_IDX 0
+#define regPA_SC_FIFO_SIZE 0x02f3
+#define regPA_SC_FIFO_SIZE_BASE_IDX 0
+#define regPA_SC_IF_FIFO_SIZE 0x02f5
+#define regPA_SC_IF_FIFO_SIZE_BASE_IDX 0
+#define regPA_SC_PKR_WAVE_TABLE_CNTL 0x02f8
+#define regPA_SC_PKR_WAVE_TABLE_CNTL_BASE_IDX 0
+#define regPA_UTCL1_CNTL1 0x02f9
+#define regPA_UTCL1_CNTL1_BASE_IDX 0
+#define regPA_UTCL1_CNTL2 0x02fa
+#define regPA_UTCL1_CNTL2_BASE_IDX 0
+#define regPA_SIDEBAND_REQUEST_DELAYS 0x02fb
+#define regPA_SIDEBAND_REQUEST_DELAYS_BASE_IDX 0
+#define regPA_SC_ENHANCE 0x02fc
+#define regPA_SC_ENHANCE_BASE_IDX 0
+#define regPA_SC_ENHANCE_1 0x02fd
+#define regPA_SC_ENHANCE_1_BASE_IDX 0
+#define regPA_SC_DSM_CNTL 0x02fe
+#define regPA_SC_DSM_CNTL_BASE_IDX 0
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE 0x02ff
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_sqdec
+// base address: 0x8c00
+#define regSQ_CONFIG 0x0300
+#define regSQ_CONFIG_BASE_IDX 0
+#define regSQC_CONFIG 0x0301
+#define regSQC_CONFIG_BASE_IDX 0
+#define regLDS_CONFIG 0x0302
+#define regLDS_CONFIG_BASE_IDX 0
+#define regSQ_RANDOM_WAVE_PRI 0x0303
+#define regSQ_RANDOM_WAVE_PRI_BASE_IDX 0
+#define regSQ_REG_CREDITS 0x0304
+#define regSQ_REG_CREDITS_BASE_IDX 0
+#define regSQ_FIFO_SIZES 0x0305
+#define regSQ_FIFO_SIZES_BASE_IDX 0
+#define regSQ_DSM_CNTL 0x0306
+#define regSQ_DSM_CNTL_BASE_IDX 0
+#define regSQ_DSM_CNTL2 0x0307
+#define regSQ_DSM_CNTL2_BASE_IDX 0
+#define regSQ_RUNTIME_CONFIG 0x0308
+#define regSQ_RUNTIME_CONFIG_BASE_IDX 0
+#define regSQ_DEBUG_STS_GLOBAL 0x0309
+#define regSQ_DEBUG_STS_GLOBAL_BASE_IDX 0
+#define regSH_MEM_BASES 0x030a
+#define regSH_MEM_BASES_BASE_IDX 0
+#define regSQ_TIMEOUT_CONFIG 0x030b
+#define regSQ_TIMEOUT_CONFIG_BASE_IDX 0
+#define regSQ_TIMEOUT_STATUS 0x030c
+#define regSQ_TIMEOUT_STATUS_BASE_IDX 0
+#define regSH_MEM_CONFIG 0x030d
+#define regSH_MEM_CONFIG_BASE_IDX 0
+#define regSP_MFMA_PORTD_RD_CONFIG 0x030e
+#define regSP_MFMA_PORTD_RD_CONFIG_BASE_IDX 0
+#define regSH_CAC_CONFIG 0x030f
+#define regSH_CAC_CONFIG_BASE_IDX 0
+#define regSQ_DEBUG_STS_GLOBAL2 0x0310
+#define regSQ_DEBUG_STS_GLOBAL2_BASE_IDX 0
+#define regSQ_DEBUG_STS_GLOBAL3 0x0311
+#define regSQ_DEBUG_STS_GLOBAL3_BASE_IDX 0
+#define regCC_GC_SHADER_RATE_CONFIG 0x0312
+#define regCC_GC_SHADER_RATE_CONFIG_BASE_IDX 0
+#define regGC_USER_SHADER_RATE_CONFIG 0x0313
+#define regGC_USER_SHADER_RATE_CONFIG_BASE_IDX 0
+#define regSQ_INTERRUPT_AUTO_MASK 0x0314
+#define regSQ_INTERRUPT_AUTO_MASK_BASE_IDX 0
+#define regSQ_INTERRUPT_MSG_CTRL 0x0315
+#define regSQ_INTERRUPT_MSG_CTRL_BASE_IDX 0
+#define regSQ_DEBUG_PERFCOUNT_TRAP 0x0316
+#define regSQ_DEBUG_PERFCOUNT_TRAP_BASE_IDX 0
+#define regSQ_UTCL1_CNTL1 0x0317
+#define regSQ_UTCL1_CNTL1_BASE_IDX 0
+#define regSQ_UTCL1_CNTL2 0x0318
+#define regSQ_UTCL1_CNTL2_BASE_IDX 0
+#define regSQ_UTCL1_STATUS 0x0319
+#define regSQ_UTCL1_STATUS_BASE_IDX 0
+#define regSQ_FED_INTERRUPT_STATUS 0x031a
+#define regSQ_FED_INTERRUPT_STATUS_BASE_IDX 0
+#define regSQ_CGTS_CONFIG 0x031b
+#define regSQ_CGTS_CONFIG_BASE_IDX 0
+#define regSQ_SHADER_TBA_LO 0x031c
+#define regSQ_SHADER_TBA_LO_BASE_IDX 0
+#define regSQ_SHADER_TBA_HI 0x031d
+#define regSQ_SHADER_TBA_HI_BASE_IDX 0
+#define regSQ_SHADER_TMA_LO 0x031e
+#define regSQ_SHADER_TMA_LO_BASE_IDX 0
+#define regSQ_SHADER_TMA_HI 0x031f
+#define regSQ_SHADER_TMA_HI_BASE_IDX 0
+#define regSQC_DSM_CNTL 0x0320
+#define regSQC_DSM_CNTL_BASE_IDX 0
+#define regSQC_DSM_CNTLA 0x0321
+#define regSQC_DSM_CNTLA_BASE_IDX 0
+#define regSQC_DSM_CNTLB 0x0322
+#define regSQC_DSM_CNTLB_BASE_IDX 0
+#define regSQC_DSM_CNTL2 0x0325
+#define regSQC_DSM_CNTL2_BASE_IDX 0
+#define regSQC_DSM_CNTL2A 0x0326
+#define regSQC_DSM_CNTL2A_BASE_IDX 0
+#define regSQC_DSM_CNTL2B 0x0327
+#define regSQC_DSM_CNTL2B_BASE_IDX 0
+#define regSQC_DSM_CNTL2E 0x032a
+#define regSQC_DSM_CNTL2E_BASE_IDX 0
+#define regSQC_EDC_FUE_CNTL 0x032b
+#define regSQC_EDC_FUE_CNTL_BASE_IDX 0
+#define regSQC_EDC_CNT2 0x032c
+#define regSQC_EDC_CNT2_BASE_IDX 0
+#define regSQC_EDC_CNT3 0x032d
+#define regSQC_EDC_CNT3_BASE_IDX 0
+#define regSQC_EDC_PARITY_CNT3 0x032e
+#define regSQC_EDC_PARITY_CNT3_BASE_IDX 0
+#define regSQ_DEBUG 0x0332
+#define regSQ_DEBUG_BASE_IDX 0
+#define regSQ_PERF_SNAPSHOT_CTRL 0x0334
+#define regSQ_PERF_SNAPSHOT_CTRL_BASE_IDX 0
+#define regSQ_DEBUG_FOR_INTERNAL_CTRL 0x0335
+#define regSQ_DEBUG_FOR_INTERNAL_CTRL_BASE_IDX 0
+#define regSQ_REG_TIMESTAMP 0x0374
+#define regSQ_REG_TIMESTAMP_BASE_IDX 0
+#define regSQ_CMD_TIMESTAMP 0x0375
+#define regSQ_CMD_TIMESTAMP_BASE_IDX 0
+#define regSQ_HOSTTRAP_STATUS 0x0376
+#define regSQ_HOSTTRAP_STATUS_BASE_IDX 0
+#define regSQ_IND_INDEX 0x0378
+#define regSQ_IND_INDEX_BASE_IDX 0
+#define regSQ_IND_DATA 0x0379
+#define regSQ_IND_DATA_BASE_IDX 0
+#define regSQ_CONFIG1 0x037a
+#define regSQ_CONFIG1_BASE_IDX 0
+#define regSQ_CMD 0x037b
+#define regSQ_CMD_BASE_IDX 0
+#define regSQ_TIME_HI 0x037c
+#define regSQ_TIME_HI_BASE_IDX 0
+#define regSQ_TIME_LO 0x037d
+#define regSQ_TIME_LO_BASE_IDX 0
+#define regSQ_DS_0 0x037f
+#define regSQ_DS_0_BASE_IDX 0
+#define regSQ_DS_1 0x037f
+#define regSQ_DS_1_BASE_IDX 0
+#define regSQ_EXP_0 0x037f
+#define regSQ_EXP_0_BASE_IDX 0
+#define regSQ_EXP_1 0x037f
+#define regSQ_EXP_1_BASE_IDX 0
+#define regSQ_FLAT_0 0x037f
+#define regSQ_FLAT_0_BASE_IDX 0
+#define regSQ_FLAT_1 0x037f
+#define regSQ_FLAT_1_BASE_IDX 0
+#define regSQ_GLBL_0 0x037f
+#define regSQ_GLBL_0_BASE_IDX 0
+#define regSQ_GLBL_1 0x037f
+#define regSQ_GLBL_1_BASE_IDX 0
+#define regSQ_INST 0x037f
+#define regSQ_INST_BASE_IDX 0
+#define regSQ_MIMG_0 0x037f
+#define regSQ_MIMG_0_BASE_IDX 0
+#define regSQ_MIMG_1 0x037f
+#define regSQ_MIMG_1_BASE_IDX 0
+#define regSQ_MTBUF_0 0x037f
+#define regSQ_MTBUF_0_BASE_IDX 0
+#define regSQ_MTBUF_1 0x037f
+#define regSQ_MTBUF_1_BASE_IDX 0
+#define regSQ_MUBUF_0 0x037f
+#define regSQ_MUBUF_0_BASE_IDX 0
+#define regSQ_MUBUF_1 0x037f
+#define regSQ_MUBUF_1_BASE_IDX 0
+#define regSQ_SCRATCH_0 0x037f
+#define regSQ_SCRATCH_0_BASE_IDX 0
+#define regSQ_SCRATCH_1 0x037f
+#define regSQ_SCRATCH_1_BASE_IDX 0
+#define regSQ_SMEM_0 0x037f
+#define regSQ_SMEM_0_BASE_IDX 0
+#define regSQ_SMEM_1 0x037f
+#define regSQ_SMEM_1_BASE_IDX 0
+#define regSQ_SOP1 0x037f
+#define regSQ_SOP1_BASE_IDX 0
+#define regSQ_SOP2 0x037f
+#define regSQ_SOP2_BASE_IDX 0
+#define regSQ_SOPC 0x037f
+#define regSQ_SOPC_BASE_IDX 0
+#define regSQ_SOPK 0x037f
+#define regSQ_SOPK_BASE_IDX 0
+#define regSQ_SOPP 0x037f
+#define regSQ_SOPP_BASE_IDX 0
+#define regSQ_VINTRP 0x037f
+#define regSQ_VINTRP_BASE_IDX 0
+#define regSQ_VOP1 0x037f
+#define regSQ_VOP1_BASE_IDX 0
+#define regSQ_VOP2 0x037f
+#define regSQ_VOP2_BASE_IDX 0
+#define regSQ_VOP3P_0 0x037f
+#define regSQ_VOP3P_0_BASE_IDX 0
+#define regSQ_VOP3P_1 0x037f
+#define regSQ_VOP3P_1_BASE_IDX 0
+#define regSQ_VOP3P_MFMA_0 0x037f
+#define regSQ_VOP3P_MFMA_0_BASE_IDX 0
+#define regSQ_VOP3P_MFMA_1 0x037f
+#define regSQ_VOP3P_MFMA_1_BASE_IDX 0
+#define regSQ_VOP3_0 0x037f
+#define regSQ_VOP3_0_BASE_IDX 0
+#define regSQ_VOP3_0_SDST_ENC 0x037f
+#define regSQ_VOP3_0_SDST_ENC_BASE_IDX 0
+#define regSQ_VOP3_1 0x037f
+#define regSQ_VOP3_1_BASE_IDX 0
+#define regSQ_VOPC 0x037f
+#define regSQ_VOPC_BASE_IDX 0
+#define regSQ_VOP_DPP 0x037f
+#define regSQ_VOP_DPP_BASE_IDX 0
+#define regSQ_VOP_SDWA 0x037f
+#define regSQ_VOP_SDWA_BASE_IDX 0
+#define regSQ_VOP_SDWA_SDST_ENC 0x037f
+#define regSQ_VOP_SDWA_SDST_ENC_BASE_IDX 0
+#define regSQ_LB_CTR_CTRL 0x0398
+#define regSQ_LB_CTR_CTRL_BASE_IDX 0
+#define regSQ_LB_DATA0 0x0399
+#define regSQ_LB_DATA0_BASE_IDX 0
+#define regSQ_LB_DATA1 0x039a
+#define regSQ_LB_DATA1_BASE_IDX 0
+#define regSQ_LB_DATA2 0x039b
+#define regSQ_LB_DATA2_BASE_IDX 0
+#define regSQ_LB_DATA3 0x039c
+#define regSQ_LB_DATA3_BASE_IDX 0
+#define regSQ_LB_CTR_SEL 0x039d
+#define regSQ_LB_CTR_SEL_BASE_IDX 0
+#define regSQ_LB_CTR0_CU 0x039e
+#define regSQ_LB_CTR0_CU_BASE_IDX 0
+#define regSQ_LB_CTR1_CU 0x039f
+#define regSQ_LB_CTR1_CU_BASE_IDX 0
+#define regSQ_LB_CTR2_CU 0x03a0
+#define regSQ_LB_CTR2_CU_BASE_IDX 0
+#define regSQ_LB_CTR3_CU 0x03a1
+#define regSQ_LB_CTR3_CU_BASE_IDX 0
+#define regSQC_EDC_CNT 0x03a2
+#define regSQC_EDC_CNT_BASE_IDX 0
+#define regSQ_EDC_SEC_CNT 0x03a3
+#define regSQ_EDC_SEC_CNT_BASE_IDX 0
+#define regSQ_EDC_DED_CNT 0x03a4
+#define regSQ_EDC_DED_CNT_BASE_IDX 0
+#define regSQ_EDC_INFO 0x03a5
+#define regSQ_EDC_INFO_BASE_IDX 0
+#define regSQ_EDC_CNT 0x03a6
+#define regSQ_EDC_CNT_BASE_IDX 0
+#define regSQ_EDC_FUE_CNTL 0x03a7
+#define regSQ_EDC_FUE_CNTL_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_CMN 0x03b0
+#define regSQ_THREAD_TRACE_WORD_CMN_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_EVENT 0x03b0
+#define regSQ_THREAD_TRACE_WORD_EVENT_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_INST 0x03b0
+#define regSQ_THREAD_TRACE_WORD_INST_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_ISSUE 0x03b0
+#define regSQ_THREAD_TRACE_WORD_ISSUE_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_MISC 0x03b0
+#define regSQ_THREAD_TRACE_WORD_MISC_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_PERF_1_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_PERF_1_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_REG_1_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_REG_1_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_REG_2_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_REG_2_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_REG_CS_1_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_REG_CS_1_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_REG_CS_2_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_REG_CS_2_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2 0x03b0
+#define regSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_WAVE 0x03b0
+#define regSQ_THREAD_TRACE_WORD_WAVE_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_WAVE_START 0x03b0
+#define regSQ_THREAD_TRACE_WORD_WAVE_START_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2 0x03b1
+#define regSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2 0x03b1
+#define regSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_PERF_2_OF_2 0x03b1
+#define regSQ_THREAD_TRACE_WORD_PERF_2_OF_2_BASE_IDX 0
+#define regSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2 0x03b1
+#define regSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2_BASE_IDX 0
+#define regSQ_WREXEC_EXEC_HI 0x03b1
+#define regSQ_WREXEC_EXEC_HI_BASE_IDX 0
+#define regSQ_WREXEC_EXEC_LO 0x03b1
+#define regSQ_WREXEC_EXEC_LO_BASE_IDX 0
+#define regSQ_BUF_RSRC_WORD0 0x03c0
+#define regSQ_BUF_RSRC_WORD0_BASE_IDX 0
+#define regSQ_BUF_RSRC_WORD1 0x03c1
+#define regSQ_BUF_RSRC_WORD1_BASE_IDX 0
+#define regSQ_BUF_RSRC_WORD2 0x03c2
+#define regSQ_BUF_RSRC_WORD2_BASE_IDX 0
+#define regSQ_BUF_RSRC_WORD3 0x03c3
+#define regSQ_BUF_RSRC_WORD3_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD0 0x03c4
+#define regSQ_IMG_RSRC_WORD0_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD1 0x03c5
+#define regSQ_IMG_RSRC_WORD1_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD2 0x03c6
+#define regSQ_IMG_RSRC_WORD2_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD3 0x03c7
+#define regSQ_IMG_RSRC_WORD3_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD4 0x03c8
+#define regSQ_IMG_RSRC_WORD4_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD5 0x03c9
+#define regSQ_IMG_RSRC_WORD5_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD6 0x03ca
+#define regSQ_IMG_RSRC_WORD6_BASE_IDX 0
+#define regSQ_IMG_RSRC_WORD7 0x03cb
+#define regSQ_IMG_RSRC_WORD7_BASE_IDX 0
+#define regSQ_IMG_SAMP_WORD0 0x03cc
+#define regSQ_IMG_SAMP_WORD0_BASE_IDX 0
+#define regSQ_IMG_SAMP_WORD1 0x03cd
+#define regSQ_IMG_SAMP_WORD1_BASE_IDX 0
+#define regSQ_IMG_SAMP_WORD2 0x03ce
+#define regSQ_IMG_SAMP_WORD2_BASE_IDX 0
+#define regSQ_IMG_SAMP_WORD3 0x03cf
+#define regSQ_IMG_SAMP_WORD3_BASE_IDX 0
+#define regSQ_FLAT_SCRATCH_WORD0 0x03d0
+#define regSQ_FLAT_SCRATCH_WORD0_BASE_IDX 0
+#define regSQ_FLAT_SCRATCH_WORD1 0x03d1
+#define regSQ_FLAT_SCRATCH_WORD1_BASE_IDX 0
+#define regSQ_M0_GPR_IDX_WORD 0x03d2
+#define regSQ_M0_GPR_IDX_WORD_BASE_IDX 0
+#define regSQC_ICACHE_UTCL1_CNTL1 0x03d3
+#define regSQC_ICACHE_UTCL1_CNTL1_BASE_IDX 0
+#define regSQC_ICACHE_UTCL1_CNTL2 0x03d4
+#define regSQC_ICACHE_UTCL1_CNTL2_BASE_IDX 0
+#define regSQC_DCACHE_UTCL1_CNTL1 0x03d5
+#define regSQC_DCACHE_UTCL1_CNTL1_BASE_IDX 0
+#define regSQC_DCACHE_UTCL1_CNTL2 0x03d6
+#define regSQC_DCACHE_UTCL1_CNTL2_BASE_IDX 0
+#define regSQC_ICACHE_UTCL1_STATUS 0x03d7
+#define regSQC_ICACHE_UTCL1_STATUS_BASE_IDX 0
+#define regSQC_DCACHE_UTCL1_STATUS 0x03d8
+#define regSQC_DCACHE_UTCL1_STATUS_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_shsdec
+// base address: 0x9000
+#define regSX_DEBUG_BUSY 0x0414
+#define regSX_DEBUG_BUSY_BASE_IDX 0
+#define regSX_DEBUG_1 0x0419
+#define regSX_DEBUG_1_BASE_IDX 0
+#define regSPI_PS_MAX_WAVE_ID 0x043a
+#define regSPI_PS_MAX_WAVE_ID_BASE_IDX 0
+#define regSPI_START_PHASE 0x043b
+#define regSPI_START_PHASE_BASE_IDX 0
+#define regSPI_GFX_CNTL 0x043c
+#define regSPI_GFX_CNTL_BASE_IDX 0
+#define regSPI_DEBUG_READ 0x0442
+#define regSPI_DEBUG_READ_BASE_IDX 0
+#define regSPI_DSM_CNTL 0x0443
+#define regSPI_DSM_CNTL_BASE_IDX 0
+#define regSPI_DSM_CNTL2 0x0444
+#define regSPI_DSM_CNTL2_BASE_IDX 0
+#define regSPI_EDC_CNT 0x0445
+#define regSPI_EDC_CNT_BASE_IDX 0
+#define regSPI_DEBUG_BUSY 0x0450
+#define regSPI_DEBUG_BUSY_BASE_IDX 0
+#define regSPI_CONFIG_PS_CU_EN 0x0452
+#define regSPI_CONFIG_PS_CU_EN_BASE_IDX 0
+#define regSPI_WF_LIFETIME_CNTL 0x04aa
+#define regSPI_WF_LIFETIME_CNTL_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_0 0x04ab
+#define regSPI_WF_LIFETIME_LIMIT_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_1 0x04ac
+#define regSPI_WF_LIFETIME_LIMIT_1_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_2 0x04ad
+#define regSPI_WF_LIFETIME_LIMIT_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_3 0x04ae
+#define regSPI_WF_LIFETIME_LIMIT_3_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_4 0x04af
+#define regSPI_WF_LIFETIME_LIMIT_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_5 0x04b0
+#define regSPI_WF_LIFETIME_LIMIT_5_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_6 0x04b1
+#define regSPI_WF_LIFETIME_LIMIT_6_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_7 0x04b2
+#define regSPI_WF_LIFETIME_LIMIT_7_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_8 0x04b3
+#define regSPI_WF_LIFETIME_LIMIT_8_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_9 0x04b4
+#define regSPI_WF_LIFETIME_LIMIT_9_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_0 0x04b5
+#define regSPI_WF_LIFETIME_STATUS_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_1 0x04b6
+#define regSPI_WF_LIFETIME_STATUS_1_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_2 0x04b7
+#define regSPI_WF_LIFETIME_STATUS_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_3 0x04b8
+#define regSPI_WF_LIFETIME_STATUS_3_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_4 0x04b9
+#define regSPI_WF_LIFETIME_STATUS_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_5 0x04ba
+#define regSPI_WF_LIFETIME_STATUS_5_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_6 0x04bb
+#define regSPI_WF_LIFETIME_STATUS_6_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_7 0x04bc
+#define regSPI_WF_LIFETIME_STATUS_7_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_8 0x04bd
+#define regSPI_WF_LIFETIME_STATUS_8_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_9 0x04be
+#define regSPI_WF_LIFETIME_STATUS_9_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_10 0x04bf
+#define regSPI_WF_LIFETIME_STATUS_10_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_11 0x04c0
+#define regSPI_WF_LIFETIME_STATUS_11_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_12 0x04c1
+#define regSPI_WF_LIFETIME_STATUS_12_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_13 0x04c2
+#define regSPI_WF_LIFETIME_STATUS_13_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_14 0x04c3
+#define regSPI_WF_LIFETIME_STATUS_14_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_15 0x04c4
+#define regSPI_WF_LIFETIME_STATUS_15_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_16 0x04c5
+#define regSPI_WF_LIFETIME_STATUS_16_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_17 0x04c6
+#define regSPI_WF_LIFETIME_STATUS_17_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_18 0x04c7
+#define regSPI_WF_LIFETIME_STATUS_18_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_19 0x04c8
+#define regSPI_WF_LIFETIME_STATUS_19_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_20 0x04c9
+#define regSPI_WF_LIFETIME_STATUS_20_BASE_IDX 0
+#define regSPI_WF_LIFETIME_DEBUG 0x04ca
+#define regSPI_WF_LIFETIME_DEBUG_BASE_IDX 0
+#define regSPI_LB_CTR_CTRL 0x04d4
+#define regSPI_LB_CTR_CTRL_BASE_IDX 0
+#define regSPI_LB_CU_MASK 0x04d5
+#define regSPI_LB_CU_MASK_BASE_IDX 0
+#define regSPI_LB_DATA_REG 0x04d6
+#define regSPI_LB_DATA_REG_BASE_IDX 0
+#define regSPI_PG_ENABLE_STATIC_CU_MASK 0x04d7
+#define regSPI_PG_ENABLE_STATIC_CU_MASK_BASE_IDX 0
+#define regSPI_GDS_CREDITS 0x04d8
+#define regSPI_GDS_CREDITS_BASE_IDX 0
+#define regSPI_SX_EXPORT_BUFFER_SIZES 0x04d9
+#define regSPI_SX_EXPORT_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES 0x04da
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_STATUS 0x04db
+#define regSPI_CSQ_WF_ACTIVE_STATUS_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0 0x04dc
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1 0x04dd
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2 0x04de
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3 0x04df
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_4 0x04e0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_4_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_5 0x04e1
+#define regSPI_CSQ_WF_ACTIVE_COUNT_5_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_6 0x04e2
+#define regSPI_CSQ_WF_ACTIVE_COUNT_6_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_7 0x04e3
+#define regSPI_CSQ_WF_ACTIVE_COUNT_7_BASE_IDX 0
+#define regSPI_LB_DATA_WAVES 0x04e4
+#define regSPI_LB_DATA_WAVES_BASE_IDX 0
+#define regSPI_LB_DATA_PERCU_WAVE_HSGS 0x04e5
+#define regSPI_LB_DATA_PERCU_WAVE_HSGS_BASE_IDX 0
+#define regSPI_LB_DATA_PERCU_WAVE_VSPS 0x04e6
+#define regSPI_LB_DATA_PERCU_WAVE_VSPS_BASE_IDX 0
+#define regSPI_LB_DATA_PERCU_WAVE_CS 0x04e7
+#define regSPI_LB_DATA_PERCU_WAVE_CS_BASE_IDX 0
+#define regSPIS_DEBUG_READ 0x04ea
+#define regSPIS_DEBUG_READ_BASE_IDX 0
+#define regBCI_DEBUG_READ 0x04eb
+#define regBCI_DEBUG_READ_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO 0x04ec
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI 0x04ed
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO 0x04ee
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI 0x04ef
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN 0x04f0
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO 0x04f1
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI 0x04f2
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO 0x04f3
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI 0x04f4
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN 0x04f5
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_tpdec
+// base address: 0x9400
+#define regTD_CNTL 0x0525
+#define regTD_CNTL_BASE_IDX 0
+#define regTD_STATUS 0x0526
+#define regTD_STATUS_BASE_IDX 0
+#define regTD_POWER_CNTL 0x052a
+#define regTD_POWER_CNTL_BASE_IDX 0
+#define regTD_DSM_CNTL 0x052f
+#define regTD_DSM_CNTL_BASE_IDX 0
+#define regTD_DSM_CNTL2 0x0530
+#define regTD_DSM_CNTL2_BASE_IDX 0
+#define regTD_SCRATCH 0x0533
+#define regTD_SCRATCH_BASE_IDX 0
+#define regTA_POWER_CNTL 0x0540
+#define regTA_POWER_CNTL_BASE_IDX 0
+#define regTA_CNTL 0x0541
+#define regTA_CNTL_BASE_IDX 0
+#define regTA_CNTL_AUX 0x0542
+#define regTA_CNTL_AUX_BASE_IDX 0
+#define regTA_FEATURE_CNTL 0x0543
+#define regTA_FEATURE_CNTL_BASE_IDX 0
+#define regTA_STATUS 0x0548
+#define regTA_STATUS_BASE_IDX 0
+#define regTA_SCRATCH 0x0564
+#define regTA_SCRATCH_BASE_IDX 0
+#define regTA_DSM_CNTL 0x0584
+#define regTA_DSM_CNTL_BASE_IDX 0
+#define regTA_DSM_CNTL2 0x0585
+#define regTA_DSM_CNTL2_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_gdsdec
+// base address: 0x9700
+#define regGDS_CONFIG 0x05c0
+#define regGDS_CONFIG_BASE_IDX 0
+#define regGDS_CNTL_STATUS 0x05c1
+#define regGDS_CNTL_STATUS_BASE_IDX 0
+#define regGDS_ENHANCE2 0x05c2
+#define regGDS_ENHANCE2_BASE_IDX 0
+#define regGDS_PROTECTION_FAULT 0x05c3
+#define regGDS_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_VM_PROTECTION_FAULT 0x05c4
+#define regGDS_VM_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_EDC_CNT 0x05c5
+#define regGDS_EDC_CNT_BASE_IDX 0
+#define regGDS_EDC_GRBM_CNT 0x05c6
+#define regGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_DED 0x05c7
+#define regGDS_EDC_OA_DED_BASE_IDX 0
+#define regGDS_DSM_CNTL 0x05ca
+#define regGDS_DSM_CNTL_BASE_IDX 0
+#define regGDS_EDC_OA_PHY_CNT 0x05cb
+#define regGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_PIPE_CNT 0x05cc
+#define regGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+#define regGDS_DSM_CNTL2 0x05cd
+#define regGDS_DSM_CNTL2_BASE_IDX 0
+#define regGDS_WD_GDS_CSB 0x05ce
+#define regGDS_WD_GDS_CSB_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_rbdec
+// base address: 0x9800
+#define regDB_DEBUG 0x060c
+#define regDB_DEBUG_BASE_IDX 0
+#define regDB_DEBUG2 0x060d
+#define regDB_DEBUG2_BASE_IDX 0
+#define regDB_DEBUG3 0x060e
+#define regDB_DEBUG3_BASE_IDX 0
+#define regDB_DEBUG4 0x060f
+#define regDB_DEBUG4_BASE_IDX 0
+#define regDB_CREDIT_LIMIT 0x0614
+#define regDB_CREDIT_LIMIT_BASE_IDX 0
+#define regDB_WATERMARKS 0x0615
+#define regDB_WATERMARKS_BASE_IDX 0
+#define regDB_SUBTILE_CONTROL 0x0616
+#define regDB_SUBTILE_CONTROL_BASE_IDX 0
+#define regDB_FREE_CACHELINES 0x0617
+#define regDB_FREE_CACHELINES_BASE_IDX 0
+#define regDB_FIFO_DEPTH1 0x0618
+#define regDB_FIFO_DEPTH1_BASE_IDX 0
+#define regDB_FIFO_DEPTH2 0x0619
+#define regDB_FIFO_DEPTH2_BASE_IDX 0
+#define regDB_EXCEPTION_CONTROL 0x061a
+#define regDB_EXCEPTION_CONTROL_BASE_IDX 0
+#define regDB_RING_CONTROL 0x061b
+#define regDB_RING_CONTROL_BASE_IDX 0
+#define regDB_MEM_ARB_WATERMARKS 0x061c
+#define regDB_MEM_ARB_WATERMARKS_BASE_IDX 0
+#define regDB_RMI_CACHE_POLICY 0x061e
+#define regDB_RMI_CACHE_POLICY_BASE_IDX 0
+#define regDB_DFSM_CONFIG 0x0630
+#define regDB_DFSM_CONFIG_BASE_IDX 0
+#define regDB_DFSM_WATERMARK 0x0631
+#define regDB_DFSM_WATERMARK_BASE_IDX 0
+#define regDB_DFSM_TILES_IN_FLIGHT 0x0632
+#define regDB_DFSM_TILES_IN_FLIGHT_BASE_IDX 0
+#define regDB_DFSM_PRIMS_IN_FLIGHT 0x0633
+#define regDB_DFSM_PRIMS_IN_FLIGHT_BASE_IDX 0
+#define regDB_DFSM_WATCHDOG 0x0634
+#define regDB_DFSM_WATCHDOG_BASE_IDX 0
+#define regDB_DFSM_FLUSH_ENABLE 0x0635
+#define regDB_DFSM_FLUSH_ENABLE_BASE_IDX 0
+#define regDB_DFSM_FLUSH_AUX_EVENT 0x0636
+#define regDB_DFSM_FLUSH_AUX_EVENT_BASE_IDX 0
+#define regCC_RB_REDUNDANCY 0x063c
+#define regCC_RB_REDUNDANCY_BASE_IDX 0
+#define regCC_RB_BACKEND_DISABLE 0x063d
+#define regCC_RB_BACKEND_DISABLE_BASE_IDX 0
+#define regGB_ADDR_CONFIG 0x063e
+#define regGB_ADDR_CONFIG_BASE_IDX 0
+#define regGB_BACKEND_MAP 0x063f
+#define regGB_BACKEND_MAP_BASE_IDX 0
+#define regGB_GPU_ID 0x0640
+#define regGB_GPU_ID_BASE_IDX 0
+#define regCC_RB_DAISY_CHAIN 0x0641
+#define regCC_RB_DAISY_CHAIN_BASE_IDX 0
+#define regGB_ADDR_CONFIG_READ 0x0642
+#define regGB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regGB_TILE_MODE0 0x0644
+#define regGB_TILE_MODE0_BASE_IDX 0
+#define regGB_TILE_MODE1 0x0645
+#define regGB_TILE_MODE1_BASE_IDX 0
+#define regGB_TILE_MODE2 0x0646
+#define regGB_TILE_MODE2_BASE_IDX 0
+#define regGB_TILE_MODE3 0x0647
+#define regGB_TILE_MODE3_BASE_IDX 0
+#define regGB_TILE_MODE4 0x0648
+#define regGB_TILE_MODE4_BASE_IDX 0
+#define regGB_TILE_MODE5 0x0649
+#define regGB_TILE_MODE5_BASE_IDX 0
+#define regGB_TILE_MODE6 0x064a
+#define regGB_TILE_MODE6_BASE_IDX 0
+#define regGB_TILE_MODE7 0x064b
+#define regGB_TILE_MODE7_BASE_IDX 0
+#define regGB_TILE_MODE8 0x064c
+#define regGB_TILE_MODE8_BASE_IDX 0
+#define regGB_TILE_MODE9 0x064d
+#define regGB_TILE_MODE9_BASE_IDX 0
+#define regGB_TILE_MODE10 0x064e
+#define regGB_TILE_MODE10_BASE_IDX 0
+#define regGB_TILE_MODE11 0x064f
+#define regGB_TILE_MODE11_BASE_IDX 0
+#define regGB_TILE_MODE12 0x0650
+#define regGB_TILE_MODE12_BASE_IDX 0
+#define regGB_TILE_MODE13 0x0651
+#define regGB_TILE_MODE13_BASE_IDX 0
+#define regGB_TILE_MODE14 0x0652
+#define regGB_TILE_MODE14_BASE_IDX 0
+#define regGB_TILE_MODE15 0x0653
+#define regGB_TILE_MODE15_BASE_IDX 0
+#define regGB_TILE_MODE16 0x0654
+#define regGB_TILE_MODE16_BASE_IDX 0
+#define regGB_TILE_MODE17 0x0655
+#define regGB_TILE_MODE17_BASE_IDX 0
+#define regGB_TILE_MODE18 0x0656
+#define regGB_TILE_MODE18_BASE_IDX 0
+#define regGB_TILE_MODE19 0x0657
+#define regGB_TILE_MODE19_BASE_IDX 0
+#define regGB_TILE_MODE20 0x0658
+#define regGB_TILE_MODE20_BASE_IDX 0
+#define regGB_TILE_MODE21 0x0659
+#define regGB_TILE_MODE21_BASE_IDX 0
+#define regGB_TILE_MODE22 0x065a
+#define regGB_TILE_MODE22_BASE_IDX 0
+#define regGB_TILE_MODE23 0x065b
+#define regGB_TILE_MODE23_BASE_IDX 0
+#define regGB_TILE_MODE24 0x065c
+#define regGB_TILE_MODE24_BASE_IDX 0
+#define regGB_TILE_MODE25 0x065d
+#define regGB_TILE_MODE25_BASE_IDX 0
+#define regGB_TILE_MODE26 0x065e
+#define regGB_TILE_MODE26_BASE_IDX 0
+#define regGB_TILE_MODE27 0x065f
+#define regGB_TILE_MODE27_BASE_IDX 0
+#define regGB_TILE_MODE28 0x0660
+#define regGB_TILE_MODE28_BASE_IDX 0
+#define regGB_TILE_MODE29 0x0661
+#define regGB_TILE_MODE29_BASE_IDX 0
+#define regGB_TILE_MODE30 0x0662
+#define regGB_TILE_MODE30_BASE_IDX 0
+#define regGB_TILE_MODE31 0x0663
+#define regGB_TILE_MODE31_BASE_IDX 0
+#define regGB_MACROTILE_MODE0 0x0664
+#define regGB_MACROTILE_MODE0_BASE_IDX 0
+#define regGB_MACROTILE_MODE1 0x0665
+#define regGB_MACROTILE_MODE1_BASE_IDX 0
+#define regGB_MACROTILE_MODE2 0x0666
+#define regGB_MACROTILE_MODE2_BASE_IDX 0
+#define regGB_MACROTILE_MODE3 0x0667
+#define regGB_MACROTILE_MODE3_BASE_IDX 0
+#define regGB_MACROTILE_MODE4 0x0668
+#define regGB_MACROTILE_MODE4_BASE_IDX 0
+#define regGB_MACROTILE_MODE5 0x0669
+#define regGB_MACROTILE_MODE5_BASE_IDX 0
+#define regGB_MACROTILE_MODE6 0x066a
+#define regGB_MACROTILE_MODE6_BASE_IDX 0
+#define regGB_MACROTILE_MODE7 0x066b
+#define regGB_MACROTILE_MODE7_BASE_IDX 0
+#define regGB_MACROTILE_MODE8 0x066c
+#define regGB_MACROTILE_MODE8_BASE_IDX 0
+#define regGB_MACROTILE_MODE9 0x066d
+#define regGB_MACROTILE_MODE9_BASE_IDX 0
+#define regGB_MACROTILE_MODE10 0x066e
+#define regGB_MACROTILE_MODE10_BASE_IDX 0
+#define regGB_MACROTILE_MODE11 0x066f
+#define regGB_MACROTILE_MODE11_BASE_IDX 0
+#define regGB_MACROTILE_MODE12 0x0670
+#define regGB_MACROTILE_MODE12_BASE_IDX 0
+#define regGB_MACROTILE_MODE13 0x0671
+#define regGB_MACROTILE_MODE13_BASE_IDX 0
+#define regGB_MACROTILE_MODE14 0x0672
+#define regGB_MACROTILE_MODE14_BASE_IDX 0
+#define regGB_MACROTILE_MODE15 0x0673
+#define regGB_MACROTILE_MODE15_BASE_IDX 0
+#define regCB_HW_CONTROL 0x0680
+#define regCB_HW_CONTROL_BASE_IDX 0
+#define regCB_HW_CONTROL_1 0x0681
+#define regCB_HW_CONTROL_1_BASE_IDX 0
+#define regCB_HW_CONTROL_2 0x0682
+#define regCB_HW_CONTROL_2_BASE_IDX 0
+#define regCB_HW_CONTROL_3 0x0683
+#define regCB_HW_CONTROL_3_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_RD 0x0686
+#define regCB_HW_MEM_ARBITER_RD_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_WR 0x0687
+#define regCB_HW_MEM_ARBITER_WR_BASE_IDX 0
+#define regCB_DCC_CONFIG 0x0688
+#define regCB_DCC_CONFIG_BASE_IDX 0
+#define regGC_USER_RB_REDUNDANCY 0x06de
+#define regGC_USER_RB_REDUNDANCY_BASE_IDX 0
+#define regGC_USER_RB_BACKEND_DISABLE 0x06df
+#define regGC_USER_RB_BACKEND_DISABLE_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_ea_gceadec
+// base address: 0xa800
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0 0x0a00
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1 0x0a01
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0 0x0a02
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1 0x0a03
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_RD_GRP2VC_MAP 0x0a04
+#define regGCEA_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_WR_GRP2VC_MAP 0x0a05
+#define regGCEA_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_RD_LAZY 0x0a06
+#define regGCEA_DRAM_RD_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_WR_LAZY 0x0a07
+#define regGCEA_DRAM_WR_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_RD_CAM_CNTL 0x0a08
+#define regGCEA_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_WR_CAM_CNTL 0x0a09
+#define regGCEA_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_PAGE_BURST 0x0a0a
+#define regGCEA_DRAM_PAGE_BURST_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_AGE 0x0a0b
+#define regGCEA_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_AGE 0x0a0c
+#define regGCEA_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUEUING 0x0a0d
+#define regGCEA_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUEUING 0x0a0e
+#define regGCEA_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_FIXED 0x0a0f
+#define regGCEA_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_FIXED 0x0a10
+#define regGCEA_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_URGENCY 0x0a11
+#define regGCEA_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_URGENCY 0x0a12
+#define regGCEA_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1 0x0a13
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2 0x0a14
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3 0x0a15
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1 0x0a16
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2 0x0a17
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3 0x0a18
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP0 0x0ad5
+#define regGCEA_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP1 0x0ad6
+#define regGCEA_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP0 0x0ad7
+#define regGCEA_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP1 0x0ad8
+#define regGCEA_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_RD_COMBINE_FLUSH 0x0ad9
+#define regGCEA_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_WR_COMBINE_FLUSH 0x0ada
+#define regGCEA_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_GROUP_BURST 0x0adb
+#define regGCEA_IO_GROUP_BURST_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_AGE 0x0adc
+#define regGCEA_IO_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_AGE 0x0add
+#define regGCEA_IO_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUEUING 0x0ade
+#define regGCEA_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUEUING 0x0adf
+#define regGCEA_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_FIXED 0x0ae0
+#define regGCEA_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_FIXED 0x0ae1
+#define regGCEA_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY 0x0ae2
+#define regGCEA_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY 0x0ae3
+#define regGCEA_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING 0x0ae4
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING 0x0ae5
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI1 0x0ae6
+#define regGCEA_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI2 0x0ae7
+#define regGCEA_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI3 0x0ae8
+#define regGCEA_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI1 0x0ae9
+#define regGCEA_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI2 0x0aea
+#define regGCEA_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI3 0x0aeb
+#define regGCEA_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_SDP_ARB_DRAM 0x0aec
+#define regGCEA_SDP_ARB_DRAM_BASE_IDX 0
+#define regGCEA_SDP_ARB_FINAL 0x0aee
+#define regGCEA_SDP_ARB_FINAL_BASE_IDX 0
+#define regGCEA_SDP_DRAM_PRIORITY 0x0aef
+#define regGCEA_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_IO_PRIORITY 0x0af1
+#define regGCEA_SDP_IO_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_CREDITS 0x0af2
+#define regGCEA_SDP_CREDITS_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE0 0x0af3
+#define regGCEA_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE1 0x0af4
+#define regGCEA_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE0 0x0af5
+#define regGCEA_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE1 0x0af6
+#define regGCEA_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCD_RESERVE0 0x0af7
+#define regGCEA_SDP_VCD_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_VCD_RESERVE1 0x0af8
+#define regGCEA_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_REQ_CNTL 0x0af9
+#define regGCEA_SDP_REQ_CNTL_BASE_IDX 0
+#define regGCEA_MISC 0x0afa
+#define regGCEA_MISC_BASE_IDX 0
+#define regGCEA_LATENCY_SAMPLING 0x0afb
+#define regGCEA_LATENCY_SAMPLING_BASE_IDX 0
+#define regGCEA_PERFCOUNTER_LO 0x0afc
+#define regGCEA_PERFCOUNTER_LO_BASE_IDX 0
+#define regGCEA_PERFCOUNTER_HI 0x0afd
+#define regGCEA_PERFCOUNTER_HI_BASE_IDX 0
+#define regGCEA_PERFCOUNTER0_CFG 0x0afe
+#define regGCEA_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regGCEA_PERFCOUNTER1_CFG 0x0aff
+#define regGCEA_PERFCOUNTER1_CFG_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_ea_gceadec2
+// base address: 0x9c00
+#define regGCEA_PERFCOUNTER_RSLT_CNTL 0x0700
+#define regGCEA_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regGCEA_MAM_CTRL 0x0701
+#define regGCEA_MAM_CTRL_BASE_IDX 0
+#define regGCEA_MAM_CTRL2 0x0702
+#define regGCEA_MAM_CTRL2_BASE_IDX 0
+#define regGCEA_DSM_CNTL 0x0708
+#define regGCEA_DSM_CNTL_BASE_IDX 0
+#define regGCEA_DSM_CNTLA 0x0709
+#define regGCEA_DSM_CNTLA_BASE_IDX 0
+#define regGCEA_DSM_CNTLB 0x070a
+#define regGCEA_DSM_CNTLB_BASE_IDX 0
+#define regGCEA_DSM_CNTL2 0x070b
+#define regGCEA_DSM_CNTL2_BASE_IDX 0
+#define regGCEA_DSM_CNTL2A 0x070c
+#define regGCEA_DSM_CNTL2A_BASE_IDX 0
+#define regGCEA_DSM_CNTL2B 0x070d
+#define regGCEA_DSM_CNTL2B_BASE_IDX 0
+#define regGCEA_TCC_XBR_CREDITS 0x070e
+#define regGCEA_TCC_XBR_CREDITS_BASE_IDX 0
+#define regGCEA_TCC_XBR_MAXBURST 0x070f
+#define regGCEA_TCC_XBR_MAXBURST_BASE_IDX 0
+#define regGCEA_PROBE_CNTL 0x0710
+#define regGCEA_PROBE_CNTL_BASE_IDX 0
+#define regGCEA_PROBE_MAP 0x0711
+#define regGCEA_PROBE_MAP_BASE_IDX 0
+#define regGCEA_ERR_STATUS 0x0712
+#define regGCEA_ERR_STATUS_BASE_IDX 0
+#define regGCEA_MISC2 0x0713
+#define regGCEA_MISC2_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0 0x0715
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1 0x0716
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0 0x0717
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1 0x0718
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS 0x0719
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0
+#define regGCEA_SDP_ENABLE 0x071f
+#define regGCEA_SDP_ENABLE_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_ea_pwrdec
+// base address: 0x3c000
+#define regGCEA_ICG_CTRL 0x50c4
+#define regGCEA_ICG_CTRL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_rmi_rmidec
+// base address: 0x9e00
+#define regRMI_GENERAL_CNTL 0x0780
+#define regRMI_GENERAL_CNTL_BASE_IDX 0
+#define regRMI_GENERAL_CNTL1 0x0781
+#define regRMI_GENERAL_CNTL1_BASE_IDX 0
+#define regRMI_GENERAL_STATUS 0x0782
+#define regRMI_GENERAL_STATUS_BASE_IDX 0
+#define regRMI_SUBBLOCK_STATUS0 0x0783
+#define regRMI_SUBBLOCK_STATUS0_BASE_IDX 0
+#define regRMI_SUBBLOCK_STATUS1 0x0784
+#define regRMI_SUBBLOCK_STATUS1_BASE_IDX 0
+#define regRMI_SUBBLOCK_STATUS2 0x0785
+#define regRMI_SUBBLOCK_STATUS2_BASE_IDX 0
+#define regRMI_SUBBLOCK_STATUS3 0x0786
+#define regRMI_SUBBLOCK_STATUS3_BASE_IDX 0
+#define regRMI_XBAR_CONFIG 0x0787
+#define regRMI_XBAR_CONFIG_BASE_IDX 0
+#define regRMI_PROBE_POP_LOGIC_CNTL 0x0788
+#define regRMI_PROBE_POP_LOGIC_CNTL_BASE_IDX 0
+#define regRMI_UTC_XNACK_N_MISC_CNTL 0x0789
+#define regRMI_UTC_XNACK_N_MISC_CNTL_BASE_IDX 0
+#define regRMI_DEMUX_CNTL 0x078a
+#define regRMI_DEMUX_CNTL_BASE_IDX 0
+#define regRMI_UTCL1_CNTL1 0x078b
+#define regRMI_UTCL1_CNTL1_BASE_IDX 0
+#define regRMI_UTCL1_CNTL2 0x078c
+#define regRMI_UTCL1_CNTL2_BASE_IDX 0
+#define regRMI_UTC_UNIT_CONFIG 0x078d
+#define regRMI_UTC_UNIT_CONFIG_BASE_IDX 0
+#define regRMI_TCIW_FORMATTER0_CNTL 0x078e
+#define regRMI_TCIW_FORMATTER0_CNTL_BASE_IDX 0
+#define regRMI_TCIW_FORMATTER1_CNTL 0x078f
+#define regRMI_TCIW_FORMATTER1_CNTL_BASE_IDX 0
+#define regRMI_SCOREBOARD_CNTL 0x0790
+#define regRMI_SCOREBOARD_CNTL_BASE_IDX 0
+#define regRMI_SCOREBOARD_STATUS0 0x0791
+#define regRMI_SCOREBOARD_STATUS0_BASE_IDX 0
+#define regRMI_SCOREBOARD_STATUS1 0x0792
+#define regRMI_SCOREBOARD_STATUS1_BASE_IDX 0
+#define regRMI_SCOREBOARD_STATUS2 0x0793
+#define regRMI_SCOREBOARD_STATUS2_BASE_IDX 0
+#define regRMI_XBAR_ARBITER_CONFIG 0x0794
+#define regRMI_XBAR_ARBITER_CONFIG_BASE_IDX 0
+#define regRMI_XBAR_ARBITER_CONFIG_1 0x0795
+#define regRMI_XBAR_ARBITER_CONFIG_1_BASE_IDX 0
+#define regRMI_CLOCK_CNTRL 0x0796
+#define regRMI_CLOCK_CNTRL_BASE_IDX 0
+#define regRMI_UTCL1_STATUS 0x0797
+#define regRMI_UTCL1_STATUS_BASE_IDX 0
+#define regRMI_XNACK_DEBUG 0x079d
+#define regRMI_XNACK_DEBUG_BASE_IDX 0
+#define regRMI_SPARE 0x079e
+#define regRMI_SPARE_BASE_IDX 0
+#define regRMI_SPARE_1 0x079f
+#define regRMI_SPARE_1_BASE_IDX 0
+#define regRMI_SPARE_2 0x07a0
+#define regRMI_SPARE_2_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_utcl2_atcl2dec
+// base address: 0xa000
+#define regATC_L2_CNTL 0x0800
+#define regATC_L2_CNTL_BASE_IDX 0
+#define regATC_L2_CNTL2 0x0801
+#define regATC_L2_CNTL2_BASE_IDX 0
+#define regATC_L2_CACHE_DATA0 0x0804
+#define regATC_L2_CACHE_DATA0_BASE_IDX 0
+#define regATC_L2_CACHE_DATA1 0x0805
+#define regATC_L2_CACHE_DATA1_BASE_IDX 0
+#define regATC_L2_CACHE_DATA2 0x0806
+#define regATC_L2_CACHE_DATA2_BASE_IDX 0
+#define regATC_L2_CACHE_DATA3 0x0807
+#define regATC_L2_CACHE_DATA3_BASE_IDX 0
+#define regATC_L2_CNTL3 0x0808
+#define regATC_L2_CNTL3_BASE_IDX 0
+#define regATC_L2_STATUS 0x0809
+#define regATC_L2_STATUS_BASE_IDX 0
+#define regATC_L2_STATUS2 0x080a
+#define regATC_L2_STATUS2_BASE_IDX 0
+#define regATC_L2_MISC_CG 0x080b
+#define regATC_L2_MISC_CG_BASE_IDX 0
+#define regATC_L2_MEM_POWER_LS 0x080c
+#define regATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define regATC_L2_CGTT_CLK_CTRL 0x080d
+#define regATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regATC_L2_CACHE_4K_DSM_INDEX 0x080f
+#define regATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0
+#define regATC_L2_CACHE_32K_DSM_INDEX 0x0810
+#define regATC_L2_CACHE_32K_DSM_INDEX_BASE_IDX 0
+#define regATC_L2_CACHE_2M_DSM_INDEX 0x0811
+#define regATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0
+#define regATC_L2_CACHE_4K_DSM_CNTL 0x0812
+#define regATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0
+#define regATC_L2_CACHE_32K_DSM_CNTL 0x0813
+#define regATC_L2_CACHE_32K_DSM_CNTL_BASE_IDX 0
+#define regATC_L2_CACHE_2M_DSM_CNTL 0x0814
+#define regATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0
+#define regATC_L2_CNTL4 0x0815
+#define regATC_L2_CNTL4_BASE_IDX 0
+#define regATC_L2_MM_GROUP_RT_CLASSES 0x0816
+#define regATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_utcl2_vml2pfdec
+// base address: 0xa080
+#define regVM_L2_CNTL 0x0820
+#define regVM_L2_CNTL_BASE_IDX 0
+#define regVM_L2_CNTL2 0x0821
+#define regVM_L2_CNTL2_BASE_IDX 0
+#define regVM_L2_CNTL3 0x0822
+#define regVM_L2_CNTL3_BASE_IDX 0
+#define regVM_L2_STATUS 0x0823
+#define regVM_L2_STATUS_BASE_IDX 0
+#define regVM_DUMMY_PAGE_FAULT_CNTL 0x0824
+#define regVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define regVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x0825
+#define regVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define regVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x0826
+#define regVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_CNTL 0x0827
+#define regVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_CNTL2 0x0828
+#define regVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL3 0x0829
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL4 0x082a
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_STATUS 0x082b
+#define regVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_ADDR_LO32 0x082c
+#define regVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_ADDR_HI32 0x082d
+#define regVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x082e
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x082f
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x0831
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x0832
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x0833
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x0834
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x0835
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x0836
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define regVM_L2_CNTL4 0x0837
+#define regVM_L2_CNTL4_BASE_IDX 0
+#define regVM_L2_CNTL5 0x0838
+#define regVM_L2_CNTL5_BASE_IDX 0
+#define regVM_L2_MM_GROUP_RT_CLASSES 0x0839
+#define regVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define regVM_L2_BANK_SELECT_RESERVED_CID 0x083a
+#define regVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define regVM_L2_BANK_SELECT_RESERVED_CID2 0x083b
+#define regVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define regVM_L2_CACHE_PARITY_CNTL 0x083c
+#define regVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define regVM_L2_CGTT_CLK_CTRL 0x083d
+#define regVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regVM_L2_CGTT_BUSY_CTRL 0x083e
+#define regVM_L2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regVML2_MEM_ECC_INDEX 0x0842
+#define regVML2_MEM_ECC_INDEX_BASE_IDX 0
+#define regVML2_WALKER_MEM_ECC_INDEX 0x0843
+#define regVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define regUTCL2_MEM_ECC_INDEX 0x0844
+#define regUTCL2_MEM_ECC_INDEX_BASE_IDX 0
+#define regVML2_MEM_ECC_CNTL 0x0845
+#define regVML2_MEM_ECC_CNTL_BASE_IDX 0
+#define regVML2_WALKER_MEM_ECC_CNTL 0x0846
+#define regVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0
+#define regUTCL2_MEM_ECC_CNTL 0x0847
+#define regUTCL2_MEM_ECC_CNTL_BASE_IDX 0
+#define regVML2_MEM_ECC_STATUS 0x0848
+#define regVML2_MEM_ECC_STATUS_BASE_IDX 0
+#define regVML2_WALKER_MEM_ECC_STATUS 0x0849
+#define regVML2_WALKER_MEM_ECC_STATUS_BASE_IDX 0
+#define regUTCL2_MEM_ECC_STATUS 0x084a
+#define regUTCL2_MEM_ECC_STATUS_BASE_IDX 0
+#define regUTCL2_EDC_MODE 0x084b
+#define regUTCL2_EDC_MODE_BASE_IDX 0
+#define regUTCL2_EDC_CONFIG 0x084c
+#define regUTCL2_EDC_CONFIG_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_utcl2_vml2vcdec
+// base address: 0xa180
+#define regVM_CONTEXT0_CNTL 0x0860
+#define regVM_CONTEXT0_CNTL_BASE_IDX 0
+#define regVM_CONTEXT1_CNTL 0x0861
+#define regVM_CONTEXT1_CNTL_BASE_IDX 0
+#define regVM_CONTEXT2_CNTL 0x0862
+#define regVM_CONTEXT2_CNTL_BASE_IDX 0
+#define regVM_CONTEXT3_CNTL 0x0863
+#define regVM_CONTEXT3_CNTL_BASE_IDX 0
+#define regVM_CONTEXT4_CNTL 0x0864
+#define regVM_CONTEXT4_CNTL_BASE_IDX 0
+#define regVM_CONTEXT5_CNTL 0x0865
+#define regVM_CONTEXT5_CNTL_BASE_IDX 0
+#define regVM_CONTEXT6_CNTL 0x0866
+#define regVM_CONTEXT6_CNTL_BASE_IDX 0
+#define regVM_CONTEXT7_CNTL 0x0867
+#define regVM_CONTEXT7_CNTL_BASE_IDX 0
+#define regVM_CONTEXT8_CNTL 0x0868
+#define regVM_CONTEXT8_CNTL_BASE_IDX 0
+#define regVM_CONTEXT9_CNTL 0x0869
+#define regVM_CONTEXT9_CNTL_BASE_IDX 0
+#define regVM_CONTEXT10_CNTL 0x086a
+#define regVM_CONTEXT10_CNTL_BASE_IDX 0
+#define regVM_CONTEXT11_CNTL 0x086b
+#define regVM_CONTEXT11_CNTL_BASE_IDX 0
+#define regVM_CONTEXT12_CNTL 0x086c
+#define regVM_CONTEXT12_CNTL_BASE_IDX 0
+#define regVM_CONTEXT13_CNTL 0x086d
+#define regVM_CONTEXT13_CNTL_BASE_IDX 0
+#define regVM_CONTEXT14_CNTL 0x086e
+#define regVM_CONTEXT14_CNTL_BASE_IDX 0
+#define regVM_CONTEXT15_CNTL 0x086f
+#define regVM_CONTEXT15_CNTL_BASE_IDX 0
+#define regVM_CONTEXTS_DISABLE 0x0870
+#define regVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_SEM 0x0871
+#define regVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_SEM 0x0872
+#define regVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_SEM 0x0873
+#define regVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_SEM 0x0874
+#define regVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_SEM 0x0875
+#define regVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_SEM 0x0876
+#define regVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_SEM 0x0877
+#define regVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_SEM 0x0878
+#define regVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_SEM 0x0879
+#define regVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_SEM 0x087a
+#define regVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_SEM 0x087b
+#define regVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_SEM 0x087c
+#define regVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_SEM 0x087d
+#define regVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_SEM 0x087e
+#define regVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_SEM 0x087f
+#define regVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_SEM 0x0880
+#define regVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_SEM 0x0881
+#define regVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_SEM 0x0882
+#define regVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_REQ 0x0883
+#define regVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_REQ 0x0884
+#define regVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_REQ 0x0885
+#define regVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_REQ 0x0886
+#define regVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_REQ 0x0887
+#define regVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_REQ 0x0888
+#define regVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_REQ 0x0889
+#define regVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_REQ 0x088a
+#define regVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_REQ 0x088b
+#define regVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_REQ 0x088c
+#define regVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_REQ 0x088d
+#define regVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_REQ 0x088e
+#define regVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_REQ 0x088f
+#define regVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_REQ 0x0890
+#define regVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_REQ 0x0891
+#define regVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_REQ 0x0892
+#define regVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_REQ 0x0893
+#define regVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_REQ 0x0894
+#define regVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_ACK 0x0895
+#define regVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_ACK 0x0896
+#define regVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_ACK 0x0897
+#define regVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_ACK 0x0898
+#define regVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_ACK 0x0899
+#define regVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_ACK 0x089a
+#define regVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_ACK 0x089b
+#define regVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_ACK 0x089c
+#define regVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_ACK 0x089d
+#define regVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_ACK 0x089e
+#define regVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_ACK 0x089f
+#define regVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_ACK 0x08a0
+#define regVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_ACK 0x08a1
+#define regVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_ACK 0x08a2
+#define regVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_ACK 0x08a3
+#define regVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_ACK 0x08a4
+#define regVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_ACK 0x08a5
+#define regVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_ACK 0x08a6
+#define regVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x08a7
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x08a8
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x08a9
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x08aa
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x08ab
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x08ac
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x08ad
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x08ae
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x08af
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x08b0
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x08b1
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x08b2
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x08b3
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x08b4
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x08b5
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x08b6
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x08b7
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x08b8
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x08b9
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x08ba
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x08bb
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x08bc
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x08bd
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x08be
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x08bf
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x08c0
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x08c1
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x08c2
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x08c3
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x08c4
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x08c5
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x08c6
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x08c7
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x08c8
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x08c9
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x08ca
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x08cb
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x08cc
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x08cd
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x08ce
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x08cf
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x08d0
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x08d1
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x08d2
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x08d3
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x08d4
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x08d5
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x08d6
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x08d7
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x08d8
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x08d9
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x08da
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x08db
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x08dc
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x08dd
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x08de
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x08df
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x08e0
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x08e1
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x08e2
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x08e3
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x08e4
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x08e5
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x08e6
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x08e7
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x08e8
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x08e9
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x08ea
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x08eb
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x08ec
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x08ed
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x08ee
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x08ef
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x08f0
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x08f1
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x08f2
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x08f3
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x08f4
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x08f5
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x08f6
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x08f7
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x08f8
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x08f9
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x08fa
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x08fb
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x08fc
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x08fd
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x08fe
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x08ff
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x0900
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x0901
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x0902
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x0903
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x0904
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x0905
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x0906
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x0907
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x0908
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x0909
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x090a
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x090b
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x090c
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x090d
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x090e
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x090f
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x0910
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0911
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0912
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x0913
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x0914
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0915
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0916
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0917
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0918
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0919
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x091a
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x091b
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x091c
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x091d
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x091e
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x091f
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x0920
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x0921
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x0922
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x0923
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x0924
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x0925
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x0926
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x0927
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x0928
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x0929
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x092a
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_utcl2_vmsharedpfdec
+// base address: 0xa500
+#define regMC_VM_NB_MMIOBASE 0x0940
+#define regMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define regMC_VM_NB_MMIOLIMIT 0x0941
+#define regMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define regMC_VM_NB_PCI_CTRL 0x0942
+#define regMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define regMC_VM_NB_PCI_ARB 0x0943
+#define regMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define regMC_VM_NB_TOP_OF_DRAM_SLOT1 0x0944
+#define regMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define regMC_VM_NB_LOWER_TOP_OF_DRAM2 0x0945
+#define regMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define regMC_VM_NB_UPPER_TOP_OF_DRAM2 0x0946
+#define regMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define regMC_VM_FB_OFFSET 0x0947
+#define regMC_VM_FB_OFFSET_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x0948
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x0949
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define regMC_VM_STEERING 0x094a
+#define regMC_VM_STEERING_BASE_IDX 0
+#define regMC_SHARED_VIRT_RESET_REQ 0x094b
+#define regMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regMC_MEM_POWER_LS 0x094c
+#define regMC_MEM_POWER_LS_BASE_IDX 0
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x094d
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x094e
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define regMC_VM_APT_CNTL 0x0951
+#define regMC_VM_APT_CNTL_BASE_IDX 0
+#define regMC_VM_LOCAL_HBM_ADDRESS_START 0x0952
+#define regMC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 0
+#define regMC_VM_LOCAL_HBM_ADDRESS_END 0x0953
+#define regMC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 0
+#define regMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x0954
+#define regMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define regUTCL2_CGTT_CLK_CTRL 0x0955
+#define regUTCL2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMC_VM_XGMI_LFB_CNTL 0x0957
+#define regMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define regMC_VM_XGMI_LFB_SIZE 0x0958
+#define regMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+#define regMC_VM_CACHEABLE_DRAM_CNTL 0x0959
+#define regMC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 0
+#define regMC_VM_HOST_MAPPING 0x095a
+#define regMC_VM_HOST_MAPPING_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_utcl2_vmsharedvcdec
+// base address: 0xa570
+#define regMC_VM_FB_LOCATION_BASE 0x095c
+#define regMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regMC_VM_FB_LOCATION_TOP 0x095d
+#define regMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define regMC_VM_AGP_TOP 0x095e
+#define regMC_VM_AGP_TOP_BASE_IDX 0
+#define regMC_VM_AGP_BOT 0x095f
+#define regMC_VM_AGP_BOT_BASE_IDX 0
+#define regMC_VM_AGP_BASE 0x0960
+#define regMC_VM_AGP_BASE_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x0961
+#define regMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0962
+#define regMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB_CNTL 0x0963
+#define regMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_utcl2_l2tlbdec
+// base address: 0xa5b0
+#define regL2TLB_TLB0_STATUS 0x096d
+#define regL2TLB_TLB0_STATUS_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO 0x096f
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI 0x0970
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO 0x0971
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI 0x0972
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_tcdec
+// base address: 0xac00
+#define regTCP_INVALIDATE 0x0b00
+#define regTCP_INVALIDATE_BASE_IDX 0
+#define regTCP_STATUS 0x0b01
+#define regTCP_STATUS_BASE_IDX 0
+#define regTCP_CNTL 0x0b02
+#define regTCP_CNTL_BASE_IDX 0
+#define regTCP_CHAN_STEER_0 0x0b03
+#define regTCP_CHAN_STEER_0_BASE_IDX 0
+#define regTCP_CHAN_STEER_1 0x0b04
+#define regTCP_CHAN_STEER_1_BASE_IDX 0
+#define regTCP_ADDR_CONFIG 0x0b05
+#define regTCP_ADDR_CONFIG_BASE_IDX 0
+#define regTCP_CREDIT 0x0b06
+#define regTCP_CREDIT_BASE_IDX 0
+#define regTCP_BUFFER_ADDR_HASH_CNTL 0x0b16
+#define regTCP_BUFFER_ADDR_HASH_CNTL_BASE_IDX 0
+#define regTC_CFG_L1_LOAD_POLICY0 0x0b1a
+#define regTC_CFG_L1_LOAD_POLICY0_BASE_IDX 0
+#define regTC_CFG_L1_LOAD_POLICY1 0x0b1b
+#define regTC_CFG_L1_LOAD_POLICY1_BASE_IDX 0
+#define regTC_CFG_L1_STORE_POLICY 0x0b1c
+#define regTC_CFG_L1_STORE_POLICY_BASE_IDX 0
+#define regTC_CFG_L2_LOAD_POLICY0 0x0b1d
+#define regTC_CFG_L2_LOAD_POLICY0_BASE_IDX 0
+#define regTC_CFG_L2_LOAD_POLICY1 0x0b1e
+#define regTC_CFG_L2_LOAD_POLICY1_BASE_IDX 0
+#define regTC_CFG_L2_STORE_POLICY0 0x0b1f
+#define regTC_CFG_L2_STORE_POLICY0_BASE_IDX 0
+#define regTC_CFG_L2_STORE_POLICY1 0x0b20
+#define regTC_CFG_L2_STORE_POLICY1_BASE_IDX 0
+#define regTC_CFG_L2_ATOMIC_POLICY 0x0b21
+#define regTC_CFG_L2_ATOMIC_POLICY_BASE_IDX 0
+#define regTC_CFG_L1_VOLATILE 0x0b22
+#define regTC_CFG_L1_VOLATILE_BASE_IDX 0
+#define regTC_CFG_L2_VOLATILE 0x0b23
+#define regTC_CFG_L2_VOLATILE_BASE_IDX 0
+#define regTCI_MISC 0x0b5c
+#define regTCI_MISC_BASE_IDX 0
+#define regTCI_CNTL_3 0x0b5d
+#define regTCI_CNTL_3_BASE_IDX 0
+#define regTCI_DSM_CNTL 0x0b5e
+#define regTCI_DSM_CNTL_BASE_IDX 0
+#define regTCI_DSM_CNTL2 0x0b5f
+#define regTCI_DSM_CNTL2_BASE_IDX 0
+#define regTCI_STATUS 0x0b61
+#define regTCI_STATUS_BASE_IDX 0
+#define regTCI_CNTL_1 0x0b62
+#define regTCI_CNTL_1_BASE_IDX 0
+#define regTCI_CNTL_2 0x0b63
+#define regTCI_CNTL_2_BASE_IDX 0
+#define regTCC_CTRL 0x0b80
+#define regTCC_CTRL_BASE_IDX 0
+#define regTCC_CTRL2 0x0b81
+#define regTCC_CTRL2_BASE_IDX 0
+#define regTCC_DSM_CNTL 0x0b86
+#define regTCC_DSM_CNTL_BASE_IDX 0
+#define regTCC_DSM_CNTLA 0x0b87
+#define regTCC_DSM_CNTLA_BASE_IDX 0
+#define regTCC_DSM_CNTL2 0x0b88
+#define regTCC_DSM_CNTL2_BASE_IDX 0
+#define regTCC_DSM_CNTL2A 0x0b89
+#define regTCC_DSM_CNTL2A_BASE_IDX 0
+#define regTCC_DSM_CNTL2B 0x0b8a
+#define regTCC_DSM_CNTL2B_BASE_IDX 0
+#define regTCC_WBINVL2 0x0b8b
+#define regTCC_WBINVL2_BASE_IDX 0
+#define regTCC_SOFT_RESET 0x0b8c
+#define regTCC_SOFT_RESET_BASE_IDX 0
+#define regTCC_DSM_CNTL3 0x0b8e
+#define regTCC_DSM_CNTL3_BASE_IDX 0
+#define regTCA_CTRL 0x0bc0
+#define regTCA_CTRL_BASE_IDX 0
+#define regTCA_BURST_MASK 0x0bc1
+#define regTCA_BURST_MASK_BASE_IDX 0
+#define regTCA_BURST_CTRL 0x0bc2
+#define regTCA_BURST_CTRL_BASE_IDX 0
+#define regTCA_DSM_CNTL 0x0bc3
+#define regTCA_DSM_CNTL_BASE_IDX 0
+#define regTCA_DSM_CNTL2 0x0bc4
+#define regTCA_DSM_CNTL2_BASE_IDX 0
+#define regTCX_CTRL 0x0bc6
+#define regTCX_CTRL_BASE_IDX 0
+#define regTCX_DSM_CNTL 0x0bc7
+#define regTCX_DSM_CNTL_BASE_IDX 0
+#define regTCX_DSM_CNTL2 0x0bc8
+#define regTCX_DSM_CNTL2_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_shdec
+// base address: 0xb000
+#define regSPI_SHADER_PGM_RSRC3_PS 0x0c07
+#define regSPI_SHADER_PGM_RSRC3_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_PS 0x0c08
+#define regSPI_SHADER_PGM_LO_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_PS 0x0c09
+#define regSPI_SHADER_PGM_HI_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_PS 0x0c0a
+#define regSPI_SHADER_PGM_RSRC1_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_PS 0x0c0b
+#define regSPI_SHADER_PGM_RSRC2_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_0 0x0c0c
+#define regSPI_SHADER_USER_DATA_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_1 0x0c0d
+#define regSPI_SHADER_USER_DATA_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_2 0x0c0e
+#define regSPI_SHADER_USER_DATA_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_3 0x0c0f
+#define regSPI_SHADER_USER_DATA_PS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_4 0x0c10
+#define regSPI_SHADER_USER_DATA_PS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_5 0x0c11
+#define regSPI_SHADER_USER_DATA_PS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_6 0x0c12
+#define regSPI_SHADER_USER_DATA_PS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_7 0x0c13
+#define regSPI_SHADER_USER_DATA_PS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_8 0x0c14
+#define regSPI_SHADER_USER_DATA_PS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_9 0x0c15
+#define regSPI_SHADER_USER_DATA_PS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_10 0x0c16
+#define regSPI_SHADER_USER_DATA_PS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_11 0x0c17
+#define regSPI_SHADER_USER_DATA_PS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_12 0x0c18
+#define regSPI_SHADER_USER_DATA_PS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_13 0x0c19
+#define regSPI_SHADER_USER_DATA_PS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_14 0x0c1a
+#define regSPI_SHADER_USER_DATA_PS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_15 0x0c1b
+#define regSPI_SHADER_USER_DATA_PS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_16 0x0c1c
+#define regSPI_SHADER_USER_DATA_PS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_17 0x0c1d
+#define regSPI_SHADER_USER_DATA_PS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_18 0x0c1e
+#define regSPI_SHADER_USER_DATA_PS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_19 0x0c1f
+#define regSPI_SHADER_USER_DATA_PS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_20 0x0c20
+#define regSPI_SHADER_USER_DATA_PS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_21 0x0c21
+#define regSPI_SHADER_USER_DATA_PS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_22 0x0c22
+#define regSPI_SHADER_USER_DATA_PS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_23 0x0c23
+#define regSPI_SHADER_USER_DATA_PS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_24 0x0c24
+#define regSPI_SHADER_USER_DATA_PS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_25 0x0c25
+#define regSPI_SHADER_USER_DATA_PS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_26 0x0c26
+#define regSPI_SHADER_USER_DATA_PS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_27 0x0c27
+#define regSPI_SHADER_USER_DATA_PS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_28 0x0c28
+#define regSPI_SHADER_USER_DATA_PS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_29 0x0c29
+#define regSPI_SHADER_USER_DATA_PS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_30 0x0c2a
+#define regSPI_SHADER_USER_DATA_PS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_31 0x0c2b
+#define regSPI_SHADER_USER_DATA_PS_31_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_VS 0x0c46
+#define regSPI_SHADER_PGM_RSRC3_VS_BASE_IDX 0
+#define regSPI_SHADER_LATE_ALLOC_VS 0x0c47
+#define regSPI_SHADER_LATE_ALLOC_VS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_VS 0x0c48
+#define regSPI_SHADER_PGM_LO_VS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_VS 0x0c49
+#define regSPI_SHADER_PGM_HI_VS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_VS 0x0c4a
+#define regSPI_SHADER_PGM_RSRC1_VS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_VS 0x0c4b
+#define regSPI_SHADER_PGM_RSRC2_VS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_0 0x0c4c
+#define regSPI_SHADER_USER_DATA_VS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_1 0x0c4d
+#define regSPI_SHADER_USER_DATA_VS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_2 0x0c4e
+#define regSPI_SHADER_USER_DATA_VS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_3 0x0c4f
+#define regSPI_SHADER_USER_DATA_VS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_4 0x0c50
+#define regSPI_SHADER_USER_DATA_VS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_5 0x0c51
+#define regSPI_SHADER_USER_DATA_VS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_6 0x0c52
+#define regSPI_SHADER_USER_DATA_VS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_7 0x0c53
+#define regSPI_SHADER_USER_DATA_VS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_8 0x0c54
+#define regSPI_SHADER_USER_DATA_VS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_9 0x0c55
+#define regSPI_SHADER_USER_DATA_VS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_10 0x0c56
+#define regSPI_SHADER_USER_DATA_VS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_11 0x0c57
+#define regSPI_SHADER_USER_DATA_VS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_12 0x0c58
+#define regSPI_SHADER_USER_DATA_VS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_13 0x0c59
+#define regSPI_SHADER_USER_DATA_VS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_14 0x0c5a
+#define regSPI_SHADER_USER_DATA_VS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_15 0x0c5b
+#define regSPI_SHADER_USER_DATA_VS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_16 0x0c5c
+#define regSPI_SHADER_USER_DATA_VS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_17 0x0c5d
+#define regSPI_SHADER_USER_DATA_VS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_18 0x0c5e
+#define regSPI_SHADER_USER_DATA_VS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_19 0x0c5f
+#define regSPI_SHADER_USER_DATA_VS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_20 0x0c60
+#define regSPI_SHADER_USER_DATA_VS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_21 0x0c61
+#define regSPI_SHADER_USER_DATA_VS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_22 0x0c62
+#define regSPI_SHADER_USER_DATA_VS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_23 0x0c63
+#define regSPI_SHADER_USER_DATA_VS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_24 0x0c64
+#define regSPI_SHADER_USER_DATA_VS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_25 0x0c65
+#define regSPI_SHADER_USER_DATA_VS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_26 0x0c66
+#define regSPI_SHADER_USER_DATA_VS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_27 0x0c67
+#define regSPI_SHADER_USER_DATA_VS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_28 0x0c68
+#define regSPI_SHADER_USER_DATA_VS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_29 0x0c69
+#define regSPI_SHADER_USER_DATA_VS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_30 0x0c6a
+#define regSPI_SHADER_USER_DATA_VS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_VS_31 0x0c6b
+#define regSPI_SHADER_USER_DATA_VS_31_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_GS_VS 0x0c7c
+#define regSPI_SHADER_PGM_RSRC2_GS_VS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_GS 0x0c81
+#define regSPI_SHADER_PGM_RSRC4_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS 0x0c82
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS 0x0c83
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES 0x0c84
+#define regSPI_SHADER_PGM_LO_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES 0x0c85
+#define regSPI_SHADER_PGM_HI_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_GS 0x0c87
+#define regSPI_SHADER_PGM_RSRC3_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_GS 0x0c88
+#define regSPI_SHADER_PGM_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_GS 0x0c89
+#define regSPI_SHADER_PGM_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_GS 0x0c8a
+#define regSPI_SHADER_PGM_RSRC1_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_GS 0x0c8b
+#define regSPI_SHADER_PGM_RSRC2_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_0 0x0ccc
+#define regSPI_SHADER_USER_DATA_ES_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_1 0x0ccd
+#define regSPI_SHADER_USER_DATA_ES_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_2 0x0cce
+#define regSPI_SHADER_USER_DATA_ES_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_3 0x0ccf
+#define regSPI_SHADER_USER_DATA_ES_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_4 0x0cd0
+#define regSPI_SHADER_USER_DATA_ES_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_5 0x0cd1
+#define regSPI_SHADER_USER_DATA_ES_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_6 0x0cd2
+#define regSPI_SHADER_USER_DATA_ES_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_7 0x0cd3
+#define regSPI_SHADER_USER_DATA_ES_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_8 0x0cd4
+#define regSPI_SHADER_USER_DATA_ES_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_9 0x0cd5
+#define regSPI_SHADER_USER_DATA_ES_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_10 0x0cd6
+#define regSPI_SHADER_USER_DATA_ES_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_11 0x0cd7
+#define regSPI_SHADER_USER_DATA_ES_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_12 0x0cd8
+#define regSPI_SHADER_USER_DATA_ES_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_13 0x0cd9
+#define regSPI_SHADER_USER_DATA_ES_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_14 0x0cda
+#define regSPI_SHADER_USER_DATA_ES_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_15 0x0cdb
+#define regSPI_SHADER_USER_DATA_ES_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_16 0x0cdc
+#define regSPI_SHADER_USER_DATA_ES_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_17 0x0cdd
+#define regSPI_SHADER_USER_DATA_ES_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_18 0x0cde
+#define regSPI_SHADER_USER_DATA_ES_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_19 0x0cdf
+#define regSPI_SHADER_USER_DATA_ES_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_20 0x0ce0
+#define regSPI_SHADER_USER_DATA_ES_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_21 0x0ce1
+#define regSPI_SHADER_USER_DATA_ES_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_22 0x0ce2
+#define regSPI_SHADER_USER_DATA_ES_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_23 0x0ce3
+#define regSPI_SHADER_USER_DATA_ES_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_24 0x0ce4
+#define regSPI_SHADER_USER_DATA_ES_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_25 0x0ce5
+#define regSPI_SHADER_USER_DATA_ES_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_26 0x0ce6
+#define regSPI_SHADER_USER_DATA_ES_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_27 0x0ce7
+#define regSPI_SHADER_USER_DATA_ES_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_28 0x0ce8
+#define regSPI_SHADER_USER_DATA_ES_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_29 0x0ce9
+#define regSPI_SHADER_USER_DATA_ES_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_30 0x0cea
+#define regSPI_SHADER_USER_DATA_ES_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ES_31 0x0ceb
+#define regSPI_SHADER_USER_DATA_ES_31_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_HS 0x0d01
+#define regSPI_SHADER_PGM_RSRC4_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS 0x0d02
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS 0x0d03
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS 0x0d04
+#define regSPI_SHADER_PGM_LO_LS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS 0x0d05
+#define regSPI_SHADER_PGM_HI_LS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_HS 0x0d07
+#define regSPI_SHADER_PGM_RSRC3_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_HS 0x0d08
+#define regSPI_SHADER_PGM_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_HS 0x0d09
+#define regSPI_SHADER_PGM_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_HS 0x0d0a
+#define regSPI_SHADER_PGM_RSRC1_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_HS 0x0d0b
+#define regSPI_SHADER_PGM_RSRC2_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_0 0x0d0c
+#define regSPI_SHADER_USER_DATA_LS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_1 0x0d0d
+#define regSPI_SHADER_USER_DATA_LS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_2 0x0d0e
+#define regSPI_SHADER_USER_DATA_LS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_3 0x0d0f
+#define regSPI_SHADER_USER_DATA_LS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_4 0x0d10
+#define regSPI_SHADER_USER_DATA_LS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_5 0x0d11
+#define regSPI_SHADER_USER_DATA_LS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_6 0x0d12
+#define regSPI_SHADER_USER_DATA_LS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_7 0x0d13
+#define regSPI_SHADER_USER_DATA_LS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_8 0x0d14
+#define regSPI_SHADER_USER_DATA_LS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_9 0x0d15
+#define regSPI_SHADER_USER_DATA_LS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_10 0x0d16
+#define regSPI_SHADER_USER_DATA_LS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_11 0x0d17
+#define regSPI_SHADER_USER_DATA_LS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_12 0x0d18
+#define regSPI_SHADER_USER_DATA_LS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_13 0x0d19
+#define regSPI_SHADER_USER_DATA_LS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_14 0x0d1a
+#define regSPI_SHADER_USER_DATA_LS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_15 0x0d1b
+#define regSPI_SHADER_USER_DATA_LS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_16 0x0d1c
+#define regSPI_SHADER_USER_DATA_LS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_17 0x0d1d
+#define regSPI_SHADER_USER_DATA_LS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_18 0x0d1e
+#define regSPI_SHADER_USER_DATA_LS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_19 0x0d1f
+#define regSPI_SHADER_USER_DATA_LS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_20 0x0d20
+#define regSPI_SHADER_USER_DATA_LS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_21 0x0d21
+#define regSPI_SHADER_USER_DATA_LS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_22 0x0d22
+#define regSPI_SHADER_USER_DATA_LS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_23 0x0d23
+#define regSPI_SHADER_USER_DATA_LS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_24 0x0d24
+#define regSPI_SHADER_USER_DATA_LS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_25 0x0d25
+#define regSPI_SHADER_USER_DATA_LS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_26 0x0d26
+#define regSPI_SHADER_USER_DATA_LS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_27 0x0d27
+#define regSPI_SHADER_USER_DATA_LS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_28 0x0d28
+#define regSPI_SHADER_USER_DATA_LS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_29 0x0d29
+#define regSPI_SHADER_USER_DATA_LS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_30 0x0d2a
+#define regSPI_SHADER_USER_DATA_LS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_LS_31 0x0d2b
+#define regSPI_SHADER_USER_DATA_LS_31_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_0 0x0d4c
+#define regSPI_SHADER_USER_DATA_COMMON_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_1 0x0d4d
+#define regSPI_SHADER_USER_DATA_COMMON_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_2 0x0d4e
+#define regSPI_SHADER_USER_DATA_COMMON_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_3 0x0d4f
+#define regSPI_SHADER_USER_DATA_COMMON_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_4 0x0d50
+#define regSPI_SHADER_USER_DATA_COMMON_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_5 0x0d51
+#define regSPI_SHADER_USER_DATA_COMMON_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_6 0x0d52
+#define regSPI_SHADER_USER_DATA_COMMON_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_7 0x0d53
+#define regSPI_SHADER_USER_DATA_COMMON_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_8 0x0d54
+#define regSPI_SHADER_USER_DATA_COMMON_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_9 0x0d55
+#define regSPI_SHADER_USER_DATA_COMMON_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_10 0x0d56
+#define regSPI_SHADER_USER_DATA_COMMON_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_11 0x0d57
+#define regSPI_SHADER_USER_DATA_COMMON_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_12 0x0d58
+#define regSPI_SHADER_USER_DATA_COMMON_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_13 0x0d59
+#define regSPI_SHADER_USER_DATA_COMMON_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_14 0x0d5a
+#define regSPI_SHADER_USER_DATA_COMMON_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_15 0x0d5b
+#define regSPI_SHADER_USER_DATA_COMMON_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_16 0x0d5c
+#define regSPI_SHADER_USER_DATA_COMMON_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_17 0x0d5d
+#define regSPI_SHADER_USER_DATA_COMMON_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_18 0x0d5e
+#define regSPI_SHADER_USER_DATA_COMMON_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_19 0x0d5f
+#define regSPI_SHADER_USER_DATA_COMMON_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_20 0x0d60
+#define regSPI_SHADER_USER_DATA_COMMON_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_21 0x0d61
+#define regSPI_SHADER_USER_DATA_COMMON_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_22 0x0d62
+#define regSPI_SHADER_USER_DATA_COMMON_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_23 0x0d63
+#define regSPI_SHADER_USER_DATA_COMMON_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_24 0x0d64
+#define regSPI_SHADER_USER_DATA_COMMON_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_25 0x0d65
+#define regSPI_SHADER_USER_DATA_COMMON_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_26 0x0d66
+#define regSPI_SHADER_USER_DATA_COMMON_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_27 0x0d67
+#define regSPI_SHADER_USER_DATA_COMMON_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_28 0x0d68
+#define regSPI_SHADER_USER_DATA_COMMON_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_29 0x0d69
+#define regSPI_SHADER_USER_DATA_COMMON_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_30 0x0d6a
+#define regSPI_SHADER_USER_DATA_COMMON_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_COMMON_31 0x0d6b
+#define regSPI_SHADER_USER_DATA_COMMON_31_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INITIATOR 0x0e00
+#define regCOMPUTE_DISPATCH_INITIATOR_BASE_IDX 0
+#define regCOMPUTE_DIM_X 0x0e01
+#define regCOMPUTE_DIM_X_BASE_IDX 0
+#define regCOMPUTE_DIM_Y 0x0e02
+#define regCOMPUTE_DIM_Y_BASE_IDX 0
+#define regCOMPUTE_DIM_Z 0x0e03
+#define regCOMPUTE_DIM_Z_BASE_IDX 0
+#define regCOMPUTE_START_X 0x0e04
+#define regCOMPUTE_START_X_BASE_IDX 0
+#define regCOMPUTE_START_Y 0x0e05
+#define regCOMPUTE_START_Y_BASE_IDX 0
+#define regCOMPUTE_START_Z 0x0e06
+#define regCOMPUTE_START_Z_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_X 0x0e07
+#define regCOMPUTE_NUM_THREAD_X_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Y 0x0e08
+#define regCOMPUTE_NUM_THREAD_Y_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Z 0x0e09
+#define regCOMPUTE_NUM_THREAD_Z_BASE_IDX 0
+#define regCOMPUTE_PIPELINESTAT_ENABLE 0x0e0a
+#define regCOMPUTE_PIPELINESTAT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PERFCOUNT_ENABLE 0x0e0b
+#define regCOMPUTE_PERFCOUNT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PGM_LO 0x0e0c
+#define regCOMPUTE_PGM_LO_BASE_IDX 0
+#define regCOMPUTE_PGM_HI 0x0e0d
+#define regCOMPUTE_PGM_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO 0x0e0e
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI 0x0e0f
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO 0x0e10
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI 0x0e11
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC1 0x0e12
+#define regCOMPUTE_PGM_RSRC1_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC2 0x0e13
+#define regCOMPUTE_PGM_RSRC2_BASE_IDX 0
+#define regCOMPUTE_VMID 0x0e14
+#define regCOMPUTE_VMID_BASE_IDX 0
+#define regCOMPUTE_RESOURCE_LIMITS 0x0e15
+#define regCOMPUTE_RESOURCE_LIMITS_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0 0x0e16
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1 0x0e17
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1_BASE_IDX 0
+#define regCOMPUTE_TMPRING_SIZE 0x0e18
+#define regCOMPUTE_TMPRING_SIZE_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2 0x0e19
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define regCOMPUTE_RESTART_X 0x0e1b
+#define regCOMPUTE_RESTART_X_BASE_IDX 0
+#define regCOMPUTE_RESTART_Y 0x0e1c
+#define regCOMPUTE_RESTART_Y_BASE_IDX 0
+#define regCOMPUTE_RESTART_Z 0x0e1d
+#define regCOMPUTE_RESTART_Z_BASE_IDX 0
+#define regCOMPUTE_THREAD_TRACE_ENABLE 0x0e1e
+#define regCOMPUTE_THREAD_TRACE_ENABLE_BASE_IDX 0
+#define regCOMPUTE_MISC_RESERVED 0x0e1f
+#define regCOMPUTE_MISC_RESERVED_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_ID 0x0e20
+#define regCOMPUTE_DISPATCH_ID_BASE_IDX 0
+#define regCOMPUTE_THREADGROUP_ID 0x0e21
+#define regCOMPUTE_THREADGROUP_ID_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH 0x0e22
+#define regCOMPUTE_RELAUNCH_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO 0x0e23
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI 0x0e24
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_TG_CHUNK_SIZE 0x0e27
+#define regCOMPUTE_TG_CHUNK_SIZE_BASE_IDX 0
+#define regCOMPUTE_SHADER_CHKSUM 0x0e2c
+#define regCOMPUTE_SHADER_CHKSUM_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC3 0x0e2d
+#define regCOMPUTE_PGM_RSRC3_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_0 0x0e40
+#define regCOMPUTE_USER_DATA_0_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_1 0x0e41
+#define regCOMPUTE_USER_DATA_1_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_2 0x0e42
+#define regCOMPUTE_USER_DATA_2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_3 0x0e43
+#define regCOMPUTE_USER_DATA_3_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_4 0x0e44
+#define regCOMPUTE_USER_DATA_4_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_5 0x0e45
+#define regCOMPUTE_USER_DATA_5_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_6 0x0e46
+#define regCOMPUTE_USER_DATA_6_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_7 0x0e47
+#define regCOMPUTE_USER_DATA_7_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_8 0x0e48
+#define regCOMPUTE_USER_DATA_8_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_9 0x0e49
+#define regCOMPUTE_USER_DATA_9_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_10 0x0e4a
+#define regCOMPUTE_USER_DATA_10_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_11 0x0e4b
+#define regCOMPUTE_USER_DATA_11_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_12 0x0e4c
+#define regCOMPUTE_USER_DATA_12_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_13 0x0e4d
+#define regCOMPUTE_USER_DATA_13_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_14 0x0e4e
+#define regCOMPUTE_USER_DATA_14_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_15 0x0e4f
+#define regCOMPUTE_USER_DATA_15_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_END 0x0e7e
+#define regCOMPUTE_DISPATCH_END_BASE_IDX 0
+#define regCOMPUTE_NOWHERE 0x0e7f
+#define regCOMPUTE_NOWHERE_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_cppdec
+// base address: 0xc080
+#define regCP_DFY_CNTL 0x1020
+#define regCP_DFY_CNTL_BASE_IDX 0
+#define regCP_DFY_STAT 0x1021
+#define regCP_DFY_STAT_BASE_IDX 0
+#define regCP_DFY_ADDR_HI 0x1022
+#define regCP_DFY_ADDR_HI_BASE_IDX 0
+#define regCP_DFY_ADDR_LO 0x1023
+#define regCP_DFY_ADDR_LO_BASE_IDX 0
+#define regCP_DFY_DATA_0 0x1024
+#define regCP_DFY_DATA_0_BASE_IDX 0
+#define regCP_DFY_DATA_1 0x1025
+#define regCP_DFY_DATA_1_BASE_IDX 0
+#define regCP_DFY_DATA_2 0x1026
+#define regCP_DFY_DATA_2_BASE_IDX 0
+#define regCP_DFY_DATA_3 0x1027
+#define regCP_DFY_DATA_3_BASE_IDX 0
+#define regCP_DFY_DATA_4 0x1028
+#define regCP_DFY_DATA_4_BASE_IDX 0
+#define regCP_DFY_DATA_5 0x1029
+#define regCP_DFY_DATA_5_BASE_IDX 0
+#define regCP_DFY_DATA_6 0x102a
+#define regCP_DFY_DATA_6_BASE_IDX 0
+#define regCP_DFY_DATA_7 0x102b
+#define regCP_DFY_DATA_7_BASE_IDX 0
+#define regCP_DFY_DATA_8 0x102c
+#define regCP_DFY_DATA_8_BASE_IDX 0
+#define regCP_DFY_DATA_9 0x102d
+#define regCP_DFY_DATA_9_BASE_IDX 0
+#define regCP_DFY_DATA_10 0x102e
+#define regCP_DFY_DATA_10_BASE_IDX 0
+#define regCP_DFY_DATA_11 0x102f
+#define regCP_DFY_DATA_11_BASE_IDX 0
+#define regCP_DFY_DATA_12 0x1030
+#define regCP_DFY_DATA_12_BASE_IDX 0
+#define regCP_DFY_DATA_13 0x1031
+#define regCP_DFY_DATA_13_BASE_IDX 0
+#define regCP_DFY_DATA_14 0x1032
+#define regCP_DFY_DATA_14_BASE_IDX 0
+#define regCP_DFY_DATA_15 0x1033
+#define regCP_DFY_DATA_15_BASE_IDX 0
+#define regCP_DFY_CMD 0x1034
+#define regCP_DFY_CMD_BASE_IDX 0
+#define regCP_EOPQ_WAIT_TIME 0x1035
+#define regCP_EOPQ_WAIT_TIME_BASE_IDX 0
+#define regCP_CPC_MGCG_SYNC_CNTL 0x1036
+#define regCP_CPC_MGCG_SYNC_CNTL_BASE_IDX 0
+#define regCPC_INT_INFO 0x1037
+#define regCPC_INT_INFO_BASE_IDX 0
+#define regCP_VIRT_STATUS 0x1038
+#define regCP_VIRT_STATUS_BASE_IDX 0
+#define regCPC_INT_ADDR 0x1039
+#define regCPC_INT_ADDR_BASE_IDX 0
+#define regCPC_INT_PASID 0x103a
+#define regCPC_INT_PASID_BASE_IDX 0
+#define regCP_GFX_ERROR 0x103b
+#define regCP_GFX_ERROR_BASE_IDX 0
+#define regCPG_UTCL1_CNTL 0x103c
+#define regCPG_UTCL1_CNTL_BASE_IDX 0
+#define regCPC_UTCL1_CNTL 0x103d
+#define regCPC_UTCL1_CNTL_BASE_IDX 0
+#define regCPF_UTCL1_CNTL 0x103e
+#define regCPF_UTCL1_CNTL_BASE_IDX 0
+#define regCP_AQL_SMM_STATUS 0x103f
+#define regCP_AQL_SMM_STATUS_BASE_IDX 0
+#define regCP_RB0_BASE 0x1040
+#define regCP_RB0_BASE_BASE_IDX 0
+#define regCP_RB_BASE 0x1040
+#define regCP_RB_BASE_BASE_IDX 0
+#define regCP_RB0_CNTL 0x1041
+#define regCP_RB0_CNTL_BASE_IDX 0
+#define regCP_RB_CNTL 0x1041
+#define regCP_RB_CNTL_BASE_IDX 0
+#define regCP_RB_RPTR_WR 0x1042
+#define regCP_RB_RPTR_WR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR 0x1043
+#define regCP_RB0_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR 0x1043
+#define regCP_RB_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR_HI 0x1044
+#define regCP_RB0_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR_HI 0x1044
+#define regCP_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB0_BUFSZ_MASK 0x1045
+#define regCP_RB0_BUFSZ_MASK_BASE_IDX 0
+#define regCP_RB_BUFSZ_MASK 0x1045
+#define regCP_RB_BUFSZ_MASK_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_LO 0x1046
+#define regCP_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_HI 0x1047
+#define regCP_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regGC_PRIV_MODE 0x1048
+#define regGC_PRIV_MODE_BASE_IDX 0
+#define regCP_INT_CNTL 0x1049
+#define regCP_INT_CNTL_BASE_IDX 0
+#define regCP_INT_STATUS 0x104a
+#define regCP_INT_STATUS_BASE_IDX 0
+#define regCP_DEVICE_ID 0x104b
+#define regCP_DEVICE_ID_BASE_IDX 0
+#define regCP_ME0_PIPE_PRIORITY_CNTS 0x104c
+#define regCP_ME0_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_RING_PRIORITY_CNTS 0x104c
+#define regCP_RING_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME0_PIPE0_PRIORITY 0x104d
+#define regCP_ME0_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_RING0_PRIORITY 0x104d
+#define regCP_RING0_PRIORITY_BASE_IDX 0
+#define regCP_ME0_PIPE1_PRIORITY 0x104e
+#define regCP_ME0_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_RING1_PRIORITY 0x104e
+#define regCP_RING1_PRIORITY_BASE_IDX 0
+#define regCP_ME0_PIPE2_PRIORITY 0x104f
+#define regCP_ME0_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_RING2_PRIORITY 0x104f
+#define regCP_RING2_PRIORITY_BASE_IDX 0
+#define regCP_FATAL_ERROR 0x1050
+#define regCP_FATAL_ERROR_BASE_IDX 0
+#define regCP_RB_VMID 0x1051
+#define regCP_RB_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE0_VMID 0x1052
+#define regCP_ME0_PIPE0_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE1_VMID 0x1053
+#define regCP_ME0_PIPE1_VMID_BASE_IDX 0
+#define regCP_RB0_WPTR 0x1054
+#define regCP_RB0_WPTR_BASE_IDX 0
+#define regCP_RB_WPTR 0x1054
+#define regCP_RB_WPTR_BASE_IDX 0
+#define regCP_RB0_WPTR_HI 0x1055
+#define regCP_RB0_WPTR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_HI 0x1055
+#define regCP_RB_WPTR_HI_BASE_IDX 0
+#define regCP_RB1_WPTR 0x1056
+#define regCP_RB1_WPTR_BASE_IDX 0
+#define regCP_RB1_WPTR_HI 0x1057
+#define regCP_RB1_WPTR_HI_BASE_IDX 0
+#define regCP_RB2_WPTR 0x1058
+#define regCP_RB2_WPTR_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL 0x1059
+#define regCP_RB_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_LOWER 0x105a
+#define regCP_RB_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_UPPER 0x105b
+#define regCP_RB_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_LOWER 0x105c
+#define regCP_MEC_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_UPPER 0x105d
+#define regCP_MEC_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCPG_UTCL1_ERROR 0x105e
+#define regCPG_UTCL1_ERROR_BASE_IDX 0
+#define regCPC_UTCL1_ERROR 0x105f
+#define regCPC_UTCL1_ERROR_BASE_IDX 0
+#define regCP_RB1_BASE 0x1060
+#define regCP_RB1_BASE_BASE_IDX 0
+#define regCP_RB1_CNTL 0x1061
+#define regCP_RB1_CNTL_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR 0x1062
+#define regCP_RB1_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR_HI 0x1063
+#define regCP_RB1_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB2_BASE 0x1065
+#define regCP_RB2_BASE_BASE_IDX 0
+#define regCP_RB2_CNTL 0x1066
+#define regCP_RB2_CNTL_BASE_IDX 0
+#define regCP_RB2_RPTR_ADDR 0x1067
+#define regCP_RB2_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB2_RPTR_ADDR_HI 0x1068
+#define regCP_RB2_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB0_ACTIVE 0x1069
+#define regCP_RB0_ACTIVE_BASE_IDX 0
+#define regCP_RB_ACTIVE 0x1069
+#define regCP_RB_ACTIVE_BASE_IDX 0
+#define regCP_INT_CNTL_RING0 0x106a
+#define regCP_INT_CNTL_RING0_BASE_IDX 0
+#define regCP_INT_CNTL_RING1 0x106b
+#define regCP_INT_CNTL_RING1_BASE_IDX 0
+#define regCP_INT_CNTL_RING2 0x106c
+#define regCP_INT_CNTL_RING2_BASE_IDX 0
+#define regCP_INT_STATUS_RING0 0x106d
+#define regCP_INT_STATUS_RING0_BASE_IDX 0
+#define regCP_INT_STATUS_RING1 0x106e
+#define regCP_INT_STATUS_RING1_BASE_IDX 0
+#define regCP_INT_STATUS_RING2 0x106f
+#define regCP_INT_STATUS_RING2_BASE_IDX 0
+#define regCP_ME_F32_INTERRUPT 0x1073
+#define regCP_ME_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PFP_F32_INTERRUPT 0x1074
+#define regCP_PFP_F32_INTERRUPT_BASE_IDX 0
+#define regCP_CE_F32_INTERRUPT 0x1075
+#define regCP_CE_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC1_F32_INTERRUPT 0x1076
+#define regCP_MEC1_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC2_F32_INTERRUPT 0x1077
+#define regCP_MEC2_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PWR_CNTL 0x1078
+#define regCP_PWR_CNTL_BASE_IDX 0
+#define regCP_MEM_SLP_CNTL 0x1079
+#define regCP_MEM_SLP_CNTL_BASE_IDX 0
+#define regCP_ECC_DMA_FIRST_OCCURRENCE 0x107a
+#define regCP_ECC_DMA_FIRST_OCCURRENCE_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE 0x107a
+#define regCP_ECC_FIRSTOCCURRENCE_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING0 0x107b
+#define regCP_ECC_FIRSTOCCURRENCE_RING0_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING1 0x107c
+#define regCP_ECC_FIRSTOCCURRENCE_RING1_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING2 0x107d
+#define regCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0
+#define regGB_EDC_MODE 0x107e
+#define regGB_EDC_MODE_BASE_IDX 0
+#define regCP_DEBUG 0x107f
+#define regCP_DEBUG_BASE_IDX 0
+#define regCP_CPF_DEBUG 0x1080
+#define regCP_CPF_DEBUG_BASE_IDX 0
+#define regCP_CPC_DEBUG 0x1081
+#define regCP_CPC_DEBUG_BASE_IDX 0
+#define regCP_CPC_DEBUG_2 0x1082
+#define regCP_CPC_DEBUG_2_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL 0x1083
+#define regCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL1 0x1084
+#define regCP_PQ_WPTR_POLL_CNTL1_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_CNTL 0x1085
+#define regCP_ME1_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_CNTL 0x1086
+#define regCP_ME1_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_CNTL 0x1087
+#define regCP_ME1_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_CNTL 0x1088
+#define regCP_ME1_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_CNTL 0x1089
+#define regCP_ME2_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_CNTL 0x108a
+#define regCP_ME2_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_CNTL 0x108b
+#define regCP_ME2_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_CNTL 0x108c
+#define regCP_ME2_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_STATUS 0x108d
+#define regCP_ME1_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_STATUS 0x108e
+#define regCP_ME1_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_STATUS 0x108f
+#define regCP_ME1_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_STATUS 0x1090
+#define regCP_ME1_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_STATUS 0x1091
+#define regCP_ME2_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_STATUS 0x1092
+#define regCP_ME2_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_STATUS 0x1093
+#define regCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_STATUS 0x1094
+#define regCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_INT_STAT_DEBUG 0x1095
+#define regCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_ME2_INT_STAT_DEBUG 0x1096
+#define regCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
+#define regCC_GC_EDC_CONFIG 0x1098
+#define regCC_GC_EDC_CONFIG_BASE_IDX 0
+#define regCP_ME1_PIPE_PRIORITY_CNTS 0x1099
+#define regCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME1_PIPE0_PRIORITY 0x109a
+#define regCP_ME1_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE1_PRIORITY 0x109b
+#define regCP_ME1_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE2_PRIORITY 0x109c
+#define regCP_ME1_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE3_PRIORITY 0x109d
+#define regCP_ME1_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE_PRIORITY_CNTS 0x109e
+#define regCP_ME2_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME2_PIPE0_PRIORITY 0x109f
+#define regCP_ME2_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE1_PRIORITY 0x10a0
+#define regCP_ME2_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE2_PRIORITY 0x10a1
+#define regCP_ME2_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE3_PRIORITY 0x10a2
+#define regCP_ME2_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_CE_PRGRM_CNTR_START 0x10a3
+#define regCP_CE_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START 0x10a4
+#define regCP_PFP_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START 0x10a5
+#define regCP_ME_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC1_PRGRM_CNTR_START 0x10a6
+#define regCP_MEC1_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC2_PRGRM_CNTR_START 0x10a7
+#define regCP_MEC2_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_CE_INTR_ROUTINE_START 0x10a8
+#define regCP_CE_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START 0x10a9
+#define regCP_PFP_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START 0x10aa
+#define regCP_ME_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC1_INTR_ROUTINE_START 0x10ab
+#define regCP_MEC1_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC2_INTR_ROUTINE_START 0x10ac
+#define regCP_MEC2_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_CONTEXT_CNTL 0x10ad
+#define regCP_CONTEXT_CNTL_BASE_IDX 0
+#define regCP_MAX_CONTEXT 0x10ae
+#define regCP_MAX_CONTEXT_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME1 0x10af
+#define regCP_IQ_WAIT_TIME1_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME2 0x10b0
+#define regCP_IQ_WAIT_TIME2_BASE_IDX 0
+#define regCP_RB0_BASE_HI 0x10b1
+#define regCP_RB0_BASE_HI_BASE_IDX 0
+#define regCP_RB1_BASE_HI 0x10b2
+#define regCP_RB1_BASE_HI_BASE_IDX 0
+#define regCP_VMID_RESET 0x10b3
+#define regCP_VMID_RESET_BASE_IDX 0
+#define regCPC_INT_CNTL 0x10b4
+#define regCPC_INT_CNTL_BASE_IDX 0
+#define regCPC_INT_STATUS 0x10b5
+#define regCPC_INT_STATUS_BASE_IDX 0
+#define regCP_VMID_PREEMPT 0x10b6
+#define regCP_VMID_PREEMPT_BASE_IDX 0
+#define regCPC_INT_CNTX_ID 0x10b7
+#define regCPC_INT_CNTX_ID_BASE_IDX 0
+#define regCP_PQ_STATUS 0x10b8
+#define regCP_PQ_STATUS_BASE_IDX 0
+#define regCP_CPC_IC_BASE_LO 0x10b9
+#define regCP_CPC_IC_BASE_LO_BASE_IDX 0
+#define regCP_CPC_IC_BASE_HI 0x10ba
+#define regCP_CPC_IC_BASE_HI_BASE_IDX 0
+#define regCP_CPC_IC_BASE_CNTL 0x10bb
+#define regCP_CPC_IC_BASE_CNTL_BASE_IDX 0
+#define regCP_CPC_IC_OP_CNTL 0x10bc
+#define regCP_CPC_IC_OP_CNTL_BASE_IDX 0
+#define regCP_MEC1_F32_INT_DIS 0x10bd
+#define regCP_MEC1_F32_INT_DIS_BASE_IDX 0
+#define regCP_MEC2_F32_INT_DIS 0x10be
+#define regCP_MEC2_F32_INT_DIS_BASE_IDX 0
+#define regCP_VMID_STATUS 0x10bf
+#define regCP_VMID_STATUS_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_cppdec2
+// base address: 0xc600
+#define regCP_RB_DOORBELL_CONTROL_SCH_0 0x1180
+#define regCP_RB_DOORBELL_CONTROL_SCH_0_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_1 0x1181
+#define regCP_RB_DOORBELL_CONTROL_SCH_1_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_2 0x1182
+#define regCP_RB_DOORBELL_CONTROL_SCH_2_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_3 0x1183
+#define regCP_RB_DOORBELL_CONTROL_SCH_3_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_4 0x1184
+#define regCP_RB_DOORBELL_CONTROL_SCH_4_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_5 0x1185
+#define regCP_RB_DOORBELL_CONTROL_SCH_5_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_6 0x1186
+#define regCP_RB_DOORBELL_CONTROL_SCH_6_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL_SCH_7 0x1187
+#define regCP_RB_DOORBELL_CONTROL_SCH_7_BASE_IDX 0
+#define regCP_RB_DOORBELL_CLEAR 0x1188
+#define regCP_RB_DOORBELL_CLEAR_BASE_IDX 0
+#define regCP_CPF_DSM_CNTL 0x1194
+#define regCP_CPF_DSM_CNTL_BASE_IDX 0
+#define regCP_CPG_DSM_CNTL 0x1195
+#define regCP_CPG_DSM_CNTL_BASE_IDX 0
+#define regCP_CPC_DSM_CNTL 0x1196
+#define regCP_CPC_DSM_CNTL_BASE_IDX 0
+#define regCP_CPF_DSM_CNTL2 0x1197
+#define regCP_CPF_DSM_CNTL2_BASE_IDX 0
+#define regCP_CPG_DSM_CNTL2 0x1198
+#define regCP_CPG_DSM_CNTL2_BASE_IDX 0
+#define regCP_CPC_DSM_CNTL2 0x1199
+#define regCP_CPC_DSM_CNTL2_BASE_IDX 0
+#define regCP_CPF_DSM_CNTL2A 0x119a
+#define regCP_CPF_DSM_CNTL2A_BASE_IDX 0
+#define regCP_CPG_DSM_CNTL2A 0x119b
+#define regCP_CPG_DSM_CNTL2A_BASE_IDX 0
+#define regCP_CPC_DSM_CNTL2A 0x119c
+#define regCP_CPC_DSM_CNTL2A_BASE_IDX 0
+#define regCP_EDC_FUE_CNTL 0x119d
+#define regCP_EDC_FUE_CNTL_BASE_IDX 0
+#define regCP_GFX_MQD_CONTROL 0x11a0
+#define regCP_GFX_MQD_CONTROL_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR 0x11a1
+#define regCP_GFX_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR_HI 0x11a2
+#define regCP_GFX_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_RB_STATUS 0x11a3
+#define regCP_RB_STATUS_BASE_IDX 0
+#define regCPG_UTCL1_STATUS 0x11b4
+#define regCPG_UTCL1_STATUS_BASE_IDX 0
+#define regCPC_UTCL1_STATUS 0x11b5
+#define regCPC_UTCL1_STATUS_BASE_IDX 0
+#define regCPF_UTCL1_STATUS 0x11b6
+#define regCPF_UTCL1_STATUS_BASE_IDX 0
+#define regCP_SD_CNTL 0x11b7
+#define regCP_SD_CNTL_BASE_IDX 0
+#define regCP_SOFT_RESET_CNTL 0x11b9
+#define regCP_SOFT_RESET_CNTL_BASE_IDX 0
+#define regCP_CPC_GFX_CNTL 0x11ba
+#define regCP_CPC_GFX_CNTL_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_spipdec
+// base address: 0xc700
+#define regSPI_ARB_PRIORITY 0x11c0
+#define regSPI_ARB_PRIORITY_BASE_IDX 0
+#define regSPI_ARB_CYCLES_0 0x11c1
+#define regSPI_ARB_CYCLES_0_BASE_IDX 0
+#define regSPI_ARB_CYCLES_1 0x11c2
+#define regSPI_ARB_CYCLES_1_BASE_IDX 0
+#define regSPI_CDBG_SYS_GFX 0x11c3
+#define regSPI_CDBG_SYS_GFX_BASE_IDX 0
+#define regSPI_CDBG_SYS_HP3D 0x11c4
+#define regSPI_CDBG_SYS_HP3D_BASE_IDX 0
+#define regSPI_CDBG_SYS_CS0 0x11c5
+#define regSPI_CDBG_SYS_CS0_BASE_IDX 0
+#define regSPI_CDBG_SYS_CS1 0x11c6
+#define regSPI_CDBG_SYS_CS1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_GFX 0x11c7
+#define regSPI_WCL_PIPE_PERCENT_GFX_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_HP3D 0x11c8
+#define regSPI_WCL_PIPE_PERCENT_HP3D_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS0 0x11c9
+#define regSPI_WCL_PIPE_PERCENT_CS0_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS1 0x11ca
+#define regSPI_WCL_PIPE_PERCENT_CS1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS2 0x11cb
+#define regSPI_WCL_PIPE_PERCENT_CS2_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS3 0x11cc
+#define regSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS4 0x11cd
+#define regSPI_WCL_PIPE_PERCENT_CS4_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS5 0x11ce
+#define regSPI_WCL_PIPE_PERCENT_CS5_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS6 0x11cf
+#define regSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS7 0x11d0
+#define regSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0
+#define regSPI_GDBG_WAVE_CNTL 0x11d1
+#define regSPI_GDBG_WAVE_CNTL_BASE_IDX 0
+#define regSPI_GDBG_TRAP_CONFIG 0x11d2
+#define regSPI_GDBG_TRAP_CONFIG_BASE_IDX 0
+#define regSPI_GDBG_PER_VMID_CNTL 0x11d3
+#define regSPI_GDBG_PER_VMID_CNTL_BASE_IDX 0
+#define regSPI_GDBG_WAVE_CNTL3 0x11d5
+#define regSPI_GDBG_WAVE_CNTL3_BASE_IDX 0
+#define regSPI_SCRATCH_ADDR_CHECK 0x11d8
+#define regSPI_SCRATCH_ADDR_CHECK_BASE_IDX 0
+#define regSPI_SCRATCH_ADDR_STATUS 0x11d9
+#define regSPI_SCRATCH_ADDR_STATUS_BASE_IDX 0
+#define regSPI_RESET_DEBUG 0x11da
+#define regSPI_RESET_DEBUG_BASE_IDX 0
+#define regSPI_COMPUTE_QUEUE_RESET 0x11db
+#define regSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_0 0x11dc
+#define regSPI_RESOURCE_RESERVE_CU_0_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_1 0x11dd
+#define regSPI_RESOURCE_RESERVE_CU_1_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_2 0x11de
+#define regSPI_RESOURCE_RESERVE_CU_2_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_3 0x11df
+#define regSPI_RESOURCE_RESERVE_CU_3_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_4 0x11e0
+#define regSPI_RESOURCE_RESERVE_CU_4_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_5 0x11e1
+#define regSPI_RESOURCE_RESERVE_CU_5_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_6 0x11e2
+#define regSPI_RESOURCE_RESERVE_CU_6_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_7 0x11e3
+#define regSPI_RESOURCE_RESERVE_CU_7_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_8 0x11e4
+#define regSPI_RESOURCE_RESERVE_CU_8_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_9 0x11e5
+#define regSPI_RESOURCE_RESERVE_CU_9_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_0 0x11e6
+#define regSPI_RESOURCE_RESERVE_EN_CU_0_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_1 0x11e7
+#define regSPI_RESOURCE_RESERVE_EN_CU_1_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_2 0x11e8
+#define regSPI_RESOURCE_RESERVE_EN_CU_2_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_3 0x11e9
+#define regSPI_RESOURCE_RESERVE_EN_CU_3_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_4 0x11ea
+#define regSPI_RESOURCE_RESERVE_EN_CU_4_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_5 0x11eb
+#define regSPI_RESOURCE_RESERVE_EN_CU_5_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_6 0x11ec
+#define regSPI_RESOURCE_RESERVE_EN_CU_6_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_7 0x11ed
+#define regSPI_RESOURCE_RESERVE_EN_CU_7_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_8 0x11ee
+#define regSPI_RESOURCE_RESERVE_EN_CU_8_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_9 0x11ef
+#define regSPI_RESOURCE_RESERVE_EN_CU_9_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_10 0x11f0
+#define regSPI_RESOURCE_RESERVE_CU_10_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_11 0x11f1
+#define regSPI_RESOURCE_RESERVE_CU_11_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_10 0x11f2
+#define regSPI_RESOURCE_RESERVE_EN_CU_10_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_11 0x11f3
+#define regSPI_RESOURCE_RESERVE_EN_CU_11_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_12 0x11f4
+#define regSPI_RESOURCE_RESERVE_CU_12_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_13 0x11f5
+#define regSPI_RESOURCE_RESERVE_CU_13_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_14 0x11f6
+#define regSPI_RESOURCE_RESERVE_CU_14_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_CU_15 0x11f7
+#define regSPI_RESOURCE_RESERVE_CU_15_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_12 0x11f8
+#define regSPI_RESOURCE_RESERVE_EN_CU_12_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_13 0x11f9
+#define regSPI_RESOURCE_RESERVE_EN_CU_13_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_14 0x11fa
+#define regSPI_RESOURCE_RESERVE_EN_CU_14_BASE_IDX 0
+#define regSPI_RESOURCE_RESERVE_EN_CU_15 0x11fb
+#define regSPI_RESOURCE_RESERVE_EN_CU_15_BASE_IDX 0
+#define regSPI_COMPUTE_WF_CTX_SAVE 0x11fc
+#define regSPI_COMPUTE_WF_CTX_SAVE_BASE_IDX 0
+#define regSPI_ARB_CNTL_0 0x11fd
+#define regSPI_ARB_CNTL_0_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_cpphqddec
+// base address: 0xc800
+#define regCP_HQD_GFX_CONTROL 0x123e
+#define regCP_HQD_GFX_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_STATUS 0x123f
+#define regCP_HQD_GFX_STATUS_BASE_IDX 0
+#define regCP_HPD_ROQ_OFFSETS 0x1240
+#define regCP_HPD_ROQ_OFFSETS_BASE_IDX 0
+#define regCP_HPD_STATUS0 0x1241
+#define regCP_HPD_STATUS0_BASE_IDX 0
+#define regCP_HPD_UTCL1_CNTL 0x1242
+#define regCP_HPD_UTCL1_CNTL_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR 0x1243
+#define regCP_HPD_UTCL1_ERROR_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR_ADDR 0x1244
+#define regCP_HPD_UTCL1_ERROR_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR 0x1245
+#define regCP_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR_HI 0x1246
+#define regCP_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_ACTIVE 0x1247
+#define regCP_HQD_ACTIVE_BASE_IDX 0
+#define regCP_HQD_VMID 0x1248
+#define regCP_HQD_VMID_BASE_IDX 0
+#define regCP_HQD_PERSISTENT_STATE 0x1249
+#define regCP_HQD_PERSISTENT_STATE_BASE_IDX 0
+#define regCP_HQD_PIPE_PRIORITY 0x124a
+#define regCP_HQD_PIPE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUEUE_PRIORITY 0x124b
+#define regCP_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUANTUM 0x124c
+#define regCP_HQD_QUANTUM_BASE_IDX 0
+#define regCP_HQD_PQ_BASE 0x124d
+#define regCP_HQD_PQ_BASE_BASE_IDX 0
+#define regCP_HQD_PQ_BASE_HI 0x124e
+#define regCP_HQD_PQ_BASE_HI_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR 0x124f
+#define regCP_HQD_PQ_RPTR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR 0x1250
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x1251
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR 0x1252
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x1253
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_DOORBELL_CONTROL 0x1254
+#define regCP_HQD_PQ_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_CONTROL 0x1256
+#define regCP_HQD_PQ_CONTROL_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR 0x1257
+#define regCP_HQD_IB_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR_HI 0x1258
+#define regCP_HQD_IB_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_IB_RPTR 0x1259
+#define regCP_HQD_IB_RPTR_BASE_IDX 0
+#define regCP_HQD_IB_CONTROL 0x125a
+#define regCP_HQD_IB_CONTROL_BASE_IDX 0
+#define regCP_HQD_IQ_TIMER 0x125b
+#define regCP_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_HQD_IQ_RPTR 0x125c
+#define regCP_HQD_IQ_RPTR_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_REQUEST 0x125d
+#define regCP_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_HQD_DMA_OFFLOAD 0x125e
+#define regCP_HQD_DMA_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_OFFLOAD 0x125e
+#define regCP_HQD_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_SEMA_CMD 0x125f
+#define regCP_HQD_SEMA_CMD_BASE_IDX 0
+#define regCP_HQD_MSG_TYPE 0x1260
+#define regCP_HQD_MSG_TYPE_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_LO 0x1261
+#define regCP_HQD_ATOMIC0_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_HI 0x1262
+#define regCP_HQD_ATOMIC0_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_LO 0x1263
+#define regCP_HQD_ATOMIC1_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_HI 0x1264
+#define regCP_HQD_ATOMIC1_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER0 0x1265
+#define regCP_HQD_HQ_SCHEDULER0_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS0 0x1265
+#define regCP_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL0 0x1266
+#define regCP_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER1 0x1266
+#define regCP_HQD_HQ_SCHEDULER1_BASE_IDX 0
+#define regCP_MQD_CONTROL 0x1267
+#define regCP_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS1 0x1268
+#define regCP_HQD_HQ_STATUS1_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL1 0x1269
+#define regCP_HQD_HQ_CONTROL1_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR 0x126a
+#define regCP_HQD_EOP_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR_HI 0x126b
+#define regCP_HQD_EOP_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_EOP_CONTROL 0x126c
+#define regCP_HQD_EOP_CONTROL_BASE_IDX 0
+#define regCP_HQD_EOP_RPTR 0x126d
+#define regCP_HQD_EOP_RPTR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR 0x126e
+#define regCP_HQD_EOP_WPTR_BASE_IDX 0
+#define regCP_HQD_EOP_EVENTS 0x126f
+#define regCP_HQD_EOP_EVENTS_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO 0x1270
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI 0x1271
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_CONTROL 0x1272
+#define regCP_HQD_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_OFFSET 0x1273
+#define regCP_HQD_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_SIZE 0x1274
+#define regCP_HQD_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCP_HQD_WG_STATE_OFFSET 0x1275
+#define regCP_HQD_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_SIZE 0x1276
+#define regCP_HQD_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCP_HQD_GDS_RESOURCE_STATE 0x1277
+#define regCP_HQD_GDS_RESOURCE_STATE_BASE_IDX 0
+#define regCP_HQD_ERROR 0x1278
+#define regCP_HQD_ERROR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR_MEM 0x1279
+#define regCP_HQD_EOP_WPTR_MEM_BASE_IDX 0
+#define regCP_HQD_AQL_CONTROL 0x127a
+#define regCP_HQD_AQL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_LO 0x127b
+#define regCP_HQD_PQ_WPTR_LO_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_HI 0x127c
+#define regCP_HQD_PQ_WPTR_HI_BASE_IDX 0
+#define regCP_HQD_AQL_CONTROL_1 0x127d
+#define regCP_HQD_AQL_CONTROL_1_BASE_IDX 0
+#define regCP_HQD_AQL_DISPATCH_ID 0x127e
+#define regCP_HQD_AQL_DISPATCH_ID_BASE_IDX 0
+#define regCP_HQD_AQL_DISPATCH_ID_HI 0x127f
+#define regCP_HQD_AQL_DISPATCH_ID_HI_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_tcpdec
+// base address: 0xca80
+#define regTCP_WATCH0_ADDR_H 0x12a0
+#define regTCP_WATCH0_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH0_ADDR_L 0x12a1
+#define regTCP_WATCH0_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH0_CNTL 0x12a2
+#define regTCP_WATCH0_CNTL_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_H 0x12a3
+#define regTCP_WATCH1_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_L 0x12a4
+#define regTCP_WATCH1_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH1_CNTL 0x12a5
+#define regTCP_WATCH1_CNTL_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_H 0x12a6
+#define regTCP_WATCH2_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_L 0x12a7
+#define regTCP_WATCH2_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH2_CNTL 0x12a8
+#define regTCP_WATCH2_CNTL_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_H 0x12a9
+#define regTCP_WATCH3_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_L 0x12aa
+#define regTCP_WATCH3_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH3_CNTL 0x12ab
+#define regTCP_WATCH3_CNTL_BASE_IDX 0
+#define regTCP_GATCL1_CNTL 0x12b0
+#define regTCP_GATCL1_CNTL_BASE_IDX 0
+#define regTCP_ATC_EDC_GATCL1_CNT 0x12b1
+#define regTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0
+#define regTCP_GATCL1_DSM_CNTL 0x12b2
+#define regTCP_GATCL1_DSM_CNTL_BASE_IDX 0
+#define regTCP_DSM_CNTL 0x12b3
+#define regTCP_DSM_CNTL_BASE_IDX 0
+#define regTCP_CNTL2 0x12b4
+#define regTCP_CNTL2_BASE_IDX 0
+#define regTCP_UTCL1_CNTL1 0x12b5
+#define regTCP_UTCL1_CNTL1_BASE_IDX 0
+#define regTCP_UTCL1_CNTL2 0x12b6
+#define regTCP_UTCL1_CNTL2_BASE_IDX 0
+#define regTCP_UTCL1_STATUS 0x12b7
+#define regTCP_UTCL1_STATUS_BASE_IDX 0
+#define regTCP_DSM_CNTL2 0x12b8
+#define regTCP_DSM_CNTL2_BASE_IDX 0
+#define regTCP_PERFCOUNTER_FILTER 0x12b9
+#define regTCP_PERFCOUNTER_FILTER_BASE_IDX 0
+#define regTCP_PERFCOUNTER_FILTER_EN 0x12ba
+#define regTCP_PERFCOUNTER_FILTER_EN_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_gdspdec
+// base address: 0xcc00
+#define regGDS_VMID0_BASE 0x1300
+#define regGDS_VMID0_BASE_BASE_IDX 0
+#define regGDS_VMID0_SIZE 0x1301
+#define regGDS_VMID0_SIZE_BASE_IDX 0
+#define regGDS_VMID1_BASE 0x1302
+#define regGDS_VMID1_BASE_BASE_IDX 0
+#define regGDS_VMID1_SIZE 0x1303
+#define regGDS_VMID1_SIZE_BASE_IDX 0
+#define regGDS_VMID2_BASE 0x1304
+#define regGDS_VMID2_BASE_BASE_IDX 0
+#define regGDS_VMID2_SIZE 0x1305
+#define regGDS_VMID2_SIZE_BASE_IDX 0
+#define regGDS_VMID3_BASE 0x1306
+#define regGDS_VMID3_BASE_BASE_IDX 0
+#define regGDS_VMID3_SIZE 0x1307
+#define regGDS_VMID3_SIZE_BASE_IDX 0
+#define regGDS_VMID4_BASE 0x1308
+#define regGDS_VMID4_BASE_BASE_IDX 0
+#define regGDS_VMID4_SIZE 0x1309
+#define regGDS_VMID4_SIZE_BASE_IDX 0
+#define regGDS_VMID5_BASE 0x130a
+#define regGDS_VMID5_BASE_BASE_IDX 0
+#define regGDS_VMID5_SIZE 0x130b
+#define regGDS_VMID5_SIZE_BASE_IDX 0
+#define regGDS_VMID6_BASE 0x130c
+#define regGDS_VMID6_BASE_BASE_IDX 0
+#define regGDS_VMID6_SIZE 0x130d
+#define regGDS_VMID6_SIZE_BASE_IDX 0
+#define regGDS_VMID7_BASE 0x130e
+#define regGDS_VMID7_BASE_BASE_IDX 0
+#define regGDS_VMID7_SIZE 0x130f
+#define regGDS_VMID7_SIZE_BASE_IDX 0
+#define regGDS_VMID8_BASE 0x1310
+#define regGDS_VMID8_BASE_BASE_IDX 0
+#define regGDS_VMID8_SIZE 0x1311
+#define regGDS_VMID8_SIZE_BASE_IDX 0
+#define regGDS_VMID9_BASE 0x1312
+#define regGDS_VMID9_BASE_BASE_IDX 0
+#define regGDS_VMID9_SIZE 0x1313
+#define regGDS_VMID9_SIZE_BASE_IDX 0
+#define regGDS_VMID10_BASE 0x1314
+#define regGDS_VMID10_BASE_BASE_IDX 0
+#define regGDS_VMID10_SIZE 0x1315
+#define regGDS_VMID10_SIZE_BASE_IDX 0
+#define regGDS_VMID11_BASE 0x1316
+#define regGDS_VMID11_BASE_BASE_IDX 0
+#define regGDS_VMID11_SIZE 0x1317
+#define regGDS_VMID11_SIZE_BASE_IDX 0
+#define regGDS_VMID12_BASE 0x1318
+#define regGDS_VMID12_BASE_BASE_IDX 0
+#define regGDS_VMID12_SIZE 0x1319
+#define regGDS_VMID12_SIZE_BASE_IDX 0
+#define regGDS_VMID13_BASE 0x131a
+#define regGDS_VMID13_BASE_BASE_IDX 0
+#define regGDS_VMID13_SIZE 0x131b
+#define regGDS_VMID13_SIZE_BASE_IDX 0
+#define regGDS_VMID14_BASE 0x131c
+#define regGDS_VMID14_BASE_BASE_IDX 0
+#define regGDS_VMID14_SIZE 0x131d
+#define regGDS_VMID14_SIZE_BASE_IDX 0
+#define regGDS_VMID15_BASE 0x131e
+#define regGDS_VMID15_BASE_BASE_IDX 0
+#define regGDS_VMID15_SIZE 0x131f
+#define regGDS_VMID15_SIZE_BASE_IDX 0
+#define regGDS_GWS_VMID0 0x1320
+#define regGDS_GWS_VMID0_BASE_IDX 0
+#define regGDS_GWS_VMID1 0x1321
+#define regGDS_GWS_VMID1_BASE_IDX 0
+#define regGDS_GWS_VMID2 0x1322
+#define regGDS_GWS_VMID2_BASE_IDX 0
+#define regGDS_GWS_VMID3 0x1323
+#define regGDS_GWS_VMID3_BASE_IDX 0
+#define regGDS_GWS_VMID4 0x1324
+#define regGDS_GWS_VMID4_BASE_IDX 0
+#define regGDS_GWS_VMID5 0x1325
+#define regGDS_GWS_VMID5_BASE_IDX 0
+#define regGDS_GWS_VMID6 0x1326
+#define regGDS_GWS_VMID6_BASE_IDX 0
+#define regGDS_GWS_VMID7 0x1327
+#define regGDS_GWS_VMID7_BASE_IDX 0
+#define regGDS_GWS_VMID8 0x1328
+#define regGDS_GWS_VMID8_BASE_IDX 0
+#define regGDS_GWS_VMID9 0x1329
+#define regGDS_GWS_VMID9_BASE_IDX 0
+#define regGDS_GWS_VMID10 0x132a
+#define regGDS_GWS_VMID10_BASE_IDX 0
+#define regGDS_GWS_VMID11 0x132b
+#define regGDS_GWS_VMID11_BASE_IDX 0
+#define regGDS_GWS_VMID12 0x132c
+#define regGDS_GWS_VMID12_BASE_IDX 0
+#define regGDS_GWS_VMID13 0x132d
+#define regGDS_GWS_VMID13_BASE_IDX 0
+#define regGDS_GWS_VMID14 0x132e
+#define regGDS_GWS_VMID14_BASE_IDX 0
+#define regGDS_GWS_VMID15 0x132f
+#define regGDS_GWS_VMID15_BASE_IDX 0
+#define regGDS_OA_VMID0 0x1330
+#define regGDS_OA_VMID0_BASE_IDX 0
+#define regGDS_OA_VMID1 0x1331
+#define regGDS_OA_VMID1_BASE_IDX 0
+#define regGDS_OA_VMID2 0x1332
+#define regGDS_OA_VMID2_BASE_IDX 0
+#define regGDS_OA_VMID3 0x1333
+#define regGDS_OA_VMID3_BASE_IDX 0
+#define regGDS_OA_VMID4 0x1334
+#define regGDS_OA_VMID4_BASE_IDX 0
+#define regGDS_OA_VMID5 0x1335
+#define regGDS_OA_VMID5_BASE_IDX 0
+#define regGDS_OA_VMID6 0x1336
+#define regGDS_OA_VMID6_BASE_IDX 0
+#define regGDS_OA_VMID7 0x1337
+#define regGDS_OA_VMID7_BASE_IDX 0
+#define regGDS_OA_VMID8 0x1338
+#define regGDS_OA_VMID8_BASE_IDX 0
+#define regGDS_OA_VMID9 0x1339
+#define regGDS_OA_VMID9_BASE_IDX 0
+#define regGDS_OA_VMID10 0x133a
+#define regGDS_OA_VMID10_BASE_IDX 0
+#define regGDS_OA_VMID11 0x133b
+#define regGDS_OA_VMID11_BASE_IDX 0
+#define regGDS_OA_VMID12 0x133c
+#define regGDS_OA_VMID12_BASE_IDX 0
+#define regGDS_OA_VMID13 0x133d
+#define regGDS_OA_VMID13_BASE_IDX 0
+#define regGDS_OA_VMID14 0x133e
+#define regGDS_OA_VMID14_BASE_IDX 0
+#define regGDS_OA_VMID15 0x133f
+#define regGDS_OA_VMID15_BASE_IDX 0
+#define regGDS_GWS_RESET0 0x1344
+#define regGDS_GWS_RESET0_BASE_IDX 0
+#define regGDS_GWS_RESET1 0x1345
+#define regGDS_GWS_RESET1_BASE_IDX 0
+#define regGDS_GWS_RESOURCE_RESET 0x1346
+#define regGDS_GWS_RESOURCE_RESET_BASE_IDX 0
+#define regGDS_COMPUTE_MAX_WAVE_ID 0x1348
+#define regGDS_COMPUTE_MAX_WAVE_ID_BASE_IDX 0
+#define regGDS_OA_RESET_MASK 0x1349
+#define regGDS_OA_RESET_MASK_BASE_IDX 0
+#define regGDS_OA_RESET 0x134a
+#define regGDS_OA_RESET_BASE_IDX 0
+#define regGDS_ENHANCE 0x134b
+#define regGDS_ENHANCE_BASE_IDX 0
+#define regGDS_OA_CGPG_RESTORE 0x134c
+#define regGDS_OA_CGPG_RESTORE_BASE_IDX 0
+#define regGDS_CS_CTXSW_STATUS 0x134d
+#define regGDS_CS_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT0 0x134e
+#define regGDS_CS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT1 0x134f
+#define regGDS_CS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT2 0x1350
+#define regGDS_CS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT3 0x1351
+#define regGDS_CS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_GFX_CTXSW_STATUS 0x1352
+#define regGDS_GFX_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_VS_CTXSW_CNT0 0x1353
+#define regGDS_VS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_VS_CTXSW_CNT1 0x1354
+#define regGDS_VS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_VS_CTXSW_CNT2 0x1355
+#define regGDS_VS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_VS_CTXSW_CNT3 0x1356
+#define regGDS_VS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS0_CTXSW_CNT0 0x1357
+#define regGDS_PS0_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS0_CTXSW_CNT1 0x1358
+#define regGDS_PS0_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS0_CTXSW_CNT2 0x1359
+#define regGDS_PS0_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS0_CTXSW_CNT3 0x135a
+#define regGDS_PS0_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS1_CTXSW_CNT0 0x135b
+#define regGDS_PS1_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS1_CTXSW_CNT1 0x135c
+#define regGDS_PS1_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS1_CTXSW_CNT2 0x135d
+#define regGDS_PS1_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS1_CTXSW_CNT3 0x135e
+#define regGDS_PS1_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS2_CTXSW_CNT0 0x135f
+#define regGDS_PS2_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS2_CTXSW_CNT1 0x1360
+#define regGDS_PS2_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS2_CTXSW_CNT2 0x1361
+#define regGDS_PS2_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS2_CTXSW_CNT3 0x1362
+#define regGDS_PS2_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS3_CTXSW_CNT0 0x1363
+#define regGDS_PS3_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS3_CTXSW_CNT1 0x1364
+#define regGDS_PS3_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS3_CTXSW_CNT2 0x1365
+#define regGDS_PS3_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS3_CTXSW_CNT3 0x1366
+#define regGDS_PS3_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS4_CTXSW_CNT0 0x1367
+#define regGDS_PS4_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS4_CTXSW_CNT1 0x1368
+#define regGDS_PS4_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS4_CTXSW_CNT2 0x1369
+#define regGDS_PS4_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS4_CTXSW_CNT3 0x136a
+#define regGDS_PS4_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS5_CTXSW_CNT0 0x136b
+#define regGDS_PS5_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS5_CTXSW_CNT1 0x136c
+#define regGDS_PS5_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS5_CTXSW_CNT2 0x136d
+#define regGDS_PS5_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS5_CTXSW_CNT3 0x136e
+#define regGDS_PS5_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS6_CTXSW_CNT0 0x136f
+#define regGDS_PS6_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS6_CTXSW_CNT1 0x1370
+#define regGDS_PS6_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS6_CTXSW_CNT2 0x1371
+#define regGDS_PS6_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS6_CTXSW_CNT3 0x1372
+#define regGDS_PS6_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS7_CTXSW_CNT0 0x1373
+#define regGDS_PS7_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS7_CTXSW_CNT1 0x1374
+#define regGDS_PS7_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS7_CTXSW_CNT2 0x1375
+#define regGDS_PS7_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS7_CTXSW_CNT3 0x1376
+#define regGDS_PS7_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT0 0x1377
+#define regGDS_GS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT1 0x1378
+#define regGDS_GS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT2 0x1379
+#define regGDS_GS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT3 0x137a
+#define regGDS_GS_CTXSW_CNT3_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_rasdec
+// base address: 0xce00
+#define regRAS_SIGNATURE_CONTROL 0x1380
+#define regRAS_SIGNATURE_CONTROL_BASE_IDX 0
+#define regRAS_SIGNATURE_MASK 0x1381
+#define regRAS_SIGNATURE_MASK_BASE_IDX 0
+#define regRAS_SX_SIGNATURE0 0x1382
+#define regRAS_SX_SIGNATURE0_BASE_IDX 0
+#define regRAS_SX_SIGNATURE1 0x1383
+#define regRAS_SX_SIGNATURE1_BASE_IDX 0
+#define regRAS_SX_SIGNATURE2 0x1384
+#define regRAS_SX_SIGNATURE2_BASE_IDX 0
+#define regRAS_SX_SIGNATURE3 0x1385
+#define regRAS_SX_SIGNATURE3_BASE_IDX 0
+#define regRAS_DB_SIGNATURE0 0x138b
+#define regRAS_DB_SIGNATURE0_BASE_IDX 0
+#define regRAS_PA_SIGNATURE0 0x138c
+#define regRAS_PA_SIGNATURE0_BASE_IDX 0
+#define regRAS_VGT_SIGNATURE0 0x138d
+#define regRAS_VGT_SIGNATURE0_BASE_IDX 0
+#define regRAS_SQ_SIGNATURE0 0x138e
+#define regRAS_SQ_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE0 0x138f
+#define regRAS_SC_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE1 0x1390
+#define regRAS_SC_SIGNATURE1_BASE_IDX 0
+#define regRAS_SC_SIGNATURE2 0x1391
+#define regRAS_SC_SIGNATURE2_BASE_IDX 0
+#define regRAS_SC_SIGNATURE3 0x1392
+#define regRAS_SC_SIGNATURE3_BASE_IDX 0
+#define regRAS_SC_SIGNATURE4 0x1393
+#define regRAS_SC_SIGNATURE4_BASE_IDX 0
+#define regRAS_SC_SIGNATURE5 0x1394
+#define regRAS_SC_SIGNATURE5_BASE_IDX 0
+#define regRAS_SC_SIGNATURE6 0x1395
+#define regRAS_SC_SIGNATURE6_BASE_IDX 0
+#define regRAS_SC_SIGNATURE7 0x1396
+#define regRAS_SC_SIGNATURE7_BASE_IDX 0
+#define regRAS_IA_SIGNATURE0 0x1397
+#define regRAS_IA_SIGNATURE0_BASE_IDX 0
+#define regRAS_IA_SIGNATURE1 0x1398
+#define regRAS_IA_SIGNATURE1_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE0 0x1399
+#define regRAS_SPI_SIGNATURE0_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE1 0x139a
+#define regRAS_SPI_SIGNATURE1_BASE_IDX 0
+#define regRAS_TA_SIGNATURE0 0x139b
+#define regRAS_TA_SIGNATURE0_BASE_IDX 0
+#define regRAS_TD_SIGNATURE0 0x139c
+#define regRAS_TD_SIGNATURE0_BASE_IDX 0
+#define regRAS_CB_SIGNATURE0 0x139d
+#define regRAS_CB_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE0 0x139e
+#define regRAS_BCI_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE1 0x139f
+#define regRAS_BCI_SIGNATURE1_BASE_IDX 0
+#define regRAS_TA_SIGNATURE1 0x13a0
+#define regRAS_TA_SIGNATURE1_BASE_IDX 0
+
+
+// addressBlock: xcd0_gc_gfxdec0
+// base address: 0x28000
+#define regDB_RENDER_CONTROL 0x0000
+#define regDB_RENDER_CONTROL_BASE_IDX 1
+#define regDB_COUNT_CONTROL 0x0001
+#define regDB_COUNT_CONTROL_BASE_IDX 1
+#define regDB_DEPTH_VIEW 0x0002
+#define regDB_DEPTH_VIEW_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE 0x0003
+#define regDB_RENDER_OVERRIDE_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE2 0x0004
+#define regDB_RENDER_OVERRIDE2_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE 0x0005
+#define regDB_HTILE_DATA_BASE_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE_HI 0x0006
+#define regDB_HTILE_DATA_BASE_HI_BASE_IDX 1
+#define regDB_DEPTH_SIZE 0x0007
+#define regDB_DEPTH_SIZE_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MIN 0x0008
+#define regDB_DEPTH_BOUNDS_MIN_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MAX 0x0009
+#define regDB_DEPTH_BOUNDS_MAX_BASE_IDX 1
+#define regDB_STENCIL_CLEAR 0x000a
+#define regDB_STENCIL_CLEAR_BASE_IDX 1
+#define regDB_DEPTH_CLEAR 0x000b
+#define regDB_DEPTH_CLEAR_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_TL 0x000c
+#define regPA_SC_SCREEN_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_BR 0x000d
+#define regPA_SC_SCREEN_SCISSOR_BR_BASE_IDX 1
+#define regDB_Z_INFO 0x000e
+#define regDB_Z_INFO_BASE_IDX 1
+#define regDB_STENCIL_INFO 0x000f
+#define regDB_STENCIL_INFO_BASE_IDX 1
+#define regDB_Z_READ_BASE 0x0010
+#define regDB_Z_READ_BASE_BASE_IDX 1
+#define regDB_Z_READ_BASE_HI 0x0011
+#define regDB_Z_READ_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE 0x0012
+#define regDB_STENCIL_READ_BASE_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE_HI 0x0013
+#define regDB_STENCIL_READ_BASE_HI_BASE_IDX 1
+#define regDB_Z_WRITE_BASE 0x0014
+#define regDB_Z_WRITE_BASE_BASE_IDX 1
+#define regDB_Z_WRITE_BASE_HI 0x0015
+#define regDB_Z_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE 0x0016
+#define regDB_STENCIL_WRITE_BASE_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE_HI 0x0017
+#define regDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_DFSM_CONTROL 0x0018
+#define regDB_DFSM_CONTROL_BASE_IDX 1
+#define regDB_Z_INFO2 0x001a
+#define regDB_Z_INFO2_BASE_IDX 1
+#define regDB_STENCIL_INFO2 0x001b
+#define regDB_STENCIL_INFO2_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_0 0x007a
+#define regCOHER_DEST_BASE_HI_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_1 0x007b
+#define regCOHER_DEST_BASE_HI_1_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_2 0x007c
+#define regCOHER_DEST_BASE_HI_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_3 0x007d
+#define regCOHER_DEST_BASE_HI_3_BASE_IDX 1
+#define regCOHER_DEST_BASE_2 0x007e
+#define regCOHER_DEST_BASE_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_3 0x007f
+#define regCOHER_DEST_BASE_3_BASE_IDX 1
+#define regPA_SC_WINDOW_OFFSET 0x0080
+#define regPA_SC_WINDOW_OFFSET_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_TL 0x0081
+#define regPA_SC_WINDOW_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_BR 0x0082
+#define regPA_SC_WINDOW_SCISSOR_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_RULE 0x0083
+#define regPA_SC_CLIPRECT_RULE_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_TL 0x0084
+#define regPA_SC_CLIPRECT_0_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_BR 0x0085
+#define regPA_SC_CLIPRECT_0_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_TL 0x0086
+#define regPA_SC_CLIPRECT_1_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_BR 0x0087
+#define regPA_SC_CLIPRECT_1_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_TL 0x0088
+#define regPA_SC_CLIPRECT_2_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_BR 0x0089
+#define regPA_SC_CLIPRECT_2_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_TL 0x008a
+#define regPA_SC_CLIPRECT_3_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_BR 0x008b
+#define regPA_SC_CLIPRECT_3_BR_BASE_IDX 1
+#define regPA_SC_EDGERULE 0x008c
+#define regPA_SC_EDGERULE_BASE_IDX 1
+#define regPA_SU_HARDWARE_SCREEN_OFFSET 0x008d
+#define regPA_SU_HARDWARE_SCREEN_OFFSET_BASE_IDX 1
+#define regCB_TARGET_MASK 0x008e
+#define regCB_TARGET_MASK_BASE_IDX 1
+#define regCB_SHADER_MASK 0x008f
+#define regCB_SHADER_MASK_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_TL 0x0090
+#define regPA_SC_GENERIC_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_BR 0x0091
+#define regPA_SC_GENERIC_SCISSOR_BR_BASE_IDX 1
+#define regCOHER_DEST_BASE_0 0x0092
+#define regCOHER_DEST_BASE_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_1 0x0093
+#define regCOHER_DEST_BASE_1_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_TL 0x0094
+#define regPA_SC_VPORT_SCISSOR_0_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_BR 0x0095
+#define regPA_SC_VPORT_SCISSOR_0_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_TL 0x0096
+#define regPA_SC_VPORT_SCISSOR_1_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_BR 0x0097
+#define regPA_SC_VPORT_SCISSOR_1_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_TL 0x0098
+#define regPA_SC_VPORT_SCISSOR_2_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_BR 0x0099
+#define regPA_SC_VPORT_SCISSOR_2_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_TL 0x009a
+#define regPA_SC_VPORT_SCISSOR_3_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_BR 0x009b
+#define regPA_SC_VPORT_SCISSOR_3_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_TL 0x009c
+#define regPA_SC_VPORT_SCISSOR_4_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_BR 0x009d
+#define regPA_SC_VPORT_SCISSOR_4_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_TL 0x009e
+#define regPA_SC_VPORT_SCISSOR_5_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_BR 0x009f
+#define regPA_SC_VPORT_SCISSOR_5_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_TL 0x00a0
+#define regPA_SC_VPORT_SCISSOR_6_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_BR 0x00a1
+#define regPA_SC_VPORT_SCISSOR_6_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_TL 0x00a2
+#define regPA_SC_VPORT_SCISSOR_7_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_BR 0x00a3
+#define regPA_SC_VPORT_SCISSOR_7_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_TL 0x00a4
+#define regPA_SC_VPORT_SCISSOR_8_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_BR 0x00a5
+#define regPA_SC_VPORT_SCISSOR_8_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_TL 0x00a6
+#define regPA_SC_VPORT_SCISSOR_9_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_BR 0x00a7
+#define regPA_SC_VPORT_SCISSOR_9_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_TL 0x00a8
+#define regPA_SC_VPORT_SCISSOR_10_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_BR 0x00a9
+#define regPA_SC_VPORT_SCISSOR_10_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_TL 0x00aa
+#define regPA_SC_VPORT_SCISSOR_11_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_BR 0x00ab
+#define regPA_SC_VPORT_SCISSOR_11_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_TL 0x00ac
+#define regPA_SC_VPORT_SCISSOR_12_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_BR 0x00ad
+#define regPA_SC_VPORT_SCISSOR_12_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_TL 0x00ae
+#define regPA_SC_VPORT_SCISSOR_13_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_BR 0x00af
+#define regPA_SC_VPORT_SCISSOR_13_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_TL 0x00b0
+#define regPA_SC_VPORT_SCISSOR_14_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_BR 0x00b1
+#define regPA_SC_VPORT_SCISSOR_14_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_TL 0x00b2
+#define regPA_SC_VPORT_SCISSOR_15_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_BR 0x00b3
+#define regPA_SC_VPORT_SCISSOR_15_BR_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_0 0x00b4
+#define regPA_SC_VPORT_ZMIN_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_0 0x00b5
+#define regPA_SC_VPORT_ZMAX_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_1 0x00b6
+#define regPA_SC_VPORT_ZMIN_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_1 0x00b7
+#define regPA_SC_VPORT_ZMAX_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_2 0x00b8
+#define regPA_SC_VPORT_ZMIN_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_2 0x00b9
+#define regPA_SC_VPORT_ZMAX_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_3 0x00ba
+#define regPA_SC_VPORT_ZMIN_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_3 0x00bb
+#define regPA_SC_VPORT_ZMAX_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_4 0x00bc
+#define regPA_SC_VPORT_ZMIN_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_4 0x00bd
+#define regPA_SC_VPORT_ZMAX_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_5 0x00be
+#define regPA_SC_VPORT_ZMIN_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_5 0x00bf
+#define regPA_SC_VPORT_ZMAX_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_6 0x00c0
+#define regPA_SC_VPORT_ZMIN_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_6 0x00c1
+#define regPA_SC_VPORT_ZMAX_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_7 0x00c2
+#define regPA_SC_VPORT_ZMIN_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_7 0x00c3
+#define regPA_SC_VPORT_ZMAX_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_8 0x00c4
+#define regPA_SC_VPORT_ZMIN_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_8 0x00c5
+#define regPA_SC_VPORT_ZMAX_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_9 0x00c6
+#define regPA_SC_VPORT_ZMIN_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_9 0x00c7
+#define regPA_SC_VPORT_ZMAX_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_10 0x00c8
+#define regPA_SC_VPORT_ZMIN_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_10 0x00c9
+#define regPA_SC_VPORT_ZMAX_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_11 0x00ca
+#define regPA_SC_VPORT_ZMIN_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_11 0x00cb
+#define regPA_SC_VPORT_ZMAX_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_12 0x00cc
+#define regPA_SC_VPORT_ZMIN_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_12 0x00cd
+#define regPA_SC_VPORT_ZMAX_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_13 0x00ce
+#define regPA_SC_VPORT_ZMIN_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_13 0x00cf
+#define regPA_SC_VPORT_ZMAX_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_14 0x00d0
+#define regPA_SC_VPORT_ZMIN_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_14 0x00d1
+#define regPA_SC_VPORT_ZMAX_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_15 0x00d2
+#define regPA_SC_VPORT_ZMIN_15_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_15 0x00d3
+#define regPA_SC_VPORT_ZMAX_15_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG 0x00d4
+#define regPA_SC_RASTER_CONFIG_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG_1 0x00d5
+#define regPA_SC_RASTER_CONFIG_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_CONTROL 0x00d6
+#define regPA_SC_SCREEN_EXTENT_CONTROL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_OVERRIDE 0x00d7
+#define regPA_SC_TILE_STEERING_OVERRIDE_BASE_IDX 1
+#define regCP_PERFMON_CNTX_CNTL 0x00d8
+#define regCP_PERFMON_CNTX_CNTL_BASE_IDX 1
+#define regCP_PIPEID 0x00d9
+#define regCP_PIPEID_BASE_IDX 1
+#define regCP_RINGID 0x00d9
+#define regCP_RINGID_BASE_IDX 1
+#define regCP_VMID 0x00da
+#define regCP_VMID_BASE_IDX 1
+#define regPA_SC_RIGHT_VERT_GRID 0x00e8
+#define regPA_SC_RIGHT_VERT_GRID_BASE_IDX 1
+#define regPA_SC_LEFT_VERT_GRID 0x00e9
+#define regPA_SC_LEFT_VERT_GRID_BASE_IDX 1
+#define regPA_SC_HORIZ_GRID 0x00ea
+#define regPA_SC_HORIZ_GRID_BASE_IDX 1
+#define regVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
+#define regVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
+#define regCB_BLEND_RED 0x0105
+#define regCB_BLEND_RED_BASE_IDX 1
+#define regCB_BLEND_GREEN 0x0106
+#define regCB_BLEND_GREEN_BASE_IDX 1
+#define regCB_BLEND_BLUE 0x0107
+#define regCB_BLEND_BLUE_BASE_IDX 1
+#define regCB_BLEND_ALPHA 0x0108
+#define regCB_BLEND_ALPHA_BASE_IDX 1
+#define regCB_DCC_CONTROL 0x0109
+#define regCB_DCC_CONTROL_BASE_IDX 1
+#define regDB_STENCIL_CONTROL 0x010b
+#define regDB_STENCIL_CONTROL_BASE_IDX 1
+#define regDB_STENCILREFMASK 0x010c
+#define regDB_STENCILREFMASK_BASE_IDX 1
+#define regDB_STENCILREFMASK_BF 0x010d
+#define regDB_STENCILREFMASK_BF_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE 0x010f
+#define regPA_CL_VPORT_XSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET 0x0110
+#define regPA_CL_VPORT_XOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE 0x0111
+#define regPA_CL_VPORT_YSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET 0x0112
+#define regPA_CL_VPORT_YOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE 0x0113
+#define regPA_CL_VPORT_ZSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET 0x0114
+#define regPA_CL_VPORT_ZOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_1 0x0115
+#define regPA_CL_VPORT_XSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_1 0x0116
+#define regPA_CL_VPORT_XOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_1 0x0117
+#define regPA_CL_VPORT_YSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_1 0x0118
+#define regPA_CL_VPORT_YOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_1 0x0119
+#define regPA_CL_VPORT_ZSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_1 0x011a
+#define regPA_CL_VPORT_ZOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_2 0x011b
+#define regPA_CL_VPORT_XSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_2 0x011c
+#define regPA_CL_VPORT_XOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_2 0x011d
+#define regPA_CL_VPORT_YSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_2 0x011e
+#define regPA_CL_VPORT_YOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_2 0x011f
+#define regPA_CL_VPORT_ZSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_2 0x0120
+#define regPA_CL_VPORT_ZOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_3 0x0121
+#define regPA_CL_VPORT_XSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_3 0x0122
+#define regPA_CL_VPORT_XOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_3 0x0123
+#define regPA_CL_VPORT_YSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_3 0x0124
+#define regPA_CL_VPORT_YOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_3 0x0125
+#define regPA_CL_VPORT_ZSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_3 0x0126
+#define regPA_CL_VPORT_ZOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_4 0x0127
+#define regPA_CL_VPORT_XSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_4 0x0128
+#define regPA_CL_VPORT_XOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_4 0x0129
+#define regPA_CL_VPORT_YSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_4 0x012a
+#define regPA_CL_VPORT_YOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_4 0x012b
+#define regPA_CL_VPORT_ZSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_4 0x012c
+#define regPA_CL_VPORT_ZOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_5 0x012d
+#define regPA_CL_VPORT_XSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_5 0x012e
+#define regPA_CL_VPORT_XOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_5 0x012f
+#define regPA_CL_VPORT_YSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_5 0x0130
+#define regPA_CL_VPORT_YOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_5 0x0131
+#define regPA_CL_VPORT_ZSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_5 0x0132
+#define regPA_CL_VPORT_ZOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_6 0x0133
+#define regPA_CL_VPORT_XSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_6 0x0134
+#define regPA_CL_VPORT_XOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_6 0x0135
+#define regPA_CL_VPORT_YSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_6 0x0136
+#define regPA_CL_VPORT_YOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_6 0x0137
+#define regPA_CL_VPORT_ZSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_6 0x0138
+#define regPA_CL_VPORT_ZOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_7 0x0139
+#define regPA_CL_VPORT_XSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_7 0x013a
+#define regPA_CL_VPORT_XOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_7 0x013b
+#define regPA_CL_VPORT_YSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_7 0x013c
+#define regPA_CL_VPORT_YOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_7 0x013d
+#define regPA_CL_VPORT_ZSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_7 0x013e
+#define regPA_CL_VPORT_ZOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_8 0x013f
+#define regPA_CL_VPORT_XSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_8 0x0140
+#define regPA_CL_VPORT_XOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_8 0x0141
+#define regPA_CL_VPORT_YSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_8 0x0142
+#define regPA_CL_VPORT_YOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_8 0x0143
+#define regPA_CL_VPORT_ZSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_8 0x0144
+#define regPA_CL_VPORT_ZOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_9 0x0145
+#define regPA_CL_VPORT_XSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_9 0x0146
+#define regPA_CL_VPORT_XOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_9 0x0147
+#define regPA_CL_VPORT_YSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_9 0x0148
+#define regPA_CL_VPORT_YOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_9 0x0149
+#define regPA_CL_VPORT_ZSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_9 0x014a
+#define regPA_CL_VPORT_ZOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_10 0x014b
+#define regPA_CL_VPORT_XSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_10 0x014c
+#define regPA_CL_VPORT_XOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_10 0x014d
+#define regPA_CL_VPORT_YSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_10 0x014e
+#define regPA_CL_VPORT_YOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_10 0x014f
+#define regPA_CL_VPORT_ZSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_10 0x0150
+#define regPA_CL_VPORT_ZOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_11 0x0151
+#define regPA_CL_VPORT_XSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_11 0x0152
+#define regPA_CL_VPORT_XOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_11 0x0153
+#define regPA_CL_VPORT_YSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_11 0x0154
+#define regPA_CL_VPORT_YOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_11 0x0155
+#define regPA_CL_VPORT_ZSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_11 0x0156
+#define regPA_CL_VPORT_ZOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_12 0x0157
+#define regPA_CL_VPORT_XSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_12 0x0158
+#define regPA_CL_VPORT_XOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_12 0x0159
+#define regPA_CL_VPORT_YSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_12 0x015a
+#define regPA_CL_VPORT_YOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_12 0x015b
+#define regPA_CL_VPORT_ZSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_12 0x015c
+#define regPA_CL_VPORT_ZOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_13 0x015d
+#define regPA_CL_VPORT_XSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_13 0x015e
+#define regPA_CL_VPORT_XOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_13 0x015f
+#define regPA_CL_VPORT_YSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_13 0x0160
+#define regPA_CL_VPORT_YOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_13 0x0161
+#define regPA_CL_VPORT_ZSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_13 0x0162
+#define regPA_CL_VPORT_ZOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_14 0x0163
+#define regPA_CL_VPORT_XSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_14 0x0164
+#define regPA_CL_VPORT_XOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_14 0x0165
+#define regPA_CL_VPORT_YSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_14 0x0166
+#define regPA_CL_VPORT_YOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_14 0x0167
+#define regPA_CL_VPORT_ZSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_14 0x0168
+#define regPA_CL_VPORT_ZOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_15 0x0169
+#define regPA_CL_VPORT_XSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_15 0x016a
+#define regPA_CL_VPORT_XOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_15 0x016b
+#define regPA_CL_VPORT_YSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_15 0x016c
+#define regPA_CL_VPORT_YOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_15 0x016d
+#define regPA_CL_VPORT_ZSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_15 0x016e
+#define regPA_CL_VPORT_ZOFFSET_15_BASE_IDX 1
+#define regPA_CL_UCP_0_X 0x016f
+#define regPA_CL_UCP_0_X_BASE_IDX 1
+#define regPA_CL_UCP_0_Y 0x0170
+#define regPA_CL_UCP_0_Y_BASE_IDX 1
+#define regPA_CL_UCP_0_Z 0x0171
+#define regPA_CL_UCP_0_Z_BASE_IDX 1
+#define regPA_CL_UCP_0_W 0x0172
+#define regPA_CL_UCP_0_W_BASE_IDX 1
+#define regPA_CL_UCP_1_X 0x0173
+#define regPA_CL_UCP_1_X_BASE_IDX 1
+#define regPA_CL_UCP_1_Y 0x0174
+#define regPA_CL_UCP_1_Y_BASE_IDX 1
+#define regPA_CL_UCP_1_Z 0x0175
+#define regPA_CL_UCP_1_Z_BASE_IDX 1
+#define regPA_CL_UCP_1_W 0x0176
+#define regPA_CL_UCP_1_W_BASE_IDX 1
+#define regPA_CL_UCP_2_X 0x0177
+#define regPA_CL_UCP_2_X_BASE_IDX 1
+#define regPA_CL_UCP_2_Y 0x0178
+#define regPA_CL_UCP_2_Y_BASE_IDX 1
+#define regPA_CL_UCP_2_Z 0x0179
+#define regPA_CL_UCP_2_Z_BASE_IDX 1
+#define regPA_CL_UCP_2_W 0x017a
+#define regPA_CL_UCP_2_W_BASE_IDX 1
+#define regPA_CL_UCP_3_X 0x017b
+#define regPA_CL_UCP_3_X_BASE_IDX 1
+#define regPA_CL_UCP_3_Y 0x017c
+#define regPA_CL_UCP_3_Y_BASE_IDX 1
+#define regPA_CL_UCP_3_Z 0x017d
+#define regPA_CL_UCP_3_Z_BASE_IDX 1
+#define regPA_CL_UCP_3_W 0x017e
+#define regPA_CL_UCP_3_W_BASE_IDX 1
+#define regPA_CL_UCP_4_X 0x017f
+#define regPA_CL_UCP_4_X_BASE_IDX 1
+#define regPA_CL_UCP_4_Y 0x0180
+#define regPA_CL_UCP_4_Y_BASE_IDX 1
+#define regPA_CL_UCP_4_Z 0x0181
+#define regPA_CL_UCP_4_Z_BASE_IDX 1
+#define regPA_CL_UCP_4_W 0x0182
+#define regPA_CL_UCP_4_W_BASE_IDX 1
+#define regPA_CL_UCP_5_X 0x0183
+#define regPA_CL_UCP_5_X_BASE_IDX 1
+#define regPA_CL_UCP_5_Y 0x0184
+#define regPA_CL_UCP_5_Y_BASE_IDX 1
+#define regPA_CL_UCP_5_Z 0x0185
+#define regPA_CL_UCP_5_Z_BASE_IDX 1
+#define regPA_CL_UCP_5_W 0x0186
+#define regPA_CL_UCP_5_W_BASE_IDX 1
+#define regPA_CL_PROG_NEAR_CLIP_Z 0x0187
+#define regPA_CL_PROG_NEAR_CLIP_Z_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_0 0x0191
+#define regSPI_PS_INPUT_CNTL_0_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_1 0x0192
+#define regSPI_PS_INPUT_CNTL_1_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_2 0x0193
+#define regSPI_PS_INPUT_CNTL_2_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_3 0x0194
+#define regSPI_PS_INPUT_CNTL_3_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_4 0x0195
+#define regSPI_PS_INPUT_CNTL_4_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_5 0x0196
+#define regSPI_PS_INPUT_CNTL_5_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_6 0x0197
+#define regSPI_PS_INPUT_CNTL_6_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_7 0x0198
+#define regSPI_PS_INPUT_CNTL_7_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_8 0x0199
+#define regSPI_PS_INPUT_CNTL_8_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_9 0x019a
+#define regSPI_PS_INPUT_CNTL_9_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_10 0x019b
+#define regSPI_PS_INPUT_CNTL_10_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_11 0x019c
+#define regSPI_PS_INPUT_CNTL_11_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_12 0x019d
+#define regSPI_PS_INPUT_CNTL_12_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_13 0x019e
+#define regSPI_PS_INPUT_CNTL_13_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_14 0x019f
+#define regSPI_PS_INPUT_CNTL_14_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_15 0x01a0
+#define regSPI_PS_INPUT_CNTL_15_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_16 0x01a1
+#define regSPI_PS_INPUT_CNTL_16_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_17 0x01a2
+#define regSPI_PS_INPUT_CNTL_17_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_18 0x01a3
+#define regSPI_PS_INPUT_CNTL_18_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_19 0x01a4
+#define regSPI_PS_INPUT_CNTL_19_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_20 0x01a5
+#define regSPI_PS_INPUT_CNTL_20_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_21 0x01a6
+#define regSPI_PS_INPUT_CNTL_21_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_22 0x01a7
+#define regSPI_PS_INPUT_CNTL_22_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_23 0x01a8
+#define regSPI_PS_INPUT_CNTL_23_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_24 0x01a9
+#define regSPI_PS_INPUT_CNTL_24_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_25 0x01aa
+#define regSPI_PS_INPUT_CNTL_25_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_26 0x01ab
+#define regSPI_PS_INPUT_CNTL_26_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_27 0x01ac
+#define regSPI_PS_INPUT_CNTL_27_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_28 0x01ad
+#define regSPI_PS_INPUT_CNTL_28_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_29 0x01ae
+#define regSPI_PS_INPUT_CNTL_29_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_30 0x01af
+#define regSPI_PS_INPUT_CNTL_30_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_31 0x01b0
+#define regSPI_PS_INPUT_CNTL_31_BASE_IDX 1
+#define regSPI_VS_OUT_CONFIG 0x01b1
+#define regSPI_VS_OUT_CONFIG_BASE_IDX 1
+#define regSPI_PS_INPUT_ENA 0x01b3
+#define regSPI_PS_INPUT_ENA_BASE_IDX 1
+#define regSPI_PS_INPUT_ADDR 0x01b4
+#define regSPI_PS_INPUT_ADDR_BASE_IDX 1
+#define regSPI_INTERP_CONTROL_0 0x01b5
+#define regSPI_INTERP_CONTROL_0_BASE_IDX 1
+#define regSPI_PS_IN_CONTROL 0x01b6
+#define regSPI_PS_IN_CONTROL_BASE_IDX 1
+#define regSPI_BARYC_CNTL 0x01b8
+#define regSPI_BARYC_CNTL_BASE_IDX 1
+#define regSPI_TMPRING_SIZE 0x01ba
+#define regSPI_TMPRING_SIZE_BASE_IDX 1
+#define regSPI_SHADER_POS_FORMAT 0x01c3
+#define regSPI_SHADER_POS_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_Z_FORMAT 0x01c4
+#define regSPI_SHADER_Z_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_COL_FORMAT 0x01c5
+#define regSPI_SHADER_COL_FORMAT_BASE_IDX 1
+#define regCB_BLEND0_CONTROL 0x01e0
+#define regCB_BLEND0_CONTROL_BASE_IDX 1
+#define regCB_BLEND1_CONTROL 0x01e1
+#define regCB_BLEND1_CONTROL_BASE_IDX 1
+#define regCB_BLEND2_CONTROL 0x01e2
+#define regCB_BLEND2_CONTROL_BASE_IDX 1
+#define regCB_BLEND3_CONTROL 0x01e3
+#define regCB_BLEND3_CONTROL_BASE_IDX 1
+#define regCB_BLEND4_CONTROL 0x01e4
+#define regCB_BLEND4_CONTROL_BASE_IDX 1
+#define regCB_BLEND5_CONTROL 0x01e5
+#define regCB_BLEND5_CONTROL_BASE_IDX 1
+#define regCB_BLEND6_CONTROL 0x01e6
+#define regCB_BLEND6_CONTROL_BASE_IDX 1
+#define regCB_BLEND7_CONTROL 0x01e7
+#define regCB_BLEND7_CONTROL_BASE_IDX 1
+#define regCB_MRT0_EPITCH 0x01e8
+#define regCB_MRT0_EPITCH_BASE_IDX 1
+#define regCB_MRT1_EPITCH 0x01e9
+#define regCB_MRT1_EPITCH_BASE_IDX 1
+#define regCB_MRT2_EPITCH 0x01ea
+#define regCB_MRT2_EPITCH_BASE_IDX 1
+#define regCB_MRT3_EPITCH 0x01eb
+#define regCB_MRT3_EPITCH_BASE_IDX 1
+#define regCB_MRT4_EPITCH 0x01ec
+#define regCB_MRT4_EPITCH_BASE_IDX 1
+#define regCB_MRT5_EPITCH 0x01ed
+#define regCB_MRT5_EPITCH_BASE_IDX 1
+#define regCB_MRT6_EPITCH 0x01ee
+#define regCB_MRT6_EPITCH_BASE_IDX 1
+#define regCB_MRT7_EPITCH 0x01ef
+#define regCB_MRT7_EPITCH_BASE_IDX 1
+#define regCS_COPY_STATE 0x01f3
+#define regCS_COPY_STATE_BASE_IDX 1
+#define regGFX_COPY_STATE 0x01f4
+#define regGFX_COPY_STATE_BASE_IDX 1
+#define regPA_CL_POINT_X_RAD 0x01f5
+#define regPA_CL_POINT_X_RAD_BASE_IDX 1
+#define regPA_CL_POINT_Y_RAD 0x01f6
+#define regPA_CL_POINT_Y_RAD_BASE_IDX 1
+#define regPA_CL_POINT_SIZE 0x01f7
+#define regPA_CL_POINT_SIZE_BASE_IDX 1
+#define regPA_CL_POINT_CULL_RAD 0x01f8
+#define regPA_CL_POINT_CULL_RAD_BASE_IDX 1
+#define regVGT_DMA_BASE_HI 0x01f9
+#define regVGT_DMA_BASE_HI_BASE_IDX 1
+#define regVGT_DMA_BASE 0x01fa
+#define regVGT_DMA_BASE_BASE_IDX 1
+#define regVGT_DRAW_INITIATOR 0x01fc
+#define regVGT_DRAW_INITIATOR_BASE_IDX 1
+#define regVGT_IMMED_DATA 0x01fd
+#define regVGT_IMMED_DATA_BASE_IDX 1
+#define regVGT_EVENT_ADDRESS_REG 0x01fe
+#define regVGT_EVENT_ADDRESS_REG_BASE_IDX 1
+#define regDB_DEPTH_CONTROL 0x0200
+#define regDB_DEPTH_CONTROL_BASE_IDX 1
+#define regDB_EQAA 0x0201
+#define regDB_EQAA_BASE_IDX 1
+#define regCB_COLOR_CONTROL 0x0202
+#define regCB_COLOR_CONTROL_BASE_IDX 1
+#define regDB_SHADER_CONTROL 0x0203
+#define regDB_SHADER_CONTROL_BASE_IDX 1
+#define regPA_CL_CLIP_CNTL 0x0204
+#define regPA_CL_CLIP_CNTL_BASE_IDX 1
+#define regPA_SU_SC_MODE_CNTL 0x0205
+#define regPA_SU_SC_MODE_CNTL_BASE_IDX 1
+#define regPA_CL_VTE_CNTL 0x0206
+#define regPA_CL_VTE_CNTL_BASE_IDX 1
+#define regPA_CL_VS_OUT_CNTL 0x0207
+#define regPA_CL_VS_OUT_CNTL_BASE_IDX 1
+#define regPA_CL_NANINF_CNTL 0x0208
+#define regPA_CL_NANINF_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_CNTL 0x0209
+#define regPA_SU_LINE_STIPPLE_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_SCALE 0x020a
+#define regPA_SU_LINE_STIPPLE_SCALE_BASE_IDX 1
+#define regPA_SU_PRIM_FILTER_CNTL 0x020b
+#define regPA_SU_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL 0x020c
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_CL_OBJPRIM_ID_CNTL 0x020d
+#define regPA_CL_OBJPRIM_ID_CNTL_BASE_IDX 1
+#define regPA_CL_NGG_CNTL 0x020e
+#define regPA_CL_NGG_CNTL_BASE_IDX 1
+#define regPA_SU_OVER_RASTERIZATION_CNTL 0x020f
+#define regPA_SU_OVER_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_STEREO_CNTL 0x0210
+#define regPA_STEREO_CNTL_BASE_IDX 1
+#define regPA_SU_POINT_SIZE 0x0280
+#define regPA_SU_POINT_SIZE_BASE_IDX 1
+#define regPA_SU_POINT_MINMAX 0x0281
+#define regPA_SU_POINT_MINMAX_BASE_IDX 1
+#define regPA_SU_LINE_CNTL 0x0282
+#define regPA_SU_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE 0x0283
+#define regPA_SC_LINE_STIPPLE_BASE_IDX 1
+#define regVGT_OUTPUT_PATH_CNTL 0x0284
+#define regVGT_OUTPUT_PATH_CNTL_BASE_IDX 1
+#define regVGT_HOS_CNTL 0x0285
+#define regVGT_HOS_CNTL_BASE_IDX 1
+#define regVGT_HOS_MAX_TESS_LEVEL 0x0286
+#define regVGT_HOS_MAX_TESS_LEVEL_BASE_IDX 1
+#define regVGT_HOS_MIN_TESS_LEVEL 0x0287
+#define regVGT_HOS_MIN_TESS_LEVEL_BASE_IDX 1
+#define regVGT_HOS_REUSE_DEPTH 0x0288
+#define regVGT_HOS_REUSE_DEPTH_BASE_IDX 1
+#define regVGT_GROUP_PRIM_TYPE 0x0289
+#define regVGT_GROUP_PRIM_TYPE_BASE_IDX 1
+#define regVGT_GROUP_FIRST_DECR 0x028a
+#define regVGT_GROUP_FIRST_DECR_BASE_IDX 1
+#define regVGT_GROUP_DECR 0x028b
+#define regVGT_GROUP_DECR_BASE_IDX 1
+#define regVGT_GROUP_VECT_0_CNTL 0x028c
+#define regVGT_GROUP_VECT_0_CNTL_BASE_IDX 1
+#define regVGT_GROUP_VECT_1_CNTL 0x028d
+#define regVGT_GROUP_VECT_1_CNTL_BASE_IDX 1
+#define regVGT_GROUP_VECT_0_FMT_CNTL 0x028e
+#define regVGT_GROUP_VECT_0_FMT_CNTL_BASE_IDX 1
+#define regVGT_GROUP_VECT_1_FMT_CNTL 0x028f
+#define regVGT_GROUP_VECT_1_FMT_CNTL_BASE_IDX 1
+#define regVGT_GS_MODE 0x0290
+#define regVGT_GS_MODE_BASE_IDX 1
+#define regVGT_GS_ONCHIP_CNTL 0x0291
+#define regVGT_GS_ONCHIP_CNTL_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_0 0x0292
+#define regPA_SC_MODE_CNTL_0_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_1 0x0293
+#define regPA_SC_MODE_CNTL_1_BASE_IDX 1
+#define regVGT_ENHANCE 0x0294
+#define regVGT_ENHANCE_BASE_IDX 1
+#define regVGT_GS_PER_ES 0x0295
+#define regVGT_GS_PER_ES_BASE_IDX 1
+#define regVGT_ES_PER_GS 0x0296
+#define regVGT_ES_PER_GS_BASE_IDX 1
+#define regVGT_GS_PER_VS 0x0297
+#define regVGT_GS_PER_VS_BASE_IDX 1
+#define regVGT_GSVS_RING_OFFSET_1 0x0298
+#define regVGT_GSVS_RING_OFFSET_1_BASE_IDX 1
+#define regVGT_GSVS_RING_OFFSET_2 0x0299
+#define regVGT_GSVS_RING_OFFSET_2_BASE_IDX 1
+#define regVGT_GSVS_RING_OFFSET_3 0x029a
+#define regVGT_GSVS_RING_OFFSET_3_BASE_IDX 1
+#define regVGT_GS_OUT_PRIM_TYPE 0x029b
+#define regVGT_GS_OUT_PRIM_TYPE_BASE_IDX 1
+#define regIA_ENHANCE 0x029c
+#define regIA_ENHANCE_BASE_IDX 1
+#define regVGT_DMA_SIZE 0x029d
+#define regVGT_DMA_SIZE_BASE_IDX 1
+#define regVGT_DMA_MAX_SIZE 0x029e
+#define regVGT_DMA_MAX_SIZE_BASE_IDX 1
+#define regVGT_DMA_INDEX_TYPE 0x029f
+#define regVGT_DMA_INDEX_TYPE_BASE_IDX 1
+#define regWD_ENHANCE 0x02a0
+#define regWD_ENHANCE_BASE_IDX 1
+#define regVGT_PRIMITIVEID_EN 0x02a1
+#define regVGT_PRIMITIVEID_EN_BASE_IDX 1
+#define regVGT_DMA_NUM_INSTANCES 0x02a2
+#define regVGT_DMA_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_PRIMITIVEID_RESET 0x02a3
+#define regVGT_PRIMITIVEID_RESET_BASE_IDX 1
+#define regVGT_EVENT_INITIATOR 0x02a4
+#define regVGT_EVENT_INITIATOR_BASE_IDX 1
+#define regVGT_GS_MAX_PRIMS_PER_SUBGROUP 0x02a5
+#define regVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
+#define regVGT_DRAW_PAYLOAD_CNTL 0x02a6
+#define regVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
+#define regVGT_INSTANCE_STEP_RATE_0 0x02a8
+#define regVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
+#define regVGT_INSTANCE_STEP_RATE_1 0x02a9
+#define regVGT_INSTANCE_STEP_RATE_1_BASE_IDX 1
+#define regIA_MULTI_VGT_PARAM_BC 0x02aa
+#define regIA_MULTI_VGT_PARAM_BC_BASE_IDX 1
+#define regVGT_ESGS_RING_ITEMSIZE 0x02ab
+#define regVGT_ESGS_RING_ITEMSIZE_BASE_IDX 1
+#define regVGT_GSVS_RING_ITEMSIZE 0x02ac
+#define regVGT_GSVS_RING_ITEMSIZE_BASE_IDX 1
+#define regVGT_REUSE_OFF 0x02ad
+#define regVGT_REUSE_OFF_BASE_IDX 1
+#define regVGT_VTX_CNT_EN 0x02ae
+#define regVGT_VTX_CNT_EN_BASE_IDX 1
+#define regDB_HTILE_SURFACE 0x02af
+#define regDB_HTILE_SURFACE_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE0 0x02b0
+#define regDB_SRESULTS_COMPARE_STATE0_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE1 0x02b1
+#define regDB_SRESULTS_COMPARE_STATE1_BASE_IDX 1
+#define regDB_PRELOAD_CONTROL 0x02b2
+#define regDB_PRELOAD_CONTROL_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_SIZE_0 0x02b4
+#define regVGT_STRMOUT_BUFFER_SIZE_0_BASE_IDX 1
+#define regVGT_STRMOUT_VTX_STRIDE_0 0x02b5
+#define regVGT_STRMOUT_VTX_STRIDE_0_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_OFFSET_0 0x02b7
+#define regVGT_STRMOUT_BUFFER_OFFSET_0_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_SIZE_1 0x02b8
+#define regVGT_STRMOUT_BUFFER_SIZE_1_BASE_IDX 1
+#define regVGT_STRMOUT_VTX_STRIDE_1 0x02b9
+#define regVGT_STRMOUT_VTX_STRIDE_1_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_OFFSET_1 0x02bb
+#define regVGT_STRMOUT_BUFFER_OFFSET_1_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_SIZE_2 0x02bc
+#define regVGT_STRMOUT_BUFFER_SIZE_2_BASE_IDX 1
+#define regVGT_STRMOUT_VTX_STRIDE_2 0x02bd
+#define regVGT_STRMOUT_VTX_STRIDE_2_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_OFFSET_2 0x02bf
+#define regVGT_STRMOUT_BUFFER_OFFSET_2_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_SIZE_3 0x02c0
+#define regVGT_STRMOUT_BUFFER_SIZE_3_BASE_IDX 1
+#define regVGT_STRMOUT_VTX_STRIDE_3 0x02c1
+#define regVGT_STRMOUT_VTX_STRIDE_3_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_OFFSET_3 0x02c3
+#define regVGT_STRMOUT_BUFFER_OFFSET_3_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0x02ca
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0x02cb
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0x02cc
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE_BASE_IDX 1
+#define regVGT_GS_MAX_VERT_OUT 0x02ce
+#define regVGT_GS_MAX_VERT_OUT_BASE_IDX 1
+#define regVGT_TESS_DISTRIBUTION 0x02d4
+#define regVGT_TESS_DISTRIBUTION_BASE_IDX 1
+#define regVGT_SHADER_STAGES_EN 0x02d5
+#define regVGT_SHADER_STAGES_EN_BASE_IDX 1
+#define regVGT_LS_HS_CONFIG 0x02d6
+#define regVGT_LS_HS_CONFIG_BASE_IDX 1
+#define regVGT_GS_VERT_ITEMSIZE 0x02d7
+#define regVGT_GS_VERT_ITEMSIZE_BASE_IDX 1
+#define regVGT_GS_VERT_ITEMSIZE_1 0x02d8
+#define regVGT_GS_VERT_ITEMSIZE_1_BASE_IDX 1
+#define regVGT_GS_VERT_ITEMSIZE_2 0x02d9
+#define regVGT_GS_VERT_ITEMSIZE_2_BASE_IDX 1
+#define regVGT_GS_VERT_ITEMSIZE_3 0x02da
+#define regVGT_GS_VERT_ITEMSIZE_3_BASE_IDX 1
+#define regVGT_TF_PARAM 0x02db
+#define regVGT_TF_PARAM_BASE_IDX 1
+#define regDB_ALPHA_TO_MASK 0x02dc
+#define regDB_ALPHA_TO_MASK_BASE_IDX 1
+#define regVGT_DISPATCH_DRAW_INDEX 0x02dd
+#define regVGT_DISPATCH_DRAW_INDEX_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL 0x02de
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_CLAMP 0x02df
+#define regPA_SU_POLY_OFFSET_CLAMP_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE 0x02e0
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET 0x02e1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_SCALE 0x02e2
+#define regPA_SU_POLY_OFFSET_BACK_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET 0x02e3
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET_BASE_IDX 1
+#define regVGT_GS_INSTANCE_CNT 0x02e4
+#define regVGT_GS_INSTANCE_CNT_BASE_IDX 1
+#define regVGT_STRMOUT_CONFIG 0x02e5
+#define regVGT_STRMOUT_CONFIG_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_CONFIG 0x02e6
+#define regVGT_STRMOUT_BUFFER_CONFIG_BASE_IDX 1
+#define regVGT_DMA_EVENT_INITIATOR 0x02e7
+#define regVGT_DMA_EVENT_INITIATOR_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_0 0x02f5
+#define regPA_SC_CENTROID_PRIORITY_0_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_1 0x02f6
+#define regPA_SC_CENTROID_PRIORITY_1_BASE_IDX 1
+#define regPA_SC_LINE_CNTL 0x02f7
+#define regPA_SC_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_AA_CONFIG 0x02f8
+#define regPA_SC_AA_CONFIG_BASE_IDX 1
+#define regPA_SU_VTX_CNTL 0x02f9
+#define regPA_SU_VTX_CNTL_BASE_IDX 1
+#define regPA_CL_GB_VERT_CLIP_ADJ 0x02fa
+#define regPA_CL_GB_VERT_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_VERT_DISC_ADJ 0x02fb
+#define regPA_CL_GB_VERT_DISC_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_CLIP_ADJ 0x02fc
+#define regPA_CL_GB_HORZ_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_DISC_ADJ 0x02fd
+#define regPA_CL_GB_HORZ_DISC_ADJ_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0x02fe
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0x02ff
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0x0300
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0x0301
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0x0302
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0x0303
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0x0304
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0x0305
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0x0306
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0x0307
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0x0308
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0x0309
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0x030a
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0x030b
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0x030c
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0x030d
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y0_X1Y0 0x030e
+#define regPA_SC_AA_MASK_X0Y0_X1Y0_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y1_X1Y1 0x030f
+#define regPA_SC_AA_MASK_X0Y1_X1Y1_BASE_IDX 1
+#define regPA_SC_SHADER_CONTROL 0x0310
+#define regPA_SC_SHADER_CONTROL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_0 0x0311
+#define regPA_SC_BINNER_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_1 0x0312
+#define regPA_SC_BINNER_CNTL_1_BASE_IDX 1
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL 0x0313
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_SC_NGG_MODE_CNTL 0x0314
+#define regPA_SC_NGG_MODE_CNTL_BASE_IDX 1
+#define regVGT_VERTEX_REUSE_BLOCK_CNTL 0x0316
+#define regVGT_VERTEX_REUSE_BLOCK_CNTL_BASE_IDX 1
+#define regVGT_OUT_DEALLOC_CNTL 0x0317
+#define regVGT_OUT_DEALLOC_CNTL_BASE_IDX 1
+#define regCB_COLOR0_BASE 0x0318
+#define regCB_COLOR0_BASE_BASE_IDX 1
+#define regCB_COLOR0_BASE_EXT 0x0319
+#define regCB_COLOR0_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB2 0x031a
+#define regCB_COLOR0_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR0_VIEW 0x031b
+#define regCB_COLOR0_VIEW_BASE_IDX 1
+#define regCB_COLOR0_INFO 0x031c
+#define regCB_COLOR0_INFO_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB 0x031d
+#define regCB_COLOR0_ATTRIB_BASE_IDX 1
+#define regCB_COLOR0_DCC_CONTROL 0x031e
+#define regCB_COLOR0_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR0_CMASK 0x031f
+#define regCB_COLOR0_CMASK_BASE_IDX 1
+#define regCB_COLOR0_CMASK_BASE_EXT 0x0320
+#define regCB_COLOR0_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_FMASK 0x0321
+#define regCB_COLOR0_FMASK_BASE_IDX 1
+#define regCB_COLOR0_FMASK_BASE_EXT 0x0322
+#define regCB_COLOR0_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_CLEAR_WORD0 0x0323
+#define regCB_COLOR0_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR0_CLEAR_WORD1 0x0324
+#define regCB_COLOR0_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE 0x0325
+#define regCB_COLOR0_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE_EXT 0x0326
+#define regCB_COLOR0_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_BASE 0x0327
+#define regCB_COLOR1_BASE_BASE_IDX 1
+#define regCB_COLOR1_BASE_EXT 0x0328
+#define regCB_COLOR1_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB2 0x0329
+#define regCB_COLOR1_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR1_VIEW 0x032a
+#define regCB_COLOR1_VIEW_BASE_IDX 1
+#define regCB_COLOR1_INFO 0x032b
+#define regCB_COLOR1_INFO_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB 0x032c
+#define regCB_COLOR1_ATTRIB_BASE_IDX 1
+#define regCB_COLOR1_DCC_CONTROL 0x032d
+#define regCB_COLOR1_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR1_CMASK 0x032e
+#define regCB_COLOR1_CMASK_BASE_IDX 1
+#define regCB_COLOR1_CMASK_BASE_EXT 0x032f
+#define regCB_COLOR1_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_FMASK 0x0330
+#define regCB_COLOR1_FMASK_BASE_IDX 1
+#define regCB_COLOR1_FMASK_BASE_EXT 0x0331
+#define regCB_COLOR1_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_CLEAR_WORD0 0x0332
+#define regCB_COLOR1_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR1_CLEAR_WORD1 0x0333
+#define regCB_COLOR1_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE 0x0334
+#define regCB_COLOR1_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE_EXT 0x0335
+#define regCB_COLOR1_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_BASE 0x0336
+#define regCB_COLOR2_BASE_BASE_IDX 1
+#define regCB_COLOR2_BASE_EXT 0x0337
+#define regCB_COLOR2_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB2 0x0338
+#define regCB_COLOR2_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR2_VIEW 0x0339
+#define regCB_COLOR2_VIEW_BASE_IDX 1
+#define regCB_COLOR2_INFO 0x033a
+#define regCB_COLOR2_INFO_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB 0x033b
+#define regCB_COLOR2_ATTRIB_BASE_IDX 1
+#define regCB_COLOR2_DCC_CONTROL 0x033c
+#define regCB_COLOR2_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR2_CMASK 0x033d
+#define regCB_COLOR2_CMASK_BASE_IDX 1
+#define regCB_COLOR2_CMASK_BASE_EXT 0x033e
+#define regCB_COLOR2_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_FMASK 0x033f
+#define regCB_COLOR2_FMASK_BASE_IDX 1
+#define regCB_COLOR2_FMASK_BASE_EXT 0x0340
+#define regCB_COLOR2_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_CLEAR_WORD0 0x0341
+#define regCB_COLOR2_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR2_CLEAR_WORD1 0x0342
+#define regCB_COLOR2_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE 0x0343
+#define regCB_COLOR2_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE_EXT 0x0344
+#define regCB_COLOR2_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_BASE 0x0345
+#define regCB_COLOR3_BASE_BASE_IDX 1
+#define regCB_COLOR3_BASE_EXT 0x0346
+#define regCB_COLOR3_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB2 0x0347
+#define regCB_COLOR3_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR3_VIEW 0x0348
+#define regCB_COLOR3_VIEW_BASE_IDX 1
+#define regCB_COLOR3_INFO 0x0349
+#define regCB_COLOR3_INFO_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB 0x034a
+#define regCB_COLOR3_ATTRIB_BASE_IDX 1
+#define regCB_COLOR3_DCC_CONTROL 0x034b
+#define regCB_COLOR3_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR3_CMASK 0x034c
+#define regCB_COLOR3_CMASK_BASE_IDX 1
+#define regCB_COLOR3_CMASK_BASE_EXT 0x034d
+#define regCB_COLOR3_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_FMASK 0x034e
+#define regCB_COLOR3_FMASK_BASE_IDX 1
+#define regCB_COLOR3_FMASK_BASE_EXT 0x034f
+#define regCB_COLOR3_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_CLEAR_WORD0 0x0350
+#define regCB_COLOR3_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR3_CLEAR_WORD1 0x0351
+#define regCB_COLOR3_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE 0x0352
+#define regCB_COLOR3_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE_EXT 0x0353
+#define regCB_COLOR3_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_BASE 0x0354
+#define regCB_COLOR4_BASE_BASE_IDX 1
+#define regCB_COLOR4_BASE_EXT 0x0355
+#define regCB_COLOR4_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB2 0x0356
+#define regCB_COLOR4_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR4_VIEW 0x0357
+#define regCB_COLOR4_VIEW_BASE_IDX 1
+#define regCB_COLOR4_INFO 0x0358
+#define regCB_COLOR4_INFO_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB 0x0359
+#define regCB_COLOR4_ATTRIB_BASE_IDX 1
+#define regCB_COLOR4_DCC_CONTROL 0x035a
+#define regCB_COLOR4_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR4_CMASK 0x035b
+#define regCB_COLOR4_CMASK_BASE_IDX 1
+#define regCB_COLOR4_CMASK_BASE_EXT 0x035c
+#define regCB_COLOR4_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_FMASK 0x035d
+#define regCB_COLOR4_FMASK_BASE_IDX 1
+#define regCB_COLOR4_FMASK_BASE_EXT 0x035e
+#define regCB_COLOR4_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_CLEAR_WORD0 0x035f
+#define regCB_COLOR4_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR4_CLEAR_WORD1 0x0360
+#define regCB_COLOR4_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE 0x0361
+#define regCB_COLOR4_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE_EXT 0x0362
+#define regCB_COLOR4_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_BASE 0x0363
+#define regCB_COLOR5_BASE_BASE_IDX 1
+#define regCB_COLOR5_BASE_EXT 0x0364
+#define regCB_COLOR5_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB2 0x0365
+#define regCB_COLOR5_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR5_VIEW 0x0366
+#define regCB_COLOR5_VIEW_BASE_IDX 1
+#define regCB_COLOR5_INFO 0x0367
+#define regCB_COLOR5_INFO_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB 0x0368
+#define regCB_COLOR5_ATTRIB_BASE_IDX 1
+#define regCB_COLOR5_DCC_CONTROL 0x0369
+#define regCB_COLOR5_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR5_CMASK 0x036a
+#define regCB_COLOR5_CMASK_BASE_IDX 1
+#define regCB_COLOR5_CMASK_BASE_EXT 0x036b
+#define regCB_COLOR5_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_FMASK 0x036c
+#define regCB_COLOR5_FMASK_BASE_IDX 1
+#define regCB_COLOR5_FMASK_BASE_EXT 0x036d
+#define regCB_COLOR5_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_CLEAR_WORD0 0x036e
+#define regCB_COLOR5_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR5_CLEAR_WORD1 0x036f
+#define regCB_COLOR5_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE 0x0370
+#define regCB_COLOR5_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE_EXT 0x0371
+#define regCB_COLOR5_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_BASE 0x0372
+#define regCB_COLOR6_BASE_BASE_IDX 1
+#define regCB_COLOR6_BASE_EXT 0x0373
+#define regCB_COLOR6_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB2 0x0374
+#define regCB_COLOR6_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR6_VIEW 0x0375
+#define regCB_COLOR6_VIEW_BASE_IDX 1
+#define regCB_COLOR6_INFO 0x0376
+#define regCB_COLOR6_INFO_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB 0x0377
+#define regCB_COLOR6_ATTRIB_BASE_IDX 1
+#define regCB_COLOR6_DCC_CONTROL 0x0378
+#define regCB_COLOR6_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR6_CMASK 0x0379
+#define regCB_COLOR6_CMASK_BASE_IDX 1
+#define regCB_COLOR6_CMASK_BASE_EXT 0x037a
+#define regCB_COLOR6_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_FMASK 0x037b
+#define regCB_COLOR6_FMASK_BASE_IDX 1
+#define regCB_COLOR6_FMASK_BASE_EXT 0x037c
+#define regCB_COLOR6_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_CLEAR_WORD0 0x037d
+#define regCB_COLOR6_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR6_CLEAR_WORD1 0x037e
+#define regCB_COLOR6_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE 0x037f
+#define regCB_COLOR6_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE_EXT 0x0380
+#define regCB_COLOR6_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_BASE 0x0381
+#define regCB_COLOR7_BASE_BASE_IDX 1
+#define regCB_COLOR7_BASE_EXT 0x0382
+#define regCB_COLOR7_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB2 0x0383
+#define regCB_COLOR7_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR7_VIEW 0x0384
+#define regCB_COLOR7_VIEW_BASE_IDX 1
+#define regCB_COLOR7_INFO 0x0385
+#define regCB_COLOR7_INFO_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB 0x0386
+#define regCB_COLOR7_ATTRIB_BASE_IDX 1
+#define regCB_COLOR7_DCC_CONTROL 0x0387
+#define regCB_COLOR7_DCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR7_CMASK 0x0388
+#define regCB_COLOR7_CMASK_BASE_IDX 1
+#define regCB_COLOR7_CMASK_BASE_EXT 0x0389
+#define regCB_COLOR7_CMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_FMASK 0x038a
+#define regCB_COLOR7_FMASK_BASE_IDX 1
+#define regCB_COLOR7_FMASK_BASE_EXT 0x038b
+#define regCB_COLOR7_FMASK_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_CLEAR_WORD0 0x038c
+#define regCB_COLOR7_CLEAR_WORD0_BASE_IDX 1
+#define regCB_COLOR7_CLEAR_WORD1 0x038d
+#define regCB_COLOR7_CLEAR_WORD1_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE 0x038e
+#define regCB_COLOR7_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE_EXT 0x038f
+#define regCB_COLOR7_DCC_BASE_EXT_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_gfxudec
+// base address: 0x30000
+#define regCP_EOP_DONE_ADDR_LO 0x2000
+#define regCP_EOP_DONE_ADDR_LO_BASE_IDX 1
+#define regCP_EOP_DONE_ADDR_HI 0x2001
+#define regCP_EOP_DONE_ADDR_HI_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_LO 0x2002
+#define regCP_EOP_DONE_DATA_LO_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_HI 0x2003
+#define regCP_EOP_DONE_DATA_HI_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_LO 0x2004
+#define regCP_EOP_LAST_FENCE_LO_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_HI 0x2005
+#define regCP_EOP_LAST_FENCE_HI_BASE_IDX 1
+#define regCP_STREAM_OUT_ADDR_LO 0x2006
+#define regCP_STREAM_OUT_ADDR_LO_BASE_IDX 1
+#define regCP_STREAM_OUT_ADDR_HI 0x2007
+#define regCP_STREAM_OUT_ADDR_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT0_LO 0x2008
+#define regCP_NUM_PRIM_WRITTEN_COUNT0_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT0_HI 0x2009
+#define regCP_NUM_PRIM_WRITTEN_COUNT0_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT0_LO 0x200a
+#define regCP_NUM_PRIM_NEEDED_COUNT0_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT0_HI 0x200b
+#define regCP_NUM_PRIM_NEEDED_COUNT0_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT1_LO 0x200c
+#define regCP_NUM_PRIM_WRITTEN_COUNT1_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT1_HI 0x200d
+#define regCP_NUM_PRIM_WRITTEN_COUNT1_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT1_LO 0x200e
+#define regCP_NUM_PRIM_NEEDED_COUNT1_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT1_HI 0x200f
+#define regCP_NUM_PRIM_NEEDED_COUNT1_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT2_LO 0x2010
+#define regCP_NUM_PRIM_WRITTEN_COUNT2_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT2_HI 0x2011
+#define regCP_NUM_PRIM_WRITTEN_COUNT2_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT2_LO 0x2012
+#define regCP_NUM_PRIM_NEEDED_COUNT2_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT2_HI 0x2013
+#define regCP_NUM_PRIM_NEEDED_COUNT2_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT3_LO 0x2014
+#define regCP_NUM_PRIM_WRITTEN_COUNT3_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_WRITTEN_COUNT3_HI 0x2015
+#define regCP_NUM_PRIM_WRITTEN_COUNT3_HI_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT3_LO 0x2016
+#define regCP_NUM_PRIM_NEEDED_COUNT3_LO_BASE_IDX 1
+#define regCP_NUM_PRIM_NEEDED_COUNT3_HI 0x2017
+#define regCP_NUM_PRIM_NEEDED_COUNT3_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_LO 0x2018
+#define regCP_PIPE_STATS_ADDR_LO_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_HI 0x2019
+#define regCP_PIPE_STATS_ADDR_HI_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_LO 0x201a
+#define regCP_VGT_IAVERT_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_HI 0x201b
+#define regCP_VGT_IAVERT_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_LO 0x201c
+#define regCP_VGT_IAPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_HI 0x201d
+#define regCP_VGT_IAPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_LO 0x201e
+#define regCP_VGT_GSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_HI 0x201f
+#define regCP_VGT_GSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_LO 0x2020
+#define regCP_VGT_VSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_HI 0x2021
+#define regCP_VGT_VSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_LO 0x2022
+#define regCP_VGT_GSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_HI 0x2023
+#define regCP_VGT_GSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_LO 0x2024
+#define regCP_VGT_HSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_HI 0x2025
+#define regCP_VGT_HSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_LO 0x2026
+#define regCP_VGT_DSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_HI 0x2027
+#define regCP_VGT_DSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_LO 0x2028
+#define regCP_PA_CINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_HI 0x2029
+#define regCP_PA_CINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_LO 0x202a
+#define regCP_PA_CPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_HI 0x202b
+#define regCP_PA_CPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_LO 0x202c
+#define regCP_SC_PSINVOC_COUNT0_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_HI 0x202d
+#define regCP_SC_PSINVOC_COUNT0_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_LO 0x202e
+#define regCP_SC_PSINVOC_COUNT1_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_HI 0x202f
+#define regCP_SC_PSINVOC_COUNT1_HI_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_LO 0x2030
+#define regCP_VGT_CSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_HI 0x2031
+#define regCP_VGT_CSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_CONTROL 0x203d
+#define regCP_PIPE_STATS_CONTROL_BASE_IDX 1
+#define regCP_STREAM_OUT_CONTROL 0x203e
+#define regCP_STREAM_OUT_CONTROL_BASE_IDX 1
+#define regCP_STRMOUT_CNTL 0x203f
+#define regCP_STRMOUT_CNTL_BASE_IDX 1
+#define regSCRATCH_REG0 0x2040
+#define regSCRATCH_REG0_BASE_IDX 1
+#define regSCRATCH_REG1 0x2041
+#define regSCRATCH_REG1_BASE_IDX 1
+#define regSCRATCH_REG2 0x2042
+#define regSCRATCH_REG2_BASE_IDX 1
+#define regSCRATCH_REG3 0x2043
+#define regSCRATCH_REG3_BASE_IDX 1
+#define regSCRATCH_REG4 0x2044
+#define regSCRATCH_REG4_BASE_IDX 1
+#define regSCRATCH_REG5 0x2045
+#define regSCRATCH_REG5_BASE_IDX 1
+#define regSCRATCH_REG6 0x2046
+#define regSCRATCH_REG6_BASE_IDX 1
+#define regSCRATCH_REG7 0x2047
+#define regSCRATCH_REG7_BASE_IDX 1
+#define regCP_APPEND_DATA_HI 0x204c
+#define regCP_APPEND_DATA_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_HI 0x204d
+#define regCP_APPEND_LAST_CS_FENCE_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_HI 0x204e
+#define regCP_APPEND_LAST_PS_FENCE_HI_BASE_IDX 1
+#define regSCRATCH_UMSK 0x2050
+#define regSCRATCH_UMSK_BASE_IDX 1
+#define regSCRATCH_ADDR 0x2051
+#define regSCRATCH_ADDR_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_LO 0x2052
+#define regCP_PFP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_HI 0x2053
+#define regCP_PFP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO 0x2054
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI 0x2055
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO 0x2056
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI 0x2057
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_APPEND_ADDR_LO 0x2058
+#define regCP_APPEND_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_ADDR_HI 0x2059
+#define regCP_APPEND_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_DATA_LO 0x205a
+#define regCP_APPEND_DATA_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_LO 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_LO 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_LO 0x205d
+#define regCP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_LO 0x205d
+#define regCP_ME_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_HI 0x205e
+#define regCP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_HI 0x205e
+#define regCP_ME_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_MC_WADDR_LO 0x2069
+#define regCP_ME_MC_WADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_WADDR_HI 0x206a
+#define regCP_ME_MC_WADDR_HI_BASE_IDX 1
+#define regCP_ME_MC_WDATA_LO 0x206b
+#define regCP_ME_MC_WDATA_LO_BASE_IDX 1
+#define regCP_ME_MC_WDATA_HI 0x206c
+#define regCP_ME_MC_WDATA_HI_BASE_IDX 1
+#define regCP_ME_MC_RADDR_LO 0x206d
+#define regCP_ME_MC_RADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_RADDR_HI 0x206e
+#define regCP_ME_MC_RADDR_HI_BASE_IDX 1
+#define regCP_SEM_WAIT_TIMER 0x206f
+#define regCP_SEM_WAIT_TIMER_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_LO 0x2070
+#define regCP_SIG_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_HI 0x2071
+#define regCP_SIG_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_WAIT_REG_MEM_TIMEOUT 0x2074
+#define regCP_WAIT_REG_MEM_TIMEOUT_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_LO 0x2075
+#define regCP_WAIT_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_HI 0x2076
+#define regCP_WAIT_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CONTROL 0x2077
+#define regCP_DMA_PFP_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_CONTROL 0x2078
+#define regCP_DMA_ME_CONTROL_BASE_IDX 1
+#define regCP_COHER_BASE_HI 0x2079
+#define regCP_COHER_BASE_HI_BASE_IDX 1
+#define regCP_COHER_START_DELAY 0x207b
+#define regCP_COHER_START_DELAY_BASE_IDX 1
+#define regCP_COHER_CNTL 0x207c
+#define regCP_COHER_CNTL_BASE_IDX 1
+#define regCP_COHER_SIZE 0x207d
+#define regCP_COHER_SIZE_BASE_IDX 1
+#define regCP_COHER_BASE 0x207e
+#define regCP_COHER_BASE_BASE_IDX 1
+#define regCP_COHER_STATUS 0x207f
+#define regCP_COHER_STATUS_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR 0x2080
+#define regCP_DMA_ME_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR_HI 0x2081
+#define regCP_DMA_ME_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR 0x2082
+#define regCP_DMA_ME_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR_HI 0x2083
+#define regCP_DMA_ME_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_COMMAND 0x2084
+#define regCP_DMA_ME_COMMAND_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR 0x2085
+#define regCP_DMA_PFP_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR_HI 0x2086
+#define regCP_DMA_PFP_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR 0x2087
+#define regCP_DMA_PFP_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR_HI 0x2088
+#define regCP_DMA_PFP_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_COMMAND 0x2089
+#define regCP_DMA_PFP_COMMAND_BASE_IDX 1
+#define regCP_DMA_CNTL 0x208a
+#define regCP_DMA_CNTL_BASE_IDX 1
+#define regCP_DMA_READ_TAGS 0x208b
+#define regCP_DMA_READ_TAGS_BASE_IDX 1
+#define regCP_COHER_SIZE_HI 0x208c
+#define regCP_COHER_SIZE_HI_BASE_IDX 1
+#define regCP_PFP_IB_CONTROL 0x208d
+#define regCP_PFP_IB_CONTROL_BASE_IDX 1
+#define regCP_PFP_LOAD_CONTROL 0x208e
+#define regCP_PFP_LOAD_CONTROL_BASE_IDX 1
+#define regCP_SCRATCH_INDEX 0x208f
+#define regCP_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_SCRATCH_DATA 0x2090
+#define regCP_SCRATCH_DATA_BASE_IDX 1
+#define regCP_RB_OFFSET 0x2091
+#define regCP_RB_OFFSET_BASE_IDX 1
+#define regCP_IB1_OFFSET 0x2092
+#define regCP_IB1_OFFSET_BASE_IDX 1
+#define regCP_IB2_OFFSET 0x2093
+#define regCP_IB2_OFFSET_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_BEGIN 0x2094
+#define regCP_IB1_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_END 0x2095
+#define regCP_IB1_PREAMBLE_END_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_BEGIN 0x2096
+#define regCP_IB2_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_END 0x2097
+#define regCP_IB2_PREAMBLE_END_BASE_IDX 1
+#define regCP_CE_IB1_OFFSET 0x2098
+#define regCP_CE_IB1_OFFSET_BASE_IDX 1
+#define regCP_CE_IB2_OFFSET 0x2099
+#define regCP_CE_IB2_OFFSET_BASE_IDX 1
+#define regCP_CE_COUNTER 0x209a
+#define regCP_CE_COUNTER_BASE_IDX 1
+#define regCP_CE_RB_OFFSET 0x209b
+#define regCP_CE_RB_OFFSET_BASE_IDX 1
+#define regCP_CE_INIT_CMD_BUFSZ 0x20bd
+#define regCP_CE_INIT_CMD_BUFSZ_BASE_IDX 1
+#define regCP_CE_IB1_CMD_BUFSZ 0x20be
+#define regCP_CE_IB1_CMD_BUFSZ_BASE_IDX 1
+#define regCP_CE_IB2_CMD_BUFSZ 0x20bf
+#define regCP_CE_IB2_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB1_CMD_BUFSZ 0x20c0
+#define regCP_IB1_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB2_CMD_BUFSZ 0x20c1
+#define regCP_IB2_CMD_BUFSZ_BASE_IDX 1
+#define regCP_ST_CMD_BUFSZ 0x20c2
+#define regCP_ST_CMD_BUFSZ_BASE_IDX 1
+#define regCP_CE_INIT_BASE_LO 0x20c3
+#define regCP_CE_INIT_BASE_LO_BASE_IDX 1
+#define regCP_CE_INIT_BASE_HI 0x20c4
+#define regCP_CE_INIT_BASE_HI_BASE_IDX 1
+#define regCP_CE_INIT_BUFSZ 0x20c5
+#define regCP_CE_INIT_BUFSZ_BASE_IDX 1
+#define regCP_CE_IB1_BASE_LO 0x20c6
+#define regCP_CE_IB1_BASE_LO_BASE_IDX 1
+#define regCP_CE_IB1_BASE_HI 0x20c7
+#define regCP_CE_IB1_BASE_HI_BASE_IDX 1
+#define regCP_CE_IB1_BUFSZ 0x20c8
+#define regCP_CE_IB1_BUFSZ_BASE_IDX 1
+#define regCP_CE_IB2_BASE_LO 0x20c9
+#define regCP_CE_IB2_BASE_LO_BASE_IDX 1
+#define regCP_CE_IB2_BASE_HI 0x20ca
+#define regCP_CE_IB2_BASE_HI_BASE_IDX 1
+#define regCP_CE_IB2_BUFSZ 0x20cb
+#define regCP_CE_IB2_BUFSZ_BASE_IDX 1
+#define regCP_IB1_BASE_LO 0x20cc
+#define regCP_IB1_BASE_LO_BASE_IDX 1
+#define regCP_IB1_BASE_HI 0x20cd
+#define regCP_IB1_BASE_HI_BASE_IDX 1
+#define regCP_IB1_BUFSZ 0x20ce
+#define regCP_IB1_BUFSZ_BASE_IDX 1
+#define regCP_IB2_BASE_LO 0x20cf
+#define regCP_IB2_BASE_LO_BASE_IDX 1
+#define regCP_IB2_BASE_HI 0x20d0
+#define regCP_IB2_BASE_HI_BASE_IDX 1
+#define regCP_IB2_BUFSZ 0x20d1
+#define regCP_IB2_BUFSZ_BASE_IDX 1
+#define regCP_ST_BASE_LO 0x20d2
+#define regCP_ST_BASE_LO_BASE_IDX 1
+#define regCP_ST_BASE_HI 0x20d3
+#define regCP_ST_BASE_HI_BASE_IDX 1
+#define regCP_ST_BUFSZ 0x20d4
+#define regCP_ST_BUFSZ_BASE_IDX 1
+#define regCP_EOP_DONE_EVENT_CNTL 0x20d5
+#define regCP_EOP_DONE_EVENT_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_CNTL 0x20d6
+#define regCP_EOP_DONE_DATA_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_CNTX_ID 0x20d7
+#define regCP_EOP_DONE_CNTX_ID_BASE_IDX 1
+#define regCP_PFP_COMPLETION_STATUS 0x20ec
+#define regCP_PFP_COMPLETION_STATUS_BASE_IDX 1
+#define regCP_CE_COMPLETION_STATUS 0x20ed
+#define regCP_CE_COMPLETION_STATUS_BASE_IDX 1
+#define regCP_PRED_NOT_VISIBLE 0x20ee
+#define regCP_PRED_NOT_VISIBLE_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR 0x20f0
+#define regCP_PFP_METADATA_BASE_ADDR_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR_HI 0x20f1
+#define regCP_PFP_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_CE_METADATA_BASE_ADDR 0x20f2
+#define regCP_CE_METADATA_BASE_ADDR_BASE_IDX 1
+#define regCP_CE_METADATA_BASE_ADDR_HI 0x20f3
+#define regCP_CE_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR 0x20f4
+#define regCP_DRAW_INDX_INDR_ADDR_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR_HI 0x20f5
+#define regCP_DRAW_INDX_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR 0x20f6
+#define regCP_DISPATCH_INDR_ADDR_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR_HI 0x20f7
+#define regCP_DISPATCH_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR 0x20f8
+#define regCP_INDEX_BASE_ADDR_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR_HI 0x20f9
+#define regCP_INDEX_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_TYPE 0x20fa
+#define regCP_INDEX_TYPE_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR 0x20fb
+#define regCP_GDS_BKUP_ADDR_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR_HI 0x20fc
+#define regCP_GDS_BKUP_ADDR_HI_BASE_IDX 1
+#define regCP_SAMPLE_STATUS 0x20fd
+#define regCP_SAMPLE_STATUS_BASE_IDX 1
+#define regCP_ME_COHER_CNTL 0x20fe
+#define regCP_ME_COHER_CNTL_BASE_IDX 1
+#define regCP_ME_COHER_SIZE 0x20ff
+#define regCP_ME_COHER_SIZE_BASE_IDX 1
+#define regCP_ME_COHER_SIZE_HI 0x2100
+#define regCP_ME_COHER_SIZE_HI_BASE_IDX 1
+#define regCP_ME_COHER_BASE 0x2101
+#define regCP_ME_COHER_BASE_BASE_IDX 1
+#define regCP_ME_COHER_BASE_HI 0x2102
+#define regCP_ME_COHER_BASE_HI_BASE_IDX 1
+#define regCP_ME_COHER_STATUS 0x2103
+#define regCP_ME_COHER_STATUS_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_0 0x2140
+#define regRLC_GPM_PERF_COUNT_0_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_1 0x2141
+#define regRLC_GPM_PERF_COUNT_1_BASE_IDX 1
+#define regGRBM_GFX_INDEX 0x2200
+#define regGRBM_GFX_INDEX_BASE_IDX 1
+#define regVGT_GSVS_RING_SIZE 0x2241
+#define regVGT_GSVS_RING_SIZE_BASE_IDX 1
+#define regVGT_PRIMITIVE_TYPE 0x2242
+#define regVGT_PRIMITIVE_TYPE_BASE_IDX 1
+#define regVGT_INDEX_TYPE 0x2243
+#define regVGT_INDEX_TYPE_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_0 0x2244
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_0_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_1 0x2245
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_1_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_2 0x2246
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_2_BASE_IDX 1
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_3 0x2247
+#define regVGT_STRMOUT_BUFFER_FILLED_SIZE_3_BASE_IDX 1
+#define regVGT_MAX_VTX_INDX 0x2248
+#define regVGT_MAX_VTX_INDX_BASE_IDX 1
+#define regVGT_MIN_VTX_INDX 0x2249
+#define regVGT_MIN_VTX_INDX_BASE_IDX 1
+#define regVGT_INDX_OFFSET 0x224a
+#define regVGT_INDX_OFFSET_BASE_IDX 1
+#define regVGT_MULTI_PRIM_IB_RESET_EN 0x224b
+#define regVGT_MULTI_PRIM_IB_RESET_EN_BASE_IDX 1
+#define regVGT_NUM_INDICES 0x224c
+#define regVGT_NUM_INDICES_BASE_IDX 1
+#define regVGT_NUM_INSTANCES 0x224d
+#define regVGT_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_TF_RING_SIZE 0x224e
+#define regVGT_TF_RING_SIZE_BASE_IDX 1
+#define regVGT_HS_OFFCHIP_PARAM 0x224f
+#define regVGT_HS_OFFCHIP_PARAM_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE 0x2250
+#define regVGT_TF_MEMORY_BASE_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE_HI 0x2251
+#define regVGT_TF_MEMORY_BASE_HI_BASE_IDX 1
+#define regWD_POS_BUF_BASE 0x2252
+#define regWD_POS_BUF_BASE_BASE_IDX 1
+#define regWD_POS_BUF_BASE_HI 0x2253
+#define regWD_POS_BUF_BASE_HI_BASE_IDX 1
+#define regWD_CNTL_SB_BUF_BASE 0x2254
+#define regWD_CNTL_SB_BUF_BASE_BASE_IDX 1
+#define regWD_CNTL_SB_BUF_BASE_HI 0x2255
+#define regWD_CNTL_SB_BUF_BASE_HI_BASE_IDX 1
+#define regWD_INDEX_BUF_BASE 0x2256
+#define regWD_INDEX_BUF_BASE_BASE_IDX 1
+#define regWD_INDEX_BUF_BASE_HI 0x2257
+#define regWD_INDEX_BUF_BASE_HI_BASE_IDX 1
+#define regIA_MULTI_VGT_PARAM 0x2258
+#define regIA_MULTI_VGT_PARAM_BASE_IDX 1
+#define regVGT_INSTANCE_BASE_ID 0x225a
+#define regVGT_INSTANCE_BASE_ID_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_VALUE 0x2280
+#define regPA_SU_LINE_STIPPLE_VALUE_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE_STATE 0x2281
+#define regPA_SC_LINE_STIPPLE_STATE_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_0 0x2284
+#define regPA_SC_SCREEN_EXTENT_MIN_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_0 0x2285
+#define regPA_SC_SCREEN_EXTENT_MAX_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_1 0x2286
+#define regPA_SC_SCREEN_EXTENT_MIN_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_1 0x228b
+#define regPA_SC_SCREEN_EXTENT_MAX_1_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN 0x22a0
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_H 0x22a1
+#define regPA_SC_P3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_V 0x22a2
+#define regPA_SC_P3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE 0x22a3
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT 0x22a4
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN 0x22a8
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_H 0x22a9
+#define regPA_SC_HP3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_V 0x22aa
+#define regPA_SC_HP3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE 0x22ab
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT 0x22ac
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_EN 0x22b0
+#define regPA_SC_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_H 0x22b1
+#define regPA_SC_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_V 0x22b2
+#define regPA_SC_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE 0x22b3
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_COUNT 0x22b4
+#define regPA_SC_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_STATE_STEREO_X 0x22b5
+#define regPA_STATE_STEREO_X_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BASE 0x2330
+#define regSQ_THREAD_TRACE_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_SIZE 0x2331
+#define regSQ_THREAD_TRACE_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_MASK 0x2332
+#define regSQ_THREAD_TRACE_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_TOKEN_MASK 0x2333
+#define regSQ_THREAD_TRACE_TOKEN_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_PERF_MASK 0x2334
+#define regSQ_THREAD_TRACE_PERF_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_CTRL 0x2335
+#define regSQ_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regSQ_THREAD_TRACE_MODE 0x2336
+#define regSQ_THREAD_TRACE_MODE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BASE2 0x2337
+#define regSQ_THREAD_TRACE_BASE2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_TOKEN_MASK2 0x2338
+#define regSQ_THREAD_TRACE_TOKEN_MASK2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_WPTR 0x2339
+#define regSQ_THREAD_TRACE_WPTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS 0x233a
+#define regSQ_THREAD_TRACE_STATUS_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HIWATER 0x233b
+#define regSQ_THREAD_TRACE_HIWATER_BASE_IDX 1
+#define regSQ_THREAD_TRACE_CNTR 0x233c
+#define regSQ_THREAD_TRACE_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_0 0x2340
+#define regSQ_THREAD_TRACE_USERDATA_0_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_1 0x2341
+#define regSQ_THREAD_TRACE_USERDATA_1_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_2 0x2342
+#define regSQ_THREAD_TRACE_USERDATA_2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_3 0x2343
+#define regSQ_THREAD_TRACE_USERDATA_3_BASE_IDX 1
+#define regSQC_CACHES 0x2348
+#define regSQC_CACHES_BASE_IDX 1
+#define regSQC_WRITEBACK 0x2349
+#define regSQC_WRITEBACK_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_LOW 0x23c0
+#define regDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_HI 0x23c1
+#define regDB_OCCLUSION_COUNT0_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_LOW 0x23c2
+#define regDB_OCCLUSION_COUNT1_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_HI 0x23c3
+#define regDB_OCCLUSION_COUNT1_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_LOW 0x23c4
+#define regDB_OCCLUSION_COUNT2_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_HI 0x23c5
+#define regDB_OCCLUSION_COUNT2_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_LOW 0x23c6
+#define regDB_OCCLUSION_COUNT3_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_HI 0x23c7
+#define regDB_OCCLUSION_COUNT3_HI_BASE_IDX 1
+#define regDB_ZPASS_COUNT_LOW 0x23fe
+#define regDB_ZPASS_COUNT_LOW_BASE_IDX 1
+#define regDB_ZPASS_COUNT_HI 0x23ff
+#define regDB_ZPASS_COUNT_HI_BASE_IDX 1
+#define regGDS_RD_ADDR 0x2400
+#define regGDS_RD_ADDR_BASE_IDX 1
+#define regGDS_RD_DATA 0x2401
+#define regGDS_RD_DATA_BASE_IDX 1
+#define regGDS_RD_BURST_ADDR 0x2402
+#define regGDS_RD_BURST_ADDR_BASE_IDX 1
+#define regGDS_RD_BURST_COUNT 0x2403
+#define regGDS_RD_BURST_COUNT_BASE_IDX 1
+#define regGDS_RD_BURST_DATA 0x2404
+#define regGDS_RD_BURST_DATA_BASE_IDX 1
+#define regGDS_WR_ADDR 0x2405
+#define regGDS_WR_ADDR_BASE_IDX 1
+#define regGDS_WR_DATA 0x2406
+#define regGDS_WR_DATA_BASE_IDX 1
+#define regGDS_WR_BURST_ADDR 0x2407
+#define regGDS_WR_BURST_ADDR_BASE_IDX 1
+#define regGDS_WR_BURST_DATA 0x2408
+#define regGDS_WR_BURST_DATA_BASE_IDX 1
+#define regGDS_WRITE_COMPLETE 0x2409
+#define regGDS_WRITE_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_CNTL 0x240a
+#define regGDS_ATOM_CNTL_BASE_IDX 1
+#define regGDS_ATOM_COMPLETE 0x240b
+#define regGDS_ATOM_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_BASE 0x240c
+#define regGDS_ATOM_BASE_BASE_IDX 1
+#define regGDS_ATOM_SIZE 0x240d
+#define regGDS_ATOM_SIZE_BASE_IDX 1
+#define regGDS_ATOM_OFFSET0 0x240e
+#define regGDS_ATOM_OFFSET0_BASE_IDX 1
+#define regGDS_ATOM_OFFSET1 0x240f
+#define regGDS_ATOM_OFFSET1_BASE_IDX 1
+#define regGDS_ATOM_DST 0x2410
+#define regGDS_ATOM_DST_BASE_IDX 1
+#define regGDS_ATOM_OP 0x2411
+#define regGDS_ATOM_OP_BASE_IDX 1
+#define regGDS_ATOM_SRC0 0x2412
+#define regGDS_ATOM_SRC0_BASE_IDX 1
+#define regGDS_ATOM_SRC0_U 0x2413
+#define regGDS_ATOM_SRC0_U_BASE_IDX 1
+#define regGDS_ATOM_SRC1 0x2414
+#define regGDS_ATOM_SRC1_BASE_IDX 1
+#define regGDS_ATOM_SRC1_U 0x2415
+#define regGDS_ATOM_SRC1_U_BASE_IDX 1
+#define regGDS_ATOM_READ0 0x2416
+#define regGDS_ATOM_READ0_BASE_IDX 1
+#define regGDS_ATOM_READ0_U 0x2417
+#define regGDS_ATOM_READ0_U_BASE_IDX 1
+#define regGDS_ATOM_READ1 0x2418
+#define regGDS_ATOM_READ1_BASE_IDX 1
+#define regGDS_ATOM_READ1_U 0x2419
+#define regGDS_ATOM_READ1_U_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNTL 0x241a
+#define regGDS_GWS_RESOURCE_CNTL_BASE_IDX 1
+#define regGDS_GWS_RESOURCE 0x241b
+#define regGDS_GWS_RESOURCE_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNT 0x241c
+#define regGDS_GWS_RESOURCE_CNT_BASE_IDX 1
+#define regGDS_OA_CNTL 0x241d
+#define regGDS_OA_CNTL_BASE_IDX 1
+#define regGDS_OA_COUNTER 0x241e
+#define regGDS_OA_COUNTER_BASE_IDX 1
+#define regGDS_OA_ADDRESS 0x241f
+#define regGDS_OA_ADDRESS_BASE_IDX 1
+#define regGDS_OA_INCDEC 0x2420
+#define regGDS_OA_INCDEC_BASE_IDX 1
+#define regGDS_OA_RING_SIZE 0x2421
+#define regGDS_OA_RING_SIZE_BASE_IDX 1
+#define regSPI_CONFIG_CNTL 0x2440
+#define regSPI_CONFIG_CNTL_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_1 0x2441
+#define regSPI_CONFIG_CNTL_1_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_2 0x2442
+#define regSPI_CONFIG_CNTL_2_BASE_IDX 1
+#define regSPI_WAVE_LIMIT_CNTL 0x2443
+#define regSPI_WAVE_LIMIT_CNTL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_perfddec
+// base address: 0x34000
+#define regCPG_PERFCOUNTER1_LO 0x3000
+#define regCPG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER1_HI 0x3001
+#define regCPG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_LO 0x3002
+#define regCPG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_HI 0x3003
+#define regCPG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_LO 0x3004
+#define regCPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_HI 0x3005
+#define regCPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_LO 0x3006
+#define regCPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_HI 0x3007
+#define regCPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_LO 0x3008
+#define regCPF_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_HI 0x3009
+#define regCPF_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_LO 0x300a
+#define regCPF_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_HI 0x300b
+#define regCPF_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_LATENCY_STATS_DATA 0x300c
+#define regCPF_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPG_LATENCY_STATS_DATA 0x300d
+#define regCPG_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPC_LATENCY_STATS_DATA 0x300e
+#define regCPC_LATENCY_STATS_DATA_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_LO 0x3040
+#define regGRBM_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_HI 0x3041
+#define regGRBM_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_LO 0x3043
+#define regGRBM_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_HI 0x3044
+#define regGRBM_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_LO 0x3045
+#define regGRBM_SE0_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_HI 0x3046
+#define regGRBM_SE0_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_LO 0x3047
+#define regGRBM_SE1_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_HI 0x3048
+#define regGRBM_SE1_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_LO 0x3049
+#define regGRBM_SE2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_HI 0x304a
+#define regGRBM_SE2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_LO 0x304b
+#define regGRBM_SE3_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_HI 0x304c
+#define regGRBM_SE3_PERFCOUNTER_HI_BASE_IDX 1
+#define regWD_PERFCOUNTER0_LO 0x3080
+#define regWD_PERFCOUNTER0_LO_BASE_IDX 1
+#define regWD_PERFCOUNTER0_HI 0x3081
+#define regWD_PERFCOUNTER0_HI_BASE_IDX 1
+#define regWD_PERFCOUNTER1_LO 0x3082
+#define regWD_PERFCOUNTER1_LO_BASE_IDX 1
+#define regWD_PERFCOUNTER1_HI 0x3083
+#define regWD_PERFCOUNTER1_HI_BASE_IDX 1
+#define regWD_PERFCOUNTER2_LO 0x3084
+#define regWD_PERFCOUNTER2_LO_BASE_IDX 1
+#define regWD_PERFCOUNTER2_HI 0x3085
+#define regWD_PERFCOUNTER2_HI_BASE_IDX 1
+#define regWD_PERFCOUNTER3_LO 0x3086
+#define regWD_PERFCOUNTER3_LO_BASE_IDX 1
+#define regWD_PERFCOUNTER3_HI 0x3087
+#define regWD_PERFCOUNTER3_HI_BASE_IDX 1
+#define regIA_PERFCOUNTER0_LO 0x3088
+#define regIA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regIA_PERFCOUNTER0_HI 0x3089
+#define regIA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regIA_PERFCOUNTER1_LO 0x308a
+#define regIA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regIA_PERFCOUNTER1_HI 0x308b
+#define regIA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regIA_PERFCOUNTER2_LO 0x308c
+#define regIA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regIA_PERFCOUNTER2_HI 0x308d
+#define regIA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regIA_PERFCOUNTER3_LO 0x308e
+#define regIA_PERFCOUNTER3_LO_BASE_IDX 1
+#define regIA_PERFCOUNTER3_HI 0x308f
+#define regIA_PERFCOUNTER3_HI_BASE_IDX 1
+#define regVGT_PERFCOUNTER0_LO 0x3090
+#define regVGT_PERFCOUNTER0_LO_BASE_IDX 1
+#define regVGT_PERFCOUNTER0_HI 0x3091
+#define regVGT_PERFCOUNTER0_HI_BASE_IDX 1
+#define regVGT_PERFCOUNTER1_LO 0x3092
+#define regVGT_PERFCOUNTER1_LO_BASE_IDX 1
+#define regVGT_PERFCOUNTER1_HI 0x3093
+#define regVGT_PERFCOUNTER1_HI_BASE_IDX 1
+#define regVGT_PERFCOUNTER2_LO 0x3094
+#define regVGT_PERFCOUNTER2_LO_BASE_IDX 1
+#define regVGT_PERFCOUNTER2_HI 0x3095
+#define regVGT_PERFCOUNTER2_HI_BASE_IDX 1
+#define regVGT_PERFCOUNTER3_LO 0x3096
+#define regVGT_PERFCOUNTER3_LO_BASE_IDX 1
+#define regVGT_PERFCOUNTER3_HI 0x3097
+#define regVGT_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_LO 0x3100
+#define regPA_SU_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_HI 0x3101
+#define regPA_SU_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_LO 0x3102
+#define regPA_SU_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_HI 0x3103
+#define regPA_SU_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_LO 0x3104
+#define regPA_SU_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_HI 0x3105
+#define regPA_SU_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_LO 0x3106
+#define regPA_SU_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_HI 0x3107
+#define regPA_SU_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_LO 0x3140
+#define regPA_SC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_HI 0x3141
+#define regPA_SC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_LO 0x3142
+#define regPA_SC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_HI 0x3143
+#define regPA_SC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_LO 0x3144
+#define regPA_SC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_HI 0x3145
+#define regPA_SC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_LO 0x3146
+#define regPA_SC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_HI 0x3147
+#define regPA_SC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_LO 0x3148
+#define regPA_SC_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_HI 0x3149
+#define regPA_SC_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_LO 0x314a
+#define regPA_SC_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_HI 0x314b
+#define regPA_SC_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_LO 0x314c
+#define regPA_SC_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_HI 0x314d
+#define regPA_SC_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_LO 0x314e
+#define regPA_SC_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_HI 0x314f
+#define regPA_SC_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_HI 0x3180
+#define regSPI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_LO 0x3181
+#define regSPI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_HI 0x3182
+#define regSPI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_LO 0x3183
+#define regSPI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_HI 0x3184
+#define regSPI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_LO 0x3185
+#define regSPI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_HI 0x3186
+#define regSPI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_LO 0x3187
+#define regSPI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_HI 0x3188
+#define regSPI_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_LO 0x3189
+#define regSPI_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_HI 0x318a
+#define regSPI_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_LO 0x318b
+#define regSPI_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_LO 0x31c0
+#define regSQ_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_HI 0x31c1
+#define regSQ_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_LO 0x31c2
+#define regSQ_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_HI 0x31c3
+#define regSQ_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_LO 0x31c4
+#define regSQ_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_HI 0x31c5
+#define regSQ_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_LO 0x31c6
+#define regSQ_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_HI 0x31c7
+#define regSQ_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_LO 0x31c8
+#define regSQ_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_HI 0x31c9
+#define regSQ_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_LO 0x31ca
+#define regSQ_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_HI 0x31cb
+#define regSQ_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_LO 0x31cc
+#define regSQ_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_HI 0x31cd
+#define regSQ_PERFCOUNTER6_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_LO 0x31ce
+#define regSQ_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_HI 0x31cf
+#define regSQ_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER8_LO 0x31d0
+#define regSQ_PERFCOUNTER8_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER8_HI 0x31d1
+#define regSQ_PERFCOUNTER8_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER9_LO 0x31d2
+#define regSQ_PERFCOUNTER9_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER9_HI 0x31d3
+#define regSQ_PERFCOUNTER9_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER10_LO 0x31d4
+#define regSQ_PERFCOUNTER10_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER10_HI 0x31d5
+#define regSQ_PERFCOUNTER10_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER11_LO 0x31d6
+#define regSQ_PERFCOUNTER11_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER11_HI 0x31d7
+#define regSQ_PERFCOUNTER11_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER12_LO 0x31d8
+#define regSQ_PERFCOUNTER12_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER12_HI 0x31d9
+#define regSQ_PERFCOUNTER12_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER13_LO 0x31da
+#define regSQ_PERFCOUNTER13_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER13_HI 0x31db
+#define regSQ_PERFCOUNTER13_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER14_LO 0x31dc
+#define regSQ_PERFCOUNTER14_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER14_HI 0x31dd
+#define regSQ_PERFCOUNTER14_HI_BASE_IDX 1
+#define regSQ_PERFCOUNTER15_LO 0x31de
+#define regSQ_PERFCOUNTER15_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER15_HI 0x31df
+#define regSQ_PERFCOUNTER15_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER0_LO 0x3240
+#define regSX_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER0_HI 0x3241
+#define regSX_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER1_LO 0x3242
+#define regSX_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER1_HI 0x3243
+#define regSX_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER2_LO 0x3244
+#define regSX_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER2_HI 0x3245
+#define regSX_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER3_LO 0x3246
+#define regSX_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER3_HI 0x3247
+#define regSX_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_LO 0x3280
+#define regGDS_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_HI 0x3281
+#define regGDS_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_LO 0x3282
+#define regGDS_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_HI 0x3283
+#define regGDS_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_LO 0x3284
+#define regGDS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_HI 0x3285
+#define regGDS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_LO 0x3286
+#define regGDS_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_HI 0x3287
+#define regGDS_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER0_LO 0x32c0
+#define regTA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER0_HI 0x32c1
+#define regTA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER1_LO 0x32c2
+#define regTA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER1_HI 0x32c3
+#define regTA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER0_LO 0x3300
+#define regTD_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER0_HI 0x3301
+#define regTD_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER1_LO 0x3302
+#define regTD_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER1_HI 0x3303
+#define regTD_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_LO 0x3340
+#define regTCP_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_HI 0x3341
+#define regTCP_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_LO 0x3342
+#define regTCP_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_HI 0x3343
+#define regTCP_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_LO 0x3344
+#define regTCP_PERFCOUNTER2_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_HI 0x3345
+#define regTCP_PERFCOUNTER2_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_LO 0x3346
+#define regTCP_PERFCOUNTER3_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_HI 0x3347
+#define regTCP_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTCC_PERFCOUNTER0_LO 0x3380
+#define regTCC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTCC_PERFCOUNTER0_HI 0x3381
+#define regTCC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTCC_PERFCOUNTER1_LO 0x3382
+#define regTCC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTCC_PERFCOUNTER1_HI 0x3383
+#define regTCC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCC_PERFCOUNTER2_LO 0x3384
+#define regTCC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regTCC_PERFCOUNTER2_HI 0x3385
+#define regTCC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regTCC_PERFCOUNTER3_LO 0x3386
+#define regTCC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regTCC_PERFCOUNTER3_HI 0x3387
+#define regTCC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTCA_PERFCOUNTER0_LO 0x3390
+#define regTCA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTCA_PERFCOUNTER0_HI 0x3391
+#define regTCA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTCA_PERFCOUNTER1_LO 0x3392
+#define regTCA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTCA_PERFCOUNTER1_HI 0x3393
+#define regTCA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCA_PERFCOUNTER2_LO 0x3394
+#define regTCA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regTCA_PERFCOUNTER2_HI 0x3395
+#define regTCA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regTCA_PERFCOUNTER3_LO 0x3396
+#define regTCA_PERFCOUNTER3_LO_BASE_IDX 1
+#define regTCA_PERFCOUNTER3_HI 0x3397
+#define regTCA_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER0_LO 0x3406
+#define regCB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER0_HI 0x3407
+#define regCB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER1_LO 0x3408
+#define regCB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER1_HI 0x3409
+#define regCB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER2_LO 0x340a
+#define regCB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER2_HI 0x340b
+#define regCB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER3_LO 0x340c
+#define regCB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER3_HI 0x340d
+#define regCB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER0_LO 0x3440
+#define regDB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER0_HI 0x3441
+#define regDB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER1_LO 0x3442
+#define regDB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER1_HI 0x3443
+#define regDB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER2_LO 0x3444
+#define regDB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER2_HI 0x3445
+#define regDB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER3_LO 0x3446
+#define regDB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER3_HI 0x3447
+#define regDB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_LO 0x3480
+#define regRLC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_HI 0x3481
+#define regRLC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_LO 0x3482
+#define regRLC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_HI 0x3483
+#define regRLC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_LO 0x34c0
+#define regRMI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_HI 0x34c1
+#define regRMI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_LO 0x34c2
+#define regRMI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_HI 0x34c3
+#define regRMI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_LO 0x34c4
+#define regRMI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_HI 0x34c5
+#define regRMI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_LO 0x34c6
+#define regRMI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_HI 0x34c7
+#define regRMI_PERFCOUNTER3_HI_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_atcl2pfcntrdec
+// base address: 0x35400
+#define regATC_L2_PERFCOUNTER_LO 0x3500
+#define regATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regATC_L2_PERFCOUNTER_HI 0x3501
+#define regATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_vml2prdec
+// base address: 0x35408
+#define regMC_VM_L2_PERFCOUNTER_LO 0x3502
+#define regMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER_HI 0x3503
+#define regMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_l2tlbprdec
+// base address: 0x35448
+#define regL2TLB_PERFCOUNTER_LO 0x3512
+#define regL2TLB_PERFCOUNTER_LO_BASE_IDX 1
+#define regL2TLB_PERFCOUNTER_HI 0x3513
+#define regL2TLB_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_perfsdec
+// base address: 0x36000
+#define regCPG_PERFCOUNTER1_SELECT 0x3800
+#define regCPG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT1 0x3801
+#define regCPG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT 0x3802
+#define regCPG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_SELECT 0x3803
+#define regCPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT1 0x3804
+#define regCPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_SELECT 0x3805
+#define regCPF_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT1 0x3806
+#define regCPF_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT 0x3807
+#define regCPF_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCP_PERFMON_CNTL 0x3808
+#define regCP_PERFMON_CNTL_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT 0x3809
+#define regCPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT 0x380a
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT 0x380b
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPF_LATENCY_STATS_SELECT 0x380c
+#define regCPF_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPG_LATENCY_STATS_SELECT 0x380d
+#define regCPG_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_LATENCY_STATS_SELECT 0x380e
+#define regCPC_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT 0x3810
+#define regCP_DRAW_OBJECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT_COUNTER 0x3811
+#define regCP_DRAW_OBJECT_COUNTER_BASE_IDX 1
+#define regCP_DRAW_WINDOW_MASK_HI 0x3812
+#define regCP_DRAW_WINDOW_MASK_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_HI 0x3813
+#define regCP_DRAW_WINDOW_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_LO 0x3814
+#define regCP_DRAW_WINDOW_LO_BASE_IDX 1
+#define regCP_DRAW_WINDOW_CNTL 0x3815
+#define regCP_DRAW_WINDOW_CNTL_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT 0x3840
+#define regGRBM_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT 0x3841
+#define regGRBM_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_SELECT 0x3842
+#define regGRBM_SE0_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_SELECT 0x3843
+#define regGRBM_SE1_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_SELECT 0x3844
+#define regGRBM_SE2_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_SELECT 0x3845
+#define regGRBM_SE3_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regWD_PERFCOUNTER0_SELECT 0x3880
+#define regWD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regWD_PERFCOUNTER1_SELECT 0x3881
+#define regWD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regWD_PERFCOUNTER2_SELECT 0x3882
+#define regWD_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regWD_PERFCOUNTER3_SELECT 0x3883
+#define regWD_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regIA_PERFCOUNTER0_SELECT 0x3884
+#define regIA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regIA_PERFCOUNTER1_SELECT 0x3885
+#define regIA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regIA_PERFCOUNTER2_SELECT 0x3886
+#define regIA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regIA_PERFCOUNTER3_SELECT 0x3887
+#define regIA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regIA_PERFCOUNTER0_SELECT1 0x3888
+#define regIA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regVGT_PERFCOUNTER0_SELECT 0x388c
+#define regVGT_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regVGT_PERFCOUNTER1_SELECT 0x388d
+#define regVGT_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regVGT_PERFCOUNTER2_SELECT 0x388e
+#define regVGT_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regVGT_PERFCOUNTER3_SELECT 0x388f
+#define regVGT_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regVGT_PERFCOUNTER0_SELECT1 0x3890
+#define regVGT_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regVGT_PERFCOUNTER1_SELECT1 0x3891
+#define regVGT_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regVGT_PERFCOUNTER_SEID_MASK 0x3894
+#define regVGT_PERFCOUNTER_SEID_MASK_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT 0x3900
+#define regPA_SU_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT1 0x3901
+#define regPA_SU_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT 0x3902
+#define regPA_SU_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT1 0x3903
+#define regPA_SU_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT 0x3904
+#define regPA_SU_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT 0x3905
+#define regPA_SU_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT 0x3940
+#define regPA_SC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT1 0x3941
+#define regPA_SC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_SELECT 0x3942
+#define regPA_SC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_SELECT 0x3943
+#define regPA_SC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_SELECT 0x3944
+#define regPA_SC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_SELECT 0x3945
+#define regPA_SC_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_SELECT 0x3946
+#define regPA_SC_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_SELECT 0x3947
+#define regPA_SC_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_SELECT 0x3948
+#define regPA_SC_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT 0x3980
+#define regSPI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT 0x3981
+#define regSPI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT 0x3982
+#define regSPI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT 0x3983
+#define regSPI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT1 0x3984
+#define regSPI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT1 0x3985
+#define regSPI_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT1 0x3986
+#define regSPI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT1 0x3987
+#define regSPI_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_SELECT 0x3988
+#define regSPI_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_SELECT 0x3989
+#define regSPI_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER_BINS 0x398a
+#define regSPI_PERFCOUNTER_BINS_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_SELECT 0x39c0
+#define regSQ_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_SELECT 0x39c1
+#define regSQ_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_SELECT 0x39c2
+#define regSQ_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_SELECT 0x39c3
+#define regSQ_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_SELECT 0x39c4
+#define regSQ_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_SELECT 0x39c5
+#define regSQ_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_SELECT 0x39c6
+#define regSQ_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_SELECT 0x39c7
+#define regSQ_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER8_SELECT 0x39c8
+#define regSQ_PERFCOUNTER8_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER9_SELECT 0x39c9
+#define regSQ_PERFCOUNTER9_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER10_SELECT 0x39ca
+#define regSQ_PERFCOUNTER10_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER11_SELECT 0x39cb
+#define regSQ_PERFCOUNTER11_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER12_SELECT 0x39cc
+#define regSQ_PERFCOUNTER12_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER13_SELECT 0x39cd
+#define regSQ_PERFCOUNTER13_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER14_SELECT 0x39ce
+#define regSQ_PERFCOUNTER14_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER15_SELECT 0x39cf
+#define regSQ_PERFCOUNTER15_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL 0x39e0
+#define regSQ_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQ_PERFCOUNTER_MASK 0x39e1
+#define regSQ_PERFCOUNTER_MASK_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL2 0x39e2
+#define regSQ_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT 0x3a40
+#define regSX_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT 0x3a41
+#define regSX_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER2_SELECT 0x3a42
+#define regSX_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER3_SELECT 0x3a43
+#define regSX_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT1 0x3a44
+#define regSX_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT1 0x3a45
+#define regSX_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT 0x3a80
+#define regGDS_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT 0x3a81
+#define regGDS_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT 0x3a82
+#define regGDS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT 0x3a83
+#define regGDS_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT1 0x3a84
+#define regGDS_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT 0x3ac0
+#define regTA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT1 0x3ac1
+#define regTA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER1_SELECT 0x3ac2
+#define regTA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT 0x3b00
+#define regTD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT1 0x3b01
+#define regTD_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTD_PERFCOUNTER1_SELECT 0x3b02
+#define regTD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT 0x3b40
+#define regTCP_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT1 0x3b41
+#define regTCP_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT 0x3b42
+#define regTCP_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT1 0x3b43
+#define regTCP_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_SELECT 0x3b44
+#define regTCP_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_SELECT 0x3b45
+#define regTCP_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regTCC_PERFCOUNTER0_SELECT 0x3b80
+#define regTCC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTCC_PERFCOUNTER0_SELECT1 0x3b81
+#define regTCC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTCC_PERFCOUNTER1_SELECT 0x3b82
+#define regTCC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCC_PERFCOUNTER1_SELECT1 0x3b83
+#define regTCC_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regTCC_PERFCOUNTER2_SELECT 0x3b84
+#define regTCC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regTCC_PERFCOUNTER3_SELECT 0x3b85
+#define regTCC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regTCA_PERFCOUNTER0_SELECT 0x3b90
+#define regTCA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTCA_PERFCOUNTER0_SELECT1 0x3b91
+#define regTCA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTCA_PERFCOUNTER1_SELECT 0x3b92
+#define regTCA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCA_PERFCOUNTER1_SELECT1 0x3b93
+#define regTCA_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regTCA_PERFCOUNTER2_SELECT 0x3b94
+#define regTCA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regTCA_PERFCOUNTER3_SELECT 0x3b95
+#define regTCA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER_FILTER 0x3c00
+#define regCB_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT 0x3c01
+#define regCB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT1 0x3c02
+#define regCB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCB_PERFCOUNTER1_SELECT 0x3c03
+#define regCB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER2_SELECT 0x3c04
+#define regCB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER3_SELECT 0x3c05
+#define regCB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT 0x3c40
+#define regDB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT1 0x3c41
+#define regDB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT 0x3c42
+#define regDB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT1 0x3c43
+#define regDB_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER2_SELECT 0x3c44
+#define regDB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER3_SELECT 0x3c46
+#define regDB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRLC_SPM_PERFMON_CNTL 0x3c80
+#define regRLC_SPM_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_LO 0x3c81
+#define regRLC_SPM_PERFMON_RING_BASE_LO_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_HI 0x3c82
+#define regRLC_SPM_PERFMON_RING_BASE_HI_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_SIZE 0x3c83
+#define regRLC_SPM_PERFMON_RING_SIZE_BASE_IDX 1
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE 0x3c84
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_ADDR 0x3c85
+#define regRLC_SPM_SE_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_DATA 0x3c86
+#define regRLC_SPM_SE_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_CPG_PERFMON_SAMPLE_DELAY 0x3c87
+#define regRLC_SPM_CPG_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_CPC_PERFMON_SAMPLE_DELAY 0x3c88
+#define regRLC_SPM_CPC_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_CPF_PERFMON_SAMPLE_DELAY 0x3c89
+#define regRLC_SPM_CPF_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_CB_PERFMON_SAMPLE_DELAY 0x3c8a
+#define regRLC_SPM_CB_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_DB_PERFMON_SAMPLE_DELAY 0x3c8b
+#define regRLC_SPM_DB_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_PA_PERFMON_SAMPLE_DELAY 0x3c8c
+#define regRLC_SPM_PA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_GDS_PERFMON_SAMPLE_DELAY 0x3c8d
+#define regRLC_SPM_GDS_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_IA_PERFMON_SAMPLE_DELAY 0x3c8e
+#define regRLC_SPM_IA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_SC_PERFMON_SAMPLE_DELAY 0x3c90
+#define regRLC_SPM_SC_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_TCC_PERFMON_SAMPLE_DELAY 0x3c91
+#define regRLC_SPM_TCC_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_TCA_PERFMON_SAMPLE_DELAY 0x3c92
+#define regRLC_SPM_TCA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_TCP_PERFMON_SAMPLE_DELAY 0x3c93
+#define regRLC_SPM_TCP_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_TA_PERFMON_SAMPLE_DELAY 0x3c94
+#define regRLC_SPM_TA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_TD_PERFMON_SAMPLE_DELAY 0x3c95
+#define regRLC_SPM_TD_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_VGT_PERFMON_SAMPLE_DELAY 0x3c96
+#define regRLC_SPM_VGT_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_SPI_PERFMON_SAMPLE_DELAY 0x3c97
+#define regRLC_SPM_SPI_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_SQG_PERFMON_SAMPLE_DELAY 0x3c98
+#define regRLC_SPM_SQG_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_SX_PERFMON_SAMPLE_DELAY 0x3c9a
+#define regRLC_SPM_SX_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR 0x3c9b
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA 0x3c9c
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_RING_RDPTR 0x3c9d
+#define regRLC_SPM_RING_RDPTR_BASE_IDX 1
+#define regRLC_SPM_SEGMENT_THRESHOLD 0x3c9e
+#define regRLC_SPM_SEGMENT_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_RMI_PERFMON_SAMPLE_DELAY 0x3ca3
+#define regRLC_SPM_RMI_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define regRLC_SPM_PERFMON_SAMPLE_DELAY_MAX 0x3ca4
+#define regRLC_SPM_PERFMON_SAMPLE_DELAY_MAX_BASE_IDX 1
+#define regRLC_PERFMON_CNTL 0x3cc0
+#define regRLC_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_SELECT 0x3cc1
+#define regRLC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_SELECT 0x3cc2
+#define regRLC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_CNTL 0x3cc3
+#define regRLC_GPU_IOV_PERF_CNT_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR 0x3cc4
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA 0x3cc5
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR 0x3cc6
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA 0x3cc7
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT 0x3d00
+#define regRMI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT1 0x3d01
+#define regRMI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_SELECT 0x3d02
+#define regRMI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT 0x3d03
+#define regRMI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT1 0x3d04
+#define regRMI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_SELECT 0x3d05
+#define regRMI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRMI_PERF_COUNTER_CNTL 0x3d06
+#define regRMI_PERF_COUNTER_CNTL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_atcl2pfcntldec
+// base address: 0x37500
+#define regATC_L2_PERFCOUNTER0_CFG 0x3d40
+#define regATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regATC_L2_PERFCOUNTER1_CFG 0x3d41
+#define regATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regATC_L2_PERFCOUNTER_RSLT_CNTL 0x3d42
+#define regATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_vml2pldec
+// base address: 0x37518
+#define regMC_VM_L2_PERFCOUNTER0_CFG 0x3d46
+#define regMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER1_CFG 0x3d47
+#define regMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER2_CFG 0x3d48
+#define regMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER3_CFG 0x3d49
+#define regMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER4_CFG 0x3d4a
+#define regMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER5_CFG 0x3d4b
+#define regMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER6_CFG 0x3d4c
+#define regMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER7_CFG 0x3d4d
+#define regMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define regMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3d56
+#define regMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_l2tlbpldec
+// base address: 0x37578
+#define regL2TLB_PERFCOUNTER0_CFG 0x3d5e
+#define regL2TLB_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regL2TLB_PERFCOUNTER1_CFG 0x3d5f
+#define regL2TLB_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regL2TLB_PERFCOUNTER2_CFG 0x3d60
+#define regL2TLB_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regL2TLB_PERFCOUNTER3_CFG 0x3d61
+#define regL2TLB_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regL2TLB_PERFCOUNTER_RSLT_CNTL 0x3d62
+#define regL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_gdflldec
+// base address: 0x3a000
+#define regGDFLL_EDC_HYSTERESIS_CNTL 0x481b
+#define regGDFLL_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_EDC_HYSTERESIS_STAT 0x481c
+#define regGDFLL_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_rlcpdec
+// base address: 0x3b000
+#define regRLC_CNTL 0x4c00
+#define regRLC_CNTL_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL_2 0x4c03
+#define regRLC_CGCG_CGLS_CTRL_2_BASE_IDX 1
+#define regRLC_STAT 0x4c04
+#define regRLC_STAT_BASE_IDX 1
+#define regRLC_SAFE_MODE 0x4c05
+#define regRLC_SAFE_MODE_BASE_IDX 1
+#define regRLC_MEM_SLP_CNTL 0x4c06
+#define regRLC_MEM_SLP_CNTL_BASE_IDX 1
+#define regSMU_RLC_RESPONSE 0x4c07
+#define regSMU_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_RLCV_SAFE_MODE 0x4c08
+#define regRLC_RLCV_SAFE_MODE_BASE_IDX 1
+#define regRLC_SMU_SAFE_MODE 0x4c09
+#define regRLC_SMU_SAFE_MODE_BASE_IDX 1
+#define regRLC_RLCV_COMMAND 0x4c0a
+#define regRLC_RLCV_COMMAND_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_LSB 0x4c0c
+#define regRLC_REFCLOCK_TIMESTAMP_LSB_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_MSB 0x4c0d
+#define regRLC_REFCLOCK_TIMESTAMP_MSB_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_0 0x4c0e
+#define regRLC_GPM_TIMER_INT_0_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_1 0x4c0f
+#define regRLC_GPM_TIMER_INT_1_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_2 0x4c10
+#define regRLC_GPM_TIMER_INT_2_BASE_IDX 1
+#define regRLC_GPM_TIMER_CTRL 0x4c11
+#define regRLC_GPM_TIMER_CTRL_BASE_IDX 1
+#define regRLC_LB_CNTR_MAX 0x4c12
+#define regRLC_LB_CNTR_MAX_BASE_IDX 1
+#define regRLC_GPM_TIMER_STAT 0x4c13
+#define regRLC_GPM_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_3 0x4c15
+#define regRLC_GPM_TIMER_INT_3_BASE_IDX 1
+#define regRLC_SERDES_WR_NONCU_MASTER_MASK_1 0x4c16
+#define regRLC_SERDES_WR_NONCU_MASTER_MASK_1_BASE_IDX 1
+#define regRLC_SERDES_NONCU_MASTER_BUSY_1 0x4c17
+#define regRLC_SERDES_NONCU_MASTER_BUSY_1_BASE_IDX 1
+#define regRLC_INT_STAT 0x4c18
+#define regRLC_INT_STAT_BASE_IDX 1
+#define regRLC_LB_CNTL 0x4c19
+#define regRLC_LB_CNTL_BASE_IDX 1
+#define regRLC_MGCG_CTRL 0x4c1a
+#define regRLC_MGCG_CTRL_BASE_IDX 1
+#define regRLC_LB_CNTR_INIT 0x4c1b
+#define regRLC_LB_CNTR_INIT_BASE_IDX 1
+#define regRLC_LOAD_BALANCE_CNTR 0x4c1c
+#define regRLC_LOAD_BALANCE_CNTR_BASE_IDX 1
+#define regRLC_JUMP_TABLE_RESTORE 0x4c1e
+#define regRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
+#define regRLC_PG_DELAY_2 0x4c1f
+#define regRLC_PG_DELAY_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB 0x4c24
+#define regRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB 0x4c25
+#define regRLC_GPU_CLOCK_COUNT_MSB_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT 0x4c26
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_BASE_IDX 1
+#define regRLC_UCODE_CNTL 0x4c27
+#define regRLC_UCODE_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_RESET 0x4c28
+#define regRLC_GPM_THREAD_RESET_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T0 0x4c29
+#define regRLC_GPM_CP_DMA_COMPLETE_T0_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T1 0x4c2a
+#define regRLC_GPM_CP_DMA_COMPLETE_T1_BASE_IDX 1
+#define regRLC_FIREWALL_VIOLATION 0x4c2b
+#define regRLC_FIREWALL_VIOLATION_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_LSB 0x4c30
+#define regRLC_CLK_COUNT_GFXCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_MSB 0x4c31
+#define regRLC_CLK_COUNT_GFXCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_LSB 0x4c32
+#define regRLC_CLK_COUNT_REFCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_MSB 0x4c33
+#define regRLC_CLK_COUNT_REFCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_CTRL 0x4c34
+#define regRLC_CLK_COUNT_CTRL_BASE_IDX 1
+#define regRLC_CLK_COUNT_STAT 0x4c35
+#define regRLC_CLK_COUNT_STAT_BASE_IDX 1
+#define regRLC_GPM_STAT 0x4c40
+#define regRLC_GPM_STAT_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32_RES_SEL 0x4c41
+#define regRLC_GPU_CLOCK_32_RES_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32 0x4c42
+#define regRLC_GPU_CLOCK_32_BASE_IDX 1
+#define regRLC_PG_CNTL 0x4c43
+#define regRLC_PG_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_PRIORITY 0x4c44
+#define regRLC_GPM_THREAD_PRIORITY_BASE_IDX 1
+#define regRLC_GPM_THREAD_ENABLE 0x4c45
+#define regRLC_GPM_THREAD_ENABLE_BASE_IDX 1
+#define regRLC_CGTT_MGCG_OVERRIDE 0x4c48
+#define regRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL 0x4c49
+#define regRLC_CGCG_CGLS_CTRL_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL 0x4c4a
+#define regRLC_CGCG_RAMP_CTRL_BASE_IDX 1
+#define regRLC_DYN_PG_STATUS 0x4c4b
+#define regRLC_DYN_PG_STATUS_BASE_IDX 1
+#define regRLC_DYN_PG_REQUEST 0x4c4c
+#define regRLC_DYN_PG_REQUEST_BASE_IDX 1
+#define regRLC_PG_DELAY 0x4c4d
+#define regRLC_PG_DELAY_BASE_IDX 1
+#define regRLC_CU_STATUS 0x4c4e
+#define regRLC_CU_STATUS_BASE_IDX 1
+#define regRLC_LB_INIT_CU_MASK 0x4c4f
+#define regRLC_LB_INIT_CU_MASK_BASE_IDX 1
+#define regRLC_LB_ALWAYS_ACTIVE_CU_MASK 0x4c50
+#define regRLC_LB_ALWAYS_ACTIVE_CU_MASK_BASE_IDX 1
+#define regRLC_LB_PARAMS 0x4c51
+#define regRLC_LB_PARAMS_BASE_IDX 1
+#define regRLC_THREAD1_DELAY 0x4c52
+#define regRLC_THREAD1_DELAY_BASE_IDX 1
+#define regRLC_PG_ALWAYS_ON_CU_MASK 0x4c53
+#define regRLC_PG_ALWAYS_ON_CU_MASK_BASE_IDX 1
+#define regRLC_MAX_PG_CU 0x4c54
+#define regRLC_MAX_PG_CU_BASE_IDX 1
+#define regRLC_AUTO_PG_CTRL 0x4c55
+#define regRLC_AUTO_PG_CTRL_BASE_IDX 1
+#define regRLC_SMU_GRBM_REG_SAVE_CTRL 0x4c56
+#define regRLC_SMU_GRBM_REG_SAVE_CTRL_BASE_IDX 1
+#define regRLC_SERDES_RD_PENDING 0x4c58
+#define regRLC_SERDES_RD_PENDING_BASE_IDX 1
+#define regRLC_SERDES_RD_MASTER_INDEX 0x4c59
+#define regRLC_SERDES_RD_MASTER_INDEX_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_0 0x4c5a
+#define regRLC_SERDES_RD_DATA_0_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_1 0x4c5b
+#define regRLC_SERDES_RD_DATA_1_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_2 0x4c5c
+#define regRLC_SERDES_RD_DATA_2_BASE_IDX 1
+#define regRLC_SERDES_WR_CU_MASTER_MASK 0x4c5d
+#define regRLC_SERDES_WR_CU_MASTER_MASK_BASE_IDX 1
+#define regRLC_SERDES_WR_NONCU_MASTER_MASK 0x4c5e
+#define regRLC_SERDES_WR_NONCU_MASTER_MASK_BASE_IDX 1
+#define regRLC_SERDES_WR_CTRL 0x4c5f
+#define regRLC_SERDES_WR_CTRL_BASE_IDX 1
+#define regRLC_SERDES_WR_DATA 0x4c60
+#define regRLC_SERDES_WR_DATA_BASE_IDX 1
+#define regRLC_SERDES_CU_MASTER_BUSY 0x4c61
+#define regRLC_SERDES_CU_MASTER_BUSY_BASE_IDX 1
+#define regRLC_SERDES_NONCU_MASTER_BUSY 0x4c62
+#define regRLC_SERDES_NONCU_MASTER_BUSY_BASE_IDX 1
+#define regRLC_GPM_GENERAL_0 0x4c63
+#define regRLC_GPM_GENERAL_0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_1 0x4c64
+#define regRLC_GPM_GENERAL_1_BASE_IDX 1
+#define regRLC_GPM_GENERAL_2 0x4c65
+#define regRLC_GPM_GENERAL_2_BASE_IDX 1
+#define regRLC_GPM_GENERAL_3 0x4c66
+#define regRLC_GPM_GENERAL_3_BASE_IDX 1
+#define regRLC_GPM_GENERAL_4 0x4c67
+#define regRLC_GPM_GENERAL_4_BASE_IDX 1
+#define regRLC_GPM_GENERAL_5 0x4c68
+#define regRLC_GPM_GENERAL_5_BASE_IDX 1
+#define regRLC_GPM_GENERAL_6 0x4c69
+#define regRLC_GPM_GENERAL_6_BASE_IDX 1
+#define regRLC_GPM_GENERAL_7 0x4c6a
+#define regRLC_GPM_GENERAL_7_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_ADDR 0x4c6c
+#define regRLC_GPM_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_DATA 0x4c6d
+#define regRLC_GPM_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_STATIC_PG_STATUS 0x4c6e
+#define regRLC_STATIC_PG_STATUS_BASE_IDX 1
+#define regRLC_SPM_MC_CNTL 0x4c71
+#define regRLC_SPM_MC_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_CNTL 0x4c72
+#define regRLC_SPM_INT_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_STATUS 0x4c73
+#define regRLC_SPM_INT_STATUS_BASE_IDX 1
+#define regRLC_SMU_MESSAGE 0x4c76
+#define regRLC_SMU_MESSAGE_BASE_IDX 1
+#define regRLC_GPM_LOG_SIZE 0x4c77
+#define regRLC_GPM_LOG_SIZE_BASE_IDX 1
+#define regRLC_PG_DELAY_3 0x4c78
+#define regRLC_PG_DELAY_3_BASE_IDX 1
+#define regRLC_GPR_REG1 0x4c79
+#define regRLC_GPR_REG1_BASE_IDX 1
+#define regRLC_GPR_REG2 0x4c7a
+#define regRLC_GPR_REG2_BASE_IDX 1
+#define regRLC_GPM_LOG_CONT 0x4c7b
+#define regRLC_GPM_LOG_CONT_BASE_IDX 1
+#define regRLC_GPM_INT_DISABLE_TH0 0x4c7c
+#define regRLC_GPM_INT_DISABLE_TH0_BASE_IDX 1
+#define regRLC_GPM_INT_FORCE_TH0 0x4c7e
+#define regRLC_GPM_INT_FORCE_TH0_BASE_IDX 1
+#define regRLC_GPM_INT_FORCE_TH1 0x4c7f
+#define regRLC_GPM_INT_FORCE_TH1_BASE_IDX 1
+#define regRLC_SRM_CNTL 0x4c80
+#define regRLC_SRM_CNTL_BASE_IDX 1
+#define regRLC_SRM_ARAM_ADDR 0x4c83
+#define regRLC_SRM_ARAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_ARAM_DATA 0x4c84
+#define regRLC_SRM_ARAM_DATA_BASE_IDX 1
+#define regRLC_SRM_DRAM_ADDR 0x4c85
+#define regRLC_SRM_DRAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_DRAM_DATA 0x4c86
+#define regRLC_SRM_DRAM_DATA_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND 0x4c87
+#define regRLC_SRM_GPM_COMMAND_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND_STATUS 0x4c88
+#define regRLC_SRM_GPM_COMMAND_STATUS_BASE_IDX 1
+#define regRLC_SRM_RLCV_COMMAND 0x4c89
+#define regRLC_SRM_RLCV_COMMAND_BASE_IDX 1
+#define regRLC_SRM_RLCV_COMMAND_STATUS 0x4c8a
+#define regRLC_SRM_RLCV_COMMAND_STATUS_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_0 0x4c8b
+#define regRLC_SRM_INDEX_CNTL_ADDR_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_1 0x4c8c
+#define regRLC_SRM_INDEX_CNTL_ADDR_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_2 0x4c8d
+#define regRLC_SRM_INDEX_CNTL_ADDR_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_3 0x4c8e
+#define regRLC_SRM_INDEX_CNTL_ADDR_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_4 0x4c8f
+#define regRLC_SRM_INDEX_CNTL_ADDR_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_5 0x4c90
+#define regRLC_SRM_INDEX_CNTL_ADDR_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_6 0x4c91
+#define regRLC_SRM_INDEX_CNTL_ADDR_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_7 0x4c92
+#define regRLC_SRM_INDEX_CNTL_ADDR_7_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_0 0x4c93
+#define regRLC_SRM_INDEX_CNTL_DATA_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_1 0x4c94
+#define regRLC_SRM_INDEX_CNTL_DATA_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_2 0x4c95
+#define regRLC_SRM_INDEX_CNTL_DATA_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_3 0x4c96
+#define regRLC_SRM_INDEX_CNTL_DATA_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_4 0x4c97
+#define regRLC_SRM_INDEX_CNTL_DATA_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_5 0x4c98
+#define regRLC_SRM_INDEX_CNTL_DATA_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_6 0x4c99
+#define regRLC_SRM_INDEX_CNTL_DATA_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_7 0x4c9a
+#define regRLC_SRM_INDEX_CNTL_DATA_7_BASE_IDX 1
+#define regRLC_SRM_STAT 0x4c9b
+#define regRLC_SRM_STAT_BASE_IDX 1
+#define regRLC_SRM_GPM_ABORT 0x4c9c
+#define regRLC_SRM_GPM_ABORT_BASE_IDX 1
+#define regRLC_CSIB_ADDR_LO 0x4ca2
+#define regRLC_CSIB_ADDR_LO_BASE_IDX 1
+#define regRLC_CSIB_ADDR_HI 0x4ca3
+#define regRLC_CSIB_ADDR_HI_BASE_IDX 1
+#define regRLC_CSIB_LENGTH 0x4ca4
+#define regRLC_CSIB_LENGTH_BASE_IDX 1
+#define regRLC_SMU_COMMAND 0x4ca9
+#define regRLC_SMU_COMMAND_BASE_IDX 1
+#define regRLC_CP_SCHEDULERS 0x4caa
+#define regRLC_CP_SCHEDULERS_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_1 0x4cab
+#define regRLC_SMU_ARGUMENT_1_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_2 0x4cac
+#define regRLC_SMU_ARGUMENT_2_BASE_IDX 1
+#define regRLC_GPM_GENERAL_8 0x4cad
+#define regRLC_GPM_GENERAL_8_BASE_IDX 1
+#define regRLC_GPM_GENERAL_9 0x4cae
+#define regRLC_GPM_GENERAL_9_BASE_IDX 1
+#define regRLC_GPM_GENERAL_10 0x4caf
+#define regRLC_GPM_GENERAL_10_BASE_IDX 1
+#define regRLC_GPM_GENERAL_11 0x4cb0
+#define regRLC_GPM_GENERAL_11_BASE_IDX 1
+#define regRLC_GPM_GENERAL_12 0x4cb1
+#define regRLC_GPM_GENERAL_12_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_0 0x4cb2
+#define regRLC_GPM_UTCL1_CNTL_0_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_1 0x4cb3
+#define regRLC_GPM_UTCL1_CNTL_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_2 0x4cb4
+#define regRLC_GPM_UTCL1_CNTL_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_CNTL 0x4cb5
+#define regRLC_SPM_UTCL1_CNTL_BASE_IDX 1
+#define regRLC_UTCL1_STATUS_2 0x4cb6
+#define regRLC_UTCL1_STATUS_2_BASE_IDX 1
+#define regRLC_LB_THR_CONFIG_2 0x4cb8
+#define regRLC_LB_THR_CONFIG_2_BASE_IDX 1
+#define regRLC_LB_THR_CONFIG_3 0x4cb9
+#define regRLC_LB_THR_CONFIG_3_BASE_IDX 1
+#define regRLC_LB_THR_CONFIG_4 0x4cba
+#define regRLC_LB_THR_CONFIG_4_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_1 0x4cbc
+#define regRLC_SPM_UTCL1_ERROR_1_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_2 0x4cbd
+#define regRLC_SPM_UTCL1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_1 0x4cbe
+#define regRLC_GPM_UTCL1_TH0_ERROR_1_BASE_IDX 1
+#define regRLC_LB_THR_CONFIG_1 0x4cbf
+#define regRLC_LB_THR_CONFIG_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_2 0x4cc0
+#define regRLC_GPM_UTCL1_TH0_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1 0x4cc1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_2 0x4cc2
+#define regRLC_GPM_UTCL1_TH1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_1 0x4cc3
+#define regRLC_GPM_UTCL1_TH2_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_2 0x4cc4
+#define regRLC_GPM_UTCL1_TH2_ERROR_2_BASE_IDX 1
+#define regRLC_SEMAPHORE_0 0x4cc7
+#define regRLC_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_SEMAPHORE_1 0x4cc8
+#define regRLC_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_CP_EOF_INT 0x4cca
+#define regRLC_CP_EOF_INT_BASE_IDX 1
+#define regRLC_CP_EOF_INT_CNT 0x4ccb
+#define regRLC_CP_EOF_INT_CNT_BASE_IDX 1
+#define regRLC_SPARE_INT 0x4ccc
+#define regRLC_SPARE_INT_BASE_IDX 1
+#define regRLC_PREWALKER_UTCL1_CNTL 0x4ccd
+#define regRLC_PREWALKER_UTCL1_CNTL_BASE_IDX 1
+#define regRLC_PREWALKER_UTCL1_TRIG 0x4cce
+#define regRLC_PREWALKER_UTCL1_TRIG_BASE_IDX 1
+#define regRLC_PREWALKER_UTCL1_ADDR_LSB 0x4ccf
+#define regRLC_PREWALKER_UTCL1_ADDR_LSB_BASE_IDX 1
+#define regRLC_PREWALKER_UTCL1_ADDR_MSB 0x4cd0
+#define regRLC_PREWALKER_UTCL1_ADDR_MSB_BASE_IDX 1
+#define regRLC_PREWALKER_UTCL1_SIZE_LSB 0x4cd1
+#define regRLC_PREWALKER_UTCL1_SIZE_LSB_BASE_IDX 1
+#define regRLC_PREWALKER_UTCL1_SIZE_MSB 0x4cd2
+#define regRLC_PREWALKER_UTCL1_SIZE_MSB_BASE_IDX 1
+#define regRLC_DSM_TRIG 0x4cd3
+#define regRLC_DSM_TRIG_BASE_IDX 1
+#define regRLC_UTCL1_STATUS 0x4cd4
+#define regRLC_UTCL1_STATUS_BASE_IDX 1
+#define regRLC_R2I_CNTL_0 0x4cd5
+#define regRLC_R2I_CNTL_0_BASE_IDX 1
+#define regRLC_R2I_CNTL_1 0x4cd6
+#define regRLC_R2I_CNTL_1_BASE_IDX 1
+#define regRLC_R2I_CNTL_2 0x4cd7
+#define regRLC_R2I_CNTL_2_BASE_IDX 1
+#define regRLC_R2I_CNTL_3 0x4cd8
+#define regRLC_R2I_CNTL_3_BASE_IDX 1
+#define regRLC_UTCL2_CNTL 0x4cd9
+#define regRLC_UTCL2_CNTL_BASE_IDX 1
+#define regRLC_LBPW_CU_STAT 0x4cda
+#define regRLC_LBPW_CU_STAT_BASE_IDX 1
+#define regRLC_DS_CNTL 0x4cdb
+#define regRLC_DS_CNTL_BASE_IDX 1
+#define regRLC_GPM_INT_STAT_TH0 0x4cdc
+#define regRLC_GPM_INT_STAT_TH0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_13 0x4cdd
+#define regRLC_GPM_GENERAL_13_BASE_IDX 1
+#define regRLC_GPM_GENERAL_14 0x4cde
+#define regRLC_GPM_GENERAL_14_BASE_IDX 1
+#define regRLC_GPM_GENERAL_15 0x4cdf
+#define regRLC_GPM_GENERAL_15_BASE_IDX 1
+#define regRLC_SPARE_INT_1 0x4ce0
+#define regRLC_SPARE_INT_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT_1 0x4ce1
+#define regRLC_RLCV_SPARE_INT_1_BASE_IDX 1
+#define regRLC_SEMAPHORE_2 0x4ce3
+#define regRLC_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_SEMAPHORE_3 0x4ce4
+#define regRLC_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_3 0x4ce5
+#define regRLC_SMU_ARGUMENT_3_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_4 0x4ce6
+#define regRLC_SMU_ARGUMENT_4_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_1 0x4ce8
+#define regRLC_GPU_CLOCK_COUNT_LSB_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_1 0x4ce9
+#define regRLC_GPU_CLOCK_COUNT_MSB_1_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1 0x4cea
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_2 0x4ceb
+#define regRLC_GPU_CLOCK_COUNT_LSB_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_2 0x4cec
+#define regRLC_GPU_CLOCK_COUNT_MSB_2_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2 0x4cef
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1
+#define regRLC_CPG_STAT_INVAL 0x4d09
+#define regRLC_CPG_STAT_INVAL_BASE_IDX 1
+#define regRLC_DSM_CNTL 0x4d42
+#define regRLC_DSM_CNTL_BASE_IDX 1
+#define regRLC_DSM_CNTLA 0x4d43
+#define regRLC_DSM_CNTLA_BASE_IDX 1
+#define regRLC_DSM_CNTL2 0x4d44
+#define regRLC_DSM_CNTL2_BASE_IDX 1
+#define regRLC_DSM_CNTL2A 0x4d45
+#define regRLC_DSM_CNTL2A_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT 0x4f30
+#define regRLC_RLCV_SPARE_INT_BASE_IDX 1
+#define regRLC_SMU_CLK_REQ 0x4f97
+#define regRLC_SMU_CLK_REQ_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_pwrdec
+// base address: 0x3c000
+#define regCGTS_SM_CTRL_REG 0x5000
+#define regCGTS_SM_CTRL_REG_BASE_IDX 1
+#define regCGTS_RD_CTRL_REG 0x5001
+#define regCGTS_RD_CTRL_REG_BASE_IDX 1
+#define regCGTS_RD_REG 0x5002
+#define regCGTS_RD_REG_BASE_IDX 1
+#define regCGTS_TCC_DISABLE 0x5003
+#define regCGTS_TCC_DISABLE_BASE_IDX 1
+#define regCGTS_USER_TCC_DISABLE 0x5004
+#define regCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define regCGTS_CU0_SP0_CTRL_REG 0x5008
+#define regCGTS_CU0_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU0_LDS_SQ_CTRL_REG 0x5009
+#define regCGTS_CU0_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU0_TA_SQC_CTRL_REG 0x500a
+#define regCGTS_CU0_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU0_SP1_CTRL_REG 0x500b
+#define regCGTS_CU0_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU0_TD_TCP_CTRL_REG 0x500c
+#define regCGTS_CU0_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU1_SP0_CTRL_REG 0x500d
+#define regCGTS_CU1_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU1_LDS_SQ_CTRL_REG 0x500e
+#define regCGTS_CU1_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU1_TA_SQC_CTRL_REG 0x500f
+#define regCGTS_CU1_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU1_SP1_CTRL_REG 0x5010
+#define regCGTS_CU1_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU1_TD_TCP_CTRL_REG 0x5011
+#define regCGTS_CU1_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU2_SP0_CTRL_REG 0x5012
+#define regCGTS_CU2_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU2_LDS_SQ_CTRL_REG 0x5013
+#define regCGTS_CU2_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU2_TA_SQC_CTRL_REG 0x5014
+#define regCGTS_CU2_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU2_SP1_CTRL_REG 0x5015
+#define regCGTS_CU2_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU2_TD_TCP_CTRL_REG 0x5016
+#define regCGTS_CU2_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU3_SP0_CTRL_REG 0x5017
+#define regCGTS_CU3_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU3_LDS_SQ_CTRL_REG 0x5018
+#define regCGTS_CU3_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU3_TA_SQC_CTRL_REG 0x5019
+#define regCGTS_CU3_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU3_SP1_CTRL_REG 0x501a
+#define regCGTS_CU3_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU3_TD_TCP_CTRL_REG 0x501b
+#define regCGTS_CU3_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU4_SP0_CTRL_REG 0x501c
+#define regCGTS_CU4_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU4_LDS_SQ_CTRL_REG 0x501d
+#define regCGTS_CU4_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU4_TA_SQC_CTRL_REG 0x501e
+#define regCGTS_CU4_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU4_SP1_CTRL_REG 0x501f
+#define regCGTS_CU4_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU4_TD_TCP_CTRL_REG 0x5020
+#define regCGTS_CU4_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU5_SP0_CTRL_REG 0x5021
+#define regCGTS_CU5_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU5_LDS_SQ_CTRL_REG 0x5022
+#define regCGTS_CU5_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU5_TA_SQC_CTRL_REG 0x5023
+#define regCGTS_CU5_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU5_SP1_CTRL_REG 0x5024
+#define regCGTS_CU5_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU5_TD_TCP_CTRL_REG 0x5025
+#define regCGTS_CU5_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU6_SP0_CTRL_REG 0x5026
+#define regCGTS_CU6_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU6_LDS_SQ_CTRL_REG 0x5027
+#define regCGTS_CU6_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU6_TA_SQC_CTRL_REG 0x5028
+#define regCGTS_CU6_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU6_SP1_CTRL_REG 0x5029
+#define regCGTS_CU6_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU6_TD_TCP_CTRL_REG 0x502a
+#define regCGTS_CU6_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU7_SP0_CTRL_REG 0x502b
+#define regCGTS_CU7_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU7_LDS_SQ_CTRL_REG 0x502c
+#define regCGTS_CU7_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU7_TA_SQC_CTRL_REG 0x502d
+#define regCGTS_CU7_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU7_SP1_CTRL_REG 0x502e
+#define regCGTS_CU7_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU7_TD_TCP_CTRL_REG 0x502f
+#define regCGTS_CU7_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU8_SP0_CTRL_REG 0x5030
+#define regCGTS_CU8_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU8_LDS_SQ_CTRL_REG 0x5031
+#define regCGTS_CU8_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU8_TA_SQC_CTRL_REG 0x5032
+#define regCGTS_CU8_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU8_SP1_CTRL_REG 0x5033
+#define regCGTS_CU8_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU8_TD_TCP_CTRL_REG 0x5034
+#define regCGTS_CU8_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU9_SP0_CTRL_REG 0x5035
+#define regCGTS_CU9_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU9_LDS_SQ_CTRL_REG 0x5036
+#define regCGTS_CU9_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU9_TA_SQC_CTRL_REG 0x5037
+#define regCGTS_CU9_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU9_SP1_CTRL_REG 0x5038
+#define regCGTS_CU9_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU9_TD_TCP_CTRL_REG 0x5039
+#define regCGTS_CU9_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU10_SP0_CTRL_REG 0x503a
+#define regCGTS_CU10_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU10_LDS_SQ_CTRL_REG 0x503b
+#define regCGTS_CU10_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU10_TA_SQC_CTRL_REG 0x503c
+#define regCGTS_CU10_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU10_SP1_CTRL_REG 0x503d
+#define regCGTS_CU10_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU10_TD_TCP_CTRL_REG 0x503e
+#define regCGTS_CU10_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU11_SP0_CTRL_REG 0x503f
+#define regCGTS_CU11_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU11_LDS_SQ_CTRL_REG 0x5040
+#define regCGTS_CU11_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU11_TA_SQC_CTRL_REG 0x5041
+#define regCGTS_CU11_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU11_SP1_CTRL_REG 0x5042
+#define regCGTS_CU11_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU11_TD_TCP_CTRL_REG 0x5043
+#define regCGTS_CU11_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU12_SP0_CTRL_REG 0x5044
+#define regCGTS_CU12_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU12_LDS_SQ_CTRL_REG 0x5045
+#define regCGTS_CU12_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU12_TA_SQC_CTRL_REG 0x5046
+#define regCGTS_CU12_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU12_SP1_CTRL_REG 0x5047
+#define regCGTS_CU12_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU12_TD_TCP_CTRL_REG 0x5048
+#define regCGTS_CU12_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU13_SP0_CTRL_REG 0x5049
+#define regCGTS_CU13_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU13_LDS_SQ_CTRL_REG 0x504a
+#define regCGTS_CU13_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU13_TA_SQC_CTRL_REG 0x504b
+#define regCGTS_CU13_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU13_SP1_CTRL_REG 0x504c
+#define regCGTS_CU13_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU13_TD_TCP_CTRL_REG 0x504d
+#define regCGTS_CU13_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU14_SP0_CTRL_REG 0x504e
+#define regCGTS_CU14_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU14_LDS_SQ_CTRL_REG 0x504f
+#define regCGTS_CU14_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU14_TA_SQC_CTRL_REG 0x5050
+#define regCGTS_CU14_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU14_SP1_CTRL_REG 0x5051
+#define regCGTS_CU14_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU14_TD_TCP_CTRL_REG 0x5052
+#define regCGTS_CU14_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU15_SP0_CTRL_REG 0x5053
+#define regCGTS_CU15_SP0_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU15_LDS_SQ_CTRL_REG 0x5054
+#define regCGTS_CU15_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU15_TA_SQC_CTRL_REG 0x5055
+#define regCGTS_CU15_TA_SQC_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU15_SP1_CTRL_REG 0x5056
+#define regCGTS_CU15_SP1_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU15_TD_TCP_CTRL_REG 0x5057
+#define regCGTS_CU15_TD_TCP_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU0_TCPI_CTRL_REG 0x5058
+#define regCGTS_CU0_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU1_TCPI_CTRL_REG 0x5059
+#define regCGTS_CU1_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU2_TCPI_CTRL_REG 0x505a
+#define regCGTS_CU2_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU3_TCPI_CTRL_REG 0x505b
+#define regCGTS_CU3_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU4_TCPI_CTRL_REG 0x505c
+#define regCGTS_CU4_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU5_TCPI_CTRL_REG 0x505d
+#define regCGTS_CU5_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU6_TCPI_CTRL_REG 0x505e
+#define regCGTS_CU6_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU7_TCPI_CTRL_REG 0x505f
+#define regCGTS_CU7_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU8_TCPI_CTRL_REG 0x5060
+#define regCGTS_CU8_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU9_TCPI_CTRL_REG 0x5061
+#define regCGTS_CU9_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU10_TCPI_CTRL_REG 0x5062
+#define regCGTS_CU10_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU11_TCPI_CTRL_REG 0x5063
+#define regCGTS_CU11_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU12_TCPI_CTRL_REG 0x5064
+#define regCGTS_CU12_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU13_TCPI_CTRL_REG 0x5065
+#define regCGTS_CU13_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU14_TCPI_CTRL_REG 0x5066
+#define regCGTS_CU14_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTS_CU15_TCPI_CTRL_REG 0x5067
+#define regCGTS_CU15_TCPI_CTRL_REG_BASE_IDX 1
+#define regCGTT_SPI_PS_CLK_CTRL 0x507d
+#define regCGTT_SPI_PS_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SPIS_CLK_CTRL 0x507e
+#define regCGTT_SPIS_CLK_CTRL_BASE_IDX 1
+#define regCGTX_SPI_DEBUG_CLK_CTRL 0x507f
+#define regCGTX_SPI_DEBUG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SPI_CLK_CTRL 0x5080
+#define regCGTT_SPI_CLK_CTRL_BASE_IDX 1
+#define regCGTT_PC_CLK_CTRL 0x5081
+#define regCGTT_PC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_BCI_CLK_CTRL 0x5082
+#define regCGTT_BCI_CLK_CTRL_BASE_IDX 1
+#define regCGTT_VGT_CLK_CTRL 0x5084
+#define regCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define regCGTT_IA_CLK_CTRL 0x5085
+#define regCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_WD_CLK_CTRL 0x5086
+#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define regCGTT_PA_CLK_CTRL 0x5088
+#define regCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL0 0x5089
+#define regCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL1 0x508a
+#define regCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL2 0x508b
+#define regCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_SQ_CLK_CTRL 0x508c
+#define regCGTT_SQ_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SQG_CLK_CTRL 0x508d
+#define regCGTT_SQG_CLK_CTRL_BASE_IDX 1
+#define regSQ_ALU_CLK_CTRL 0x508e
+#define regSQ_ALU_CLK_CTRL_BASE_IDX 1
+#define regSQ_TEX_CLK_CTRL 0x508f
+#define regSQ_TEX_CLK_CTRL_BASE_IDX 1
+#define regSQ_LDS_CLK_CTRL 0x5090
+#define regSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define regSQ_POWER_THROTTLE 0x5091
+#define regSQ_POWER_THROTTLE_BASE_IDX 1
+#define regSQ_POWER_THROTTLE2 0x5092
+#define regSQ_POWER_THROTTLE2_BASE_IDX 1
+#define regTD_CGTT_CTRL 0x509c
+#define regTD_CGTT_CTRL_BASE_IDX 1
+#define regTA_CGTT_CTRL 0x509d
+#define regTA_CGTT_CTRL_BASE_IDX 1
+#define regCGTT_TCPI_CLK_CTRL 0x509e
+#define regCGTT_TCPI_CLK_CTRL_BASE_IDX 1
+#define regTCX_CGTT_SCLK_CTRL 0x50a3
+#define regTCX_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regDB_CGTT_CLK_CTRL_0 0x50a4
+#define regDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define regCB_CGTT_SCLK_CTRL 0x50a8
+#define regCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regTCC_CGTT_SCLK_CTRL 0x50ac
+#define regTCC_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regTCC_CGTT_SCLK_CTRL2 0x50ad
+#define regTCC_CGTT_SCLK_CTRL2_BASE_IDX 1
+#define regTCC_CGTT_SCLK_CTRL3 0x50ae
+#define regTCC_CGTT_SCLK_CTRL3_BASE_IDX 1
+#define regTCA_CGTT_SCLK_CTRL 0x50af
+#define regTCA_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regCGTT_CP_CLK_CTRL 0x50b0
+#define regCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPC_CLK_CTRL 0x50b2
+#define regCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_RLC_CLK_CTRL 0x50b5
+#define regCGTT_RLC_CLK_CTRL_BASE_IDX 1
+#define regRLC_GFX_RM_CNTL 0x50b6
+#define regRLC_GFX_RM_CNTL_BASE_IDX 1
+#define regRMI_CGTT_SCLK_CTRL 0x50c0
+#define regRMI_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regCGTT_TCPF_CLK_CTRL 0x50c1
+#define regCGTT_TCPF_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_hypdec
+// base address: 0x3e000
+#define regCP_HYP_PFP_UCODE_ADDR 0x5814
+#define regCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_PFP_UCODE_ADDR 0x5814
+#define regCP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_DATA 0x5815
+#define regCP_HYP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_PFP_UCODE_DATA 0x5815
+#define regCP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_ADDR 0x5816
+#define regCP_HYP_ME_UCODE_ADDR_BASE_IDX 1
+#define regCP_ME_RAM_RADDR 0x5816
+#define regCP_ME_RAM_RADDR_BASE_IDX 1
+#define regCP_ME_RAM_WADDR 0x5816
+#define regCP_ME_RAM_WADDR_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_DATA 0x5817
+#define regCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+#define regCP_ME_RAM_DATA 0x5817
+#define regCP_ME_RAM_DATA_BASE_IDX 1
+#define regCP_CE_UCODE_ADDR 0x5818
+#define regCP_CE_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_CE_UCODE_ADDR 0x5818
+#define regCP_HYP_CE_UCODE_ADDR_BASE_IDX 1
+#define regCP_CE_UCODE_DATA 0x5819
+#define regCP_CE_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_CE_UCODE_DATA 0x5819
+#define regCP_HYP_CE_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_ADDR 0x581a
+#define regCP_HYP_MEC1_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_ADDR 0x581a
+#define regCP_MEC_ME1_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_DATA 0x581b
+#define regCP_HYP_MEC1_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_DATA 0x581b
+#define regCP_MEC_ME1_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_ADDR 0x581c
+#define regCP_HYP_MEC2_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_ADDR 0x581c
+#define regCP_MEC_ME2_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_DATA 0x581d
+#define regCP_HYP_MEC2_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_DATA 0x581d
+#define regCP_MEC_ME2_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_CHKSUM 0x581e
+#define regCP_HYP_PFP_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_CE_UCODE_CHKSUM 0x581f
+#define regCP_HYP_CE_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_CHKSUM 0x5820
+#define regCP_HYP_ME_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM 0x5821
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM 0x5822
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_XCP_CTL 0x5828
+#define regCP_HYP_XCP_CTL_BASE_IDX 1
+#define regRLC_GPM_UCODE_ADDR 0x583c
+#define regRLC_GPM_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_DATA 0x583d
+#define regRLC_GPM_UCODE_DATA_BASE_IDX 1
+#define regGRBM_GFX_INDEX_SR_SELECT 0x5a00
+#define regGRBM_GFX_INDEX_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_INDEX_SR_DATA 0x5a01
+#define regGRBM_GFX_INDEX_SR_DATA_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_SELECT 0x5a02
+#define regGRBM_GFX_CNTL_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_DATA 0x5a03
+#define regGRBM_GFX_CNTL_SR_DATA_BASE_IDX 1
+#define regGRBM_MCM_ADDR 0x5a07
+#define regGRBM_MCM_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_ENABLE 0x5b00
+#define regRLC_GPU_IOV_VF_ENABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG6 0x5b06
+#define regRLC_GPU_IOV_CFG_REG6_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG8 0x5b20
+#define regRLC_GPU_IOV_CFG_REG8_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_0 0x5b25
+#define regRLC_RLCV_TIMER_INT_0_BASE_IDX 1
+#define regRLC_RLCV_TIMER_CTRL 0x5b26
+#define regRLC_RLCV_TIMER_CTRL_BASE_IDX 1
+#define regRLC_RLCV_TIMER_STAT 0x5b27
+#define regRLC_RLCV_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS 0x5b2a
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET 0x5b2b
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR 0x5b2c
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_MASK 0x5b2d
+#define regRLC_GPU_IOV_VF_MASK_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_0 0x5b2e
+#define regRLC_HYP_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_1 0x5b2f
+#define regRLC_HYP_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_CLK_CNTL 0x5b31
+#define regRLC_CLK_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_BLOCK 0x5b34
+#define regRLC_GPU_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG1 0x5b35
+#define regRLC_GPU_IOV_CFG_REG1_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG2 0x5b36
+#define regRLC_GPU_IOV_CFG_REG2_BASE_IDX 1
+#define regRLC_GPU_IOV_VM_BUSY_STATUS 0x5b37
+#define regRLC_GPU_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_0 0x5b38
+#define regRLC_GPU_IOV_SCH_0_BASE_IDX 1
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID 0x5b39
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_3 0x5b3a
+#define regRLC_GPU_IOV_SCH_3_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_1 0x5b3b
+#define regRLC_GPU_IOV_SCH_1_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_2 0x5b3c
+#define regRLC_GPU_IOV_SCH_2_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_STAT 0x5b3f
+#define regRLC_GPU_IOV_INT_STAT_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_1 0x5b40
+#define regRLC_RLCV_TIMER_INT_1_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_ADDR 0x5b42
+#define regRLC_GPU_IOV_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_DATA 0x5b43
+#define regRLC_GPU_IOV_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_ADDR 0x5b44
+#define regRLC_GPU_IOV_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_DATA 0x5b45
+#define regRLC_GPU_IOV_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_CNTL 0x5b46
+#define regRLC_GPU_IOV_F32_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_RESET 0x5b47
+#define regRLC_GPU_IOV_F32_RESET_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_STATUS 0x5b48
+#define regRLC_GPU_IOV_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_STATUS 0x5b49
+#define regRLC_GPU_IOV_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SMU_RESPONSE 0x5b4a
+#define regRLC_GPU_IOV_SMU_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_VIRT_RESET_REQ 0x5b4c
+#define regRLC_GPU_IOV_VIRT_RESET_REQ_BASE_IDX 1
+#define regRLC_GPU_IOV_RLC_RESPONSE 0x5b4d
+#define regRLC_GPU_IOV_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_DISABLE 0x5b4e
+#define regRLC_GPU_IOV_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_FORCE 0x5b4f
+#define regRLC_GPU_IOV_INT_FORCE_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS 0x5b50
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS 0x5b51
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_2 0x5b52
+#define regRLC_HYP_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_3 0x5b53
+#define regRLC_HYP_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_STATUS 0x5b54
+#define regRLC_GPU_IOV_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_STATUS 0x5b55
+#define regRLC_GPU_IOV_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_STATUS 0x5b56
+#define regRLC_GPU_IOV_SDMA4_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_STATUS 0x5b57
+#define regRLC_GPU_IOV_SDMA5_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_STATUS 0x5b58
+#define regRLC_GPU_IOV_SDMA6_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_STATUS 0x5b59
+#define regRLC_GPU_IOV_SDMA7_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS 0x5b5a
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS 0x5b5b
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS 0x5b5c
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS 0x5b5d
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS 0x5b5e
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS 0x5b5f
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_utcl2_vmsharedhvdec
+// base address: 0x3ea00
+#define regMC_VM_FB_SIZE_OFFSET_VF0 0x5a80
+#define regMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF1 0x5a81
+#define regMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF2 0x5a82
+#define regMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF3 0x5a83
+#define regMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF4 0x5a84
+#define regMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF5 0x5a85
+#define regMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF6 0x5a86
+#define regMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF7 0x5a87
+#define regMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF8 0x5a88
+#define regMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF9 0x5a89
+#define regMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF10 0x5a8a
+#define regMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF11 0x5a8b
+#define regMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF12 0x5a8c
+#define regMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF13 0x5a8d
+#define regMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF14 0x5a8e
+#define regMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define regMC_VM_FB_SIZE_OFFSET_VF15 0x5a8f
+#define regMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+#define regVM_IOMMU_MMIO_CNTRL_1 0x5a90
+#define regVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define regMC_VM_MARC_BASE_LO_0 0x5a91
+#define regMC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define regMC_VM_MARC_BASE_LO_1 0x5a92
+#define regMC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define regMC_VM_MARC_BASE_LO_2 0x5a93
+#define regMC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define regMC_VM_MARC_BASE_LO_3 0x5a94
+#define regMC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define regMC_VM_MARC_BASE_HI_0 0x5a95
+#define regMC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define regMC_VM_MARC_BASE_HI_1 0x5a96
+#define regMC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define regMC_VM_MARC_BASE_HI_2 0x5a97
+#define regMC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define regMC_VM_MARC_BASE_HI_3 0x5a98
+#define regMC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_LO_0 0x5a99
+#define regMC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_LO_1 0x5a9a
+#define regMC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_LO_2 0x5a9b
+#define regMC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_LO_3 0x5a9c
+#define regMC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_HI_0 0x5a9d
+#define regMC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_HI_1 0x5a9e
+#define regMC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_HI_2 0x5a9f
+#define regMC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define regMC_VM_MARC_RELOC_HI_3 0x5aa0
+#define regMC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define regMC_VM_MARC_LEN_LO_0 0x5aa1
+#define regMC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define regMC_VM_MARC_LEN_LO_1 0x5aa2
+#define regMC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define regMC_VM_MARC_LEN_LO_2 0x5aa3
+#define regMC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define regMC_VM_MARC_LEN_LO_3 0x5aa4
+#define regMC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define regMC_VM_MARC_LEN_HI_0 0x5aa5
+#define regMC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define regMC_VM_MARC_LEN_HI_1 0x5aa6
+#define regMC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define regMC_VM_MARC_LEN_HI_2 0x5aa7
+#define regMC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define regMC_VM_MARC_LEN_HI_3 0x5aa8
+#define regMC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define regVM_IOMMU_CONTROL_REGISTER 0x5aa9
+#define regVM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define regVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x5aaa
+#define regVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL 0x5aab
+#define regVM_PCIE_ATS_CNTL_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_0 0x5aac
+#define regVM_PCIE_ATS_CNTL_VF_0_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_1 0x5aad
+#define regVM_PCIE_ATS_CNTL_VF_1_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_2 0x5aae
+#define regVM_PCIE_ATS_CNTL_VF_2_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_3 0x5aaf
+#define regVM_PCIE_ATS_CNTL_VF_3_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_4 0x5ab0
+#define regVM_PCIE_ATS_CNTL_VF_4_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_5 0x5ab1
+#define regVM_PCIE_ATS_CNTL_VF_5_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_6 0x5ab2
+#define regVM_PCIE_ATS_CNTL_VF_6_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_7 0x5ab3
+#define regVM_PCIE_ATS_CNTL_VF_7_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_8 0x5ab4
+#define regVM_PCIE_ATS_CNTL_VF_8_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_9 0x5ab5
+#define regVM_PCIE_ATS_CNTL_VF_9_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_10 0x5ab6
+#define regVM_PCIE_ATS_CNTL_VF_10_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_11 0x5ab7
+#define regVM_PCIE_ATS_CNTL_VF_11_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_12 0x5ab8
+#define regVM_PCIE_ATS_CNTL_VF_12_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_13 0x5ab9
+#define regVM_PCIE_ATS_CNTL_VF_13_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_14 0x5aba
+#define regVM_PCIE_ATS_CNTL_VF_14_BASE_IDX 1
+#define regVM_PCIE_ATS_CNTL_VF_15 0x5abb
+#define regVM_PCIE_ATS_CNTL_VF_15_BASE_IDX 1
+#define regMC_SHARED_ACTIVE_FCN_ID 0x5abc
+#define regMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 1
+#define regMC_VM_XGMI_GPUIOV_ENABLE 0x5abd
+#define regMC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 1
+
+
+// addressBlock: xcd0_gc_pspdec
+// base address: 0x3f000
+#define regCPG_PSP_DEBUG 0x5c30
+#define regCPG_PSP_DEBUG_BASE_IDX 1
+#define regCPC_PSP_DEBUG 0x5c31
+#define regCPC_PSP_DEBUG_BASE_IDX 1
+#define regCP_PSP_XCP_CTL 0x5c34
+#define regCP_PSP_XCP_CTL_BASE_IDX 1
+#define regGRBM_SEC_CNTL 0x5e0b
+#define regGRBM_SEC_CNTL_BASE_IDX 1
+#define regGRBM_IOV_ERROR_FIFO_DATA 0x5e12
+#define regGRBM_IOV_ERROR_FIFO_DATA_BASE_IDX 1
+#define regGRBM_DSM_BYPASS 0x5e13
+#define regGRBM_DSM_BYPASS_BASE_IDX 1
+#define regGRBM_CAM_INDEX 0x5e16
+#define regGRBM_CAM_INDEX_BASE_IDX 1
+#define regGRBM_HYP_CAM_INDEX 0x5e16
+#define regGRBM_HYP_CAM_INDEX_BASE_IDX 1
+#define regGRBM_CAM_DATA 0x5e17
+#define regGRBM_CAM_DATA_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA 0x5e17
+#define regGRBM_HYP_CAM_DATA_BASE_IDX 1
+#define regRLC_FWL_FIRST_VIOL_ADDR 0x5f37
+#define regRLC_FWL_FIRST_VIOL_ADDR_BASE_IDX 1
+
+
+// addressBlock: sqind
+// base address: 0x0
+#define ixSQ_DEBUG_STS_LOCAL 0x0008
+#define ixSQ_DEBUG_CTRL_LOCAL 0x0009
+#define ixSQ_WAVE_VALID_AND_IDLE 0x000a
+#define ixSQ_WAVE_MODE 0x0011
+#define ixSQ_WAVE_STATUS 0x0012
+#define ixSQ_WAVE_TRAPSTS 0x0013
+#define ixSQ_WAVE_HW_ID 0x0014
+#define ixSQ_WAVE_GPR_ALLOC 0x0015
+#define ixSQ_WAVE_LDS_ALLOC 0x0016
+#define ixSQ_WAVE_IB_STS 0x0017
+#define ixSQ_WAVE_PC_LO 0x0018
+#define ixSQ_WAVE_PC_HI 0x0019
+#define ixSQ_WAVE_INST_DW0 0x001a
+#define ixSQ_WAVE_INST_DW1 0x001b
+#define ixSQ_WAVE_IB_DBG0 0x001c
+#define ixSQ_WAVE_IB_DBG1 0x001d
+#define ixSQ_WAVE_FLUSH_IB 0x001e
+#define ixSQ_WAVE_TTMP0 0x026c
+#define ixSQ_WAVE_TTMP1 0x026d
+#define ixSQ_WAVE_TTMP2 0x026e
+#define ixSQ_WAVE_TTMP3 0x026f
+#define ixSQ_WAVE_TTMP4 0x0270
+#define ixSQ_WAVE_TTMP5 0x0271
+#define ixSQ_WAVE_TTMP6 0x0272
+#define ixSQ_WAVE_TTMP7 0x0273
+#define ixSQ_WAVE_TTMP8 0x0274
+#define ixSQ_WAVE_TTMP9 0x0275
+#define ixSQ_WAVE_TTMP10 0x0276
+#define ixSQ_WAVE_TTMP11 0x0277
+#define ixSQ_WAVE_TTMP12 0x0278
+#define ixSQ_WAVE_TTMP13 0x0279
+#define ixSQ_WAVE_TTMP14 0x027a
+#define ixSQ_WAVE_TTMP15 0x027b
+#define ixSQ_WAVE_M0 0x027c
+#define ixSQ_WAVE_EXEC_LO 0x027e
+#define ixSQ_WAVE_EXEC_HI 0x027f
+#define ixSQ_INTERRUPT_WORD_AUTO_CTXID 0x20c0
+#define ixSQ_INTERRUPT_WORD_AUTO_HI 0x20c0
+#define ixSQ_INTERRUPT_WORD_AUTO_LO 0x20c0
+#define ixSQ_INTERRUPT_WORD_CMN_CTXID 0x20c0
+#define ixSQ_INTERRUPT_WORD_CMN_HI 0x20c0
+#define ixSQ_INTERRUPT_WORD_WAVE_CTXID 0x20c0
+#define ixSQ_INTERRUPT_WORD_WAVE_HI 0x20c0
+#define ixSQ_INTERRUPT_WORD_WAVE_LO 0x20c0
+
+
+#endif
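For orientation, both headers in this patch follow the same generated layout: every register gets an offset define plus a matching _BASE_IDX define, and every field gets a __SHIFT/_MASK pair. A minimal sketch of how such definitions are typically consumed, with illustrative helper names and an assumed per-IP segment-base table (neither is code from this patch):

#include <stdint.h>

/* Illustrative only: turn an offset/_BASE_IDX pair into a dword register index.
 * seg_base[] stands in for the per-IP table that maps each BASE_IDX value to
 * the MMIO segment base programmed for the ASIC. */
static inline uint32_t example_reg_index(const uint32_t *seg_base,
					 uint32_t base_idx, uint32_t offset)
{
	return seg_base[base_idx] + offset;
}

/* Illustrative field accessors built from the __SHIFT/_MASK pairs. */
#define EXAMPLE_GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define EXAMPLE_SET_FIELD(val, reg, field, fv)                              \
	(((val) & ~reg##__##field##_MASK) |                                 \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* e.g. EXAMPLE_GET_FIELD(status, GRBM_STATUS2, RLC_BUSY) isolates bit 24,
 * matching GRBM_STATUS2__RLC_BUSY__SHIFT/_MASK in the sh_mask header added below. */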
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
new file mode 100644
index 000000000000..84a75b58347f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
@@ -0,0 +1,30535 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_9_4_3_SH_MASK_HEADER
+#define _gc_9_4_3_SH_MASK_HEADER
+
+
+// addressBlock: xcd0_gc_grbmdec
+//GRBM_CNTL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
+#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
+#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
+//GRBM_SKEW_CNTL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
+//GRBM_STATUS2
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0xa
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0xb
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0xc
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0xd
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
+#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
+#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
+#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
+#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
+#define GRBM_STATUS2__CPF_RQ_PENDING__SHIFT 0x13
+#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
+#define GRBM_STATUS2__CANE_BUSY__SHIFT 0x15
+#define GRBM_STATUS2__CANE_LINK_BUSY__SHIFT 0x16
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x18
+#define GRBM_STATUS2__TC_BUSY__SHIFT 0x19
+#define GRBM_STATUS2__TCC_CC_RESIDENT__SHIFT 0x1a
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
+#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x00000400L
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x00000800L
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x00001000L
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x00002000L
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
+#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
+#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
+#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
+#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
+#define GRBM_STATUS2__CPF_RQ_PENDING_MASK 0x00080000L
+#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
+#define GRBM_STATUS2__CANE_BUSY_MASK 0x00200000L
+#define GRBM_STATUS2__CANE_LINK_BUSY_MASK 0x00400000L
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS2__TC_BUSY_MASK 0x02000000L
+#define GRBM_STATUS2__TCC_CC_RESIDENT_MASK 0x04000000L
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
+//GRBM_PWR_CNTL
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
+#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
+#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
+#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
+#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
+//GRBM_STATUS
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
+#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
+#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x10
+#define GRBM_STATUS__VGT_BUSY__SHIFT 0x11
+#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x12
+#define GRBM_STATUS__IA_BUSY__SHIFT 0x13
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
+#define GRBM_STATUS__WD_BUSY__SHIFT 0x15
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__VGT_BUSY_MASK 0x00020000L
+#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x00040000L
+#define GRBM_STATUS__IA_BUSY_MASK 0x00080000L
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__WD_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+//GRBM_STATUS_SE0
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE1
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+//GRBM_SOFT_RESET
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
+#define GRBM_SOFT_RESET__SOFT_RESET_CANE__SHIFT 0x15
+#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2__SHIFT 0x17
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CANE_MASK 0x00200000L
+#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2_MASK 0x00800000L
+//GRBM_GFX_CLKEN_CNTL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
+//GRBM_WAIT_IDLE_CLOCKS
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
+//GRBM_STATUS_SE2
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE3
+#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE3__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE3__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000L
+//GRBM_READ_ERROR
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003FFFCL
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+//GRBM_READ_ERROR2
+#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
+#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+//GRBM_INT_CNTL
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+//GRBM_TRAP_OP
+#define GRBM_TRAP_OP__RW__SHIFT 0x0
+#define GRBM_TRAP_OP__RW_MASK 0x00000001L
+//GRBM_TRAP_ADDR
+#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_ADDR_MSK
+#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_WD
+#define GRBM_TRAP_WD__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
+//GRBM_TRAP_WD_MSK
+#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
+//GRBM_WRITE_ERROR
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
+#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
+#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x5
+#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
+#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
+#define GRBM_WRITE_ERROR__TMZ__SHIFT 0x11
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL__SHIFT 0x12
+#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
+#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
+#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
+#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000001CL
+#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x000001E0L
+#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
+#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
+#define GRBM_WRITE_ERROR__TMZ_MASK 0x00020000L
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL_MASK 0x00040000L
+#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
+#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
+#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
+//GRBM_IOV_ERROR
+#define GRBM_IOV_ERROR__IOV_ADDR__SHIFT 0x2
+#define GRBM_IOV_ERROR__IOV_VFID__SHIFT 0x14
+#define GRBM_IOV_ERROR__IOV_VF__SHIFT 0x1a
+#define GRBM_IOV_ERROR__IOV_OP__SHIFT 0x1b
+#define GRBM_IOV_ERROR__IOV_ERROR__SHIFT 0x1f
+#define GRBM_IOV_ERROR__IOV_ADDR_MASK 0x000FFFFCL
+#define GRBM_IOV_ERROR__IOV_VFID_MASK 0x03F00000L
+#define GRBM_IOV_ERROR__IOV_VF_MASK 0x04000000L
+#define GRBM_IOV_ERROR__IOV_OP_MASK 0x08000000L
+#define GRBM_IOV_ERROR__IOV_ERROR_MASK 0x80000000L
+//GRBM_CHIP_REVISION
+#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
+#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
+//GRBM_GFX_CNTL
+#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
+//GRBM_RSMU_CFG
+#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
+#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
+#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
+#define GRBM_RSMU_CFG__DEBUG_MASK__SHIFT 0x11
+#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
+#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
+#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
+#define GRBM_RSMU_CFG__DEBUG_MASK_MASK 0x00020000L
+//GRBM_IH_CREDIT
+#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//GRBM_PWR_CNTL2
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
+//GRBM_UTCL2_INVAL_RANGE_START
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
+//GRBM_UTCL2_INVAL_RANGE_END
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
+//GRBM_RSMU_READ_ERROR
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
+//GRBM_CHICKEN_BITS
+#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ__SHIFT 0x0
+#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ_MASK 0x00000001L
+//GRBM_FENCE_RANGE0
+#define GRBM_FENCE_RANGE0__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE0__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE0__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE0__END_MASK 0xFFFF0000L
+//GRBM_FENCE_RANGE1
+#define GRBM_FENCE_RANGE1__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE1__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE1__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE1__END_MASK 0xFFFF0000L
+//GRBM_IOV_READ_ERROR
+#define GRBM_IOV_READ_ERROR__IOV_ADDR__SHIFT 0x2
+#define GRBM_IOV_READ_ERROR__IOV_VFID__SHIFT 0x14
+#define GRBM_IOV_READ_ERROR__IOV_VF__SHIFT 0x1a
+#define GRBM_IOV_READ_ERROR__IOV_OP__SHIFT 0x1b
+#define GRBM_IOV_READ_ERROR__IOV_ERROR__SHIFT 0x1f
+#define GRBM_IOV_READ_ERROR__IOV_ADDR_MASK 0x000FFFFCL
+#define GRBM_IOV_READ_ERROR__IOV_VFID_MASK 0x03F00000L
+#define GRBM_IOV_READ_ERROR__IOV_VF_MASK 0x04000000L
+#define GRBM_IOV_READ_ERROR__IOV_OP_MASK 0x08000000L
+#define GRBM_IOV_READ_ERROR__IOV_ERROR_MASK 0x80000000L
+//GRBM_NOWHERE
+#define GRBM_NOWHERE__DATA__SHIFT 0x0
+#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG1
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG2
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG3
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG4
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG5
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG6
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG7
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//VIOLATION_DATA_ASYNC_VF_PROG
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID__SHIFT 0x0
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID__SHIFT 0x4
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR__SHIFT 0x1f
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID_MASK 0x0000000FL
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID_MASK 0x000003F0L
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR_MASK 0x80000000L
+
+
+// addressBlock: xcd0_gc_cpdec
+//CP_CPC_DEBUG_CNTL
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPC_DEBUG_CNTL__DEBUG_BUS_DC_GD_SEL__SHIFT 0x8
+#define CP_CPC_DEBUG_CNTL__DEBUG_BUS_SELECT_BITS__SHIFT 0x10
+#define CP_CPC_DEBUG_CNTL__DEBUG_BUS_FLOP_EN__SHIFT 0x1f
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+#define CP_CPC_DEBUG_CNTL__DEBUG_BUS_DC_GD_SEL_MASK 0x00000700L
+#define CP_CPC_DEBUG_CNTL__DEBUG_BUS_SELECT_BITS_MASK 0x003F0000L
+#define CP_CPC_DEBUG_CNTL__DEBUG_BUS_FLOP_EN_MASK 0x80000000L
+//CP_CPF_DEBUG_CNTL
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPF_DEBUG_CNTL__DEBUG_BUS_SELECT_BITS__SHIFT 0x10
+#define CP_CPF_DEBUG_CNTL__DEBUG_BUS_FLOP_EN__SHIFT 0x1f
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+#define CP_CPF_DEBUG_CNTL__DEBUG_BUS_SELECT_BITS_MASK 0x003F0000L
+#define CP_CPF_DEBUG_CNTL__DEBUG_BUS_FLOP_EN_MASK 0x80000000L
+//CP_CPC_STATUS
+#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
+#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
+#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
+#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
+#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
+#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
+#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
+#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
+#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
+#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
+#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
+#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
+#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
+#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
+#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
+#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
+#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
+#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
+#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
+#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
+#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
+#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
+#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
+#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
+#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
+#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
+#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
+#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
+//CP_CPC_BUSY_STAT
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY__SHIFT 0x1
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY__SHIFT 0x11
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY_MASK 0x00000002L
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY_MASK 0x00020000L
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
+//CP_CPC_STALLED_STAT1
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
+//CP_CPF_STATUS
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
+#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
+#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
+#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
+#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
+#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
+#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
+#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
+#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
+#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
+#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
+#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
+#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
+#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
+#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
+#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
+#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
+#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
+#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
+#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
+#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
+#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
+#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
+#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
+#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
+//CP_CPF_BUSY_STAT
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS__SHIFT 0x9
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
+#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS_MASK 0x00000200L
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
+//CP_CPF_STALLED_STAT1
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
+//CP_CPC_GRBM_FREE_COUNT
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+//CP_CPC_PRIV_VIOLATION_ADDR
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_STATUS__SHIFT 0x0
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_OP__SHIFT 0x1
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x2
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_APERTURE_ID__SHIFT 0x14
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_STATUS_MASK 0x00000001L
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_OP_MASK 0x00000002L
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x000FFFFCL
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_APERTURE_ID_MASK 0xFFF00000L
+//CP_MEC_CNTL
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
+#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
+#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
+#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
+#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
+#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
+#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
+#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
+#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
+//CP_MEC_ME1_HEADER_DUMP
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_HEADER_DUMP
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_CPC_SCRATCH_INDEX
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000003FFL
+//CP_CPC_SCRATCH_DATA
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_CPF_GRBM_FREE_COUNT
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
+//CP_CPC_HALT_HYST_COUNT
+#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
+#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
+//CP_CE_COMPARE_COUNT
+#define CP_CE_COMPARE_COUNT__COMPARE_COUNT__SHIFT 0x0
+#define CP_CE_COMPARE_COUNT__COMPARE_COUNT_MASK 0xFFFFFFFFL
+//CP_CE_DE_COUNT
+#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
+#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_DE_CE_COUNT
+#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT__SHIFT 0x0
+#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_DE_LAST_INVAL_COUNT
+#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT__SHIFT 0x0
+#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT_MASK 0xFFFFFFFFL
+//CP_DE_DE_COUNT
+#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
+#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_STALLED_STAT3
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
+//CP_STALLED_STAT1
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x4
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x00000010L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
+//CP_STALLED_STAT2
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x15
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x16
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x00200000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+//CP_BUSY_STAT
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+//CP_STAT
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
+#define CP_STAT__DC_BUSY__SHIFT 0xd
+#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
+#define CP_STAT__PFP_BUSY__SHIFT 0xf
+#define CP_STAT__MEQ_BUSY__SHIFT 0x10
+#define CP_STAT__ME_BUSY__SHIFT 0x11
+#define CP_STAT__QUERY_BUSY__SHIFT 0x12
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
+#define CP_STAT__DMA_BUSY__SHIFT 0x16
+#define CP_STAT__RCIU_BUSY__SHIFT 0x17
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
+#define CP_STAT__CE_BUSY__SHIFT 0x1a
+#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
+#define CP_STAT__CP_BUSY__SHIFT 0x1f
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+//CP_ME_HEADER_DUMP
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_HEADER_DUMP
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_GRBM_FREE_COUNT
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
+//CP_CE_HEADER_DUMP
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x0
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_INSTR_PNTR
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_ME_INSTR_PNTR
+#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CE_INSTR_PNTR
+#define CP_CE_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_CE_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC1_INSTR_PNTR
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC2_INSTR_PNTR
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CSF_STAT
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
+//CP_ME_CNTL
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
+#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
+#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
+#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
+#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
+#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
+#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
+#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
+#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
+#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
+#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
+#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+//CP_CNTX_STAT
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+//CP_ME_PREEMPTION
+#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
+#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
+//CP_ROQ_THRESHOLDS
+#define CP_ROQ_THRESHOLDS__IB1_START__SHIFT 0x0
+#define CP_ROQ_THRESHOLDS__IB2_START__SHIFT 0x8
+#define CP_ROQ_THRESHOLDS__IB1_START_MASK 0x000000FFL
+#define CP_ROQ_THRESHOLDS__IB2_START_MASK 0x0000FF00L
+//CP_MEQ_STQ_THRESHOLD
+#define CP_MEQ_STQ_THRESHOLD__STQ_START__SHIFT 0x0
+#define CP_MEQ_STQ_THRESHOLD__STQ_START_MASK 0x000000FFL
+//CP_RB2_RPTR
+#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB2_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB1_RPTR
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB0_RPTR
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_RPTR
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_WPTR_DELAY
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
+//CP_RB_WPTR_POLL_CNTL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//CP_ROQ1_THRESHOLDS
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
+#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x8
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x10
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x18
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000000FFL
+#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0x0000FF00L
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x00FF0000L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xFF000000L
+//CP_ROQ2_THRESHOLDS
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x0
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x8
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x10
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x18
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0x000000FFL
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x0000FF00L
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x00FF0000L
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xFF000000L
+//CP_STQ_THRESHOLDS
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
+//CP_QUEUE_THRESHOLDS
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x0
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x8
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x0000003FL
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x00003F00L
+//CP_MEQ_THRESHOLDS
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
+//CP_ROQ_AVAIL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x000007FFL
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x07FF0000L
+//CP_STQ_AVAIL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
+//CP_ROQ2_AVAIL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x000007FFL
+//CP_MEQ_AVAIL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
+//CP_CMD_INDEX
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
+//CP_CMD_DATA
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
+#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
+//CP_ROQ_RB_STAT
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x000003FFL
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x03FF0000L
+//CP_ROQ_IB1_STAT
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x000003FFL
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x03FF0000L
+//CP_ROQ_IB2_STAT
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x000003FFL
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x03FF0000L
+//CP_STQ_STAT
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
+//CP_STQ_WR_STAT
+#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
+#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
+//CP_MEQ_STAT
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
+//CP_CEQ1_AVAIL
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x0
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x10
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x000007FFL
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x07FF0000L
+//CP_CEQ2_AVAIL
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x0
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x000007FFL
+//CP_CE_ROQ_RB_STAT
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x000003FFL
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x03FF0000L
+//CP_CE_ROQ_IB1_STAT
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x000003FFL
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x03FF0000L
+//CP_CE_ROQ_IB2_STAT
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x000003FFL
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x03FF0000L
+//CP_INT_STAT_DEBUG
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED__SHIFT 0xb
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED__SHIFT 0x12
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x13
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x14
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED__SHIFT 0x15
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED_MASK 0x00000800L
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED_MASK 0x00040000L
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x00080000L
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x00100000L
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED_MASK 0x00200000L
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_DEBUG_CNTL
+#define CP_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_DEBUG_CNTL__DEBUG_BUS_SELECT_BITS__SHIFT 0x10
+#define CP_DEBUG_CNTL__DEBUG_BUS_FLOP_EN__SHIFT 0x1f
+#define CP_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+#define CP_DEBUG_CNTL__DEBUG_BUS_SELECT_BITS_MASK 0x003F0000L
+#define CP_DEBUG_CNTL__DEBUG_BUS_FLOP_EN_MASK 0x80000000L
+//CP_PRIV_VIOLATION_ADDR
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0000FFFFL
+
+
+// addressBlock: xcd0_gc_padec
+//VGT_VTX_VECT_EJECT_REG
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x0
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x0000007FL
+//VGT_DMA_DATA_FIFO_DEPTH
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH__SHIFT 0x9
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000001FFL
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH_MASK 0x0007FE00L
+//VGT_DMA_REQ_FIFO_DEPTH
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_DRAW_INIT_FIFO_DEPTH
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_LAST_COPY_STATE
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x10
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x00070000L
+//VGT_CACHE_INVALIDATION
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x0
+#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT__SHIFT 0x4
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x5
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x6
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x9
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0xb
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0xc
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0xd
+#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x10
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG__SHIFT 0x15
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1__SHIFT 0x16
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2__SHIFT 0x19
+#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE__SHIFT 0x1c
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI__SHIFT 0x1d
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x00000003L
+#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT_MASK 0x00000010L
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x00000020L
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0x000000C0L
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x00000200L
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x00000800L
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x00001000L
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x00002000L
+#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x001F0000L
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_MASK 0x00200000L
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1_MASK 0x01C00000L
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2_MASK 0x0E000000L
+#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE_MASK 0x10000000L
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI_MASK 0x20000000L
+//VGT_RESET_DEBUG
+#define VGT_RESET_DEBUG__GS_DISABLE__SHIFT 0x0
+#define VGT_RESET_DEBUG__TESS_DISABLE__SHIFT 0x1
+#define VGT_RESET_DEBUG__WD_DISABLE__SHIFT 0x2
+#define VGT_RESET_DEBUG__GS_DISABLE_MASK 0x00000001L
+#define VGT_RESET_DEBUG__TESS_DISABLE_MASK 0x00000002L
+#define VGT_RESET_DEBUG__WD_DISABLE_MASK 0x00000004L
+//VGT_STRMOUT_DELAY
+#define VGT_STRMOUT_DELAY__SKIP_DELAY__SHIFT 0x0
+#define VGT_STRMOUT_DELAY__SE0_WD_DELAY__SHIFT 0x8
+#define VGT_STRMOUT_DELAY__SE1_WD_DELAY__SHIFT 0xb
+#define VGT_STRMOUT_DELAY__SE2_WD_DELAY__SHIFT 0xe
+#define VGT_STRMOUT_DELAY__SE3_WD_DELAY__SHIFT 0x11
+#define VGT_STRMOUT_DELAY__SKIP_DELAY_MASK 0x000000FFL
+#define VGT_STRMOUT_DELAY__SE0_WD_DELAY_MASK 0x00000700L
+#define VGT_STRMOUT_DELAY__SE1_WD_DELAY_MASK 0x00003800L
+#define VGT_STRMOUT_DELAY__SE2_WD_DELAY_MASK 0x0001C000L
+#define VGT_STRMOUT_DELAY__SE3_WD_DELAY_MASK 0x000E0000L
+//VGT_FIFO_DEPTHS
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x0
+#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x7
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x8
+#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH__SHIFT 0x16
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x0000007FL
+#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x00000080L
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x003FFF00L
+#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH_MASK 0x0FC00000L
+//VGT_GS_VERTEX_REUSE
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x0
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x0000001FL
+//VGT_MC_LAT_CNTL
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
+//IA_CNTL_STATUS
+#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x0
+#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x1
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x2
+#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x3
+#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x4
+#define IA_CNTL_STATUS__IA_BUSY_MASK 0x00000001L
+#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x00000010L
+//VGT_CNTL_STATUS
+#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x0
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x1
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x2
+#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x3
+#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x4
+#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x5
+#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x6
+#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x7
+#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x8
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x9
+#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY__SHIFT 0xa
+#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x00000001L
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x00000002L
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x00000004L
+#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x00000008L
+#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x00000010L
+#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x00000020L
+#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x00000040L
+#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x00000080L
+#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x00000100L
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x00000200L
+#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY_MASK 0x00000400L
+//WD_CNTL_STATUS
+#define WD_CNTL_STATUS__WD_BUSY__SHIFT 0x0
+#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY__SHIFT 0x1
+#define WD_CNTL_STATUS__WD_SPL_DI_BUSY__SHIFT 0x2
+#define WD_CNTL_STATUS__WD_ADC_BUSY__SHIFT 0x3
+#define WD_CNTL_STATUS__WD_BUSY_MASK 0x00000001L
+#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY_MASK 0x00000002L
+#define WD_CNTL_STATUS__WD_SPL_DI_BUSY_MASK 0x00000004L
+#define WD_CNTL_STATUS__WD_ADC_BUSY_MASK 0x00000008L
+//CC_GC_PRIM_CONFIG
+#define CC_GC_PRIM_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
+#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
+#define CC_GC_PRIM_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
+#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
+//GC_USER_PRIM_CONFIG
+#define GC_USER_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
+#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
+#define GC_USER_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
+#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
+//WD_QOS
+#define WD_QOS__DRAW_STALL__SHIFT 0x0
+#define WD_QOS__DRAW_STALL_MASK 0x00000001L
+//WD_UTCL1_CNTL
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+//WD_UTCL1_STATUS
+#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//IA_UTCL1_CNTL
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+//IA_UTCL1_STATUS
+#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//VGT_SYS_CONFIG
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+//VGT_VS_MAX_WAVE_ID
+#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//VGT_GS_MAX_WAVE_ID
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GFX_PIPE_CONTROL
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
+#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
+#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
+//CC_GC_SHADER_ARRAY_CONFIG
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
+//GC_USER_SHADER_ARRAY_CONFIG
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
+//VGT_DMA_PRIMITIVE_TYPE
+#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_DMA_CONTROL
+#define VGT_DMA_CONTROL__PRIMGROUP_SIZE__SHIFT 0x0
+#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP__SHIFT 0x11
+#define VGT_DMA_CONTROL__SWITCH_ON_EOI__SHIFT 0x13
+#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP__SHIFT 0x14
+#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC__SHIFT 0x15
+#define VGT_DMA_CONTROL__EN_INST_OPT_ADV__SHIFT 0x16
+#define VGT_DMA_CONTROL__HW_USE_ONLY__SHIFT 0x17
+#define VGT_DMA_CONTROL__PRIMGROUP_SIZE_MASK 0x0000FFFFL
+#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP_MASK 0x00020000L
+#define VGT_DMA_CONTROL__SWITCH_ON_EOI_MASK 0x00080000L
+#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP_MASK 0x00100000L
+#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC_MASK 0x00200000L
+#define VGT_DMA_CONTROL__EN_INST_OPT_ADV_MASK 0x00400000L
+#define VGT_DMA_CONTROL__HW_USE_ONLY_MASK 0x00800000L
+//VGT_DMA_LS_HS_CONFIG
+#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+//WD_BUF_RESOURCE_1
+#define WD_BUF_RESOURCE_1__POS_BUF_SIZE__SHIFT 0x0
+#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE__SHIFT 0x10
+#define WD_BUF_RESOURCE_1__POS_BUF_SIZE_MASK 0x0000FFFFL
+#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE_MASK 0xFFFF0000L
+//WD_BUF_RESOURCE_2
+#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE__SHIFT 0x0
+#define WD_BUF_RESOURCE_2__ADDR_MODE__SHIFT 0xf
+#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE__SHIFT 0x10
+#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE_MASK 0x00001FFFL
+#define WD_BUF_RESOURCE_2__ADDR_MODE_MASK 0x00008000L
+#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE_MASK 0xFFFF0000L
+//PA_CL_CNTL_STATUS
+#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED__SHIFT 0x0
+#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED__SHIFT 0x1
+#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED__SHIFT 0x2
+#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED_MASK 0x00000001L
+#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED_MASK 0x00000002L
+#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED_MASK 0x00000004L
+//PA_CL_ENHANCE
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x5
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE__SHIFT 0x11
+#define PA_CL_ENHANCE__OUTPUT_SWITCH_TO_LEGACY_EVENT__SHIFT 0x12
+#define PA_CL_ENHANCE__NO_SWITCH_TO_LEGACY_AFTER_VMID_RESET__SHIFT 0x13
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE__SHIFT 0x14
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE__SHIFT 0x15
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE_MASK 0x00020000L
+#define PA_CL_ENHANCE__OUTPUT_SWITCH_TO_LEGACY_EVENT_MASK 0x00040000L
+#define PA_CL_ENHANCE__NO_SWITCH_TO_LEGACY_AFTER_VMID_RESET_MASK 0x00080000L
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE_MASK 0x00100000L
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE_MASK 0x00200000L
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+//PA_CL_RESET_DEBUG
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE__SHIFT 0x0
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE_MASK 0x00000001L
+//PA_SU_CNTL_STATUS
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+//PA_SC_FIFO_DEPTH_CNTL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
+//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_TRAP_SCREEN_HV_LOCK
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_FORCE_EOV_MAX_CNTS
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
+//PA_SC_BINNER_EVENT_CNTL_0
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_1
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_2
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_3
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63_MASK 0xC0000000L
+//PA_SC_BINNER_TIMEOUT_COUNTER
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_BINNER_PERF_CNTL_0
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
+//PA_SC_BINNER_PERF_CNTL_1
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
+//PA_SC_BINNER_PERF_CNTL_2
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
+//PA_SC_BINNER_PERF_CNTL_3
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_ENHANCE_2
+#define PA_SC_ENHANCE_2__RESERVED_0__SHIFT 0x0
+#define PA_SC_ENHANCE_2__RESERVED_1__SHIFT 0x1
+#define PA_SC_ENHANCE_2__RESERVED_2__SHIFT 0x2
+#define PA_SC_ENHANCE_2__RESERVED_3__SHIFT 0x3
+#define PA_SC_ENHANCE_2__RESERVED_4__SHIFT 0x4
+#define PA_SC_ENHANCE_2__RESERVED_5__SHIFT 0x5
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_COMPOUND_INDEX_EN__SHIFT 0x6
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PIPELINE_PRIMID__SHIFT 0x7
+#define PA_SC_ENHANCE_2__RSVD__SHIFT 0x8
+#define PA_SC_ENHANCE_2__RESERVED_0_MASK 0x00000001L
+#define PA_SC_ENHANCE_2__RESERVED_1_MASK 0x00000002L
+#define PA_SC_ENHANCE_2__RESERVED_2_MASK 0x00000004L
+#define PA_SC_ENHANCE_2__RESERVED_3_MASK 0x00000008L
+#define PA_SC_ENHANCE_2__RESERVED_4_MASK 0x00000010L
+#define PA_SC_ENHANCE_2__RESERVED_5_MASK 0x00000020L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_COMPOUND_INDEX_EN_MASK 0x00000040L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PIPELINE_PRIMID_MASK 0x00000080L
+#define PA_SC_ENHANCE_2__RSVD_MASK 0xFFFFFF00L
+//PA_SC_FIFO_SIZE
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
+//PA_SC_IF_FIFO_SIZE
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
+//PA_SC_PKR_WAVE_TABLE_CNTL
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
+//PA_UTCL1_CNTL1
+#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
+#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define PA_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define PA_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define PA_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define PA_UTCL1_CNTL1__SPARE__SHIFT 0x10
+#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define PA_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define PA_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID__SHIFT 0x19
+#define PA_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define PA_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
+#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define PA_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define PA_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define PA_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define PA_UTCL1_CNTL1__SPARE_MASK 0x00010000L
+#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define PA_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define PA_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define PA_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define PA_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//PA_UTCL1_CNTL2
+#define PA_UTCL1_CNTL2__SPARE1__SHIFT 0x0
+#define PA_UTCL1_CNTL2__SPARE2__SHIFT 0x8
+#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define PA_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define PA_UTCL1_CNTL2__SPARE3__SHIFT 0xb
+#define PA_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT__SHIFT 0xd
+#define PA_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define PA_UTCL1_CNTL2__SPARE4__SHIFT 0x10
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define PA_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
+#define PA_UTCL1_CNTL2__SPARE5__SHIFT 0x19
+#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define PA_UTCL1_CNTL2__RESERVED__SHIFT 0x1b
+#define PA_UTCL1_CNTL2__SPARE1_MASK 0x000000FFL
+#define PA_UTCL1_CNTL2__SPARE2_MASK 0x00000100L
+#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define PA_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define PA_UTCL1_CNTL2__SPARE3_MASK 0x00000800L
+#define PA_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT_MASK 0x00002000L
+#define PA_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define PA_UTCL1_CNTL2__SPARE4_MASK 0x00030000L
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define PA_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
+#define PA_UTCL1_CNTL2__SPARE5_MASK 0x02000000L
+#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define PA_UTCL1_CNTL2__RESERVED_MASK 0xF8000000L
+//PA_SIDEBAND_REQUEST_DELAYS
+#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY__SHIFT 0x0
+#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY__SHIFT 0x10
+#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY_MASK 0x0000FFFFL
+#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY_MASK 0xFFFF0000L
+//PA_SC_ENHANCE
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
+//PA_SC_ENHANCE_1
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
+#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
+#define PA_SC_ENHANCE_1__ECO_SPARE0__SHIFT 0x5
+#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
+#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
+#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_XY_UNPACK__SHIFT 0xc
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xd
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
+#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION__SHIFT 0xf
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
+#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING__SHIFT 0x11
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG__SHIFT 0x17
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER__SHIFT 0x19
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1a
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE__SHIFT 0x1b
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX__SHIFT 0x1c
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1__SHIFT 0x1d
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI__SHIFT 0x1e
+#define PA_SC_ENHANCE_1__RSVD__SHIFT 0x1f
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
+#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
+#define PA_SC_ENHANCE_1__ECO_SPARE0_MASK 0x00000020L
+#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
+#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
+#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_XY_UNPACK_MASK 0x00001000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00002000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION_MASK 0x00008000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING_MASK 0x00020000L
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG_MASK 0x00800000L
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER_MASK 0x02000000L
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION_MASK 0x04000000L
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE_MASK 0x08000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_MASK 0x10000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1_MASK 0x20000000L
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI_MASK 0x40000000L
+#define PA_SC_ENHANCE_1__RSVD_MASK 0x80000000L
+//PA_SC_DSM_CNTL
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
+//PA_SC_TILE_STEERING_CREST_OVERRIDE
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
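+/*
+ * Editor's note (illustrative only, not part of the generated header): every
+ * field above is described by a __SHIFT/_MASK pair, so a read-modify-write of
+ * a single field follows the usual pattern. A minimal sketch using the
+ * PA_SC_TILE_STEERING_CREST_OVERRIDE.SE_SELECT field defined just above, and
+ * assuming hypothetical RREG32()/WREG32() register accessors and a reg_offset
+ * variable that are not part of this header:
+ *
+ *	u32 v = RREG32(reg_offset);
+ *	v &= ~PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK;
+ *	v |= (se << PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT) &
+ *	     PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK;
+ *	WREG32(reg_offset, v);
+ */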
+
+
+// addressBlock: xcd0_gc_sqdec
+//SQ_CONFIG
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0
+#define SQ_CONFIG__DISABLE_REPEATER_FGCG_CLOCK_GATING__SHIFT 0x1
+#define SQ_CONFIG__DISABLE_SPIPRIO_OVER_USERPRIO__SHIFT 0x2
+#define SQ_CONFIG__OVERRIDE_SP_MAI_ALU_BUSY__SHIFT 0x3
+#define SQ_CONFIG__DISABLE_RAM_CLOCK_GATING__SHIFT 0x4
+#define SQ_CONFIG__DISABLE_MAI_CO_EXEC__SHIFT 0x5
+#define SQ_CONFIG__OVERRIDE_MAI_ALU_BUSY__SHIFT 0x6
+#define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
+#define SQ_CONFIG__DEBUG_EN__SHIFT 0x8
+#define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9
+#define SQ_CONFIG__DEBUG_ONE_INST_CLAUSE__SHIFT 0xa
+#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY__SHIFT 0xb
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0xc
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0xd
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0xe
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0xf
+#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE__SHIFT 0x10
+#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE__SHIFT 0x11
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS__SHIFT 0x12
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS__SHIFT 0x13
+#define SQ_CONFIG__REPLAY_SLEEP_CNT__SHIFT 0x15
+#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP__SHIFT 0x1c
+#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
+#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
+#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L
+#define SQ_CONFIG__DISABLE_REPEATER_FGCG_CLOCK_GATING_MASK 0x00000002L
+#define SQ_CONFIG__DISABLE_SPIPRIO_OVER_USERPRIO_MASK 0x00000004L
+#define SQ_CONFIG__OVERRIDE_SP_MAI_ALU_BUSY_MASK 0x00000008L
+#define SQ_CONFIG__DISABLE_RAM_CLOCK_GATING_MASK 0x00000010L
+#define SQ_CONFIG__DISABLE_MAI_CO_EXEC_MASK 0x00000020L
+#define SQ_CONFIG__OVERRIDE_MAI_ALU_BUSY_MASK 0x00000040L
+#define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
+#define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
+#define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L
+#define SQ_CONFIG__DEBUG_ONE_INST_CLAUSE_MASK 0x00000400L
+#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY_MASK 0x00000800L
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x00001000L
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x00002000L
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x00004000L
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x00008000L
+#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE_MASK 0x00010000L
+#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE_MASK 0x00020000L
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS_MASK 0x00040000L
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS_MASK 0x00180000L
+#define SQ_CONFIG__REPLAY_SLEEP_CNT_MASK 0x0FE00000L
+#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP_MASK 0x10000000L
+#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING_MASK 0x20000000L
+#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE_MASK 0x40000000L
+#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE_MASK 0x80000000L
+//SQC_CONFIG
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
+#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x9
+#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0xa
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0xb
+#define SQC_CONFIG__EVICT_LRU__SHIFT 0xc
+#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xe
+#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xf
+#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0x10
+#define SQC_CONFIG__INST_PRF_COUNT__SHIFT 0x18
+#define SQC_CONFIG__INST_PRF_FILTER_DIS__SHIFT 0x1d
+#define SQC_CONFIG__DISABLE_PREFETCH_CROSS_4K_BOUNDARY_CHECK__SHIFT 0x1e
+#define SQC_CONFIG__MEM_LS_DISABLE__SHIFT 0x1f
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x00000200L
+#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x00000400L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000800L
+#define SQC_CONFIG__EVICT_LRU_MASK 0x00003000L
+#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00004000L
+#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00008000L
+#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x00FF0000L
+#define SQC_CONFIG__INST_PRF_COUNT_MASK 0x1F000000L
+#define SQC_CONFIG__INST_PRF_FILTER_DIS_MASK 0x20000000L
+#define SQC_CONFIG__DISABLE_PREFETCH_CROSS_4K_BOUNDARY_CHECK_MASK 0x40000000L
+#define SQC_CONFIG__MEM_LS_DISABLE_MASK 0x80000000L
+//LDS_CONFIG
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
+#define LDS_CONFIG__TMZ_VIOLATION_REPORTING__SHIFT 0x1
+#define LDS_CONFIG__DISABLE_RAM_CLOCK_GATING__SHIFT 0x2
+#define LDS_CONFIG__DISABLE_IDXCLK_MGCG__SHIFT 0x3
+#define LDS_CONFIG__DISABLE_MEMCLK_MGCG__SHIFT 0x4
+#define LDS_CONFIG__DISABLE_ATTRCLK_MGCG__SHIFT 0x5
+#define LDS_CONFIG__DISABLE_ATODFPCLK_MGCG__SHIFT 0x6
+#define LDS_CONFIG__DISABLE_PHASE_FGCG__SHIFT 0x7
+#define LDS_CONFIG__DISABLE_LDS_SP_READ_FGCG__SHIFT 0x8
+#define LDS_CONFIG__DISABLE_SP_DATA_CLOCK_GATING__SHIFT 0x9
+#define LDS_CONFIG__DISABLE_TD_DATA_CLOCK_GATING__SHIFT 0xa
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
+#define LDS_CONFIG__TMZ_VIOLATION_REPORTING_MASK 0x00000002L
+#define LDS_CONFIG__DISABLE_RAM_CLOCK_GATING_MASK 0x00000004L
+#define LDS_CONFIG__DISABLE_IDXCLK_MGCG_MASK 0x00000008L
+#define LDS_CONFIG__DISABLE_MEMCLK_MGCG_MASK 0x00000010L
+#define LDS_CONFIG__DISABLE_ATTRCLK_MGCG_MASK 0x00000020L
+#define LDS_CONFIG__DISABLE_ATODFPCLK_MGCG_MASK 0x00000040L
+#define LDS_CONFIG__DISABLE_PHASE_FGCG_MASK 0x00000080L
+#define LDS_CONFIG__DISABLE_LDS_SP_READ_FGCG_MASK 0x00000100L
+#define LDS_CONFIG__DISABLE_SP_DATA_CLOCK_GATING_MASK 0x00000200L
+#define LDS_CONFIG__DISABLE_TD_DATA_CLOCK_GATING_MASK 0x00000400L
+//SQ_RANDOM_WAVE_PRI
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x007FFC00L
+//SQ_REG_CREDITS
+#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x0
+#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x8
+#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x1c
+#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x1d
+#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x1e
+#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x1f
+#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x0000003FL
+#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0x00000F00L
+#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000L
+#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000L
+#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000L
+#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000L
+//SQ_FIFO_SIZES
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x10
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000F00L
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
+//SQ_DSM_CNTL
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQ_DSM_CNTL2
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
+//SQ_RUNTIME_CONFIG
+#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST__SHIFT 0x0
+#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST_MASK 0x00000001L
+//SQ_DEBUG_STS_GLOBAL
+#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY__SHIFT 0x1
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0__SHIFT 0x4
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY_MASK 0x00000002L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0_MASK 0x0000FFF0L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1_MASK 0x0FFF0000L
+//SH_MEM_BASES
+#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
+#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
+#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
+#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
+//SQ_TIMEOUT_CONFIG
+#define SQ_TIMEOUT_CONFIG__PERIOD_SEL__SHIFT 0x0
+#define SQ_TIMEOUT_CONFIG__TIMEOUT_FATAL_DISABLE__SHIFT 0x6
+#define SQ_TIMEOUT_CONFIG__TIMER_LONGER_SEL__SHIFT 0x7
+#define SQ_TIMEOUT_CONFIG__TIMEOUT_CONDITIONS_MASK__SHIFT 0x8
+#define SQ_TIMEOUT_CONFIG__PERIOD_SEL_MASK 0x0000003FL
+#define SQ_TIMEOUT_CONFIG__TIMEOUT_FATAL_DISABLE_MASK 0x00000040L
+#define SQ_TIMEOUT_CONFIG__TIMER_LONGER_SEL_MASK 0x00000080L
+#define SQ_TIMEOUT_CONFIG__TIMEOUT_CONDITIONS_MASK_MASK 0x07FFFF00L
+//SQ_TIMEOUT_STATUS
+#define SQ_TIMEOUT_STATUS__WAVE_TIMEOUT__SHIFT 0x0
+#define SQ_TIMEOUT_STATUS__WAVE_TIMEOUT_MASK 0xFFFFFFFFL
+//SH_MEM_CONFIG
+#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
+#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
+#define SH_MEM_CONFIG__F8_MODE__SHIFT 0x8
+#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
+#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
+#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
+#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
+#define SH_MEM_CONFIG__F8_MODE_MASK 0x00000100L
+#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
+#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
+//SP_MFMA_PORTD_RD_CONFIG
+#define SP_MFMA_PORTD_RD_CONFIG__SET__SHIFT 0x0
+#define SP_MFMA_PORTD_RD_CONFIG__TYPE__SHIFT 0x1
+#define SP_MFMA_PORTD_RD_CONFIG__LAST_PASS__SHIFT 0x4
+#define SP_MFMA_PORTD_RD_CONFIG__PORTD_PATTERN__SHIFT 0x9
+#define SP_MFMA_PORTD_RD_CONFIG__SET_MASK 0x00000001L
+#define SP_MFMA_PORTD_RD_CONFIG__TYPE_MASK 0x0000000EL
+#define SP_MFMA_PORTD_RD_CONFIG__LAST_PASS_MASK 0x000001F0L
+#define SP_MFMA_PORTD_RD_CONFIG__PORTD_PATTERN_MASK 0x1FFFFE00L
+//SH_CAC_CONFIG
+#define SH_CAC_CONFIG__SQG_UTCL1_REPEATER_FGCG_DISABLE__SHIFT 0x0
+#define SH_CAC_CONFIG__SQC_UTCL1_REPEATER_FGCG_DISABLE__SHIFT 0x1
+#define SH_CAC_CONFIG__SPI_SQ_CMD_REPEATER_FGCG_DISABLE__SHIFT 0x2
+#define SH_CAC_CONFIG__SQ_MSG_REPEATER_FGCG_DISABLE__SHIFT 0x3
+#define SH_CAC_CONFIG__SQC_TC_REPEATER_FGCG_DISABLE__SHIFT 0x4
+#define SH_CAC_CONFIG__SQC_SQ_REPEATER_FGCG_DISABLE__SHIFT 0x5
+#define SH_CAC_CONFIG__SQG_TC_REPEATER_FGCG_DISABLE__SHIFT 0x6
+#define SH_CAC_CONFIG__SQC_DISABLE_RAM_CLOCK_GATING__SHIFT 0x8
+#define SH_CAC_CONFIG__SQG_DISABLE_RAM_CLOCK_GATING__SHIFT 0x9
+#define SH_CAC_CONFIG__SQC_MGCG_CLOCK_OFF_DELAY_CNT__SHIFT 0x10
+#define SH_CAC_CONFIG__SQC_MGCG_DISABLE__SHIFT 0x14
+#define SH_CAC_CONFIG__SQC_TC_REQ_CLKEN_CHICKENBIT__SHIFT 0x1c
+#define SH_CAC_CONFIG__SQC_ICACHE_CTRL_MGCG_DISABLE__SHIFT 0x1d
+#define SH_CAC_CONFIG__SQC_DCACHE_CTRL_MGCG_DISABLE__SHIFT 0x1e
+#define SH_CAC_CONFIG__SQC_DISABLE_UTCL1_FGCG_PADDR__SHIFT 0x1f
+#define SH_CAC_CONFIG__SQG_UTCL1_REPEATER_FGCG_DISABLE_MASK 0x00000001L
+#define SH_CAC_CONFIG__SQC_UTCL1_REPEATER_FGCG_DISABLE_MASK 0x00000002L
+#define SH_CAC_CONFIG__SPI_SQ_CMD_REPEATER_FGCG_DISABLE_MASK 0x00000004L
+#define SH_CAC_CONFIG__SQ_MSG_REPEATER_FGCG_DISABLE_MASK 0x00000008L
+#define SH_CAC_CONFIG__SQC_TC_REPEATER_FGCG_DISABLE_MASK 0x00000010L
+#define SH_CAC_CONFIG__SQC_SQ_REPEATER_FGCG_DISABLE_MASK 0x00000020L
+#define SH_CAC_CONFIG__SQG_TC_REPEATER_FGCG_DISABLE_MASK 0x00000040L
+#define SH_CAC_CONFIG__SQC_DISABLE_RAM_CLOCK_GATING_MASK 0x00000100L
+#define SH_CAC_CONFIG__SQG_DISABLE_RAM_CLOCK_GATING_MASK 0x00000200L
+#define SH_CAC_CONFIG__SQC_MGCG_CLOCK_OFF_DELAY_CNT_MASK 0x000F0000L
+#define SH_CAC_CONFIG__SQC_MGCG_DISABLE_MASK 0x0FF00000L
+#define SH_CAC_CONFIG__SQC_TC_REQ_CLKEN_CHICKENBIT_MASK 0x10000000L
+#define SH_CAC_CONFIG__SQC_ICACHE_CTRL_MGCG_DISABLE_MASK 0x20000000L
+#define SH_CAC_CONFIG__SQC_DCACHE_CTRL_MGCG_DISABLE_MASK 0x40000000L
+#define SH_CAC_CONFIG__SQC_DISABLE_UTCL1_FGCG_PADDR_MASK 0x80000000L
+//SQ_DEBUG_STS_GLOBAL2
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1__SHIFT 0x8
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST__SHIFT 0x18
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK 0x000000FFL
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1_MASK 0x0000FF00L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED_MASK 0x00FF0000L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST_MASK 0xFF000000L
+//SQ_DEBUG_STS_GLOBAL3
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG__SHIFT 0x4
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD_MASK 0x0000000FL
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG_MASK 0x000003F0L
+//CC_GC_SHADER_RATE_CONFIG
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
+#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
+//GC_USER_SHADER_RATE_CONFIG
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
+#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
+//SQ_INTERRUPT_AUTO_MASK
+#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
+#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
+//SQ_INTERRUPT_MSG_CTRL
+#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
+#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
+//SQ_DEBUG_PERFCOUNT_TRAP
+#define SQ_DEBUG_PERFCOUNT_TRAP__ENABLE__SHIFT 0x0
+#define SQ_DEBUG_PERFCOUNT_TRAP__COUNTER__SHIFT 0x1
+#define SQ_DEBUG_PERFCOUNT_TRAP__LIMIT__SHIFT 0x4
+#define SQ_DEBUG_PERFCOUNT_TRAP__ENABLE_MASK 0x00000001L
+#define SQ_DEBUG_PERFCOUNT_TRAP__COUNTER_MASK 0x0000000EL
+#define SQ_DEBUG_PERFCOUNT_TRAP__LIMIT_MASK 0x0FFFFFF0L
+//SQ_UTCL1_CNTL1
+#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define SQ_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define SQ_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define SQ_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL__SHIFT 0x19
+#define SQ_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define SQ_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define SQ_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define SQ_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_MASK 0x02000000L
+#define SQ_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//SQ_UTCL1_CNTL2
+#define SQ_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
+#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define SQ_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define SQ_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define SQ_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define SQ_UTCL1_CNTL2__RETRY_TIMER__SHIFT 0x10
+#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define SQ_UTCL1_CNTL2__PREFETCH_PAGE__SHIFT 0x1c
+#define SQ_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
+#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define SQ_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define SQ_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define SQ_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define SQ_UTCL1_CNTL2__RETRY_TIMER_MASK 0x007F0000L
+#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define SQ_UTCL1_CNTL2__PREFETCH_PAGE_MASK 0xF0000000L
+//SQ_UTCL1_STATUS
+#define SQ_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define SQ_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define SQ_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define SQ_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define SQ_UTCL1_STATUS__UNUSED__SHIFT 0x10
+#define SQ_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define SQ_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define SQ_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define SQ_UTCL1_STATUS__RESERVED_MASK 0x0000FFF8L
+#define SQ_UTCL1_STATUS__UNUSED_MASK 0xFFFF0000L
+//SQ_FED_INTERRUPT_STATUS
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_STATUS__SHIFT 0x0
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_SIMD_ID__SHIFT 0x2
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_WAVE_ID__SHIFT 0x4
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_CU_ID__SHIFT 0x8
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_VM_ID__SHIFT 0xc
+#define SQ_FED_INTERRUPT_STATUS__TO_RSMU_DISABLE__SHIFT 0x10
+#define SQ_FED_INTERRUPT_STATUS__TO_IH_DISABLE__SHIFT 0x11
+#define SQ_FED_INTERRUPT_STATUS__FED_HALT_DISABLE__SHIFT 0x12
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_STATUS_MASK 0x00000001L
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_SIMD_ID_MASK 0x0000000CL
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_WAVE_ID_MASK 0x000000F0L
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_CU_ID_MASK 0x00000F00L
+#define SQ_FED_INTERRUPT_STATUS__INTERRUPT_VM_ID_MASK 0x0000F000L
+#define SQ_FED_INTERRUPT_STATUS__TO_RSMU_DISABLE_MASK 0x00010000L
+#define SQ_FED_INTERRUPT_STATUS__TO_IH_DISABLE_MASK 0x00020000L
+#define SQ_FED_INTERRUPT_STATUS__FED_HALT_DISABLE_MASK 0x00040000L
+//SQ_CGTS_CONFIG
+#define SQ_CGTS_CONFIG__DGEMM_EXTRA_BUSY_PASS__SHIFT 0x0
+#define SQ_CGTS_CONFIG__XDL_EXTRA_BUSY_PASS__SHIFT 0x4
+#define SQ_CGTS_CONFIG__VALU_EXTRA_BUSY_PASS__SHIFT 0x8
+#define SQ_CGTS_CONFIG__DLOP_EXTRA_BUSY_PASS__SHIFT 0xc
+#define SQ_CGTS_CONFIG__XDL_EXTRA_GAP_PASS__SHIFT 0x10
+#define SQ_CGTS_CONFIG__DGEMM_EXTRA_GAP_PASS__SHIFT 0x12
+#define SQ_CGTS_CONFIG__DLOP_EXTRA_GAP_PASS__SHIFT 0x14
+#define SQ_CGTS_CONFIG__DGEMM_EXTRA_BUSY_PASS_MASK 0x0000000FL
+#define SQ_CGTS_CONFIG__XDL_EXTRA_BUSY_PASS_MASK 0x000000F0L
+#define SQ_CGTS_CONFIG__VALU_EXTRA_BUSY_PASS_MASK 0x00000F00L
+#define SQ_CGTS_CONFIG__DLOP_EXTRA_BUSY_PASS_MASK 0x0000F000L
+#define SQ_CGTS_CONFIG__XDL_EXTRA_GAP_PASS_MASK 0x00030000L
+#define SQ_CGTS_CONFIG__DGEMM_EXTRA_GAP_PASS_MASK 0x000C0000L
+#define SQ_CGTS_CONFIG__DLOP_EXTRA_GAP_PASS_MASK 0x00300000L
+//SQ_SHADER_TBA_LO
+#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TBA_HI
+#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
+//SQ_SHADER_TMA_LO
+#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TMA_HI
+#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
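+/*
+ * Editor's note (illustrative only, not part of the generated header): the
+ * SQ_SHADER_TBA/TMA registers form LO/HI pairs, with 32 address bits in *_LO
+ * (ADDR_LO, mask 0xFFFFFFFF) and 8 more in *_HI (ADDR_HI, mask 0xFF). A
+ * minimal sketch of splitting a 64-bit value across such a pair, assuming a
+ * hypothetical addr variable and leaving any hardware alignment or
+ * right-shift convention out of scope:
+ *
+ *	u32 lo = lower_32_bits(addr) & SQ_SHADER_TBA_LO__ADDR_LO_MASK;
+ *	u32 hi = upper_32_bits(addr) & SQ_SHADER_TBA_HI__ADDR_HI_MASK;
+ */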
+//SQC_DSM_CNTL
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define SQC_DSM_CNTL__DATA_CU3_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define SQC_DSM_CNTL__DATA_CU3_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define SQC_DSM_CNTL__DATA_CU3_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define SQC_DSM_CNTL__DATA_CU3_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define SQC_DSM_CNTL__DATA_CU3_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define SQC_DSM_CNTL__DATA_CU3_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define SQC_DSM_CNTL__DATA_CU3_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define SQC_DSM_CNTL__DATA_CU3_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQC_DSM_CNTLA
+#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQC_DSM_CNTLB
+#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQC_DSM_CNTL2
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0xe
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x11
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x14
+#define SQC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define SQC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//SQC_DSM_CNTL2A
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//SQC_DSM_CNTL2B
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//SQC_DSM_CNTL2E
+#define SQC_DSM_CNTL2E__DATA_CU3_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2E__DATA_CU3_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2E__DATA_CU3_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2E__DATA_CU3_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2E__DATA_CU3_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2E__DATA_CU3_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2E__DATA_CU3_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2E__DATA_CU3_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
+//SQC_EDC_FUE_CNTL
+#define SQC_EDC_FUE_CNTL__BLOCK_FUE_FLAGS__SHIFT 0x0
+#define SQC_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES__SHIFT 0x10
+#define SQC_EDC_FUE_CNTL__BLOCK_FUE_FLAGS_MASK 0x0000FFFFL
+#define SQC_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES_MASK 0xFFFF0000L
+//SQC_EDC_CNT2
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT_MASK 0x00C00000L
+//SQC_EDC_CNT3
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT_MASK 0x000C0000L
+//SQC_EDC_PARITY_CNT3
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L
+//SQ_DEBUG
+#define SQ_DEBUG__SINGLE_MEMOP__SHIFT 0x0
+#define SQ_DEBUG__SINGLE_ALU_OP__SHIFT 0x1
+#define SQ_DEBUG__SINGLE_MEMOP_MASK 0x00000001L
+#define SQ_DEBUG__SINGLE_ALU_OP_MASK 0x00000002L
+//SQ_PERF_SNAPSHOT_CTRL
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTVAL__SHIFT 0x0
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL__SHIFT 0x5
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK__SHIFT 0x6
+#define SQ_PERF_SNAPSHOT_CTRL__ENABLE__SHIFT 0x16
+#define SQ_PERF_SNAPSHOT_CTRL__TEST_MODE__SHIFT 0x17
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTVAL_MASK 0x0000001FL
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL_MASK 0x00000020L
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK_MASK 0x003FFFC0L
+#define SQ_PERF_SNAPSHOT_CTRL__ENABLE_MASK 0x00400000L
+#define SQ_PERF_SNAPSHOT_CTRL__TEST_MODE_MASK 0x00800000L
+//SQ_DEBUG_FOR_INTERNAL_CTRL
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__FLAG_WR_ADDR_MATCH_FLAT_SCRATCH__SHIFT 0x0
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__FLAG_RD_FLAT_SCRATCH_RETURN_ZERO__SHIFT 0x1
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__DISABLE_FLAT_SCRATCH_WRITE_PROTECT__SHIFT 0x2
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__DISABLE_FLAT_SCRATCH_READ_PROTECT__SHIFT 0x3
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__DISABLE_SNAPSHOT_TRAP_ON_BARRIER__SHIFT 0x4
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__ENABLE_DED_TRIGGER_HALT__SHIFT 0x5
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__FLAG_WR_ADDR_MATCH_FLAT_SCRATCH_MASK 0x00000001L
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__FLAG_RD_FLAT_SCRATCH_RETURN_ZERO_MASK 0x00000002L
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__DISABLE_FLAT_SCRATCH_WRITE_PROTECT_MASK 0x00000004L
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__DISABLE_FLAT_SCRATCH_READ_PROTECT_MASK 0x00000008L
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__DISABLE_SNAPSHOT_TRAP_ON_BARRIER_MASK 0x00000010L
+#define SQ_DEBUG_FOR_INTERNAL_CTRL__ENABLE_DED_TRIGGER_HALT_MASK 0x00000020L
+//SQ_REG_TIMESTAMP
+#define SQ_REG_TIMESTAMP__TIMESTAMP__SHIFT 0x0
+#define SQ_REG_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
+//SQ_CMD_TIMESTAMP
+#define SQ_CMD_TIMESTAMP__TIMESTAMP__SHIFT 0x0
+#define SQ_CMD_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
+//SQ_HOSTTRAP_STATUS
+#define SQ_HOSTTRAP_STATUS__HTPENDINGCOUNT__SHIFT 0x0
+#define SQ_HOSTTRAP_STATUS__HTPENDING_OVERRIDE__SHIFT 0x8
+#define SQ_HOSTTRAP_STATUS__HTPENDINGCOUNT_MASK 0x000000FFL
+#define SQ_HOSTTRAP_STATUS__HTPENDING_OVERRIDE_MASK 0x00000100L
+//SQ_IND_INDEX
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
+#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x4
+#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x6
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xc
+#define SQ_IND_INDEX__FORCE_READ__SHIFT 0xd
+#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0xe
+#define SQ_IND_INDEX__UNINDEXED__SHIFT 0xf
+#define SQ_IND_INDEX__INDEX__SHIFT 0x10
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000000FL
+#define SQ_IND_INDEX__SIMD_ID_MASK 0x00000030L
+#define SQ_IND_INDEX__THREAD_ID_MASK 0x00000FC0L
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00001000L
+#define SQ_IND_INDEX__FORCE_READ_MASK 0x00002000L
+#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x00004000L
+#define SQ_IND_INDEX__UNINDEXED_MASK 0x00008000L
+#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
+//SQ_IND_DATA
+#define SQ_IND_DATA__DATA__SHIFT 0x0
+#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//SQ_CONFIG1
+#define SQ_CONFIG1__DISABLE_XDL_PORTD_CO_EXEC__SHIFT 0x0
+#define SQ_CONFIG1__DISABLE_MGCG_ON_IBUF__SHIFT 0x1
+#define SQ_CONFIG1__DISABLE_MGCG_ON_PERF__SHIFT 0x2
+#define SQ_CONFIG1__DISABLE_MGCG_ON_EXP__SHIFT 0x3
+#define SQ_CONFIG1__DISABLE_MGCG_ON_SCA__SHIFT 0x4
+#define SQ_CONFIG1__DISABLE_MGCG_ON_SREG__SHIFT 0x5
+#define SQ_CONFIG1__DISABLE_MGCG_ON_VDEC__SHIFT 0x6
+#define SQ_CONFIG1__EXTRA_DGEMM_PROTECT_EN__SHIFT 0x7
+#define SQ_CONFIG1__DISABLE_VALU_COEXEC__SHIFT 0x8
+#define SQ_CONFIG1__DISABLE_WAVE_VALU_COEXEC__SHIFT 0x9
+#define SQ_CONFIG1__VGPR_ARB_PLUS1__SHIFT 0xa
+#define SQ_CONFIG1__DISABLE_VGPR_COLLAPSE__SHIFT 0xb
+#define SQ_CONFIG1__DISABLE_XNACK_CHECK_IN_RETRY_DISABLE__SHIFT 0xc
+#define SQ_CONFIG1__DISABLE_BARRIER_ADDR_WATCH__SHIFT 0xd
+#define SQ_CONFIG1__DISABLE_BARRIER_MEMVIOL_WAIT__SHIFT 0xe
+#define SQ_CONFIG1__DISABLE_BARRIER_MEMVIOL_BACKOFF__SHIFT 0xf
+#define SQ_CONFIG1__EXPAND_SP_CMD_FGCG__SHIFT 0x10
+#define SQ_CONFIG1__EXPAND_SP_EXPORT_FGCG__SHIFT 0x11
+#define SQ_CONFIG1__DISABLE_MGCG_ON_PERF_SNAP__SHIFT 0x12
+#define SQ_CONFIG1__DISABLE_VALU_COEXEC_MODE_AUTO_CLEAN__SHIFT 0x13
+#define SQ_CONFIG1__SP_CORE1_MGCG_OVERRIDE__SHIFT 0x14
+#define SQ_CONFIG1__SP_CORE4_MGCG_OVERRIDE__SHIFT 0x15
+#define SQ_CONFIG1__SP_CORE5_MGCG_OVERRIDE__SHIFT 0x16
+#define SQ_CONFIG1__DISABLE_SP_VGPR_COLLAPSE__SHIFT 0x17
+#define SQ_CONFIG1__SP_FGCG_REP_OVERRIDE__SHIFT 0x18
+#define SQ_CONFIG1__DPMACC_MGCG_OVERRIDE__SHIFT 0x19
+#define SQ_CONFIG1__XDLMACC_MGCG_OVERRIDE__SHIFT 0x1a
+#define SQ_CONFIG1__TRANSMACC_MGCG_OVERRIDE__SHIFT 0x1b
+#define SQ_CONFIG1__SPMACC_MGCG_OVERRIDE__SHIFT 0x1c
+#define SQ_CONFIG1__DPMACC_DGEMM2X_MGCG_OVERRIDE__SHIFT 0x1d
+#define SQ_CONFIG1__DISABLE_SP_VGPR_READ_SKIP__SHIFT 0x1e
+#define SQ_CONFIG1__SP_SRC_1ST_BUFFER_MGCG_OVERRIDE__SHIFT 0x1f
+#define SQ_CONFIG1__DISABLE_XDL_PORTD_CO_EXEC_MASK 0x00000001L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_IBUF_MASK 0x00000002L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_PERF_MASK 0x00000004L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_EXP_MASK 0x00000008L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_SCA_MASK 0x00000010L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_SREG_MASK 0x00000020L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_VDEC_MASK 0x00000040L
+#define SQ_CONFIG1__EXTRA_DGEMM_PROTECT_EN_MASK 0x00000080L
+#define SQ_CONFIG1__DISABLE_VALU_COEXEC_MASK 0x00000100L
+#define SQ_CONFIG1__DISABLE_WAVE_VALU_COEXEC_MASK 0x00000200L
+#define SQ_CONFIG1__VGPR_ARB_PLUS1_MASK 0x00000400L
+#define SQ_CONFIG1__DISABLE_VGPR_COLLAPSE_MASK 0x00000800L
+#define SQ_CONFIG1__DISABLE_XNACK_CHECK_IN_RETRY_DISABLE_MASK 0x00001000L
+#define SQ_CONFIG1__DISABLE_BARRIER_ADDR_WATCH_MASK 0x00002000L
+#define SQ_CONFIG1__DISABLE_BARRIER_MEMVIOL_WAIT_MASK 0x00004000L
+#define SQ_CONFIG1__DISABLE_BARRIER_MEMVIOL_BACKOFF_MASK 0x00008000L
+#define SQ_CONFIG1__EXPAND_SP_CMD_FGCG_MASK 0x00010000L
+#define SQ_CONFIG1__EXPAND_SP_EXPORT_FGCG_MASK 0x00020000L
+#define SQ_CONFIG1__DISABLE_MGCG_ON_PERF_SNAP_MASK 0x00040000L
+#define SQ_CONFIG1__DISABLE_VALU_COEXEC_MODE_AUTO_CLEAN_MASK 0x00080000L
+#define SQ_CONFIG1__SP_CORE1_MGCG_OVERRIDE_MASK 0x00100000L
+#define SQ_CONFIG1__SP_CORE4_MGCG_OVERRIDE_MASK 0x00200000L
+#define SQ_CONFIG1__SP_CORE5_MGCG_OVERRIDE_MASK 0x00400000L
+#define SQ_CONFIG1__DISABLE_SP_VGPR_COLLAPSE_MASK 0x00800000L
+#define SQ_CONFIG1__SP_FGCG_REP_OVERRIDE_MASK 0x01000000L
+#define SQ_CONFIG1__DPMACC_MGCG_OVERRIDE_MASK 0x02000000L
+#define SQ_CONFIG1__XDLMACC_MGCG_OVERRIDE_MASK 0x04000000L
+#define SQ_CONFIG1__TRANSMACC_MGCG_OVERRIDE_MASK 0x08000000L
+#define SQ_CONFIG1__SPMACC_MGCG_OVERRIDE_MASK 0x10000000L
+#define SQ_CONFIG1__DPMACC_DGEMM2X_MGCG_OVERRIDE_MASK 0x20000000L
+#define SQ_CONFIG1__DISABLE_SP_VGPR_READ_SKIP_MASK 0x40000000L
+#define SQ_CONFIG1__SP_SRC_1ST_BUFFER_MGCG_OVERRIDE_MASK 0x80000000L
+//SQ_CMD
+#define SQ_CMD__CMD__SHIFT 0x0
+#define SQ_CMD__MODE__SHIFT 0x4
+#define SQ_CMD__CHECK_VMID__SHIFT 0x7
+#define SQ_CMD__DATA__SHIFT 0x8
+#define SQ_CMD__WAVE_ID__SHIFT 0x10
+#define SQ_CMD__SIMD_ID__SHIFT 0x14
+#define SQ_CMD__QUEUE_ID__SHIFT 0x18
+#define SQ_CMD__VM_ID__SHIFT 0x1c
+#define SQ_CMD__CMD_MASK 0x00000007L
+#define SQ_CMD__MODE_MASK 0x00000070L
+#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
+#define SQ_CMD__DATA_MASK 0x00000F00L
+#define SQ_CMD__WAVE_ID_MASK 0x000F0000L
+#define SQ_CMD__SIMD_ID_MASK 0x00300000L
+#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
+#define SQ_CMD__VM_ID_MASK 0xF0000000L
+//SQ_TIME_HI
+#define SQ_TIME_HI__TIME__SHIFT 0x0
+#define SQ_TIME_HI__TIME_MASK 0xFFFFFFFFL
+//SQ_TIME_LO
+#define SQ_TIME_LO__TIME__SHIFT 0x0
+#define SQ_TIME_LO__TIME_MASK 0xFFFFFFFFL
+//SQ_DS_0
+#define SQ_DS_0__OFFSET0__SHIFT 0x0
+#define SQ_DS_0__OFFSET1__SHIFT 0x8
+#define SQ_DS_0__GDS__SHIFT 0x10
+#define SQ_DS_0__OP__SHIFT 0x11
+#define SQ_DS_0__ACC__SHIFT 0x19
+#define SQ_DS_0__ENCODING__SHIFT 0x1a
+#define SQ_DS_0__OFFSET0_MASK 0x000000FFL
+#define SQ_DS_0__OFFSET1_MASK 0x0000FF00L
+#define SQ_DS_0__GDS_MASK 0x00010000L
+#define SQ_DS_0__OP_MASK 0x01FE0000L
+#define SQ_DS_0__ACC_MASK 0x02000000L
+#define SQ_DS_0__ENCODING_MASK 0xFC000000L
+//SQ_DS_1
+#define SQ_DS_1__ADDR__SHIFT 0x0
+#define SQ_DS_1__DATA0__SHIFT 0x8
+#define SQ_DS_1__DATA1__SHIFT 0x10
+#define SQ_DS_1__VDST__SHIFT 0x18
+#define SQ_DS_1__ADDR_MASK 0x000000FFL
+#define SQ_DS_1__DATA0_MASK 0x0000FF00L
+#define SQ_DS_1__DATA1_MASK 0x00FF0000L
+#define SQ_DS_1__VDST_MASK 0xFF000000L
+//SQ_EXP_0
+#define SQ_EXP_0__EN__SHIFT 0x0
+#define SQ_EXP_0__TGT__SHIFT 0x4
+#define SQ_EXP_0__COMPR__SHIFT 0xa
+#define SQ_EXP_0__DONE__SHIFT 0xb
+#define SQ_EXP_0__VM__SHIFT 0xc
+#define SQ_EXP_0__ENCODING__SHIFT 0x1a
+#define SQ_EXP_0__EN_MASK 0x0000000FL
+#define SQ_EXP_0__TGT_MASK 0x000003F0L
+#define SQ_EXP_0__COMPR_MASK 0x00000400L
+#define SQ_EXP_0__DONE_MASK 0x00000800L
+#define SQ_EXP_0__VM_MASK 0x00001000L
+#define SQ_EXP_0__ENCODING_MASK 0xFC000000L
+//SQ_EXP_1
+#define SQ_EXP_1__VSRC0__SHIFT 0x0
+#define SQ_EXP_1__VSRC1__SHIFT 0x8
+#define SQ_EXP_1__VSRC2__SHIFT 0x10
+#define SQ_EXP_1__VSRC3__SHIFT 0x18
+#define SQ_EXP_1__VSRC0_MASK 0x000000FFL
+#define SQ_EXP_1__VSRC1_MASK 0x0000FF00L
+#define SQ_EXP_1__VSRC2_MASK 0x00FF0000L
+#define SQ_EXP_1__VSRC3_MASK 0xFF000000L
+//SQ_FLAT_0
+#define SQ_FLAT_0__OFFSET__SHIFT 0x0
+#define SQ_FLAT_0__SVE__SHIFT 0xd
+#define SQ_FLAT_0__SEG__SHIFT 0xe
+#define SQ_FLAT_0__SC0__SHIFT 0x10
+#define SQ_FLAT_0__NT__SHIFT 0x11
+#define SQ_FLAT_0__OP__SHIFT 0x12
+#define SQ_FLAT_0__SC1__SHIFT 0x19
+#define SQ_FLAT_0__ENCODING__SHIFT 0x1a
+#define SQ_FLAT_0__OFFSET_MASK 0x00000FFFL
+#define SQ_FLAT_0__SVE_MASK 0x00002000L
+#define SQ_FLAT_0__SEG_MASK 0x0000C000L
+#define SQ_FLAT_0__SC0_MASK 0x00010000L
+#define SQ_FLAT_0__NT_MASK 0x00020000L
+#define SQ_FLAT_0__OP_MASK 0x01FC0000L
+#define SQ_FLAT_0__SC1_MASK 0x02000000L
+#define SQ_FLAT_0__ENCODING_MASK 0xFC000000L
+//SQ_FLAT_1
+#define SQ_FLAT_1__ADDR__SHIFT 0x0
+#define SQ_FLAT_1__DATA__SHIFT 0x8
+#define SQ_FLAT_1__SADDR__SHIFT 0x10
+#define SQ_FLAT_1__ACC__SHIFT 0x17
+#define SQ_FLAT_1__VDST__SHIFT 0x18
+#define SQ_FLAT_1__ADDR_MASK 0x000000FFL
+#define SQ_FLAT_1__DATA_MASK 0x0000FF00L
+#define SQ_FLAT_1__SADDR_MASK 0x007F0000L
+#define SQ_FLAT_1__ACC_MASK 0x00800000L
+#define SQ_FLAT_1__VDST_MASK 0xFF000000L
+//SQ_GLBL_0
+#define SQ_GLBL_0__OFFSET__SHIFT 0x0
+#define SQ_GLBL_0__SVE__SHIFT 0xd
+#define SQ_GLBL_0__SEG__SHIFT 0xe
+#define SQ_GLBL_0__SC0__SHIFT 0x10
+#define SQ_GLBL_0__NT__SHIFT 0x11
+#define SQ_GLBL_0__OP__SHIFT 0x12
+#define SQ_GLBL_0__SC1__SHIFT 0x19
+#define SQ_GLBL_0__ENCODING__SHIFT 0x1a
+#define SQ_GLBL_0__OFFSET_MASK 0x00001FFFL
+#define SQ_GLBL_0__SVE_MASK 0x00002000L
+#define SQ_GLBL_0__SEG_MASK 0x0000C000L
+#define SQ_GLBL_0__SC0_MASK 0x00010000L
+#define SQ_GLBL_0__NT_MASK 0x00020000L
+#define SQ_GLBL_0__OP_MASK 0x01FC0000L
+#define SQ_GLBL_0__SC1_MASK 0x02000000L
+#define SQ_GLBL_0__ENCODING_MASK 0xFC000000L
+//SQ_GLBL_1
+#define SQ_GLBL_1__ADDR__SHIFT 0x0
+#define SQ_GLBL_1__DATA__SHIFT 0x8
+#define SQ_GLBL_1__SADDR__SHIFT 0x10
+#define SQ_GLBL_1__ACC__SHIFT 0x17
+#define SQ_GLBL_1__VDST__SHIFT 0x18
+#define SQ_GLBL_1__ADDR_MASK 0x000000FFL
+#define SQ_GLBL_1__DATA_MASK 0x0000FF00L
+#define SQ_GLBL_1__SADDR_MASK 0x007F0000L
+#define SQ_GLBL_1__ACC_MASK 0x00800000L
+#define SQ_GLBL_1__VDST_MASK 0xFF000000L
+//SQ_INST
+#define SQ_INST__ENCODING__SHIFT 0x0
+#define SQ_INST__ENCODING_MASK 0xFFFFFFFFL
+//SQ_MIMG_0
+#define SQ_MIMG_0__OPM__SHIFT 0x0
+#define SQ_MIMG_0__SC1__SHIFT 0x7
+#define SQ_MIMG_0__DMASK__SHIFT 0x8
+#define SQ_MIMG_0__UNORM__SHIFT 0xc
+#define SQ_MIMG_0__SC0__SHIFT 0xd
+#define SQ_MIMG_0__DA__SHIFT 0xe
+#define SQ_MIMG_0__A16__SHIFT 0xf
+#define SQ_MIMG_0__ACC__SHIFT 0x10
+#define SQ_MIMG_0__LWE__SHIFT 0x11
+#define SQ_MIMG_0__OP__SHIFT 0x12
+#define SQ_MIMG_0__NT__SHIFT 0x19
+#define SQ_MIMG_0__ENCODING__SHIFT 0x1a
+#define SQ_MIMG_0__OPM_MASK 0x00000001L
+#define SQ_MIMG_0__SC1_MASK 0x00000080L
+#define SQ_MIMG_0__DMASK_MASK 0x00000F00L
+#define SQ_MIMG_0__UNORM_MASK 0x00001000L
+#define SQ_MIMG_0__SC0_MASK 0x00002000L
+#define SQ_MIMG_0__DA_MASK 0x00004000L
+#define SQ_MIMG_0__A16_MASK 0x00008000L
+#define SQ_MIMG_0__ACC_MASK 0x00010000L
+#define SQ_MIMG_0__LWE_MASK 0x00020000L
+#define SQ_MIMG_0__OP_MASK 0x01FC0000L
+#define SQ_MIMG_0__NT_MASK 0x02000000L
+#define SQ_MIMG_0__ENCODING_MASK 0xFC000000L
+//SQ_MIMG_1
+#define SQ_MIMG_1__VADDR__SHIFT 0x0
+#define SQ_MIMG_1__VDATA__SHIFT 0x8
+#define SQ_MIMG_1__SRSRC__SHIFT 0x10
+#define SQ_MIMG_1__SSAMP__SHIFT 0x15
+#define SQ_MIMG_1__D16__SHIFT 0x1f
+#define SQ_MIMG_1__VADDR_MASK 0x000000FFL
+#define SQ_MIMG_1__VDATA_MASK 0x0000FF00L
+#define SQ_MIMG_1__SRSRC_MASK 0x001F0000L
+#define SQ_MIMG_1__SSAMP_MASK 0x03E00000L
+#define SQ_MIMG_1__D16_MASK 0x80000000L
+//SQ_MTBUF_0
+#define SQ_MTBUF_0__OFFSET__SHIFT 0x0
+#define SQ_MTBUF_0__OFFEN__SHIFT 0xc
+#define SQ_MTBUF_0__IDXEN__SHIFT 0xd
+#define SQ_MTBUF_0__SC0__SHIFT 0xe
+#define SQ_MTBUF_0__OP__SHIFT 0xf
+#define SQ_MTBUF_0__DFMT__SHIFT 0x13
+#define SQ_MTBUF_0__NFMT__SHIFT 0x17
+#define SQ_MTBUF_0__ENCODING__SHIFT 0x1a
+#define SQ_MTBUF_0__OFFSET_MASK 0x00000FFFL
+#define SQ_MTBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MTBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MTBUF_0__SC0_MASK 0x00004000L
+#define SQ_MTBUF_0__OP_MASK 0x00078000L
+#define SQ_MTBUF_0__DFMT_MASK 0x00780000L
+#define SQ_MTBUF_0__NFMT_MASK 0x03800000L
+#define SQ_MTBUF_0__ENCODING_MASK 0xFC000000L
+//SQ_MTBUF_1
+#define SQ_MTBUF_1__VADDR__SHIFT 0x0
+#define SQ_MTBUF_1__VDATA__SHIFT 0x8
+#define SQ_MTBUF_1__SRSRC__SHIFT 0x10
+#define SQ_MTBUF_1__SC1__SHIFT 0x15
+#define SQ_MTBUF_1__NT__SHIFT 0x16
+#define SQ_MTBUF_1__ACC__SHIFT 0x17
+#define SQ_MTBUF_1__SOFFSET__SHIFT 0x18
+#define SQ_MTBUF_1__VADDR_MASK 0x000000FFL
+#define SQ_MTBUF_1__VDATA_MASK 0x0000FF00L
+#define SQ_MTBUF_1__SRSRC_MASK 0x001F0000L
+#define SQ_MTBUF_1__SC1_MASK 0x00200000L
+#define SQ_MTBUF_1__NT_MASK 0x00400000L
+#define SQ_MTBUF_1__ACC_MASK 0x00800000L
+#define SQ_MTBUF_1__SOFFSET_MASK 0xFF000000L
+//SQ_MUBUF_0
+#define SQ_MUBUF_0__OFFSET__SHIFT 0x0
+#define SQ_MUBUF_0__OFFEN__SHIFT 0xc
+#define SQ_MUBUF_0__IDXEN__SHIFT 0xd
+#define SQ_MUBUF_0__SC0__SHIFT 0xe
+#define SQ_MUBUF_0__SC1__SHIFT 0xf
+#define SQ_MUBUF_0__LDS__SHIFT 0x10
+#define SQ_MUBUF_0__NT__SHIFT 0x11
+#define SQ_MUBUF_0__OP__SHIFT 0x12
+#define SQ_MUBUF_0__ENCODING__SHIFT 0x1a
+#define SQ_MUBUF_0__OFFSET_MASK 0x00000FFFL
+#define SQ_MUBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MUBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MUBUF_0__SC0_MASK 0x00004000L
+#define SQ_MUBUF_0__SC1_MASK 0x00008000L
+#define SQ_MUBUF_0__LDS_MASK 0x00010000L
+#define SQ_MUBUF_0__NT_MASK 0x00020000L
+#define SQ_MUBUF_0__OP_MASK 0x01FC0000L
+#define SQ_MUBUF_0__ENCODING_MASK 0xFC000000L
+//SQ_MUBUF_1
+#define SQ_MUBUF_1__VADDR__SHIFT 0x0
+#define SQ_MUBUF_1__VDATA__SHIFT 0x8
+#define SQ_MUBUF_1__SRSRC__SHIFT 0x10
+#define SQ_MUBUF_1__ACC__SHIFT 0x17
+#define SQ_MUBUF_1__SOFFSET__SHIFT 0x18
+#define SQ_MUBUF_1__VADDR_MASK 0x000000FFL
+#define SQ_MUBUF_1__VDATA_MASK 0x0000FF00L
+#define SQ_MUBUF_1__SRSRC_MASK 0x001F0000L
+#define SQ_MUBUF_1__ACC_MASK 0x00800000L
+#define SQ_MUBUF_1__SOFFSET_MASK 0xFF000000L
+//SQ_SCRATCH_0
+#define SQ_SCRATCH_0__OFFSET__SHIFT 0x0
+#define SQ_SCRATCH_0__SVE__SHIFT 0xd
+#define SQ_SCRATCH_0__SEG__SHIFT 0xe
+#define SQ_SCRATCH_0__SC0__SHIFT 0x10
+#define SQ_SCRATCH_0__NT__SHIFT 0x11
+#define SQ_SCRATCH_0__OP__SHIFT 0x12
+#define SQ_SCRATCH_0__SC1__SHIFT 0x19
+#define SQ_SCRATCH_0__ENCODING__SHIFT 0x1a
+#define SQ_SCRATCH_0__OFFSET_MASK 0x00001FFFL
+#define SQ_SCRATCH_0__SVE_MASK 0x00002000L
+#define SQ_SCRATCH_0__SEG_MASK 0x0000C000L
+#define SQ_SCRATCH_0__SC0_MASK 0x00010000L
+#define SQ_SCRATCH_0__NT_MASK 0x00020000L
+#define SQ_SCRATCH_0__OP_MASK 0x01FC0000L
+#define SQ_SCRATCH_0__SC1_MASK 0x02000000L
+#define SQ_SCRATCH_0__ENCODING_MASK 0xFC000000L
+//SQ_SCRATCH_1
+#define SQ_SCRATCH_1__ADDR__SHIFT 0x0
+#define SQ_SCRATCH_1__DATA__SHIFT 0x8
+#define SQ_SCRATCH_1__SADDR__SHIFT 0x10
+#define SQ_SCRATCH_1__ACC__SHIFT 0x17
+#define SQ_SCRATCH_1__VDST__SHIFT 0x18
+#define SQ_SCRATCH_1__ADDR_MASK 0x000000FFL
+#define SQ_SCRATCH_1__DATA_MASK 0x0000FF00L
+#define SQ_SCRATCH_1__SADDR_MASK 0x007F0000L
+#define SQ_SCRATCH_1__ACC_MASK 0x00800000L
+#define SQ_SCRATCH_1__VDST_MASK 0xFF000000L
+//SQ_SMEM_0
+#define SQ_SMEM_0__SBASE__SHIFT 0x0
+#define SQ_SMEM_0__SDATA__SHIFT 0x6
+#define SQ_SMEM_0__SOFFSET_EN__SHIFT 0xe
+#define SQ_SMEM_0__NV__SHIFT 0xf
+#define SQ_SMEM_0__GLC__SHIFT 0x10
+#define SQ_SMEM_0__IMM__SHIFT 0x11
+#define SQ_SMEM_0__OP__SHIFT 0x12
+#define SQ_SMEM_0__ENCODING__SHIFT 0x1a
+#define SQ_SMEM_0__SBASE_MASK 0x0000003FL
+#define SQ_SMEM_0__SDATA_MASK 0x00001FC0L
+#define SQ_SMEM_0__SOFFSET_EN_MASK 0x00004000L
+#define SQ_SMEM_0__NV_MASK 0x00008000L
+#define SQ_SMEM_0__GLC_MASK 0x00010000L
+#define SQ_SMEM_0__IMM_MASK 0x00020000L
+#define SQ_SMEM_0__OP_MASK 0x03FC0000L
+#define SQ_SMEM_0__ENCODING_MASK 0xFC000000L
+//SQ_SMEM_1
+#define SQ_SMEM_1__OFFSET__SHIFT 0x0
+#define SQ_SMEM_1__SOFFSET__SHIFT 0x19
+#define SQ_SMEM_1__OFFSET_MASK 0x001FFFFFL
+#define SQ_SMEM_1__SOFFSET_MASK 0xFE000000L
+//SQ_SOP1
+#define SQ_SOP1__SSRC0__SHIFT 0x0
+#define SQ_SOP1__OP__SHIFT 0x8
+#define SQ_SOP1__SDST__SHIFT 0x10
+#define SQ_SOP1__ENCODING__SHIFT 0x17
+#define SQ_SOP1__SSRC0_MASK 0x000000FFL
+#define SQ_SOP1__OP_MASK 0x0000FF00L
+#define SQ_SOP1__SDST_MASK 0x007F0000L
+#define SQ_SOP1__ENCODING_MASK 0xFF800000L
+//SQ_SOP2
+#define SQ_SOP2__SSRC0__SHIFT 0x0
+#define SQ_SOP2__SSRC1__SHIFT 0x8
+#define SQ_SOP2__SDST__SHIFT 0x10
+#define SQ_SOP2__OP__SHIFT 0x17
+#define SQ_SOP2__ENCODING__SHIFT 0x1e
+#define SQ_SOP2__SSRC0_MASK 0x000000FFL
+#define SQ_SOP2__SSRC1_MASK 0x0000FF00L
+#define SQ_SOP2__SDST_MASK 0x007F0000L
+#define SQ_SOP2__OP_MASK 0x3F800000L
+#define SQ_SOP2__ENCODING_MASK 0xC0000000L
+//SQ_SOPC
+#define SQ_SOPC__SSRC0__SHIFT 0x0
+#define SQ_SOPC__SSRC1__SHIFT 0x8
+#define SQ_SOPC__OP__SHIFT 0x10
+#define SQ_SOPC__ENCODING__SHIFT 0x17
+#define SQ_SOPC__SSRC0_MASK 0x000000FFL
+#define SQ_SOPC__SSRC1_MASK 0x0000FF00L
+#define SQ_SOPC__OP_MASK 0x007F0000L
+#define SQ_SOPC__ENCODING_MASK 0xFF800000L
+//SQ_SOPK
+#define SQ_SOPK__SIMM16__SHIFT 0x0
+#define SQ_SOPK__SDST__SHIFT 0x10
+#define SQ_SOPK__OP__SHIFT 0x17
+#define SQ_SOPK__ENCODING__SHIFT 0x1c
+#define SQ_SOPK__SIMM16_MASK 0x0000FFFFL
+#define SQ_SOPK__SDST_MASK 0x007F0000L
+#define SQ_SOPK__OP_MASK 0x0F800000L
+#define SQ_SOPK__ENCODING_MASK 0xF0000000L
+//SQ_SOPP
+#define SQ_SOPP__SIMM16__SHIFT 0x0
+#define SQ_SOPP__OP__SHIFT 0x10
+#define SQ_SOPP__ENCODING__SHIFT 0x17
+#define SQ_SOPP__SIMM16_MASK 0x0000FFFFL
+#define SQ_SOPP__OP_MASK 0x007F0000L
+#define SQ_SOPP__ENCODING_MASK 0xFF800000L
+//SQ_VINTRP
+#define SQ_VINTRP__VSRC__SHIFT 0x0
+#define SQ_VINTRP__ATTRCHAN__SHIFT 0x8
+#define SQ_VINTRP__ATTR__SHIFT 0xa
+#define SQ_VINTRP__OP__SHIFT 0x10
+#define SQ_VINTRP__VDST__SHIFT 0x12
+#define SQ_VINTRP__ENCODING__SHIFT 0x1a
+#define SQ_VINTRP__VSRC_MASK 0x000000FFL
+#define SQ_VINTRP__ATTRCHAN_MASK 0x00000300L
+#define SQ_VINTRP__ATTR_MASK 0x0000FC00L
+#define SQ_VINTRP__OP_MASK 0x00030000L
+#define SQ_VINTRP__VDST_MASK 0x03FC0000L
+#define SQ_VINTRP__ENCODING_MASK 0xFC000000L
+//SQ_VOP1
+#define SQ_VOP1__SRC0__SHIFT 0x0
+#define SQ_VOP1__OP__SHIFT 0x9
+#define SQ_VOP1__VDST__SHIFT 0x11
+#define SQ_VOP1__ENCODING__SHIFT 0x19
+#define SQ_VOP1__SRC0_MASK 0x000001FFL
+#define SQ_VOP1__OP_MASK 0x0001FE00L
+#define SQ_VOP1__VDST_MASK 0x01FE0000L
+#define SQ_VOP1__ENCODING_MASK 0xFE000000L
+//SQ_VOP2
+#define SQ_VOP2__SRC0__SHIFT 0x0
+#define SQ_VOP2__VSRC1__SHIFT 0x9
+#define SQ_VOP2__VDST__SHIFT 0x11
+#define SQ_VOP2__OP__SHIFT 0x19
+#define SQ_VOP2__ENCODING__SHIFT 0x1f
+#define SQ_VOP2__SRC0_MASK 0x000001FFL
+#define SQ_VOP2__VSRC1_MASK 0x0001FE00L
+#define SQ_VOP2__VDST_MASK 0x01FE0000L
+#define SQ_VOP2__OP_MASK 0x7E000000L
+#define SQ_VOP2__ENCODING_MASK 0x80000000L
+//SQ_VOP3P_0
+#define SQ_VOP3P_0__VDST__SHIFT 0x0
+#define SQ_VOP3P_0__NEG_HI__SHIFT 0x8
+#define SQ_VOP3P_0__OP_SEL__SHIFT 0xb
+#define SQ_VOP3P_0__OP_SEL_HI_2__SHIFT 0xe
+#define SQ_VOP3P_0__CLAMP__SHIFT 0xf
+#define SQ_VOP3P_0__OP__SHIFT 0x10
+#define SQ_VOP3P_0__ENCODING__SHIFT 0x17
+#define SQ_VOP3P_0__VDST_MASK 0x000000FFL
+#define SQ_VOP3P_0__NEG_HI_MASK 0x00000700L
+#define SQ_VOP3P_0__OP_SEL_MASK 0x00003800L
+#define SQ_VOP3P_0__OP_SEL_HI_2_MASK 0x00004000L
+#define SQ_VOP3P_0__CLAMP_MASK 0x00008000L
+#define SQ_VOP3P_0__OP_MASK 0x007F0000L
+#define SQ_VOP3P_0__ENCODING_MASK 0xFF800000L
+//SQ_VOP3P_1
+#define SQ_VOP3P_1__SRC0__SHIFT 0x0
+#define SQ_VOP3P_1__SRC1__SHIFT 0x9
+#define SQ_VOP3P_1__SRC2__SHIFT 0x12
+#define SQ_VOP3P_1__OP_SEL_HI__SHIFT 0x1b
+#define SQ_VOP3P_1__NEG__SHIFT 0x1d
+#define SQ_VOP3P_1__SRC0_MASK 0x000001FFL
+#define SQ_VOP3P_1__SRC1_MASK 0x0003FE00L
+#define SQ_VOP3P_1__SRC2_MASK 0x07FC0000L
+#define SQ_VOP3P_1__OP_SEL_HI_MASK 0x18000000L
+#define SQ_VOP3P_1__NEG_MASK 0xE0000000L
+//SQ_VOP3P_MFMA_0
+#define SQ_VOP3P_MFMA_0__VDST__SHIFT 0x0
+#define SQ_VOP3P_MFMA_0__CBSZ__SHIFT 0x8
+#define SQ_VOP3P_MFMA_0__ABID__SHIFT 0xb
+#define SQ_VOP3P_MFMA_0__ACC_CD__SHIFT 0xf
+#define SQ_VOP3P_MFMA_0__OP__SHIFT 0x10
+#define SQ_VOP3P_MFMA_0__ENCODING__SHIFT 0x17
+#define SQ_VOP3P_MFMA_0__VDST_MASK 0x000000FFL
+#define SQ_VOP3P_MFMA_0__CBSZ_MASK 0x00000700L
+#define SQ_VOP3P_MFMA_0__ABID_MASK 0x00007800L
+#define SQ_VOP3P_MFMA_0__ACC_CD_MASK 0x00008000L
+#define SQ_VOP3P_MFMA_0__OP_MASK 0x007F0000L
+#define SQ_VOP3P_MFMA_0__ENCODING_MASK 0xFF800000L
+//SQ_VOP3P_MFMA_1
+#define SQ_VOP3P_MFMA_1__SRC0__SHIFT 0x0
+#define SQ_VOP3P_MFMA_1__SRC1__SHIFT 0x9
+#define SQ_VOP3P_MFMA_1__SRC2__SHIFT 0x12
+#define SQ_VOP3P_MFMA_1__ACC__SHIFT 0x1b
+#define SQ_VOP3P_MFMA_1__BLGP__SHIFT 0x1d
+#define SQ_VOP3P_MFMA_1__SRC0_MASK 0x000001FFL
+#define SQ_VOP3P_MFMA_1__SRC1_MASK 0x0003FE00L
+#define SQ_VOP3P_MFMA_1__SRC2_MASK 0x07FC0000L
+#define SQ_VOP3P_MFMA_1__ACC_MASK 0x18000000L
+#define SQ_VOP3P_MFMA_1__BLGP_MASK 0xE0000000L
+//SQ_VOP3_0
+#define SQ_VOP3_0__VDST__SHIFT 0x0
+#define SQ_VOP3_0__ABS__SHIFT 0x8
+#define SQ_VOP3_0__OP_SEL__SHIFT 0xb
+#define SQ_VOP3_0__CLAMP__SHIFT 0xf
+#define SQ_VOP3_0__OP__SHIFT 0x10
+#define SQ_VOP3_0__ENCODING__SHIFT 0x1a
+#define SQ_VOP3_0__VDST_MASK 0x000000FFL
+#define SQ_VOP3_0__ABS_MASK 0x00000700L
+#define SQ_VOP3_0__OP_SEL_MASK 0x00007800L
+#define SQ_VOP3_0__CLAMP_MASK 0x00008000L
+#define SQ_VOP3_0__OP_MASK 0x03FF0000L
+#define SQ_VOP3_0__ENCODING_MASK 0xFC000000L
+//SQ_VOP3_0_SDST_ENC
+#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x0
+#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x8
+#define SQ_VOP3_0_SDST_ENC__CLAMP__SHIFT 0xf
+#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x10
+#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x1a
+#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0x000000FFL
+#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x00007F00L
+#define SQ_VOP3_0_SDST_ENC__CLAMP_MASK 0x00008000L
+#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x03FF0000L
+#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xFC000000L
+//SQ_VOP3_1
+#define SQ_VOP3_1__SRC0__SHIFT 0x0
+#define SQ_VOP3_1__SRC1__SHIFT 0x9
+#define SQ_VOP3_1__SRC2__SHIFT 0x12
+#define SQ_VOP3_1__OMOD__SHIFT 0x1b
+#define SQ_VOP3_1__NEG__SHIFT 0x1d
+#define SQ_VOP3_1__SRC0_MASK 0x000001FFL
+#define SQ_VOP3_1__SRC1_MASK 0x0003FE00L
+#define SQ_VOP3_1__SRC2_MASK 0x07FC0000L
+#define SQ_VOP3_1__OMOD_MASK 0x18000000L
+#define SQ_VOP3_1__NEG_MASK 0xE0000000L
+//SQ_VOPC
+#define SQ_VOPC__SRC0__SHIFT 0x0
+#define SQ_VOPC__VSRC1__SHIFT 0x9
+#define SQ_VOPC__OP__SHIFT 0x11
+#define SQ_VOPC__ENCODING__SHIFT 0x19
+#define SQ_VOPC__SRC0_MASK 0x000001FFL
+#define SQ_VOPC__VSRC1_MASK 0x0001FE00L
+#define SQ_VOPC__OP_MASK 0x01FE0000L
+#define SQ_VOPC__ENCODING_MASK 0xFE000000L
+//SQ_VOP_DPP
+#define SQ_VOP_DPP__SRC0__SHIFT 0x0
+#define SQ_VOP_DPP__DPP_CTRL__SHIFT 0x8
+#define SQ_VOP_DPP__BOUND_CTRL__SHIFT 0x13
+#define SQ_VOP_DPP__SRC0_NEG__SHIFT 0x14
+#define SQ_VOP_DPP__SRC0_ABS__SHIFT 0x15
+#define SQ_VOP_DPP__SRC1_NEG__SHIFT 0x16
+#define SQ_VOP_DPP__SRC1_ABS__SHIFT 0x17
+#define SQ_VOP_DPP__BANK_MASK__SHIFT 0x18
+#define SQ_VOP_DPP__ROW_MASK__SHIFT 0x1c
+#define SQ_VOP_DPP__SRC0_MASK 0x000000FFL
+#define SQ_VOP_DPP__DPP_CTRL_MASK 0x0001FF00L
+#define SQ_VOP_DPP__BOUND_CTRL_MASK 0x00080000L
+#define SQ_VOP_DPP__SRC0_NEG_MASK 0x00100000L
+#define SQ_VOP_DPP__SRC0_ABS_MASK 0x00200000L
+#define SQ_VOP_DPP__SRC1_NEG_MASK 0x00400000L
+#define SQ_VOP_DPP__SRC1_ABS_MASK 0x00800000L
+#define SQ_VOP_DPP__BANK_MASK_MASK 0x0F000000L
+#define SQ_VOP_DPP__ROW_MASK_MASK 0xF0000000L
+//SQ_VOP_SDWA
+#define SQ_VOP_SDWA__SRC0__SHIFT 0x0
+#define SQ_VOP_SDWA__DST_SEL__SHIFT 0x8
+#define SQ_VOP_SDWA__DST_UNUSED__SHIFT 0xb
+#define SQ_VOP_SDWA__CLAMP__SHIFT 0xd
+#define SQ_VOP_SDWA__OMOD__SHIFT 0xe
+#define SQ_VOP_SDWA__SRC0_SEL__SHIFT 0x10
+#define SQ_VOP_SDWA__SRC0_SEXT__SHIFT 0x13
+#define SQ_VOP_SDWA__SRC0_NEG__SHIFT 0x14
+#define SQ_VOP_SDWA__SRC0_ABS__SHIFT 0x15
+#define SQ_VOP_SDWA__S0__SHIFT 0x17
+#define SQ_VOP_SDWA__SRC1_SEL__SHIFT 0x18
+#define SQ_VOP_SDWA__SRC1_SEXT__SHIFT 0x1b
+#define SQ_VOP_SDWA__SRC1_NEG__SHIFT 0x1c
+#define SQ_VOP_SDWA__SRC1_ABS__SHIFT 0x1d
+#define SQ_VOP_SDWA__S1__SHIFT 0x1f
+#define SQ_VOP_SDWA__SRC0_MASK 0x000000FFL
+#define SQ_VOP_SDWA__DST_SEL_MASK 0x00000700L
+#define SQ_VOP_SDWA__DST_UNUSED_MASK 0x00001800L
+#define SQ_VOP_SDWA__CLAMP_MASK 0x00002000L
+#define SQ_VOP_SDWA__OMOD_MASK 0x0000C000L
+#define SQ_VOP_SDWA__SRC0_SEL_MASK 0x00070000L
+#define SQ_VOP_SDWA__SRC0_SEXT_MASK 0x00080000L
+#define SQ_VOP_SDWA__SRC0_NEG_MASK 0x00100000L
+#define SQ_VOP_SDWA__SRC0_ABS_MASK 0x00200000L
+#define SQ_VOP_SDWA__S0_MASK 0x00800000L
+#define SQ_VOP_SDWA__SRC1_SEL_MASK 0x07000000L
+#define SQ_VOP_SDWA__SRC1_SEXT_MASK 0x08000000L
+#define SQ_VOP_SDWA__SRC1_NEG_MASK 0x10000000L
+#define SQ_VOP_SDWA__SRC1_ABS_MASK 0x20000000L
+#define SQ_VOP_SDWA__S1_MASK 0x80000000L
+//SQ_VOP_SDWA_SDST_ENC
+#define SQ_VOP_SDWA_SDST_ENC__SRC0__SHIFT 0x0
+#define SQ_VOP_SDWA_SDST_ENC__SDST__SHIFT 0x8
+#define SQ_VOP_SDWA_SDST_ENC__SD__SHIFT 0xf
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL__SHIFT 0x10
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT__SHIFT 0x13
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG__SHIFT 0x14
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS__SHIFT 0x15
+#define SQ_VOP_SDWA_SDST_ENC__S0__SHIFT 0x17
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL__SHIFT 0x18
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT__SHIFT 0x1b
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG__SHIFT 0x1c
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS__SHIFT 0x1d
+#define SQ_VOP_SDWA_SDST_ENC__S1__SHIFT 0x1f
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_MASK 0x000000FFL
+#define SQ_VOP_SDWA_SDST_ENC__SDST_MASK 0x00007F00L
+#define SQ_VOP_SDWA_SDST_ENC__SD_MASK 0x00008000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL_MASK 0x00070000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT_MASK 0x00080000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG_MASK 0x00100000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS_MASK 0x00200000L
+#define SQ_VOP_SDWA_SDST_ENC__S0_MASK 0x00800000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL_MASK 0x07000000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT_MASK 0x08000000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG_MASK 0x10000000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS_MASK 0x20000000L
+#define SQ_VOP_SDWA_SDST_ENC__S1_MASK 0x80000000L
+//SQ_LB_CTR_CTRL
+#define SQ_LB_CTR_CTRL__START__SHIFT 0x0
+#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x1
+#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x2
+#define SQ_LB_CTR_CTRL__START_MASK 0x00000001L
+#define SQ_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+//SQ_LB_DATA0
+#define SQ_LB_DATA0__DATA__SHIFT 0x0
+#define SQ_LB_DATA0__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_DATA1
+#define SQ_LB_DATA1__DATA__SHIFT 0x0
+#define SQ_LB_DATA1__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_DATA2
+#define SQ_LB_DATA2__DATA__SHIFT 0x0
+#define SQ_LB_DATA2__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_DATA3
+#define SQ_LB_DATA3__DATA__SHIFT 0x0
+#define SQ_LB_DATA3__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_CTR_SEL
+#define SQ_LB_CTR_SEL__SEL0__SHIFT 0x0
+#define SQ_LB_CTR_SEL__SEL1__SHIFT 0x4
+#define SQ_LB_CTR_SEL__SEL2__SHIFT 0x8
+#define SQ_LB_CTR_SEL__SEL3__SHIFT 0xc
+#define SQ_LB_CTR_SEL__SEL0_MASK 0x0000000FL
+#define SQ_LB_CTR_SEL__SEL1_MASK 0x000000F0L
+#define SQ_LB_CTR_SEL__SEL2_MASK 0x00000F00L
+#define SQ_LB_CTR_SEL__SEL3_MASK 0x0000F000L
+//SQ_LB_CTR0_CU
+#define SQ_LB_CTR0_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR0_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR0_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR0_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_LB_CTR1_CU
+#define SQ_LB_CTR1_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR1_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR1_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR1_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_LB_CTR2_CU
+#define SQ_LB_CTR2_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR2_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR2_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR2_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_LB_CTR3_CU
+#define SQ_LB_CTR3_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR3_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR3_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR3_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQC_EDC_CNT
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L
+//SQ_EDC_SEC_CNT
+#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0
+#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8
+#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10
+#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL
+#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L
+#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L
+//SQ_EDC_DED_CNT
+#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0
+#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8
+#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10
+#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL
+#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L
+#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L
+//SQ_EDC_INFO
+#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0
+#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4
+#define SQ_EDC_INFO__SOURCE__SHIFT 0x6
+#define SQ_EDC_INFO__VM_ID__SHIFT 0x9
+#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL
+#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L
+#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L
+#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L
+//SQ_EDC_CNT
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0
+#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4
+#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6
+#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8
+#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc
+#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10
+#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14
+#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18
+#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L
+#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L
+#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L
+#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L
+#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L
+#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L
+#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L
+#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L
+#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L
+//SQ_EDC_FUE_CNTL
+#define SQ_EDC_FUE_CNTL__BLOCK_FUE_FLAGS__SHIFT 0x0
+#define SQ_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES__SHIFT 0x10
+#define SQ_EDC_FUE_CNTL__BLOCK_FUE_FLAGS_MASK 0x0000FFFFL
+#define SQ_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_CMN
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x0010L
+//SQ_THREAD_TRACE_WORD_EVENT
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x0010L
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x0020L
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x01C0L
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0xFC00L
+//SQ_THREAD_TRACE_WORD_INST
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0xb
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x0010L
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x01E0L
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x0600L
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0xF800L
+//SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR__SHIFT 0xf
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x000001E0L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x00000600L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR_MASK 0x00008000L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__PRIV__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__PRIV_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x000003C0L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x00003C00L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_ISSUE
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x8
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0xc
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x12
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x14
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x16
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x18
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x1a
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x00000300L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0x00000C00L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x00003000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x00030000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0x000C0000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x00300000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0x00C00000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x03000000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0x0C000000L
+//SQ_THREAD_TRACE_WORD_MISC
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0xc
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0xd
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0x0FF0L
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x1000L
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0xE000L
+//SQ_THREAD_TRACE_WORD_PERF_1_OF_2
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0xc
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x19
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x000003C0L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0x00000C00L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x01FFF000L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xFE000000L
+//SQ_THREAD_TRACE_WORD_REG_1_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x7
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0xf
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x00000180L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x00000200L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x00001C00L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x00004000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x00008000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_REG_2_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID__SHIFT 0x7
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID_MASK 0x00000180L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR_MASK 0x0000FE00L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI_MASK 0x0000FFFFL
+//SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_WAVE
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x0010L
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x0020L
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x03C0L
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x3C00L
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0xC000L
+//SQ_THREAD_TRACE_WORD_WAVE_START
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x15
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x16
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x1d
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x000003C0L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x00003C00L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x001F0000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x00200000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1FC00000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xE0000000L
+//SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0x00FFFFFFL
+//SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0xFFFFL
+//SQ_THREAD_TRACE_WORD_PERF_2_OF_2
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x13
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x0000003FL
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x0007FFC0L
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xFFF80000L
+//SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xFFFFFFFFL
+//SQ_WREXEC_EXEC_HI
+#define SQ_WREXEC_EXEC_HI__ADDR_HI__SHIFT 0x0
+#define SQ_WREXEC_EXEC_HI__FIRST_WAVE__SHIFT 0x1a
+#define SQ_WREXEC_EXEC_HI__ATC__SHIFT 0x1b
+#define SQ_WREXEC_EXEC_HI__MTYPE__SHIFT 0x1c
+#define SQ_WREXEC_EXEC_HI__MSB__SHIFT 0x1f
+#define SQ_WREXEC_EXEC_HI__ADDR_HI_MASK 0x0000FFFFL
+#define SQ_WREXEC_EXEC_HI__FIRST_WAVE_MASK 0x04000000L
+#define SQ_WREXEC_EXEC_HI__ATC_MASK 0x08000000L
+#define SQ_WREXEC_EXEC_HI__MTYPE_MASK 0x70000000L
+#define SQ_WREXEC_EXEC_HI__MSB_MASK 0x80000000L
+//SQ_WREXEC_EXEC_LO
+#define SQ_WREXEC_EXEC_LO__ADDR_LO__SHIFT 0x0
+#define SQ_WREXEC_EXEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_BUF_RSRC_WORD0
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
+//SQ_BUF_RSRC_WORD1
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x10
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x1e
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x1f
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x0000FFFFL
+#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3FFF0000L
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000L
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000L
+//SQ_BUF_RSRC_WORD2
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xFFFFFFFFL
+//SQ_BUF_RSRC_WORD3
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0xc
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0xf
+#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE__SHIFT 0x13
+#define SQ_BUF_RSRC_WORD3__USER_VM_MODE__SHIFT 0x14
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x15
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x17
+#define SQ_BUF_RSRC_WORD3__NV__SHIFT 0x1b
+#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x1e
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x00007000L
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x00078000L
+#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE_MASK 0x00080000L
+#define SQ_BUF_RSRC_WORD3__USER_VM_MODE_MASK 0x00100000L
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x00600000L
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x00800000L
+#define SQ_BUF_RSRC_WORD3__NV_MASK 0x08000000L
+#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xC0000000L
+//SQ_IMG_RSRC_WORD0
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
+//SQ_IMG_RSRC_WORD1
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x8
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x14
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x1a
+#define SQ_IMG_RSRC_WORD1__NV__SHIFT 0x1e
+#define SQ_IMG_RSRC_WORD1__META_DIRECT__SHIFT 0x1f
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x000000FFL
+#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0x000FFF00L
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x03F00000L
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3C000000L
+#define SQ_IMG_RSRC_WORD1__NV_MASK 0x40000000L
+#define SQ_IMG_RSRC_WORD1__META_DIRECT_MASK 0x80000000L
+//SQ_IMG_RSRC_WORD2
+#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0xe
+#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x00003FFFL
+#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0x0FFFC000L
+#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000L
+//SQ_IMG_RSRC_WORD3
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0xc
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x10
+#define SQ_IMG_RSRC_WORD3__SW_MODE__SHIFT 0x14
+#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0x0000F000L
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0x000F0000L
+#define SQ_IMG_RSRC_WORD3__SW_MODE_MASK 0x01F00000L
+#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xF0000000L
+//SQ_IMG_RSRC_WORD4
+#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0xd
+#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE__SHIFT 0x1d
+#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x00001FFFL
+#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x1FFFE000L
+#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE_MASK 0xE0000000L
+//SQ_IMG_RSRC_WORD5
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH__SHIFT 0xd
+#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS__SHIFT 0x11
+#define SQ_IMG_RSRC_WORD5__META_LINEAR__SHIFT 0x19
+#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED__SHIFT 0x1a
+#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED__SHIFT 0x1b
+#define SQ_IMG_RSRC_WORD5__MAX_MIP__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x00001FFFL
+#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH_MASK 0x0001E000L
+#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS_MASK 0x01FE0000L
+#define SQ_IMG_RSRC_WORD5__META_LINEAR_MASK 0x02000000L
+#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED_MASK 0x04000000L
+#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED_MASK 0x08000000L
+#define SQ_IMG_RSRC_WORD5__MAX_MIP_MASK 0xF0000000L
+//SQ_IMG_RSRC_WORD6
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0xc
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x14
+#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN__SHIFT 0x15
+#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB__SHIFT 0x16
+#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM__SHIFT 0x17
+#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS__SHIFT 0x18
+#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0x00000FFFL
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0x000FF000L
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x00100000L
+#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN_MASK 0x00200000L
+#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB_MASK 0x00400000L
+#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM_MASK 0x00800000L
+#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS_MASK 0x0F000000L
+#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS_MASK 0xF0000000L
+//SQ_IMG_RSRC_WORD7
+#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS_MASK 0xFFFFFFFFL
+//SQ_IMG_SAMP_WORD0
+#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x3
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x6
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x9
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0xc
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0xf
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x10
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x13
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x14
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x15
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x1b
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x1c
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x1d
+#define SQ_IMG_SAMP_WORD0__COMPAT_MODE__SHIFT 0x1f
+#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x00000007L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x00000038L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x000001C0L
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0x00000E00L
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x00007000L
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x00008000L
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x00070000L
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x00080000L
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x00100000L
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x07E00000L
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x08000000L
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000L
+#define SQ_IMG_SAMP_WORD0__COMPAT_MODE_MASK 0x80000000L
+//SQ_IMG_SAMP_WORD1
+#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0xc
+#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x18
+#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x1c
+#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0x00000FFFL
+#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0x00FFF000L
+#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0x0F000000L
+#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xF0000000L
+//SQ_IMG_SAMP_WORD2
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0xe
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x14
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x16
+#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x18
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x1a
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x1c
+#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT__SHIFT 0x1d
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x1e
+#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE__SHIFT 0x1f
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x00003FFFL
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0x000FC000L
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x00300000L
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0x00C00000L
+#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x03000000L
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0x0C000000L
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT_MASK 0x20000000L
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000L
+#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE_MASK 0x80000000L
+//SQ_IMG_SAMP_WORD3
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA__SHIFT 0xc
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x1e
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0x00000FFFL
+#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA_MASK 0x00001000L
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xC0000000L
+//SQ_FLAT_SCRATCH_WORD0
+#define SQ_FLAT_SCRATCH_WORD0__SIZE__SHIFT 0x0
+#define SQ_FLAT_SCRATCH_WORD0__SIZE_MASK 0x0007FFFFL
+//SQ_FLAT_SCRATCH_WORD1
+#define SQ_FLAT_SCRATCH_WORD1__OFFSET__SHIFT 0x0
+#define SQ_FLAT_SCRATCH_WORD1__OFFSET_MASK 0x00FFFFFFL
+//SQ_M0_GPR_IDX_WORD
+#define SQ_M0_GPR_IDX_WORD__INDEX__SHIFT 0x0
+#define SQ_M0_GPR_IDX_WORD__VSRC0_REL__SHIFT 0xc
+#define SQ_M0_GPR_IDX_WORD__VSRC1_REL__SHIFT 0xd
+#define SQ_M0_GPR_IDX_WORD__VSRC2_REL__SHIFT 0xe
+#define SQ_M0_GPR_IDX_WORD__VDST_REL__SHIFT 0xf
+#define SQ_M0_GPR_IDX_WORD__INDEX_MASK 0x000000FFL
+#define SQ_M0_GPR_IDX_WORD__VSRC0_REL_MASK 0x00001000L
+#define SQ_M0_GPR_IDX_WORD__VSRC1_REL_MASK 0x00002000L
+#define SQ_M0_GPR_IDX_WORD__VSRC2_REL_MASK 0x00004000L
+#define SQ_M0_GPR_IDX_WORD__VDST_REL_MASK 0x00008000L
+//SQC_ICACHE_UTCL1_CNTL1
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define SQC_ICACHE_UTCL1_CNTL1__RESERVED__SHIFT 0x10
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define SQC_ICACHE_UTCL1_CNTL1__RESERVED_MASK 0x00010000L
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//SQC_ICACHE_UTCL1_CNTL2
+#define SQC_ICACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
+#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define SQC_ICACHE_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
+#define SQC_ICACHE_UTCL1_CNTL2__RESERVED__SHIFT 0x19
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define SQC_ICACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
+#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define SQC_ICACHE_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
+#define SQC_ICACHE_UTCL1_CNTL2__RESERVED_MASK 0x02000000L
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//SQC_DCACHE_UTCL1_CNTL1
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define SQC_DCACHE_UTCL1_CNTL1__RESERVED__SHIFT 0x10
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define SQC_DCACHE_UTCL1_CNTL1__RESERVED_MASK 0x00010000L
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//SQC_DCACHE_UTCL1_CNTL2
+#define SQC_DCACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
+#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define SQC_DCACHE_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
+#define SQC_DCACHE_UTCL1_CNTL2__RESERVED__SHIFT 0x19
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define SQC_DCACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
+#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define SQC_DCACHE_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
+#define SQC_DCACHE_UTCL1_CNTL2__RESERVED_MASK 0x02000000L
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//SQC_ICACHE_UTCL1_STATUS
+#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//SQC_DCACHE_UTCL1_STATUS
+#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+
+
+// addressBlock: xcd0_gc_shsdec
+//SX_DEBUG_BUSY
+#define SX_DEBUG_BUSY__RESERVED__SHIFT 0x0
+#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x1b
+#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x1c
+#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x1d
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x1e
+#define SX_DEBUG_BUSY__PCDATA_VALID__SHIFT 0x1f
+#define SX_DEBUG_BUSY__RESERVED_MASK 0x07FFFFFFL
+#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x08000000L
+#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x10000000L
+#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x20000000L
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x40000000L
+#define SX_DEBUG_BUSY__PCDATA_VALID_MASK 0x80000000L
+//SX_DEBUG_1
+#define SX_DEBUG_1__RESERVED__SHIFT 0x0
+#define SX_DEBUG_1__DISABLE_REP_FGCG__SHIFT 0xd
+#define SX_DEBUG_1__RESERVED_MASK 0x00001FFFL
+#define SX_DEBUG_1__DISABLE_REP_FGCG_MASK 0x00002000L
+//SPI_PS_MAX_WAVE_ID
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
+//SPI_START_PHASE
+#define SPI_START_PHASE__VGPR_START_PHASE__SHIFT 0x0
+#define SPI_START_PHASE__SGPR_START_PHASE__SHIFT 0x2
+#define SPI_START_PHASE__WAVE_START_PHASE__SHIFT 0x4
+#define SPI_START_PHASE__SPI_TD_GAP__SHIFT 0x6
+#define SPI_START_PHASE__VGPR_START_PHASE_MASK 0x00000003L
+#define SPI_START_PHASE__SGPR_START_PHASE_MASK 0x0000000CL
+#define SPI_START_PHASE__WAVE_START_PHASE_MASK 0x00000030L
+#define SPI_START_PHASE__SPI_TD_GAP_MASK 0x000003C0L
+//SPI_GFX_CNTL
+#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
+#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
+//SPI_DEBUG_READ
+#define SPI_DEBUG_READ__DATA__SHIFT 0x0
+#define SPI_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//SPI_DSM_CNTL
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SPI_DSM_CNTL__SPI_GDS_EXPREQ_MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SPI_DSM_CNTL__SPI_GDS_EXPREQ_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SPI_DSM_CNTL__SPI_WB_GRANT_30_MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SPI_DSM_CNTL__SPI_WB_GRANT_30_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SPI_DSM_CNTL__RESERVED__SHIFT 0x9
+#define SPI_DSM_CNTL__SPI_LIFE_CNT_MEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SPI_DSM_CNTL__SPI_LIFE_CNT_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SPI_DSM_CNTL__UNUSED__SHIFT 0xf
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SPI_DSM_CNTL__SPI_GDS_EXPREQ_MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SPI_DSM_CNTL__SPI_GDS_EXPREQ_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SPI_DSM_CNTL__SPI_WB_GRANT_30_MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SPI_DSM_CNTL__SPI_WB_GRANT_30_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SPI_DSM_CNTL__RESERVED_MASK 0x00000E00L
+#define SPI_DSM_CNTL__SPI_LIFE_CNT_MEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SPI_DSM_CNTL__SPI_LIFE_CNT_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SPI_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
+//SPI_DSM_CNTL2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x4
+#define SPI_DSM_CNTL2__SPI_GDS_EXPREQ_MEM_ENABLE_ERROR_INJECT__SHIFT 0xa
+#define SPI_DSM_CNTL2__SPI_GDS_EXPREQ_MEM_SELECT_INJECT_DELAY__SHIFT 0xc
+#define SPI_DSM_CNTL2__SPI_WB_GRANT_30_MEM_ENABLE_ERROR_INJECT__SHIFT 0xd
+#define SPI_DSM_CNTL2__SPI_WB_GRANT_30_MEM_SELECT_INJECT_DELAY__SHIFT 0xf
+#define SPI_DSM_CNTL2__RESERVED__SHIFT 0x10
+#define SPI_DSM_CNTL2__SPI_LIFE_CNT_MEM_ENABLE_ERROR_INJECT__SHIFT 0x13
+#define SPI_DSM_CNTL2__SPI_LIFE_CNT_MEM_SELECT_INJECT_DELAY__SHIFT 0x15
+#define SPI_DSM_CNTL2__UNUSED__SHIFT 0x16
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000003F0L
+#define SPI_DSM_CNTL2__SPI_GDS_EXPREQ_MEM_ENABLE_ERROR_INJECT_MASK 0x00000C00L
+#define SPI_DSM_CNTL2__SPI_GDS_EXPREQ_MEM_SELECT_INJECT_DELAY_MASK 0x00001000L
+#define SPI_DSM_CNTL2__SPI_WB_GRANT_30_MEM_ENABLE_ERROR_INJECT_MASK 0x00006000L
+#define SPI_DSM_CNTL2__SPI_WB_GRANT_30_MEM_SELECT_INJECT_DELAY_MASK 0x00008000L
+#define SPI_DSM_CNTL2__RESERVED_MASK 0x00070000L
+#define SPI_DSM_CNTL2__SPI_LIFE_CNT_MEM_ENABLE_ERROR_INJECT_MASK 0x00180000L
+#define SPI_DSM_CNTL2__SPI_LIFE_CNT_MEM_SELECT_INJECT_DELAY_MASK 0x00200000L
+#define SPI_DSM_CNTL2__UNUSED_MASK 0xFFC00000L
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa
+#define SPI_EDC_CNT__RESERVED__SHIFT 0xc
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12
+#define SPI_EDC_CNT__UNUSED__SHIFT 0x14
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L
+#define SPI_EDC_CNT__RESERVED_MASK 0x0000F000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L
+#define SPI_EDC_CNT__UNUSED_MASK 0xFFF00000L
+//SPI_DEBUG_BUSY
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1
+#define SPI_DEBUG_BUSY__VS_BUSY__SHIFT 0x2
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x3
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x4
+#define SPI_DEBUG_BUSY__CSG_BUSY__SHIFT 0x5
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x6
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x7
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0x8
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0x9
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0xa
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0xb
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0xc
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0xd
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0xe
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0xf
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY__SHIFT 0x10
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY__SHIFT 0x11
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x12
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x13
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x14
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x15
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__VS_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__CSG_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00040000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00080000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00200000L
+//SPI_CONFIG_PS_CU_EN
+#define SPI_CONFIG_PS_CU_EN__ENABLE__SHIFT 0x0
+#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN__SHIFT 0x1
+#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN__SHIFT 0x10
+#define SPI_CONFIG_PS_CU_EN__ENABLE_MASK 0x00000001L
+#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN_MASK 0x0000FFFEL
+#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN_MASK 0xFFFF0000L
+//SPI_WF_LIFETIME_CNTL
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
+#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
+#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
+//SPI_WF_LIFETIME_LIMIT_0
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_1
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_2
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_3
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_4
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_5
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_6
+#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_7
+#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_8
+#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_9
+#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_0
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_1
+#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_1__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_1__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_2
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_3
+#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_3__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_3__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_4
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_5
+#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_5__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_5__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_6
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_7
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_8
+#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_8__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_8__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_9
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_10
+#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_10__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_10__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_11
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_12
+#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_12__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_12__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_13
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_14
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_15
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_16
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_17
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_18
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_19
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_20
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_DEBUG
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE__SHIFT 0x0
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN_MASK 0x80000000L
+//SPI_LB_CTR_CTRL
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
+#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
+#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
+#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
+//SPI_LB_CU_MASK
+#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x0
+#define SPI_LB_CU_MASK__CU_MASK_MASK 0xFFFFL
+//SPI_LB_DATA_REG
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
+//SPI_PG_ENABLE_STATIC_CU_MASK
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x0
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0xFFFFL
+//SPI_GDS_CREDITS
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
+#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x10
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
+#define SPI_GDS_CREDITS__UNUSED_MASK 0xFFFF0000L
+//SPI_SX_EXPORT_BUFFER_SIZES
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
+//SPI_SX_SCOREBOARD_BUFFER_SIZES
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
+//SPI_CSQ_WF_ACTIVE_STATUS
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
+//SPI_CSQ_WF_ACTIVE_COUNT_0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_1
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_2
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_3
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_4
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_5
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_6
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS_MASK 0x01FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_7
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT_MASK 0x000001FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS_MASK 0x01FF0000L
+//SPI_LB_DATA_WAVES
+#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
+#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
+#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
+#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERCU_WAVE_HSGS
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS__SHIFT 0x0
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS__SHIFT 0x10
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERCU_WAVE_VSPS
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS__SHIFT 0x0
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS__SHIFT 0x10
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERCU_WAVE_CS
+#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE__SHIFT 0x0
+#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE_MASK 0xFFFFL
+//SPIS_DEBUG_READ
+#define SPIS_DEBUG_READ__DATA__SHIFT 0x0
+#define SPIS_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//BCI_DEBUG_READ
+#define BCI_DEBUG_READ__DATA__SHIFT 0x0
+#define BCI_DEBUG_READ__DATA_MASK 0xFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_LO
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_HI
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_PSMA_LO
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSMA_HI
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_GPR_MIN
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+//SPI_P1_TRAP_SCREEN_PSBA_LO
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSBA_HI
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_PSMA_LO
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSMA_HI
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_GPR_MIN
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+
+
+// addressBlock: xcd0_gc_tpdec
+//TD_CNTL
+#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x0
+#define TD_CNTL__TD_IN_CAC_CHICKENBITS__SHIFT 0x2
+#define TD_CNTL__TD_OUT_CAC_CHICKENBITS__SHIFT 0x6
+#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x9
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0xb
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
+#define TD_CNTL__SRAM_FGCG_TD_KCLARR_LG__SHIFT 0x1b
+#define TD_CNTL__RFGCG_CHICKEN__SHIFT 0x1c
+#define TD_CNTL__SYNC_PHASE_SH_MASK 0x00000003L
+#define TD_CNTL__TD_IN_CAC_CHICKENBITS_MASK 0x0000000CL
+#define TD_CNTL__TD_OUT_CAC_CHICKENBITS_MASK 0x000000C0L
+#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x00000600L
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x00001800L
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
+#define TD_CNTL__SRAM_FGCG_TD_KCLARR_LG_MASK 0x08000000L
+#define TD_CNTL__RFGCG_CHICKEN_MASK 0x70000000L
+//TD_STATUS
+#define TD_STATUS__BUSY__SHIFT 0x1f
+#define TD_STATUS__BUSY_MASK 0x80000000L
+//TD_POWER_CNTL
+#define TD_POWER_CNTL__MGCG_OUTPUTSTAGE__SHIFT 0x1
+#define TD_POWER_CNTL__MID0_THREAD_DATA__SHIFT 0x2
+#define TD_POWER_CNTL__MID2_ACCUM_DATA__SHIFT 0x3
+#define TD_POWER_CNTL__MGCG_OUTPUTSTAGE_MASK 0x00000002L
+#define TD_POWER_CNTL__MID0_THREAD_DATA_MASK 0x00000004L
+#define TD_POWER_CNTL__MID2_ACCUM_DATA_MASK 0x00000008L
+//TD_DSM_CNTL
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+//TD_DSM_CNTL2
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TD_DSM_CNTL2__TD_INJECT_DELAY__SHIFT 0x1a
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TD_DSM_CNTL2__TD_INJECT_DELAY_MASK 0xFC000000L
+//TD_SCRATCH
+#define TD_SCRATCH__SCRATCH__SHIFT 0x0
+#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+//TA_POWER_CNTL
+#define TA_POWER_CNTL__INPUT_CLK_EN_MODE__SHIFT 0x0
+#define TA_POWER_CNTL__LOD_CLK_EN_MODE__SHIFT 0x1
+#define TA_POWER_CNTL__WDP_CLK_EN_MODE__SHIFT 0x2
+#define TA_POWER_CNTL__INPUT_CLK_EN_MODE_MASK 0x00000001L
+#define TA_POWER_CNTL__LOD_CLK_EN_MODE_MASK 0x00000002L
+#define TA_POWER_CNTL__WDP_CLK_EN_MODE_MASK 0x00000004L
+//TA_CNTL
+#define TA_CNTL__FX_XNACK_CREDIT__SHIFT 0x0
+#define TA_CNTL__SQ_XNACK_CREDIT__SHIFT 0x9
+#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0xd
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
+#define TA_CNTL__FX_XNACK_CREDIT_MASK 0x0000007FL
+#define TA_CNTL__SQ_XNACK_CREDIT_MASK 0x00001E00L
+#define TA_CNTL__TC_DATA_CREDIT_MASK 0x0000E000L
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
+//TA_CNTL_AUX
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
+#define TA_CNTL_AUX__RESERVED__SHIFT 0x1
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
+#define TA_CNTL_AUX__RESERVED_MASK 0x0000000EL
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
+//TA_FEATURE_CNTL
+#define TA_FEATURE_CNTL__ATOMIC_COALESCING_EN__SHIFT 0x4
+#define TA_FEATURE_CNTL__TA_ACFIFO_CHICKEN__SHIFT 0xb
+#define TA_FEATURE_CNTL__TA_CAC_CHICKEN__SHIFT 0xc
+#define TA_FEATURE_CNTL__AFIFO_SPLIT_CHICKEN__SHIFT 0xd
+#define TA_FEATURE_CNTL__TA_DXFIFO_CHICKEN__SHIFT 0xe
+#define TA_FEATURE_CNTL__ATOMIC_COALESCING_EN_MASK 0x00000030L
+#define TA_FEATURE_CNTL__TA_ACFIFO_CHICKEN_MASK 0x00000800L
+#define TA_FEATURE_CNTL__TA_CAC_CHICKEN_MASK 0x00001000L
+#define TA_FEATURE_CNTL__AFIFO_SPLIT_CHICKEN_MASK 0x00002000L
+#define TA_FEATURE_CNTL__TA_DXFIFO_CHICKEN_MASK 0x00004000L
+//TA_STATUS
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
+#define TA_STATUS__IN_BUSY__SHIFT 0x18
+#define TA_STATUS__FG_BUSY__SHIFT 0x19
+#define TA_STATUS__TA_BUSY__SHIFT 0x1c
+#define TA_STATUS__FA_BUSY__SHIFT 0x1d
+#define TA_STATUS__AL_BUSY__SHIFT 0x1e
+#define TA_STATUS__BUSY__SHIFT 0x1f
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__BUSY_MASK 0x80000000L
+//TA_SCRATCH
+#define TA_SCRATCH__SCRATCH__SHIFT 0x0
+#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+//TA_DSM_CNTL
+#define TA_DSM_CNTL__TA_FS_DFIFO_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define TA_DSM_CNTL__TA_FS_DFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define TA_DSM_CNTL__TA_FX_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define TA_DSM_CNTL__TA_FX_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define TA_DSM_CNTL__TA_FS_CFIFO_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define TA_DSM_CNTL__TA_FS_CFIFO_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define TA_DSM_CNTL__TA_FS_AFIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define TA_DSM_CNTL__TA_FS_AFIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define TA_DSM_CNTL__TA_FS_AFIFO_HI_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define TA_DSM_CNTL__TA_FS_AFIFO_HI_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define TA_DSM_CNTL__TA_FS_DFIFO_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define TA_DSM_CNTL__TA_FS_DFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define TA_DSM_CNTL__TA_FX_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define TA_DSM_CNTL__TA_FX_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define TA_DSM_CNTL__TA_FS_CFIFO_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define TA_DSM_CNTL__TA_FS_CFIFO_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define TA_DSM_CNTL__TA_FS_AFIFO_LO_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define TA_DSM_CNTL__TA_FS_AFIFO_LO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define TA_DSM_CNTL__TA_FS_AFIFO_HI_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define TA_DSM_CNTL__TA_FS_AFIFO_HI_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//TA_DSM_CNTL2
+#define TA_DSM_CNTL2__TA_FS_DFIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TA_DSM_CNTL2__TA_FS_DFIFO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TA_DSM_CNTL2__TA_FX_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define TA_DSM_CNTL2__TA_FX_LFIFO_SELECT_INJECT_DELAY__SHIFT 0xb
+#define TA_DSM_CNTL2__TA_FS_CFIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TA_DSM_CNTL2__TA_FS_CFIFO_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TA_DSM_CNTL2__TA_FS_AFIFO_LO_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TA_DSM_CNTL2__TA_FS_AFIFO_LO_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TA_DSM_CNTL2__TA_FS_AFIFO_HI_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TA_DSM_CNTL2__TA_FS_AFIFO_HI_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TA_DSM_CNTL2__TA_INJECT_DELAY__SHIFT 0x1a
+#define TA_DSM_CNTL2__TA_FS_DFIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TA_DSM_CNTL2__TA_FS_DFIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TA_DSM_CNTL2__TA_FX_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define TA_DSM_CNTL2__TA_FX_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define TA_DSM_CNTL2__TA_FS_CFIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TA_DSM_CNTL2__TA_FS_CFIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TA_DSM_CNTL2__TA_FS_AFIFO_LO_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TA_DSM_CNTL2__TA_FS_AFIFO_LO_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TA_DSM_CNTL2__TA_FS_AFIFO_HI_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TA_DSM_CNTL2__TA_FS_AFIFO_HI_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TA_DSM_CNTL2__TA_INJECT_DELAY_MASK 0xFC000000L
+
+
+// addressBlock: xcd0_gc_gdsdec
+//GDS_CONFIG
+#define GDS_CONFIG__WRITE_DIS__SHIFT 0x0
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x1
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x3
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x5
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x7
+#define GDS_CONFIG__SH4_GPR_PHASE_SEL__SHIFT 0x9
+#define GDS_CONFIG__SH5_GPR_PHASE_SEL__SHIFT 0xb
+#define GDS_CONFIG__SH6_GPR_PHASE_SEL__SHIFT 0xd
+#define GDS_CONFIG__SH7_GPR_PHASE_SEL__SHIFT 0xf
+#define GDS_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x00000006L
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x00000018L
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x00000060L
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x00000180L
+#define GDS_CONFIG__SH4_GPR_PHASE_SEL_MASK 0x00000600L
+#define GDS_CONFIG__SH5_GPR_PHASE_SEL_MASK 0x00001800L
+#define GDS_CONFIG__SH6_GPR_PHASE_SEL_MASK 0x00006000L
+#define GDS_CONFIG__SH7_GPR_PHASE_SEL_MASK 0x00018000L
+//GDS_CNTL_STATUS
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x3
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x4
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x5
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x6
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x7
+#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x8
+#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x9
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0xa
+#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0xb
+#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xc
+#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xd
+#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xe
+#define GDS_CNTL_STATUS__CREDIT_BUSY4__SHIFT 0xf
+#define GDS_CNTL_STATUS__CREDIT_BUSY5__SHIFT 0x10
+#define GDS_CNTL_STATUS__CREDIT_BUSY6__SHIFT 0x11
+#define GDS_CNTL_STATUS__CREDIT_BUSY7__SHIFT 0x12
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x00000010L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000040L
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000080L
+#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000100L
+#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000200L
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000400L
+#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000800L
+#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00001000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00002000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00004000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY4_MASK 0x00008000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY5_MASK 0x00010000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY6_MASK 0x00020000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY7_MASK 0x00040000L
+//GDS_ENHANCE2
+#define GDS_ENHANCE2__MISC__SHIFT 0x0
+#define GDS_ENHANCE2__GDS_TD_INTERFACES_FGCG_OVERRIDE__SHIFT 0x10
+#define GDS_ENHANCE2__GDS_PHY_CMD_RAM_FGCG_OVERRIDE__SHIFT 0x11
+#define GDS_ENHANCE2__GDS_FED_IN_PROPAGATE__SHIFT 0x12
+#define GDS_ENHANCE2__UNUSED__SHIFT 0x13
+#define GDS_ENHANCE2__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE2__GDS_TD_INTERFACES_FGCG_OVERRIDE_MASK 0x00010000L
+#define GDS_ENHANCE2__GDS_PHY_CMD_RAM_FGCG_OVERRIDE_MASK 0x00020000L
+#define GDS_ENHANCE2__GDS_FED_IN_PROPAGATE_MASK 0x00040000L
+#define GDS_ENHANCE2__UNUSED_MASK 0xFFF80000L
+//GDS_PROTECTION_FAULT
+#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
+#define GDS_PROTECTION_FAULT__SH_ID__SHIFT 0x3
+#define GDS_PROTECTION_FAULT__CU_ID__SHIFT 0x6
+#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xa
+#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xc
+#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
+#define GDS_PROTECTION_FAULT__SH_ID_MASK 0x00000038L
+#define GDS_PROTECTION_FAULT__CU_ID_MASK 0x000003C0L
+#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00000C00L
+#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0000F000L
+#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_VM_PROTECTION_FAULT
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
+#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
+#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
+#define GDS_VM_PROTECTION_FAULT__TMZ__SHIFT 0x5
+#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
+#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
+#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
+#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
+#define GDS_VM_PROTECTION_FAULT__TMZ_MASK 0x00000020L
+#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
+#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L
+//GDS_DSM_CNTL
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_DSM_CNTL2
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
+//GDS_WD_GDS_CSB
+#define GDS_WD_GDS_CSB__COUNTER__SHIFT 0x0
+#define GDS_WD_GDS_CSB__UNUSED__SHIFT 0xd
+#define GDS_WD_GDS_CSB__COUNTER_MASK 0x00001FFFL
+#define GDS_WD_GDS_CSB__UNUSED_MASK 0xFFFFE000L
+
+
+// addressBlock: xcd0_gc_rbdec
+//DB_DEBUG
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+//DB_DEBUG2
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0xe
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0xf
+#define DB_DEBUG2__RESERVED__SHIFT 0x10
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
+#define DB_DEBUG2__DISABLE_VR_OBJ_PRIM_ID__SHIFT 0x1a
+#define DB_DEBUG2__DISABLE_VR_PS_INVOKE__SHIFT 0x1b
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x00004000L
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x00008000L
+#define DB_DEBUG2__RESERVED_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_VR_OBJ_PRIM_ID_MASK 0x04000000L
+#define DB_DEBUG2__DISABLE_VR_PS_INVOKE_MASK 0x08000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+//DB_DEBUG3
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
+#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION__SHIFT 0x1
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x7
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x9
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0xc
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x10
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
+#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE__SHIFT 0x1e
+#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK__SHIFT 0x1f
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
+#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION_MASK 0x00000002L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x00000080L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x00000200L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x00001000L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x00010000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE_MASK 0x40000000L
+#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK_MASK 0x80000000L
+//DB_DEBUG4
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
+#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF__SHIFT 0x4
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0x5
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x6
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x7
+#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS__SHIFT 0x8
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
+#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK__SHIFT 0xc
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
+#define DB_DEBUG4__DISABLE_TS_WRITE_L0__SHIFT 0xf
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0x10
+#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT__SHIFT 0x11
+#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT__SHIFT 0x12
+#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x13
+#define DB_DEBUG4__DISABLE_8PPC_OBJPRIMID_WHEN_NO_SHADER_EXPORTS__SHIFT 0x1e
+#define DB_DEBUG4__FULL_TILE_CACHE_EVICT_ON_HALF_FULL__SHIFT 0x1f
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
+#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF_MASK 0x00000010L
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00000020L
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000040L
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000080L
+#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS_MASK 0x00000100L
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
+#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK_MASK 0x00001000L
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
+#define DB_DEBUG4__DISABLE_TS_WRITE_L0_MASK 0x00008000L
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00010000L
+#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT_MASK 0x00020000L
+#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT_MASK 0x00040000L
+#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0x3FF80000L
+#define DB_DEBUG4__DISABLE_8PPC_OBJPRIMID_WHEN_NO_SHADER_EXPORTS_MASK 0x40000000L
+#define DB_DEBUG4__FULL_TILE_CACHE_EVICT_ON_HALF_FULL_MASK 0x80000000L
+//DB_CREDIT_LIMIT
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x18
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7F000000L
+//DB_WATERMARKS
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x5
+#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0xb
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0xf
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x14
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x1e
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x1f
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x0000001FL
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x000007E0L
+#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x00007800L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x000F8000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x0FF00000L
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000L
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000L
+//DB_SUBTILE_CONTROL
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
+//DB_FREE_CACHELINES
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x7
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0xe
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x14
+#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x18
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x0000007FL
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x00003F80L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x000FC000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x00F00000L
+#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xFF000000L
+//DB_FIFO_DEPTH1
+#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS__SHIFT 0x0
+#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS__SHIFT 0x5
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0xa
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x15
+#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS_MASK 0x0000001FL
+#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS_MASK 0x000003E0L
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x0000FC00L
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x001F0000L
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1FE00000L
+//DB_FIFO_DEPTH2
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0xf
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x00007F00L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF8000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
+//DB_EXCEPTION_CONTROL
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
+//DB_RING_CONTROL
+#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
+#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
+//DB_MEM_ARB_WATERMARKS
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
+//DB_RMI_CACHE_POLICY
+#define DB_RMI_CACHE_POLICY__Z_RD__SHIFT 0x0
+#define DB_RMI_CACHE_POLICY__S_RD__SHIFT 0x1
+#define DB_RMI_CACHE_POLICY__HTILE_RD__SHIFT 0x2
+#define DB_RMI_CACHE_POLICY__Z_WR__SHIFT 0x8
+#define DB_RMI_CACHE_POLICY__S_WR__SHIFT 0x9
+#define DB_RMI_CACHE_POLICY__HTILE_WR__SHIFT 0xa
+#define DB_RMI_CACHE_POLICY__ZPCPSD_WR__SHIFT 0xb
+#define DB_RMI_CACHE_POLICY__CC_RD__SHIFT 0x10
+#define DB_RMI_CACHE_POLICY__FMASK_RD__SHIFT 0x11
+#define DB_RMI_CACHE_POLICY__CMASK_RD__SHIFT 0x12
+#define DB_RMI_CACHE_POLICY__DCC_RD__SHIFT 0x13
+#define DB_RMI_CACHE_POLICY__CC_WR__SHIFT 0x18
+#define DB_RMI_CACHE_POLICY__FMASK_WR__SHIFT 0x19
+#define DB_RMI_CACHE_POLICY__CMASK_WR__SHIFT 0x1a
+#define DB_RMI_CACHE_POLICY__DCC_WR__SHIFT 0x1b
+#define DB_RMI_CACHE_POLICY__Z_RD_MASK 0x00000001L
+#define DB_RMI_CACHE_POLICY__S_RD_MASK 0x00000002L
+#define DB_RMI_CACHE_POLICY__HTILE_RD_MASK 0x00000004L
+#define DB_RMI_CACHE_POLICY__Z_WR_MASK 0x00000100L
+#define DB_RMI_CACHE_POLICY__S_WR_MASK 0x00000200L
+#define DB_RMI_CACHE_POLICY__HTILE_WR_MASK 0x00000400L
+#define DB_RMI_CACHE_POLICY__ZPCPSD_WR_MASK 0x00000800L
+#define DB_RMI_CACHE_POLICY__CC_RD_MASK 0x00010000L
+#define DB_RMI_CACHE_POLICY__FMASK_RD_MASK 0x00020000L
+#define DB_RMI_CACHE_POLICY__CMASK_RD_MASK 0x00040000L
+#define DB_RMI_CACHE_POLICY__DCC_RD_MASK 0x00080000L
+#define DB_RMI_CACHE_POLICY__CC_WR_MASK 0x01000000L
+#define DB_RMI_CACHE_POLICY__FMASK_WR_MASK 0x02000000L
+#define DB_RMI_CACHE_POLICY__CMASK_WR_MASK 0x04000000L
+#define DB_RMI_CACHE_POLICY__DCC_WR_MASK 0x08000000L
+//DB_DFSM_CONFIG
+#define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0
+#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT__SHIFT 0x1
+#define DB_DFSM_CONFIG__DISABLE_POPS__SHIFT 0x2
+#define DB_DFSM_CONFIG__FORCE_FLUSH__SHIFT 0x3
+#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH__SHIFT 0x8
+#define DB_DFSM_CONFIG__BYPASS_DFSM_MASK 0x00000001L
+#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT_MASK 0x00000002L
+#define DB_DFSM_CONFIG__DISABLE_POPS_MASK 0x00000004L
+#define DB_DFSM_CONFIG__FORCE_FLUSH_MASK 0x00000008L
+#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH_MASK 0x00007F00L
+//DB_DFSM_WATERMARK
+#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK__SHIFT 0x0
+#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK__SHIFT 0x10
+#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK_MASK 0x0000FFFFL
+#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK_MASK 0xFFFF0000L
+//DB_DFSM_TILES_IN_FLIGHT
+#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
+#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
+#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
+#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
+//DB_DFSM_PRIMS_IN_FLIGHT
+#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
+#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
+#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
+#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
+//DB_DFSM_WATCHDOG
+#define DB_DFSM_WATCHDOG__TIMER_TARGET__SHIFT 0x0
+#define DB_DFSM_WATCHDOG__TIMER_TARGET_MASK 0xFFFFFFFFL
+//DB_DFSM_FLUSH_ENABLE
+#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS__SHIFT 0x0
+#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU__SHIFT 0x18
+#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS__SHIFT 0x1c
+#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS_MASK 0x000003FFL
+#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU_MASK 0x0F000000L
+#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS_MASK 0xF0000000L
+//DB_DFSM_FLUSH_AUX_EVENT
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A__SHIFT 0x0
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B__SHIFT 0x8
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C__SHIFT 0x10
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D__SHIFT 0x18
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A_MASK 0x000000FFL
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B_MASK 0x0000FF00L
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C_MASK 0x00FF0000L
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D_MASK 0xFF000000L
+//CC_RB_REDUNDANCY
+#define CC_RB_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define CC_RB_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//CC_RB_BACKEND_DISABLE
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
+//GB_ADDR_CONFIG
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x15
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x18
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x1c
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x1e
+#define GB_ADDR_CONFIG__SE_ENABLE__SHIFT 0x1f
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x00E00000L
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define GB_ADDR_CONFIG__SE_ENABLE_MASK 0x80000000L
+//GB_BACKEND_MAP
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
+//GB_GPU_ID
+#define GB_GPU_ID__GPU_ID__SHIFT 0x0
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
+//CC_RB_DAISY_CHAIN
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
+//GB_ADDR_CONFIG_READ
+#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG_READ__NUM_GPUS__SHIFT 0x15
+#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE__SHIFT 0x18
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG_READ__ROW_SIZE__SHIFT 0x1c
+#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES__SHIFT 0x1e
+#define GB_ADDR_CONFIG_READ__SE_ENABLE__SHIFT 0x1f
+#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG_READ__NUM_GPUS_MASK 0x00E00000L
+#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+#define GB_ADDR_CONFIG_READ__ROW_SIZE_MASK 0x30000000L
+#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES_MASK 0x40000000L
+#define GB_ADDR_CONFIG_READ__SE_ENABLE_MASK 0x80000000L
+//GB_TILE_MODE0
+#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE1
+#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE2
+#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE3
+#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE4
+#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE5
+#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE6
+#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE7
+#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE8
+#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE9
+#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE10
+#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE11
+#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE12
+#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE13
+#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE14
+#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE15
+#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE16
+#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE17
+#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE18
+#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE19
+#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE20
+#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE21
+#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE22
+#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE23
+#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE24
+#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE25
+#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE26
+#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE27
+#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE28
+#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE29
+#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE30
+#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE31
+#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_MACROTILE_MODE0
+#define GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE0__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE0__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE0__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE0__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE1
+#define GB_MACROTILE_MODE1__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE1__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE1__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE1__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE1__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE1__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE2
+#define GB_MACROTILE_MODE2__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE2__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE2__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE2__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE2__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE2__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE3
+#define GB_MACROTILE_MODE3__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE3__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE3__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE3__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE3__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE3__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE4
+#define GB_MACROTILE_MODE4__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE4__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE4__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE4__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE4__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE4__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE5
+#define GB_MACROTILE_MODE5__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE5__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE5__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE5__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE5__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE5__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE6
+#define GB_MACROTILE_MODE6__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE6__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE6__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE6__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE6__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE6__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE7
+#define GB_MACROTILE_MODE7__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE7__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE7__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE7__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE7__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE7__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE8
+#define GB_MACROTILE_MODE8__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE8__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE8__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE8__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE8__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE8__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE9
+#define GB_MACROTILE_MODE9__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE9__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE9__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE9__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE9__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE9__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE10
+#define GB_MACROTILE_MODE10__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE10__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE10__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE10__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE10__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE10__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE11
+#define GB_MACROTILE_MODE11__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE11__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE11__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE11__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE11__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE11__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE12
+#define GB_MACROTILE_MODE12__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE12__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE12__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE12__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE12__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE12__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE13
+#define GB_MACROTILE_MODE13__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE13__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE13__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE13__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE13__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE13__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE14
+#define GB_MACROTILE_MODE14__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE14__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE14__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE14__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE14__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE14__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE15
+#define GB_MACROTILE_MODE15__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE15__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE15__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE15__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE15__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE15__NUM_BANKS_MASK 0x000000C0L
+//CB_HW_CONTROL
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x0
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x6
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0xc
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x10
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x12
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x14
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x16
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x17
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x1c
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x1d
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0x0000000FL
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x000003C0L
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0x0000F000L
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00010000L
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x00040000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x00100000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x00800000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+//CB_HW_CONTROL_1
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x0
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x5
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0xb
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x11
+#define CB_HW_CONTROL_1__RMI_CREDITS__SHIFT 0x1a
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x0000001FL
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x000007E0L
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0001F800L
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x03FE0000L
+#define CB_HW_CONTROL_1__RMI_CREDITS_MASK 0xFC000000L
+//CB_HW_CONTROL_2
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x0
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x8
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0xf
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x18
+#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x1c
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0x000000FFL
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x00007F00L
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x007F8000L
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x0F000000L
+#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xF0000000L
+//CB_HW_CONTROL_3
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x0
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
+#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT__SHIFT 0x2
+#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP__SHIFT 0x3
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR__SHIFT 0x4
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x5
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD__SHIFT 0x6
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x7
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION__SHIFT 0x8
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x9
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0xa
+#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION__SHIFT 0xb
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967__SHIFT 0xc
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657__SHIFT 0xd
+#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542__SHIFT 0xe
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xf
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0x10
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0x11
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC__SHIFT 0x12
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0x13
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM__SHIFT 0x14
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0x15
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC__SHIFT 0x16
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x17
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18
+#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19
+#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a
+#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX__SHIFT 0x1b
+#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS__SHIFT 0x1c
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
+#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT_MASK 0x00000004L
+#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP_MASK 0x00000008L
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR_MASK 0x00000010L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000020L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD_MASK 0x00000040L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000080L
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION_MASK 0x00000100L
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000200L
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000400L
+#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION_MASK 0x00000800L
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967_MASK 0x00001000L
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657_MASK 0x00002000L
+#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542_MASK 0x00004000L
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00008000L
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00010000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00020000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC_MASK 0x00040000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00080000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM_MASK 0x00100000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00200000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC_MASK 0x00400000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00800000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L
+#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L
+#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX_MASK 0x08000000L
+#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS_MASK 0x30000000L
+//CB_HW_MEM_ARBITER_RD
+#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x14
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x17
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x1a
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
+#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x000C0000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00400000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x03800000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x1C000000L
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
+//CB_HW_MEM_ARBITER_WR
+#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x14
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x17
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x1a
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
+#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x000C0000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00400000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x03800000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x1C000000L
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
+//CB_DCC_CONFIG
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH__SHIFT 0x0
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE__SHIFT 0x5
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE__SHIFT 0x6
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE__SHIFT 0x7
+#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH__SHIFT 0x8
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
+#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT__SHIFT 0x18
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x1c
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH_MASK 0x0000001FL
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE_MASK 0x00000020L
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE_MASK 0x00000040L
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE_MASK 0x00000080L
+#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH_MASK 0x0000FF00L
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x007F0000L
+#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT_MASK 0x0F000000L
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xF0000000L
+//GC_USER_RB_REDUNDANCY
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//GC_USER_RB_BACKEND_DISABLE
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
+
+
+// addressBlock: xcd0_gc_ea_gceadec
+//GCEA_DRAM_RD_CLI2GRP_MAP0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_CLI2GRP_MAP1
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP1
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_GRP2VC_MAP
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_WR_GRP2VC_MAP
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_RD_LAZY
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_WR_LAZY
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_RD_CAM_CNTL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_WR_CAM_CNTL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_PAGE_BURST
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_AGE
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_WR_PRI_AGE
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_RD_PRI_QUEUING
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_QUEUING
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_FIXED
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_FIXED
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_URGENCY
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_WR_PRI_URGENCY
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI1
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI2
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI3
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI1
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI2
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI3
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_CLI2GRP_MAP0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_CLI2GRP_MAP1
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP1
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_COMBINE_FLUSH
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//GCEA_IO_WR_COMBINE_FLUSH
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING__SHIFT 0x12
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING_MASK 0x00040000L
+//GCEA_IO_GROUP_BURST
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_AGE
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_WR_PRI_AGE
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_RD_PRI_QUEUING
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_QUEUING
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_FIXED
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_FIXED
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_URGENCY
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_WR_PRI_URGENCY
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_RD_PRI_URGENCY_MASKING
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_WR_PRI_URGENCY_MASKING
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_RD_PRI_QUANT_PRI1
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI2
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI3
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI1
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI2
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI3
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_SDP_ARB_DRAM
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//GCEA_SDP_ARB_FINAL
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_STRETCH__SHIFT 0x1b
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1c
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_STRETCH_MASK 0x08000000L
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x10000000L
+//GCEA_SDP_DRAM_PRIORITY
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_IO_PRIORITY
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_CREDITS
+#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
+#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_TAG_RESERVE0
+#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GCEA_SDP_TAG_RESERVE1
+#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GCEA_SDP_VCC_RESERVE0
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCC_RESERVE1
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_VCD_RESERVE0
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCD_RESERVE1
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_REQ_CNTL
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//GCEA_MISC
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//GCEA_LATENCY_SAMPLING
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//GCEA_PERFCOUNTER_LO
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_HI
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GCEA_PERFCOUNTER0_CFG
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER1_CFG
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+
+
+// addressBlock: xcd0_gc_ea_gceadec2
+//GCEA_PERFCOUNTER_RSLT_CNTL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//GCEA_MAM_CTRL
+#define GCEA_MAM_CTRL__ADRAM_MODE__SHIFT 0x0
+#define GCEA_MAM_CTRL__ADRAM_COALESCE_DISABLE__SHIFT 0x2
+#define GCEA_MAM_CTRL__ARAM_FLUSH_CNTR_THRESHOLD__SHIFT 0x3
+#define GCEA_MAM_CTRL__ARAM_FLUSH_CNTR_DISABLE__SHIFT 0x6
+#define GCEA_MAM_CTRL__ARAM_FORCE_FLUSH__SHIFT 0x7
+#define GCEA_MAM_CTRL__ALOG_MODE1_FILTER1_THRESHOLD__SHIFT 0x8
+#define GCEA_MAM_CTRL__ALOG_MODE1_FILTER2_BYPASS__SHIFT 0xb
+#define GCEA_MAM_CTRL__ALOG_ACTIVE__SHIFT 0xc
+#define GCEA_MAM_CTRL__SDP_PRIORITY__SHIFT 0xd
+#define GCEA_MAM_CTRL__CLIENT_ID__SHIFT 0x11
+#define GCEA_MAM_CTRL__MAM_DISABLE__SHIFT 0x16
+#define GCEA_MAM_CTRL__ARAM_FLUSH_RB_SIZE__SHIFT 0x17
+#define GCEA_MAM_CTRL__ALOG_MODE__SHIFT 0x1b
+#define GCEA_MAM_CTRL__ALOG_MODE2_LOCK_WINDOW__SHIFT 0x1c
+#define GCEA_MAM_CTRL__ALOG_TRACK_2M_SEGMENT__SHIFT 0x1f
+#define GCEA_MAM_CTRL__ADRAM_MODE_MASK 0x00000003L
+#define GCEA_MAM_CTRL__ADRAM_COALESCE_DISABLE_MASK 0x00000004L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_CNTR_THRESHOLD_MASK 0x00000038L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_CNTR_DISABLE_MASK 0x00000040L
+#define GCEA_MAM_CTRL__ARAM_FORCE_FLUSH_MASK 0x00000080L
+#define GCEA_MAM_CTRL__ALOG_MODE1_FILTER1_THRESHOLD_MASK 0x00000700L
+#define GCEA_MAM_CTRL__ALOG_MODE1_FILTER2_BYPASS_MASK 0x00000800L
+#define GCEA_MAM_CTRL__ALOG_ACTIVE_MASK 0x00001000L
+#define GCEA_MAM_CTRL__SDP_PRIORITY_MASK 0x0001E000L
+#define GCEA_MAM_CTRL__CLIENT_ID_MASK 0x003E0000L
+#define GCEA_MAM_CTRL__MAM_DISABLE_MASK 0x00400000L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_RB_SIZE_MASK 0x07800000L
+#define GCEA_MAM_CTRL__ALOG_MODE_MASK 0x08000000L
+#define GCEA_MAM_CTRL__ALOG_MODE2_LOCK_WINDOW_MASK 0x70000000L
+#define GCEA_MAM_CTRL__ALOG_TRACK_2M_SEGMENT_MASK 0x80000000L
+//GCEA_MAM_CTRL2
+#define GCEA_MAM_CTRL2__ALOG_MODE2_INTR_THRESHOLD__SHIFT 0x0
+#define GCEA_MAM_CTRL2__ALOG_SPACE_EN__SHIFT 0x2
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_SNOOP_EN__SHIFT 0x5
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_NOALLOC__SHIFT 0x6
+#define GCEA_MAM_CTRL2__RESERVED_FIELD__SHIFT 0x7
+#define GCEA_MAM_CTRL2__ADDR_HI__SHIFT 0x18
+#define GCEA_MAM_CTRL2__ALOG_MODE2_INTR_THRESHOLD_MASK 0x00000003L
+#define GCEA_MAM_CTRL2__ALOG_SPACE_EN_MASK 0x0000001CL
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_SNOOP_EN_MASK 0x00000020L
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_NOALLOC_MASK 0x00000040L
+#define GCEA_MAM_CTRL2__RESERVED_FIELD_MASK 0x00FFFF80L
+#define GCEA_MAM_CTRL2__ADDR_HI_MASK 0xFF000000L
+//GCEA_DSM_CNTL
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//GCEA_DSM_CNTLA
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//GCEA_DSM_CNTLB
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//GCEA_DSM_CNTL2
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//GCEA_DSM_CNTL2A
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//GCEA_DSM_CNTL2B
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//GCEA_TCC_XBR_CREDITS
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
+#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
+#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
+#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
+#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
+#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
+#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
+#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
+#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
+//GCEA_TCC_XBR_MAXBURST
+#define GCEA_TCC_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
+#define GCEA_TCC_XBR_MAXBURST__IO_RD__SHIFT 0x4
+#define GCEA_TCC_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
+#define GCEA_TCC_XBR_MAXBURST__IO_WR__SHIFT 0xc
+#define GCEA_TCC_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
+#define GCEA_TCC_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
+#define GCEA_TCC_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
+#define GCEA_TCC_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
+//GCEA_PROBE_CNTL
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
+//GCEA_PROBE_MAP
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC__SHIFT 0x0
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC__SHIFT 0x1
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC__SHIFT 0x2
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC__SHIFT 0x3
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC__SHIFT 0x4
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC__SHIFT 0x5
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC__SHIFT 0x6
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC__SHIFT 0x7
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC__SHIFT 0x8
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC__SHIFT 0x9
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC__SHIFT 0xa
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC__SHIFT 0xb
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC__SHIFT 0xc
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC__SHIFT 0xd
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC__SHIFT 0xe
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC__SHIFT 0xf
+#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC_MASK 0x00000001L
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC_MASK 0x00000002L
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC_MASK 0x00000004L
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC_MASK 0x00000008L
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC_MASK 0x00000010L
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC_MASK 0x00000020L
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC_MASK 0x00000040L
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC_MASK 0x00000080L
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC_MASK 0x00000100L
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC_MASK 0x00000200L
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC_MASK 0x00000400L
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC_MASK 0x00000800L
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC_MASK 0x00001000L
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC_MASK 0x00002000L
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC_MASK 0x00004000L
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC_MASK 0x00008000L
+#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define GCEA_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR__SHIFT 0x12
+#define GCEA_ERR_STATUS__FUE_FLAG_CLIENT__SHIFT 0x13
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+#define GCEA_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR_MASK 0x00040000L
+#define GCEA_ERR_STATUS__FUE_FLAG_CLIENT_MASK 0x00080000L
+//GCEA_MISC2
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define GCEA_MISC2__BLOCK_REQUESTS__SHIFT 0xd
+#define GCEA_MISC2__REQUESTS_BLOCKED__SHIFT 0xe
+#define GCEA_MISC2__FGCLKEN_OVERRIDE__SHIFT 0xf
+#define GCEA_MISC2__DRAM_RD_THROTTLE__SHIFT 0x10
+#define GCEA_MISC2__DRAM_WR_THROTTLE__SHIFT 0x11
+#define GCEA_MISC2__GMI_RD_THROTTLE__SHIFT 0x12
+#define GCEA_MISC2__GMI_WR_THROTTLE__SHIFT 0x13
+#define GCEA_MISC2__CONTAIN_ILLEGAL_OP__SHIFT 0x14
+#define GCEA_MISC2__REPORT_ILLEGAL_OP__SHIFT 0x15
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define GCEA_MISC2__BLOCK_REQUESTS_MASK 0x00002000L
+#define GCEA_MISC2__REQUESTS_BLOCKED_MASK 0x00004000L
+#define GCEA_MISC2__FGCLKEN_OVERRIDE_MASK 0x00008000L
+#define GCEA_MISC2__DRAM_RD_THROTTLE_MASK 0x00010000L
+#define GCEA_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
+#define GCEA_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
+#define GCEA_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+#define GCEA_MISC2__CONTAIN_ILLEGAL_OP_MASK 0x00100000L
+#define GCEA_MISC2__REPORT_ILLEGAL_OP_MASK 0x00200000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS1
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS1
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_MISCCREDITS
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED__SHIFT 0x10
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x17
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED_MASK 0x007F0000L
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x3F800000L
+//GCEA_SDP_ENABLE
+#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
+
+
+// addressBlock: xcd0_gc_ea_pwrdec
+//GCEA_ICG_CTRL
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x0
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x2
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x3
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x00000001L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000002L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000004L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000008L
+
+
+// addressBlock: xcd0_gc_rmi_rmidec
+//RMI_GENERAL_CNTL
+#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG__SHIFT 0x11
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
+#define RMI_GENERAL_CNTL__RB1_HARVEST_EN__SHIFT 0x14
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE__SHIFT 0x19
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK__SHIFT 0x1a
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK__SHIFT 0x1b
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK__SHIFT 0x1c
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK__SHIFT 0x1d
+#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK__SHIFT 0x1e
+#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_MASK 0x00060000L
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
+#define RMI_GENERAL_CNTL__RB1_HARVEST_EN_MASK 0x00100000L
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE_MASK 0x02000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK_MASK 0x04000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK_MASK 0x08000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK_MASK 0x10000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK_MASK 0x20000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK_MASK 0x40000000L
+//RMI_GENERAL_CNTL1
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xa
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN__SHIFT 0xb
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN__SHIFT 0xc
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000200L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000400L
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN_MASK 0x00000800L
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN_MASK 0x00001000L
+//RMI_GENERAL_STATUS
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
+#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY__SHIFT 0x6
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x10
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x11
+#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY__SHIFT 0x12
+#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY__SHIFT 0x13
+#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY__SHIFT 0x14
+#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED__SHIFT 0x15
+#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY__SHIFT 0x1d
+#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL__SHIFT 0x1e
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
+#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY_MASK 0x00000040L
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00010000L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00020000L
+#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY_MASK 0x00040000L
+#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY_MASK 0x00080000L
+#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY_MASK 0x00100000L
+#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED_MASK 0x1FE00000L
+#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY_MASK 0x20000000L
+#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL_MASK 0x40000000L
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
+//RMI_SUBBLOCK_STATUS0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
+//RMI_SUBBLOCK_STATUS1
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
+//RMI_SUBBLOCK_STATUS2
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
+//RMI_SUBBLOCK_STATUS3
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
+//RMI_XBAR_CONFIG
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1__SHIFT 0xe
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
+#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1_MASK 0x00004000L
+//RMI_PROBE_POP_LOGIC_CNTL
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
+//RMI_UTC_XNACK_N_MISC_CNTL
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
+//RMI_DEMUX_CNTL
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL__SHIFT 0x0
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x1
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x4
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL__SHIFT 0x10
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x11
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x14
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_MASK 0x00000001L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000002L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE_MASK 0x00000030L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_MASK 0x00010000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00020000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00300000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
+//RMI_UTCL1_CNTL1
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//RMI_UTCL1_CNTL2
+#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define RMI_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define RMI_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//RMI_UTC_UNIT_CONFIG
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN__SHIFT 0x0
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN_MASK 0x0000FFFFL
+//RMI_TCIW_FORMATTER0_CNTL
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA__SHIFT 0x13
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE__SHIFT 0x1c
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_MASK 0x07F80000L
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE_MASK 0x10000000L
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
+//RMI_TCIW_FORMATTER1_CNTL
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA__SHIFT 0x13
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE__SHIFT 0x1c
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_MASK 0x07F80000L
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE_MASK 0x10000000L
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
+//RMI_SCOREBOARD_CNTL
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1__SHIFT 0x4
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0__SHIFT 0x7
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN__SHIFT 0x8
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1_MASK 0x00000010L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0_MASK 0x00000080L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN_MASK 0x00000100L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
+//RMI_SCOREBOARD_STATUS0
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
+//RMI_SCOREBOARD_STATUS1
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
+//RMI_SCOREBOARD_STATUS2
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
+//RMI_XBAR_ARBITER_CONFIG
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
+//RMI_XBAR_ARBITER_CONFIG_1
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD_MASK 0x00FF0000L
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR_MASK 0xFF000000L
+//RMI_CLOCK_CNTRL
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK__SHIFT 0x14
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK__SHIFT 0x19
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK_MASK 0x01F00000L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK_MASK 0x3E000000L
+//RMI_UTCL1_STATUS
+#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//RMI_XNACK_DEBUG
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID__SHIFT 0x0
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID_MASK 0x0000FFFFL
+//RMI_SPARE
+#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING__SHIFT 0x0
+#define RMI_SPARE__SPARE_BIT_1__SHIFT 0x1
+#define RMI_SPARE__SPARE_BIT_2__SHIFT 0x2
+#define RMI_SPARE__SPARE_BIT_3__SHIFT 0x3
+#define RMI_SPARE__SPARE_BIT_4__SHIFT 0x4
+#define RMI_SPARE__SPARE_BIT_5__SHIFT 0x5
+#define RMI_SPARE__SPARE_BIT_6__SHIFT 0x6
+#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define RMI_SPARE__SPARE_BIT_8_0__SHIFT 0x8
+#define RMI_SPARE__SPARE_BIT_16_0__SHIFT 0x10
+#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING_MASK 0x00000001L
+#define RMI_SPARE__SPARE_BIT_1_MASK 0x00000002L
+#define RMI_SPARE__SPARE_BIT_2_MASK 0x00000004L
+#define RMI_SPARE__SPARE_BIT_3_MASK 0x00000008L
+#define RMI_SPARE__SPARE_BIT_4_MASK 0x00000010L
+#define RMI_SPARE__SPARE_BIT_5_MASK 0x00000020L
+#define RMI_SPARE__SPARE_BIT_6_MASK 0x00000040L
+#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define RMI_SPARE__SPARE_BIT_8_0_MASK 0x0000FF00L
+#define RMI_SPARE__SPARE_BIT_16_0_MASK 0xFFFF0000L
+//RMI_SPARE_1
+#define RMI_SPARE_1__SPARE_BIT_8__SHIFT 0x0
+#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
+#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
+#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
+#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
+#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
+#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
+#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
+#define RMI_SPARE_1__SPARE_BIT_8_1__SHIFT 0x8
+#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
+#define RMI_SPARE_1__SPARE_BIT_8_MASK 0x00000001L
+#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
+#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
+#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
+#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
+#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
+#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
+#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
+#define RMI_SPARE_1__SPARE_BIT_8_1_MASK 0x0000FF00L
+#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
+//RMI_SPARE_2
+#define RMI_SPARE_2__SPARE_BIT_16__SHIFT 0x0
+#define RMI_SPARE_2__SPARE_BIT_17__SHIFT 0x1
+#define RMI_SPARE_2__SPARE_BIT_18__SHIFT 0x2
+#define RMI_SPARE_2__SPARE_BIT_19__SHIFT 0x3
+#define RMI_SPARE_2__SPARE_BIT_20__SHIFT 0x4
+#define RMI_SPARE_2__SPARE_BIT_21__SHIFT 0x5
+#define RMI_SPARE_2__SPARE_BIT_22__SHIFT 0x6
+#define RMI_SPARE_2__SPARE_BIT_23__SHIFT 0x7
+#define RMI_SPARE_2__SPARE_BIT_4_0__SHIFT 0x8
+#define RMI_SPARE_2__SPARE_BIT_4_1__SHIFT 0xc
+#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
+#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
+#define RMI_SPARE_2__SPARE_BIT_16_MASK 0x00000001L
+#define RMI_SPARE_2__SPARE_BIT_17_MASK 0x00000002L
+#define RMI_SPARE_2__SPARE_BIT_18_MASK 0x00000004L
+#define RMI_SPARE_2__SPARE_BIT_19_MASK 0x00000008L
+#define RMI_SPARE_2__SPARE_BIT_20_MASK 0x00000010L
+#define RMI_SPARE_2__SPARE_BIT_21_MASK 0x00000020L
+#define RMI_SPARE_2__SPARE_BIT_22_MASK 0x00000040L
+#define RMI_SPARE_2__SPARE_BIT_23_MASK 0x00000080L
+#define RMI_SPARE_2__SPARE_BIT_4_0_MASK 0x00000F00L
+#define RMI_SPARE_2__SPARE_BIT_4_1_MASK 0x0000F000L
+#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
+#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
+
+
+// addressBlock: xcd0_gc_utcl2_atcl2dec
+//ATC_L2_CNTL
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define ATC_L2_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x14
+#define ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE__SHIFT 0x16
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+#define ATC_L2_CNTL__FRAG_APT_INTXN_MODE_MASK 0x00300000L
+#define ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE_MASK 0x0FC00000L
+//ATC_L2_CNTL2
+#define ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define ATC_L2_CNTL2__NUM_BANKS_LOG2__SHIFT 0x6
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x9
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xb
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0xc
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xf
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x12
+#define ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define ATC_L2_CNTL2__NUM_BANKS_LOG2_MASK 0x000001C0L
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x00000600L
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000800L
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00007000L
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00038000L
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00FC0000L
+//ATC_L2_CACHE_DATA0
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
+//ATC_L2_CACHE_DATA1
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//ATC_L2_CACHE_DATA2
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATC_L2_CACHE_DATA3
+#define ATC_L2_CACHE_DATA3__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATC_L2_CACHE_DATA3__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATC_L2_CNTL3
+#define ATC_L2_CNTL3__L2_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define ATC_L2_CNTL3__L2_MIDK_FRAGMENT_SIZE__SHIFT 0x6
+#define ATC_L2_CNTL3__L2_BIGK_FRAGMENT_SIZE__SHIFT 0xc
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x12
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x15
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x1b
+#define ATC_L2_CNTL3__REPEATER_FGCG_OFF__SHIFT 0x1e
+#define ATC_L2_CNTL3__L2_SMALLK_FRAGMENT_SIZE_MASK 0x0000003FL
+#define ATC_L2_CNTL3__L2_MIDK_FRAGMENT_SIZE_MASK 0x00000FC0L
+#define ATC_L2_CNTL3__L2_BIGK_FRAGMENT_SIZE_MASK 0x0003F000L
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x001C0000L
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x07E00000L
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x38000000L
+#define ATC_L2_CNTL3__REPEATER_FGCG_OFF_MASK 0x40000000L
+//ATC_L2_STATUS
+#define ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS__SHIFT 0x1
+#define ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS_MASK 0x00000002L
+//ATC_L2_STATUS2
+#define ATC_L2_STATUS2__UCE_MEM_ADDR__SHIFT 0x0
+#define ATC_L2_STATUS2__UCE_MEM_INST__SHIFT 0xc
+#define ATC_L2_STATUS2__UCE_SRT_CACHE__SHIFT 0x14
+#define ATC_L2_STATUS2__UCE__SHIFT 0x15
+#define ATC_L2_STATUS2__UCE_MEM_ADDR_MASK 0x00000FFFL
+#define ATC_L2_STATUS2__UCE_MEM_INST_MASK 0x000FF000L
+#define ATC_L2_STATUS2__UCE_SRT_CACHE_MASK 0x00100000L
+#define ATC_L2_STATUS2__UCE_MASK 0x00200000L
+//ATC_L2_MISC_CG
+#define ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//ATC_L2_MEM_POWER_LS
+#define ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATC_L2_CGTT_CLK_CTRL
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//ATC_L2_CACHE_4K_DSM_INDEX
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_32K_DSM_INDEX
+#define ATC_L2_CACHE_32K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_32K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_2M_DSM_INDEX
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_4K_DSM_CNTL
+#define ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATC_L2_CACHE_32K_DSM_CNTL
+#define ATC_L2_CACHE_32K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATC_L2_CACHE_32K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATC_L2_CACHE_32K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATC_L2_CACHE_32K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATC_L2_CACHE_32K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_32K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_32K_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATC_L2_CACHE_32K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATC_L2_CACHE_32K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATC_L2_CACHE_32K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATC_L2_CACHE_32K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATC_L2_CACHE_32K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_32K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATC_L2_CACHE_32K_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATC_L2_CACHE_2M_DSM_CNTL
+#define ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATC_L2_CNTL4
+#define ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x0
+#define ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0xa
+#define ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000003FFL
+#define ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000FFC00L
+//ATC_L2_MM_GROUP_RT_CLASSES
+#define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0
+#define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: xcd0_gc_utcl2_vml2pfdec
+//VM_L2_CNTL
+#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//VM_L2_CNTL2
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//VM_L2_CNTL3
+#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//VM_L2_STATUS
+#define VM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//VM_DUMMY_PAGE_FAULT_CNTL
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//VM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_CNTL
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//VM_L2_PROTECTION_FAULT_CNTL2
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//VM_L2_PROTECTION_FAULT_MM_CNTL3
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_MM_CNTL4
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_STATUS
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
+//VM_L2_PROTECTION_FAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//VM_L2_CNTL4
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define VM_L2_CNTL4__GC_CH_FGCG_OFF__SHIFT 0x1d
+#define VM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE__SHIFT 0x1e
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+#define VM_L2_CNTL4__GC_CH_FGCG_OFF_MASK 0x20000000L
+#define VM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE_MASK 0x40000000L
+//VM_L2_CNTL5
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE__SHIFT 0x0
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE__SHIFT 0x1
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE_MASK 0x00000001L
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE_MASK 0x00000002L
+//VM_L2_MM_GROUP_RT_CLASSES
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//VM_L2_BANK_SELECT_RESERVED_CID
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_BANK_SELECT_RESERVED_CID2
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_CACHE_PARITY_CNTL
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//VM_L2_CGTT_CLK_CTRL
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//VM_L2_CGTT_BUSY_CTRL
+#define VM_L2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define VM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x4
+#define VM_L2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000000FL
+#define VM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000010L
+//VML2_MEM_ECC_INDEX
+#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_WALKER_MEM_ECC_INDEX
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//UTCL2_MEM_ECC_INDEX
+#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_MEM_ECC_CNTL
+#define VML2_MEM_ECC_CNTL__INJECT_DELAY__SHIFT 0x0
+#define VML2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define VML2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define VML2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define VML2_MEM_ECC_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_MEM_ECC_CNTL__WRITE_COUNTERS__SHIFT 0x10
+#define VML2_MEM_ECC_CNTL__TEST_FUE__SHIFT 0x11
+#define VML2_MEM_ECC_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define VML2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define VML2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define VML2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define VML2_MEM_ECC_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+#define VML2_MEM_ECC_CNTL__WRITE_COUNTERS_MASK 0x00010000L
+#define VML2_MEM_ECC_CNTL__TEST_FUE_MASK 0x00020000L
+//VML2_WALKER_MEM_ECC_CNTL
+#define VML2_WALKER_MEM_ECC_CNTL__INJECT_DELAY__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define VML2_WALKER_MEM_ECC_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_WALKER_MEM_ECC_CNTL__WRITE_COUNTERS__SHIFT 0x10
+#define VML2_WALKER_MEM_ECC_CNTL__TEST_FUE__SHIFT 0x11
+#define VML2_WALKER_MEM_ECC_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define VML2_WALKER_MEM_ECC_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define VML2_WALKER_MEM_ECC_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+#define VML2_WALKER_MEM_ECC_CNTL__WRITE_COUNTERS_MASK 0x00010000L
+#define VML2_WALKER_MEM_ECC_CNTL__TEST_FUE_MASK 0x00020000L
+//UTCL2_MEM_ECC_CNTL
+#define UTCL2_MEM_ECC_CNTL__INJECT_DELAY__SHIFT 0x0
+#define UTCL2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define UTCL2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define UTCL2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define UTCL2_MEM_ECC_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define UTCL2_MEM_ECC_CNTL__WRITE_COUNTERS__SHIFT 0x10
+#define UTCL2_MEM_ECC_CNTL__TEST_FUE__SHIFT 0x11
+#define UTCL2_MEM_ECC_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define UTCL2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define UTCL2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define UTCL2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define UTCL2_MEM_ECC_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+#define UTCL2_MEM_ECC_CNTL__WRITE_COUNTERS_MASK 0x00010000L
+#define UTCL2_MEM_ECC_CNTL__TEST_FUE_MASK 0x00020000L
+//VML2_MEM_ECC_STATUS
+#define VML2_MEM_ECC_STATUS__UCE__SHIFT 0x0
+#define VML2_MEM_ECC_STATUS__FED__SHIFT 0x1
+#define VML2_MEM_ECC_STATUS__UCE_MASK 0x00000001L
+#define VML2_MEM_ECC_STATUS__FED_MASK 0x00000002L
+//VML2_WALKER_MEM_ECC_STATUS
+#define VML2_WALKER_MEM_ECC_STATUS__UCE__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_STATUS__FED__SHIFT 0x1
+#define VML2_WALKER_MEM_ECC_STATUS__UCE_MASK 0x00000001L
+#define VML2_WALKER_MEM_ECC_STATUS__FED_MASK 0x00000002L
+//UTCL2_MEM_ECC_STATUS
+#define UTCL2_MEM_ECC_STATUS__UCE__SHIFT 0x0
+#define UTCL2_MEM_ECC_STATUS__FED__SHIFT 0x1
+#define UTCL2_MEM_ECC_STATUS__UCE_MASK 0x00000001L
+#define UTCL2_MEM_ECC_STATUS__FED_MASK 0x00000002L
+//UTCL2_EDC_MODE
+#define UTCL2_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
+#define UTCL2_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define UTCL2_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define UTCL2_EDC_MODE__DED_MODE__SHIFT 0x14
+#define UTCL2_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define UTCL2_EDC_MODE__BYPASS__SHIFT 0x1f
+#define UTCL2_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
+#define UTCL2_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define UTCL2_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define UTCL2_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define UTCL2_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define UTCL2_EDC_MODE__BYPASS_MASK 0x80000000L
+//UTCL2_EDC_CONFIG
+#define UTCL2_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define UTCL2_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define UTCL2_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define UTCL2_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+
+
+// addressBlock: xcd0_gc_utcl2_vml2vcdec
+//VM_CONTEXT0_CNTL
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT1_CNTL
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT2_CNTL
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT3_CNTL
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT4_CNTL
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT5_CNTL
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT6_CNTL
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT7_CNTL
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT8_CNTL
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT9_CNTL
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT10_CNTL
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT11_CNTL
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT12_CNTL
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT13_CNTL
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT14_CNTL
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT15_CNTL
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXTS_DISABLE
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//VM_INVALIDATE_ENG0_SEM
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG1_SEM
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG2_SEM
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG3_SEM
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG4_SEM
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG5_SEM
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG6_SEM
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG7_SEM
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG8_SEM
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG9_SEM
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG10_SEM
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG11_SEM
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG12_SEM
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG13_SEM
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG14_SEM
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG15_SEM
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG16_SEM
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG17_SEM
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG0_REQ
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG0_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG0_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG1_REQ
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG1_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG1_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG2_REQ
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG2_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG2_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG3_REQ
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG3_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG3_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG4_REQ
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG4_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG4_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG5_REQ
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG5_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG5_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG6_REQ
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG6_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG6_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG7_REQ
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG7_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG7_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG8_REQ
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG8_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG8_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG9_REQ
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG9_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG9_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG10_REQ
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG10_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG10_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG11_REQ
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG11_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG11_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG12_REQ
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG12_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG12_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG13_REQ
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG13_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG13_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG14_REQ
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG14_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG14_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG15_REQ
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG15_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG15_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG16_REQ
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG16_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG16_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG17_REQ
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG17_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG17_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG0_ACK
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG1_ACK
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG2_ACK
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG3_ACK
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG4_ACK
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG5_ACK
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG6_ACK
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG7_ACK
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG8_ACK
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG9_ACK
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG10_ACK
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG11_ACK
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG12_ACK
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG13_ACK
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG14_ACK
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG15_ACK
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG16_ACK
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG17_ACK
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
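The SHIFT/MASK pairs above are normally consumed by driver code that packs individual fields into a 32-bit register value before writing it to the corresponding engine register. A minimal sketch, assuming only the macros defined in this header; the helper name, its parameters, and the particular fields set are illustrative and not part of this patch:

/* Hypothetical helper (not in this patch): build a per-engine TLB
 * invalidate request word from the field definitions above. */
static inline unsigned int vm_invalidate_eng0_req(unsigned int vmid_bits,
						  unsigned int flush_type)
{
	unsigned int req = 0;

	/* low 16 bits: one request bit per VMID to invalidate */
	req |= (vmid_bits << VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT) &
	       VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK;
	/* 2-bit flush type field starting at bit 16 */
	req |= (flush_type << VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) &
	       VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK;
	/* also drop cached L2 and L1 PTEs for the selected VMIDs */
	req |= VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK;
	req |= VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;

	return req;
}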
+
+
+// addressBlock: xcd0_gc_utcl2_vmsharedpfdec
+//MC_VM_NB_MMIOBASE
+#define MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//MC_VM_NB_MMIOLIMIT
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//MC_VM_NB_PCI_CTRL
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//MC_VM_NB_PCI_ARB
+#define MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//MC_VM_NB_TOP_OF_DRAM_SLOT1
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//MC_VM_NB_LOWER_TOP_OF_DRAM2
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//MC_VM_NB_UPPER_TOP_OF_DRAM2
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x0000FFFFL
+//MC_VM_FB_OFFSET
+#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//MC_VM_STEERING
+#define MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//MC_SHARED_VIRT_RESET_REQ
+#define MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//MC_MEM_POWER_LS
+#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//MC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_APT_CNTL
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define MC_VM_APT_CNTL__CHECK_IS_LOCAL__SHIFT 0x2
+#define MC_VM_APT_CNTL__PERMS_GRANTED__SHIFT 0x3
+#define MC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL__SHIFT 0x4
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+#define MC_VM_APT_CNTL__CHECK_IS_LOCAL_MASK 0x00000004L
+#define MC_VM_APT_CNTL__PERMS_GRANTED_MASK 0x00000008L
+#define MC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL_MASK 0x00000030L
+//MC_VM_LOCAL_HBM_ADDRESS_START
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_END
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//UTCL2_CGTT_CLK_CTRL
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x0000000FL
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x000000F0L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0001FFFFL
+//MC_VM_CACHEABLE_DRAM_CNTL
+#define MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+//MC_VM_HOST_MAPPING
+#define MC_VM_HOST_MAPPING__MODE__SHIFT 0x0
+#define MC_VM_HOST_MAPPING__MODE_MASK 0x00000001L
+
+
+// addressBlock: xcd0_gc_utcl2_vmsharedvcdec
+//MC_VM_FB_LOCATION_BASE
+#define MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//MC_VM_FB_LOCATION_TOP
+#define MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_TOP
+#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_BOT
+#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//MC_VM_AGP_BASE
+#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_MX_L1_TLB_CNTL
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
+
+
+// addressBlock: xcd0_gc_utcl2_l2tlbdec
+//L2TLB_TLB0_STATUS
+#define L2TLB_TLB0_STATUS__BUSY__SHIFT 0x0
+#define L2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define L2TLB_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define L2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR_MASK 0xFFFFFFFFL
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID__SHIFT 0x4
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID__SHIFT 0x9
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF__SHIFT 0xd
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA__SHIFT 0xe
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM__SHIFT 0x10
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM__SHIFT 0x11
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM__SHIFT 0x12
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID__SHIFT 0x13
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ__SHIFT 0x1f
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR_MASK 0x0000000FL
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID_MASK 0x000000F0L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID_MASK 0x00001E00L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF_MASK 0x00002000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA_MASK 0x0000C000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM_MASK 0x00010000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM_MASK 0x00020000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM_MASK 0x00040000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID_MASK 0x0FF80000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ_MASK 0x80000000L
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR_MASK 0xFFFFFFFFL
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS__SHIFT 0x4
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE__SHIFT 0x7
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP__SHIFT 0xd
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA__SHIFT 0xe
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO__SHIFT 0xf
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ__SHIFT 0x10
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE__SHIFT 0x11
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE__SHIFT 0x12
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG__SHIFT 0x14
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC__SHIFT 0x15
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK__SHIFT 0x16
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK__SHIFT 0x1f
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR_MASK 0x0000000FL
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS_MASK 0x00000070L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE_MASK 0x00001F80L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP_MASK 0x00002000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA_MASK 0x00004000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO_MASK 0x00008000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ_MASK 0x00010000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE_MASK 0x00020000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE_MASK 0x000C0000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG_MASK 0x00100000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC_MASK 0x00200000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK_MASK 0x00C00000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK_MASK 0x80000000L
+
+
+// addressBlock: xcd0_gc_tcdec
+//TCP_INVALIDATE
+#define TCP_INVALIDATE__START__SHIFT 0x0
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+//TCP_STATUS
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
+#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
+#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
+#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
+#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
+#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
+#define TCP_STATUS__READ_BUSY__SHIFT 0x6
+#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
+#define TCP_STATUS__VM_BUSY__SHIFT 0x8
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
+#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
+#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
+#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
+#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
+#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
+#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
+#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
+//TCP_CNTL
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
+#define TCP_CNTL__L1_SIZE__SHIFT 0x2
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x4
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x16
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__L1_SIZE_MASK 0x0000000CL
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x00000010L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0x0FC00000L
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+//TCP_CHAN_STEER_0
+#define TCP_CHAN_STEER_0__CHAN0__SHIFT 0x0
+#define TCP_CHAN_STEER_0__CHAN1__SHIFT 0x4
+#define TCP_CHAN_STEER_0__CHAN2__SHIFT 0x8
+#define TCP_CHAN_STEER_0__CHAN3__SHIFT 0xc
+#define TCP_CHAN_STEER_0__CHAN4__SHIFT 0x10
+#define TCP_CHAN_STEER_0__CHAN5__SHIFT 0x14
+#define TCP_CHAN_STEER_0__CHAN6__SHIFT 0x18
+#define TCP_CHAN_STEER_0__CHAN7__SHIFT 0x1c
+#define TCP_CHAN_STEER_0__CHAN0_MASK 0x0000000FL
+#define TCP_CHAN_STEER_0__CHAN1_MASK 0x000000F0L
+#define TCP_CHAN_STEER_0__CHAN2_MASK 0x00000F00L
+#define TCP_CHAN_STEER_0__CHAN3_MASK 0x0000F000L
+#define TCP_CHAN_STEER_0__CHAN4_MASK 0x000F0000L
+#define TCP_CHAN_STEER_0__CHAN5_MASK 0x00F00000L
+#define TCP_CHAN_STEER_0__CHAN6_MASK 0x0F000000L
+#define TCP_CHAN_STEER_0__CHAN7_MASK 0xF0000000L
+//TCP_CHAN_STEER_1
+#define TCP_CHAN_STEER_1__CHAN8__SHIFT 0x0
+#define TCP_CHAN_STEER_1__CHAN9__SHIFT 0x4
+#define TCP_CHAN_STEER_1__CHANA__SHIFT 0x8
+#define TCP_CHAN_STEER_1__CHANB__SHIFT 0xc
+#define TCP_CHAN_STEER_1__CHANC__SHIFT 0x10
+#define TCP_CHAN_STEER_1__CHAND__SHIFT 0x14
+#define TCP_CHAN_STEER_1__CHANE__SHIFT 0x18
+#define TCP_CHAN_STEER_1__CHANF__SHIFT 0x1c
+#define TCP_CHAN_STEER_1__CHAN8_MASK 0x0000000FL
+#define TCP_CHAN_STEER_1__CHAN9_MASK 0x000000F0L
+#define TCP_CHAN_STEER_1__CHANA_MASK 0x00000F00L
+#define TCP_CHAN_STEER_1__CHANB_MASK 0x0000F000L
+#define TCP_CHAN_STEER_1__CHANC_MASK 0x000F0000L
+#define TCP_CHAN_STEER_1__CHAND_MASK 0x00F00000L
+#define TCP_CHAN_STEER_1__CHANE_MASK 0x0F000000L
+#define TCP_CHAN_STEER_1__CHANF_MASK 0xF0000000L
+//TCP_ADDR_CONFIG
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x0
+#define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT 0xb
+#define TCP_ADDR_CONFIG__ENABLE2MHASH__SHIFT 0xc
+#define TCP_ADDR_CONFIG__ENABLE1GHASH__SHIFT 0xd
+#define TCP_ADDR_CONFIG__ENABLE1THASH__SHIFT 0xe
+#define TCP_ADDR_CONFIG__ENABLE4KHASH__SHIFT 0xf
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000001FL
+#define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK 0x00000800L
+#define TCP_ADDR_CONFIG__ENABLE2MHASH_MASK 0x00001000L
+#define TCP_ADDR_CONFIG__ENABLE1GHASH_MASK 0x00002000L
+#define TCP_ADDR_CONFIG__ENABLE1THASH_MASK 0x00004000L
+#define TCP_ADDR_CONFIG__ENABLE4KHASH_MASK 0x00008000L
+//TCP_CREDIT
+#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
+#define TCP_CREDIT__TD_CREDIT__SHIFT 0x1d
+#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x000007FFL
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
+#define TCP_CREDIT__TD_CREDIT_MASK 0xE0000000L
+//TCP_BUFFER_ADDR_HASH_CNTL
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x0
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x8
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x10
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x18
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x00000007L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x00000700L
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x00070000L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x07000000L
+//TC_CFG_L1_LOAD_POLICY0
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_0__SHIFT 0x0
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_1__SHIFT 0x2
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_2__SHIFT 0x4
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_3__SHIFT 0x6
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_4__SHIFT 0x8
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_5__SHIFT 0xa
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_6__SHIFT 0xc
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_7__SHIFT 0xe
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_8__SHIFT 0x10
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_9__SHIFT 0x12
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_10__SHIFT 0x14
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_11__SHIFT 0x16
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_12__SHIFT 0x18
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L1_LOAD_POLICY1
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_16__SHIFT 0x0
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_17__SHIFT 0x2
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_18__SHIFT 0x4
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_19__SHIFT 0x6
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_20__SHIFT 0x8
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_21__SHIFT 0xa
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_22__SHIFT 0xc
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_23__SHIFT 0xe
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_24__SHIFT 0x10
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_25__SHIFT 0x12
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_26__SHIFT 0x14
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_27__SHIFT 0x16
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_28__SHIFT 0x18
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
+//TC_CFG_L1_STORE_POLICY
+#define TC_CFG_L1_STORE_POLICY__POLICY_0__SHIFT 0x0
+#define TC_CFG_L1_STORE_POLICY__POLICY_1__SHIFT 0x1
+#define TC_CFG_L1_STORE_POLICY__POLICY_2__SHIFT 0x2
+#define TC_CFG_L1_STORE_POLICY__POLICY_3__SHIFT 0x3
+#define TC_CFG_L1_STORE_POLICY__POLICY_4__SHIFT 0x4
+#define TC_CFG_L1_STORE_POLICY__POLICY_5__SHIFT 0x5
+#define TC_CFG_L1_STORE_POLICY__POLICY_6__SHIFT 0x6
+#define TC_CFG_L1_STORE_POLICY__POLICY_7__SHIFT 0x7
+#define TC_CFG_L1_STORE_POLICY__POLICY_8__SHIFT 0x8
+#define TC_CFG_L1_STORE_POLICY__POLICY_9__SHIFT 0x9
+#define TC_CFG_L1_STORE_POLICY__POLICY_10__SHIFT 0xa
+#define TC_CFG_L1_STORE_POLICY__POLICY_11__SHIFT 0xb
+#define TC_CFG_L1_STORE_POLICY__POLICY_12__SHIFT 0xc
+#define TC_CFG_L1_STORE_POLICY__POLICY_13__SHIFT 0xd
+#define TC_CFG_L1_STORE_POLICY__POLICY_14__SHIFT 0xe
+#define TC_CFG_L1_STORE_POLICY__POLICY_15__SHIFT 0xf
+#define TC_CFG_L1_STORE_POLICY__POLICY_16__SHIFT 0x10
+#define TC_CFG_L1_STORE_POLICY__POLICY_17__SHIFT 0x11
+#define TC_CFG_L1_STORE_POLICY__POLICY_18__SHIFT 0x12
+#define TC_CFG_L1_STORE_POLICY__POLICY_19__SHIFT 0x13
+#define TC_CFG_L1_STORE_POLICY__POLICY_20__SHIFT 0x14
+#define TC_CFG_L1_STORE_POLICY__POLICY_21__SHIFT 0x15
+#define TC_CFG_L1_STORE_POLICY__POLICY_22__SHIFT 0x16
+#define TC_CFG_L1_STORE_POLICY__POLICY_23__SHIFT 0x17
+#define TC_CFG_L1_STORE_POLICY__POLICY_24__SHIFT 0x18
+#define TC_CFG_L1_STORE_POLICY__POLICY_25__SHIFT 0x19
+#define TC_CFG_L1_STORE_POLICY__POLICY_26__SHIFT 0x1a
+#define TC_CFG_L1_STORE_POLICY__POLICY_27__SHIFT 0x1b
+#define TC_CFG_L1_STORE_POLICY__POLICY_28__SHIFT 0x1c
+#define TC_CFG_L1_STORE_POLICY__POLICY_29__SHIFT 0x1d
+#define TC_CFG_L1_STORE_POLICY__POLICY_30__SHIFT 0x1e
+#define TC_CFG_L1_STORE_POLICY__POLICY_31__SHIFT 0x1f
+#define TC_CFG_L1_STORE_POLICY__POLICY_0_MASK 0x00000001L
+#define TC_CFG_L1_STORE_POLICY__POLICY_1_MASK 0x00000002L
+#define TC_CFG_L1_STORE_POLICY__POLICY_2_MASK 0x00000004L
+#define TC_CFG_L1_STORE_POLICY__POLICY_3_MASK 0x00000008L
+#define TC_CFG_L1_STORE_POLICY__POLICY_4_MASK 0x00000010L
+#define TC_CFG_L1_STORE_POLICY__POLICY_5_MASK 0x00000020L
+#define TC_CFG_L1_STORE_POLICY__POLICY_6_MASK 0x00000040L
+#define TC_CFG_L1_STORE_POLICY__POLICY_7_MASK 0x00000080L
+#define TC_CFG_L1_STORE_POLICY__POLICY_8_MASK 0x00000100L
+#define TC_CFG_L1_STORE_POLICY__POLICY_9_MASK 0x00000200L
+#define TC_CFG_L1_STORE_POLICY__POLICY_10_MASK 0x00000400L
+#define TC_CFG_L1_STORE_POLICY__POLICY_11_MASK 0x00000800L
+#define TC_CFG_L1_STORE_POLICY__POLICY_12_MASK 0x00001000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_13_MASK 0x00002000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_14_MASK 0x00004000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_15_MASK 0x00008000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_16_MASK 0x00010000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_17_MASK 0x00020000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_18_MASK 0x00040000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_19_MASK 0x00080000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_20_MASK 0x00100000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_21_MASK 0x00200000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_22_MASK 0x00400000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_23_MASK 0x00800000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_24_MASK 0x01000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_25_MASK 0x02000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_26_MASK 0x04000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_27_MASK 0x08000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_28_MASK 0x10000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_29_MASK 0x20000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_30_MASK 0x40000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_31_MASK 0x80000000L
+//TC_CFG_L2_LOAD_POLICY0
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_0__SHIFT 0x0
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_1__SHIFT 0x2
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_2__SHIFT 0x4
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_3__SHIFT 0x6
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_4__SHIFT 0x8
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_5__SHIFT 0xa
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_6__SHIFT 0xc
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_7__SHIFT 0xe
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_8__SHIFT 0x10
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_9__SHIFT 0x12
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_10__SHIFT 0x14
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_11__SHIFT 0x16
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_12__SHIFT 0x18
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L2_LOAD_POLICY1
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_16__SHIFT 0x0
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_17__SHIFT 0x2
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_18__SHIFT 0x4
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_19__SHIFT 0x6
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_20__SHIFT 0x8
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_21__SHIFT 0xa
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_22__SHIFT 0xc
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_23__SHIFT 0xe
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_24__SHIFT 0x10
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_25__SHIFT 0x12
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_26__SHIFT 0x14
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_27__SHIFT 0x16
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_28__SHIFT 0x18
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
+//TC_CFG_L2_STORE_POLICY0
+#define TC_CFG_L2_STORE_POLICY0__POLICY_0__SHIFT 0x0
+#define TC_CFG_L2_STORE_POLICY0__POLICY_1__SHIFT 0x2
+#define TC_CFG_L2_STORE_POLICY0__POLICY_2__SHIFT 0x4
+#define TC_CFG_L2_STORE_POLICY0__POLICY_3__SHIFT 0x6
+#define TC_CFG_L2_STORE_POLICY0__POLICY_4__SHIFT 0x8
+#define TC_CFG_L2_STORE_POLICY0__POLICY_5__SHIFT 0xa
+#define TC_CFG_L2_STORE_POLICY0__POLICY_6__SHIFT 0xc
+#define TC_CFG_L2_STORE_POLICY0__POLICY_7__SHIFT 0xe
+#define TC_CFG_L2_STORE_POLICY0__POLICY_8__SHIFT 0x10
+#define TC_CFG_L2_STORE_POLICY0__POLICY_9__SHIFT 0x12
+#define TC_CFG_L2_STORE_POLICY0__POLICY_10__SHIFT 0x14
+#define TC_CFG_L2_STORE_POLICY0__POLICY_11__SHIFT 0x16
+#define TC_CFG_L2_STORE_POLICY0__POLICY_12__SHIFT 0x18
+#define TC_CFG_L2_STORE_POLICY0__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L2_STORE_POLICY0__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L2_STORE_POLICY0__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L2_STORE_POLICY0__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L2_STORE_POLICY0__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L2_STORE_POLICY1
+#define TC_CFG_L2_STORE_POLICY1__POLICY_16__SHIFT 0x0
+#define TC_CFG_L2_STORE_POLICY1__POLICY_17__SHIFT 0x2
+#define TC_CFG_L2_STORE_POLICY1__POLICY_18__SHIFT 0x4
+#define TC_CFG_L2_STORE_POLICY1__POLICY_19__SHIFT 0x6
+#define TC_CFG_L2_STORE_POLICY1__POLICY_20__SHIFT 0x8
+#define TC_CFG_L2_STORE_POLICY1__POLICY_21__SHIFT 0xa
+#define TC_CFG_L2_STORE_POLICY1__POLICY_22__SHIFT 0xc
+#define TC_CFG_L2_STORE_POLICY1__POLICY_23__SHIFT 0xe
+#define TC_CFG_L2_STORE_POLICY1__POLICY_24__SHIFT 0x10
+#define TC_CFG_L2_STORE_POLICY1__POLICY_25__SHIFT 0x12
+#define TC_CFG_L2_STORE_POLICY1__POLICY_26__SHIFT 0x14
+#define TC_CFG_L2_STORE_POLICY1__POLICY_27__SHIFT 0x16
+#define TC_CFG_L2_STORE_POLICY1__POLICY_28__SHIFT 0x18
+#define TC_CFG_L2_STORE_POLICY1__POLICY_29__SHIFT 0x1a
+#define TC_CFG_L2_STORE_POLICY1__POLICY_30__SHIFT 0x1c
+#define TC_CFG_L2_STORE_POLICY1__POLICY_31__SHIFT 0x1e
+#define TC_CFG_L2_STORE_POLICY1__POLICY_16_MASK 0x00000003L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_17_MASK 0x0000000CL
+#define TC_CFG_L2_STORE_POLICY1__POLICY_18_MASK 0x00000030L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_19_MASK 0x000000C0L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_20_MASK 0x00000300L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_21_MASK 0x00000C00L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_22_MASK 0x00003000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_23_MASK 0x0000C000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_24_MASK 0x00030000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_25_MASK 0x000C0000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_26_MASK 0x00300000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_27_MASK 0x00C00000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_28_MASK 0x03000000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_29_MASK 0x0C000000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_30_MASK 0x30000000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_31_MASK 0xC0000000L
+//TC_CFG_L2_ATOMIC_POLICY
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0__SHIFT 0x0
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1__SHIFT 0x2
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2__SHIFT 0x4
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3__SHIFT 0x6
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4__SHIFT 0x8
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5__SHIFT 0xa
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6__SHIFT 0xc
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7__SHIFT 0xe
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8__SHIFT 0x10
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9__SHIFT 0x12
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10__SHIFT 0x14
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11__SHIFT 0x16
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12__SHIFT 0x18
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L1_VOLATILE
+#define TC_CFG_L1_VOLATILE__VOL__SHIFT 0x0
+#define TC_CFG_L1_VOLATILE__VOL_MASK 0x0000000FL
+//TC_CFG_L2_VOLATILE
+#define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0
+#define TC_CFG_L2_VOLATILE__VOL_MASK 0x0000000FL
+//TCI_MISC
+#define TCI_MISC__FGCG_REPEATER_DISABLE__SHIFT 0x0
+#define TCI_MISC__LEGACY_MGCG_DISABLE__SHIFT 0x1
+#define TCI_MISC__FGCG_REPEATER_DISABLE_MASK 0x00000001L
+#define TCI_MISC__LEGACY_MGCG_DISABLE_MASK 0x00000002L
+//TCI_CNTL_3
+#define TCI_CNTL_3__DISABLE_DOUBLING_L2_BANDWIDTH__SHIFT 0x0
+#define TCI_CNTL_3__COMBINING_DELAY_WINDOW__SHIFT 0x2
+#define TCI_CNTL_3__CHICKEN_BIT_TCR_MGCG__SHIFT 0x4
+#define TCI_CNTL_3__TCR_FGCG_REPEATER_DISABLE__SHIFT 0x7
+#define TCI_CNTL_3__DISABLE_DOUBLING_L2_BANDWIDTH_MASK 0x00000003L
+#define TCI_CNTL_3__COMBINING_DELAY_WINDOW_MASK 0x0000000CL
+#define TCI_CNTL_3__CHICKEN_BIT_TCR_MGCG_MASK 0x00000070L
+#define TCI_CNTL_3__TCR_FGCG_REPEATER_DISABLE_MASK 0x00000080L
+//TCI_DSM_CNTL
+#define TCI_DSM_CNTL__WRITE_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCI_DSM_CNTL__WRITE_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCI_DSM_CNTL__WRITE_RAM_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCI_DSM_CNTL__WRITE_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+//TCI_DSM_CNTL2
+#define TCI_DSM_CNTL2__WRITE_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCI_DSM_CNTL2__WRITE_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCI_DSM_CNTL2__TCI_INJECT_DELAY__SHIFT 0x1a
+#define TCI_DSM_CNTL2__WRITE_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCI_DSM_CNTL2__WRITE_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCI_DSM_CNTL2__TCI_INJECT_DELAY_MASK 0xFC000000L
+//TCI_STATUS
+#define TCI_STATUS__TCI_BUSY__SHIFT 0x0
+#define TCI_STATUS__TCI_BUSY_MASK 0x00000001L
+//TCI_CNTL_1
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x0
+#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x10
+#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x18
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0x0000FFFFL
+#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0x00FF0000L
+#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xFF000000L
+//TCI_CNTL_2
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x0
+#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x1
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x00000001L
+#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x000001FEL
+//TCC_CTRL
+#define TCC_CTRL__CACHE_SIZE__SHIFT 0x0
+#define TCC_CTRL__RATE__SHIFT 0x2
+#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
+#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
+#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
+#define TCC_CTRL__LINEAR_SET_HASH__SHIFT 0x15
+#define TCC_CTRL__OUTPUT_FIFO_CLK_MODE__SHIFT 0x16
+#define TCC_CTRL__EXECUTE_CLK_MODE__SHIFT 0x17
+#define TCC_CTRL__RETURN_BUFFER_CLK_MODE__SHIFT 0x19
+#define TCC_CTRL__SRC_FIFO_CLK_MODE__SHIFT 0x1a
+#define TCC_CTRL__MC_WRITE_CLK_MODE__SHIFT 0x1b
+#define TCC_CTRL__LATENCY_FIFO_CLK_MODE__SHIFT 0x1c
+#define TCC_CTRL__DISABLE_SHARED_128B_READ_REQUEST__SHIFT 0x1d
+#define TCC_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define TCC_CTRL__RATE_MASK 0x0000000CL
+#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
+#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
+#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
+#define TCC_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
+#define TCC_CTRL__OUTPUT_FIFO_CLK_MODE_MASK 0x00400000L
+#define TCC_CTRL__EXECUTE_CLK_MODE_MASK 0x01800000L
+#define TCC_CTRL__RETURN_BUFFER_CLK_MODE_MASK 0x02000000L
+#define TCC_CTRL__SRC_FIFO_CLK_MODE_MASK 0x04000000L
+#define TCC_CTRL__MC_WRITE_CLK_MODE_MASK 0x08000000L
+#define TCC_CTRL__LATENCY_FIFO_CLK_MODE_MASK 0x10000000L
+#define TCC_CTRL__DISABLE_SHARED_128B_READ_REQUEST_MASK 0x20000000L
+//TCC_CTRL2
+#define TCC_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
+#define TCC_CTRL2__INF_NAN_CLAMP__SHIFT 0x10
+#define TCC_CTRL2__PROBE_FILTER_CTRL__SHIFT 0x11
+#define TCC_CTRL2__WAIT_CLK_STABLE_CNT__SHIFT 0x12
+#define TCC_CTRL2__TCC_TCX_REPEATER_FGCG_DISABLE__SHIFT 0x17
+#define TCC_CTRL2__TCC_EA0_RDREQ_FGCG_DISABLE__SHIFT 0x18
+#define TCC_CTRL2__TCC_EA0_WRREQ_FGCG_DISABLE__SHIFT 0x19
+#define TCC_CTRL2__TCC_TCX_ACK_REPEATER_FGCG_DISABLE__SHIFT 0x1a
+#define TCC_CTRL2__TCC_TCA_HOLE_REPEATER_FGCG_DISABLE__SHIFT 0x1b
+#define TCC_CTRL2__TCC_TCA_RTN_REPEATER_FGCG_DISABLE__SHIFT 0x1c
+#define TCC_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK__SHIFT 0x1d
+#define TCC_CTRL2__Enable_TCC_64K_2M_1G_1T_hash__SHIFT 0x1e
+#define TCC_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
+#define TCC_CTRL2__INF_NAN_CLAMP_MASK 0x00010000L
+#define TCC_CTRL2__PROBE_FILTER_CTRL_MASK 0x00020000L
+#define TCC_CTRL2__WAIT_CLK_STABLE_CNT_MASK 0x007C0000L
+#define TCC_CTRL2__TCC_TCX_REPEATER_FGCG_DISABLE_MASK 0x00800000L
+#define TCC_CTRL2__TCC_EA0_RDREQ_FGCG_DISABLE_MASK 0x01000000L
+#define TCC_CTRL2__TCC_EA0_WRREQ_FGCG_DISABLE_MASK 0x02000000L
+#define TCC_CTRL2__TCC_TCX_ACK_REPEATER_FGCG_DISABLE_MASK 0x04000000L
+#define TCC_CTRL2__TCC_TCA_HOLE_REPEATER_FGCG_DISABLE_MASK 0x08000000L
+#define TCC_CTRL2__TCC_TCA_RTN_REPEATER_FGCG_DISABLE_MASK 0x10000000L
+#define TCC_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK_MASK 0x20000000L
+#define TCC_CTRL2__Enable_TCC_64K_2M_1G_1T_hash_MASK 0x40000000L
+//TCC_DSM_CNTL
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x15
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL__SHIFT 0x18
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL__SHIFT 0x1b
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x00600000L
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL_MASK 0x03000000L
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL_MASK 0x18000000L
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
+//TCC_DSM_CNTLA
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL__SHIFT 0x15
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL__SHIFT 0x18
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
+#define TCC_DSM_CNTLA__OUTPUT_FIFOS_IRRITATOR_DATA_SEL__SHIFT 0x1b
+#define TCC_DSM_CNTLA__OUTPUT_FIFOS_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL_MASK 0x00600000L
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL_MASK 0x03000000L
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
+#define TCC_DSM_CNTLA__OUTPUT_FIFOS_IRRITATOR_DATA_SEL_MASK 0x18000000L
+#define TCC_DSM_CNTLA__OUTPUT_FIFOS_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
+//TCC_DSM_CNTL2
+#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY__SHIFT 0xb
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x17
+#define TCC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define TCC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//TCC_DSM_CNTL2A
+#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY__SHIFT 0xb
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY__SHIFT 0x17
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define TCC_DSM_CNTL2A__OUTPUT_FIFOS_ENABLE_ERROR_INJECT__SHIFT 0x1b
+#define TCC_DSM_CNTL2A__OUTPUT_FIFOS_SELECT_INJECT_DELAY__SHIFT 0x1d
+#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY_MASK 0x04000000L
+#define TCC_DSM_CNTL2A__OUTPUT_FIFOS_ENABLE_ERROR_INJECT_MASK 0x18000000L
+#define TCC_DSM_CNTL2A__OUTPUT_FIFOS_SELECT_INJECT_DELAY_MASK 0x20000000L
+//TCC_DSM_CNTL2B
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define TCC_DSM_CNTL2B__RETURN_BUFFER_LEVEL_BUBBLE_THRESHOLD__SHIFT 0x12
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define TCC_DSM_CNTL2B__WRITE_EARLY_RETURN_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define TCC_DSM_CNTL2B__RETURN_BUFFER_LEVEL_BUBBLE_THRESHOLD_MASK 0x00FC0000L
+//TCC_WBINVL2
+#define TCC_WBINVL2__DONE__SHIFT 0x4
+#define TCC_WBINVL2__DONE_MASK 0x00000010L
+//TCC_SOFT_RESET
+#define TCC_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
+#define TCC_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
+//TCC_DSM_CNTL3
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_SELECT_INJECT_DELAY__SHIFT 0x17
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_2_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_0_3_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_2_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define TCC_DSM_CNTL3__CACHE_DATA_BANK_1_3_SELECT_INJECT_DELAY_MASK 0x00800000L
+//TCA_CTRL
+#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x0
+#define TCA_CTRL__RB_STILL_4_PHASE__SHIFT 0x4
+#define TCA_CTRL__RB_AS_TCI__SHIFT 0x5
+#define TCA_CTRL__DISABLE_UTCL2_PRIORITY__SHIFT 0x6
+#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER__SHIFT 0x7
+#define TCA_CTRL__TCA_TCC_FGCG_DISABLE__SHIFT 0x8
+#define TCA_CTRL__TCA_TCA_FGCG_DISABLE__SHIFT 0x9
+#define TCA_CTRL__TCA_TCH_FGCG_DISABLE__SHIFT 0xa
+#define TCA_CTRL__TCA_TCX_FGCG_DISABLE__SHIFT 0xb
+#define TCA_CTRL__TCA_RANDOM_REVERSE_PRIORITY_ENABLE__SHIFT 0xc
+#define TCA_CTRL__RTN_CREDIT_THRESHOLD__SHIFT 0xd
+#define TCA_CTRL__ACK_CREDIT_THRESHOLD__SHIFT 0x10
+#define TCA_CTRL__RTN_ARB_MODE__SHIFT 0x13
+#define TCA_CTRL__HOLE_ARB_SHARE_THRESHOLD__SHIFT 0x18
+#define TCA_CTRL__HOLE_ARB_LANE_THRESHOLD__SHIFT 0x1c
+#define TCA_CTRL__HOLE_TIMEOUT_MASK 0x0000000FL
+#define TCA_CTRL__RB_STILL_4_PHASE_MASK 0x00000010L
+#define TCA_CTRL__RB_AS_TCI_MASK 0x00000020L
+#define TCA_CTRL__DISABLE_UTCL2_PRIORITY_MASK 0x00000040L
+#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER_MASK 0x00000080L
+#define TCA_CTRL__TCA_TCC_FGCG_DISABLE_MASK 0x00000100L
+#define TCA_CTRL__TCA_TCA_FGCG_DISABLE_MASK 0x00000200L
+#define TCA_CTRL__TCA_TCH_FGCG_DISABLE_MASK 0x00000400L
+#define TCA_CTRL__TCA_TCX_FGCG_DISABLE_MASK 0x00000800L
+#define TCA_CTRL__TCA_RANDOM_REVERSE_PRIORITY_ENABLE_MASK 0x00001000L
+#define TCA_CTRL__RTN_CREDIT_THRESHOLD_MASK 0x0000E000L
+#define TCA_CTRL__ACK_CREDIT_THRESHOLD_MASK 0x00070000L
+#define TCA_CTRL__RTN_ARB_MODE_MASK 0x00080000L
+#define TCA_CTRL__HOLE_ARB_SHARE_THRESHOLD_MASK 0x07000000L
+#define TCA_CTRL__HOLE_ARB_LANE_THRESHOLD_MASK 0x70000000L
+//TCA_BURST_MASK
+#define TCA_BURST_MASK__ADDR_MASK__SHIFT 0x0
+#define TCA_BURST_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//TCA_BURST_CTRL
+#define TCA_BURST_CTRL__MAX_BURST__SHIFT 0x0
+#define TCA_BURST_CTRL__TCP_DISABLE__SHIFT 0x4
+#define TCA_BURST_CTRL__SQC_DISABLE__SHIFT 0x5
+#define TCA_BURST_CTRL__CPF_DISABLE__SHIFT 0x6
+#define TCA_BURST_CTRL__CPG_DISABLE__SHIFT 0x7
+#define TCA_BURST_CTRL__SQG_DISABLE__SHIFT 0xa
+#define TCA_BURST_CTRL__UTCL2_DISABLE__SHIFT 0xb
+#define TCA_BURST_CTRL__TPI_DISABLE__SHIFT 0xc
+#define TCA_BURST_CTRL__RLC_DISABLE__SHIFT 0xd
+#define TCA_BURST_CTRL__MAX_BURST_MASK 0x00000007L
+#define TCA_BURST_CTRL__TCP_DISABLE_MASK 0x00000010L
+#define TCA_BURST_CTRL__SQC_DISABLE_MASK 0x00000020L
+#define TCA_BURST_CTRL__CPF_DISABLE_MASK 0x00000040L
+#define TCA_BURST_CTRL__CPG_DISABLE_MASK 0x00000080L
+#define TCA_BURST_CTRL__SQG_DISABLE_MASK 0x00000400L
+#define TCA_BURST_CTRL__UTCL2_DISABLE_MASK 0x00000800L
+#define TCA_BURST_CTRL__TPI_DISABLE_MASK 0x00001000L
+#define TCA_BURST_CTRL__RLC_DISABLE_MASK 0x00002000L
+//TCA_DSM_CNTL
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+//TCA_DSM_CNTL2
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//TCX_CTRL
+#define TCX_CTRL__TCX_TCX_FGCG_DISABLE__SHIFT 0x0
+#define TCX_CTRL__TCX_TCR_FGCG_DISABLE__SHIFT 0x1
+#define TCX_CTRL__TCX_TCC_FGCG_DISABLE__SHIFT 0x2
+#define TCX_CTRL__TCX_TCX_FGCG_DISABLE_MASK 0x00000001L
+#define TCX_CTRL__TCX_TCR_FGCG_DISABLE_MASK 0x00000002L
+#define TCX_CTRL__TCX_TCC_FGCG_DISABLE_MASK 0x00000004L
+//TCX_DSM_CNTL
+#define TCX_DSM_CNTL__GROUP0_SED_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCX_DSM_CNTL__GROUP1_SED_IRRITATOR_DATA_SEL__SHIFT 0x2
+#define TCX_DSM_CNTL__GROUP2_SED_IRRITATOR_DATA_SEL__SHIFT 0x4
+#define TCX_DSM_CNTL__GROUP4_SED_IRRITATOR_DATA_SEL__SHIFT 0x8
+#define TCX_DSM_CNTL__GROUP5_SED_IRRITATOR_DATA_SEL__SHIFT 0xa
+#define TCX_DSM_CNTL__GROUP6_SED_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define TCX_DSM_CNTL__GROUP8_SED_IRRITATOR_DATA_SEL__SHIFT 0x10
+#define TCX_DSM_CNTL__GROUP9_SED_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define TCX_DSM_CNTL__GROUP10_SED_IRRITATOR_DATA_SEL__SHIFT 0x14
+#define TCX_DSM_CNTL__GROUP13_SED_IRRITATOR_DATA_SEL__SHIFT 0x1a
+#define TCX_DSM_CNTL__GROUP14_SED_IRRITATOR_DATA_SEL__SHIFT 0x1c
+#define TCX_DSM_CNTL__SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x1e
+#define TCX_DSM_CNTL__GROUP0_SED_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCX_DSM_CNTL__GROUP1_SED_IRRITATOR_DATA_SEL_MASK 0x0000000CL
+#define TCX_DSM_CNTL__GROUP2_SED_IRRITATOR_DATA_SEL_MASK 0x00000030L
+#define TCX_DSM_CNTL__GROUP4_SED_IRRITATOR_DATA_SEL_MASK 0x00000300L
+#define TCX_DSM_CNTL__GROUP5_SED_IRRITATOR_DATA_SEL_MASK 0x00000C00L
+#define TCX_DSM_CNTL__GROUP6_SED_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define TCX_DSM_CNTL__GROUP8_SED_IRRITATOR_DATA_SEL_MASK 0x00030000L
+#define TCX_DSM_CNTL__GROUP9_SED_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define TCX_DSM_CNTL__GROUP10_SED_IRRITATOR_DATA_SEL_MASK 0x00300000L
+#define TCX_DSM_CNTL__GROUP13_SED_IRRITATOR_DATA_SEL_MASK 0x0C000000L
+#define TCX_DSM_CNTL__GROUP14_SED_IRRITATOR_DATA_SEL_MASK 0x30000000L
+#define TCX_DSM_CNTL__SED_IRRITATOR_SINGLE_WRITE_MASK 0x40000000L
+//TCX_DSM_CNTL2
+#define TCX_DSM_CNTL2__SED_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCX_DSM_CNTL2__SED_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCX_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define TCX_DSM_CNTL2__SED_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCX_DSM_CNTL2__SED_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCX_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+
+
+// addressBlock: xcd0_gc_shdec
+//SPI_SHADER_PGM_RSRC3_PS
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE_MASK 0x3C000000L
+//SPI_SHADER_PGM_LO_PS
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_PS
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_PS
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
+//SPI_SHADER_PGM_RSRC2_PS
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_PS_0
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_1
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_2
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_3
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_4
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_5
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_6
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_7
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_8
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_9
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_10
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_11
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_12
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_13
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_14
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_15
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_16
+#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_17
+#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_18
+#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_19
+#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_20
+#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_21
+#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_22
+#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_23
+#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_24
+#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_25
+#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_26
+#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_27
+#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_28
+#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_29
+#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_30
+#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_31
+#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC3_VS
+#define SPI_SHADER_PGM_RSRC3_VS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_VS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE_MASK 0x3C000000L
+//SPI_SHADER_LATE_ALLOC_VS
+#define SPI_SHADER_LATE_ALLOC_VS__LIMIT__SHIFT 0x0
+#define SPI_SHADER_LATE_ALLOC_VS__LIMIT_MASK 0x0000003FL
+//SPI_SHADER_PGM_LO_VS
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_VS
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_VS
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_VS
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x9
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0xb
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0xd
+#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x00000200L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x00000400L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x00000800L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x00001000L
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x003FE000L
+#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_VS_0
+#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_1
+#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_2
+#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_3
+#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_4
+#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_5
+#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_6
+#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_7
+#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_8
+#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_9
+#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_10
+#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_11
+#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_12
+#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_13
+#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_14
+#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_15
+#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_16
+#define SPI_SHADER_USER_DATA_VS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_17
+#define SPI_SHADER_USER_DATA_VS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_18
+#define SPI_SHADER_USER_DATA_VS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_19
+#define SPI_SHADER_USER_DATA_VS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_20
+#define SPI_SHADER_USER_DATA_VS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_21
+#define SPI_SHADER_USER_DATA_VS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_22
+#define SPI_SHADER_USER_DATA_VS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_23
+#define SPI_SHADER_USER_DATA_VS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_24
+#define SPI_SHADER_USER_DATA_VS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_25
+#define SPI_SHADER_USER_DATA_VS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_26
+#define SPI_SHADER_USER_DATA_VS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_27
+#define SPI_SHADER_USER_DATA_VS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_28
+#define SPI_SHADER_USER_DATA_VS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_29
+#define SPI_SHADER_USER_DATA_VS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_30
+#define SPI_SHADER_USER_DATA_VS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_31
+#define SPI_SHADER_USER_DATA_VS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC2_GS_VS
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_PGM_RSRC4_GS
+#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x00003F80L
+//SPI_SHADER_USER_DATA_ADDR_LO_GS
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_GS
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_ES
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_GS
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE_MASK 0x3C000000L
+//SPI_SHADER_PGM_LO_GS
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_GS
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_GS
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_GS
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_ES_0
+#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_1
+#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_2
+#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_3
+#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_4
+#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_5
+#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_6
+#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_7
+#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_8
+#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_9
+#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_10
+#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_11
+#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_12
+#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_13
+#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_14
+#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_15
+#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_16
+#define SPI_SHADER_USER_DATA_ES_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_17
+#define SPI_SHADER_USER_DATA_ES_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_18
+#define SPI_SHADER_USER_DATA_ES_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_19
+#define SPI_SHADER_USER_DATA_ES_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_20
+#define SPI_SHADER_USER_DATA_ES_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_21
+#define SPI_SHADER_USER_DATA_ES_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_22
+#define SPI_SHADER_USER_DATA_ES_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_23
+#define SPI_SHADER_USER_DATA_ES_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_24
+#define SPI_SHADER_USER_DATA_ES_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_25
+#define SPI_SHADER_USER_DATA_ES_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_26
+#define SPI_SHADER_USER_DATA_ES_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_27
+#define SPI_SHADER_USER_DATA_ES_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_28
+#define SPI_SHADER_USER_DATA_ES_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_29
+#define SPI_SHADER_USER_DATA_ES_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_30
+#define SPI_SHADER_USER_DATA_ES_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_31
+#define SPI_SHADER_USER_DATA_ES_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_HS
+#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
+//SPI_SHADER_USER_DATA_ADDR_LO_HS
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_HS
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_LS
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_HS
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE_MASK 0x00003C00L
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
+//SPI_SHADER_PGM_LO_HS
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_HS
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_HS
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
+//SPI_SHADER_PGM_RSRC2_HS
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_LS_0
+#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_1
+#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_2
+#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_3
+#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_4
+#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_5
+#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_6
+#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_7
+#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_8
+#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_9
+#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_10
+#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_11
+#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_12
+#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_13
+#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_14
+#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_15
+#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_16
+#define SPI_SHADER_USER_DATA_LS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_17
+#define SPI_SHADER_USER_DATA_LS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_18
+#define SPI_SHADER_USER_DATA_LS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_19
+#define SPI_SHADER_USER_DATA_LS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_20
+#define SPI_SHADER_USER_DATA_LS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_21
+#define SPI_SHADER_USER_DATA_LS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_22
+#define SPI_SHADER_USER_DATA_LS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_23
+#define SPI_SHADER_USER_DATA_LS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_24
+#define SPI_SHADER_USER_DATA_LS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_25
+#define SPI_SHADER_USER_DATA_LS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_26
+#define SPI_SHADER_USER_DATA_LS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_27
+#define SPI_SHADER_USER_DATA_LS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_28
+#define SPI_SHADER_USER_DATA_LS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_29
+#define SPI_SHADER_USER_DATA_LS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_30
+#define SPI_SHADER_USER_DATA_LS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_31
+#define SPI_SHADER_USER_DATA_LS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_0
+#define SPI_SHADER_USER_DATA_COMMON_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_1
+#define SPI_SHADER_USER_DATA_COMMON_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_2
+#define SPI_SHADER_USER_DATA_COMMON_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_3
+#define SPI_SHADER_USER_DATA_COMMON_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_4
+#define SPI_SHADER_USER_DATA_COMMON_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_5
+#define SPI_SHADER_USER_DATA_COMMON_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_6
+#define SPI_SHADER_USER_DATA_COMMON_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_7
+#define SPI_SHADER_USER_DATA_COMMON_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_8
+#define SPI_SHADER_USER_DATA_COMMON_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_9
+#define SPI_SHADER_USER_DATA_COMMON_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_10
+#define SPI_SHADER_USER_DATA_COMMON_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_11
+#define SPI_SHADER_USER_DATA_COMMON_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_12
+#define SPI_SHADER_USER_DATA_COMMON_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_13
+#define SPI_SHADER_USER_DATA_COMMON_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_14
+#define SPI_SHADER_USER_DATA_COMMON_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_15
+#define SPI_SHADER_USER_DATA_COMMON_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_16
+#define SPI_SHADER_USER_DATA_COMMON_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_17
+#define SPI_SHADER_USER_DATA_COMMON_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_18
+#define SPI_SHADER_USER_DATA_COMMON_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_19
+#define SPI_SHADER_USER_DATA_COMMON_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_20
+#define SPI_SHADER_USER_DATA_COMMON_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_21
+#define SPI_SHADER_USER_DATA_COMMON_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_22
+#define SPI_SHADER_USER_DATA_COMMON_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_23
+#define SPI_SHADER_USER_DATA_COMMON_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_24
+#define SPI_SHADER_USER_DATA_COMMON_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_25
+#define SPI_SHADER_USER_DATA_COMMON_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_26
+#define SPI_SHADER_USER_DATA_COMMON_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_27
+#define SPI_SHADER_USER_DATA_COMMON_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_28
+#define SPI_SHADER_USER_DATA_COMMON_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_29
+#define SPI_SHADER_USER_DATA_COMMON_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_30
+#define SPI_SHADER_USER_DATA_COMMON_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_31
+#define SPI_SHADER_USER_DATA_COMMON_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_31__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_INITIATOR
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+//COMPUTE_DIM_X
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Y
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Z
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_START_X
+#define COMPUTE_START_X__START__SHIFT 0x0
+#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Y
+#define COMPUTE_START_Y__START__SHIFT 0x0
+#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Z
+#define COMPUTE_START_Z__START__SHIFT 0x0
+#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
+//COMPUTE_NUM_THREAD_X
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Y
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Z
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_PIPELINESTAT_ENABLE
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
+//COMPUTE_PERFCOUNT_ENABLE
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
+//COMPUTE_PGM_LO
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
+#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_PGM_HI
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_PKT_ADDR_LO
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_PKT_ADDR_HI
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_LO
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_HI
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//COMPUTE_PGM_RSRC1
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x16
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x19
+#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
+//COMPUTE_PGM_RSRC2
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
+#define COMPUTE_PGM_RSRC2__SKIP_USGPR0__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
+#define COMPUTE_PGM_RSRC2__SKIP_USGPR0_MASK 0x80000000L
+//COMPUTE_VMID
+#define COMPUTE_VMID__DATA__SHIFT 0x0
+#define COMPUTE_VMID__DATA_MASK 0x0000000FL
+//COMPUTE_RESOURCE_LIMITS
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE__SHIFT 0x1b
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE_MASK 0x78000000L
+//COMPUTE_STATIC_THREAD_MGMT_SE0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE1
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_TMPRING_SIZE
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
+//COMPUTE_STATIC_THREAD_MGMT_SE2
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE3
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_RESTART_X
+#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Y
+#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Z
+#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_THREAD_TRACE_ENABLE
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
+//COMPUTE_MISC_RESERVED
+#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
+#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000003L
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
+//COMPUTE_DISPATCH_ID
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
+//COMPUTE_THREADGROUP_ID
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
+//COMPUTE_RELAUNCH
+#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
+//COMPUTE_WAVE_RESTORE_ADDR_LO
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
+//COMPUTE_WAVE_RESTORE_ADDR_HI
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
+//COMPUTE_TG_CHUNK_SIZE
+#define COMPUTE_TG_CHUNK_SIZE__TG_CHUNK_SIZE__SHIFT 0x0
+#define COMPUTE_TG_CHUNK_SIZE__TG_CHUNK_SIZE_MASK 0x0000FFFFL
+//COMPUTE_SHADER_CHKSUM
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM__SHIFT 0x0
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM_MASK 0xFFFFFFFFL
+//COMPUTE_PGM_RSRC3
+#define COMPUTE_PGM_RSRC3__ACCUM_OFFSET__SHIFT 0x0
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START__SHIFT 0xa
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END__SHIFT 0xb
+#define COMPUTE_PGM_RSRC3__TG_SPLIT__SHIFT 0x10
+#define COMPUTE_PGM_RSRC3__ACCUM_OFFSET_MASK 0x0000003FL
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END_MASK 0x00000800L
+#define COMPUTE_PGM_RSRC3__TG_SPLIT_MASK 0x00010000L
+//COMPUTE_USER_DATA_0
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_1
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_2
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_3
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_4
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_5
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_6
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_7
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_8
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_9
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_10
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_11
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_12
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_13
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_14
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_15
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_END
+#define COMPUTE_DISPATCH_END__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_END__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_NOWHERE
+#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
+#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: xcd0_gc_cppdec
+//CP_DFY_CNTL
+#define CP_DFY_CNTL__POLICY__SHIFT 0x0
+#define CP_DFY_CNTL__MTYPE__SHIFT 0x2
+#define CP_DFY_CNTL__WRITE_DIS__SHIFT 0x1b
+#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
+#define CP_DFY_CNTL__MODE__SHIFT 0x1d
+#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DFY_CNTL__POLICY_MASK 0x00000001L
+#define CP_DFY_CNTL__MTYPE_MASK 0x0000000CL
+#define CP_DFY_CNTL__WRITE_DIS_MASK 0x08000000L
+#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
+#define CP_DFY_CNTL__MODE_MASK 0x60000000L
+#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
+//CP_DFY_STAT
+#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
+#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
+#define CP_DFY_STAT__BUSY__SHIFT 0x1f
+#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
+#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
+#define CP_DFY_STAT__BUSY_MASK 0x80000000L
+//CP_DFY_ADDR_HI
+#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_DFY_ADDR_LO
+#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
+#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
+//CP_DFY_DATA_0
+#define CP_DFY_DATA_0__DATA__SHIFT 0x0
+#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_1
+#define CP_DFY_DATA_1__DATA__SHIFT 0x0
+#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_2
+#define CP_DFY_DATA_2__DATA__SHIFT 0x0
+#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_3
+#define CP_DFY_DATA_3__DATA__SHIFT 0x0
+#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_4
+#define CP_DFY_DATA_4__DATA__SHIFT 0x0
+#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_5
+#define CP_DFY_DATA_5__DATA__SHIFT 0x0
+#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_6
+#define CP_DFY_DATA_6__DATA__SHIFT 0x0
+#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_7
+#define CP_DFY_DATA_7__DATA__SHIFT 0x0
+#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_8
+#define CP_DFY_DATA_8__DATA__SHIFT 0x0
+#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_9
+#define CP_DFY_DATA_9__DATA__SHIFT 0x0
+#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_10
+#define CP_DFY_DATA_10__DATA__SHIFT 0x0
+#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_11
+#define CP_DFY_DATA_11__DATA__SHIFT 0x0
+#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_12
+#define CP_DFY_DATA_12__DATA__SHIFT 0x0
+#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_13
+#define CP_DFY_DATA_13__DATA__SHIFT 0x0
+#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_14
+#define CP_DFY_DATA_14__DATA__SHIFT 0x0
+#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_15
+#define CP_DFY_DATA_15__DATA__SHIFT 0x0
+#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_CMD
+#define CP_DFY_CMD__OFFSET__SHIFT 0x0
+#define CP_DFY_CMD__SIZE__SHIFT 0x10
+#define CP_DFY_CMD__OFFSET_MASK 0x000001FFL
+#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
+//CP_EOPQ_WAIT_TIME
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
+//CP_CPC_MGCG_SYNC_CNTL
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
+//CPC_INT_INFO
+#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
+#define CPC_INT_INFO__TYPE__SHIFT 0x10
+#define CPC_INT_INFO__VMID__SHIFT 0x14
+#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
+#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
+#define CPC_INT_INFO__TYPE_MASK 0x00010000L
+#define CPC_INT_INFO__VMID_MASK 0x00F00000L
+#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
+//CP_VIRT_STATUS
+#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
+#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
+//CPC_INT_ADDR
+#define CPC_INT_ADDR__ADDR__SHIFT 0x0
+#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CPC_INT_PASID
+#define CPC_INT_PASID__PASID__SHIFT 0x0
+#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
+//CP_GFX_ERROR
+#define CP_GFX_ERROR__EDC_ERROR_ID__SHIFT 0x0
+#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_GFX_ERROR__RSVD1_ERROR__SHIFT 0x5
+#define CP_GFX_ERROR__RSVD2_ERROR__SHIFT 0x6
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
+#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR__SHIFT 0x8
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
+#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR__SHIFT 0x10
+#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR__SHIFT 0x11
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
+#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR__SHIFT 0x16
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
+#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR__SHIFT 0x1c
+#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR__SHIFT 0x1d
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
+#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR__SHIFT 0x1f
+#define CP_GFX_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
+#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_GFX_ERROR__RSVD1_ERROR_MASK 0x00000020L
+#define CP_GFX_ERROR__RSVD2_ERROR_MASK 0x00000040L
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
+#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR_MASK 0x00000100L
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
+#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR_MASK 0x00010000L
+#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR_MASK 0x00020000L
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
+#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR_MASK 0x00400000L
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
+#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR_MASK 0x10000000L
+#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR_MASK 0x20000000L
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
+#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR_MASK 0x80000000L
+//CPG_UTCL1_CNTL
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPC_UTCL1_CNTL
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPF_UTCL1_CNTL
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
+//CP_AQL_SMM_STATUS
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
+//CP_RB0_BASE
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB_BASE
+#define CP_RB_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB0_CNTL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x11
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB0_CNTL__BUF_SWAP_MASK 0x00060000L
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_CNTL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_RPTR_WR
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
+//CP_RB0_RPTR_ADDR
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB_RPTR_ADDR
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB0_RPTR_ADDR_HI
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_RPTR_ADDR_HI
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_BUFSZ_MASK
+#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_BUFSZ_MASK
+#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_WPTR_POLL_ADDR_LO
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_RB_WPTR_POLL_ADDR_HI
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
+//GC_PRIV_MODE
+#define GC_PRIV_MODE__MC_PRIV_MODE__SHIFT 0x0
+#define GC_PRIV_MODE__MC_PRIV_MODE_MASK 0x00000001L
+//CP_INT_CNTL
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_DEVICE_ID
+#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
+//CP_ME0_PIPE_PRIORITY_CNTS
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_RING_PRIORITY_CNTS
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME0_PIPE0_PRIORITY
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING0_PRIORITY
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE1_PRIORITY
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING1_PRIORITY
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE2_PRIORITY
+#define CP_ME0_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING2_PRIORITY
+#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_FATAL_ERROR
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
+#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
+#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
+//CP_RB_VMID
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
+#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
+//CP_ME0_PIPE0_VMID
+#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
+//CP_ME0_PIPE1_VMID
+#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
+//CP_RB0_WPTR
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB0_WPTR_HI
+#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR_HI
+#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR_HI
+#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB2_WPTR
+#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB2_WPTR__RB_WPTR_MASK 0x000FFFFFL
+//CP_RB_DOORBELL_CONTROL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_RANGE_LOWER
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
+//CP_RB_DOORBELL_RANGE_UPPER
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
+//CP_MEC_DOORBELL_RANGE_LOWER
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
+//CP_MEC_DOORBELL_RANGE_UPPER
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
+//CPG_UTCL1_ERROR
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CPC_UTCL1_ERROR
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CP_RB1_BASE
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB1_CNTL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB1_RPTR_ADDR
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB1_RPTR_ADDR_HI
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB2_BASE
+#define CP_RB2_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB2_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB2_CNTL
+#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB2_RPTR_ADDR
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB2_RPTR_ADDR_HI
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_ACTIVE
+#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_ACTIVE
+#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_INT_CNTL_RING0
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING1
+#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING2
+#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING2__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING2__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS_RING0
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING1
+#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING2
+#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING2__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING2__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_ME_F32_INTERRUPT
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT__SHIFT 0x1
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2__SHIFT 0x2
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3__SHIFT 0x3
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT_MASK 0x00000002L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2_MASK 0x00000004L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3_MASK 0x00000008L
+//CP_PFP_F32_INTERRUPT
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3__SHIFT 0x3
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3_MASK 0x00000008L
+//CP_CE_F32_INTERRUPT
+#define CP_CE_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_CE_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x1
+#define CP_CE_F32_INTERRUPT__CE_F32_INT_2__SHIFT 0x2
+#define CP_CE_F32_INTERRUPT__CE_F32_INT_3__SHIFT 0x3
+#define CP_CE_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_CE_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000002L
+#define CP_CE_F32_INTERRUPT__CE_F32_INT_2_MASK 0x00000004L
+#define CP_CE_F32_INTERRUPT__CE_F32_INT_3_MASK 0x00000008L
+//CP_MEC1_F32_INTERRUPT
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INTERRUPT
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_PWR_CNTL
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
+//CP_MEM_SLP_CNTL
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x0
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x1
+#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
+#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x8
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x00000001L
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x00000002L
+#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
+#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
+//CP_ECC_DMA_FIRST_OCCURRENCE
+#define CP_ECC_DMA_FIRST_OCCURRENCE__INTERFACE__SHIFT 0x0
+#define CP_ECC_DMA_FIRST_OCCURRENCE__CLIENT__SHIFT 0x4
+#define CP_ECC_DMA_FIRST_OCCURRENCE__ME__SHIFT 0x8
+#define CP_ECC_DMA_FIRST_OCCURRENCE__PIPE__SHIFT 0xa
+#define CP_ECC_DMA_FIRST_OCCURRENCE__VMID__SHIFT 0x10
+#define CP_ECC_DMA_FIRST_OCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_DMA_FIRST_OCCURRENCE__CLIENT_MASK 0x000000F0L
+#define CP_ECC_DMA_FIRST_OCCURRENCE__ME_MASK 0x00000300L
+#define CP_ECC_DMA_FIRST_OCCURRENCE__PIPE_MASK 0x00000C00L
+#define CP_ECC_DMA_FIRST_OCCURRENCE__VMID_MASK 0x000F0000L
+//CP_ECC_FIRSTOCCURRENCE
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
+#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
+#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
+#define CP_ECC_FIRSTOCCURRENCE__QUEUE__SHIFT 0xc
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
+#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
+#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
+#define CP_ECC_FIRSTOCCURRENCE__QUEUE_MASK 0x00007000L
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
+//CP_ECC_FIRSTOCCURRENCE_RING0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING1
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING2
+#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE_MASK 0xFFFFFFFFL
+//GB_EDC_MODE
+#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
+#define GB_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define GB_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
+#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
+#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
+#define GB_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define GB_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
+//CP_DEBUG
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE__SHIFT 0xe
+#define CP_DEBUG__DISABLE_GFX_HALT_ON_UTCL1_ERROR__SHIFT 0xf
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR__SHIFT 0x10
+#define CP_DEBUG__BUSY_EXTENDER__SHIFT 0x13
+#define CP_DEBUG__PRIV_REG_INTERRUPT_ENABLE__SHIFT 0x15
+#define CP_DEBUG__INTERRUPT_ENABLE__SHIFT 0x16
+#define CP_DEBUG__PREDICATE_DISABLE__SHIFT 0x17
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_DEBUG__CPG_TC_MTYPE_OVERRIDE__SHIFT 0x1b
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE__SHIFT 0x1e
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE__SHIFT 0x1f
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE_MASK 0x00004000L
+#define CP_DEBUG__DISABLE_GFX_HALT_ON_UTCL1_ERROR_MASK 0x00008000L
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR_MASK 0x00070000L
+#define CP_DEBUG__BUSY_EXTENDER_MASK 0x00180000L
+#define CP_DEBUG__PRIV_REG_INTERRUPT_ENABLE_MASK 0x00200000L
+#define CP_DEBUG__INTERRUPT_ENABLE_MASK 0x00400000L
+#define CP_DEBUG__PREDICATE_DISABLE_MASK 0x00800000L
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_DEBUG__CPG_TC_MTYPE_OVERRIDE_MASK 0x08000000L
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE_MASK 0x40000000L
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE_MASK 0x80000000L
+//CP_CPF_DEBUG
+#define CP_CPF_DEBUG__QUE_MANAGER_CLR_DBELLUPD_DIS__SHIFT 0x6
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE__SHIFT 0x10
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPF_DEBUG__BUSY_EXTENDER__SHIFT 0x13
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS__SHIFT 0x1d
+#define CP_CPF_DEBUG__CPF_TC_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPF_DEBUG__DBGU_TRIGGER__SHIFT 0x1f
+#define CP_CPF_DEBUG__QUE_MANAGER_CLR_DBELLUPD_DIS_MASK 0x00000040L
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE_MASK 0x00010000L
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPF_DEBUG__BUSY_EXTENDER_MASK 0x00180000L
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS_MASK 0x20000000L
+#define CP_CPF_DEBUG__CPF_TC_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPF_DEBUG__DBGU_TRIGGER_MASK 0x80000000L
+//CP_CPC_DEBUG
+#define CP_CPC_DEBUG__CPC_PIPE_SEL__SHIFT 0x0
+#define CP_CPC_DEBUG__CPC_ROLLOVER_EVENT_DEBUG_EN__SHIFT 0x3
+#define CP_CPC_DEBUG__CPC_HARVESTING_CONTINUE_DISABLE__SHIFT 0xb
+#define CP_CPC_DEBUG__CPC_HARVESTING_RELAUNCH_DISABLE__SHIFT 0xc
+#define CP_CPC_DEBUG__CPC_HARVEST_AUTO_DISABLE__SHIFT 0xd
+#define CP_CPC_DEBUG__CPC_HARVESTING_DISPATCH_DISABLE__SHIFT 0xe
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE__SHIFT 0xf
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPC_DEBUG__BUSY_EXTENDER__SHIFT 0x13
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE__SHIFT 0x15
+#define CP_CPC_DEBUG__RESERVED_INTERRUPT_ENABLE__SHIFT 0x16
+#define CP_CPC_DEBUG__RESTORE_FIFO_EMPTY_SEL__SHIFT 0x17
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_CPC_DEBUG__PRIV_REG_INTERRUPT_ENABLE__SHIFT 0x1b
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE__SHIFT 0x1f
+#define CP_CPC_DEBUG__CPC_PIPE_SEL_MASK 0x00000003L
+#define CP_CPC_DEBUG__CPC_ROLLOVER_EVENT_DEBUG_EN_MASK 0x00000008L
+#define CP_CPC_DEBUG__CPC_HARVESTING_CONTINUE_DISABLE_MASK 0x00000800L
+#define CP_CPC_DEBUG__CPC_HARVESTING_RELAUNCH_DISABLE_MASK 0x00001000L
+#define CP_CPC_DEBUG__CPC_HARVEST_AUTO_DISABLE_MASK 0x00002000L
+#define CP_CPC_DEBUG__CPC_HARVESTING_DISPATCH_DISABLE_MASK 0x00004000L
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE_MASK 0x00008000L
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPC_DEBUG__BUSY_EXTENDER_MASK 0x00180000L
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE_MASK 0x00200000L
+#define CP_CPC_DEBUG__RESERVED_INTERRUPT_ENABLE_MASK 0x00400000L
+#define CP_CPC_DEBUG__RESTORE_FIFO_EMPTY_SEL_MASK 0x00800000L
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_CPC_DEBUG__PRIV_REG_INTERRUPT_ENABLE_MASK 0x08000000L
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE_MASK 0x80000000L
+//CP_CPC_DEBUG_2
+#define CP_CPC_DEBUG_2__DC_GD_PIPE3_QSWITCH_CNT__SHIFT 0x0
+#define CP_CPC_DEBUG_2__DC_GD_PIPE2_QSWITCH_CNT__SHIFT 0x8
+#define CP_CPC_DEBUG_2__DC_GD_PIPE1_QSWITCH_CNT__SHIFT 0x10
+#define CP_CPC_DEBUG_2__DC_GD_PIPE0_QSWITCH_CNT__SHIFT 0x18
+#define CP_CPC_DEBUG_2__DC_GD_PIPE3_QSWITCH_CNT_MASK 0x000000FFL
+#define CP_CPC_DEBUG_2__DC_GD_PIPE2_QSWITCH_CNT_MASK 0x0000FF00L
+#define CP_CPC_DEBUG_2__DC_GD_PIPE1_QSWITCH_CNT_MASK 0x00FF0000L
+#define CP_CPC_DEBUG_2__DC_GD_PIPE0_QSWITCH_CNT_MASK 0xFF000000L
+//CP_PQ_WPTR_POLL_CNTL
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
+#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
+#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL1
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
+//CP_ME1_PIPE0_INT_CNTL
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_CNTL
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_CNTL
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_CNTL
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_CNTL
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_CNTL
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_CNTL
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_CNTL
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE0_INT_STATUS
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_STATUS
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_STATUS
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_STATUS
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_STATUS
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_STATUS
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_STATUS
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_STATUS
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CC_GC_EDC_CONFIG
+#define CC_GC_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_GC_EDC_CONFIG__ENABLE_IRRITATOR_CLK__SHIFT 0x2
+#define CC_GC_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define CC_GC_EDC_CONFIG__ENABLE_IRRITATOR_CLK_MASK 0x00000004L
+//CP_ME1_PIPE_PRIORITY_CNTS
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME1_PIPE0_PRIORITY
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE1_PRIORITY
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE2_PRIORITY
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE3_PRIORITY
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE_PRIORITY_CNTS
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME2_PIPE0_PRIORITY
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE1_PRIORITY
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE2_PRIORITY
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE3_PRIORITY
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_CE_PRGRM_CNTR_START
+#define CP_CE_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_CE_PRGRM_CNTR_START__IP_START_MASK 0x000007FFL
+//CP_PFP_PRGRM_CNTR_START
+#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0x00001FFFL
+//CP_ME_PRGRM_CNTR_START
+#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0x00000FFFL
+//CP_MEC1_PRGRM_CNTR_START
+#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
+//CP_MEC2_PRGRM_CNTR_START
+#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
+//CP_CE_INTR_ROUTINE_START
+#define CP_CE_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_CE_INTR_ROUTINE_START__IR_START_MASK 0x000007FFL
+//CP_PFP_INTR_ROUTINE_START
+#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0x00001FFFL
+//CP_ME_INTR_ROUTINE_START
+#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0x00000FFFL
+//CP_MEC1_INTR_ROUTINE_START
+#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
+//CP_MEC2_INTR_ROUTINE_START
+#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
+//CP_CONTEXT_CNTL
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX__SHIFT 0x0
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX__SHIFT 0x10
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX_MASK 0x00000007L
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX_MASK 0x00070000L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
+//CP_MAX_CONTEXT
+#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
+#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
+//CP_IQ_WAIT_TIME1
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
+#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
+//CP_IQ_WAIT_TIME2
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
+#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
+#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
+//CP_RB0_BASE_HI
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_RB1_BASE_HI
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_VMID_RESET
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
+//CPC_INT_CNTL
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CPC_INT_STATUS
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_VMID_PREEMPT
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
+#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
+//CPC_INT_CNTX_ID
+#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PQ_STATUS
+#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN__SHIFT 0x2
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN_MASK 0x00000004L
+//CP_CPC_IC_BASE_LO
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_CPC_IC_BASE_HI
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_CPC_IC_BASE_CNTL
+#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x01000000L
+//CP_CPC_IC_OP_CNTL
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_CPC_IC_OP_CNTL__ICACHE_INVALIDATED__SHIFT 0x6
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+#define CP_CPC_IC_OP_CNTL__ICACHE_INVALIDATED_MASK 0x00000040L
+//CP_MEC1_F32_INT_DIS
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INT_DIS
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_VMID_STATUS
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
+
+
+// addressBlock: xcd0_gc_cppdec2
+//CP_RB_DOORBELL_CONTROL_SCH_0
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_1
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_2
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_3
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_4
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_5
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_6
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_7
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CLEAR
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
+//CP_CPF_DSM_CNTL
+#define CP_CPF_DSM_CNTL__CPF0_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define CP_CPF_DSM_CNTL__CPF0_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define CP_CPF_DSM_CNTL__CPF1_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define CP_CPF_DSM_CNTL__CPF1_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define CP_CPF_DSM_CNTL__CPF2_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define CP_CPF_DSM_CNTL__CPF2_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define CP_CPF_DSM_CNTL__CPF0_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define CP_CPF_DSM_CNTL__CPF0_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define CP_CPF_DSM_CNTL__CPF1_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define CP_CPF_DSM_CNTL__CPF1_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define CP_CPF_DSM_CNTL__CPF2_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define CP_CPF_DSM_CNTL__CPF2_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+//CP_CPG_DSM_CNTL
+#define CP_CPG_DSM_CNTL__CPG0_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define CP_CPG_DSM_CNTL__CPG0_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define CP_CPG_DSM_CNTL__CPG1_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define CP_CPG_DSM_CNTL__CPG1_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define CP_CPG_DSM_CNTL__CPG2_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define CP_CPG_DSM_CNTL__CPG2_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define CP_CPG_DSM_CNTL__CPG0_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define CP_CPG_DSM_CNTL__CPG0_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define CP_CPG_DSM_CNTL__CPG1_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define CP_CPG_DSM_CNTL__CPG1_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define CP_CPG_DSM_CNTL__CPG2_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define CP_CPG_DSM_CNTL__CPG2_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+//CP_CPC_DSM_CNTL
+#define CP_CPC_DSM_CNTL__CPC0_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define CP_CPC_DSM_CNTL__CPC0_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define CP_CPC_DSM_CNTL__CPC1_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define CP_CPC_DSM_CNTL__CPC1_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define CP_CPC_DSM_CNTL__CPC2_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define CP_CPC_DSM_CNTL__CPC2_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define CP_CPC_DSM_CNTL__CPC3_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define CP_CPC_DSM_CNTL__CPC3_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define CP_CPC_DSM_CNTL__CPC4_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define CP_CPC_DSM_CNTL__CPC4_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define CP_CPC_DSM_CNTL__CPC5_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define CP_CPC_DSM_CNTL__CPC5_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define CP_CPC_DSM_CNTL__CPC6_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define CP_CPC_DSM_CNTL__CPC6_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define CP_CPC_DSM_CNTL__CPC7_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define CP_CPC_DSM_CNTL__CPC7_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define CP_CPC_DSM_CNTL__CPC8_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define CP_CPC_DSM_CNTL__CPC8_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define CP_CPC_DSM_CNTL__CPC0_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define CP_CPC_DSM_CNTL__CPC0_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define CP_CPC_DSM_CNTL__CPC1_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define CP_CPC_DSM_CNTL__CPC1_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define CP_CPC_DSM_CNTL__CPC2_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define CP_CPC_DSM_CNTL__CPC2_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define CP_CPC_DSM_CNTL__CPC3_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define CP_CPC_DSM_CNTL__CPC3_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define CP_CPC_DSM_CNTL__CPC4_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define CP_CPC_DSM_CNTL__CPC4_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define CP_CPC_DSM_CNTL__CPC5_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define CP_CPC_DSM_CNTL__CPC5_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define CP_CPC_DSM_CNTL__CPC6_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define CP_CPC_DSM_CNTL__CPC6_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define CP_CPC_DSM_CNTL__CPC7_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define CP_CPC_DSM_CNTL__CPC7_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define CP_CPC_DSM_CNTL__CPC8_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define CP_CPC_DSM_CNTL__CPC8_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//CP_CPF_DSM_CNTL2
+#define CP_CPF_DSM_CNTL2__CPF0_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define CP_CPF_DSM_CNTL2__CPF0_SELECT_INJECT_DELAY__SHIFT 0x2
+#define CP_CPF_DSM_CNTL2__CPF1_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define CP_CPF_DSM_CNTL2__CPF1_SELECT_INJECT_DELAY__SHIFT 0x5
+#define CP_CPF_DSM_CNTL2__CPF2_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define CP_CPF_DSM_CNTL2__CPF2_SELECT_INJECT_DELAY__SHIFT 0x8
+#define CP_CPF_DSM_CNTL2__CPF0_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define CP_CPF_DSM_CNTL2__CPF0_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define CP_CPF_DSM_CNTL2__CPF1_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define CP_CPF_DSM_CNTL2__CPF1_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define CP_CPF_DSM_CNTL2__CPF2_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define CP_CPF_DSM_CNTL2__CPF2_SELECT_INJECT_DELAY_MASK 0x00000100L
+//CP_CPG_DSM_CNTL2
+#define CP_CPG_DSM_CNTL2__CPG0_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define CP_CPG_DSM_CNTL2__CPG0_SELECT_INJECT_DELAY__SHIFT 0x2
+#define CP_CPG_DSM_CNTL2__CPG1_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define CP_CPG_DSM_CNTL2__CPG1_SELECT_INJECT_DELAY__SHIFT 0x5
+#define CP_CPG_DSM_CNTL2__CPG2_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define CP_CPG_DSM_CNTL2__CPG2_SELECT_INJECT_DELAY__SHIFT 0x8
+#define CP_CPG_DSM_CNTL2__CPG0_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define CP_CPG_DSM_CNTL2__CPG0_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define CP_CPG_DSM_CNTL2__CPG1_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define CP_CPG_DSM_CNTL2__CPG1_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define CP_CPG_DSM_CNTL2__CPG2_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define CP_CPG_DSM_CNTL2__CPG2_SELECT_INJECT_DELAY_MASK 0x00000100L
+//CP_CPC_DSM_CNTL2
+#define CP_CPC_DSM_CNTL2__CPC0_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define CP_CPC_DSM_CNTL2__CPC0_SELECT_INJECT_DELAY__SHIFT 0x2
+#define CP_CPC_DSM_CNTL2__CPC1_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define CP_CPC_DSM_CNTL2__CPC1_SELECT_INJECT_DELAY__SHIFT 0x5
+#define CP_CPC_DSM_CNTL2__CPC2_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define CP_CPC_DSM_CNTL2__CPC2_SELECT_INJECT_DELAY__SHIFT 0x8
+#define CP_CPC_DSM_CNTL2__CPC3_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define CP_CPC_DSM_CNTL2__CPC3_SELECT_INJECT_DELAY__SHIFT 0xb
+#define CP_CPC_DSM_CNTL2__CPC4_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define CP_CPC_DSM_CNTL2__CPC4_SELECT_INJECT_DELAY__SHIFT 0xe
+#define CP_CPC_DSM_CNTL2__CPC5_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define CP_CPC_DSM_CNTL2__CPC5_SELECT_INJECT_DELAY__SHIFT 0x11
+#define CP_CPC_DSM_CNTL2__CPC6_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define CP_CPC_DSM_CNTL2__CPC6_SELECT_INJECT_DELAY__SHIFT 0x14
+#define CP_CPC_DSM_CNTL2__CPC7_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define CP_CPC_DSM_CNTL2__CPC7_SELECT_INJECT_DELAY__SHIFT 0x17
+#define CP_CPC_DSM_CNTL2__CPC8_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define CP_CPC_DSM_CNTL2__CPC8_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define CP_CPC_DSM_CNTL2__CPC0_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define CP_CPC_DSM_CNTL2__CPC0_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define CP_CPC_DSM_CNTL2__CPC1_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define CP_CPC_DSM_CNTL2__CPC1_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define CP_CPC_DSM_CNTL2__CPC2_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define CP_CPC_DSM_CNTL2__CPC2_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define CP_CPC_DSM_CNTL2__CPC3_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define CP_CPC_DSM_CNTL2__CPC3_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define CP_CPC_DSM_CNTL2__CPC4_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define CP_CPC_DSM_CNTL2__CPC4_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define CP_CPC_DSM_CNTL2__CPC5_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define CP_CPC_DSM_CNTL2__CPC5_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define CP_CPC_DSM_CNTL2__CPC6_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define CP_CPC_DSM_CNTL2__CPC6_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define CP_CPC_DSM_CNTL2__CPC7_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define CP_CPC_DSM_CNTL2__CPC7_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define CP_CPC_DSM_CNTL2__CPC8_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define CP_CPC_DSM_CNTL2__CPC8_SELECT_INJECT_DELAY_MASK 0x04000000L
+//CP_CPF_DSM_CNTL2A
+#define CP_CPF_DSM_CNTL2A__CPF_INJECT_DELAY__SHIFT 0x0
+#define CP_CPF_DSM_CNTL2A__CPF_INJECT_DELAY_MASK 0x0000003FL
+//CP_CPG_DSM_CNTL2A
+#define CP_CPG_DSM_CNTL2A__CPG_INJECT_DELAY__SHIFT 0x0
+#define CP_CPG_DSM_CNTL2A__CPG_INJECT_DELAY_MASK 0x0000003FL
+//CP_CPC_DSM_CNTL2A
+#define CP_CPC_DSM_CNTL2A__CPC_INJECT_DELAY__SHIFT 0x0
+#define CP_CPC_DSM_CNTL2A__CPC_INJECT_DELAY_MASK 0x0000003FL
+//CP_EDC_FUE_CNTL
+#define CP_EDC_FUE_CNTL__CP_FUE_MASK__SHIFT 0x0
+#define CP_EDC_FUE_CNTL__SPI_FUE_MASK__SHIFT 0x1
+#define CP_EDC_FUE_CNTL__GDS_FUE_MASK__SHIFT 0x2
+#define CP_EDC_FUE_CNTL__TC_RLC_FUE_MASK__SHIFT 0x3
+#define CP_EDC_FUE_CNTL__TC_CPG_FUE_MASK__SHIFT 0x4
+#define CP_EDC_FUE_CNTL__TCA_FUE_MASK__SHIFT 0x5
+#define CP_EDC_FUE_CNTL__TCC_FUE_MASK__SHIFT 0x6
+#define CP_EDC_FUE_CNTL__UTCL2_FUE_MASK__SHIFT 0x7
+#define CP_EDC_FUE_CNTL__CP_FUE_FLAG__SHIFT 0x10
+#define CP_EDC_FUE_CNTL__SPI_FUE_FLAG__SHIFT 0x11
+#define CP_EDC_FUE_CNTL__GDS_FUE_FLAG__SHIFT 0x12
+#define CP_EDC_FUE_CNTL__TC_RLC_FUE_FLAG__SHIFT 0x13
+#define CP_EDC_FUE_CNTL__TC_CPG_FUE_FLAG__SHIFT 0x14
+#define CP_EDC_FUE_CNTL__TCA_FUE_FLAG__SHIFT 0x15
+#define CP_EDC_FUE_CNTL__TCC_FUE_FLAG__SHIFT 0x16
+#define CP_EDC_FUE_CNTL__UTCL2_FUE_FLAG__SHIFT 0x17
+#define CP_EDC_FUE_CNTL__CP_FUE_MASK_MASK 0x00000001L
+#define CP_EDC_FUE_CNTL__SPI_FUE_MASK_MASK 0x00000002L
+#define CP_EDC_FUE_CNTL__GDS_FUE_MASK_MASK 0x00000004L
+#define CP_EDC_FUE_CNTL__TC_RLC_FUE_MASK_MASK 0x00000008L
+#define CP_EDC_FUE_CNTL__TC_CPG_FUE_MASK_MASK 0x00000010L
+#define CP_EDC_FUE_CNTL__TCA_FUE_MASK_MASK 0x00000020L
+#define CP_EDC_FUE_CNTL__TCC_FUE_MASK_MASK 0x00000040L
+#define CP_EDC_FUE_CNTL__UTCL2_FUE_MASK_MASK 0x00000080L
+#define CP_EDC_FUE_CNTL__CP_FUE_FLAG_MASK 0x00010000L
+#define CP_EDC_FUE_CNTL__SPI_FUE_FLAG_MASK 0x00020000L
+#define CP_EDC_FUE_CNTL__GDS_FUE_FLAG_MASK 0x00040000L
+#define CP_EDC_FUE_CNTL__TC_RLC_FUE_FLAG_MASK 0x00080000L
+#define CP_EDC_FUE_CNTL__TC_CPG_FUE_FLAG_MASK 0x00100000L
+#define CP_EDC_FUE_CNTL__TCA_FUE_FLAG_MASK 0x00200000L
+#define CP_EDC_FUE_CNTL__TCC_FUE_FLAG_MASK 0x00400000L
+#define CP_EDC_FUE_CNTL__UTCL2_FUE_FLAG_MASK 0x00800000L
+//CP_GFX_MQD_CONTROL
+#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_GFX_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_GFX_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
+//CP_GFX_MQD_BASE_ADDR
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_MQD_BASE_ADDR_HI
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_STATUS
+#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+//CPG_UTCL1_STATUS
+#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPC_UTCL1_STATUS
+#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPF_UTCL1_STATUS
+#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CP_SD_CNTL
+#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
+#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
+#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
+#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
+#define CP_SD_CNTL__SPI_EN__SHIFT 0x4
+#define CP_SD_CNTL__WD_EN__SHIFT 0x5
+#define CP_SD_CNTL__IA_EN__SHIFT 0x6
+#define CP_SD_CNTL__PA_EN__SHIFT 0x7
+#define CP_SD_CNTL__RMI_EN__SHIFT 0x8
+#define CP_SD_CNTL__EA_EN__SHIFT 0x9
+#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
+#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
+#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
+#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
+#define CP_SD_CNTL__SPI_EN_MASK 0x00000010L
+#define CP_SD_CNTL__WD_EN_MASK 0x00000020L
+#define CP_SD_CNTL__IA_EN_MASK 0x00000040L
+#define CP_SD_CNTL__PA_EN_MASK 0x00000080L
+#define CP_SD_CNTL__RMI_EN_MASK 0x00000100L
+#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
+//CP_SOFT_RESET_CNTL
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
+//CP_CPC_GFX_CNTL
+#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
+#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
+#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
+#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
+#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
+#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
+#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
+#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
+
+
+// addressBlock: xcd0_gc_spipdec
+//SPI_ARB_PRIORITY
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
+//SPI_ARB_CYCLES_0
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
+//SPI_ARB_CYCLES_1
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
+//SPI_CDBG_SYS_GFX
+#define SPI_CDBG_SYS_GFX__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_GFX__VS_EN__SHIFT 0x1
+#define SPI_CDBG_SYS_GFX__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_GFX__ES_EN__SHIFT 0x3
+#define SPI_CDBG_SYS_GFX__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_GFX__LS_EN__SHIFT 0x5
+#define SPI_CDBG_SYS_GFX__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_GFX__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_GFX__VS_EN_MASK 0x0002L
+#define SPI_CDBG_SYS_GFX__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_GFX__ES_EN_MASK 0x0008L
+#define SPI_CDBG_SYS_GFX__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_GFX__LS_EN_MASK 0x0020L
+#define SPI_CDBG_SYS_GFX__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_HP3D
+#define SPI_CDBG_SYS_HP3D__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_HP3D__VS_EN__SHIFT 0x1
+#define SPI_CDBG_SYS_HP3D__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_HP3D__ES_EN__SHIFT 0x3
+#define SPI_CDBG_SYS_HP3D__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_HP3D__LS_EN__SHIFT 0x5
+#define SPI_CDBG_SYS_HP3D__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_HP3D__VS_EN_MASK 0x0002L
+#define SPI_CDBG_SYS_HP3D__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_HP3D__ES_EN_MASK 0x0008L
+#define SPI_CDBG_SYS_HP3D__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_HP3D__LS_EN_MASK 0x0020L
+//SPI_CDBG_SYS_CS0
+#define SPI_CDBG_SYS_CS0__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS0__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS0__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS0__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS0__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS0__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS0__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS0__PIPE3_MASK 0xFF000000L
+//SPI_CDBG_SYS_CS1
+#define SPI_CDBG_SYS_CS1__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS1__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS1__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS1__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS1__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS1__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS1__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS1__PIPE3_MASK 0xFF000000L
+//SPI_WCL_PIPE_PERCENT_GFX
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE__SHIFT 0x7
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE__SHIFT 0x11
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE_MASK 0x00000F80L
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE_MASK 0x003E0000L
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_HP3D
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_CS0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS1
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS2
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS3
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS4
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS5
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS6
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS7
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x01L
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN__SHIFT 0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN__SHIFT 0x8
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN__SHIFT 0x10
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN__SHIFT 0x18
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN_MASK 0x000000FFL
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN_MASK 0x0000FF00L
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN_MASK 0x00FF0000L
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN_MASK 0xFF000000L
+//SPI_GDBG_PER_VMID_CNTL
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID__SHIFT 0x0
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE__SHIFT 0x1
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT 0x3
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT 0x4
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT 0xd
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START__SHIFT 0xe
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END__SHIFT 0xf
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK 0x0001L
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK 0x0006L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK 0x0008L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK 0x1FF0L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK 0x2000L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START_MASK 0x4000L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END_MASK 0x8000L
+//SPI_GDBG_WAVE_CNTL3
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL3__STALL_VS__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL3__STALL_VS_MASK 0x00000002L
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L
+//SPI_SCRATCH_ADDR_CHECK
+#define SPI_SCRATCH_ADDR_CHECK__RESERVED__SHIFT 0x0
+#define SPI_SCRATCH_ADDR_CHECK__RESERVED_MASK 0x0FL
+//SPI_SCRATCH_ADDR_STATUS
+#define SPI_SCRATCH_ADDR_STATUS__WRITE_DIS__SHIFT 0x0
+#define SPI_SCRATCH_ADDR_STATUS__OVERFLOW_DETECTED__SHIFT 0x1
+#define SPI_SCRATCH_ADDR_STATUS__ME_ID__SHIFT 0x2
+#define SPI_SCRATCH_ADDR_STATUS__PIPE_ID__SHIFT 0x4
+#define SPI_SCRATCH_ADDR_STATUS__WRITE_DIS_MASK 0x01L
+#define SPI_SCRATCH_ADDR_STATUS__OVERFLOW_DETECTED_MASK 0x02L
+#define SPI_SCRATCH_ADDR_STATUS__ME_ID_MASK 0x0CL
+#define SPI_SCRATCH_ADDR_STATUS__PIPE_ID_MASK 0x30L
+//SPI_RESET_DEBUG
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET__SHIFT 0x0
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID__SHIFT 0x1
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID__SHIFT 0x2
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE__SHIFT 0x3
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY__SHIFT 0x4
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_MASK 0x01L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID_MASK 0x02L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID_MASK 0x04L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE_MASK 0x08L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY_MASK 0x10L
+//SPI_COMPUTE_QUEUE_RESET
+#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
+#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
+//SPI_RESOURCE_RESERVE_CU_0
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_1
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_2
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_3
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_4
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_5
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_6
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_7
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_8
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_9
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_2
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_3
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_4
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_5
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_6
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_7
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_8
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_9
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_CU_10
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_11
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_11
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_CU_12
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_13
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_14
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_15
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_12
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_13
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_14
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_15
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_COMPUTE_WF_CTX_SAVE
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
+//SPI_ARB_CNTL_0
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
+
+
+// addressBlock: xcd0_gc_cpphqddec
+//CP_HQD_GFX_CONTROL
+#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
+#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
+#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
+#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
+//CP_HQD_GFX_STATUS
+#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
+#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
+//CP_HPD_ROQ_OFFSETS
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x003F0000L
+//CP_HPD_STATUS0
+#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK__SHIFT 0x1d
+#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK_MASK 0x20000000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+//CP_HPD_UTCL1_CNTL
+#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
+#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
+//CP_HPD_UTCL1_ERROR
+#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
+#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
+#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
+#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
+#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
+#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
+//CP_HPD_UTCL1_ERROR_ADDR
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
+//CP_MQD_BASE_ADDR
+#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_MQD_BASE_ADDR_HI
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_ACTIVE
+#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
+#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
+//CP_HQD_VMID
+#define CP_HQD_VMID__VMID__SHIFT 0x0
+#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
+#define CP_HQD_VMID__VQID__SHIFT 0x10
+#define CP_HQD_VMID__VMID_MASK 0x0000000FL
+#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
+#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
+//CP_HQD_PERSISTENT_STATE
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
+//CP_HQD_PIPE_PRIORITY
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
+//CP_HQD_QUEUE_PRIORITY
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_HQD_QUANTUM
+#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
+#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
+#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_HQD_PQ_BASE
+#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
+#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_BASE_HI
+#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
+//CP_HQD_PQ_RPTR
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_RPTR_REPORT_ADDR
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_WPTR_POLL_ADDR
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
+//CP_HQD_PQ_WPTR_POLL_ADDR_HI
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_DOORBELL_CONTROL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_HQD_PQ_CONTROL
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
+#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT 0x10
+#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT 0x11
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_PQ_CONTROL__TMZ__SHIFT 0x16
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x19
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
+#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP__SHIFT 0x1d
+#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
+#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN_MASK 0x00010000L
+#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP_MASK 0x00060000L
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_PQ_CONTROL__TMZ_MASK 0x00400000L
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x06000000L
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
+#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK 0x20000000L
+#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
+//CP_HQD_IB_BASE_ADDR
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_IB_BASE_ADDR_HI
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_IB_RPTR
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
+//CP_HQD_IB_CONTROL
+#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
+#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
+//CP_HQD_IQ_TIMER
+#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x19
+#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
+#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x02000000L
+#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
+#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_HQD_IQ_RPTR
+#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
+#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
+//CP_HQD_DEQUEUE_REQUEST
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000007L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_HQD_DMA_OFFLOAD
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+//CP_HQD_OFFLOAD
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_SEMA_CMD
+#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
+#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
+#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
+#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
+//CP_HQD_MSG_TYPE
+#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
+#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
+#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
+#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
+//CP_HQD_ATOMIC0_PREOP_LO
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC0_PREOP_HI
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_LO
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_HI
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER0
+#define CP_HQD_HQ_SCHEDULER0__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER0__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_STATUS0
+#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT__SHIFT 0x2
+#define CP_HQD_HQ_STATUS0__RSV_6_4__SHIFT 0x4
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_STATUS0__PG_ACTIVATED__SHIFT 0x9
+#define CP_HQD_HQ_STATUS0__RSVR_29_10__SHIFT 0xa
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000003L
+#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT_MASK 0x0000000CL
+#define CP_HQD_HQ_STATUS0__RSV_6_4_MASK 0x00000070L
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_STATUS0__PG_ACTIVATED_MASK 0x00000200L
+#define CP_HQD_HQ_STATUS0__RSVR_29_10_MASK 0x3FFFFC00L
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_CONTROL0
+#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER1
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_MQD_CONTROL
+#define CP_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
+//CP_HQD_HQ_STATUS1
+#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_CONTROL1
+#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR_HI
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
+//CP_HQD_EOP_CONTROL
+#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
+#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
+//CP_HQD_EOP_RPTR
+#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
+#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
+#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
+#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
+//CP_HQD_EOP_WPTR
+#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
+#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
+#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
+#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
+//CP_HQD_EOP_EVENTS
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_LO
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_HI
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_CTX_SAVE_CONTROL
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000008L
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CP_HQD_CNTL_STACK_OFFSET
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_CNTL_STACK_SIZE
+#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CP_HQD_WG_STATE_OFFSET
+#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x07FFFFFCL
+//CP_HQD_CTX_SAVE_SIZE
+#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x07FFF000L
+//CP_HQD_GDS_RESOURCE_STATE
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
+//CP_HQD_ERROR
+#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
+#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
+#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
+#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
+#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
+#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
+#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
+#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
+#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
+#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
+#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
+#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
+//CP_HQD_EOP_WPTR_MEM
+#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
+//CP_HQD_AQL_CONTROL
+#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
+#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
+#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
+//CP_HQD_PQ_WPTR_LO
+#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_WPTR_HI
+#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
+//CP_HQD_AQL_CONTROL_1
+#define CP_HQD_AQL_CONTROL_1__RESERVED__SHIFT 0x0
+#define CP_HQD_AQL_CONTROL_1__RESERVED_MASK 0xFFFFFFFFL
+//CP_HQD_AQL_DISPATCH_ID
+#define CP_HQD_AQL_DISPATCH_ID__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_AQL_DISPATCH_ID__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_AQL_DISPATCH_ID_HI
+#define CP_HQD_AQL_DISPATCH_ID_HI__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_AQL_DISPATCH_ID_HI__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+
+
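Every register in this generated header follows the same convention: each field gets a paired __SHIFT/_MASK define, so a field is read by masking the 32-bit register word and shifting the result down. A minimal sketch of that read pattern, reusing the CP_HQD_ERROR defines above (the helper names below are illustrative only, not driver code):

    /* Illustrative only, not driver code: decode fields of a raw
     * CP_HQD_ERROR value using the __SHIFT/_MASK pairs defined above. */
    static inline unsigned int hqd_field(unsigned int reg_val,
                                         unsigned int mask, unsigned int shift)
    {
            return (reg_val & mask) >> shift;   /* isolate field, align to bit 0 */
    }

    static inline int hqd_has_pq_fault(unsigned int err)
    {
            return hqd_field(err, CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK,
                             CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT) != 0;
    }
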
+// addressBlock: xcd0_gc_tcpdec
+//TCP_WATCH0_ADDR_H
+#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH0_ADDR_L
+#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH0_CNTL
+#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH0_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH0_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH1_ADDR_H
+#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH1_ADDR_L
+#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH1_CNTL
+#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH1_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH1_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH2_ADDR_H
+#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH2_ADDR_L
+#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH2_CNTL
+#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH2_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH2_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH3_ADDR_H
+#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH3_ADDR_L
+#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH3_CNTL
+#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH3_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH3_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
+//TCP_GATCL1_CNTL
+#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID__SHIFT 0x19
+#define TCP_GATCL1_CNTL__FORCE_MISS__SHIFT 0x1a
+#define TCP_GATCL1_CNTL__FORCE_IN_ORDER__SHIFT 0x1b
+#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define TCP_GATCL1_CNTL__FORCE_MISS_MASK 0x04000000L
+#define TCP_GATCL1_CNTL__FORCE_IN_ORDER_MASK 0x08000000L
+#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//TCP_ATC_EDC_GATCL1_CNT
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL
+//TCP_GATCL1_DSM_CNTL
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0__SHIFT 0x0
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1__SHIFT 0x1
+#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A__SHIFT 0x2
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0_MASK 0x00000001L
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1_MASK 0x00000002L
+#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A_MASK 0x00000004L
+//TCP_DSM_CNTL
+#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCP_DSM_CNTL__CMD_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define TCP_DSM_CNTL__CMD_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define TCP_DSM_CNTL__VM_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define TCP_DSM_CNTL__VM_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define TCP_DSM_CNTL__DB_RAM_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define TCP_DSM_CNTL__DB_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
+#define TCP_DSM_CNTL__UTCL1_LFIFO0_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define TCP_DSM_CNTL__UTCL1_LFIFO0_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define TCP_DSM_CNTL__UTCL1_LFIFO1_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define TCP_DSM_CNTL__UTCL1_LFIFO1_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
+#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define TCP_DSM_CNTL__CMD_FIFO_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define TCP_DSM_CNTL__CMD_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define TCP_DSM_CNTL__VM_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define TCP_DSM_CNTL__VM_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define TCP_DSM_CNTL__DB_RAM_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define TCP_DSM_CNTL__DB_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
+#define TCP_DSM_CNTL__UTCL1_LFIFO0_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define TCP_DSM_CNTL__UTCL1_LFIFO0_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define TCP_DSM_CNTL__UTCL1_LFIFO1_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define TCP_DSM_CNTL__UTCL1_LFIFO1_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
+//TCP_CNTL2
+#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
+#define TCP_CNTL2__TCPF_FMT_MGCG_DISABLE__SHIFT 0x8
+#define TCP_CNTL2__TCPI_NEW_MGCG_CTRL_BIT__SHIFT 0xa
+#define TCP_CNTL2__MISS_CLK_DISABLE__SHIFT 0xb
+#define TCP_CNTL2__ADRS_CLK_DISABLE__SHIFT 0xc
+#define TCP_CNTL2__VM_CLK_DISABLE__SHIFT 0xd
+#define TCP_CNTL2__TAGRAM_CLK_DISABLE__SHIFT 0xe
+#define TCP_CNTL2__LEGACY_MGCG_DISABLE__SHIFT 0xf
+#define TCP_CNTL2__MEM_MID_GATE_MGCG_DISABLE__SHIFT 0x13
+#define TCP_CNTL2__UTCL1_MID_GATE_MGCG_DISABLE__SHIFT 0x14
+#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
+#define TCP_CNTL2__TCPF_FMT_MGCG_DISABLE_MASK 0x00000100L
+#define TCP_CNTL2__TCPI_NEW_MGCG_CTRL_BIT_MASK 0x00000400L
+#define TCP_CNTL2__MISS_CLK_DISABLE_MASK 0x00000800L
+#define TCP_CNTL2__ADRS_CLK_DISABLE_MASK 0x00001000L
+#define TCP_CNTL2__VM_CLK_DISABLE_MASK 0x00002000L
+#define TCP_CNTL2__TAGRAM_CLK_DISABLE_MASK 0x00004000L
+#define TCP_CNTL2__LEGACY_MGCG_DISABLE_MASK 0x00008000L
+#define TCP_CNTL2__MEM_MID_GATE_MGCG_DISABLE_MASK 0x00080000L
+#define TCP_CNTL2__UTCL1_MID_GATE_MGCG_DISABLE_MASK 0x00100000L
+//TCP_UTCL1_CNTL1
+#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
+#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define TCP_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define TCP_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define TCP_UTCL1_CNTL1__UTCL1_FGCG_REPEATER_DISABLE__SHIFT 0x10
+#define TCP_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define TCP_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
+#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define TCP_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define TCP_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define TCP_UTCL1_CNTL1__UTCL1_FGCG_REPEATER_DISABLE_MASK 0x00010000L
+#define TCP_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define TCP_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//TCP_UTCL1_CNTL2
+#define TCP_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define TCP_UTCL1_CNTL2__ANY_LINE_VALID__SHIFT 0xa
+#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define TCP_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define TCP_UTCL1_CNTL2__THRASHING_TIMEOUT_PROTECT_ENABLE__SHIFT 0x1b
+#define TCP_UTCL1_CNTL2__THRASHING_ENABLE__SHIFT 0x1c
+#define TCP_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define TCP_UTCL1_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
+#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define TCP_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define TCP_UTCL1_CNTL2__THRASHING_TIMEOUT_PROTECT_ENABLE_MASK 0x08000000L
+#define TCP_UTCL1_CNTL2__THRASHING_ENABLE_MASK 0x10000000L
+//TCP_UTCL1_STATUS
+#define TCP_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define TCP_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define TCP_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define TCP_UTCL1_STATUS__TIMEOUT_DETECTED__SHIFT 0x3
+#define TCP_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define TCP_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define TCP_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define TCP_UTCL1_STATUS__TIMEOUT_DETECTED_MASK 0x00000008L
+//TCP_DSM_CNTL2
+#define TCP_DSM_CNTL2__CACHE_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCP_DSM_CNTL2__CACHE_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCP_DSM_CNTL2__LFIFO_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCP_DSM_CNTL2__LFIFO_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCP_DSM_CNTL2__CMD_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TCP_DSM_CNTL2__CMD_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TCP_DSM_CNTL2__VM_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define TCP_DSM_CNTL2__VM_FIFO_SELECT_INJECT_DELAY__SHIFT 0xb
+#define TCP_DSM_CNTL2__DB_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCP_DSM_CNTL2__DB_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCP_DSM_CNTL2__UTCL1_LFIFO0_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TCP_DSM_CNTL2__UTCL1_LFIFO0_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TCP_DSM_CNTL2__UTCL1_LFIFO1_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TCP_DSM_CNTL2__UTCL1_LFIFO1_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TCP_DSM_CNTL2__TCP_INJECT_DELAY__SHIFT 0x1a
+#define TCP_DSM_CNTL2__CACHE_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCP_DSM_CNTL2__CACHE_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCP_DSM_CNTL2__LFIFO_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCP_DSM_CNTL2__LFIFO_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCP_DSM_CNTL2__CMD_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TCP_DSM_CNTL2__CMD_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TCP_DSM_CNTL2__VM_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define TCP_DSM_CNTL2__VM_FIFO_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define TCP_DSM_CNTL2__DB_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCP_DSM_CNTL2__DB_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCP_DSM_CNTL2__UTCL1_LFIFO0_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TCP_DSM_CNTL2__UTCL1_LFIFO0_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TCP_DSM_CNTL2__UTCL1_LFIFO1_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TCP_DSM_CNTL2__UTCL1_LFIFO1_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TCP_DSM_CNTL2__TCP_INJECT_DELAY_MASK 0xFC000000L
+//TCP_PERFCOUNTER_FILTER
+#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0xf
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x14
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x16
+#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x19
+#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1a
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1b
+#define TCP_PERFCOUNTER_FILTER__ADDR_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x000007E0L
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x00007800L
+#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x000F8000L
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00300000L
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x01C00000L
+#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x02000000L
+#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x04000000L
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x08000000L
+#define TCP_PERFCOUNTER_FILTER__ADDR_MODE_MASK 0x70000000L
+//TCP_PERFCOUNTER_FILTER_EN
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
+#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0x8
+#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x9
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xa
+#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
+#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000100L
+#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000200L
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000400L
+#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE_MASK 0x00000800L
+
+
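The write direction is the mirror image: shift the field value up and clamp it with the mask before OR-ing it into the register word. A sketch of composing a TCP_WATCH0_CNTL value from the defines above (helper name and argument choices are illustrative, not taken from the driver):

    /* Illustrative only: pack a watchpoint control word from its fields. */
    static inline unsigned int tcp_watch0_cntl_pack(unsigned int mask,
                                                    unsigned int vmid,
                                                    unsigned int mode,
                                                    unsigned int valid)
    {
            unsigned int v = 0;

            v |= (mask  << TCP_WATCH0_CNTL__MASK__SHIFT)  & TCP_WATCH0_CNTL__MASK_MASK;
            v |= (vmid  << TCP_WATCH0_CNTL__VMID__SHIFT)  & TCP_WATCH0_CNTL__VMID_MASK;
            v |= (mode  << TCP_WATCH0_CNTL__MODE__SHIFT)  & TCP_WATCH0_CNTL__MODE_MASK;
            v |= (valid << TCP_WATCH0_CNTL__VALID__SHIFT) & TCP_WATCH0_CNTL__VALID_MASK;
            return v;
    }
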
+// addressBlock: xcd0_gc_gdspdec
+//GDS_VMID0_BASE
+#define GDS_VMID0_BASE__BASE__SHIFT 0x0
+#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID0_SIZE
+#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID1_BASE
+#define GDS_VMID1_BASE__BASE__SHIFT 0x0
+#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID1_SIZE
+#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID2_BASE
+#define GDS_VMID2_BASE__BASE__SHIFT 0x0
+#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID2_SIZE
+#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID3_BASE
+#define GDS_VMID3_BASE__BASE__SHIFT 0x0
+#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID3_SIZE
+#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID4_BASE
+#define GDS_VMID4_BASE__BASE__SHIFT 0x0
+#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID4_SIZE
+#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID5_BASE
+#define GDS_VMID5_BASE__BASE__SHIFT 0x0
+#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID5_SIZE
+#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID6_BASE
+#define GDS_VMID6_BASE__BASE__SHIFT 0x0
+#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID6_SIZE
+#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID7_BASE
+#define GDS_VMID7_BASE__BASE__SHIFT 0x0
+#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID7_SIZE
+#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID8_BASE
+#define GDS_VMID8_BASE__BASE__SHIFT 0x0
+#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID8_SIZE
+#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID9_BASE
+#define GDS_VMID9_BASE__BASE__SHIFT 0x0
+#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID9_SIZE
+#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID10_BASE
+#define GDS_VMID10_BASE__BASE__SHIFT 0x0
+#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID10_SIZE
+#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID11_BASE
+#define GDS_VMID11_BASE__BASE__SHIFT 0x0
+#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID11_SIZE
+#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID12_BASE
+#define GDS_VMID12_BASE__BASE__SHIFT 0x0
+#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID12_SIZE
+#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID13_BASE
+#define GDS_VMID13_BASE__BASE__SHIFT 0x0
+#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID13_SIZE
+#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID14_BASE
+#define GDS_VMID14_BASE__BASE__SHIFT 0x0
+#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID14_SIZE
+#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID15_BASE
+#define GDS_VMID15_BASE__BASE__SHIFT 0x0
+#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID15_SIZE
+#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_GWS_VMID0
+#define GDS_GWS_VMID0__BASE__SHIFT 0x0
+#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID1
+#define GDS_GWS_VMID1__BASE__SHIFT 0x0
+#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID2
+#define GDS_GWS_VMID2__BASE__SHIFT 0x0
+#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID3
+#define GDS_GWS_VMID3__BASE__SHIFT 0x0
+#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID4
+#define GDS_GWS_VMID4__BASE__SHIFT 0x0
+#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID5
+#define GDS_GWS_VMID5__BASE__SHIFT 0x0
+#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID6
+#define GDS_GWS_VMID6__BASE__SHIFT 0x0
+#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID7
+#define GDS_GWS_VMID7__BASE__SHIFT 0x0
+#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID8
+#define GDS_GWS_VMID8__BASE__SHIFT 0x0
+#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID9
+#define GDS_GWS_VMID9__BASE__SHIFT 0x0
+#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID10
+#define GDS_GWS_VMID10__BASE__SHIFT 0x0
+#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID11
+#define GDS_GWS_VMID11__BASE__SHIFT 0x0
+#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID12
+#define GDS_GWS_VMID12__BASE__SHIFT 0x0
+#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID13
+#define GDS_GWS_VMID13__BASE__SHIFT 0x0
+#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID14
+#define GDS_GWS_VMID14__BASE__SHIFT 0x0
+#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID15
+#define GDS_GWS_VMID15__BASE__SHIFT 0x0
+#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
+//GDS_OA_VMID0
+#define GDS_OA_VMID0__MASK__SHIFT 0x0
+#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID1
+#define GDS_OA_VMID1__MASK__SHIFT 0x0
+#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID2
+#define GDS_OA_VMID2__MASK__SHIFT 0x0
+#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID3
+#define GDS_OA_VMID3__MASK__SHIFT 0x0
+#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID4
+#define GDS_OA_VMID4__MASK__SHIFT 0x0
+#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID5
+#define GDS_OA_VMID5__MASK__SHIFT 0x0
+#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID6
+#define GDS_OA_VMID6__MASK__SHIFT 0x0
+#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID7
+#define GDS_OA_VMID7__MASK__SHIFT 0x0
+#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID8
+#define GDS_OA_VMID8__MASK__SHIFT 0x0
+#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID9
+#define GDS_OA_VMID9__MASK__SHIFT 0x0
+#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID10
+#define GDS_OA_VMID10__MASK__SHIFT 0x0
+#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID11
+#define GDS_OA_VMID11__MASK__SHIFT 0x0
+#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID12
+#define GDS_OA_VMID12__MASK__SHIFT 0x0
+#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID13
+#define GDS_OA_VMID13__MASK__SHIFT 0x0
+#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID14
+#define GDS_OA_VMID14__MASK__SHIFT 0x0
+#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID15
+#define GDS_OA_VMID15__MASK__SHIFT 0x0
+#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
+//GDS_GWS_RESET0
+#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
+#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
+#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
+#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
+#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
+#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
+#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
+#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
+#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
+#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
+#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
+#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
+#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
+#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
+#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
+#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
+#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
+#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
+#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
+#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
+#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
+#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
+#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
+#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
+#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
+#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
+#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
+//GDS_GWS_RESET1
+#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
+#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
+#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
+#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
+#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
+#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
+#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
+#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
+#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
+#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
+#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
+#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
+#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
+#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
+#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
+#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
+#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
+#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
+#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
+#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
+#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
+#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
+#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
+#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
+#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
+#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
+#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
+//GDS_GWS_RESOURCE_RESET
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
+//GDS_COMPUTE_MAX_WAVE_ID
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GDS_OA_RESET_MASK
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
+#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
+#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xc
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
+#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
+#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFF000L
+//GDS_OA_RESET
+#define GDS_OA_RESET__RESET__SHIFT 0x0
+#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
+#define GDS_OA_RESET__RESET_MASK 0x00000001L
+#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
+//GDS_ENHANCE
+#define GDS_ENHANCE__MISC__SHIFT 0x0
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
+#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
+#define GDS_ENHANCE__RD_BUF_TAG_MISS__SHIFT 0x12
+#define GDS_ENHANCE__GDSA_PC_CGTS_DIS__SHIFT 0x13
+#define GDS_ENHANCE__GDSO_PC_CGTS_DIS__SHIFT 0x14
+#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE__SHIFT 0x15
+#define GDS_ENHANCE__GDS_CLK_ENHANCE_DIS__SHIFT 0x16
+#define GDS_ENHANCE__DS_MEM_CLK_GATE_DIS__SHIFT 0x17
+#define GDS_ENHANCE__UNUSED__SHIFT 0x18
+#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
+#define GDS_ENHANCE__RD_BUF_TAG_MISS_MASK 0x00040000L
+#define GDS_ENHANCE__GDSA_PC_CGTS_DIS_MASK 0x00080000L
+#define GDS_ENHANCE__GDSO_PC_CGTS_DIS_MASK 0x00100000L
+#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE_MASK 0x00200000L
+#define GDS_ENHANCE__GDS_CLK_ENHANCE_DIS_MASK 0x00400000L
+#define GDS_ENHANCE__DS_MEM_CLK_GATE_DIS_MASK 0x00800000L
+#define GDS_ENHANCE__UNUSED_MASK 0xFF000000L
+//GDS_OA_CGPG_RESTORE
+#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
+#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
+#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
+#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
+#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
+#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
+#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
+#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
+#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
+#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
+//GDS_CS_CTXSW_STATUS
+#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_CS_CTXSW_CNT0
+#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT1
+#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT2
+#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT3
+#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GFX_CTXSW_STATUS
+#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_VS_CTXSW_CNT0
+#define GDS_VS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_VS_CTXSW_CNT1
+#define GDS_VS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_VS_CTXSW_CNT2
+#define GDS_VS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_VS_CTXSW_CNT3
+#define GDS_VS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT0
+#define GDS_PS0_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT1
+#define GDS_PS0_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT2
+#define GDS_PS0_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT3
+#define GDS_PS0_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT0
+#define GDS_PS1_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT1
+#define GDS_PS1_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT2
+#define GDS_PS1_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT3
+#define GDS_PS1_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT0
+#define GDS_PS2_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT1
+#define GDS_PS2_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT2
+#define GDS_PS2_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT3
+#define GDS_PS2_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT0
+#define GDS_PS3_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT1
+#define GDS_PS3_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT2
+#define GDS_PS3_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT3
+#define GDS_PS3_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT0
+#define GDS_PS4_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT1
+#define GDS_PS4_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT2
+#define GDS_PS4_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT3
+#define GDS_PS4_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT0
+#define GDS_PS5_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT1
+#define GDS_PS5_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT2
+#define GDS_PS5_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT3
+#define GDS_PS5_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT0
+#define GDS_PS6_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT1
+#define GDS_PS6_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT2
+#define GDS_PS6_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT3
+#define GDS_PS6_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT0
+#define GDS_PS7_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT1
+#define GDS_PS7_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT2
+#define GDS_PS7_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT3
+#define GDS_PS7_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT0
+#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT1
+#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT2
+#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT3
+#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+
+
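In practice the amdgpu code rarely spells out individual defines; it pastes the register and field names together through generic GET/SET helpers. A sketch of that pattern applied to the GDS defines above (the macro names and bodies here are an approximation of the driver's REG_GET_FIELD/REG_SET_FIELD style, not a verbatim copy):

    /* Sketch of token-pasting field helpers; names are illustrative. */
    #define FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
    #define FIELD_MASK(reg, field)  reg##__##field##_MASK
    #define GET_FIELD(val, reg, field) \
            (((val) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))
    #define SET_FIELD(val, reg, field, fv) \
            (((val) & ~FIELD_MASK(reg, field)) | \
             (((fv) << FIELD_SHIFT(reg, field)) & FIELD_MASK(reg, field)))

    /* e.g. describe a GWS allocation for VMID 1: base 0, four entries */
    static inline unsigned int gws_vmid1_alloc(void)
    {
            unsigned int gws = 0;

            gws = SET_FIELD(gws, GDS_GWS_VMID1, BASE, 0);
            gws = SET_FIELD(gws, GDS_GWS_VMID1, SIZE, 4);
            return gws;
    }
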
+// addressBlock: xcd0_gc_rasdec
+//RAS_SIGNATURE_CONTROL
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+//RAS_SIGNATURE_MASK
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE0
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE1
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE2
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE3
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_DB_SIGNATURE0
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_PA_SIGNATURE0
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_VGT_SIGNATURE0
+#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SQ_SIGNATURE0
+#define RAS_SQ_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SQ_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE0
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE1
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE2
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE3
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE4
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE5
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE6
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE7
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_IA_SIGNATURE0
+#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_IA_SIGNATURE1
+#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE0
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE1
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_TA_SIGNATURE0
+#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_TD_SIGNATURE0
+#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_CB_SIGNATURE0
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE0
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE1
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_TA_SIGNATURE1
+#define RAS_TA_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_TA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: xcd0_gc_gfxdec0
+//DB_RENDER_CONTROL
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
+//DB_COUNT_CONTROL
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x0
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x00000001L
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
+//DB_DEPTH_VIEW
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
+#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
+//DB_RENDER_OVERRIDE
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0xf
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x00008000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+//DB_RENDER_OVERRIDE2
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
+//DB_HTILE_DATA_BASE
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_HTILE_DATA_BASE_HI
+#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_DEPTH_SIZE
+#define DB_DEPTH_SIZE__X_MAX__SHIFT 0x0
+#define DB_DEPTH_SIZE__Y_MAX__SHIFT 0x10
+#define DB_DEPTH_SIZE__X_MAX_MASK 0x00003FFFL
+#define DB_DEPTH_SIZE__Y_MAX_MASK 0x3FFF0000L
+//DB_DEPTH_BOUNDS_MIN
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
+//DB_DEPTH_BOUNDS_MAX
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
+//DB_STENCIL_CLEAR
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
+//DB_DEPTH_CLEAR
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
+//PA_SC_SCREEN_SCISSOR_TL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_SCISSOR_BR
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
+//DB_Z_INFO
+#define DB_Z_INFO__FORMAT__SHIFT 0x0
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
+#define DB_Z_INFO__SW_MODE__SHIFT 0x4
+#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0xd
+#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xf
+#define DB_Z_INFO__MAXMIP__SHIFT 0x10
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
+#define DB_Z_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
+#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
+#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00008000L
+#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+//DB_STENCIL_INFO
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
+#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0xd
+#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xf
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
+#define DB_STENCIL_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
+#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00008000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+#define DB_STENCIL_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
+//DB_Z_READ_BASE
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_READ_BASE_HI
+#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_READ_BASE
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_READ_BASE_HI
+#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_Z_WRITE_BASE
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_WRITE_BASE_HI
+#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_WRITE_BASE
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_WRITE_BASE_HI
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_DFSM_CONTROL
+#define DB_DFSM_CONTROL__PUNCHOUT_MODE__SHIFT 0x0
+#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP__SHIFT 0x2
+#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW__SHIFT 0x3
+#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
+#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
+#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
+//DB_Z_INFO2
+#define DB_Z_INFO2__EPITCH__SHIFT 0x0
+#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
+//DB_STENCIL_INFO2
+#define DB_STENCIL_INFO2__EPITCH__SHIFT 0x0
+#define DB_STENCIL_INFO2__EPITCH_MASK 0x0000FFFFL
+//COHER_DEST_BASE_HI_0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_1
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_2
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_3
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_2
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_3
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_WINDOW_OFFSET
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
+//PA_SC_WINDOW_SCISSOR_TL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_WINDOW_SCISSOR_BR
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_RULE
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
+//PA_SC_CLIPRECT_0_TL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_0_BR
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_TL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_BR
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_TL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_BR
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_TL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_BR
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_EDGERULE
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
+//PA_SU_HARDWARE_SCREEN_OFFSET
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
+//CB_TARGET_MASK
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
+//CB_SHADER_MASK
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
+//PA_SC_GENERIC_SCISSOR_TL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_GENERIC_SCISSOR_BR
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//COHER_DEST_BASE_0
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_1
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_SCISSOR_0_TL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_0_BR
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_1_TL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_1_BR
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_2_TL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_2_BR
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_3_TL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_3_BR
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_4_TL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_4_BR
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_5_TL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_5_BR
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_6_TL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_6_BR
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_7_TL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_7_BR
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_8_TL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_8_BR
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_9_TL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_9_BR
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_10_TL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_10_BR
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_11_TL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_11_BR
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_12_TL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_12_BR
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_13_TL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_13_BR
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_14_TL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_14_BR
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_15_TL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_15_BR
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_ZMIN_0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_1
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_1
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_2
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_2
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_3
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_3
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_4
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_4
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_5
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_5
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_6
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_6
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_7
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_7
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_8
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_8
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_9
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_9
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_10
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_10
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_11
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_11
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_12
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_12
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_13
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_13
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_14
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_14
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_15
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_15
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_RASTER_CONFIG
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1d
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x1C000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0xE0000000L
+//PA_SC_RASTER_CONFIG_1
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x5
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000001CL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x000000E0L
+//PA_SC_SCREEN_EXTENT_CONTROL
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
+//PA_SC_TILE_STEERING_OVERRIDE
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
+#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING__SHIFT 0x8
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
+#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING_MASK 0x00000100L
+//CP_PERFMON_CNTX_CNTL
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+//CP_PIPEID
+#define CP_PIPEID__PIPE_ID__SHIFT 0x0
+#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
+//CP_RINGID
+#define CP_RINGID__RINGID__SHIFT 0x0
+#define CP_RINGID__RINGID_MASK 0x00000003L
+//CP_VMID
+#define CP_VMID__VMID__SHIFT 0x0
+#define CP_VMID__VMID_MASK 0x0000000FL
+//PA_SC_RIGHT_VERT_GRID
+#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR__SHIFT 0x0
+#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF__SHIFT 0x8
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
+#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
+#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
+//PA_SC_LEFT_VERT_GRID
+#define PA_SC_LEFT_VERT_GRID__LEFT_QTR__SHIFT 0x0
+#define PA_SC_LEFT_VERT_GRID__LEFT_HALF__SHIFT 0x8
+#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
+#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
+#define PA_SC_LEFT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
+#define PA_SC_LEFT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
+#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
+#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
+//PA_SC_HORIZ_GRID
+#define PA_SC_HORIZ_GRID__TOP_QTR__SHIFT 0x0
+#define PA_SC_HORIZ_GRID__TOP_HALF__SHIFT 0x8
+#define PA_SC_HORIZ_GRID__BOT_HALF__SHIFT 0x10
+#define PA_SC_HORIZ_GRID__BOT_QTR__SHIFT 0x18
+#define PA_SC_HORIZ_GRID__TOP_QTR_MASK 0x000000FFL
+#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
+#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
+#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
+//VGT_MULTI_PRIM_IB_RESET_INDX
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
+//CB_BLEND_RED
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
+#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
+//CB_BLEND_GREEN
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
+//CB_BLEND_BLUE
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
+//CB_BLEND_ALPHA
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
+//CB_DCC_CONTROL
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE__SHIFT 0x1
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK__SHIFT 0x2
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01__SHIFT 0x8
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE__SHIFT 0x9
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0xa
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01__SHIFT 0xc
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE__SHIFT 0xd
+#define CB_DCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG__SHIFT 0xe
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE_MASK 0x00000002L
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK_MASK 0x0000007CL
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01_MASK 0x00000100L
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE_MASK 0x00000200L
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00000400L
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01_MASK 0x00001000L
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE_MASK 0x00002000L
+#define CB_DCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG_MASK 0x00004000L
+//DB_STENCIL_CONTROL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
+//DB_STENCILREFMASK
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
+//DB_STENCILREFMASK_BF
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
+//PA_CL_VPORT_XSCALE
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_1
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_1
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_1
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_1
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_1
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_1
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_2
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_2
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_2
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_2
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_2
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_2
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_3
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_3
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_3
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_3
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_3
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_3
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_4
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_4
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_4
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_4
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_4
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_4
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_5
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_5
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_5
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_5
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_5
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_5
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_6
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_6
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_6
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_6
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_6
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_6
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_7
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_7
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_7
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_7
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_7
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_7
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_8
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_8
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_8
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_8
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_8
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_8
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_9
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_9
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_9
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_9
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_9
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_9
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_10
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_10
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_10
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_10
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_10
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_10
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_11
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_11
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_11
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_11
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_11
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_11
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_12
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_12
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_12
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_12
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_12
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_12
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_13
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_13
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_13
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_13
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_13
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_13
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_14
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_14
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_14
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_14
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_14
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_14
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_15
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_15
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_15
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_15
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_15
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_15
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_X
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Y
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Z
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_W
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_X
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Y
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Z
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_W
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_X
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Y
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Z
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_W
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_X
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Y
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Z
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_W
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_X
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Y
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Z
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_W
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_X
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Y
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Z
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_W
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_PROG_NEAR_CLIP_Z
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//SPI_PS_INPUT_CNTL_0
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_1
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_2
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_3
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_4
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_5
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_6
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_7
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_8
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_9
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_10
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_11
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_12
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_13
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_14
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_15
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_16
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_17
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_18
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_19
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_20
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_21
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_22
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_23
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_24
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_25
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_26
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_27
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_28
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_29
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_30
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_31
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
+//SPI_VS_OUT_CONFIG
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x6
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x00000040L
+//SPI_PS_INPUT_ENA
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
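
A minimal sketch (not part of this patch) of how the single-bit SPI_PS_INPUT_ENA masks defined above are typically combined and tested; the helper names and the particular fields chosen are illustrative assumptions only.

/* Hypothetical example: build an SPI_PS_INPUT_ENA value by OR-ing the
 * single-bit *_MASK macros above, then test one bit. Field choice is
 * arbitrary and only meant to show the idiom. */
#include <stdint.h>
#include <stdbool.h>

static inline uint32_t example_spi_ps_input_ena(void)
{
	uint32_t ena = 0;

	ena |= SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK;	/* bit 1 */
	ena |= SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK;	/* bit 12 */
	return ena;					/* 0x00001002 */
}

static inline bool example_front_face_enabled(uint32_t ena)
{
	return (ena & SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK) != 0;
}
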
+//SPI_PS_INPUT_ADDR
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_INTERP_CONTROL_0
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+//SPI_PS_IN_CONTROL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+//SPI_BARYC_CNTL
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+//SPI_TMPRING_SIZE
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
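
For the multi-bit fields, the mask/shift pairs are used together for extraction; a small sketch (illustrative only, the helper names are assumptions) using the SPI_TMPRING_SIZE definitions above:

/* Hypothetical example: pull the WAVES and WAVESIZE fields out of a raw
 * SPI_TMPRING_SIZE register value using the macros defined above. */
#include <stdint.h>

static inline uint32_t example_tmpring_waves(uint32_t reg)
{
	return (reg & SPI_TMPRING_SIZE__WAVES_MASK) >>
	       SPI_TMPRING_SIZE__WAVES__SHIFT;
}

static inline uint32_t example_tmpring_wavesize(uint32_t reg)
{
	return (reg & SPI_TMPRING_SIZE__WAVESIZE_MASK) >>
	       SPI_TMPRING_SIZE__WAVESIZE__SHIFT;
}
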
+//SPI_SHADER_POS_FORMAT
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
+//SPI_SHADER_Z_FORMAT
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_COL_FORMAT
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
+//CB_BLEND0_CONTROL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
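
Packing a value goes the other way: shift the field value into place and clamp it with the corresponding mask. A sketch using the CB_BLEND0_CONTROL pairs above; the function name is hypothetical and the blend-factor/function encodings passed in are assumed to come from the caller, they are not defined in this patch.

/* Hypothetical example: compose a CB_BLEND0_CONTROL value from caller-supplied
 * source/destination blend factors and a combine function, with blending
 * enabled. Each field is shifted into position and masked to its width. */
#include <stdint.h>

static inline uint32_t example_cb_blend0_control(uint32_t src_blend,
						 uint32_t comb_fcn,
						 uint32_t dst_blend)
{
	uint32_t v = 0;

	v |= (src_blend << CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT) &
	     CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK;
	v |= (comb_fcn << CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT) &
	     CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK;
	v |= (dst_blend << CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT) &
	     CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK;
	v |= CB_BLEND0_CONTROL__ENABLE_MASK;
	return v;
}
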
+//CB_BLEND1_CONTROL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND2_CONTROL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND3_CONTROL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND4_CONTROL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND5_CONTROL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND6_CONTROL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND7_CONTROL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_MRT0_EPITCH
+#define CB_MRT0_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT0_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT1_EPITCH
+#define CB_MRT1_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT1_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT2_EPITCH
+#define CB_MRT2_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT2_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT3_EPITCH
+#define CB_MRT3_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT3_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT4_EPITCH
+#define CB_MRT4_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT4_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT5_EPITCH
+#define CB_MRT5_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT5_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT6_EPITCH
+#define CB_MRT6_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT6_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT7_EPITCH
+#define CB_MRT7_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT7_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CS_COPY_STATE
+#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//GFX_COPY_STATE
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//PA_CL_POINT_X_RAD
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_Y_RAD
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_SIZE
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_CULL_RAD
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//VGT_DMA_BASE_HI
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
+//VGT_DMA_BASE
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
+//VGT_DRAW_INITIATOR
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
+#define VGT_DRAW_INITIATOR__UNROLLED_INST__SHIFT 0x7
+#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC__SHIFT 0x8
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__UNROLLED_INST_MASK 0x00000080L
+#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC_MASK 0x00000100L
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
+//VGT_IMMED_DATA
+#define VGT_IMMED_DATA__DATA__SHIFT 0x0
+#define VGT_IMMED_DATA__DATA_MASK 0xFFFFFFFFL
+//VGT_EVENT_ADDRESS_REG
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
+//DB_DEPTH_CONTROL
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
+//DB_EQAA
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+//CB_COLOR_CONTROL
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
+//DB_SHADER_CONTROL
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
+#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED__SHIFT 0x11
+#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES__SHIFT 0x14
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
+#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED_MASK 0x00020000L
+#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES_MASK 0x00700000L
+//PA_CL_CLIP_CNTL
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA__SHIFT 0x1c
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA_MASK 0x10000000L
+//PA_SU_SC_MODE_CNTL
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
+//PA_CL_VTE_CNTL
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+//PA_CL_VS_OUT_CNTL
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1a
+#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID__SHIFT 0x1b
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x04000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID_MASK 0x08000000L
+//PA_CL_NANINF_CNTL
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+//PA_SU_LINE_STIPPLE_CNTL
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x4
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x00000010L
+//PA_SU_LINE_STIPPLE_SCALE
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
+//PA_SU_PRIM_FILTER_CNTL
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+//PA_SU_SMALL_PRIM_FILTER_CNTL
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE__SHIFT 0x5
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE__SHIFT 0x6
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE_MASK 0x00000020L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE_MASK 0x00000040L
+//PA_CL_OBJPRIM_ID_CNTL
+#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
+#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
+#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID__SHIFT 0x2
+#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL_MASK 0x00000001L
+#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID_MASK 0x00000002L
+#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID_MASK 0x00000004L
+//PA_CL_NGG_CNTL
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
+//PA_SU_OVER_RASTERIZATION_CNTL
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
+//PA_STEREO_CNTL
+#define PA_STEREO_CNTL__EN_STEREO__SHIFT 0x0
+#define PA_STEREO_CNTL__STEREO_MODE__SHIFT 0x1
+#define PA_STEREO_CNTL__RT_SLICE_MODE__SHIFT 0x5
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET__SHIFT 0x8
+#define PA_STEREO_CNTL__VP_ID_MODE__SHIFT 0xa
+#define PA_STEREO_CNTL__VP_ID_OFFSET__SHIFT 0xd
+#define PA_STEREO_CNTL__EN_STEREO_MASK 0x00000001L
+#define PA_STEREO_CNTL__STEREO_MODE_MASK 0x0000001EL
+#define PA_STEREO_CNTL__RT_SLICE_MODE_MASK 0x000000E0L
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET_MASK 0x00000300L
+#define PA_STEREO_CNTL__VP_ID_MODE_MASK 0x00001C00L
+#define PA_STEREO_CNTL__VP_ID_OFFSET_MASK 0x0001E000L
+//PA_SU_POINT_SIZE
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
+//PA_SU_POINT_MINMAX
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
+//PA_SU_LINE_CNTL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
+//PA_SC_LINE_STIPPLE
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+//VGT_OUTPUT_PATH_CNTL
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x0
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x00000007L
+//VGT_HOS_CNTL
+#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x0
+#define VGT_HOS_CNTL__TESS_MODE_MASK 0x00000003L
+//VGT_HOS_MAX_TESS_LEVEL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_MIN_TESS_LEVEL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_REUSE_DEPTH
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x0
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0x000000FFL
+//VGT_GROUP_PRIM_TYPE
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0xe
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0xf
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x10
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x0000001FL
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x00004000L
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x00008000L
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x00070000L
+//VGT_GROUP_FIRST_DECR
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x0
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0x0000000FL
+//VGT_GROUP_DECR
+#define VGT_GROUP_DECR__DECR__SHIFT 0x0
+#define VGT_GROUP_DECR__DECR_MASK 0x0000000FL
+//VGT_GROUP_VECT_0_CNTL
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x0
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x1
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x2
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x3
+#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x8
+#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x10
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0x0000FF00L
+#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0x00FF0000L
+//VGT_GROUP_VECT_1_CNTL
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x0
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x1
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x2
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x3
+#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x8
+#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x10
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0x0000FF00L
+#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0x00FF0000L
+//VGT_GROUP_VECT_0_FMT_CNTL
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x0
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x4
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x8
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0xc
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x10
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x14
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x18
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x1c
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0x0000000FL
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0x00000F00L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0x000F0000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0x0F000000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
+//VGT_GROUP_VECT_1_FMT_CNTL
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x0
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x4
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x8
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0xc
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x10
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x14
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x18
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x1c
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0x0000000FL
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0x00000F00L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0x000F0000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0x0F000000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
+//VGT_GS_MODE
+#define VGT_GS_MODE__MODE__SHIFT 0x0
+#define VGT_GS_MODE__RESERVED_0__SHIFT 0x3
+#define VGT_GS_MODE__CUT_MODE__SHIFT 0x4
+#define VGT_GS_MODE__RESERVED_1__SHIFT 0x6
+#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0xb
+#define VGT_GS_MODE__RESERVED_2__SHIFT 0xc
+#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0xd
+#define VGT_GS_MODE__RESERVED_3__SHIFT 0xe
+#define VGT_GS_MODE__RESERVED_4__SHIFT 0xf
+#define VGT_GS_MODE__RESERVED_5__SHIFT 0x10
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x11
+#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x12
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x13
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x14
+#define VGT_GS_MODE__ONCHIP__SHIFT 0x15
+#define VGT_GS_MODE__MODE_MASK 0x00000007L
+#define VGT_GS_MODE__RESERVED_0_MASK 0x00000008L
+#define VGT_GS_MODE__CUT_MODE_MASK 0x00000030L
+#define VGT_GS_MODE__RESERVED_1_MASK 0x000007C0L
+#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x00000800L
+#define VGT_GS_MODE__RESERVED_2_MASK 0x00001000L
+#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x00002000L
+#define VGT_GS_MODE__RESERVED_3_MASK 0x00004000L
+#define VGT_GS_MODE__RESERVED_4_MASK 0x00008000L
+#define VGT_GS_MODE__RESERVED_5_MASK 0x00010000L
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x00020000L
+#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x00040000L
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x00080000L
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x00100000L
+#define VGT_GS_MODE__ONCHIP_MASK 0x00600000L
+//VGT_GS_ONCHIP_CNTL
+#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP__SHIFT 0x0
+#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP__SHIFT 0xb
+#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP__SHIFT 0x16
+#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP_MASK 0x000007FFL
+#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP_MASK 0x003FF800L
+#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP_MASK 0xFFC00000L
+//PA_SC_MODE_CNTL_0
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
+#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD__SHIFT 0x4
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD_MASK 0x00000010L
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
+//PA_SC_MODE_CNTL_1
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+//VGT_ENHANCE
+#define VGT_ENHANCE__MISC__SHIFT 0x0
+#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_GS_PER_ES
+#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x0
+#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x000007FFL
+//VGT_ES_PER_GS
+#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x0
+#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x000007FFL
+//VGT_GS_PER_VS
+#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x0
+#define VGT_GS_PER_VS__GS_PER_VS_MASK 0x0000000FL
+//VGT_GSVS_RING_OFFSET_1
+#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x0
+#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x00007FFFL
+//VGT_GSVS_RING_OFFSET_2
+#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x0
+#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x00007FFFL
+//VGT_GSVS_RING_OFFSET_3
+#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x0
+#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x00007FFFL
+//VGT_GS_OUT_PRIM_TYPE
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x8
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x10
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x16
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x1f
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x00003F00L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x003F0000L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0x0FC00000L
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000L
+//IA_ENHANCE
+#define IA_ENHANCE__MISC__SHIFT 0x0
+#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_DMA_SIZE
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_DMA_MAX_SIZE
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
+//VGT_DMA_INDEX_TYPE
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
+#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x00000040L
+#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+//WD_ENHANCE
+#define WD_ENHANCE__MISC__SHIFT 0x0
+#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_EN
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
+//VGT_DMA_NUM_INSTANCES
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_RESET
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
+//VGT_EVENT_INITIATOR
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//VGT_GS_MAX_PRIMS_PER_SUBGROUP
+#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP__SHIFT 0x0
+#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP_MASK 0x0000FFFFL
+//VGT_DRAW_PAYLOAD_CNTL
+#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN__SHIFT 0x0
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID__SHIFT 0x2
+#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN__SHIFT 0x3
+#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN_MASK 0x00000001L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
+#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
+//VGT_INSTANCE_STEP_RATE_0
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
+//VGT_INSTANCE_STEP_RATE_1
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x0
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xFFFFFFFFL
+//IA_MULTI_VGT_PARAM_BC
+//VGT_ESGS_RING_ITEMSIZE
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GSVS_RING_ITEMSIZE
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_REUSE_OFF
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+//VGT_VTX_CNT_EN
+#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x0
+#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x00000001L
+//DB_HTILE_SURFACE
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x2
+#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x3
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x4
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0xa
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
+#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
+#define DB_HTILE_SURFACE__RB_ALIGNED__SHIFT 0x13
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x00000004L
+#define DB_HTILE_SURFACE__PRELOAD_MASK 0x00000008L
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x000003F0L
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0x0000FC00L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
+#define DB_HTILE_SURFACE__RB_ALIGNED_MASK 0x00080000L
+//DB_SRESULTS_COMPARE_STATE0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+//DB_SRESULTS_COMPARE_STATE1
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+//DB_PRELOAD_CONTROL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
+//VGT_STRMOUT_BUFFER_SIZE_0
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_0
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_0
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_SIZE_1
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_1
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_1
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_SIZE_2
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_2
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_2
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_SIZE_3
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_3
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_3
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
+//VGT_GS_MAX_VERT_OUT
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
+//VGT_TESS_DISTRIBUTION
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
+//VGT_SHADER_STAGES_EN
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
+#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN__SHIFT 0x9
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0__SHIFT 0xa
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1__SHIFT 0xb
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
+#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN_MASK 0x00000200L
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0_MASK 0x00000400L
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1_MASK 0x00000800L
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00180000L
+//VGT_LS_HS_CONFIG
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
+//VGT_GS_VERT_ITEMSIZE
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GS_VERT_ITEMSIZE_1
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GS_VERT_ITEMSIZE_2
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GS_VERT_ITEMSIZE_3
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x00007FFFL
+//VGT_TF_PARAM
+#define VGT_TF_PARAM__TYPE__SHIFT 0x0
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
+#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x9
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
+#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__DEPRECATED_MASK 0x00000200L
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00008000L
+#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
+//DB_ALPHA_TO_MASK
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+//VGT_DISPATCH_DRAW_INDEX
+#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX__SHIFT 0x0
+#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_DB_FMT_CNTL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+//PA_SU_POLY_OFFSET_CLAMP
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_SCALE
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_OFFSET
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_SCALE
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_OFFSET
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_GS_INSTANCE_CNT
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
+//VGT_STRMOUT_CONFIG
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x0
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x1
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x2
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x3
+#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x4
+#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT__SHIFT 0x7
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x8
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x1f
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x00000001L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x00000002L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x00000004L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x00000008L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x00000070L
+#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT_MASK 0x00000080L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0x00000F00L
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000L
+//VGT_STRMOUT_BUFFER_CONFIG
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x4
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x8
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0xc
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0x0000000FL
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0x000000F0L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0x00000F00L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0x0000F000L
+//VGT_DMA_EVENT_INITIATOR
+#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//PA_SC_CENTROID_PRIORITY_0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
+//PA_SC_CENTROID_PRIORITY_1
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
+//PA_SC_LINE_CNTL
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION__SHIFT 0xd
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION_MASK 0x00002000L
+//PA_SC_AA_CONFIG
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
+//PA_SU_VTX_CNTL
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+//PA_CL_GB_VERT_CLIP_ADJ
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_VERT_DISC_ADJ
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_CLIP_ADJ
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_DISC_ADJ
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_MASK_X0Y0_X1Y0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
+//PA_SC_AA_MASK_X0Y1_X1Y1
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
+//PA_SC_SHADER_CONTROL
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
+//PA_SC_BINNER_CNTL_0
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION_MASK 0x10000000L
+//PA_SC_BINNER_CNTL_1
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
+//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
+//PA_SC_NGG_MODE_CNTL
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
+//VGT_VERTEX_REUSE_BLOCK_CNTL
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x0
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0x000000FFL
+//VGT_OUT_DEALLOC_CNTL
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x0
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x0000007FL
+//CB_COLOR0_BASE
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_BASE_EXT
+#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_ATTRIB2
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR0_VIEW
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR0_INFO
+#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR0_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR0_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR0_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR0_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR0_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR0_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR0_ATTRIB
+#define CB_COLOR0_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR0_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR0_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR0_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR0_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR0_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR0_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR0_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR0_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR0_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR0_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR0_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR0_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR0_DCC_CONTROL
+#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR0_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR0_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR0_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR0_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR0_CMASK
+#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_CMASK_BASE_EXT
+#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_FMASK
+#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_FMASK_BASE_EXT
+#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_CLEAR_WORD0
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR0_CLEAR_WORD1
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR0_DCC_BASE
+#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_DCC_BASE_EXT
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_BASE
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_BASE_EXT
+#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_ATTRIB2
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR1_VIEW
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR1_INFO
+#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR1_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR1_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR1_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR1_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR1_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR1_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR1_ATTRIB
+#define CB_COLOR1_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR1_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR1_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR1_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR1_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR1_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR1_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR1_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR1_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR1_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR1_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR1_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR1_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR1_DCC_CONTROL
+#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR1_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR1_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR1_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR1_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR1_CMASK
+#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_CMASK_BASE_EXT
+#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_FMASK
+#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_FMASK_BASE_EXT
+#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_CLEAR_WORD0
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR1_CLEAR_WORD1
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR1_DCC_BASE
+#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_DCC_BASE_EXT
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_BASE
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_BASE_EXT
+#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_ATTRIB2
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR2_VIEW
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR2_INFO
+#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR2_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR2_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR2_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR2_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR2_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR2_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR2_ATTRIB
+#define CB_COLOR2_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR2_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR2_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR2_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR2_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR2_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR2_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR2_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR2_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR2_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR2_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR2_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR2_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR2_DCC_CONTROL
+#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR2_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR2_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR2_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR2_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR2_CMASK
+#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_CMASK_BASE_EXT
+#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_FMASK
+#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_FMASK_BASE_EXT
+#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_CLEAR_WORD0
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR2_CLEAR_WORD1
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR2_DCC_BASE
+#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_DCC_BASE_EXT
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_BASE
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_BASE_EXT
+#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_ATTRIB2
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR3_VIEW
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR3_INFO
+#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR3_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR3_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR3_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR3_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR3_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR3_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR3_ATTRIB
+#define CB_COLOR3_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR3_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR3_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR3_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR3_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR3_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR3_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR3_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR3_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR3_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR3_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR3_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR3_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR3_DCC_CONTROL
+#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR3_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR3_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR3_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR3_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR3_CMASK
+#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_CMASK_BASE_EXT
+#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_FMASK
+#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_FMASK_BASE_EXT
+#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_CLEAR_WORD0
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR3_CLEAR_WORD1
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR3_DCC_BASE
+#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_DCC_BASE_EXT
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_BASE
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_BASE_EXT
+#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_ATTRIB2
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR4_VIEW
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR4_INFO
+#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR4_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR4_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR4_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR4_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR4_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR4_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR4_ATTRIB
+#define CB_COLOR4_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR4_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR4_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR4_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR4_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR4_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR4_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR4_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR4_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR4_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR4_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR4_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR4_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR4_DCC_CONTROL
+#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR4_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR4_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR4_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR4_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR4_CMASK
+#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_CMASK_BASE_EXT
+#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_FMASK
+#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_FMASK_BASE_EXT
+#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_CLEAR_WORD0
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR4_CLEAR_WORD1
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR4_DCC_BASE
+#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_DCC_BASE_EXT
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_BASE
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_BASE_EXT
+#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_ATTRIB2
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR5_VIEW
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR5_INFO
+#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR5_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR5_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR5_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR5_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR5_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR5_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR5_ATTRIB
+#define CB_COLOR5_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR5_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR5_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR5_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR5_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR5_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR5_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR5_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR5_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR5_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR5_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR5_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR5_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR5_DCC_CONTROL
+#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR5_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR5_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR5_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR5_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR5_CMASK
+#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_CMASK_BASE_EXT
+#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_FMASK
+#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_FMASK_BASE_EXT
+#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_CLEAR_WORD0
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR5_CLEAR_WORD1
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR5_DCC_BASE
+#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_DCC_BASE_EXT
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_BASE
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_BASE_EXT
+#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_ATTRIB2
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR6_VIEW
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR6_INFO
+#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR6_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR6_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR6_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR6_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR6_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR6_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR6_ATTRIB
+#define CB_COLOR6_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR6_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR6_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR6_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR6_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR6_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR6_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR6_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR6_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR6_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR6_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR6_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR6_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR6_DCC_CONTROL
+#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR6_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR6_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR6_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR6_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR6_CMASK
+#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_CMASK_BASE_EXT
+#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_FMASK
+#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_FMASK_BASE_EXT
+#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_CLEAR_WORD0
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR6_CLEAR_WORD1
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR6_DCC_BASE
+#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_DCC_BASE_EXT
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_BASE
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_BASE_EXT
+#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_ATTRIB2
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR7_VIEW
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR7_INFO
+#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR7_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR7_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR7_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR7_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR7_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR7_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR7_ATTRIB
+#define CB_COLOR7_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR7_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR7_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR7_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR7_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR7_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR7_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR7_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR7_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR7_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR7_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR7_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR7_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR7_DCC_CONTROL
+#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR7_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR7_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR7_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR7_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR7_CMASK
+#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_CMASK_BASE_EXT
+#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_FMASK
+#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_FMASK_BASE_EXT
+#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_CLEAR_WORD0
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR7_CLEAR_WORD1
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR7_DCC_BASE
+#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_DCC_BASE_EXT
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
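A minimal usage sketch (not part of this header, added here for illustration): every field above is described by a <FIELD>__SHIFT / <FIELD>_MASK pair, so reading or updating a field in a register value is plain mask-and-shift arithmetic. The helper names and the CB_COLOR5_INFO example below are assumptions chosen for the sketch, not definitions from this file.

/*
 * Illustrative helpers only; the generated __SHIFT/_MASK pairs are the
 * interface, the function names here are hypothetical.
 */
static inline unsigned int hyp_get_field(unsigned int reg_val,
					 unsigned int mask, unsigned int shift)
{
	/* Isolate the field bits, then move them down to bit 0. */
	return (reg_val & mask) >> shift;
}

static inline unsigned int hyp_set_field(unsigned int reg_val,
					 unsigned int mask, unsigned int shift,
					 unsigned int field_val)
{
	/* Clear the field, then OR in the new value, clamped to the field width. */
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Example: read FORMAT and set DCC_ENABLE in a CB_COLOR5_INFO value. */
static inline unsigned int example_cb_color5_info(unsigned int info)
{
	unsigned int fmt = hyp_get_field(info, CB_COLOR5_INFO__FORMAT_MASK,
					 CB_COLOR5_INFO__FORMAT__SHIFT);

	(void)fmt; /* a real caller would act on the decoded format */
	return hyp_set_field(info, CB_COLOR5_INFO__DCC_ENABLE_MASK,
			     CB_COLOR5_INFO__DCC_ENABLE__SHIFT, 1);
}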
+
+
+// addressBlock: xcd0_gc_gfxudec
+//CP_EOP_DONE_ADDR_LO
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_EOP_DONE_ADDR_HI
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_EOP_DONE_DATA_LO
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_EOP_DONE_DATA_HI
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_LO
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_HI
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
+//CP_STREAM_OUT_ADDR_LO
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x2
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_STREAM_OUT_ADDR_HI
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x0
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0x0000FFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT0_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT0_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT0_LO
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT0_HI
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT1_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT1_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT1_LO
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT1_HI
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT2_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT2_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT2_LO
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT2_HI
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT3_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT3_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT3_LO
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT3_HI
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_ADDR_LO
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_PIPE_STATS_ADDR_HI
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
+//CP_VGT_IAVERT_COUNT_LO
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAVERT_COUNT_HI
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_LO
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_HI
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_LO
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_HI
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_LO
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_HI
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_LO
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_HI
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_LO
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_HI
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_LO
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_HI
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_LO
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_HI
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_LO
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_HI
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_LO
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_HI
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_LO
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_HI
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_LO
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_HI
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_CONTROL
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x02000000L
+//CP_STREAM_OUT_CONTROL
+#define CP_STREAM_OUT_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_STREAM_OUT_CONTROL__CACHE_POLICY_MASK 0x02000000L
+//CP_STRMOUT_CNTL
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x0
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x00000001L
+//SCRATCH_REG0
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//SCRATCH_REG1
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//SCRATCH_REG2
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//SCRATCH_REG3
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//SCRATCH_REG4
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//SCRATCH_REG5
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//SCRATCH_REG6
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//SCRATCH_REG7
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//CP_APPEND_DATA_HI
+#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_HI
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_HI
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//SCRATCH_UMSK
+#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x0
+#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x10
+#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0x000000FFL
+#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x00030000L
+//SCRATCH_ADDR
+#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x0
+#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_LO
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_HI
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_LO
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_HI
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_LO
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_HI
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_APPEND_ADDR_LO
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_ADDR_HI
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
+#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00010000L
+#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x02000000L
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
+//CP_APPEND_DATA_LO
+#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_LO
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_LO
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_LO
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_LO
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_HI
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_HI
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_LO
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_LO
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_HI
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_HI
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_LO
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_LO
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_HI
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_HI
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_WADDR_LO
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_WADDR_HI
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00400000L
+//CP_ME_MC_WDATA_LO
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
+//CP_ME_MC_WDATA_HI
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_RADDR_LO
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_RADDR_HI
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00400000L
+//CP_SEM_WAIT_TIMER
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
+//CP_SIG_SEM_ADDR_LO
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_SIG_SEM_ADDR_HI
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_WAIT_REG_MEM_TIMEOUT
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
+//CP_WAIT_SEM_ADDR_LO
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_WAIT_SEM_ADDR_HI
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_DMA_PFP_CONTROL
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
+#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
+#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_CONTROL
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
+#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
+#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_COHER_BASE_HI
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_COHER_START_DELAY
+#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x0
+#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x0000003FL
+//CP_COHER_CNTL
+#define CP_COHER_CNTL__TC_NC_ACTION_ENA__SHIFT 0x3
+#define CP_COHER_CNTL__TC_WC_ACTION_ENA__SHIFT 0x4
+#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA__SHIFT 0x5
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0xf
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x12
+#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x16
+#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x17
+#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x19
+#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x1a
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x1b
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x1c
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x1d
+#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA__SHIFT 0x1e
+#define CP_COHER_CNTL__TC_NC_ACTION_ENA_MASK 0x00000008L
+#define CP_COHER_CNTL__TC_WC_ACTION_ENA_MASK 0x00000010L
+#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA_MASK 0x00000020L
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x00008000L
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x00040000L
+#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x00400000L
+#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x00800000L
+#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x02000000L
+#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x04000000L
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x08000000L
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000L
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000L
+#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA_MASK 0x40000000L
+//CP_COHER_SIZE
+#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_COHER_BASE
+#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_COHER_STATUS
+#define CP_COHER_STATUS__MEID__SHIFT 0x18
+#define CP_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_COHER_STATUS__MEID_MASK 0x03000000L
+#define CP_COHER_STATUS__STATUS_MASK 0x80000000L
+//CP_DMA_ME_SRC_ADDR
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_SRC_ADDR_HI
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_DST_ADDR
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_DST_ADDR_HI
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_COMMAND
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_PFP_SRC_ADDR
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_SRC_ADDR_HI
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_DST_ADDR
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_DST_ADDR_HI
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_COMMAND
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_CNTL
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x000F0000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
+//CP_DMA_READ_TAGS
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+//CP_COHER_SIZE_HI
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_PFP_IB_CONTROL
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
+//CP_PFP_LOAD_CONTROL
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+//CP_SCRATCH_INDEX
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000000FFL
+//CP_SCRATCH_DATA
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_RB_OFFSET
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_OFFSET
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_IB2_OFFSET
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_BEGIN
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_END
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_BEGIN
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_END
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_CE_IB1_OFFSET
+#define CP_CE_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_CE_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_CE_IB2_OFFSET
+#define CP_CE_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_CE_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_CE_COUNTER
+#define CP_CE_COUNTER__CONST_ENGINE_COUNT__SHIFT 0x0
+#define CP_CE_COUNTER__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_CE_RB_OFFSET
+#define CP_CE_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_CE_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_CE_INIT_CMD_BUFSZ
+#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ__SHIFT 0x0
+#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ_MASK 0x00000FFFL
+//CP_CE_IB1_CMD_BUFSZ
+#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_CE_IB2_CMD_BUFSZ
+#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB1_CMD_BUFSZ
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB2_CMD_BUFSZ
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_ST_CMD_BUFSZ
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_CE_INIT_BASE_LO
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x5
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xFFFFFFE0L
+//CP_CE_INIT_BASE_HI
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x0
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0x0000FFFFL
+//CP_CE_INIT_BUFSZ
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x0
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0x00000FFFL
+//CP_CE_IB1_BASE_LO
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_CE_IB1_BASE_HI
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_CE_IB1_BUFSZ
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_CE_IB2_BASE_LO
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_CE_IB2_BASE_HI
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_CE_IB2_BUFSZ
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_IB2_BASE_LO
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB2_BASE_HI
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_IB2_BUFSZ
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_ST_BASE_LO
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
+//CP_ST_BASE_HI
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
+//CP_ST_BUFSZ
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
+//CP_EOP_DONE_EVENT_CNTL
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP__SHIFT 0x0
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA__SHIFT 0xc
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP_MASK 0x0000007FL
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA_MASK 0x0003F000L
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x02000000L
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
+//CP_EOP_DONE_DATA_CNTL
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
+//CP_EOP_DONE_CNTX_ID
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PFP_COMPLETION_STATUS
+#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_CE_COMPLETION_STATUS
+#define CP_CE_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_CE_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_PRED_NOT_VISIBLE
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
+//CP_PFP_METADATA_BASE_ADDR
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_PFP_METADATA_BASE_ADDR_HI
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_CE_METADATA_BASE_ADDR
+#define CP_CE_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_CE_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_CE_METADATA_BASE_ADDR_HI
+#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DRAW_INDX_INDR_ADDR
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DRAW_INDX_INDR_ADDR_HI
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DISPATCH_INDR_ADDR
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DISPATCH_INDR_ADDR_HI
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_BASE_ADDR
+#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_INDEX_BASE_ADDR_HI
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_TYPE
+#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+//CP_GDS_BKUP_ADDR
+#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_GDS_BKUP_ADDR_HI
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_SAMPLE_STATUS
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
+//CP_ME_COHER_CNTL
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+//CP_ME_COHER_SIZE
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_SIZE_HI
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_BASE
+#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_BASE_HI
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_STATUS
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
+#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
+#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
+//RLC_GPM_PERF_COUNT_0
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_0__SH_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_0__CU_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_0__SH_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_0__CU_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_PERF_COUNT_1
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_1__SH_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_1__CU_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_1__SH_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_1__CU_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+//VGT_GSVS_RING_SIZE
+#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x0
+#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVE_TYPE
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_INDEX_TYPE
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_1
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_2
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_3
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xFFFFFFFFL
+//VGT_MAX_VTX_INDX
+#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
+#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
+//VGT_MIN_VTX_INDX
+#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
+#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
+//VGT_INDX_OFFSET
+#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
+#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
+//VGT_MULTI_PRIM_IB_RESET_EN
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
+//VGT_NUM_INDICES
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_NUM_INSTANCES
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_TF_RING_SIZE
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0000FFFFL
+//VGT_HS_OFFCHIP_PARAM
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x9
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000001FFL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000600L
+//VGT_TF_MEMORY_BASE
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
+//VGT_TF_MEMORY_BASE_HI
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
+//WD_POS_BUF_BASE
+#define WD_POS_BUF_BASE__BASE__SHIFT 0x0
+#define WD_POS_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//WD_POS_BUF_BASE_HI
+#define WD_POS_BUF_BASE_HI__BASE_HI__SHIFT 0x0
+#define WD_POS_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
+//WD_CNTL_SB_BUF_BASE
+#define WD_CNTL_SB_BUF_BASE__BASE__SHIFT 0x0
+#define WD_CNTL_SB_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//WD_CNTL_SB_BUF_BASE_HI
+#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI__SHIFT 0x0
+#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
+//WD_INDEX_BUF_BASE
+#define WD_INDEX_BUF_BASE__BASE__SHIFT 0x0
+#define WD_INDEX_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//WD_INDEX_BUF_BASE_HI
+#define WD_INDEX_BUF_BASE_HI__BASE_HI__SHIFT 0x0
+#define WD_INDEX_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
+//IA_MULTI_VGT_PARAM
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x0
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x10
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x11
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x12
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x13
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x14
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC__SHIFT 0x15
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV__SHIFT 0x16
+#define IA_MULTI_VGT_PARAM__HW_USE_ONLY__SHIFT 0x17
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0x0000FFFFL
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x00010000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x00020000L
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x00040000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x00080000L
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x00100000L
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
+#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
+//VGT_INSTANCE_BASE_ID
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
+//PA_SU_LINE_STIPPLE_VALUE
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
+//PA_SC_LINE_STIPPLE_STATE
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
+//PA_SC_SCREEN_EXTENT_MIN_0
+#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_0
+#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MIN_1
+#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_1
+#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
+//PA_SC_P3D_TRAP_SCREEN_HV_EN
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_P3D_TRAP_SCREEN_H
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_V
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_P3D_TRAP_SCREEN_COUNT
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_HV_EN
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_HP3D_TRAP_SCREEN_H
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_V
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_COUNT
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_HV_EN
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_TRAP_SCREEN_H
+#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_V
+#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_COUNT
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_STATE_STEREO_X
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BASE
+#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x0
+#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_SIZE
+#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x0
+#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x003FFFFFL
+//SQ_THREAD_TRACE_MASK
+#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x0
+#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x5
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x7
+#define SQ_THREAD_TRACE_MASK__SIMD_EN__SHIFT 0x8
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0xc
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0xe
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0xf
+#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x0000001FL
+#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x00000020L
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x00000080L
+#define SQ_THREAD_TRACE_MASK__SIMD_EN_MASK 0x00000F00L
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x00003000L
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x00004000L
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x00008000L
+//SQ_THREAD_TRACE_TOKEN_MASK
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x10
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x18
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0x0000FFFFL
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0x00FF0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x01000000L
+//SQ_THREAD_TRACE_PERF_MASK
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x0
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x10
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_CTRL
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x1f
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000L
+//SQ_THREAD_TRACE_MODE
+#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x0
+#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x3
+#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x6
+#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x9
+#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0xc
+#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0xf
+#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x12
+#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x15
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x17
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x19
+#define SQ_THREAD_TRACE_MODE__TC_PERF_EN__SHIFT 0x1a
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x1b
+#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x1d
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x1e
+#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x1f
+#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x00000007L
+#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x00000038L
+#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x000001C0L
+#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0x00000E00L
+#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x00007000L
+#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x00038000L
+#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x001C0000L
+#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x00600000L
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x01800000L
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x02000000L
+#define SQ_THREAD_TRACE_MODE__TC_PERF_EN_MASK 0x04000000L
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000L
+#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000L
+#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000L
+//SQ_THREAD_TRACE_BASE2
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0x0000000FL
+//SQ_THREAD_TRACE_TOKEN_MASK2
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_WPTR
+#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x1e
+#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3FFFFFFFL
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xC0000000L
+//SQ_THREAD_TRACE_STATUS
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x10
+#define SQ_THREAD_TRACE_STATUS__UTC_ERROR__SHIFT 0x1c
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x1d
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x1e
+#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x1f
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x000003FFL
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x03FF0000L
+#define SQ_THREAD_TRACE_STATUS__UTC_ERROR_MASK 0x10000000L
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000L
+#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000L
+//SQ_THREAD_TRACE_HIWATER
+#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x0
+#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x00000007L
+//SQ_THREAD_TRACE_CNTR
+#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_1
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_2
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_3
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
+//SQC_CACHES
+#define SQC_CACHES__TARGET_INST__SHIFT 0x0
+#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
+#define SQC_CACHES__INVALIDATE__SHIFT 0x2
+#define SQC_CACHES__WRITEBACK__SHIFT 0x3
+#define SQC_CACHES__VOL__SHIFT 0x4
+#define SQC_CACHES__COMPLETE__SHIFT 0x10
+#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
+#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
+#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
+#define SQC_CACHES__WRITEBACK_MASK 0x00000008L
+#define SQC_CACHES__VOL_MASK 0x00000010L
+#define SQC_CACHES__COMPLETE_MASK 0x00010000L
+//SQC_WRITEBACK
+#define SQC_WRITEBACK__DWB__SHIFT 0x0
+#define SQC_WRITEBACK__DIRTY__SHIFT 0x1
+#define SQC_WRITEBACK__DWB_MASK 0x00000001L
+#define SQC_WRITEBACK__DIRTY_MASK 0x00000002L
+//DB_OCCLUSION_COUNT0_LOW
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT0_HI
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT1_LOW
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT1_HI
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT2_LOW
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT2_HI
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT3_LOW
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT3_HI
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_ZPASS_COUNT_LOW
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_ZPASS_COUNT_HI
+#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x0
+#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//GDS_RD_ADDR
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_DATA
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
+#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_ADDR
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_COUNT
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_DATA
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_ADDR
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_DATA
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_ADDR
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_DATA
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WRITE_COMPLETE
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
+//GDS_ATOM_CNTL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
+//GDS_ATOM_COMPLETE
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
+//GDS_ATOM_BASE
+#define GDS_ATOM_BASE__BASE__SHIFT 0x0
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0x10
+#define GDS_ATOM_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_ATOM_SIZE
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x10
+#define GDS_ATOM_SIZE__SIZE_MASK 0x0000FFFFL
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFF0000L
+//GDS_ATOM_OFFSET0
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_OFFSET1
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_DST
+#define GDS_ATOM_DST__DST__SHIFT 0x0
+#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
+//GDS_ATOM_OP
+#define GDS_ATOM_OP__OP__SHIFT 0x0
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OP__OP_MASK 0x000000FFL
+#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_SRC0
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC0_U
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1_U
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0
+#define GDS_ATOM_READ0__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0_U
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1
+#define GDS_ATOM_READ1__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1_U
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_GWS_RESOURCE_CNTL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GWS_RESOURCE
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xe
+#define GDS_GWS_RESOURCE__DED__SHIFT 0xf
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0x10
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x11
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1d
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1e
+#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1f
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00003FFEL
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__DED_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00010000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x1FFE0000L
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x20000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x40000000L
+#define GDS_GWS_RESOURCE__HALTED_MASK 0x80000000L
+//GDS_GWS_RESOURCE_CNT
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_CNTL
+#define GDS_OA_CNTL__INDEX__SHIFT 0x0
+#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
+#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
+#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
+//GDS_OA_COUNTER
+#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
+#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
+//GDS_OA_ADDRESS
+#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
+#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x10
+#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x14
+#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x16
+#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
+#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
+#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
+#define GDS_OA_ADDRESS__CRAWLER_MASK 0x000F0000L
+#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x00300000L
+#define GDS_OA_ADDRESS__UNUSED_MASK 0x3FC00000L
+#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
+#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
+//GDS_OA_INCDEC
+#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
+#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
+#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
+#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
+//GDS_OA_RING_SIZE
+#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
+#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
+//SPI_CONFIG_CNTL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x1a
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x1b
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x04000000L
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x08000000L
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
+//SPI_CONFIG_CNTL_1
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
+#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE__SHIFT 0x5
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x6
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x9
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x10
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE_MASK 0x00000020L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000040L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xFFFF0000L
+//SPI_CONFIG_CNTL_2
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L
+//SPI_WAVE_LIMIT_CNTL
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN__SHIFT 0x0
+#define SPI_WAVE_LIMIT_CNTL__VS_WAVE_GRAN__SHIFT 0x2
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN__SHIFT 0x4
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN__SHIFT 0x6
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN_MASK 0x00000003L
+#define SPI_WAVE_LIMIT_CNTL__VS_WAVE_GRAN_MASK 0x0000000CL
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L
+
+
+// addressBlock: xcd0_gc_perfddec
+//CPG_PERFCOUNTER1_LO
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER1_HI
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_LO
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_HI
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_LO
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_HI
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_LO
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_HI
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_LO
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_HI
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_LO
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_HI
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_LATENCY_STATS_DATA
+#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_LATENCY_STATS_DATA
+#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPC_LATENCY_STATS_DATA
+#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_LO
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_HI
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_LO
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_HI
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_LO
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_HI
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_LO
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_HI
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_LO
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_HI
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_LO
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_HI
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER0_LO
+#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER0_HI
+#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER1_LO
+#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER1_HI
+#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER2_LO
+#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER2_HI
+#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER3_LO
+#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER3_HI
+#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER0_LO
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER0_HI
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER1_LO
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER1_HI
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER2_LO
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER2_HI
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER3_LO
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER3_HI
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER0_LO
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER0_HI
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER1_LO
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER1_HI
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER2_LO
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER2_HI
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER3_LO
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER3_HI
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_LO
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_HI
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SU_PERFCOUNTER1_LO
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_HI
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SU_PERFCOUNTER2_LO
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_HI
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SU_PERFCOUNTER3_LO
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_HI
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SC_PERFCOUNTER0_LO
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_HI
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_LO
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_HI
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_LO
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_HI
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_LO
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_HI
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_LO
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_HI
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_LO
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_HI
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_LO
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_HI
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_LO
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_HI
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_HI
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_LO
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_HI
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_LO
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_HI
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_LO
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_HI
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_LO
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_HI
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_LO
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_HI
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_LO
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_LO
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_HI
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_LO
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_HI
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_LO
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_HI
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_LO
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_HI
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_LO
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_HI
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_LO
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_HI
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_LO
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_HI
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_LO
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_HI
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER8_LO
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER8_HI
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER9_LO
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER9_HI
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER10_LO
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER10_HI
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER11_LO
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER11_HI
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER12_LO
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER12_HI
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER13_LO
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER13_HI
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER14_LO
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER14_HI
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER15_LO
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER15_HI
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_LO
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_HI
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_LO
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_HI
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_LO
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_HI
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_LO
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_HI
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_LO
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_HI
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_LO
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_HI
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_LO
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_HI
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_LO
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_HI
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_LO
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_HI
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_LO
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_HI
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_LO
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_HI
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_LO
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_HI
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_LO
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_HI
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_LO
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_HI
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_LO
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_HI
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_LO
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_HI
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER0_LO
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER0_HI
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER1_LO
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER1_HI
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER2_LO
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER2_HI
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER3_LO
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER3_HI
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER0_LO
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER0_HI
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER1_LO
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER1_HI
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER2_LO
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER2_HI
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER3_LO
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER3_HI
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_LO
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_HI
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_LO
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_HI
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_LO
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_HI
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_LO
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_HI
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_LO
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_HI
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_LO
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_HI
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_LO
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_HI
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_LO
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_HI
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_LO
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_HI
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_LO
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_HI
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_LO
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_HI
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_LO
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_HI
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_LO
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_HI
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_LO
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_HI
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
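Each of the *_PERFCOUNTER*_LO/_HI pairs in this block exposes one 32-bit half of a wider counter, and the __SHIFT/_MASK pairs describe how the field sits inside the register word. A minimal illustrative sketch of how such macros are commonly combined is shown below; it is not part of the generated header, and the helper names gc_get_field() and gc_perfcounter_64() are hypothetical, chosen only for the example.

#include <stdint.h>

/* Illustrative sketch only (not part of the generated header): extract a
 * field using a __SHIFT/_MASK pair, then stitch a LO/HI perfcounter pair
 * from this block into a single 64-bit value. */
static inline uint32_t gc_get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

static inline uint64_t gc_perfcounter_64(uint32_t lo_val, uint32_t hi_val)
{
	uint64_t hi = gc_get_field(hi_val,
				   CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK,
				   CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT);
	uint64_t lo = gc_get_field(lo_val,
				   CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK,
				   CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT);

	return (hi << 32) | lo;
}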
+
+
+// addressBlock: xcd0_gc_utcl2_atcl2pfcntrdec
+//ATC_L2_PERFCOUNTER_LO
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATC_L2_PERFCOUNTER_HI
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: xcd0_gc_utcl2_vml2prdec
+//MC_VM_L2_PERFCOUNTER_LO
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MC_VM_L2_PERFCOUNTER_HI
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: xcd0_gc_utcl2_l2tlbprdec
+//L2TLB_PERFCOUNTER_LO
+#define L2TLB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define L2TLB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//L2TLB_PERFCOUNTER_HI
+#define L2TLB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define L2TLB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define L2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define L2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: xcd0_gc_perfsdec
+//CPG_PERFCOUNTER1_SELECT
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT1
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPC_PERFCOUNTER1_SELECT
+#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPC_PERFCOUNTER0_SELECT1
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER1_SELECT
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT1
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CP_PERFMON_CNTL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//CPC_PERFCOUNTER0_SELECT
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPG_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPF_LATENCY_STATS_SELECT
+#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
+#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPG_LATENCY_STATS_SELECT
+#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_LATENCY_STATS_SELECT
+#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x00000007L
+#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CP_DRAW_OBJECT
+#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
+#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
+//CP_DRAW_OBJECT_COUNTER
+#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
+#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
+//CP_DRAW_WINDOW_MASK_HI
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_HI
+#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_LO
+#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
+#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
+#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
+#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
+//CP_DRAW_WINDOW_CNTL
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
+#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
+#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
+//GRBM_PERFCOUNTER0_SELECT
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_PERFCOUNTER1_SELECT
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_SE0_PERFCOUNTER_SELECT
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//GRBM_SE1_PERFCOUNTER_SELECT
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//GRBM_SE2_PERFCOUNTER_SELECT
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//GRBM_SE3_PERFCOUNTER_SELECT
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//WD_PERFCOUNTER0_SELECT
+#define WD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//WD_PERFCOUNTER1_SELECT
+#define WD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//WD_PERFCOUNTER2_SELECT
+#define WD_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//WD_PERFCOUNTER3_SELECT
+#define WD_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER0_SELECT
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER1_SELECT
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER2_SELECT
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER3_SELECT
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER0_SELECT1
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//VGT_PERFCOUNTER0_SELECT
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER1_SELECT
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER2_SELECT
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER3_SELECT
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER0_SELECT1
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//VGT_PERFCOUNTER1_SELECT1
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//VGT_PERFCOUNTER_SEID_MASK
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x0
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0x000000FFL
+//PA_SU_PERFCOUNTER0_SELECT
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT1
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT1
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT1
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER1_SELECT
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER2_SELECT
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER3_SELECT
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER4_SELECT
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER5_SELECT
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER6_SELECT
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER7_SELECT
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER0_SELECT
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER0_SELECT1
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT1
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT1
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT1
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER4_SELECT
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000000FFL
+//SPI_PERFCOUNTER5_SELECT
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000000FFL
+//SPI_PERFCOUNTER_BINS
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
+//SQ_PERFCOUNTER0_SELECT
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER1_SELECT
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER2_SELECT
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER3_SELECT
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER4_SELECT
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER5_SELECT
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER6_SELECT
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER7_SELECT
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER8_SELECT
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER9_SELECT
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER10_SELECT
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER11_SELECT
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER12_SELECT
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER13_SELECT
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER14_SELECT
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER15_SELECT
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER_CTRL
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x1
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x3
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x5
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x8
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0xd
+#define SQ_PERFCOUNTER_CTRL__VMID_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x00000002L
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x00000008L
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x00000020L
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x00001F00L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x00002000L
+#define SQ_PERFCOUNTER_CTRL__VMID_MASK_MASK 0xFFFF0000L
+//SQ_PERFCOUNTER_MASK
+#define SQ_PERFCOUNTER_MASK__SH0_MASK__SHIFT 0x0
+#define SQ_PERFCOUNTER_MASK__SH1_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER_MASK__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_PERFCOUNTER_MASK__SH1_MASK_MASK 0xFFFF0000L
+//SQ_PERFCOUNTER_CTRL2
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+//SX_PERFCOUNTER0_SELECT
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER2_SELECT
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER3_SELECT
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER0_SELECT1
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT1
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT1
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000007FL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0001FC00L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT1
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x0000007FL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0001FC00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER1_SELECT
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000007FL
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0000FC00L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT1
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x0000003FL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0000FC00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TD_PERFCOUNTER1_SELECT
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000007FL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0001FC00L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT1
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x0000007FL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0001FC00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000007FL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0001FC00L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT1
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x0000007FL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x0001FC00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER2_SELECT
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x0000007FL
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER3_SELECT
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x0000007FL
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER0_SELECT
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER0_SELECT1
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCC_PERFCOUNTER1_SELECT
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER1_SELECT1
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCC_PERFCOUNTER2_SELECT
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER3_SELECT
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER0_SELECT
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER0_SELECT1
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCA_PERFCOUNTER1_SELECT
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER1_SELECT1
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCA_PERFCOUNTER2_SELECT
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER3_SELECT
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER_FILTER
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
+//CB_PERFCOUNTER0_SELECT
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER0_SELECT1
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//CB_PERFCOUNTER1_SELECT
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER2_SELECT
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER3_SELECT
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT1
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT1
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER2_SELECT
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER3_SELECT
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RLC_SPM_PERFMON_CNTL
+#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x0
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
+#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xe
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFFL
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
+#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x0000C000L
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_BASE_LO
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_RING_BASE_HI
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
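
RING_BASE_HI only carries the upper 16 bits of the SPM ring address (mask 0x0000FFFF), so a 64-bit GPU address has to be split across the LO/HI pair above. A minimal sketch of that split, not part of this patch; the helper name and the assumption that the address fits in 48 bits are illustrative:

    /* Illustrative only: split a ring buffer GPU address into the
     * RLC_SPM_PERFMON_RING_BASE_LO/HI fields defined above. Address bits
     * above bit 47 are silently dropped by the HI mask.
     */
    #include <stdint.h>

    static void spm_ring_base_words(uint64_t gpu_addr, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)(gpu_addr & RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK);
            *hi = (uint32_t)(gpu_addr >> 32) &
                  RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK;
    }
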
+//RLC_SPM_PERFMON_RING_SIZE
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_SEGMENT_SIZE
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1__SHIFT 0x8
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE__SHIFT 0xb
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE__SHIFT 0x10
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE__SHIFT 0x15
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE__SHIFT 0x1a
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED__SHIFT 0x1f
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE_MASK 0x000000FFL
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1_MASK 0x00000700L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE_MASK 0x0000F800L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE_MASK 0x001F0000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE_MASK 0x03E00000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE_MASK 0x7C000000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED_MASK 0x80000000L
+//RLC_SPM_SE_MUXSEL_ADDR
+#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_ADDR__RESERVED__SHIFT 0x8
+#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0x000000FFL
+#define RLC_SPM_SE_MUXSEL_ADDR__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SE_MUXSEL_DATA
+#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_CPG_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_CPC_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_CPF_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_CB_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_DB_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_GDS_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_IA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SC_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TCC_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TCA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TCP_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TD_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_VGT_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SPI_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SQG_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SX_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_GLOBAL_MUXSEL_ADDR
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0x0000007FL
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_GLOBAL_MUXSEL_DATA
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RING_RDPTR
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
+//RLC_SPM_SEGMENT_THRESHOLD
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0xFFFFFFFFL
+//RLC_SPM_RMI_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PERFMON_SAMPLE_DELAY_MAX
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__PERFMON_MAX_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__RESERVED__SHIFT 0x8
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__PERFMON_MAX_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__RESERVED_MASK 0xFFFFFF00L
+//RLC_PERFMON_CNTL
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//RLC_PERFCOUNTER0_SELECT
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
+//RLC_PERFCOUNTER1_SELECT
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
+//RLC_GPU_IOV_PERF_CNT_CNTL
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
+//RLC_GPU_IOV_PERF_CNT_WR_ADDR
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_WR_DATA
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0x0000000FL
+//RLC_GPU_IOV_PERF_CNT_RD_ADDR
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_RD_DATA
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0x0000000FL
+//RMI_PERFCOUNTER0_SELECT
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER0_SELECT1
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER1_SELECT
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT1
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000001FFL
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER3_SELECT
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERF_COUNTER_CNTL
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
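
All of the *_SELECT / *_SELECT1 registers above follow the same convention: each field FIELD comes as a FIELD__SHIFT / FIELD_MASK pair, and the amdgpu driver normally composes register values through its own REG_SET_FIELD()/REG_GET_FIELD() helpers. A minimal stand-alone sketch of the same idea, not part of this patch; the REG_FIELD() helper and the GDS counter example values are illustrative:

    /* Illustrative only: compose a GDS_PERFCOUNTER2_SELECT value from the
     * paired __SHIFT/_MASK defines above; assumes this header is included.
     * REG_FIELD() is a local stand-in, not a macro provided by this header.
     */
    #include <stdint.h>

    #define REG_FIELD(reg, field, val) \
            (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK)

    static uint32_t gds_perfcounter2_select(uint32_t event, uint32_t perf_mode)
    {
            /* PERF_SEL sits in bits 9:0, PERF_MODE in bits 31:28 */
            return REG_FIELD(GDS_PERFCOUNTER2_SELECT, PERF_SEL, event) |
                   REG_FIELD(GDS_PERFCOUNTER2_SELECT, PERF_MODE, perf_mode);
    }
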
+
+
+// addressBlock: xcd0_gc_utcl2_atcl2pfcntldec
+//ATC_L2_PERFCOUNTER0_CFG
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER1_CFG
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
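
The ATC_L2 counters above (and the MC_VM_L2 and L2TLB blocks that follow) use a CFG-plus-RSLT_CNTL scheme rather than the SELECT-style layout earlier in this header: each counter's *_CFG register carries the event selection (PERF_SEL/PERF_SEL_END), a mode field, and ENABLE/CLEAR bits, while the shared *_PERFCOUNTER_RSLT_CNTL register picks which counter is read back and gates start/stop. A minimal sketch of building the two words, not part of this patch; the event index 0x5 is an arbitrary example value:

    /* Illustrative only: build ATC_L2 perf counter config and result-control
     * words from the defines above. Event 0x5 is an arbitrary example; real
     * event numbers come from the hardware perf counter documentation.
     */
    #include <stdint.h>

    static void atc_l2_counter0_words(uint32_t *cfg, uint32_t *rslt_cntl)
    {
            *cfg  = (0x5u << ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
                    ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK;
            *cfg |= ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK;   /* enable counter 0  */
            *cfg |= ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK;    /* clear it on setup */

            /* select counter 0 for readback and set the global enable bit */
            *rslt_cntl  = (0u << ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT) &
                          ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK;
            *rslt_cntl |= ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK;
    }
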
+
+
+// addressBlock: xcd0_gc_utcl2_vml2pldec
+//MC_VM_L2_PERFCOUNTER0_CFG
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER1_CFG
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER2_CFG
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER3_CFG
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER4_CFG
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER5_CFG
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER6_CFG
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER7_CFG
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: xcd0_gc_utcl2_l2tlbpldec
+//L2TLB_PERFCOUNTER0_CFG
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER1_CFG
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER2_CFG
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER3_CFG
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER_RSLT_CNTL
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: xcd0_gc_gdflldec
+//GDFLL_EDC_HYSTERESIS_CNTL
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_EDC_HYSTERESIS_STAT
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: xcd0_gc_rlcpdec
+//RLC_CNTL
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
+#define RLC_CNTL__RESERVED__SHIFT 0x4
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_CGCG_CGLS_CTRL_2
+#define RLC_CGCG_CGLS_CTRL_2__CANE_STAT_BUSY_MASK__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL_2__RESERVED__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL_2__CANE_STAT_BUSY_MASK_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL_2__RESERVED_MASK 0xFFFFFFFEL
+//RLC_STAT
+#define RLC_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x1
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x2
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x3
+#define RLC_STAT__MC_BUSY__SHIFT 0x4
+#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
+#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
+#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
+#define RLC_STAT__RESERVED__SHIFT 0x8
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000008L
+#define RLC_STAT__MC_BUSY_MASK 0x00000010L
+#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
+#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
+#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
+#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
+//RLC_SAFE_MODE
+#define RLC_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
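
Judging from the masks above, RLC_SAFE_MODE acts as a small request/response interface: CMD is a single request bit, MESSAGE a 4-bit payload, RESPONSE a 4-bit reply. A minimal sketch of composing a request word, not part of this patch; the message value is an assumed example, since the actual message codes are defined by the RLC firmware interface rather than by this header:

    /* Illustrative only: build an RLC_SAFE_MODE request word from the defines
     * above. The message argument is an opaque example payload.
     */
    #include <stdint.h>

    static uint32_t rlc_safe_mode_request(uint32_t message)
    {
            uint32_t data = RLC_SAFE_MODE__CMD_MASK;        /* raise the request bit */

            data |= (message << RLC_SAFE_MODE__MESSAGE__SHIFT) &
                    RLC_SAFE_MODE__MESSAGE_MASK;            /* 4-bit message payload */
            return data;
    }

Callers typically write such a word and then poll the register until the CMD bit reads back as zero, but that handshake is driver logic, not something this header defines.
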
+//RLC_MEM_SLP_CNTL
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
+//SMU_RLC_RESPONSE
+#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
+#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_RLCV_SAFE_MODE
+#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SMU_SAFE_MODE
+#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCV_COMMAND
+#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
+#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
+#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
+#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
+//RLC_REFCLOCK_TIMESTAMP_LSB
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
+//RLC_REFCLOCK_TIMESTAMP_MSB
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_0
+#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_1
+#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_2
+#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_CTRL
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
+#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x4
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
+#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_LB_CNTR_MAX
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x0
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_STAT
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC__SHIFT 0xa
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC__SHIFT 0xb
+#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0xc
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC_MASK 0x00000400L
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC_MASK 0x00000800L
+#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFFFF000L
+//RLC_GPM_TIMER_INT_3
+#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
+//RLC_SERDES_WR_NONCU_MASTER_MASK_1
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1__SHIFT 0x0
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1__SHIFT 0x10
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1__SHIFT 0x11
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK__SHIFT 0x12
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1__SHIFT 0x13
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK__SHIFT 0x14
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK__SHIFT 0x15
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK__SHIFT 0x16
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK__SHIFT 0x17
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK__SHIFT 0x18
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED__SHIFT 0x19
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1_MASK 0x0000FFFFL
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1_MASK 0x00010000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1_MASK 0x00020000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK_MASK 0x00040000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1_MASK 0x00080000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK_MASK 0x00100000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK_MASK 0x00200000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK_MASK 0x00400000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK_MASK 0x00800000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK_MASK 0x01000000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_MASK 0xFE000000L
+//RLC_SERDES_NONCU_MASTER_BUSY_1
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1__SHIFT 0x0
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1__SHIFT 0x10
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1__SHIFT 0x11
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1__SHIFT 0x12
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1__SHIFT 0x13
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY__SHIFT 0x14
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY__SHIFT 0x15
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY__SHIFT 0x16
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY__SHIFT 0x17
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY__SHIFT 0x18
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED__SHIFT 0x19
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1_MASK 0x0000FFFFL
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1_MASK 0x00010000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1_MASK 0x00020000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1_MASK 0x00040000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1_MASK 0x00080000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY_MASK 0x00100000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY_MASK 0x00200000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY_MASK 0x00400000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY_MASK 0x00800000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY_MASK 0x01000000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_MASK 0xFE000000L
+//RLC_INT_STAT
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
+#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
+#define RLC_INT_STAT__RESERVED__SHIFT 0x9
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
+#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
+#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
+//RLC_LB_CNTL
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x0
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x1
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x2
+#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x3
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x4
+#define RLC_LB_CNTL__RESERVED__SHIFT 0xc
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x00000001L
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x00000002L
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x00000004L
+#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x00000008L
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0x00000FF0L
+#define RLC_LB_CNTL__RESERVED_MASK 0xFFFFF000L
+//RLC_MGCG_CTRL
+#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
+#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
+#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
+#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
+#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL__SHIFT 0xf
+#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x10
+#define RLC_MGCG_CTRL__SPARE__SHIFT 0x11
+#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
+#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
+#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
+#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
+#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL_MASK 0x00008000L
+#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL_MASK 0x00010000L
+#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFE0000L
+//RLC_LB_CNTR_INIT
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x0
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xFFFFFFFFL
+//RLC_LOAD_BALANCE_CNTR
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x0
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xFFFFFFFFL
+//RLC_JUMP_TABLE_RESTORE
+#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
+#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_2
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE__SHIFT 0x10
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE_MASK 0xFFFF0000L
+//RLC_GPU_CLOCK_COUNT_LSB
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_UCODE_CNTL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
+//RLC_GPM_THREAD_RESET
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
+#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
+#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPM_CP_DMA_COMPLETE_T0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_CP_DMA_COMPLETE_T1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_FIREWALL_VIOLATION
+#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
+#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_LSB
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_MSB
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_LSB
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_MSB
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_CTRL
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN__SHIFT 0x0
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET__SHIFT 0x1
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE__SHIFT 0x2
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN__SHIFT 0x3
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET__SHIFT 0x4
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE__SHIFT 0x5
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN_MASK 0x00000001L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET_MASK 0x00000002L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE_MASK 0x00000004L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN_MASK 0x00000008L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET_MASK 0x00000010L
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE_MASK 0x00000020L
+//RLC_CLK_COUNT_STAT
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID__SHIFT 0x0
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID__SHIFT 0x1
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC__SHIFT 0x2
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC__SHIFT 0x3
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC__SHIFT 0x4
+#define RLC_CLK_COUNT_STAT__RESERVED__SHIFT 0x5
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID_MASK 0x00000001L
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID_MASK 0x00000002L
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC_MASK 0x00000004L
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC_MASK 0x00000008L
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC_MASK 0x00000010L
+#define RLC_CLK_COUNT_STAT__RESERVED_MASK 0xFFFFFFE0L
+//RLC_GPM_STAT
+#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_GPM_STAT__STATIC_CU_POWERING_UP__SHIFT 0xd
+#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN__SHIFT 0xe
+#define RLC_GPM_STAT__DYN_CU_POWERING_UP__SHIFT 0xf
+#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
+#define RLC_GPM_STAT__RESERVED_1__SHIFT 0x13
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_GPM_STAT__STATIC_CU_POWERING_UP_MASK 0x00002000L
+#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN_MASK 0x00004000L
+#define RLC_GPM_STAT__DYN_CU_POWERING_UP_MASK 0x00008000L
+#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN_MASK 0x00010000L
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
+#define RLC_GPM_STAT__RESERVED_1_MASK 0x00180000L
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_GPU_CLOCK_32_RES_SEL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_CLOCK_32
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
+//RLC_PG_CNTL
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x2
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x3
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
+#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
+#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x13
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable__SHIFT 0x15
+#define RLC_PG_CNTL__RESERVED2__SHIFT 0x16
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE__SHIFT 0x17
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
+#define RLC_PG_CNTL__RESERVED_MASK 0x00003FE0L
+#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
+#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00180000L
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable_MASK 0x00200000L
+#define RLC_PG_CNTL__RESERVED2_MASK 0x00400000L
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK 0x00800000L
+//RLC_GPM_THREAD_PRIORITY
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
+//RLC_GPM_THREAD_ENABLE
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
+#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
+#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_CGTT_MGCG_OVERRIDE
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0__SHIFT 0x0
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE__SHIFT 0x9
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_PERF_CLK_EN__SHIFT 0xa
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_11__SHIFT 0xb
+#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY__SHIFT 0x10
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17__SHIFT 0x11
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0_MASK 0x00000001L
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK 0x00000200L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_PERF_CLK_EN_MASK 0x00000400L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_11_MASK 0x0000F800L
+#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK 0x00010000L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17_MASK 0xFFFE0000L
+//RLC_CGCG_CGLS_CTRL
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_DYN_PG_STATUS
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_DYN_PG_REQUEST
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x0
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY
+#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
+#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
+#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
+#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
+//RLC_CU_STATUS
+#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x0
+#define RLC_CU_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
+//RLC_LB_INIT_CU_MASK
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x0
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_LB_ALWAYS_ACTIVE_CU_MASK
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x0
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_LB_PARAMS
+#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x0
+#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x1
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x8
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x00000001L
+#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0x000000FEL
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0x0000FF00L
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_THREAD1_DELAY
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x0
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x8
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x10
+#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x18
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0x000000FFL
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0x0000FF00L
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0x00FF0000L
+#define RLC_THREAD1_DELAY__SPARE_MASK 0xFF000000L
+//RLC_PG_ALWAYS_ON_CU_MASK
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x0
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_MAX_PG_CU
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x0
+#define RLC_MAX_PG_CU__SPARE__SHIFT 0x8
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0x000000FFL
+#define RLC_MAX_PG_CU__SPARE_MASK 0xFFFFFF00L
+//RLC_AUTO_PG_CTRL
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
+//RLC_SMU_GRBM_REG_SAVE_CTRL
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x0
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x1
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x00000001L
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xFFFFFFFEL
+//RLC_SERDES_RD_PENDING
+#define RLC_SERDES_RD_PENDING__RD_PENDING__SHIFT 0x0
+#define RLC_SERDES_RD_PENDING__RD_PENDING_MASK 0x00000001L
+//RLC_SERDES_RD_MASTER_INDEX
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x0
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x4
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x6
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x9
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0xc
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0xd
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0x11
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x13
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0x0000000FL
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x00000030L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x000001C0L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x00000E00L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x00001000L
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x0001E000L
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x00060000L
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xFFF80000L
+//RLC_SERDES_RD_DATA_0
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_1
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_2
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_WR_CU_MASTER_MASK
+#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK__SHIFT 0x0
+#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK_MASK 0xFFFFFFFFL
+//RLC_SERDES_WR_NONCU_MASTER_MASK
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK__SHIFT 0x0
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK__SHIFT 0x10
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK__SHIFT 0x11
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK__SHIFT 0x12
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK__SHIFT 0x13
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK__SHIFT 0x14
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK__SHIFT 0x15
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK__SHIFT 0x16
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK__SHIFT 0x17
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK__SHIFT 0x18
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK__SHIFT 0x19
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED__SHIFT 0x1a
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK_MASK 0x0000FFFFL
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK_MASK 0x00010000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK_MASK 0x00020000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK_MASK 0x00040000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK_MASK 0x00080000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK_MASK 0x00100000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK_MASK 0x00200000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK_MASK 0x00400000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK_MASK 0x00800000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK_MASK 0x01000000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK_MASK 0x02000000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED_MASK 0xFC000000L
+//RLC_SERDES_WR_CTRL
+#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x0
+#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x8
+#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x9
+#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0xa
+#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0xb
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0xc
+#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0xd
+#define RLC_SERDES_WR_CTRL__RDDATA_RESET__SHIFT 0xe
+#define RLC_SERDES_WR_CTRL__SHORT_FORMAT__SHIFT 0xf
+#define RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT 0x10
+#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE__SHIFT 0x1a
+#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR__SHIFT 0x1b
+#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x1c
+#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0x000000FFL
+#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x00000100L
+#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x00000200L
+#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x00000400L
+#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x00000800L
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x00001000L
+#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x00002000L
+#define RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK 0x00004000L
+#define RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK 0x00008000L
+#define RLC_SERDES_WR_CTRL__BPM_DATA_MASK 0x03FF0000L
+#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK 0x04000000L
+#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK 0x08000000L
+#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xF0000000L
+//RLC_SERDES_WR_DATA
+#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x0
+#define RLC_SERDES_WR_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_CU_MASTER_BUSY
+#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY__SHIFT 0x0
+#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY_MASK 0xFFFFFFFFL
+//RLC_SERDES_NONCU_MASTER_BUSY
+#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY__SHIFT 0x0
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY__SHIFT 0x10
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY__SHIFT 0x11
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY__SHIFT 0x12
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY__SHIFT 0x13
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY__SHIFT 0x14
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY__SHIFT 0x15
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY__SHIFT 0x16
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY__SHIFT 0x17
+#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY__SHIFT 0x18
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY__SHIFT 0x19
+#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED__SHIFT 0x1a
+#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK 0x0000FFFFL
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK 0x00010000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY_MASK 0x00020000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK 0x00040000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK 0x00080000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY_MASK 0x00100000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY_MASK 0x00200000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY_MASK 0x00400000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY_MASK 0x00800000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY_MASK 0x01000000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY_MASK 0x02000000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED_MASK 0xFC000000L
+//RLC_GPM_GENERAL_0
+#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_1
+#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_2
+#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_3
+#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_4
+#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_5
+#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_6
+#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_7
+#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_SCRATCH_ADDR
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x9
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
+#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
+//RLC_GPM_SCRATCH_DATA
+#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_STATIC_PG_STATUS
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_SPM_MC_CNTL
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x5
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x6
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x7
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x8
+#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0xa
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000010L
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000020L
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000040L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000080L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000300L
+#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFFFFC00L
+//RLC_SPM_INT_CNTL
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
+#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
+#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_STATUS
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
+#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
+#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_MESSAGE
+#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
+//RLC_GPM_LOG_SIZE
+#define RLC_GPM_LOG_SIZE__SIZE__SHIFT 0x0
+#define RLC_GPM_LOG_SIZE__SIZE_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_3
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
+#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
+#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
+//RLC_GPR_REG1
+#define RLC_GPR_REG1__DATA__SHIFT 0x0
+#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPR_REG2
+#define RLC_GPR_REG2__DATA__SHIFT 0x0
+#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_LOG_CONT
+#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
+#define RLC_GPM_LOG_CONT__CONT_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_DISABLE_TH0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_FORCE_TH0
+#define RLC_GPM_INT_FORCE_TH0__FORCE__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_FORCE_TH1
+#define RLC_GPM_INT_FORCE_TH1__FORCE__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH1__FORCE_MASK 0xFFFFFFFFL
+//RLC_SRM_CNTL
+#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
+#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
+#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_ARAM_ADDR
+#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xb
+#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x000007FFL
+#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFF800L
+//RLC_SRM_ARAM_DATA
+#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_DRAM_ADDR
+#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xb
+#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x000007FFL
+#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFF800L
+//RLC_SRM_DRAM_DATA
+#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_GPM_COMMAND
+#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
+#define RLC_SRM_GPM_COMMAND__RESERVED_16__SHIFT 0x10
+#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x11
+#define RLC_SRM_GPM_COMMAND__RESERVED_30_29__SHIFT 0x1d
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
+#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0000FFE0L
+#define RLC_SRM_GPM_COMMAND__RESERVED_16_MASK 0x00010000L
+#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x0FFE0000L
+#define RLC_SRM_GPM_COMMAND__RESERVED_30_29_MASK 0x60000000L
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_GPM_COMMAND_STATUS
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_RLCV_COMMAND
+#define RLC_SRM_RLCV_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_RLCV_COMMAND__INDEX_CNTL__SHIFT 0x1
+#define RLC_SRM_RLCV_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
+#define RLC_SRM_RLCV_COMMAND__SIZE__SHIFT 0x5
+#define RLC_SRM_RLCV_COMMAND__RESERVED_16__SHIFT 0x10
+#define RLC_SRM_RLCV_COMMAND__START_OFFSET__SHIFT 0x11
+#define RLC_SRM_RLCV_COMMAND__RESERVED1__SHIFT 0x1c
+#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_RLCV_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_RLCV_COMMAND__INDEX_CNTL_MASK 0x00000002L
+#define RLC_SRM_RLCV_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
+#define RLC_SRM_RLCV_COMMAND__SIZE_MASK 0x0000FFE0L
+#define RLC_SRM_RLCV_COMMAND__RESERVED_16_MASK 0x00010000L
+#define RLC_SRM_RLCV_COMMAND__START_OFFSET_MASK 0x0FFE0000L
+#define RLC_SRM_RLCV_COMMAND__RESERVED1_MASK 0x70000000L
+#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_RLCV_COMMAND_STATUS
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_INDEX_CNTL_ADDR_0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_1
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_2
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_3
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_4
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_5
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_6
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_7
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_DATA_0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_1
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_2
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_3
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_4
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_5
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_6
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_7
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_STAT
+#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
+#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
+#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
+#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
+#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
+#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_GPM_ABORT
+#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
+#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
+#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
+#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CSIB_ADDR_LO
+#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
+//RLC_CSIB_ADDR_HI
+#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
+//RLC_CSIB_LENGTH
+#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
+#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
+//RLC_SMU_COMMAND
+#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
+#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
+//RLC_CP_SCHEDULERS
+#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
+#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
+#define RLC_CP_SCHEDULERS__scheduler2__SHIFT 0x10
+#define RLC_CP_SCHEDULERS__scheduler3__SHIFT 0x18
+#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
+#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
+#define RLC_CP_SCHEDULERS__scheduler2_MASK 0x00FF0000L
+#define RLC_CP_SCHEDULERS__scheduler3_MASK 0xFF000000L
+//RLC_SMU_ARGUMENT_1
+#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_2
+#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_8
+#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_9
+#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_10
+#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_11
+#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_12
+#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_CNTL_0
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
+//RLC_GPM_UTCL1_CNTL_1
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
+//RLC_GPM_UTCL1_CNTL_2
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
+//RLC_SPM_UTCL1_CNTL
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x19
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0x02000000L
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+//RLC_UTCL1_STATUS_2
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY__SHIFT 0x4
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans__SHIFT 0x9
+#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0xa
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY_MASK 0x00000010L
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans_MASK 0x00000200L
+#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFC00L
+//RLC_LB_THR_CONFIG_2
+#define RLC_LB_THR_CONFIG_2__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_2__DATA_MASK 0xFFFFFFFFL
+//RLC_LB_THR_CONFIG_3
+#define RLC_LB_THR_CONFIG_3__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_3__DATA_MASK 0xFFFFFFFFL
+//RLC_LB_THR_CONFIG_4
+#define RLC_LB_THR_CONFIG_4__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_UTCL1_ERROR_1
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_SPM_UTCL1_ERROR_2
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_1
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_LB_THR_CONFIG_1
+#define RLC_LB_THR_CONFIG_1__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_2
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH1_ERROR_1
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH1_ERROR_2
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH2_ERROR_1
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH2_ERROR_2
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_SEMAPHORE_0
+#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_1
+#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CP_EOF_INT
+#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
+#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
+#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CP_EOF_INT_CNT
+#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
+#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT
+#define RLC_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_PREWALKER_UTCL1_CNTL
+#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_PREWALKER_UTCL1_CNTL__RESERVED__SHIFT 0x19
+#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_PREWALKER_UTCL1_CNTL__RESERVED_MASK 0x02000000L
+#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+//RLC_PREWALKER_UTCL1_TRIG
+#define RLC_PREWALKER_UTCL1_TRIG__VALID__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_TRIG__VMID__SHIFT 0x1
+#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE__SHIFT 0x5
+#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM__SHIFT 0x6
+#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM__SHIFT 0x7
+#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM__SHIFT 0x8
+#define RLC_PREWALKER_UTCL1_TRIG__RESERVED__SHIFT 0x9
+#define RLC_PREWALKER_UTCL1_TRIG__READY__SHIFT 0x1f
+#define RLC_PREWALKER_UTCL1_TRIG__VALID_MASK 0x00000001L
+#define RLC_PREWALKER_UTCL1_TRIG__VMID_MASK 0x0000001EL
+#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE_MASK 0x00000020L
+#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM_MASK 0x00000040L
+#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM_MASK 0x00000080L
+#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM_MASK 0x00000100L
+#define RLC_PREWALKER_UTCL1_TRIG__RESERVED_MASK 0x7FFFFE00L
+#define RLC_PREWALKER_UTCL1_TRIG__READY_MASK 0x80000000L
+//RLC_PREWALKER_UTCL1_ADDR_LSB
+#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB_MASK 0xFFFFFFFFL
+//RLC_PREWALKER_UTCL1_ADDR_MSB
+#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB_MASK 0x0000FFFFL
+//RLC_PREWALKER_UTCL1_SIZE_LSB
+#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB_MASK 0xFFFFFFFFL
+//RLC_PREWALKER_UTCL1_SIZE_MSB
+#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB_MASK 0x00000003L
+//RLC_DSM_TRIG
+#define RLC_DSM_TRIG__START__SHIFT 0x0
+#define RLC_DSM_TRIG__START_MASK 0x00000001L
+//RLC_UTCL1_STATUS
+#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
+#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
+//RLC_R2I_CNTL_0
+#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_1
+#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_2
+#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_3
+#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
+//RLC_UTCL2_CNTL
+#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
+#define RLC_UTCL2_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1
+#define RLC_UTCL2_CNTL__RESERVED__SHIFT 0x2
+#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
+#define RLC_UTCL2_CNTL__IGNORE_PTE_PERMISSION_MASK 0x00000002L
+#define RLC_UTCL2_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_LBPW_CU_STAT
+#define RLC_LBPW_CU_STAT__MAX_CU__SHIFT 0x0
+#define RLC_LBPW_CU_STAT__ON_CU__SHIFT 0x10
+#define RLC_LBPW_CU_STAT__MAX_CU_MASK 0x0000FFFFL
+#define RLC_LBPW_CU_STAT__ON_CU_MASK 0xFFFF0000L
+//RLC_DS_CNTL
+#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x0
+#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x1
+#define RLC_DS_CNTL__GFX_CLK_TCC_RLC_GRBM_CC_RESIDENT_MASK__SHIFT 0x2
+#define RLC_DS_CNTL__GFX_CLK_EA_RLC_GRBM_STAT_BUSY_MASK__SHIFT 0x3
+#define RLC_DS_CNTL__RESRVED__SHIFT 0x4
+#define RLC_DS_CNTL__SOC_CLK_TCC_RLC_GRBM_CC_RESIDENT_MASK__SHIFT 0xe
+#define RLC_DS_CNTL__SOC_CLK_EA_RLC_GRBM_STAT_BUSY_MASK__SHIFT 0xf
+#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x10
+#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x11
+#define RLC_DS_CNTL__RESRVED_1__SHIFT 0x12
+#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000001L
+#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000002L
+#define RLC_DS_CNTL__GFX_CLK_TCC_RLC_GRBM_CC_RESIDENT_MASK_MASK 0x00000004L
+#define RLC_DS_CNTL__GFX_CLK_EA_RLC_GRBM_STAT_BUSY_MASK_MASK 0x00000008L
+#define RLC_DS_CNTL__RESRVED_MASK 0x00003FF0L
+#define RLC_DS_CNTL__SOC_CLK_TCC_RLC_GRBM_CC_RESIDENT_MASK_MASK 0x00004000L
+#define RLC_DS_CNTL__SOC_CLK_EA_RLC_GRBM_STAT_BUSY_MASK_MASK 0x00008000L
+#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00010000L
+#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00020000L
+#define RLC_DS_CNTL__RESRVED_1_MASK 0xFFFC0000L
+//RLC_GPM_INT_STAT_TH0
+#define RLC_GPM_INT_STAT_TH0__STATUS__SHIFT 0x0
+#define RLC_GPM_INT_STAT_TH0__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_13
+#define RLC_GPM_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_14
+#define RLC_GPM_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_15
+#define RLC_GPM_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT_1
+#define RLC_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_SPARE_INT_1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SEMAPHORE_2
+#define RLC_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_3
+#define RLC_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SMU_ARGUMENT_3
+#define RLC_SMU_ARGUMENT_3__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_3__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_4
+#define RLC_SMU_ARGUMENT_4__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_4__ARG_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_LSB_1
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_1
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_CLOCK_COUNT_LSB_2
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_2
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_2
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CPG_STAT_INVAL
+#define RLC_CPG_STAT_INVAL__CPG_stat_inval__SHIFT 0x0
+#define RLC_CPG_STAT_INVAL__CPG_stat_inval_MASK 0x00000001L
+//RLC_DSM_CNTL
+#define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define RLC_DSM_CNTL__RLCG_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define RLC_DSM_CNTL__RLCG_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define RLC_DSM_CNTL__RLCV_INSTR_RAM_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define RLC_DSM_CNTL__RLCV_INSTR_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define RLC_DSM_CNTL__RLCV_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define RLC_DSM_CNTL__RLCV_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define RLC_DSM_CNTL__RLC_TCTAG_RAM_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define RLC_DSM_CNTL__RLC_TCTAG_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
+#define RLC_DSM_CNTL__RLC_SPM_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define RLC_DSM_CNTL__RLC_SPM_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define RLC_DSM_CNTL__RLC_SRM_DATA_RAM_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define RLC_DSM_CNTL__RLC_SRM_DATA_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
+#define RLC_DSM_CNTL__RLC_SRM_ADDR_RAM_IRRITATOR_DATA_SEL__SHIFT 0x15
+#define RLC_DSM_CNTL__RLC_SRM_ADDR_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
+#define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define RLC_DSM_CNTL__RLCG_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define RLC_DSM_CNTL__RLCG_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define RLC_DSM_CNTL__RLCV_INSTR_RAM_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define RLC_DSM_CNTL__RLCV_INSTR_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define RLC_DSM_CNTL__RLCV_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define RLC_DSM_CNTL__RLCV_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define RLC_DSM_CNTL__RLC_TCTAG_RAM_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define RLC_DSM_CNTL__RLC_TCTAG_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
+#define RLC_DSM_CNTL__RLC_SPM_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define RLC_DSM_CNTL__RLC_SPM_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define RLC_DSM_CNTL__RLC_SRM_DATA_RAM_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define RLC_DSM_CNTL__RLC_SRM_DATA_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
+#define RLC_DSM_CNTL__RLC_SRM_ADDR_RAM_IRRITATOR_DATA_SEL_MASK 0x00600000L
+#define RLC_DSM_CNTL__RLC_SRM_ADDR_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
+//RLC_DSM_CNTLA
+#define RLC_DSM_CNTLA__RLC_SPM_SE0_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define RLC_DSM_CNTLA__RLC_SPM_SE0_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define RLC_DSM_CNTLA__RLC_SPM_SE1_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define RLC_DSM_CNTLA__RLC_SPM_SE1_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define RLC_DSM_CNTLA__RLC_SPM_SE2_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define RLC_DSM_CNTLA__RLC_SPM_SE2_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define RLC_DSM_CNTLA__RLC_SPM_SE3_SCRATCH_RAM_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define RLC_DSM_CNTLA__RLC_SPM_SE3_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define RLC_DSM_CNTLA__RLC_SPM_SE0_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define RLC_DSM_CNTLA__RLC_SPM_SE0_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define RLC_DSM_CNTLA__RLC_SPM_SE1_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define RLC_DSM_CNTLA__RLC_SPM_SE1_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define RLC_DSM_CNTLA__RLC_SPM_SE2_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define RLC_DSM_CNTLA__RLC_SPM_SE2_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define RLC_DSM_CNTLA__RLC_SPM_SE3_SCRATCH_RAM_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define RLC_DSM_CNTLA__RLC_SPM_SE3_SCRATCH_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+//RLC_DSM_CNTL2
+#define RLC_DSM_CNTL2__RLCG_INSTR_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define RLC_DSM_CNTL2__RLCG_INSTR_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define RLC_DSM_CNTL2__RLCG_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define RLC_DSM_CNTL2__RLCG_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define RLC_DSM_CNTL2__RLCV_INSTR_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define RLC_DSM_CNTL2__RLCV_INSTR_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define RLC_DSM_CNTL2__RLCV_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define RLC_DSM_CNTL2__RLCV_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define RLC_DSM_CNTL2__RLC_TCTAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define RLC_DSM_CNTL2__RLC_TCTAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define RLC_DSM_CNTL2__RLC_SPM_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define RLC_DSM_CNTL2__RLC_SPM_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define RLC_DSM_CNTL2__RLC_SRM_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define RLC_DSM_CNTL2__RLC_SRM_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define RLC_DSM_CNTL2__RLC_SRM_ADDR_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define RLC_DSM_CNTL2__RLC_SRM_ADDR_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define RLC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define RLC_DSM_CNTL2__RLCG_INSTR_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define RLC_DSM_CNTL2__RLCG_INSTR_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define RLC_DSM_CNTL2__RLCG_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define RLC_DSM_CNTL2__RLCG_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define RLC_DSM_CNTL2__RLCV_INSTR_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define RLC_DSM_CNTL2__RLCV_INSTR_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define RLC_DSM_CNTL2__RLCV_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define RLC_DSM_CNTL2__RLCV_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define RLC_DSM_CNTL2__RLC_TCTAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define RLC_DSM_CNTL2__RLC_TCTAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define RLC_DSM_CNTL2__RLC_SPM_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define RLC_DSM_CNTL2__RLC_SPM_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define RLC_DSM_CNTL2__RLC_SRM_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define RLC_DSM_CNTL2__RLC_SRM_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define RLC_DSM_CNTL2__RLC_SRM_ADDR_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define RLC_DSM_CNTL2__RLC_SRM_ADDR_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define RLC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//RLC_DSM_CNTL2A
+#define RLC_DSM_CNTL2A__RLC_SPM_SE0_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define RLC_DSM_CNTL2A__RLC_SPM_SE0_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define RLC_DSM_CNTL2A__RLC_SPM_SE1_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define RLC_DSM_CNTL2A__RLC_SPM_SE1_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define RLC_DSM_CNTL2A__RLC_SPM_SE2_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define RLC_DSM_CNTL2A__RLC_SPM_SE2_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define RLC_DSM_CNTL2A__RLC_SPM_SE0_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE0_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE1_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE1_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE2_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE2_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//RLC_RLCV_SPARE_INT
+#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_CLK_REQ
+#define RLC_SMU_CLK_REQ__VALID__SHIFT 0x0
+#define RLC_SMU_CLK_REQ__VALID_MASK 0x00000001L
+
+
+// addressBlock: xcd0_gc_pwrdec
+//CGTS_SM_CTRL_REG
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x0
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x4
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0xc
+#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x10
+#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x11
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x14
+#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x15
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x16
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x17
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x18
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0x0000000FL
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0x00000FF0L
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x00001000L
+#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x00010000L
+#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0x000E0000L
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x00100000L
+#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x00200000L
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x00400000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x00800000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xFF000000L
+//CGTS_RD_CTRL_REG
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x8
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000001FL
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x00001F00L
+//CGTS_RD_REG
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
+#define CGTS_RD_REG__READ_DATA_MASK 0x00003FFFL
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_CU0_SP0_CTRL_REG
+#define CGTS_CU0_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_LDS_SQ_CTRL_REG
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_TA_SQC_CTRL_REG
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_SP1_CTRL_REG
+#define CGTS_CU0_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_TD_TCP_CTRL_REG
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_SP0_CTRL_REG
+#define CGTS_CU1_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_LDS_SQ_CTRL_REG
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_TA_SQC_CTRL_REG
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU1_SP1_CTRL_REG
+#define CGTS_CU1_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_TD_TCP_CTRL_REG
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_SP0_CTRL_REG
+#define CGTS_CU2_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_LDS_SQ_CTRL_REG
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_TA_SQC_CTRL_REG
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_SP1_CTRL_REG
+#define CGTS_CU2_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_TD_TCP_CTRL_REG
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_SP0_CTRL_REG
+#define CGTS_CU3_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_LDS_SQ_CTRL_REG
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_TA_SQC_CTRL_REG
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU3_SP1_CTRL_REG
+#define CGTS_CU3_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_TD_TCP_CTRL_REG
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_SP0_CTRL_REG
+#define CGTS_CU4_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_LDS_SQ_CTRL_REG
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_TA_SQC_CTRL_REG
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_SP1_CTRL_REG
+#define CGTS_CU4_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_TD_TCP_CTRL_REG
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_SP0_CTRL_REG
+#define CGTS_CU5_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_LDS_SQ_CTRL_REG
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_TA_SQC_CTRL_REG
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU5_SP1_CTRL_REG
+#define CGTS_CU5_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_TD_TCP_CTRL_REG
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_SP0_CTRL_REG
+#define CGTS_CU6_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_LDS_SQ_CTRL_REG
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_TA_SQC_CTRL_REG
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_SP1_CTRL_REG
+#define CGTS_CU6_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_TD_TCP_CTRL_REG
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_SP0_CTRL_REG
+#define CGTS_CU7_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_LDS_SQ_CTRL_REG
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_TA_SQC_CTRL_REG
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU7_SP1_CTRL_REG
+#define CGTS_CU7_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_TD_TCP_CTRL_REG
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_SP0_CTRL_REG
+#define CGTS_CU8_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_LDS_SQ_CTRL_REG
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_TA_SQC_CTRL_REG
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_SP1_CTRL_REG
+#define CGTS_CU8_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_TD_TCP_CTRL_REG
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_SP0_CTRL_REG
+#define CGTS_CU9_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_LDS_SQ_CTRL_REG
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_TA_SQC_CTRL_REG
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU9_SP1_CTRL_REG
+#define CGTS_CU9_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_TD_TCP_CTRL_REG
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_SP0_CTRL_REG
+#define CGTS_CU10_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_LDS_SQ_CTRL_REG
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_TA_SQC_CTRL_REG
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_SP1_CTRL_REG
+#define CGTS_CU10_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_TD_TCP_CTRL_REG
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_SP0_CTRL_REG
+#define CGTS_CU11_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_LDS_SQ_CTRL_REG
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_TA_SQC_CTRL_REG
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU11_SP1_CTRL_REG
+#define CGTS_CU11_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_TD_TCP_CTRL_REG
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_SP0_CTRL_REG
+#define CGTS_CU12_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_LDS_SQ_CTRL_REG
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_TA_SQC_CTRL_REG
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_SP1_CTRL_REG
+#define CGTS_CU12_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_TD_TCP_CTRL_REG
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_SP0_CTRL_REG
+#define CGTS_CU13_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_LDS_SQ_CTRL_REG
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_TA_SQC_CTRL_REG
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU13_SP1_CTRL_REG
+#define CGTS_CU13_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_TD_TCP_CTRL_REG
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_SP0_CTRL_REG
+#define CGTS_CU14_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_LDS_SQ_CTRL_REG
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_TA_SQC_CTRL_REG
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_SP1_CTRL_REG
+#define CGTS_CU14_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_TD_TCP_CTRL_REG
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_SP0_CTRL_REG
+#define CGTS_CU15_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_LDS_SQ_CTRL_REG
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_TA_SQC_CTRL_REG
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU15_SP1_CTRL_REG
+#define CGTS_CU15_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_TD_TCP_CTRL_REG
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_TCPI_CTRL_REG
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU1_TCPI_CTRL_REG
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU2_TCPI_CTRL_REG
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU3_TCPI_CTRL_REG
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU4_TCPI_CTRL_REG
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU5_TCPI_CTRL_REG
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU6_TCPI_CTRL_REG
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU7_TCPI_CTRL_REG
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU8_TCPI_CTRL_REG
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU9_TCPI_CTRL_REG
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU10_TCPI_CTRL_REG
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU11_TCPI_CTRL_REG
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU12_TCPI_CTRL_REG
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU13_TCPI_CTRL_REG
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU14_TCPI_CTRL_REG
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU15_TCPI_CTRL_REG
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTT_SPI_PS_CLK_CTRL
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPIS_CLK_CTRL
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTX_SPI_DEBUG_CLK_CTRL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x0
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x6
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x7
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL__SHIFT 0x8
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x0000003FL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x00000040L
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x00000080L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL_MASK 0x00000100L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+//CGTT_SPI_CLK_CTRL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PC_CLK_CTRL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE__SHIFT 0x11
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0x19
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0x1a
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE_MASK 0x00020000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00FC0000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x02000000L
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x04000000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_BCI_CLK_CTRL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9__SHIFT 0x18
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
+#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
+#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x1a
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x04000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
+#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
+#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
+#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
+#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQ_CLK_CTRL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//SQ_ALU_CLK_CTRL
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
+//SQ_TEX_CLK_CTRL
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
+//SQ_LDS_CLK_CTRL
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
+//SQ_POWER_THROTTLE
+#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x0
+#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x10
+#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x1e
+#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x00003FFFL
+#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3FFF0000L
+#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xC0000000L
+//SQ_POWER_THROTTLE2
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x0
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x1f
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000L
+//TD_CGTT_CTRL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPI_CLK_CTRL
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x0000F000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TCX_CGTT_SCLK_CTRL
+#define TCX_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCX_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCX_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TCC_CGTT_SCLK_CTRL
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TCC_CGTT_SCLK_CTRL2
+#define TCC_CGTT_SCLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCC_CGTT_SCLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCC_CGTT_SCLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//TCC_CGTT_SCLK_CTRL3
+#define TCC_CGTT_SCLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE18__SHIFT 0xc
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE17__SHIFT 0xd
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE16__SHIFT 0xe
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE15__SHIFT 0xf
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE14__SHIFT 0x10
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE13__SHIFT 0x11
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE12__SHIFT 0x12
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE11__SHIFT 0x13
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE10__SHIFT 0x14
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE9__SHIFT 0x15
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE8__SHIFT 0x17
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCC_CGTT_SCLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE18_MASK 0x00001000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE17_MASK 0x00002000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE16_MASK 0x00004000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE15_MASK 0x00008000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE14_MASK 0x00010000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE13_MASK 0x00020000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE12_MASK 0x00040000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE11_MASK 0x00080000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE10_MASK 0x00100000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE9_MASK 0x00200000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE8_MASK 0x00800000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCC_CGTT_SCLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+//TCA_CGTT_SCLK_CTRL
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__REG_CLK_OFF_HYSTERESIS__SHIFT 0x0
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__REG_CLK_OFF_HYSTERESIS_MASK 0x0000000FL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//RLC_GFX_RM_CNTL
+#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
+#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
+#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
+#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RMI_CGTT_SCLK_CTRL
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPF_CLK_CTRL
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x0000F000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+
+
+// addressBlock: xcd0_gc_hypdec
+//CP_HYP_PFP_UCODE_ADDR
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+//CP_PFP_UCODE_ADDR
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+//CP_HYP_PFP_UCODE_DATA
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_PFP_UCODE_DATA
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_ADDR
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x00001FFFL
+//CP_ME_RAM_RADDR
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x00001FFFL
+//CP_ME_RAM_WADDR
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x00001FFFL
+//CP_HYP_ME_UCODE_DATA
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_ME_RAM_DATA
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
+//CP_CE_UCODE_ADDR
+#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+//CP_HYP_CE_UCODE_ADDR
+#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+//CP_CE_UCODE_DATA
+#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_CE_UCODE_DATA
+#define CP_HYP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC1_UCODE_ADDR
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_MEC_ME1_UCODE_ADDR
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_HYP_MEC1_UCODE_DATA
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME1_UCODE_DATA
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC2_UCODE_ADDR
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_MEC_ME2_UCODE_ADDR
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_HYP_MEC2_UCODE_DATA
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_UCODE_DATA
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_PFP_UCODE_CHKSUM
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_CE_UCODE_CHKSUM
+#define CP_HYP_CE_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_CE_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_CHKSUM
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME1_UCODE_CHKSUM
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME2_UCODE_CHKSUM
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_XCP_CTL
+#define CP_HYP_XCP_CTL__VIRTUAL_XCC_ID__SHIFT 0x0
+#define CP_HYP_XCP_CTL__NUM_XCC_IN_XCP__SHIFT 0x3
+#define CP_HYP_XCP_CTL__VIRTUAL_XCC_ID_MASK 0x00000007L
+#define CP_HYP_XCP_CTL__NUM_XCC_IN_XCP_MASK 0x00000078L
+//RLC_GPM_UCODE_ADDR
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
+//RLC_GPM_UCODE_DATA
+#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//GRBM_GFX_INDEX_SR_SELECT
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_INDEX_SR_DATA
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_SELECT
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_DATA
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
+//GRBM_MCM_ADDR
+#define GRBM_MCM_ADDR__MCM_ADDR_IH__SHIFT 0x0
+#define GRBM_MCM_ADDR__MCM_ADDR_IH_MASK 0x000000FFL
+//RLC_GPU_IOV_VF_ENABLE
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG6
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
+#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
+#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//RLC_GPU_IOV_CFG_REG8
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_0
+#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_CTRL
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCV_TIMER_STAT
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
+//RLC_GPU_IOV_VF_MASK
+#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
+#define RLC_GPU_IOV_VF_MASK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_MASK__RESERVED_MASK 0xFFFF0000L
+//RLC_HYP_SEMAPHORE_0
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_1
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CLK_CNTL
+#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL__SHIFT 0x0
+#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL__SHIFT 0x2
+#define RLC_CLK_CNTL__RLC_GPM_CLK_CNTL__SHIFT 0x4
+#define RLC_CLK_CNTL__RLC_CMN_CLK_CNTL__SHIFT 0x5
+#define RLC_CLK_CNTL__RLC_TC_CLK_CNTL__SHIFT 0x6
+#define RLC_CLK_CNTL__RLC_SPP_CLK_CNTL__SHIFT 0x7
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE__SHIFT 0x8
+#define RLC_CLK_CNTL__RLC_EDC_OVERRIDE__SHIFT 0x9
+#define RLC_CLK_CNTL__RESERVED_11_10__SHIFT 0xa
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE__SHIFT 0xc
+#define RLC_CLK_CNTL__RLC_DFLL_CLK_CNTL__SHIFT 0xd
+#define RLC_CLK_CNTL__DBGBUS_CLK_ACTIVE_OVERRIDE__SHIFT 0xe
+#define RLC_CLK_CNTL__RLC_CAC2_CLK_CNTL__SHIFT 0xf
+#define RLC_CLK_CNTL__RESERVED_1__SHIFT 0x11
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE__SHIFT 0x12
+#define RLC_CLK_CNTL__RESERVED__SHIFT 0x13
+#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL_MASK 0x00000003L
+#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL_MASK 0x0000000CL
+#define RLC_CLK_CNTL__RLC_GPM_CLK_CNTL_MASK 0x00000010L
+#define RLC_CLK_CNTL__RLC_CMN_CLK_CNTL_MASK 0x00000020L
+#define RLC_CLK_CNTL__RLC_TC_CLK_CNTL_MASK 0x00000040L
+#define RLC_CLK_CNTL__RLC_SPP_CLK_CNTL_MASK 0x00000080L
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK 0x00000100L
+#define RLC_CLK_CNTL__RLC_EDC_OVERRIDE_MASK 0x00000200L
+#define RLC_CLK_CNTL__RESERVED_11_10_MASK 0x00000C00L
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE_MASK 0x00001000L
+#define RLC_CLK_CNTL__RLC_DFLL_CLK_CNTL_MASK 0x00002000L
+#define RLC_CLK_CNTL__DBGBUS_CLK_ACTIVE_OVERRIDE_MASK 0x00004000L
+#define RLC_CLK_CNTL__RLC_CAC2_CLK_CNTL_MASK 0x00018000L
+#define RLC_CLK_CNTL__RESERVED_1_MASK 0x00020000L
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE_MASK 0x00040000L
+#define RLC_CLK_CNTL__RESERVED_MASK 0xFFF80000L
+//RLC_GPU_IOV_SCH_BLOCK
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x00007F00L
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0x7FFF0000L
+//RLC_GPU_IOV_CFG_REG1
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
+//RLC_GPU_IOV_CFG_REG2
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPU_IOV_VM_BUSY_STATUS
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_ACTIVE_FCN_ID
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//RLC_GPU_IOV_SCH_3
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_1
+#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_2
+#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_STAT
+#define RLC_GPU_IOV_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_1
+#define RLC_RLCV_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_UCODE_ADDR
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_GPU_IOV_UCODE_DATA
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCRATCH_ADDR
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED__SHIFT 0x9
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
+#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
+//RLC_GPU_IOV_SCRATCH_DATA
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_CNTL
+#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_CNTL__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_F32_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_IOV_F32_RESET
+#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
+#define RLC_GPU_IOV_F32_RESET__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
+#define RLC_GPU_IOV_F32_RESET__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_IOV_SDMA0_STATUS
+#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA0_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA0_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA1_STATUS
+#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA1_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA1_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SMU_RESPONSE
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_VIRT_RESET_REQ
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
+//RLC_GPU_IOV_RLC_RESPONSE
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_DISABLE
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE__SHIFT 0x0
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_FORCE
+#define RLC_GPU_IOV_INT_FORCE__FORCE__SHIFT 0x0
+#define RLC_GPU_IOV_INT_FORCE__FORCE_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA0_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_HYP_SEMAPHORE_2
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_3
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_GPU_IOV_SDMA2_STATUS
+#define RLC_GPU_IOV_SDMA2_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA2_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA2_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA2_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA2_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA2_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA2_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA2_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA2_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA2_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA2_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA3_STATUS
+#define RLC_GPU_IOV_SDMA3_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA3_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA3_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA3_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA3_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA3_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA3_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA3_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA3_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA3_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA3_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA4_STATUS
+#define RLC_GPU_IOV_SDMA4_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA4_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA4_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA4_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA4_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA4_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA4_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA4_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA4_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA4_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA4_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA5_STATUS
+#define RLC_GPU_IOV_SDMA5_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA5_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA5_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA5_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA5_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA5_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA5_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA5_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA5_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA5_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA5_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA6_STATUS
+#define RLC_GPU_IOV_SDMA6_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA6_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA6_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA6_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA6_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA6_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA6_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA6_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA6_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA6_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA6_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA7_STATUS
+#define RLC_GPU_IOV_SDMA7_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA7_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA7_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA7_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA7_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA7_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA7_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA7_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA7_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA7_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA7_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA2_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: xcd0_gc_utcl2_vmsharedhvdec
+//MC_VM_FB_SIZE_OFFSET_VF0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF1
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF2
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF3
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF4
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF5
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF6
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF7
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF8
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF9
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF11
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF12
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF13
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF14
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF15
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VM_IOMMU_MMIO_CNTRL_1
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//MC_VM_MARC_BASE_LO_0
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_1
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_2
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_3
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_HI_0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_1
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_2
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_3
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_LO_0
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_1
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_2
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_3
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_HI_0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_1
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_2
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_3
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_LO_0
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_1
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_2
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_3
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_HI_0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_1
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_2
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_3
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//VM_IOMMU_CONTROL_REGISTER
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//VM_PCIE_ATS_CNTL
+#define VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_0
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_1
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_2
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_3
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_4
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_5
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_6
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_7
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_8
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_9
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_10
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_11
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_12
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_13
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_14
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_15
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//MC_SHARED_ACTIVE_FCN_ID
+#define MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MC_VM_XGMI_GPUIOV_ENABLE
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L
+
+
+// addressBlock: xcd0_gc_pspdec
+//CPG_PSP_DEBUG
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL__SHIFT 0x2
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL_MASK 0x00000004L
+//CPC_PSP_DEBUG
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPC_PSP_DEBUG__VMID_VIOLATION_CNTL__SHIFT 0x2
+#define CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE__SHIFT 0x3
+#define CPC_PSP_DEBUG__CPC_DC_FIX_DISABLE__SHIFT 0x4
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPC_PSP_DEBUG__VMID_VIOLATION_CNTL_MASK 0x00000004L
+#define CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK 0x00000008L
+#define CPC_PSP_DEBUG__CPC_DC_FIX_DISABLE_MASK 0x00000010L
+//CP_PSP_XCP_CTL
+#define CP_PSP_XCP_CTL__PHYSICAL_XCC_ID__SHIFT 0x0
+#define CP_PSP_XCP_CTL__XCC_DIE_ID__SHIFT 0x3
+#define CP_PSP_XCP_CTL__PHYSICAL_XCC_ID_MASK 0x00000007L
+#define CP_PSP_XCP_CTL__XCC_DIE_ID_MASK 0x00000038L
+//GRBM_SEC_CNTL
+#define GRBM_SEC_CNTL__DEBUG_ENABLE__SHIFT 0x0
+#define GRBM_SEC_CNTL__DEBUG_ENABLE_MASK 0x00000001L
+//GRBM_IOV_ERROR_FIFO_DATA
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_ADDR__SHIFT 0x0
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_VFID__SHIFT 0x12
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_SSRCID__SHIFT 0x18
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_OP__SHIFT 0x1c
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_VF__SHIFT 0x1d
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_OVERFLOW__SHIFT 0x1e
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_READ_VALID__SHIFT 0x1f
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_ADDR_MASK 0x0003FFFFL
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_VFID_MASK 0x00FC0000L
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_SSRCID_MASK 0x0F000000L
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_OP_MASK 0x10000000L
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_VF_MASK 0x20000000L
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_OVERFLOW_MASK 0x40000000L
+#define GRBM_IOV_ERROR_FIFO_DATA__IOV_READ_VALID_MASK 0x80000000L
+//GRBM_DSM_BYPASS
+#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
+#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
+#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
+#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
+//GRBM_CAM_INDEX
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+//GRBM_HYP_CAM_INDEX
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+//GRBM_CAM_DATA
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_HYP_CAM_DATA
+#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//RLC_FWL_FIRST_VIOL_ADDR
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_STATUS__SHIFT 0x0
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP__SHIFT 0x1
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR__SHIFT 0x2
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID__SHIFT 0x14
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_STATUS_MASK 0x00000001L
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP_MASK 0x00000002L
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR_MASK 0x000FFFFCL
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID_MASK 0xFFF00000L
+
+
+// addressBlock: sqind
+//SQ_DEBUG_STS_LOCAL
+#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x4
+#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x000003F0L
+//SQ_DEBUG_CTRL_LOCAL
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x0
+#define SQ_DEBUG_CTRL_LOCAL__PERF_SEL_INSTS_VALU_MFMA_NON_WAVE__SHIFT 0x8
+#define SQ_DEBUG_CTRL_LOCAL__PERF_SEL_INSTS_VALU_MFMA_MOPS_NON_WAVE__SHIFT 0x9
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0x000000FFL
+#define SQ_DEBUG_CTRL_LOCAL__PERF_SEL_INSTS_VALU_MFMA_NON_WAVE_MASK 0x00000100L
+#define SQ_DEBUG_CTRL_LOCAL__PERF_SEL_INSTS_VALU_MFMA_MOPS_NON_WAVE_MASK 0x00000200L
+//SQ_WAVE_VALID_AND_IDLE
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT_MASK 0xFFFFFFFFL
+//SQ_PERF_SNAPSHOT_DATA
+//SQ_PERF_SNAPSHOT_DATA1
+//SQ_PERF_SNAPSHOT_PC_LO
+//SQ_PERF_SNAPSHOT_PC_HI
+//SQ_WAVE_MODE
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
+#define SQ_WAVE_MODE__DEBUG_EN__SHIFT 0xb
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
+#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
+#define SQ_WAVE_MODE__POPS_PACKER0__SHIFT 0x18
+#define SQ_WAVE_MODE__POPS_PACKER1__SHIFT 0x19
+#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1a
+#define SQ_WAVE_MODE__GPR_IDX_EN__SHIFT 0x1b
+#define SQ_WAVE_MODE__VSKIP__SHIFT 0x1c
+#define SQ_WAVE_MODE__CSP__SHIFT 0x1d
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__DEBUG_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
+#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
+#define SQ_WAVE_MODE__POPS_PACKER0_MASK 0x01000000L
+#define SQ_WAVE_MODE__POPS_PACKER1_MASK 0x02000000L
+#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x04000000L
+#define SQ_WAVE_MODE__GPR_IDX_EN_MASK 0x08000000L
+#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000L
+#define SQ_WAVE_MODE__CSP_MASK 0xE0000000L
+//SQ_WAVE_STATUS
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
+#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
+#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
+#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0xf
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x14
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x15
+#define SQ_WAVE_STATUS__ALLOW_REPLAY__SHIFT 0x16
+#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
+#define SQ_WAVE_STATUS__SCRATCH_EN__SHIFT 0x1c
+#define SQ_WAVE_STATUS__IDLE__SHIFT 0x1f
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__ALLOW_REPLAY_MASK 0x00400000L
+#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+#define SQ_WAVE_STATUS__SCRATCH_EN_MASK 0x10000000L
+#define SQ_WAVE_STATUS__IDLE_MASK 0x80000000L
+//SQ_WAVE_TRAPSTS
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
+#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
+#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x10
+#define SQ_WAVE_TRAPSTS__HOST_TRAP__SHIFT 0x16
+#define SQ_WAVE_TRAPSTS__WAVE_END__SHIFT 0x18
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST__SHIFT 0x19
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT__SHIFT 0x1a
+#define SQ_WAVE_TRAPSTS__XNACK_ERROR__SHIFT 0x1c
+#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x1d
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
+#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
+#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x003F0000L
+#define SQ_WAVE_TRAPSTS__HOST_TRAP_MASK 0x00400000L
+#define SQ_WAVE_TRAPSTS__WAVE_END_MASK 0x01000000L
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST_MASK 0x02000000L
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT_MASK 0x04000000L
+#define SQ_WAVE_TRAPSTS__XNACK_ERROR_MASK 0x10000000L
+#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xE0000000L
+//SQ_WAVE_HW_ID
+#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x4
+#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x6
+#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0xc
+#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0xd
+#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x14
+#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x18
+#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x1b
+#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x1e
+#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0x0000000FL
+#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0x000000C0L
+#define SQ_WAVE_HW_ID__CU_ID_MASK 0x00000F00L
+#define SQ_WAVE_HW_ID__SH_ID_MASK 0x00001000L
+#define SQ_WAVE_HW_ID__SE_ID_MASK 0x0000E000L
+#define SQ_WAVE_HW_ID__TG_ID_MASK 0x000F0000L
+#define SQ_WAVE_HW_ID__VM_ID_MASK 0x00F00000L
+#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x07000000L
+#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000L
+#define SQ_WAVE_HW_ID__ME_ID_MASK 0xC0000000L
+//SQ_WAVE_GPR_ALLOC
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x6
+#define SQ_WAVE_GPR_ALLOC__ACCV_OFFSET__SHIFT 0xc
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x12
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x18
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x0000003FL
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x00000FC0L
+#define SQ_WAVE_GPR_ALLOC__ACCV_OFFSET_MASK 0x0003F000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x00FC0000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0x0F000000L
+//SQ_WAVE_LDS_ALLOC
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000000FFL
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
+//SQ_WAVE_IB_STS
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x0
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x4
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x8
+#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0xc
+#define SQ_WAVE_IB_STS__FIRST_REPLAY__SHIFT 0xf
+#define SQ_WAVE_IB_STS__RCNT__SHIFT 0x10
+#define SQ_WAVE_IB_STS__VM_CNT_HI__SHIFT 0x16
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000000FL
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000070L
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x00000F00L
+#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x00007000L
+#define SQ_WAVE_IB_STS__FIRST_REPLAY_MASK 0x00008000L
+#define SQ_WAVE_IB_STS__RCNT_MASK 0x001F0000L
+#define SQ_WAVE_IB_STS__VM_CNT_HI_MASK 0x00C00000L
+//SQ_WAVE_PC_LO
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_PC_HI
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
+//SQ_WAVE_INST_DW0
+#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x0
+#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xFFFFFFFFL
+//SQ_WAVE_INST_DW1
+#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x0
+#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xFFFFFFFFL
+//SQ_WAVE_IB_DBG0
+#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x0
+#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x3
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x4
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x5
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x8
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0xa
+#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x10
+#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x18
+#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x1a
+#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x1b
+#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x1d
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x1e
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI__SHIFT 0x1f
+#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x00000007L
+#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x00000008L
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x00000010L
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0x000000E0L
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x00000300L
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0x00000C00L
+#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0x000F0000L
+#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x03000000L
+#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x04000000L
+#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x18000000L
+#define SQ_WAVE_IB_DBG0__KILL_MASK 0x20000000L
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x40000000L
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI_MASK 0x80000000L
+//SQ_WAVE_IB_DBG1
+#define SQ_WAVE_IB_DBG1__IXNACK__SHIFT 0x0
+#define SQ_WAVE_IB_DBG1__XNACK__SHIFT 0x1
+#define SQ_WAVE_IB_DBG1__TA_NEED_RESET__SHIFT 0x2
+#define SQ_WAVE_IB_DBG1__XCNT__SHIFT 0x4
+#define SQ_WAVE_IB_DBG1__QCNT__SHIFT 0xb
+#define SQ_WAVE_IB_DBG1__RCNT__SHIFT 0x12
+#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
+#define SQ_WAVE_IB_DBG1__IXNACK_MASK 0x00000001L
+#define SQ_WAVE_IB_DBG1__XNACK_MASK 0x00000002L
+#define SQ_WAVE_IB_DBG1__TA_NEED_RESET_MASK 0x00000004L
+#define SQ_WAVE_IB_DBG1__XCNT_MASK 0x000001F0L
+#define SQ_WAVE_IB_DBG1__QCNT_MASK 0x0000F800L
+#define SQ_WAVE_IB_DBG1__RCNT_MASK 0x007C0000L
+#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
+//SQ_WAVE_FLUSH_IB
+#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
+#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP0
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP1
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP2
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP3
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP4
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP5
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP6
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP7
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP8
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP9
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP10
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP11
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP12
+#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP13
+#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP14
+#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP15
+#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_M0
+#define SQ_WAVE_M0__M0__SHIFT 0x0
+#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_LO
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_HI
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
+//SQ_INTERRUPT_WORD_AUTO_CTXID
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 0x1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 0x2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 0x3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 0x4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 0x5
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 0x6
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 0x7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x0000001L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x0000002L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x0000004L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x0000008L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x0000010L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x0000020L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x0000040L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x0000080L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x0000100L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x3000000L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0xC000000L
+//SQ_INTERRUPT_WORD_AUTO_HI
+#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING__SHIFT 0xa
+#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID_MASK 0x300L
+#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING_MASK 0xC00L
+//SQ_INTERRUPT_WORD_AUTO_LO
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_AUTO_LO__WLT__SHIFT 0x1
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL__SHIFT 0x2
+#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP__SHIFT 0x3
+#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP__SHIFT 0x4
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW__SHIFT 0x5
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW__SHIFT 0x6
+#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW__SHIFT 0x7
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_MASK 0x001L
+#define SQ_INTERRUPT_WORD_AUTO_LO__WLT_MASK 0x002L
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL_MASK 0x004L
+#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP_MASK 0x008L
+#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP_MASK 0x010L
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW_MASK 0x020L
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW_MASK 0x040L
+#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW_MASK 0x080L
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR_MASK 0x100L
+//SQ_INTERRUPT_WORD_CMN_CTXID
+#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID_MASK 0x3000000L
+#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING_MASK 0xC000000L
+//SQ_INTERRUPT_WORD_CMN_HI
+#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING__SHIFT 0xa
+#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID_MASK 0x300L
+#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING_MASK 0xC00L
+//SQ_INTERRUPT_WORD_WAVE_CTXID
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 0xc
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 0xd
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 0xe
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 0x12
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 0x14
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x0000FFFL
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x0001000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x0002000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x003C000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x00C0000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x0F00000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x3000000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0xC000000L
+//SQ_INTERRUPT_WORD_WAVE_HI
+#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID__SHIFT 0x4
+#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING__SHIFT 0xa
+#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID_MASK 0x00FL
+#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID_MASK 0x0F0L
+#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID_MASK 0x300L
+#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING_MASK 0xC00L
+//SQ_INTERRUPT_WORD_WAVE_LO
+#define SQ_INTERRUPT_WORD_WAVE_LO__DATA__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV__SHIFT 0x19
+#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID__SHIFT 0x1e
+#define SQ_INTERRUPT_WORD_WAVE_LO__DATA_MASK 0x00FFFFFFL
+#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID_MASK 0x01000000L
+#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID_MASK 0x3C000000L
+#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID_MASK 0xC0000000L
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h
new file mode 100644
index 000000000000..546b043ccdf5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_4_4_2_OFFSET_HEADER
+#define _hdp_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_hdp_hdpdec
+// base address: 0x3c80
+#define regHDP_MMHUB_TLVL 0x0000
+#define regHDP_MMHUB_TLVL_BASE_IDX 0
+#define regHDP_MMHUB_UNITID 0x0001
+#define regHDP_MMHUB_UNITID_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE 0x0040
+#define regHDP_NONSURFACE_BASE_BASE_IDX 0
+#define regHDP_NONSURFACE_INFO 0x0041
+#define regHDP_NONSURFACE_INFO_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE_HI 0x0042
+#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS 0x00c4
+#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS 0x00c5
+#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6
+#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7
+#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS 0x00c8
+#define regHDP_NONSURF_FLAGS_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS_CLR 0x00c9
+#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
+#define regHDP_HOST_PATH_CNTL 0x00cc
+#define regHDP_HOST_PATH_CNTL_BASE_IDX 0
+#define regHDP_SW_SEMAPHORE 0x00cd
+#define regHDP_SW_SEMAPHORE_BASE_IDX 0
+#define regHDP_DEBUG0 0x00ce
+#define regHDP_DEBUG0_BASE_IDX 0
+#define regHDP_LAST_SURFACE_HIT 0x00d0
+#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0
+#define regHDP_OUTSTANDING_REQ 0x00d2
+#define regHDP_OUTSTANDING_REQ_BASE_IDX 0
+#define regHDP_MISC_CNTL 0x00d3
+#define regHDP_MISC_CNTL_BASE_IDX 0
+#define regHDP_MEM_POWER_CTRL 0x00d4
+#define regHDP_MEM_POWER_CTRL_BASE_IDX 0
+#define regHDP_MMHUB_CNTL 0x00d5
+#define regHDP_MMHUB_CNTL_BASE_IDX 0
+#define regHDP_EDC_CNT 0x00d6
+#define regHDP_EDC_CNT_BASE_IDX 0
+#define regHDP_VERSION 0x00d7
+#define regHDP_VERSION_BASE_IDX 0
+#define regHDP_CLK_CNTL 0x00d8
+#define regHDP_CLK_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_CNTL 0x00f6
+#define regHDP_MEMIO_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_ADDR 0x00f7
+#define regHDP_MEMIO_ADDR_BASE_IDX 0
+#define regHDP_MEMIO_STATUS 0x00f8
+#define regHDP_MEMIO_STATUS_BASE_IDX 0
+#define regHDP_MEMIO_WR_DATA 0x00f9
+#define regHDP_MEMIO_WR_DATA_BASE_IDX 0
+#define regHDP_MEMIO_RD_DATA 0x00fa
+#define regHDP_MEMIO_RD_DATA_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_FIRST 0x0100
+#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
+#define regHDP_XDP_D2H_FLUSH 0x0101
+#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0
+#define regHDP_XDP_D2H_BAR_UPDATE 0x0102
+#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_3 0x0103
+#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_4 0x0104
+#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_5 0x0105
+#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_6 0x0106
+#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_7 0x0107
+#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_8 0x0108
+#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_9 0x0109
+#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_10 0x010a
+#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_11 0x010b
+#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_12 0x010c
+#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_13 0x010d
+#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_14 0x010e
+#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_15 0x010f
+#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_16 0x0110
+#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_17 0x0111
+#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_18 0x0112
+#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_19 0x0113
+#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_20 0x0114
+#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_21 0x0115
+#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_22 0x0116
+#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_23 0x0117
+#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_24 0x0118
+#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_25 0x0119
+#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_26 0x011a
+#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_27 0x011b
+#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_28 0x011c
+#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_29 0x011d
+#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_30 0x011e
+#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_31 0x011f
+#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_32 0x0120
+#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_33 0x0121
+#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_34 0x0122
+#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_LAST 0x0123
+#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR_CFG 0x0124
+#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_OFFSET 0x0125
+#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR0 0x0126
+#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR1 0x0127
+#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR2 0x0128
+#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR3 0x0129
+#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR4 0x012a
+#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR5 0x012b
+#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR6 0x012c
+#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
+#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d
+#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_MC_CFG 0x012e
+#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HST_CFG 0x012f
+#define regHDP_XDP_HST_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_IPH_CFG 0x0131
+#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR0 0x0134
+#define regHDP_XDP_P2P_BAR0_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR1 0x0135
+#define regHDP_XDP_P2P_BAR1_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR2 0x0136
+#define regHDP_XDP_P2P_BAR2_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR3 0x0137
+#define regHDP_XDP_P2P_BAR3_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR4 0x0138
+#define regHDP_XDP_P2P_BAR4_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR5 0x0139
+#define regHDP_XDP_P2P_BAR5_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR6 0x013a
+#define regHDP_XDP_P2P_BAR6_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR7 0x013b
+#define regHDP_XDP_P2P_BAR7_BASE_IDX 0
+#define regHDP_XDP_FLUSH_ARMED_STS 0x013c
+#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
+#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d
+#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
+#define regHDP_XDP_BUSY_STS 0x013e
+#define regHDP_XDP_BUSY_STS_BASE_IDX 0
+#define regHDP_XDP_STICKY 0x013f
+#define regHDP_XDP_STICKY_BASE_IDX 0
+#define regHDP_XDP_CHKN 0x0140
+#define regHDP_XDP_CHKN_BASE_IDX 0
+#define regHDP_XDP_BARS_ADDR_39_36 0x0144
+#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR 0x014a
+#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..3ccd2797936e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_4_4_2_SH_MASK_HEADER
+#define _hdp_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_hdp_hdpdec
+//HDP_MMHUB_TLVL
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L
+//HDP_MMHUB_UNITID
+#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
+#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
+#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
+#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
+//HDP_NONSURFACE_BASE
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
+//HDP_NONSURFACE_INFO
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
+#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
+#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
+//HDP_NONSURFACE_BASE_HI
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
+//HDP_SURFACE_WRITE_FLAGS
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L
+//HDP_SURFACE_WRITE_FLAGS_CLR
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS_CLR
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_NONSURF_FLAGS
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+//HDP_NONSURF_FLAGS_CLR
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_HOST_PATH_CNTL
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+//HDP_SW_SEMAPHORE
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
+//HDP_DEBUG0
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
+#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
+//HDP_LAST_SURFACE_HIT
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
+//HDP_OUTSTANDING_REQ
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
+//HDP_MISC_CNTL
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
+#define HDP_MISC_CNTL__ATOMIC_BUFFER_PROTECT_ENABLE__SHIFT 0x4
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
+#define HDP_MISC_CNTL__RAW_ADDR_CAM_ENABLE__SHIFT 0x7
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0xc
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe
+#define HDP_MISC_CNTL__SRAM_ECC_ENABLE__SHIFT 0x14
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1a
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1b
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
+#define HDP_MISC_CNTL__ATOMIC_BUFFER_PROTECT_ENABLE_MASK 0x00000010L
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__RAW_ADDR_CAM_ENABLE_MASK 0x00000080L
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00003000L
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L
+#define HDP_MISC_CNTL__SRAM_ECC_ENABLE_MASK 0x00100000L
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x04000000L
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x08000000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
+//HDP_MEM_POWER_CTRL
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN__SHIFT 0x1
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN__SHIFT 0x2
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN__SHIFT 0x3
+#define HDP_MEM_POWER_CTRL__IPH_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0x1e
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK 0x00000004L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK 0x00000008L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0xC0000000L
+//HDP_MMHUB_CNTL
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L
+//HDP_EDC_CNT
+#define HDP_EDC_CNT__MEM0_SED_COUNT__SHIFT 0x0
+#define HDP_EDC_CNT__MEM0_SED_COUNT_MASK 0x00000003L
+//HDP_VERSION
+#define HDP_VERSION__MINVER__SHIFT 0x0
+#define HDP_VERSION__MAJVER__SHIFT 0x8
+#define HDP_VERSION__REV__SHIFT 0x10
+#define HDP_VERSION__MINVER_MASK 0x000000FFL
+#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
+#define HDP_VERSION__REV_MASK 0x00FF0000L
+//HDP_CLK_CNTL
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK__SHIFT 0x4
+#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK_MASK 0x00000010L
+#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//HDP_MEMIO_CNTL
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
+#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
+#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
+#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
+//HDP_MEMIO_ADDR
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
+//HDP_MEMIO_STATUS
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+//HDP_MEMIO_WR_DATA
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
+//HDP_MEMIO_RD_DATA
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_FIRST
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_FLUSH
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+//HDP_XDP_D2H_BAR_UPDATE
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+//HDP_XDP_D2H_RSVD_3
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_4
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_5
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_6
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_7
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_8
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_9
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_10
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_11
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_12
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_13
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_14
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_15
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_16
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_17
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_18
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_19
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_20
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_21
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_22
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_23
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_24
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_25
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_26
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_27
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_28
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_29
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_30
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_31
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_32
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_33
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_34
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_LAST
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_P2P_BAR_CFG
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+//HDP_XDP_P2P_MBX_OFFSET
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
+//HDP_XDP_P2P_MBX_ADDR0
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR1
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR2
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR3
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR4
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR5
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR6
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_HDP_MBX_MC_CFG
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
+//HDP_XDP_HDP_MC_CFG
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
+//HDP_XDP_HST_CFG
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
+//HDP_XDP_HDP_IPH_CFG
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x6
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003FL
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000FC0L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+//HDP_XDP_P2P_BAR0
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR1
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR2
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR3
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR4
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR5
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR6
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR7
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+//HDP_XDP_FLUSH_ARMED_STS
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
+//HDP_XDP_FLUSH_CNTR0_STS
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
+//HDP_XDP_BUSY_STS
+#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x0
+#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x00FFFFFFL
+//HDP_XDP_STICKY
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
+//HDP_XDP_CHKN
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
+//HDP_XDP_BARS_ADDR_39_36
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
+//HDP_XDP_MC_VM_FB_LOCATION_BASE
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
+//HDP_XDP_GPU_IOV_VIOLATION_LOG
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
+//HDP_XDP_GPU_IOV_VIOLATION_LOG2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//HDP_XDP_MMHUB_ERROR
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_FED__SHIFT 0x4
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_FED__SHIFT 0xc
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_FED_MASK 0x00000010L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_FED_MASK 0x00001000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h
new file mode 100644
index 000000000000..8bcc81f2dfc0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h
@@ -0,0 +1,3314 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mmhub_1_8_0_OFFSET_HEADER
+#define _mmhub_1_8_0_OFFSET_HEADER
+
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec0
+// base address: 0x60000
+#define regDAGB0_RDCLI0 0x0000
+#define regDAGB0_RDCLI0_BASE_IDX 0
+#define regDAGB0_RDCLI1 0x0001
+#define regDAGB0_RDCLI1_BASE_IDX 0
+#define regDAGB0_RDCLI2 0x0002
+#define regDAGB0_RDCLI2_BASE_IDX 0
+#define regDAGB0_RDCLI3 0x0003
+#define regDAGB0_RDCLI3_BASE_IDX 0
+#define regDAGB0_RDCLI4 0x0004
+#define regDAGB0_RDCLI4_BASE_IDX 0
+#define regDAGB0_RDCLI5 0x0005
+#define regDAGB0_RDCLI5_BASE_IDX 0
+#define regDAGB0_RDCLI6 0x0006
+#define regDAGB0_RDCLI6_BASE_IDX 0
+#define regDAGB0_RDCLI7 0x0007
+#define regDAGB0_RDCLI7_BASE_IDX 0
+#define regDAGB0_RDCLI8 0x0008
+#define regDAGB0_RDCLI8_BASE_IDX 0
+#define regDAGB0_RDCLI9 0x0009
+#define regDAGB0_RDCLI9_BASE_IDX 0
+#define regDAGB0_RDCLI10 0x000a
+#define regDAGB0_RDCLI10_BASE_IDX 0
+#define regDAGB0_RDCLI11 0x000b
+#define regDAGB0_RDCLI11_BASE_IDX 0
+#define regDAGB0_RDCLI12 0x000c
+#define regDAGB0_RDCLI12_BASE_IDX 0
+#define regDAGB0_RDCLI13 0x000d
+#define regDAGB0_RDCLI13_BASE_IDX 0
+#define regDAGB0_RDCLI14 0x000e
+#define regDAGB0_RDCLI14_BASE_IDX 0
+#define regDAGB0_RDCLI15 0x000f
+#define regDAGB0_RDCLI15_BASE_IDX 0
+#define regDAGB0_RD_CNTL 0x0010
+#define regDAGB0_RD_CNTL_BASE_IDX 0
+#define regDAGB0_RD_GMI_CNTL 0x0011
+#define regDAGB0_RD_GMI_CNTL_BASE_IDX 0
+#define regDAGB0_RD_ADDR_DAGB 0x0012
+#define regDAGB0_RD_ADDR_DAGB_BASE_IDX 0
+#define regDAGB0_RD_OUTPUT_DAGB_MAX_BURST 0x0013
+#define regDAGB0_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER 0x0014
+#define regDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB0_RD_CGTT_CLK_CTRL 0x0015
+#define regDAGB0_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB0_L1TLB_RD_CGTT_CLK_CTRL 0x0016
+#define regDAGB0_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB0_ATCVM_RD_CGTT_CLK_CTRL 0x0017
+#define regDAGB0_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB0_RD_ADDR_DAGB_MAX_BURST0 0x0018
+#define regDAGB0_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB0_RD_ADDR_DAGB_LAZY_TIMER0 0x0019
+#define regDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB0_RD_ADDR_DAGB_MAX_BURST1 0x001a
+#define regDAGB0_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB0_RD_ADDR_DAGB_LAZY_TIMER1 0x001b
+#define regDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB0_RD_VC0_CNTL 0x001c
+#define regDAGB0_RD_VC0_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC1_CNTL 0x001d
+#define regDAGB0_RD_VC1_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC2_CNTL 0x001e
+#define regDAGB0_RD_VC2_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC3_CNTL 0x001f
+#define regDAGB0_RD_VC3_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC4_CNTL 0x0020
+#define regDAGB0_RD_VC4_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC5_CNTL 0x0021
+#define regDAGB0_RD_VC5_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC6_CNTL 0x0022
+#define regDAGB0_RD_VC6_CNTL_BASE_IDX 0
+#define regDAGB0_RD_VC7_CNTL 0x0023
+#define regDAGB0_RD_VC7_CNTL_BASE_IDX 0
+#define regDAGB0_RD_CNTL_MISC 0x0024
+#define regDAGB0_RD_CNTL_MISC_BASE_IDX 0
+#define regDAGB0_RD_TLB_CREDIT 0x0025
+#define regDAGB0_RD_TLB_CREDIT_BASE_IDX 0
+#define regDAGB0_RD_RDRET_CREDIT_CNTL 0x0026
+#define regDAGB0_RD_RDRET_CREDIT_CNTL_BASE_IDX 0
+#define regDAGB0_RD_RDRET_CREDIT_CNTL2 0x0027
+#define regDAGB0_RD_RDRET_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB0_RDCLI_ASK_PENDING 0x0028
+#define regDAGB0_RDCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB0_RDCLI_GO_PENDING 0x0029
+#define regDAGB0_RDCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB0_RDCLI_GBLSEND_PENDING 0x002a
+#define regDAGB0_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB0_RDCLI_TLB_PENDING 0x002b
+#define regDAGB0_RDCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB0_RDCLI_OARB_PENDING 0x002c
+#define regDAGB0_RDCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB0_RDCLI_OSD_PENDING 0x002d
+#define regDAGB0_RDCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB0_RDCLI_NOALLOC_OVERRIDE 0x002e
+#define regDAGB0_RDCLI_NOALLOC_OVERRIDE_BASE_IDX 0
+#define regDAGB0_RDCLI_NOALLOC_OVERRIDE_VALUE 0x002f
+#define regDAGB0_RDCLI_NOALLOC_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB0_WRCLI0 0x0030
+#define regDAGB0_WRCLI0_BASE_IDX 0
+#define regDAGB0_WRCLI1 0x0031
+#define regDAGB0_WRCLI1_BASE_IDX 0
+#define regDAGB0_WRCLI2 0x0032
+#define regDAGB0_WRCLI2_BASE_IDX 0
+#define regDAGB0_WRCLI3 0x0033
+#define regDAGB0_WRCLI3_BASE_IDX 0
+#define regDAGB0_WRCLI4 0x0034
+#define regDAGB0_WRCLI4_BASE_IDX 0
+#define regDAGB0_WRCLI5 0x0035
+#define regDAGB0_WRCLI5_BASE_IDX 0
+#define regDAGB0_WRCLI6 0x0036
+#define regDAGB0_WRCLI6_BASE_IDX 0
+#define regDAGB0_WRCLI7 0x0037
+#define regDAGB0_WRCLI7_BASE_IDX 0
+#define regDAGB0_WRCLI8 0x0038
+#define regDAGB0_WRCLI8_BASE_IDX 0
+#define regDAGB0_WRCLI9 0x0039
+#define regDAGB0_WRCLI9_BASE_IDX 0
+#define regDAGB0_WRCLI10 0x003a
+#define regDAGB0_WRCLI10_BASE_IDX 0
+#define regDAGB0_WRCLI11 0x003b
+#define regDAGB0_WRCLI11_BASE_IDX 0
+#define regDAGB0_WRCLI12 0x003c
+#define regDAGB0_WRCLI12_BASE_IDX 0
+#define regDAGB0_WRCLI13 0x003d
+#define regDAGB0_WRCLI13_BASE_IDX 0
+#define regDAGB0_WRCLI14 0x003e
+#define regDAGB0_WRCLI14_BASE_IDX 0
+#define regDAGB0_WRCLI15 0x003f
+#define regDAGB0_WRCLI15_BASE_IDX 0
+#define regDAGB0_WR_CNTL 0x0040
+#define regDAGB0_WR_CNTL_BASE_IDX 0
+#define regDAGB0_WR_GMI_CNTL 0x0041
+#define regDAGB0_WR_GMI_CNTL_BASE_IDX 0
+#define regDAGB0_WR_ADDR_DAGB 0x0042
+#define regDAGB0_WR_ADDR_DAGB_BASE_IDX 0
+#define regDAGB0_WR_OUTPUT_DAGB_MAX_BURST 0x0043
+#define regDAGB0_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER 0x0044
+#define regDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB0_WR_CGTT_CLK_CTRL 0x0045
+#define regDAGB0_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB0_L1TLB_WR_CGTT_CLK_CTRL 0x0046
+#define regDAGB0_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB0_ATCVM_WR_CGTT_CLK_CTRL 0x0047
+#define regDAGB0_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB0_WR_ADDR_DAGB_MAX_BURST0 0x0048
+#define regDAGB0_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB0_WR_ADDR_DAGB_LAZY_TIMER0 0x0049
+#define regDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB0_WR_ADDR_DAGB_MAX_BURST1 0x004a
+#define regDAGB0_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB0_WR_ADDR_DAGB_LAZY_TIMER1 0x004b
+#define regDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB0_WR_DATA_DAGB 0x004c
+#define regDAGB0_WR_DATA_DAGB_BASE_IDX 0
+#define regDAGB0_WR_DATA_DAGB_MAX_BURST0 0x004d
+#define regDAGB0_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB0_WR_DATA_DAGB_LAZY_TIMER0 0x004e
+#define regDAGB0_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB0_WR_DATA_DAGB_MAX_BURST1 0x004f
+#define regDAGB0_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB0_WR_DATA_DAGB_LAZY_TIMER1 0x0050
+#define regDAGB0_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB0_WR_VC0_CNTL 0x0051
+#define regDAGB0_WR_VC0_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC1_CNTL 0x0052
+#define regDAGB0_WR_VC1_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC2_CNTL 0x0053
+#define regDAGB0_WR_VC2_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC3_CNTL 0x0054
+#define regDAGB0_WR_VC3_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC4_CNTL 0x0055
+#define regDAGB0_WR_VC4_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC5_CNTL 0x0056
+#define regDAGB0_WR_VC5_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC6_CNTL 0x0057
+#define regDAGB0_WR_VC6_CNTL_BASE_IDX 0
+#define regDAGB0_WR_VC7_CNTL 0x0058
+#define regDAGB0_WR_VC7_CNTL_BASE_IDX 0
+#define regDAGB0_WR_CNTL_MISC 0x0059
+#define regDAGB0_WR_CNTL_MISC_BASE_IDX 0
+#define regDAGB0_WR_TLB_CREDIT 0x005a
+#define regDAGB0_WR_TLB_CREDIT_BASE_IDX 0
+#define regDAGB0_WR_DATA_CREDIT 0x005b
+#define regDAGB0_WR_DATA_CREDIT_BASE_IDX 0
+#define regDAGB0_WR_MISC_CREDIT 0x005c
+#define regDAGB0_WR_MISC_CREDIT_BASE_IDX 0
+#define regDAGB0_WR_OSD_CREDIT_CNTL1 0x005d
+#define regDAGB0_WR_OSD_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB0_WR_OSD_CREDIT_CNTL2 0x005e
+#define regDAGB0_WR_OSD_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1 0x005f
+#define regDAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE 0x0060
+#define regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 0
+#define regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x0061
+#define regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB0_WRCLI_ASK_PENDING 0x0062
+#define regDAGB0_WRCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_GO_PENDING 0x0063
+#define regDAGB0_WRCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_GBLSEND_PENDING 0x0064
+#define regDAGB0_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_TLB_PENDING 0x0065
+#define regDAGB0_WRCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_OARB_PENDING 0x0066
+#define regDAGB0_WRCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_OSD_PENDING 0x0067
+#define regDAGB0_WRCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_DBUS_ASK_PENDING 0x0068
+#define regDAGB0_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_DBUS_GO_PENDING 0x0069
+#define regDAGB0_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define regDAGB0_WRCLI_NOALLOC_OVERRIDE 0x006a
+#define regDAGB0_WRCLI_NOALLOC_OVERRIDE_BASE_IDX 0
+#define regDAGB0_WRCLI_NOALLOC_OVERRIDE_VALUE 0x006b
+#define regDAGB0_WRCLI_NOALLOC_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB0_DAGB_DLY 0x006c
+#define regDAGB0_DAGB_DLY_BASE_IDX 0
+#define regDAGB0_CNTL_MISC 0x006d
+#define regDAGB0_CNTL_MISC_BASE_IDX 0
+#define regDAGB0_CNTL_MISC2 0x006e
+#define regDAGB0_CNTL_MISC2_BASE_IDX 0
+#define regDAGB0_FATAL_ERROR_CNTL 0x006f
+#define regDAGB0_FATAL_ERROR_CNTL_BASE_IDX 0
+#define regDAGB0_FATAL_ERROR_CLEAR 0x0070
+#define regDAGB0_FATAL_ERROR_CLEAR_BASE_IDX 0
+#define regDAGB0_FATAL_ERROR_STATUS0 0x0071
+#define regDAGB0_FATAL_ERROR_STATUS0_BASE_IDX 0
+#define regDAGB0_FATAL_ERROR_STATUS1 0x0072
+#define regDAGB0_FATAL_ERROR_STATUS1_BASE_IDX 0
+#define regDAGB0_FATAL_ERROR_STATUS2 0x0073
+#define regDAGB0_FATAL_ERROR_STATUS2_BASE_IDX 0
+#define regDAGB0_FATAL_ERROR_STATUS3 0x0074
+#define regDAGB0_FATAL_ERROR_STATUS3_BASE_IDX 0
+#define regDAGB0_FIFO_EMPTY 0x0075
+#define regDAGB0_FIFO_EMPTY_BASE_IDX 0
+#define regDAGB0_FIFO_FULL 0x0076
+#define regDAGB0_FIFO_FULL_BASE_IDX 0
+#define regDAGB0_WR_CREDITS_FULL 0x0077
+#define regDAGB0_WR_CREDITS_FULL_BASE_IDX 0
+#define regDAGB0_RD_CREDITS_FULL 0x0078
+#define regDAGB0_RD_CREDITS_FULL_BASE_IDX 0
+#define regDAGB0_PERFCOUNTER_LO 0x0079
+#define regDAGB0_PERFCOUNTER_LO_BASE_IDX 0
+#define regDAGB0_PERFCOUNTER_HI 0x007a
+#define regDAGB0_PERFCOUNTER_HI_BASE_IDX 0
+#define regDAGB0_PERFCOUNTER0_CFG 0x007b
+#define regDAGB0_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regDAGB0_PERFCOUNTER1_CFG 0x007c
+#define regDAGB0_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regDAGB0_PERFCOUNTER2_CFG 0x007d
+#define regDAGB0_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regDAGB0_PERFCOUNTER_RSLT_CNTL 0x007e
+#define regDAGB0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regDAGB0_L1TLB_REG_RW 0x007f
+#define regDAGB0_L1TLB_REG_RW_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec1
+// base address: 0x60200
+#define regDAGB1_RDCLI0 0x0080
+#define regDAGB1_RDCLI0_BASE_IDX 0
+#define regDAGB1_RDCLI1 0x0081
+#define regDAGB1_RDCLI1_BASE_IDX 0
+#define regDAGB1_RDCLI2 0x0082
+#define regDAGB1_RDCLI2_BASE_IDX 0
+#define regDAGB1_RDCLI3 0x0083
+#define regDAGB1_RDCLI3_BASE_IDX 0
+#define regDAGB1_RDCLI4 0x0084
+#define regDAGB1_RDCLI4_BASE_IDX 0
+#define regDAGB1_RDCLI5 0x0085
+#define regDAGB1_RDCLI5_BASE_IDX 0
+#define regDAGB1_RDCLI6 0x0086
+#define regDAGB1_RDCLI6_BASE_IDX 0
+#define regDAGB1_RDCLI7 0x0087
+#define regDAGB1_RDCLI7_BASE_IDX 0
+#define regDAGB1_RDCLI8 0x0088
+#define regDAGB1_RDCLI8_BASE_IDX 0
+#define regDAGB1_RDCLI9 0x0089
+#define regDAGB1_RDCLI9_BASE_IDX 0
+#define regDAGB1_RDCLI10 0x008a
+#define regDAGB1_RDCLI10_BASE_IDX 0
+#define regDAGB1_RDCLI11 0x008b
+#define regDAGB1_RDCLI11_BASE_IDX 0
+#define regDAGB1_RDCLI12 0x008c
+#define regDAGB1_RDCLI12_BASE_IDX 0
+#define regDAGB1_RDCLI13 0x008d
+#define regDAGB1_RDCLI13_BASE_IDX 0
+#define regDAGB1_RDCLI14 0x008e
+#define regDAGB1_RDCLI14_BASE_IDX 0
+#define regDAGB1_RDCLI15 0x008f
+#define regDAGB1_RDCLI15_BASE_IDX 0
+#define regDAGB1_RD_CNTL 0x0090
+#define regDAGB1_RD_CNTL_BASE_IDX 0
+#define regDAGB1_RD_GMI_CNTL 0x0091
+#define regDAGB1_RD_GMI_CNTL_BASE_IDX 0
+#define regDAGB1_RD_ADDR_DAGB 0x0092
+#define regDAGB1_RD_ADDR_DAGB_BASE_IDX 0
+#define regDAGB1_RD_OUTPUT_DAGB_MAX_BURST 0x0093
+#define regDAGB1_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER 0x0094
+#define regDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB1_RD_CGTT_CLK_CTRL 0x0095
+#define regDAGB1_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB1_L1TLB_RD_CGTT_CLK_CTRL 0x0096
+#define regDAGB1_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB1_ATCVM_RD_CGTT_CLK_CTRL 0x0097
+#define regDAGB1_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB1_RD_ADDR_DAGB_MAX_BURST0 0x0098
+#define regDAGB1_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB1_RD_ADDR_DAGB_LAZY_TIMER0 0x0099
+#define regDAGB1_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB1_RD_ADDR_DAGB_MAX_BURST1 0x009a
+#define regDAGB1_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB1_RD_ADDR_DAGB_LAZY_TIMER1 0x009b
+#define regDAGB1_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB1_RD_VC0_CNTL 0x009c
+#define regDAGB1_RD_VC0_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC1_CNTL 0x009d
+#define regDAGB1_RD_VC1_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC2_CNTL 0x009e
+#define regDAGB1_RD_VC2_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC3_CNTL 0x009f
+#define regDAGB1_RD_VC3_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC4_CNTL 0x00a0
+#define regDAGB1_RD_VC4_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC5_CNTL 0x00a1
+#define regDAGB1_RD_VC5_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC6_CNTL 0x00a2
+#define regDAGB1_RD_VC6_CNTL_BASE_IDX 0
+#define regDAGB1_RD_VC7_CNTL 0x00a3
+#define regDAGB1_RD_VC7_CNTL_BASE_IDX 0
+#define regDAGB1_RD_CNTL_MISC 0x00a4
+#define regDAGB1_RD_CNTL_MISC_BASE_IDX 0
+#define regDAGB1_RD_TLB_CREDIT 0x00a5
+#define regDAGB1_RD_TLB_CREDIT_BASE_IDX 0
+#define regDAGB1_RD_RDRET_CREDIT_CNTL 0x00a6
+#define regDAGB1_RD_RDRET_CREDIT_CNTL_BASE_IDX 0
+#define regDAGB1_RD_RDRET_CREDIT_CNTL2 0x00a7
+#define regDAGB1_RD_RDRET_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB1_RDCLI_ASK_PENDING 0x00a8
+#define regDAGB1_RDCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB1_RDCLI_GO_PENDING 0x00a9
+#define regDAGB1_RDCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB1_RDCLI_GBLSEND_PENDING 0x00aa
+#define regDAGB1_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB1_RDCLI_TLB_PENDING 0x00ab
+#define regDAGB1_RDCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB1_RDCLI_OARB_PENDING 0x00ac
+#define regDAGB1_RDCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB1_RDCLI_OSD_PENDING 0x00ad
+#define regDAGB1_RDCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB1_RDCLI_NOALLOC_OVERRIDE 0x00ae
+#define regDAGB1_RDCLI_NOALLOC_OVERRIDE_BASE_IDX 0
+#define regDAGB1_RDCLI_NOALLOC_OVERRIDE_VALUE 0x00af
+#define regDAGB1_RDCLI_NOALLOC_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB1_WRCLI0 0x00b0
+#define regDAGB1_WRCLI0_BASE_IDX 0
+#define regDAGB1_WRCLI1 0x00b1
+#define regDAGB1_WRCLI1_BASE_IDX 0
+#define regDAGB1_WRCLI2 0x00b2
+#define regDAGB1_WRCLI2_BASE_IDX 0
+#define regDAGB1_WRCLI3 0x00b3
+#define regDAGB1_WRCLI3_BASE_IDX 0
+#define regDAGB1_WRCLI4 0x00b4
+#define regDAGB1_WRCLI4_BASE_IDX 0
+#define regDAGB1_WRCLI5 0x00b5
+#define regDAGB1_WRCLI5_BASE_IDX 0
+#define regDAGB1_WRCLI6 0x00b6
+#define regDAGB1_WRCLI6_BASE_IDX 0
+#define regDAGB1_WRCLI7 0x00b7
+#define regDAGB1_WRCLI7_BASE_IDX 0
+#define regDAGB1_WRCLI8 0x00b8
+#define regDAGB1_WRCLI8_BASE_IDX 0
+#define regDAGB1_WRCLI9 0x00b9
+#define regDAGB1_WRCLI9_BASE_IDX 0
+#define regDAGB1_WRCLI10 0x00ba
+#define regDAGB1_WRCLI10_BASE_IDX 0
+#define regDAGB1_WRCLI11 0x00bb
+#define regDAGB1_WRCLI11_BASE_IDX 0
+#define regDAGB1_WRCLI12 0x00bc
+#define regDAGB1_WRCLI12_BASE_IDX 0
+#define regDAGB1_WRCLI13 0x00bd
+#define regDAGB1_WRCLI13_BASE_IDX 0
+#define regDAGB1_WRCLI14 0x00be
+#define regDAGB1_WRCLI14_BASE_IDX 0
+#define regDAGB1_WRCLI15 0x00bf
+#define regDAGB1_WRCLI15_BASE_IDX 0
+#define regDAGB1_WR_CNTL 0x00c0
+#define regDAGB1_WR_CNTL_BASE_IDX 0
+#define regDAGB1_WR_GMI_CNTL 0x00c1
+#define regDAGB1_WR_GMI_CNTL_BASE_IDX 0
+#define regDAGB1_WR_ADDR_DAGB 0x00c2
+#define regDAGB1_WR_ADDR_DAGB_BASE_IDX 0
+#define regDAGB1_WR_OUTPUT_DAGB_MAX_BURST 0x00c3
+#define regDAGB1_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER 0x00c4
+#define regDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB1_WR_CGTT_CLK_CTRL 0x00c5
+#define regDAGB1_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB1_L1TLB_WR_CGTT_CLK_CTRL 0x00c6
+#define regDAGB1_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB1_ATCVM_WR_CGTT_CLK_CTRL 0x00c7
+#define regDAGB1_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB1_WR_ADDR_DAGB_MAX_BURST0 0x00c8
+#define regDAGB1_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB1_WR_ADDR_DAGB_LAZY_TIMER0 0x00c9
+#define regDAGB1_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB1_WR_ADDR_DAGB_MAX_BURST1 0x00ca
+#define regDAGB1_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB1_WR_ADDR_DAGB_LAZY_TIMER1 0x00cb
+#define regDAGB1_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB1_WR_DATA_DAGB 0x00cc
+#define regDAGB1_WR_DATA_DAGB_BASE_IDX 0
+#define regDAGB1_WR_DATA_DAGB_MAX_BURST0 0x00cd
+#define regDAGB1_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB1_WR_DATA_DAGB_LAZY_TIMER0 0x00ce
+#define regDAGB1_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB1_WR_DATA_DAGB_MAX_BURST1 0x00cf
+#define regDAGB1_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB1_WR_DATA_DAGB_LAZY_TIMER1 0x00d0
+#define regDAGB1_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB1_WR_VC0_CNTL 0x00d1
+#define regDAGB1_WR_VC0_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC1_CNTL 0x00d2
+#define regDAGB1_WR_VC1_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC2_CNTL 0x00d3
+#define regDAGB1_WR_VC2_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC3_CNTL 0x00d4
+#define regDAGB1_WR_VC3_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC4_CNTL 0x00d5
+#define regDAGB1_WR_VC4_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC5_CNTL 0x00d6
+#define regDAGB1_WR_VC5_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC6_CNTL 0x00d7
+#define regDAGB1_WR_VC6_CNTL_BASE_IDX 0
+#define regDAGB1_WR_VC7_CNTL 0x00d8
+#define regDAGB1_WR_VC7_CNTL_BASE_IDX 0
+#define regDAGB1_WR_CNTL_MISC 0x00d9
+#define regDAGB1_WR_CNTL_MISC_BASE_IDX 0
+#define regDAGB1_WR_TLB_CREDIT 0x00da
+#define regDAGB1_WR_TLB_CREDIT_BASE_IDX 0
+#define regDAGB1_WR_DATA_CREDIT 0x00db
+#define regDAGB1_WR_DATA_CREDIT_BASE_IDX 0
+#define regDAGB1_WR_MISC_CREDIT 0x00dc
+#define regDAGB1_WR_MISC_CREDIT_BASE_IDX 0
+#define regDAGB1_WR_OSD_CREDIT_CNTL1 0x00dd
+#define regDAGB1_WR_OSD_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB1_WR_OSD_CREDIT_CNTL2 0x00de
+#define regDAGB1_WR_OSD_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1 0x00df
+#define regDAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE 0x00e0
+#define regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 0
+#define regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x00e1
+#define regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB1_WRCLI_ASK_PENDING 0x00e2
+#define regDAGB1_WRCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_GO_PENDING 0x00e3
+#define regDAGB1_WRCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_GBLSEND_PENDING 0x00e4
+#define regDAGB1_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_TLB_PENDING 0x00e5
+#define regDAGB1_WRCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_OARB_PENDING 0x00e6
+#define regDAGB1_WRCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_OSD_PENDING 0x00e7
+#define regDAGB1_WRCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_DBUS_ASK_PENDING 0x00e8
+#define regDAGB1_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define regDAGB1_WRCLI_DBUS_GO_PENDING 0x00e9
+#define regDAGB1_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define regDAGB1_DAGB_DLY 0x00ec
+#define regDAGB1_DAGB_DLY_BASE_IDX 0
+#define regDAGB1_CNTL_MISC 0x00ed
+#define regDAGB1_CNTL_MISC_BASE_IDX 0
+#define regDAGB1_CNTL_MISC2 0x00ee
+#define regDAGB1_CNTL_MISC2_BASE_IDX 0
+#define regDAGB1_FATAL_ERROR_CNTL 0x00ef
+#define regDAGB1_FATAL_ERROR_CNTL_BASE_IDX 0
+#define regDAGB1_FATAL_ERROR_CLEAR 0x00f0
+#define regDAGB1_FATAL_ERROR_CLEAR_BASE_IDX 0
+#define regDAGB1_FATAL_ERROR_STATUS0 0x00f1
+#define regDAGB1_FATAL_ERROR_STATUS0_BASE_IDX 0
+#define regDAGB1_FATAL_ERROR_STATUS1 0x00f2
+#define regDAGB1_FATAL_ERROR_STATUS1_BASE_IDX 0
+#define regDAGB1_FATAL_ERROR_STATUS2 0x00f3
+#define regDAGB1_FATAL_ERROR_STATUS2_BASE_IDX 0
+#define regDAGB1_FATAL_ERROR_STATUS3 0x00f4
+#define regDAGB1_FATAL_ERROR_STATUS3_BASE_IDX 0
+#define regDAGB1_FIFO_EMPTY 0x00f5
+#define regDAGB1_FIFO_EMPTY_BASE_IDX 0
+#define regDAGB1_FIFO_FULL 0x00f6
+#define regDAGB1_FIFO_FULL_BASE_IDX 0
+#define regDAGB1_WR_CREDITS_FULL 0x00f7
+#define regDAGB1_WR_CREDITS_FULL_BASE_IDX 0
+#define regDAGB1_RD_CREDITS_FULL 0x00f8
+#define regDAGB1_RD_CREDITS_FULL_BASE_IDX 0
+#define regDAGB1_PERFCOUNTER_LO 0x00f9
+#define regDAGB1_PERFCOUNTER_LO_BASE_IDX 0
+#define regDAGB1_PERFCOUNTER_HI 0x00fa
+#define regDAGB1_PERFCOUNTER_HI_BASE_IDX 0
+#define regDAGB1_PERFCOUNTER0_CFG 0x00fb
+#define regDAGB1_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regDAGB1_PERFCOUNTER1_CFG 0x00fc
+#define regDAGB1_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regDAGB1_PERFCOUNTER2_CFG 0x00fd
+#define regDAGB1_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regDAGB1_PERFCOUNTER_RSLT_CNTL 0x00fe
+#define regDAGB1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regDAGB1_L1TLB_REG_RW 0x00ff
+#define regDAGB1_L1TLB_REG_RW_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec2
+// base address: 0x60400
+#define regDAGB2_RDCLI0 0x0100
+#define regDAGB2_RDCLI0_BASE_IDX 0
+#define regDAGB2_RDCLI1 0x0101
+#define regDAGB2_RDCLI1_BASE_IDX 0
+#define regDAGB2_RDCLI2 0x0102
+#define regDAGB2_RDCLI2_BASE_IDX 0
+#define regDAGB2_RDCLI3 0x0103
+#define regDAGB2_RDCLI3_BASE_IDX 0
+#define regDAGB2_RDCLI4 0x0104
+#define regDAGB2_RDCLI4_BASE_IDX 0
+#define regDAGB2_RDCLI5 0x0105
+#define regDAGB2_RDCLI5_BASE_IDX 0
+#define regDAGB2_RDCLI6 0x0106
+#define regDAGB2_RDCLI6_BASE_IDX 0
+#define regDAGB2_RDCLI7 0x0107
+#define regDAGB2_RDCLI7_BASE_IDX 0
+#define regDAGB2_RDCLI8 0x0108
+#define regDAGB2_RDCLI8_BASE_IDX 0
+#define regDAGB2_RDCLI9 0x0109
+#define regDAGB2_RDCLI9_BASE_IDX 0
+#define regDAGB2_RDCLI10 0x010a
+#define regDAGB2_RDCLI10_BASE_IDX 0
+#define regDAGB2_RDCLI11 0x010b
+#define regDAGB2_RDCLI11_BASE_IDX 0
+#define regDAGB2_RDCLI12 0x010c
+#define regDAGB2_RDCLI12_BASE_IDX 0
+#define regDAGB2_RDCLI13 0x010d
+#define regDAGB2_RDCLI13_BASE_IDX 0
+#define regDAGB2_RDCLI14 0x010e
+#define regDAGB2_RDCLI14_BASE_IDX 0
+#define regDAGB2_RDCLI15 0x010f
+#define regDAGB2_RDCLI15_BASE_IDX 0
+#define regDAGB2_RD_CNTL 0x0110
+#define regDAGB2_RD_CNTL_BASE_IDX 0
+#define regDAGB2_RD_GMI_CNTL 0x0111
+#define regDAGB2_RD_GMI_CNTL_BASE_IDX 0
+#define regDAGB2_RD_ADDR_DAGB 0x0112
+#define regDAGB2_RD_ADDR_DAGB_BASE_IDX 0
+#define regDAGB2_RD_OUTPUT_DAGB_MAX_BURST 0x0113
+#define regDAGB2_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER 0x0114
+#define regDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB2_RD_CGTT_CLK_CTRL 0x0115
+#define regDAGB2_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB2_L1TLB_RD_CGTT_CLK_CTRL 0x0116
+#define regDAGB2_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB2_ATCVM_RD_CGTT_CLK_CTRL 0x0117
+#define regDAGB2_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB2_RD_ADDR_DAGB_MAX_BURST0 0x0118
+#define regDAGB2_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB2_RD_ADDR_DAGB_LAZY_TIMER0 0x0119
+#define regDAGB2_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB2_RD_ADDR_DAGB_MAX_BURST1 0x011a
+#define regDAGB2_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB2_RD_ADDR_DAGB_LAZY_TIMER1 0x011b
+#define regDAGB2_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB2_RD_VC0_CNTL 0x011c
+#define regDAGB2_RD_VC0_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC1_CNTL 0x011d
+#define regDAGB2_RD_VC1_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC2_CNTL 0x011e
+#define regDAGB2_RD_VC2_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC3_CNTL 0x011f
+#define regDAGB2_RD_VC3_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC4_CNTL 0x0120
+#define regDAGB2_RD_VC4_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC5_CNTL 0x0121
+#define regDAGB2_RD_VC5_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC6_CNTL 0x0122
+#define regDAGB2_RD_VC6_CNTL_BASE_IDX 0
+#define regDAGB2_RD_VC7_CNTL 0x0123
+#define regDAGB2_RD_VC7_CNTL_BASE_IDX 0
+#define regDAGB2_RD_CNTL_MISC 0x0124
+#define regDAGB2_RD_CNTL_MISC_BASE_IDX 0
+#define regDAGB2_RD_TLB_CREDIT 0x0125
+#define regDAGB2_RD_TLB_CREDIT_BASE_IDX 0
+#define regDAGB2_RD_RDRET_CREDIT_CNTL 0x0126
+#define regDAGB2_RD_RDRET_CREDIT_CNTL_BASE_IDX 0
+#define regDAGB2_RD_RDRET_CREDIT_CNTL2 0x0127
+#define regDAGB2_RD_RDRET_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB2_RDCLI_ASK_PENDING 0x0128
+#define regDAGB2_RDCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB2_RDCLI_GO_PENDING 0x0129
+#define regDAGB2_RDCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB2_RDCLI_GBLSEND_PENDING 0x012a
+#define regDAGB2_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB2_RDCLI_TLB_PENDING 0x012b
+#define regDAGB2_RDCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB2_RDCLI_OARB_PENDING 0x012c
+#define regDAGB2_RDCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB2_RDCLI_OSD_PENDING 0x012d
+#define regDAGB2_RDCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI0 0x0130
+#define regDAGB2_WRCLI0_BASE_IDX 0
+#define regDAGB2_WRCLI1 0x0131
+#define regDAGB2_WRCLI1_BASE_IDX 0
+#define regDAGB2_WRCLI2 0x0132
+#define regDAGB2_WRCLI2_BASE_IDX 0
+#define regDAGB2_WRCLI3 0x0133
+#define regDAGB2_WRCLI3_BASE_IDX 0
+#define regDAGB2_WRCLI4 0x0134
+#define regDAGB2_WRCLI4_BASE_IDX 0
+#define regDAGB2_WRCLI5 0x0135
+#define regDAGB2_WRCLI5_BASE_IDX 0
+#define regDAGB2_WRCLI6 0x0136
+#define regDAGB2_WRCLI6_BASE_IDX 0
+#define regDAGB2_WRCLI7 0x0137
+#define regDAGB2_WRCLI7_BASE_IDX 0
+#define regDAGB2_WRCLI8 0x0138
+#define regDAGB2_WRCLI8_BASE_IDX 0
+#define regDAGB2_WRCLI9 0x0139
+#define regDAGB2_WRCLI9_BASE_IDX 0
+#define regDAGB2_WRCLI10 0x013a
+#define regDAGB2_WRCLI10_BASE_IDX 0
+#define regDAGB2_WRCLI11 0x013b
+#define regDAGB2_WRCLI11_BASE_IDX 0
+#define regDAGB2_WRCLI12 0x013c
+#define regDAGB2_WRCLI12_BASE_IDX 0
+#define regDAGB2_WRCLI13 0x013d
+#define regDAGB2_WRCLI13_BASE_IDX 0
+#define regDAGB2_WRCLI14 0x013e
+#define regDAGB2_WRCLI14_BASE_IDX 0
+#define regDAGB2_WRCLI15 0x013f
+#define regDAGB2_WRCLI15_BASE_IDX 0
+#define regDAGB2_WR_CNTL 0x0140
+#define regDAGB2_WR_CNTL_BASE_IDX 0
+#define regDAGB2_WR_GMI_CNTL 0x0141
+#define regDAGB2_WR_GMI_CNTL_BASE_IDX 0
+#define regDAGB2_WR_ADDR_DAGB 0x0142
+#define regDAGB2_WR_ADDR_DAGB_BASE_IDX 0
+#define regDAGB2_WR_OUTPUT_DAGB_MAX_BURST 0x0143
+#define regDAGB2_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER 0x0144
+#define regDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB2_WR_CGTT_CLK_CTRL 0x0145
+#define regDAGB2_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB2_L1TLB_WR_CGTT_CLK_CTRL 0x0146
+#define regDAGB2_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB2_ATCVM_WR_CGTT_CLK_CTRL 0x0147
+#define regDAGB2_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB2_WR_ADDR_DAGB_MAX_BURST0 0x0148
+#define regDAGB2_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB2_WR_ADDR_DAGB_LAZY_TIMER0 0x0149
+#define regDAGB2_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB2_WR_ADDR_DAGB_MAX_BURST1 0x014a
+#define regDAGB2_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB2_WR_ADDR_DAGB_LAZY_TIMER1 0x014b
+#define regDAGB2_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB2_WR_DATA_DAGB 0x014c
+#define regDAGB2_WR_DATA_DAGB_BASE_IDX 0
+#define regDAGB2_WR_DATA_DAGB_MAX_BURST0 0x014d
+#define regDAGB2_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB2_WR_DATA_DAGB_LAZY_TIMER0 0x014e
+#define regDAGB2_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB2_WR_DATA_DAGB_MAX_BURST1 0x014f
+#define regDAGB2_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB2_WR_DATA_DAGB_LAZY_TIMER1 0x0150
+#define regDAGB2_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB2_WR_VC0_CNTL 0x0151
+#define regDAGB2_WR_VC0_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC1_CNTL 0x0152
+#define regDAGB2_WR_VC1_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC2_CNTL 0x0153
+#define regDAGB2_WR_VC2_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC3_CNTL 0x0154
+#define regDAGB2_WR_VC3_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC4_CNTL 0x0155
+#define regDAGB2_WR_VC4_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC5_CNTL 0x0156
+#define regDAGB2_WR_VC5_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC6_CNTL 0x0157
+#define regDAGB2_WR_VC6_CNTL_BASE_IDX 0
+#define regDAGB2_WR_VC7_CNTL 0x0158
+#define regDAGB2_WR_VC7_CNTL_BASE_IDX 0
+#define regDAGB2_WR_CNTL_MISC 0x0159
+#define regDAGB2_WR_CNTL_MISC_BASE_IDX 0
+#define regDAGB2_WR_TLB_CREDIT 0x015a
+#define regDAGB2_WR_TLB_CREDIT_BASE_IDX 0
+#define regDAGB2_WR_DATA_CREDIT 0x015b
+#define regDAGB2_WR_DATA_CREDIT_BASE_IDX 0
+#define regDAGB2_WR_MISC_CREDIT 0x015c
+#define regDAGB2_WR_MISC_CREDIT_BASE_IDX 0
+#define regDAGB2_WR_OSD_CREDIT_CNTL1 0x015d
+#define regDAGB2_WR_OSD_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB2_WR_OSD_CREDIT_CNTL2 0x015e
+#define regDAGB2_WR_OSD_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1 0x015f
+#define regDAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB2_WRCLI_GPU_SNOOP_OVERRIDE 0x0160
+#define regDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 0
+#define regDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x0161
+#define regDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB2_WRCLI_ASK_PENDING 0x0162
+#define regDAGB2_WRCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_GO_PENDING 0x0163
+#define regDAGB2_WRCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_GBLSEND_PENDING 0x0164
+#define regDAGB2_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_TLB_PENDING 0x0165
+#define regDAGB2_WRCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_OARB_PENDING 0x0166
+#define regDAGB2_WRCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_OSD_PENDING 0x0167
+#define regDAGB2_WRCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_DBUS_ASK_PENDING 0x0168
+#define regDAGB2_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define regDAGB2_WRCLI_DBUS_GO_PENDING 0x0169
+#define regDAGB2_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define regDAGB2_DAGB_DLY 0x016c
+#define regDAGB2_DAGB_DLY_BASE_IDX 0
+#define regDAGB2_CNTL_MISC 0x016d
+#define regDAGB2_CNTL_MISC_BASE_IDX 0
+#define regDAGB2_CNTL_MISC2 0x016e
+#define regDAGB2_CNTL_MISC2_BASE_IDX 0
+#define regDAGB2_FATAL_ERROR_CNTL 0x016f
+#define regDAGB2_FATAL_ERROR_CNTL_BASE_IDX 0
+#define regDAGB2_FATAL_ERROR_CLEAR 0x0170
+#define regDAGB2_FATAL_ERROR_CLEAR_BASE_IDX 0
+#define regDAGB2_FATAL_ERROR_STATUS0 0x0171
+#define regDAGB2_FATAL_ERROR_STATUS0_BASE_IDX 0
+#define regDAGB2_FATAL_ERROR_STATUS1 0x0172
+#define regDAGB2_FATAL_ERROR_STATUS1_BASE_IDX 0
+#define regDAGB2_FATAL_ERROR_STATUS2 0x0173
+#define regDAGB2_FATAL_ERROR_STATUS2_BASE_IDX 0
+#define regDAGB2_FATAL_ERROR_STATUS3 0x0174
+#define regDAGB2_FATAL_ERROR_STATUS3_BASE_IDX 0
+#define regDAGB2_FIFO_EMPTY 0x0175
+#define regDAGB2_FIFO_EMPTY_BASE_IDX 0
+#define regDAGB2_FIFO_FULL 0x0176
+#define regDAGB2_FIFO_FULL_BASE_IDX 0
+#define regDAGB2_WR_CREDITS_FULL 0x0177
+#define regDAGB2_WR_CREDITS_FULL_BASE_IDX 0
+#define regDAGB2_RD_CREDITS_FULL 0x0178
+#define regDAGB2_RD_CREDITS_FULL_BASE_IDX 0
+#define regDAGB2_PERFCOUNTER_LO 0x0179
+#define regDAGB2_PERFCOUNTER_LO_BASE_IDX 0
+#define regDAGB2_PERFCOUNTER_HI 0x017a
+#define regDAGB2_PERFCOUNTER_HI_BASE_IDX 0
+#define regDAGB2_PERFCOUNTER0_CFG 0x017b
+#define regDAGB2_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regDAGB2_PERFCOUNTER1_CFG 0x017c
+#define regDAGB2_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regDAGB2_PERFCOUNTER2_CFG 0x017d
+#define regDAGB2_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regDAGB2_PERFCOUNTER_RSLT_CNTL 0x017e
+#define regDAGB2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regDAGB2_L1TLB_REG_RW 0x017f
+#define regDAGB2_L1TLB_REG_RW_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec3
+// base address: 0x60600
+#define regDAGB3_RDCLI0 0x0180
+#define regDAGB3_RDCLI0_BASE_IDX 0
+#define regDAGB3_RDCLI1 0x0181
+#define regDAGB3_RDCLI1_BASE_IDX 0
+#define regDAGB3_RDCLI2 0x0182
+#define regDAGB3_RDCLI2_BASE_IDX 0
+#define regDAGB3_RDCLI3 0x0183
+#define regDAGB3_RDCLI3_BASE_IDX 0
+#define regDAGB3_RDCLI4 0x0184
+#define regDAGB3_RDCLI4_BASE_IDX 0
+#define regDAGB3_RDCLI5 0x0185
+#define regDAGB3_RDCLI5_BASE_IDX 0
+#define regDAGB3_RDCLI6 0x0186
+#define regDAGB3_RDCLI6_BASE_IDX 0
+#define regDAGB3_RDCLI7 0x0187
+#define regDAGB3_RDCLI7_BASE_IDX 0
+#define regDAGB3_RDCLI8 0x0188
+#define regDAGB3_RDCLI8_BASE_IDX 0
+#define regDAGB3_RDCLI9 0x0189
+#define regDAGB3_RDCLI9_BASE_IDX 0
+#define regDAGB3_RDCLI10 0x018a
+#define regDAGB3_RDCLI10_BASE_IDX 0
+#define regDAGB3_RDCLI11 0x018b
+#define regDAGB3_RDCLI11_BASE_IDX 0
+#define regDAGB3_RDCLI12 0x018c
+#define regDAGB3_RDCLI12_BASE_IDX 0
+#define regDAGB3_RDCLI13 0x018d
+#define regDAGB3_RDCLI13_BASE_IDX 0
+#define regDAGB3_RDCLI14 0x018e
+#define regDAGB3_RDCLI14_BASE_IDX 0
+#define regDAGB3_RDCLI15 0x018f
+#define regDAGB3_RDCLI15_BASE_IDX 0
+#define regDAGB3_RD_CNTL 0x0190
+#define regDAGB3_RD_CNTL_BASE_IDX 0
+#define regDAGB3_RD_GMI_CNTL 0x0191
+#define regDAGB3_RD_GMI_CNTL_BASE_IDX 0
+#define regDAGB3_RD_ADDR_DAGB 0x0192
+#define regDAGB3_RD_ADDR_DAGB_BASE_IDX 0
+#define regDAGB3_RD_OUTPUT_DAGB_MAX_BURST 0x0193
+#define regDAGB3_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER 0x0194
+#define regDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB3_RD_CGTT_CLK_CTRL 0x0195
+#define regDAGB3_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB3_L1TLB_RD_CGTT_CLK_CTRL 0x0196
+#define regDAGB3_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB3_ATCVM_RD_CGTT_CLK_CTRL 0x0197
+#define regDAGB3_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB3_RD_ADDR_DAGB_MAX_BURST0 0x0198
+#define regDAGB3_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB3_RD_ADDR_DAGB_LAZY_TIMER0 0x0199
+#define regDAGB3_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB3_RD_ADDR_DAGB_MAX_BURST1 0x019a
+#define regDAGB3_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB3_RD_ADDR_DAGB_LAZY_TIMER1 0x019b
+#define regDAGB3_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB3_RD_VC0_CNTL 0x019c
+#define regDAGB3_RD_VC0_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC1_CNTL 0x019d
+#define regDAGB3_RD_VC1_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC2_CNTL 0x019e
+#define regDAGB3_RD_VC2_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC3_CNTL 0x019f
+#define regDAGB3_RD_VC3_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC4_CNTL 0x01a0
+#define regDAGB3_RD_VC4_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC5_CNTL 0x01a1
+#define regDAGB3_RD_VC5_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC6_CNTL 0x01a2
+#define regDAGB3_RD_VC6_CNTL_BASE_IDX 0
+#define regDAGB3_RD_VC7_CNTL 0x01a3
+#define regDAGB3_RD_VC7_CNTL_BASE_IDX 0
+#define regDAGB3_RD_CNTL_MISC 0x01a4
+#define regDAGB3_RD_CNTL_MISC_BASE_IDX 0
+#define regDAGB3_RD_TLB_CREDIT 0x01a5
+#define regDAGB3_RD_TLB_CREDIT_BASE_IDX 0
+#define regDAGB3_RD_RDRET_CREDIT_CNTL 0x01a6
+#define regDAGB3_RD_RDRET_CREDIT_CNTL_BASE_IDX 0
+#define regDAGB3_RD_RDRET_CREDIT_CNTL2 0x01a7
+#define regDAGB3_RD_RDRET_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB3_RDCLI_ASK_PENDING 0x01a8
+#define regDAGB3_RDCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB3_RDCLI_GO_PENDING 0x01a9
+#define regDAGB3_RDCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB3_RDCLI_GBLSEND_PENDING 0x01aa
+#define regDAGB3_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB3_RDCLI_TLB_PENDING 0x01ab
+#define regDAGB3_RDCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB3_RDCLI_OARB_PENDING 0x01ac
+#define regDAGB3_RDCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB3_RDCLI_OSD_PENDING 0x01ad
+#define regDAGB3_RDCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI0 0x01b0
+#define regDAGB3_WRCLI0_BASE_IDX 0
+#define regDAGB3_WRCLI1 0x01b1
+#define regDAGB3_WRCLI1_BASE_IDX 0
+#define regDAGB3_WRCLI2 0x01b2
+#define regDAGB3_WRCLI2_BASE_IDX 0
+#define regDAGB3_WRCLI3 0x01b3
+#define regDAGB3_WRCLI3_BASE_IDX 0
+#define regDAGB3_WRCLI4 0x01b4
+#define regDAGB3_WRCLI4_BASE_IDX 0
+#define regDAGB3_WRCLI5 0x01b5
+#define regDAGB3_WRCLI5_BASE_IDX 0
+#define regDAGB3_WRCLI6 0x01b6
+#define regDAGB3_WRCLI6_BASE_IDX 0
+#define regDAGB3_WRCLI7 0x01b7
+#define regDAGB3_WRCLI7_BASE_IDX 0
+#define regDAGB3_WRCLI8 0x01b8
+#define regDAGB3_WRCLI8_BASE_IDX 0
+#define regDAGB3_WRCLI9 0x01b9
+#define regDAGB3_WRCLI9_BASE_IDX 0
+#define regDAGB3_WRCLI10 0x01ba
+#define regDAGB3_WRCLI10_BASE_IDX 0
+#define regDAGB3_WRCLI11 0x01bb
+#define regDAGB3_WRCLI11_BASE_IDX 0
+#define regDAGB3_WRCLI12 0x01bc
+#define regDAGB3_WRCLI12_BASE_IDX 0
+#define regDAGB3_WRCLI13 0x01bd
+#define regDAGB3_WRCLI13_BASE_IDX 0
+#define regDAGB3_WRCLI14 0x01be
+#define regDAGB3_WRCLI14_BASE_IDX 0
+#define regDAGB3_WRCLI15 0x01bf
+#define regDAGB3_WRCLI15_BASE_IDX 0
+#define regDAGB3_WR_CNTL 0x01c0
+#define regDAGB3_WR_CNTL_BASE_IDX 0
+#define regDAGB3_WR_GMI_CNTL 0x01c1
+#define regDAGB3_WR_GMI_CNTL_BASE_IDX 0
+#define regDAGB3_WR_ADDR_DAGB 0x01c2
+#define regDAGB3_WR_ADDR_DAGB_BASE_IDX 0
+#define regDAGB3_WR_OUTPUT_DAGB_MAX_BURST 0x01c3
+#define regDAGB3_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER 0x01c4
+#define regDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB3_WR_CGTT_CLK_CTRL 0x01c5
+#define regDAGB3_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB3_L1TLB_WR_CGTT_CLK_CTRL 0x01c6
+#define regDAGB3_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB3_ATCVM_WR_CGTT_CLK_CTRL 0x01c7
+#define regDAGB3_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB3_WR_ADDR_DAGB_MAX_BURST0 0x01c8
+#define regDAGB3_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB3_WR_ADDR_DAGB_LAZY_TIMER0 0x01c9
+#define regDAGB3_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB3_WR_ADDR_DAGB_MAX_BURST1 0x01ca
+#define regDAGB3_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB3_WR_ADDR_DAGB_LAZY_TIMER1 0x01cb
+#define regDAGB3_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB3_WR_DATA_DAGB 0x01cc
+#define regDAGB3_WR_DATA_DAGB_BASE_IDX 0
+#define regDAGB3_WR_DATA_DAGB_MAX_BURST0 0x01cd
+#define regDAGB3_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB3_WR_DATA_DAGB_LAZY_TIMER0 0x01ce
+#define regDAGB3_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB3_WR_DATA_DAGB_MAX_BURST1 0x01cf
+#define regDAGB3_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB3_WR_DATA_DAGB_LAZY_TIMER1 0x01d0
+#define regDAGB3_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB3_WR_VC0_CNTL 0x01d1
+#define regDAGB3_WR_VC0_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC1_CNTL 0x01d2
+#define regDAGB3_WR_VC1_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC2_CNTL 0x01d3
+#define regDAGB3_WR_VC2_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC3_CNTL 0x01d4
+#define regDAGB3_WR_VC3_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC4_CNTL 0x01d5
+#define regDAGB3_WR_VC4_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC5_CNTL 0x01d6
+#define regDAGB3_WR_VC5_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC6_CNTL 0x01d7
+#define regDAGB3_WR_VC6_CNTL_BASE_IDX 0
+#define regDAGB3_WR_VC7_CNTL 0x01d8
+#define regDAGB3_WR_VC7_CNTL_BASE_IDX 0
+#define regDAGB3_WR_CNTL_MISC 0x01d9
+#define regDAGB3_WR_CNTL_MISC_BASE_IDX 0
+#define regDAGB3_WR_TLB_CREDIT 0x01da
+#define regDAGB3_WR_TLB_CREDIT_BASE_IDX 0
+#define regDAGB3_WR_DATA_CREDIT 0x01db
+#define regDAGB3_WR_DATA_CREDIT_BASE_IDX 0
+#define regDAGB3_WR_MISC_CREDIT 0x01dc
+#define regDAGB3_WR_MISC_CREDIT_BASE_IDX 0
+#define regDAGB3_WR_OSD_CREDIT_CNTL1 0x01dd
+#define regDAGB3_WR_OSD_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB3_WR_OSD_CREDIT_CNTL2 0x01de
+#define regDAGB3_WR_OSD_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1 0x01df
+#define regDAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB3_WRCLI_GPU_SNOOP_OVERRIDE 0x01e0
+#define regDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 0
+#define regDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x01e1
+#define regDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB3_WRCLI_ASK_PENDING 0x01e2
+#define regDAGB3_WRCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_GO_PENDING 0x01e3
+#define regDAGB3_WRCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_GBLSEND_PENDING 0x01e4
+#define regDAGB3_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_TLB_PENDING 0x01e5
+#define regDAGB3_WRCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_OARB_PENDING 0x01e6
+#define regDAGB3_WRCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_OSD_PENDING 0x01e7
+#define regDAGB3_WRCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_DBUS_ASK_PENDING 0x01e8
+#define regDAGB3_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define regDAGB3_WRCLI_DBUS_GO_PENDING 0x01e9
+#define regDAGB3_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define regDAGB3_DAGB_DLY 0x01ec
+#define regDAGB3_DAGB_DLY_BASE_IDX 0
+#define regDAGB3_CNTL_MISC 0x01ed
+#define regDAGB3_CNTL_MISC_BASE_IDX 0
+#define regDAGB3_CNTL_MISC2 0x01ee
+#define regDAGB3_CNTL_MISC2_BASE_IDX 0
+#define regDAGB3_FATAL_ERROR_CNTL 0x01ef
+#define regDAGB3_FATAL_ERROR_CNTL_BASE_IDX 0
+#define regDAGB3_FATAL_ERROR_CLEAR 0x01f0
+#define regDAGB3_FATAL_ERROR_CLEAR_BASE_IDX 0
+#define regDAGB3_FATAL_ERROR_STATUS0 0x01f1
+#define regDAGB3_FATAL_ERROR_STATUS0_BASE_IDX 0
+#define regDAGB3_FATAL_ERROR_STATUS1 0x01f2
+#define regDAGB3_FATAL_ERROR_STATUS1_BASE_IDX 0
+#define regDAGB3_FATAL_ERROR_STATUS2 0x01f3
+#define regDAGB3_FATAL_ERROR_STATUS2_BASE_IDX 0
+#define regDAGB3_FATAL_ERROR_STATUS3 0x01f4
+#define regDAGB3_FATAL_ERROR_STATUS3_BASE_IDX 0
+#define regDAGB3_FIFO_EMPTY 0x01f5
+#define regDAGB3_FIFO_EMPTY_BASE_IDX 0
+#define regDAGB3_FIFO_FULL 0x01f6
+#define regDAGB3_FIFO_FULL_BASE_IDX 0
+#define regDAGB3_WR_CREDITS_FULL 0x01f7
+#define regDAGB3_WR_CREDITS_FULL_BASE_IDX 0
+#define regDAGB3_RD_CREDITS_FULL 0x01f8
+#define regDAGB3_RD_CREDITS_FULL_BASE_IDX 0
+#define regDAGB3_PERFCOUNTER_LO 0x01f9
+#define regDAGB3_PERFCOUNTER_LO_BASE_IDX 0
+#define regDAGB3_PERFCOUNTER_HI 0x01fa
+#define regDAGB3_PERFCOUNTER_HI_BASE_IDX 0
+#define regDAGB3_PERFCOUNTER0_CFG 0x01fb
+#define regDAGB3_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regDAGB3_PERFCOUNTER1_CFG 0x01fc
+#define regDAGB3_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regDAGB3_PERFCOUNTER2_CFG 0x01fd
+#define regDAGB3_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regDAGB3_PERFCOUNTER_RSLT_CNTL 0x01fe
+#define regDAGB3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regDAGB3_L1TLB_REG_RW 0x01ff
+#define regDAGB3_L1TLB_REG_RW_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec4
+// base address: 0x60800
+#define regDAGB4_RDCLI0 0x0200
+#define regDAGB4_RDCLI0_BASE_IDX 0
+#define regDAGB4_RDCLI1 0x0201
+#define regDAGB4_RDCLI1_BASE_IDX 0
+#define regDAGB4_RDCLI2 0x0202
+#define regDAGB4_RDCLI2_BASE_IDX 0
+#define regDAGB4_RDCLI3 0x0203
+#define regDAGB4_RDCLI3_BASE_IDX 0
+#define regDAGB4_RDCLI4 0x0204
+#define regDAGB4_RDCLI4_BASE_IDX 0
+#define regDAGB4_RDCLI5 0x0205
+#define regDAGB4_RDCLI5_BASE_IDX 0
+#define regDAGB4_RDCLI6 0x0206
+#define regDAGB4_RDCLI6_BASE_IDX 0
+#define regDAGB4_RDCLI7 0x0207
+#define regDAGB4_RDCLI7_BASE_IDX 0
+#define regDAGB4_RDCLI8 0x0208
+#define regDAGB4_RDCLI8_BASE_IDX 0
+#define regDAGB4_RDCLI9 0x0209
+#define regDAGB4_RDCLI9_BASE_IDX 0
+#define regDAGB4_RDCLI10 0x020a
+#define regDAGB4_RDCLI10_BASE_IDX 0
+#define regDAGB4_RDCLI11 0x020b
+#define regDAGB4_RDCLI11_BASE_IDX 0
+#define regDAGB4_RDCLI12 0x020c
+#define regDAGB4_RDCLI12_BASE_IDX 0
+#define regDAGB4_RDCLI13 0x020d
+#define regDAGB4_RDCLI13_BASE_IDX 0
+#define regDAGB4_RDCLI14 0x020e
+#define regDAGB4_RDCLI14_BASE_IDX 0
+#define regDAGB4_RDCLI15 0x020f
+#define regDAGB4_RDCLI15_BASE_IDX 0
+#define regDAGB4_RD_CNTL 0x0210
+#define regDAGB4_RD_CNTL_BASE_IDX 0
+#define regDAGB4_RD_GMI_CNTL 0x0211
+#define regDAGB4_RD_GMI_CNTL_BASE_IDX 0
+#define regDAGB4_RD_ADDR_DAGB 0x0212
+#define regDAGB4_RD_ADDR_DAGB_BASE_IDX 0
+#define regDAGB4_RD_OUTPUT_DAGB_MAX_BURST 0x0213
+#define regDAGB4_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER 0x0214
+#define regDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB4_RD_CGTT_CLK_CTRL 0x0215
+#define regDAGB4_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB4_L1TLB_RD_CGTT_CLK_CTRL 0x0216
+#define regDAGB4_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB4_ATCVM_RD_CGTT_CLK_CTRL 0x0217
+#define regDAGB4_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB4_RD_ADDR_DAGB_MAX_BURST0 0x0218
+#define regDAGB4_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB4_RD_ADDR_DAGB_LAZY_TIMER0 0x0219
+#define regDAGB4_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB4_RD_ADDR_DAGB_MAX_BURST1 0x021a
+#define regDAGB4_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB4_RD_ADDR_DAGB_LAZY_TIMER1 0x021b
+#define regDAGB4_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB4_RD_VC0_CNTL 0x021c
+#define regDAGB4_RD_VC0_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC1_CNTL 0x021d
+#define regDAGB4_RD_VC1_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC2_CNTL 0x021e
+#define regDAGB4_RD_VC2_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC3_CNTL 0x021f
+#define regDAGB4_RD_VC3_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC4_CNTL 0x0220
+#define regDAGB4_RD_VC4_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC5_CNTL 0x0221
+#define regDAGB4_RD_VC5_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC6_CNTL 0x0222
+#define regDAGB4_RD_VC6_CNTL_BASE_IDX 0
+#define regDAGB4_RD_VC7_CNTL 0x0223
+#define regDAGB4_RD_VC7_CNTL_BASE_IDX 0
+#define regDAGB4_RD_CNTL_MISC 0x0224
+#define regDAGB4_RD_CNTL_MISC_BASE_IDX 0
+#define regDAGB4_RD_TLB_CREDIT 0x0225
+#define regDAGB4_RD_TLB_CREDIT_BASE_IDX 0
+#define regDAGB4_RD_RDRET_CREDIT_CNTL 0x0226
+#define regDAGB4_RD_RDRET_CREDIT_CNTL_BASE_IDX 0
+#define regDAGB4_RD_RDRET_CREDIT_CNTL2 0x0227
+#define regDAGB4_RD_RDRET_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB4_RDCLI_ASK_PENDING 0x0228
+#define regDAGB4_RDCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB4_RDCLI_GO_PENDING 0x0229
+#define regDAGB4_RDCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB4_RDCLI_GBLSEND_PENDING 0x022a
+#define regDAGB4_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB4_RDCLI_TLB_PENDING 0x022b
+#define regDAGB4_RDCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB4_RDCLI_OARB_PENDING 0x022c
+#define regDAGB4_RDCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB4_RDCLI_OSD_PENDING 0x022d
+#define regDAGB4_RDCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI0 0x0230
+#define regDAGB4_WRCLI0_BASE_IDX 0
+#define regDAGB4_WRCLI1 0x0231
+#define regDAGB4_WRCLI1_BASE_IDX 0
+#define regDAGB4_WRCLI2 0x0232
+#define regDAGB4_WRCLI2_BASE_IDX 0
+#define regDAGB4_WRCLI3 0x0233
+#define regDAGB4_WRCLI3_BASE_IDX 0
+#define regDAGB4_WRCLI4 0x0234
+#define regDAGB4_WRCLI4_BASE_IDX 0
+#define regDAGB4_WRCLI5 0x0235
+#define regDAGB4_WRCLI5_BASE_IDX 0
+#define regDAGB4_WRCLI6 0x0236
+#define regDAGB4_WRCLI6_BASE_IDX 0
+#define regDAGB4_WRCLI7 0x0237
+#define regDAGB4_WRCLI7_BASE_IDX 0
+#define regDAGB4_WRCLI8 0x0238
+#define regDAGB4_WRCLI8_BASE_IDX 0
+#define regDAGB4_WRCLI9 0x0239
+#define regDAGB4_WRCLI9_BASE_IDX 0
+#define regDAGB4_WRCLI10 0x023a
+#define regDAGB4_WRCLI10_BASE_IDX 0
+#define regDAGB4_WRCLI11 0x023b
+#define regDAGB4_WRCLI11_BASE_IDX 0
+#define regDAGB4_WRCLI12 0x023c
+#define regDAGB4_WRCLI12_BASE_IDX 0
+#define regDAGB4_WRCLI13 0x023d
+#define regDAGB4_WRCLI13_BASE_IDX 0
+#define regDAGB4_WRCLI14 0x023e
+#define regDAGB4_WRCLI14_BASE_IDX 0
+#define regDAGB4_WRCLI15 0x023f
+#define regDAGB4_WRCLI15_BASE_IDX 0
+#define regDAGB4_WR_CNTL 0x0240
+#define regDAGB4_WR_CNTL_BASE_IDX 0
+#define regDAGB4_WR_GMI_CNTL 0x0241
+#define regDAGB4_WR_GMI_CNTL_BASE_IDX 0
+#define regDAGB4_WR_ADDR_DAGB 0x0242
+#define regDAGB4_WR_ADDR_DAGB_BASE_IDX 0
+#define regDAGB4_WR_OUTPUT_DAGB_MAX_BURST 0x0243
+#define regDAGB4_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define regDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER 0x0244
+#define regDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define regDAGB4_WR_CGTT_CLK_CTRL 0x0245
+#define regDAGB4_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB4_L1TLB_WR_CGTT_CLK_CTRL 0x0246
+#define regDAGB4_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB4_ATCVM_WR_CGTT_CLK_CTRL 0x0247
+#define regDAGB4_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define regDAGB4_WR_ADDR_DAGB_MAX_BURST0 0x0248
+#define regDAGB4_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB4_WR_ADDR_DAGB_LAZY_TIMER0 0x0249
+#define regDAGB4_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB4_WR_ADDR_DAGB_MAX_BURST1 0x024a
+#define regDAGB4_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB4_WR_ADDR_DAGB_LAZY_TIMER1 0x024b
+#define regDAGB4_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB4_WR_DATA_DAGB 0x024c
+#define regDAGB4_WR_DATA_DAGB_BASE_IDX 0
+#define regDAGB4_WR_DATA_DAGB_MAX_BURST0 0x024d
+#define regDAGB4_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define regDAGB4_WR_DATA_DAGB_LAZY_TIMER0 0x024e
+#define regDAGB4_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define regDAGB4_WR_DATA_DAGB_MAX_BURST1 0x024f
+#define regDAGB4_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define regDAGB4_WR_DATA_DAGB_LAZY_TIMER1 0x0250
+#define regDAGB4_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define regDAGB4_WR_VC0_CNTL 0x0251
+#define regDAGB4_WR_VC0_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC1_CNTL 0x0252
+#define regDAGB4_WR_VC1_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC2_CNTL 0x0253
+#define regDAGB4_WR_VC2_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC3_CNTL 0x0254
+#define regDAGB4_WR_VC3_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC4_CNTL 0x0255
+#define regDAGB4_WR_VC4_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC5_CNTL 0x0256
+#define regDAGB4_WR_VC5_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC6_CNTL 0x0257
+#define regDAGB4_WR_VC6_CNTL_BASE_IDX 0
+#define regDAGB4_WR_VC7_CNTL 0x0258
+#define regDAGB4_WR_VC7_CNTL_BASE_IDX 0
+#define regDAGB4_WR_CNTL_MISC 0x0259
+#define regDAGB4_WR_CNTL_MISC_BASE_IDX 0
+#define regDAGB4_WR_TLB_CREDIT 0x025a
+#define regDAGB4_WR_TLB_CREDIT_BASE_IDX 0
+#define regDAGB4_WR_DATA_CREDIT 0x025b
+#define regDAGB4_WR_DATA_CREDIT_BASE_IDX 0
+#define regDAGB4_WR_MISC_CREDIT 0x025c
+#define regDAGB4_WR_MISC_CREDIT_BASE_IDX 0
+#define regDAGB4_WR_OSD_CREDIT_CNTL1 0x025d
+#define regDAGB4_WR_OSD_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB4_WR_OSD_CREDIT_CNTL2 0x025e
+#define regDAGB4_WR_OSD_CREDIT_CNTL2_BASE_IDX 0
+#define regDAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1 0x025f
+#define regDAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1_BASE_IDX 0
+#define regDAGB4_WRCLI_GPU_SNOOP_OVERRIDE 0x0260
+#define regDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 0
+#define regDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x0261
+#define regDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 0
+#define regDAGB4_WRCLI_ASK_PENDING 0x0262
+#define regDAGB4_WRCLI_ASK_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_GO_PENDING 0x0263
+#define regDAGB4_WRCLI_GO_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_GBLSEND_PENDING 0x0264
+#define regDAGB4_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_TLB_PENDING 0x0265
+#define regDAGB4_WRCLI_TLB_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_OARB_PENDING 0x0266
+#define regDAGB4_WRCLI_OARB_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_OSD_PENDING 0x0267
+#define regDAGB4_WRCLI_OSD_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_DBUS_ASK_PENDING 0x0268
+#define regDAGB4_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define regDAGB4_WRCLI_DBUS_GO_PENDING 0x0269
+#define regDAGB4_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define regDAGB4_DAGB_DLY 0x026c
+#define regDAGB4_DAGB_DLY_BASE_IDX 0
+#define regDAGB4_CNTL_MISC 0x026d
+#define regDAGB4_CNTL_MISC_BASE_IDX 0
+#define regDAGB4_CNTL_MISC2 0x026e
+#define regDAGB4_CNTL_MISC2_BASE_IDX 0
+#define regDAGB4_FATAL_ERROR_CNTL 0x026f
+#define regDAGB4_FATAL_ERROR_CNTL_BASE_IDX 0
+#define regDAGB4_FATAL_ERROR_CLEAR 0x0270
+#define regDAGB4_FATAL_ERROR_CLEAR_BASE_IDX 0
+#define regDAGB4_FATAL_ERROR_STATUS0 0x0271
+#define regDAGB4_FATAL_ERROR_STATUS0_BASE_IDX 0
+#define regDAGB4_FATAL_ERROR_STATUS1 0x0272
+#define regDAGB4_FATAL_ERROR_STATUS1_BASE_IDX 0
+#define regDAGB4_FATAL_ERROR_STATUS2 0x0273
+#define regDAGB4_FATAL_ERROR_STATUS2_BASE_IDX 0
+#define regDAGB4_FATAL_ERROR_STATUS3 0x0274
+#define regDAGB4_FATAL_ERROR_STATUS3_BASE_IDX 0
+#define regDAGB4_FIFO_EMPTY 0x0275
+#define regDAGB4_FIFO_EMPTY_BASE_IDX 0
+#define regDAGB4_FIFO_FULL 0x0276
+#define regDAGB4_FIFO_FULL_BASE_IDX 0
+#define regDAGB4_WR_CREDITS_FULL 0x0277
+#define regDAGB4_WR_CREDITS_FULL_BASE_IDX 0
+#define regDAGB4_RD_CREDITS_FULL 0x0278
+#define regDAGB4_RD_CREDITS_FULL_BASE_IDX 0
+#define regDAGB4_PERFCOUNTER_LO 0x0279
+#define regDAGB4_PERFCOUNTER_LO_BASE_IDX 0
+#define regDAGB4_PERFCOUNTER_HI 0x027a
+#define regDAGB4_PERFCOUNTER_HI_BASE_IDX 0
+#define regDAGB4_PERFCOUNTER0_CFG 0x027b
+#define regDAGB4_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regDAGB4_PERFCOUNTER1_CFG 0x027c
+#define regDAGB4_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regDAGB4_PERFCOUNTER2_CFG 0x027d
+#define regDAGB4_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regDAGB4_PERFCOUNTER_RSLT_CNTL 0x027e
+#define regDAGB4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regDAGB4_L1TLB_REG_RW 0x027f
+#define regDAGB4_L1TLB_REG_RW_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_ea_mmeadec0
+// base address: 0x60c00
+#define regMMEA0_DRAM_RD_CLI2GRP_MAP0 0x0300
+#define regMMEA0_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA0_DRAM_RD_CLI2GRP_MAP1 0x0301
+#define regMMEA0_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA0_DRAM_WR_CLI2GRP_MAP0 0x0302
+#define regMMEA0_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA0_DRAM_WR_CLI2GRP_MAP1 0x0303
+#define regMMEA0_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA0_DRAM_RD_GRP2VC_MAP 0x0304
+#define regMMEA0_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA0_DRAM_WR_GRP2VC_MAP 0x0305
+#define regMMEA0_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA0_DRAM_RD_LAZY 0x0306
+#define regMMEA0_DRAM_RD_LAZY_BASE_IDX 0
+#define regMMEA0_DRAM_WR_LAZY 0x0307
+#define regMMEA0_DRAM_WR_LAZY_BASE_IDX 0
+#define regMMEA0_DRAM_RD_CAM_CNTL 0x0308
+#define regMMEA0_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA0_DRAM_WR_CAM_CNTL 0x0309
+#define regMMEA0_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA0_DRAM_PAGE_BURST 0x030a
+#define regMMEA0_DRAM_PAGE_BURST_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_AGE 0x030b
+#define regMMEA0_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_AGE 0x030c
+#define regMMEA0_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_QUEUING 0x030d
+#define regMMEA0_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_QUEUING 0x030e
+#define regMMEA0_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_FIXED 0x030f
+#define regMMEA0_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_FIXED 0x0310
+#define regMMEA0_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_URGENCY 0x0311
+#define regMMEA0_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_URGENCY 0x0312
+#define regMMEA0_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_QUANT_PRI1 0x0313
+#define regMMEA0_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_QUANT_PRI2 0x0314
+#define regMMEA0_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA0_DRAM_RD_PRI_QUANT_PRI3 0x0315
+#define regMMEA0_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_QUANT_PRI1 0x0316
+#define regMMEA0_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_QUANT_PRI2 0x0317
+#define regMMEA0_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA0_DRAM_WR_PRI_QUANT_PRI3 0x0318
+#define regMMEA0_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA0_GMI_RD_CLI2GRP_MAP0 0x0319
+#define regMMEA0_GMI_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA0_GMI_RD_CLI2GRP_MAP1 0x031a
+#define regMMEA0_GMI_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA0_GMI_WR_CLI2GRP_MAP0 0x031b
+#define regMMEA0_GMI_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA0_GMI_WR_CLI2GRP_MAP1 0x031c
+#define regMMEA0_GMI_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA0_GMI_RD_GRP2VC_MAP 0x031d
+#define regMMEA0_GMI_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA0_GMI_WR_GRP2VC_MAP 0x031e
+#define regMMEA0_GMI_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA0_GMI_RD_LAZY 0x031f
+#define regMMEA0_GMI_RD_LAZY_BASE_IDX 0
+#define regMMEA0_GMI_WR_LAZY 0x0320
+#define regMMEA0_GMI_WR_LAZY_BASE_IDX 0
+#define regMMEA0_GMI_RD_CAM_CNTL 0x0321
+#define regMMEA0_GMI_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA0_GMI_WR_CAM_CNTL 0x0322
+#define regMMEA0_GMI_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA0_GMI_PAGE_BURST 0x0323
+#define regMMEA0_GMI_PAGE_BURST_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_AGE 0x0324
+#define regMMEA0_GMI_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_AGE 0x0325
+#define regMMEA0_GMI_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_QUEUING 0x0326
+#define regMMEA0_GMI_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_QUEUING 0x0327
+#define regMMEA0_GMI_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_FIXED 0x0328
+#define regMMEA0_GMI_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_FIXED 0x0329
+#define regMMEA0_GMI_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_URGENCY 0x032a
+#define regMMEA0_GMI_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_URGENCY 0x032b
+#define regMMEA0_GMI_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_URGENCY_MASKING 0x032c
+#define regMMEA0_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_URGENCY_MASKING 0x032d
+#define regMMEA0_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_QUANT_PRI1 0x032e
+#define regMMEA0_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_QUANT_PRI2 0x032f
+#define regMMEA0_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA0_GMI_RD_PRI_QUANT_PRI3 0x0330
+#define regMMEA0_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_QUANT_PRI1 0x0331
+#define regMMEA0_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_QUANT_PRI2 0x0332
+#define regMMEA0_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA0_GMI_WR_PRI_QUANT_PRI3 0x0333
+#define regMMEA0_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA0_IO_RD_CLI2GRP_MAP0 0x03d5
+#define regMMEA0_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA0_IO_RD_CLI2GRP_MAP1 0x03d6
+#define regMMEA0_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA0_IO_WR_CLI2GRP_MAP0 0x03d7
+#define regMMEA0_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA0_IO_WR_CLI2GRP_MAP1 0x03d8
+#define regMMEA0_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA0_IO_RD_COMBINE_FLUSH 0x03d9
+#define regMMEA0_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA0_IO_WR_COMBINE_FLUSH 0x03da
+#define regMMEA0_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA0_IO_GROUP_BURST 0x03db
+#define regMMEA0_IO_GROUP_BURST_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_AGE 0x03dc
+#define regMMEA0_IO_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_AGE 0x03dd
+#define regMMEA0_IO_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_QUEUING 0x03de
+#define regMMEA0_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_QUEUING 0x03df
+#define regMMEA0_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_FIXED 0x03e0
+#define regMMEA0_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_FIXED 0x03e1
+#define regMMEA0_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_URGENCY 0x03e2
+#define regMMEA0_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_URGENCY 0x03e3
+#define regMMEA0_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_URGENCY_MASKING 0x03e4
+#define regMMEA0_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_URGENCY_MASKING 0x03e5
+#define regMMEA0_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_QUANT_PRI1 0x03e6
+#define regMMEA0_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_QUANT_PRI2 0x03e7
+#define regMMEA0_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA0_IO_RD_PRI_QUANT_PRI3 0x03e8
+#define regMMEA0_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_QUANT_PRI1 0x03e9
+#define regMMEA0_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_QUANT_PRI2 0x03ea
+#define regMMEA0_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA0_IO_WR_PRI_QUANT_PRI3 0x03eb
+#define regMMEA0_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA0_SDP_ARB_DRAM 0x03ec
+#define regMMEA0_SDP_ARB_DRAM_BASE_IDX 0
+#define regMMEA0_SDP_ARB_GMI 0x03ed
+#define regMMEA0_SDP_ARB_GMI_BASE_IDX 0
+#define regMMEA0_SDP_ARB_FINAL 0x03ee
+#define regMMEA0_SDP_ARB_FINAL_BASE_IDX 0
+#define regMMEA0_SDP_DRAM_PRIORITY 0x03ef
+#define regMMEA0_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regMMEA0_SDP_GMI_PRIORITY 0x03f0
+#define regMMEA0_SDP_GMI_PRIORITY_BASE_IDX 0
+#define regMMEA0_SDP_IO_PRIORITY 0x03f1
+#define regMMEA0_SDP_IO_PRIORITY_BASE_IDX 0
+#define regMMEA0_SDP_CREDITS 0x03f2
+#define regMMEA0_SDP_CREDITS_BASE_IDX 0
+#define regMMEA0_SDP_TAG_RESERVE0 0x03f3
+#define regMMEA0_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regMMEA0_SDP_TAG_RESERVE1 0x03f4
+#define regMMEA0_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regMMEA0_SDP_VCC_RESERVE0 0x03f5
+#define regMMEA0_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regMMEA0_SDP_VCC_RESERVE1 0x03f6
+#define regMMEA0_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regMMEA0_SDP_VCD_RESERVE0 0x03f7
+#define regMMEA0_SDP_VCD_RESERVE0_BASE_IDX 0
+#define regMMEA0_SDP_VCD_RESERVE1 0x03f8
+#define regMMEA0_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regMMEA0_SDP_REQ_CNTL 0x03f9
+#define regMMEA0_SDP_REQ_CNTL_BASE_IDX 0
+#define regMMEA0_MISC 0x03fa
+#define regMMEA0_MISC_BASE_IDX 0
+#define regMMEA0_LATENCY_SAMPLING 0x03fb
+#define regMMEA0_LATENCY_SAMPLING_BASE_IDX 0
+#define regMMEA0_PERFCOUNTER_LO 0x03fc
+#define regMMEA0_PERFCOUNTER_LO_BASE_IDX 0
+#define regMMEA0_PERFCOUNTER_HI 0x03fd
+#define regMMEA0_PERFCOUNTER_HI_BASE_IDX 0
+#define regMMEA0_PERFCOUNTER0_CFG 0x03fe
+#define regMMEA0_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMMEA0_PERFCOUNTER1_CFG 0x03ff
+#define regMMEA0_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMMEA0_PERFCOUNTER_RSLT_CNTL 0x0400
+#define regMMEA0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA0_DSM_CNTL 0x0408
+#define regMMEA0_DSM_CNTL_BASE_IDX 0
+#define regMMEA0_DSM_CNTLA 0x0409
+#define regMMEA0_DSM_CNTLA_BASE_IDX 0
+#define regMMEA0_DSM_CNTLB 0x040a
+#define regMMEA0_DSM_CNTLB_BASE_IDX 0
+#define regMMEA0_DSM_CNTL2 0x040b
+#define regMMEA0_DSM_CNTL2_BASE_IDX 0
+#define regMMEA0_DSM_CNTL2A 0x040c
+#define regMMEA0_DSM_CNTL2A_BASE_IDX 0
+#define regMMEA0_DSM_CNTL2B 0x040d
+#define regMMEA0_DSM_CNTL2B_BASE_IDX 0
+#define regMMEA0_CGTT_CLK_CTRL 0x040f
+#define regMMEA0_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMMEA0_EDC_MODE 0x0410
+#define regMMEA0_EDC_MODE_BASE_IDX 0
+#define regMMEA0_ERR_STATUS 0x0411
+#define regMMEA0_ERR_STATUS_BASE_IDX 0
+#define regMMEA0_MISC2 0x0412
+#define regMMEA0_MISC2_BASE_IDX 0
+#define regMMEA0_MISC_AON 0x0415
+#define regMMEA0_MISC_AON_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_ea_mmeadec1
+// base address: 0x61100
+#define regMMEA1_DRAM_RD_CLI2GRP_MAP0 0x0440
+#define regMMEA1_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA1_DRAM_RD_CLI2GRP_MAP1 0x0441
+#define regMMEA1_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA1_DRAM_WR_CLI2GRP_MAP0 0x0442
+#define regMMEA1_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA1_DRAM_WR_CLI2GRP_MAP1 0x0443
+#define regMMEA1_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA1_DRAM_RD_GRP2VC_MAP 0x0444
+#define regMMEA1_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA1_DRAM_WR_GRP2VC_MAP 0x0445
+#define regMMEA1_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA1_DRAM_RD_LAZY 0x0446
+#define regMMEA1_DRAM_RD_LAZY_BASE_IDX 0
+#define regMMEA1_DRAM_WR_LAZY 0x0447
+#define regMMEA1_DRAM_WR_LAZY_BASE_IDX 0
+#define regMMEA1_DRAM_RD_CAM_CNTL 0x0448
+#define regMMEA1_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA1_DRAM_WR_CAM_CNTL 0x0449
+#define regMMEA1_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA1_DRAM_PAGE_BURST 0x044a
+#define regMMEA1_DRAM_PAGE_BURST_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_AGE 0x044b
+#define regMMEA1_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_AGE 0x044c
+#define regMMEA1_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_QUEUING 0x044d
+#define regMMEA1_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_QUEUING 0x044e
+#define regMMEA1_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_FIXED 0x044f
+#define regMMEA1_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_FIXED 0x0450
+#define regMMEA1_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_URGENCY 0x0451
+#define regMMEA1_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_URGENCY 0x0452
+#define regMMEA1_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_QUANT_PRI1 0x0453
+#define regMMEA1_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_QUANT_PRI2 0x0454
+#define regMMEA1_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA1_DRAM_RD_PRI_QUANT_PRI3 0x0455
+#define regMMEA1_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_QUANT_PRI1 0x0456
+#define regMMEA1_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_QUANT_PRI2 0x0457
+#define regMMEA1_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA1_DRAM_WR_PRI_QUANT_PRI3 0x0458
+#define regMMEA1_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA1_GMI_RD_CLI2GRP_MAP0 0x0459
+#define regMMEA1_GMI_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA1_GMI_RD_CLI2GRP_MAP1 0x045a
+#define regMMEA1_GMI_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA1_GMI_WR_CLI2GRP_MAP0 0x045b
+#define regMMEA1_GMI_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA1_GMI_WR_CLI2GRP_MAP1 0x045c
+#define regMMEA1_GMI_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA1_GMI_RD_GRP2VC_MAP 0x045d
+#define regMMEA1_GMI_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA1_GMI_WR_GRP2VC_MAP 0x045e
+#define regMMEA1_GMI_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA1_GMI_RD_LAZY 0x045f
+#define regMMEA1_GMI_RD_LAZY_BASE_IDX 0
+#define regMMEA1_GMI_WR_LAZY 0x0460
+#define regMMEA1_GMI_WR_LAZY_BASE_IDX 0
+#define regMMEA1_GMI_RD_CAM_CNTL 0x0461
+#define regMMEA1_GMI_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA1_GMI_WR_CAM_CNTL 0x0462
+#define regMMEA1_GMI_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA1_GMI_PAGE_BURST 0x0463
+#define regMMEA1_GMI_PAGE_BURST_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_AGE 0x0464
+#define regMMEA1_GMI_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_AGE 0x0465
+#define regMMEA1_GMI_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_QUEUING 0x0466
+#define regMMEA1_GMI_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_QUEUING 0x0467
+#define regMMEA1_GMI_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_FIXED 0x0468
+#define regMMEA1_GMI_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_FIXED 0x0469
+#define regMMEA1_GMI_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_URGENCY 0x046a
+#define regMMEA1_GMI_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_URGENCY 0x046b
+#define regMMEA1_GMI_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_URGENCY_MASKING 0x046c
+#define regMMEA1_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_URGENCY_MASKING 0x046d
+#define regMMEA1_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_QUANT_PRI1 0x046e
+#define regMMEA1_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_QUANT_PRI2 0x046f
+#define regMMEA1_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA1_GMI_RD_PRI_QUANT_PRI3 0x0470
+#define regMMEA1_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_QUANT_PRI1 0x0471
+#define regMMEA1_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_QUANT_PRI2 0x0472
+#define regMMEA1_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA1_GMI_WR_PRI_QUANT_PRI3 0x0473
+#define regMMEA1_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA1_IO_RD_CLI2GRP_MAP0 0x0515
+#define regMMEA1_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA1_IO_RD_CLI2GRP_MAP1 0x0516
+#define regMMEA1_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA1_IO_WR_CLI2GRP_MAP0 0x0517
+#define regMMEA1_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA1_IO_WR_CLI2GRP_MAP1 0x0518
+#define regMMEA1_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA1_IO_RD_COMBINE_FLUSH 0x0519
+#define regMMEA1_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA1_IO_WR_COMBINE_FLUSH 0x051a
+#define regMMEA1_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA1_IO_GROUP_BURST 0x051b
+#define regMMEA1_IO_GROUP_BURST_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_AGE 0x051c
+#define regMMEA1_IO_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_AGE 0x051d
+#define regMMEA1_IO_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_QUEUING 0x051e
+#define regMMEA1_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_QUEUING 0x051f
+#define regMMEA1_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_FIXED 0x0520
+#define regMMEA1_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_FIXED 0x0521
+#define regMMEA1_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_URGENCY 0x0522
+#define regMMEA1_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_URGENCY 0x0523
+#define regMMEA1_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_URGENCY_MASKING 0x0524
+#define regMMEA1_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_URGENCY_MASKING 0x0525
+#define regMMEA1_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_QUANT_PRI1 0x0526
+#define regMMEA1_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_QUANT_PRI2 0x0527
+#define regMMEA1_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA1_IO_RD_PRI_QUANT_PRI3 0x0528
+#define regMMEA1_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_QUANT_PRI1 0x0529
+#define regMMEA1_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_QUANT_PRI2 0x052a
+#define regMMEA1_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA1_IO_WR_PRI_QUANT_PRI3 0x052b
+#define regMMEA1_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA1_SDP_ARB_DRAM 0x052c
+#define regMMEA1_SDP_ARB_DRAM_BASE_IDX 0
+#define regMMEA1_SDP_ARB_GMI 0x052d
+#define regMMEA1_SDP_ARB_GMI_BASE_IDX 0
+#define regMMEA1_SDP_ARB_FINAL 0x052e
+#define regMMEA1_SDP_ARB_FINAL_BASE_IDX 0
+#define regMMEA1_SDP_DRAM_PRIORITY 0x052f
+#define regMMEA1_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regMMEA1_SDP_GMI_PRIORITY 0x0530
+#define regMMEA1_SDP_GMI_PRIORITY_BASE_IDX 0
+#define regMMEA1_SDP_IO_PRIORITY 0x0531
+#define regMMEA1_SDP_IO_PRIORITY_BASE_IDX 0
+#define regMMEA1_SDP_CREDITS 0x0532
+#define regMMEA1_SDP_CREDITS_BASE_IDX 0
+#define regMMEA1_SDP_TAG_RESERVE0 0x0533
+#define regMMEA1_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regMMEA1_SDP_TAG_RESERVE1 0x0534
+#define regMMEA1_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regMMEA1_SDP_VCC_RESERVE0 0x0535
+#define regMMEA1_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regMMEA1_SDP_VCC_RESERVE1 0x0536
+#define regMMEA1_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regMMEA1_SDP_VCD_RESERVE0 0x0537
+#define regMMEA1_SDP_VCD_RESERVE0_BASE_IDX 0
+#define regMMEA1_SDP_VCD_RESERVE1 0x0538
+#define regMMEA1_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regMMEA1_SDP_REQ_CNTL 0x0539
+#define regMMEA1_SDP_REQ_CNTL_BASE_IDX 0
+#define regMMEA1_MISC 0x053a
+#define regMMEA1_MISC_BASE_IDX 0
+#define regMMEA1_LATENCY_SAMPLING 0x053b
+#define regMMEA1_LATENCY_SAMPLING_BASE_IDX 0
+#define regMMEA1_PERFCOUNTER_LO 0x053c
+#define regMMEA1_PERFCOUNTER_LO_BASE_IDX 0
+#define regMMEA1_PERFCOUNTER_HI 0x053d
+#define regMMEA1_PERFCOUNTER_HI_BASE_IDX 0
+#define regMMEA1_PERFCOUNTER0_CFG 0x053e
+#define regMMEA1_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMMEA1_PERFCOUNTER1_CFG 0x053f
+#define regMMEA1_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMMEA1_PERFCOUNTER_RSLT_CNTL 0x0540
+#define regMMEA1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA1_DSM_CNTL 0x0548
+#define regMMEA1_DSM_CNTL_BASE_IDX 0
+#define regMMEA1_DSM_CNTLA 0x0549
+#define regMMEA1_DSM_CNTLA_BASE_IDX 0
+#define regMMEA1_DSM_CNTLB 0x054a
+#define regMMEA1_DSM_CNTLB_BASE_IDX 0
+#define regMMEA1_DSM_CNTL2 0x054b
+#define regMMEA1_DSM_CNTL2_BASE_IDX 0
+#define regMMEA1_DSM_CNTL2A 0x054c
+#define regMMEA1_DSM_CNTL2A_BASE_IDX 0
+#define regMMEA1_DSM_CNTL2B 0x054d
+#define regMMEA1_DSM_CNTL2B_BASE_IDX 0
+#define regMMEA1_CGTT_CLK_CTRL 0x054f
+#define regMMEA1_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMMEA1_EDC_MODE 0x0550
+#define regMMEA1_EDC_MODE_BASE_IDX 0
+#define regMMEA1_ERR_STATUS 0x0551
+#define regMMEA1_ERR_STATUS_BASE_IDX 0
+#define regMMEA1_MISC2 0x0552
+#define regMMEA1_MISC2_BASE_IDX 0
+#define regMMEA1_MISC_AON 0x0555
+#define regMMEA1_MISC_AON_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_ea_mmeadec2
+// base address: 0x61600
+#define regMMEA2_DRAM_RD_CLI2GRP_MAP0 0x0580
+#define regMMEA2_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA2_DRAM_RD_CLI2GRP_MAP1 0x0581
+#define regMMEA2_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA2_DRAM_WR_CLI2GRP_MAP0 0x0582
+#define regMMEA2_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA2_DRAM_WR_CLI2GRP_MAP1 0x0583
+#define regMMEA2_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA2_DRAM_RD_GRP2VC_MAP 0x0584
+#define regMMEA2_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA2_DRAM_WR_GRP2VC_MAP 0x0585
+#define regMMEA2_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA2_DRAM_RD_LAZY 0x0586
+#define regMMEA2_DRAM_RD_LAZY_BASE_IDX 0
+#define regMMEA2_DRAM_WR_LAZY 0x0587
+#define regMMEA2_DRAM_WR_LAZY_BASE_IDX 0
+#define regMMEA2_DRAM_RD_CAM_CNTL 0x0588
+#define regMMEA2_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA2_DRAM_WR_CAM_CNTL 0x0589
+#define regMMEA2_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA2_DRAM_PAGE_BURST 0x058a
+#define regMMEA2_DRAM_PAGE_BURST_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_AGE 0x058b
+#define regMMEA2_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_AGE 0x058c
+#define regMMEA2_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_QUEUING 0x058d
+#define regMMEA2_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_QUEUING 0x058e
+#define regMMEA2_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_FIXED 0x058f
+#define regMMEA2_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_FIXED 0x0590
+#define regMMEA2_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_URGENCY 0x0591
+#define regMMEA2_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_URGENCY 0x0592
+#define regMMEA2_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_QUANT_PRI1 0x0593
+#define regMMEA2_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_QUANT_PRI2 0x0594
+#define regMMEA2_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA2_DRAM_RD_PRI_QUANT_PRI3 0x0595
+#define regMMEA2_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_QUANT_PRI1 0x0596
+#define regMMEA2_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_QUANT_PRI2 0x0597
+#define regMMEA2_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA2_DRAM_WR_PRI_QUANT_PRI3 0x0598
+#define regMMEA2_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA2_GMI_RD_CLI2GRP_MAP0 0x0599
+#define regMMEA2_GMI_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA2_GMI_RD_CLI2GRP_MAP1 0x059a
+#define regMMEA2_GMI_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA2_GMI_WR_CLI2GRP_MAP0 0x059b
+#define regMMEA2_GMI_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA2_GMI_WR_CLI2GRP_MAP1 0x059c
+#define regMMEA2_GMI_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA2_GMI_RD_GRP2VC_MAP 0x059d
+#define regMMEA2_GMI_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA2_GMI_WR_GRP2VC_MAP 0x059e
+#define regMMEA2_GMI_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA2_GMI_RD_LAZY 0x059f
+#define regMMEA2_GMI_RD_LAZY_BASE_IDX 0
+#define regMMEA2_GMI_WR_LAZY 0x05a0
+#define regMMEA2_GMI_WR_LAZY_BASE_IDX 0
+#define regMMEA2_GMI_RD_CAM_CNTL 0x05a1
+#define regMMEA2_GMI_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA2_GMI_WR_CAM_CNTL 0x05a2
+#define regMMEA2_GMI_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA2_GMI_PAGE_BURST 0x05a3
+#define regMMEA2_GMI_PAGE_BURST_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_AGE 0x05a4
+#define regMMEA2_GMI_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_AGE 0x05a5
+#define regMMEA2_GMI_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_QUEUING 0x05a6
+#define regMMEA2_GMI_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_QUEUING 0x05a7
+#define regMMEA2_GMI_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_FIXED 0x05a8
+#define regMMEA2_GMI_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_FIXED 0x05a9
+#define regMMEA2_GMI_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_URGENCY 0x05aa
+#define regMMEA2_GMI_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_URGENCY 0x05ab
+#define regMMEA2_GMI_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_URGENCY_MASKING 0x05ac
+#define regMMEA2_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_URGENCY_MASKING 0x05ad
+#define regMMEA2_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_QUANT_PRI1 0x05ae
+#define regMMEA2_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_QUANT_PRI2 0x05af
+#define regMMEA2_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA2_GMI_RD_PRI_QUANT_PRI3 0x05b0
+#define regMMEA2_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_QUANT_PRI1 0x05b1
+#define regMMEA2_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_QUANT_PRI2 0x05b2
+#define regMMEA2_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA2_GMI_WR_PRI_QUANT_PRI3 0x05b3
+#define regMMEA2_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA2_IO_RD_CLI2GRP_MAP0 0x0655
+#define regMMEA2_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA2_IO_RD_CLI2GRP_MAP1 0x0656
+#define regMMEA2_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA2_IO_WR_CLI2GRP_MAP0 0x0657
+#define regMMEA2_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA2_IO_WR_CLI2GRP_MAP1 0x0658
+#define regMMEA2_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA2_IO_RD_COMBINE_FLUSH 0x0659
+#define regMMEA2_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA2_IO_WR_COMBINE_FLUSH 0x065a
+#define regMMEA2_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA2_IO_GROUP_BURST 0x065b
+#define regMMEA2_IO_GROUP_BURST_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_AGE 0x065c
+#define regMMEA2_IO_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_AGE 0x065d
+#define regMMEA2_IO_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_QUEUING 0x065e
+#define regMMEA2_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_QUEUING 0x065f
+#define regMMEA2_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_FIXED 0x0660
+#define regMMEA2_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_FIXED 0x0661
+#define regMMEA2_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_URGENCY 0x0662
+#define regMMEA2_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_URGENCY 0x0663
+#define regMMEA2_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_URGENCY_MASKING 0x0664
+#define regMMEA2_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_URGENCY_MASKING 0x0665
+#define regMMEA2_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_QUANT_PRI1 0x0666
+#define regMMEA2_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_QUANT_PRI2 0x0667
+#define regMMEA2_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA2_IO_RD_PRI_QUANT_PRI3 0x0668
+#define regMMEA2_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_QUANT_PRI1 0x0669
+#define regMMEA2_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_QUANT_PRI2 0x066a
+#define regMMEA2_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA2_IO_WR_PRI_QUANT_PRI3 0x066b
+#define regMMEA2_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA2_SDP_ARB_DRAM 0x066c
+#define regMMEA2_SDP_ARB_DRAM_BASE_IDX 0
+#define regMMEA2_SDP_ARB_GMI 0x066d
+#define regMMEA2_SDP_ARB_GMI_BASE_IDX 0
+#define regMMEA2_SDP_ARB_FINAL 0x066e
+#define regMMEA2_SDP_ARB_FINAL_BASE_IDX 0
+#define regMMEA2_SDP_DRAM_PRIORITY 0x066f
+#define regMMEA2_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regMMEA2_SDP_GMI_PRIORITY 0x0670
+#define regMMEA2_SDP_GMI_PRIORITY_BASE_IDX 0
+#define regMMEA2_SDP_IO_PRIORITY 0x0671
+#define regMMEA2_SDP_IO_PRIORITY_BASE_IDX 0
+#define regMMEA2_SDP_CREDITS 0x0672
+#define regMMEA2_SDP_CREDITS_BASE_IDX 0
+#define regMMEA2_SDP_TAG_RESERVE0 0x0673
+#define regMMEA2_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regMMEA2_SDP_TAG_RESERVE1 0x0674
+#define regMMEA2_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regMMEA2_SDP_VCC_RESERVE0 0x0675
+#define regMMEA2_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regMMEA2_SDP_VCC_RESERVE1 0x0676
+#define regMMEA2_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regMMEA2_SDP_VCD_RESERVE0 0x0677
+#define regMMEA2_SDP_VCD_RESERVE0_BASE_IDX 0
+#define regMMEA2_SDP_VCD_RESERVE1 0x0678
+#define regMMEA2_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regMMEA2_SDP_REQ_CNTL 0x0679
+#define regMMEA2_SDP_REQ_CNTL_BASE_IDX 0
+#define regMMEA2_MISC 0x067a
+#define regMMEA2_MISC_BASE_IDX 0
+#define regMMEA2_LATENCY_SAMPLING 0x067b
+#define regMMEA2_LATENCY_SAMPLING_BASE_IDX 0
+#define regMMEA2_PERFCOUNTER_LO 0x067c
+#define regMMEA2_PERFCOUNTER_LO_BASE_IDX 0
+#define regMMEA2_PERFCOUNTER_HI 0x067d
+#define regMMEA2_PERFCOUNTER_HI_BASE_IDX 0
+#define regMMEA2_PERFCOUNTER0_CFG 0x067e
+#define regMMEA2_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMMEA2_PERFCOUNTER1_CFG 0x067f
+#define regMMEA2_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMMEA2_PERFCOUNTER_RSLT_CNTL 0x0680
+#define regMMEA2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA2_DSM_CNTL 0x0688
+#define regMMEA2_DSM_CNTL_BASE_IDX 0
+#define regMMEA2_DSM_CNTLA 0x0689
+#define regMMEA2_DSM_CNTLA_BASE_IDX 0
+#define regMMEA2_DSM_CNTLB 0x068a
+#define regMMEA2_DSM_CNTLB_BASE_IDX 0
+#define regMMEA2_DSM_CNTL2 0x068b
+#define regMMEA2_DSM_CNTL2_BASE_IDX 0
+#define regMMEA2_DSM_CNTL2A 0x068c
+#define regMMEA2_DSM_CNTL2A_BASE_IDX 0
+#define regMMEA2_DSM_CNTL2B 0x068d
+#define regMMEA2_DSM_CNTL2B_BASE_IDX 0
+#define regMMEA2_CGTT_CLK_CTRL 0x068f
+#define regMMEA2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMMEA2_EDC_MODE 0x0690
+#define regMMEA2_EDC_MODE_BASE_IDX 0
+#define regMMEA2_ERR_STATUS 0x0691
+#define regMMEA2_ERR_STATUS_BASE_IDX 0
+#define regMMEA2_MISC2 0x0692
+#define regMMEA2_MISC2_BASE_IDX 0
+#define regMMEA2_MISC_AON 0x0695
+#define regMMEA2_MISC_AON_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_ea_mmeadec3
+// base address: 0x61b00
+#define regMMEA3_DRAM_RD_CLI2GRP_MAP0 0x06c0
+#define regMMEA3_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA3_DRAM_RD_CLI2GRP_MAP1 0x06c1
+#define regMMEA3_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA3_DRAM_WR_CLI2GRP_MAP0 0x06c2
+#define regMMEA3_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA3_DRAM_WR_CLI2GRP_MAP1 0x06c3
+#define regMMEA3_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA3_DRAM_RD_GRP2VC_MAP 0x06c4
+#define regMMEA3_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA3_DRAM_WR_GRP2VC_MAP 0x06c5
+#define regMMEA3_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA3_DRAM_RD_LAZY 0x06c6
+#define regMMEA3_DRAM_RD_LAZY_BASE_IDX 0
+#define regMMEA3_DRAM_WR_LAZY 0x06c7
+#define regMMEA3_DRAM_WR_LAZY_BASE_IDX 0
+#define regMMEA3_DRAM_RD_CAM_CNTL 0x06c8
+#define regMMEA3_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA3_DRAM_WR_CAM_CNTL 0x06c9
+#define regMMEA3_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA3_DRAM_PAGE_BURST 0x06ca
+#define regMMEA3_DRAM_PAGE_BURST_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_AGE 0x06cb
+#define regMMEA3_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_AGE 0x06cc
+#define regMMEA3_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_QUEUING 0x06cd
+#define regMMEA3_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_QUEUING 0x06ce
+#define regMMEA3_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_FIXED 0x06cf
+#define regMMEA3_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_FIXED 0x06d0
+#define regMMEA3_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_URGENCY 0x06d1
+#define regMMEA3_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_URGENCY 0x06d2
+#define regMMEA3_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_QUANT_PRI1 0x06d3
+#define regMMEA3_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_QUANT_PRI2 0x06d4
+#define regMMEA3_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA3_DRAM_RD_PRI_QUANT_PRI3 0x06d5
+#define regMMEA3_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_QUANT_PRI1 0x06d6
+#define regMMEA3_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_QUANT_PRI2 0x06d7
+#define regMMEA3_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA3_DRAM_WR_PRI_QUANT_PRI3 0x06d8
+#define regMMEA3_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA3_GMI_RD_CLI2GRP_MAP0 0x06d9
+#define regMMEA3_GMI_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA3_GMI_RD_CLI2GRP_MAP1 0x06da
+#define regMMEA3_GMI_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA3_GMI_WR_CLI2GRP_MAP0 0x06db
+#define regMMEA3_GMI_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA3_GMI_WR_CLI2GRP_MAP1 0x06dc
+#define regMMEA3_GMI_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA3_GMI_RD_GRP2VC_MAP 0x06dd
+#define regMMEA3_GMI_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA3_GMI_WR_GRP2VC_MAP 0x06de
+#define regMMEA3_GMI_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA3_GMI_RD_LAZY 0x06df
+#define regMMEA3_GMI_RD_LAZY_BASE_IDX 0
+#define regMMEA3_GMI_WR_LAZY 0x06e0
+#define regMMEA3_GMI_WR_LAZY_BASE_IDX 0
+#define regMMEA3_GMI_RD_CAM_CNTL 0x06e1
+#define regMMEA3_GMI_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA3_GMI_WR_CAM_CNTL 0x06e2
+#define regMMEA3_GMI_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA3_GMI_PAGE_BURST 0x06e3
+#define regMMEA3_GMI_PAGE_BURST_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_AGE 0x06e4
+#define regMMEA3_GMI_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_AGE 0x06e5
+#define regMMEA3_GMI_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_QUEUING 0x06e6
+#define regMMEA3_GMI_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_QUEUING 0x06e7
+#define regMMEA3_GMI_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_FIXED 0x06e8
+#define regMMEA3_GMI_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_FIXED 0x06e9
+#define regMMEA3_GMI_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_URGENCY 0x06ea
+#define regMMEA3_GMI_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_URGENCY 0x06eb
+#define regMMEA3_GMI_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_URGENCY_MASKING 0x06ec
+#define regMMEA3_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_URGENCY_MASKING 0x06ed
+#define regMMEA3_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_QUANT_PRI1 0x06ee
+#define regMMEA3_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_QUANT_PRI2 0x06ef
+#define regMMEA3_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA3_GMI_RD_PRI_QUANT_PRI3 0x06f0
+#define regMMEA3_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_QUANT_PRI1 0x06f1
+#define regMMEA3_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_QUANT_PRI2 0x06f2
+#define regMMEA3_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA3_GMI_WR_PRI_QUANT_PRI3 0x06f3
+#define regMMEA3_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA3_IO_RD_CLI2GRP_MAP0 0x0795
+#define regMMEA3_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA3_IO_RD_CLI2GRP_MAP1 0x0796
+#define regMMEA3_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA3_IO_WR_CLI2GRP_MAP0 0x0797
+#define regMMEA3_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA3_IO_WR_CLI2GRP_MAP1 0x0798
+#define regMMEA3_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA3_IO_RD_COMBINE_FLUSH 0x0799
+#define regMMEA3_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA3_IO_WR_COMBINE_FLUSH 0x079a
+#define regMMEA3_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA3_IO_GROUP_BURST 0x079b
+#define regMMEA3_IO_GROUP_BURST_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_AGE 0x079c
+#define regMMEA3_IO_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_AGE 0x079d
+#define regMMEA3_IO_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_QUEUING 0x079e
+#define regMMEA3_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_QUEUING 0x079f
+#define regMMEA3_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_FIXED 0x07a0
+#define regMMEA3_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_FIXED 0x07a1
+#define regMMEA3_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_URGENCY 0x07a2
+#define regMMEA3_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_URGENCY 0x07a3
+#define regMMEA3_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_URGENCY_MASKING 0x07a4
+#define regMMEA3_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_URGENCY_MASKING 0x07a5
+#define regMMEA3_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_QUANT_PRI1 0x07a6
+#define regMMEA3_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_QUANT_PRI2 0x07a7
+#define regMMEA3_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA3_IO_RD_PRI_QUANT_PRI3 0x07a8
+#define regMMEA3_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_QUANT_PRI1 0x07a9
+#define regMMEA3_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_QUANT_PRI2 0x07aa
+#define regMMEA3_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA3_IO_WR_PRI_QUANT_PRI3 0x07ab
+#define regMMEA3_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA3_SDP_ARB_DRAM 0x07ac
+#define regMMEA3_SDP_ARB_DRAM_BASE_IDX 0
+#define regMMEA3_SDP_ARB_GMI 0x07ad
+#define regMMEA3_SDP_ARB_GMI_BASE_IDX 0
+#define regMMEA3_SDP_ARB_FINAL 0x07ae
+#define regMMEA3_SDP_ARB_FINAL_BASE_IDX 0
+#define regMMEA3_SDP_DRAM_PRIORITY 0x07af
+#define regMMEA3_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regMMEA3_SDP_GMI_PRIORITY 0x07b0
+#define regMMEA3_SDP_GMI_PRIORITY_BASE_IDX 0
+#define regMMEA3_SDP_IO_PRIORITY 0x07b1
+#define regMMEA3_SDP_IO_PRIORITY_BASE_IDX 0
+#define regMMEA3_SDP_CREDITS 0x07b2
+#define regMMEA3_SDP_CREDITS_BASE_IDX 0
+#define regMMEA3_SDP_TAG_RESERVE0 0x07b3
+#define regMMEA3_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regMMEA3_SDP_TAG_RESERVE1 0x07b4
+#define regMMEA3_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regMMEA3_SDP_VCC_RESERVE0 0x07b5
+#define regMMEA3_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regMMEA3_SDP_VCC_RESERVE1 0x07b6
+#define regMMEA3_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regMMEA3_SDP_VCD_RESERVE0 0x07b7
+#define regMMEA3_SDP_VCD_RESERVE0_BASE_IDX 0
+#define regMMEA3_SDP_VCD_RESERVE1 0x07b8
+#define regMMEA3_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regMMEA3_SDP_REQ_CNTL 0x07b9
+#define regMMEA3_SDP_REQ_CNTL_BASE_IDX 0
+#define regMMEA3_MISC 0x07ba
+#define regMMEA3_MISC_BASE_IDX 0
+#define regMMEA3_LATENCY_SAMPLING 0x07bb
+#define regMMEA3_LATENCY_SAMPLING_BASE_IDX 0
+#define regMMEA3_PERFCOUNTER_LO 0x07bc
+#define regMMEA3_PERFCOUNTER_LO_BASE_IDX 0
+#define regMMEA3_PERFCOUNTER_HI 0x07bd
+#define regMMEA3_PERFCOUNTER_HI_BASE_IDX 0
+#define regMMEA3_PERFCOUNTER0_CFG 0x07be
+#define regMMEA3_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMMEA3_PERFCOUNTER1_CFG 0x07bf
+#define regMMEA3_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMMEA3_PERFCOUNTER_RSLT_CNTL 0x07c0
+#define regMMEA3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA3_DSM_CNTL 0x07c8
+#define regMMEA3_DSM_CNTL_BASE_IDX 0
+#define regMMEA3_DSM_CNTLA 0x07c9
+#define regMMEA3_DSM_CNTLA_BASE_IDX 0
+#define regMMEA3_DSM_CNTLB 0x07ca
+#define regMMEA3_DSM_CNTLB_BASE_IDX 0
+#define regMMEA3_DSM_CNTL2 0x07cb
+#define regMMEA3_DSM_CNTL2_BASE_IDX 0
+#define regMMEA3_DSM_CNTL2A 0x07cc
+#define regMMEA3_DSM_CNTL2A_BASE_IDX 0
+#define regMMEA3_DSM_CNTL2B 0x07cd
+#define regMMEA3_DSM_CNTL2B_BASE_IDX 0
+#define regMMEA3_CGTT_CLK_CTRL 0x07cf
+#define regMMEA3_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMMEA3_EDC_MODE 0x07d0
+#define regMMEA3_EDC_MODE_BASE_IDX 0
+#define regMMEA3_ERR_STATUS 0x07d1
+#define regMMEA3_ERR_STATUS_BASE_IDX 0
+#define regMMEA3_MISC2 0x07d2
+#define regMMEA3_MISC2_BASE_IDX 0
+#define regMMEA3_MISC_AON 0x07d5
+#define regMMEA3_MISC_AON_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_ea_mmeadec4
+// base address: 0x62000
+#define regMMEA4_DRAM_RD_CLI2GRP_MAP0 0x0800
+#define regMMEA4_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA4_DRAM_RD_CLI2GRP_MAP1 0x0801
+#define regMMEA4_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA4_DRAM_WR_CLI2GRP_MAP0 0x0802
+#define regMMEA4_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA4_DRAM_WR_CLI2GRP_MAP1 0x0803
+#define regMMEA4_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA4_DRAM_RD_GRP2VC_MAP 0x0804
+#define regMMEA4_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA4_DRAM_WR_GRP2VC_MAP 0x0805
+#define regMMEA4_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA4_DRAM_RD_LAZY 0x0806
+#define regMMEA4_DRAM_RD_LAZY_BASE_IDX 0
+#define regMMEA4_DRAM_WR_LAZY 0x0807
+#define regMMEA4_DRAM_WR_LAZY_BASE_IDX 0
+#define regMMEA4_DRAM_RD_CAM_CNTL 0x0808
+#define regMMEA4_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA4_DRAM_WR_CAM_CNTL 0x0809
+#define regMMEA4_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA4_DRAM_PAGE_BURST 0x080a
+#define regMMEA4_DRAM_PAGE_BURST_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_AGE 0x080b
+#define regMMEA4_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_AGE 0x080c
+#define regMMEA4_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_QUEUING 0x080d
+#define regMMEA4_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_QUEUING 0x080e
+#define regMMEA4_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_FIXED 0x080f
+#define regMMEA4_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_FIXED 0x0810
+#define regMMEA4_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_URGENCY 0x0811
+#define regMMEA4_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_URGENCY 0x0812
+#define regMMEA4_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_QUANT_PRI1 0x0813
+#define regMMEA4_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_QUANT_PRI2 0x0814
+#define regMMEA4_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA4_DRAM_RD_PRI_QUANT_PRI3 0x0815
+#define regMMEA4_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_QUANT_PRI1 0x0816
+#define regMMEA4_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_QUANT_PRI2 0x0817
+#define regMMEA4_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA4_DRAM_WR_PRI_QUANT_PRI3 0x0818
+#define regMMEA4_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA4_GMI_RD_CLI2GRP_MAP0 0x0819
+#define regMMEA4_GMI_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA4_GMI_RD_CLI2GRP_MAP1 0x081a
+#define regMMEA4_GMI_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA4_GMI_WR_CLI2GRP_MAP0 0x081b
+#define regMMEA4_GMI_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA4_GMI_WR_CLI2GRP_MAP1 0x081c
+#define regMMEA4_GMI_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA4_GMI_RD_GRP2VC_MAP 0x081d
+#define regMMEA4_GMI_RD_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA4_GMI_WR_GRP2VC_MAP 0x081e
+#define regMMEA4_GMI_WR_GRP2VC_MAP_BASE_IDX 0
+#define regMMEA4_GMI_RD_LAZY 0x081f
+#define regMMEA4_GMI_RD_LAZY_BASE_IDX 0
+#define regMMEA4_GMI_WR_LAZY 0x0820
+#define regMMEA4_GMI_WR_LAZY_BASE_IDX 0
+#define regMMEA4_GMI_RD_CAM_CNTL 0x0821
+#define regMMEA4_GMI_RD_CAM_CNTL_BASE_IDX 0
+#define regMMEA4_GMI_WR_CAM_CNTL 0x0822
+#define regMMEA4_GMI_WR_CAM_CNTL_BASE_IDX 0
+#define regMMEA4_GMI_PAGE_BURST 0x0823
+#define regMMEA4_GMI_PAGE_BURST_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_AGE 0x0824
+#define regMMEA4_GMI_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_AGE 0x0825
+#define regMMEA4_GMI_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_QUEUING 0x0826
+#define regMMEA4_GMI_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_QUEUING 0x0827
+#define regMMEA4_GMI_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_FIXED 0x0828
+#define regMMEA4_GMI_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_FIXED 0x0829
+#define regMMEA4_GMI_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_URGENCY 0x082a
+#define regMMEA4_GMI_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_URGENCY 0x082b
+#define regMMEA4_GMI_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_URGENCY_MASKING 0x082c
+#define regMMEA4_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_URGENCY_MASKING 0x082d
+#define regMMEA4_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_QUANT_PRI1 0x082e
+#define regMMEA4_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_QUANT_PRI2 0x082f
+#define regMMEA4_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA4_GMI_RD_PRI_QUANT_PRI3 0x0830
+#define regMMEA4_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_QUANT_PRI1 0x0831
+#define regMMEA4_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_QUANT_PRI2 0x0832
+#define regMMEA4_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA4_GMI_WR_PRI_QUANT_PRI3 0x0833
+#define regMMEA4_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA4_IO_RD_CLI2GRP_MAP0 0x08d5
+#define regMMEA4_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA4_IO_RD_CLI2GRP_MAP1 0x08d6
+#define regMMEA4_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA4_IO_WR_CLI2GRP_MAP0 0x08d7
+#define regMMEA4_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regMMEA4_IO_WR_CLI2GRP_MAP1 0x08d8
+#define regMMEA4_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regMMEA4_IO_RD_COMBINE_FLUSH 0x08d9
+#define regMMEA4_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA4_IO_WR_COMBINE_FLUSH 0x08da
+#define regMMEA4_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regMMEA4_IO_GROUP_BURST 0x08db
+#define regMMEA4_IO_GROUP_BURST_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_AGE 0x08dc
+#define regMMEA4_IO_RD_PRI_AGE_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_AGE 0x08dd
+#define regMMEA4_IO_WR_PRI_AGE_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_QUEUING 0x08de
+#define regMMEA4_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_QUEUING 0x08df
+#define regMMEA4_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_FIXED 0x08e0
+#define regMMEA4_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_FIXED 0x08e1
+#define regMMEA4_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_URGENCY 0x08e2
+#define regMMEA4_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_URGENCY 0x08e3
+#define regMMEA4_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_URGENCY_MASKING 0x08e4
+#define regMMEA4_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_URGENCY_MASKING 0x08e5
+#define regMMEA4_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_QUANT_PRI1 0x08e6
+#define regMMEA4_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_QUANT_PRI2 0x08e7
+#define regMMEA4_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA4_IO_RD_PRI_QUANT_PRI3 0x08e8
+#define regMMEA4_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_QUANT_PRI1 0x08e9
+#define regMMEA4_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_QUANT_PRI2 0x08ea
+#define regMMEA4_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regMMEA4_IO_WR_PRI_QUANT_PRI3 0x08eb
+#define regMMEA4_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regMMEA4_SDP_ARB_DRAM 0x08ec
+#define regMMEA4_SDP_ARB_DRAM_BASE_IDX 0
+#define regMMEA4_SDP_ARB_GMI 0x08ed
+#define regMMEA4_SDP_ARB_GMI_BASE_IDX 0
+#define regMMEA4_SDP_ARB_FINAL 0x08ee
+#define regMMEA4_SDP_ARB_FINAL_BASE_IDX 0
+#define regMMEA4_SDP_DRAM_PRIORITY 0x08ef
+#define regMMEA4_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regMMEA4_SDP_GMI_PRIORITY 0x08f0
+#define regMMEA4_SDP_GMI_PRIORITY_BASE_IDX 0
+#define regMMEA4_SDP_IO_PRIORITY 0x08f1
+#define regMMEA4_SDP_IO_PRIORITY_BASE_IDX 0
+#define regMMEA4_SDP_CREDITS 0x08f2
+#define regMMEA4_SDP_CREDITS_BASE_IDX 0
+#define regMMEA4_SDP_TAG_RESERVE0 0x08f3
+#define regMMEA4_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regMMEA4_SDP_TAG_RESERVE1 0x08f4
+#define regMMEA4_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regMMEA4_SDP_VCC_RESERVE0 0x08f5
+#define regMMEA4_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regMMEA4_SDP_VCC_RESERVE1 0x08f6
+#define regMMEA4_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regMMEA4_SDP_VCD_RESERVE0 0x08f7
+#define regMMEA4_SDP_VCD_RESERVE0_BASE_IDX 0
+#define regMMEA4_SDP_VCD_RESERVE1 0x08f8
+#define regMMEA4_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regMMEA4_SDP_REQ_CNTL 0x08f9
+#define regMMEA4_SDP_REQ_CNTL_BASE_IDX 0
+#define regMMEA4_MISC 0x08fa
+#define regMMEA4_MISC_BASE_IDX 0
+#define regMMEA4_LATENCY_SAMPLING 0x08fb
+#define regMMEA4_LATENCY_SAMPLING_BASE_IDX 0
+#define regMMEA4_PERFCOUNTER_LO 0x08fc
+#define regMMEA4_PERFCOUNTER_LO_BASE_IDX 0
+#define regMMEA4_PERFCOUNTER_HI 0x08fd
+#define regMMEA4_PERFCOUNTER_HI_BASE_IDX 0
+#define regMMEA4_PERFCOUNTER0_CFG 0x08fe
+#define regMMEA4_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMMEA4_PERFCOUNTER1_CFG 0x08ff
+#define regMMEA4_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMMEA4_PERFCOUNTER_RSLT_CNTL 0x0900
+#define regMMEA4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA4_DSM_CNTL 0x0908
+#define regMMEA4_DSM_CNTL_BASE_IDX 0
+#define regMMEA4_DSM_CNTLA 0x0909
+#define regMMEA4_DSM_CNTLA_BASE_IDX 0
+#define regMMEA4_DSM_CNTLB 0x090a
+#define regMMEA4_DSM_CNTLB_BASE_IDX 0
+#define regMMEA4_DSM_CNTL2 0x090b
+#define regMMEA4_DSM_CNTL2_BASE_IDX 0
+#define regMMEA4_DSM_CNTL2A 0x090c
+#define regMMEA4_DSM_CNTL2A_BASE_IDX 0
+#define regMMEA4_DSM_CNTL2B 0x090d
+#define regMMEA4_DSM_CNTL2B_BASE_IDX 0
+#define regMMEA4_CGTT_CLK_CTRL 0x090f
+#define regMMEA4_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMMEA4_EDC_MODE 0x0910
+#define regMMEA4_EDC_MODE_BASE_IDX 0
+#define regMMEA4_ERR_STATUS 0x0911
+#define regMMEA4_ERR_STATUS_BASE_IDX 0
+#define regMMEA4_MISC2 0x0912
+#define regMMEA4_MISC2_BASE_IDX 0
+#define regMMEA4_MISC_AON 0x0915
+#define regMMEA4_MISC_AON_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_pctldec0
+// base address: 0x62a00
+#define regPCTL0_CTRL 0x0a80
+#define regPCTL0_CTRL_BASE_IDX 0
+#define regPCTL0_MMHUB_DEEPSLEEP_IB 0x0a81
+#define regPCTL0_MMHUB_DEEPSLEEP_IB_BASE_IDX 0
+#define regPCTL0_MMHUB_DEEPSLEEP_OVERRIDE 0x0a82
+#define regPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_BASE_IDX 0
+#define regPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB 0x0a83
+#define regPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB_BASE_IDX 0
+#define regPCTL0_PG_IGNORE_DEEPSLEEP 0x0a84
+#define regPCTL0_PG_IGNORE_DEEPSLEEP_BASE_IDX 0
+#define regPCTL0_PG_IGNORE_DEEPSLEEP_IB 0x0a85
+#define regPCTL0_PG_IGNORE_DEEPSLEEP_IB_BASE_IDX 0
+#define regPCTL0_SLICE0_CFG_DAGB_BUSY 0x0a86
+#define regPCTL0_SLICE0_CFG_DAGB_BUSY_BASE_IDX 0
+#define regPCTL0_SLICE0_CFG_DS_ALLOW 0x0a87
+#define regPCTL0_SLICE0_CFG_DS_ALLOW_BASE_IDX 0
+#define regPCTL0_SLICE0_CFG_DS_ALLOW_IB 0x0a88
+#define regPCTL0_SLICE0_CFG_DS_ALLOW_IB_BASE_IDX 0
+#define regPCTL0_SLICE1_CFG_DAGB_BUSY 0x0a89
+#define regPCTL0_SLICE1_CFG_DAGB_BUSY_BASE_IDX 0
+#define regPCTL0_SLICE1_CFG_DS_ALLOW 0x0a8a
+#define regPCTL0_SLICE1_CFG_DS_ALLOW_BASE_IDX 0
+#define regPCTL0_SLICE1_CFG_DS_ALLOW_IB 0x0a8b
+#define regPCTL0_SLICE1_CFG_DS_ALLOW_IB_BASE_IDX 0
+#define regPCTL0_SLICE2_CFG_DAGB_BUSY 0x0a8c
+#define regPCTL0_SLICE2_CFG_DAGB_BUSY_BASE_IDX 0
+#define regPCTL0_SLICE2_CFG_DS_ALLOW 0x0a8d
+#define regPCTL0_SLICE2_CFG_DS_ALLOW_BASE_IDX 0
+#define regPCTL0_SLICE2_CFG_DS_ALLOW_IB 0x0a8e
+#define regPCTL0_SLICE2_CFG_DS_ALLOW_IB_BASE_IDX 0
+#define regPCTL0_SLICE3_CFG_DAGB_BUSY 0x0a8f
+#define regPCTL0_SLICE3_CFG_DAGB_BUSY_BASE_IDX 0
+#define regPCTL0_SLICE3_CFG_DS_ALLOW 0x0a90
+#define regPCTL0_SLICE3_CFG_DS_ALLOW_BASE_IDX 0
+#define regPCTL0_SLICE3_CFG_DS_ALLOW_IB 0x0a91
+#define regPCTL0_SLICE3_CFG_DS_ALLOW_IB_BASE_IDX 0
+#define regPCTL0_SLICE4_CFG_DAGB_BUSY 0x0a92
+#define regPCTL0_SLICE4_CFG_DAGB_BUSY_BASE_IDX 0
+#define regPCTL0_SLICE4_CFG_DS_ALLOW 0x0a93
+#define regPCTL0_SLICE4_CFG_DS_ALLOW_BASE_IDX 0
+#define regPCTL0_SLICE4_CFG_DS_ALLOW_IB 0x0a94
+#define regPCTL0_SLICE4_CFG_DS_ALLOW_IB_BASE_IDX 0
+#define regPCTL0_UTCL2_MISC 0x0a95
+#define regPCTL0_UTCL2_MISC_BASE_IDX 0
+#define regPCTL0_SLICE0_MISC 0x0a96
+#define regPCTL0_SLICE0_MISC_BASE_IDX 0
+#define regPCTL0_SLICE1_MISC 0x0a97
+#define regPCTL0_SLICE1_MISC_BASE_IDX 0
+#define regPCTL0_SLICE2_MISC 0x0a98
+#define regPCTL0_SLICE2_MISC_BASE_IDX 0
+#define regPCTL0_SLICE3_MISC 0x0a99
+#define regPCTL0_SLICE3_MISC_BASE_IDX 0
+#define regPCTL0_SLICE4_MISC 0x0a9a
+#define regPCTL0_SLICE4_MISC_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_l1tlb_vml1dec
+// base address: 0x62c00
+#define regMC_VM_MX_L1_TLB0_STATUS 0x0b08
+#define regMC_VM_MX_L1_TLB0_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB1_STATUS 0x0b09
+#define regMC_VM_MX_L1_TLB1_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB2_STATUS 0x0b0a
+#define regMC_VM_MX_L1_TLB2_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB3_STATUS 0x0b0b
+#define regMC_VM_MX_L1_TLB3_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB4_STATUS 0x0b0c
+#define regMC_VM_MX_L1_TLB4_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB5_STATUS 0x0b0d
+#define regMC_VM_MX_L1_TLB5_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB6_STATUS 0x0b0e
+#define regMC_VM_MX_L1_TLB6_STATUS_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB7_STATUS 0x0b0f
+#define regMC_VM_MX_L1_TLB7_STATUS_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_l1tlb_vml1pldec
+// base address: 0x62c80
+#define regMC_VM_MX_L1_PERFCOUNTER0_CFG 0x0b20
+#define regMC_VM_MX_L1_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMC_VM_MX_L1_PERFCOUNTER1_CFG 0x0b21
+#define regMC_VM_MX_L1_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMC_VM_MX_L1_PERFCOUNTER2_CFG 0x0b22
+#define regMC_VM_MX_L1_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regMC_VM_MX_L1_PERFCOUNTER3_CFG 0x0b23
+#define regMC_VM_MX_L1_PERFCOUNTER3_CFG_BASE_IDX 0
+#define regMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL 0x0b24
+#define regMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_l1tlb_vml1prdec
+// base address: 0x62cc0
+#define regMC_VM_MX_L1_PERFCOUNTER_LO 0x0b30
+#define regMC_VM_MX_L1_PERFCOUNTER_LO_BASE_IDX 0
+#define regMC_VM_MX_L1_PERFCOUNTER_HI 0x0b31
+#define regMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_atcl2dec
+// base address: 0x62d00
+#define regATC_L2_CNTL 0x0b40
+#define regATC_L2_CNTL_BASE_IDX 0
+#define regATC_L2_CNTL2 0x0b41
+#define regATC_L2_CNTL2_BASE_IDX 0
+#define regATC_L2_CACHE_DATA0 0x0b44
+#define regATC_L2_CACHE_DATA0_BASE_IDX 0
+#define regATC_L2_CACHE_DATA1 0x0b45
+#define regATC_L2_CACHE_DATA1_BASE_IDX 0
+#define regATC_L2_CACHE_DATA2 0x0b46
+#define regATC_L2_CACHE_DATA2_BASE_IDX 0
+#define regATC_L2_CACHE_DATA3 0x0b47
+#define regATC_L2_CACHE_DATA3_BASE_IDX 0
+#define regATC_L2_CNTL3 0x0b48
+#define regATC_L2_CNTL3_BASE_IDX 0
+#define regATC_L2_STATUS 0x0b49
+#define regATC_L2_STATUS_BASE_IDX 0
+#define regATC_L2_STATUS2 0x0b4a
+#define regATC_L2_STATUS2_BASE_IDX 0
+#define regATC_L2_MISC_CG 0x0b4b
+#define regATC_L2_MISC_CG_BASE_IDX 0
+#define regATC_L2_MEM_POWER_LS 0x0b4c
+#define regATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define regATC_L2_CGTT_CLK_CTRL 0x0b4d
+#define regATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regATC_L2_CACHE_4K_DSM_INDEX 0x0b4f
+#define regATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0
+#define regATC_L2_CACHE_32K_DSM_INDEX 0x0b50
+#define regATC_L2_CACHE_32K_DSM_INDEX_BASE_IDX 0
+#define regATC_L2_CACHE_2M_DSM_INDEX 0x0b51
+#define regATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0
+#define regATC_L2_CACHE_4K_DSM_CNTL 0x0b52
+#define regATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0
+#define regATC_L2_CACHE_32K_DSM_CNTL 0x0b53
+#define regATC_L2_CACHE_32K_DSM_CNTL_BASE_IDX 0
+#define regATC_L2_CACHE_2M_DSM_CNTL 0x0b54
+#define regATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0
+#define regATC_L2_CNTL4 0x0b55
+#define regATC_L2_CNTL4_BASE_IDX 0
+#define regATC_L2_MM_GROUP_RT_CLASSES 0x0b56
+#define regATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vml2pfdec
+// base address: 0x62d80
+#define regVM_L2_CNTL 0x0b60
+#define regVM_L2_CNTL_BASE_IDX 0
+#define regVM_L2_CNTL2 0x0b61
+#define regVM_L2_CNTL2_BASE_IDX 0
+#define regVM_L2_CNTL3 0x0b62
+#define regVM_L2_CNTL3_BASE_IDX 0
+#define regVM_L2_STATUS 0x0b63
+#define regVM_L2_STATUS_BASE_IDX 0
+#define regVM_DUMMY_PAGE_FAULT_CNTL 0x0b64
+#define regVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define regVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x0b65
+#define regVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define regVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x0b66
+#define regVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_CNTL 0x0b67
+#define regVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_CNTL2 0x0b68
+#define regVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL3 0x0b69
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL4 0x0b6a
+#define regVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_STATUS 0x0b6b
+#define regVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_ADDR_LO32 0x0b6c
+#define regVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_ADDR_HI32 0x0b6d
+#define regVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x0b6e
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x0b6f
+#define regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x0b71
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x0b72
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x0b73
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x0b74
+#define regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x0b75
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x0b76
+#define regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define regVM_L2_CNTL4 0x0b77
+#define regVM_L2_CNTL4_BASE_IDX 0
+#define regVM_L2_CNTL5 0x0b78
+#define regVM_L2_CNTL5_BASE_IDX 0
+#define regVM_L2_MM_GROUP_RT_CLASSES 0x0b79
+#define regVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define regVM_L2_BANK_SELECT_RESERVED_CID 0x0b7a
+#define regVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define regVM_L2_BANK_SELECT_RESERVED_CID2 0x0b7b
+#define regVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define regVM_L2_CACHE_PARITY_CNTL 0x0b7c
+#define regVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define regVM_L2_CGTT_CLK_CTRL 0x0b7d
+#define regVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regVM_L2_CGTT_BUSY_CTRL 0x0b7e
+#define regVM_L2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regVML2_MEM_ECC_INDEX 0x0b82
+#define regVML2_MEM_ECC_INDEX_BASE_IDX 0
+#define regVML2_WALKER_MEM_ECC_INDEX 0x0b83
+#define regVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define regUTCL2_MEM_ECC_INDEX 0x0b84
+#define regUTCL2_MEM_ECC_INDEX_BASE_IDX 0
+#define regVML2_MEM_ECC_CNTL 0x0b85
+#define regVML2_MEM_ECC_CNTL_BASE_IDX 0
+#define regVML2_WALKER_MEM_ECC_CNTL 0x0b86
+#define regVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0
+#define regUTCL2_MEM_ECC_CNTL 0x0b87
+#define regUTCL2_MEM_ECC_CNTL_BASE_IDX 0
+#define regVML2_MEM_ECC_STATUS 0x0b88
+#define regVML2_MEM_ECC_STATUS_BASE_IDX 0
+#define regVML2_WALKER_MEM_ECC_STATUS 0x0b89
+#define regVML2_WALKER_MEM_ECC_STATUS_BASE_IDX 0
+#define regUTCL2_MEM_ECC_STATUS 0x0b8a
+#define regUTCL2_MEM_ECC_STATUS_BASE_IDX 0
+#define regUTCL2_EDC_MODE 0x0b8b
+#define regUTCL2_EDC_MODE_BASE_IDX 0
+#define regUTCL2_EDC_CONFIG 0x0b8c
+#define regUTCL2_EDC_CONFIG_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vml2vcdec
+// base address: 0x62e80
+#define regVM_CONTEXT0_CNTL 0x0ba0
+#define regVM_CONTEXT0_CNTL_BASE_IDX 0
+#define regVM_CONTEXT1_CNTL 0x0ba1
+#define regVM_CONTEXT1_CNTL_BASE_IDX 0
+#define regVM_CONTEXT2_CNTL 0x0ba2
+#define regVM_CONTEXT2_CNTL_BASE_IDX 0
+#define regVM_CONTEXT3_CNTL 0x0ba3
+#define regVM_CONTEXT3_CNTL_BASE_IDX 0
+#define regVM_CONTEXT4_CNTL 0x0ba4
+#define regVM_CONTEXT4_CNTL_BASE_IDX 0
+#define regVM_CONTEXT5_CNTL 0x0ba5
+#define regVM_CONTEXT5_CNTL_BASE_IDX 0
+#define regVM_CONTEXT6_CNTL 0x0ba6
+#define regVM_CONTEXT6_CNTL_BASE_IDX 0
+#define regVM_CONTEXT7_CNTL 0x0ba7
+#define regVM_CONTEXT7_CNTL_BASE_IDX 0
+#define regVM_CONTEXT8_CNTL 0x0ba8
+#define regVM_CONTEXT8_CNTL_BASE_IDX 0
+#define regVM_CONTEXT9_CNTL 0x0ba9
+#define regVM_CONTEXT9_CNTL_BASE_IDX 0
+#define regVM_CONTEXT10_CNTL 0x0baa
+#define regVM_CONTEXT10_CNTL_BASE_IDX 0
+#define regVM_CONTEXT11_CNTL 0x0bab
+#define regVM_CONTEXT11_CNTL_BASE_IDX 0
+#define regVM_CONTEXT12_CNTL 0x0bac
+#define regVM_CONTEXT12_CNTL_BASE_IDX 0
+#define regVM_CONTEXT13_CNTL 0x0bad
+#define regVM_CONTEXT13_CNTL_BASE_IDX 0
+#define regVM_CONTEXT14_CNTL 0x0bae
+#define regVM_CONTEXT14_CNTL_BASE_IDX 0
+#define regVM_CONTEXT15_CNTL 0x0baf
+#define regVM_CONTEXT15_CNTL_BASE_IDX 0
+#define regVM_CONTEXTS_DISABLE 0x0bb0
+#define regVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_SEM 0x0bb1
+#define regVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_SEM 0x0bb2
+#define regVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_SEM 0x0bb3
+#define regVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_SEM 0x0bb4
+#define regVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_SEM 0x0bb5
+#define regVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_SEM 0x0bb6
+#define regVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_SEM 0x0bb7
+#define regVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_SEM 0x0bb8
+#define regVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_SEM 0x0bb9
+#define regVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_SEM 0x0bba
+#define regVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_SEM 0x0bbb
+#define regVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_SEM 0x0bbc
+#define regVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_SEM 0x0bbd
+#define regVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_SEM 0x0bbe
+#define regVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_SEM 0x0bbf
+#define regVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_SEM 0x0bc0
+#define regVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_SEM 0x0bc1
+#define regVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_SEM 0x0bc2
+#define regVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_REQ 0x0bc3
+#define regVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_REQ 0x0bc4
+#define regVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_REQ 0x0bc5
+#define regVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_REQ 0x0bc6
+#define regVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_REQ 0x0bc7
+#define regVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_REQ 0x0bc8
+#define regVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_REQ 0x0bc9
+#define regVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_REQ 0x0bca
+#define regVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_REQ 0x0bcb
+#define regVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_REQ 0x0bcc
+#define regVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_REQ 0x0bcd
+#define regVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_REQ 0x0bce
+#define regVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_REQ 0x0bcf
+#define regVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_REQ 0x0bd0
+#define regVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_REQ 0x0bd1
+#define regVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_REQ 0x0bd2
+#define regVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_REQ 0x0bd3
+#define regVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_REQ 0x0bd4
+#define regVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_ACK 0x0bd5
+#define regVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_ACK 0x0bd6
+#define regVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_ACK 0x0bd7
+#define regVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_ACK 0x0bd8
+#define regVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_ACK 0x0bd9
+#define regVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_ACK 0x0bda
+#define regVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_ACK 0x0bdb
+#define regVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_ACK 0x0bdc
+#define regVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_ACK 0x0bdd
+#define regVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_ACK 0x0bde
+#define regVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_ACK 0x0bdf
+#define regVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_ACK 0x0be0
+#define regVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_ACK 0x0be1
+#define regVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_ACK 0x0be2
+#define regVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_ACK 0x0be3
+#define regVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_ACK 0x0be4
+#define regVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_ACK 0x0be5
+#define regVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_ACK 0x0be6
+#define regVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x0be7
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x0be8
+#define regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x0be9
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x0bea
+#define regVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x0beb
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x0bec
+#define regVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x0bed
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x0bee
+#define regVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x0bef
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x0bf0
+#define regVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x0bf1
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x0bf2
+#define regVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x0bf3
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x0bf4
+#define regVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x0bf5
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x0bf6
+#define regVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x0bf7
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x0bf8
+#define regVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x0bf9
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x0bfa
+#define regVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x0bfb
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x0bfc
+#define regVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x0bfd
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x0bfe
+#define regVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x0bff
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x0c00
+#define regVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x0c01
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x0c02
+#define regVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x0c03
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x0c04
+#define regVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x0c05
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x0c06
+#define regVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0c07
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0c08
+#define regVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x0c09
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x0c0a
+#define regVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0c0b
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0c0c
+#define regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0c0d
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0c0e
+#define regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0c0f
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0c10
+#define regVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0c11
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x0c12
+#define regVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0c13
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0c14
+#define regVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x0c15
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x0c16
+#define regVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0c17
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0c18
+#define regVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x0c19
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x0c1a
+#define regVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0c1b
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0c1c
+#define regVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x0c1d
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0c1e
+#define regVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x0c1f
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x0c20
+#define regVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x0c21
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x0c22
+#define regVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x0c23
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x0c24
+#define regVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x0c25
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x0c26
+#define regVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x0c27
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x0c28
+#define regVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x0c29
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x0c2a
+#define regVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0c2b
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0c2c
+#define regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0c2d
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0c2e
+#define regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x0c2f
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x0c30
+#define regVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0c31
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0c32
+#define regVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0c33
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0c34
+#define regVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0c35
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x0c36
+#define regVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0c37
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0c38
+#define regVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x0c39
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x0c3a
+#define regVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0c3b
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0c3c
+#define regVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x0c3d
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x0c3e
+#define regVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x0c3f
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x0c40
+#define regVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x0c41
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x0c42
+#define regVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x0c43
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x0c44
+#define regVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x0c45
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x0c46
+#define regVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x0c47
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x0c48
+#define regVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x0c49
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x0c4a
+#define regVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x0c4b
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x0c4c
+#define regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0c4d
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0c4e
+#define regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x0c4f
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x0c50
+#define regVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0c51
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0c52
+#define regVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x0c53
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x0c54
+#define regVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0c55
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0c56
+#define regVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0c57
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0c58
+#define regVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0c59
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x0c5a
+#define regVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0c5b
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0c5c
+#define regVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x0c5d
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x0c5e
+#define regVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x0c5f
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x0c60
+#define regVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x0c61
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x0c62
+#define regVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x0c63
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x0c64
+#define regVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x0c65
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x0c66
+#define regVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x0c67
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x0c68
+#define regVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x0c69
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x0c6a
+#define regVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vmsharedpfdec
+// base address: 0x63200
+#define regMC_VM_NB_MMIOBASE 0x0c80
+#define regMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define regMC_VM_NB_MMIOLIMIT 0x0c81
+#define regMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define regMC_VM_NB_PCI_CTRL 0x0c82
+#define regMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define regMC_VM_NB_PCI_ARB 0x0c83
+#define regMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define regMC_VM_NB_TOP_OF_DRAM_SLOT1 0x0c84
+#define regMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define regMC_VM_NB_LOWER_TOP_OF_DRAM2 0x0c85
+#define regMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define regMC_VM_NB_UPPER_TOP_OF_DRAM2 0x0c86
+#define regMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define regMC_VM_FB_OFFSET 0x0c87
+#define regMC_VM_FB_OFFSET_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x0c88
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x0c89
+#define regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define regMC_VM_STEERING 0x0c8a
+#define regMC_VM_STEERING_BASE_IDX 0
+#define regMC_SHARED_VIRT_RESET_REQ 0x0c8b
+#define regMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regMC_MEM_POWER_LS 0x0c8c
+#define regMC_MEM_POWER_LS_BASE_IDX 0
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x0c8d
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x0c8e
+#define regMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define regMC_VM_APT_CNTL 0x0c91
+#define regMC_VM_APT_CNTL_BASE_IDX 0
+#define regMC_VM_LOCAL_HBM_ADDRESS_START 0x0c92
+#define regMC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 0
+#define regMC_VM_LOCAL_HBM_ADDRESS_END 0x0c93
+#define regMC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 0
+#define regMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x0c94
+#define regMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define regUTCL2_CGTT_CLK_CTRL 0x0c95
+#define regUTCL2_CGTT_CLK_CTRL_BASE_IDX 0
+#define regMC_VM_XGMI_LFB_CNTL 0x0c97
+#define regMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define regMC_VM_XGMI_LFB_SIZE 0x0c98
+#define regMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+#define regMC_VM_CACHEABLE_DRAM_CNTL 0x0c99
+#define regMC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 0
+#define regMC_VM_HOST_MAPPING 0x0c9a
+#define regMC_VM_HOST_MAPPING_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vmsharedvcdec
+// base address: 0x63270
+#define regMC_VM_FB_LOCATION_BASE 0x0c9c
+#define regMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regMC_VM_FB_LOCATION_TOP 0x0c9d
+#define regMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define regMC_VM_AGP_TOP 0x0c9e
+#define regMC_VM_AGP_TOP_BASE_IDX 0
+#define regMC_VM_AGP_BOT 0x0c9f
+#define regMC_VM_AGP_BOT_BASE_IDX 0
+#define regMC_VM_AGP_BASE 0x0ca0
+#define regMC_VM_AGP_BASE_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x0ca1
+#define regMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define regMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0ca2
+#define regMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define regMC_VM_MX_L1_TLB_CNTL 0x0ca3
+#define regMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vmsharedhvdec
+// base address: 0x632b0
+#define regMC_VM_FB_SIZE_OFFSET_VF0 0x0cac
+#define regMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF1 0x0cad
+#define regMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF2 0x0cae
+#define regMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF3 0x0caf
+#define regMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF4 0x0cb0
+#define regMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF5 0x0cb1
+#define regMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF6 0x0cb2
+#define regMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF7 0x0cb3
+#define regMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF8 0x0cb4
+#define regMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF9 0x0cb5
+#define regMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF10 0x0cb6
+#define regMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF11 0x0cb7
+#define regMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF12 0x0cb8
+#define regMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF13 0x0cb9
+#define regMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF14 0x0cba
+#define regMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 0
+#define regMC_VM_FB_SIZE_OFFSET_VF15 0x0cbb
+#define regMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 0
+#define regVM_IOMMU_MMIO_CNTRL_1 0x0cbc
+#define regVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 0
+#define regMC_VM_MARC_BASE_LO_0 0x0cbd
+#define regMC_VM_MARC_BASE_LO_0_BASE_IDX 0
+#define regMC_VM_MARC_BASE_LO_1 0x0cbe
+#define regMC_VM_MARC_BASE_LO_1_BASE_IDX 0
+#define regMC_VM_MARC_BASE_LO_2 0x0cbf
+#define regMC_VM_MARC_BASE_LO_2_BASE_IDX 0
+#define regMC_VM_MARC_BASE_LO_3 0x0cc0
+#define regMC_VM_MARC_BASE_LO_3_BASE_IDX 0
+#define regMC_VM_MARC_BASE_HI_0 0x0cc1
+#define regMC_VM_MARC_BASE_HI_0_BASE_IDX 0
+#define regMC_VM_MARC_BASE_HI_1 0x0cc2
+#define regMC_VM_MARC_BASE_HI_1_BASE_IDX 0
+#define regMC_VM_MARC_BASE_HI_2 0x0cc3
+#define regMC_VM_MARC_BASE_HI_2_BASE_IDX 0
+#define regMC_VM_MARC_BASE_HI_3 0x0cc4
+#define regMC_VM_MARC_BASE_HI_3_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_LO_0 0x0cc5
+#define regMC_VM_MARC_RELOC_LO_0_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_LO_1 0x0cc6
+#define regMC_VM_MARC_RELOC_LO_1_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_LO_2 0x0cc7
+#define regMC_VM_MARC_RELOC_LO_2_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_LO_3 0x0cc8
+#define regMC_VM_MARC_RELOC_LO_3_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_HI_0 0x0cc9
+#define regMC_VM_MARC_RELOC_HI_0_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_HI_1 0x0cca
+#define regMC_VM_MARC_RELOC_HI_1_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_HI_2 0x0ccb
+#define regMC_VM_MARC_RELOC_HI_2_BASE_IDX 0
+#define regMC_VM_MARC_RELOC_HI_3 0x0ccc
+#define regMC_VM_MARC_RELOC_HI_3_BASE_IDX 0
+#define regMC_VM_MARC_LEN_LO_0 0x0ccd
+#define regMC_VM_MARC_LEN_LO_0_BASE_IDX 0
+#define regMC_VM_MARC_LEN_LO_1 0x0cce
+#define regMC_VM_MARC_LEN_LO_1_BASE_IDX 0
+#define regMC_VM_MARC_LEN_LO_2 0x0ccf
+#define regMC_VM_MARC_LEN_LO_2_BASE_IDX 0
+#define regMC_VM_MARC_LEN_LO_3 0x0cd0
+#define regMC_VM_MARC_LEN_LO_3_BASE_IDX 0
+#define regMC_VM_MARC_LEN_HI_0 0x0cd1
+#define regMC_VM_MARC_LEN_HI_0_BASE_IDX 0
+#define regMC_VM_MARC_LEN_HI_1 0x0cd2
+#define regMC_VM_MARC_LEN_HI_1_BASE_IDX 0
+#define regMC_VM_MARC_LEN_HI_2 0x0cd3
+#define regMC_VM_MARC_LEN_HI_2_BASE_IDX 0
+#define regMC_VM_MARC_LEN_HI_3 0x0cd4
+#define regMC_VM_MARC_LEN_HI_3_BASE_IDX 0
+#define regVM_IOMMU_CONTROL_REGISTER 0x0cd5
+#define regVM_IOMMU_CONTROL_REGISTER_BASE_IDX 0
+#define regVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x0cd6
+#define regVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL 0x0cd7
+#define regVM_PCIE_ATS_CNTL_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_0 0x0cd8
+#define regVM_PCIE_ATS_CNTL_VF_0_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_1 0x0cd9
+#define regVM_PCIE_ATS_CNTL_VF_1_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_2 0x0cda
+#define regVM_PCIE_ATS_CNTL_VF_2_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_3 0x0cdb
+#define regVM_PCIE_ATS_CNTL_VF_3_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_4 0x0cdc
+#define regVM_PCIE_ATS_CNTL_VF_4_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_5 0x0cdd
+#define regVM_PCIE_ATS_CNTL_VF_5_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_6 0x0cde
+#define regVM_PCIE_ATS_CNTL_VF_6_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_7 0x0cdf
+#define regVM_PCIE_ATS_CNTL_VF_7_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_8 0x0ce0
+#define regVM_PCIE_ATS_CNTL_VF_8_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_9 0x0ce1
+#define regVM_PCIE_ATS_CNTL_VF_9_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_10 0x0ce2
+#define regVM_PCIE_ATS_CNTL_VF_10_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_11 0x0ce3
+#define regVM_PCIE_ATS_CNTL_VF_11_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_12 0x0ce4
+#define regVM_PCIE_ATS_CNTL_VF_12_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_13 0x0ce5
+#define regVM_PCIE_ATS_CNTL_VF_13_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_14 0x0ce6
+#define regVM_PCIE_ATS_CNTL_VF_14_BASE_IDX 0
+#define regVM_PCIE_ATS_CNTL_VF_15 0x0ce7
+#define regVM_PCIE_ATS_CNTL_VF_15_BASE_IDX 0
+#define regMC_SHARED_ACTIVE_FCN_ID 0x0ce8
+#define regMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define regMC_VM_XGMI_GPUIOV_ENABLE 0x0ce9
+#define regMC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_atcl2pfcntrdec
+// base address: 0x633b0
+#define regATC_L2_PERFCOUNTER_LO 0x0cec
+#define regATC_L2_PERFCOUNTER_LO_BASE_IDX 0
+#define regATC_L2_PERFCOUNTER_HI 0x0ced
+#define regATC_L2_PERFCOUNTER_HI_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_atcl2pfcntldec
+// base address: 0x633b8
+#define regATC_L2_PERFCOUNTER0_CFG 0x0cee
+#define regATC_L2_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regATC_L2_PERFCOUNTER1_CFG 0x0cef
+#define regATC_L2_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regATC_L2_PERFCOUNTER_RSLT_CNTL 0x0cf0
+#define regATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vml2pldec
+// base address: 0x633d0
+#define regMC_VM_L2_PERFCOUNTER0_CFG 0x0cf4
+#define regMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER1_CFG 0x0cf5
+#define regMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER2_CFG 0x0cf6
+#define regMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER3_CFG 0x0cf7
+#define regMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER4_CFG 0x0cf8
+#define regMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER5_CFG 0x0cf9
+#define regMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER6_CFG 0x0cfa
+#define regMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER7_CFG 0x0cfb
+#define regMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x0d04
+#define regMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_vml2prdec
+// base address: 0x63430
+#define regMC_VM_L2_PERFCOUNTER_LO 0x0d0c
+#define regMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 0
+#define regMC_VM_L2_PERFCOUNTER_HI 0x0d0d
+#define regMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_l2tlbdec
+// base address: 0x63470
+#define regL2TLB_TLB0_STATUS 0x0d1d
+#define regL2TLB_TLB0_STATUS_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO 0x0d1f
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI 0x0d20
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO 0x0d21
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO_BASE_IDX 0
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI 0x0d22
+#define regUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_l2tlbpldec
+// base address: 0x63490
+#define regL2TLB_PERFCOUNTER0_CFG 0x0d24
+#define regL2TLB_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regL2TLB_PERFCOUNTER1_CFG 0x0d25
+#define regL2TLB_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regL2TLB_PERFCOUNTER2_CFG 0x0d26
+#define regL2TLB_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regL2TLB_PERFCOUNTER3_CFG 0x0d27
+#define regL2TLB_PERFCOUNTER3_CFG_BASE_IDX 0
+#define regL2TLB_PERFCOUNTER_RSLT_CNTL 0x0d28
+#define regL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+
+// addressBlock: aid_mmhub_utcl2_l2tlbprdec
+// base address: 0x634b0
+#define regL2TLB_PERFCOUNTER_LO 0x0d2c
+#define regL2TLB_PERFCOUNTER_LO_BASE_IDX 0
+#define regL2TLB_PERFCOUNTER_HI 0x0d2d
+#define regL2TLB_PERFCOUNTER_HI_BASE_IDX 0
+
+
+#endif
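+
+/*
+ * Illustrative aside (not part of the generated header above): the
+ * reg*_BASE_IDX companion of every offset selects which segment base the
+ * raw offset is added to before the register is accessed.  A minimal
+ * sketch of that resolution step is shown below; the table contents and
+ * the helper name mmhub_reg_index() are hypothetical, used only to show
+ * the shape of the lookup rather than the driver's actual tables.
+ *
+ * #include <stdint.h>
+ *
+ * // Hypothetical per-instance segment bases (DWORD indices) for MMHUB.
+ * static const uint32_t mmhub_segment_base[] = {
+ *         0x00018000,     // BASE_IDX 0 - value is illustrative only
+ * };
+ *
+ * // Resolve a (reg offset, BASE_IDX) pair to an absolute register index.
+ * static inline uint32_t mmhub_reg_index(uint32_t reg_offset, uint32_t base_idx)
+ * {
+ *         return mmhub_segment_base[base_idx] + reg_offset;
+ * }
+ *
+ * // e.g. regMC_VM_FB_LOCATION_BASE (0x0c9c) with its BASE_IDX of 0:
+ * //   uint32_t idx = mmhub_reg_index(0x0c9c, 0);
+ */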
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h
new file mode 100644
index 000000000000..af41468ce69f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h
@@ -0,0 +1,22315 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mmhub_1_8_0_SH_MASK_HEADER
+#define _mmhub_1_8_0_SH_MASK_HEADER
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec0
+//DAGB0_RDCLI0
+#define DAGB0_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI1
+#define DAGB0_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI2
+#define DAGB0_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI3
+#define DAGB0_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI4
+#define DAGB0_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI5
+#define DAGB0_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI6
+#define DAGB0_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI7
+#define DAGB0_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI8
+#define DAGB0_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI9
+#define DAGB0_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI10
+#define DAGB0_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI11
+#define DAGB0_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI12
+#define DAGB0_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI13
+#define DAGB0_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI14
+#define DAGB0_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI15
+#define DAGB0_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI15__MAX_OSD_MASK 0xFC000000L
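+/*
+ * Illustrative aside (not part of the generated header): each field above
+ * is described by a __SHIFT/_MASK pair, which generic accessors combine to
+ * read or update a field without disturbing the rest of the register.  The
+ * get_field()/set_field() helpers below are hypothetical stand-ins for the
+ * driver's own field-access macros, shown only to demonstrate the pattern.
+ *
+ * #include <stdint.h>
+ *
+ * // Extract a field value from a raw register word.
+ * static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
+ * {
+ *         return (reg & mask) >> shift;
+ * }
+ *
+ * // Replace a field value, leaving all other bits of the word untouched.
+ * static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
+ *                                  uint32_t val)
+ * {
+ *         return (reg & ~mask) | ((val << shift) & mask);
+ * }
+ *
+ * // Example with the DAGB0_RDCLI0 definitions above:
+ * //   urg = get_field(v, DAGB0_RDCLI0__URG_HIGH_MASK,
+ * //                   DAGB0_RDCLI0__URG_HIGH__SHIFT);
+ * //   v   = set_field(v, DAGB0_RDCLI0__MAX_OSD_MASK,
+ * //                   DAGB0_RDCLI0__MAX_OSD__SHIFT, 0x3f);
+ */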
+//DAGB0_RD_CNTL
+#define DAGB0_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB0_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB0_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB0_RD_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB0_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB0_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB0_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB0_RD_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB0_RD_GMI_CNTL
+#define DAGB0_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB0_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB0_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB0_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB0_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB0_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB0_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB0_RD_ADDR_DAGB
+#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_RD_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB0_RD_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB0_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB0_RD_CGTT_CLK_CTRL
+#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB0_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB0_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB0_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_RD_VC0_CNTL
+#define DAGB0_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC1_CNTL
+#define DAGB0_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC2_CNTL
+#define DAGB0_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC3_CNTL
+#define DAGB0_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC4_CNTL
+#define DAGB0_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC5_CNTL
+#define DAGB0_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC6_CNTL
+#define DAGB0_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC7_CNTL
+#define DAGB0_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_CNTL_MISC
+#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB0_RD_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB0_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB0_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB0_RD_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB0_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB0_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB0_RD_TLB_CREDIT
+#define DAGB0_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB0_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB0_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB0_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB0_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB0_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB0_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB0_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB0_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB0_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB0_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB0_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB0_RD_RDRET_CREDIT_CNTL
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC0_CREDIT__SHIFT 0x0
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC1_CREDIT__SHIFT 0x6
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC2_CREDIT__SHIFT 0xc
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC3_CREDIT__SHIFT 0x12
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC4_CREDIT__SHIFT 0x18
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC_MODE__SHIFT 0x1e
+#define DAGB0_RD_RDRET_CREDIT_CNTL__FIX_EQ__SHIFT 0x1f
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC0_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC1_CREDIT_MASK 0x00000FC0L
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC2_CREDIT_MASK 0x0003F000L
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC3_CREDIT_MASK 0x00FC0000L
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC4_CREDIT_MASK 0x3F000000L
+#define DAGB0_RD_RDRET_CREDIT_CNTL__VC_MODE_MASK 0x40000000L
+#define DAGB0_RD_RDRET_CREDIT_CNTL__FIX_EQ_MASK 0x80000000L
+//DAGB0_RD_RDRET_CREDIT_CNTL2
+#define DAGB0_RD_RDRET_CREDIT_CNTL2__IO_CREDIT__SHIFT 0x0
+#define DAGB0_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT__SHIFT 0x6
+#define DAGB0_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT__SHIFT 0xc
+#define DAGB0_RD_RDRET_CREDIT_CNTL2__IO_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT_MASK 0x00000FC0L
+#define DAGB0_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT_MASK 0x0007F000L
+//DAGB0_RDCLI_ASK_PENDING
+#define DAGB0_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_GO_PENDING
+#define DAGB0_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_GBLSEND_PENDING
+#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_TLB_PENDING
+#define DAGB0_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_OARB_PENDING
+#define DAGB0_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_OSD_PENDING
+#define DAGB0_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_NOALLOC_OVERRIDE
+#define DAGB0_RDCLI_NOALLOC_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB0_RDCLI_NOALLOC_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB0_RDCLI_NOALLOC_OVERRIDE_VALUE
+#define DAGB0_RDCLI_NOALLOC_OVERRIDE_VALUE__VALUE__SHIFT 0x0
+#define DAGB0_RDCLI_NOALLOC_OVERRIDE_VALUE__VALUE_MASK 0x0000FFFFL
+//DAGB0_WRCLI0
+#define DAGB0_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI1
+#define DAGB0_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI2
+#define DAGB0_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI3
+#define DAGB0_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI4
+#define DAGB0_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI5
+#define DAGB0_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI6
+#define DAGB0_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI7
+#define DAGB0_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI8
+#define DAGB0_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI9
+#define DAGB0_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI10
+#define DAGB0_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI11
+#define DAGB0_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI12
+#define DAGB0_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI13
+#define DAGB0_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI14
+#define DAGB0_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI15
+#define DAGB0_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WR_CNTL
+#define DAGB0_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB0_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB0_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB0_WR_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB0_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB0_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB0_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB0_WR_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB0_WR_GMI_CNTL
+#define DAGB0_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB0_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB0_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB0_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB0_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB0_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB0_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB0_WR_ADDR_DAGB
+#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_WR_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB0_WR_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB0_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB0_WR_CGTT_CLK_CTRL
+#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB0_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB0_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB0_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB
+#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_WR_DATA_DAGB_MAX_BURST0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_MAX_BURST1
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_VC0_CNTL
+#define DAGB0_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC1_CNTL
+#define DAGB0_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC2_CNTL
+#define DAGB0_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC3_CNTL
+#define DAGB0_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC4_CNTL
+#define DAGB0_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC5_CNTL
+#define DAGB0_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC6_CNTL
+#define DAGB0_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC7_CNTL
+#define DAGB0_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_CNTL_MISC
+#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB0_WR_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB0_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB0_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB0_WR_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB0_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB0_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB0_WR_TLB_CREDIT
+#define DAGB0_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB0_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB0_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB0_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB0_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB0_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB0_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB0_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB0_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB0_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB0_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB0_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB0_WR_DATA_CREDIT
+#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB0_WR_MISC_CREDIT
+#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB0_WR_OSD_CREDIT_CNTL1
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x4
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0x8
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xc
+#define DAGB0_WR_OSD_CREDIT_CNTL1__IO_CREDIT__SHIFT 0x10
+#define DAGB0_WR_OSD_CREDIT_CNTL1__GMI_CREDIT__SHIFT 0x14
+#define DAGB0_WR_OSD_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x18
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000000FL
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000000F0L
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00000F00L
+#define DAGB0_WR_OSD_CREDIT_CNTL1__VC3_CREDIT_MASK 0x0000F000L
+#define DAGB0_WR_OSD_CREDIT_CNTL1__IO_CREDIT_MASK 0x000F0000L
+#define DAGB0_WR_OSD_CREDIT_CNTL1__GMI_CREDIT_MASK 0x00F00000L
+#define DAGB0_WR_OSD_CREDIT_CNTL1__POOL_CREDIT_MASK 0x3F000000L
+//DAGB0_WR_OSD_CREDIT_CNTL2
+#define DAGB0_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN__SHIFT 0x0
+#define DAGB0_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY__SHIFT 0x4
+#define DAGB0_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN_MASK 0x0000000FL
+#define DAGB0_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY_MASK 0x00000010L
+//DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x5
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0xa
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xf
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x14
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE__SHIFT 0x19
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ__SHIFT 0x1a
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0__SHIFT 0x1b
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1__SHIFT 0x1c
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2__SHIFT 0x1d
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000003E0L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00007C00L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT_MASK 0x000F8000L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT_MASK 0x01F00000L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE_MASK 0x02000000L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ_MASK 0x04000000L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0_MASK 0x08000000L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1_MASK 0x10000000L
+#define DAGB0_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2_MASK 0x20000000L
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0x0000FFFFL
+//DAGB0_WRCLI_ASK_PENDING
+#define DAGB0_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GO_PENDING
+#define DAGB0_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GBLSEND_PENDING
+#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_TLB_PENDING
+#define DAGB0_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_OARB_PENDING
+#define DAGB0_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_OSD_PENDING
+#define DAGB0_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_DBUS_ASK_PENDING
+#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_DBUS_GO_PENDING
+#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_NOALLOC_OVERRIDE
+#define DAGB0_WRCLI_NOALLOC_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_NOALLOC_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB0_WRCLI_NOALLOC_OVERRIDE_VALUE
+#define DAGB0_WRCLI_NOALLOC_OVERRIDE_VALUE__VALUE__SHIFT 0x0
+#define DAGB0_WRCLI_NOALLOC_OVERRIDE_VALUE__VALUE_MASK 0x0000FFFFL
+//DAGB0_DAGB_DLY
+#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB0_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB0_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB0_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB0_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB0_CNTL_MISC
+#define DAGB0_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB0_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB0_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB0_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB0_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB0_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB0_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB0_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB0_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB0_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB0_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB0_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB0_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB0_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB0_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB0_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB0_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB0_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB0_CNTL_MISC2
+#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB0_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB0_CNTL_MISC2__HDP_CID__SHIFT 0xc
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x10
+#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB0_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB0_CNTL_MISC2__HDP_CID_MASK 0x0000F000L
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x003F0000L
+//DAGB0_FATAL_ERROR_CNTL
+#define DAGB0_FATAL_ERROR_CNTL__FILTER_NUM__SHIFT 0x0
+#define DAGB0_FATAL_ERROR_CNTL__FILTER_NUM_MASK 0x000003FFL
+//DAGB0_FATAL_ERROR_CLEAR
+#define DAGB0_FATAL_ERROR_CLEAR__CLEAR__SHIFT 0x0
+#define DAGB0_FATAL_ERROR_CLEAR__CLEAR_MASK 0x00000001L
+//DAGB0_FATAL_ERROR_STATUS0
+#define DAGB0_FATAL_ERROR_STATUS0__VALID__SHIFT 0x0
+#define DAGB0_FATAL_ERROR_STATUS0__CID__SHIFT 0x1
+#define DAGB0_FATAL_ERROR_STATUS0__ADDR_LO__SHIFT 0x6
+#define DAGB0_FATAL_ERROR_STATUS0__VALID_MASK 0x00000001L
+#define DAGB0_FATAL_ERROR_STATUS0__CID_MASK 0x0000003EL
+#define DAGB0_FATAL_ERROR_STATUS0__ADDR_LO_MASK 0xFFFFFFC0L
+//DAGB0_FATAL_ERROR_STATUS1
+#define DAGB0_FATAL_ERROR_STATUS1__ADDR_HI__SHIFT 0x0
+#define DAGB0_FATAL_ERROR_STATUS1__ADDR_HI_MASK 0x0001FFFFL
+//DAGB0_FATAL_ERROR_STATUS2
+#define DAGB0_FATAL_ERROR_STATUS2__TAG__SHIFT 0x0
+#define DAGB0_FATAL_ERROR_STATUS2__VFID__SHIFT 0x10
+#define DAGB0_FATAL_ERROR_STATUS2__VF__SHIFT 0x14
+#define DAGB0_FATAL_ERROR_STATUS2__SPACE__SHIFT 0x15
+#define DAGB0_FATAL_ERROR_STATUS2__IO__SHIFT 0x16
+#define DAGB0_FATAL_ERROR_STATUS2__SIZE__SHIFT 0x17
+#define DAGB0_FATAL_ERROR_STATUS2__DBGMSK__SHIFT 0x18
+#define DAGB0_FATAL_ERROR_STATUS2__FED__SHIFT 0x19
+#define DAGB0_FATAL_ERROR_STATUS2__TAG_MASK 0x0000FFFFL
+#define DAGB0_FATAL_ERROR_STATUS2__VFID_MASK 0x000F0000L
+#define DAGB0_FATAL_ERROR_STATUS2__VF_MASK 0x00100000L
+#define DAGB0_FATAL_ERROR_STATUS2__SPACE_MASK 0x00200000L
+#define DAGB0_FATAL_ERROR_STATUS2__IO_MASK 0x00400000L
+#define DAGB0_FATAL_ERROR_STATUS2__SIZE_MASK 0x00800000L
+#define DAGB0_FATAL_ERROR_STATUS2__DBGMSK_MASK 0x01000000L
+#define DAGB0_FATAL_ERROR_STATUS2__FED_MASK 0x02000000L
+//DAGB0_FATAL_ERROR_STATUS3
+#define DAGB0_FATAL_ERROR_STATUS3__NOALLOC__SHIFT 0x0
+#define DAGB0_FATAL_ERROR_STATUS3__UNITID__SHIFT 0x1
+#define DAGB0_FATAL_ERROR_STATUS3__OP__SHIFT 0x7
+#define DAGB0_FATAL_ERROR_STATUS3__SECLEVEL__SHIFT 0xe
+#define DAGB0_FATAL_ERROR_STATUS3__WRTMZ__SHIFT 0x11
+#define DAGB0_FATAL_ERROR_STATUS3__RDTMZ__SHIFT 0x12
+#define DAGB0_FATAL_ERROR_STATUS3__SNOOP__SHIFT 0x13
+#define DAGB0_FATAL_ERROR_STATUS3__INVAL__SHIFT 0x14
+#define DAGB0_FATAL_ERROR_STATUS3__NACK__SHIFT 0x15
+#define DAGB0_FATAL_ERROR_STATUS3__RO__SHIFT 0x17
+#define DAGB0_FATAL_ERROR_STATUS3__MEMLOG__SHIFT 0x18
+#define DAGB0_FATAL_ERROR_STATUS3__EOP__SHIFT 0x19
+#define DAGB0_FATAL_ERROR_STATUS3__NOALLOC_MASK 0x00000001L
+#define DAGB0_FATAL_ERROR_STATUS3__UNITID_MASK 0x0000007EL
+#define DAGB0_FATAL_ERROR_STATUS3__OP_MASK 0x00003F80L
+#define DAGB0_FATAL_ERROR_STATUS3__SECLEVEL_MASK 0x0001C000L
+#define DAGB0_FATAL_ERROR_STATUS3__WRTMZ_MASK 0x00020000L
+#define DAGB0_FATAL_ERROR_STATUS3__RDTMZ_MASK 0x00040000L
+#define DAGB0_FATAL_ERROR_STATUS3__SNOOP_MASK 0x00080000L
+#define DAGB0_FATAL_ERROR_STATUS3__INVAL_MASK 0x00100000L
+#define DAGB0_FATAL_ERROR_STATUS3__NACK_MASK 0x00600000L
+#define DAGB0_FATAL_ERROR_STATUS3__RO_MASK 0x00800000L
+#define DAGB0_FATAL_ERROR_STATUS3__MEMLOG_MASK 0x01000000L
+#define DAGB0_FATAL_ERROR_STATUS3__EOP_MASK 0x02000000L
+//DAGB0_FIFO_EMPTY
+#define DAGB0_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB0_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB0_FIFO_FULL
+#define DAGB0_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB0_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB0_WR_CREDITS_FULL
+#define DAGB0_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB0_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB0_RD_CREDITS_FULL
+#define DAGB0_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB0_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB0_PERFCOUNTER_LO
+#define DAGB0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB0_PERFCOUNTER_HI
+#define DAGB0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB0_PERFCOUNTER0_CFG
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER1_CFG
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER2_CFG
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER_RSLT_CNTL
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB0_L1TLB_REG_RW
+#define DAGB0_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL__SHIFT 0x0
+#define DAGB0_L1TLB_REG_RW__REG_READ_L1TLB_CTRL__SHIFT 0x1
+#define DAGB0_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL__SHIFT 0x2
+#define DAGB0_L1TLB_REG_RW__WDAT_PARITY_CHECK__SHIFT 0x4
+#define DAGB0_L1TLB_REG_RW__DISABLE_RDRET_CHECK__SHIFT 0x5
+#define DAGB0_L1TLB_REG_RW__RESERVE__SHIFT 0x6
+#define DAGB0_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL_MASK 0x00000001L
+#define DAGB0_L1TLB_REG_RW__REG_READ_L1TLB_CTRL_MASK 0x00000002L
+#define DAGB0_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL_MASK 0x00000004L
+#define DAGB0_L1TLB_REG_RW__WDAT_PARITY_CHECK_MASK 0x00000010L
+#define DAGB0_L1TLB_REG_RW__DISABLE_RDRET_CHECK_MASK 0x00000020L
+#define DAGB0_L1TLB_REG_RW__RESERVE_MASK 0xFFFFFFC0L
+
+
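These DAGB field definitions follow the usual amdgpu register-header convention: every field of a register gets a <REG>__<FIELD>__SHIFT plus a matching <REG>__<FIELD>_MASK, and driver code reads or updates a field by masking and shifting the 32-bit register value. The short C sketch below illustrates that pattern against the DAGB0_PERFCOUNTER0_CFG macros defined above; the get_field()/set_field() helpers and the example function are illustrative stand-ins (assumed, not part of this header) for the REG_GET_FIELD()/REG_SET_FIELD()-style macros the driver normally uses.

#include <stdint.h>

/* Illustrative helpers (not from this header): extract or update one field
 * of a 32-bit register value using its generated _MASK / __SHIFT macros. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Hypothetical example: select perf event 5 and enable counter 0 by
 * programming the PERF_SEL and ENABLE fields of DAGB0_PERFCOUNTER0_CFG. */
static inline uint32_t dagb0_perfcounter0_cfg_example(uint32_t cfg)
{
	cfg = set_field(cfg, DAGB0_PERFCOUNTER0_CFG__PERF_SEL_MASK,
			DAGB0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT, 5);
	cfg = set_field(cfg, DAGB0_PERFCOUNTER0_CFG__ENABLE_MASK,
			DAGB0_PERFCOUNTER0_CFG__ENABLE__SHIFT, 1);
	return cfg;
}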
+// addressBlock: aid_mmhub_dagb_dagbdec1
+//DAGB1_RDCLI0
+#define DAGB1_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI1
+#define DAGB1_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI2
+#define DAGB1_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI3
+#define DAGB1_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI4
+#define DAGB1_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI5
+#define DAGB1_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI6
+#define DAGB1_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI7
+#define DAGB1_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI8
+#define DAGB1_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI9
+#define DAGB1_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI10
+#define DAGB1_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI11
+#define DAGB1_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI12
+#define DAGB1_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI13
+#define DAGB1_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI14
+#define DAGB1_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI15
+#define DAGB1_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RD_CNTL
+#define DAGB1_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB1_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB1_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB1_RD_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB1_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB1_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB1_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB1_RD_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB1_RD_GMI_CNTL
+#define DAGB1_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB1_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB1_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB1_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB1_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB1_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB1_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB1_RD_ADDR_DAGB
+#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_RD_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB1_RD_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB1_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB1_RD_CGTT_CLK_CTRL
+#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB1_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB1_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB1_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_RD_VC0_CNTL
+#define DAGB1_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC1_CNTL
+#define DAGB1_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC2_CNTL
+#define DAGB1_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC3_CNTL
+#define DAGB1_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC4_CNTL
+#define DAGB1_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC5_CNTL
+#define DAGB1_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC6_CNTL
+#define DAGB1_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC7_CNTL
+#define DAGB1_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_CNTL_MISC
+#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB1_RD_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB1_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB1_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB1_RD_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB1_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB1_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB1_RD_TLB_CREDIT
+#define DAGB1_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB1_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB1_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB1_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB1_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB1_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB1_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB1_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB1_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB1_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB1_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB1_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB1_RD_RDRET_CREDIT_CNTL
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC0_CREDIT__SHIFT 0x0
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC1_CREDIT__SHIFT 0x6
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC2_CREDIT__SHIFT 0xc
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC3_CREDIT__SHIFT 0x12
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC4_CREDIT__SHIFT 0x18
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC_MODE__SHIFT 0x1e
+#define DAGB1_RD_RDRET_CREDIT_CNTL__FIX_EQ__SHIFT 0x1f
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC0_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC1_CREDIT_MASK 0x00000FC0L
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC2_CREDIT_MASK 0x0003F000L
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC3_CREDIT_MASK 0x00FC0000L
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC4_CREDIT_MASK 0x3F000000L
+#define DAGB1_RD_RDRET_CREDIT_CNTL__VC_MODE_MASK 0x40000000L
+#define DAGB1_RD_RDRET_CREDIT_CNTL__FIX_EQ_MASK 0x80000000L
+//DAGB1_RD_RDRET_CREDIT_CNTL2
+#define DAGB1_RD_RDRET_CREDIT_CNTL2__IO_CREDIT__SHIFT 0x0
+#define DAGB1_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT__SHIFT 0x6
+#define DAGB1_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT__SHIFT 0xc
+#define DAGB1_RD_RDRET_CREDIT_CNTL2__IO_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT_MASK 0x00000FC0L
+#define DAGB1_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT_MASK 0x0007F000L
+//DAGB1_RDCLI_ASK_PENDING
+#define DAGB1_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_GO_PENDING
+#define DAGB1_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_GBLSEND_PENDING
+#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_TLB_PENDING
+#define DAGB1_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_OARB_PENDING
+#define DAGB1_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_OSD_PENDING
+#define DAGB1_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_NOALLOC_OVERRIDE
+#define DAGB1_RDCLI_NOALLOC_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB1_RDCLI_NOALLOC_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB1_RDCLI_NOALLOC_OVERRIDE_VALUE
+#define DAGB1_RDCLI_NOALLOC_OVERRIDE_VALUE__VALUE__SHIFT 0x0
+#define DAGB1_RDCLI_NOALLOC_OVERRIDE_VALUE__VALUE_MASK 0x0000FFFFL
+//DAGB1_WRCLI0
+#define DAGB1_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI1
+#define DAGB1_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI2
+#define DAGB1_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI3
+#define DAGB1_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI4
+#define DAGB1_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI5
+#define DAGB1_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI6
+#define DAGB1_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI7
+#define DAGB1_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI8
+#define DAGB1_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI9
+#define DAGB1_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI10
+#define DAGB1_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI11
+#define DAGB1_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI12
+#define DAGB1_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI13
+#define DAGB1_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI14
+#define DAGB1_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI15
+#define DAGB1_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WR_CNTL
+#define DAGB1_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB1_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB1_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB1_WR_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB1_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB1_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB1_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB1_WR_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB1_WR_GMI_CNTL
+#define DAGB1_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB1_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB1_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB1_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB1_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB1_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB1_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB1_WR_ADDR_DAGB
+#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_WR_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB1_WR_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB1_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB1_WR_CGTT_CLK_CTRL
+#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB1_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB1_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB1_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB
+#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_WR_DATA_DAGB_MAX_BURST0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_MAX_BURST1
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_VC0_CNTL
+#define DAGB1_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC1_CNTL
+#define DAGB1_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC2_CNTL
+#define DAGB1_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC3_CNTL
+#define DAGB1_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC4_CNTL
+#define DAGB1_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC5_CNTL
+#define DAGB1_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC6_CNTL
+#define DAGB1_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC7_CNTL
+#define DAGB1_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_CNTL_MISC
+#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB1_WR_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB1_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB1_WR_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB1_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB1_WR_TLB_CREDIT
+#define DAGB1_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB1_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB1_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB1_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB1_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB1_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB1_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB1_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB1_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB1_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB1_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB1_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB1_WR_DATA_CREDIT
+#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB1_WR_MISC_CREDIT
+#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB1_WR_OSD_CREDIT_CNTL1
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x4
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0x8
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xc
+#define DAGB1_WR_OSD_CREDIT_CNTL1__IO_CREDIT__SHIFT 0x10
+#define DAGB1_WR_OSD_CREDIT_CNTL1__GMI_CREDIT__SHIFT 0x14
+#define DAGB1_WR_OSD_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x18
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000000FL
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000000F0L
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00000F00L
+#define DAGB1_WR_OSD_CREDIT_CNTL1__VC3_CREDIT_MASK 0x0000F000L
+#define DAGB1_WR_OSD_CREDIT_CNTL1__IO_CREDIT_MASK 0x000F0000L
+#define DAGB1_WR_OSD_CREDIT_CNTL1__GMI_CREDIT_MASK 0x00F00000L
+#define DAGB1_WR_OSD_CREDIT_CNTL1__POOL_CREDIT_MASK 0x3F000000L
+//DAGB1_WR_OSD_CREDIT_CNTL2
+#define DAGB1_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN__SHIFT 0x0
+#define DAGB1_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY__SHIFT 0x4
+#define DAGB1_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN_MASK 0x0000000FL
+#define DAGB1_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY_MASK 0x00000010L
+//DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x5
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0xa
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xf
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x14
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE__SHIFT 0x19
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ__SHIFT 0x1a
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0__SHIFT 0x1b
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1__SHIFT 0x1c
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2__SHIFT 0x1d
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000003E0L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00007C00L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT_MASK 0x000F8000L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT_MASK 0x01F00000L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE_MASK 0x02000000L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ_MASK 0x04000000L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0_MASK 0x08000000L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1_MASK 0x10000000L
+#define DAGB1_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2_MASK 0x20000000L
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0x0000FFFFL
+//DAGB1_WRCLI_ASK_PENDING
+#define DAGB1_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GO_PENDING
+#define DAGB1_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GBLSEND_PENDING
+#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_TLB_PENDING
+#define DAGB1_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_OARB_PENDING
+#define DAGB1_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_OSD_PENDING
+#define DAGB1_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_DBUS_ASK_PENDING
+#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_DBUS_GO_PENDING
+#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_DAGB_DLY
+#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB1_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB1_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB1_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB1_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB1_CNTL_MISC
+#define DAGB1_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB1_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB1_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB1_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB1_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB1_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB1_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB1_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB1_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB1_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB1_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB1_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB1_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB1_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB1_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB1_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB1_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB1_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB1_CNTL_MISC2
+#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB1_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB1_CNTL_MISC2__HDP_CID__SHIFT 0xc
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x10
+#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB1_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB1_CNTL_MISC2__HDP_CID_MASK 0x0000F000L
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x003F0000L
+//DAGB1_FATAL_ERROR_CNTL
+#define DAGB1_FATAL_ERROR_CNTL__FILTER_NUM__SHIFT 0x0
+#define DAGB1_FATAL_ERROR_CNTL__FILTER_NUM_MASK 0x000003FFL
+//DAGB1_FATAL_ERROR_CLEAR
+#define DAGB1_FATAL_ERROR_CLEAR__CLEAR__SHIFT 0x0
+#define DAGB1_FATAL_ERROR_CLEAR__CLEAR_MASK 0x00000001L
+//DAGB1_FATAL_ERROR_STATUS0
+#define DAGB1_FATAL_ERROR_STATUS0__VALID__SHIFT 0x0
+#define DAGB1_FATAL_ERROR_STATUS0__CID__SHIFT 0x1
+#define DAGB1_FATAL_ERROR_STATUS0__ADDR_LO__SHIFT 0x6
+#define DAGB1_FATAL_ERROR_STATUS0__VALID_MASK 0x00000001L
+#define DAGB1_FATAL_ERROR_STATUS0__CID_MASK 0x0000003EL
+#define DAGB1_FATAL_ERROR_STATUS0__ADDR_LO_MASK 0xFFFFFFC0L
+//DAGB1_FATAL_ERROR_STATUS1
+#define DAGB1_FATAL_ERROR_STATUS1__ADDR_HI__SHIFT 0x0
+#define DAGB1_FATAL_ERROR_STATUS1__ADDR_HI_MASK 0x0001FFFFL
+//DAGB1_FATAL_ERROR_STATUS2
+#define DAGB1_FATAL_ERROR_STATUS2__TAG__SHIFT 0x0
+#define DAGB1_FATAL_ERROR_STATUS2__VFID__SHIFT 0x10
+#define DAGB1_FATAL_ERROR_STATUS2__VF__SHIFT 0x14
+#define DAGB1_FATAL_ERROR_STATUS2__SPACE__SHIFT 0x15
+#define DAGB1_FATAL_ERROR_STATUS2__IO__SHIFT 0x16
+#define DAGB1_FATAL_ERROR_STATUS2__SIZE__SHIFT 0x17
+#define DAGB1_FATAL_ERROR_STATUS2__DBGMSK__SHIFT 0x18
+#define DAGB1_FATAL_ERROR_STATUS2__FED__SHIFT 0x19
+#define DAGB1_FATAL_ERROR_STATUS2__TAG_MASK 0x0000FFFFL
+#define DAGB1_FATAL_ERROR_STATUS2__VFID_MASK 0x000F0000L
+#define DAGB1_FATAL_ERROR_STATUS2__VF_MASK 0x00100000L
+#define DAGB1_FATAL_ERROR_STATUS2__SPACE_MASK 0x00200000L
+#define DAGB1_FATAL_ERROR_STATUS2__IO_MASK 0x00400000L
+#define DAGB1_FATAL_ERROR_STATUS2__SIZE_MASK 0x00800000L
+#define DAGB1_FATAL_ERROR_STATUS2__DBGMSK_MASK 0x01000000L
+#define DAGB1_FATAL_ERROR_STATUS2__FED_MASK 0x02000000L
+//DAGB1_FATAL_ERROR_STATUS3
+#define DAGB1_FATAL_ERROR_STATUS3__NOALLOC__SHIFT 0x0
+#define DAGB1_FATAL_ERROR_STATUS3__UNITID__SHIFT 0x1
+#define DAGB1_FATAL_ERROR_STATUS3__OP__SHIFT 0x7
+#define DAGB1_FATAL_ERROR_STATUS3__SECLEVEL__SHIFT 0xe
+#define DAGB1_FATAL_ERROR_STATUS3__WRTMZ__SHIFT 0x11
+#define DAGB1_FATAL_ERROR_STATUS3__RDTMZ__SHIFT 0x12
+#define DAGB1_FATAL_ERROR_STATUS3__SNOOP__SHIFT 0x13
+#define DAGB1_FATAL_ERROR_STATUS3__INVAL__SHIFT 0x14
+#define DAGB1_FATAL_ERROR_STATUS3__NACK__SHIFT 0x15
+#define DAGB1_FATAL_ERROR_STATUS3__RO__SHIFT 0x17
+#define DAGB1_FATAL_ERROR_STATUS3__MEMLOG__SHIFT 0x18
+#define DAGB1_FATAL_ERROR_STATUS3__EOP__SHIFT 0x19
+#define DAGB1_FATAL_ERROR_STATUS3__NOALLOC_MASK 0x00000001L
+#define DAGB1_FATAL_ERROR_STATUS3__UNITID_MASK 0x0000007EL
+#define DAGB1_FATAL_ERROR_STATUS3__OP_MASK 0x00003F80L
+#define DAGB1_FATAL_ERROR_STATUS3__SECLEVEL_MASK 0x0001C000L
+#define DAGB1_FATAL_ERROR_STATUS3__WRTMZ_MASK 0x00020000L
+#define DAGB1_FATAL_ERROR_STATUS3__RDTMZ_MASK 0x00040000L
+#define DAGB1_FATAL_ERROR_STATUS3__SNOOP_MASK 0x00080000L
+#define DAGB1_FATAL_ERROR_STATUS3__INVAL_MASK 0x00100000L
+#define DAGB1_FATAL_ERROR_STATUS3__NACK_MASK 0x00600000L
+#define DAGB1_FATAL_ERROR_STATUS3__RO_MASK 0x00800000L
+#define DAGB1_FATAL_ERROR_STATUS3__MEMLOG_MASK 0x01000000L
+#define DAGB1_FATAL_ERROR_STATUS3__EOP_MASK 0x02000000L
+//DAGB1_FIFO_EMPTY
+#define DAGB1_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB1_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB1_FIFO_FULL
+#define DAGB1_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB1_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB1_WR_CREDITS_FULL
+#define DAGB1_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB1_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB1_RD_CREDITS_FULL
+#define DAGB1_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB1_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB1_PERFCOUNTER_LO
+#define DAGB1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB1_PERFCOUNTER_HI
+#define DAGB1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB1_PERFCOUNTER0_CFG
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER1_CFG
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER2_CFG
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER_RSLT_CNTL
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB1_L1TLB_REG_RW
+#define DAGB1_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL__SHIFT 0x0
+#define DAGB1_L1TLB_REG_RW__REG_READ_L1TLB_CTRL__SHIFT 0x1
+#define DAGB1_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL__SHIFT 0x2
+#define DAGB1_L1TLB_REG_RW__WDAT_PARITY_CHECK__SHIFT 0x4
+#define DAGB1_L1TLB_REG_RW__DISABLE_RDRET_CHECK__SHIFT 0x5
+#define DAGB1_L1TLB_REG_RW__RESERVE__SHIFT 0x6
+#define DAGB1_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL_MASK 0x00000001L
+#define DAGB1_L1TLB_REG_RW__REG_READ_L1TLB_CTRL_MASK 0x00000002L
+#define DAGB1_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL_MASK 0x00000004L
+#define DAGB1_L1TLB_REG_RW__WDAT_PARITY_CHECK_MASK 0x00000010L
+#define DAGB1_L1TLB_REG_RW__DISABLE_RDRET_CHECK_MASK 0x00000020L
+#define DAGB1_L1TLB_REG_RW__RESERVE_MASK 0xFFFFFFC0L
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec2
+//DAGB2_RDCLI0
+#define DAGB2_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI1
+#define DAGB2_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI2
+#define DAGB2_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI3
+#define DAGB2_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI4
+#define DAGB2_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI5
+#define DAGB2_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI6
+#define DAGB2_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI7
+#define DAGB2_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI8
+#define DAGB2_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI9
+#define DAGB2_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI10
+#define DAGB2_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI11
+#define DAGB2_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI12
+#define DAGB2_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI13
+#define DAGB2_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI14
+#define DAGB2_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI15
+#define DAGB2_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RD_CNTL
+#define DAGB2_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB2_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB2_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB2_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB2_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB2_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB2_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB2_RD_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB2_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB2_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB2_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB2_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB2_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB2_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB2_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB2_RD_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB2_RD_GMI_CNTL
+#define DAGB2_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB2_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB2_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB2_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB2_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB2_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB2_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB2_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB2_RD_ADDR_DAGB
+#define DAGB2_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB2_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB2_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB2_RD_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB2_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB2_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB2_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB2_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB2_RD_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB2_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB2_RD_CGTT_CLK_CTRL
+#define DAGB2_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB2_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB2_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB2_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB2_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB2_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB2_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB2_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB2_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB2_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB2_RD_VC0_CNTL
+#define DAGB2_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC1_CNTL
+#define DAGB2_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC2_CNTL
+#define DAGB2_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC3_CNTL
+#define DAGB2_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC4_CNTL
+#define DAGB2_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC5_CNTL
+#define DAGB2_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC6_CNTL
+#define DAGB2_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC7_CNTL
+#define DAGB2_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_CNTL_MISC
+#define DAGB2_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB2_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB2_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB2_RD_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB2_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB2_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB2_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB2_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB2_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB2_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB2_RD_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB2_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB2_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB2_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB2_RD_TLB_CREDIT
+#define DAGB2_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB2_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB2_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB2_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB2_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB2_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB2_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB2_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB2_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB2_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB2_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB2_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB2_RD_RDRET_CREDIT_CNTL
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC0_CREDIT__SHIFT 0x0
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC1_CREDIT__SHIFT 0x6
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC2_CREDIT__SHIFT 0xc
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC3_CREDIT__SHIFT 0x12
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC4_CREDIT__SHIFT 0x18
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC_MODE__SHIFT 0x1e
+#define DAGB2_RD_RDRET_CREDIT_CNTL__FIX_EQ__SHIFT 0x1f
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC0_CREDIT_MASK 0x0000003FL
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC1_CREDIT_MASK 0x00000FC0L
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC2_CREDIT_MASK 0x0003F000L
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC3_CREDIT_MASK 0x00FC0000L
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC4_CREDIT_MASK 0x3F000000L
+#define DAGB2_RD_RDRET_CREDIT_CNTL__VC_MODE_MASK 0x40000000L
+#define DAGB2_RD_RDRET_CREDIT_CNTL__FIX_EQ_MASK 0x80000000L
+//DAGB2_RD_RDRET_CREDIT_CNTL2
+#define DAGB2_RD_RDRET_CREDIT_CNTL2__IO_CREDIT__SHIFT 0x0
+#define DAGB2_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT__SHIFT 0x6
+#define DAGB2_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT__SHIFT 0xc
+#define DAGB2_RD_RDRET_CREDIT_CNTL2__IO_CREDIT_MASK 0x0000003FL
+#define DAGB2_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT_MASK 0x00000FC0L
+#define DAGB2_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT_MASK 0x0007F000L
+//DAGB2_RDCLI_ASK_PENDING
+#define DAGB2_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_GO_PENDING
+#define DAGB2_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_GBLSEND_PENDING
+#define DAGB2_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_TLB_PENDING
+#define DAGB2_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_OARB_PENDING
+#define DAGB2_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_OSD_PENDING
+#define DAGB2_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI0
+#define DAGB2_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI1
+#define DAGB2_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI2
+#define DAGB2_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI3
+#define DAGB2_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI4
+#define DAGB2_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI5
+#define DAGB2_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI6
+#define DAGB2_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI7
+#define DAGB2_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI8
+#define DAGB2_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI9
+#define DAGB2_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI10
+#define DAGB2_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI11
+#define DAGB2_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI12
+#define DAGB2_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI13
+#define DAGB2_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI14
+#define DAGB2_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI15
+#define DAGB2_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WR_CNTL
+#define DAGB2_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB2_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB2_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB2_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB2_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB2_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB2_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB2_WR_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB2_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB2_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB2_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB2_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB2_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB2_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB2_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB2_WR_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB2_WR_GMI_CNTL
+#define DAGB2_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB2_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB2_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB2_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB2_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB2_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB2_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB2_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB2_WR_ADDR_DAGB
+#define DAGB2_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB2_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB2_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB2_WR_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB2_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB2_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB2_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB2_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB2_WR_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB2_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB2_WR_CGTT_CLK_CTRL
+#define DAGB2_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB2_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB2_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB2_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB2_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB2_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB2_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB
+#define DAGB2_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB2_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB2_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB2_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB2_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB2_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB2_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB2_WR_DATA_DAGB_MAX_BURST0
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB_MAX_BURST1
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_VC0_CNTL
+#define DAGB2_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC1_CNTL
+#define DAGB2_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC2_CNTL
+#define DAGB2_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC3_CNTL
+#define DAGB2_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC4_CNTL
+#define DAGB2_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC5_CNTL
+#define DAGB2_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC6_CNTL
+#define DAGB2_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC7_CNTL
+#define DAGB2_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_CNTL_MISC
+#define DAGB2_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB2_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB2_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB2_WR_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB2_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB2_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB2_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB2_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB2_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB2_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB2_WR_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB2_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB2_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB2_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB2_WR_TLB_CREDIT
+#define DAGB2_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB2_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB2_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB2_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB2_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB2_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB2_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB2_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB2_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB2_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB2_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB2_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB2_WR_DATA_CREDIT
+#define DAGB2_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB2_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB2_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB2_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB2_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB2_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB2_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB2_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB2_WR_MISC_CREDIT
+#define DAGB2_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB2_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB2_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB2_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB2_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB2_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB2_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB2_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB2_WR_OSD_CREDIT_CNTL1
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x4
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0x8
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xc
+#define DAGB2_WR_OSD_CREDIT_CNTL1__IO_CREDIT__SHIFT 0x10
+#define DAGB2_WR_OSD_CREDIT_CNTL1__GMI_CREDIT__SHIFT 0x14
+#define DAGB2_WR_OSD_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x18
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000000FL
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000000F0L
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00000F00L
+#define DAGB2_WR_OSD_CREDIT_CNTL1__VC3_CREDIT_MASK 0x0000F000L
+#define DAGB2_WR_OSD_CREDIT_CNTL1__IO_CREDIT_MASK 0x000F0000L
+#define DAGB2_WR_OSD_CREDIT_CNTL1__GMI_CREDIT_MASK 0x00F00000L
+#define DAGB2_WR_OSD_CREDIT_CNTL1__POOL_CREDIT_MASK 0x3F000000L
+//DAGB2_WR_OSD_CREDIT_CNTL2
+#define DAGB2_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN__SHIFT 0x0
+#define DAGB2_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY__SHIFT 0x4
+#define DAGB2_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN_MASK 0x0000000FL
+#define DAGB2_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY_MASK 0x00000010L
+//DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x5
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0xa
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xf
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x14
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE__SHIFT 0x19
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ__SHIFT 0x1a
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0__SHIFT 0x1b
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1__SHIFT 0x1c
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2__SHIFT 0x1d
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000003E0L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00007C00L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT_MASK 0x000F8000L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT_MASK 0x01F00000L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE_MASK 0x02000000L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ_MASK 0x04000000L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0_MASK 0x08000000L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1_MASK 0x10000000L
+#define DAGB2_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2_MASK 0x20000000L
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0x0000FFFFL
+//DAGB2_WRCLI_ASK_PENDING
+#define DAGB2_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GO_PENDING
+#define DAGB2_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GBLSEND_PENDING
+#define DAGB2_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_TLB_PENDING
+#define DAGB2_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_OARB_PENDING
+#define DAGB2_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_OSD_PENDING
+#define DAGB2_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_DBUS_ASK_PENDING
+#define DAGB2_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_DBUS_GO_PENDING
+#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_DAGB_DLY
+#define DAGB2_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB2_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB2_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB2_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB2_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB2_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB2_CNTL_MISC
+#define DAGB2_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB2_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB2_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB2_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB2_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB2_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB2_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB2_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB2_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB2_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB2_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB2_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB2_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB2_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB2_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB2_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB2_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB2_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB2_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB2_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB2_CNTL_MISC2
+#define DAGB2_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB2_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB2_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB2_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB2_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB2_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB2_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB2_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB2_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB2_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB2_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB2_CNTL_MISC2__HDP_CID__SHIFT 0xc
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x10
+#define DAGB2_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB2_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB2_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB2_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB2_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB2_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB2_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB2_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB2_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB2_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB2_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB2_CNTL_MISC2__HDP_CID_MASK 0x0000F000L
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x003F0000L
+//DAGB2_FATAL_ERROR_CNTL
+#define DAGB2_FATAL_ERROR_CNTL__FILTER_NUM__SHIFT 0x0
+#define DAGB2_FATAL_ERROR_CNTL__FILTER_NUM_MASK 0x000003FFL
+//DAGB2_FATAL_ERROR_CLEAR
+#define DAGB2_FATAL_ERROR_CLEAR__CLEAR__SHIFT 0x0
+#define DAGB2_FATAL_ERROR_CLEAR__CLEAR_MASK 0x00000001L
+//DAGB2_FATAL_ERROR_STATUS0
+#define DAGB2_FATAL_ERROR_STATUS0__VALID__SHIFT 0x0
+#define DAGB2_FATAL_ERROR_STATUS0__CID__SHIFT 0x1
+#define DAGB2_FATAL_ERROR_STATUS0__ADDR_LO__SHIFT 0x6
+#define DAGB2_FATAL_ERROR_STATUS0__VALID_MASK 0x00000001L
+#define DAGB2_FATAL_ERROR_STATUS0__CID_MASK 0x0000003EL
+#define DAGB2_FATAL_ERROR_STATUS0__ADDR_LO_MASK 0xFFFFFFC0L
+//DAGB2_FATAL_ERROR_STATUS1
+#define DAGB2_FATAL_ERROR_STATUS1__ADDR_HI__SHIFT 0x0
+#define DAGB2_FATAL_ERROR_STATUS1__ADDR_HI_MASK 0x0001FFFFL
+//DAGB2_FATAL_ERROR_STATUS2
+#define DAGB2_FATAL_ERROR_STATUS2__TAG__SHIFT 0x0
+#define DAGB2_FATAL_ERROR_STATUS2__VFID__SHIFT 0x10
+#define DAGB2_FATAL_ERROR_STATUS2__VF__SHIFT 0x14
+#define DAGB2_FATAL_ERROR_STATUS2__SPACE__SHIFT 0x15
+#define DAGB2_FATAL_ERROR_STATUS2__IO__SHIFT 0x16
+#define DAGB2_FATAL_ERROR_STATUS2__SIZE__SHIFT 0x17
+#define DAGB2_FATAL_ERROR_STATUS2__DBGMSK__SHIFT 0x18
+#define DAGB2_FATAL_ERROR_STATUS2__FED__SHIFT 0x19
+#define DAGB2_FATAL_ERROR_STATUS2__TAG_MASK 0x0000FFFFL
+#define DAGB2_FATAL_ERROR_STATUS2__VFID_MASK 0x000F0000L
+#define DAGB2_FATAL_ERROR_STATUS2__VF_MASK 0x00100000L
+#define DAGB2_FATAL_ERROR_STATUS2__SPACE_MASK 0x00200000L
+#define DAGB2_FATAL_ERROR_STATUS2__IO_MASK 0x00400000L
+#define DAGB2_FATAL_ERROR_STATUS2__SIZE_MASK 0x00800000L
+#define DAGB2_FATAL_ERROR_STATUS2__DBGMSK_MASK 0x01000000L
+#define DAGB2_FATAL_ERROR_STATUS2__FED_MASK 0x02000000L
+//DAGB2_FATAL_ERROR_STATUS3
+#define DAGB2_FATAL_ERROR_STATUS3__NOALLOC__SHIFT 0x0
+#define DAGB2_FATAL_ERROR_STATUS3__UNITID__SHIFT 0x1
+#define DAGB2_FATAL_ERROR_STATUS3__OP__SHIFT 0x7
+#define DAGB2_FATAL_ERROR_STATUS3__SECLEVEL__SHIFT 0xe
+#define DAGB2_FATAL_ERROR_STATUS3__WRTMZ__SHIFT 0x11
+#define DAGB2_FATAL_ERROR_STATUS3__RDTMZ__SHIFT 0x12
+#define DAGB2_FATAL_ERROR_STATUS3__SNOOP__SHIFT 0x13
+#define DAGB2_FATAL_ERROR_STATUS3__INVAL__SHIFT 0x14
+#define DAGB2_FATAL_ERROR_STATUS3__NACK__SHIFT 0x15
+#define DAGB2_FATAL_ERROR_STATUS3__RO__SHIFT 0x17
+#define DAGB2_FATAL_ERROR_STATUS3__MEMLOG__SHIFT 0x18
+#define DAGB2_FATAL_ERROR_STATUS3__EOP__SHIFT 0x19
+#define DAGB2_FATAL_ERROR_STATUS3__NOALLOC_MASK 0x00000001L
+#define DAGB2_FATAL_ERROR_STATUS3__UNITID_MASK 0x0000007EL
+#define DAGB2_FATAL_ERROR_STATUS3__OP_MASK 0x00003F80L
+#define DAGB2_FATAL_ERROR_STATUS3__SECLEVEL_MASK 0x0001C000L
+#define DAGB2_FATAL_ERROR_STATUS3__WRTMZ_MASK 0x00020000L
+#define DAGB2_FATAL_ERROR_STATUS3__RDTMZ_MASK 0x00040000L
+#define DAGB2_FATAL_ERROR_STATUS3__SNOOP_MASK 0x00080000L
+#define DAGB2_FATAL_ERROR_STATUS3__INVAL_MASK 0x00100000L
+#define DAGB2_FATAL_ERROR_STATUS3__NACK_MASK 0x00600000L
+#define DAGB2_FATAL_ERROR_STATUS3__RO_MASK 0x00800000L
+#define DAGB2_FATAL_ERROR_STATUS3__MEMLOG_MASK 0x01000000L
+#define DAGB2_FATAL_ERROR_STATUS3__EOP_MASK 0x02000000L
+//DAGB2_FIFO_EMPTY
+#define DAGB2_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB2_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB2_FIFO_FULL
+#define DAGB2_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB2_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB2_WR_CREDITS_FULL
+#define DAGB2_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB2_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB2_RD_CREDITS_FULL
+#define DAGB2_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB2_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB2_PERFCOUNTER_LO
+#define DAGB2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB2_PERFCOUNTER_HI
+#define DAGB2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB2_PERFCOUNTER0_CFG
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB2_PERFCOUNTER1_CFG
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB2_PERFCOUNTER2_CFG
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB2_PERFCOUNTER_RSLT_CNTL
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB2_L1TLB_REG_RW
+#define DAGB2_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL__SHIFT 0x0
+#define DAGB2_L1TLB_REG_RW__REG_READ_L1TLB_CTRL__SHIFT 0x1
+#define DAGB2_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL__SHIFT 0x2
+#define DAGB2_L1TLB_REG_RW__WDAT_PARITY_CHECK__SHIFT 0x4
+#define DAGB2_L1TLB_REG_RW__DISABLE_RDRET_CHECK__SHIFT 0x5
+#define DAGB2_L1TLB_REG_RW__RESERVE__SHIFT 0x6
+#define DAGB2_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL_MASK 0x00000001L
+#define DAGB2_L1TLB_REG_RW__REG_READ_L1TLB_CTRL_MASK 0x00000002L
+#define DAGB2_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL_MASK 0x00000004L
+#define DAGB2_L1TLB_REG_RW__WDAT_PARITY_CHECK_MASK 0x00000010L
+#define DAGB2_L1TLB_REG_RW__DISABLE_RDRET_CHECK_MASK 0x00000020L
+#define DAGB2_L1TLB_REG_RW__RESERVE_MASK 0xFFFFFFC0L
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec3
+//DAGB3_RDCLI0
+#define DAGB3_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI1
+#define DAGB3_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI2
+#define DAGB3_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI3
+#define DAGB3_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI4
+#define DAGB3_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI5
+#define DAGB3_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI6
+#define DAGB3_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI7
+#define DAGB3_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI8
+#define DAGB3_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI9
+#define DAGB3_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI10
+#define DAGB3_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI11
+#define DAGB3_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI12
+#define DAGB3_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI13
+#define DAGB3_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI14
+#define DAGB3_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI15
+#define DAGB3_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RD_CNTL
+#define DAGB3_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB3_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB3_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB3_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB3_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB3_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB3_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB3_RD_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB3_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB3_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB3_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB3_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB3_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB3_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB3_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB3_RD_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB3_RD_GMI_CNTL
+#define DAGB3_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB3_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB3_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB3_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB3_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB3_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB3_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB3_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB3_RD_ADDR_DAGB
+#define DAGB3_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB3_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB3_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB3_RD_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB3_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB3_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB3_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB3_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB3_RD_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB3_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB3_RD_CGTT_CLK_CTRL
+#define DAGB3_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB3_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB3_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB3_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB3_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB3_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB3_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB3_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB3_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB3_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB3_RD_VC0_CNTL
+#define DAGB3_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC1_CNTL
+#define DAGB3_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC2_CNTL
+#define DAGB3_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC3_CNTL
+#define DAGB3_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC4_CNTL
+#define DAGB3_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC5_CNTL
+#define DAGB3_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC6_CNTL
+#define DAGB3_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC7_CNTL
+#define DAGB3_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_CNTL_MISC
+#define DAGB3_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB3_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB3_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB3_RD_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB3_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB3_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB3_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB3_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB3_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB3_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB3_RD_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB3_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB3_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB3_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB3_RD_TLB_CREDIT
+#define DAGB3_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB3_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB3_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB3_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB3_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB3_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB3_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB3_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB3_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB3_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB3_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB3_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB3_RD_RDRET_CREDIT_CNTL
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC0_CREDIT__SHIFT 0x0
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC1_CREDIT__SHIFT 0x6
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC2_CREDIT__SHIFT 0xc
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC3_CREDIT__SHIFT 0x12
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC4_CREDIT__SHIFT 0x18
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC_MODE__SHIFT 0x1e
+#define DAGB3_RD_RDRET_CREDIT_CNTL__FIX_EQ__SHIFT 0x1f
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC0_CREDIT_MASK 0x0000003FL
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC1_CREDIT_MASK 0x00000FC0L
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC2_CREDIT_MASK 0x0003F000L
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC3_CREDIT_MASK 0x00FC0000L
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC4_CREDIT_MASK 0x3F000000L
+#define DAGB3_RD_RDRET_CREDIT_CNTL__VC_MODE_MASK 0x40000000L
+#define DAGB3_RD_RDRET_CREDIT_CNTL__FIX_EQ_MASK 0x80000000L
+//DAGB3_RD_RDRET_CREDIT_CNTL2
+#define DAGB3_RD_RDRET_CREDIT_CNTL2__IO_CREDIT__SHIFT 0x0
+#define DAGB3_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT__SHIFT 0x6
+#define DAGB3_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT__SHIFT 0xc
+#define DAGB3_RD_RDRET_CREDIT_CNTL2__IO_CREDIT_MASK 0x0000003FL
+#define DAGB3_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT_MASK 0x00000FC0L
+#define DAGB3_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT_MASK 0x0007F000L
+//DAGB3_RDCLI_ASK_PENDING
+#define DAGB3_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_GO_PENDING
+#define DAGB3_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_GBLSEND_PENDING
+#define DAGB3_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_TLB_PENDING
+#define DAGB3_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_OARB_PENDING
+#define DAGB3_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_OSD_PENDING
+#define DAGB3_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI0
+#define DAGB3_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI1
+#define DAGB3_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI2
+#define DAGB3_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI3
+#define DAGB3_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI4
+#define DAGB3_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI5
+#define DAGB3_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI6
+#define DAGB3_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI7
+#define DAGB3_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI8
+#define DAGB3_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI9
+#define DAGB3_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI10
+#define DAGB3_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI11
+#define DAGB3_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI12
+#define DAGB3_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI13
+#define DAGB3_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI14
+#define DAGB3_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI15
+#define DAGB3_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WR_CNTL
+#define DAGB3_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB3_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB3_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB3_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB3_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB3_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB3_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB3_WR_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB3_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB3_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB3_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB3_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB3_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB3_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB3_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB3_WR_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB3_WR_GMI_CNTL
+#define DAGB3_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB3_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB3_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB3_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB3_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB3_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB3_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB3_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB3_WR_ADDR_DAGB
+#define DAGB3_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB3_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB3_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB3_WR_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB3_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB3_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB3_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB3_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB3_WR_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB3_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB3_WR_CGTT_CLK_CTRL
+#define DAGB3_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB3_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB3_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB3_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB3_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB3_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB3_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB
+#define DAGB3_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB3_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB3_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB3_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB3_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB3_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB3_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB3_WR_DATA_DAGB_MAX_BURST0
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB_MAX_BURST1
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_VC0_CNTL
+#define DAGB3_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC1_CNTL
+#define DAGB3_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC2_CNTL
+#define DAGB3_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC3_CNTL
+#define DAGB3_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC4_CNTL
+#define DAGB3_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC5_CNTL
+#define DAGB3_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC6_CNTL
+#define DAGB3_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC7_CNTL
+#define DAGB3_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_CNTL_MISC
+#define DAGB3_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB3_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB3_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB3_WR_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB3_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB3_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB3_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB3_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB3_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB3_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB3_WR_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB3_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB3_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB3_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB3_WR_TLB_CREDIT
+#define DAGB3_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB3_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB3_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB3_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB3_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB3_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB3_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB3_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB3_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB3_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB3_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB3_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB3_WR_DATA_CREDIT
+#define DAGB3_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB3_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB3_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB3_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB3_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB3_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB3_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB3_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB3_WR_MISC_CREDIT
+#define DAGB3_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB3_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB3_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB3_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB3_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB3_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB3_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB3_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB3_WR_OSD_CREDIT_CNTL1
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x4
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0x8
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xc
+#define DAGB3_WR_OSD_CREDIT_CNTL1__IO_CREDIT__SHIFT 0x10
+#define DAGB3_WR_OSD_CREDIT_CNTL1__GMI_CREDIT__SHIFT 0x14
+#define DAGB3_WR_OSD_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x18
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000000FL
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000000F0L
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00000F00L
+#define DAGB3_WR_OSD_CREDIT_CNTL1__VC3_CREDIT_MASK 0x0000F000L
+#define DAGB3_WR_OSD_CREDIT_CNTL1__IO_CREDIT_MASK 0x000F0000L
+#define DAGB3_WR_OSD_CREDIT_CNTL1__GMI_CREDIT_MASK 0x00F00000L
+#define DAGB3_WR_OSD_CREDIT_CNTL1__POOL_CREDIT_MASK 0x3F000000L
+//DAGB3_WR_OSD_CREDIT_CNTL2
+#define DAGB3_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN__SHIFT 0x0
+#define DAGB3_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY__SHIFT 0x4
+#define DAGB3_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN_MASK 0x0000000FL
+#define DAGB3_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY_MASK 0x00000010L
+//DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x5
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0xa
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xf
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x14
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE__SHIFT 0x19
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ__SHIFT 0x1a
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0__SHIFT 0x1b
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1__SHIFT 0x1c
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2__SHIFT 0x1d
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000003E0L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00007C00L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT_MASK 0x000F8000L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT_MASK 0x01F00000L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE_MASK 0x02000000L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ_MASK 0x04000000L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0_MASK 0x08000000L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1_MASK 0x10000000L
+#define DAGB3_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2_MASK 0x20000000L
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0x0000FFFFL
+//DAGB3_WRCLI_ASK_PENDING
+#define DAGB3_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GO_PENDING
+#define DAGB3_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GBLSEND_PENDING
+#define DAGB3_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_TLB_PENDING
+#define DAGB3_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_OARB_PENDING
+#define DAGB3_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_OSD_PENDING
+#define DAGB3_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_DBUS_ASK_PENDING
+#define DAGB3_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_DBUS_GO_PENDING
+#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_DAGB_DLY
+#define DAGB3_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB3_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB3_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB3_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB3_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB3_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB3_CNTL_MISC
+#define DAGB3_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB3_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB3_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB3_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB3_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB3_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB3_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB3_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB3_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB3_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB3_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB3_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB3_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB3_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB3_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB3_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB3_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB3_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB3_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB3_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB3_CNTL_MISC2
+#define DAGB3_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB3_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB3_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB3_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB3_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB3_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB3_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB3_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB3_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB3_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB3_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB3_CNTL_MISC2__HDP_CID__SHIFT 0xc
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x10
+#define DAGB3_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB3_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB3_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB3_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB3_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB3_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB3_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB3_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB3_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB3_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB3_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB3_CNTL_MISC2__HDP_CID_MASK 0x0000F000L
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x003F0000L
+//DAGB3_FATAL_ERROR_CNTL
+#define DAGB3_FATAL_ERROR_CNTL__FILTER_NUM__SHIFT 0x0
+#define DAGB3_FATAL_ERROR_CNTL__FILTER_NUM_MASK 0x000003FFL
+//DAGB3_FATAL_ERROR_CLEAR
+#define DAGB3_FATAL_ERROR_CLEAR__CLEAR__SHIFT 0x0
+#define DAGB3_FATAL_ERROR_CLEAR__CLEAR_MASK 0x00000001L
+//DAGB3_FATAL_ERROR_STATUS0
+#define DAGB3_FATAL_ERROR_STATUS0__VALID__SHIFT 0x0
+#define DAGB3_FATAL_ERROR_STATUS0__CID__SHIFT 0x1
+#define DAGB3_FATAL_ERROR_STATUS0__ADDR_LO__SHIFT 0x6
+#define DAGB3_FATAL_ERROR_STATUS0__VALID_MASK 0x00000001L
+#define DAGB3_FATAL_ERROR_STATUS0__CID_MASK 0x0000003EL
+#define DAGB3_FATAL_ERROR_STATUS0__ADDR_LO_MASK 0xFFFFFFC0L
+//DAGB3_FATAL_ERROR_STATUS1
+#define DAGB3_FATAL_ERROR_STATUS1__ADDR_HI__SHIFT 0x0
+#define DAGB3_FATAL_ERROR_STATUS1__ADDR_HI_MASK 0x0001FFFFL
+//DAGB3_FATAL_ERROR_STATUS2
+#define DAGB3_FATAL_ERROR_STATUS2__TAG__SHIFT 0x0
+#define DAGB3_FATAL_ERROR_STATUS2__VFID__SHIFT 0x10
+#define DAGB3_FATAL_ERROR_STATUS2__VF__SHIFT 0x14
+#define DAGB3_FATAL_ERROR_STATUS2__SPACE__SHIFT 0x15
+#define DAGB3_FATAL_ERROR_STATUS2__IO__SHIFT 0x16
+#define DAGB3_FATAL_ERROR_STATUS2__SIZE__SHIFT 0x17
+#define DAGB3_FATAL_ERROR_STATUS2__DBGMSK__SHIFT 0x18
+#define DAGB3_FATAL_ERROR_STATUS2__FED__SHIFT 0x19
+#define DAGB3_FATAL_ERROR_STATUS2__TAG_MASK 0x0000FFFFL
+#define DAGB3_FATAL_ERROR_STATUS2__VFID_MASK 0x000F0000L
+#define DAGB3_FATAL_ERROR_STATUS2__VF_MASK 0x00100000L
+#define DAGB3_FATAL_ERROR_STATUS2__SPACE_MASK 0x00200000L
+#define DAGB3_FATAL_ERROR_STATUS2__IO_MASK 0x00400000L
+#define DAGB3_FATAL_ERROR_STATUS2__SIZE_MASK 0x00800000L
+#define DAGB3_FATAL_ERROR_STATUS2__DBGMSK_MASK 0x01000000L
+#define DAGB3_FATAL_ERROR_STATUS2__FED_MASK 0x02000000L
+//DAGB3_FATAL_ERROR_STATUS3
+#define DAGB3_FATAL_ERROR_STATUS3__NOALLOC__SHIFT 0x0
+#define DAGB3_FATAL_ERROR_STATUS3__UNITID__SHIFT 0x1
+#define DAGB3_FATAL_ERROR_STATUS3__OP__SHIFT 0x7
+#define DAGB3_FATAL_ERROR_STATUS3__SECLEVEL__SHIFT 0xe
+#define DAGB3_FATAL_ERROR_STATUS3__WRTMZ__SHIFT 0x11
+#define DAGB3_FATAL_ERROR_STATUS3__RDTMZ__SHIFT 0x12
+#define DAGB3_FATAL_ERROR_STATUS3__SNOOP__SHIFT 0x13
+#define DAGB3_FATAL_ERROR_STATUS3__INVAL__SHIFT 0x14
+#define DAGB3_FATAL_ERROR_STATUS3__NACK__SHIFT 0x15
+#define DAGB3_FATAL_ERROR_STATUS3__RO__SHIFT 0x17
+#define DAGB3_FATAL_ERROR_STATUS3__MEMLOG__SHIFT 0x18
+#define DAGB3_FATAL_ERROR_STATUS3__EOP__SHIFT 0x19
+#define DAGB3_FATAL_ERROR_STATUS3__NOALLOC_MASK 0x00000001L
+#define DAGB3_FATAL_ERROR_STATUS3__UNITID_MASK 0x0000007EL
+#define DAGB3_FATAL_ERROR_STATUS3__OP_MASK 0x00003F80L
+#define DAGB3_FATAL_ERROR_STATUS3__SECLEVEL_MASK 0x0001C000L
+#define DAGB3_FATAL_ERROR_STATUS3__WRTMZ_MASK 0x00020000L
+#define DAGB3_FATAL_ERROR_STATUS3__RDTMZ_MASK 0x00040000L
+#define DAGB3_FATAL_ERROR_STATUS3__SNOOP_MASK 0x00080000L
+#define DAGB3_FATAL_ERROR_STATUS3__INVAL_MASK 0x00100000L
+#define DAGB3_FATAL_ERROR_STATUS3__NACK_MASK 0x00600000L
+#define DAGB3_FATAL_ERROR_STATUS3__RO_MASK 0x00800000L
+#define DAGB3_FATAL_ERROR_STATUS3__MEMLOG_MASK 0x01000000L
+#define DAGB3_FATAL_ERROR_STATUS3__EOP_MASK 0x02000000L
+//DAGB3_FIFO_EMPTY
+#define DAGB3_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB3_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB3_FIFO_FULL
+#define DAGB3_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB3_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB3_WR_CREDITS_FULL
+#define DAGB3_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB3_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB3_RD_CREDITS_FULL
+#define DAGB3_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB3_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB3_PERFCOUNTER_LO
+#define DAGB3_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB3_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB3_PERFCOUNTER_HI
+#define DAGB3_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB3_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB3_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB3_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB3_PERFCOUNTER0_CFG
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB3_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB3_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB3_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB3_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB3_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB3_PERFCOUNTER1_CFG
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB3_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB3_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB3_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB3_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB3_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB3_PERFCOUNTER2_CFG
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB3_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB3_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB3_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB3_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB3_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB3_PERFCOUNTER_RSLT_CNTL
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB3_L1TLB_REG_RW
+#define DAGB3_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL__SHIFT 0x0
+#define DAGB3_L1TLB_REG_RW__REG_READ_L1TLB_CTRL__SHIFT 0x1
+#define DAGB3_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL__SHIFT 0x2
+#define DAGB3_L1TLB_REG_RW__WDAT_PARITY_CHECK__SHIFT 0x4
+#define DAGB3_L1TLB_REG_RW__DISABLE_RDRET_CHECK__SHIFT 0x5
+#define DAGB3_L1TLB_REG_RW__RESERVE__SHIFT 0x6
+#define DAGB3_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL_MASK 0x00000001L
+#define DAGB3_L1TLB_REG_RW__REG_READ_L1TLB_CTRL_MASK 0x00000002L
+#define DAGB3_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL_MASK 0x00000004L
+#define DAGB3_L1TLB_REG_RW__WDAT_PARITY_CHECK_MASK 0x00000010L
+#define DAGB3_L1TLB_REG_RW__DISABLE_RDRET_CHECK_MASK 0x00000020L
+#define DAGB3_L1TLB_REG_RW__RESERVE_MASK 0xFFFFFFC0L
+
+
+// addressBlock: aid_mmhub_dagb_dagbdec4
+//DAGB4_RDCLI0
+#define DAGB4_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI1
+#define DAGB4_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI2
+#define DAGB4_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI3
+#define DAGB4_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI4
+#define DAGB4_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI5
+#define DAGB4_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI6
+#define DAGB4_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI7
+#define DAGB4_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI8
+#define DAGB4_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI9
+#define DAGB4_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI10
+#define DAGB4_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI11
+#define DAGB4_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI12
+#define DAGB4_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI13
+#define DAGB4_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI14
+#define DAGB4_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI15
+#define DAGB4_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RD_CNTL
+#define DAGB4_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB4_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB4_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB4_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB4_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB4_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB4_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB4_RD_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB4_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB4_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB4_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB4_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB4_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB4_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB4_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB4_RD_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB4_RD_GMI_CNTL
+#define DAGB4_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB4_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB4_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB4_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB4_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB4_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB4_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB4_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB4_RD_ADDR_DAGB
+#define DAGB4_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB4_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB4_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB4_RD_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB4_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB4_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB4_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB4_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB4_RD_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB4_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB4_RD_CGTT_CLK_CTRL
+#define DAGB4_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB4_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB4_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB4_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB4_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB4_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB4_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB4_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB4_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB4_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB4_RD_VC0_CNTL
+#define DAGB4_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC1_CNTL
+#define DAGB4_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC2_CNTL
+#define DAGB4_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC3_CNTL
+#define DAGB4_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC4_CNTL
+#define DAGB4_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC5_CNTL
+#define DAGB4_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC6_CNTL
+#define DAGB4_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC7_CNTL
+#define DAGB4_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_CNTL_MISC
+#define DAGB4_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB4_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB4_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB4_RD_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB4_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB4_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB4_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB4_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB4_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB4_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB4_RD_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB4_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB4_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB4_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB4_RD_TLB_CREDIT
+#define DAGB4_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB4_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB4_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB4_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB4_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB4_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB4_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB4_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB4_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB4_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB4_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB4_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB4_RD_RDRET_CREDIT_CNTL
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC0_CREDIT__SHIFT 0x0
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC1_CREDIT__SHIFT 0x6
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC2_CREDIT__SHIFT 0xc
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC3_CREDIT__SHIFT 0x12
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC4_CREDIT__SHIFT 0x18
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC_MODE__SHIFT 0x1e
+#define DAGB4_RD_RDRET_CREDIT_CNTL__FIX_EQ__SHIFT 0x1f
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC0_CREDIT_MASK 0x0000003FL
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC1_CREDIT_MASK 0x00000FC0L
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC2_CREDIT_MASK 0x0003F000L
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC3_CREDIT_MASK 0x00FC0000L
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC4_CREDIT_MASK 0x3F000000L
+#define DAGB4_RD_RDRET_CREDIT_CNTL__VC_MODE_MASK 0x40000000L
+#define DAGB4_RD_RDRET_CREDIT_CNTL__FIX_EQ_MASK 0x80000000L
+//DAGB4_RD_RDRET_CREDIT_CNTL2
+#define DAGB4_RD_RDRET_CREDIT_CNTL2__IO_CREDIT__SHIFT 0x0
+#define DAGB4_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT__SHIFT 0x6
+#define DAGB4_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT__SHIFT 0xc
+#define DAGB4_RD_RDRET_CREDIT_CNTL2__IO_CREDIT_MASK 0x0000003FL
+#define DAGB4_RD_RDRET_CREDIT_CNTL2__GMI_CREDIT_MASK 0x00000FC0L
+#define DAGB4_RD_RDRET_CREDIT_CNTL2__POOL_CREDIT_MASK 0x0007F000L
+//DAGB4_RDCLI_ASK_PENDING
+#define DAGB4_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_GO_PENDING
+#define DAGB4_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_GBLSEND_PENDING
+#define DAGB4_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_TLB_PENDING
+#define DAGB4_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_OARB_PENDING
+#define DAGB4_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_OSD_PENDING
+#define DAGB4_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI0
+#define DAGB4_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI1
+#define DAGB4_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI2
+#define DAGB4_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI3
+#define DAGB4_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI4
+#define DAGB4_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI5
+#define DAGB4_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI6
+#define DAGB4_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI7
+#define DAGB4_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI8
+#define DAGB4_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI9
+#define DAGB4_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI10
+#define DAGB4_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI11
+#define DAGB4_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI12
+#define DAGB4_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI13
+#define DAGB4_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI14
+#define DAGB4_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI15
+#define DAGB4_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WR_CNTL
+#define DAGB4_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB4_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB4_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB4_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB4_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB4_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB4_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB4_WR_CNTL__FIX_JUMP__SHIFT 0x1a
+#define DAGB4_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB4_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB4_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB4_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB4_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB4_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB4_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+#define DAGB4_WR_CNTL__FIX_JUMP_MASK 0x04000000L
+//DAGB4_WR_GMI_CNTL
+#define DAGB4_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB4_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB4_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB4_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB4_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB4_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB4_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB4_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB4_WR_ADDR_DAGB
+#define DAGB4_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB4_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB4_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB4_WR_ADDR_DAGB__JUMP_MODE__SHIFT 0xd
+#define DAGB4_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB4_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB4_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB4_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+#define DAGB4_WR_ADDR_DAGB__JUMP_MODE_MASK 0x00002000L
+//DAGB4_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB4_WR_CGTT_CLK_CTRL
+#define DAGB4_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB4_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB4_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB4_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB4_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB4_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS__SHIFT 0xc
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1e
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x1f
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_ASSERT_HYSTERESIS_MASK 0x0FFFF000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x40000000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0x80000000L
+//DAGB4_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB
+#define DAGB4_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB4_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB4_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB4_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB4_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB4_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB4_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB4_WR_DATA_DAGB_MAX_BURST0
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB_MAX_BURST1
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_VC0_CNTL
+#define DAGB4_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC1_CNTL
+#define DAGB4_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC2_CNTL
+#define DAGB4_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC3_CNTL
+#define DAGB4_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC4_CNTL
+#define DAGB4_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC5_CNTL
+#define DAGB4_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC6_CNTL
+#define DAGB4_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC7_CNTL
+#define DAGB4_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_CNTL_MISC
+#define DAGB4_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB4_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB4_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB4_WR_CNTL_MISC__STOR_CC_NEW_MODE__SHIFT 0x13
+#define DAGB4_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB4_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB4_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB4_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB4_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB4_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB4_WR_CNTL_MISC__STOR_CC_NEW_MODE_MASK 0x00080000L
+#define DAGB4_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB4_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB4_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB4_WR_TLB_CREDIT
+#define DAGB4_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB4_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB4_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB4_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB4_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB4_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB4_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB4_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB4_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB4_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB4_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB4_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB4_WR_DATA_CREDIT
+#define DAGB4_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB4_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB4_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB4_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB4_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB4_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB4_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB4_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB4_WR_MISC_CREDIT
+#define DAGB4_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB4_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB4_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB4_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB4_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB4_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB4_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB4_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB4_WR_OSD_CREDIT_CNTL1
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x4
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0x8
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xc
+#define DAGB4_WR_OSD_CREDIT_CNTL1__IO_CREDIT__SHIFT 0x10
+#define DAGB4_WR_OSD_CREDIT_CNTL1__GMI_CREDIT__SHIFT 0x14
+#define DAGB4_WR_OSD_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x18
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000000FL
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000000F0L
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00000F00L
+#define DAGB4_WR_OSD_CREDIT_CNTL1__VC3_CREDIT_MASK 0x0000F000L
+#define DAGB4_WR_OSD_CREDIT_CNTL1__IO_CREDIT_MASK 0x000F0000L
+#define DAGB4_WR_OSD_CREDIT_CNTL1__GMI_CREDIT_MASK 0x00F00000L
+#define DAGB4_WR_OSD_CREDIT_CNTL1__POOL_CREDIT_MASK 0x3F000000L
+//DAGB4_WR_OSD_CREDIT_CNTL2
+#define DAGB4_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN__SHIFT 0x0
+#define DAGB4_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY__SHIFT 0x4
+#define DAGB4_WR_OSD_CREDIT_CNTL2__CREDIT_MARGIN_MASK 0x0000000FL
+#define DAGB4_WR_OSD_CREDIT_CNTL2__ENABLE_LEGACY_MASK 0x00000010L
+//DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT__SHIFT 0x0
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT__SHIFT 0x5
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT__SHIFT 0xa
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT__SHIFT 0xf
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT__SHIFT 0x14
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE__SHIFT 0x19
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ__SHIFT 0x1a
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0__SHIFT 0x1b
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1__SHIFT 0x1c
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2__SHIFT 0x1d
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC0_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC1_CREDIT_MASK 0x000003E0L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC2_CREDIT_MASK 0x00007C00L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC3_CREDIT_MASK 0x000F8000L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__POOL_CREDIT_MASK 0x01F00000L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__VC_MODE_MASK 0x02000000L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX_EQ_MASK 0x04000000L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX0_MASK 0x08000000L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX1_MASK 0x10000000L
+#define DAGB4_WR_ATOMIC_FIFO_CREDIT_CNTL1__FIX2_MASK 0x20000000L
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0x0000FFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0x0000FFFFL
+//DAGB4_WRCLI_ASK_PENDING
+#define DAGB4_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GO_PENDING
+#define DAGB4_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GBLSEND_PENDING
+#define DAGB4_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_TLB_PENDING
+#define DAGB4_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_OARB_PENDING
+#define DAGB4_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_OSD_PENDING
+#define DAGB4_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_DBUS_ASK_PENDING
+#define DAGB4_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_DBUS_GO_PENDING
+#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_DAGB_DLY
+#define DAGB4_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB4_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB4_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB4_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB4_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB4_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB4_CNTL_MISC
+#define DAGB4_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB4_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB4_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB4_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB4_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB4_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB4_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB4_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB4_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB4_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB4_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB4_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB4_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB4_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB4_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB4_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB4_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB4_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB4_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB4_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB4_CNTL_MISC2
+#define DAGB4_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB4_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB4_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB4_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB4_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB4_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB4_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB4_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB4_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB4_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB4_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB4_CNTL_MISC2__HDP_CID__SHIFT 0xc
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x10
+#define DAGB4_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB4_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB4_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB4_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB4_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB4_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB4_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB4_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB4_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB4_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB4_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB4_CNTL_MISC2__HDP_CID_MASK 0x0000F000L
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x003F0000L
+//DAGB4_FATAL_ERROR_CNTL
+#define DAGB4_FATAL_ERROR_CNTL__FILTER_NUM__SHIFT 0x0
+#define DAGB4_FATAL_ERROR_CNTL__FILTER_NUM_MASK 0x000003FFL
+//DAGB4_FATAL_ERROR_CLEAR
+#define DAGB4_FATAL_ERROR_CLEAR__CLEAR__SHIFT 0x0
+#define DAGB4_FATAL_ERROR_CLEAR__CLEAR_MASK 0x00000001L
+//DAGB4_FATAL_ERROR_STATUS0
+#define DAGB4_FATAL_ERROR_STATUS0__VALID__SHIFT 0x0
+#define DAGB4_FATAL_ERROR_STATUS0__CID__SHIFT 0x1
+#define DAGB4_FATAL_ERROR_STATUS0__ADDR_LO__SHIFT 0x6
+#define DAGB4_FATAL_ERROR_STATUS0__VALID_MASK 0x00000001L
+#define DAGB4_FATAL_ERROR_STATUS0__CID_MASK 0x0000003EL
+#define DAGB4_FATAL_ERROR_STATUS0__ADDR_LO_MASK 0xFFFFFFC0L
+//DAGB4_FATAL_ERROR_STATUS1
+#define DAGB4_FATAL_ERROR_STATUS1__ADDR_HI__SHIFT 0x0
+#define DAGB4_FATAL_ERROR_STATUS1__ADDR_HI_MASK 0x0001FFFFL
+//DAGB4_FATAL_ERROR_STATUS2
+#define DAGB4_FATAL_ERROR_STATUS2__TAG__SHIFT 0x0
+#define DAGB4_FATAL_ERROR_STATUS2__VFID__SHIFT 0x10
+#define DAGB4_FATAL_ERROR_STATUS2__VF__SHIFT 0x14
+#define DAGB4_FATAL_ERROR_STATUS2__SPACE__SHIFT 0x15
+#define DAGB4_FATAL_ERROR_STATUS2__IO__SHIFT 0x16
+#define DAGB4_FATAL_ERROR_STATUS2__SIZE__SHIFT 0x17
+#define DAGB4_FATAL_ERROR_STATUS2__DBGMSK__SHIFT 0x18
+#define DAGB4_FATAL_ERROR_STATUS2__FED__SHIFT 0x19
+#define DAGB4_FATAL_ERROR_STATUS2__TAG_MASK 0x0000FFFFL
+#define DAGB4_FATAL_ERROR_STATUS2__VFID_MASK 0x000F0000L
+#define DAGB4_FATAL_ERROR_STATUS2__VF_MASK 0x00100000L
+#define DAGB4_FATAL_ERROR_STATUS2__SPACE_MASK 0x00200000L
+#define DAGB4_FATAL_ERROR_STATUS2__IO_MASK 0x00400000L
+#define DAGB4_FATAL_ERROR_STATUS2__SIZE_MASK 0x00800000L
+#define DAGB4_FATAL_ERROR_STATUS2__DBGMSK_MASK 0x01000000L
+#define DAGB4_FATAL_ERROR_STATUS2__FED_MASK 0x02000000L
+//DAGB4_FATAL_ERROR_STATUS3
+#define DAGB4_FATAL_ERROR_STATUS3__NOALLOC__SHIFT 0x0
+#define DAGB4_FATAL_ERROR_STATUS3__UNITID__SHIFT 0x1
+#define DAGB4_FATAL_ERROR_STATUS3__OP__SHIFT 0x7
+#define DAGB4_FATAL_ERROR_STATUS3__SECLEVEL__SHIFT 0xe
+#define DAGB4_FATAL_ERROR_STATUS3__WRTMZ__SHIFT 0x11
+#define DAGB4_FATAL_ERROR_STATUS3__RDTMZ__SHIFT 0x12
+#define DAGB4_FATAL_ERROR_STATUS3__SNOOP__SHIFT 0x13
+#define DAGB4_FATAL_ERROR_STATUS3__INVAL__SHIFT 0x14
+#define DAGB4_FATAL_ERROR_STATUS3__NACK__SHIFT 0x15
+#define DAGB4_FATAL_ERROR_STATUS3__RO__SHIFT 0x17
+#define DAGB4_FATAL_ERROR_STATUS3__MEMLOG__SHIFT 0x18
+#define DAGB4_FATAL_ERROR_STATUS3__EOP__SHIFT 0x19
+#define DAGB4_FATAL_ERROR_STATUS3__NOALLOC_MASK 0x00000001L
+#define DAGB4_FATAL_ERROR_STATUS3__UNITID_MASK 0x0000007EL
+#define DAGB4_FATAL_ERROR_STATUS3__OP_MASK 0x00003F80L
+#define DAGB4_FATAL_ERROR_STATUS3__SECLEVEL_MASK 0x0001C000L
+#define DAGB4_FATAL_ERROR_STATUS3__WRTMZ_MASK 0x00020000L
+#define DAGB4_FATAL_ERROR_STATUS3__RDTMZ_MASK 0x00040000L
+#define DAGB4_FATAL_ERROR_STATUS3__SNOOP_MASK 0x00080000L
+#define DAGB4_FATAL_ERROR_STATUS3__INVAL_MASK 0x00100000L
+#define DAGB4_FATAL_ERROR_STATUS3__NACK_MASK 0x00600000L
+#define DAGB4_FATAL_ERROR_STATUS3__RO_MASK 0x00800000L
+#define DAGB4_FATAL_ERROR_STATUS3__MEMLOG_MASK 0x01000000L
+#define DAGB4_FATAL_ERROR_STATUS3__EOP_MASK 0x02000000L
+//DAGB4_FIFO_EMPTY
+#define DAGB4_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB4_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB4_FIFO_FULL
+#define DAGB4_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB4_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB4_WR_CREDITS_FULL
+#define DAGB4_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB4_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB4_RD_CREDITS_FULL
+#define DAGB4_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB4_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB4_PERFCOUNTER_LO
+#define DAGB4_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB4_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB4_PERFCOUNTER_HI
+#define DAGB4_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB4_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB4_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB4_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB4_PERFCOUNTER0_CFG
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB4_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB4_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB4_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB4_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB4_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB4_PERFCOUNTER1_CFG
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB4_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB4_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB4_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB4_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB4_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB4_PERFCOUNTER2_CFG
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB4_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB4_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB4_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB4_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB4_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB4_PERFCOUNTER_RSLT_CNTL
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB4_L1TLB_REG_RW
+#define DAGB4_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL__SHIFT 0x0
+#define DAGB4_L1TLB_REG_RW__REG_READ_L1TLB_CTRL__SHIFT 0x1
+#define DAGB4_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL__SHIFT 0x2
+#define DAGB4_L1TLB_REG_RW__WDAT_PARITY_CHECK__SHIFT 0x4
+#define DAGB4_L1TLB_REG_RW__DISABLE_RDRET_CHECK__SHIFT 0x5
+#define DAGB4_L1TLB_REG_RW__RESERVE__SHIFT 0x6
+#define DAGB4_L1TLB_REG_RW__REG_WRITE_L1TLB_CTRL_MASK 0x00000001L
+#define DAGB4_L1TLB_REG_RW__REG_READ_L1TLB_CTRL_MASK 0x00000002L
+#define DAGB4_L1TLB_REG_RW__VMID_EXCEP_INT_CTRL_MASK 0x00000004L
+#define DAGB4_L1TLB_REG_RW__WDAT_PARITY_CHECK_MASK 0x00000010L
+#define DAGB4_L1TLB_REG_RW__DISABLE_RDRET_CHECK_MASK 0x00000020L
+#define DAGB4_L1TLB_REG_RW__RESERVE_MASK 0xFFFFFFC0L
+
+
+// addressBlock: aid_mmhub_ea_mmeadec0
+//MMEA0_DRAM_RD_CLI2GRP_MAP0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_RD_CLI2GRP_MAP1
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_WR_CLI2GRP_MAP0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_WR_CLI2GRP_MAP1
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_RD_GRP2VC_MAP
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_DRAM_WR_GRP2VC_MAP
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_DRAM_RD_LAZY
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_DRAM_WR_LAZY
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_DRAM_RD_CAM_CNTL
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA0_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA0_DRAM_WR_CAM_CNTL
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA0_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA0_DRAM_PAGE_BURST
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_AGE
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_DRAM_WR_PRI_AGE
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_DRAM_RD_PRI_QUEUING
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_WR_PRI_QUEUING
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_RD_PRI_FIXED
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_WR_PRI_FIXED
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_RD_PRI_URGENCY
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_DRAM_WR_PRI_URGENCY
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_RD_CLI2GRP_MAP0
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_RD_CLI2GRP_MAP1
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_WR_CLI2GRP_MAP0
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_WR_CLI2GRP_MAP1
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_RD_GRP2VC_MAP
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_GMI_WR_GRP2VC_MAP
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_GMI_RD_LAZY
+#define MMEA0_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_GMI_WR_LAZY
+#define MMEA0_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_GMI_RD_CAM_CNTL
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA0_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA0_GMI_WR_CAM_CNTL
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA0_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA0_GMI_PAGE_BURST
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_GMI_RD_PRI_AGE
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_GMI_WR_PRI_AGE
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_GMI_RD_PRI_QUEUING
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_WR_PRI_QUEUING
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_RD_PRI_FIXED
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_WR_PRI_FIXED
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_RD_PRI_URGENCY
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_GMI_WR_PRI_URGENCY
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_GMI_RD_PRI_QUANT_PRI1
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_RD_PRI_QUANT_PRI2
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_RD_PRI_QUANT_PRI3
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_WR_PRI_QUANT_PRI1
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_WR_PRI_QUANT_PRI2
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_WR_PRI_QUANT_PRI3
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_CLI2GRP_MAP0
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_IO_RD_CLI2GRP_MAP1
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_IO_WR_CLI2GRP_MAP0
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_IO_WR_CLI2GRP_MAP1
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_IO_RD_COMBINE_FLUSH
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA0_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA0_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA0_IO_WR_COMBINE_FLUSH
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA0_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA0_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA0_IO_GROUP_BURST
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_AGE
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_IO_WR_PRI_AGE
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_IO_RD_PRI_QUEUING
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_WR_PRI_QUEUING
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_RD_PRI_FIXED
+#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_WR_PRI_FIXED
+#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_RD_PRI_URGENCY
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_IO_WR_PRI_URGENCY
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_IO_RD_PRI_URGENCY_MASKING
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_IO_WR_PRI_URGENCY_MASKING
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI1
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI2
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI3
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI1
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI2
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI3
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_SDP_ARB_DRAM
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA0_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA0_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA0_SDP_ARB_GMI
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA0_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA0_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA0_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA0_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA0_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA0_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA0_SDP_ARB_FINAL
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_STRETCH__SHIFT 0x1b
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1c
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_STRETCH_MASK 0x08000000L
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x10000000L
+//MMEA0_SDP_DRAM_PRIORITY
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_GMI_PRIORITY
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_IO_PRIORITY
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_CREDITS
+#define MMEA0_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA0_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA0_SDP_TAG_RESERVE0
+#define MMEA0_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA0_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA0_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA0_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA0_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA0_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA0_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA0_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA0_SDP_TAG_RESERVE1
+#define MMEA0_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA0_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA0_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA0_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA0_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA0_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA0_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA0_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA0_SDP_VCC_RESERVE0
+#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA0_SDP_VCC_RESERVE1
+#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA0_SDP_VCD_RESERVE0
+#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA0_SDP_VCD_RESERVE1
+#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA0_SDP_REQ_CNTL
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA0_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define MMEA0_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define MMEA0_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define MMEA0_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define MMEA0_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define MMEA0_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//MMEA0_MISC
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA0_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA0_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA0_LATENCY_SAMPLING
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA0_PERFCOUNTER_LO
+#define MMEA0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA0_PERFCOUNTER_HI
+#define MMEA0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA0_PERFCOUNTER0_CFG
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA0_PERFCOUNTER1_CFG
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA0_PERFCOUNTER_RSLT_CNTL
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA0_DSM_CNTL
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA0_DSM_CNTLA
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA0_DSM_CNTLB
+#define MMEA0_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+//MMEA0_DSM_CNTL2
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA0_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA0_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA0_DSM_CNTL2A
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA0_DSM_CNTL2B
+#define MMEA0_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//MMEA0_CGTT_CLK_CTRL
+#define MMEA0_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA0_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA0_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA0_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA0_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA0_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA0_EDC_MODE
+#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA0_ERR_STATUS
+#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA0_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define MMEA0_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define MMEA0_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define MMEA0_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define MMEA0_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR__SHIFT 0x12
+#define MMEA0_ERR_STATUS__FUE_FLAG_CLIENT__SHIFT 0x13
+#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define MMEA0_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define MMEA0_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define MMEA0_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define MMEA0_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+#define MMEA0_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR_MASK 0x00040000L
+#define MMEA0_ERR_STATUS__FUE_FLAG_CLIENT_MASK 0x00080000L
+//MMEA0_MISC2
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA0_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA0_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define MMEA0_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define MMEA0_MISC2__DRAM_RD_THROTTLE__SHIFT 0x10
+#define MMEA0_MISC2__DRAM_WR_THROTTLE__SHIFT 0x11
+#define MMEA0_MISC2__GMI_RD_THROTTLE__SHIFT 0x12
+#define MMEA0_MISC2__GMI_WR_THROTTLE__SHIFT 0x13
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA0_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+#define MMEA0_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define MMEA0_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define MMEA0_MISC2__DRAM_RD_THROTTLE_MASK 0x00010000L
+#define MMEA0_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
+#define MMEA0_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
+#define MMEA0_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA0_MISC_AON
+#define MMEA0_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
+#define MMEA0_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
+#define MMEA0_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
+#define MMEA0_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
+
+
+// addressBlock: aid_mmhub_ea_mmeadec1
+//MMEA1_DRAM_RD_CLI2GRP_MAP0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_RD_CLI2GRP_MAP1
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_WR_CLI2GRP_MAP0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_WR_CLI2GRP_MAP1
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_RD_GRP2VC_MAP
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_DRAM_WR_GRP2VC_MAP
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_DRAM_RD_LAZY
+#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_DRAM_WR_LAZY
+#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_DRAM_RD_CAM_CNTL
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA1_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA1_DRAM_WR_CAM_CNTL
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA1_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA1_DRAM_PAGE_BURST
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_AGE
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_DRAM_WR_PRI_AGE
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_DRAM_RD_PRI_QUEUING
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_WR_PRI_QUEUING
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_RD_PRI_FIXED
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_WR_PRI_FIXED
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_RD_PRI_URGENCY
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_DRAM_WR_PRI_URGENCY
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_RD_CLI2GRP_MAP0
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_RD_CLI2GRP_MAP1
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_WR_CLI2GRP_MAP0
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_WR_CLI2GRP_MAP1
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_RD_GRP2VC_MAP
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_GMI_WR_GRP2VC_MAP
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_GMI_RD_LAZY
+#define MMEA1_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_GMI_WR_LAZY
+#define MMEA1_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_GMI_RD_CAM_CNTL
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA1_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA1_GMI_WR_CAM_CNTL
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA1_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA1_GMI_PAGE_BURST
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_GMI_RD_PRI_AGE
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_GMI_WR_PRI_AGE
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_GMI_RD_PRI_QUEUING
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_WR_PRI_QUEUING
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_RD_PRI_FIXED
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_WR_PRI_FIXED
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_RD_PRI_URGENCY
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_GMI_WR_PRI_URGENCY
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_GMI_RD_PRI_QUANT_PRI1
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_RD_PRI_QUANT_PRI2
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_RD_PRI_QUANT_PRI3
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_WR_PRI_QUANT_PRI1
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_WR_PRI_QUANT_PRI2
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_WR_PRI_QUANT_PRI3
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_CLI2GRP_MAP0
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_IO_RD_CLI2GRP_MAP1
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_IO_WR_CLI2GRP_MAP0
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_IO_WR_CLI2GRP_MAP1
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_IO_RD_COMBINE_FLUSH
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA1_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA1_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA1_IO_WR_COMBINE_FLUSH
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA1_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA1_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA1_IO_GROUP_BURST
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_AGE
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_IO_WR_PRI_AGE
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_IO_RD_PRI_QUEUING
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_WR_PRI_QUEUING
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_RD_PRI_FIXED
+#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_WR_PRI_FIXED
+#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_RD_PRI_URGENCY
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_IO_WR_PRI_URGENCY
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_IO_RD_PRI_URGENCY_MASKING
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_IO_WR_PRI_URGENCY_MASKING
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI1
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI2
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI3
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI1
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI2
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI3
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_SDP_ARB_DRAM
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA1_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA1_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA1_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA1_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA1_SDP_ARB_GMI
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA1_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA1_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA1_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA1_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA1_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA1_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA1_SDP_ARB_FINAL
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_STRETCH__SHIFT 0x1b
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1c
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_STRETCH_MASK 0x08000000L
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x10000000L
+//MMEA1_SDP_DRAM_PRIORITY
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_GMI_PRIORITY
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_IO_PRIORITY
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_CREDITS
+#define MMEA1_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA1_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA1_SDP_TAG_RESERVE0
+#define MMEA1_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA1_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA1_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA1_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA1_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA1_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA1_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA1_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA1_SDP_TAG_RESERVE1
+#define MMEA1_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA1_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA1_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA1_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA1_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA1_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA1_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA1_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA1_SDP_VCC_RESERVE0
+#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA1_SDP_VCC_RESERVE1
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA1_SDP_VCD_RESERVE0
+#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA1_SDP_VCD_RESERVE1
+#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA1_SDP_REQ_CNTL
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA1_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define MMEA1_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define MMEA1_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define MMEA1_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define MMEA1_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define MMEA1_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//MMEA1_MISC
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA1_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA1_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA1_LATENCY_SAMPLING
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA1_PERFCOUNTER_LO
+#define MMEA1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA1_PERFCOUNTER_HI
+#define MMEA1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA1_PERFCOUNTER0_CFG
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA1_PERFCOUNTER1_CFG
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA1_PERFCOUNTER_RSLT_CNTL
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA1_DSM_CNTL
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA1_DSM_CNTLA
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA1_DSM_CNTLB
+#define MMEA1_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+//MMEA1_DSM_CNTL2
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA1_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA1_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA1_DSM_CNTL2A
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA1_DSM_CNTL2B
+#define MMEA1_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//MMEA1_CGTT_CLK_CTRL
+#define MMEA1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA1_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA1_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA1_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA1_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA1_EDC_MODE
+#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA1_ERR_STATUS
+#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA1_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define MMEA1_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define MMEA1_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define MMEA1_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define MMEA1_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR__SHIFT 0x12
+#define MMEA1_ERR_STATUS__FUE_FLAG_CLIENT__SHIFT 0x13
+#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define MMEA1_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define MMEA1_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define MMEA1_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define MMEA1_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+#define MMEA1_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR_MASK 0x00040000L
+#define MMEA1_ERR_STATUS__FUE_FLAG_CLIENT_MASK 0x00080000L
+//MMEA1_MISC2
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA1_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA1_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define MMEA1_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define MMEA1_MISC2__DRAM_RD_THROTTLE__SHIFT 0x10
+#define MMEA1_MISC2__DRAM_WR_THROTTLE__SHIFT 0x11
+#define MMEA1_MISC2__GMI_RD_THROTTLE__SHIFT 0x12
+#define MMEA1_MISC2__GMI_WR_THROTTLE__SHIFT 0x13
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA1_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+#define MMEA1_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define MMEA1_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define MMEA1_MISC2__DRAM_RD_THROTTLE_MASK 0x00010000L
+#define MMEA1_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
+#define MMEA1_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
+#define MMEA1_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA1_MISC_AON
+#define MMEA1_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
+#define MMEA1_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
+#define MMEA1_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
+#define MMEA1_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
+
+
+// addressBlock: aid_mmhub_ea_mmeadec2
+//MMEA2_DRAM_RD_CLI2GRP_MAP0
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_RD_CLI2GRP_MAP1
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_WR_CLI2GRP_MAP0
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_WR_CLI2GRP_MAP1
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_RD_GRP2VC_MAP
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_DRAM_WR_GRP2VC_MAP
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_DRAM_RD_LAZY
+#define MMEA2_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_DRAM_WR_LAZY
+#define MMEA2_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_DRAM_RD_CAM_CNTL
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA2_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA2_DRAM_WR_CAM_CNTL
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA2_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA2_DRAM_PAGE_BURST
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA2_DRAM_RD_PRI_AGE
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_DRAM_WR_PRI_AGE
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_DRAM_RD_PRI_QUEUING
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_WR_PRI_QUEUING
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_RD_PRI_FIXED
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_WR_PRI_FIXED
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_RD_PRI_URGENCY
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_DRAM_WR_PRI_URGENCY
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_RD_CLI2GRP_MAP0
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_RD_CLI2GRP_MAP1
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_WR_CLI2GRP_MAP0
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_WR_CLI2GRP_MAP1
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_RD_GRP2VC_MAP
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_GMI_WR_GRP2VC_MAP
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_GMI_RD_LAZY
+#define MMEA2_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_GMI_WR_LAZY
+#define MMEA2_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_GMI_RD_CAM_CNTL
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA2_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA2_GMI_WR_CAM_CNTL
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA2_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA2_GMI_PAGE_BURST
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA2_GMI_RD_PRI_AGE
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_GMI_WR_PRI_AGE
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_GMI_RD_PRI_QUEUING
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_WR_PRI_QUEUING
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_RD_PRI_FIXED
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_WR_PRI_FIXED
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_RD_PRI_URGENCY
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_GMI_WR_PRI_URGENCY
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_GMI_RD_PRI_QUANT_PRI1
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_RD_PRI_QUANT_PRI2
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_RD_PRI_QUANT_PRI3
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_WR_PRI_QUANT_PRI1
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_WR_PRI_QUANT_PRI2
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_WR_PRI_QUANT_PRI3
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_RD_CLI2GRP_MAP0
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_IO_RD_CLI2GRP_MAP1
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_IO_WR_CLI2GRP_MAP0
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_IO_WR_CLI2GRP_MAP1
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_IO_RD_COMBINE_FLUSH
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA2_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA2_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA2_IO_WR_COMBINE_FLUSH
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA2_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA2_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA2_IO_GROUP_BURST
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA2_IO_RD_PRI_AGE
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_IO_WR_PRI_AGE
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_IO_RD_PRI_QUEUING
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_WR_PRI_QUEUING
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_RD_PRI_FIXED
+#define MMEA2_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_WR_PRI_FIXED
+#define MMEA2_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_RD_PRI_URGENCY
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_IO_WR_PRI_URGENCY
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_IO_RD_PRI_URGENCY_MASKING
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_IO_WR_PRI_URGENCY_MASKING
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_IO_RD_PRI_QUANT_PRI1
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_RD_PRI_QUANT_PRI2
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_RD_PRI_QUANT_PRI3
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_WR_PRI_QUANT_PRI1
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_WR_PRI_QUANT_PRI2
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_WR_PRI_QUANT_PRI3
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_SDP_ARB_DRAM
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA2_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA2_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA2_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA2_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA2_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA2_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA2_SDP_ARB_GMI
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA2_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA2_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA2_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA2_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA2_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA2_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA2_SDP_ARB_FINAL
+#define MMEA2_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA2_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA2_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA2_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA2_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA2_SDP_ARB_FINAL__DRAM_BURST_STRETCH__SHIFT 0x1b
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1c
+#define MMEA2_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA2_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA2_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA2_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA2_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA2_SDP_ARB_FINAL__DRAM_BURST_STRETCH_MASK 0x08000000L
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x10000000L
+//MMEA2_SDP_DRAM_PRIORITY
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA2_SDP_GMI_PRIORITY
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA2_SDP_IO_PRIORITY
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA2_SDP_CREDITS
+#define MMEA2_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA2_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA2_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA2_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA2_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA2_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA2_SDP_TAG_RESERVE0
+#define MMEA2_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA2_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA2_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA2_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA2_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA2_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA2_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA2_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA2_SDP_TAG_RESERVE1
+#define MMEA2_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA2_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA2_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA2_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA2_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA2_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA2_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA2_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA2_SDP_VCC_RESERVE0
+#define MMEA2_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA2_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA2_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA2_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA2_SDP_VCC_RESERVE1
+#define MMEA2_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA2_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA2_SDP_VCD_RESERVE0
+#define MMEA2_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA2_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA2_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA2_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA2_SDP_VCD_RESERVE1
+#define MMEA2_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA2_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA2_SDP_REQ_CNTL
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA2_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA2_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define MMEA2_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define MMEA2_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA2_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define MMEA2_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define MMEA2_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define MMEA2_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//MMEA2_MISC
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA2_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA2_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA2_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA2_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA2_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA2_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA2_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA2_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA2_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA2_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA2_LATENCY_SAMPLING
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA2_PERFCOUNTER_LO
+#define MMEA2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA2_PERFCOUNTER_HI
+#define MMEA2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA2_PERFCOUNTER0_CFG
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA2_PERFCOUNTER1_CFG
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA2_PERFCOUNTER_RSLT_CNTL
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA2_DSM_CNTL
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA2_DSM_CNTLA
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA2_DSM_CNTLB
+#define MMEA2_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA2_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA2_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA2_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA2_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA2_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA2_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA2_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA2_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA2_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA2_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA2_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA2_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA2_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA2_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA2_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+//MMEA2_DSM_CNTL2
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA2_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA2_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA2_DSM_CNTL2A
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA2_DSM_CNTL2B
+#define MMEA2_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA2_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA2_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA2_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA2_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA2_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA2_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA2_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA2_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA2_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA2_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA2_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA2_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA2_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA2_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//MMEA2_CGTT_CLK_CTRL
+#define MMEA2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA2_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA2_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA2_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA2_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA2_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA2_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA2_EDC_MODE
+#define MMEA2_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA2_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA2_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA2_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA2_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA2_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA2_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA2_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA2_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA2_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA2_ERR_STATUS
+#define MMEA2_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA2_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA2_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA2_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA2_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA2_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define MMEA2_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define MMEA2_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define MMEA2_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define MMEA2_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR__SHIFT 0x12
+#define MMEA2_ERR_STATUS__FUE_FLAG_CLIENT__SHIFT 0x13
+#define MMEA2_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA2_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA2_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA2_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA2_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define MMEA2_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define MMEA2_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define MMEA2_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define MMEA2_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+#define MMEA2_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR_MASK 0x00040000L
+#define MMEA2_ERR_STATUS__FUE_FLAG_CLIENT_MASK 0x00080000L
+//MMEA2_MISC2
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA2_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA2_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA2_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define MMEA2_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define MMEA2_MISC2__DRAM_RD_THROTTLE__SHIFT 0x10
+#define MMEA2_MISC2__DRAM_WR_THROTTLE__SHIFT 0x11
+#define MMEA2_MISC2__GMI_RD_THROTTLE__SHIFT 0x12
+#define MMEA2_MISC2__GMI_WR_THROTTLE__SHIFT 0x13
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA2_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA2_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+#define MMEA2_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define MMEA2_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define MMEA2_MISC2__DRAM_RD_THROTTLE_MASK 0x00010000L
+#define MMEA2_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
+#define MMEA2_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
+#define MMEA2_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA2_MISC_AON
+#define MMEA2_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
+#define MMEA2_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
+#define MMEA2_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
+#define MMEA2_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
+
+
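Each register in this header is described by a paired set of `__SHIFT` and `_MASK` macros, one pair per bit field. Driver code composes and decodes register images by masking and shifting with these pairs; in amdgpu this pattern is normally wrapped by the REG_SET_FIELD()/REG_GET_FIELD() helpers. The following is a minimal standalone sketch of that pattern, not part of the generated header: the FIELD_SET/FIELD_GET helper names are hypothetical, and the two MMEA2_SDP_ARB_FINAL definitions are copied from the block above so the example compiles on its own.

    /* Sketch: consuming the __SHIFT/_MASK pairs defined in this header. */
    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the MMEA2_SDP_ARB_FINAL definitions above. */
    #define MMEA2_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
    #define MMEA2_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK   0x0000001FL

    /* Hypothetical helpers following the same token-pasting scheme as
     * amdgpu's REG_SET_FIELD()/REG_GET_FIELD(). */
    #define FIELD_SET(reg_val, reg, field, field_val)                   \
            (((reg_val) & ~reg##__##field##_MASK) |                     \
             (((uint32_t)(field_val) << reg##__##field##__SHIFT) &      \
              reg##__##field##_MASK))

    #define FIELD_GET(reg_val, reg, field)                              \
            (((reg_val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

    int main(void)
    {
            uint32_t v = 0;

            /* Program a DRAM burst limit of 16 into a register image. */
            v = FIELD_SET(v, MMEA2_SDP_ARB_FINAL, DRAM_BURST_LIMIT, 16);

            printf("reg=0x%08x limit=%u\n", v,
                   (unsigned int)FIELD_GET(v, MMEA2_SDP_ARB_FINAL,
                                           DRAM_BURST_LIMIT));
            return 0;
    }

The same masking/shifting applies to every block below; only the macro names change per register and field.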
+// addressBlock: aid_mmhub_ea_mmeadec3
+//MMEA3_DRAM_RD_CLI2GRP_MAP0
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_RD_CLI2GRP_MAP1
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_WR_CLI2GRP_MAP0
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_WR_CLI2GRP_MAP1
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_RD_GRP2VC_MAP
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_DRAM_WR_GRP2VC_MAP
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_DRAM_RD_LAZY
+#define MMEA3_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_DRAM_WR_LAZY
+#define MMEA3_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_DRAM_RD_CAM_CNTL
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA3_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA3_DRAM_WR_CAM_CNTL
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA3_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA3_DRAM_PAGE_BURST
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA3_DRAM_RD_PRI_AGE
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_DRAM_WR_PRI_AGE
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_DRAM_RD_PRI_QUEUING
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_WR_PRI_QUEUING
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_RD_PRI_FIXED
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_WR_PRI_FIXED
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_RD_PRI_URGENCY
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_DRAM_WR_PRI_URGENCY
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_RD_CLI2GRP_MAP0
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_RD_CLI2GRP_MAP1
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_WR_CLI2GRP_MAP0
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_WR_CLI2GRP_MAP1
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_RD_GRP2VC_MAP
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_GMI_WR_GRP2VC_MAP
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_GMI_RD_LAZY
+#define MMEA3_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_GMI_WR_LAZY
+#define MMEA3_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_GMI_RD_CAM_CNTL
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA3_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA3_GMI_WR_CAM_CNTL
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA3_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA3_GMI_PAGE_BURST
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA3_GMI_RD_PRI_AGE
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_GMI_WR_PRI_AGE
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_GMI_RD_PRI_QUEUING
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_WR_PRI_QUEUING
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_RD_PRI_FIXED
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_WR_PRI_FIXED
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_RD_PRI_URGENCY
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_GMI_WR_PRI_URGENCY
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_GMI_RD_PRI_QUANT_PRI1
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_RD_PRI_QUANT_PRI2
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_RD_PRI_QUANT_PRI3
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_WR_PRI_QUANT_PRI1
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_WR_PRI_QUANT_PRI2
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_WR_PRI_QUANT_PRI3
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_RD_CLI2GRP_MAP0
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_IO_RD_CLI2GRP_MAP1
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_IO_WR_CLI2GRP_MAP0
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_IO_WR_CLI2GRP_MAP1
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_IO_RD_COMBINE_FLUSH
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA3_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA3_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA3_IO_WR_COMBINE_FLUSH
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA3_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA3_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA3_IO_GROUP_BURST
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA3_IO_RD_PRI_AGE
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_IO_WR_PRI_AGE
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_IO_RD_PRI_QUEUING
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_WR_PRI_QUEUING
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_RD_PRI_FIXED
+#define MMEA3_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_WR_PRI_FIXED
+#define MMEA3_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_RD_PRI_URGENCY
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_IO_WR_PRI_URGENCY
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_IO_RD_PRI_URGENCY_MASKING
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_IO_WR_PRI_URGENCY_MASKING
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_IO_RD_PRI_QUANT_PRI1
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_RD_PRI_QUANT_PRI2
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_RD_PRI_QUANT_PRI3
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_WR_PRI_QUANT_PRI1
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_WR_PRI_QUANT_PRI2
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_WR_PRI_QUANT_PRI3
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_SDP_ARB_DRAM
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA3_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA3_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA3_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA3_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA3_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA3_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA3_SDP_ARB_GMI
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA3_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA3_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA3_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA3_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA3_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA3_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA3_SDP_ARB_FINAL
+#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA3_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA3_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA3_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA3_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_STRETCH__SHIFT 0x1b
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1c
+#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA3_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA3_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA3_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA3_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_STRETCH_MASK 0x08000000L
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x10000000L
+//MMEA3_SDP_DRAM_PRIORITY
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA3_SDP_GMI_PRIORITY
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA3_SDP_IO_PRIORITY
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA3_SDP_CREDITS
+#define MMEA3_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA3_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA3_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA3_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA3_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA3_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA3_SDP_TAG_RESERVE0
+#define MMEA3_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA3_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA3_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA3_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA3_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA3_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA3_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA3_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA3_SDP_TAG_RESERVE1
+#define MMEA3_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA3_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA3_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA3_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA3_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA3_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA3_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA3_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA3_SDP_VCC_RESERVE0
+#define MMEA3_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA3_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA3_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA3_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA3_SDP_VCC_RESERVE1
+#define MMEA3_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA3_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA3_SDP_VCD_RESERVE0
+#define MMEA3_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA3_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA3_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA3_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA3_SDP_VCD_RESERVE1
+#define MMEA3_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA3_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA3_SDP_REQ_CNTL
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA3_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA3_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define MMEA3_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define MMEA3_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA3_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define MMEA3_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define MMEA3_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define MMEA3_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//MMEA3_MISC
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA3_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA3_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA3_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA3_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA3_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA3_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA3_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA3_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA3_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA3_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA3_LATENCY_SAMPLING
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA3_PERFCOUNTER_LO
+#define MMEA3_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA3_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA3_PERFCOUNTER_HI
+#define MMEA3_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA3_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA3_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA3_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA3_PERFCOUNTER0_CFG
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA3_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA3_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA3_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA3_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA3_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA3_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA3_PERFCOUNTER1_CFG
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA3_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA3_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA3_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA3_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA3_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA3_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA3_PERFCOUNTER_RSLT_CNTL
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA3_DSM_CNTL
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA3_DSM_CNTLA
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA3_DSM_CNTLB
+#define MMEA3_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA3_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA3_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA3_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA3_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA3_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA3_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA3_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA3_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA3_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA3_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA3_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA3_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA3_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA3_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA3_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+//MMEA3_DSM_CNTL2
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA3_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA3_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA3_DSM_CNTL2A
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA3_DSM_CNTL2B
+#define MMEA3_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA3_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA3_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA3_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA3_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA3_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA3_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA3_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA3_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA3_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA3_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA3_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA3_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA3_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA3_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//MMEA3_CGTT_CLK_CTRL
+#define MMEA3_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA3_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA3_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA3_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA3_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA3_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA3_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA3_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA3_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA3_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA3_EDC_MODE
+#define MMEA3_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA3_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA3_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA3_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA3_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA3_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA3_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA3_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA3_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA3_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA3_ERR_STATUS
+#define MMEA3_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA3_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA3_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA3_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA3_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA3_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define MMEA3_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define MMEA3_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define MMEA3_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define MMEA3_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR__SHIFT 0x12
+#define MMEA3_ERR_STATUS__FUE_FLAG_CLIENT__SHIFT 0x13
+#define MMEA3_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA3_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA3_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA3_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA3_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define MMEA3_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define MMEA3_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define MMEA3_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define MMEA3_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+#define MMEA3_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR_MASK 0x00040000L
+#define MMEA3_ERR_STATUS__FUE_FLAG_CLIENT_MASK 0x00080000L
+//MMEA3_MISC2
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA3_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA3_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA3_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define MMEA3_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define MMEA3_MISC2__DRAM_RD_THROTTLE__SHIFT 0x10
+#define MMEA3_MISC2__DRAM_WR_THROTTLE__SHIFT 0x11
+#define MMEA3_MISC2__GMI_RD_THROTTLE__SHIFT 0x12
+#define MMEA3_MISC2__GMI_WR_THROTTLE__SHIFT 0x13
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA3_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA3_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+#define MMEA3_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define MMEA3_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define MMEA3_MISC2__DRAM_RD_THROTTLE_MASK 0x00010000L
+#define MMEA3_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
+#define MMEA3_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
+#define MMEA3_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA3_MISC_AON
+#define MMEA3_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
+#define MMEA3_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
+#define MMEA3_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
+#define MMEA3_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
+
+
+// addressBlock: aid_mmhub_ea_mmeadec4
+//MMEA4_DRAM_RD_CLI2GRP_MAP0
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_RD_CLI2GRP_MAP1
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_WR_CLI2GRP_MAP0
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_WR_CLI2GRP_MAP1
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_RD_GRP2VC_MAP
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_DRAM_WR_GRP2VC_MAP
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_DRAM_RD_LAZY
+#define MMEA4_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_DRAM_WR_LAZY
+#define MMEA4_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_DRAM_RD_CAM_CNTL
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA4_DRAM_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA4_DRAM_WR_CAM_CNTL
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA4_DRAM_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA4_DRAM_PAGE_BURST
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA4_DRAM_RD_PRI_AGE
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_DRAM_WR_PRI_AGE
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_DRAM_RD_PRI_QUEUING
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_WR_PRI_QUEUING
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_RD_PRI_FIXED
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_WR_PRI_FIXED
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_RD_PRI_URGENCY
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_DRAM_WR_PRI_URGENCY
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_RD_CLI2GRP_MAP0
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_RD_CLI2GRP_MAP1
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_WR_CLI2GRP_MAP0
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_WR_CLI2GRP_MAP1
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_RD_GRP2VC_MAP
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_GMI_WR_GRP2VC_MAP
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_GMI_RD_LAZY
+#define MMEA4_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_GMI_WR_LAZY
+#define MMEA4_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_GMI_RD_CAM_CNTL
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA4_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA4_GMI_WR_CAM_CNTL
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA4_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA4_GMI_PAGE_BURST
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA4_GMI_RD_PRI_AGE
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_GMI_WR_PRI_AGE
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_GMI_RD_PRI_QUEUING
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_WR_PRI_QUEUING
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_RD_PRI_FIXED
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_WR_PRI_FIXED
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_RD_PRI_URGENCY
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_GMI_WR_PRI_URGENCY
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_GMI_RD_PRI_QUANT_PRI1
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_RD_PRI_QUANT_PRI2
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_RD_PRI_QUANT_PRI3
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_WR_PRI_QUANT_PRI1
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_WR_PRI_QUANT_PRI2
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_WR_PRI_QUANT_PRI3
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_RD_CLI2GRP_MAP0
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_IO_RD_CLI2GRP_MAP1
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_IO_WR_CLI2GRP_MAP0
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_IO_WR_CLI2GRP_MAP1
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_IO_RD_COMBINE_FLUSH
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA4_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA4_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA4_IO_WR_COMBINE_FLUSH
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA4_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA4_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//MMEA4_IO_GROUP_BURST
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA4_IO_RD_PRI_AGE
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_IO_WR_PRI_AGE
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_IO_RD_PRI_QUEUING
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_WR_PRI_QUEUING
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_RD_PRI_FIXED
+#define MMEA4_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_WR_PRI_FIXED
+#define MMEA4_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_RD_PRI_URGENCY
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_IO_WR_PRI_URGENCY
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_IO_RD_PRI_URGENCY_MASKING
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_IO_WR_PRI_URGENCY_MASKING
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_IO_RD_PRI_QUANT_PRI1
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_RD_PRI_QUANT_PRI2
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_RD_PRI_QUANT_PRI3
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_WR_PRI_QUANT_PRI1
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_WR_PRI_QUANT_PRI2
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_WR_PRI_QUANT_PRI3
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_SDP_ARB_DRAM
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA4_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA4_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA4_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA4_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA4_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA4_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA4_SDP_ARB_GMI
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA4_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA4_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA4_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA4_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA4_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA4_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA4_SDP_ARB_FINAL
+#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA4_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA4_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA4_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA4_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_STRETCH__SHIFT 0x1b
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1c
+#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA4_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA4_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA4_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA4_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_STRETCH_MASK 0x08000000L
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x10000000L
+//MMEA4_SDP_DRAM_PRIORITY
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA4_SDP_GMI_PRIORITY
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA4_SDP_IO_PRIORITY
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA4_SDP_CREDITS
+#define MMEA4_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA4_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA4_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA4_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA4_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA4_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA4_SDP_TAG_RESERVE0
+#define MMEA4_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA4_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA4_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA4_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA4_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA4_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA4_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA4_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA4_SDP_TAG_RESERVE1
+#define MMEA4_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA4_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA4_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA4_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA4_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA4_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA4_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA4_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA4_SDP_VCC_RESERVE0
+#define MMEA4_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA4_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA4_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA4_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA4_SDP_VCC_RESERVE1
+#define MMEA4_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA4_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA4_SDP_VCD_RESERVE0
+#define MMEA4_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA4_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA4_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA4_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA4_SDP_VCD_RESERVE1
+#define MMEA4_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA4_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA4_SDP_REQ_CNTL
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA4_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA4_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define MMEA4_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define MMEA4_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA4_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define MMEA4_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define MMEA4_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define MMEA4_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//MMEA4_MISC
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA4_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA4_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA4_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA4_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA4_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA4_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA4_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA4_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA4_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA4_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA4_LATENCY_SAMPLING
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA4_PERFCOUNTER_LO
+#define MMEA4_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA4_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA4_PERFCOUNTER_HI
+#define MMEA4_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA4_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA4_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA4_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA4_PERFCOUNTER0_CFG
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA4_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA4_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA4_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA4_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA4_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA4_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA4_PERFCOUNTER1_CFG
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA4_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA4_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA4_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA4_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA4_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA4_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA4_PERFCOUNTER_RSLT_CNTL
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA4_DSM_CNTL
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA4_DSM_CNTLA
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA4_DSM_CNTLB
+#define MMEA4_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA4_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA4_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA4_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA4_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA4_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA4_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA4_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA4_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA4_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA4_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA4_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA4_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA4_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA4_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA4_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+//MMEA4_DSM_CNTL2
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA4_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA4_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA4_DSM_CNTL2A
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA4_DSM_CNTL2B
+#define MMEA4_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA4_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA4_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA4_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA4_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA4_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA4_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA4_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA4_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA4_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA4_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA4_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA4_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA4_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA4_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//MMEA4_CGTT_CLK_CTRL
+#define MMEA4_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA4_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA4_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA4_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA4_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA4_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA4_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA4_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA4_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA4_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA4_EDC_MODE
+#define MMEA4_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA4_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA4_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA4_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA4_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA4_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA4_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA4_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA4_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA4_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA4_ERR_STATUS
+#define MMEA4_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA4_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA4_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA4_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA4_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA4_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define MMEA4_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define MMEA4_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define MMEA4_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define MMEA4_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR__SHIFT 0x12
+#define MMEA4_ERR_STATUS__FUE_FLAG_CLIENT__SHIFT 0x13
+#define MMEA4_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA4_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA4_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA4_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA4_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define MMEA4_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define MMEA4_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define MMEA4_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define MMEA4_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+#define MMEA4_ERR_STATUS__BUSY_ON_CMPL_FATAL_ERROR_MASK 0x00040000L
+#define MMEA4_ERR_STATUS__FUE_FLAG_CLIENT_MASK 0x00080000L
+//MMEA4_MISC2
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA4_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA4_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA4_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define MMEA4_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define MMEA4_MISC2__DRAM_RD_THROTTLE__SHIFT 0x10
+#define MMEA4_MISC2__DRAM_WR_THROTTLE__SHIFT 0x11
+#define MMEA4_MISC2__GMI_RD_THROTTLE__SHIFT 0x12
+#define MMEA4_MISC2__GMI_WR_THROTTLE__SHIFT 0x13
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA4_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA4_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+#define MMEA4_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define MMEA4_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define MMEA4_MISC2__DRAM_RD_THROTTLE_MASK 0x00010000L
+#define MMEA4_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
+#define MMEA4_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
+#define MMEA4_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA4_MISC_AON
+#define MMEA4_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
+#define MMEA4_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
+#define MMEA4_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
+#define MMEA4_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
+
+
+// addressBlock: aid_mmhub_pctldec0
+//PCTL0_CTRL
+#define PCTL0_CTRL__PG_ENABLE__SHIFT 0x0
+#define PCTL0_CTRL__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x1
+#define PCTL0_CTRL__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x4
+#define PCTL0_CTRL__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_CTRL__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x10
+#define PCTL0_CTRL__OVR_EA0_SDP_PARTACK__SHIFT 0x11
+#define PCTL0_CTRL__OVR_EA1_SDP_PARTACK__SHIFT 0x12
+#define PCTL0_CTRL__OVR_EA2_SDP_PARTACK__SHIFT 0x13
+#define PCTL0_CTRL__OVR_EA3_SDP_PARTACK__SHIFT 0x14
+#define PCTL0_CTRL__OVR_EA4_SDP_PARTACK__SHIFT 0x15
+#define PCTL0_CTRL__OVR_EA5_SDP_PARTACK__SHIFT 0x16
+#define PCTL0_CTRL__OVR_EA0_SDP_FULLACK__SHIFT 0x17
+#define PCTL0_CTRL__OVR_EA1_SDP_FULLACK__SHIFT 0x18
+#define PCTL0_CTRL__OVR_EA2_SDP_FULLACK__SHIFT 0x19
+#define PCTL0_CTRL__OVR_EA3_SDP_FULLACK__SHIFT 0x1a
+#define PCTL0_CTRL__OVR_EA4_SDP_FULLACK__SHIFT 0x1b
+#define PCTL0_CTRL__OVR_EA5_SDP_FULLACK__SHIFT 0x1c
+#define PCTL0_CTRL__RSMU_RDTIMER_ENABLE__SHIFT 0x1d
+#define PCTL0_CTRL__RSMU_RDTIMER_THRESHOLD__SHIFT 0x1e
+#define PCTL0_CTRL__PG_ENABLE_MASK 0x00000001L
+#define PCTL0_CTRL__ALLOW_DEEP_SLEEP_MODE_MASK 0x0000000EL
+#define PCTL0_CTRL__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x000007F0L
+#define PCTL0_CTRL__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x0000F800L
+#define PCTL0_CTRL__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00010000L
+#define PCTL0_CTRL__OVR_EA0_SDP_PARTACK_MASK 0x00020000L
+#define PCTL0_CTRL__OVR_EA1_SDP_PARTACK_MASK 0x00040000L
+#define PCTL0_CTRL__OVR_EA2_SDP_PARTACK_MASK 0x00080000L
+#define PCTL0_CTRL__OVR_EA3_SDP_PARTACK_MASK 0x00100000L
+#define PCTL0_CTRL__OVR_EA4_SDP_PARTACK_MASK 0x00200000L
+#define PCTL0_CTRL__OVR_EA5_SDP_PARTACK_MASK 0x00400000L
+#define PCTL0_CTRL__OVR_EA0_SDP_FULLACK_MASK 0x00800000L
+#define PCTL0_CTRL__OVR_EA1_SDP_FULLACK_MASK 0x01000000L
+#define PCTL0_CTRL__OVR_EA2_SDP_FULLACK_MASK 0x02000000L
+#define PCTL0_CTRL__OVR_EA3_SDP_FULLACK_MASK 0x04000000L
+#define PCTL0_CTRL__OVR_EA4_SDP_FULLACK_MASK 0x08000000L
+#define PCTL0_CTRL__OVR_EA5_SDP_FULLACK_MASK 0x10000000L
+#define PCTL0_CTRL__RSMU_RDTIMER_ENABLE_MASK 0x20000000L
+#define PCTL0_CTRL__RSMU_RDTIMER_THRESHOLD_MASK 0xC0000000L
+//PCTL0_MMHUB_DEEPSLEEP_IB
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS0__SHIFT 0x0
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS1__SHIFT 0x1
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS2__SHIFT 0x2
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS3__SHIFT 0x3
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS4__SHIFT 0x4
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS5__SHIFT 0x5
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS6__SHIFT 0x6
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS7__SHIFT 0x7
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS8__SHIFT 0x8
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS9__SHIFT 0x9
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS10__SHIFT 0xa
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS11__SHIFT 0xb
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS12__SHIFT 0xc
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS13__SHIFT 0xd
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS14__SHIFT 0xe
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS15__SHIFT 0xf
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS16__SHIFT 0x10
+#define PCTL0_MMHUB_DEEPSLEEP_IB__SETCLEAR__SHIFT 0x1f
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS0_MASK 0x00000001L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS1_MASK 0x00000002L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS2_MASK 0x00000004L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS3_MASK 0x00000008L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS4_MASK 0x00000010L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS5_MASK 0x00000020L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS6_MASK 0x00000040L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS7_MASK 0x00000080L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS8_MASK 0x00000100L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS9_MASK 0x00000200L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS10_MASK 0x00000400L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS11_MASK 0x00000800L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS12_MASK 0x00001000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS13_MASK 0x00002000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS14_MASK 0x00004000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS15_MASK 0x00008000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS16_MASK 0x00010000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__SETCLEAR_MASK 0x80000000L
+//PCTL0_MMHUB_DEEPSLEEP_OVERRIDE
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB__SHIFT 0x11
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_CANE__SHIFT 0x12
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB_MASK 0x00020000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_CANE_MASK 0x00040000L
+//PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0__SHIFT 0x0
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1__SHIFT 0x1
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2__SHIFT 0x2
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3__SHIFT 0x3
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4__SHIFT 0x4
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5__SHIFT 0x5
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6__SHIFT 0x6
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7__SHIFT 0x7
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8__SHIFT 0x8
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9__SHIFT 0x9
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10__SHIFT 0xa
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11__SHIFT 0xb
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12__SHIFT 0xc
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13__SHIFT 0xd
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14__SHIFT 0xe
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15__SHIFT 0xf
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16__SHIFT 0x10
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0_MASK 0x00000001L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1_MASK 0x00000002L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2_MASK 0x00000004L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3_MASK 0x00000008L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4_MASK 0x00000010L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5_MASK 0x00000020L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6_MASK 0x00000040L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7_MASK 0x00000080L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8_MASK 0x00000100L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9_MASK 0x00000200L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10_MASK 0x00000400L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11_MASK 0x00000800L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12_MASK 0x00001000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13_MASK 0x00002000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14_MASK 0x00004000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15_MASK 0x00008000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16_MASK 0x00010000L
+//PCTL0_PG_IGNORE_DEEPSLEEP
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x0
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x1
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x2
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x3
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x4
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x5
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x6
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x7
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x8
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0x9
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xa
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xb
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xc
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xd
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xe
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0xf
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x10
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS_ATHUB__SHIFT 0x11
+#define PCTL0_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x12
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000001L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000002L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000004L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000008L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000010L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000020L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000040L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000080L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000100L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000200L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000400L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00000800L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00001000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00002000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00004000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00008000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00010000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS_ATHUB_MASK 0x00020000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00040000L
+//PCTL0_PG_IGNORE_DEEPSLEEP_IB
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS0__SHIFT 0x0
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS1__SHIFT 0x1
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS2__SHIFT 0x2
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS3__SHIFT 0x3
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS4__SHIFT 0x4
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS5__SHIFT 0x5
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS6__SHIFT 0x6
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS7__SHIFT 0x7
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS8__SHIFT 0x8
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS9__SHIFT 0x9
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS10__SHIFT 0xa
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS11__SHIFT 0xb
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS12__SHIFT 0xc
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS13__SHIFT 0xd
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS14__SHIFT 0xe
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS15__SHIFT 0xf
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS16__SHIFT 0x10
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__ALLIPS__SHIFT 0x11
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS0_MASK 0x00000001L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS1_MASK 0x00000002L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS2_MASK 0x00000004L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS3_MASK 0x00000008L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS4_MASK 0x00000010L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS5_MASK 0x00000020L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS6_MASK 0x00000040L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS7_MASK 0x00000080L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS8_MASK 0x00000100L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS9_MASK 0x00000200L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS10_MASK 0x00000400L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS11_MASK 0x00000800L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS12_MASK 0x00001000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS13_MASK 0x00002000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS14_MASK 0x00004000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS15_MASK 0x00008000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS16_MASK 0x00010000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__ALLIPS_MASK 0x00020000L
+//PCTL0_SLICE0_CFG_DAGB_BUSY
+#define PCTL0_SLICE0_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE0_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE0_CFG_DS_ALLOW
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE0_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE1_CFG_DAGB_BUSY
+#define PCTL0_SLICE1_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE1_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE1_CFG_DS_ALLOW
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE1_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE2_CFG_DAGB_BUSY
+#define PCTL0_SLICE2_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE2_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE2_CFG_DS_ALLOW
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE2_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE3_CFG_DAGB_BUSY
+#define PCTL0_SLICE3_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE3_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE3_CFG_DS_ALLOW
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE3_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE4_CFG_DAGB_BUSY
+#define PCTL0_SLICE4_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE4_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE4_CFG_DS_ALLOW
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE4_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_UTCL2_MISC
+#define PCTL0_UTCL2_MISC__RENG_EXECUTE_NONSECURE_START_PTR__SHIFT 0x0
+#define PCTL0_UTCL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb
+#define PCTL0_UTCL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc
+#define PCTL0_UTCL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf
+#define PCTL0_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10
+#define PCTL0_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_UTCL2_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_UTCL2_MISC__RENG_EXECUTE_NONSECURE_START_PTR_MASK 0x000007FFL
+#define PCTL0_UTCL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L
+#define PCTL0_UTCL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L
+#define PCTL0_UTCL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L
+#define PCTL0_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L
+#define PCTL0_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_UTCL2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE0_MISC
+#define PCTL0_SLICE0_MISC__RENG_EXECUTE_NONSECURE_START_PTR__SHIFT 0x0
+#define PCTL0_SLICE0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE0_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE0_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE0_MISC__RENG_EXECUTE_NONSECURE_START_PTR_MASK 0x000003FFL
+#define PCTL0_SLICE0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE0_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE0_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE1_MISC
+#define PCTL0_SLICE1_MISC__RENG_EXECUTE_NONSECURE_START_PTR__SHIFT 0x0
+#define PCTL0_SLICE1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE1_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE1_MISC__RENG_EXECUTE_NONSECURE_START_PTR_MASK 0x000003FFL
+#define PCTL0_SLICE1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE1_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE2_MISC
+#define PCTL0_SLICE2_MISC__RENG_EXECUTE_NONSECURE_START_PTR__SHIFT 0x0
+#define PCTL0_SLICE2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE2_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE2_MISC__RENG_EXECUTE_NONSECURE_START_PTR_MASK 0x000003FFL
+#define PCTL0_SLICE2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE3_MISC
+#define PCTL0_SLICE3_MISC__RENG_EXECUTE_NONSECURE_START_PTR__SHIFT 0x0
+#define PCTL0_SLICE3_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE3_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE3_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE3_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE3_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE3_MISC__RENG_EXECUTE_NONSECURE_START_PTR_MASK 0x000003FFL
+#define PCTL0_SLICE3_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE3_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE3_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE3_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE3_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE4_MISC
+#define PCTL0_SLICE4_MISC__RENG_EXECUTE_NONSECURE_START_PTR__SHIFT 0x0
+#define PCTL0_SLICE4_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE4_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE4_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE4_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE4_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE4_MISC__RENG_EXECUTE_NONSECURE_START_PTR_MASK 0x000003FFL
+#define PCTL0_SLICE4_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE4_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE4_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE4_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE4_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+
+
+// addressBlock: aid_mmhub_l1tlb_vml1dec
+//MC_VM_MX_L1_TLB0_STATUS
+#define MC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB1_STATUS
+#define MC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB2_STATUS
+#define MC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB3_STATUS
+#define MC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB4_STATUS
+#define MC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB5_STATUS
+#define MC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB6_STATUS
+#define MC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB7_STATUS
+#define MC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+
+
+// addressBlock: aid_mmhub_l1tlb_vml1pldec
+//MC_VM_MX_L1_PERFCOUNTER0_CFG
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER1_CFG
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER2_CFG
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER3_CFG
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: aid_mmhub_l1tlb_vml1prdec
+//MC_VM_MX_L1_PERFCOUNTER_LO
+#define MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MC_VM_MX_L1_PERFCOUNTER_HI
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: aid_mmhub_utcl2_atcl2dec
+//ATC_L2_CNTL
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define ATC_L2_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x14
+#define ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE__SHIFT 0x16
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+#define ATC_L2_CNTL__FRAG_APT_INTXN_MODE_MASK 0x00300000L
+#define ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE_MASK 0x0FC00000L
+//ATC_L2_CNTL2
+#define ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define ATC_L2_CNTL2__NUM_BANKS_LOG2__SHIFT 0x6
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x9
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xb
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0xc
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xf
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x12
+#define ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define ATC_L2_CNTL2__NUM_BANKS_LOG2_MASK 0x000001C0L
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x00000600L
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000800L
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00007000L
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00038000L
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00FC0000L
+//ATC_L2_CACHE_DATA0
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
+//ATC_L2_CACHE_DATA1
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//ATC_L2_CACHE_DATA2
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATC_L2_CACHE_DATA3
+#define ATC_L2_CACHE_DATA3__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATC_L2_CACHE_DATA3__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATC_L2_CNTL3
+#define ATC_L2_CNTL3__L2_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define ATC_L2_CNTL3__L2_MIDK_FRAGMENT_SIZE__SHIFT 0x6
+#define ATC_L2_CNTL3__L2_BIGK_FRAGMENT_SIZE__SHIFT 0xc
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x12
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x15
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x1b
+#define ATC_L2_CNTL3__REPEATER_FGCG_OFF__SHIFT 0x1e
+#define ATC_L2_CNTL3__L2_SMALLK_FRAGMENT_SIZE_MASK 0x0000003FL
+#define ATC_L2_CNTL3__L2_MIDK_FRAGMENT_SIZE_MASK 0x00000FC0L
+#define ATC_L2_CNTL3__L2_BIGK_FRAGMENT_SIZE_MASK 0x0003F000L
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x001C0000L
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x07E00000L
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x38000000L
+#define ATC_L2_CNTL3__REPEATER_FGCG_OFF_MASK 0x40000000L
+//ATC_L2_STATUS
+#define ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS__SHIFT 0x1
+#define ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS_MASK 0x00000002L
+//ATC_L2_STATUS2
+#define ATC_L2_STATUS2__UCE_MEM_ADDR__SHIFT 0x0
+#define ATC_L2_STATUS2__UCE_MEM_INST__SHIFT 0xc
+#define ATC_L2_STATUS2__UCE_SRT_CACHE__SHIFT 0x14
+#define ATC_L2_STATUS2__UCE__SHIFT 0x15
+#define ATC_L2_STATUS2__UCE_MEM_ADDR_MASK 0x00000FFFL
+#define ATC_L2_STATUS2__UCE_MEM_INST_MASK 0x000FF000L
+#define ATC_L2_STATUS2__UCE_SRT_CACHE_MASK 0x00100000L
+#define ATC_L2_STATUS2__UCE_MASK 0x00200000L
+//ATC_L2_MISC_CG
+#define ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//ATC_L2_MEM_POWER_LS
+#define ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATC_L2_CGTT_CLK_CTRL
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//ATC_L2_CACHE_4K_DSM_INDEX
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_32K_DSM_INDEX
+#define ATC_L2_CACHE_32K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_32K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_2M_DSM_INDEX
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_4K_DSM_CNTL
+#define ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATC_L2_CACHE_32K_DSM_CNTL
+#define ATC_L2_CACHE_32K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATC_L2_CACHE_32K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATC_L2_CACHE_32K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATC_L2_CACHE_32K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATC_L2_CACHE_32K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_32K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_32K_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATC_L2_CACHE_32K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATC_L2_CACHE_32K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATC_L2_CACHE_32K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATC_L2_CACHE_32K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATC_L2_CACHE_32K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATC_L2_CACHE_32K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_32K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATC_L2_CACHE_32K_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATC_L2_CACHE_2M_DSM_CNTL
+#define ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATC_L2_CNTL4
+#define ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x0
+#define ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0xa
+#define ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000003FFL
+#define ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000FFC00L
+//ATC_L2_MM_GROUP_RT_CLASSES
+#define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0
+#define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_mmhub_utcl2_vml2pfdec
+//VM_L2_CNTL
+#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//VM_L2_CNTL2
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//VM_L2_CNTL3
+#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//VM_L2_STATUS
+#define VM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//VM_DUMMY_PAGE_FAULT_CNTL
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//VM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_CNTL
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//VM_L2_PROTECTION_FAULT_CNTL2
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//VM_L2_PROTECTION_FAULT_MM_CNTL3
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_MM_CNTL4
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_STATUS
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
+//VM_L2_PROTECTION_FAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//VM_L2_CNTL4
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define VM_L2_CNTL4__GC_CH_FGCG_OFF__SHIFT 0x1d
+#define VM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE__SHIFT 0x1e
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+#define VM_L2_CNTL4__GC_CH_FGCG_OFF_MASK 0x20000000L
+#define VM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE_MASK 0x40000000L
+//VM_L2_CNTL5
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE__SHIFT 0x0
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE__SHIFT 0x1
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE_MASK 0x00000001L
+#define VM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE_MASK 0x00000002L
+//VM_L2_MM_GROUP_RT_CLASSES
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//VM_L2_BANK_SELECT_RESERVED_CID
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_BANK_SELECT_RESERVED_CID2
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_CACHE_PARITY_CNTL
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//VM_L2_CGTT_CLK_CTRL
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//VM_L2_CGTT_BUSY_CTRL
+#define VM_L2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define VM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x4
+#define VM_L2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000000FL
+#define VM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000010L
+//VML2_MEM_ECC_INDEX
+#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_WALKER_MEM_ECC_INDEX
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//UTCL2_MEM_ECC_INDEX
+#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_MEM_ECC_CNTL
+#define VML2_MEM_ECC_CNTL__INJECT_DELAY__SHIFT 0x0
+#define VML2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define VML2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define VML2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define VML2_MEM_ECC_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_MEM_ECC_CNTL__WRITE_COUNTERS__SHIFT 0x10
+#define VML2_MEM_ECC_CNTL__TEST_FUE__SHIFT 0x11
+#define VML2_MEM_ECC_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define VML2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define VML2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define VML2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define VML2_MEM_ECC_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+#define VML2_MEM_ECC_CNTL__WRITE_COUNTERS_MASK 0x00010000L
+#define VML2_MEM_ECC_CNTL__TEST_FUE_MASK 0x00020000L
+//VML2_WALKER_MEM_ECC_CNTL
+#define VML2_WALKER_MEM_ECC_CNTL__INJECT_DELAY__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define VML2_WALKER_MEM_ECC_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_WALKER_MEM_ECC_CNTL__WRITE_COUNTERS__SHIFT 0x10
+#define VML2_WALKER_MEM_ECC_CNTL__TEST_FUE__SHIFT 0x11
+#define VML2_WALKER_MEM_ECC_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define VML2_WALKER_MEM_ECC_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define VML2_WALKER_MEM_ECC_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define VML2_WALKER_MEM_ECC_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+#define VML2_WALKER_MEM_ECC_CNTL__WRITE_COUNTERS_MASK 0x00010000L
+#define VML2_WALKER_MEM_ECC_CNTL__TEST_FUE_MASK 0x00020000L
+//UTCL2_MEM_ECC_CNTL
+#define UTCL2_MEM_ECC_CNTL__INJECT_DELAY__SHIFT 0x0
+#define UTCL2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define UTCL2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define UTCL2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define UTCL2_MEM_ECC_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define UTCL2_MEM_ECC_CNTL__WRITE_COUNTERS__SHIFT 0x10
+#define UTCL2_MEM_ECC_CNTL__TEST_FUE__SHIFT 0x11
+#define UTCL2_MEM_ECC_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define UTCL2_MEM_ECC_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define UTCL2_MEM_ECC_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define UTCL2_MEM_ECC_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define UTCL2_MEM_ECC_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+#define UTCL2_MEM_ECC_CNTL__WRITE_COUNTERS_MASK 0x00010000L
+#define UTCL2_MEM_ECC_CNTL__TEST_FUE_MASK 0x00020000L
+//VML2_MEM_ECC_STATUS
+#define VML2_MEM_ECC_STATUS__UCE__SHIFT 0x0
+#define VML2_MEM_ECC_STATUS__FED__SHIFT 0x1
+#define VML2_MEM_ECC_STATUS__UCE_MASK 0x00000001L
+#define VML2_MEM_ECC_STATUS__FED_MASK 0x00000002L
+//VML2_WALKER_MEM_ECC_STATUS
+#define VML2_WALKER_MEM_ECC_STATUS__UCE__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_STATUS__FED__SHIFT 0x1
+#define VML2_WALKER_MEM_ECC_STATUS__UCE_MASK 0x00000001L
+#define VML2_WALKER_MEM_ECC_STATUS__FED_MASK 0x00000002L
+//UTCL2_MEM_ECC_STATUS
+#define UTCL2_MEM_ECC_STATUS__UCE__SHIFT 0x0
+#define UTCL2_MEM_ECC_STATUS__FED__SHIFT 0x1
+#define UTCL2_MEM_ECC_STATUS__UCE_MASK 0x00000001L
+#define UTCL2_MEM_ECC_STATUS__FED_MASK 0x00000002L
+//UTCL2_EDC_MODE
+#define UTCL2_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
+#define UTCL2_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define UTCL2_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define UTCL2_EDC_MODE__DED_MODE__SHIFT 0x14
+#define UTCL2_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define UTCL2_EDC_MODE__BYPASS__SHIFT 0x1f
+#define UTCL2_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
+#define UTCL2_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define UTCL2_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define UTCL2_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define UTCL2_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define UTCL2_EDC_MODE__BYPASS_MASK 0x80000000L
+//UTCL2_EDC_CONFIG
+#define UTCL2_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define UTCL2_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define UTCL2_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define UTCL2_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+
+
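/*
 * Illustrative sketch (not part of this patch): every register field in
 * these generated headers is described by a __SHIFT/_MASK pair, and
 * callers combine the two to extract or compose a bit-field. A minimal
 * example using macros defined above, assuming a hypothetical raw
 * register-read helper and the register offset macro from the matching
 * *_offset.h header:
 *
 *     u32 status = read_reg32(mmVM_L2_PROTECTION_FAULT_STATUS);    // hypothetical helper
 *     u32 cid = (status & VM_L2_PROTECTION_FAULT_STATUS__CID_MASK) >>
 *               VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT;
 *
 * Driver code typically wraps this shift-and-mask pattern in small
 * field-access helpers rather than open-coding it at each call site.
 */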
+// addressBlock: aid_mmhub_utcl2_vml2vcdec
+//VM_CONTEXT0_CNTL
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT1_CNTL
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT2_CNTL
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT3_CNTL
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT4_CNTL
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT5_CNTL
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT6_CNTL
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT7_CNTL
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT8_CNTL
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT9_CNTL
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT10_CNTL
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT11_CNTL
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT12_CNTL
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT13_CNTL
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT14_CNTL
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXT15_CNTL
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define VM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//VM_CONTEXTS_DISABLE
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//VM_INVALIDATE_ENG0_SEM
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG1_SEM
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG2_SEM
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG3_SEM
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG4_SEM
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG5_SEM
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG6_SEM
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG7_SEM
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG8_SEM
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG9_SEM
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG10_SEM
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG11_SEM
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG12_SEM
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG13_SEM
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG14_SEM
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG15_SEM
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG16_SEM
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG17_SEM
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG0_REQ
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG0_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG0_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG1_REQ
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG1_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG1_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG2_REQ
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG2_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG2_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG3_REQ
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG3_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG3_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG4_REQ
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG4_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG4_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG5_REQ
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG5_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG5_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG6_REQ
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG6_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG6_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG7_REQ
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG7_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG7_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG8_REQ
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG8_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG8_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG9_REQ
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG9_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG9_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG10_REQ
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG10_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG10_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG11_REQ
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG11_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG11_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG12_REQ
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG12_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG12_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG13_REQ
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG13_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG13_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG14_REQ
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG14_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG14_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG15_REQ
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG15_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG15_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG16_REQ
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG16_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG16_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG17_REQ
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG17_REQ__LOG_REQUEST__SHIFT 0x18
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+#define VM_INVALIDATE_ENG17_REQ__LOG_REQUEST_MASK 0x01000000L
+//VM_INVALIDATE_ENG0_ACK
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG1_ACK
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG2_ACK
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG3_ACK
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG4_ACK
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG5_ACK
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG6_ACK
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG7_ACK
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG8_ACK
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG9_ACK
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG10_ACK
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG11_ACK
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG12_ACK
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG13_ACK
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG14_ACK
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG15_ACK
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG16_ACK
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG17_ACK
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+
+
+// addressBlock: aid_mmhub_utcl2_vmsharedpfdec
+//MC_VM_NB_MMIOBASE
+#define MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//MC_VM_NB_MMIOLIMIT
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//MC_VM_NB_PCI_CTRL
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//MC_VM_NB_PCI_ARB
+#define MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//MC_VM_NB_TOP_OF_DRAM_SLOT1
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//MC_VM_NB_LOWER_TOP_OF_DRAM2
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//MC_VM_NB_UPPER_TOP_OF_DRAM2
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x0000FFFFL
+//MC_VM_FB_OFFSET
+#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//MC_VM_STEERING
+#define MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//MC_SHARED_VIRT_RESET_REQ
+#define MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//MC_MEM_POWER_LS
+#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//MC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_APT_CNTL
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define MC_VM_APT_CNTL__CHECK_IS_LOCAL__SHIFT 0x2
+#define MC_VM_APT_CNTL__PERMS_GRANTED__SHIFT 0x3
+#define MC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL__SHIFT 0x4
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+#define MC_VM_APT_CNTL__CHECK_IS_LOCAL_MASK 0x00000004L
+#define MC_VM_APT_CNTL__PERMS_GRANTED_MASK 0x00000008L
+#define MC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL_MASK 0x00000030L
+//MC_VM_LOCAL_HBM_ADDRESS_START
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_END
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x00FFFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//UTCL2_CGTT_CLK_CTRL
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x0000000FL
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x000000F0L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0001FFFFL
+//MC_VM_CACHEABLE_DRAM_CNTL
+#define MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+//MC_VM_HOST_MAPPING
+#define MC_VM_HOST_MAPPING__MODE__SHIFT 0x0
+#define MC_VM_HOST_MAPPING__MODE_MASK 0x00000001L
+
+
+// addressBlock: aid_mmhub_utcl2_vmsharedvcdec
+//MC_VM_FB_LOCATION_BASE
+#define MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//MC_VM_FB_LOCATION_TOP
+#define MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_TOP
+#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_BOT
+#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//MC_VM_AGP_BASE
+#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_MX_L1_TLB_CNTL
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
+
+
+// addressBlock: aid_mmhub_utcl2_vmsharedhvdec
+//MC_VM_FB_SIZE_OFFSET_VF0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF1
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF2
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF3
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF4
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF5
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF6
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF7
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF8
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF9
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF11
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF12
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF13
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF14
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF15
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VM_IOMMU_MMIO_CNTRL_1
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//MC_VM_MARC_BASE_LO_0
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_1
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_2
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_3
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_HI_0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_1
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_2
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_3
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_LO_0
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_1
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_2
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_3
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_HI_0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_1
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_2
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_3
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_LO_0
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_1
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_2
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_3
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_HI_0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_1
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_2
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_3
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//VM_IOMMU_CONTROL_REGISTER
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//VM_PCIE_ATS_CNTL
+#define VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_0
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_1
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_2
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_3
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_4
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_5
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_6
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_7
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_8
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_9
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_10
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_11
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_12
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_13
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_14
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_15
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//MC_SHARED_ACTIVE_FCN_ID
+#define MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MC_VM_XGMI_GPUIOV_ENABLE
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L
+
+
+// addressBlock: aid_mmhub_utcl2_atcl2pfcntrdec
+//ATC_L2_PERFCOUNTER_LO
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATC_L2_PERFCOUNTER_HI
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: aid_mmhub_utcl2_atcl2pfcntldec
+//ATC_L2_PERFCOUNTER0_CFG
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER1_CFG
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: aid_mmhub_utcl2_vml2pldec
+//MC_VM_L2_PERFCOUNTER0_CFG
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER1_CFG
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER2_CFG
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER3_CFG
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER4_CFG
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER5_CFG
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER6_CFG
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER7_CFG
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: aid_mmhub_utcl2_vml2prdec
+//MC_VM_L2_PERFCOUNTER_LO
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MC_VM_L2_PERFCOUNTER_HI
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: aid_mmhub_utcl2_l2tlbdec
+//L2TLB_TLB0_STATUS
+#define L2TLB_TLB0_STATUS__BUSY__SHIFT 0x0
+#define L2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define L2TLB_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define L2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR_MASK 0xFFFFFFFFL
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID__SHIFT 0x4
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID__SHIFT 0x9
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF__SHIFT 0xd
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA__SHIFT 0xe
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM__SHIFT 0x10
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM__SHIFT 0x11
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM__SHIFT 0x12
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID__SHIFT 0x13
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ__SHIFT 0x1f
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR_MASK 0x0000000FL
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID_MASK 0x000000F0L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID_MASK 0x00001E00L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF_MASK 0x00002000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA_MASK 0x0000C000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM_MASK 0x00010000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM_MASK 0x00020000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM_MASK 0x00040000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID_MASK 0x0FF80000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ_MASK 0x80000000L
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR_MASK 0xFFFFFFFFL
+//UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR__SHIFT 0x0
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS__SHIFT 0x4
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE__SHIFT 0x7
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP__SHIFT 0xd
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA__SHIFT 0xe
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO__SHIFT 0xf
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ__SHIFT 0x10
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE__SHIFT 0x11
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE__SHIFT 0x12
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG__SHIFT 0x14
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC__SHIFT 0x15
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK__SHIFT 0x16
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK__SHIFT 0x1f
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR_MASK 0x0000000FL
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS_MASK 0x00000070L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE_MASK 0x00001F80L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP_MASK 0x00002000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA_MASK 0x00004000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO_MASK 0x00008000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ_MASK 0x00010000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE_MASK 0x00020000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE_MASK 0x000C0000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG_MASK 0x00100000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC_MASK 0x00200000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK_MASK 0x00C00000L
+#define UTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK_MASK 0x80000000L
+
+
+// addressBlock: aid_mmhub_utcl2_l2tlbpldec
+//L2TLB_PERFCOUNTER0_CFG
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER1_CFG
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER2_CFG
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER3_CFG
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define L2TLB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define L2TLB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define L2TLB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define L2TLB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define L2TLB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define L2TLB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//L2TLB_PERFCOUNTER_RSLT_CNTL
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define L2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: aid_mmhub_utcl2_l2tlbprdec
+//L2TLB_PERFCOUNTER_LO
+#define L2TLB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define L2TLB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//L2TLB_PERFCOUNTER_HI
+#define L2TLB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define L2TLB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define L2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define L2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h
new file mode 100644
index 000000000000..f04fa95a770c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_13_0_6_OFFSET_HEADER
+#define _mp_13_0_6_OFFSET_HEADER
+
+
+
+// addressBlock: aid_mp_SmuMp0_SmnDec
+// base address: 0x0
+#define regMP0_SMN_C2PMSG_32 0x0060
+#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_33 0x0061
+#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_34 0x0062
+#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_35 0x0063
+#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_36 0x0064
+#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_37 0x0065
+#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_38 0x0066
+#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_39 0x0067
+#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_40 0x0068
+#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_41 0x0069
+#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_42 0x006a
+#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_43 0x006b
+#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_44 0x006c
+#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_45 0x006d
+#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_46 0x006e
+#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_47 0x006f
+#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_48 0x0070
+#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_49 0x0071
+#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_50 0x0072
+#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_51 0x0073
+#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_52 0x0074
+#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_53 0x0075
+#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_54 0x0076
+#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_55 0x0077
+#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_56 0x0078
+#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_57 0x0079
+#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_58 0x007a
+#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_59 0x007b
+#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_60 0x007c
+#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_61 0x007d
+#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_62 0x007e
+#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_63 0x007f
+#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_64 0x0080
+#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_65 0x0081
+#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_66 0x0082
+#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_68 0x0084
+#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_69 0x0085
+#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_70 0x0086
+#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_71 0x0087
+#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_72 0x0088
+#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_73 0x0089
+#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_74 0x008a
+#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_75 0x008b
+#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_76 0x008c
+#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_77 0x008d
+#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_78 0x008e
+#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_79 0x008f
+#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_80 0x0090
+#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_81 0x0091
+#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_82 0x0092
+#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_83 0x0093
+#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_84 0x0094
+#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_85 0x0095
+#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_86 0x0096
+#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_87 0x0097
+#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_88 0x0098
+#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_89 0x0099
+#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_90 0x009a
+#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_91 0x009b
+#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_92 0x009c
+#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_93 0x009d
+#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_94 0x009e
+#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_95 0x009f
+#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_96 0x00a0
+#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_97 0x00a1
+#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_98 0x00a2
+#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_99 0x00a3
+#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_100 0x00a4
+#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_101 0x00a5
+#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_102 0x00a6
+#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_103 0x00a7
+#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
+#define regMP0_SMN_IH_CREDIT 0x00c1
+#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
+#define regMP0_SMN_IH_SW_INT 0x00c2
+#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
+#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: aid_mp_SmuMp1_SmnDec
+// base address: 0x0
+#define regMP1_SMN_C2PMSG_32 0x0260
+#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_33 0x0261
+#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_34 0x0262
+#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_35 0x0263
+#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_36 0x0264
+#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_37 0x0265
+#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_38 0x0266
+#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_39 0x0267
+#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_40 0x0268
+#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_41 0x0269
+#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_42 0x026a
+#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_43 0x026b
+#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_44 0x026c
+#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_45 0x026d
+#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_46 0x026e
+#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_47 0x026f
+#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_48 0x0270
+#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_49 0x0271
+#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_50 0x0272
+#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_51 0x0273
+#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_52 0x0274
+#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_53 0x0275
+#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_54 0x0276
+#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_55 0x0277
+#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_56 0x0278
+#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_57 0x0279
+#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_58 0x027a
+#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_59 0x027b
+#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_60 0x027c
+#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_61 0x027d
+#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_62 0x027e
+#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_63 0x027f
+#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_64 0x0280
+#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_65 0x0281
+#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_66 0x0282
+#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_67 0x0283
+#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_68 0x0284
+#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_69 0x0285
+#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_70 0x0286
+#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_71 0x0287
+#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_72 0x0288
+#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_73 0x0289
+#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_74 0x028a
+#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_75 0x028b
+#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_76 0x028c
+#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_77 0x028d
+#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_78 0x028e
+#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_79 0x028f
+#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_80 0x0290
+#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_81 0x0291
+#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_82 0x0292
+#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_83 0x0293
+#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_84 0x0294
+#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_85 0x0295
+#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_86 0x0296
+#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_87 0x0297
+#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_88 0x0298
+#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_89 0x0299
+#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_90 0x029a
+#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_91 0x029b
+#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_92 0x029c
+#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_93 0x029d
+#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_94 0x029e
+#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_95 0x029f
+#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_96 0x02a0
+#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_97 0x02a1
+#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_98 0x02a2
+#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_99 0x02a3
+#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_100 0x02a4
+#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_101 0x02a5
+#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_102 0x02a6
+#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_103 0x02a7
+#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_104 0x02a8
+#define regMP1_SMN_C2PMSG_104_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_105 0x02a9
+#define regMP1_SMN_C2PMSG_105_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_106 0x02aa
+#define regMP1_SMN_C2PMSG_106_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_107 0x02ab
+#define regMP1_SMN_C2PMSG_107_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_108 0x02ac
+#define regMP1_SMN_C2PMSG_108_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_109 0x02ad
+#define regMP1_SMN_C2PMSG_109_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_110 0x02ae
+#define regMP1_SMN_C2PMSG_110_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_111 0x02af
+#define regMP1_SMN_C2PMSG_111_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_112 0x02b0
+#define regMP1_SMN_C2PMSG_112_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_113 0x02b1
+#define regMP1_SMN_C2PMSG_113_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_114 0x02b2
+#define regMP1_SMN_C2PMSG_114_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_115 0x02b3
+#define regMP1_SMN_C2PMSG_115_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_116 0x02b4
+#define regMP1_SMN_C2PMSG_116_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_117 0x02b5
+#define regMP1_SMN_C2PMSG_117_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_118 0x02b6
+#define regMP1_SMN_C2PMSG_118_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_119 0x02b7
+#define regMP1_SMN_C2PMSG_119_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_120 0x02b8
+#define regMP1_SMN_C2PMSG_120_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_121 0x02b9
+#define regMP1_SMN_C2PMSG_121_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_122 0x02ba
+#define regMP1_SMN_C2PMSG_122_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_123 0x02bb
+#define regMP1_SMN_C2PMSG_123_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_124 0x02bc
+#define regMP1_SMN_C2PMSG_124_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_125 0x02bd
+#define regMP1_SMN_C2PMSG_125_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_126 0x02be
+#define regMP1_SMN_C2PMSG_126_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_127 0x02bf
+#define regMP1_SMN_C2PMSG_127_BASE_IDX 0
+#define regMP1_SMN_IH_CREDIT 0x02c1
+#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT 0x02c2
+#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+#define regMP1_SMN_FPS_CNT 0x02c4
+#define regMP1_SMN_FPS_CNT_BASE_IDX 0
+#define regMP1_SMN_PUB_CTRL 0x02c5
+#define regMP1_SMN_PUB_CTRL_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH0 0x0340
+#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH1 0x0341
+#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH2 0x0342
+#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH3 0x0343
+#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH4 0x0344
+#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH5 0x0345
+#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH6 0x0346
+#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH7 0x0347
+#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH8 0x0348
+#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH10 0x034a
+#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH11 0x034b
+#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH12 0x034c
+#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH13 0x034d
+#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH14 0x034e
+#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH15 0x034f
+#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH16 0x0350
+#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH17 0x0351
+#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH18 0x0352
+#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH19 0x0353
+#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH20 0x0354
+#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH21 0x0355
+#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH22 0x0356
+#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH23 0x0357
+#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH24 0x0358
+#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH25 0x0359
+#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH26 0x035a
+#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH27 0x035b
+#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH28 0x035c
+#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH29 0x035d
+#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH30 0x035e
+#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH31 0x035f
+#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 0
+
+
+// addressBlock: aid_mp_SmuMp1Pub_CruDec
+// base address: 0x0
+#define regMP1_FIRMWARE_FLAGS 0xbee00a
+#define regMP1_FIRMWARE_FLAGS_BASE_IDX 0
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
new file mode 100644
index 000000000000..780d9824d5ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
@@ -0,0 +1,674 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_13_0_6_SH_MASK_HEADER
+#define _mp_13_0_6_SH_MASK_HEADER
+
+
+// addressBlock: aid_mp_SmuMp0_SmnDec
+//MP0_SMN_C2PMSG_32
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_33
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_34
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_35
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_36
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_37
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_38
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_39
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_40
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_41
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_42
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_43
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_44
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_45
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_46
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_47
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_48
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_49
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_50
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_51
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_52
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_53
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_54
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_55
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_56
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_57
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_58
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_59
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_60
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_61
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_62
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_63
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_64
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_65
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_66
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_67
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_68
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_69
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_70
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_71
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_72
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_73
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_74
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_75
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_76
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_77
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_78
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_79
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_80
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_81
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_82
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_83
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_84
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_85
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_86
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_87
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_88
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_89
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_90
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_91
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_92
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_93
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_94
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_95
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_96
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_97
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_98
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_99
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_100
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_101
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_102
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_103
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_IH_CREDIT
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_SMN_IH_SW_INT
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP0_SMN_IH_SW_INT_CTRL
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: aid_mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_104
+#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_105
+#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_106
+#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_107
+#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_108
+#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_109
+#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_110
+#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_111
+#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_112
+#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_113
+#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_114
+#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_115
+#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_116
+#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_117
+#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_118
+#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_119
+#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_120
+#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_121
+#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_122
+#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_123
+#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_124
+#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_125
+#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_126
+#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_127
+#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_PUB_CTRL
+#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0
+#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH8
+#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH10
+#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH11
+#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH12
+#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH13
+#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH14
+#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH15
+#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH16
+#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH17
+#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH18
+#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH19
+#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH20
+#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH21
+#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH22
+#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH23
+#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH24
+#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH25
+#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH26
+#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH27
+#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH28
+#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH29
+#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH30
+#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH31
+#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_mp_SmuMp1Pub_CruDec
+//MP1_FIRMWARE_FLAGS
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+
+
+#endif
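
(Editor's note, illustrative only — the lines above and below are the unmodified patch content.) The sh_mask header that ends here defines each register field as a __SHIFT/_MASK macro pair, and the offset header added next pairs each register with a _BASE_IDX index into a per-IP base-address table. The self-contained C sketch below shows how such pairs are typically combined; the helper names (ill_field_get, ill_field_set, ill_reg_index) and the base-table contents are hypothetical placeholders, not code from the amdgpu driver.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field helpers: a field value is isolated with its generated
 * _MASK and right-shifted by its __SHIFT; an update masks out the old field
 * before inserting the new value. */
#define ill_field_get(val, MASK, SHIFT)       (((val) & (MASK)) >> (SHIFT))
#define ill_field_set(val, MASK, SHIFT, fval) \
	(((val) & ~(uint32_t)(MASK)) | (((uint32_t)(fval) << (SHIFT)) & (MASK)))

/* Illustrative register addressing: reg<NAME> is a dword offset and
 * reg<NAME>_BASE_IDX selects an entry in a per-block base table
 * (placeholder values here; the real tables live elsewhere in the driver). */
static const uint32_t ill_block_base[] = { 0x0, 0x0, 0x0, 0x0, 0x0 };

static inline uint32_t ill_reg_index(uint32_t offset, uint32_t base_idx)
{
	return ill_block_base[base_idx] + offset;   /* dword-granular MMIO index */
}

int main(void)
{
	/* The literal mask/shift values below mirror the MP1_SMN_IH_SW_INT
	 * ID and VALID field macros defined in the header above. */
	uint32_t v = 0;

	v = ill_field_set(v, 0x000000FFL, 0x0, 0x2a);   /* ID    = 0x2a */
	v = ill_field_set(v, 0x00000100L, 0x8, 1);      /* VALID = 1    */
	printf("reg=0x%08x id=0x%02x valid=%u idx=%u\n",
	       v,
	       (unsigned)ill_field_get(v, 0x000000FFL, 0x0),
	       (unsigned)ill_field_get(v, 0x00000100L, 0x8),
	       (unsigned)ill_reg_index(0x000c, 0));
	return 0;
}
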
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h
new file mode 100644
index 000000000000..033f2796c1e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h
@@ -0,0 +1,10002 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _nbio_7_9_0_OFFSET_HEADER
+#define _nbio_7_9_0_OFFSET_HEADER
+
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_SYSDEC
+// base address: 0x0
+#define regBIF_BX0_PCIE_INDEX 0x000c
+#define regBIF_BX0_PCIE_INDEX_BASE_IDX 0
+#define regBIF_BX0_PCIE_DATA 0x000d
+#define regBIF_BX0_PCIE_DATA_BASE_IDX 0
+#define regBIF_BX0_PCIE_INDEX2 0x000e
+#define regBIF_BX0_PCIE_INDEX2_BASE_IDX 0
+#define regBIF_BX0_PCIE_DATA2 0x000f
+#define regBIF_BX0_PCIE_DATA2_BASE_IDX 0
+#define regBIF_BX0_PCIE_INDEX_HI 0x0010
+#define regBIF_BX0_PCIE_INDEX_HI_BASE_IDX 0
+#define regBIF_BX0_PCIE_INDEX2_HI 0x0011
+#define regBIF_BX0_PCIE_INDEX2_HI_BASE_IDX 0
+#define regBIF_BX0_SBIOS_SCRATCH_0 0x0034
+#define regBIF_BX0_SBIOS_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_1 0x0035
+#define regBIF_BX0_SBIOS_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_2 0x0036
+#define regBIF_BX0_SBIOS_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_3 0x0037
+#define regBIF_BX0_SBIOS_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_0 0x0038
+#define regBIF_BX0_BIOS_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_1 0x0039
+#define regBIF_BX0_BIOS_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_2 0x003a
+#define regBIF_BX0_BIOS_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_3 0x003b
+#define regBIF_BX0_BIOS_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_4 0x003c
+#define regBIF_BX0_BIOS_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_5 0x003d
+#define regBIF_BX0_BIOS_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_6 0x003e
+#define regBIF_BX0_BIOS_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_7 0x003f
+#define regBIF_BX0_BIOS_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_8 0x0040
+#define regBIF_BX0_BIOS_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_9 0x0041
+#define regBIF_BX0_BIOS_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_10 0x0042
+#define regBIF_BX0_BIOS_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_11 0x0043
+#define regBIF_BX0_BIOS_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_12 0x0044
+#define regBIF_BX0_BIOS_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_13 0x0045
+#define regBIF_BX0_BIOS_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_14 0x0046
+#define regBIF_BX0_BIOS_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_15 0x0047
+#define regBIF_BX0_BIOS_SCRATCH_15_BASE_IDX 1
+#define regBIF_BX0_BIF_RLC_INTR_CNTL 0x004c
+#define regBIF_BX0_BIF_RLC_INTR_CNTL_BASE_IDX 1
+#define regBIF_BX0_BIF_VCE_INTR_CNTL 0x004d
+#define regBIF_BX0_BIF_VCE_INTR_CNTL_BASE_IDX 1
+#define regBIF_BX0_BIF_UVD_INTR_CNTL 0x004e
+#define regBIF_BX0_BIF_UVD_INTR_CNTL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR0 0x006c
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR0_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0 0x006d
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR1 0x006e
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR1_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1 0x006f
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR2 0x0070
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR2_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2 0x0071
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR3 0x0072
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR3_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3 0x0073
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR4 0x0074
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR4_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4 0x0075
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR5 0x0076
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR5_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5 0x0077
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR6 0x0078
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR6_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6 0x0079
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR7 0x007a
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR7_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7 0x007b
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_CNTL 0x007c
+#define regBIF_BX0_GFX_MMIOREG_CAM_CNTL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL 0x007d
+#define regBIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ONE_CPL 0x007e
+#define regBIF_BX0_GFX_MMIOREG_CAM_ONE_CPL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL 0x007f
+#define regBIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_0 0x0080
+#define regBIF_BX0_DRIVER_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_1 0x0081
+#define regBIF_BX0_DRIVER_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_2 0x0082
+#define regBIF_BX0_DRIVER_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_3 0x0083
+#define regBIF_BX0_DRIVER_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_4 0x0084
+#define regBIF_BX0_DRIVER_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_5 0x0085
+#define regBIF_BX0_DRIVER_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_6 0x0086
+#define regBIF_BX0_DRIVER_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_7 0x0087
+#define regBIF_BX0_DRIVER_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_8 0x0088
+#define regBIF_BX0_DRIVER_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_9 0x0089
+#define regBIF_BX0_DRIVER_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_10 0x008a
+#define regBIF_BX0_DRIVER_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_11 0x008b
+#define regBIF_BX0_DRIVER_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_12 0x008c
+#define regBIF_BX0_DRIVER_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_13 0x008d
+#define regBIF_BX0_DRIVER_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_14 0x008e
+#define regBIF_BX0_DRIVER_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_15 0x008f
+#define regBIF_BX0_DRIVER_SCRATCH_15_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_0 0x0090
+#define regBIF_BX0_FW_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_1 0x0091
+#define regBIF_BX0_FW_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_2 0x0092
+#define regBIF_BX0_FW_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_3 0x0093
+#define regBIF_BX0_FW_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_4 0x0094
+#define regBIF_BX0_FW_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_5 0x0095
+#define regBIF_BX0_FW_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_6 0x0096
+#define regBIF_BX0_FW_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_7 0x0097
+#define regBIF_BX0_FW_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_8 0x0098
+#define regBIF_BX0_FW_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_9 0x0099
+#define regBIF_BX0_FW_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_10 0x009a
+#define regBIF_BX0_FW_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_11 0x009b
+#define regBIF_BX0_FW_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_12 0x009c
+#define regBIF_BX0_FW_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_13 0x009d
+#define regBIF_BX0_FW_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_14 0x009e
+#define regBIF_BX0_FW_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_15 0x009f
+#define regBIF_BX0_FW_SCRATCH_15_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_4 0x00a0
+#define regBIF_BX0_SBIOS_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_5 0x00a1
+#define regBIF_BX0_SBIOS_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_6 0x00a2
+#define regBIF_BX0_SBIOS_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_7 0x00a3
+#define regBIF_BX0_SBIOS_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_8 0x00a4
+#define regBIF_BX0_SBIOS_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_9 0x00a5
+#define regBIF_BX0_SBIOS_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_10 0x00a6
+#define regBIF_BX0_SBIOS_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_11 0x00a7
+#define regBIF_BX0_SBIOS_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_12 0x00a8
+#define regBIF_BX0_SBIOS_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_13 0x00a9
+#define regBIF_BX0_SBIOS_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_14 0x00aa
+#define regBIF_BX0_SBIOS_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_15 0x00ab
+#define regBIF_BX0_SBIOS_SCRATCH_15_BASE_IDX 1
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwn_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_DWN_DEV0_0_DN_PCIE_RESERVED 0x0060
+#define regRCC_DWN_DEV0_0_DN_PCIE_RESERVED_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_SCRATCH 0x0061
+#define regRCC_DWN_DEV0_0_DN_PCIE_SCRATCH_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_CNTL 0x0063
+#define regRCC_DWN_DEV0_0_DN_PCIE_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL 0x0064
+#define regRCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2 0x0065
+#define regRCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL 0x0066
+#define regRCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL 0x0067
+#define regRCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_F0 0x0068
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_F0_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC 0x0069
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2 0x006a
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_DWNP_DEV0_0_PCIE_ERR_CNTL 0x006c
+#define regRCC_DWNP_DEV0_0_PCIE_ERR_CNTL_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIE_RX_CNTL 0x006d
+#define regRCC_DWNP_DEV0_0_PCIE_RX_CNTL_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL 0x006e
+#define regRCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIE_LC_CNTL2 0x006f
+#define regRCC_DWNP_DEV0_0_PCIE_LC_CNTL2_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIEP_STRAP_MISC 0x0070
+#define regRCC_DWNP_DEV0_0_PCIEP_STRAP_MISC_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP 0x0071
+#define regRCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_ep_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_EP_DEV0_0_EP_PCIE_SCRATCH 0x0040
+#define regRCC_EP_DEV0_0_EP_PCIE_SCRATCH_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_CNTL 0x0042
+#define regRCC_EP_DEV0_0_EP_PCIE_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_CNTL 0x0043
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_STATUS 0x0044
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_STATUS_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL2 0x0045
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_BUS_CNTL 0x0046
+#define regRCC_EP_DEV0_0_EP_PCIE_BUS_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_CFG_CNTL 0x0047
+#define regRCC_EP_DEV0_0_EP_PCIE_CFG_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x0049
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 0x004a
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 0x004a
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 0x004a
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 0x004a
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC 0x004c
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2 0x004d
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP 0x004f
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR 0x0050
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL 0x0050
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x0050
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x0051
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x0051
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x0051
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x0051
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_PME_CONTROL 0x0052
+#define regRCC_EP_DEV0_0_EP_PCIE_PME_CONTROL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIEP_RESERVED 0x0053
+#define regRCC_EP_DEV0_0_EP_PCIEP_RESERVED_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_CNTL 0x0055
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID 0x0056
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_ERR_CNTL 0x0057
+#define regRCC_EP_DEV0_0_EP_PCIE_ERR_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL 0x0058
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL 0x0059
+#define regRCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_PF0_MM_INDEX 0x0000
+#define regBIF_BX_PF0_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_PF0_MM_DATA 0x0001
+#define regBIF_BX_PF0_MM_DATA_BASE_IDX 0
+#define regBIF_BX_PF0_MM_INDEX_HI 0x0006
+#define regBIF_BX_PF0_MM_INDEX_HI_BASE_IDX 0
+#define regBIF_BX_PF0_RSMU_INDEX 0x0000
+#define regBIF_BX_PF0_RSMU_INDEX_BASE_IDX 1
+#define regBIF_BX_PF0_RSMU_DATA 0x0001
+#define regBIF_BX_PF0_RSMU_DATA_BASE_IDX 1
+#define regBIF_BX_PF0_RSMU_INDEX_HI 0x0002
+#define regBIF_BX_PF0_RSMU_INDEX_HI_BASE_IDX 1
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_BIFDEC1
+// base address: 0x0
+#define regBIF_BX0_CC_BIF_BX_STRAP0 0x00e2
+#define regBIF_BX0_CC_BIF_BX_STRAP0_BASE_IDX 2
+#define regBIF_BX0_CC_BIF_BX_PINSTRAP0 0x00e4
+#define regBIF_BX0_CC_BIF_BX_PINSTRAP0_BASE_IDX 2
+#define regBIF_BX0_BIF_MM_INDACCESS_CNTL 0x00e6
+#define regBIF_BX0_BIF_MM_INDACCESS_CNTL_BASE_IDX 2
+#define regBIF_BX0_BUS_CNTL 0x00e7
+#define regBIF_BX0_BUS_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_SCRATCH0 0x00e8
+#define regBIF_BX0_BIF_SCRATCH0_BASE_IDX 2
+#define regBIF_BX0_BIF_SCRATCH1 0x00e9
+#define regBIF_BX0_BIF_SCRATCH1_BASE_IDX 2
+#define regBIF_BX0_BX_RESET_EN 0x00ed
+#define regBIF_BX0_BX_RESET_EN_BASE_IDX 2
+#define regBIF_BX0_MM_CFGREGS_CNTL 0x00ee
+#define regBIF_BX0_MM_CFGREGS_CNTL_BASE_IDX 2
+#define regBIF_BX0_BX_RESET_CNTL 0x00f0
+#define regBIF_BX0_BX_RESET_CNTL_BASE_IDX 2
+#define regBIF_BX0_INTERRUPT_CNTL 0x00f1
+#define regBIF_BX0_INTERRUPT_CNTL_BASE_IDX 2
+#define regBIF_BX0_INTERRUPT_CNTL2 0x00f2
+#define regBIF_BX0_INTERRUPT_CNTL2_BASE_IDX 2
+#define regBIF_BX0_CLKREQB_PAD_CNTL 0x00f8
+#define regBIF_BX0_CLKREQB_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_FEATURES_CONTROL_MISC 0x00fb
+#define regBIF_BX0_BIF_FEATURES_CONTROL_MISC_BASE_IDX 2
+#define regBIF_BX0_HDP_ATOMIC_CONTROL_MISC 0x00fc
+#define regBIF_BX0_HDP_ATOMIC_CONTROL_MISC_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_CNTL 0x00fd
+#define regBIF_BX0_BIF_DOORBELL_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_FB_EN 0x0100
+#define regBIF_BX0_BIF_FB_EN_BASE_IDX 2
+#define regBIF_BX0_BIF_INTR_CNTL 0x0101
+#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_MST_TRANS_PENDING_VF 0x0109
+#define regBIF_BX0_BIF_MST_TRANS_PENDING_VF_BASE_IDX 2
+#define regBIF_BX0_BIF_SLV_TRANS_PENDING_VF 0x010a
+#define regBIF_BX0_BIF_SLV_TRANS_PENDING_VF_BASE_IDX 2
+#define regBIF_BX0_BACO_CNTL 0x010b
+#define regBIF_BX0_BACO_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIME0 0x010c
+#define regBIF_BX0_BIF_BACO_EXIT_TIME0_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER1 0x010d
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER1_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER2 0x010e
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER2_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER3 0x010f
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER3_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER4 0x0110
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER4_BASE_IDX 2
+#define regBIF_BX0_MEM_TYPE_CNTL 0x0111
+#define regBIF_BX0_MEM_TYPE_CNTL_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_CNTL 0x0113
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_CNTL_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_0 0x0114
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_0_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_1 0x0115
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_1_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_2 0x0116
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_2_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_3 0x0117
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_3_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_4 0x0118
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_4_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_5 0x0119
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_5_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_6 0x011a
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_6_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_7 0x011b
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_7_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_8 0x011c
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_8_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_9 0x011d
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_9_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_10 0x011e
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_10_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_11 0x011f
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_11_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_12 0x0120
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_12_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_13 0x0121
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_13_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_14 0x0122
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_14_BASE_IDX 2
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_15 0x0123
+#define regBIF_BX0_NBIF_GFX_ADDR_LUT_15_BASE_IDX 2
+#define regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL 0x012d
+#define regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL 0x012e
+#define regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_CNTL 0x012f
+#define regBIF_BX0_BIF_RB_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_BASE 0x0130
+#define regBIF_BX0_BIF_RB_BASE_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_RPTR 0x0131
+#define regBIF_BX0_BIF_RB_RPTR_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_WPTR 0x0132
+#define regBIF_BX0_BIF_RB_WPTR_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_HI 0x0133
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_HI_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_LO 0x0134
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_LO_BASE_IDX 2
+#define regBIF_BX0_MAILBOX_INDEX 0x0135
+#define regBIF_BX0_MAILBOX_INDEX_BASE_IDX 2
+#define regBIF_BX0_BIF_MP1_INTR_CTRL 0x0142
+#define regBIF_BX0_BIF_MP1_INTR_CTRL_BASE_IDX 2
+#define regBIF_BX0_BIF_PERSTB_PAD_CNTL 0x0146
+#define regBIF_BX0_BIF_PERSTB_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_PX_EN_PAD_CNTL 0x0147
+#define regBIF_BX0_BIF_PX_EN_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_REFPADKIN_PAD_CNTL 0x0148
+#define regBIF_BX0_BIF_REFPADKIN_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_CLKREQB_PAD_CNTL 0x0149
+#define regBIF_BX0_BIF_CLKREQB_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_PWRBRK_PAD_CNTL 0x014a
+#define regBIF_BX0_BIF_PWRBRK_PAD_CNTL_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_DEV0_0_RCC_ERR_INT_CNTL 0x0086
+#define regRCC_DEV0_0_RCC_ERR_INT_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BACO_CNTL_MISC 0x0087
+#define regRCC_DEV0_0_RCC_BACO_CNTL_MISC_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_RESET_EN 0x0088
+#define regRCC_DEV0_0_RCC_RESET_EN_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_VDM_SUPPORT 0x0089
+#define regRCC_DEV0_0_RCC_VDM_SUPPORT_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0 0x008a
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1 0x008b
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_GPUIOV_REGION 0x008c
+#define regRCC_DEV0_0_RCC_GPUIOV_REGION_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_GPU_HOSTVM_EN 0x008d
+#define regRCC_DEV0_0_RCC_GPU_HOSTVM_EN_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL 0x008e
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET 0x008f
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE 0x008f
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE0 0x00be
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE1 0x00bf
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUS_CNTL 0x00c1
+#define regRCC_DEV0_0_RCC_BUS_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_CNTL 0x00c2
+#define regRCC_DEV0_0_RCC_CONFIG_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_F0_BASE 0x00c6
+#define regRCC_DEV0_0_RCC_CONFIG_F0_BASE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_APER_SIZE 0x00c7
+#define regRCC_DEV0_0_RCC_CONFIG_APER_SIZE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE 0x00c8
+#define regRCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_XDMA_LO 0x00c9
+#define regRCC_DEV0_0_RCC_XDMA_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_XDMA_HI 0x00ca
+#define regRCC_DEV0_0_RCC_XDMA_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_FEATURES_CONTROL_MISC 0x00cb
+#define regRCC_DEV0_0_RCC_FEATURES_CONTROL_MISC_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL1 0x00cc
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST0 0x00cd
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST1 0x00ce
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL2 0x00cf
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL2_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM 0x00d0
+#define regRCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_HOST_BUSNUM 0x00d1
+#define regRCC_DEV0_0_RCC_HOST_BUSNUM_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI 0x00d2
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO 0x00d3
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI 0x00d4
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO 0x00d5
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI 0x00d6
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO 0x00d7
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI 0x00d8
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO 0x00d9
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST0 0x00da
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST1 0x00db
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_DEV0_LINK_CNTL 0x00dd
+#define regRCC_DEV0_0_RCC_DEV0_LINK_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CMN_LINK_CNTL 0x00de
+#define regRCC_DEV0_0_RCC_CMN_LINK_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE 0x00df
+#define regRCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_LTR_LSWITCH_CNTL 0x00e0
+#define regRCC_DEV0_0_RCC_LTR_LSWITCH_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_MH_ARB_CNTL 0x00e1
+#define regRCC_DEV0_0_RCC_MH_ARB_CNTL_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_rcc_strap_BIFDEC1
+// base address: 0x0
+#define regRCC_STRAP0_RCC_BIF_STRAP0 0x0000
+#define regRCC_STRAP0_RCC_BIF_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP1 0x0001
+#define regRCC_STRAP0_RCC_BIF_STRAP1_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP2 0x0002
+#define regRCC_STRAP0_RCC_BIF_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP3 0x0003
+#define regRCC_STRAP0_RCC_BIF_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP4 0x0004
+#define regRCC_STRAP0_RCC_BIF_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP5 0x0005
+#define regRCC_STRAP0_RCC_BIF_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP6 0x0006
+#define regRCC_STRAP0_RCC_BIF_STRAP6_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP0 0x0007
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP1 0x0008
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP1_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP10 0x0009
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP10_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP11 0x000a
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP11_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP12 0x000b
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP12_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP13 0x000c
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP13_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP14 0x000d
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP14_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP2 0x000e
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP3 0x000f
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP4 0x0010
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP5 0x0011
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP6 0x0012
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP6_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP7 0x0013
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP7_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP8 0x0014
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP8_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP9 0x0015
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP9_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0 0x0016
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP1 0x0017
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP1_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP13 0x0018
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP13_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP14 0x0019
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP14_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP15 0x001a
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP15_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP16 0x001b
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP16_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP17 0x001c
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP17_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP18 0x001d
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP18_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP2 0x001e
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP26 0x001f
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP26_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP3 0x0020
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP4 0x0021
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP5 0x0022
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP8 0x0024
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP8_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP9 0x0025
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP9_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP0 0x0026
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP2 0x0032
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP20 0x0033
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP20_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP21 0x0034
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP21_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP22 0x0035
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP22_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP23 0x0036
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP23_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP24 0x0037
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP24_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP25 0x0038
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP25_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP3 0x0039
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP4 0x003a
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP5 0x003b
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP6 0x003c
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP6_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP7 0x003d
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP7_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_PF0_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_PF0_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_PF0_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_PF0_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_PF0_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_PF0_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_PF0_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_PF0_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_PF0_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_PF0_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_PF0_BIF_VMHV_MAILBOX_BASE_IDX 2
+#define regBIF_BX_PF0_PARTITION_COMPUTE_CAP 0x0161
+#define regBIF_BX_PF0_PARTITION_COMPUTE_CAP_BASE_IDX 2
+#define regBIF_BX_PF0_PARTITION_MEM_CAP 0x0162
+#define regBIF_BX_PF0_PARTITION_MEM_CAP_BASE_IDX 2
+#define regBIF_BX_PF0_PARTITION_COMPUTE_STATUS 0x0163
+#define regBIF_BX_PF0_PARTITION_COMPUTE_STATUS_BASE_IDX 2
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_BIFPFVFDEC1[13440..14975]
+// base address: 0x3480
+#define regRCC_DEV0_EPF0_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_gdc_GDCDEC
+// base address: 0x0
+#define regGDC0_A2S_CNTL_CL0 0x0000
+#define regGDC0_A2S_CNTL_CL0_BASE_IDX 3
+#define regGDC0_A2S_CNTL_CL1 0x0001
+#define regGDC0_A2S_CNTL_CL1_BASE_IDX 3
+#define regGDC0_A2S_CNTL3_CL0 0x0018
+#define regGDC0_A2S_CNTL3_CL0_BASE_IDX 3
+#define regGDC0_A2S_CNTL3_CL1 0x0019
+#define regGDC0_A2S_CNTL3_CL1_BASE_IDX 3
+#define regGDC0_A2S_CNTL_SW0 0x0030
+#define regGDC0_A2S_CNTL_SW0_BASE_IDX 3
+#define regGDC0_A2S_CNTL_SW1 0x0031
+#define regGDC0_A2S_CNTL_SW1_BASE_IDX 3
+#define regGDC0_A2S_CNTL_SW2 0x0032
+#define regGDC0_A2S_CNTL_SW2_BASE_IDX 3
+#define regGDC0_A2S_TAG_ALLOC_0 0x003d
+#define regGDC0_A2S_TAG_ALLOC_0_BASE_IDX 3
+#define regGDC0_A2S_TAG_ALLOC_1 0x003e
+#define regGDC0_A2S_TAG_ALLOC_1_BASE_IDX 3
+#define regGDC0_A2S_MISC_CNTL 0x0041
+#define regGDC0_A2S_MISC_CNTL_BASE_IDX 3
+#define regGDC0_SHUB_REGS_IF_CTL 0x0043
+#define regGDC0_SHUB_REGS_IF_CTL_BASE_IDX 3
+#define regGDC0_NGDC_MGCG_CTRL 0x004a
+#define regGDC0_NGDC_MGCG_CTRL_BASE_IDX 3
+#define regGDC0_NGDC_RESERVED_0 0x004b
+#define regGDC0_NGDC_RESERVED_0_BASE_IDX 3
+#define regGDC0_NGDC_RESERVED_1 0x004c
+#define regGDC0_NGDC_RESERVED_1_BASE_IDX 3
+#define regGDC0_NBIF_GFX_DOORBELL_STATUS 0x004f
+#define regGDC0_NBIF_GFX_DOORBELL_STATUS_BASE_IDX 3
+#define regGDC0_ATDMA_MISC_CNTL 0x005d
+#define regGDC0_ATDMA_MISC_CNTL_BASE_IDX 3
+#define regGDC0_S2A_MISC_CNTL 0x005f
+#define regGDC0_S2A_MISC_CNTL_BASE_IDX 3
+#define regGDC0_NGDC_PG_MISC_CTRL 0x0078
+#define regGDC0_NGDC_PG_MISC_CTRL_BASE_IDX 3
+#define regGDC0_NGDC_PGMST_CTRL 0x0079
+#define regGDC0_NGDC_PGMST_CTRL_BASE_IDX 3
+#define regGDC0_NGDC_PGSLV_CTRL 0x007a
+#define regGDC0_NGDC_PGSLV_CTRL_BASE_IDX 3
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST 0x0048
+#define cfgBIF_CFG_DEV0_EPF0_ADAPTER_ID_W 0x004c
+#define cfgBIF_CFG_DEV0_EPF0_PMI_CAP_LIST 0x0050
+#define cfgBIF_CFG_DEV0_EPF0_PMI_CAP 0x0052
+#define cfgBIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL 0x0054
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST 0x0110
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1 0x0114
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2 0x0118
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL 0x011c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS 0x011e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP 0x0120
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL 0x0124
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS 0x012a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP 0x012c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL 0x0130
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS 0x0136
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1 0x0144
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2 0x0148
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST 0x0200
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP 0x0204
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL 0x0208
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP 0x020c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL 0x0210
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP 0x0214
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL 0x0218
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP 0x021c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL 0x0220
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP 0x0224
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL 0x0228
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP 0x022c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL 0x0230
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT 0x0244
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA 0x0248
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP 0x024c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST 0x0250
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_CAP 0x0254
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR 0x0258
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS 0x025c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL 0x025e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3 0x0274
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS 0x0278
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST 0x02a0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ACS_CAP 0x02a4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL 0x02a6
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ATS_CNTL 0x02b6
+#define cfgPCIE_PAGE_REQ_ENH_CAP_LIST 0x02c0
+#define cfgPCIE_PAGE_REQ_CNTL 0x02c4
+#define cfgPCIE_PAGE_REQ_STATUS 0x02c6
+#define cfgPCIE_OUTSTAND_PAGE_REQ_CAPACITY 0x02c8
+#define cfgPCIE_OUTSTAND_PAGE_REQ_ALLOC 0x02cc
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST 0x02d0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PASID_CAP 0x02d4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL 0x02d6
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST 0x02f0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_CAP 0x02f4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_CNTL 0x02f6
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_ADDR0 0x02f8
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_ADDR1 0x02fc
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_RCV0 0x0300
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_RCV1 0x0304
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL0 0x0308
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL1 0x030c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST 0x0320
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x0324
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL 0x032e
+#define cfgPCIE_SRIOV_ENH_CAP_LIST 0x0330
+#define cfgPCIE_SRIOV_CAP 0x0334
+#define cfgPCIE_SRIOV_CONTROL 0x0338
+#define cfgPCIE_SRIOV_STATUS 0x033a
+#define cfgPCIE_SRIOV_INITIAL_VFS 0x033c
+#define cfgPCIE_SRIOV_TOTAL_VFS 0x033e
+#define cfgPCIE_SRIOV_NUM_VFS 0x0340
+#define cfgPCIE_SRIOV_FUNC_DEP_LINK 0x0342
+#define cfgPCIE_SRIOV_FIRST_VF_OFFSET 0x0344
+#define cfgPCIE_SRIOV_VF_STRIDE 0x0346
+#define cfgPCIE_SRIOV_VF_DEVICE_ID 0x034a
+#define cfgPCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c
+#define cfgPCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350
+#define cfgPCIE_SRIOV_VF_BASE_ADDR_0 0x0354
+#define cfgPCIE_SRIOV_VF_BASE_ADDR_1 0x0358
+#define cfgPCIE_SRIOV_VF_BASE_ADDR_2 0x035c
+#define cfgPCIE_SRIOV_VF_BASE_ADDR_3 0x0360
+#define cfgPCIE_SRIOV_VF_BASE_ADDR_4 0x0364
+#define cfgPCIE_SRIOV_VF_BASE_ADDR_5 0x0368
+#define cfgPCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST 0x0400
+#define cfgBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP 0x0404
+#define cfgBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS 0x0408
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST 0x0410
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP_16GT 0x0414
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL_16GT 0x0418
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS_16GT 0x041c
+#define cfgBIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
+#define cfgBIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
+#define cfgBIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
+#define cfgBIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
+#define cfgBIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
+#define cfgBIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
+#define cfgBIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
+#define cfgBIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
+#define cfgBIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
+#define cfgBIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
+#define cfgBIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
+#define cfgBIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
+#define cfgBIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
+#define cfgBIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
+#define cfgBIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
+#define cfgBIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST 0x0450
+#define cfgBIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP 0x0454
+#define cfgBIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS 0x0456
+#define cfgBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL 0x0458
+#define cfgBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS 0x045a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL 0x045c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS 0x045e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL 0x0460
+#define cfgBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS 0x0462
+#define cfgBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL 0x0464
+#define cfgBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS 0x0466
+#define cfgBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL 0x0468
+#define cfgBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS 0x046a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL 0x046c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS 0x046e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL 0x0470
+#define cfgBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS 0x0472
+#define cfgBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL 0x0474
+#define cfgBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS 0x0476
+#define cfgBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL 0x0478
+#define cfgBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS 0x047a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL 0x047c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS 0x047e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL 0x0480
+#define cfgBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS 0x0482
+#define cfgBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL 0x0484
+#define cfgBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS 0x0486
+#define cfgBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL 0x0488
+#define cfgBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS 0x048a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL 0x048c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS 0x048e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL 0x0490
+#define cfgBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS 0x0492
+#define cfgBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL 0x0494
+#define cfgBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS 0x0496
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP_32GT 0x0504
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL_32GT 0x0508
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS_32GT 0x050c
+#define cfgPCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV 0x0700
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV 0x0704
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW 0x0708
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE 0x070c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS 0x0710
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL 0x0714
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 0x0718
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 0x071c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 0x0720
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT 0x0724
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB 0x0728
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION 0x0730
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE 0x0734
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB 0x0738
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB 0x073c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB 0x0740
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB 0x0744
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB 0x0748
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB 0x074c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB 0x0750
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB 0x0754
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB 0x0758
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB 0x075c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB 0x0760
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB 0x0764
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB 0x0768
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB 0x076c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB 0x0770
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB 0x0774
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB 0x0778
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB 0x077c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB 0x0780
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB 0x0784
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB 0x0788
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB 0x078c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB 0x0790
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB 0x0794
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB 0x0798
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB 0x079c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB 0x07a0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB 0x07a4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB 0x07a8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB 0x07ac
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB 0x07b0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS 0x07c0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1 0x07c4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2 0x07c8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3 0x07cc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4 0x07d0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0 0x07f0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1 0x07f4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2 0x07f8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3 0x07fc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4 0x0800
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5 0x0804
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6 0x0808
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7 0x080c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8 0x0810
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 0x0820
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 0x0824
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 0x0828
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 0x082c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 0x0830
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 0x0834
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 0x0838
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 0x083c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 0x0840
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0 0x0850
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1 0x0854
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2 0x0858
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3 0x085c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4 0x0860
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5 0x0864
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6 0x0868
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7 0x086c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8 0x0870
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0 0x0880
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1 0x0884
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2 0x0888
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3 0x088c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4 0x0890
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5 0x0894
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6 0x0898
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7 0x089c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8 0x08a0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0 0x08b0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1 0x08b4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2 0x08b8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3 0x08bc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4 0x08c0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5 0x08c4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6 0x08c8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7 0x08cc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8 0x08d0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0 0x08e0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1 0x08e4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2 0x08e8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3 0x08ec
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4 0x08f0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5 0x08f4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6 0x08f8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7 0x08fc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8 0x0900
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0 0x0910
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1 0x0914
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2 0x0918
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3 0x091c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4 0x0920
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5 0x0924
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6 0x0928
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7 0x092c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8 0x0930
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0 0x0940
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1 0x0944
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2 0x0948
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3 0x094c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4 0x0950
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5 0x0954
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6 0x0958
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7 0x095c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8 0x0960
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0 0x0970
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1 0x0974
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2 0x0978
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3 0x097c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4 0x0980
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5 0x0984
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6 0x0988
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7 0x098c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8 0x0990
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0 0x09a0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1 0x09a4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2 0x09a8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3 0x09ac
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4 0x09b0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5 0x09b4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6 0x09b8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7 0x09bc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8 0x09c0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0 0x09d0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1 0x09d4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2 0x09d8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3 0x09dc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4 0x09e0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5 0x09e4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6 0x09e8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7 0x09ec
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8 0x09f0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0 0x0a00
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1 0x0a04
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2 0x0a08
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3 0x0a0c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4 0x0a10
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5 0x0a14
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6 0x0a18
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7 0x0a1c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8 0x0a20
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0 0x0a30
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1 0x0a34
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2 0x0a38
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3 0x0a3c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4 0x0a40
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5 0x0a44
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6 0x0a48
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7 0x0a4c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8 0x0a50
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0 0x0a60
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1 0x0a64
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2 0x0a68
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3 0x0a6c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4 0x0a70
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5 0x0a74
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6 0x0a78
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7 0x0a7c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8 0x0a80
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0 0x0a90
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1 0x0a94
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2 0x0a98
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3 0x0a9c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4 0x0aa0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5 0x0aa4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6 0x0aa8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7 0x0aac
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8 0x0ab0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0 0x0ac0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1 0x0ac4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2 0x0ac8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3 0x0acc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4 0x0ad0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5 0x0ad4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6 0x0ad8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7 0x0adc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8 0x0ae0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0 0x0af0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1 0x0af4
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2 0x0af8
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3 0x0afc
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4 0x0b00
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5 0x0b04
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6 0x0b08
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7 0x0b0c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8 0x0b10
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0 0x0b20
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1 0x0b24
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2 0x0b28
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3 0x0b2c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4 0x0b30
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5 0x0b34
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6 0x0b38
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7 0x0b3c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8 0x0b40
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0 0x0b50
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1 0x0b54
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2 0x0b58
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3 0x0b5c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4 0x0b60
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5 0x0b64
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6 0x0b68
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7 0x0b6c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8 0x0b70
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0 0x0b80
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1 0x0b84
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2 0x0b88
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3 0x0b8c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4 0x0b90
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5 0x0b94
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6 0x0b98
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7 0x0b9c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8 0x0ba0
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE 0x0c00
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_ENABLE 0x0c04
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE 0x0c08
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE 0x0c0c
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS 0x0c10
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_STATUS 0x0c14
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS 0x0c18
+#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS 0x0c1c
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF1_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF1_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF1_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF1_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF1_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF1_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF1_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF1_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF1_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF1_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF1_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF1_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF1_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF1_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF1_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF1_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF1_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF1_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST 0x0048
+#define cfgBIF_CFG_DEV0_EPF1_ADAPTER_ID_W 0x004c
+#define cfgBIF_CFG_DEV0_EPF1_PMI_CAP_LIST 0x0050
+#define cfgBIF_CFG_DEV0_EPF1_PMI_CAP 0x0052
+#define cfgBIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL 0x0054
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF1_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF1_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF1_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF1_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF1_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST 0x0200
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP 0x0204
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL 0x0208
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP 0x020c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL 0x0210
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP 0x0214
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL 0x0218
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP 0x021c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL 0x0220
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP 0x0224
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL 0x0228
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP 0x022c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL 0x0230
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT 0x0244
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA 0x0248
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP 0x024c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST 0x0250
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_CAP 0x0254
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR 0x0258
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS 0x025c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL 0x025e
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST 0x02a0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ACS_CAP 0x02a4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL 0x02a6
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST 0x02d0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PASID_CAP 0x02d4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL 0x02d6
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
+// base address: 0x10100000
+#define regBIF_CFG_DEV0_RC0_VENDOR_ID 0x0000
+#define regBIF_CFG_DEV0_RC0_VENDOR_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_ID 0x0000
+#define regBIF_CFG_DEV0_RC0_DEVICE_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_COMMAND 0x0001
+#define regBIF_CFG_DEV0_RC0_COMMAND_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_STATUS 0x0001
+#define regBIF_CFG_DEV0_RC0_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_REVISION_ID 0x0002
+#define regBIF_CFG_DEV0_RC0_REVISION_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PROG_INTERFACE 0x0002
+#define regBIF_CFG_DEV0_RC0_PROG_INTERFACE_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SUB_CLASS 0x0002
+#define regBIF_CFG_DEV0_RC0_SUB_CLASS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_BASE_CLASS 0x0002
+#define regBIF_CFG_DEV0_RC0_BASE_CLASS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_CACHE_LINE 0x0003
+#define regBIF_CFG_DEV0_RC0_CACHE_LINE_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LATENCY 0x0003
+#define regBIF_CFG_DEV0_RC0_LATENCY_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_HEADER 0x0003
+#define regBIF_CFG_DEV0_RC0_HEADER_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_BIST 0x0003
+#define regBIF_CFG_DEV0_RC0_BIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_BASE_ADDR_1 0x0004
+#define regBIF_CFG_DEV0_RC0_BASE_ADDR_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_BASE_ADDR_2 0x0005
+#define regBIF_CFG_DEV0_RC0_BASE_ADDR_2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY 0x0006
+#define regBIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_IO_BASE_LIMIT 0x0007
+#define regBIF_CFG_DEV0_RC0_IO_BASE_LIMIT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SECONDARY_STATUS 0x0007
+#define regBIF_CFG_DEV0_RC0_SECONDARY_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MEM_BASE_LIMIT 0x0008
+#define regBIF_CFG_DEV0_RC0_MEM_BASE_LIMIT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PREF_BASE_LIMIT 0x0009
+#define regBIF_CFG_DEV0_RC0_PREF_BASE_LIMIT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PREF_BASE_UPPER 0x000a
+#define regBIF_CFG_DEV0_RC0_PREF_BASE_UPPER_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PREF_LIMIT_UPPER 0x000b
+#define regBIF_CFG_DEV0_RC0_PREF_LIMIT_UPPER_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI 0x000c
+#define regBIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_CAP_PTR 0x000d
+#define regBIF_CFG_DEV0_RC0_CAP_PTR_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_ROM_BASE_ADDR 0x000e
+#define regBIF_CFG_DEV0_RC0_ROM_BASE_ADDR_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_INTERRUPT_LINE 0x000f
+#define regBIF_CFG_DEV0_RC0_INTERRUPT_LINE_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_INTERRUPT_PIN 0x000f
+#define regBIF_CFG_DEV0_RC0_INTERRUPT_PIN_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL 0x000f
+#define regBIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_EXT_BRIDGE_CNTL 0x0010
+#define regBIF_CFG_DEV0_RC0_EXT_BRIDGE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PMI_CAP_LIST 0x0014
+#define regBIF_CFG_DEV0_RC0_PMI_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PMI_CAP 0x0014
+#define regBIF_CFG_DEV0_RC0_PMI_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PMI_STATUS_CNTL 0x0015
+#define regBIF_CFG_DEV0_RC0_PMI_STATUS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_CAP_LIST 0x0016
+#define regBIF_CFG_DEV0_RC0_PCIE_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_CAP 0x0016
+#define regBIF_CFG_DEV0_RC0_PCIE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_CAP 0x0017
+#define regBIF_CFG_DEV0_RC0_DEVICE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_CNTL 0x0018
+#define regBIF_CFG_DEV0_RC0_DEVICE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_STATUS 0x0018
+#define regBIF_CFG_DEV0_RC0_DEVICE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CAP 0x0019
+#define regBIF_CFG_DEV0_RC0_LINK_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL 0x001a
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS 0x001a
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SLOT_CAP 0x001b
+#define regBIF_CFG_DEV0_RC0_SLOT_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SLOT_CNTL 0x001c
+#define regBIF_CFG_DEV0_RC0_SLOT_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SLOT_STATUS 0x001c
+#define regBIF_CFG_DEV0_RC0_SLOT_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_ROOT_CNTL 0x001d
+#define regBIF_CFG_DEV0_RC0_ROOT_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_ROOT_CAP 0x001d
+#define regBIF_CFG_DEV0_RC0_ROOT_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_ROOT_STATUS 0x001e
+#define regBIF_CFG_DEV0_RC0_ROOT_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_CAP2 0x001f
+#define regBIF_CFG_DEV0_RC0_DEVICE_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_CNTL2 0x0020
+#define regBIF_CFG_DEV0_RC0_DEVICE_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DEVICE_STATUS2 0x0020
+#define regBIF_CFG_DEV0_RC0_DEVICE_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CAP2 0x0021
+#define regBIF_CFG_DEV0_RC0_LINK_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL2 0x0022
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS2 0x0022
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SLOT_CAP2 0x0023
+#define regBIF_CFG_DEV0_RC0_SLOT_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SLOT_CNTL2 0x0024
+#define regBIF_CFG_DEV0_RC0_SLOT_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SLOT_STATUS2 0x0024
+#define regBIF_CFG_DEV0_RC0_SLOT_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_CAP_LIST 0x0028
+#define regBIF_CFG_DEV0_RC0_MSI_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_CNTL 0x0028
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_ADDR_LO 0x0029
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_ADDR_LO_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_ADDR_HI 0x002a
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_ADDR_HI_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_DATA 0x002a
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA 0x002a
+#define regBIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_DATA_64 0x002b
+#define regBIF_CFG_DEV0_RC0_MSI_MSG_DATA_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA_64 0x002b
+#define regBIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SSID_CAP_LIST 0x0030
+#define regBIF_CFG_DEV0_RC0_SSID_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_SSID_CAP 0x0031
+#define regBIF_CFG_DEV0_RC0_SSID_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0040
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR 0x0041
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC1 0x0042
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC1_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC2 0x0043
+#define regBIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST 0x0044
+#define regBIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1 0x0045
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2 0x0046
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL 0x0047
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_STATUS 0x0047
+#define regBIF_CFG_DEV0_RC0_PCIE_PORT_VC_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP 0x0048
+#define regBIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL 0x0049
+#define regBIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS 0x004a
+#define regBIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP 0x004b
+#define regBIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL 0x004c
+#define regBIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS 0x004d
+#define regBIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0050
+#define regBIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW1 0x0051
+#define regBIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW2 0x0052
+#define regBIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0054
+#define regBIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS 0x0055
+#define regBIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK 0x0056
+#define regBIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY 0x0057
+#define regBIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS 0x0058
+#define regBIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK 0x0059
+#define regBIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL 0x005a
+#define regBIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG0 0x005b
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG0_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG1 0x005c
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG2 0x005d
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG3 0x005e
+#define regBIF_CFG_DEV0_RC0_PCIE_HDR_LOG3_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD 0x005f
+#define regBIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS 0x0060
+#define regBIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID 0x0061
+#define regBIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG0 0x0062
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG0_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG1 0x0063
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG2 0x0064
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG3 0x0065
+#define regBIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG3_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST 0x009c
+#define regBIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3 0x009d
+#define regBIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_ERROR_STATUS 0x009e
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_ERROR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL 0x009f
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL 0x009f
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL 0x00a0
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL 0x00a0
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL 0x00a1
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL 0x00a1
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL 0x00a2
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL 0x00a2
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL 0x00a3
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL 0x00a3
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL 0x00a4
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL 0x00a4
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL 0x00a5
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL 0x00a5
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL 0x00a6
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL 0x00a6
+#define regBIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST 0x00a8
+#define regBIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ACS_CAP 0x00a9
+#define regBIF_CFG_DEV0_RC0_PCIE_ACS_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_ACS_CNTL 0x00a9
+#define regBIF_CFG_DEV0_RC0_PCIE_ACS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST 0x0100
+#define regBIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP 0x0101
+#define regBIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS 0x0102
+#define regBIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST 0x0104
+#define regBIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CAP_16GT 0x0105
+#define regBIF_CFG_DEV0_RC0_LINK_CAP_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL_16GT 0x0106
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS_16GT 0x0107
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0108
+#define regBIF_CFG_DEV0_RC0_LOCAL_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0109
+#define regBIF_CFG_DEV0_RC0_RTM1_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x010a
+#define regBIF_CFG_DEV0_RC0_RTM2_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT 0x010c
+#define regBIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT 0x010c
+#define regBIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT 0x010c
+#define regBIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT 0x010c
+#define regBIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT 0x010d
+#define regBIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT 0x010d
+#define regBIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT 0x010d
+#define regBIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT 0x010d
+#define regBIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT 0x010e
+#define regBIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT 0x010e
+#define regBIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT 0x010e
+#define regBIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT 0x010e
+#define regBIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT 0x010f
+#define regBIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT 0x010f
+#define regBIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT 0x010f
+#define regBIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT 0x010f
+#define regBIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST 0x0114
+#define regBIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MARGINING_PORT_CAP 0x0115
+#define regBIF_CFG_DEV0_RC0_MARGINING_PORT_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS 0x0115
+#define regBIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL 0x0116
+#define regBIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS 0x0116
+#define regBIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL 0x0117
+#define regBIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS 0x0117
+#define regBIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL 0x0118
+#define regBIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS 0x0118
+#define regBIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL 0x0119
+#define regBIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS 0x0119
+#define regBIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL 0x011a
+#define regBIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS 0x011a
+#define regBIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL 0x011b
+#define regBIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS 0x011b
+#define regBIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL 0x011c
+#define regBIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS 0x011c
+#define regBIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL 0x011d
+#define regBIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS 0x011d
+#define regBIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL 0x011e
+#define regBIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS 0x011e
+#define regBIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL 0x011f
+#define regBIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS 0x011f
+#define regBIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL 0x0120
+#define regBIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS 0x0120
+#define regBIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL 0x0121
+#define regBIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS 0x0121
+#define regBIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL 0x0122
+#define regBIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS 0x0122
+#define regBIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL 0x0123
+#define regBIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS 0x0123
+#define regBIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL 0x0124
+#define regBIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS 0x0124
+#define regBIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL 0x0125
+#define regBIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS 0x0125
+#define regBIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CAP_32GT 0x0141
+#define regBIF_CFG_DEV0_RC0_LINK_CAP_32GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL_32GT 0x0142
+#define regBIF_CFG_DEV0_RC0_LINK_CNTL_32GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS_32GT 0x0143
+#define regBIF_CFG_DEV0_RC0_LINK_STATUS_32GT_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
+// base address: 0x10140000
+#define regBIF_CFG_DEV0_EPF0_0_VENDOR_ID 0x10000
+#define regBIF_CFG_DEV0_EPF0_0_VENDOR_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_ID 0x10000
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_COMMAND 0x10001
+#define regBIF_CFG_DEV0_EPF0_0_COMMAND_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_STATUS 0x10001
+#define regBIF_CFG_DEV0_EPF0_0_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_REVISION_ID 0x10002
+#define regBIF_CFG_DEV0_EPF0_0_REVISION_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PROG_INTERFACE 0x10002
+#define regBIF_CFG_DEV0_EPF0_0_PROG_INTERFACE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_SUB_CLASS 0x10002
+#define regBIF_CFG_DEV0_EPF0_0_SUB_CLASS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_CLASS 0x10002
+#define regBIF_CFG_DEV0_EPF0_0_BASE_CLASS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_CACHE_LINE 0x10003
+#define regBIF_CFG_DEV0_EPF0_0_CACHE_LINE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LATENCY 0x10003
+#define regBIF_CFG_DEV0_EPF0_0_LATENCY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_HEADER 0x10003
+#define regBIF_CFG_DEV0_EPF0_0_HEADER_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BIST 0x10003
+#define regBIF_CFG_DEV0_EPF0_0_BIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_1 0x10004
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_2 0x10005
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_3 0x10006
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_4 0x10007
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_5 0x10008
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_6 0x10009
+#define regBIF_CFG_DEV0_EPF0_0_BASE_ADDR_6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_CARDBUS_CIS_PTR 0x1000a
+#define regBIF_CFG_DEV0_EPF0_0_CARDBUS_CIS_PTR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_ADAPTER_ID 0x1000b
+#define regBIF_CFG_DEV0_EPF0_0_ADAPTER_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR 0x1000c
+#define regBIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_CAP_PTR 0x1000d
+#define regBIF_CFG_DEV0_EPF0_0_CAP_PTR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE 0x1000f
+#define regBIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN 0x1000f
+#define regBIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MIN_GRANT 0x1000f
+#define regBIF_CFG_DEV0_EPF0_0_MIN_GRANT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MAX_LATENCY 0x1000f
+#define regBIF_CFG_DEV0_EPF0_0_MAX_LATENCY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST 0x10012
+#define regBIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W 0x10013
+#define regBIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST 0x10014
+#define regBIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PMI_CAP 0x10014
+#define regBIF_CFG_DEV0_EPF0_0_PMI_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL 0x10015
+#define regBIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST 0x10019
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CAP 0x10019
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CAP 0x1001a
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL 0x1001b
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS 0x1001b
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP 0x1001c
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL 0x1001d
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS 0x1001d
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CAP2 0x10022
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2 0x10023
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2 0x10023
+#define regBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP2 0x10024
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL2 0x10025
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS2 0x10025
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST 0x10028
+#define regBIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL 0x10028
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO 0x10029
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI 0x1002a
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA 0x1002a
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA 0x1002a
+#define regBIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MASK 0x1002b
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64 0x1002b
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA_64 0x1002b
+#define regBIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MASK_64 0x1002c
+#define regBIF_CFG_DEV0_EPF0_0_MSI_MASK_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_PENDING 0x1002c
+#define regBIF_CFG_DEV0_EPF0_0_MSI_PENDING_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSI_PENDING_64 0x1002d
+#define regBIF_CFG_DEV0_EPF0_0_MSI_PENDING_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST 0x10030
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL 0x10030
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_TABLE 0x10031
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_TABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_PBA 0x10032
+#define regBIF_CFG_DEV0_EPF0_0_MSIX_PBA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x10040
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR 0x10041
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1 0x10042
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2 0x10043
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST 0x10044
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1 0x10045
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2 0x10046
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL 0x10047
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS 0x10047
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP 0x10048
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL 0x10049
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS 0x1004a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP 0x1004b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL 0x1004c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS 0x1004d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x10050
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1 0x10051
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2 0x10052
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x10054
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS 0x10055
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK 0x10056
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY 0x10057
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS 0x10058
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK 0x10059
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL 0x1005a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0 0x1005b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1 0x1005c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2 0x1005d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3 0x1005e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0 0x10062
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1 0x10063
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2 0x10064
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3 0x10065
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST 0x10080
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP 0x10081
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL 0x10082
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP 0x10083
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL 0x10084
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP 0x10085
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL 0x10086
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP 0x10087
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL 0x10088
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP 0x10089
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL 0x1008a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP 0x1008b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL 0x1008c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x10090
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT 0x10091
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA 0x10092
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP 0x10093
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST 0x10094
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP 0x10095
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR 0x10096
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS 0x10097
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL 0x10097
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x10098
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x10098
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x10098
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x10098
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x10099
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x10099
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x10099
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x10099
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST 0x1009c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3 0x1009d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS 0x1009e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL 0x1009f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL 0x1009f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL 0x100a0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL 0x100a0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL 0x100a1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL 0x100a1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL 0x100a2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL 0x100a2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL 0x100a3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL 0x100a3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL 0x100a4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL 0x100a4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL 0x100a5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL 0x100a5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL 0x100a6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL 0x100a6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST 0x100a8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP 0x100a9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL 0x100a9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST 0x100ac
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP 0x100ad
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL 0x100ad
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST 0x100b0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL 0x100b1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS 0x100b1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY 0x100b2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x100b3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST 0x100b4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP 0x100b5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL 0x100b5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST 0x100bc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP 0x100bd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL 0x100bd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0 0x100be
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1 0x100bf
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0 0x100c0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1 0x100c1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0 0x100c2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1 0x100c3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x100c4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x100c5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST 0x100c8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP 0x100c9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST 0x100ca
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP 0x100cb
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL 0x100cb
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST 0x100cc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP 0x100cd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL 0x100ce
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS 0x100ce
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS 0x100cf
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS 0x100cf
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS 0x100d0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK 0x100d0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET 0x100d1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE 0x100d1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID 0x100d2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x100d3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x100d4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0 0x100d5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1 0x100d6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2 0x100d7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3 0x100d8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4 0x100d9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5 0x100da
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x100db
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST 0x10100
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP 0x10101
+#define regBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS 0x10102
+#define regBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST 0x10104
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT 0x10105
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT 0x10106
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT 0x10107
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x10108
+#define regBIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x10109
+#define regBIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x1010a
+#define regBIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST 0x10114
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP 0x10115
+#define regBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS 0x10115
+#define regBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL 0x10116
+#define regBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS 0x10116
+#define regBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL 0x10117
+#define regBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS 0x10117
+#define regBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL 0x10118
+#define regBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS 0x10118
+#define regBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL 0x10119
+#define regBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS 0x10119
+#define regBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL 0x1011a
+#define regBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS 0x1011a
+#define regBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL 0x1011b
+#define regBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS 0x1011b
+#define regBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL 0x1011c
+#define regBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS 0x1011c
+#define regBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL 0x1011d
+#define regBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS 0x1011d
+#define regBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL 0x1011e
+#define regBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS 0x1011e
+#define regBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL 0x1011f
+#define regBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS 0x1011f
+#define regBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL 0x10120
+#define regBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS 0x10120
+#define regBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL 0x10121
+#define regBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS 0x10121
+#define regBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL 0x10122
+#define regBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS 0x10122
+#define regBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL 0x10123
+#define regBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS 0x10123
+#define regBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL 0x10124
+#define regBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS 0x10124
+#define regBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL 0x10125
+#define regBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS 0x10125
+#define regBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT 0x10141
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT 0x10142
+#define regBIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT 0x10143
+#define regBIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV 0x101c0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV 0x101c1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW 0x101c2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE 0x101c3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS 0x101c4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL 0x101c5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 0x101c6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 0x101c7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 0x101c8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT 0x101c9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB 0x101ca
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION 0x101cc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE 0x101cd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB 0x101ce
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB 0x101cf
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB 0x101d0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB 0x101d1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB 0x101d2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB 0x101d3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB 0x101d4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB 0x101d5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB 0x101d6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB 0x101d7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB 0x101d8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB 0x101d9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB 0x101da
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB 0x101db
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB 0x101dc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB 0x101dd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB 0x101de
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB 0x101df
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB 0x101e0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB 0x101e1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB 0x101e2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB 0x101e3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB 0x101e4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB 0x101e5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB 0x101e6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB 0x101e7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB 0x101e8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB 0x101e9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB 0x101ea
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB 0x101eb
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB 0x101ec
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS 0x101f0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1 0x101f1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2 0x101f2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3 0x101f3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4 0x101f4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0 0x101fc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1 0x101fd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2 0x101fe
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3 0x101ff
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4 0x10200
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5 0x10201
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6 0x10202
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7 0x10203
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8 0x10204
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 0x10208
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 0x10209
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 0x1020a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 0x1020b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 0x1020c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 0x1020d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 0x1020e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 0x1020f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 0x10210
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0 0x10214
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1 0x10215
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2 0x10216
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3 0x10217
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4 0x10218
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5 0x10219
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6 0x1021a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7 0x1021b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8 0x1021c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0 0x10220
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1 0x10221
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2 0x10222
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3 0x10223
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4 0x10224
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5 0x10225
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6 0x10226
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7 0x10227
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8 0x10228
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0 0x1022c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1 0x1022d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2 0x1022e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3 0x1022f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4 0x10230
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5 0x10231
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6 0x10232
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7 0x10233
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8 0x10234
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0 0x10238
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1 0x10239
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2 0x1023a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3 0x1023b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4 0x1023c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5 0x1023d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6 0x1023e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7 0x1023f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8 0x10240
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0 0x10244
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1 0x10245
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2 0x10246
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3 0x10247
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4 0x10248
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5 0x10249
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6 0x1024a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7 0x1024b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8 0x1024c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0 0x10250
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1 0x10251
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2 0x10252
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3 0x10253
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4 0x10254
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5 0x10255
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6 0x10256
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7 0x10257
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8 0x10258
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0 0x1025c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1 0x1025d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2 0x1025e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3 0x1025f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4 0x10260
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5 0x10261
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6 0x10262
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7 0x10263
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8 0x10264
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0 0x10268
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1 0x10269
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2 0x1026a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3 0x1026b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4 0x1026c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5 0x1026d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6 0x1026e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7 0x1026f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8 0x10270
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0 0x10274
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1 0x10275
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2 0x10276
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3 0x10277
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4 0x10278
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5 0x10279
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6 0x1027a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7 0x1027b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8 0x1027c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0 0x10280
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1 0x10281
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2 0x10282
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3 0x10283
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4 0x10284
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5 0x10285
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6 0x10286
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7 0x10287
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8 0x10288
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0 0x1028c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1 0x1028d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2 0x1028e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3 0x1028f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4 0x10290
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5 0x10291
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6 0x10292
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7 0x10293
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8 0x10294
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0 0x10298
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1 0x10299
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2 0x1029a
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3 0x1029b
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4 0x1029c
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5 0x1029d
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6 0x1029e
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7 0x1029f
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8 0x102a0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0 0x102a4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1 0x102a5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2 0x102a6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3 0x102a7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4 0x102a8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5 0x102a9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6 0x102aa
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7 0x102ab
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8 0x102ac
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0 0x102b0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1 0x102b1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2 0x102b2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3 0x102b3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4 0x102b4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5 0x102b5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6 0x102b6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7 0x102b7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8 0x102b8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0 0x102bc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1 0x102bd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2 0x102be
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3 0x102bf
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4 0x102c0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5 0x102c1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6 0x102c2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7 0x102c3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8 0x102c4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0 0x102c8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1 0x102c9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2 0x102ca
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3 0x102cb
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4 0x102cc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5 0x102cd
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6 0x102ce
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7 0x102cf
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8 0x102d0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0 0x102d4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1 0x102d5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2 0x102d6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3 0x102d7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4 0x102d8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5 0x102d9
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6 0x102da
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7 0x102db
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8 0x102dc
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0 0x102e0
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1 0x102e1
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2 0x102e2
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3 0x102e3
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4 0x102e4
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5 0x102e5
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6 0x102e6
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7 0x102e7
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8 0x102e8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE 0x10300
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_ENABLE 0x10301
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_ENABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE 0x10302
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE 0x10303
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS 0x10304
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_STATUS 0x10305
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS 0x10306
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS 0x10307
+#define regBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
+// base address: 0x10141000
+#define regBIF_CFG_DEV0_EPF1_0_VENDOR_ID 0x10400
+#define regBIF_CFG_DEV0_EPF1_0_VENDOR_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_ID 0x10400
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_COMMAND 0x10401
+#define regBIF_CFG_DEV0_EPF1_0_COMMAND_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_STATUS 0x10401
+#define regBIF_CFG_DEV0_EPF1_0_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_REVISION_ID 0x10402
+#define regBIF_CFG_DEV0_EPF1_0_REVISION_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PROG_INTERFACE 0x10402
+#define regBIF_CFG_DEV0_EPF1_0_PROG_INTERFACE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_SUB_CLASS 0x10402
+#define regBIF_CFG_DEV0_EPF1_0_SUB_CLASS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_CLASS 0x10402
+#define regBIF_CFG_DEV0_EPF1_0_BASE_CLASS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_CACHE_LINE 0x10403
+#define regBIF_CFG_DEV0_EPF1_0_CACHE_LINE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LATENCY 0x10403
+#define regBIF_CFG_DEV0_EPF1_0_LATENCY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_HEADER 0x10403
+#define regBIF_CFG_DEV0_EPF1_0_HEADER_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BIST 0x10403
+#define regBIF_CFG_DEV0_EPF1_0_BIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_1 0x10404
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_2 0x10405
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_3 0x10406
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_4 0x10407
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_5 0x10408
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_6 0x10409
+#define regBIF_CFG_DEV0_EPF1_0_BASE_ADDR_6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_CARDBUS_CIS_PTR 0x1040a
+#define regBIF_CFG_DEV0_EPF1_0_CARDBUS_CIS_PTR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_ADAPTER_ID 0x1040b
+#define regBIF_CFG_DEV0_EPF1_0_ADAPTER_ID_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR 0x1040c
+#define regBIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_CAP_PTR 0x1040d
+#define regBIF_CFG_DEV0_EPF1_0_CAP_PTR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE 0x1040f
+#define regBIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN 0x1040f
+#define regBIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MIN_GRANT 0x1040f
+#define regBIF_CFG_DEV0_EPF1_0_MIN_GRANT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MAX_LATENCY 0x1040f
+#define regBIF_CFG_DEV0_EPF1_0_MAX_LATENCY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST 0x10412
+#define regBIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W 0x10413
+#define regBIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST 0x10414
+#define regBIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PMI_CAP 0x10414
+#define regBIF_CFG_DEV0_EPF1_0_PMI_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL 0x10415
+#define regBIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST 0x10419
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CAP 0x10419
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CAP 0x1041a
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL 0x1041b
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS 0x1041b
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CAP 0x1041c
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CNTL 0x1041d
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LINK_STATUS 0x1041d
+#define regBIF_CFG_DEV0_EPF1_0_LINK_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CAP2 0x10422
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2 0x10423
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2 0x10423
+#define regBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CAP2 0x10424
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CAP2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CNTL2 0x10425
+#define regBIF_CFG_DEV0_EPF1_0_LINK_CNTL2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_LINK_STATUS2 0x10425
+#define regBIF_CFG_DEV0_EPF1_0_LINK_STATUS2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST 0x10428
+#define regBIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL 0x10428
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO 0x10429
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI 0x1042a
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA 0x1042a
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA 0x1042a
+#define regBIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MASK 0x1042b
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64 0x1042b
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA_64 0x1042b
+#define regBIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MASK_64 0x1042c
+#define regBIF_CFG_DEV0_EPF1_0_MSI_MASK_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_PENDING 0x1042c
+#define regBIF_CFG_DEV0_EPF1_0_MSI_PENDING_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSI_PENDING_64 0x1042d
+#define regBIF_CFG_DEV0_EPF1_0_MSI_PENDING_64_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST 0x10430
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL 0x10430
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_TABLE 0x10431
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_TABLE_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_PBA 0x10432
+#define regBIF_CFG_DEV0_EPF1_0_MSIX_PBA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x10440
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR 0x10441
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1 0x10442
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2 0x10443
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x10454
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS 0x10455
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK 0x10456
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY 0x10457
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS 0x10458
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK 0x10459
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL 0x1045a
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0 0x1045b
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1 0x1045c
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2 0x1045d
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3 0x1045e
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0 0x10462
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1 0x10463
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2 0x10464
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3 0x10465
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST 0x10480
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP 0x10481
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL 0x10482
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP 0x10483
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL 0x10484
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP 0x10485
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL 0x10486
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP 0x10487
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL 0x10488
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP 0x10489
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL 0x1048a
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP 0x1048b
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL 0x1048c
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x10490
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT 0x10491
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA 0x10492
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP 0x10493
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST 0x10494
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP 0x10495
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR 0x10496
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS 0x10497
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL 0x10497
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x10498
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x10498
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x10498
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x10498
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x10499
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x10499
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x10499
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x10499
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST 0x104a8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP 0x104a9
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL 0x104a9
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST 0x104b4
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP 0x104b5
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL 0x104b5
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST 0x104ca
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP 0x104cb
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP_BASE_IDX 8
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL 0x104cb
+#define regBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_DEV0_1_RCC_VDM_SUPPORT 0xc440
+#define regRCC_DEV0_1_RCC_VDM_SUPPORT_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_BUS_CNTL 0xc441
+#define regRCC_DEV0_1_RCC_BUS_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_FEATURES_CONTROL_MISC 0xc442
+#define regRCC_DEV0_1_RCC_FEATURES_CONTROL_MISC_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_DEV0_LINK_CNTL 0xc443
+#define regRCC_DEV0_1_RCC_DEV0_LINK_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CMN_LINK_CNTL 0xc444
+#define regRCC_DEV0_1_RCC_CMN_LINK_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE 0xc445
+#define regRCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_LTR_LSWITCH_CNTL 0xc446
+#define regRCC_DEV0_1_RCC_LTR_LSWITCH_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_MH_ARB_CNTL 0xc447
+#define regRCC_DEV0_1_RCC_MH_ARB_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0 0xc448
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1 0xc449
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_ep_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_EP_DEV0_1_EP_PCIE_SCRATCH 0xc44c
+#define regRCC_EP_DEV0_1_EP_PCIE_SCRATCH_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_CNTL 0xc44e
+#define regRCC_EP_DEV0_1_EP_PCIE_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_CNTL 0xc44f
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_STATUS 0xc450
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_STATUS_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL2 0xc451
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL2_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_BUS_CNTL 0xc452
+#define regRCC_EP_DEV0_1_EP_PCIE_BUS_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_CFG_CNTL 0xc453
+#define regRCC_EP_DEV0_1_EP_PCIE_CFG_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL 0xc454
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC 0xc455
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2 0xc456
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP 0xc457
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR 0xc458
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL 0xc458
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0xc458
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0xc45a
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0xc45a
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0xc45a
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_PME_CONTROL 0xc45c
+#define regRCC_EP_DEV0_1_EP_PCIE_PME_CONTROL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIEP_RESERVED 0xc45d
+#define regRCC_EP_DEV0_1_EP_PCIEP_RESERVED_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_CNTL 0xc45f
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID 0xc460
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_ERR_CNTL 0xc461
+#define regRCC_EP_DEV0_1_EP_PCIE_ERR_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL 0xc462
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL 0xc463
+#define regRCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwn_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_DWN_DEV0_1_DN_PCIE_RESERVED 0xc468
+#define regRCC_DWN_DEV0_1_DN_PCIE_RESERVED_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_SCRATCH 0xc469
+#define regRCC_DWN_DEV0_1_DN_PCIE_SCRATCH_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_CNTL 0xc46b
+#define regRCC_DWN_DEV0_1_DN_PCIE_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL 0xc46c
+#define regRCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2 0xc46d
+#define regRCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL 0xc46e
+#define regRCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL 0xc46f
+#define regRCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_F0 0xc470
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_F0_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC 0xc471
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC_BASE_IDX 8
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2 0xc472
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwnp_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_DWNP_DEV0_1_PCIE_ERR_CNTL 0xc475
+#define regRCC_DWNP_DEV0_1_PCIE_ERR_CNTL_BASE_IDX 8
+#define regRCC_DWNP_DEV0_1_PCIE_RX_CNTL 0xc476
+#define regRCC_DWNP_DEV0_1_PCIE_RX_CNTL_BASE_IDX 8
+#define regRCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL 0xc477
+#define regRCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL_BASE_IDX 8
+#define regRCC_DWNP_DEV0_1_PCIE_LC_CNTL2 0xc478
+#define regRCC_DWNP_DEV0_1_PCIE_LC_CNTL2_BASE_IDX 8
+#define regRCC_DWNP_DEV0_1_PCIEP_STRAP_MISC 0xc479
+#define regRCC_DWNP_DEV0_1_PCIEP_STRAP_MISC_BASE_IDX 8
+#define regRCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP 0xc47a
+#define regRCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_pfc_amdgfx_RCCPFCDEC
+// base address: 0x10134000
+#define regRCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL 0xd040
+#define regRCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE 0xd041
+#define regRCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0 0xd042
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1 0xd043
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2 0xd044
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3 0xd045
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4 0xd046
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5 0xd047
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5_BASE_IDX 8
+#define regRCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL 0xd048
+#define regRCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_pciemsix_0_usb_MSIXTDEC
+// base address: 0x10168000
+#define regPCIEMSIX_VECT0_ADDR_LO 0x1a000
+#define regPCIEMSIX_VECT0_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT0_ADDR_HI 0x1a001
+#define regPCIEMSIX_VECT0_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT0_MSG_DATA 0x1a002
+#define regPCIEMSIX_VECT0_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT0_CONTROL 0x1a003
+#define regPCIEMSIX_VECT0_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT1_ADDR_LO 0x1a004
+#define regPCIEMSIX_VECT1_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT1_ADDR_HI 0x1a005
+#define regPCIEMSIX_VECT1_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT1_MSG_DATA 0x1a006
+#define regPCIEMSIX_VECT1_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT1_CONTROL 0x1a007
+#define regPCIEMSIX_VECT1_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT2_ADDR_LO 0x1a008
+#define regPCIEMSIX_VECT2_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT2_ADDR_HI 0x1a009
+#define regPCIEMSIX_VECT2_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT2_MSG_DATA 0x1a00a
+#define regPCIEMSIX_VECT2_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT2_CONTROL 0x1a00b
+#define regPCIEMSIX_VECT2_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT3_ADDR_LO 0x1a00c
+#define regPCIEMSIX_VECT3_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT3_ADDR_HI 0x1a00d
+#define regPCIEMSIX_VECT3_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT3_MSG_DATA 0x1a00e
+#define regPCIEMSIX_VECT3_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT3_CONTROL 0x1a00f
+#define regPCIEMSIX_VECT3_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT4_ADDR_LO 0x1a010
+#define regPCIEMSIX_VECT4_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT4_ADDR_HI 0x1a011
+#define regPCIEMSIX_VECT4_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT4_MSG_DATA 0x1a012
+#define regPCIEMSIX_VECT4_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT4_CONTROL 0x1a013
+#define regPCIEMSIX_VECT4_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT5_ADDR_LO 0x1a014
+#define regPCIEMSIX_VECT5_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT5_ADDR_HI 0x1a015
+#define regPCIEMSIX_VECT5_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT5_MSG_DATA 0x1a016
+#define regPCIEMSIX_VECT5_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT5_CONTROL 0x1a017
+#define regPCIEMSIX_VECT5_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT6_ADDR_LO 0x1a018
+#define regPCIEMSIX_VECT6_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT6_ADDR_HI 0x1a019
+#define regPCIEMSIX_VECT6_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT6_MSG_DATA 0x1a01a
+#define regPCIEMSIX_VECT6_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT6_CONTROL 0x1a01b
+#define regPCIEMSIX_VECT6_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT7_ADDR_LO 0x1a01c
+#define regPCIEMSIX_VECT7_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT7_ADDR_HI 0x1a01d
+#define regPCIEMSIX_VECT7_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT7_MSG_DATA 0x1a01e
+#define regPCIEMSIX_VECT7_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT7_CONTROL 0x1a01f
+#define regPCIEMSIX_VECT7_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT8_ADDR_LO 0x1a020
+#define regPCIEMSIX_VECT8_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT8_ADDR_HI 0x1a021
+#define regPCIEMSIX_VECT8_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT8_MSG_DATA 0x1a022
+#define regPCIEMSIX_VECT8_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT8_CONTROL 0x1a023
+#define regPCIEMSIX_VECT8_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT9_ADDR_LO 0x1a024
+#define regPCIEMSIX_VECT9_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT9_ADDR_HI 0x1a025
+#define regPCIEMSIX_VECT9_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT9_MSG_DATA 0x1a026
+#define regPCIEMSIX_VECT9_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT9_CONTROL 0x1a027
+#define regPCIEMSIX_VECT9_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT10_ADDR_LO 0x1a028
+#define regPCIEMSIX_VECT10_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT10_ADDR_HI 0x1a029
+#define regPCIEMSIX_VECT10_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT10_MSG_DATA 0x1a02a
+#define regPCIEMSIX_VECT10_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT10_CONTROL 0x1a02b
+#define regPCIEMSIX_VECT10_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT11_ADDR_LO 0x1a02c
+#define regPCIEMSIX_VECT11_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT11_ADDR_HI 0x1a02d
+#define regPCIEMSIX_VECT11_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT11_MSG_DATA 0x1a02e
+#define regPCIEMSIX_VECT11_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT11_CONTROL 0x1a02f
+#define regPCIEMSIX_VECT11_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT12_ADDR_LO 0x1a030
+#define regPCIEMSIX_VECT12_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT12_ADDR_HI 0x1a031
+#define regPCIEMSIX_VECT12_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT12_MSG_DATA 0x1a032
+#define regPCIEMSIX_VECT12_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT12_CONTROL 0x1a033
+#define regPCIEMSIX_VECT12_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT13_ADDR_LO 0x1a034
+#define regPCIEMSIX_VECT13_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT13_ADDR_HI 0x1a035
+#define regPCIEMSIX_VECT13_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT13_MSG_DATA 0x1a036
+#define regPCIEMSIX_VECT13_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT13_CONTROL 0x1a037
+#define regPCIEMSIX_VECT13_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT14_ADDR_LO 0x1a038
+#define regPCIEMSIX_VECT14_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT14_ADDR_HI 0x1a039
+#define regPCIEMSIX_VECT14_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT14_MSG_DATA 0x1a03a
+#define regPCIEMSIX_VECT14_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT14_CONTROL 0x1a03b
+#define regPCIEMSIX_VECT14_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT15_ADDR_LO 0x1a03c
+#define regPCIEMSIX_VECT15_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT15_ADDR_HI 0x1a03d
+#define regPCIEMSIX_VECT15_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT15_MSG_DATA 0x1a03e
+#define regPCIEMSIX_VECT15_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT15_CONTROL 0x1a03f
+#define regPCIEMSIX_VECT15_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT16_ADDR_LO 0x1a040
+#define regPCIEMSIX_VECT16_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT16_ADDR_HI 0x1a041
+#define regPCIEMSIX_VECT16_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT16_MSG_DATA 0x1a042
+#define regPCIEMSIX_VECT16_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT16_CONTROL 0x1a043
+#define regPCIEMSIX_VECT16_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT17_ADDR_LO 0x1a044
+#define regPCIEMSIX_VECT17_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT17_ADDR_HI 0x1a045
+#define regPCIEMSIX_VECT17_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT17_MSG_DATA 0x1a046
+#define regPCIEMSIX_VECT17_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT17_CONTROL 0x1a047
+#define regPCIEMSIX_VECT17_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT18_ADDR_LO 0x1a048
+#define regPCIEMSIX_VECT18_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT18_ADDR_HI 0x1a049
+#define regPCIEMSIX_VECT18_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT18_MSG_DATA 0x1a04a
+#define regPCIEMSIX_VECT18_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT18_CONTROL 0x1a04b
+#define regPCIEMSIX_VECT18_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT19_ADDR_LO 0x1a04c
+#define regPCIEMSIX_VECT19_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT19_ADDR_HI 0x1a04d
+#define regPCIEMSIX_VECT19_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT19_MSG_DATA 0x1a04e
+#define regPCIEMSIX_VECT19_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT19_CONTROL 0x1a04f
+#define regPCIEMSIX_VECT19_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT20_ADDR_LO 0x1a050
+#define regPCIEMSIX_VECT20_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT20_ADDR_HI 0x1a051
+#define regPCIEMSIX_VECT20_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT20_MSG_DATA 0x1a052
+#define regPCIEMSIX_VECT20_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT20_CONTROL 0x1a053
+#define regPCIEMSIX_VECT20_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT21_ADDR_LO 0x1a054
+#define regPCIEMSIX_VECT21_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT21_ADDR_HI 0x1a055
+#define regPCIEMSIX_VECT21_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT21_MSG_DATA 0x1a056
+#define regPCIEMSIX_VECT21_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT21_CONTROL 0x1a057
+#define regPCIEMSIX_VECT21_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT22_ADDR_LO 0x1a058
+#define regPCIEMSIX_VECT22_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT22_ADDR_HI 0x1a059
+#define regPCIEMSIX_VECT22_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT22_MSG_DATA 0x1a05a
+#define regPCIEMSIX_VECT22_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT22_CONTROL 0x1a05b
+#define regPCIEMSIX_VECT22_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT23_ADDR_LO 0x1a05c
+#define regPCIEMSIX_VECT23_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT23_ADDR_HI 0x1a05d
+#define regPCIEMSIX_VECT23_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT23_MSG_DATA 0x1a05e
+#define regPCIEMSIX_VECT23_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT23_CONTROL 0x1a05f
+#define regPCIEMSIX_VECT23_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT24_ADDR_LO 0x1a060
+#define regPCIEMSIX_VECT24_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT24_ADDR_HI 0x1a061
+#define regPCIEMSIX_VECT24_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT24_MSG_DATA 0x1a062
+#define regPCIEMSIX_VECT24_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT24_CONTROL 0x1a063
+#define regPCIEMSIX_VECT24_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT25_ADDR_LO 0x1a064
+#define regPCIEMSIX_VECT25_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT25_ADDR_HI 0x1a065
+#define regPCIEMSIX_VECT25_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT25_MSG_DATA 0x1a066
+#define regPCIEMSIX_VECT25_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT25_CONTROL 0x1a067
+#define regPCIEMSIX_VECT25_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT26_ADDR_LO 0x1a068
+#define regPCIEMSIX_VECT26_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT26_ADDR_HI 0x1a069
+#define regPCIEMSIX_VECT26_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT26_MSG_DATA 0x1a06a
+#define regPCIEMSIX_VECT26_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT26_CONTROL 0x1a06b
+#define regPCIEMSIX_VECT26_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT27_ADDR_LO 0x1a06c
+#define regPCIEMSIX_VECT27_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT27_ADDR_HI 0x1a06d
+#define regPCIEMSIX_VECT27_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT27_MSG_DATA 0x1a06e
+#define regPCIEMSIX_VECT27_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT27_CONTROL 0x1a06f
+#define regPCIEMSIX_VECT27_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT28_ADDR_LO 0x1a070
+#define regPCIEMSIX_VECT28_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT28_ADDR_HI 0x1a071
+#define regPCIEMSIX_VECT28_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT28_MSG_DATA 0x1a072
+#define regPCIEMSIX_VECT28_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT28_CONTROL 0x1a073
+#define regPCIEMSIX_VECT28_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT29_ADDR_LO 0x1a074
+#define regPCIEMSIX_VECT29_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT29_ADDR_HI 0x1a075
+#define regPCIEMSIX_VECT29_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT29_MSG_DATA 0x1a076
+#define regPCIEMSIX_VECT29_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT29_CONTROL 0x1a077
+#define regPCIEMSIX_VECT29_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT30_ADDR_LO 0x1a078
+#define regPCIEMSIX_VECT30_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT30_ADDR_HI 0x1a079
+#define regPCIEMSIX_VECT30_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT30_MSG_DATA 0x1a07a
+#define regPCIEMSIX_VECT30_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT30_CONTROL 0x1a07b
+#define regPCIEMSIX_VECT30_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT31_ADDR_LO 0x1a07c
+#define regPCIEMSIX_VECT31_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT31_ADDR_HI 0x1a07d
+#define regPCIEMSIX_VECT31_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT31_MSG_DATA 0x1a07e
+#define regPCIEMSIX_VECT31_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT31_CONTROL 0x1a07f
+#define regPCIEMSIX_VECT31_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT32_ADDR_LO 0x1a080
+#define regPCIEMSIX_VECT32_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT32_ADDR_HI 0x1a081
+#define regPCIEMSIX_VECT32_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT32_MSG_DATA 0x1a082
+#define regPCIEMSIX_VECT32_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT32_CONTROL 0x1a083
+#define regPCIEMSIX_VECT32_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT33_ADDR_LO 0x1a084
+#define regPCIEMSIX_VECT33_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT33_ADDR_HI 0x1a085
+#define regPCIEMSIX_VECT33_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT33_MSG_DATA 0x1a086
+#define regPCIEMSIX_VECT33_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT33_CONTROL 0x1a087
+#define regPCIEMSIX_VECT33_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT34_ADDR_LO 0x1a088
+#define regPCIEMSIX_VECT34_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT34_ADDR_HI 0x1a089
+#define regPCIEMSIX_VECT34_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT34_MSG_DATA 0x1a08a
+#define regPCIEMSIX_VECT34_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT34_CONTROL 0x1a08b
+#define regPCIEMSIX_VECT34_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT35_ADDR_LO 0x1a08c
+#define regPCIEMSIX_VECT35_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT35_ADDR_HI 0x1a08d
+#define regPCIEMSIX_VECT35_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT35_MSG_DATA 0x1a08e
+#define regPCIEMSIX_VECT35_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT35_CONTROL 0x1a08f
+#define regPCIEMSIX_VECT35_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT36_ADDR_LO 0x1a090
+#define regPCIEMSIX_VECT36_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT36_ADDR_HI 0x1a091
+#define regPCIEMSIX_VECT36_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT36_MSG_DATA 0x1a092
+#define regPCIEMSIX_VECT36_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT36_CONTROL 0x1a093
+#define regPCIEMSIX_VECT36_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT37_ADDR_LO 0x1a094
+#define regPCIEMSIX_VECT37_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT37_ADDR_HI 0x1a095
+#define regPCIEMSIX_VECT37_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT37_MSG_DATA 0x1a096
+#define regPCIEMSIX_VECT37_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT37_CONTROL 0x1a097
+#define regPCIEMSIX_VECT37_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT38_ADDR_LO 0x1a098
+#define regPCIEMSIX_VECT38_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT38_ADDR_HI 0x1a099
+#define regPCIEMSIX_VECT38_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT38_MSG_DATA 0x1a09a
+#define regPCIEMSIX_VECT38_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT38_CONTROL 0x1a09b
+#define regPCIEMSIX_VECT38_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT39_ADDR_LO 0x1a09c
+#define regPCIEMSIX_VECT39_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT39_ADDR_HI 0x1a09d
+#define regPCIEMSIX_VECT39_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT39_MSG_DATA 0x1a09e
+#define regPCIEMSIX_VECT39_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT39_CONTROL 0x1a09f
+#define regPCIEMSIX_VECT39_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT40_ADDR_LO 0x1a0a0
+#define regPCIEMSIX_VECT40_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT40_ADDR_HI 0x1a0a1
+#define regPCIEMSIX_VECT40_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT40_MSG_DATA 0x1a0a2
+#define regPCIEMSIX_VECT40_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT40_CONTROL 0x1a0a3
+#define regPCIEMSIX_VECT40_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT41_ADDR_LO 0x1a0a4
+#define regPCIEMSIX_VECT41_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT41_ADDR_HI 0x1a0a5
+#define regPCIEMSIX_VECT41_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT41_MSG_DATA 0x1a0a6
+#define regPCIEMSIX_VECT41_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT41_CONTROL 0x1a0a7
+#define regPCIEMSIX_VECT41_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT42_ADDR_LO 0x1a0a8
+#define regPCIEMSIX_VECT42_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT42_ADDR_HI 0x1a0a9
+#define regPCIEMSIX_VECT42_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT42_MSG_DATA 0x1a0aa
+#define regPCIEMSIX_VECT42_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT42_CONTROL 0x1a0ab
+#define regPCIEMSIX_VECT42_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT43_ADDR_LO 0x1a0ac
+#define regPCIEMSIX_VECT43_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT43_ADDR_HI 0x1a0ad
+#define regPCIEMSIX_VECT43_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT43_MSG_DATA 0x1a0ae
+#define regPCIEMSIX_VECT43_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT43_CONTROL 0x1a0af
+#define regPCIEMSIX_VECT43_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT44_ADDR_LO 0x1a0b0
+#define regPCIEMSIX_VECT44_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT44_ADDR_HI 0x1a0b1
+#define regPCIEMSIX_VECT44_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT44_MSG_DATA 0x1a0b2
+#define regPCIEMSIX_VECT44_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT44_CONTROL 0x1a0b3
+#define regPCIEMSIX_VECT44_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT45_ADDR_LO 0x1a0b4
+#define regPCIEMSIX_VECT45_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT45_ADDR_HI 0x1a0b5
+#define regPCIEMSIX_VECT45_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT45_MSG_DATA 0x1a0b6
+#define regPCIEMSIX_VECT45_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT45_CONTROL 0x1a0b7
+#define regPCIEMSIX_VECT45_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT46_ADDR_LO 0x1a0b8
+#define regPCIEMSIX_VECT46_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT46_ADDR_HI 0x1a0b9
+#define regPCIEMSIX_VECT46_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT46_MSG_DATA 0x1a0ba
+#define regPCIEMSIX_VECT46_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT46_CONTROL 0x1a0bb
+#define regPCIEMSIX_VECT46_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT47_ADDR_LO 0x1a0bc
+#define regPCIEMSIX_VECT47_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT47_ADDR_HI 0x1a0bd
+#define regPCIEMSIX_VECT47_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT47_MSG_DATA 0x1a0be
+#define regPCIEMSIX_VECT47_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT47_CONTROL 0x1a0bf
+#define regPCIEMSIX_VECT47_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT48_ADDR_LO 0x1a0c0
+#define regPCIEMSIX_VECT48_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT48_ADDR_HI 0x1a0c1
+#define regPCIEMSIX_VECT48_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT48_MSG_DATA 0x1a0c2
+#define regPCIEMSIX_VECT48_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT48_CONTROL 0x1a0c3
+#define regPCIEMSIX_VECT48_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT49_ADDR_LO 0x1a0c4
+#define regPCIEMSIX_VECT49_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT49_ADDR_HI 0x1a0c5
+#define regPCIEMSIX_VECT49_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT49_MSG_DATA 0x1a0c6
+#define regPCIEMSIX_VECT49_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT49_CONTROL 0x1a0c7
+#define regPCIEMSIX_VECT49_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT50_ADDR_LO 0x1a0c8
+#define regPCIEMSIX_VECT50_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT50_ADDR_HI 0x1a0c9
+#define regPCIEMSIX_VECT50_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT50_MSG_DATA 0x1a0ca
+#define regPCIEMSIX_VECT50_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT50_CONTROL 0x1a0cb
+#define regPCIEMSIX_VECT50_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT51_ADDR_LO 0x1a0cc
+#define regPCIEMSIX_VECT51_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT51_ADDR_HI 0x1a0cd
+#define regPCIEMSIX_VECT51_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT51_MSG_DATA 0x1a0ce
+#define regPCIEMSIX_VECT51_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT51_CONTROL 0x1a0cf
+#define regPCIEMSIX_VECT51_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT52_ADDR_LO 0x1a0d0
+#define regPCIEMSIX_VECT52_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT52_ADDR_HI 0x1a0d1
+#define regPCIEMSIX_VECT52_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT52_MSG_DATA 0x1a0d2
+#define regPCIEMSIX_VECT52_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT52_CONTROL 0x1a0d3
+#define regPCIEMSIX_VECT52_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT53_ADDR_LO 0x1a0d4
+#define regPCIEMSIX_VECT53_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT53_ADDR_HI 0x1a0d5
+#define regPCIEMSIX_VECT53_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT53_MSG_DATA 0x1a0d6
+#define regPCIEMSIX_VECT53_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT53_CONTROL 0x1a0d7
+#define regPCIEMSIX_VECT53_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT54_ADDR_LO 0x1a0d8
+#define regPCIEMSIX_VECT54_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT54_ADDR_HI 0x1a0d9
+#define regPCIEMSIX_VECT54_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT54_MSG_DATA 0x1a0da
+#define regPCIEMSIX_VECT54_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT54_CONTROL 0x1a0db
+#define regPCIEMSIX_VECT54_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT55_ADDR_LO 0x1a0dc
+#define regPCIEMSIX_VECT55_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT55_ADDR_HI 0x1a0dd
+#define regPCIEMSIX_VECT55_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT55_MSG_DATA 0x1a0de
+#define regPCIEMSIX_VECT55_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT55_CONTROL 0x1a0df
+#define regPCIEMSIX_VECT55_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT56_ADDR_LO 0x1a0e0
+#define regPCIEMSIX_VECT56_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT56_ADDR_HI 0x1a0e1
+#define regPCIEMSIX_VECT56_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT56_MSG_DATA 0x1a0e2
+#define regPCIEMSIX_VECT56_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT56_CONTROL 0x1a0e3
+#define regPCIEMSIX_VECT56_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT57_ADDR_LO 0x1a0e4
+#define regPCIEMSIX_VECT57_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT57_ADDR_HI 0x1a0e5
+#define regPCIEMSIX_VECT57_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT57_MSG_DATA 0x1a0e6
+#define regPCIEMSIX_VECT57_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT57_CONTROL 0x1a0e7
+#define regPCIEMSIX_VECT57_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT58_ADDR_LO 0x1a0e8
+#define regPCIEMSIX_VECT58_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT58_ADDR_HI 0x1a0e9
+#define regPCIEMSIX_VECT58_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT58_MSG_DATA 0x1a0ea
+#define regPCIEMSIX_VECT58_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT58_CONTROL 0x1a0eb
+#define regPCIEMSIX_VECT58_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT59_ADDR_LO 0x1a0ec
+#define regPCIEMSIX_VECT59_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT59_ADDR_HI 0x1a0ed
+#define regPCIEMSIX_VECT59_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT59_MSG_DATA 0x1a0ee
+#define regPCIEMSIX_VECT59_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT59_CONTROL 0x1a0ef
+#define regPCIEMSIX_VECT59_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT60_ADDR_LO 0x1a0f0
+#define regPCIEMSIX_VECT60_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT60_ADDR_HI 0x1a0f1
+#define regPCIEMSIX_VECT60_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT60_MSG_DATA 0x1a0f2
+#define regPCIEMSIX_VECT60_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT60_CONTROL 0x1a0f3
+#define regPCIEMSIX_VECT60_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT61_ADDR_LO 0x1a0f4
+#define regPCIEMSIX_VECT61_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT61_ADDR_HI 0x1a0f5
+#define regPCIEMSIX_VECT61_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT61_MSG_DATA 0x1a0f6
+#define regPCIEMSIX_VECT61_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT61_CONTROL 0x1a0f7
+#define regPCIEMSIX_VECT61_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT62_ADDR_LO 0x1a0f8
+#define regPCIEMSIX_VECT62_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT62_ADDR_HI 0x1a0f9
+#define regPCIEMSIX_VECT62_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT62_MSG_DATA 0x1a0fa
+#define regPCIEMSIX_VECT62_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT62_CONTROL 0x1a0fb
+#define regPCIEMSIX_VECT62_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT63_ADDR_LO 0x1a0fc
+#define regPCIEMSIX_VECT63_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT63_ADDR_HI 0x1a0fd
+#define regPCIEMSIX_VECT63_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT63_MSG_DATA 0x1a0fe
+#define regPCIEMSIX_VECT63_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT63_CONTROL 0x1a0ff
+#define regPCIEMSIX_VECT63_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT64_ADDR_LO 0x1a100
+#define regPCIEMSIX_VECT64_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT64_ADDR_HI 0x1a101
+#define regPCIEMSIX_VECT64_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT64_MSG_DATA 0x1a102
+#define regPCIEMSIX_VECT64_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT64_CONTROL 0x1a103
+#define regPCIEMSIX_VECT64_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT65_ADDR_LO 0x1a104
+#define regPCIEMSIX_VECT65_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT65_ADDR_HI 0x1a105
+#define regPCIEMSIX_VECT65_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT65_MSG_DATA 0x1a106
+#define regPCIEMSIX_VECT65_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT65_CONTROL 0x1a107
+#define regPCIEMSIX_VECT65_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT66_ADDR_LO 0x1a108
+#define regPCIEMSIX_VECT66_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT66_ADDR_HI 0x1a109
+#define regPCIEMSIX_VECT66_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT66_MSG_DATA 0x1a10a
+#define regPCIEMSIX_VECT66_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT66_CONTROL 0x1a10b
+#define regPCIEMSIX_VECT66_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT67_ADDR_LO 0x1a10c
+#define regPCIEMSIX_VECT67_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT67_ADDR_HI 0x1a10d
+#define regPCIEMSIX_VECT67_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT67_MSG_DATA 0x1a10e
+#define regPCIEMSIX_VECT67_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT67_CONTROL 0x1a10f
+#define regPCIEMSIX_VECT67_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT68_ADDR_LO 0x1a110
+#define regPCIEMSIX_VECT68_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT68_ADDR_HI 0x1a111
+#define regPCIEMSIX_VECT68_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT68_MSG_DATA 0x1a112
+#define regPCIEMSIX_VECT68_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT68_CONTROL 0x1a113
+#define regPCIEMSIX_VECT68_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT69_ADDR_LO 0x1a114
+#define regPCIEMSIX_VECT69_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT69_ADDR_HI 0x1a115
+#define regPCIEMSIX_VECT69_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT69_MSG_DATA 0x1a116
+#define regPCIEMSIX_VECT69_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT69_CONTROL 0x1a117
+#define regPCIEMSIX_VECT69_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT70_ADDR_LO 0x1a118
+#define regPCIEMSIX_VECT70_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT70_ADDR_HI 0x1a119
+#define regPCIEMSIX_VECT70_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT70_MSG_DATA 0x1a11a
+#define regPCIEMSIX_VECT70_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT70_CONTROL 0x1a11b
+#define regPCIEMSIX_VECT70_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT71_ADDR_LO 0x1a11c
+#define regPCIEMSIX_VECT71_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT71_ADDR_HI 0x1a11d
+#define regPCIEMSIX_VECT71_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT71_MSG_DATA 0x1a11e
+#define regPCIEMSIX_VECT71_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT71_CONTROL 0x1a11f
+#define regPCIEMSIX_VECT71_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT72_ADDR_LO 0x1a120
+#define regPCIEMSIX_VECT72_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT72_ADDR_HI 0x1a121
+#define regPCIEMSIX_VECT72_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT72_MSG_DATA 0x1a122
+#define regPCIEMSIX_VECT72_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT72_CONTROL 0x1a123
+#define regPCIEMSIX_VECT72_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT73_ADDR_LO 0x1a124
+#define regPCIEMSIX_VECT73_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT73_ADDR_HI 0x1a125
+#define regPCIEMSIX_VECT73_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT73_MSG_DATA 0x1a126
+#define regPCIEMSIX_VECT73_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT73_CONTROL 0x1a127
+#define regPCIEMSIX_VECT73_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT74_ADDR_LO 0x1a128
+#define regPCIEMSIX_VECT74_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT74_ADDR_HI 0x1a129
+#define regPCIEMSIX_VECT74_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT74_MSG_DATA 0x1a12a
+#define regPCIEMSIX_VECT74_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT74_CONTROL 0x1a12b
+#define regPCIEMSIX_VECT74_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT75_ADDR_LO 0x1a12c
+#define regPCIEMSIX_VECT75_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT75_ADDR_HI 0x1a12d
+#define regPCIEMSIX_VECT75_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT75_MSG_DATA 0x1a12e
+#define regPCIEMSIX_VECT75_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT75_CONTROL 0x1a12f
+#define regPCIEMSIX_VECT75_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT76_ADDR_LO 0x1a130
+#define regPCIEMSIX_VECT76_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT76_ADDR_HI 0x1a131
+#define regPCIEMSIX_VECT76_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT76_MSG_DATA 0x1a132
+#define regPCIEMSIX_VECT76_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT76_CONTROL 0x1a133
+#define regPCIEMSIX_VECT76_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT77_ADDR_LO 0x1a134
+#define regPCIEMSIX_VECT77_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT77_ADDR_HI 0x1a135
+#define regPCIEMSIX_VECT77_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT77_MSG_DATA 0x1a136
+#define regPCIEMSIX_VECT77_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT77_CONTROL 0x1a137
+#define regPCIEMSIX_VECT77_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT78_ADDR_LO 0x1a138
+#define regPCIEMSIX_VECT78_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT78_ADDR_HI 0x1a139
+#define regPCIEMSIX_VECT78_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT78_MSG_DATA 0x1a13a
+#define regPCIEMSIX_VECT78_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT78_CONTROL 0x1a13b
+#define regPCIEMSIX_VECT78_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT79_ADDR_LO 0x1a13c
+#define regPCIEMSIX_VECT79_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT79_ADDR_HI 0x1a13d
+#define regPCIEMSIX_VECT79_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT79_MSG_DATA 0x1a13e
+#define regPCIEMSIX_VECT79_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT79_CONTROL 0x1a13f
+#define regPCIEMSIX_VECT79_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT80_ADDR_LO 0x1a140
+#define regPCIEMSIX_VECT80_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT80_ADDR_HI 0x1a141
+#define regPCIEMSIX_VECT80_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT80_MSG_DATA 0x1a142
+#define regPCIEMSIX_VECT80_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT80_CONTROL 0x1a143
+#define regPCIEMSIX_VECT80_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT81_ADDR_LO 0x1a144
+#define regPCIEMSIX_VECT81_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT81_ADDR_HI 0x1a145
+#define regPCIEMSIX_VECT81_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT81_MSG_DATA 0x1a146
+#define regPCIEMSIX_VECT81_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT81_CONTROL 0x1a147
+#define regPCIEMSIX_VECT81_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT82_ADDR_LO 0x1a148
+#define regPCIEMSIX_VECT82_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT82_ADDR_HI 0x1a149
+#define regPCIEMSIX_VECT82_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT82_MSG_DATA 0x1a14a
+#define regPCIEMSIX_VECT82_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT82_CONTROL 0x1a14b
+#define regPCIEMSIX_VECT82_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT83_ADDR_LO 0x1a14c
+#define regPCIEMSIX_VECT83_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT83_ADDR_HI 0x1a14d
+#define regPCIEMSIX_VECT83_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT83_MSG_DATA 0x1a14e
+#define regPCIEMSIX_VECT83_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT83_CONTROL 0x1a14f
+#define regPCIEMSIX_VECT83_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT84_ADDR_LO 0x1a150
+#define regPCIEMSIX_VECT84_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT84_ADDR_HI 0x1a151
+#define regPCIEMSIX_VECT84_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT84_MSG_DATA 0x1a152
+#define regPCIEMSIX_VECT84_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT84_CONTROL 0x1a153
+#define regPCIEMSIX_VECT84_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT85_ADDR_LO 0x1a154
+#define regPCIEMSIX_VECT85_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT85_ADDR_HI 0x1a155
+#define regPCIEMSIX_VECT85_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT85_MSG_DATA 0x1a156
+#define regPCIEMSIX_VECT85_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT85_CONTROL 0x1a157
+#define regPCIEMSIX_VECT85_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT86_ADDR_LO 0x1a158
+#define regPCIEMSIX_VECT86_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT86_ADDR_HI 0x1a159
+#define regPCIEMSIX_VECT86_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT86_MSG_DATA 0x1a15a
+#define regPCIEMSIX_VECT86_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT86_CONTROL 0x1a15b
+#define regPCIEMSIX_VECT86_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT87_ADDR_LO 0x1a15c
+#define regPCIEMSIX_VECT87_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT87_ADDR_HI 0x1a15d
+#define regPCIEMSIX_VECT87_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT87_MSG_DATA 0x1a15e
+#define regPCIEMSIX_VECT87_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT87_CONTROL 0x1a15f
+#define regPCIEMSIX_VECT87_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT88_ADDR_LO 0x1a160
+#define regPCIEMSIX_VECT88_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT88_ADDR_HI 0x1a161
+#define regPCIEMSIX_VECT88_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT88_MSG_DATA 0x1a162
+#define regPCIEMSIX_VECT88_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT88_CONTROL 0x1a163
+#define regPCIEMSIX_VECT88_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT89_ADDR_LO 0x1a164
+#define regPCIEMSIX_VECT89_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT89_ADDR_HI 0x1a165
+#define regPCIEMSIX_VECT89_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT89_MSG_DATA 0x1a166
+#define regPCIEMSIX_VECT89_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT89_CONTROL 0x1a167
+#define regPCIEMSIX_VECT89_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT90_ADDR_LO 0x1a168
+#define regPCIEMSIX_VECT90_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT90_ADDR_HI 0x1a169
+#define regPCIEMSIX_VECT90_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT90_MSG_DATA 0x1a16a
+#define regPCIEMSIX_VECT90_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT90_CONTROL 0x1a16b
+#define regPCIEMSIX_VECT90_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT91_ADDR_LO 0x1a16c
+#define regPCIEMSIX_VECT91_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT91_ADDR_HI 0x1a16d
+#define regPCIEMSIX_VECT91_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT91_MSG_DATA 0x1a16e
+#define regPCIEMSIX_VECT91_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT91_CONTROL 0x1a16f
+#define regPCIEMSIX_VECT91_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT92_ADDR_LO 0x1a170
+#define regPCIEMSIX_VECT92_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT92_ADDR_HI 0x1a171
+#define regPCIEMSIX_VECT92_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT92_MSG_DATA 0x1a172
+#define regPCIEMSIX_VECT92_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT92_CONTROL 0x1a173
+#define regPCIEMSIX_VECT92_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT93_ADDR_LO 0x1a174
+#define regPCIEMSIX_VECT93_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT93_ADDR_HI 0x1a175
+#define regPCIEMSIX_VECT93_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT93_MSG_DATA 0x1a176
+#define regPCIEMSIX_VECT93_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT93_CONTROL 0x1a177
+#define regPCIEMSIX_VECT93_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT94_ADDR_LO 0x1a178
+#define regPCIEMSIX_VECT94_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT94_ADDR_HI 0x1a179
+#define regPCIEMSIX_VECT94_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT94_MSG_DATA 0x1a17a
+#define regPCIEMSIX_VECT94_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT94_CONTROL 0x1a17b
+#define regPCIEMSIX_VECT94_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT95_ADDR_LO 0x1a17c
+#define regPCIEMSIX_VECT95_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT95_ADDR_HI 0x1a17d
+#define regPCIEMSIX_VECT95_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT95_MSG_DATA 0x1a17e
+#define regPCIEMSIX_VECT95_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT95_CONTROL 0x1a17f
+#define regPCIEMSIX_VECT95_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT96_ADDR_LO 0x1a180
+#define regPCIEMSIX_VECT96_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT96_ADDR_HI 0x1a181
+#define regPCIEMSIX_VECT96_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT96_MSG_DATA 0x1a182
+#define regPCIEMSIX_VECT96_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT96_CONTROL 0x1a183
+#define regPCIEMSIX_VECT96_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT97_ADDR_LO 0x1a184
+#define regPCIEMSIX_VECT97_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT97_ADDR_HI 0x1a185
+#define regPCIEMSIX_VECT97_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT97_MSG_DATA 0x1a186
+#define regPCIEMSIX_VECT97_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT97_CONTROL 0x1a187
+#define regPCIEMSIX_VECT97_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT98_ADDR_LO 0x1a188
+#define regPCIEMSIX_VECT98_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT98_ADDR_HI 0x1a189
+#define regPCIEMSIX_VECT98_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT98_MSG_DATA 0x1a18a
+#define regPCIEMSIX_VECT98_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT98_CONTROL 0x1a18b
+#define regPCIEMSIX_VECT98_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT99_ADDR_LO 0x1a18c
+#define regPCIEMSIX_VECT99_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT99_ADDR_HI 0x1a18d
+#define regPCIEMSIX_VECT99_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT99_MSG_DATA 0x1a18e
+#define regPCIEMSIX_VECT99_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT99_CONTROL 0x1a18f
+#define regPCIEMSIX_VECT99_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT100_ADDR_LO 0x1a190
+#define regPCIEMSIX_VECT100_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT100_ADDR_HI 0x1a191
+#define regPCIEMSIX_VECT100_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT100_MSG_DATA 0x1a192
+#define regPCIEMSIX_VECT100_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT100_CONTROL 0x1a193
+#define regPCIEMSIX_VECT100_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT101_ADDR_LO 0x1a194
+#define regPCIEMSIX_VECT101_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT101_ADDR_HI 0x1a195
+#define regPCIEMSIX_VECT101_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT101_MSG_DATA 0x1a196
+#define regPCIEMSIX_VECT101_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT101_CONTROL 0x1a197
+#define regPCIEMSIX_VECT101_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT102_ADDR_LO 0x1a198
+#define regPCIEMSIX_VECT102_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT102_ADDR_HI 0x1a199
+#define regPCIEMSIX_VECT102_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT102_MSG_DATA 0x1a19a
+#define regPCIEMSIX_VECT102_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT102_CONTROL 0x1a19b
+#define regPCIEMSIX_VECT102_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT103_ADDR_LO 0x1a19c
+#define regPCIEMSIX_VECT103_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT103_ADDR_HI 0x1a19d
+#define regPCIEMSIX_VECT103_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT103_MSG_DATA 0x1a19e
+#define regPCIEMSIX_VECT103_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT103_CONTROL 0x1a19f
+#define regPCIEMSIX_VECT103_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT104_ADDR_LO 0x1a1a0
+#define regPCIEMSIX_VECT104_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT104_ADDR_HI 0x1a1a1
+#define regPCIEMSIX_VECT104_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT104_MSG_DATA 0x1a1a2
+#define regPCIEMSIX_VECT104_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT104_CONTROL 0x1a1a3
+#define regPCIEMSIX_VECT104_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT105_ADDR_LO 0x1a1a4
+#define regPCIEMSIX_VECT105_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT105_ADDR_HI 0x1a1a5
+#define regPCIEMSIX_VECT105_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT105_MSG_DATA 0x1a1a6
+#define regPCIEMSIX_VECT105_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT105_CONTROL 0x1a1a7
+#define regPCIEMSIX_VECT105_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT106_ADDR_LO 0x1a1a8
+#define regPCIEMSIX_VECT106_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT106_ADDR_HI 0x1a1a9
+#define regPCIEMSIX_VECT106_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT106_MSG_DATA 0x1a1aa
+#define regPCIEMSIX_VECT106_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT106_CONTROL 0x1a1ab
+#define regPCIEMSIX_VECT106_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT107_ADDR_LO 0x1a1ac
+#define regPCIEMSIX_VECT107_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT107_ADDR_HI 0x1a1ad
+#define regPCIEMSIX_VECT107_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT107_MSG_DATA 0x1a1ae
+#define regPCIEMSIX_VECT107_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT107_CONTROL 0x1a1af
+#define regPCIEMSIX_VECT107_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT108_ADDR_LO 0x1a1b0
+#define regPCIEMSIX_VECT108_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT108_ADDR_HI 0x1a1b1
+#define regPCIEMSIX_VECT108_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT108_MSG_DATA 0x1a1b2
+#define regPCIEMSIX_VECT108_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT108_CONTROL 0x1a1b3
+#define regPCIEMSIX_VECT108_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT109_ADDR_LO 0x1a1b4
+#define regPCIEMSIX_VECT109_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT109_ADDR_HI 0x1a1b5
+#define regPCIEMSIX_VECT109_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT109_MSG_DATA 0x1a1b6
+#define regPCIEMSIX_VECT109_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT109_CONTROL 0x1a1b7
+#define regPCIEMSIX_VECT109_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT110_ADDR_LO 0x1a1b8
+#define regPCIEMSIX_VECT110_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT110_ADDR_HI 0x1a1b9
+#define regPCIEMSIX_VECT110_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT110_MSG_DATA 0x1a1ba
+#define regPCIEMSIX_VECT110_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT110_CONTROL 0x1a1bb
+#define regPCIEMSIX_VECT110_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT111_ADDR_LO 0x1a1bc
+#define regPCIEMSIX_VECT111_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT111_ADDR_HI 0x1a1bd
+#define regPCIEMSIX_VECT111_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT111_MSG_DATA 0x1a1be
+#define regPCIEMSIX_VECT111_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT111_CONTROL 0x1a1bf
+#define regPCIEMSIX_VECT111_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT112_ADDR_LO 0x1a1c0
+#define regPCIEMSIX_VECT112_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT112_ADDR_HI 0x1a1c1
+#define regPCIEMSIX_VECT112_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT112_MSG_DATA 0x1a1c2
+#define regPCIEMSIX_VECT112_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT112_CONTROL 0x1a1c3
+#define regPCIEMSIX_VECT112_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT113_ADDR_LO 0x1a1c4
+#define regPCIEMSIX_VECT113_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT113_ADDR_HI 0x1a1c5
+#define regPCIEMSIX_VECT113_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT113_MSG_DATA 0x1a1c6
+#define regPCIEMSIX_VECT113_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT113_CONTROL 0x1a1c7
+#define regPCIEMSIX_VECT113_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT114_ADDR_LO 0x1a1c8
+#define regPCIEMSIX_VECT114_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT114_ADDR_HI 0x1a1c9
+#define regPCIEMSIX_VECT114_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT114_MSG_DATA 0x1a1ca
+#define regPCIEMSIX_VECT114_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT114_CONTROL 0x1a1cb
+#define regPCIEMSIX_VECT114_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT115_ADDR_LO 0x1a1cc
+#define regPCIEMSIX_VECT115_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT115_ADDR_HI 0x1a1cd
+#define regPCIEMSIX_VECT115_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT115_MSG_DATA 0x1a1ce
+#define regPCIEMSIX_VECT115_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT115_CONTROL 0x1a1cf
+#define regPCIEMSIX_VECT115_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT116_ADDR_LO 0x1a1d0
+#define regPCIEMSIX_VECT116_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT116_ADDR_HI 0x1a1d1
+#define regPCIEMSIX_VECT116_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT116_MSG_DATA 0x1a1d2
+#define regPCIEMSIX_VECT116_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT116_CONTROL 0x1a1d3
+#define regPCIEMSIX_VECT116_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT117_ADDR_LO 0x1a1d4
+#define regPCIEMSIX_VECT117_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT117_ADDR_HI 0x1a1d5
+#define regPCIEMSIX_VECT117_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT117_MSG_DATA 0x1a1d6
+#define regPCIEMSIX_VECT117_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT117_CONTROL 0x1a1d7
+#define regPCIEMSIX_VECT117_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT118_ADDR_LO 0x1a1d8
+#define regPCIEMSIX_VECT118_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT118_ADDR_HI 0x1a1d9
+#define regPCIEMSIX_VECT118_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT118_MSG_DATA 0x1a1da
+#define regPCIEMSIX_VECT118_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT118_CONTROL 0x1a1db
+#define regPCIEMSIX_VECT118_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT119_ADDR_LO 0x1a1dc
+#define regPCIEMSIX_VECT119_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT119_ADDR_HI 0x1a1dd
+#define regPCIEMSIX_VECT119_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT119_MSG_DATA 0x1a1de
+#define regPCIEMSIX_VECT119_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT119_CONTROL 0x1a1df
+#define regPCIEMSIX_VECT119_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT120_ADDR_LO 0x1a1e0
+#define regPCIEMSIX_VECT120_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT120_ADDR_HI 0x1a1e1
+#define regPCIEMSIX_VECT120_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT120_MSG_DATA 0x1a1e2
+#define regPCIEMSIX_VECT120_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT120_CONTROL 0x1a1e3
+#define regPCIEMSIX_VECT120_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT121_ADDR_LO 0x1a1e4
+#define regPCIEMSIX_VECT121_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT121_ADDR_HI 0x1a1e5
+#define regPCIEMSIX_VECT121_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT121_MSG_DATA 0x1a1e6
+#define regPCIEMSIX_VECT121_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT121_CONTROL 0x1a1e7
+#define regPCIEMSIX_VECT121_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT122_ADDR_LO 0x1a1e8
+#define regPCIEMSIX_VECT122_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT122_ADDR_HI 0x1a1e9
+#define regPCIEMSIX_VECT122_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT122_MSG_DATA 0x1a1ea
+#define regPCIEMSIX_VECT122_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT122_CONTROL 0x1a1eb
+#define regPCIEMSIX_VECT122_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT123_ADDR_LO 0x1a1ec
+#define regPCIEMSIX_VECT123_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT123_ADDR_HI 0x1a1ed
+#define regPCIEMSIX_VECT123_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT123_MSG_DATA 0x1a1ee
+#define regPCIEMSIX_VECT123_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT123_CONTROL 0x1a1ef
+#define regPCIEMSIX_VECT123_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT124_ADDR_LO 0x1a1f0
+#define regPCIEMSIX_VECT124_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT124_ADDR_HI 0x1a1f1
+#define regPCIEMSIX_VECT124_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT124_MSG_DATA 0x1a1f2
+#define regPCIEMSIX_VECT124_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT124_CONTROL 0x1a1f3
+#define regPCIEMSIX_VECT124_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT125_ADDR_LO 0x1a1f4
+#define regPCIEMSIX_VECT125_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT125_ADDR_HI 0x1a1f5
+#define regPCIEMSIX_VECT125_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT125_MSG_DATA 0x1a1f6
+#define regPCIEMSIX_VECT125_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT125_CONTROL 0x1a1f7
+#define regPCIEMSIX_VECT125_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT126_ADDR_LO 0x1a1f8
+#define regPCIEMSIX_VECT126_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT126_ADDR_HI 0x1a1f9
+#define regPCIEMSIX_VECT126_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT126_MSG_DATA 0x1a1fa
+#define regPCIEMSIX_VECT126_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT126_CONTROL 0x1a1fb
+#define regPCIEMSIX_VECT126_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT127_ADDR_LO 0x1a1fc
+#define regPCIEMSIX_VECT127_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT127_ADDR_HI 0x1a1fd
+#define regPCIEMSIX_VECT127_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT127_MSG_DATA 0x1a1fe
+#define regPCIEMSIX_VECT127_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT127_CONTROL 0x1a1ff
+#define regPCIEMSIX_VECT127_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT128_ADDR_LO 0x1a200
+#define regPCIEMSIX_VECT128_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT128_ADDR_HI 0x1a201
+#define regPCIEMSIX_VECT128_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT128_MSG_DATA 0x1a202
+#define regPCIEMSIX_VECT128_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT128_CONTROL 0x1a203
+#define regPCIEMSIX_VECT128_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT129_ADDR_LO 0x1a204
+#define regPCIEMSIX_VECT129_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT129_ADDR_HI 0x1a205
+#define regPCIEMSIX_VECT129_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT129_MSG_DATA 0x1a206
+#define regPCIEMSIX_VECT129_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT129_CONTROL 0x1a207
+#define regPCIEMSIX_VECT129_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT130_ADDR_LO 0x1a208
+#define regPCIEMSIX_VECT130_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT130_ADDR_HI 0x1a209
+#define regPCIEMSIX_VECT130_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT130_MSG_DATA 0x1a20a
+#define regPCIEMSIX_VECT130_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT130_CONTROL 0x1a20b
+#define regPCIEMSIX_VECT130_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT131_ADDR_LO 0x1a20c
+#define regPCIEMSIX_VECT131_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT131_ADDR_HI 0x1a20d
+#define regPCIEMSIX_VECT131_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT131_MSG_DATA 0x1a20e
+#define regPCIEMSIX_VECT131_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT131_CONTROL 0x1a20f
+#define regPCIEMSIX_VECT131_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT132_ADDR_LO 0x1a210
+#define regPCIEMSIX_VECT132_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT132_ADDR_HI 0x1a211
+#define regPCIEMSIX_VECT132_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT132_MSG_DATA 0x1a212
+#define regPCIEMSIX_VECT132_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT132_CONTROL 0x1a213
+#define regPCIEMSIX_VECT132_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT133_ADDR_LO 0x1a214
+#define regPCIEMSIX_VECT133_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT133_ADDR_HI 0x1a215
+#define regPCIEMSIX_VECT133_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT133_MSG_DATA 0x1a216
+#define regPCIEMSIX_VECT133_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT133_CONTROL 0x1a217
+#define regPCIEMSIX_VECT133_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT134_ADDR_LO 0x1a218
+#define regPCIEMSIX_VECT134_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT134_ADDR_HI 0x1a219
+#define regPCIEMSIX_VECT134_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT134_MSG_DATA 0x1a21a
+#define regPCIEMSIX_VECT134_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT134_CONTROL 0x1a21b
+#define regPCIEMSIX_VECT134_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT135_ADDR_LO 0x1a21c
+#define regPCIEMSIX_VECT135_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT135_ADDR_HI 0x1a21d
+#define regPCIEMSIX_VECT135_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT135_MSG_DATA 0x1a21e
+#define regPCIEMSIX_VECT135_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT135_CONTROL 0x1a21f
+#define regPCIEMSIX_VECT135_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT136_ADDR_LO 0x1a220
+#define regPCIEMSIX_VECT136_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT136_ADDR_HI 0x1a221
+#define regPCIEMSIX_VECT136_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT136_MSG_DATA 0x1a222
+#define regPCIEMSIX_VECT136_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT136_CONTROL 0x1a223
+#define regPCIEMSIX_VECT136_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT137_ADDR_LO 0x1a224
+#define regPCIEMSIX_VECT137_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT137_ADDR_HI 0x1a225
+#define regPCIEMSIX_VECT137_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT137_MSG_DATA 0x1a226
+#define regPCIEMSIX_VECT137_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT137_CONTROL 0x1a227
+#define regPCIEMSIX_VECT137_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT138_ADDR_LO 0x1a228
+#define regPCIEMSIX_VECT138_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT138_ADDR_HI 0x1a229
+#define regPCIEMSIX_VECT138_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT138_MSG_DATA 0x1a22a
+#define regPCIEMSIX_VECT138_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT138_CONTROL 0x1a22b
+#define regPCIEMSIX_VECT138_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT139_ADDR_LO 0x1a22c
+#define regPCIEMSIX_VECT139_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT139_ADDR_HI 0x1a22d
+#define regPCIEMSIX_VECT139_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT139_MSG_DATA 0x1a22e
+#define regPCIEMSIX_VECT139_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT139_CONTROL 0x1a22f
+#define regPCIEMSIX_VECT139_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT140_ADDR_LO 0x1a230
+#define regPCIEMSIX_VECT140_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT140_ADDR_HI 0x1a231
+#define regPCIEMSIX_VECT140_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT140_MSG_DATA 0x1a232
+#define regPCIEMSIX_VECT140_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT140_CONTROL 0x1a233
+#define regPCIEMSIX_VECT140_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT141_ADDR_LO 0x1a234
+#define regPCIEMSIX_VECT141_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT141_ADDR_HI 0x1a235
+#define regPCIEMSIX_VECT141_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT141_MSG_DATA 0x1a236
+#define regPCIEMSIX_VECT141_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT141_CONTROL 0x1a237
+#define regPCIEMSIX_VECT141_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT142_ADDR_LO 0x1a238
+#define regPCIEMSIX_VECT142_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT142_ADDR_HI 0x1a239
+#define regPCIEMSIX_VECT142_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT142_MSG_DATA 0x1a23a
+#define regPCIEMSIX_VECT142_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT142_CONTROL 0x1a23b
+#define regPCIEMSIX_VECT142_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT143_ADDR_LO 0x1a23c
+#define regPCIEMSIX_VECT143_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT143_ADDR_HI 0x1a23d
+#define regPCIEMSIX_VECT143_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT143_MSG_DATA 0x1a23e
+#define regPCIEMSIX_VECT143_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT143_CONTROL 0x1a23f
+#define regPCIEMSIX_VECT143_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT144_ADDR_LO 0x1a240
+#define regPCIEMSIX_VECT144_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT144_ADDR_HI 0x1a241
+#define regPCIEMSIX_VECT144_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT144_MSG_DATA 0x1a242
+#define regPCIEMSIX_VECT144_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT144_CONTROL 0x1a243
+#define regPCIEMSIX_VECT144_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT145_ADDR_LO 0x1a244
+#define regPCIEMSIX_VECT145_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT145_ADDR_HI 0x1a245
+#define regPCIEMSIX_VECT145_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT145_MSG_DATA 0x1a246
+#define regPCIEMSIX_VECT145_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT145_CONTROL 0x1a247
+#define regPCIEMSIX_VECT145_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT146_ADDR_LO 0x1a248
+#define regPCIEMSIX_VECT146_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT146_ADDR_HI 0x1a249
+#define regPCIEMSIX_VECT146_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT146_MSG_DATA 0x1a24a
+#define regPCIEMSIX_VECT146_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT146_CONTROL 0x1a24b
+#define regPCIEMSIX_VECT146_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT147_ADDR_LO 0x1a24c
+#define regPCIEMSIX_VECT147_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT147_ADDR_HI 0x1a24d
+#define regPCIEMSIX_VECT147_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT147_MSG_DATA 0x1a24e
+#define regPCIEMSIX_VECT147_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT147_CONTROL 0x1a24f
+#define regPCIEMSIX_VECT147_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT148_ADDR_LO 0x1a250
+#define regPCIEMSIX_VECT148_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT148_ADDR_HI 0x1a251
+#define regPCIEMSIX_VECT148_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT148_MSG_DATA 0x1a252
+#define regPCIEMSIX_VECT148_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT148_CONTROL 0x1a253
+#define regPCIEMSIX_VECT148_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT149_ADDR_LO 0x1a254
+#define regPCIEMSIX_VECT149_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT149_ADDR_HI 0x1a255
+#define regPCIEMSIX_VECT149_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT149_MSG_DATA 0x1a256
+#define regPCIEMSIX_VECT149_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT149_CONTROL 0x1a257
+#define regPCIEMSIX_VECT149_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT150_ADDR_LO 0x1a258
+#define regPCIEMSIX_VECT150_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT150_ADDR_HI 0x1a259
+#define regPCIEMSIX_VECT150_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT150_MSG_DATA 0x1a25a
+#define regPCIEMSIX_VECT150_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT150_CONTROL 0x1a25b
+#define regPCIEMSIX_VECT150_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT151_ADDR_LO 0x1a25c
+#define regPCIEMSIX_VECT151_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT151_ADDR_HI 0x1a25d
+#define regPCIEMSIX_VECT151_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT151_MSG_DATA 0x1a25e
+#define regPCIEMSIX_VECT151_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT151_CONTROL 0x1a25f
+#define regPCIEMSIX_VECT151_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT152_ADDR_LO 0x1a260
+#define regPCIEMSIX_VECT152_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT152_ADDR_HI 0x1a261
+#define regPCIEMSIX_VECT152_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT152_MSG_DATA 0x1a262
+#define regPCIEMSIX_VECT152_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT152_CONTROL 0x1a263
+#define regPCIEMSIX_VECT152_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT153_ADDR_LO 0x1a264
+#define regPCIEMSIX_VECT153_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT153_ADDR_HI 0x1a265
+#define regPCIEMSIX_VECT153_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT153_MSG_DATA 0x1a266
+#define regPCIEMSIX_VECT153_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT153_CONTROL 0x1a267
+#define regPCIEMSIX_VECT153_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT154_ADDR_LO 0x1a268
+#define regPCIEMSIX_VECT154_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT154_ADDR_HI 0x1a269
+#define regPCIEMSIX_VECT154_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT154_MSG_DATA 0x1a26a
+#define regPCIEMSIX_VECT154_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT154_CONTROL 0x1a26b
+#define regPCIEMSIX_VECT154_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT155_ADDR_LO 0x1a26c
+#define regPCIEMSIX_VECT155_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT155_ADDR_HI 0x1a26d
+#define regPCIEMSIX_VECT155_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT155_MSG_DATA 0x1a26e
+#define regPCIEMSIX_VECT155_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT155_CONTROL 0x1a26f
+#define regPCIEMSIX_VECT155_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT156_ADDR_LO 0x1a270
+#define regPCIEMSIX_VECT156_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT156_ADDR_HI 0x1a271
+#define regPCIEMSIX_VECT156_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT156_MSG_DATA 0x1a272
+#define regPCIEMSIX_VECT156_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT156_CONTROL 0x1a273
+#define regPCIEMSIX_VECT156_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT157_ADDR_LO 0x1a274
+#define regPCIEMSIX_VECT157_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT157_ADDR_HI 0x1a275
+#define regPCIEMSIX_VECT157_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT157_MSG_DATA 0x1a276
+#define regPCIEMSIX_VECT157_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT157_CONTROL 0x1a277
+#define regPCIEMSIX_VECT157_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT158_ADDR_LO 0x1a278
+#define regPCIEMSIX_VECT158_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT158_ADDR_HI 0x1a279
+#define regPCIEMSIX_VECT158_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT158_MSG_DATA 0x1a27a
+#define regPCIEMSIX_VECT158_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT158_CONTROL 0x1a27b
+#define regPCIEMSIX_VECT158_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT159_ADDR_LO 0x1a27c
+#define regPCIEMSIX_VECT159_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT159_ADDR_HI 0x1a27d
+#define regPCIEMSIX_VECT159_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT159_MSG_DATA 0x1a27e
+#define regPCIEMSIX_VECT159_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT159_CONTROL 0x1a27f
+#define regPCIEMSIX_VECT159_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT160_ADDR_LO 0x1a280
+#define regPCIEMSIX_VECT160_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT160_ADDR_HI 0x1a281
+#define regPCIEMSIX_VECT160_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT160_MSG_DATA 0x1a282
+#define regPCIEMSIX_VECT160_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT160_CONTROL 0x1a283
+#define regPCIEMSIX_VECT160_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT161_ADDR_LO 0x1a284
+#define regPCIEMSIX_VECT161_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT161_ADDR_HI 0x1a285
+#define regPCIEMSIX_VECT161_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT161_MSG_DATA 0x1a286
+#define regPCIEMSIX_VECT161_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT161_CONTROL 0x1a287
+#define regPCIEMSIX_VECT161_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT162_ADDR_LO 0x1a288
+#define regPCIEMSIX_VECT162_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT162_ADDR_HI 0x1a289
+#define regPCIEMSIX_VECT162_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT162_MSG_DATA 0x1a28a
+#define regPCIEMSIX_VECT162_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT162_CONTROL 0x1a28b
+#define regPCIEMSIX_VECT162_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT163_ADDR_LO 0x1a28c
+#define regPCIEMSIX_VECT163_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT163_ADDR_HI 0x1a28d
+#define regPCIEMSIX_VECT163_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT163_MSG_DATA 0x1a28e
+#define regPCIEMSIX_VECT163_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT163_CONTROL 0x1a28f
+#define regPCIEMSIX_VECT163_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT164_ADDR_LO 0x1a290
+#define regPCIEMSIX_VECT164_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT164_ADDR_HI 0x1a291
+#define regPCIEMSIX_VECT164_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT164_MSG_DATA 0x1a292
+#define regPCIEMSIX_VECT164_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT164_CONTROL 0x1a293
+#define regPCIEMSIX_VECT164_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT165_ADDR_LO 0x1a294
+#define regPCIEMSIX_VECT165_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT165_ADDR_HI 0x1a295
+#define regPCIEMSIX_VECT165_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT165_MSG_DATA 0x1a296
+#define regPCIEMSIX_VECT165_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT165_CONTROL 0x1a297
+#define regPCIEMSIX_VECT165_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT166_ADDR_LO 0x1a298
+#define regPCIEMSIX_VECT166_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT166_ADDR_HI 0x1a299
+#define regPCIEMSIX_VECT166_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT166_MSG_DATA 0x1a29a
+#define regPCIEMSIX_VECT166_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT166_CONTROL 0x1a29b
+#define regPCIEMSIX_VECT166_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT167_ADDR_LO 0x1a29c
+#define regPCIEMSIX_VECT167_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT167_ADDR_HI 0x1a29d
+#define regPCIEMSIX_VECT167_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT167_MSG_DATA 0x1a29e
+#define regPCIEMSIX_VECT167_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT167_CONTROL 0x1a29f
+#define regPCIEMSIX_VECT167_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT168_ADDR_LO 0x1a2a0
+#define regPCIEMSIX_VECT168_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT168_ADDR_HI 0x1a2a1
+#define regPCIEMSIX_VECT168_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT168_MSG_DATA 0x1a2a2
+#define regPCIEMSIX_VECT168_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT168_CONTROL 0x1a2a3
+#define regPCIEMSIX_VECT168_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT169_ADDR_LO 0x1a2a4
+#define regPCIEMSIX_VECT169_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT169_ADDR_HI 0x1a2a5
+#define regPCIEMSIX_VECT169_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT169_MSG_DATA 0x1a2a6
+#define regPCIEMSIX_VECT169_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT169_CONTROL 0x1a2a7
+#define regPCIEMSIX_VECT169_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT170_ADDR_LO 0x1a2a8
+#define regPCIEMSIX_VECT170_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT170_ADDR_HI 0x1a2a9
+#define regPCIEMSIX_VECT170_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT170_MSG_DATA 0x1a2aa
+#define regPCIEMSIX_VECT170_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT170_CONTROL 0x1a2ab
+#define regPCIEMSIX_VECT170_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT171_ADDR_LO 0x1a2ac
+#define regPCIEMSIX_VECT171_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT171_ADDR_HI 0x1a2ad
+#define regPCIEMSIX_VECT171_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT171_MSG_DATA 0x1a2ae
+#define regPCIEMSIX_VECT171_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT171_CONTROL 0x1a2af
+#define regPCIEMSIX_VECT171_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT172_ADDR_LO 0x1a2b0
+#define regPCIEMSIX_VECT172_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT172_ADDR_HI 0x1a2b1
+#define regPCIEMSIX_VECT172_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT172_MSG_DATA 0x1a2b2
+#define regPCIEMSIX_VECT172_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT172_CONTROL 0x1a2b3
+#define regPCIEMSIX_VECT172_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT173_ADDR_LO 0x1a2b4
+#define regPCIEMSIX_VECT173_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT173_ADDR_HI 0x1a2b5
+#define regPCIEMSIX_VECT173_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT173_MSG_DATA 0x1a2b6
+#define regPCIEMSIX_VECT173_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT173_CONTROL 0x1a2b7
+#define regPCIEMSIX_VECT173_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT174_ADDR_LO 0x1a2b8
+#define regPCIEMSIX_VECT174_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT174_ADDR_HI 0x1a2b9
+#define regPCIEMSIX_VECT174_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT174_MSG_DATA 0x1a2ba
+#define regPCIEMSIX_VECT174_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT174_CONTROL 0x1a2bb
+#define regPCIEMSIX_VECT174_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT175_ADDR_LO 0x1a2bc
+#define regPCIEMSIX_VECT175_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT175_ADDR_HI 0x1a2bd
+#define regPCIEMSIX_VECT175_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT175_MSG_DATA 0x1a2be
+#define regPCIEMSIX_VECT175_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT175_CONTROL 0x1a2bf
+#define regPCIEMSIX_VECT175_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT176_ADDR_LO 0x1a2c0
+#define regPCIEMSIX_VECT176_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT176_ADDR_HI 0x1a2c1
+#define regPCIEMSIX_VECT176_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT176_MSG_DATA 0x1a2c2
+#define regPCIEMSIX_VECT176_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT176_CONTROL 0x1a2c3
+#define regPCIEMSIX_VECT176_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT177_ADDR_LO 0x1a2c4
+#define regPCIEMSIX_VECT177_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT177_ADDR_HI 0x1a2c5
+#define regPCIEMSIX_VECT177_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT177_MSG_DATA 0x1a2c6
+#define regPCIEMSIX_VECT177_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT177_CONTROL 0x1a2c7
+#define regPCIEMSIX_VECT177_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT178_ADDR_LO 0x1a2c8
+#define regPCIEMSIX_VECT178_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT178_ADDR_HI 0x1a2c9
+#define regPCIEMSIX_VECT178_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT178_MSG_DATA 0x1a2ca
+#define regPCIEMSIX_VECT178_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT178_CONTROL 0x1a2cb
+#define regPCIEMSIX_VECT178_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT179_ADDR_LO 0x1a2cc
+#define regPCIEMSIX_VECT179_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT179_ADDR_HI 0x1a2cd
+#define regPCIEMSIX_VECT179_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT179_MSG_DATA 0x1a2ce
+#define regPCIEMSIX_VECT179_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT179_CONTROL 0x1a2cf
+#define regPCIEMSIX_VECT179_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT180_ADDR_LO 0x1a2d0
+#define regPCIEMSIX_VECT180_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT180_ADDR_HI 0x1a2d1
+#define regPCIEMSIX_VECT180_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT180_MSG_DATA 0x1a2d2
+#define regPCIEMSIX_VECT180_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT180_CONTROL 0x1a2d3
+#define regPCIEMSIX_VECT180_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT181_ADDR_LO 0x1a2d4
+#define regPCIEMSIX_VECT181_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT181_ADDR_HI 0x1a2d5
+#define regPCIEMSIX_VECT181_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT181_MSG_DATA 0x1a2d6
+#define regPCIEMSIX_VECT181_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT181_CONTROL 0x1a2d7
+#define regPCIEMSIX_VECT181_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT182_ADDR_LO 0x1a2d8
+#define regPCIEMSIX_VECT182_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT182_ADDR_HI 0x1a2d9
+#define regPCIEMSIX_VECT182_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT182_MSG_DATA 0x1a2da
+#define regPCIEMSIX_VECT182_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT182_CONTROL 0x1a2db
+#define regPCIEMSIX_VECT182_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT183_ADDR_LO 0x1a2dc
+#define regPCIEMSIX_VECT183_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT183_ADDR_HI 0x1a2dd
+#define regPCIEMSIX_VECT183_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT183_MSG_DATA 0x1a2de
+#define regPCIEMSIX_VECT183_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT183_CONTROL 0x1a2df
+#define regPCIEMSIX_VECT183_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT184_ADDR_LO 0x1a2e0
+#define regPCIEMSIX_VECT184_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT184_ADDR_HI 0x1a2e1
+#define regPCIEMSIX_VECT184_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT184_MSG_DATA 0x1a2e2
+#define regPCIEMSIX_VECT184_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT184_CONTROL 0x1a2e3
+#define regPCIEMSIX_VECT184_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT185_ADDR_LO 0x1a2e4
+#define regPCIEMSIX_VECT185_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT185_ADDR_HI 0x1a2e5
+#define regPCIEMSIX_VECT185_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT185_MSG_DATA 0x1a2e6
+#define regPCIEMSIX_VECT185_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT185_CONTROL 0x1a2e7
+#define regPCIEMSIX_VECT185_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT186_ADDR_LO 0x1a2e8
+#define regPCIEMSIX_VECT186_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT186_ADDR_HI 0x1a2e9
+#define regPCIEMSIX_VECT186_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT186_MSG_DATA 0x1a2ea
+#define regPCIEMSIX_VECT186_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT186_CONTROL 0x1a2eb
+#define regPCIEMSIX_VECT186_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT187_ADDR_LO 0x1a2ec
+#define regPCIEMSIX_VECT187_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT187_ADDR_HI 0x1a2ed
+#define regPCIEMSIX_VECT187_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT187_MSG_DATA 0x1a2ee
+#define regPCIEMSIX_VECT187_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT187_CONTROL 0x1a2ef
+#define regPCIEMSIX_VECT187_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT188_ADDR_LO 0x1a2f0
+#define regPCIEMSIX_VECT188_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT188_ADDR_HI 0x1a2f1
+#define regPCIEMSIX_VECT188_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT188_MSG_DATA 0x1a2f2
+#define regPCIEMSIX_VECT188_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT188_CONTROL 0x1a2f3
+#define regPCIEMSIX_VECT188_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT189_ADDR_LO 0x1a2f4
+#define regPCIEMSIX_VECT189_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT189_ADDR_HI 0x1a2f5
+#define regPCIEMSIX_VECT189_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT189_MSG_DATA 0x1a2f6
+#define regPCIEMSIX_VECT189_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT189_CONTROL 0x1a2f7
+#define regPCIEMSIX_VECT189_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT190_ADDR_LO 0x1a2f8
+#define regPCIEMSIX_VECT190_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT190_ADDR_HI 0x1a2f9
+#define regPCIEMSIX_VECT190_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT190_MSG_DATA 0x1a2fa
+#define regPCIEMSIX_VECT190_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT190_CONTROL 0x1a2fb
+#define regPCIEMSIX_VECT190_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT191_ADDR_LO 0x1a2fc
+#define regPCIEMSIX_VECT191_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT191_ADDR_HI 0x1a2fd
+#define regPCIEMSIX_VECT191_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT191_MSG_DATA 0x1a2fe
+#define regPCIEMSIX_VECT191_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT191_CONTROL 0x1a2ff
+#define regPCIEMSIX_VECT191_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT192_ADDR_LO 0x1a300
+#define regPCIEMSIX_VECT192_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT192_ADDR_HI 0x1a301
+#define regPCIEMSIX_VECT192_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT192_MSG_DATA 0x1a302
+#define regPCIEMSIX_VECT192_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT192_CONTROL 0x1a303
+#define regPCIEMSIX_VECT192_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT193_ADDR_LO 0x1a304
+#define regPCIEMSIX_VECT193_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT193_ADDR_HI 0x1a305
+#define regPCIEMSIX_VECT193_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT193_MSG_DATA 0x1a306
+#define regPCIEMSIX_VECT193_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT193_CONTROL 0x1a307
+#define regPCIEMSIX_VECT193_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT194_ADDR_LO 0x1a308
+#define regPCIEMSIX_VECT194_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT194_ADDR_HI 0x1a309
+#define regPCIEMSIX_VECT194_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT194_MSG_DATA 0x1a30a
+#define regPCIEMSIX_VECT194_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT194_CONTROL 0x1a30b
+#define regPCIEMSIX_VECT194_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT195_ADDR_LO 0x1a30c
+#define regPCIEMSIX_VECT195_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT195_ADDR_HI 0x1a30d
+#define regPCIEMSIX_VECT195_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT195_MSG_DATA 0x1a30e
+#define regPCIEMSIX_VECT195_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT195_CONTROL 0x1a30f
+#define regPCIEMSIX_VECT195_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT196_ADDR_LO 0x1a310
+#define regPCIEMSIX_VECT196_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT196_ADDR_HI 0x1a311
+#define regPCIEMSIX_VECT196_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT196_MSG_DATA 0x1a312
+#define regPCIEMSIX_VECT196_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT196_CONTROL 0x1a313
+#define regPCIEMSIX_VECT196_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT197_ADDR_LO 0x1a314
+#define regPCIEMSIX_VECT197_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT197_ADDR_HI 0x1a315
+#define regPCIEMSIX_VECT197_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT197_MSG_DATA 0x1a316
+#define regPCIEMSIX_VECT197_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT197_CONTROL 0x1a317
+#define regPCIEMSIX_VECT197_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT198_ADDR_LO 0x1a318
+#define regPCIEMSIX_VECT198_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT198_ADDR_HI 0x1a319
+#define regPCIEMSIX_VECT198_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT198_MSG_DATA 0x1a31a
+#define regPCIEMSIX_VECT198_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT198_CONTROL 0x1a31b
+#define regPCIEMSIX_VECT198_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT199_ADDR_LO 0x1a31c
+#define regPCIEMSIX_VECT199_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT199_ADDR_HI 0x1a31d
+#define regPCIEMSIX_VECT199_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT199_MSG_DATA 0x1a31e
+#define regPCIEMSIX_VECT199_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT199_CONTROL 0x1a31f
+#define regPCIEMSIX_VECT199_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT200_ADDR_LO 0x1a320
+#define regPCIEMSIX_VECT200_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT200_ADDR_HI 0x1a321
+#define regPCIEMSIX_VECT200_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT200_MSG_DATA 0x1a322
+#define regPCIEMSIX_VECT200_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT200_CONTROL 0x1a323
+#define regPCIEMSIX_VECT200_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT201_ADDR_LO 0x1a324
+#define regPCIEMSIX_VECT201_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT201_ADDR_HI 0x1a325
+#define regPCIEMSIX_VECT201_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT201_MSG_DATA 0x1a326
+#define regPCIEMSIX_VECT201_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT201_CONTROL 0x1a327
+#define regPCIEMSIX_VECT201_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT202_ADDR_LO 0x1a328
+#define regPCIEMSIX_VECT202_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT202_ADDR_HI 0x1a329
+#define regPCIEMSIX_VECT202_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT202_MSG_DATA 0x1a32a
+#define regPCIEMSIX_VECT202_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT202_CONTROL 0x1a32b
+#define regPCIEMSIX_VECT202_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT203_ADDR_LO 0x1a32c
+#define regPCIEMSIX_VECT203_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT203_ADDR_HI 0x1a32d
+#define regPCIEMSIX_VECT203_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT203_MSG_DATA 0x1a32e
+#define regPCIEMSIX_VECT203_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT203_CONTROL 0x1a32f
+#define regPCIEMSIX_VECT203_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT204_ADDR_LO 0x1a330
+#define regPCIEMSIX_VECT204_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT204_ADDR_HI 0x1a331
+#define regPCIEMSIX_VECT204_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT204_MSG_DATA 0x1a332
+#define regPCIEMSIX_VECT204_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT204_CONTROL 0x1a333
+#define regPCIEMSIX_VECT204_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT205_ADDR_LO 0x1a334
+#define regPCIEMSIX_VECT205_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT205_ADDR_HI 0x1a335
+#define regPCIEMSIX_VECT205_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT205_MSG_DATA 0x1a336
+#define regPCIEMSIX_VECT205_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT205_CONTROL 0x1a337
+#define regPCIEMSIX_VECT205_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT206_ADDR_LO 0x1a338
+#define regPCIEMSIX_VECT206_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT206_ADDR_HI 0x1a339
+#define regPCIEMSIX_VECT206_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT206_MSG_DATA 0x1a33a
+#define regPCIEMSIX_VECT206_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT206_CONTROL 0x1a33b
+#define regPCIEMSIX_VECT206_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT207_ADDR_LO 0x1a33c
+#define regPCIEMSIX_VECT207_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT207_ADDR_HI 0x1a33d
+#define regPCIEMSIX_VECT207_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT207_MSG_DATA 0x1a33e
+#define regPCIEMSIX_VECT207_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT207_CONTROL 0x1a33f
+#define regPCIEMSIX_VECT207_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT208_ADDR_LO 0x1a340
+#define regPCIEMSIX_VECT208_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT208_ADDR_HI 0x1a341
+#define regPCIEMSIX_VECT208_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT208_MSG_DATA 0x1a342
+#define regPCIEMSIX_VECT208_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT208_CONTROL 0x1a343
+#define regPCIEMSIX_VECT208_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT209_ADDR_LO 0x1a344
+#define regPCIEMSIX_VECT209_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT209_ADDR_HI 0x1a345
+#define regPCIEMSIX_VECT209_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT209_MSG_DATA 0x1a346
+#define regPCIEMSIX_VECT209_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT209_CONTROL 0x1a347
+#define regPCIEMSIX_VECT209_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT210_ADDR_LO 0x1a348
+#define regPCIEMSIX_VECT210_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT210_ADDR_HI 0x1a349
+#define regPCIEMSIX_VECT210_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT210_MSG_DATA 0x1a34a
+#define regPCIEMSIX_VECT210_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT210_CONTROL 0x1a34b
+#define regPCIEMSIX_VECT210_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT211_ADDR_LO 0x1a34c
+#define regPCIEMSIX_VECT211_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT211_ADDR_HI 0x1a34d
+#define regPCIEMSIX_VECT211_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT211_MSG_DATA 0x1a34e
+#define regPCIEMSIX_VECT211_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT211_CONTROL 0x1a34f
+#define regPCIEMSIX_VECT211_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT212_ADDR_LO 0x1a350
+#define regPCIEMSIX_VECT212_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT212_ADDR_HI 0x1a351
+#define regPCIEMSIX_VECT212_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT212_MSG_DATA 0x1a352
+#define regPCIEMSIX_VECT212_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT212_CONTROL 0x1a353
+#define regPCIEMSIX_VECT212_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT213_ADDR_LO 0x1a354
+#define regPCIEMSIX_VECT213_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT213_ADDR_HI 0x1a355
+#define regPCIEMSIX_VECT213_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT213_MSG_DATA 0x1a356
+#define regPCIEMSIX_VECT213_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT213_CONTROL 0x1a357
+#define regPCIEMSIX_VECT213_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT214_ADDR_LO 0x1a358
+#define regPCIEMSIX_VECT214_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT214_ADDR_HI 0x1a359
+#define regPCIEMSIX_VECT214_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT214_MSG_DATA 0x1a35a
+#define regPCIEMSIX_VECT214_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT214_CONTROL 0x1a35b
+#define regPCIEMSIX_VECT214_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT215_ADDR_LO 0x1a35c
+#define regPCIEMSIX_VECT215_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT215_ADDR_HI 0x1a35d
+#define regPCIEMSIX_VECT215_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT215_MSG_DATA 0x1a35e
+#define regPCIEMSIX_VECT215_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT215_CONTROL 0x1a35f
+#define regPCIEMSIX_VECT215_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT216_ADDR_LO 0x1a360
+#define regPCIEMSIX_VECT216_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT216_ADDR_HI 0x1a361
+#define regPCIEMSIX_VECT216_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT216_MSG_DATA 0x1a362
+#define regPCIEMSIX_VECT216_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT216_CONTROL 0x1a363
+#define regPCIEMSIX_VECT216_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT217_ADDR_LO 0x1a364
+#define regPCIEMSIX_VECT217_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT217_ADDR_HI 0x1a365
+#define regPCIEMSIX_VECT217_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT217_MSG_DATA 0x1a366
+#define regPCIEMSIX_VECT217_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT217_CONTROL 0x1a367
+#define regPCIEMSIX_VECT217_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT218_ADDR_LO 0x1a368
+#define regPCIEMSIX_VECT218_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT218_ADDR_HI 0x1a369
+#define regPCIEMSIX_VECT218_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT218_MSG_DATA 0x1a36a
+#define regPCIEMSIX_VECT218_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT218_CONTROL 0x1a36b
+#define regPCIEMSIX_VECT218_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT219_ADDR_LO 0x1a36c
+#define regPCIEMSIX_VECT219_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT219_ADDR_HI 0x1a36d
+#define regPCIEMSIX_VECT219_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT219_MSG_DATA 0x1a36e
+#define regPCIEMSIX_VECT219_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT219_CONTROL 0x1a36f
+#define regPCIEMSIX_VECT219_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT220_ADDR_LO 0x1a370
+#define regPCIEMSIX_VECT220_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT220_ADDR_HI 0x1a371
+#define regPCIEMSIX_VECT220_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT220_MSG_DATA 0x1a372
+#define regPCIEMSIX_VECT220_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT220_CONTROL 0x1a373
+#define regPCIEMSIX_VECT220_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT221_ADDR_LO 0x1a374
+#define regPCIEMSIX_VECT221_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT221_ADDR_HI 0x1a375
+#define regPCIEMSIX_VECT221_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT221_MSG_DATA 0x1a376
+#define regPCIEMSIX_VECT221_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT221_CONTROL 0x1a377
+#define regPCIEMSIX_VECT221_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT222_ADDR_LO 0x1a378
+#define regPCIEMSIX_VECT222_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT222_ADDR_HI 0x1a379
+#define regPCIEMSIX_VECT222_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT222_MSG_DATA 0x1a37a
+#define regPCIEMSIX_VECT222_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT222_CONTROL 0x1a37b
+#define regPCIEMSIX_VECT222_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT223_ADDR_LO 0x1a37c
+#define regPCIEMSIX_VECT223_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT223_ADDR_HI 0x1a37d
+#define regPCIEMSIX_VECT223_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT223_MSG_DATA 0x1a37e
+#define regPCIEMSIX_VECT223_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT223_CONTROL 0x1a37f
+#define regPCIEMSIX_VECT223_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT224_ADDR_LO 0x1a380
+#define regPCIEMSIX_VECT224_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT224_ADDR_HI 0x1a381
+#define regPCIEMSIX_VECT224_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT224_MSG_DATA 0x1a382
+#define regPCIEMSIX_VECT224_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT224_CONTROL 0x1a383
+#define regPCIEMSIX_VECT224_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT225_ADDR_LO 0x1a384
+#define regPCIEMSIX_VECT225_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT225_ADDR_HI 0x1a385
+#define regPCIEMSIX_VECT225_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT225_MSG_DATA 0x1a386
+#define regPCIEMSIX_VECT225_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT225_CONTROL 0x1a387
+#define regPCIEMSIX_VECT225_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT226_ADDR_LO 0x1a388
+#define regPCIEMSIX_VECT226_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT226_ADDR_HI 0x1a389
+#define regPCIEMSIX_VECT226_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT226_MSG_DATA 0x1a38a
+#define regPCIEMSIX_VECT226_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT226_CONTROL 0x1a38b
+#define regPCIEMSIX_VECT226_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT227_ADDR_LO 0x1a38c
+#define regPCIEMSIX_VECT227_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT227_ADDR_HI 0x1a38d
+#define regPCIEMSIX_VECT227_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT227_MSG_DATA 0x1a38e
+#define regPCIEMSIX_VECT227_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT227_CONTROL 0x1a38f
+#define regPCIEMSIX_VECT227_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT228_ADDR_LO 0x1a390
+#define regPCIEMSIX_VECT228_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT228_ADDR_HI 0x1a391
+#define regPCIEMSIX_VECT228_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT228_MSG_DATA 0x1a392
+#define regPCIEMSIX_VECT228_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT228_CONTROL 0x1a393
+#define regPCIEMSIX_VECT228_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT229_ADDR_LO 0x1a394
+#define regPCIEMSIX_VECT229_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT229_ADDR_HI 0x1a395
+#define regPCIEMSIX_VECT229_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT229_MSG_DATA 0x1a396
+#define regPCIEMSIX_VECT229_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT229_CONTROL 0x1a397
+#define regPCIEMSIX_VECT229_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT230_ADDR_LO 0x1a398
+#define regPCIEMSIX_VECT230_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT230_ADDR_HI 0x1a399
+#define regPCIEMSIX_VECT230_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT230_MSG_DATA 0x1a39a
+#define regPCIEMSIX_VECT230_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT230_CONTROL 0x1a39b
+#define regPCIEMSIX_VECT230_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT231_ADDR_LO 0x1a39c
+#define regPCIEMSIX_VECT231_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT231_ADDR_HI 0x1a39d
+#define regPCIEMSIX_VECT231_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT231_MSG_DATA 0x1a39e
+#define regPCIEMSIX_VECT231_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT231_CONTROL 0x1a39f
+#define regPCIEMSIX_VECT231_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT232_ADDR_LO 0x1a3a0
+#define regPCIEMSIX_VECT232_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT232_ADDR_HI 0x1a3a1
+#define regPCIEMSIX_VECT232_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT232_MSG_DATA 0x1a3a2
+#define regPCIEMSIX_VECT232_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT232_CONTROL 0x1a3a3
+#define regPCIEMSIX_VECT232_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT233_ADDR_LO 0x1a3a4
+#define regPCIEMSIX_VECT233_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT233_ADDR_HI 0x1a3a5
+#define regPCIEMSIX_VECT233_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT233_MSG_DATA 0x1a3a6
+#define regPCIEMSIX_VECT233_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT233_CONTROL 0x1a3a7
+#define regPCIEMSIX_VECT233_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT234_ADDR_LO 0x1a3a8
+#define regPCIEMSIX_VECT234_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT234_ADDR_HI 0x1a3a9
+#define regPCIEMSIX_VECT234_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT234_MSG_DATA 0x1a3aa
+#define regPCIEMSIX_VECT234_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT234_CONTROL 0x1a3ab
+#define regPCIEMSIX_VECT234_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT235_ADDR_LO 0x1a3ac
+#define regPCIEMSIX_VECT235_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT235_ADDR_HI 0x1a3ad
+#define regPCIEMSIX_VECT235_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT235_MSG_DATA 0x1a3ae
+#define regPCIEMSIX_VECT235_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT235_CONTROL 0x1a3af
+#define regPCIEMSIX_VECT235_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT236_ADDR_LO 0x1a3b0
+#define regPCIEMSIX_VECT236_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT236_ADDR_HI 0x1a3b1
+#define regPCIEMSIX_VECT236_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT236_MSG_DATA 0x1a3b2
+#define regPCIEMSIX_VECT236_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT236_CONTROL 0x1a3b3
+#define regPCIEMSIX_VECT236_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT237_ADDR_LO 0x1a3b4
+#define regPCIEMSIX_VECT237_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT237_ADDR_HI 0x1a3b5
+#define regPCIEMSIX_VECT237_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT237_MSG_DATA 0x1a3b6
+#define regPCIEMSIX_VECT237_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT237_CONTROL 0x1a3b7
+#define regPCIEMSIX_VECT237_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT238_ADDR_LO 0x1a3b8
+#define regPCIEMSIX_VECT238_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT238_ADDR_HI 0x1a3b9
+#define regPCIEMSIX_VECT238_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT238_MSG_DATA 0x1a3ba
+#define regPCIEMSIX_VECT238_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT238_CONTROL 0x1a3bb
+#define regPCIEMSIX_VECT238_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT239_ADDR_LO 0x1a3bc
+#define regPCIEMSIX_VECT239_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT239_ADDR_HI 0x1a3bd
+#define regPCIEMSIX_VECT239_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT239_MSG_DATA 0x1a3be
+#define regPCIEMSIX_VECT239_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT239_CONTROL 0x1a3bf
+#define regPCIEMSIX_VECT239_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT240_ADDR_LO 0x1a3c0
+#define regPCIEMSIX_VECT240_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT240_ADDR_HI 0x1a3c1
+#define regPCIEMSIX_VECT240_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT240_MSG_DATA 0x1a3c2
+#define regPCIEMSIX_VECT240_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT240_CONTROL 0x1a3c3
+#define regPCIEMSIX_VECT240_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT241_ADDR_LO 0x1a3c4
+#define regPCIEMSIX_VECT241_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT241_ADDR_HI 0x1a3c5
+#define regPCIEMSIX_VECT241_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT241_MSG_DATA 0x1a3c6
+#define regPCIEMSIX_VECT241_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT241_CONTROL 0x1a3c7
+#define regPCIEMSIX_VECT241_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT242_ADDR_LO 0x1a3c8
+#define regPCIEMSIX_VECT242_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT242_ADDR_HI 0x1a3c9
+#define regPCIEMSIX_VECT242_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT242_MSG_DATA 0x1a3ca
+#define regPCIEMSIX_VECT242_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT242_CONTROL 0x1a3cb
+#define regPCIEMSIX_VECT242_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT243_ADDR_LO 0x1a3cc
+#define regPCIEMSIX_VECT243_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT243_ADDR_HI 0x1a3cd
+#define regPCIEMSIX_VECT243_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT243_MSG_DATA 0x1a3ce
+#define regPCIEMSIX_VECT243_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT243_CONTROL 0x1a3cf
+#define regPCIEMSIX_VECT243_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT244_ADDR_LO 0x1a3d0
+#define regPCIEMSIX_VECT244_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT244_ADDR_HI 0x1a3d1
+#define regPCIEMSIX_VECT244_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT244_MSG_DATA 0x1a3d2
+#define regPCIEMSIX_VECT244_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT244_CONTROL 0x1a3d3
+#define regPCIEMSIX_VECT244_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT245_ADDR_LO 0x1a3d4
+#define regPCIEMSIX_VECT245_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT245_ADDR_HI 0x1a3d5
+#define regPCIEMSIX_VECT245_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT245_MSG_DATA 0x1a3d6
+#define regPCIEMSIX_VECT245_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT245_CONTROL 0x1a3d7
+#define regPCIEMSIX_VECT245_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT246_ADDR_LO 0x1a3d8
+#define regPCIEMSIX_VECT246_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT246_ADDR_HI 0x1a3d9
+#define regPCIEMSIX_VECT246_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT246_MSG_DATA 0x1a3da
+#define regPCIEMSIX_VECT246_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT246_CONTROL 0x1a3db
+#define regPCIEMSIX_VECT246_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT247_ADDR_LO 0x1a3dc
+#define regPCIEMSIX_VECT247_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT247_ADDR_HI 0x1a3dd
+#define regPCIEMSIX_VECT247_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT247_MSG_DATA 0x1a3de
+#define regPCIEMSIX_VECT247_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT247_CONTROL 0x1a3df
+#define regPCIEMSIX_VECT247_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT248_ADDR_LO 0x1a3e0
+#define regPCIEMSIX_VECT248_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT248_ADDR_HI 0x1a3e1
+#define regPCIEMSIX_VECT248_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT248_MSG_DATA 0x1a3e2
+#define regPCIEMSIX_VECT248_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT248_CONTROL 0x1a3e3
+#define regPCIEMSIX_VECT248_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT249_ADDR_LO 0x1a3e4
+#define regPCIEMSIX_VECT249_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT249_ADDR_HI 0x1a3e5
+#define regPCIEMSIX_VECT249_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT249_MSG_DATA 0x1a3e6
+#define regPCIEMSIX_VECT249_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT249_CONTROL 0x1a3e7
+#define regPCIEMSIX_VECT249_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT250_ADDR_LO 0x1a3e8
+#define regPCIEMSIX_VECT250_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT250_ADDR_HI 0x1a3e9
+#define regPCIEMSIX_VECT250_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT250_MSG_DATA 0x1a3ea
+#define regPCIEMSIX_VECT250_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT250_CONTROL 0x1a3eb
+#define regPCIEMSIX_VECT250_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT251_ADDR_LO 0x1a3ec
+#define regPCIEMSIX_VECT251_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT251_ADDR_HI 0x1a3ed
+#define regPCIEMSIX_VECT251_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT251_MSG_DATA 0x1a3ee
+#define regPCIEMSIX_VECT251_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT251_CONTROL 0x1a3ef
+#define regPCIEMSIX_VECT251_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT252_ADDR_LO 0x1a3f0
+#define regPCIEMSIX_VECT252_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT252_ADDR_HI 0x1a3f1
+#define regPCIEMSIX_VECT252_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT252_MSG_DATA 0x1a3f2
+#define regPCIEMSIX_VECT252_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT252_CONTROL 0x1a3f3
+#define regPCIEMSIX_VECT252_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT253_ADDR_LO 0x1a3f4
+#define regPCIEMSIX_VECT253_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT253_ADDR_HI 0x1a3f5
+#define regPCIEMSIX_VECT253_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT253_MSG_DATA 0x1a3f6
+#define regPCIEMSIX_VECT253_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT253_CONTROL 0x1a3f7
+#define regPCIEMSIX_VECT253_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT254_ADDR_LO 0x1a3f8
+#define regPCIEMSIX_VECT254_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT254_ADDR_HI 0x1a3f9
+#define regPCIEMSIX_VECT254_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT254_MSG_DATA 0x1a3fa
+#define regPCIEMSIX_VECT254_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT254_CONTROL 0x1a3fb
+#define regPCIEMSIX_VECT254_CONTROL_BASE_IDX 8
+#define regPCIEMSIX_VECT255_ADDR_LO 0x1a3fc
+#define regPCIEMSIX_VECT255_ADDR_LO_BASE_IDX 8
+#define regPCIEMSIX_VECT255_ADDR_HI 0x1a3fd
+#define regPCIEMSIX_VECT255_ADDR_HI_BASE_IDX 8
+#define regPCIEMSIX_VECT255_MSG_DATA 0x1a3fe
+#define regPCIEMSIX_VECT255_MSG_DATA_BASE_IDX 8
+#define regPCIEMSIX_VECT255_CONTROL 0x1a3ff
+#define regPCIEMSIX_VECT255_CONTROL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_pciemsix_0_usb_MSIXPDEC
+// base address: 0x10169000
+#define regPCIEMSIX_PBA_0 0x1a400
+#define regPCIEMSIX_PBA_0_BASE_IDX 8
+#define regPCIEMSIX_PBA_1 0x1a401
+#define regPCIEMSIX_PBA_1_BASE_IDX 8
+#define regPCIEMSIX_PBA_2 0x1a402
+#define regPCIEMSIX_PBA_2_BASE_IDX 8
+#define regPCIEMSIX_PBA_3 0x1a403
+#define regPCIEMSIX_PBA_3_BASE_IDX 8
+#define regPCIEMSIX_PBA_4 0x1a404
+#define regPCIEMSIX_PBA_4_BASE_IDX 8
+#define regPCIEMSIX_PBA_5 0x1a405
+#define regPCIEMSIX_PBA_5_BASE_IDX 8
+#define regPCIEMSIX_PBA_6 0x1a406
+#define regPCIEMSIX_PBA_6_BASE_IDX 8
+#define regPCIEMSIX_PBA_7 0x1a407
+#define regPCIEMSIX_PBA_7_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_swus_SUMDEC
+// base address: 0x1013b000
+#define regSUM_INDEX 0xec38
+#define regSUM_INDEX_BASE_IDX 8
+#define regSUM_DATA 0xec39
+#define regSUM_DATA_BASE_IDX 8
+#define regSUM_INDEX_HI 0xec3b
+#define regSUM_INDEX_HI_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_strap_rcc_strap_internal
+// base address: 0x10100000
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP0 0xc400
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP0_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP1 0xc401
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP1_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP2 0xc402
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP2_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP3 0xc403
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP3_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP4 0xc404
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP4_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP5 0xc405
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP5_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP6 0xc406
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP6_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP7 0xc407
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP7_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP8 0xc408
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP8_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP9 0xc409
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP9_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP10 0xc40a
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP10_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP11 0xc40b
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP11_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP12 0xc40c
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP12_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP13 0xc40d
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP13_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP14 0xc40e
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP14_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP0 0xc480
+#define regRCC_DEV1_PORT_STRAP0_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP1 0xc481
+#define regRCC_DEV1_PORT_STRAP1_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP2 0xc482
+#define regRCC_DEV1_PORT_STRAP2_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP3 0xc483
+#define regRCC_DEV1_PORT_STRAP3_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP4 0xc484
+#define regRCC_DEV1_PORT_STRAP4_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP5 0xc485
+#define regRCC_DEV1_PORT_STRAP5_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP6 0xc486
+#define regRCC_DEV1_PORT_STRAP6_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP7 0xc487
+#define regRCC_DEV1_PORT_STRAP7_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP8 0xc488
+#define regRCC_DEV1_PORT_STRAP8_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP9 0xc489
+#define regRCC_DEV1_PORT_STRAP9_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP10 0xc48a
+#define regRCC_DEV1_PORT_STRAP10_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP11 0xc48b
+#define regRCC_DEV1_PORT_STRAP11_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP12 0xc48c
+#define regRCC_DEV1_PORT_STRAP12_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP13 0xc48d
+#define regRCC_DEV1_PORT_STRAP13_BASE_IDX 8
+#define regRCC_DEV1_PORT_STRAP14 0xc48e
+#define regRCC_DEV1_PORT_STRAP14_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP0 0xc500
+#define regRCC_DEV2_PORT_STRAP0_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP1 0xc501
+#define regRCC_DEV2_PORT_STRAP1_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP2 0xc502
+#define regRCC_DEV2_PORT_STRAP2_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP3 0xc503
+#define regRCC_DEV2_PORT_STRAP3_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP4 0xc504
+#define regRCC_DEV2_PORT_STRAP4_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP5 0xc505
+#define regRCC_DEV2_PORT_STRAP5_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP6 0xc506
+#define regRCC_DEV2_PORT_STRAP6_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP7 0xc507
+#define regRCC_DEV2_PORT_STRAP7_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP8 0xc508
+#define regRCC_DEV2_PORT_STRAP8_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP9 0xc509
+#define regRCC_DEV2_PORT_STRAP9_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP10 0xc50a
+#define regRCC_DEV2_PORT_STRAP10_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP11 0xc50b
+#define regRCC_DEV2_PORT_STRAP11_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP12 0xc50c
+#define regRCC_DEV2_PORT_STRAP12_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP13 0xc50d
+#define regRCC_DEV2_PORT_STRAP13_BASE_IDX 8
+#define regRCC_DEV2_PORT_STRAP14 0xc50e
+#define regRCC_DEV2_PORT_STRAP14_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP0 0xc600
+#define regRCC_STRAP1_RCC_BIF_STRAP0_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP1 0xc601
+#define regRCC_STRAP1_RCC_BIF_STRAP1_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP2 0xc602
+#define regRCC_STRAP1_RCC_BIF_STRAP2_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP3 0xc603
+#define regRCC_STRAP1_RCC_BIF_STRAP3_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP4 0xc604
+#define regRCC_STRAP1_RCC_BIF_STRAP4_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP5 0xc605
+#define regRCC_STRAP1_RCC_BIF_STRAP5_BASE_IDX 8
+#define regRCC_STRAP1_RCC_BIF_STRAP6 0xc606
+#define regRCC_STRAP1_RCC_BIF_STRAP6_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP0 0xd000
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP0_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP1 0xd001
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP1_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP2 0xd002
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP2_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP3 0xd003
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP3_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP4 0xd004
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP4_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP5 0xd005
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP5_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP8 0xd008
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP8_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP9 0xd009
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP9_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP13 0xd00d
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP13_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP14 0xd00e
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP14_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP15 0xd00f
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP15_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP16 0xd010
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP16_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP17 0xd011
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP17_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP18 0xd012
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP18_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP26 0xd01a
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP26_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP0 0xd080
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP0_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP2 0xd082
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP2_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP3 0xd083
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP3_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP4 0xd084
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP4_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP5 0xd085
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP5_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP6 0xd086
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP6_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP7 0xd087
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP7_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP20 0xd094
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP20_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP21 0xd095
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP21_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP22 0xd096
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP22_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP23 0xd097
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP23_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP24 0xd098
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP24_BASE_IDX 8
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP25 0xd099
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP25_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP0 0xd100
+#define regRCC_DEV0_EPF2_STRAP0_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP2 0xd102
+#define regRCC_DEV0_EPF2_STRAP2_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP3 0xd103
+#define regRCC_DEV0_EPF2_STRAP3_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP4 0xd104
+#define regRCC_DEV0_EPF2_STRAP4_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP5 0xd105
+#define regRCC_DEV0_EPF2_STRAP5_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP6 0xd106
+#define regRCC_DEV0_EPF2_STRAP6_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP7 0xd107
+#define regRCC_DEV0_EPF2_STRAP7_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP10 0xd10a
+#define regRCC_DEV0_EPF2_STRAP10_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP11 0xd10b
+#define regRCC_DEV0_EPF2_STRAP11_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP12 0xd10c
+#define regRCC_DEV0_EPF2_STRAP12_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP13 0xd10d
+#define regRCC_DEV0_EPF2_STRAP13_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP14 0xd10e
+#define regRCC_DEV0_EPF2_STRAP14_BASE_IDX 8
+#define regRCC_DEV0_EPF2_STRAP20 0xd114
+#define regRCC_DEV0_EPF2_STRAP20_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP0 0xd180
+#define regRCC_DEV0_EPF3_STRAP0_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP2 0xd182
+#define regRCC_DEV0_EPF3_STRAP2_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP3 0xd183
+#define regRCC_DEV0_EPF3_STRAP3_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP4 0xd184
+#define regRCC_DEV0_EPF3_STRAP4_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP5 0xd185
+#define regRCC_DEV0_EPF3_STRAP5_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP6 0xd186
+#define regRCC_DEV0_EPF3_STRAP6_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP7 0xd187
+#define regRCC_DEV0_EPF3_STRAP7_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP10 0xd18a
+#define regRCC_DEV0_EPF3_STRAP10_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP11 0xd18b
+#define regRCC_DEV0_EPF3_STRAP11_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP12 0xd18c
+#define regRCC_DEV0_EPF3_STRAP12_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP13 0xd18d
+#define regRCC_DEV0_EPF3_STRAP13_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP14 0xd18e
+#define regRCC_DEV0_EPF3_STRAP14_BASE_IDX 8
+#define regRCC_DEV0_EPF3_STRAP20 0xd194
+#define regRCC_DEV0_EPF3_STRAP20_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP0 0xd200
+#define regRCC_DEV0_EPF4_STRAP0_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP2 0xd202
+#define regRCC_DEV0_EPF4_STRAP2_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP3 0xd203
+#define regRCC_DEV0_EPF4_STRAP3_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP4 0xd204
+#define regRCC_DEV0_EPF4_STRAP4_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP5 0xd205
+#define regRCC_DEV0_EPF4_STRAP5_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP6 0xd206
+#define regRCC_DEV0_EPF4_STRAP6_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP7 0xd207
+#define regRCC_DEV0_EPF4_STRAP7_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP13 0xd20d
+#define regRCC_DEV0_EPF4_STRAP13_BASE_IDX 8
+#define regRCC_DEV0_EPF4_STRAP14 0xd20e
+#define regRCC_DEV0_EPF4_STRAP14_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP0 0xd280
+#define regRCC_DEV0_EPF5_STRAP0_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP2 0xd282
+#define regRCC_DEV0_EPF5_STRAP2_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP3 0xd283
+#define regRCC_DEV0_EPF5_STRAP3_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP4 0xd284
+#define regRCC_DEV0_EPF5_STRAP4_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP5 0xd285
+#define regRCC_DEV0_EPF5_STRAP5_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP6 0xd286
+#define regRCC_DEV0_EPF5_STRAP6_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP7 0xd287
+#define regRCC_DEV0_EPF5_STRAP7_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP13 0xd28d
+#define regRCC_DEV0_EPF5_STRAP13_BASE_IDX 8
+#define regRCC_DEV0_EPF5_STRAP14 0xd28e
+#define regRCC_DEV0_EPF5_STRAP14_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP0 0xd300
+#define regRCC_DEV0_EPF6_STRAP0_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP2 0xd302
+#define regRCC_DEV0_EPF6_STRAP2_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP3 0xd303
+#define regRCC_DEV0_EPF6_STRAP3_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP4 0xd304
+#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP5 0xd305
+#define regRCC_DEV0_EPF6_STRAP5_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP6 0xd306
+#define regRCC_DEV0_EPF6_STRAP6_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP13 0xd30d
+#define regRCC_DEV0_EPF6_STRAP13_BASE_IDX 8
+#define regRCC_DEV0_EPF6_STRAP14 0xd30e
+#define regRCC_DEV0_EPF6_STRAP14_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP0 0xd380
+#define regRCC_DEV0_EPF7_STRAP0_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP2 0xd382
+#define regRCC_DEV0_EPF7_STRAP2_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP3 0xd383
+#define regRCC_DEV0_EPF7_STRAP3_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP4 0xd384
+#define regRCC_DEV0_EPF7_STRAP4_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP5 0xd385
+#define regRCC_DEV0_EPF7_STRAP5_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP6 0xd386
+#define regRCC_DEV0_EPF7_STRAP6_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP7 0xd387
+#define regRCC_DEV0_EPF7_STRAP7_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP13 0xd38d
+#define regRCC_DEV0_EPF7_STRAP13_BASE_IDX 8
+#define regRCC_DEV0_EPF7_STRAP14 0xd38e
+#define regRCC_DEV0_EPF7_STRAP14_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP0 0xd400
+#define regRCC_DEV1_EPF0_STRAP0_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP2 0xd402
+#define regRCC_DEV1_EPF0_STRAP2_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP3 0xd403
+#define regRCC_DEV1_EPF0_STRAP3_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP4 0xd404
+#define regRCC_DEV1_EPF0_STRAP4_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP5 0xd405
+#define regRCC_DEV1_EPF0_STRAP5_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP6 0xd406
+#define regRCC_DEV1_EPF0_STRAP6_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP7 0xd407
+#define regRCC_DEV1_EPF0_STRAP7_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP13 0xd40d
+#define regRCC_DEV1_EPF0_STRAP13_BASE_IDX 8
+#define regRCC_DEV1_EPF0_STRAP14 0xd40e
+#define regRCC_DEV1_EPF0_STRAP14_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP0 0xd480
+#define regRCC_DEV1_EPF1_STRAP0_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP2 0xd482
+#define regRCC_DEV1_EPF1_STRAP2_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP3 0xd483
+#define regRCC_DEV1_EPF1_STRAP3_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP4 0xd484
+#define regRCC_DEV1_EPF1_STRAP4_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP5 0xd485
+#define regRCC_DEV1_EPF1_STRAP5_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP6 0xd486
+#define regRCC_DEV1_EPF1_STRAP6_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP7 0xd487
+#define regRCC_DEV1_EPF1_STRAP7_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP13 0xd48d
+#define regRCC_DEV1_EPF1_STRAP13_BASE_IDX 8
+#define regRCC_DEV1_EPF1_STRAP14 0xd48e
+#define regRCC_DEV1_EPF1_STRAP14_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP0 0xd800
+#define regRCC_DEV2_EPF0_STRAP0_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP2 0xd802
+#define regRCC_DEV2_EPF0_STRAP2_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP3 0xd803
+#define regRCC_DEV2_EPF0_STRAP3_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP4 0xd804
+#define regRCC_DEV2_EPF0_STRAP4_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP5 0xd805
+#define regRCC_DEV2_EPF0_STRAP5_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP6 0xd806
+#define regRCC_DEV2_EPF0_STRAP6_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP7 0xd807
+#define regRCC_DEV2_EPF0_STRAP7_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP13 0xd80d
+#define regRCC_DEV2_EPF0_STRAP13_BASE_IDX 8
+#define regRCC_DEV2_EPF0_STRAP14 0xd80e
+#define regRCC_DEV2_EPF0_STRAP14_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP0 0xd880
+#define regRCC_DEV2_EPF1_STRAP0_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP2 0xd882
+#define regRCC_DEV2_EPF1_STRAP2_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP3 0xd883
+#define regRCC_DEV2_EPF1_STRAP3_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP4 0xd884
+#define regRCC_DEV2_EPF1_STRAP4_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP5 0xd885
+#define regRCC_DEV2_EPF1_STRAP5_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP6 0xd886
+#define regRCC_DEV2_EPF1_STRAP6_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP13 0xd88d
+#define regRCC_DEV2_EPF1_STRAP13_BASE_IDX 8
+#define regRCC_DEV2_EPF1_STRAP14 0xd88e
+#define regRCC_DEV2_EPF1_STRAP14_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP0 0xd900
+#define regRCC_DEV2_EPF2_STRAP0_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP2 0xd902
+#define regRCC_DEV2_EPF2_STRAP2_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP3 0xd903
+#define regRCC_DEV2_EPF2_STRAP3_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP4 0xd904
+#define regRCC_DEV2_EPF2_STRAP4_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP5 0xd905
+#define regRCC_DEV2_EPF2_STRAP5_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP6 0xd906
+#define regRCC_DEV2_EPF2_STRAP6_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP13 0xd90d
+#define regRCC_DEV2_EPF2_STRAP13_BASE_IDX 8
+#define regRCC_DEV2_EPF2_STRAP14 0xd90e
+#define regRCC_DEV2_EPF2_STRAP14_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_rst_bif_rst_regblk
+// base address: 0x10100000
+#define regHARD_RST_CTRL 0xe000
+#define regHARD_RST_CTRL_BASE_IDX 8
+#define regSELF_SOFT_RST 0xe002
+#define regSELF_SOFT_RST_BASE_IDX 8
+#define regBIF_GFX_DRV_VPU_RST 0xe003
+#define regBIF_GFX_DRV_VPU_RST_BASE_IDX 8
+#define regBIF_RST_MISC_CTRL 0xe004
+#define regBIF_RST_MISC_CTRL_BASE_IDX 8
+#define regBIF_RST_MISC_CTRL2 0xe005
+#define regBIF_RST_MISC_CTRL2_BASE_IDX 8
+#define regBIF_RST_MISC_CTRL3 0xe006
+#define regBIF_RST_MISC_CTRL3_BASE_IDX 8
+#define regDEV0_PF0_FLR_RST_CTRL 0xe008
+#define regDEV0_PF0_FLR_RST_CTRL_BASE_IDX 8
+#define regDEV0_PF1_FLR_RST_CTRL 0xe009
+#define regDEV0_PF1_FLR_RST_CTRL_BASE_IDX 8
+#define regBIF_INST_RESET_INTR_STS 0xe010
+#define regBIF_INST_RESET_INTR_STS_BASE_IDX 8
+#define regBIF_PF_FLR_INTR_STS 0xe011
+#define regBIF_PF_FLR_INTR_STS_BASE_IDX 8
+#define regBIF_D3HOTD0_INTR_STS 0xe012
+#define regBIF_D3HOTD0_INTR_STS_BASE_IDX 8
+#define regBIF_POWER_INTR_STS 0xe014
+#define regBIF_POWER_INTR_STS_BASE_IDX 8
+#define regBIF_PF_DSTATE_INTR_STS 0xe015
+#define regBIF_PF_DSTATE_INTR_STS_BASE_IDX 8
+#define regSELF_SOFT_RST_2 0xe016
+#define regSELF_SOFT_RST_2_BASE_IDX 8
+#define regBIF_INST_RESET_INTR_MASK 0xe020
+#define regBIF_INST_RESET_INTR_MASK_BASE_IDX 8
+#define regBIF_PF_FLR_INTR_MASK 0xe021
+#define regBIF_PF_FLR_INTR_MASK_BASE_IDX 8
+#define regBIF_D3HOTD0_INTR_MASK 0xe022
+#define regBIF_D3HOTD0_INTR_MASK_BASE_IDX 8
+#define regBIF_POWER_INTR_MASK 0xe024
+#define regBIF_POWER_INTR_MASK_BASE_IDX 8
+#define regBIF_PF_DSTATE_INTR_MASK 0xe025
+#define regBIF_PF_DSTATE_INTR_MASK_BASE_IDX 8
+#define regBIF_PF_FLR_RST 0xe040
+#define regBIF_PF_FLR_RST_BASE_IDX 8
+#define regBIF_DEV0_PF0_DSTATE_VALUE 0xe050
+#define regBIF_DEV0_PF0_DSTATE_VALUE_BASE_IDX 8
+#define regBIF_DEV0_PF1_DSTATE_VALUE 0xe051
+#define regBIF_DEV0_PF1_DSTATE_VALUE_BASE_IDX 8
+#define regDEV0_PF0_D3HOTD0_RST_CTRL 0xe078
+#define regDEV0_PF0_D3HOTD0_RST_CTRL_BASE_IDX 8
+#define regDEV0_PF1_D3HOTD0_RST_CTRL 0xe079
+#define regDEV0_PF1_D3HOTD0_RST_CTRL_BASE_IDX 8
+#define regBIF_PORT0_DSTATE_VALUE 0xe230
+#define regBIF_PORT0_DSTATE_VALUE_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_misc_bif_misc_regblk
+// base address: 0x10100000
+#define regREGS_ROM_OFFSET_CTRL 0xcc23
+#define regREGS_ROM_OFFSET_CTRL_BASE_IDX 8
+#define regNBIF_STRAP_BIOS_CNTL 0xcc81
+#define regNBIF_STRAP_BIOS_CNTL_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_0 0xcd00
+#define regDOORBELL0_CTRL_ENTRY_0_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_1 0xcd01
+#define regDOORBELL0_CTRL_ENTRY_1_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_2 0xcd02
+#define regDOORBELL0_CTRL_ENTRY_2_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_3 0xcd03
+#define regDOORBELL0_CTRL_ENTRY_3_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_4 0xcd04
+#define regDOORBELL0_CTRL_ENTRY_4_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_5 0xcd05
+#define regDOORBELL0_CTRL_ENTRY_5_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_6 0xcd06
+#define regDOORBELL0_CTRL_ENTRY_6_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_7 0xcd07
+#define regDOORBELL0_CTRL_ENTRY_7_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_8 0xcd08
+#define regDOORBELL0_CTRL_ENTRY_8_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_9 0xcd09
+#define regDOORBELL0_CTRL_ENTRY_9_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_10 0xcd0a
+#define regDOORBELL0_CTRL_ENTRY_10_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_11 0xcd0b
+#define regDOORBELL0_CTRL_ENTRY_11_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_12 0xcd0c
+#define regDOORBELL0_CTRL_ENTRY_12_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_13 0xcd0d
+#define regDOORBELL0_CTRL_ENTRY_13_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_14 0xcd0e
+#define regDOORBELL0_CTRL_ENTRY_14_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_15 0xcd0f
+#define regDOORBELL0_CTRL_ENTRY_15_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_16 0xcd10
+#define regDOORBELL0_CTRL_ENTRY_16_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_17 0xcd11
+#define regDOORBELL0_CTRL_ENTRY_17_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_18 0xcd12
+#define regDOORBELL0_CTRL_ENTRY_18_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_19 0xcd13
+#define regDOORBELL0_CTRL_ENTRY_19_BASE_IDX 8
+#define regDOORBELL0_CTRL_ENTRY_20 0xcd14
+#define regDOORBELL0_CTRL_ENTRY_20_BASE_IDX 8
+#define regAID0_VF0_BASE_ADDR 0xcd40
+#define regAID0_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF0_BASE_ADDR 0xcd41
+#define regAID1_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF0_BASE_ADDR 0xcd42
+#define regAID2_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF0_BASE_ADDR 0xcd43
+#define regAID3_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF0_BASE_ADDR 0xcd44
+#define regAID0_XCC0_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF0_BASE_ADDR 0xcd45
+#define regAID0_XCC1_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF0_BASE_ADDR 0xcd46
+#define regAID1_XCC0_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF0_BASE_ADDR 0xcd47
+#define regAID1_XCC1_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF0_BASE_ADDR 0xcd48
+#define regAID2_XCC0_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF0_BASE_ADDR 0xcd49
+#define regAID2_XCC1_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF0_BASE_ADDR 0xcd4a
+#define regAID3_XCC0_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF0_BASE_ADDR 0xcd4b
+#define regAID3_XCC1_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF0_BASE_ADDR 0xcd4c
+#define regAID0_NBIF_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF0_BASE_ADDR 0xcd4d
+#define regAID0_ATHUB_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF0_BASE_ADDR 0xcd4e
+#define regAID0_IH_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF0_BASE_ADDR 0xcd4f
+#define regAID0_HDP_VF0_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF1_BASE_ADDR 0xcd50
+#define regAID0_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF1_BASE_ADDR 0xcd51
+#define regAID1_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF1_BASE_ADDR 0xcd52
+#define regAID2_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF1_BASE_ADDR 0xcd53
+#define regAID3_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF1_BASE_ADDR 0xcd54
+#define regAID0_XCC0_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF1_BASE_ADDR 0xcd55
+#define regAID0_XCC1_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF1_BASE_ADDR 0xcd56
+#define regAID1_XCC0_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF1_BASE_ADDR 0xcd57
+#define regAID1_XCC1_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF1_BASE_ADDR 0xcd58
+#define regAID2_XCC0_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF1_BASE_ADDR 0xcd59
+#define regAID2_XCC1_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF1_BASE_ADDR 0xcd5a
+#define regAID3_XCC0_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF1_BASE_ADDR 0xcd5b
+#define regAID3_XCC1_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF1_BASE_ADDR 0xcd5c
+#define regAID0_NBIF_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF1_BASE_ADDR 0xcd5d
+#define regAID0_ATHUB_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF1_BASE_ADDR 0xcd5e
+#define regAID0_IH_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF1_BASE_ADDR 0xcd5f
+#define regAID0_HDP_VF1_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF2_BASE_ADDR 0xcd60
+#define regAID0_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF2_BASE_ADDR 0xcd61
+#define regAID1_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF2_BASE_ADDR 0xcd62
+#define regAID2_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF2_BASE_ADDR 0xcd63
+#define regAID3_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF2_BASE_ADDR 0xcd64
+#define regAID0_XCC0_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF2_BASE_ADDR 0xcd65
+#define regAID0_XCC1_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF2_BASE_ADDR 0xcd66
+#define regAID1_XCC0_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF2_BASE_ADDR 0xcd67
+#define regAID1_XCC1_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF2_BASE_ADDR 0xcd68
+#define regAID2_XCC0_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF2_BASE_ADDR 0xcd69
+#define regAID2_XCC1_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF2_BASE_ADDR 0xcd6a
+#define regAID3_XCC0_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF2_BASE_ADDR 0xcd6b
+#define regAID3_XCC1_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF2_BASE_ADDR 0xcd6c
+#define regAID0_NBIF_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF2_BASE_ADDR 0xcd6d
+#define regAID0_ATHUB_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF2_BASE_ADDR 0xcd6e
+#define regAID0_IH_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF2_BASE_ADDR 0xcd6f
+#define regAID0_HDP_VF2_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF3_BASE_ADDR 0xcd70
+#define regAID0_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF3_BASE_ADDR 0xcd71
+#define regAID1_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF3_BASE_ADDR 0xcd72
+#define regAID2_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF3_BASE_ADDR 0xcd73
+#define regAID3_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF3_BASE_ADDR 0xcd74
+#define regAID0_XCC0_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF3_BASE_ADDR 0xcd75
+#define regAID0_XCC1_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF3_BASE_ADDR 0xcd76
+#define regAID1_XCC0_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF3_BASE_ADDR 0xcd77
+#define regAID1_XCC1_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF3_BASE_ADDR 0xcd78
+#define regAID2_XCC0_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF3_BASE_ADDR 0xcd79
+#define regAID2_XCC1_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF3_BASE_ADDR 0xcd7a
+#define regAID3_XCC0_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF3_BASE_ADDR 0xcd7b
+#define regAID3_XCC1_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF3_BASE_ADDR 0xcd7c
+#define regAID0_NBIF_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF3_BASE_ADDR 0xcd7d
+#define regAID0_ATHUB_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF3_BASE_ADDR 0xcd7e
+#define regAID0_IH_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF3_BASE_ADDR 0xcd7f
+#define regAID0_HDP_VF3_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF4_BASE_ADDR 0xcd80
+#define regAID0_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF4_BASE_ADDR 0xcd81
+#define regAID1_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF4_BASE_ADDR 0xcd82
+#define regAID2_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF4_BASE_ADDR 0xcd83
+#define regAID3_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF4_BASE_ADDR 0xcd84
+#define regAID0_XCC0_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF4_BASE_ADDR 0xcd85
+#define regAID0_XCC1_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF4_BASE_ADDR 0xcd86
+#define regAID1_XCC0_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF4_BASE_ADDR 0xcd87
+#define regAID1_XCC1_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF4_BASE_ADDR 0xcd88
+#define regAID2_XCC0_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF4_BASE_ADDR 0xcd89
+#define regAID2_XCC1_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF4_BASE_ADDR 0xcd8a
+#define regAID3_XCC0_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF4_BASE_ADDR 0xcd8b
+#define regAID3_XCC1_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF4_BASE_ADDR 0xcd8c
+#define regAID0_NBIF_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF4_BASE_ADDR 0xcd8d
+#define regAID0_ATHUB_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF4_BASE_ADDR 0xcd8e
+#define regAID0_IH_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF4_BASE_ADDR 0xcd8f
+#define regAID0_HDP_VF4_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF5_BASE_ADDR 0xcd90
+#define regAID0_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF5_BASE_ADDR 0xcd91
+#define regAID1_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF5_BASE_ADDR 0xcd92
+#define regAID2_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF5_BASE_ADDR 0xcd93
+#define regAID3_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF5_BASE_ADDR 0xcd94
+#define regAID0_XCC0_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF5_BASE_ADDR 0xcd95
+#define regAID0_XCC1_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF5_BASE_ADDR 0xcd96
+#define regAID1_XCC0_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF5_BASE_ADDR 0xcd97
+#define regAID1_XCC1_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF5_BASE_ADDR 0xcd98
+#define regAID2_XCC0_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF5_BASE_ADDR 0xcd99
+#define regAID2_XCC1_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF5_BASE_ADDR 0xcd9a
+#define regAID3_XCC0_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF5_BASE_ADDR 0xcd9b
+#define regAID3_XCC1_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF5_BASE_ADDR 0xcd9c
+#define regAID0_NBIF_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF5_BASE_ADDR 0xcd9d
+#define regAID0_ATHUB_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF5_BASE_ADDR 0xcd9e
+#define regAID0_IH_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF5_BASE_ADDR 0xcd9f
+#define regAID0_HDP_VF5_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF6_BASE_ADDR 0xcda0
+#define regAID0_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF6_BASE_ADDR 0xcda1
+#define regAID1_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF6_BASE_ADDR 0xcda2
+#define regAID2_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF6_BASE_ADDR 0xcda3
+#define regAID3_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF6_BASE_ADDR 0xcda4
+#define regAID0_XCC0_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF6_BASE_ADDR 0xcda5
+#define regAID0_XCC1_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF6_BASE_ADDR 0xcda6
+#define regAID1_XCC0_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF6_BASE_ADDR 0xcda7
+#define regAID1_XCC1_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF6_BASE_ADDR 0xcda8
+#define regAID2_XCC0_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF6_BASE_ADDR 0xcda9
+#define regAID2_XCC1_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF6_BASE_ADDR 0xcdaa
+#define regAID3_XCC0_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF6_BASE_ADDR 0xcdab
+#define regAID3_XCC1_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF6_BASE_ADDR 0xcdac
+#define regAID0_NBIF_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF6_BASE_ADDR 0xcdad
+#define regAID0_ATHUB_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF6_BASE_ADDR 0xcdae
+#define regAID0_IH_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF6_BASE_ADDR 0xcdaf
+#define regAID0_HDP_VF6_BASE_ADDR_BASE_IDX 8
+#define regAID0_VF7_BASE_ADDR 0xcdb0
+#define regAID0_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID1_VF7_BASE_ADDR 0xcdb1
+#define regAID1_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID2_VF7_BASE_ADDR 0xcdb2
+#define regAID2_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID3_VF7_BASE_ADDR 0xcdb3
+#define regAID3_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_VF7_BASE_ADDR 0xcdb4
+#define regAID0_XCC0_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_VF7_BASE_ADDR 0xcdb5
+#define regAID0_XCC1_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_VF7_BASE_ADDR 0xcdb6
+#define regAID1_XCC0_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_VF7_BASE_ADDR 0xcdb7
+#define regAID1_XCC1_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_VF7_BASE_ADDR 0xcdb8
+#define regAID2_XCC0_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_VF7_BASE_ADDR 0xcdb9
+#define regAID2_XCC1_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_VF7_BASE_ADDR 0xcdba
+#define regAID3_XCC0_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_VF7_BASE_ADDR 0xcdbb
+#define regAID3_XCC1_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_NBIF_VF7_BASE_ADDR 0xcdbc
+#define regAID0_NBIF_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_ATHUB_VF7_BASE_ADDR 0xcdbd
+#define regAID0_ATHUB_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_IH_VF7_BASE_ADDR 0xcdbe
+#define regAID0_IH_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_HDP_VF7_BASE_ADDR 0xcdbf
+#define regAID0_HDP_VF7_BASE_ADDR_BASE_IDX 8
+#define regAID0_PF_BASE_ADDR 0xcdc0
+#define regAID0_PF_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC0_PF_BASE_ADDR 0xcdc1
+#define regAID0_XCC0_PF_BASE_ADDR_BASE_IDX 8
+#define regAID0_XCC1_PF_BASE_ADDR 0xcdc2
+#define regAID0_XCC1_PF_BASE_ADDR_BASE_IDX 8
+#define regAID1_PF_BASE_ADDR 0xcdc3
+#define regAID1_PF_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC0_PF_BASE_ADDR 0xcdc4
+#define regAID1_XCC0_PF_BASE_ADDR_BASE_IDX 8
+#define regAID1_XCC1_PF_BASE_ADDR 0xcdc5
+#define regAID1_XCC1_PF_BASE_ADDR_BASE_IDX 8
+#define regAID2_PF_BASE_ADDR 0xcdc6
+#define regAID2_PF_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC0_PF_BASE_ADDR 0xcdc7
+#define regAID2_XCC0_PF_BASE_ADDR_BASE_IDX 8
+#define regAID2_XCC1_PF_BASE_ADDR 0xcdc8
+#define regAID2_XCC1_PF_BASE_ADDR_BASE_IDX 8
+#define regAID3_PF_BASE_ADDR 0xcdc9
+#define regAID3_PF_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC0_PF_BASE_ADDR 0xcdca
+#define regAID3_XCC0_PF_BASE_ADDR_BASE_IDX 8
+#define regAID3_XCC1_PF_BASE_ADDR 0xcdcb
+#define regAID3_XCC1_PF_BASE_ADDR_BASE_IDX 8
+#define regNBIF_RRMT_CNTL 0xcddc
+#define regNBIF_RRMT_CNTL_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_PF 0xcf6e
+#define regBIFC_DOORBELL_ACCESS_EN_PF_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF0 0xcf6f
+#define regBIFC_DOORBELL_ACCESS_EN_VF0_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF1 0xcf70
+#define regBIFC_DOORBELL_ACCESS_EN_VF1_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF2 0xcf71
+#define regBIFC_DOORBELL_ACCESS_EN_VF2_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF3 0xcf72
+#define regBIFC_DOORBELL_ACCESS_EN_VF3_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF4 0xcf73
+#define regBIFC_DOORBELL_ACCESS_EN_VF4_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF5 0xcf74
+#define regBIFC_DOORBELL_ACCESS_EN_VF5_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF6 0xcf75
+#define regBIFC_DOORBELL_ACCESS_EN_VF6_BASE_IDX 8
+#define regBIFC_DOORBELL_ACCESS_EN_VF7 0xcf76
+#define regBIFC_DOORBELL_ACCESS_EN_VF7_BASE_IDX 8
+#define regMISC_SCRATCH 0xe800
+#define regMISC_SCRATCH_BASE_IDX 8
+#define regINTR_LINE_POLARITY 0xe801
+#define regINTR_LINE_POLARITY_BASE_IDX 8
+#define regINTR_LINE_ENABLE 0xe802
+#define regINTR_LINE_ENABLE_BASE_IDX 8
+#define regOUTSTANDING_VC_ALLOC 0xe803
+#define regOUTSTANDING_VC_ALLOC_BASE_IDX 8
+#define regBIFC_MISC_CTRL0 0xe804
+#define regBIFC_MISC_CTRL0_BASE_IDX 8
+#define regBIFC_MISC_CTRL1 0xe805
+#define regBIFC_MISC_CTRL1_BASE_IDX 8
+#define regBIFC_BME_ERR_LOG_LB 0xe806
+#define regBIFC_BME_ERR_LOG_LB_BASE_IDX 8
+#define regBIFC_LC_TIMER_CTRL 0xe807
+#define regBIFC_LC_TIMER_CTRL_BASE_IDX 8
+#define regBIFC_RCCBIH_BME_ERR_LOG0 0xe808
+#define regBIFC_RCCBIH_BME_ERR_LOG0_BASE_IDX 8
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1 0xe80a
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1_BASE_IDX 8
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3 0xe80b
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3_BASE_IDX 8
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5 0xe80c
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5_BASE_IDX 8
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7 0xe80d
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7_BASE_IDX 8
+#define regBIFC_DMA_ATTR_CNTL2_DEV0 0xe81a
+#define regBIFC_DMA_ATTR_CNTL2_DEV0_BASE_IDX 8
+#define regBME_DUMMY_CNTL_0 0xe825
+#define regBME_DUMMY_CNTL_0_BASE_IDX 8
+#define regBIFC_THT_CNTL 0xe827
+#define regBIFC_THT_CNTL_BASE_IDX 8
+#define regBIFC_HSTARB_CNTL 0xe828
+#define regBIFC_HSTARB_CNTL_BASE_IDX 8
+#define regBIFC_GSI_CNTL 0xe829
+#define regBIFC_GSI_CNTL_BASE_IDX 8
+#define regBIFC_PCIEFUNC_CNTL 0xe82a
+#define regBIFC_PCIEFUNC_CNTL_BASE_IDX 8
+#define regBIFC_PASID_CHECK_DIS 0xe82b
+#define regBIFC_PASID_CHECK_DIS_BASE_IDX 8
+#define regBIFC_SDP_CNTL_0 0xe82c
+#define regBIFC_SDP_CNTL_0_BASE_IDX 8
+#define regBIFC_SDP_CNTL_1 0xe82d
+#define regBIFC_SDP_CNTL_1_BASE_IDX 8
+#define regBIFC_PASID_STS 0xe82e
+#define regBIFC_PASID_STS_BASE_IDX 8
+#define regBIFC_ATHUB_ACT_CNTL 0xe82f
+#define regBIFC_ATHUB_ACT_CNTL_BASE_IDX 8
+#define regBIFC_PERF_CNTL_0 0xe830
+#define regBIFC_PERF_CNTL_0_BASE_IDX 8
+#define regBIFC_PERF_CNTL_1 0xe831
+#define regBIFC_PERF_CNTL_1_BASE_IDX 8
+#define regBIFC_PERF_CNT_MMIO_RD_L32BIT 0xe832
+#define regBIFC_PERF_CNT_MMIO_RD_L32BIT_BASE_IDX 8
+#define regBIFC_PERF_CNT_MMIO_WR_L32BIT 0xe833
+#define regBIFC_PERF_CNT_MMIO_WR_L32BIT_BASE_IDX 8
+#define regBIFC_PERF_CNT_DMA_RD_L32BIT 0xe834
+#define regBIFC_PERF_CNT_DMA_RD_L32BIT_BASE_IDX 8
+#define regBIFC_PERF_CNT_DMA_WR_L32BIT 0xe835
+#define regBIFC_PERF_CNT_DMA_WR_L32BIT_BASE_IDX 8
+#define regNBIF_REGIF_ERRSET_CTRL 0xe836
+#define regNBIF_REGIF_ERRSET_CTRL_BASE_IDX 8
+#define regBIFC_SDP_CNTL_2 0xe837
+#define regBIFC_SDP_CNTL_2_BASE_IDX 8
+#define regNBIF_PGMST_CTRL 0xe838
+#define regNBIF_PGMST_CTRL_BASE_IDX 8
+#define regNBIF_PGSLV_CTRL 0xe839
+#define regNBIF_PGSLV_CTRL_BASE_IDX 8
+#define regNBIF_PG_MISC_CTRL 0xe83a
+#define regNBIF_PG_MISC_CTRL_BASE_IDX 8
+#define regSMN_MST_EP_CNTL3 0xe83c
+#define regSMN_MST_EP_CNTL3_BASE_IDX 8
+#define regSMN_MST_EP_CNTL4 0xe83d
+#define regSMN_MST_EP_CNTL4_BASE_IDX 8
+#define regSMN_MST_CNTL1 0xe83e
+#define regSMN_MST_CNTL1_BASE_IDX 8
+#define regSMN_MST_EP_CNTL5 0xe83f
+#define regSMN_MST_EP_CNTL5_BASE_IDX 8
+#define regBIF_SELFRING_BUFFER_VID 0xe840
+#define regBIF_SELFRING_BUFFER_VID_BASE_IDX 8
+#define regBIF_SELFRING_VECTOR_CNTL 0xe841
+#define regBIF_SELFRING_VECTOR_CNTL_BASE_IDX 8
+#define regNBIF_STRAP_WRITE_CTRL 0xe845
+#define regNBIF_STRAP_WRITE_CTRL_BASE_IDX 8
+#define regNBIF_INTX_DSTATE_MISC_CNTL 0xe846
+#define regNBIF_INTX_DSTATE_MISC_CNTL_BASE_IDX 8
+#define regNBIF_PENDING_MISC_CNTL 0xe847
+#define regNBIF_PENDING_MISC_CNTL_BASE_IDX 8
+#define regBIF_GMI_WRR_WEIGHT 0xe848
+#define regBIF_GMI_WRR_WEIGHT_BASE_IDX 8
+#define regBIF_GMI_WRR_WEIGHT2 0xe849
+#define regBIF_GMI_WRR_WEIGHT2_BASE_IDX 8
+#define regBIF_GMI_WRR_WEIGHT3 0xe84a
+#define regBIF_GMI_WRR_WEIGHT3_BASE_IDX 8
+#define regNBIF_PWRBRK_REQUEST 0xe84c
+#define regNBIF_PWRBRK_REQUEST_BASE_IDX 8
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F0 0xe850
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F0_BASE_IDX 8
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F1 0xe851
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F1_BASE_IDX 8
+#define regBIF_DMA_MP4_ERR_LOG 0xe870
+#define regBIF_DMA_MP4_ERR_LOG_BASE_IDX 8
+#define regBIF_PASID_ERR_LOG 0xe871
+#define regBIF_PASID_ERR_LOG_BASE_IDX 8
+#define regBIF_PASID_ERR_CLR 0xe872
+#define regBIF_PASID_ERR_CLR_BASE_IDX 8
+#define regNBIF_VWIRE_CTRL 0xe880
+#define regNBIF_VWIRE_CTRL_BASE_IDX 8
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL 0xe881
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL_BASE_IDX 8
+#define regNBIF_SMN_VWR_VCHG_RST_CTRL0 0xe882
+#define regNBIF_SMN_VWR_VCHG_RST_CTRL0_BASE_IDX 8
+#define regNBIF_SMN_VWR_VCHG_TRIG 0xe884
+#define regNBIF_SMN_VWR_VCHG_TRIG_BASE_IDX 8
+#define regNBIF_SMN_VWR_WTRIG_CNTL 0xe885
+#define regNBIF_SMN_VWR_WTRIG_CNTL_BASE_IDX 8
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL_1 0xe886
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL_1_BASE_IDX 8
+#define regNBIF_MGCG_CTRL_LCLK 0xe887
+#define regNBIF_MGCG_CTRL_LCLK_BASE_IDX 8
+#define regNBIF_DS_CTRL_LCLK 0xe888
+#define regNBIF_DS_CTRL_LCLK_BASE_IDX 8
+#define regSMN_MST_CNTL0 0xe889
+#define regSMN_MST_CNTL0_BASE_IDX 8
+#define regSMN_MST_EP_CNTL1 0xe88a
+#define regSMN_MST_EP_CNTL1_BASE_IDX 8
+#define regSMN_MST_EP_CNTL2 0xe88b
+#define regSMN_MST_EP_CNTL2_BASE_IDX 8
+#define regNBIF_SDP_VWR_VCHG_DIS_CTRL 0xe88c
+#define regNBIF_SDP_VWR_VCHG_DIS_CTRL_BASE_IDX 8
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL0 0xe88d
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL0_BASE_IDX 8
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL1 0xe88e
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL1_BASE_IDX 8
+#define regNBIF_SDP_VWR_VCHG_TRIG 0xe88f
+#define regNBIF_SDP_VWR_VCHG_TRIG_BASE_IDX 8
+#define regNBIF_SHUB_TODET_CTRL 0xe898
+#define regNBIF_SHUB_TODET_CTRL_BASE_IDX 8
+#define regNBIF_SHUB_TODET_CLIENT_CTRL 0xe899
+#define regNBIF_SHUB_TODET_CLIENT_CTRL_BASE_IDX 8
+#define regNBIF_SHUB_TODET_CLIENT_STATUS 0xe89a
+#define regNBIF_SHUB_TODET_CLIENT_STATUS_BASE_IDX 8
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL 0xe89b
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL_BASE_IDX 8
+#define regNBIF_SHUB_TODET_CLIENT_CTRL2 0xe89c
+#define regNBIF_SHUB_TODET_CLIENT_CTRL2_BASE_IDX 8
+#define regNBIF_SHUB_TODET_CLIENT_STATUS2 0xe89d
+#define regNBIF_SHUB_TODET_CLIENT_STATUS2_BASE_IDX 8
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL2 0xe89e
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL2_BASE_IDX 8
+#define regBIFC_BME_ERR_LOG_HB 0xe8ab
+#define regBIFC_BME_ERR_LOG_HB_BASE_IDX 8
+#define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC 0xe8c0
+#define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC_BASE_IDX 8
+#define regBIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC 0xe8c1
+#define regBIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC_BASE_IDX 8
+#define regBIFC_GMI_SDP_REQ_POOLCRED_ALLOC 0xe8c2
+#define regBIFC_GMI_SDP_REQ_POOLCRED_ALLOC_BASE_IDX 8
+#define regBIFC_GMI_SDP_DAT_POOLCRED_ALLOC 0xe8c3
+#define regBIFC_GMI_SDP_DAT_POOLCRED_ALLOC_BASE_IDX 8
+#define regDISCON_HYSTERESIS_HEAD_CTRL 0xe8c6
+#define regDISCON_HYSTERESIS_HEAD_CTRL_BASE_IDX 8
+#define regBIFC_EARLY_WAKEUP_CNTL 0xe8d2
+#define regBIFC_EARLY_WAKEUP_CNTL_BASE_IDX 8
+#define regBIFC_PERF_CNT_MMIO_RD_H16BIT 0xe8f0
+#define regBIFC_PERF_CNT_MMIO_RD_H16BIT_BASE_IDX 8
+#define regBIFC_PERF_CNT_MMIO_WR_H16BIT 0xe8f1
+#define regBIFC_PERF_CNT_MMIO_WR_H16BIT_BASE_IDX 8
+#define regBIFC_PERF_CNT_DMA_RD_H16BIT 0xe8f2
+#define regBIFC_PERF_CNT_DMA_RD_H16BIT_BASE_IDX 8
+#define regBIFC_PERF_CNT_DMA_WR_H16BIT 0xe8f3
+#define regBIFC_PERF_CNT_DMA_WR_H16BIT_BASE_IDX 8
+#define regBIFC_A2S_SDP_PORT_CTRL 0xeb00
+#define regBIFC_A2S_SDP_PORT_CTRL_BASE_IDX 8
+#define regBIFC_A2S_CNTL_SW0 0xeb01
+#define regBIFC_A2S_CNTL_SW0_BASE_IDX 8
+#define regBIFC_A2S_MISC_CNTL 0xeb02
+#define regBIFC_A2S_MISC_CNTL_BASE_IDX 8
+#define regBIFC_A2S_TAG_ALLOC_0 0xeb03
+#define regBIFC_A2S_TAG_ALLOC_0_BASE_IDX 8
+#define regBIFC_A2S_TAG_ALLOC_1 0xeb04
+#define regBIFC_A2S_TAG_ALLOC_1_BASE_IDX 8
+#define regBIFC_A2S_CNTL_CL0 0xeb05
+#define regBIFC_A2S_CNTL_CL0_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwn_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_DWN_DEV0_2_DN_PCIE_RESERVED 0x8d80
+#define regRCC_DWN_DEV0_2_DN_PCIE_RESERVED_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_SCRATCH 0x8d81
+#define regRCC_DWN_DEV0_2_DN_PCIE_SCRATCH_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_CNTL 0x8d83
+#define regRCC_DWN_DEV0_2_DN_PCIE_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL 0x8d84
+#define regRCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2 0x8d85
+#define regRCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL 0x8d86
+#define regRCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL 0x8d87
+#define regRCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_F0 0x8d88
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_F0_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC 0x8d89
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC_BASE_IDX 8
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2 0x8d8a
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_DWNP_DEV0_2_PCIE_ERR_CNTL 0x8d8c
+#define regRCC_DWNP_DEV0_2_PCIE_ERR_CNTL_BASE_IDX 8
+#define regRCC_DWNP_DEV0_2_PCIE_RX_CNTL 0x8d8d
+#define regRCC_DWNP_DEV0_2_PCIE_RX_CNTL_BASE_IDX 8
+#define regRCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL 0x8d8e
+#define regRCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL_BASE_IDX 8
+#define regRCC_DWNP_DEV0_2_PCIE_LC_CNTL2 0x8d8f
+#define regRCC_DWNP_DEV0_2_PCIE_LC_CNTL2_BASE_IDX 8
+#define regRCC_DWNP_DEV0_2_PCIEP_STRAP_MISC 0x8d90
+#define regRCC_DWNP_DEV0_2_PCIEP_STRAP_MISC_BASE_IDX 8
+#define regRCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP 0x8d91
+#define regRCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_ep_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_EP_DEV0_2_EP_PCIE_SCRATCH 0x8d60
+#define regRCC_EP_DEV0_2_EP_PCIE_SCRATCH_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_CNTL 0x8d62
+#define regRCC_EP_DEV0_2_EP_PCIE_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_CNTL 0x8d63
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_STATUS 0x8d64
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_STATUS_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL2 0x8d65
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL2_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_BUS_CNTL 0x8d66
+#define regRCC_EP_DEV0_2_EP_PCIE_BUS_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_CFG_CNTL 0x8d67
+#define regRCC_EP_DEV0_2_EP_PCIE_CFG_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL 0x8d69
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 0x8d6a
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 0x8d6a
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 0x8d6a
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 0x8d6a
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 8
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC 0x8d6c
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2 0x8d6d
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP 0x8d6f
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR 0x8d70
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL 0x8d70
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x8d70
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x8d71
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x8d71
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x8d71
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x8d71
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 8
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_PME_CONTROL 0x8d72
+#define regRCC_EP_DEV0_2_EP_PCIE_PME_CONTROL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIEP_RESERVED 0x8d73
+#define regRCC_EP_DEV0_2_EP_PCIEP_RESERVED_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_CNTL 0x8d75
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID 0x8d76
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_ERR_CNTL 0x8d77
+#define regRCC_EP_DEV0_2_EP_PCIE_ERR_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL 0x8d78
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL_BASE_IDX 8
+#define regRCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL 0x8d79
+#define regRCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_DEV0_1_RCC_ERR_INT_CNTL 0x8da6
+#define regRCC_DEV0_1_RCC_ERR_INT_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_BACO_CNTL_MISC 0x8da7
+#define regRCC_DEV0_1_RCC_BACO_CNTL_MISC_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_RESET_EN 0x8da8
+#define regRCC_DEV0_1_RCC_RESET_EN_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_VDM_SUPPORT 0x8da9
+#define regRCC_DEV0_2_RCC_VDM_SUPPORT_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0 0x8daa
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1 0x8dab
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_GPUIOV_REGION 0x8dac
+#define regRCC_DEV0_1_RCC_GPUIOV_REGION_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_GPU_HOSTVM_EN 0x8dad
+#define regRCC_DEV0_1_RCC_GPU_HOSTVM_EN_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL 0x8dae
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET 0x8daf
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE 0x8daf
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE0 0x8dde
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE0_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE1 0x8ddf
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE1_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_BUS_CNTL 0x8de1
+#define regRCC_DEV0_2_RCC_BUS_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONFIG_CNTL 0x8de2
+#define regRCC_DEV0_1_RCC_CONFIG_CNTL_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONFIG_F0_BASE 0x8de6
+#define regRCC_DEV0_1_RCC_CONFIG_F0_BASE_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONFIG_APER_SIZE 0x8de7
+#define regRCC_DEV0_1_RCC_CONFIG_APER_SIZE_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE 0x8de8
+#define regRCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_XDMA_LO 0x8de9
+#define regRCC_DEV0_1_RCC_XDMA_LO_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_XDMA_HI 0x8dea
+#define regRCC_DEV0_1_RCC_XDMA_HI_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_FEATURES_CONTROL_MISC 0x8deb
+#define regRCC_DEV0_2_RCC_FEATURES_CONTROL_MISC_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL1 0x8dec
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL1_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST0 0x8ded
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST0_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST1 0x8dee
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST1_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL2 0x8def
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL2_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM 0x8df0
+#define regRCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_HOST_BUSNUM 0x8df1
+#define regRCC_DEV0_1_RCC_HOST_BUSNUM_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI 0x8df2
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO 0x8df3
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI 0x8df4
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO 0x8df5
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI 0x8df6
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO 0x8df7
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI 0x8df8
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO 0x8df9
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST0 0x8dfa
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST0_BASE_IDX 8
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST1 0x8dfb
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST1_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_DEV0_LINK_CNTL 0x8dfd
+#define regRCC_DEV0_2_RCC_DEV0_LINK_CNTL_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_CMN_LINK_CNTL 0x8dfe
+#define regRCC_DEV0_2_RCC_CMN_LINK_CNTL_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE 0x8dff
+#define regRCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_LTR_LSWITCH_CNTL 0x8e00
+#define regRCC_DEV0_2_RCC_LTR_LSWITCH_CNTL_BASE_IDX 8
+#define regRCC_DEV0_2_RCC_MH_ARB_CNTL 0x8e01
+#define regRCC_DEV0_2_RCC_MH_ARB_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_SYSDEC
+// base address: 0x10120000
+#define regBIF_BX1_PCIE_INDEX 0x800c
+#define regBIF_BX1_PCIE_INDEX_BASE_IDX 8
+#define regBIF_BX1_PCIE_DATA 0x800d
+#define regBIF_BX1_PCIE_DATA_BASE_IDX 8
+#define regBIF_BX1_PCIE_INDEX2 0x800e
+#define regBIF_BX1_PCIE_INDEX2_BASE_IDX 8
+#define regBIF_BX1_PCIE_DATA2 0x800f
+#define regBIF_BX1_PCIE_DATA2_BASE_IDX 8
+#define regBIF_BX1_PCIE_INDEX_HI 0x8010
+#define regBIF_BX1_PCIE_INDEX_HI_BASE_IDX 8
+#define regBIF_BX1_PCIE_INDEX2_HI 0x8011
+#define regBIF_BX1_PCIE_INDEX2_HI_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_0 0x8048
+#define regBIF_BX1_SBIOS_SCRATCH_0_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_1 0x8049
+#define regBIF_BX1_SBIOS_SCRATCH_1_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_2 0x804a
+#define regBIF_BX1_SBIOS_SCRATCH_2_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_3 0x804b
+#define regBIF_BX1_SBIOS_SCRATCH_3_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_0 0x804c
+#define regBIF_BX1_BIOS_SCRATCH_0_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_1 0x804d
+#define regBIF_BX1_BIOS_SCRATCH_1_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_2 0x804e
+#define regBIF_BX1_BIOS_SCRATCH_2_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_3 0x804f
+#define regBIF_BX1_BIOS_SCRATCH_3_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_4 0x8050
+#define regBIF_BX1_BIOS_SCRATCH_4_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_5 0x8051
+#define regBIF_BX1_BIOS_SCRATCH_5_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_6 0x8052
+#define regBIF_BX1_BIOS_SCRATCH_6_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_7 0x8053
+#define regBIF_BX1_BIOS_SCRATCH_7_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_8 0x8054
+#define regBIF_BX1_BIOS_SCRATCH_8_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_9 0x8055
+#define regBIF_BX1_BIOS_SCRATCH_9_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_10 0x8056
+#define regBIF_BX1_BIOS_SCRATCH_10_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_11 0x8057
+#define regBIF_BX1_BIOS_SCRATCH_11_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_12 0x8058
+#define regBIF_BX1_BIOS_SCRATCH_12_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_13 0x8059
+#define regBIF_BX1_BIOS_SCRATCH_13_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_14 0x805a
+#define regBIF_BX1_BIOS_SCRATCH_14_BASE_IDX 8
+#define regBIF_BX1_BIOS_SCRATCH_15 0x805b
+#define regBIF_BX1_BIOS_SCRATCH_15_BASE_IDX 8
+#define regBIF_BX1_BIF_RLC_INTR_CNTL 0x8060
+#define regBIF_BX1_BIF_RLC_INTR_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_VCE_INTR_CNTL 0x8061
+#define regBIF_BX1_BIF_VCE_INTR_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_UVD_INTR_CNTL 0x8062
+#define regBIF_BX1_BIF_UVD_INTR_CNTL_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR0 0x8080
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR0_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0 0x8081
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR1 0x8082
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR1_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1 0x8083
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR2 0x8084
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR2_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2 0x8085
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR3 0x8086
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR3_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3 0x8087
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR4 0x8088
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR4_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4 0x8089
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR5 0x808a
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR5_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5 0x808b
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR6 0x808c
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR6_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6 0x808d
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR7 0x808e
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR7_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7 0x808f
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_CNTL 0x8090
+#define regBIF_BX1_GFX_MMIOREG_CAM_CNTL_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL 0x8091
+#define regBIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_ONE_CPL 0x8092
+#define regBIF_BX1_GFX_MMIOREG_CAM_ONE_CPL_BASE_IDX 8
+#define regBIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL 0x8093
+#define regBIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_0 0x8094
+#define regBIF_BX1_DRIVER_SCRATCH_0_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_1 0x8095
+#define regBIF_BX1_DRIVER_SCRATCH_1_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_2 0x8096
+#define regBIF_BX1_DRIVER_SCRATCH_2_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_3 0x8097
+#define regBIF_BX1_DRIVER_SCRATCH_3_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_4 0x8098
+#define regBIF_BX1_DRIVER_SCRATCH_4_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_5 0x8099
+#define regBIF_BX1_DRIVER_SCRATCH_5_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_6 0x809a
+#define regBIF_BX1_DRIVER_SCRATCH_6_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_7 0x809b
+#define regBIF_BX1_DRIVER_SCRATCH_7_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_8 0x809c
+#define regBIF_BX1_DRIVER_SCRATCH_8_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_9 0x809d
+#define regBIF_BX1_DRIVER_SCRATCH_9_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_10 0x809e
+#define regBIF_BX1_DRIVER_SCRATCH_10_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_11 0x809f
+#define regBIF_BX1_DRIVER_SCRATCH_11_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_12 0x80a0
+#define regBIF_BX1_DRIVER_SCRATCH_12_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_13 0x80a1
+#define regBIF_BX1_DRIVER_SCRATCH_13_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_14 0x80a2
+#define regBIF_BX1_DRIVER_SCRATCH_14_BASE_IDX 8
+#define regBIF_BX1_DRIVER_SCRATCH_15 0x80a3
+#define regBIF_BX1_DRIVER_SCRATCH_15_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_0 0x80a4
+#define regBIF_BX1_FW_SCRATCH_0_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_1 0x80a5
+#define regBIF_BX1_FW_SCRATCH_1_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_2 0x80a6
+#define regBIF_BX1_FW_SCRATCH_2_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_3 0x80a7
+#define regBIF_BX1_FW_SCRATCH_3_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_4 0x80a8
+#define regBIF_BX1_FW_SCRATCH_4_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_5 0x80a9
+#define regBIF_BX1_FW_SCRATCH_5_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_6 0x80aa
+#define regBIF_BX1_FW_SCRATCH_6_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_7 0x80ab
+#define regBIF_BX1_FW_SCRATCH_7_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_8 0x80ac
+#define regBIF_BX1_FW_SCRATCH_8_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_9 0x80ad
+#define regBIF_BX1_FW_SCRATCH_9_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_10 0x80ae
+#define regBIF_BX1_FW_SCRATCH_10_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_11 0x80af
+#define regBIF_BX1_FW_SCRATCH_11_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_12 0x80b0
+#define regBIF_BX1_FW_SCRATCH_12_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_13 0x80b1
+#define regBIF_BX1_FW_SCRATCH_13_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_14 0x80b2
+#define regBIF_BX1_FW_SCRATCH_14_BASE_IDX 8
+#define regBIF_BX1_FW_SCRATCH_15 0x80b3
+#define regBIF_BX1_FW_SCRATCH_15_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_4 0x80b4
+#define regBIF_BX1_SBIOS_SCRATCH_4_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_5 0x80b5
+#define regBIF_BX1_SBIOS_SCRATCH_5_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_6 0x80b6
+#define regBIF_BX1_SBIOS_SCRATCH_6_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_7 0x80b7
+#define regBIF_BX1_SBIOS_SCRATCH_7_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_8 0x80b8
+#define regBIF_BX1_SBIOS_SCRATCH_8_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_9 0x80b9
+#define regBIF_BX1_SBIOS_SCRATCH_9_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_10 0x80ba
+#define regBIF_BX1_SBIOS_SCRATCH_10_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_11 0x80bb
+#define regBIF_BX1_SBIOS_SCRATCH_11_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_12 0x80bc
+#define regBIF_BX1_SBIOS_SCRATCH_12_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_13 0x80bd
+#define regBIF_BX1_SBIOS_SCRATCH_13_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_14 0x80be
+#define regBIF_BX1_SBIOS_SCRATCH_14_BASE_IDX 8
+#define regBIF_BX1_SBIOS_SCRATCH_15 0x80bf
+#define regBIF_BX1_SBIOS_SCRATCH_15_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_SYSPFVFDEC
+// base address: 0x10120000
+#define regBIF_BX_PF1_MM_INDEX 0x8000
+#define regBIF_BX_PF1_MM_INDEX_BASE_IDX 8
+#define regBIF_BX_PF1_MM_DATA 0x8001
+#define regBIF_BX_PF1_MM_DATA_BASE_IDX 8
+#define regBIF_BX_PF1_MM_INDEX_HI 0x8006
+#define regBIF_BX_PF1_MM_INDEX_HI_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_BIFDEC1
+// base address: 0x10120000
+#define regBIF_BX1_CC_BIF_BX_STRAP0 0x8e02
+#define regBIF_BX1_CC_BIF_BX_STRAP0_BASE_IDX 8
+#define regBIF_BX1_CC_BIF_BX_PINSTRAP0 0x8e04
+#define regBIF_BX1_CC_BIF_BX_PINSTRAP0_BASE_IDX 8
+#define regBIF_BX1_BIF_MM_INDACCESS_CNTL 0x8e06
+#define regBIF_BX1_BIF_MM_INDACCESS_CNTL_BASE_IDX 8
+#define regBIF_BX1_BUS_CNTL 0x8e07
+#define regBIF_BX1_BUS_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_SCRATCH0 0x8e08
+#define regBIF_BX1_BIF_SCRATCH0_BASE_IDX 8
+#define regBIF_BX1_BIF_SCRATCH1 0x8e09
+#define regBIF_BX1_BIF_SCRATCH1_BASE_IDX 8
+#define regBIF_BX1_BX_RESET_EN 0x8e0d
+#define regBIF_BX1_BX_RESET_EN_BASE_IDX 8
+#define regBIF_BX1_MM_CFGREGS_CNTL 0x8e0e
+#define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 8
+#define regBIF_BX1_BX_RESET_CNTL 0x8e10
+#define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 8
+#define regBIF_BX1_INTERRUPT_CNTL 0x8e11
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 8
+#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 8
+#define regBIF_BX1_CLKREQB_PAD_CNTL 0x8e18
+#define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x8e1b
+#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC_BASE_IDX 8
+#define regBIF_BX1_HDP_ATOMIC_CONTROL_MISC 0x8e1c
+#define regBIF_BX1_HDP_ATOMIC_CONTROL_MISC_BASE_IDX 8
+#define regBIF_BX1_BIF_DOORBELL_CNTL 0x8e1d
+#define regBIF_BX1_BIF_DOORBELL_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_DOORBELL_INT_CNTL 0x8e1e
+#define regBIF_BX1_BIF_DOORBELL_INT_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_FB_EN 0x8e20
+#define regBIF_BX1_BIF_FB_EN_BASE_IDX 8
+#define regBIF_BX1_BIF_INTR_CNTL 0x8e21
+#define regBIF_BX1_BIF_INTR_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_MST_TRANS_PENDING_VF 0x8e29
+#define regBIF_BX1_BIF_MST_TRANS_PENDING_VF_BASE_IDX 8
+#define regBIF_BX1_BIF_SLV_TRANS_PENDING_VF 0x8e2a
+#define regBIF_BX1_BIF_SLV_TRANS_PENDING_VF_BASE_IDX 8
+#define regBIF_BX1_BACO_CNTL 0x8e2b
+#define regBIF_BX1_BACO_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_BACO_EXIT_TIME0 0x8e2c
+#define regBIF_BX1_BIF_BACO_EXIT_TIME0_BASE_IDX 8
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER1 0x8e2d
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER1_BASE_IDX 8
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER2 0x8e2e
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER2_BASE_IDX 8
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER3 0x8e2f
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER3_BASE_IDX 8
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER4 0x8e30
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER4_BASE_IDX 8
+#define regBIF_BX1_MEM_TYPE_CNTL 0x8e31
+#define regBIF_BX1_MEM_TYPE_CNTL_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_CNTL 0x8e33
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_CNTL_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_0 0x8e34
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_0_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_1 0x8e35
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_1_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_2 0x8e36
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_2_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_3 0x8e37
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_3_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_4 0x8e38
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_4_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_5 0x8e39
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_5_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_6 0x8e3a
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_6_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_7 0x8e3b
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_7_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_8 0x8e3c
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_8_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_9 0x8e3d
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_9_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_10 0x8e3e
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_10_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_11 0x8e3f
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_11_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_12 0x8e40
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_12_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_13 0x8e41
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_13_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_14 0x8e42
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_14_BASE_IDX 8
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_15 0x8e43
+#define regBIF_BX1_NBIF_GFX_ADDR_LUT_15_BASE_IDX 8
+#define regBIF_BX1_VF_REGWR_EN 0x8e44
+#define regBIF_BX1_VF_REGWR_EN_BASE_IDX 8
+#define regBIF_BX1_VF_DOORBELL_EN 0x8e45
+#define regBIF_BX1_VF_DOORBELL_EN_BASE_IDX 8
+#define regBIF_BX1_VF_FB_EN 0x8e46
+#define regBIF_BX1_VF_FB_EN_BASE_IDX 8
+#define regBIF_BX1_VF_REGWR_STATUS 0x8e47
+#define regBIF_BX1_VF_REGWR_STATUS_BASE_IDX 8
+#define regBIF_BX1_VF_DOORBELL_STATUS 0x8e48
+#define regBIF_BX1_VF_DOORBELL_STATUS_BASE_IDX 8
+#define regBIF_BX1_VF_FB_STATUS 0x8e49
+#define regBIF_BX1_VF_FB_STATUS_BASE_IDX 8
+#define regBIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL 0x8e4d
+#define regBIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL_BASE_IDX 8
+#define regBIF_BX1_REMAP_HDP_REG_FLUSH_CNTL 0x8e4e
+#define regBIF_BX1_REMAP_HDP_REG_FLUSH_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_RB_CNTL 0x8e4f
+#define regBIF_BX1_BIF_RB_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_RB_BASE 0x8e50
+#define regBIF_BX1_BIF_RB_BASE_BASE_IDX 8
+#define regBIF_BX1_BIF_RB_RPTR 0x8e51
+#define regBIF_BX1_BIF_RB_RPTR_BASE_IDX 8
+#define regBIF_BX1_BIF_RB_WPTR 0x8e52
+#define regBIF_BX1_BIF_RB_WPTR_BASE_IDX 8
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_HI 0x8e53
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_HI_BASE_IDX 8
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_LO 0x8e54
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_LO_BASE_IDX 8
+#define regBIF_BX1_MAILBOX_INDEX 0x8e55
+#define regBIF_BX1_MAILBOX_INDEX_BASE_IDX 8
+#define regBIF_BX1_BIF_MP1_INTR_CTRL 0x8e62
+#define regBIF_BX1_BIF_MP1_INTR_CTRL_BASE_IDX 8
+#define regBIF_BX1_BIF_PERSTB_PAD_CNTL 0x8e66
+#define regBIF_BX1_BIF_PERSTB_PAD_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_PX_EN_PAD_CNTL 0x8e67
+#define regBIF_BX1_BIF_PX_EN_PAD_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_REFPADKIN_PAD_CNTL 0x8e68
+#define regBIF_BX1_BIF_REFPADKIN_PAD_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_CLKREQB_PAD_CNTL 0x8e69
+#define regBIF_BX1_BIF_CLKREQB_PAD_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_PWRBRK_PAD_CNTL 0x8e6a
+#define regBIF_BX1_BIF_PWRBRK_PAD_CNTL_BASE_IDX 8
+#define regBIF_BX1_BIF_VCN0_GPUIOV_CFG_SIZE 0x8e6b
+#define regBIF_BX1_BIF_VCN0_GPUIOV_CFG_SIZE_BASE_IDX 8
+#define regBIF_BX1_BIF_VCN1_GPUIOV_CFG_SIZE 0x8e6c
+#define regBIF_BX1_BIF_VCN1_GPUIOV_CFG_SIZE_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_BIFPFVFDEC1
+// base address: 0x10120000
+#define regBIF_BX_PF1_BIF_BME_STATUS 0x8e0b
+#define regBIF_BX_PF1_BIF_BME_STATUS_BASE_IDX 8
+#define regBIF_BX_PF1_BIF_ATOMIC_ERR_LOG 0x8e0c
+#define regBIF_BX_PF1_BIF_ATOMIC_ERR_LOG_BASE_IDX 8
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x8e13
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 8
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x8e14
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 8
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL 0x8e15
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 8
+#define regBIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL 0x8e16
+#define regBIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 8
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL 0x8e17
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 8
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x8e19
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 8
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x8e1a
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 8
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_REQ 0x8e26
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_REQ_BASE_IDX 8
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_DONE 0x8e27
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_DONE_BASE_IDX 8
+#define regBIF_BX_PF1_BIF_TRANS_PENDING 0x8e28
+#define regBIF_BX_PF1_BIF_TRANS_PENDING_BASE_IDX 8
+#define regBIF_BX_PF1_NBIF_GFX_ADDR_LUT_BYPASS 0x8e32
+#define regBIF_BX_PF1_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0 0x8e56
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1 0x8e57
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2 0x8e58
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3 0x8e59
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0 0x8e5a
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1 0x8e5b
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2 0x8e5c
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3 0x8e5d
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_CONTROL 0x8e5e
+#define regBIF_BX_PF1_MAILBOX_CONTROL_BASE_IDX 8
+#define regBIF_BX_PF1_MAILBOX_INT_CNTL 0x8e5f
+#define regBIF_BX_PF1_MAILBOX_INT_CNTL_BASE_IDX 8
+#define regBIF_BX_PF1_BIF_VMHV_MAILBOX 0x8e60
+#define regBIF_BX_PF1_BIF_VMHV_MAILBOX_BASE_IDX 8
+#define regBIF_BX_PF1_PARTITION_COMPUTE_CAP 0x8e81
+#define regBIF_BX_PF1_PARTITION_COMPUTE_CAP_BASE_IDX 8
+#define regBIF_BX_PF1_PARTITION_MEM_CAP 0x8e82
+#define regBIF_BX_PF1_PARTITION_MEM_CAP_BASE_IDX 8
+#define regBIF_BX_PF1_PARTITION_COMPUTE_STATUS 0x8e83
+#define regBIF_BX_PF1_PARTITION_COMPUTE_STATUS_BASE_IDX 8
+#define regBIF_BX_PF1_PARTITION_MEM_STATUS 0x8e84
+#define regBIF_BX_PF1_PARTITION_MEM_STATUS_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_rcc_strap_BIFDEC1:1
+// base address: 0x10120000
+#define regRCC_STRAP2_RCC_BIF_STRAP0 0x8d20
+#define regRCC_STRAP2_RCC_BIF_STRAP0_BASE_IDX 8
+#define regRCC_STRAP2_RCC_BIF_STRAP1 0x8d21
+#define regRCC_STRAP2_RCC_BIF_STRAP1_BASE_IDX 8
+#define regRCC_STRAP2_RCC_BIF_STRAP2 0x8d22
+#define regRCC_STRAP2_RCC_BIF_STRAP2_BASE_IDX 8
+#define regRCC_STRAP2_RCC_BIF_STRAP3 0x8d23
+#define regRCC_STRAP2_RCC_BIF_STRAP3_BASE_IDX 8
+#define regRCC_STRAP2_RCC_BIF_STRAP4 0x8d24
+#define regRCC_STRAP2_RCC_BIF_STRAP4_BASE_IDX 8
+#define regRCC_STRAP2_RCC_BIF_STRAP5 0x8d25
+#define regRCC_STRAP2_RCC_BIF_STRAP5_BASE_IDX 8
+#define regRCC_STRAP2_RCC_BIF_STRAP6 0x8d26
+#define regRCC_STRAP2_RCC_BIF_STRAP6_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP0 0x8d27
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP0_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP1 0x8d28
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP1_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP10 0x8d29
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP10_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP11 0x8d2a
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP11_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP12 0x8d2b
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP12_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP13 0x8d2c
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP13_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP14 0x8d2d
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP14_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP2 0x8d2e
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP2_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP3 0x8d2f
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP3_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP4 0x8d30
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP4_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP5 0x8d31
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP5_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP6 0x8d32
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP6_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP7 0x8d33
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP7_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP8 0x8d34
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP8_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP9 0x8d35
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP9_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP0 0x8d36
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP0_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP1 0x8d37
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP1_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP13 0x8d38
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP13_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP14 0x8d39
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP14_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP15 0x8d3a
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP15_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP16 0x8d3b
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP16_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP17 0x8d3c
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP17_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP18 0x8d3d
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP18_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP2 0x8d3e
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP2_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP26 0x8d3f
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP26_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP3 0x8d40
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP3_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP4 0x8d41
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP4_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP5 0x8d42
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP5_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP8 0x8d44
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP8_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP9 0x8d45
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP9_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP0 0x8d46
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP0_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP2 0x8d52
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP2_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP20 0x8d53
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP20_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP21 0x8d54
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP21_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP22 0x8d55
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP22_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP23 0x8d56
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP23_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP24 0x8d57
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP24_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP25 0x8d58
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP25_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP3 0x8d59
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP3_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP4 0x8d5a
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP4_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP5 0x8d5b
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP5_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP6 0x8d5c
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP6_BASE_IDX 8
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP7 0x8d5d
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP7_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_gdc_hst_sion_SIONDEC
+// base address: 0x1400000
+#define regS2A_DOORBELL_ENTRY_0_CTRL 0x7a80
+#define regS2A_DOORBELL_ENTRY_0_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_1_CTRL 0x7a81
+#define regS2A_DOORBELL_ENTRY_1_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_2_CTRL 0x7a82
+#define regS2A_DOORBELL_ENTRY_2_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_3_CTRL 0x7a83
+#define regS2A_DOORBELL_ENTRY_3_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_4_CTRL 0x7a84
+#define regS2A_DOORBELL_ENTRY_4_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_5_CTRL 0x7a85
+#define regS2A_DOORBELL_ENTRY_5_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_6_CTRL 0x7a86
+#define regS2A_DOORBELL_ENTRY_6_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_7_CTRL 0x7a87
+#define regS2A_DOORBELL_ENTRY_7_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_8_CTRL 0x7a88
+#define regS2A_DOORBELL_ENTRY_8_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_9_CTRL 0x7a89
+#define regS2A_DOORBELL_ENTRY_9_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_10_CTRL 0x7a8a
+#define regS2A_DOORBELL_ENTRY_10_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_11_CTRL 0x7a8b
+#define regS2A_DOORBELL_ENTRY_11_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_12_CTRL 0x7a8c
+#define regS2A_DOORBELL_ENTRY_12_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_13_CTRL 0x7a8d
+#define regS2A_DOORBELL_ENTRY_13_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_14_CTRL 0x7a8e
+#define regS2A_DOORBELL_ENTRY_14_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_ENTRY_15_CTRL 0x7a8f
+#define regS2A_DOORBELL_ENTRY_15_CTRL_BASE_IDX 5
+#define regS2A_DOORBELL_COMMON_CTRL_REG 0x7a90
+#define regS2A_DOORBELL_COMMON_CTRL_REG_BASE_IDX 5
+
+
+// addressBlock: aid_nbio_nbif0_gdc_GDCDEC
+// base address: 0x1400000
+#define regGDC1_A2S_CNTL_CL0 0x0ea0
+#define regGDC1_A2S_CNTL_CL0_BASE_IDX 5
+#define regGDC1_A2S_CNTL_CL1 0x0ea1
+#define regGDC1_A2S_CNTL_CL1_BASE_IDX 5
+#define regGDC1_A2S_CNTL3_CL0 0x0eb8
+#define regGDC1_A2S_CNTL3_CL0_BASE_IDX 5
+#define regGDC1_A2S_CNTL3_CL1 0x0eb9
+#define regGDC1_A2S_CNTL3_CL1_BASE_IDX 5
+#define regGDC1_A2S_CNTL_SW0 0x0ed0
+#define regGDC1_A2S_CNTL_SW0_BASE_IDX 5
+#define regGDC1_A2S_CNTL_SW1 0x0ed1
+#define regGDC1_A2S_CNTL_SW1_BASE_IDX 5
+#define regGDC1_A2S_CNTL_SW2 0x0ed2
+#define regGDC1_A2S_CNTL_SW2_BASE_IDX 5
+#define regGDC1_A2S_TAG_ALLOC_0 0x0edd
+#define regGDC1_A2S_TAG_ALLOC_0_BASE_IDX 5
+#define regGDC1_A2S_TAG_ALLOC_1 0x0ede
+#define regGDC1_A2S_TAG_ALLOC_1_BASE_IDX 5
+#define regGDC1_A2S_MISC_CNTL 0x0ee1
+#define regGDC1_A2S_MISC_CNTL_BASE_IDX 5
+#define regGDC1_SHUB_REGS_IF_CTL 0x0ee3
+#define regGDC1_SHUB_REGS_IF_CTL_BASE_IDX 5
+#define regGDC1_NGDC_MGCG_CTRL 0x0eea
+#define regGDC1_NGDC_MGCG_CTRL_BASE_IDX 5
+#define regGDC1_NGDC_RESERVED_0 0x0eeb
+#define regGDC1_NGDC_RESERVED_0_BASE_IDX 5
+#define regGDC1_NGDC_RESERVED_1 0x0eec
+#define regGDC1_NGDC_RESERVED_1_BASE_IDX 5
+#define regGDC1_NBIF_GFX_DOORBELL_STATUS 0x0eef
+#define regGDC1_NBIF_GFX_DOORBELL_STATUS_BASE_IDX 5
+#define regGDC1_ATDMA_MISC_CNTL 0x0efd
+#define regGDC1_ATDMA_MISC_CNTL_BASE_IDX 5
+#define regGDC1_S2A_MISC_CNTL 0x0eff
+#define regGDC1_S2A_MISC_CNTL_BASE_IDX 5
+#define regGDC1_NGDC_EARLY_WAKEUP_CTRL 0x0f01
+#define regGDC1_NGDC_EARLY_WAKEUP_CTRL_BASE_IDX 5
+#define regGDC1_NGDC_PG_MISC_CTRL 0x0f18
+#define regGDC1_NGDC_PG_MISC_CTRL_BASE_IDX 5
+#define regGDC1_NGDC_PGMST_CTRL 0x0f19
+#define regGDC1_NGDC_PGMST_CTRL_BASE_IDX 5
+#define regGDC1_NGDC_PGSLV_CTRL 0x0f1a
+#define regGDC1_NGDC_PGSLV_CTRL_BASE_IDX 5
+
+
+// addressBlock: aid_nbio_nbif0_gdc_sec_GDCSEC_DEC
+// base address: 0x1400000
+#define regXCC_DOORBELL_FENCE 0x740c
+#define regXCC_DOORBELL_FENCE_BASE_IDX 5
+
+
+// addressBlock: aid_nbio_nbif0_gdc_rst_GDCRST_DEC
+// base address: 0x1400000
+#define regSHUB_PF_FLR_RST 0x7c00
+#define regSHUB_PF_FLR_RST_BASE_IDX 5
+#define regSHUB_GFX_DRV_VPU_RST 0x7c01
+#define regSHUB_GFX_DRV_VPU_RST_BASE_IDX 5
+#define regSHUB_LINK_RESET 0x7c02
+#define regSHUB_LINK_RESET_BASE_IDX 5
+#define regSHUB_HARD_RST_CTRL 0x7c10
+#define regSHUB_HARD_RST_CTRL_BASE_IDX 5
+#define regSHUB_SOFT_RST_CTRL 0x7c11
+#define regSHUB_SOFT_RST_CTRL_BASE_IDX 5
+#define regSHUB_SDP_PORT_RST 0x7c12
+#define regSHUB_SDP_PORT_RST_BASE_IDX 5
+#define regSHUB_RST_MISC_TRL 0x7c13
+#define regSHUB_RST_MISC_TRL_BASE_IDX 5
+
+
+// addressBlock: aid_nbio_nbif0_syshub_mmreg_syshubdirect
+// base address: 0x1400000
+#define regHST_CLK0_SW0_CL0_CNTL 0x4140
+#define regHST_CLK0_SW0_CL0_CNTL_BASE_IDX 5
+#define regHST_CLK0_SW1_CL0_CNTL 0x4160
+#define regHST_CLK0_SW1_CL0_CNTL_BASE_IDX 5
+#define regHST_CLK0_SW1_CL1_CNTL 0x4161
+#define regHST_CLK0_SW1_CL1_CNTL_BASE_IDX 5
+#define regHST_CLK0_SW1_CL2_CNTL 0x4162
+#define regHST_CLK0_SW1_CL2_CNTL_BASE_IDX 5
+#define regDMA_CLK0_SW0_CL0_CNTL 0x4240
+#define regDMA_CLK0_SW0_CL0_CNTL_BASE_IDX 5
+#define regDMA_CLK0_SW0_CL1_CNTL 0x4241
+#define regDMA_CLK0_SW0_CL1_CNTL_BASE_IDX 5
+#define regNIC400_1_ASIB_0_FN_MOD 0xc042
+#define regNIC400_1_ASIB_0_FN_MOD_BASE_IDX 5
+#define regNIC400_1_IB_0_FN_MOD 0xfc42
+#define regNIC400_1_IB_0_FN_MOD_BASE_IDX 5
+#define regNIC400_2_ASIB_0_FN_MOD 0x10c42
+#define regNIC400_2_ASIB_0_FN_MOD_BASE_IDX 5
+#define regNIC400_2_ASIB_0_QOS_CNTL 0x10c43
+#define regNIC400_2_ASIB_0_QOS_CNTL_BASE_IDX 5
+#define regNIC400_2_ASIB_0_MAX_OT 0x10c44
+#define regNIC400_2_ASIB_0_MAX_OT_BASE_IDX 5
+#define regNIC400_2_ASIB_0_MAX_COMB_OT 0x10c45
+#define regNIC400_2_ASIB_0_MAX_COMB_OT_BASE_IDX 5
+#define regNIC400_2_ASIB_0_AW_P 0x10c46
+#define regNIC400_2_ASIB_0_AW_P_BASE_IDX 5
+#define regNIC400_2_ASIB_0_AW_B 0x10c47
+#define regNIC400_2_ASIB_0_AW_B_BASE_IDX 5
+#define regNIC400_2_ASIB_0_AW_R 0x10c48
+#define regNIC400_2_ASIB_0_AW_R_BASE_IDX 5
+#define regNIC400_2_ASIB_0_AR_P 0x10c49
+#define regNIC400_2_ASIB_0_AR_P_BASE_IDX 5
+#define regNIC400_2_ASIB_0_AR_B 0x10c4a
+#define regNIC400_2_ASIB_0_AR_B_BASE_IDX 5
+#define regNIC400_2_ASIB_0_AR_R 0x10c4b
+#define regNIC400_2_ASIB_0_AR_R_BASE_IDX 5
+#define regNIC400_2_ASIB_0_TARGET_FC 0x10c4c
+#define regNIC400_2_ASIB_0_TARGET_FC_BASE_IDX 5
+#define regNIC400_2_ASIB_0_KI_FC 0x10c4d
+#define regNIC400_2_ASIB_0_KI_FC_BASE_IDX 5
+#define regNIC400_2_ASIB_0_QOS_RANGE 0x10c4e
+#define regNIC400_2_ASIB_0_QOS_RANGE_BASE_IDX 5
+#define regNIC400_2_ASIB_1_FN_MOD 0x11042
+#define regNIC400_2_ASIB_1_FN_MOD_BASE_IDX 5
+#define regNIC400_2_ASIB_1_QOS_CNTL 0x11043
+#define regNIC400_2_ASIB_1_QOS_CNTL_BASE_IDX 5
+#define regNIC400_2_ASIB_1_MAX_OT 0x11044
+#define regNIC400_2_ASIB_1_MAX_OT_BASE_IDX 5
+#define regNIC400_2_ASIB_1_MAX_COMB_OT 0x11045
+#define regNIC400_2_ASIB_1_MAX_COMB_OT_BASE_IDX 5
+#define regNIC400_2_ASIB_1_AW_P 0x11046
+#define regNIC400_2_ASIB_1_AW_P_BASE_IDX 5
+#define regNIC400_2_ASIB_1_AW_B 0x11047
+#define regNIC400_2_ASIB_1_AW_B_BASE_IDX 5
+#define regNIC400_2_ASIB_1_AW_R 0x11048
+#define regNIC400_2_ASIB_1_AW_R_BASE_IDX 5
+#define regNIC400_2_ASIB_1_AR_P 0x11049
+#define regNIC400_2_ASIB_1_AR_P_BASE_IDX 5
+#define regNIC400_2_ASIB_1_AR_B 0x1104a
+#define regNIC400_2_ASIB_1_AR_B_BASE_IDX 5
+#define regNIC400_2_ASIB_1_AR_R 0x1104b
+#define regNIC400_2_ASIB_1_AR_R_BASE_IDX 5
+#define regNIC400_2_ASIB_1_TARGET_FC 0x1104c
+#define regNIC400_2_ASIB_1_TARGET_FC_BASE_IDX 5
+#define regNIC400_2_ASIB_1_KI_FC 0x1104d
+#define regNIC400_2_ASIB_1_KI_FC_BASE_IDX 5
+#define regNIC400_2_ASIB_1_QOS_RANGE 0x1104e
+#define regNIC400_2_ASIB_1_QOS_RANGE_BASE_IDX 5
+#define regNIC400_2_IB_0_FN_MOD 0x13c42
+#define regNIC400_2_IB_0_FN_MOD_BASE_IDX 5
+
+
+// addressBlock: aid_nbio_iohub_nb_nbcfg_nb_cfgdec
+// base address: 0x13b00000
+#define regNB_NBCFG0_NBCFG_SCRATCH_4 0xe8001e
+#define regNB_NBCFG0_NBCFG_SCRATCH_4_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_misc_misc_cfgdec
+// base address: 0x13b10000
+#define regNB_CNTL 0xe84000
+#define regNB_CNTL_BASE_IDX 8
+#define regNB_SPARE1 0xe84003
+#define regNB_SPARE1_BASE_IDX 8
+#define regNB_SPARE2 0xe84004
+#define regNB_SPARE2_BASE_IDX 8
+#define regNB_REVID 0xe84005
+#define regNB_REVID_BASE_IDX 8
+#define regNBIO_LCLK_DS_MASK 0xe84009
+#define regNBIO_LCLK_DS_MASK_BASE_IDX 8
+#define regNB_BUS_NUM_CNTL 0xe84011
+#define regNB_BUS_NUM_CNTL_BASE_IDX 8
+#define regNB_MMIOBASE 0xe84017
+#define regNB_MMIOBASE_BASE_IDX 8
+#define regNB_MMIOLIMIT 0xe84018
+#define regNB_MMIOLIMIT_BASE_IDX 8
+#define regNB_LOWER_TOP_OF_DRAM2 0xe84019
+#define regNB_LOWER_TOP_OF_DRAM2_BASE_IDX 8
+#define regNB_UPPER_TOP_OF_DRAM2 0xe8401a
+#define regNB_UPPER_TOP_OF_DRAM2_BASE_IDX 8
+#define regNB_LOWER_DRAM2_BASE 0xe8401b
+#define regNB_LOWER_DRAM2_BASE_BASE_IDX 8
+#define regNB_UPPER_DRAM2_BASE 0xe8401c
+#define regNB_UPPER_DRAM2_BASE_BASE_IDX 8
+#define regSB_LOCATION 0xe8401f
+#define regSB_LOCATION_BASE_IDX 8
+#define regSW_US_LOCATION 0xe84020
+#define regSW_US_LOCATION_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr0 0xe8402e
+#define regNB_PROG_DEVICE_REMAP_PBr0_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr1 0xe8402f
+#define regNB_PROG_DEVICE_REMAP_PBr1_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr2 0xe84030
+#define regNB_PROG_DEVICE_REMAP_PBr2_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr3 0xe84031
+#define regNB_PROG_DEVICE_REMAP_PBr3_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr4 0xe84032
+#define regNB_PROG_DEVICE_REMAP_PBr4_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr5 0xe84033
+#define regNB_PROG_DEVICE_REMAP_PBr5_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr6 0xe84034
+#define regNB_PROG_DEVICE_REMAP_PBr6_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr7 0xe84035
+#define regNB_PROG_DEVICE_REMAP_PBr7_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr8 0xe84036
+#define regNB_PROG_DEVICE_REMAP_PBr8_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr10 0xe84038
+#define regNB_PROG_DEVICE_REMAP_PBr10_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr11 0xe84039
+#define regNB_PROG_DEVICE_REMAP_PBr11_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr12 0xe8403a
+#define regNB_PROG_DEVICE_REMAP_PBr12_BASE_IDX 8
+#define regNB_PROG_DEVICE_REMAP_PBr13 0xe8403b
+#define regNB_PROG_DEVICE_REMAP_PBr13_BASE_IDX 8
+#define regSW_NMI_CNTL 0xe84042
+#define regSW_NMI_CNTL_BASE_IDX 8
+#define regSW_SMI_CNTL 0xe84043
+#define regSW_SMI_CNTL_BASE_IDX 8
+#define regSW_SCI_CNTL 0xe84044
+#define regSW_SCI_CNTL_BASE_IDX 8
+#define regAPML_SW_STATUS 0xe84045
+#define regAPML_SW_STATUS_BASE_IDX 8
+#define regSW_GIC_SPI_CNTL 0xe84047
+#define regSW_GIC_SPI_CNTL_BASE_IDX 8
+#define regSW_SYNCFLOOD_CNTL 0xe84049
+#define regSW_SYNCFLOOD_CNTL_BASE_IDX 8
+#define regNB_TOP_OF_DRAM3 0xe8404e
+#define regNB_TOP_OF_DRAM3_BASE_IDX 8
+#define regCAM_CONTROL 0xe84052
+#define regCAM_CONTROL_BASE_IDX 8
+#define regCAM_TARGET_INDEX_ADDR_BOTTOM 0xe84053
+#define regCAM_TARGET_INDEX_ADDR_BOTTOM_BASE_IDX 8
+#define regCAM_TARGET_INDEX_ADDR_TOP 0xe84054
+#define regCAM_TARGET_INDEX_ADDR_TOP_BASE_IDX 8
+#define regCAM_TARGET_INDEX_DATA 0xe84055
+#define regCAM_TARGET_INDEX_DATA_BASE_IDX 8
+#define regCAM_TARGET_INDEX_DATA_MASK 0xe84056
+#define regCAM_TARGET_INDEX_DATA_MASK_BASE_IDX 8
+#define regCAM_TARGET_DATA_ADDR_BOTTOM 0xe84057
+#define regCAM_TARGET_DATA_ADDR_BOTTOM_BASE_IDX 8
+#define regCAM_TARGET_DATA_ADDR_TOP 0xe84059
+#define regCAM_TARGET_DATA_ADDR_TOP_BASE_IDX 8
+#define regCAM_TARGET_DATA 0xe8405a
+#define regCAM_TARGET_DATA_BASE_IDX 8
+#define regCAM_TARGET_DATA_MASK 0xe8405b
+#define regCAM_TARGET_DATA_MASK_BASE_IDX 8
+#define regP_DMA_DROPPED_LOG_LOWER 0xe84060
+#define regP_DMA_DROPPED_LOG_LOWER_BASE_IDX 8
+#define regP_DMA_DROPPED_LOG_UPPER 0xe84061
+#define regP_DMA_DROPPED_LOG_UPPER_BASE_IDX 8
+#define regNP_DMA_DROPPED_LOG_LOWER 0xe84062
+#define regNP_DMA_DROPPED_LOG_LOWER_BASE_IDX 8
+#define regNP_DMA_DROPPED_LOG_UPPER 0xe84063
+#define regNP_DMA_DROPPED_LOG_UPPER_BASE_IDX 8
+#define regPCIE_VDM_NODE0_CTRL4 0xe84064
+#define regPCIE_VDM_NODE0_CTRL4_BASE_IDX 8
+#define regPCIE_VDM_CNTL2 0xe8408c
+#define regPCIE_VDM_CNTL2_BASE_IDX 8
+#define regPCIE_VDM_CNTL3 0xe8408d
+#define regPCIE_VDM_CNTL3_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT0_0 0xe84090
+#define regSTALL_CONTROL_XBARPORT0_0_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT0_1 0xe84091
+#define regSTALL_CONTROL_XBARPORT0_1_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT1_0 0xe84093
+#define regSTALL_CONTROL_XBARPORT1_0_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT1_1 0xe84094
+#define regSTALL_CONTROL_XBARPORT1_1_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT2_0 0xe84096
+#define regSTALL_CONTROL_XBARPORT2_0_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT2_1 0xe84097
+#define regSTALL_CONTROL_XBARPORT2_1_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT3_0 0xe84099
+#define regSTALL_CONTROL_XBARPORT3_0_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT3_1 0xe8409a
+#define regSTALL_CONTROL_XBARPORT3_1_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT4_0 0xe8409c
+#define regSTALL_CONTROL_XBARPORT4_0_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT4_1 0xe8409d
+#define regSTALL_CONTROL_XBARPORT4_1_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT5_0 0xe8409f
+#define regSTALL_CONTROL_XBARPORT5_0_BASE_IDX 8
+#define regSTALL_CONTROL_XBARPORT5_1 0xe840a0
+#define regSTALL_CONTROL_XBARPORT5_1_BASE_IDX 8
+#define regNB_DRAM3_BASE 0xe840b1
+#define regNB_DRAM3_BASE_BASE_IDX 8
+#define regPSP_BASE_ADDR_LO 0xe840b8
+#define regPSP_BASE_ADDR_LO_BASE_IDX 8
+#define regPSP_BASE_ADDR_HI 0xe840b9
+#define regPSP_BASE_ADDR_HI_BASE_IDX 8
+#define regSMU_BASE_ADDR_LO 0xe840ba
+#define regSMU_BASE_ADDR_LO_BASE_IDX 8
+#define regSMU_BASE_ADDR_HI 0xe840bb
+#define regSMU_BASE_ADDR_HI_BASE_IDX 8
+#define regSCRATCH_4 0xe840fc
+#define regSCRATCH_4_BASE_IDX 8
+#define regSCRATCH_5 0xe840fd
+#define regSCRATCH_5_BASE_IDX 8
+#define regSMU_BLOCK_CPU 0xe840fe
+#define regSMU_BLOCK_CPU_BASE_IDX 8
+#define regSMU_BLOCK_CPU_STATUS 0xe840ff
+#define regSMU_BLOCK_CPU_STATUS_BASE_IDX 8
+#define regTRAP_STATUS 0xe84100
+#define regTRAP_STATUS_BASE_IDX 8
+#define regTRAP_REQUEST0 0xe84101
+#define regTRAP_REQUEST0_BASE_IDX 8
+#define regTRAP_REQUEST1 0xe84102
+#define regTRAP_REQUEST1_BASE_IDX 8
+#define regTRAP_REQUEST2 0xe84103
+#define regTRAP_REQUEST2_BASE_IDX 8
+#define regTRAP_REQUEST3 0xe84104
+#define regTRAP_REQUEST3_BASE_IDX 8
+#define regTRAP_REQUEST4 0xe84105
+#define regTRAP_REQUEST4_BASE_IDX 8
+#define regTRAP_REQUEST5 0xe84106
+#define regTRAP_REQUEST5_BASE_IDX 8
+#define regTRAP_REQUEST_DATASTRB0 0xe84108
+#define regTRAP_REQUEST_DATASTRB0_BASE_IDX 8
+#define regTRAP_REQUEST_DATASTRB1 0xe84109
+#define regTRAP_REQUEST_DATASTRB1_BASE_IDX 8
+#define regTRAP_REQUEST_DATA0 0xe84110
+#define regTRAP_REQUEST_DATA0_BASE_IDX 8
+#define regTRAP_REQUEST_DATA1 0xe84111
+#define regTRAP_REQUEST_DATA1_BASE_IDX 8
+#define regTRAP_REQUEST_DATA2 0xe84112
+#define regTRAP_REQUEST_DATA2_BASE_IDX 8
+#define regTRAP_REQUEST_DATA3 0xe84113
+#define regTRAP_REQUEST_DATA3_BASE_IDX 8
+#define regTRAP_REQUEST_DATA4 0xe84114
+#define regTRAP_REQUEST_DATA4_BASE_IDX 8
+#define regTRAP_REQUEST_DATA5 0xe84115
+#define regTRAP_REQUEST_DATA5_BASE_IDX 8
+#define regTRAP_REQUEST_DATA6 0xe84116
+#define regTRAP_REQUEST_DATA6_BASE_IDX 8
+#define regTRAP_REQUEST_DATA7 0xe84117
+#define regTRAP_REQUEST_DATA7_BASE_IDX 8
+#define regTRAP_REQUEST_DATA8 0xe84118
+#define regTRAP_REQUEST_DATA8_BASE_IDX 8
+#define regTRAP_REQUEST_DATA9 0xe84119
+#define regTRAP_REQUEST_DATA9_BASE_IDX 8
+#define regTRAP_REQUEST_DATA10 0xe8411a
+#define regTRAP_REQUEST_DATA10_BASE_IDX 8
+#define regTRAP_REQUEST_DATA11 0xe8411b
+#define regTRAP_REQUEST_DATA11_BASE_IDX 8
+#define regTRAP_REQUEST_DATA12 0xe8411c
+#define regTRAP_REQUEST_DATA12_BASE_IDX 8
+#define regTRAP_REQUEST_DATA13 0xe8411d
+#define regTRAP_REQUEST_DATA13_BASE_IDX 8
+#define regTRAP_REQUEST_DATA14 0xe8411e
+#define regTRAP_REQUEST_DATA14_BASE_IDX 8
+#define regTRAP_REQUEST_DATA15 0xe8411f
+#define regTRAP_REQUEST_DATA15_BASE_IDX 8
+#define regTRAP_RESPONSE_CONTROL 0xe84130
+#define regTRAP_RESPONSE_CONTROL_BASE_IDX 8
+#define regTRAP_RESPONSE0 0xe84131
+#define regTRAP_RESPONSE0_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA0 0xe84140
+#define regTRAP_RESPONSE_DATA0_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA1 0xe84141
+#define regTRAP_RESPONSE_DATA1_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA2 0xe84142
+#define regTRAP_RESPONSE_DATA2_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA3 0xe84143
+#define regTRAP_RESPONSE_DATA3_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA4 0xe84144
+#define regTRAP_RESPONSE_DATA4_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA5 0xe84145
+#define regTRAP_RESPONSE_DATA5_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA6 0xe84146
+#define regTRAP_RESPONSE_DATA6_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA7 0xe84147
+#define regTRAP_RESPONSE_DATA7_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA8 0xe84148
+#define regTRAP_RESPONSE_DATA8_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA9 0xe84149
+#define regTRAP_RESPONSE_DATA9_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA10 0xe8414a
+#define regTRAP_RESPONSE_DATA10_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA11 0xe8414b
+#define regTRAP_RESPONSE_DATA11_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA12 0xe8414c
+#define regTRAP_RESPONSE_DATA12_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA13 0xe8414d
+#define regTRAP_RESPONSE_DATA13_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA14 0xe8414e
+#define regTRAP_RESPONSE_DATA14_BASE_IDX 8
+#define regTRAP_RESPONSE_DATA15 0xe8414f
+#define regTRAP_RESPONSE_DATA15_BASE_IDX 8
+#define regTRAP0_CONTROL0 0xe84200
+#define regTRAP0_CONTROL0_BASE_IDX 8
+#define regTRAP0_ADDRESS_LO 0xe84202
+#define regTRAP0_ADDRESS_LO_BASE_IDX 8
+#define regTRAP0_ADDRESS_HI 0xe84203
+#define regTRAP0_ADDRESS_HI_BASE_IDX 8
+#define regTRAP0_COMMAND 0xe84204
+#define regTRAP0_COMMAND_BASE_IDX 8
+#define regTRAP0_ADDRESS_LO_MASK 0xe84206
+#define regTRAP0_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP0_ADDRESS_HI_MASK 0xe84207
+#define regTRAP0_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP0_COMMAND_MASK 0xe84208
+#define regTRAP0_COMMAND_MASK_BASE_IDX 8
+#define regTRAP1_CONTROL0 0xe84210
+#define regTRAP1_CONTROL0_BASE_IDX 8
+#define regTRAP1_ADDRESS_LO 0xe84212
+#define regTRAP1_ADDRESS_LO_BASE_IDX 8
+#define regTRAP1_ADDRESS_HI 0xe84213
+#define regTRAP1_ADDRESS_HI_BASE_IDX 8
+#define regTRAP1_COMMAND 0xe84214
+#define regTRAP1_COMMAND_BASE_IDX 8
+#define regTRAP1_ADDRESS_LO_MASK 0xe84216
+#define regTRAP1_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP1_ADDRESS_HI_MASK 0xe84217
+#define regTRAP1_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP1_COMMAND_MASK 0xe84218
+#define regTRAP1_COMMAND_MASK_BASE_IDX 8
+#define regTRAP2_CONTROL0 0xe84220
+#define regTRAP2_CONTROL0_BASE_IDX 8
+#define regTRAP2_ADDRESS_LO 0xe84222
+#define regTRAP2_ADDRESS_LO_BASE_IDX 8
+#define regTRAP2_ADDRESS_HI 0xe84223
+#define regTRAP2_ADDRESS_HI_BASE_IDX 8
+#define regTRAP2_COMMAND 0xe84224
+#define regTRAP2_COMMAND_BASE_IDX 8
+#define regTRAP2_ADDRESS_LO_MASK 0xe84226
+#define regTRAP2_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP2_ADDRESS_HI_MASK 0xe84227
+#define regTRAP2_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP2_COMMAND_MASK 0xe84228
+#define regTRAP2_COMMAND_MASK_BASE_IDX 8
+#define regTRAP3_CONTROL0 0xe84230
+#define regTRAP3_CONTROL0_BASE_IDX 8
+#define regTRAP3_ADDRESS_LO 0xe84232
+#define regTRAP3_ADDRESS_LO_BASE_IDX 8
+#define regTRAP3_ADDRESS_HI 0xe84233
+#define regTRAP3_ADDRESS_HI_BASE_IDX 8
+#define regTRAP3_COMMAND 0xe84234
+#define regTRAP3_COMMAND_BASE_IDX 8
+#define regTRAP3_ADDRESS_LO_MASK 0xe84236
+#define regTRAP3_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP3_ADDRESS_HI_MASK 0xe84237
+#define regTRAP3_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP3_COMMAND_MASK 0xe84238
+#define regTRAP3_COMMAND_MASK_BASE_IDX 8
+#define regTRAP4_CONTROL0 0xe84240
+#define regTRAP4_CONTROL0_BASE_IDX 8
+#define regTRAP4_ADDRESS_LO 0xe84242
+#define regTRAP4_ADDRESS_LO_BASE_IDX 8
+#define regTRAP4_ADDRESS_HI 0xe84243
+#define regTRAP4_ADDRESS_HI_BASE_IDX 8
+#define regTRAP4_COMMAND 0xe84244
+#define regTRAP4_COMMAND_BASE_IDX 8
+#define regTRAP4_ADDRESS_LO_MASK 0xe84246
+#define regTRAP4_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP4_ADDRESS_HI_MASK 0xe84247
+#define regTRAP4_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP4_COMMAND_MASK 0xe84248
+#define regTRAP4_COMMAND_MASK_BASE_IDX 8
+#define regTRAP5_CONTROL0 0xe84250
+#define regTRAP5_CONTROL0_BASE_IDX 8
+#define regTRAP5_ADDRESS_LO 0xe84252
+#define regTRAP5_ADDRESS_LO_BASE_IDX 8
+#define regTRAP5_ADDRESS_HI 0xe84253
+#define regTRAP5_ADDRESS_HI_BASE_IDX 8
+#define regTRAP5_COMMAND 0xe84254
+#define regTRAP5_COMMAND_BASE_IDX 8
+#define regTRAP5_ADDRESS_LO_MASK 0xe84256
+#define regTRAP5_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP5_ADDRESS_HI_MASK 0xe84257
+#define regTRAP5_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP5_COMMAND_MASK 0xe84258
+#define regTRAP5_COMMAND_MASK_BASE_IDX 8
+#define regTRAP6_CONTROL0 0xe84260
+#define regTRAP6_CONTROL0_BASE_IDX 8
+#define regTRAP6_ADDRESS_LO 0xe84262
+#define regTRAP6_ADDRESS_LO_BASE_IDX 8
+#define regTRAP6_ADDRESS_HI 0xe84263
+#define regTRAP6_ADDRESS_HI_BASE_IDX 8
+#define regTRAP6_COMMAND 0xe84264
+#define regTRAP6_COMMAND_BASE_IDX 8
+#define regTRAP6_ADDRESS_LO_MASK 0xe84266
+#define regTRAP6_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP6_ADDRESS_HI_MASK 0xe84267
+#define regTRAP6_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP6_COMMAND_MASK 0xe84268
+#define regTRAP6_COMMAND_MASK_BASE_IDX 8
+#define regTRAP7_CONTROL0 0xe84270
+#define regTRAP7_CONTROL0_BASE_IDX 8
+#define regTRAP7_ADDRESS_LO 0xe84272
+#define regTRAP7_ADDRESS_LO_BASE_IDX 8
+#define regTRAP7_ADDRESS_HI 0xe84273
+#define regTRAP7_ADDRESS_HI_BASE_IDX 8
+#define regTRAP7_COMMAND 0xe84274
+#define regTRAP7_COMMAND_BASE_IDX 8
+#define regTRAP7_ADDRESS_LO_MASK 0xe84276
+#define regTRAP7_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP7_ADDRESS_HI_MASK 0xe84277
+#define regTRAP7_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP7_COMMAND_MASK 0xe84278
+#define regTRAP7_COMMAND_MASK_BASE_IDX 8
+#define regTRAP8_CONTROL0 0xe84280
+#define regTRAP8_CONTROL0_BASE_IDX 8
+#define regTRAP8_ADDRESS_LO 0xe84282
+#define regTRAP8_ADDRESS_LO_BASE_IDX 8
+#define regTRAP8_ADDRESS_HI 0xe84283
+#define regTRAP8_ADDRESS_HI_BASE_IDX 8
+#define regTRAP8_COMMAND 0xe84284
+#define regTRAP8_COMMAND_BASE_IDX 8
+#define regTRAP8_ADDRESS_LO_MASK 0xe84286
+#define regTRAP8_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP8_ADDRESS_HI_MASK 0xe84287
+#define regTRAP8_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP8_COMMAND_MASK 0xe84288
+#define regTRAP8_COMMAND_MASK_BASE_IDX 8
+#define regTRAP9_CONTROL0 0xe84290
+#define regTRAP9_CONTROL0_BASE_IDX 8
+#define regTRAP9_ADDRESS_LO 0xe84292
+#define regTRAP9_ADDRESS_LO_BASE_IDX 8
+#define regTRAP9_ADDRESS_HI 0xe84293
+#define regTRAP9_ADDRESS_HI_BASE_IDX 8
+#define regTRAP9_COMMAND 0xe84294
+#define regTRAP9_COMMAND_BASE_IDX 8
+#define regTRAP9_ADDRESS_LO_MASK 0xe84296
+#define regTRAP9_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP9_ADDRESS_HI_MASK 0xe84297
+#define regTRAP9_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP9_COMMAND_MASK 0xe84298
+#define regTRAP9_COMMAND_MASK_BASE_IDX 8
+#define regTRAP10_CONTROL0 0xe842a0
+#define regTRAP10_CONTROL0_BASE_IDX 8
+#define regTRAP10_ADDRESS_LO 0xe842a2
+#define regTRAP10_ADDRESS_LO_BASE_IDX 8
+#define regTRAP10_ADDRESS_HI 0xe842a3
+#define regTRAP10_ADDRESS_HI_BASE_IDX 8
+#define regTRAP10_COMMAND 0xe842a4
+#define regTRAP10_COMMAND_BASE_IDX 8
+#define regTRAP10_ADDRESS_LO_MASK 0xe842a6
+#define regTRAP10_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP10_ADDRESS_HI_MASK 0xe842a7
+#define regTRAP10_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP10_COMMAND_MASK 0xe842a8
+#define regTRAP10_COMMAND_MASK_BASE_IDX 8
+#define regTRAP11_CONTROL0 0xe842b0
+#define regTRAP11_CONTROL0_BASE_IDX 8
+#define regTRAP11_ADDRESS_LO 0xe842b2
+#define regTRAP11_ADDRESS_LO_BASE_IDX 8
+#define regTRAP11_ADDRESS_HI 0xe842b3
+#define regTRAP11_ADDRESS_HI_BASE_IDX 8
+#define regTRAP11_COMMAND 0xe842b4
+#define regTRAP11_COMMAND_BASE_IDX 8
+#define regTRAP11_ADDRESS_LO_MASK 0xe842b6
+#define regTRAP11_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP11_ADDRESS_HI_MASK 0xe842b7
+#define regTRAP11_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP11_COMMAND_MASK 0xe842b8
+#define regTRAP11_COMMAND_MASK_BASE_IDX 8
+#define regTRAP12_CONTROL0 0xe842c0
+#define regTRAP12_CONTROL0_BASE_IDX 8
+#define regTRAP12_ADDRESS_LO 0xe842c2
+#define regTRAP12_ADDRESS_LO_BASE_IDX 8
+#define regTRAP12_ADDRESS_HI 0xe842c3
+#define regTRAP12_ADDRESS_HI_BASE_IDX 8
+#define regTRAP12_COMMAND 0xe842c4
+#define regTRAP12_COMMAND_BASE_IDX 8
+#define regTRAP12_ADDRESS_LO_MASK 0xe842c6
+#define regTRAP12_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP12_ADDRESS_HI_MASK 0xe842c7
+#define regTRAP12_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP12_COMMAND_MASK 0xe842c8
+#define regTRAP12_COMMAND_MASK_BASE_IDX 8
+#define regTRAP13_CONTROL0 0xe842d0
+#define regTRAP13_CONTROL0_BASE_IDX 8
+#define regTRAP13_ADDRESS_LO 0xe842d2
+#define regTRAP13_ADDRESS_LO_BASE_IDX 8
+#define regTRAP13_ADDRESS_HI 0xe842d3
+#define regTRAP13_ADDRESS_HI_BASE_IDX 8
+#define regTRAP13_COMMAND 0xe842d4
+#define regTRAP13_COMMAND_BASE_IDX 8
+#define regTRAP13_ADDRESS_LO_MASK 0xe842d6
+#define regTRAP13_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP13_ADDRESS_HI_MASK 0xe842d7
+#define regTRAP13_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP13_COMMAND_MASK 0xe842d8
+#define regTRAP13_COMMAND_MASK_BASE_IDX 8
+#define regTRAP14_CONTROL0 0xe842e0
+#define regTRAP14_CONTROL0_BASE_IDX 8
+#define regTRAP14_ADDRESS_LO 0xe842e2
+#define regTRAP14_ADDRESS_LO_BASE_IDX 8
+#define regTRAP14_ADDRESS_HI 0xe842e3
+#define regTRAP14_ADDRESS_HI_BASE_IDX 8
+#define regTRAP14_COMMAND 0xe842e4
+#define regTRAP14_COMMAND_BASE_IDX 8
+#define regTRAP14_ADDRESS_LO_MASK 0xe842e6
+#define regTRAP14_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP14_ADDRESS_HI_MASK 0xe842e7
+#define regTRAP14_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP14_COMMAND_MASK 0xe842e8
+#define regTRAP14_COMMAND_MASK_BASE_IDX 8
+#define regTRAP15_CONTROL0 0xe842f0
+#define regTRAP15_CONTROL0_BASE_IDX 8
+#define regTRAP15_ADDRESS_LO 0xe842f2
+#define regTRAP15_ADDRESS_LO_BASE_IDX 8
+#define regTRAP15_ADDRESS_HI 0xe842f3
+#define regTRAP15_ADDRESS_HI_BASE_IDX 8
+#define regTRAP15_COMMAND 0xe842f4
+#define regTRAP15_COMMAND_BASE_IDX 8
+#define regTRAP15_ADDRESS_LO_MASK 0xe842f6
+#define regTRAP15_ADDRESS_LO_MASK_BASE_IDX 8
+#define regTRAP15_ADDRESS_HI_MASK 0xe842f7
+#define regTRAP15_ADDRESS_HI_MASK_BASE_IDX 8
+#define regTRAP15_COMMAND_MASK 0xe842f8
+#define regTRAP15_COMMAND_MASK_BASE_IDX 8
+#define regSB_COMMAND 0xe85000
+#define regSB_COMMAND_BASE_IDX 8
+#define regSB_SUB_BUS_NUMBER_LATENCY 0xe85001
+#define regSB_SUB_BUS_NUMBER_LATENCY_BASE_IDX 8
+#define regSB_IO_BASE_LIMIT 0xe85002
+#define regSB_IO_BASE_LIMIT_BASE_IDX 8
+#define regSB_MEM_BASE_LIMIT 0xe85003
+#define regSB_MEM_BASE_LIMIT_BASE_IDX 8
+#define regSB_PREF_BASE_LIMIT 0xe85004
+#define regSB_PREF_BASE_LIMIT_BASE_IDX 8
+#define regSB_PREF_BASE_UPPER 0xe85005
+#define regSB_PREF_BASE_UPPER_BASE_IDX 8
+#define regSB_PREF_LIMIT_UPPER 0xe85006
+#define regSB_PREF_LIMIT_UPPER_BASE_IDX 8
+#define regSB_IO_BASE_LIMIT_HI 0xe85007
+#define regSB_IO_BASE_LIMIT_HI_BASE_IDX 8
+#define regSB_IRQ_BRIDGE_CNTL 0xe85008
+#define regSB_IRQ_BRIDGE_CNTL_BASE_IDX 8
+#define regSB_EXT_BRIDGE_CNTL 0xe85009
+#define regSB_EXT_BRIDGE_CNTL_BASE_IDX 8
+#define regSB_PMI_STATUS_CNTL 0xe8500a
+#define regSB_PMI_STATUS_CNTL_BASE_IDX 8
+#define regSB_SLOT_CAP 0xe8500b
+#define regSB_SLOT_CAP_BASE_IDX 8
+#define regSB_ROOT_CNTL 0xe8500c
+#define regSB_ROOT_CNTL_BASE_IDX 8
+#define regSB_DEVICE_CNTL2 0xe8500d
+#define regSB_DEVICE_CNTL2_BASE_IDX 8
+#define regMCA_SMN_INT_REQ_ADDR 0xe85020
+#define regMCA_SMN_INT_REQ_ADDR_BASE_IDX 8
+#define regMCA_SMN_INT_MCM_ADDR 0xe85021
+#define regMCA_SMN_INT_MCM_ADDR_BASE_IDX 8
+#define regMCA_SMN_INT_APERTUREID 0xe85022
+#define regMCA_SMN_INT_APERTUREID_BASE_IDX 8
+#define regMCA_SMN_INT_CONTROL 0xe85023
+#define regMCA_SMN_INT_CONTROL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_rascfg_ras_cfgdec
+// base address: 0x13b20000
+#define regPARITY_CONTROL_0 0xe88000
+#define regPARITY_CONTROL_0_BASE_IDX 8
+#define regPARITY_CONTROL_1 0xe88001
+#define regPARITY_CONTROL_1_BASE_IDX 8
+#define regPARITY_SEVERITY_CONTROL_UNCORR_0 0xe88002
+#define regPARITY_SEVERITY_CONTROL_UNCORR_0_BASE_IDX 8
+#define regPARITY_SEVERITY_CONTROL_CORR_0 0xe88004
+#define regPARITY_SEVERITY_CONTROL_CORR_0_BASE_IDX 8
+#define regPARITY_SEVERITY_CONTROL_UCP_0 0xe88006
+#define regPARITY_SEVERITY_CONTROL_UCP_0_BASE_IDX 8
+#define regRAS_GLOBAL_STATUS_LO 0xe88008
+#define regRAS_GLOBAL_STATUS_LO_BASE_IDX 8
+#define regRAS_GLOBAL_STATUS_HI 0xe88009
+#define regRAS_GLOBAL_STATUS_HI_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP0 0xe8800a
+#define regPARITY_ERROR_STATUS_UNCORR_GRP0_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP1 0xe8800b
+#define regPARITY_ERROR_STATUS_UNCORR_GRP1_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP2 0xe8800c
+#define regPARITY_ERROR_STATUS_UNCORR_GRP2_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP3 0xe8800d
+#define regPARITY_ERROR_STATUS_UNCORR_GRP3_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP4 0xe8800e
+#define regPARITY_ERROR_STATUS_UNCORR_GRP4_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP5 0xe8800f
+#define regPARITY_ERROR_STATUS_UNCORR_GRP5_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP6 0xe88010
+#define regPARITY_ERROR_STATUS_UNCORR_GRP6_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP7 0xe88011
+#define regPARITY_ERROR_STATUS_UNCORR_GRP7_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP10 0xe88014
+#define regPARITY_ERROR_STATUS_UNCORR_GRP10_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP11 0xe88015
+#define regPARITY_ERROR_STATUS_UNCORR_GRP11_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP12 0xe88016
+#define regPARITY_ERROR_STATUS_UNCORR_GRP12_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP13 0xe88017
+#define regPARITY_ERROR_STATUS_UNCORR_GRP13_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP14 0xe88018
+#define regPARITY_ERROR_STATUS_UNCORR_GRP14_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP15 0xe88019
+#define regPARITY_ERROR_STATUS_UNCORR_GRP15_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UNCORR_GRP16 0xe8801a
+#define regPARITY_ERROR_STATUS_UNCORR_GRP16_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP0 0xe8801b
+#define regPARITY_ERROR_STATUS_CORR_GRP0_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP1 0xe8801c
+#define regPARITY_ERROR_STATUS_CORR_GRP1_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP2 0xe8801d
+#define regPARITY_ERROR_STATUS_CORR_GRP2_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP3 0xe8801e
+#define regPARITY_ERROR_STATUS_CORR_GRP3_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP4 0xe8801f
+#define regPARITY_ERROR_STATUS_CORR_GRP4_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP5 0xe88020
+#define regPARITY_ERROR_STATUS_CORR_GRP5_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP6 0xe88021
+#define regPARITY_ERROR_STATUS_CORR_GRP6_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP7 0xe88022
+#define regPARITY_ERROR_STATUS_CORR_GRP7_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP10 0xe88025
+#define regPARITY_ERROR_STATUS_CORR_GRP10_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP11 0xe88026
+#define regPARITY_ERROR_STATUS_CORR_GRP11_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP12 0xe88027
+#define regPARITY_ERROR_STATUS_CORR_GRP12_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP13 0xe88028
+#define regPARITY_ERROR_STATUS_CORR_GRP13_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP14 0xe88029
+#define regPARITY_ERROR_STATUS_CORR_GRP14_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP15 0xe8802a
+#define regPARITY_ERROR_STATUS_CORR_GRP15_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP16 0xe8802b
+#define regPARITY_ERROR_STATUS_CORR_GRP16_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_CORR_GRP17 0xe8802c
+#define regPARITY_ERROR_STATUS_CORR_GRP17_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP0 0xe8802d
+#define regPARITY_COUNTER_CORR_GRP0_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP1 0xe8802e
+#define regPARITY_COUNTER_CORR_GRP1_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP2 0xe8802f
+#define regPARITY_COUNTER_CORR_GRP2_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP3 0xe88030
+#define regPARITY_COUNTER_CORR_GRP3_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP4 0xe88031
+#define regPARITY_COUNTER_CORR_GRP4_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP5 0xe88032
+#define regPARITY_COUNTER_CORR_GRP5_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP6 0xe88033
+#define regPARITY_COUNTER_CORR_GRP6_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP7 0xe88034
+#define regPARITY_COUNTER_CORR_GRP7_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP10 0xe88037
+#define regPARITY_COUNTER_CORR_GRP10_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP11 0xe88038
+#define regPARITY_COUNTER_CORR_GRP11_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP12 0xe88039
+#define regPARITY_COUNTER_CORR_GRP12_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP13 0xe8803a
+#define regPARITY_COUNTER_CORR_GRP13_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP14 0xe8803b
+#define regPARITY_COUNTER_CORR_GRP14_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP15 0xe8803c
+#define regPARITY_COUNTER_CORR_GRP15_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP16 0xe8803d
+#define regPARITY_COUNTER_CORR_GRP16_BASE_IDX 8
+#define regPARITY_COUNTER_CORR_GRP17 0xe8803e
+#define regPARITY_COUNTER_CORR_GRP17_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP0 0xe8803f
+#define regPARITY_ERROR_STATUS_UCP_GRP0_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP1 0xe88040
+#define regPARITY_ERROR_STATUS_UCP_GRP1_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP2 0xe88041
+#define regPARITY_ERROR_STATUS_UCP_GRP2_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP3 0xe88042
+#define regPARITY_ERROR_STATUS_UCP_GRP3_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP4 0xe88043
+#define regPARITY_ERROR_STATUS_UCP_GRP4_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP5 0xe88044
+#define regPARITY_ERROR_STATUS_UCP_GRP5_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP6 0xe88045
+#define regPARITY_ERROR_STATUS_UCP_GRP6_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP7 0xe88046
+#define regPARITY_ERROR_STATUS_UCP_GRP7_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP10 0xe88049
+#define regPARITY_ERROR_STATUS_UCP_GRP10_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP11 0xe8804a
+#define regPARITY_ERROR_STATUS_UCP_GRP11_BASE_IDX 8
+#define regPARITY_ERROR_STATUS_UCP_GRP12 0xe8804b
+#define regPARITY_ERROR_STATUS_UCP_GRP12_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP0 0xe8804c
+#define regPARITY_COUNTER_UCP_GRP0_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP1 0xe8804d
+#define regPARITY_COUNTER_UCP_GRP1_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP2 0xe8804e
+#define regPARITY_COUNTER_UCP_GRP2_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP3 0xe8804f
+#define regPARITY_COUNTER_UCP_GRP3_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP4 0xe88050
+#define regPARITY_COUNTER_UCP_GRP4_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP5 0xe88051
+#define regPARITY_COUNTER_UCP_GRP5_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP6 0xe88052
+#define regPARITY_COUNTER_UCP_GRP6_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP7 0xe88053
+#define regPARITY_COUNTER_UCP_GRP7_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP10 0xe88056
+#define regPARITY_COUNTER_UCP_GRP10_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP11 0xe88057
+#define regPARITY_COUNTER_UCP_GRP11_BASE_IDX 8
+#define regPARITY_COUNTER_UCP_GRP12 0xe88058
+#define regPARITY_COUNTER_UCP_GRP12_BASE_IDX 8
+#define regMISC_SEVERITY_CONTROL 0xe88059
+#define regMISC_SEVERITY_CONTROL_BASE_IDX 8
+#define regMISC_RAS_CONTROL 0xe8805a
+#define regMISC_RAS_CONTROL_BASE_IDX 8
+#define regRAS_SCRATCH_0 0xe8805b
+#define regRAS_SCRATCH_0_BASE_IDX 8
+#define regRAS_SCRATCH_1 0xe8805c
+#define regRAS_SCRATCH_1_BASE_IDX 8
+#define regErrEvent_ACTION_CONTROL 0xe8805d
+#define regErrEvent_ACTION_CONTROL_BASE_IDX 8
+#define regParitySerr_ACTION_CONTROL 0xe8805e
+#define regParitySerr_ACTION_CONTROL_BASE_IDX 8
+#define regParityFatal_ACTION_CONTROL 0xe8805f
+#define regParityFatal_ACTION_CONTROL_BASE_IDX 8
+#define regParityNonFatal_ACTION_CONTROL 0xe88060
+#define regParityNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regParityCorr_ACTION_CONTROL 0xe88061
+#define regParityCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortASerr_ACTION_CONTROL 0xe88062
+#define regPCIE0PortASerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAIntFatal_ACTION_CONTROL 0xe88063
+#define regPCIE0PortAIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAIntNonFatal_ACTION_CONTROL 0xe88064
+#define regPCIE0PortAIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAIntCorr_ACTION_CONTROL 0xe88065
+#define regPCIE0PortAIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAExtFatal_ACTION_CONTROL 0xe88066
+#define regPCIE0PortAExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAExtNonFatal_ACTION_CONTROL 0xe88067
+#define regPCIE0PortAExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAExtCorr_ACTION_CONTROL 0xe88068
+#define regPCIE0PortAExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortAParityErr_ACTION_CONTROL 0xe88069
+#define regPCIE0PortAParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBSerr_ACTION_CONTROL 0xe8806a
+#define regPCIE0PortBSerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBIntFatal_ACTION_CONTROL 0xe8806b
+#define regPCIE0PortBIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBIntNonFatal_ACTION_CONTROL 0xe8806c
+#define regPCIE0PortBIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBIntCorr_ACTION_CONTROL 0xe8806d
+#define regPCIE0PortBIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBExtFatal_ACTION_CONTROL 0xe8806e
+#define regPCIE0PortBExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBExtNonFatal_ACTION_CONTROL 0xe8806f
+#define regPCIE0PortBExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBExtCorr_ACTION_CONTROL 0xe88070
+#define regPCIE0PortBExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortBParityErr_ACTION_CONTROL 0xe88071
+#define regPCIE0PortBParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCSerr_ACTION_CONTROL 0xe88072
+#define regPCIE0PortCSerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCIntFatal_ACTION_CONTROL 0xe88073
+#define regPCIE0PortCIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCIntNonFatal_ACTION_CONTROL 0xe88074
+#define regPCIE0PortCIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCIntCorr_ACTION_CONTROL 0xe88075
+#define regPCIE0PortCIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCExtFatal_ACTION_CONTROL 0xe88076
+#define regPCIE0PortCExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCExtNonFatal_ACTION_CONTROL 0xe88077
+#define regPCIE0PortCExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCExtCorr_ACTION_CONTROL 0xe88078
+#define regPCIE0PortCExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortCParityErr_ACTION_CONTROL 0xe88079
+#define regPCIE0PortCParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDSerr_ACTION_CONTROL 0xe8807a
+#define regPCIE0PortDSerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDIntFatal_ACTION_CONTROL 0xe8807b
+#define regPCIE0PortDIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDIntNonFatal_ACTION_CONTROL 0xe8807c
+#define regPCIE0PortDIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDIntCorr_ACTION_CONTROL 0xe8807d
+#define regPCIE0PortDIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDExtFatal_ACTION_CONTROL 0xe8807e
+#define regPCIE0PortDExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDExtNonFatal_ACTION_CONTROL 0xe8807f
+#define regPCIE0PortDExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDExtCorr_ACTION_CONTROL 0xe88080
+#define regPCIE0PortDExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortDParityErr_ACTION_CONTROL 0xe88081
+#define regPCIE0PortDParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortESerr_ACTION_CONTROL 0xe88082
+#define regPCIE0PortESerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEIntFatal_ACTION_CONTROL 0xe88083
+#define regPCIE0PortEIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEIntNonFatal_ACTION_CONTROL 0xe88084
+#define regPCIE0PortEIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEIntCorr_ACTION_CONTROL 0xe88085
+#define regPCIE0PortEIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEExtFatal_ACTION_CONTROL 0xe88086
+#define regPCIE0PortEExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEExtNonFatal_ACTION_CONTROL 0xe88087
+#define regPCIE0PortEExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEExtCorr_ACTION_CONTROL 0xe88088
+#define regPCIE0PortEExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortEParityErr_ACTION_CONTROL 0xe88089
+#define regPCIE0PortEParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFSerr_ACTION_CONTROL 0xe8808a
+#define regPCIE0PortFSerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFIntFatal_ACTION_CONTROL 0xe8808b
+#define regPCIE0PortFIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFIntNonFatal_ACTION_CONTROL 0xe8808c
+#define regPCIE0PortFIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFIntCorr_ACTION_CONTROL 0xe8808d
+#define regPCIE0PortFIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFExtFatal_ACTION_CONTROL 0xe8808e
+#define regPCIE0PortFExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFExtNonFatal_ACTION_CONTROL 0xe8808f
+#define regPCIE0PortFExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFExtCorr_ACTION_CONTROL 0xe88090
+#define regPCIE0PortFExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortFParityErr_ACTION_CONTROL 0xe88091
+#define regPCIE0PortFParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGSerr_ACTION_CONTROL 0xe88092
+#define regPCIE0PortGSerr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGIntFatal_ACTION_CONTROL 0xe88093
+#define regPCIE0PortGIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGIntNonFatal_ACTION_CONTROL 0xe88094
+#define regPCIE0PortGIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGIntCorr_ACTION_CONTROL 0xe88095
+#define regPCIE0PortGIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGExtFatal_ACTION_CONTROL 0xe88096
+#define regPCIE0PortGExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGExtNonFatal_ACTION_CONTROL 0xe88097
+#define regPCIE0PortGExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGExtCorr_ACTION_CONTROL 0xe88098
+#define regPCIE0PortGExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regPCIE0PortGParityErr_ACTION_CONTROL 0xe88099
+#define regPCIE0PortGParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortASerr_ACTION_CONTROL 0xe880ca
+#define regNBIF1PortASerr_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAIntFatal_ACTION_CONTROL 0xe880cb
+#define regNBIF1PortAIntFatal_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAIntNonFatal_ACTION_CONTROL 0xe880cc
+#define regNBIF1PortAIntNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAIntCorr_ACTION_CONTROL 0xe880cd
+#define regNBIF1PortAIntCorr_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAExtFatal_ACTION_CONTROL 0xe880ce
+#define regNBIF1PortAExtFatal_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAExtNonFatal_ACTION_CONTROL 0xe880cf
+#define regNBIF1PortAExtNonFatal_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAExtCorr_ACTION_CONTROL 0xe880d0
+#define regNBIF1PortAExtCorr_ACTION_CONTROL_BASE_IDX 8
+#define regNBIF1PortAParityErr_ACTION_CONTROL 0xe880d1
+#define regNBIF1PortAParityErr_ACTION_CONTROL_BASE_IDX 8
+#define regSYNCFLOOD_STATUS 0xe88200
+#define regSYNCFLOOD_STATUS_BASE_IDX 8
+#define regNMI_STATUS 0xe88201
+#define regNMI_STATUS_BASE_IDX 8
+#define regPOISON_ACTION_CONTROL 0xe88205
+#define regPOISON_ACTION_CONTROL_BASE_IDX 8
+#define regINTERNAL_POISON_STATUS 0xe88206
+#define regINTERNAL_POISON_STATUS_BASE_IDX 8
+#define regINTERNAL_POISON_MASK 0xe88207
+#define regINTERNAL_POISON_MASK_BASE_IDX 8
+#define regEGRESS_POISON_STATUS_LO 0xe88208
+#define regEGRESS_POISON_STATUS_LO_BASE_IDX 8
+#define regEGRESS_POISON_STATUS_HI 0xe88209
+#define regEGRESS_POISON_STATUS_HI_BASE_IDX 8
+#define regEGRESS_POISON_MASK_LO 0xe8820a
+#define regEGRESS_POISON_MASK_LO_BASE_IDX 8
+#define regEGRESS_POISON_MASK_HI 0xe8820b
+#define regEGRESS_POISON_MASK_HI_BASE_IDX 8
+#define regEGRESS_POISON_SEVERITY_DOWN 0xe8820c
+#define regEGRESS_POISON_SEVERITY_DOWN_BASE_IDX 8
+#define regEGRESS_POISON_SEVERITY_UPPER 0xe8820d
+#define regEGRESS_POISON_SEVERITY_UPPER_BASE_IDX 8
+#define regAPML_STATUS 0xe88370
+#define regAPML_STATUS_BASE_IDX 8
+#define regAPML_CONTROL 0xe88371
+#define regAPML_CONTROL_BASE_IDX 8
+#define regAPML_TRIGGER 0xe88372
+#define regAPML_TRIGGER_BASE_IDX 8
+
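The reg<NAME> / reg<NAME>_BASE_IDX pairs above give a dword register offset plus an index into a per-IP segment base table rather than an absolute MMIO address. A minimal userspace sketch of how such a pair might be resolved is shown below; the segment base table, its contents, and the helper name reg_dword_offset() are illustrative assumptions, not part of this header or of the driver.

#include <stdint.h>
#include <stdio.h>

/* values copied from the rascfg address block above */
#define regRAS_GLOBAL_STATUS_LO           0xe88008
#define regRAS_GLOBAL_STATUS_LO_BASE_IDX  8

/* hypothetical per-IP segment base table, indexed by the *_BASE_IDX value */
static const uint32_t seg_base[16] = {
	[8] = 0x0,	/* assumed segment base for this address block */
};

/* combine a register's dword offset with its segment base */
static uint32_t reg_dword_offset(uint32_t reg, unsigned int base_idx)
{
	return seg_base[base_idx] + reg;
}

int main(void)
{
	printf("RAS_GLOBAL_STATUS_LO dword offset: 0x%x\n",
	       reg_dword_offset(regRAS_GLOBAL_STATUS_LO,
				regRAS_GLOBAL_STATUS_LO_BASE_IDX));
	return 0;
}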
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg0_devind_cfgdecp
+// base address: 0x13b31000
+#define regNB_PCIE0DEVINDCFG0_STEERING_CNTL 0xe8c403
+#define regNB_PCIE0DEVINDCFG0_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg1_devind_cfgdecp
+// base address: 0x13b31400
+#define regNB_PCIE0DEVINDCFG1_STEERING_CNTL 0xe8c503
+#define regNB_PCIE0DEVINDCFG1_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg2_devind_cfgdecp
+// base address: 0x13b31800
+#define regNB_PCIE0DEVINDCFG2_STEERING_CNTL 0xe8c603
+#define regNB_PCIE0DEVINDCFG2_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg3_devind_cfgdecp
+// base address: 0x13b31c00
+#define regNB_PCIE0DEVINDCFG3_STEERING_CNTL 0xe8c703
+#define regNB_PCIE0DEVINDCFG3_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg4_devind_cfgdecp
+// base address: 0x13b32000
+#define regNB_PCIE0DEVINDCFG4_STEERING_CNTL 0xe8c803
+#define regNB_PCIE0DEVINDCFG4_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg5_devind_cfgdecp
+// base address: 0x13b32400
+#define regNB_PCIE0DEVINDCFG5_STEERING_CNTL 0xe8c903
+#define regNB_PCIE0DEVINDCFG5_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg6_devind_cfgdecp
+// base address: 0x13b32800
+#define regNB_PCIE0DEVINDCFG6_STEERING_CNTL 0xe8ca03
+#define regNB_PCIE0DEVINDCFG6_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_NBIF1devindcfg0_devind_cfgdecp
+// base address: 0x13b38000
+#define regNB_NBIF1DEVINDCFG0_STEERING_CNTL 0xe8e003
+#define regNB_NBIF1DEVINDCFG0_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_intSBdevindcfg0_devind_cfgdecp
+// base address: 0x13b3c000
+#define regNB_INTSBDEVINDCFG0_STEERING_CNTL 0xe8f003
+#define regNB_INTSBDEVINDCFG0_STEERING_CNTL_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg0_pciercbdgind_cfgdec
+// base address: 0x13b7d600
+#define regNB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION 0xe9f5b7
+#define regNB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX 0xe9f5b8
+#define regNB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG0_RC_SMN_DATA 0xe9f5b9
+#define regNB_PCIE0RCBDG_INDCFG0_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg1_pciercbdgind_cfgdec
+// base address: 0x13b7d700
+#define regNB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX_EXTENSION 0xe9f5f7
+#define regNB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX 0xe9f5f8
+#define regNB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG1_RC_SMN_DATA 0xe9f5f9
+#define regNB_PCIE0RCBDG_INDCFG1_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg2_pciercbdgind_cfgdec
+// base address: 0x13b7d800
+#define regNB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX_EXTENSION 0xe9f637
+#define regNB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX 0xe9f638
+#define regNB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG2_RC_SMN_DATA 0xe9f639
+#define regNB_PCIE0RCBDG_INDCFG2_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg3_pciercbdgind_cfgdec
+// base address: 0x13b7d900
+#define regNB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX_EXTENSION 0xe9f677
+#define regNB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX 0xe9f678
+#define regNB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG3_RC_SMN_DATA 0xe9f679
+#define regNB_PCIE0RCBDG_INDCFG3_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg4_pciercbdgind_cfgdec
+// base address: 0x13b7da00
+#define regNB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX_EXTENSION 0xe9f6b7
+#define regNB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX 0xe9f6b8
+#define regNB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG4_RC_SMN_DATA 0xe9f6b9
+#define regNB_PCIE0RCBDG_INDCFG4_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg5_pciercbdgind_cfgdec
+// base address: 0x13b7db00
+#define regNB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX_EXTENSION 0xe9f6f7
+#define regNB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX 0xe9f6f8
+#define regNB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG5_RC_SMN_DATA 0xe9f6f9
+#define regNB_PCIE0RCBDG_INDCFG5_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg6_pciercbdgind_cfgdec
+// base address: 0x13b7dc00
+#define regNB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX_EXTENSION 0xe9f737
+#define regNB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX 0xe9f738
+#define regNB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_PCIE0RCBDG_INDCFG6_RC_SMN_DATA 0xe9f739
+#define regNB_PCIE0RCBDG_INDCFG6_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_NBIF1rcbdg_indcfg0_pciercbdgind_cfgdec
+// base address: 0x13b7f200
+#define regNB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION 0xe9fcb7
+#define regNB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION_BASE_IDX 8
+#define regNB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX 0xe9fcb8
+#define regNB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX_BASE_IDX 8
+#define regNB_NBIF1RCBDG_INDCFG0_RC_SMN_DATA 0xe9fcb9
+#define regNB_NBIF1RCBDG_INDCFG0_RC_SMN_DATA_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_iommu_l2a_l2acfg
+// base address: 0x15700000
+#define regL2_PERF_CNTL_0 0x1580000
+#define regL2_PERF_CNTL_0_BASE_IDX 8
+#define regL2_PERF_COUNT_0 0x1580001
+#define regL2_PERF_COUNT_0_BASE_IDX 8
+#define regL2_PERF_COUNT_1 0x1580002
+#define regL2_PERF_COUNT_1_BASE_IDX 8
+#define regL2_PERF_CNTL_1 0x1580003
+#define regL2_PERF_CNTL_1_BASE_IDX 8
+#define regL2_PERF_COUNT_2 0x1580004
+#define regL2_PERF_COUNT_2_BASE_IDX 8
+#define regL2_PERF_COUNT_3 0x1580005
+#define regL2_PERF_COUNT_3_BASE_IDX 8
+#define regL2_STATUS_0 0x1580008
+#define regL2_STATUS_0_BASE_IDX 8
+#define regL2_CONTROL_0 0x158000c
+#define regL2_CONTROL_0_BASE_IDX 8
+#define regL2_CONTROL_1 0x158000d
+#define regL2_CONTROL_1_BASE_IDX 8
+#define regL2_DTC_CONTROL 0x1580010
+#define regL2_DTC_CONTROL_BASE_IDX 8
+#define regL2_DTC_HASH_CONTROL 0x1580011
+#define regL2_DTC_HASH_CONTROL_BASE_IDX 8
+#define regL2_DTC_WAY_CONTROL 0x1580012
+#define regL2_DTC_WAY_CONTROL_BASE_IDX 8
+#define regL2_ITC_CONTROL 0x1580014
+#define regL2_ITC_CONTROL_BASE_IDX 8
+#define regL2_ITC_HASH_CONTROL 0x1580015
+#define regL2_ITC_HASH_CONTROL_BASE_IDX 8
+#define regL2_ITC_WAY_CONTROL 0x1580016
+#define regL2_ITC_WAY_CONTROL_BASE_IDX 8
+#define regL2_PTC_A_CONTROL 0x1580018
+#define regL2_PTC_A_CONTROL_BASE_IDX 8
+#define regL2_PTC_A_HASH_CONTROL 0x1580019
+#define regL2_PTC_A_HASH_CONTROL_BASE_IDX 8
+#define regL2_PTC_A_WAY_CONTROL 0x158001a
+#define regL2_PTC_A_WAY_CONTROL_BASE_IDX 8
+#define regL2A_UPDATE_FILTER_CNTL 0x1580022
+#define regL2A_UPDATE_FILTER_CNTL_BASE_IDX 8
+#define regL2_ERR_RULE_CONTROL_3 0x1580030
+#define regL2_ERR_RULE_CONTROL_3_BASE_IDX 8
+#define regL2_ERR_RULE_CONTROL_4 0x1580031
+#define regL2_ERR_RULE_CONTROL_4_BASE_IDX 8
+#define regL2_ERR_RULE_CONTROL_5 0x1580032
+#define regL2_ERR_RULE_CONTROL_5_BASE_IDX 8
+#define regL2_L2A_CK_GATE_CONTROL 0x1580033
+#define regL2_L2A_CK_GATE_CONTROL_BASE_IDX 8
+#define regL2_L2A_PGSIZE_CONTROL 0x1580034
+#define regL2_L2A_PGSIZE_CONTROL_BASE_IDX 8
+#define regL2_PWRGATE_CNTRL_REG_0 0x158003e
+#define regL2_PWRGATE_CNTRL_REG_0_BASE_IDX 8
+#define regL2_PWRGATE_CNTRL_REG_3 0x1580041
+#define regL2_PWRGATE_CNTRL_REG_3_BASE_IDX 8
+#define regL2_ECO_CNTRL_0 0x1580042
+#define regL2_ECO_CNTRL_0_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_iommu_l2indx_l2indxcfg
+// base address: 0x13f01000
+#define regL2_STATUS_1 0xf80448
+#define regL2_STATUS_1_BASE_IDX 8
+#define regL2_SB_LOCATION 0xf8044b
+#define regL2_SB_LOCATION_BASE_IDX 8
+#define regL2_CONTROL_5 0xf8044c
+#define regL2_CONTROL_5_BASE_IDX 8
+#define regL2_CONTROL_6 0xf8044f
+#define regL2_CONTROL_6_BASE_IDX 8
+#define regL2_PDC_CONTROL 0xf80450
+#define regL2_PDC_CONTROL_BASE_IDX 8
+#define regL2_PDC_HASH_CONTROL 0xf80451
+#define regL2_PDC_HASH_CONTROL_BASE_IDX 8
+#define regL2_PDC_WAY_CONTROL 0xf80452
+#define regL2_PDC_WAY_CONTROL_BASE_IDX 8
+#define regL2B_UPDATE_FILTER_CNTL 0xf80453
+#define regL2B_UPDATE_FILTER_CNTL_BASE_IDX 8
+#define regL2_TW_CONTROL 0xf80454
+#define regL2_TW_CONTROL_BASE_IDX 8
+#define regL2_CP_CONTROL 0xf80456
+#define regL2_CP_CONTROL_BASE_IDX 8
+#define regL2_CP_CONTROL_1 0xf80457
+#define regL2_CP_CONTROL_1_BASE_IDX 8
+#define regL2_TW_CONTROL_1 0xf8045a
+#define regL2_TW_CONTROL_1_BASE_IDX 8
+#define regL2_TW_CONTROL_2 0xf80461
+#define regL2_TW_CONTROL_2_BASE_IDX 8
+#define regL2_TW_CONTROL_3 0xf80462
+#define regL2_TW_CONTROL_3_BASE_IDX 8
+#define regL2_CREDIT_CONTROL_0 0xf80470
+#define regL2_CREDIT_CONTROL_0_BASE_IDX 8
+#define regL2_CREDIT_CONTROL_1 0xf80471
+#define regL2_CREDIT_CONTROL_1_BASE_IDX 8
+#define regL2_ERR_RULE_CONTROL_0 0xf80480
+#define regL2_ERR_RULE_CONTROL_0_BASE_IDX 8
+#define regL2_ERR_RULE_CONTROL_1 0xf80481
+#define regL2_ERR_RULE_CONTROL_1_BASE_IDX 8
+#define regL2_ERR_RULE_CONTROL_2 0xf80482
+#define regL2_ERR_RULE_CONTROL_2_BASE_IDX 8
+#define regL2_L2B_CK_GATE_CONTROL 0xf80490
+#define regL2_L2B_CK_GATE_CONTROL_BASE_IDX 8
+#define regPPR_CONTROL 0xf80492
+#define regPPR_CONTROL_BASE_IDX 8
+#define regL2_L2B_PGSIZE_CONTROL 0xf80494
+#define regL2_L2B_PGSIZE_CONTROL_BASE_IDX 8
+#define regL2_PERF_CNTL_2 0xf80499
+#define regL2_PERF_CNTL_2_BASE_IDX 8
+#define regL2_PERF_COUNT_4 0xf8049a
+#define regL2_PERF_COUNT_4_BASE_IDX 8
+#define regL2_PERF_COUNT_5 0xf8049b
+#define regL2_PERF_COUNT_5_BASE_IDX 8
+#define regL2_PERF_CNTL_3 0xf8049c
+#define regL2_PERF_CNTL_3_BASE_IDX 8
+#define regL2_PERF_COUNT_6 0xf8049d
+#define regL2_PERF_COUNT_6_BASE_IDX 8
+#define regL2_PERF_COUNT_7 0xf8049e
+#define regL2_PERF_COUNT_7_BASE_IDX 8
+#define regL2B_SDP_PARITY_ERROR_EN 0xf804a2
+#define regL2B_SDP_PARITY_ERROR_EN_BASE_IDX 8
+#define regL2_ECO_CNTRL_1 0xf804a3
+#define regL2_ECO_CNTRL_1_BASE_IDX 8
+#define regL2_CP_CONTROL_2 0xf804bf
+#define regL2_CP_CONTROL_2_BASE_IDX 8
+#define regL2_CP_CONTROL_3 0xf804c0
+#define regL2_CP_CONTROL_3_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_iohub_nb_ioapiccfg_ioapic_cfgdec
+// base address: 0x14300000
+#define regFEATURES_ENABLE 0x1080000
+#define regFEATURES_ENABLE_BASE_IDX 8
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_RC_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_RC_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_RC_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_RC_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_RC_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_RC_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_RC_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_RC_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_RC_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_RC_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_RC_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_RC_BIST 0x000f
+#define cfgBIF_CFG_DEV0_RC_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_RC_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY 0x0018
+#define cfgBIF_CFG_DEV0_RC_IO_BASE_LIMIT 0x001c
+#define cfgBIF_CFG_DEV0_RC_SECONDARY_STATUS 0x001e
+#define cfgBIF_CFG_DEV0_RC_MEM_BASE_LIMIT 0x0020
+#define cfgBIF_CFG_DEV0_RC_PREF_BASE_LIMIT 0x0024
+#define cfgBIF_CFG_DEV0_RC_PREF_BASE_UPPER 0x0028
+#define cfgBIF_CFG_DEV0_RC_PREF_LIMIT_UPPER 0x002c
+#define cfgBIF_CFG_DEV0_RC_IO_BASE_LIMIT_HI 0x0030
+#define cfgBIF_CFG_DEV0_RC_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_RC_ROM_BASE_ADDR 0x0038
+#define cfgBIF_CFG_DEV0_RC_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_RC_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_RC_IRQ_BRIDGE_CNTL 0x003e
+#define cfgBIF_CFG_DEV0_RC_EXT_BRIDGE_CNTL 0x0040
+#define cfgBIF_CFG_DEV0_RC_PMI_CAP_LIST 0x0050
+#define cfgBIF_CFG_DEV0_RC_PMI_CAP 0x0052
+#define cfgBIF_CFG_DEV0_RC_PMI_STATUS_CNTL 0x0054
+#define cfgBIF_CFG_DEV0_RC_PCIE_CAP_LIST 0x0058
+#define cfgBIF_CFG_DEV0_RC_PCIE_CAP 0x005a
+#define cfgBIF_CFG_DEV0_RC_DEVICE_CAP 0x005c
+#define cfgBIF_CFG_DEV0_RC_DEVICE_CNTL 0x0060
+#define cfgBIF_CFG_DEV0_RC_DEVICE_STATUS 0x0062
+#define cfgBIF_CFG_DEV0_RC_LINK_CAP 0x0064
+#define cfgBIF_CFG_DEV0_RC_LINK_CNTL 0x0068
+#define cfgBIF_CFG_DEV0_RC_LINK_STATUS 0x006a
+#define cfgBIF_CFG_DEV0_RC_SLOT_CAP 0x006c
+#define cfgBIF_CFG_DEV0_RC_SLOT_CNTL 0x0070
+#define cfgBIF_CFG_DEV0_RC_SLOT_STATUS 0x0072
+#define cfgBIF_CFG_DEV0_RC_ROOT_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_RC_ROOT_CAP 0x0076
+#define cfgBIF_CFG_DEV0_RC_ROOT_STATUS 0x0078
+#define cfgBIF_CFG_DEV0_RC_DEVICE_CAP2 0x007c
+#define cfgBIF_CFG_DEV0_RC_DEVICE_CNTL2 0x0080
+#define cfgBIF_CFG_DEV0_RC_DEVICE_STATUS2 0x0082
+#define cfgBIF_CFG_DEV0_RC_LINK_CAP2 0x0084
+#define cfgBIF_CFG_DEV0_RC_LINK_CNTL2 0x0088
+#define cfgBIF_CFG_DEV0_RC_LINK_STATUS2 0x008a
+#define cfgBIF_CFG_DEV0_RC_SLOT_CAP2 0x008c
+#define cfgBIF_CFG_DEV0_RC_SLOT_CNTL2 0x0090
+#define cfgBIF_CFG_DEV0_RC_SLOT_STATUS2 0x0092
+#define cfgBIF_CFG_DEV0_RC_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_RC_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_RC_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_RC_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_RC_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_RC_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_RC_SSID_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_RC_SSID_CAP 0x00c4
+#define cfgBIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST 0x0110
+#define cfgBIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1 0x0114
+#define cfgBIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG2 0x0118
+#define cfgBIF_CFG_DEV0_RC_PCIE_PORT_VC_CNTL 0x011c
+#define cfgBIF_CFG_DEV0_RC_PCIE_PORT_VC_STATUS 0x011e
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP 0x0120
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL 0x0124
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_STATUS 0x012a
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP 0x012c
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL 0x0130
+#define cfgBIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_STATUS 0x0136
+#define cfgBIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
+#define cfgBIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW1 0x0144
+#define cfgBIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW2 0x0148
+#define cfgBIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_RC_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_RC_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_RC_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_RC_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD 0x017c
+#define cfgBIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS 0x0180
+#define cfgBIF_CFG_DEV0_RC_PCIE_ERR_SRC_ID 0x0184
+#define cfgBIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
+#define cfgBIF_CFG_DEV0_RC_PCIE_LINK_CNTL3 0x0274
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_ERROR_STATUS 0x0278
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
+#define cfgBIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
+#define cfgBIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST 0x02a0
+#define cfgBIF_CFG_DEV0_RC_PCIE_ACS_CAP 0x02a4
+#define cfgBIF_CFG_DEV0_RC_PCIE_ACS_CNTL 0x02a6
+#define cfgBIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST 0x0400
+#define cfgBIF_CFG_DEV0_RC_DATA_LINK_FEATURE_CAP 0x0404
+#define cfgBIF_CFG_DEV0_RC_DATA_LINK_FEATURE_STATUS 0x0408
+#define cfgBIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST 0x0410
+#define cfgBIF_CFG_DEV0_RC_LINK_CAP_16GT 0x0414
+#define cfgBIF_CFG_DEV0_RC_LINK_CNTL_16GT 0x0418
+#define cfgBIF_CFG_DEV0_RC_LINK_STATUS_16GT 0x041c
+#define cfgBIF_CFG_DEV0_RC_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
+#define cfgBIF_CFG_DEV0_RC_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
+#define cfgBIF_CFG_DEV0_RC_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
+#define cfgBIF_CFG_DEV0_RC_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
+#define cfgBIF_CFG_DEV0_RC_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
+#define cfgBIF_CFG_DEV0_RC_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
+#define cfgBIF_CFG_DEV0_RC_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
+#define cfgBIF_CFG_DEV0_RC_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
+#define cfgBIF_CFG_DEV0_RC_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
+#define cfgBIF_CFG_DEV0_RC_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
+#define cfgBIF_CFG_DEV0_RC_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
+#define cfgBIF_CFG_DEV0_RC_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
+#define cfgBIF_CFG_DEV0_RC_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
+#define cfgBIF_CFG_DEV0_RC_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
+#define cfgBIF_CFG_DEV0_RC_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
+#define cfgBIF_CFG_DEV0_RC_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
+#define cfgBIF_CFG_DEV0_RC_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
+#define cfgBIF_CFG_DEV0_RC_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
+#define cfgBIF_CFG_DEV0_RC_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
+#define cfgBIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST 0x0450
+#define cfgBIF_CFG_DEV0_RC_MARGINING_PORT_CAP 0x0454
+#define cfgBIF_CFG_DEV0_RC_MARGINING_PORT_STATUS 0x0456
+#define cfgBIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL 0x0458
+#define cfgBIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS 0x045a
+#define cfgBIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL 0x045c
+#define cfgBIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS 0x045e
+#define cfgBIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL 0x0460
+#define cfgBIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS 0x0462
+#define cfgBIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL 0x0464
+#define cfgBIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS 0x0466
+#define cfgBIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL 0x0468
+#define cfgBIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS 0x046a
+#define cfgBIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL 0x046c
+#define cfgBIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS 0x046e
+#define cfgBIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL 0x0470
+#define cfgBIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS 0x0472
+#define cfgBIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL 0x0474
+#define cfgBIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS 0x0476
+#define cfgBIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL 0x0478
+#define cfgBIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS 0x047a
+#define cfgBIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL 0x047c
+#define cfgBIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS 0x047e
+#define cfgBIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL 0x0480
+#define cfgBIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS 0x0482
+#define cfgBIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL 0x0484
+#define cfgBIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS 0x0486
+#define cfgBIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL 0x0488
+#define cfgBIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS 0x048a
+#define cfgBIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL 0x048c
+#define cfgBIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS 0x048e
+#define cfgBIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL 0x0490
+#define cfgBIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS 0x0492
+#define cfgBIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL 0x0494
+#define cfgBIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS 0x0496
+#define cfgBIF_CFG_DEV0_RC_LINK_CAP_32GT 0x0504
+#define cfgBIF_CFG_DEV0_RC_LINK_CNTL_32GT 0x0508
+#define cfgBIF_CFG_DEV0_RC_LINK_STATUS_32GT 0x050c
+
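The cfgBIF_CFG_DEV0_RC_* values are byte offsets into the root complex function's PCI configuration space and follow the standard type-1 header layout (VENDOR_ID at 0x0, DEVICE_ID at 0x2, and so on). Below is a small userspace sketch under that assumption, reading two of these offsets through sysfs; the sysfs path and the 0000:00:00.0 BDF are placeholders, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define cfgBIF_CFG_DEV0_RC_VENDOR_ID 0x0000
#define cfgBIF_CFG_DEV0_RC_DEVICE_ID 0x0002

int main(void)
{
	/* placeholder BDF: substitute the root complex function of interest */
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/config", "rb");
	uint16_t vendor, device;

	if (!f)
		return 1;
	if (fseek(f, cfgBIF_CFG_DEV0_RC_VENDOR_ID, SEEK_SET) ||
	    fread(&vendor, sizeof(vendor), 1, f) != 1)
		goto err;
	if (fseek(f, cfgBIF_CFG_DEV0_RC_DEVICE_ID, SEEK_SET) ||
	    fread(&device, sizeof(device), 1, f) != 1)
		goto err;
	printf("vendor 0x%04x device 0x%04x\n", vendor, device);
	fclose(f);
	return 0;
err:
	fclose(f);
	return 1;
}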
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF0_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF0_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF0_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF0_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF0_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF0_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF0_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF1_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF1_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF1_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF1_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF1_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF1_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF1_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF2_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF2_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF2_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF2_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF2_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF2_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF2_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF3_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF3_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF3_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF3_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF3_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF3_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF3_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF4_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF4_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF4_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF4_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF4_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF4_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF4_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF5_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF5_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF5_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF5_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF5_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF5_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF5_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF6_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF6_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF6_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF6_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF6_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF6_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF6_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF7_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF7_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF7_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF7_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF7_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF7_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF7_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST 0x02b0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP 0x02b4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CNTL 0x02b6
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf0_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF0_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF0_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf0_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF0_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF0_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf0_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf1_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF1_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF1_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf1_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF1_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF1_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf1_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf2_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF2_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF2_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf2_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF2_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF2_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf2_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf3_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF3_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF3_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf3_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF3_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF3_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf3_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf4_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF4_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF4_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf4_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF4_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF4_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf4_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf5_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF5_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF5_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf5_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF5_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF5_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf5_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf6_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF6_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF6_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf6_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF6_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF6_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf6_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_PBA_BASE_IDX 4
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
+#define regBIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf7_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF7_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF7_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf7_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF7_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF7_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf7_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL_BASE_IDX 4
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_PBA_BASE_IDX 4
+
+
+#endif
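A brief aside on how the offset header above is typically consumed: each `reg*` macro is a dword offset within a register segment, and its `*_BASE_IDX` companion selects which segment base that offset is relative to. The sketch below is illustrative only and is not part of this patch; the `seg_base[]` table and `mmio` pointer are placeholders standing in for whatever base-address table and MMIO mapping a driver actually uses.

	#include <stdint.h>

	/*
	 * Hypothetical resolver: add the segment base picked by base_idx to the
	 * dword register offset to get a pointer into the MMIO mapping.
	 */
	static inline volatile uint32_t *
	nbio_reg_addr(volatile uint32_t *mmio, const uint32_t *seg_base,
		      uint32_t base_idx, uint32_t reg_offset)
	{
		return mmio + seg_base[base_idx] + reg_offset;
	}

	/*
	 * Example using macros defined above (placeholder seg_base/mmio assumed):
	 *   nbio_reg_addr(mmio, seg_base,
	 *                 regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_BASE_IDX,
	 *                 regBIF_BX_DEV0_EPF0_VF7_MM_INDEX);
	 */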
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
new file mode 100644
index 000000000000..a22481e7bcdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
@@ -0,0 +1,38900 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _nbio_7_9_0_SH_MASK_HEADER
+#define _nbio_7_9_0_SH_MASK_HEADER
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_SYSDEC
+//BIF_BX0_PCIE_INDEX
+#define BIF_BX0_PCIE_INDEX__PCIE_INDEX__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX__PCIE_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_DATA
+#define BIF_BX0_PCIE_DATA__PCIE_DATA__SHIFT 0x0
+#define BIF_BX0_PCIE_DATA__PCIE_DATA_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_INDEX2
+#define BIF_BX0_PCIE_INDEX2__PCIE_INDEX2__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX2__PCIE_INDEX2_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_DATA2
+#define BIF_BX0_PCIE_DATA2__PCIE_DATA2__SHIFT 0x0
+#define BIF_BX0_PCIE_DATA2__PCIE_DATA2_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_INDEX_HI
+#define BIF_BX0_PCIE_INDEX_HI__PCIE_INDEX_HI__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX_HI__PCIE_INDEX_HI_MASK 0x000000FFL
+//BIF_BX0_PCIE_INDEX2_HI
+#define BIF_BX0_PCIE_INDEX2_HI__PCIE_INDEX2_HI__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX2_HI__PCIE_INDEX2_HI_MASK 0x000000FFL
+//BIF_BX0_SBIOS_SCRATCH_0
+#define BIF_BX0_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_1
+#define BIF_BX0_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_2
+#define BIF_BX0_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_3
+#define BIF_BX0_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_0
+#define BIF_BX0_BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_1
+#define BIF_BX0_BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_2
+#define BIF_BX0_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_3
+#define BIF_BX0_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_4
+#define BIF_BX0_BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_5
+#define BIF_BX0_BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_6
+#define BIF_BX0_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_7
+#define BIF_BX0_BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_8
+#define BIF_BX0_BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_9
+#define BIF_BX0_BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_10
+#define BIF_BX0_BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_11
+#define BIF_BX0_BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_12
+#define BIF_BX0_BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_13
+#define BIF_BX0_BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_14
+#define BIF_BX0_BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_15
+#define BIF_BX0_BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX0_BIF_RLC_INTR_CNTL
+//BIF_BX0_BIF_VCE_INTR_CNTL
+//BIF_BX0_BIF_UVD_INTR_CNTL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR1
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR2
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR3
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR4
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR5
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR6
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR7
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_CNTL
+#define BIF_BX0_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE_MASK 0x000000FFL
+//BIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL
+#define BIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL_MASK 0xFFFFFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ONE_CPL
+#define BIF_BX0_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL
+#define BIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_0
+#define BIF_BX0_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_1
+#define BIF_BX0_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_2
+#define BIF_BX0_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_3
+#define BIF_BX0_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_4
+#define BIF_BX0_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_5
+#define BIF_BX0_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_6
+#define BIF_BX0_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_7
+#define BIF_BX0_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_8
+#define BIF_BX0_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_9
+#define BIF_BX0_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_10
+#define BIF_BX0_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_11
+#define BIF_BX0_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_12
+#define BIF_BX0_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_13
+#define BIF_BX0_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_14
+#define BIF_BX0_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_15
+#define BIF_BX0_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_0
+#define BIF_BX0_FW_SCRATCH_0__FW_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_0__FW_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_1
+#define BIF_BX0_FW_SCRATCH_1__FW_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_1__FW_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_2
+#define BIF_BX0_FW_SCRATCH_2__FW_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_2__FW_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_3
+#define BIF_BX0_FW_SCRATCH_3__FW_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_3__FW_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_4
+#define BIF_BX0_FW_SCRATCH_4__FW_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_4__FW_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_5
+#define BIF_BX0_FW_SCRATCH_5__FW_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_5__FW_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_6
+#define BIF_BX0_FW_SCRATCH_6__FW_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_6__FW_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_7
+#define BIF_BX0_FW_SCRATCH_7__FW_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_7__FW_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_8
+#define BIF_BX0_FW_SCRATCH_8__FW_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_8__FW_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_9
+#define BIF_BX0_FW_SCRATCH_9__FW_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_9__FW_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_10
+#define BIF_BX0_FW_SCRATCH_10__FW_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_10__FW_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_11
+#define BIF_BX0_FW_SCRATCH_11__FW_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_11__FW_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_12
+#define BIF_BX0_FW_SCRATCH_12__FW_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_12__FW_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_13
+#define BIF_BX0_FW_SCRATCH_13__FW_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_13__FW_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_14
+#define BIF_BX0_FW_SCRATCH_14__FW_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_14__FW_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_15
+#define BIF_BX0_FW_SCRATCH_15__FW_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_15__FW_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_4
+#define BIF_BX0_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_5
+#define BIF_BX0_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_6
+#define BIF_BX0_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_7
+#define BIF_BX0_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_8
+#define BIF_BX0_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_9
+#define BIF_BX0_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_10
+#define BIF_BX0_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_11
+#define BIF_BX0_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_12
+#define BIF_BX0_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_13
+#define BIF_BX0_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_14
+#define BIF_BX0_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_15
+#define BIF_BX0_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+
+
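For the SHIFT/MASK pairs in this header, a field is conventionally read by masking and shifting, and written by clearing the masked bits before inserting the new value. The helpers below are a generic, hypothetical sketch for illustration, not the driver's own accessors.

	#include <stdint.h>

	/* Extract a field from a raw 32-bit register value. */
	static inline uint32_t reg_field_get(uint32_t val, uint32_t mask,
					     uint32_t shift)
	{
		return (val & mask) >> shift;
	}

	/* Return val with the masked field replaced by 'field'. */
	static inline uint32_t reg_field_set(uint32_t val, uint32_t mask,
					     uint32_t shift, uint32_t field)
	{
		return (val & ~mask) | ((field << shift) & mask);
	}

	/*
	 * Example with macros from the SYSDEC block above:
	 *   hi = reg_field_get(v, BIF_BX0_PCIE_INDEX_HI__PCIE_INDEX_HI_MASK,
	 *                         BIF_BX0_PCIE_INDEX_HI__PCIE_INDEX_HI__SHIFT);
	 */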
+// addressBlock: aid_nbio_nbif0_rcc_dwn_dev0_BIFDEC1
+//RCC_DWN_DEV0_0_DN_PCIE_RESERVED
+#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_0_DN_PCIE_SCRATCH
+#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_0_DN_PCIE_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+//RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2
+#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+//RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
+//RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+//RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
+//RCC_DWNP_DEV0_0_PCIE_ERR_CNTL
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR__SHIFT 0x12
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR__SHIFT 0x13
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR__SHIFT 0x14
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR_MASK 0x00040000L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR_MASK 0x00080000L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR_MASK 0x00100000L
+//RCC_DWNP_DEV0_0_PCIE_RX_CNTL
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+//RCC_DWNP_DEV0_0_PCIE_LC_CNTL2
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_0_PCIEP_STRAP_MISC
+#define RCC_DWNP_DEV0_0_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
+#define RCC_DWNP_DEV0_0_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
+//RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP
+#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_ep_dev0_BIFDEC1
+//RCC_EP_DEV0_0_EP_PCIE_SCRATCH
+#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_0_EP_PCIE_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_EP_DEV0_0_EP_PCIE_INT_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+//RCC_EP_DEV0_0_EP_PCIE_INT_STATUS
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0_MASK 0x00000080L
+//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+//RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+//RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
+//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL
+#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
+//RCC_EP_DEV0_0_EP_PCIEP_RESERVED
+#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_0_EP_PCIE_TX_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
+//RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
+//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_SYSPFVFDEC
+//BIF_BX_PF0_MM_INDEX
+#define BIF_BX_PF0_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_PF0_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_PF0_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_PF0_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_PF0_MM_DATA
+#define BIF_BX_PF0_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MM_INDEX_HI
+#define BIF_BX_PF0_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_PF0_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_RSMU_INDEX
+#define BIF_BX_PF0_RSMU_INDEX__RSMU_INDEX__SHIFT 0x0
+#define BIF_BX_PF0_RSMU_INDEX__RSMU_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_RSMU_DATA
+#define BIF_BX_PF0_RSMU_DATA__RSMU_DATA__SHIFT 0x0
+#define BIF_BX_PF0_RSMU_DATA__RSMU_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_RSMU_INDEX_HI
+#define BIF_BX_PF0_RSMU_INDEX_HI__RSMU_INDEX_HI__SHIFT 0x0
+#define BIF_BX_PF0_RSMU_INDEX_HI__RSMU_INDEX_HI_MASK 0x000000FFL
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_BIFDEC1
+//BIF_BX0_CC_BIF_BX_STRAP0
+#define BIF_BX0_CC_BIF_BX_STRAP0__STRAP_RESERVED__SHIFT 0x19
+#define BIF_BX0_CC_BIF_BX_STRAP0__STRAP_RESERVED_MASK 0xFE000000L
+//BIF_BX0_CC_BIF_BX_PINSTRAP0
+//BIF_BX0_BIF_MM_INDACCESS_CNTL
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__WRITE_DIS__SHIFT 0x0
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__WRITE_DIS_MASK 0x00000001L
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x00000002L
+//BIF_BX0_BUS_CNTL
+#define BIF_BX0_BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x6
+#define BIF_BX0_BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x7
+#define BIF_BX0_BUS_CNTL__SET_AZ_TC__SHIFT 0xa
+#define BIF_BX0_BUS_CNTL__SET_MC_TC__SHIFT 0xd
+#define BIF_BX0_BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x10
+#define BIF_BX0_BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x11
+#define BIF_BX0_BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x12
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS__SHIFT 0x18
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS__SHIFT 0x19
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS__SHIFT 0x1a
+#define BIF_BX0_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS__SHIFT 0x1b
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS__SHIFT 0x1c
+#define BIF_BX0_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN__SHIFT 0x1d
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_WR_EN__SHIFT 0x1e
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_RD_EN__SHIFT 0x1f
+#define BIF_BX0_BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
+#define BIF_BX0_BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
+#define BIF_BX0_BUS_CNTL__SET_AZ_TC_MASK 0x00001C00L
+#define BIF_BX0_BUS_CNTL__SET_MC_TC_MASK 0x0000E000L
+#define BIF_BX0_BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
+#define BIF_BX0_BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
+#define BIF_BX0_BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS_MASK 0x01000000L
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS_MASK 0x02000000L
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS_MASK 0x04000000L
+#define BIF_BX0_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS_MASK 0x08000000L
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS_MASK 0x10000000L
+#define BIF_BX0_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN_MASK 0x20000000L
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_WR_EN_MASK 0x40000000L
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_RD_EN_MASK 0x80000000L
+//BIF_BX0_BIF_SCRATCH0
+#define BIF_BX0_BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x0
+#define BIF_BX0_BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xFFFFFFFFL
+//BIF_BX0_BIF_SCRATCH1
+#define BIF_BX0_BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x0
+#define BIF_BX0_BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xFFFFFFFFL
+//BIF_BX0_BX_RESET_EN
+#define BIF_BX0_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN__SHIFT 0x10
+#define BIF_BX0_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN_MASK 0x00010000L
+//BIF_BX0_MM_CFGREGS_CNTL
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x0
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL__SHIFT 0x6
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x1f
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL_MASK 0x000000C0L
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x80000000L
+//BIF_BX0_BX_RESET_CNTL
+#define BIF_BX0_BX_RESET_CNTL__LINK_TRAIN_EN__SHIFT 0x0
+#define BIF_BX0_BX_RESET_CNTL__LINK_TRAIN_EN_MASK 0x00000001L
+//BIF_BX0_INTERRUPT_CNTL
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x0
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x1
+#define BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x3
+#define BIF_BX0_INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x4
+#define BIF_BX0_INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x8
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN__SHIFT 0xf
+#define BIF_BX0_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN__SHIFT 0x10
+#define BIF_BX0_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS__SHIFT 0x11
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN__SHIFT 0x12
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
+#define BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
+#define BIF_BX0_INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000F0L
+#define BIF_BX0_INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN_MASK 0x00008000L
+#define BIF_BX0_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN_MASK 0x00010000L
+#define BIF_BX0_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS_MASK 0x00020000L
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN_MASK 0x00040000L
+//BIF_BX0_INTERRUPT_CNTL2
+#define BIF_BX0_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x0
+#define BIF_BX0_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xFFFFFFFFL
+//BIF_BX0_CLKREQB_PAD_CNTL
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x0
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x1
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x2
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x3
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x5
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x6
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x7
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x8
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x9
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0xa
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0xb
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0xc
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y__SHIFT 0xd
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y_MASK 0x00002000L
+//BIF_BX0_BIF_FEATURES_CONTROL_MISC
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x0
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x1
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x2
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x3
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE__SHIFT 0xb
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN__SHIFT 0xc
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS__SHIFT 0xd
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS__SHIFT 0xe
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN__SHIFT 0xf
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT__SHIFT 0x10
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR__SHIFT 0x19
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE_MASK 0x00000800L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN_MASK 0x00001000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS_MASK 0x00002000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS_MASK 0x00004000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN_MASK 0x00008000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT_MASK 0x01FF0000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR_MASK 0x02000000L
+//BIF_BX0_HDP_ATOMIC_CONTROL_MISC
+#define BIF_BX0_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT__SHIFT 0x0
+#define BIF_BX0_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT_MASK 0x000000FFL
+//BIF_BX0_BIF_DOORBELL_CNTL
+#define BIF_BX0_BIF_DOORBELL_CNTL__SELF_RING_DIS__SHIFT 0x0
+#define BIF_BX0_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS__SHIFT 0x3
+#define BIF_BX0_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT 0x4
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS__SHIFT 0x18
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0__SHIFT 0x19
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1__SHIFT 0x1a
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2__SHIFT 0x1b
+#define BIF_BX0_BIF_DOORBELL_CNTL__SELF_RING_DIS_MASK 0x00000001L
+#define BIF_BX0_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS_MASK 0x00000002L
+#define BIF_BX0_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS_MASK 0x00000008L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK 0x00000010L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS_MASK 0x01000000L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0_MASK 0x02000000L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1_MASK 0x04000000L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2_MASK 0x08000000L
+//BIF_BX0_BIF_DOORBELL_INT_CNTL
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS__SHIFT 0x0
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT 0x10
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x17
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE__SHIFT 0x19
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT 0x1a
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1c
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1d
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1e
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x1f
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK 0x00000001L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR_MASK 0x00010000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x00800000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE_MASK 0x02000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK 0x04000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x10000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x20000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x40000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x80000000L
+//BIF_BX0_BIF_FB_EN
+#define BIF_BX0_BIF_FB_EN__FB_READ_EN__SHIFT 0x0
+#define BIF_BX0_BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
+#define BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
+#define BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
+//BIF_BX0_BIF_INTR_CNTL
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+//BIF_BX0_BIF_MST_TRANS_PENDING_VF
+#define BIF_BX0_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX0_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX0_BIF_SLV_TRANS_PENDING_VF
+#define BIF_BX0_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX0_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX0_BACO_CNTL
+#define BIF_BX0_BACO_CNTL__BACO_EN__SHIFT 0x0
+#define BIF_BX0_BACO_CNTL__BACO_DUMMY_EN__SHIFT 0x2
+#define BIF_BX0_BACO_CNTL__BACO_POWER_OFF__SHIFT 0x3
+#define BIF_BX0_BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT 0x5
+#define BIF_BX0_BACO_CNTL__BACO_RST_INTR_MASK__SHIFT 0x6
+#define BIF_BX0_BACO_CNTL__BACO_MODE__SHIFT 0x8
+#define BIF_BX0_BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x9
+#define BIF_BX0_BACO_CNTL__PWRGOOD_VDDSOC__SHIFT 0x10
+#define BIF_BX0_BACO_CNTL__BACO_AUTO_EXIT__SHIFT 0x1f
+#define BIF_BX0_BACO_CNTL__BACO_EN_MASK 0x00000001L
+#define BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK 0x00000004L
+#define BIF_BX0_BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
+#define BIF_BX0_BACO_CNTL__BACO_DSTATE_BYPASS_MASK 0x00000020L
+#define BIF_BX0_BACO_CNTL__BACO_RST_INTR_MASK_MASK 0x00000040L
+#define BIF_BX0_BACO_CNTL__BACO_MODE_MASK 0x00000100L
+#define BIF_BX0_BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000200L
+#define BIF_BX0_BACO_CNTL__PWRGOOD_VDDSOC_MASK 0x00010000L
+#define BIF_BX0_BACO_CNTL__BACO_AUTO_EXIT_MASK 0x80000000L
+//BIF_BX0_BIF_BACO_EXIT_TIME0
+#define BIF_BX0_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_BIF_BACO_EXIT_TIMER1
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN__SHIFT 0x18
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS__SHIFT 0x1a
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH__SHIFT 0x1b
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW__SHIFT 0x1c
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL__SHIFT 0x1d
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS__SHIFT 0x1f
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER_MASK 0x000FFFFFL
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN_MASK 0x01000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS_MASK 0x04000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH_MASK 0x08000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW_MASK 0x10000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL_MASK 0x60000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS_MASK 0x80000000L
+//BIF_BX0_BIF_BACO_EXIT_TIMER2
+#define BIF_BX0_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_BIF_BACO_EXIT_TIMER3
+#define BIF_BX0_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_BIF_BACO_EXIT_TIMER4
+#define BIF_BX0_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_MEM_TYPE_CNTL
+#define BIF_BX0_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3__SHIFT 0x0
+#define BIF_BX0_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3_MASK 0x00000001L
+//BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE__SHIFT 0x1
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL__LUT_BC_MODE__SHIFT 0x8
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE_MASK 0x00000001L
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE_MASK 0x00000002L
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_CNTL__LUT_BC_MODE_MASK 0x00000100L
+//BIF_BX0_NBIF_GFX_ADDR_LUT_0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_0__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_0__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_1
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_1__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_1__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_2
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_2__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_2__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_3
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_3__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_3__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_4
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_4__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_4__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_5
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_5__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_5__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_6
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_6__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_6__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_7
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_7__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_7__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_8
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_8__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_8__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_9
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_9__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_9__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_10
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_10__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_10__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_11
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_11__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_11__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_12
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_12__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_12__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_13
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_13__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_13__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_14
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_14__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_14__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_NBIF_GFX_ADDR_LUT_15
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_15__ADDR__SHIFT 0x0
+#define BIF_BX0_NBIF_GFX_ADDR_LUT_15__ADDR_MASK 0x00FFFFFFL
+//BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL
+#define BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL
+#define BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX0_BIF_RB_CNTL
+#define BIF_BX0_BIF_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define BIF_BX0_BIF_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x9
+#define BIF_BX0_BIF_RB_CNTL__BIF_RB_TRAN__SHIFT 0x11
+#define BIF_BX0_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL__SHIFT 0x19
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY__SHIFT 0x1a
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_ARB_MODE__SHIFT 0x1d
+#define BIF_BX0_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE__SHIFT 0x1e
+#define BIF_BX0_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define BIF_BX0_BIF_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define BIF_BX0_BIF_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003E00L
+#define BIF_BX0_BIF_RB_CNTL__BIF_RB_TRAN_MASK 0x00020000L
+#define BIF_BX0_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL_MASK 0x02000000L
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY_MASK 0x1C000000L
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_ARB_MODE_MASK 0x20000000L
+#define BIF_BX0_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE_MASK 0x40000000L
+#define BIF_BX0_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//BIF_BX0_BIF_RB_BASE
+#define BIF_BX0_BIF_RB_BASE__ADDR__SHIFT 0x0
+#define BIF_BX0_BIF_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//BIF_BX0_BIF_RB_RPTR
+#define BIF_BX0_BIF_RB_RPTR__OFFSET__SHIFT 0x2
+#define BIF_BX0_BIF_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX0_BIF_RB_WPTR
+#define BIF_BX0_BIF_RB_WPTR__BIF_RB_OVERFLOW__SHIFT 0x0
+#define BIF_BX0_BIF_RB_WPTR__OFFSET__SHIFT 0x2
+#define BIF_BX0_BIF_RB_WPTR__BIF_RB_OVERFLOW_MASK 0x00000001L
+#define BIF_BX0_BIF_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX0_BIF_RB_WPTR_ADDR_HI
+#define BIF_BX0_BIF_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define BIF_BX0_BIF_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000FFL
+//BIF_BX0_BIF_RB_WPTR_ADDR_LO
+#define BIF_BX0_BIF_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define BIF_BX0_BIF_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//BIF_BX0_MAILBOX_INDEX
+#define BIF_BX0_MAILBOX_INDEX__MAILBOX_INDEX__SHIFT 0x0
+#define BIF_BX0_MAILBOX_INDEX__MAILBOX_INDEX_MASK 0x0000001FL
+//BIF_BX0_BIF_MP1_INTR_CTRL
+#define BIF_BX0_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE__SHIFT 0x0
+#define BIF_BX0_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE_MASK 0x00000001L
+//BIF_BX0_BIF_PERSTB_PAD_CNTL
+#define BIF_BX0_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL_MASK 0x0000FFFFL
+//BIF_BX0_BIF_PX_EN_PAD_CNTL
+#define BIF_BX0_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL_MASK 0x00000FFFL
+//BIF_BX0_BIF_REFPADKIN_PAD_CNTL
+#define BIF_BX0_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL_MASK 0x000000FFL
+//BIF_BX0_BIF_CLKREQB_PAD_CNTL
+#define BIF_BX0_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_MASK 0x7FFFFFFFL
+//BIF_BX0_BIF_PWRBRK_PAD_CNTL
+#define BIF_BX0_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL_MASK 0x000000FFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_BIFDEC1
+//RCC_DEV0_0_RCC_ERR_INT_CNTL
+#define RCC_DEV0_0_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN_MASK 0x00000001L
+//RCC_DEV0_0_RCC_BACO_CNTL_MISC
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS__SHIFT 0x1
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS_MASK 0x00000002L
+//RCC_DEV0_0_RCC_RESET_EN
+#define RCC_DEV0_0_RCC_RESET_EN__DB_APER_RESET_EN__SHIFT 0xf
+#define RCC_DEV0_0_RCC_RESET_EN__DB_APER_RESET_EN_MASK 0x00008000L
+//RCC_DEV0_0_RCC_VDM_SUPPORT
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
+//RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
+//RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
+//RCC_DEV0_0_RCC_GPUIOV_REGION
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__LFB_REGION__SHIFT 0x0
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__MAX_REGION__SHIFT 0x4
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__LFB_REGION_MASK 0x0000000FL
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__MAX_REGION_MASK 0x000000F0L
+//RCC_DEV0_0_RCC_GPU_HOSTVM_EN
+#define RCC_DEV0_0_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN_MASK 0x00000001L
+//RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN__SHIFT 0x1
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN_MASK 0x00000002L
+//RCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//RCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE_MASK 0xFFFFL
+//RCC_DEV0_0_RCC_PEER_REG_RANGE0
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_0_RCC_PEER_REG_RANGE1
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_0_RCC_BUS_CNTL
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT__SHIFT 0xc
+#define RCC_DEV0_0_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC__SHIFT 0xd
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT_MASK 0x00001000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC_MASK 0x00002000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
+//RCC_DEV0_0_RCC_CONFIG_CNTL
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x2
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x3
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
+//RCC_DEV0_0_RCC_CONFIG_F0_BASE
+#define RCC_DEV0_0_RCC_CONFIG_F0_BASE__F0_BASE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_F0_BASE__F0_BASE_MASK 0xFFFFFFFFL
+//RCC_DEV0_0_RCC_CONFIG_APER_SIZE
+#define RCC_DEV0_0_RCC_CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_APER_SIZE__APER_SIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE
+#define RCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x07FFFFFFL
+//RCC_DEV0_0_RCC_XDMA_LO
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x0
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x7FFFFFFFL
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_XDMA_HI
+#define RCC_DEV0_0_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x0
+#define RCC_DEV0_0_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x7FFFFFFFL
+//RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
+//RCC_DEV0_0_RCC_BUSNUM_CNTL1
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL1__ID_MASK__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL1__ID_MASK_MASK 0x000000FFL
+//RCC_DEV0_0_RCC_BUSNUM_LIST0
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID0__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID1__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID2__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID3__SHIFT 0x18
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID0_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID1_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID2_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID3_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_BUSNUM_LIST1
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID4__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID5__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID6__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID7__SHIFT 0x18
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID4_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID5_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID6_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID7_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_BUSNUM_CNTL2
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x11
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
+//RCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM
+#define RCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
+//RCC_DEV0_0_RCC_HOST_BUSNUM
+#define RCC_DEV0_0_RCC_HOST_BUSNUM__HOST_ID__SHIFT 0x0
+#define RCC_DEV0_0_RCC_HOST_BUSNUM__HOST_ID_MASK 0x0000FFFFL
+//RCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0__SHIFT 0x0
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1__SHIFT 0x8
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2__SHIFT 0x10
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3__SHIFT 0x18
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4__SHIFT 0x0
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5__SHIFT 0x8
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6__SHIFT 0x10
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7__SHIFT 0x18
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_DEV0_LINK_CNTL
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT__SHIFT 0x0
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY__SHIFT 0x8
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS__SHIFT 0x10
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS__SHIFT 0x11
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS_MASK 0x00020000L
+//RCC_DEV0_0_RCC_CMN_LINK_CNTL
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
+//RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
+//RCC_DEV0_0_RCC_LTR_LSWITCH_CNTL
+#define RCC_DEV0_0_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
+//RCC_DEV0_0_RCC_MH_ARB_CNTL
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_BIFDEC2
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_3__SHIFT 0x3
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_3_MASK 0x00000008L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_strap_BIFDEC1
+//RCC_STRAP0_RCC_BIF_STRAP0
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN__SHIFT 0x2
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN__SHIFT 0x6
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN__SHIFT 0xb
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR__SHIFT 0xc
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR__SHIFT 0xd
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0xe
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0xf
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN__SHIFT 0x11
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_AUD_PIN__SHIFT 0x12
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN3_DIS__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4__SHIFT 0x19
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_QUICKSIM_START__SHIFT 0x1a
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1b
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS__SHIFT 0x1c
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN__SHIFT 0x1d
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE__SHIFT 0x1e
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN_MASK 0x00000004L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN_MASK 0x00000038L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN_MASK 0x00000040L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR_MASK 0x00000400L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN_MASK 0x00000800L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR_MASK 0x00001000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR_MASK 0x00002000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00004000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00008000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_MASK 0x00010000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN_MASK 0x00020000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_AUD_PIN_MASK 0x000C0000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN3_DIS_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4_MASK 0x02000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_QUICKSIM_START_MASK 0x04000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING_MASK 0x08000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS_MASK 0x10000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN_MASK 0x20000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE_MASK 0x40000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN_MASK 0x80000000L
+//RCC_STRAP0_RCC_BIF_STRAP1
+#define RCC_STRAP0_RCC_BIF_STRAP1__FUSESTRAP_VALID__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP1__ROMSTRAP_VALID__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP1__WRITE_DISABLE__SHIFT 0x2
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS__SHIFT 0x5
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x6
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_READY__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE__SHIFT 0xc
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_HWREV_LSB2__SHIFT 0xd
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWREV_LSB2__SHIFT 0xf
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY__SHIFT 0x11
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS__SHIFT 0x12
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN__SHIFT 0x13
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN__SHIFT 0x14
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGIN_EN__SHIFT 0x15
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN__SHIFT 0x16
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN__SHIFT 0x17
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN__SHIFT 0x19
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE__SHIFT 0x1a
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x1b
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN_EP__SHIFT 0x1d
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN__SHIFT 0x1e
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN_DN__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP1__FUSESTRAP_VALID_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP1__ROMSTRAP_VALID_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP1__WRITE_DISABLE_MASK 0x00000004L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN_MASK 0x00000008L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS_MASK 0x00000020L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00000040L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_READY_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE_MASK 0x00000C00L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE_MASK 0x00001000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_HWREV_LSB2_MASK 0x00006000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWREV_LSB2_MASK 0x00018000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY_MASK 0x00020000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS_MASK 0x00040000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN_MASK 0x00080000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN_MASK 0x00100000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGIN_EN_MASK 0x00200000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN_MASK 0x00400000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN_MASK 0x00800000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN_MASK 0x02000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE_MASK 0x04000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN_MASK 0x18000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN_EP_MASK 0x20000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN_MASK 0x40000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN_DN_MASK 0x80000000L
+//RCC_STRAP0_RCC_BIF_STRAP2
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA__SHIFT 0x5
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA__SHIFT 0x6
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2__SHIFT 0xd
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS__SHIFT 0xe
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN__SHIFT 0xf
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS_MASK 0x00000008L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA_MASK 0x00000020L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA_MASK 0x00000040L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN_MASK 0x00000C00L
+#define RCC_STRAP0_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2_MASK 0x00002000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN_MASK 0x00008000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS_MASK 0x80000000L
+//RCC_STRAP0_RCC_BIF_STRAP3
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_BIF_STRAP4
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_BIF_STRAP5
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN__SHIFT 0x11
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS__SHIFT 0x12
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS__SHIFT 0x13
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS__SHIFT 0x14
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS__SHIFT 0x15
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE__SHIFT 0x16
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x19
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1b
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER__SHIFT 0x1c
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN_MASK 0x00010000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN_MASK 0x00020000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS_MASK 0x00040000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS_MASK 0x00080000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS_MASK 0x00100000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS_MASK 0x00200000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE_MASK 0x00C00000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED_MASK 0x06000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ_MASK 0x08000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER_MASK 0x70000000L
+//RCC_STRAP0_RCC_BIF_STRAP6
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GEN5_DIS__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN__SHIFT 0x2
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GEN5_DIS_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN_MASK 0x00000004L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0_MASK 0x00E00000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0_MASK 0x0E000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK 0x70000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0__SHIFT 0x5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0_MASK 0x00000010L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0_MASK 0x00000020L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0_MASK 0x0007FFC0L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0_MASK 0x0FFF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0_MASK 0x00FFFFFFL
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0_MASK 0x000FFE00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0_MASK 0xFFF00000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0_MASK 0x00000010L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0__SHIFT 0x5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0__SHIFT 0xd
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0__SHIFT 0xf
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0_MASK 0x00000010L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0_MASK 0x00000020L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0_MASK 0x00000E00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0_MASK 0x00001000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0_MASK 0x00002000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0_MASK 0x00004000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0_MASK 0x00008000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0_MASK 0x00700000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0_MASK 0x03800000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0_MASK 0x1C000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0_MASK 0xE0000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0xb
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0_MASK 0x00000038L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0_MASK 0x00000600L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x00003800L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0_MASK 0x0003C000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x001C0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0_MASK 0x01E00000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0_MASK 0x06000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0_MASK 0x18000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0_MASK 0xFF000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0_MASK 0x00800000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0_MASK 0x02000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0__SHIFT 0x5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000010L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0_MASK 0x00000020L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x00000F00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0x0000F000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0_MASK 0x00030000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0_MASK 0x00E00000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x0F000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0xF0000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0_MASK 0x00000F00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0_MASK 0x0000F000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0_MASK 0x1F000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0_MASK 0xE0000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0_MASK 0xFF000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP1
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0_MASK 0xFF000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0_MASK 0x0000FFFFL
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_INVALIDATE_QUEUE_DEPTH_DEV0_F0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_PAGE_ALIGNED_REQUEST_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0_MASK 0x00FFF000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_INVALIDATE_QUEUE_DEPTH_DEV0_F0_MASK 0x3E000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_PAGE_ALIGNED_REQUEST_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0_MASK 0x00FFF000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0_MASK 0x00001000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0_MASK 0x01FFE000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP2
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0__SHIFT 0xf
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0_MASK 0x00003E00L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0_MASK 0x00004000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0_MASK 0x00008000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0_MASK 0x00800000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0_MASK 0x07000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP26
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP26__STRAP_GPUIOV_VSEC_LENGTH_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP26__STRAP_GPUIOV_VSEC_LENGTH_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP3
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0_MASK 0x00E00000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP4
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0__SHIFT 0xa
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0_MASK 0x000003FFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0_MASK 0x00000400L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0_MASK 0x0F800000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0_MASK 0x70000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP5
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00000007L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0_MASK 0x00000070L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0_MASK 0x00001E00L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0_MASK 0x0000E000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00070000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0_MASK 0x00780000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0_MASK 0x03800000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0_MASK 0xC0000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP9
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0_MASK 0x00C00000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0_MASK 0x0F000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1_MASK 0x000F0000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1_MASK 0x00F00000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP2
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_VC_EN_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1_MASK 0x00003E00L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1_MASK 0x00004000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_VC_EN_DEV0_F1_MASK 0x00800000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1_MASK 0x07000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP20
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP21
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP22
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP23
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP24
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP25
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP3
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP4
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1_MASK 0x0F800000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1_MASK 0x70000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP5
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1_MASK 0x38000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP6
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_EN_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_EN_DEV0_F1_MASK 0x00000001L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP7
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_BIFPFVFDEC1
+//BIF_BX_PF0_BIF_BME_STATUS
+#define BIF_BX_PF0_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_PF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_PF0_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_PF0_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_GPU_HDP_FLUSH_REQ
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF0_GPU_HDP_FLUSH_DONE
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF0_BIF_TRANS_PENDING
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_PF0_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_PF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_PF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_CONTROL
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_PF0_MAILBOX_INT_CNTL
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_PF0_BIF_VMHV_MAILBOX
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+//BIF_BX_PF0_PARTITION_COMPUTE_CAP
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__SPX_SUPPORT__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__DPX_SUPPORT__SHIFT 0x1
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__TPX_SUPPORT__SHIFT 0x2
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__QPX_SUPPORT__SHIFT 0x3
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__CPX_SUPPORT__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__CPX_AVAILABILITY__SHIFT 0xa
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__SPX_SUPPORT_MASK 0x00000001L
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__DPX_SUPPORT_MASK 0x00000002L
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__TPX_SUPPORT_MASK 0x00000004L
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__QPX_SUPPORT_MASK 0x00000008L
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__CPX_SUPPORT_MASK 0x00000010L
+#define BIF_BX_PF0_PARTITION_COMPUTE_CAP__CPX_AVAILABILITY_MASK 0x0003FC00L
+//BIF_BX_PF0_PARTITION_MEM_CAP
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS1_SUPPORT__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS2_SUPPORT__SHIFT 0x1
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS3_SUPPORT__SHIFT 0x2
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS4_SUPPORT__SHIFT 0x3
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS6_SUPPORT__SHIFT 0x5
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS8_SUPPORT__SHIFT 0x7
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS1_SUPPORT_MASK 0x00000001L
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS2_SUPPORT_MASK 0x00000002L
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS3_SUPPORT_MASK 0x00000004L
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS4_SUPPORT_MASK 0x00000008L
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS6_SUPPORT_MASK 0x00000020L
+#define BIF_BX_PF0_PARTITION_MEM_CAP__NPS8_SUPPORT_MASK 0x00000080L
+//BIF_BX_PF0_PARTITION_COMPUTE_STATUS
+#define BIF_BX_PF0_PARTITION_COMPUTE_STATUS__PARTITION_MODE__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_COMPUTE_STATUS__PARTITION_MODE_MASK 0x000000F0L
+//BIF_BX_PF0_PARTITION_MEM_STATUS
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_BIFPFVFDEC1[13440..14975]
+//RCC_DEV0_EPF0_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_gdc_GDCDEC
+//GDC0_A2S_CNTL_CL0
+#define GDC0_A2S_CNTL_CL0__NSNOOP_MAP__SHIFT 0x0
+#define GDC0_A2S_CNTL_CL0__REQPASSPW_VC0_MAP__SHIFT 0x2
+#define GDC0_A2S_CNTL_CL0__REQPASSPW_NVC0_MAP__SHIFT 0x4
+#define GDC0_A2S_CNTL_CL0__REQRSPPASSPW_VC0_MAP__SHIFT 0x6
+#define GDC0_A2S_CNTL_CL0__REQRSPPASSPW_NVC0_MAP__SHIFT 0x8
+#define GDC0_A2S_CNTL_CL0__BLKLVL_MAP__SHIFT 0xa
+#define GDC0_A2S_CNTL_CL0__DATERR_MAP__SHIFT 0xc
+#define GDC0_A2S_CNTL_CL0__EXOKAY_WR_MAP__SHIFT 0xe
+#define GDC0_A2S_CNTL_CL0__EXOKAY_RD_MAP__SHIFT 0x10
+#define GDC0_A2S_CNTL_CL0__RESP_WR_MAP__SHIFT 0x12
+#define GDC0_A2S_CNTL_CL0__RESP_RD_MAP__SHIFT 0x14
+#define GDC0_A2S_CNTL_CL0__RDRSP_ERRMAP__SHIFT 0x16
+#define GDC0_A2S_CNTL_CL0__RDRSP_SEL_MODE__SHIFT 0x18
+#define GDC0_A2S_CNTL_CL0__STATIC_VC_ENABLE__SHIFT 0x1b
+#define GDC0_A2S_CNTL_CL0__STATIC_VC_VALUE__SHIFT 0x1c
+#define GDC0_A2S_CNTL_CL0__NSNOOP_MAP_MASK 0x00000003L
+#define GDC0_A2S_CNTL_CL0__REQPASSPW_VC0_MAP_MASK 0x0000000CL
+#define GDC0_A2S_CNTL_CL0__REQPASSPW_NVC0_MAP_MASK 0x00000030L
+#define GDC0_A2S_CNTL_CL0__REQRSPPASSPW_VC0_MAP_MASK 0x000000C0L
+#define GDC0_A2S_CNTL_CL0__REQRSPPASSPW_NVC0_MAP_MASK 0x00000300L
+#define GDC0_A2S_CNTL_CL0__BLKLVL_MAP_MASK 0x00000C00L
+#define GDC0_A2S_CNTL_CL0__DATERR_MAP_MASK 0x00003000L
+#define GDC0_A2S_CNTL_CL0__EXOKAY_WR_MAP_MASK 0x0000C000L
+#define GDC0_A2S_CNTL_CL0__EXOKAY_RD_MAP_MASK 0x00030000L
+#define GDC0_A2S_CNTL_CL0__RESP_WR_MAP_MASK 0x000C0000L
+#define GDC0_A2S_CNTL_CL0__RESP_RD_MAP_MASK 0x00300000L
+#define GDC0_A2S_CNTL_CL0__RDRSP_ERRMAP_MASK 0x00C00000L
+#define GDC0_A2S_CNTL_CL0__RDRSP_SEL_MODE_MASK 0x07000000L
+#define GDC0_A2S_CNTL_CL0__STATIC_VC_ENABLE_MASK 0x08000000L
+#define GDC0_A2S_CNTL_CL0__STATIC_VC_VALUE_MASK 0x70000000L
+//GDC0_A2S_CNTL_CL1
+#define GDC0_A2S_CNTL_CL1__NSNOOP_MAP__SHIFT 0x0
+#define GDC0_A2S_CNTL_CL1__REQPASSPW_VC0_MAP__SHIFT 0x2
+#define GDC0_A2S_CNTL_CL1__REQPASSPW_NVC0_MAP__SHIFT 0x4
+#define GDC0_A2S_CNTL_CL1__REQRSPPASSPW_VC0_MAP__SHIFT 0x6
+#define GDC0_A2S_CNTL_CL1__REQRSPPASSPW_NVC0_MAP__SHIFT 0x8
+#define GDC0_A2S_CNTL_CL1__BLKLVL_MAP__SHIFT 0xa
+#define GDC0_A2S_CNTL_CL1__DATERR_MAP__SHIFT 0xc
+#define GDC0_A2S_CNTL_CL1__EXOKAY_WR_MAP__SHIFT 0xe
+#define GDC0_A2S_CNTL_CL1__EXOKAY_RD_MAP__SHIFT 0x10
+#define GDC0_A2S_CNTL_CL1__RESP_WR_MAP__SHIFT 0x12
+#define GDC0_A2S_CNTL_CL1__RESP_RD_MAP__SHIFT 0x14
+#define GDC0_A2S_CNTL_CL1__RDRSP_ERRMAP__SHIFT 0x16
+#define GDC0_A2S_CNTL_CL1__RDRSP_SEL_MODE__SHIFT 0x18
+#define GDC0_A2S_CNTL_CL1__STATIC_VC_ENABLE__SHIFT 0x1b
+#define GDC0_A2S_CNTL_CL1__STATIC_VC_VALUE__SHIFT 0x1c
+#define GDC0_A2S_CNTL_CL1__NSNOOP_MAP_MASK 0x00000003L
+#define GDC0_A2S_CNTL_CL1__REQPASSPW_VC0_MAP_MASK 0x0000000CL
+#define GDC0_A2S_CNTL_CL1__REQPASSPW_NVC0_MAP_MASK 0x00000030L
+#define GDC0_A2S_CNTL_CL1__REQRSPPASSPW_VC0_MAP_MASK 0x000000C0L
+#define GDC0_A2S_CNTL_CL1__REQRSPPASSPW_NVC0_MAP_MASK 0x00000300L
+#define GDC0_A2S_CNTL_CL1__BLKLVL_MAP_MASK 0x00000C00L
+#define GDC0_A2S_CNTL_CL1__DATERR_MAP_MASK 0x00003000L
+#define GDC0_A2S_CNTL_CL1__EXOKAY_WR_MAP_MASK 0x0000C000L
+#define GDC0_A2S_CNTL_CL1__EXOKAY_RD_MAP_MASK 0x00030000L
+#define GDC0_A2S_CNTL_CL1__RESP_WR_MAP_MASK 0x000C0000L
+#define GDC0_A2S_CNTL_CL1__RESP_RD_MAP_MASK 0x00300000L
+#define GDC0_A2S_CNTL_CL1__RDRSP_ERRMAP_MASK 0x00C00000L
+#define GDC0_A2S_CNTL_CL1__RDRSP_SEL_MODE_MASK 0x07000000L
+#define GDC0_A2S_CNTL_CL1__STATIC_VC_ENABLE_MASK 0x08000000L
+#define GDC0_A2S_CNTL_CL1__STATIC_VC_VALUE_MASK 0x70000000L
+//GDC0_A2S_CNTL3_CL0
+#define GDC0_A2S_CNTL3_CL0__FORCE_WR_PH__SHIFT 0x0
+#define GDC0_A2S_CNTL3_CL0__FORCE_WR_STEERING__SHIFT 0x2
+#define GDC0_A2S_CNTL3_CL0__WR_ST_TAG_MODE__SHIFT 0x3
+#define GDC0_A2S_CNTL3_CL0__FORCE_WR_ST_ENTRY__SHIFT 0x4
+#define GDC0_A2S_CNTL3_CL0__FORCE_WR_PH_MASK 0x00000003L
+#define GDC0_A2S_CNTL3_CL0__FORCE_WR_STEERING_MASK 0x00000004L
+#define GDC0_A2S_CNTL3_CL0__WR_ST_TAG_MODE_MASK 0x00000008L
+#define GDC0_A2S_CNTL3_CL0__FORCE_WR_ST_ENTRY_MASK 0x000003F0L
+//GDC0_A2S_CNTL3_CL1
+#define GDC0_A2S_CNTL3_CL1__FORCE_WR_PH__SHIFT 0x0
+#define GDC0_A2S_CNTL3_CL1__FORCE_WR_STEERING__SHIFT 0x2
+#define GDC0_A2S_CNTL3_CL1__WR_ST_TAG_MODE__SHIFT 0x3
+#define GDC0_A2S_CNTL3_CL1__FORCE_WR_ST_ENTRY__SHIFT 0x4
+#define GDC0_A2S_CNTL3_CL1__FORCE_WR_PH_MASK 0x00000003L
+#define GDC0_A2S_CNTL3_CL1__FORCE_WR_STEERING_MASK 0x00000004L
+#define GDC0_A2S_CNTL3_CL1__WR_ST_TAG_MODE_MASK 0x00000008L
+#define GDC0_A2S_CNTL3_CL1__FORCE_WR_ST_ENTRY_MASK 0x000003F0L
+//GDC0_A2S_CNTL_SW0
+#define GDC0_A2S_CNTL_SW0__STATIC_VC_ENABLE__SHIFT 0x0
+#define GDC0_A2S_CNTL_SW0__STATIC_VC_VALUE__SHIFT 0x1
+#define GDC0_A2S_CNTL_SW0__FORCE_RSP_REORDER_EN__SHIFT 0x6
+#define GDC0_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define GDC0_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define GDC0_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define GDC0_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define GDC0_A2S_CNTL_SW0__WRR_RD_WEIGHT__SHIFT 0x10
+#define GDC0_A2S_CNTL_SW0__WRR_WR_WEIGHT__SHIFT 0x18
+#define GDC0_A2S_CNTL_SW0__STATIC_VC_ENABLE_MASK 0x00000001L
+#define GDC0_A2S_CNTL_SW0__STATIC_VC_VALUE_MASK 0x0000000EL
+#define GDC0_A2S_CNTL_SW0__FORCE_RSP_REORDER_EN_MASK 0x00000040L
+#define GDC0_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define GDC0_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define GDC0_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define GDC0_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define GDC0_A2S_CNTL_SW0__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define GDC0_A2S_CNTL_SW0__WRR_WR_WEIGHT_MASK 0xFF000000L
+//GDC0_A2S_CNTL_SW1
+#define GDC0_A2S_CNTL_SW1__STATIC_VC_ENABLE__SHIFT 0x0
+#define GDC0_A2S_CNTL_SW1__STATIC_VC_VALUE__SHIFT 0x1
+#define GDC0_A2S_CNTL_SW1__FORCE_RSP_REORDER_EN__SHIFT 0x6
+#define GDC0_A2S_CNTL_SW1__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define GDC0_A2S_CNTL_SW1__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define GDC0_A2S_CNTL_SW1__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define GDC0_A2S_CNTL_SW1__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define GDC0_A2S_CNTL_SW1__WRR_RD_WEIGHT__SHIFT 0x10
+#define GDC0_A2S_CNTL_SW1__WRR_WR_WEIGHT__SHIFT 0x18
+#define GDC0_A2S_CNTL_SW1__STATIC_VC_ENABLE_MASK 0x00000001L
+#define GDC0_A2S_CNTL_SW1__STATIC_VC_VALUE_MASK 0x0000000EL
+#define GDC0_A2S_CNTL_SW1__FORCE_RSP_REORDER_EN_MASK 0x00000040L
+#define GDC0_A2S_CNTL_SW1__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define GDC0_A2S_CNTL_SW1__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define GDC0_A2S_CNTL_SW1__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define GDC0_A2S_CNTL_SW1__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define GDC0_A2S_CNTL_SW1__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define GDC0_A2S_CNTL_SW1__WRR_WR_WEIGHT_MASK 0xFF000000L
+//GDC0_A2S_CNTL_SW2
+#define GDC0_A2S_CNTL_SW2__STATIC_VC_ENABLE__SHIFT 0x0
+#define GDC0_A2S_CNTL_SW2__STATIC_VC_VALUE__SHIFT 0x1
+#define GDC0_A2S_CNTL_SW2__FORCE_RSP_REORDER_EN__SHIFT 0x6
+#define GDC0_A2S_CNTL_SW2__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define GDC0_A2S_CNTL_SW2__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define GDC0_A2S_CNTL_SW2__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define GDC0_A2S_CNTL_SW2__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define GDC0_A2S_CNTL_SW2__WRR_RD_WEIGHT__SHIFT 0x10
+#define GDC0_A2S_CNTL_SW2__WRR_WR_WEIGHT__SHIFT 0x18
+#define GDC0_A2S_CNTL_SW2__STATIC_VC_ENABLE_MASK 0x00000001L
+#define GDC0_A2S_CNTL_SW2__STATIC_VC_VALUE_MASK 0x0000000EL
+#define GDC0_A2S_CNTL_SW2__FORCE_RSP_REORDER_EN_MASK 0x00000040L
+#define GDC0_A2S_CNTL_SW2__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define GDC0_A2S_CNTL_SW2__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define GDC0_A2S_CNTL_SW2__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define GDC0_A2S_CNTL_SW2__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define GDC0_A2S_CNTL_SW2__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define GDC0_A2S_CNTL_SW2__WRR_WR_WEIGHT_MASK 0xFF000000L
+//GDC0_A2S_TAG_ALLOC_0
+#define GDC0_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR__SHIFT 0x0
+#define GDC0_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD__SHIFT 0x8
+#define GDC0_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR__SHIFT 0x10
+#define GDC0_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR_MASK 0x000000FFL
+#define GDC0_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD_MASK 0x0000FF00L
+#define GDC0_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR_MASK 0x00FF0000L
+//GDC0_A2S_TAG_ALLOC_1
+#define GDC0_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR__SHIFT 0x0
+#define GDC0_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR__SHIFT 0x10
+#define GDC0_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD__SHIFT 0x18
+#define GDC0_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR_MASK 0x000000FFL
+#define GDC0_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR_MASK 0x00FF0000L
+#define GDC0_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD_MASK 0xFF000000L
+//GDC0_A2S_MISC_CNTL
+#define GDC0_A2S_MISC_CNTL__BLKLVL_FOR_MSG__SHIFT 0x0
+#define GDC0_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS__SHIFT 0x2
+#define GDC0_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE__SHIFT 0x3
+#define GDC0_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN__SHIFT 0x4
+#define GDC0_A2S_MISC_CNTL__RSP_REORDER_DIS__SHIFT 0x5
+#define GDC0_A2S_MISC_CNTL__WRRSP_ACCUM_SEL__SHIFT 0x6
+#define GDC0_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x7
+#define GDC0_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x8
+#define GDC0_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY__SHIFT 0x9
+#define GDC0_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0xa
+#define GDC0_A2S_MISC_CNTL__WR_TAG_SET_MIN__SHIFT 0x10
+#define GDC0_A2S_MISC_CNTL__RD_TAG_SET_MIN__SHIFT 0x15
+#define GDC0_A2S_MISC_CNTL__WRR_LRG_MODE__SHIFT 0x1a
+#define GDC0_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE__SHIFT 0x1b
+#define GDC0_A2S_MISC_CNTL__BLKLVL_FOR_MSG_MASK 0x00000003L
+#define GDC0_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS_MASK 0x00000004L
+#define GDC0_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE_MASK 0x00000008L
+#define GDC0_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN_MASK 0x00000010L
+#define GDC0_A2S_MISC_CNTL__RSP_REORDER_DIS_MASK 0x00000020L
+#define GDC0_A2S_MISC_CNTL__WRRSP_ACCUM_SEL_MASK 0x00000040L
+#define GDC0_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000080L
+#define GDC0_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000100L
+#define GDC0_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY_MASK 0x00000200L
+#define GDC0_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000400L
+#define GDC0_A2S_MISC_CNTL__WR_TAG_SET_MIN_MASK 0x001F0000L
+#define GDC0_A2S_MISC_CNTL__RD_TAG_SET_MIN_MASK 0x03E00000L
+#define GDC0_A2S_MISC_CNTL__WRR_LRG_MODE_MASK 0x04000000L
+#define GDC0_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE_MASK 0x08000000L
+//GDC0_SHUB_REGS_IF_CTL
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS__SHIFT 0x1
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS_MASK 0x00000002L
+//GDC0_NGDC_MGCG_CTRL
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd
+#define GDC0_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN__SHIFT 0xe
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN_MASK 0x00004000L
+//GDC0_NGDC_RESERVED_0
+#define GDC0_NGDC_RESERVED_0__RESERVED__SHIFT 0x0
+#define GDC0_NGDC_RESERVED_0__RESERVED_MASK 0xFFFFFFFFL
+//GDC0_NGDC_RESERVED_1
+#define GDC0_NGDC_RESERVED_1__RESERVED__SHIFT 0x0
+#define GDC0_NGDC_RESERVED_1__RESERVED_MASK 0xFFFFFFFFL
+//GDC0_NBIF_GFX_DOORBELL_STATUS
+#define GDC0_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT__SHIFT 0x0
+#define GDC0_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT_MASK 0x0000FFFFL
+//GDC0_ATDMA_MISC_CNTL
+#define GDC0_ATDMA_MISC_CNTL__WRR_ARB_MODE__SHIFT 0x0
+#define GDC0_ATDMA_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0x1
+#define GDC0_ATDMA_MISC_CNTL__RDRSP_ARB_MODE__SHIFT 0x2
+#define GDC0_ATDMA_MISC_CNTL__WRR_VC6_WEIGHT__SHIFT 0x8
+#define GDC0_ATDMA_MISC_CNTL__WRR_VC0_WEIGHT__SHIFT 0x10
+#define GDC0_ATDMA_MISC_CNTL__WRR_VC1_WEIGHT__SHIFT 0x18
+#define GDC0_ATDMA_MISC_CNTL__WRR_ARB_MODE_MASK 0x00000001L
+#define GDC0_ATDMA_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000002L
+#define GDC0_ATDMA_MISC_CNTL__RDRSP_ARB_MODE_MASK 0x0000000CL
+#define GDC0_ATDMA_MISC_CNTL__WRR_VC6_WEIGHT_MASK 0x0000FF00L
+#define GDC0_ATDMA_MISC_CNTL__WRR_VC0_WEIGHT_MASK 0x00FF0000L
+#define GDC0_ATDMA_MISC_CNTL__WRR_VC1_WEIGHT_MASK 0xFF000000L
+//GDC0_S2A_MISC_CNTL
+#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3
+#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8
+#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa
+#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc
+#define GDC0_S2A_MISC_CNTL__HDP_PERF_ENH_DIS__SHIFT 0xf
+#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10
+#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L
+#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L
+#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L
+#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L
+#define GDC0_S2A_MISC_CNTL__HDP_PERF_ENH_DIS_MASK 0x00008000L
+#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L
+//GDC0_NGDC_PG_MISC_CTRL
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY__SHIFT 0xa
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1__SHIFT 0xd
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS__SHIFT 0xe
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2__SHIFT 0x10
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS__SHIFT 0x18
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE__SHIFT 0x1f
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY_MASK 0x00000400L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1_MASK 0x00002000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS_MASK 0x00004000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2_MASK 0x00010000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS_MASK 0x3F000000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE_MASK 0x80000000L
+//GDC0_NGDC_PGMST_CTRL
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS__SHIFT 0x0
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN__SHIFT 0x8
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN__SHIFT 0xe
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN_MASK 0x00000100L
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN_MASK 0x0000C000L
+//GDC0_NGDC_PGSLV_CTRL
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS__SHIFT 0x0
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS__SHIFT 0x5
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS__SHIFT 0xa
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS_MASK 0x0000001FL
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS_MASK 0x000003E0L
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS_MASK 0x00007C00L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_COMMAND
+#define BIF_CFG_DEV0_EPF0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_STATUS
+#define BIF_CFG_DEV0_EPF0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_LATENCY
+#define BIF_CFG_DEV0_EPF0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_HEADER
+#define BIF_CFG_DEV0_EPF0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_BIST
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF0_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PMI_CAP
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//PCIE_PAGE_REQ_ENH_CAP_LIST
+#define PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//PCIE_PAGE_REQ_CNTL
+#define PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
+#define PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
+#define PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L
+#define PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L
+//PCIE_PAGE_REQ_STATUS
+#define PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0
+#define PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1
+#define PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8
+#define PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf
+#define PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L
+#define PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L
+#define PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L
+#define PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L
+//PCIE_OUTSTAND_PAGE_REQ_CAPACITY
+#define PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0
+#define PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL
+//PCIE_OUTSTAND_PAGE_REQ_ALLOC
+#define PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
+#define PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_MC_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_PCIE_MC_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
+//BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR1
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_MC_RCV0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_MC_RCV1
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL1
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_1
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
+//BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+//PCIE_SRIOV_ENH_CAP_LIST
+#define PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//PCIE_SRIOV_CAP
+#define PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
+#define PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
+#define PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
+#define PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
+#define PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
+#define PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
+#define PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
+#define PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
+//PCIE_SRIOV_CONTROL
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
+#define PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
+#define PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
+#define PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
+//PCIE_SRIOV_STATUS
+#define PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
+#define PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
+//PCIE_SRIOV_INITIAL_VFS
+#define PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
+#define PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
+//PCIE_SRIOV_TOTAL_VFS
+#define PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
+#define PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
+//PCIE_SRIOV_NUM_VFS
+#define PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
+#define PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
+//PCIE_SRIOV_FUNC_DEP_LINK
+#define PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
+#define PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0xFFL
+//PCIE_SRIOV_FIRST_VF_OFFSET
+#define PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//PCIE_SRIOV_VF_STRIDE
+#define PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
+#define PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
+//PCIE_SRIOV_VF_DEVICE_ID
+#define PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
+#define PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
+//PCIE_SRIOV_SUPPORTED_PAGE_SIZE
+#define PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
+#define PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_SYSTEM_PAGE_SIZE
+#define PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
+#define PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_BASE_ADDR_0
+#define PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
+#define PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_BASE_ADDR_1
+#define PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
+#define PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_BASE_ADDR_2
+#define PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
+#define PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_BASE_ADDR_3
+#define PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
+#define PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_BASE_ADDR_4
+#define PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
+#define PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_BASE_ADDR_5
+#define PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
+#define PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
+#define PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR__SHIFT 0x0
+#define PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
+#define PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR_MASK 0x00000007L
+#define PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_LINK_CAP_16GT
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_LINK_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
+//BIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LINK_CAP_32GT
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES_MASK 0x0000F800L
+//BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL_MASK 0x00000700L
+//BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL_MASK 0x000000C0L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED_MASK 0x00000400L
+//PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV
+#define PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L
+#define PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__LFB_REGION__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__MAX_REGION__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__LFB_REGION_MASK 0x0000000FL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__MAX_REGION_MASK 0x000000F0L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN0SCH_OFFSET__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN1SCH_OFFSET__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN2SCH_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN3SCH_OFFSET__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN0SCH_OFFSET_MASK 0x000000FFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN1SCH_OFFSET_MASK 0x0000FF00L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN2SCH_OFFSET_MASK 0x00FF0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN3SCH_OFFSET_MASK 0xFF000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN4SCH_OFFSET__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN5SCH_OFFSET__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN6SCH_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN7SCH_OFFSET__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN4SCH_OFFSET_MASK 0x000000FFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN5SCH_OFFSET_MASK 0x0000FF00L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN6SCH_OFFSET_MASK 0x00FF0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN7SCH_OFFSET_MASK 0xFF000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN8SCH_OFFSET__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN9SCH_OFFSET__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN10SCH_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN11SCH_OFFSET__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN8SCH_OFFSET_MASK 0x000000FFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN9SCH_OFFSET_MASK 0x0000FF00L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN10SCH_OFFSET_MASK 0x00FF0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN11SCH_OFFSET_MASK 0xFF000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX0SCH_OFFSET__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX1SCH_OFFSET__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX2SCH_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX3SCH_OFFSET__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX0SCH_OFFSET_MASK 0x000000FFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX1SCH_OFFSET_MASK 0x0000FF00L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX2SCH_OFFSET_MASK 0x00FF0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX3SCH_OFFSET_MASK 0xFF000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX4SCH_OFFSET__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX5SCH_OFFSET__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX6SCH_OFFSET__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX7SCH_OFFSET__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX4SCH_OFFSET_MASK 0x000000FFL
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX5SCH_OFFSET_MASK 0x0000FF00L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX6SCH_OFFSET_MASK 0x00FF0000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX7SCH_OFFSET_MASK 0xFF000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0__DW0__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1__DW1__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2__DW2__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3__DW3__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4__DW4__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5__DW5__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6__DW6__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7__DW7__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8__DW8__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_CMD_COMPLETE_INTR_EN__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_CMD_COMPLETE_INTR_EN__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_NEED_FLR_INTR_EN__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_CMD_COMPLETE_INTR_EN__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_CMD_COMPLETE_INTR_EN__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_CMD_COMPLETE_INTR_EN__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_CMD_COMPLETE_INTR_EN__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x15
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_NEED_FLR_INTR_EN__SHIFT 0x16
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x17
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_CMD_COMPLETE_INTR_EN__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_NEED_FLR_INTR_EN__SHIFT 0x1a
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1b
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_CMD_COMPLETE_INTR_EN__SHIFT 0x1c
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1d
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_NEED_FLR_INTR_EN__SHIFT 0x1e
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_CMD_COMPLETE_INTR_EN_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_NEED_FLR_INTR_EN_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_CMD_COMPLETE_INTR_EN_MASK 0x00100000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00200000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_NEED_FLR_INTR_EN_MASK 0x00400000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00800000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_CMD_COMPLETE_INTR_EN_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_SELF_RECOVERED_INTR_EN_MASK 0x02000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_NEED_FLR_INTR_EN_MASK 0x04000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_VM_BUSY_TRANSITION_INTR_EN_MASK 0x08000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_CMD_COMPLETE_INTR_EN_MASK 0x10000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_SELF_RECOVERED_INTR_EN_MASK 0x20000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_NEED_FLR_INTR_EN_MASK 0x40000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_VM_BUSY_TRANSITION_INTR_EN_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_ENABLE
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_CMD_COMPLETE_INTR_EN__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_CMD_COMPLETE_INTR_EN__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_NEED_FLR_INTR_EN__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_CMD_COMPLETE_INTR_EN__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_CMD_COMPLETE_INTR_EN__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_CMD_COMPLETE_INTR_EN__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_CMD_COMPLETE_INTR_EN__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x15
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_NEED_FLR_INTR_EN__SHIFT 0x16
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x17
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_CMD_COMPLETE_INTR_EN__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_NEED_FLR_INTR_EN__SHIFT 0x1a
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1b
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_CMD_COMPLETE_INTR_EN__SHIFT 0x1c
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1d
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_NEED_FLR_INTR_EN__SHIFT 0x1e
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_CMD_COMPLETE_INTR_EN_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_NEED_FLR_INTR_EN_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_CMD_COMPLETE_INTR_EN_MASK 0x00100000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00200000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_NEED_FLR_INTR_EN_MASK 0x00400000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00800000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_CMD_COMPLETE_INTR_EN_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_SELF_RECOVERED_INTR_EN_MASK 0x02000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_NEED_FLR_INTR_EN_MASK 0x04000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_VM_BUSY_TRANSITION_INTR_EN_MASK 0x08000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_CMD_COMPLETE_INTR_EN_MASK 0x10000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_SELF_RECOVERED_INTR_EN_MASK 0x20000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_NEED_FLR_INTR_EN_MASK 0x40000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_VM_BUSY_TRANSITION_INTR_EN_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_CMD_COMPLETE_INTR_EN__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_CMD_COMPLETE_INTR_EN__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_NEED_FLR_INTR_EN__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_CMD_COMPLETE_INTR_EN__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_CMD_COMPLETE_INTR_EN__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_CMD_COMPLETE_INTR_EN_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_NEED_FLR_INTR_EN_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_CMD_COMPLETE_INTR_STATUS__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_CMD_COMPLETE_INTR_STATUS__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x15
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x16
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x17
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_CMD_COMPLETE_INTR_STATUS__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1a
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1b
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_CMD_COMPLETE_INTR_STATUS__SHIFT 0x1c
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1d
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1e
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_CMD_COMPLETE_INTR_STATUS_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_CMD_COMPLETE_INTR_STATUS_MASK 0x00100000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00200000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_NEED_FLR_INTR_STATUS_MASK 0x00400000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00800000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_CMD_COMPLETE_INTR_STATUS_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x02000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_NEED_FLR_INTR_STATUS_MASK 0x04000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x08000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_CMD_COMPLETE_INTR_STATUS_MASK 0x10000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x20000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_NEED_FLR_INTR_STATUS_MASK 0x40000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_STATUS
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_CMD_COMPLETE_INTR_STATUS__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_CMD_COMPLETE_INTR_STATUS__SHIFT 0x14
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x15
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x16
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x17
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_CMD_COMPLETE_INTR_STATUS__SHIFT 0x18
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x19
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1a
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1b
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_CMD_COMPLETE_INTR_STATUS__SHIFT 0x1c
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1d
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1e
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1f
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_CMD_COMPLETE_INTR_STATUS_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_CMD_COMPLETE_INTR_STATUS_MASK 0x00100000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00200000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_NEED_FLR_INTR_STATUS_MASK 0x00400000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00800000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_CMD_COMPLETE_INTR_STATUS_MASK 0x01000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x02000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_NEED_FLR_INTR_STATUS_MASK 0x04000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x08000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_CMD_COMPLETE_INTR_STATUS_MASK 0x10000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x20000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_NEED_FLR_INTR_STATUS_MASK 0x40000000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x80000000L
+//PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_CMD_COMPLETE_INTR_STATUS__SHIFT 0x4
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x5
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x6
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x7
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_CMD_COMPLETE_INTR_STATUS_MASK 0x00000010L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000020L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000040L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000080L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
+#define PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
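The __SHIFT/_MASK pairs added above follow the usual AMD register-header pattern: each field of a 32-bit register is accessed by masking the raw value and shifting the result. The snippet below is an illustrative sketch only, not part of this patch; the reg_get_field()/reg_set_field() helpers and the intr_en variable are hypothetical names used to make the convention concrete.

#include <stdint.h>

/* Extract a field from a raw register value using its _MASK/__SHIFT pair. */
static inline uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field in a raw register value using its _MASK/__SHIFT pair. */
static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Example: test whether the ENGA_A0 command-complete interrupt is enabled in a
 * previously read PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE value:
 *
 * if (reg_get_field(intr_en,
 *	PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_CMD_COMPLETE_INTR_EN_MASK,
 *	PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_CMD_COMPLETE_INTR_EN__SHIFT))
 *	...;
 */

In-tree amdgpu code typically goes through its existing REG_GET_FIELD()/REG_SET_FIELD() style helpers, which are built on these same mask/shift pairs, rather than open-coding the arithmetic as above.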
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
+//BIF_CFG_DEV0_EPF1_VENDOR_ID
+#define BIF_CFG_DEV0_EPF1_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_DEVICE_ID
+#define BIF_CFG_DEV0_EPF1_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_COMMAND
+#define BIF_CFG_DEV0_EPF1_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF1_STATUS
+#define BIF_CFG_DEV0_EPF1_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_REVISION_ID
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF1_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF1_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_SUB_CLASS
+#define BIF_CFG_DEV0_EPF1_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_BASE_CLASS
+#define BIF_CFG_DEV0_EPF1_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_CACHE_LINE
+#define BIF_CFG_DEV0_EPF1_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_LATENCY
+#define BIF_CFG_DEV0_EPF1_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_HEADER
+#define BIF_CFG_DEV0_EPF1_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF1_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF1_BIST
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF1_CAP_PTR
+#define BIF_CFG_DEV0_EPF1_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_MIN_GRANT
+#define BIF_CFG_DEV0_EPF1_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF1_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF1_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PMI_CAP
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PCIE_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF1_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF1_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF1_LINK_CAP
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_LINK_CNTL
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF1_LINK_STATUS
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF1_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_LINK_CAP2
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF1_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF1_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MASK
+#define BIF_CFG_DEV0_EPF1_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF1_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_PENDING
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_MSIX_PBA
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
+//BIF_CFG_DEV0_RC0_VENDOR_ID
+#define BIF_CFG_DEV0_RC0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_DEVICE_ID
+#define BIF_CFG_DEV0_RC0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_COMMAND
+#define BIF_CFG_DEV0_RC0_COMMAND__IOEN_DN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_COMMAND__MEMEN_DN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_COMMAND__IOEN_DN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_COMMAND__MEMEN_DN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_RC0_STATUS
+#define BIF_CFG_DEV0_RC0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_RC0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_RC0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_RC0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_RC0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_RC0_REVISION_ID
+#define BIF_CFG_DEV0_RC0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_PROG_INTERFACE
+#define BIF_CFG_DEV0_RC0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_SUB_CLASS
+#define BIF_CFG_DEV0_RC0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_BASE_CLASS
+#define BIF_CFG_DEV0_RC0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_CACHE_LINE
+#define BIF_CFG_DEV0_RC0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_LATENCY
+#define BIF_CFG_DEV0_RC0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_HEADER
+#define BIF_CFG_DEV0_RC0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_RC0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_RC0_BIST
+#define BIF_CFG_DEV0_RC0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_RC0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_RC0_BASE_ADDR_1
+#define BIF_CFG_DEV0_RC0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_BASE_ADDR_2
+#define BIF_CFG_DEV0_RC0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_RC0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC0_IO_BASE_LIMIT
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L
+//BIF_CFG_DEV0_RC0_SECONDARY_STATUS
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_RC0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PREF_BASE_UPPER
+#define BIF_CFG_DEV0_RC0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PREF_LIMIT_UPPER
+#define BIF_CFG_DEV0_RC0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_RC0_CAP_PTR
+#define BIF_CFG_DEV0_RC0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_RC0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_RC0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_RC0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_RC0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__PRIMARY_DISCARD_TIMER__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__SECONDARY_DISCARD_TIMER__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__DISCARD_TIMER_STATUS__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__DISCARD_TIMER_SERR_ENABLE__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__PRIMARY_DISCARD_TIMER_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__SECONDARY_DISCARD_TIMER_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__DISCARD_TIMER_STATUS_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_IRQ_BRIDGE_CNTL__DISCARD_TIMER_SERR_ENABLE_MASK 0x0800L
+//BIF_CFG_DEV0_RC0_EXT_BRIDGE_CNTL
+#define BIF_CFG_DEV0_RC0_EXT_BRIDGE_CNTL__IO_PORT_80_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_EXT_BRIDGE_CNTL__IO_PORT_80_EN_MASK 0x01L
+//BIF_CFG_DEV0_RC0_PMI_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_PMI_CAP
+#define BIF_CFG_DEV0_RC0_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_PCIE_CAP
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_RC0_DEVICE_CAP
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_RC0_DEVICE_CNTL
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L
+//BIF_CFG_DEV0_RC0_DEVICE_STATUS
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_RC0_LINK_CAP
+#define BIF_CFG_DEV0_RC0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_RC0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_RC0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_RC0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC0_LINK_CNTL
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_RC0_LINK_STATUS
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_RC0_SLOT_CAP
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__ATTN_BUTTON_PRESENT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__PWR_CONTROLLER_PRESENT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__MRL_SENSOR_PRESENT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__ATTN_INDICATOR_PRESENT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__PWR_INDICATOR_PRESENT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__HOTPLUG_SURPRISE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__HOTPLUG_CAPABLE__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__PHYSICAL_SLOT_NUM__SHIFT 0x13
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__ATTN_BUTTON_PRESENT_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__PWR_CONTROLLER_PRESENT_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__MRL_SENSOR_PRESENT_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__ATTN_INDICATOR_PRESENT_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__PWR_INDICATOR_PRESENT_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__HOTPLUG_SURPRISE_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__HOTPLUG_CAPABLE_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC0_SLOT_CAP__PHYSICAL_SLOT_NUM_MASK 0xFFF80000L
+//BIF_CFG_DEV0_RC0_SLOT_CNTL
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PWR_FAULT_DETECTED_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__HOTPLUG_INTR_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__ATTN_INDICATOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PWR_INDICATOR_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PWR_CONTROLLER_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__DL_STATE_CHANGED_EN__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__AUTO_SLOT_PWR_LIMIT_DISABLE__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__INBAND_PD_DISABLE__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PWR_FAULT_DETECTED_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__HOTPLUG_INTR_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__ATTN_INDICATOR_CNTL_MASK 0x00C0L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PWR_INDICATOR_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__PWR_CONTROLLER_CNTL_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__DL_STATE_CHANGED_EN_MASK 0x1000L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__AUTO_SLOT_PWR_LIMIT_DISABLE_MASK 0x2000L
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL__INBAND_PD_DISABLE_MASK 0x4000L
+//BIF_CFG_DEV0_RC0_SLOT_STATUS
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__ATTN_BUTTON_PRESSED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__PWR_FAULT_DETECTED__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__MRL_SENSOR_CHANGED__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__PRESENCE_DETECT_CHANGED__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__COMMAND_COMPLETED__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__MRL_SENSOR_STATE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__PRESENCE_DETECT_STATE__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__DL_STATE_CHANGED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__ATTN_BUTTON_PRESSED_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__PWR_FAULT_DETECTED_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__MRL_SENSOR_CHANGED_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__PRESENCE_DETECT_CHANGED_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__COMMAND_COMPLETED_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__MRL_SENSOR_STATE_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__PRESENCE_DETECT_STATE_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS__DL_STATE_CHANGED_MASK 0x0100L
+//BIF_CFG_DEV0_RC0_ROOT_CNTL
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__SERR_ON_CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__SERR_ON_NONFATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__SERR_ON_FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__PM_INTERRUPT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__CRS_SOFTWARE_VISIBILITY_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__SERR_ON_CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__SERR_ON_NONFATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__SERR_ON_FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__PM_INTERRUPT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_ROOT_CNTL__CRS_SOFTWARE_VISIBILITY_EN_MASK 0x0010L
+//BIF_CFG_DEV0_RC0_ROOT_CAP
+#define BIF_CFG_DEV0_RC0_ROOT_CAP__CRS_SOFTWARE_VISIBILITY__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_ROOT_CAP__CRS_SOFTWARE_VISIBILITY_MASK 0x0001L
+//BIF_CFG_DEV0_RC0_ROOT_STATUS
+#define BIF_CFG_DEV0_RC0_ROOT_STATUS__PME_REQUESTOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_ROOT_STATUS__PME_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_ROOT_STATUS__PME_PENDING__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_ROOT_STATUS__PME_REQUESTOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_ROOT_STATUS__PME_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_ROOT_STATUS__PME_PENDING_MASK 0x00020000L
+//BIF_CFG_DEV0_RC0_DEVICE_CAP2
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_RC0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_RC0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_RC0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_LINK_CAP2
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_RC0_LINK_CNTL2
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_RC0_LINK_STATUS2
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_RC0_SLOT_CAP2
+#define BIF_CFG_DEV0_RC0_SLOT_CAP2__INBAND_PD_DISABLE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SLOT_CAP2__INBAND_PD_DISABLE_SUPPORTED_MASK 0x00000001L
+//BIF_CFG_DEV0_RC0_SLOT_CNTL2
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_SLOT_STATUS2
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_RC0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_RC0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_RC0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_RC0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_RC0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_RC0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_RC0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_RC0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC0_SSID_CAP_LIST
+#define BIF_CFG_DEV0_RC0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_SSID_CAP
+#define BIF_CFG_DEV0_RC0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
+//BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
+//BIF_CFG_DEV0_RC0_PCIE_PORT_VC_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x007F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW1
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW2
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_RC0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_RC0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_RC0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD__CORR_ERR_REP_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD__NONFATAL_ERR_REP_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD__FATAL_ERR_REP_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD__CORR_ERR_REP_EN_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD__NONFATAL_ERR_REP_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_CMD__FATAL_ERR_REP_EN_MASK 0x00000004L
+//BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ERR_CORR_RCVD__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__MULT_ERR_CORR_RCVD__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ERR_FATAL_NONFATAL_RCVD__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__MULT_ERR_FATAL_NONFATAL_RCVD__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__FIRST_UNCORRECTABLE_FATAL__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__NONFATAL_ERROR_MSG_RCVD__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__FATAL_ERROR_MSG_RCVD__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ERR_COR_SUBCLASS__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ADV_ERR_INT_MSG_NUM__SHIFT 0x1b
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ERR_CORR_RCVD_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__MULT_ERR_CORR_RCVD_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ERR_FATAL_NONFATAL_RCVD_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__MULT_ERR_FATAL_NONFATAL_RCVD_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__FIRST_UNCORRECTABLE_FATAL_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__NONFATAL_ERROR_MSG_RCVD_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__FATAL_ERROR_MSG_RCVD_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ERR_COR_SUBCLASS_MASK 0x00000180L
+#define BIF_CFG_DEV0_RC0_PCIE_ROOT_ERR_STATUS__ADV_ERR_INT_MSG_NUM_MASK 0xF8000000L
+//BIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID
+#define BIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID__ERR_CORR_SRC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID__ERR_FATAL_NONFATAL_SRC_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID__ERR_CORR_SRC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_ERR_SRC_ID__ERR_FATAL_NONFATAL_SRC_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3
+#define BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_ERROR_STATUS
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_RC0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_RC0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_LINK_CAP_16GT
+#define BIF_CFG_DEV0_RC0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_LINK_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC0_LINK_STATUS_16GT
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
+//BIF_CFG_DEV0_RC0_LOCAL_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_RC0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC0_RTM1_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_RC0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC0_RTM2_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_RC0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC0_MARGINING_PORT_CAP
+#define BIF_CFG_DEV0_RC0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
+//BIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS
+#define BIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
+#define BIF_CFG_DEV0_RC0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
+//BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC0_LINK_CAP_32GT
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES__SHIFT 0xb
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED_MASK 0x00000400L
+#define BIF_CFG_DEV0_RC0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES_MASK 0x0000F800L
+//BIF_CFG_DEV0_RC0_LINK_CNTL_32GT
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL_MASK 0x00000700L
+//BIF_CFG_DEV0_RC0_LINK_STATUS_32GT
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED__SHIFT 0x5
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON__SHIFT 0x8
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST__SHIFT 0x9
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED__SHIFT 0xa
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL_MASK 0x000000C0L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED_MASK 0x00000400L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_COMMAND
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_0_STATUS
+#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_LATENCY
+#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_HEADER
+#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_0_BIST
+#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_0_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_PMI_CAP
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_0_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_0_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_0_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_0_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
+//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY
+#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC
+#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
+//BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP
+#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS
+#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES_MASK 0x0000F800L
+//BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL_MASK 0x00000700L
+//BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL_MASK 0x000000C0L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED_MASK 0x00000400L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__LFB_REGION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__MAX_REGION__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__LFB_REGION_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_REGION__MAX_REGION_MASK 0x000000F0L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN0SCH_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN1SCH_OFFSET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN2SCH_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN3SCH_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN0SCH_OFFSET_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN1SCH_OFFSET_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN2SCH_OFFSET_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCN3SCH_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN4SCH_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN5SCH_OFFSET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN6SCH_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN7SCH_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN4SCH_OFFSET_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN5SCH_OFFSET_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN6SCH_OFFSET_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS1__VCN7SCH_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN8SCH_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN9SCH_OFFSET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN10SCH_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN11SCH_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN8SCH_OFFSET_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN9SCH_OFFSET_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN10SCH_OFFSET_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS2__VCN11SCH_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX0SCH_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX1SCH_OFFSET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX2SCH_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX3SCH_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX0SCH_OFFSET_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX1SCH_OFFSET_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX2SCH_OFFSET_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS3__GFX3SCH_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX4SCH_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX5SCH_OFFSET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX6SCH_OFFSET__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX7SCH_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX4SCH_OFFSET_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX5SCH_OFFSET_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX6SCH_OFFSET_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS4__GFX7SCH_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD0SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD2SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD3SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD4SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD5SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD6SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD7SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD8SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD9SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD10SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD11SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX0SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX1SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX2SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX3SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX4SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX5SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX6SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0__DW0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW0__DW0_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1__DW1__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW1__DW1_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2__DW2__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW2__DW2_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3__DW3__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW3__DW3_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4__DW4__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW4__DW4_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5__DW5__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW5__DW5_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6__DW6__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW6__DW6_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7__DW7__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW7__DW7_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8__DW8__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFX7SCH_DW8__DW8_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_CMD_COMPLETE_INTR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_CMD_COMPLETE_INTR_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_NEED_FLR_INTR_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_CMD_COMPLETE_INTR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_CMD_COMPLETE_INTR_EN__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_CMD_COMPLETE_INTR_EN__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_CMD_COMPLETE_INTR_EN__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_NEED_FLR_INTR_EN__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_CMD_COMPLETE_INTR_EN__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_NEED_FLR_INTR_EN__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1b
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_CMD_COMPLETE_INTR_EN__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1d
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_NEED_FLR_INTR_EN__SHIFT 0x1e
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A0_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_CMD_COMPLETE_INTR_EN_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_HANG_NEED_FLR_INTR_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A2_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A3_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A4_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_CMD_COMPLETE_INTR_EN_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_HANG_NEED_FLR_INTR_EN_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A5_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_CMD_COMPLETE_INTR_EN_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_SELF_RECOVERED_INTR_EN_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_HANG_NEED_FLR_INTR_EN_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A6_VM_BUSY_TRANSITION_INTR_EN_MASK 0x08000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_CMD_COMPLETE_INTR_EN_MASK 0x10000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_SELF_RECOVERED_INTR_EN_MASK 0x20000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_HANG_NEED_FLR_INTR_EN_MASK 0x40000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_ENABLE__ENGA_A7_VM_BUSY_TRANSITION_INTR_EN_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_ENABLE
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_CMD_COMPLETE_INTR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_CMD_COMPLETE_INTR_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_NEED_FLR_INTR_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_CMD_COMPLETE_INTR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_CMD_COMPLETE_INTR_EN__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_CMD_COMPLETE_INTR_EN__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_CMD_COMPLETE_INTR_EN__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_NEED_FLR_INTR_EN__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_CMD_COMPLETE_INTR_EN__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_NEED_FLR_INTR_EN__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1b
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_CMD_COMPLETE_INTR_EN__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1d
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_NEED_FLR_INTR_EN__SHIFT 0x1e
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B0_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_CMD_COMPLETE_INTR_EN_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_HANG_NEED_FLR_INTR_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B2_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B3_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B4_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_CMD_COMPLETE_INTR_EN_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_HANG_NEED_FLR_INTR_EN_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B5_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_CMD_COMPLETE_INTR_EN_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_SELF_RECOVERED_INTR_EN_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_HANG_NEED_FLR_INTR_EN_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B6_VM_BUSY_TRANSITION_INTR_EN_MASK 0x08000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_CMD_COMPLETE_INTR_EN_MASK 0x10000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_SELF_RECOVERED_INTR_EN_MASK 0x20000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_HANG_NEED_FLR_INTR_EN_MASK 0x40000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_ENABLE__ENGB_B7_VM_BUSY_TRANSITION_INTR_EN_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_CMD_COMPLETE_INTR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_CMD_COMPLETE_INTR_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_NEED_FLR_INTR_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_CMD_COMPLETE_INTR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_CMD_COMPLETE_INTR_EN__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B8_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_CMD_COMPLETE_INTR_EN_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_HANG_NEED_FLR_INTR_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B9_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B10_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_ENABLE__ENGB_B11_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_CMD_COMPLETE_INTR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_CMD_COMPLETE_INTR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_CMD_COMPLETE_INTR_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1b
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_CMD_COMPLETE_INTR_STATUS__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1d
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1e
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A0_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_CMD_COMPLETE_INTR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A2_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A3_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A4_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_CMD_COMPLETE_INTR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_HANG_NEED_FLR_INTR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A5_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_CMD_COMPLETE_INTR_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_HANG_NEED_FLR_INTR_STATUS_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A6_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x08000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_CMD_COMPLETE_INTR_STATUS_MASK 0x10000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x20000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_HANG_NEED_FLR_INTR_STATUS_MASK 0x40000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A0_7_INTR_STATUS__ENGA_A7_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGA_A8_15_INTR_STATUS
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_CMD_COMPLETE_INTR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_CMD_COMPLETE_INTR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_CMD_COMPLETE_INTR_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1b
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_CMD_COMPLETE_INTR_STATUS__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1d
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x1e
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B0_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_CMD_COMPLETE_INTR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B2_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B3_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B4_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_CMD_COMPLETE_INTR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_HANG_NEED_FLR_INTR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B5_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_CMD_COMPLETE_INTR_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_HANG_NEED_FLR_INTR_STATUS_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B6_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x08000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_CMD_COMPLETE_INTR_STATUS_MASK 0x10000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x20000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_HANG_NEED_FLR_INTR_STATUS_MASK 0x40000000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B0_7_INTR_STATUS__ENGB_B7_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_CMD_COMPLETE_INTR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B8_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_CMD_COMPLETE_INTR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B9_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B10_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_ENGB_B8_15_INTR_STATUS__ENGB_B11_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
+//BIF_CFG_DEV0_EPF1_0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_COMMAND
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF1_0_STATUS
+#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_0_REVISION_ID
+#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_SUB_CLASS
+#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_BASE_CLASS
+#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_CACHE_LINE
+#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_LATENCY
+#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_HEADER
+#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF1_0_BIST
+#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF1_0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF1_0_CAP_PTR
+#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_MIN_GRANT
+#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_0_PMI_CAP
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_0_PCIE_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF1_0_LINK_CAP
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_0_LINK_CNTL
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF1_0_LINK_STATUS
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_LINK_CAP2
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF1_0_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF1_0_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_MASK
+#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_PENDING
+#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_0_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_0_MSIX_PBA
+#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
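/*
 * Editor's note, not part of the diff above: a minimal sketch of how these
 * generated __SHIFT/_MASK pairs are typically consumed.  Each field is
 * extracted or updated with plain mask-and-shift arithmetic; only macros
 * defined in this header are referenced, and the helpers below are
 * illustrative stand-ins, not the driver's own accessors.
 */
static inline unsigned int field_get(unsigned int reg, unsigned int mask,
				     unsigned int shift)
{
	return (reg & mask) >> shift;	/* isolate the field and right-align it */
}

static inline unsigned int field_set(unsigned int reg, unsigned int mask,
				     unsigned int shift, unsigned int val)
{
	return (reg & ~mask) | ((val << shift) & mask);	/* replace only this field */
}

/* Example: set SOURCE_VALIDATION_EN in a previously read ACS control value. */
static unsigned int acs_enable_source_validation(unsigned int acs_cntl)
{
	return field_set(acs_cntl,
			 BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK,
			 BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT,
			 1);
}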
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_RCCPORTDEC
+//RCC_DEV0_1_RCC_VDM_SUPPORT
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
+//RCC_DEV0_1_RCC_BUS_CNTL
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT__SHIFT 0xc
+#define RCC_DEV0_1_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC__SHIFT 0xd
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT_MASK 0x00001000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC_MASK 0x00002000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
+//RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
+//RCC_DEV0_1_RCC_DEV0_LINK_CNTL
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT__SHIFT 0x0
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY__SHIFT 0x8
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS__SHIFT 0x10
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS__SHIFT 0x11
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS_MASK 0x00020000L
+//RCC_DEV0_1_RCC_CMN_LINK_CNTL
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
+//RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
+//RCC_DEV0_1_RCC_LTR_LSWITCH_CNTL
+#define RCC_DEV0_1_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
+//RCC_DEV0_1_RCC_MH_ARB_CNTL
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
+//RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
+//RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_ep_dev0_RCCPORTDEC
+//RCC_EP_DEV0_1_EP_PCIE_SCRATCH
+#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_1_EP_PCIE_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_EP_DEV0_1_EP_PCIE_INT_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+//RCC_EP_DEV0_1_EP_PCIE_INT_STATUS
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0_MASK 0x00000080L
+//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+//RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+//RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
+//RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
+//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL
+#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
+//RCC_EP_DEV0_1_EP_PCIEP_RESERVED
+#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_1_EP_PCIE_TX_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
+//RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
+//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwn_dev0_RCCPORTDEC
+//RCC_DWN_DEV0_1_DN_PCIE_RESERVED
+#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_1_DN_PCIE_SCRATCH
+#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_1_DN_PCIE_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+//RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2
+#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+//RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
+//RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+//RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwnp_dev0_RCCPORTDEC
+//RCC_DWNP_DEV0_1_PCIE_ERR_CNTL
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR__SHIFT 0x12
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR__SHIFT 0x13
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR__SHIFT 0x14
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR_MASK 0x00040000L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR_MASK 0x00080000L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR_MASK 0x00100000L
+//RCC_DWNP_DEV0_1_PCIE_RX_CNTL
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+//RCC_DWNP_DEV0_1_PCIE_LC_CNTL2
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_1_PCIEP_STRAP_MISC
+#define RCC_DWNP_DEV0_1_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
+#define RCC_DWNP_DEV0_1_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
+//RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP
+#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_pfc_amdgfx_RCCPFCDEC
+//RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
+//RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
+
+
+// addressBlock: aid_nbio_nbif0_pciemsix_0_usb_MSIXTDEC
+//PCIEMSIX_VECT0_ADDR_LO
+#define PCIEMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT0_ADDR_HI
+#define PCIEMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT0_MSG_DATA
+#define PCIEMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT0_CONTROL
+#define PCIEMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT1_ADDR_LO
+#define PCIEMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT1_ADDR_HI
+#define PCIEMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT1_MSG_DATA
+#define PCIEMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT1_CONTROL
+#define PCIEMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT2_ADDR_LO
+#define PCIEMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT2_ADDR_HI
+#define PCIEMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT2_MSG_DATA
+#define PCIEMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT2_CONTROL
+#define PCIEMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT3_ADDR_LO
+#define PCIEMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT3_ADDR_HI
+#define PCIEMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT3_MSG_DATA
+#define PCIEMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT3_CONTROL
+#define PCIEMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT4_ADDR_LO
+#define PCIEMSIX_VECT4_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT4_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT4_ADDR_HI
+#define PCIEMSIX_VECT4_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT4_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT4_MSG_DATA
+#define PCIEMSIX_VECT4_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT4_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT4_CONTROL
+#define PCIEMSIX_VECT4_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT4_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT5_ADDR_LO
+#define PCIEMSIX_VECT5_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT5_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT5_ADDR_HI
+#define PCIEMSIX_VECT5_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT5_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT5_MSG_DATA
+#define PCIEMSIX_VECT5_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT5_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT5_CONTROL
+#define PCIEMSIX_VECT5_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT5_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT6_ADDR_LO
+#define PCIEMSIX_VECT6_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT6_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT6_ADDR_HI
+#define PCIEMSIX_VECT6_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT6_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT6_MSG_DATA
+#define PCIEMSIX_VECT6_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT6_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT6_CONTROL
+#define PCIEMSIX_VECT6_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT6_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT7_ADDR_LO
+#define PCIEMSIX_VECT7_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT7_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT7_ADDR_HI
+#define PCIEMSIX_VECT7_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT7_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT7_MSG_DATA
+#define PCIEMSIX_VECT7_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT7_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT7_CONTROL
+#define PCIEMSIX_VECT7_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT7_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT8_ADDR_LO
+#define PCIEMSIX_VECT8_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT8_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT8_ADDR_HI
+#define PCIEMSIX_VECT8_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT8_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT8_MSG_DATA
+#define PCIEMSIX_VECT8_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT8_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT8_CONTROL
+#define PCIEMSIX_VECT8_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT8_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT9_ADDR_LO
+#define PCIEMSIX_VECT9_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT9_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT9_ADDR_HI
+#define PCIEMSIX_VECT9_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT9_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT9_MSG_DATA
+#define PCIEMSIX_VECT9_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT9_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT9_CONTROL
+#define PCIEMSIX_VECT9_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT9_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT10_ADDR_LO
+#define PCIEMSIX_VECT10_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT10_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT10_ADDR_HI
+#define PCIEMSIX_VECT10_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT10_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT10_MSG_DATA
+#define PCIEMSIX_VECT10_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT10_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT10_CONTROL
+#define PCIEMSIX_VECT10_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT10_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT11_ADDR_LO
+#define PCIEMSIX_VECT11_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT11_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT11_ADDR_HI
+#define PCIEMSIX_VECT11_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT11_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT11_MSG_DATA
+#define PCIEMSIX_VECT11_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT11_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT11_CONTROL
+#define PCIEMSIX_VECT11_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT11_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT12_ADDR_LO
+#define PCIEMSIX_VECT12_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT12_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT12_ADDR_HI
+#define PCIEMSIX_VECT12_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT12_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT12_MSG_DATA
+#define PCIEMSIX_VECT12_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT12_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT12_CONTROL
+#define PCIEMSIX_VECT12_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT12_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT13_ADDR_LO
+#define PCIEMSIX_VECT13_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT13_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT13_ADDR_HI
+#define PCIEMSIX_VECT13_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT13_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT13_MSG_DATA
+#define PCIEMSIX_VECT13_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT13_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT13_CONTROL
+#define PCIEMSIX_VECT13_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT13_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT14_ADDR_LO
+#define PCIEMSIX_VECT14_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT14_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT14_ADDR_HI
+#define PCIEMSIX_VECT14_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT14_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT14_MSG_DATA
+#define PCIEMSIX_VECT14_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT14_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT14_CONTROL
+#define PCIEMSIX_VECT14_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT14_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT15_ADDR_LO
+#define PCIEMSIX_VECT15_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT15_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT15_ADDR_HI
+#define PCIEMSIX_VECT15_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT15_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT15_MSG_DATA
+#define PCIEMSIX_VECT15_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT15_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT15_CONTROL
+#define PCIEMSIX_VECT15_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT15_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT16_ADDR_LO
+#define PCIEMSIX_VECT16_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT16_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT16_ADDR_HI
+#define PCIEMSIX_VECT16_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT16_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT16_MSG_DATA
+#define PCIEMSIX_VECT16_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT16_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT16_CONTROL
+#define PCIEMSIX_VECT16_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT16_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT17_ADDR_LO
+#define PCIEMSIX_VECT17_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT17_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT17_ADDR_HI
+#define PCIEMSIX_VECT17_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT17_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT17_MSG_DATA
+#define PCIEMSIX_VECT17_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT17_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT17_CONTROL
+#define PCIEMSIX_VECT17_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT17_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT18_ADDR_LO
+#define PCIEMSIX_VECT18_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT18_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT18_ADDR_HI
+#define PCIEMSIX_VECT18_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT18_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT18_MSG_DATA
+#define PCIEMSIX_VECT18_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT18_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT18_CONTROL
+#define PCIEMSIX_VECT18_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT18_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT19_ADDR_LO
+#define PCIEMSIX_VECT19_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT19_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT19_ADDR_HI
+#define PCIEMSIX_VECT19_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT19_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT19_MSG_DATA
+#define PCIEMSIX_VECT19_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT19_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT19_CONTROL
+#define PCIEMSIX_VECT19_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT19_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT20_ADDR_LO
+#define PCIEMSIX_VECT20_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT20_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT20_ADDR_HI
+#define PCIEMSIX_VECT20_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT20_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT20_MSG_DATA
+#define PCIEMSIX_VECT20_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT20_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT20_CONTROL
+#define PCIEMSIX_VECT20_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT20_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT21_ADDR_LO
+#define PCIEMSIX_VECT21_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT21_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT21_ADDR_HI
+#define PCIEMSIX_VECT21_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT21_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT21_MSG_DATA
+#define PCIEMSIX_VECT21_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT21_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT21_CONTROL
+#define PCIEMSIX_VECT21_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT21_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT22_ADDR_LO
+#define PCIEMSIX_VECT22_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT22_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT22_ADDR_HI
+#define PCIEMSIX_VECT22_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT22_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT22_MSG_DATA
+#define PCIEMSIX_VECT22_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT22_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT22_CONTROL
+#define PCIEMSIX_VECT22_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT22_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT23_ADDR_LO
+#define PCIEMSIX_VECT23_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT23_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT23_ADDR_HI
+#define PCIEMSIX_VECT23_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT23_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT23_MSG_DATA
+#define PCIEMSIX_VECT23_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT23_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT23_CONTROL
+#define PCIEMSIX_VECT23_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT23_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT24_ADDR_LO
+#define PCIEMSIX_VECT24_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT24_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT24_ADDR_HI
+#define PCIEMSIX_VECT24_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT24_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT24_MSG_DATA
+#define PCIEMSIX_VECT24_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT24_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT24_CONTROL
+#define PCIEMSIX_VECT24_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT24_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT25_ADDR_LO
+#define PCIEMSIX_VECT25_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT25_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT25_ADDR_HI
+#define PCIEMSIX_VECT25_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT25_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT25_MSG_DATA
+#define PCIEMSIX_VECT25_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT25_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT25_CONTROL
+#define PCIEMSIX_VECT25_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT25_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT26_ADDR_LO
+#define PCIEMSIX_VECT26_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT26_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT26_ADDR_HI
+#define PCIEMSIX_VECT26_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT26_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT26_MSG_DATA
+#define PCIEMSIX_VECT26_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT26_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT26_CONTROL
+#define PCIEMSIX_VECT26_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT26_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT27_ADDR_LO
+#define PCIEMSIX_VECT27_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT27_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT27_ADDR_HI
+#define PCIEMSIX_VECT27_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT27_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT27_MSG_DATA
+#define PCIEMSIX_VECT27_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT27_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT27_CONTROL
+#define PCIEMSIX_VECT27_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT27_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT28_ADDR_LO
+#define PCIEMSIX_VECT28_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT28_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT28_ADDR_HI
+#define PCIEMSIX_VECT28_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT28_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT28_MSG_DATA
+#define PCIEMSIX_VECT28_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT28_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT28_CONTROL
+#define PCIEMSIX_VECT28_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT28_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT29_ADDR_LO
+#define PCIEMSIX_VECT29_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT29_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT29_ADDR_HI
+#define PCIEMSIX_VECT29_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT29_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT29_MSG_DATA
+#define PCIEMSIX_VECT29_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT29_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT29_CONTROL
+#define PCIEMSIX_VECT29_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT29_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT30_ADDR_LO
+#define PCIEMSIX_VECT30_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT30_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT30_ADDR_HI
+#define PCIEMSIX_VECT30_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT30_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT30_MSG_DATA
+#define PCIEMSIX_VECT30_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT30_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT30_CONTROL
+#define PCIEMSIX_VECT30_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT30_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT31_ADDR_LO
+#define PCIEMSIX_VECT31_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT31_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT31_ADDR_HI
+#define PCIEMSIX_VECT31_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT31_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT31_MSG_DATA
+#define PCIEMSIX_VECT31_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT31_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT31_CONTROL
+#define PCIEMSIX_VECT31_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT31_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT32_ADDR_LO
+#define PCIEMSIX_VECT32_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT32_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT32_ADDR_HI
+#define PCIEMSIX_VECT32_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT32_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT32_MSG_DATA
+#define PCIEMSIX_VECT32_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT32_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT32_CONTROL
+#define PCIEMSIX_VECT32_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT32_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT33_ADDR_LO
+#define PCIEMSIX_VECT33_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT33_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT33_ADDR_HI
+#define PCIEMSIX_VECT33_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT33_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT33_MSG_DATA
+#define PCIEMSIX_VECT33_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT33_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT33_CONTROL
+#define PCIEMSIX_VECT33_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT33_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT34_ADDR_LO
+#define PCIEMSIX_VECT34_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT34_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT34_ADDR_HI
+#define PCIEMSIX_VECT34_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT34_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT34_MSG_DATA
+#define PCIEMSIX_VECT34_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT34_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT34_CONTROL
+#define PCIEMSIX_VECT34_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT34_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT35_ADDR_LO
+#define PCIEMSIX_VECT35_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT35_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT35_ADDR_HI
+#define PCIEMSIX_VECT35_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT35_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT35_MSG_DATA
+#define PCIEMSIX_VECT35_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT35_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT35_CONTROL
+#define PCIEMSIX_VECT35_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT35_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT36_ADDR_LO
+#define PCIEMSIX_VECT36_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT36_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT36_ADDR_HI
+#define PCIEMSIX_VECT36_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT36_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT36_MSG_DATA
+#define PCIEMSIX_VECT36_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT36_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT36_CONTROL
+#define PCIEMSIX_VECT36_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT36_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT37_ADDR_LO
+#define PCIEMSIX_VECT37_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT37_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT37_ADDR_HI
+#define PCIEMSIX_VECT37_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT37_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT37_MSG_DATA
+#define PCIEMSIX_VECT37_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT37_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT37_CONTROL
+#define PCIEMSIX_VECT37_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT37_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT38_ADDR_LO
+#define PCIEMSIX_VECT38_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT38_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT38_ADDR_HI
+#define PCIEMSIX_VECT38_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT38_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT38_MSG_DATA
+#define PCIEMSIX_VECT38_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT38_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT38_CONTROL
+#define PCIEMSIX_VECT38_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT38_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT39_ADDR_LO
+#define PCIEMSIX_VECT39_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT39_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT39_ADDR_HI
+#define PCIEMSIX_VECT39_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT39_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT39_MSG_DATA
+#define PCIEMSIX_VECT39_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT39_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT39_CONTROL
+#define PCIEMSIX_VECT39_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT39_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT40_ADDR_LO
+#define PCIEMSIX_VECT40_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT40_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT40_ADDR_HI
+#define PCIEMSIX_VECT40_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT40_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT40_MSG_DATA
+#define PCIEMSIX_VECT40_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT40_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT40_CONTROL
+#define PCIEMSIX_VECT40_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT40_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT41_ADDR_LO
+#define PCIEMSIX_VECT41_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT41_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT41_ADDR_HI
+#define PCIEMSIX_VECT41_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT41_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT41_MSG_DATA
+#define PCIEMSIX_VECT41_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT41_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT41_CONTROL
+#define PCIEMSIX_VECT41_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT41_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT42_ADDR_LO
+#define PCIEMSIX_VECT42_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT42_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT42_ADDR_HI
+#define PCIEMSIX_VECT42_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT42_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT42_MSG_DATA
+#define PCIEMSIX_VECT42_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT42_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT42_CONTROL
+#define PCIEMSIX_VECT42_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT42_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT43_ADDR_LO
+#define PCIEMSIX_VECT43_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT43_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT43_ADDR_HI
+#define PCIEMSIX_VECT43_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT43_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT43_MSG_DATA
+#define PCIEMSIX_VECT43_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT43_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT43_CONTROL
+#define PCIEMSIX_VECT43_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT43_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT44_ADDR_LO
+#define PCIEMSIX_VECT44_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT44_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT44_ADDR_HI
+#define PCIEMSIX_VECT44_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT44_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT44_MSG_DATA
+#define PCIEMSIX_VECT44_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT44_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT44_CONTROL
+#define PCIEMSIX_VECT44_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT44_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT45_ADDR_LO
+#define PCIEMSIX_VECT45_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT45_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT45_ADDR_HI
+#define PCIEMSIX_VECT45_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT45_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT45_MSG_DATA
+#define PCIEMSIX_VECT45_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT45_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT45_CONTROL
+#define PCIEMSIX_VECT45_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT45_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT46_ADDR_LO
+#define PCIEMSIX_VECT46_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT46_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT46_ADDR_HI
+#define PCIEMSIX_VECT46_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT46_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT46_MSG_DATA
+#define PCIEMSIX_VECT46_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT46_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT46_CONTROL
+#define PCIEMSIX_VECT46_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT46_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT47_ADDR_LO
+#define PCIEMSIX_VECT47_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT47_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT47_ADDR_HI
+#define PCIEMSIX_VECT47_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT47_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT47_MSG_DATA
+#define PCIEMSIX_VECT47_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT47_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT47_CONTROL
+#define PCIEMSIX_VECT47_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT47_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT48_ADDR_LO
+#define PCIEMSIX_VECT48_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT48_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT48_ADDR_HI
+#define PCIEMSIX_VECT48_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT48_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT48_MSG_DATA
+#define PCIEMSIX_VECT48_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT48_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT48_CONTROL
+#define PCIEMSIX_VECT48_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT48_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT49_ADDR_LO
+#define PCIEMSIX_VECT49_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT49_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT49_ADDR_HI
+#define PCIEMSIX_VECT49_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT49_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT49_MSG_DATA
+#define PCIEMSIX_VECT49_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT49_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT49_CONTROL
+#define PCIEMSIX_VECT49_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT49_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT50_ADDR_LO
+#define PCIEMSIX_VECT50_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT50_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT50_ADDR_HI
+#define PCIEMSIX_VECT50_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT50_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT50_MSG_DATA
+#define PCIEMSIX_VECT50_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT50_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT50_CONTROL
+#define PCIEMSIX_VECT50_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT50_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT51_ADDR_LO
+#define PCIEMSIX_VECT51_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT51_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT51_ADDR_HI
+#define PCIEMSIX_VECT51_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT51_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT51_MSG_DATA
+#define PCIEMSIX_VECT51_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT51_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT51_CONTROL
+#define PCIEMSIX_VECT51_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT51_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT52_ADDR_LO
+#define PCIEMSIX_VECT52_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT52_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT52_ADDR_HI
+#define PCIEMSIX_VECT52_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT52_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT52_MSG_DATA
+#define PCIEMSIX_VECT52_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT52_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT52_CONTROL
+#define PCIEMSIX_VECT52_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT52_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT53_ADDR_LO
+#define PCIEMSIX_VECT53_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT53_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT53_ADDR_HI
+#define PCIEMSIX_VECT53_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT53_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT53_MSG_DATA
+#define PCIEMSIX_VECT53_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT53_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT53_CONTROL
+#define PCIEMSIX_VECT53_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT53_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT54_ADDR_LO
+#define PCIEMSIX_VECT54_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT54_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT54_ADDR_HI
+#define PCIEMSIX_VECT54_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT54_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT54_MSG_DATA
+#define PCIEMSIX_VECT54_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT54_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT54_CONTROL
+#define PCIEMSIX_VECT54_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT54_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT55_ADDR_LO
+#define PCIEMSIX_VECT55_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT55_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT55_ADDR_HI
+#define PCIEMSIX_VECT55_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT55_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT55_MSG_DATA
+#define PCIEMSIX_VECT55_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT55_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT55_CONTROL
+#define PCIEMSIX_VECT55_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT55_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT56_ADDR_LO
+#define PCIEMSIX_VECT56_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT56_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT56_ADDR_HI
+#define PCIEMSIX_VECT56_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT56_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT56_MSG_DATA
+#define PCIEMSIX_VECT56_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT56_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT56_CONTROL
+#define PCIEMSIX_VECT56_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT56_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT57_ADDR_LO
+#define PCIEMSIX_VECT57_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT57_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT57_ADDR_HI
+#define PCIEMSIX_VECT57_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT57_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT57_MSG_DATA
+#define PCIEMSIX_VECT57_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT57_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT57_CONTROL
+#define PCIEMSIX_VECT57_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT57_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT58_ADDR_LO
+#define PCIEMSIX_VECT58_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT58_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT58_ADDR_HI
+#define PCIEMSIX_VECT58_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT58_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT58_MSG_DATA
+#define PCIEMSIX_VECT58_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT58_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT58_CONTROL
+#define PCIEMSIX_VECT58_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT58_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT59_ADDR_LO
+#define PCIEMSIX_VECT59_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT59_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT59_ADDR_HI
+#define PCIEMSIX_VECT59_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT59_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT59_MSG_DATA
+#define PCIEMSIX_VECT59_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT59_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT59_CONTROL
+#define PCIEMSIX_VECT59_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT59_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT60_ADDR_LO
+#define PCIEMSIX_VECT60_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT60_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT60_ADDR_HI
+#define PCIEMSIX_VECT60_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT60_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT60_MSG_DATA
+#define PCIEMSIX_VECT60_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT60_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT60_CONTROL
+#define PCIEMSIX_VECT60_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT60_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT61_ADDR_LO
+#define PCIEMSIX_VECT61_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT61_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT61_ADDR_HI
+#define PCIEMSIX_VECT61_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT61_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT61_MSG_DATA
+#define PCIEMSIX_VECT61_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT61_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT61_CONTROL
+#define PCIEMSIX_VECT61_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT61_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT62_ADDR_LO
+#define PCIEMSIX_VECT62_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT62_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT62_ADDR_HI
+#define PCIEMSIX_VECT62_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT62_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT62_MSG_DATA
+#define PCIEMSIX_VECT62_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT62_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT62_CONTROL
+#define PCIEMSIX_VECT62_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT62_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT63_ADDR_LO
+#define PCIEMSIX_VECT63_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT63_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT63_ADDR_HI
+#define PCIEMSIX_VECT63_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT63_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT63_MSG_DATA
+#define PCIEMSIX_VECT63_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT63_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT63_CONTROL
+#define PCIEMSIX_VECT63_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT63_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT64_ADDR_LO
+#define PCIEMSIX_VECT64_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT64_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT64_ADDR_HI
+#define PCIEMSIX_VECT64_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT64_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT64_MSG_DATA
+#define PCIEMSIX_VECT64_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT64_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT64_CONTROL
+#define PCIEMSIX_VECT64_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT64_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT65_ADDR_LO
+#define PCIEMSIX_VECT65_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT65_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT65_ADDR_HI
+#define PCIEMSIX_VECT65_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT65_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT65_MSG_DATA
+#define PCIEMSIX_VECT65_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT65_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT65_CONTROL
+#define PCIEMSIX_VECT65_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT65_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT66_ADDR_LO
+#define PCIEMSIX_VECT66_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT66_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT66_ADDR_HI
+#define PCIEMSIX_VECT66_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT66_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT66_MSG_DATA
+#define PCIEMSIX_VECT66_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT66_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT66_CONTROL
+#define PCIEMSIX_VECT66_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT66_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT67_ADDR_LO
+#define PCIEMSIX_VECT67_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT67_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT67_ADDR_HI
+#define PCIEMSIX_VECT67_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT67_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT67_MSG_DATA
+#define PCIEMSIX_VECT67_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT67_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT67_CONTROL
+#define PCIEMSIX_VECT67_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT67_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT68_ADDR_LO
+#define PCIEMSIX_VECT68_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT68_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT68_ADDR_HI
+#define PCIEMSIX_VECT68_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT68_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT68_MSG_DATA
+#define PCIEMSIX_VECT68_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT68_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT68_CONTROL
+#define PCIEMSIX_VECT68_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT68_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT69_ADDR_LO
+#define PCIEMSIX_VECT69_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT69_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT69_ADDR_HI
+#define PCIEMSIX_VECT69_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT69_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT69_MSG_DATA
+#define PCIEMSIX_VECT69_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT69_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT69_CONTROL
+#define PCIEMSIX_VECT69_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT69_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT70_ADDR_LO
+#define PCIEMSIX_VECT70_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT70_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT70_ADDR_HI
+#define PCIEMSIX_VECT70_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT70_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT70_MSG_DATA
+#define PCIEMSIX_VECT70_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT70_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT70_CONTROL
+#define PCIEMSIX_VECT70_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT70_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT71_ADDR_LO
+#define PCIEMSIX_VECT71_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT71_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT71_ADDR_HI
+#define PCIEMSIX_VECT71_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT71_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT71_MSG_DATA
+#define PCIEMSIX_VECT71_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT71_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT71_CONTROL
+#define PCIEMSIX_VECT71_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT71_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT72_ADDR_LO
+#define PCIEMSIX_VECT72_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT72_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT72_ADDR_HI
+#define PCIEMSIX_VECT72_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT72_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT72_MSG_DATA
+#define PCIEMSIX_VECT72_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT72_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT72_CONTROL
+#define PCIEMSIX_VECT72_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT72_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT73_ADDR_LO
+#define PCIEMSIX_VECT73_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT73_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT73_ADDR_HI
+#define PCIEMSIX_VECT73_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT73_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT73_MSG_DATA
+#define PCIEMSIX_VECT73_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT73_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT73_CONTROL
+#define PCIEMSIX_VECT73_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT73_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT74_ADDR_LO
+#define PCIEMSIX_VECT74_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT74_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT74_ADDR_HI
+#define PCIEMSIX_VECT74_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT74_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT74_MSG_DATA
+#define PCIEMSIX_VECT74_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT74_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT74_CONTROL
+#define PCIEMSIX_VECT74_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT74_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT75_ADDR_LO
+#define PCIEMSIX_VECT75_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT75_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT75_ADDR_HI
+#define PCIEMSIX_VECT75_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT75_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT75_MSG_DATA
+#define PCIEMSIX_VECT75_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT75_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT75_CONTROL
+#define PCIEMSIX_VECT75_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT75_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT76_ADDR_LO
+#define PCIEMSIX_VECT76_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT76_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT76_ADDR_HI
+#define PCIEMSIX_VECT76_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT76_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT76_MSG_DATA
+#define PCIEMSIX_VECT76_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT76_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT76_CONTROL
+#define PCIEMSIX_VECT76_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT76_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT77_ADDR_LO
+#define PCIEMSIX_VECT77_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT77_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT77_ADDR_HI
+#define PCIEMSIX_VECT77_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT77_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT77_MSG_DATA
+#define PCIEMSIX_VECT77_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT77_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT77_CONTROL
+#define PCIEMSIX_VECT77_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT77_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT78_ADDR_LO
+#define PCIEMSIX_VECT78_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT78_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT78_ADDR_HI
+#define PCIEMSIX_VECT78_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT78_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT78_MSG_DATA
+#define PCIEMSIX_VECT78_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT78_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT78_CONTROL
+#define PCIEMSIX_VECT78_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT78_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT79_ADDR_LO
+#define PCIEMSIX_VECT79_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT79_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT79_ADDR_HI
+#define PCIEMSIX_VECT79_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT79_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT79_MSG_DATA
+#define PCIEMSIX_VECT79_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT79_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT79_CONTROL
+#define PCIEMSIX_VECT79_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT79_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT80_ADDR_LO
+#define PCIEMSIX_VECT80_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT80_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT80_ADDR_HI
+#define PCIEMSIX_VECT80_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT80_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT80_MSG_DATA
+#define PCIEMSIX_VECT80_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT80_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT80_CONTROL
+#define PCIEMSIX_VECT80_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT80_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT81_ADDR_LO
+#define PCIEMSIX_VECT81_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT81_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT81_ADDR_HI
+#define PCIEMSIX_VECT81_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT81_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT81_MSG_DATA
+#define PCIEMSIX_VECT81_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT81_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT81_CONTROL
+#define PCIEMSIX_VECT81_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT81_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT82_ADDR_LO
+#define PCIEMSIX_VECT82_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT82_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT82_ADDR_HI
+#define PCIEMSIX_VECT82_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT82_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT82_MSG_DATA
+#define PCIEMSIX_VECT82_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT82_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT82_CONTROL
+#define PCIEMSIX_VECT82_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT82_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT83_ADDR_LO
+#define PCIEMSIX_VECT83_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT83_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT83_ADDR_HI
+#define PCIEMSIX_VECT83_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT83_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT83_MSG_DATA
+#define PCIEMSIX_VECT83_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT83_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT83_CONTROL
+#define PCIEMSIX_VECT83_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT83_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT84_ADDR_LO
+#define PCIEMSIX_VECT84_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT84_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT84_ADDR_HI
+#define PCIEMSIX_VECT84_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT84_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT84_MSG_DATA
+#define PCIEMSIX_VECT84_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT84_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT84_CONTROL
+#define PCIEMSIX_VECT84_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT84_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT85_ADDR_LO
+#define PCIEMSIX_VECT85_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT85_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT85_ADDR_HI
+#define PCIEMSIX_VECT85_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT85_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT85_MSG_DATA
+#define PCIEMSIX_VECT85_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT85_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT85_CONTROL
+#define PCIEMSIX_VECT85_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT85_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT86_ADDR_LO
+#define PCIEMSIX_VECT86_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT86_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT86_ADDR_HI
+#define PCIEMSIX_VECT86_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT86_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT86_MSG_DATA
+#define PCIEMSIX_VECT86_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT86_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT86_CONTROL
+#define PCIEMSIX_VECT86_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT86_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT87_ADDR_LO
+#define PCIEMSIX_VECT87_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT87_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT87_ADDR_HI
+#define PCIEMSIX_VECT87_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT87_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT87_MSG_DATA
+#define PCIEMSIX_VECT87_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT87_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT87_CONTROL
+#define PCIEMSIX_VECT87_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT87_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT88_ADDR_LO
+#define PCIEMSIX_VECT88_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT88_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT88_ADDR_HI
+#define PCIEMSIX_VECT88_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT88_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT88_MSG_DATA
+#define PCIEMSIX_VECT88_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT88_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT88_CONTROL
+#define PCIEMSIX_VECT88_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT88_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT89_ADDR_LO
+#define PCIEMSIX_VECT89_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT89_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT89_ADDR_HI
+#define PCIEMSIX_VECT89_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT89_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT89_MSG_DATA
+#define PCIEMSIX_VECT89_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT89_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT89_CONTROL
+#define PCIEMSIX_VECT89_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT89_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT90_ADDR_LO
+#define PCIEMSIX_VECT90_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT90_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT90_ADDR_HI
+#define PCIEMSIX_VECT90_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT90_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT90_MSG_DATA
+#define PCIEMSIX_VECT90_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT90_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT90_CONTROL
+#define PCIEMSIX_VECT90_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT90_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT91_ADDR_LO
+#define PCIEMSIX_VECT91_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT91_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT91_ADDR_HI
+#define PCIEMSIX_VECT91_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT91_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT91_MSG_DATA
+#define PCIEMSIX_VECT91_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT91_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT91_CONTROL
+#define PCIEMSIX_VECT91_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT91_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT92_ADDR_LO
+#define PCIEMSIX_VECT92_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT92_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT92_ADDR_HI
+#define PCIEMSIX_VECT92_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT92_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT92_MSG_DATA
+#define PCIEMSIX_VECT92_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT92_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT92_CONTROL
+#define PCIEMSIX_VECT92_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT92_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT93_ADDR_LO
+#define PCIEMSIX_VECT93_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT93_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT93_ADDR_HI
+#define PCIEMSIX_VECT93_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT93_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT93_MSG_DATA
+#define PCIEMSIX_VECT93_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT93_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT93_CONTROL
+#define PCIEMSIX_VECT93_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT93_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT94_ADDR_LO
+#define PCIEMSIX_VECT94_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT94_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT94_ADDR_HI
+#define PCIEMSIX_VECT94_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT94_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT94_MSG_DATA
+#define PCIEMSIX_VECT94_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT94_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT94_CONTROL
+#define PCIEMSIX_VECT94_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT94_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT95_ADDR_LO
+#define PCIEMSIX_VECT95_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT95_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT95_ADDR_HI
+#define PCIEMSIX_VECT95_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT95_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT95_MSG_DATA
+#define PCIEMSIX_VECT95_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT95_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT95_CONTROL
+#define PCIEMSIX_VECT95_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT95_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT96_ADDR_LO
+#define PCIEMSIX_VECT96_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT96_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT96_ADDR_HI
+#define PCIEMSIX_VECT96_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT96_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT96_MSG_DATA
+#define PCIEMSIX_VECT96_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT96_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT96_CONTROL
+#define PCIEMSIX_VECT96_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT96_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT97_ADDR_LO
+#define PCIEMSIX_VECT97_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT97_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT97_ADDR_HI
+#define PCIEMSIX_VECT97_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT97_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT97_MSG_DATA
+#define PCIEMSIX_VECT97_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT97_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT97_CONTROL
+#define PCIEMSIX_VECT97_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT97_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT98_ADDR_LO
+#define PCIEMSIX_VECT98_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT98_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT98_ADDR_HI
+#define PCIEMSIX_VECT98_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT98_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT98_MSG_DATA
+#define PCIEMSIX_VECT98_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT98_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT98_CONTROL
+#define PCIEMSIX_VECT98_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT98_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT99_ADDR_LO
+#define PCIEMSIX_VECT99_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT99_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT99_ADDR_HI
+#define PCIEMSIX_VECT99_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT99_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT99_MSG_DATA
+#define PCIEMSIX_VECT99_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT99_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT99_CONTROL
+#define PCIEMSIX_VECT99_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT99_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT100_ADDR_LO
+#define PCIEMSIX_VECT100_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT100_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT100_ADDR_HI
+#define PCIEMSIX_VECT100_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT100_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT100_MSG_DATA
+#define PCIEMSIX_VECT100_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT100_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT100_CONTROL
+#define PCIEMSIX_VECT100_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT100_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT101_ADDR_LO
+#define PCIEMSIX_VECT101_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT101_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT101_ADDR_HI
+#define PCIEMSIX_VECT101_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT101_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT101_MSG_DATA
+#define PCIEMSIX_VECT101_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT101_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT101_CONTROL
+#define PCIEMSIX_VECT101_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT101_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT102_ADDR_LO
+#define PCIEMSIX_VECT102_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT102_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT102_ADDR_HI
+#define PCIEMSIX_VECT102_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT102_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT102_MSG_DATA
+#define PCIEMSIX_VECT102_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT102_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT102_CONTROL
+#define PCIEMSIX_VECT102_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT102_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT103_ADDR_LO
+#define PCIEMSIX_VECT103_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT103_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT103_ADDR_HI
+#define PCIEMSIX_VECT103_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT103_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT103_MSG_DATA
+#define PCIEMSIX_VECT103_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT103_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT103_CONTROL
+#define PCIEMSIX_VECT103_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT103_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT104_ADDR_LO
+#define PCIEMSIX_VECT104_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT104_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT104_ADDR_HI
+#define PCIEMSIX_VECT104_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT104_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT104_MSG_DATA
+#define PCIEMSIX_VECT104_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT104_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT104_CONTROL
+#define PCIEMSIX_VECT104_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT104_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT105_ADDR_LO
+#define PCIEMSIX_VECT105_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT105_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT105_ADDR_HI
+#define PCIEMSIX_VECT105_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT105_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT105_MSG_DATA
+#define PCIEMSIX_VECT105_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT105_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT105_CONTROL
+#define PCIEMSIX_VECT105_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT105_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT106_ADDR_LO
+#define PCIEMSIX_VECT106_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT106_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT106_ADDR_HI
+#define PCIEMSIX_VECT106_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT106_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT106_MSG_DATA
+#define PCIEMSIX_VECT106_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT106_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT106_CONTROL
+#define PCIEMSIX_VECT106_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT106_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT107_ADDR_LO
+#define PCIEMSIX_VECT107_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT107_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT107_ADDR_HI
+#define PCIEMSIX_VECT107_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT107_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT107_MSG_DATA
+#define PCIEMSIX_VECT107_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT107_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT107_CONTROL
+#define PCIEMSIX_VECT107_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT107_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT108_ADDR_LO
+#define PCIEMSIX_VECT108_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT108_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT108_ADDR_HI
+#define PCIEMSIX_VECT108_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT108_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT108_MSG_DATA
+#define PCIEMSIX_VECT108_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT108_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT108_CONTROL
+#define PCIEMSIX_VECT108_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT108_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT109_ADDR_LO
+#define PCIEMSIX_VECT109_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT109_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT109_ADDR_HI
+#define PCIEMSIX_VECT109_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT109_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT109_MSG_DATA
+#define PCIEMSIX_VECT109_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT109_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT109_CONTROL
+#define PCIEMSIX_VECT109_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT109_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT110_ADDR_LO
+#define PCIEMSIX_VECT110_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT110_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT110_ADDR_HI
+#define PCIEMSIX_VECT110_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT110_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT110_MSG_DATA
+#define PCIEMSIX_VECT110_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT110_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT110_CONTROL
+#define PCIEMSIX_VECT110_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT110_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT111_ADDR_LO
+#define PCIEMSIX_VECT111_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT111_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT111_ADDR_HI
+#define PCIEMSIX_VECT111_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT111_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT111_MSG_DATA
+#define PCIEMSIX_VECT111_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT111_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT111_CONTROL
+#define PCIEMSIX_VECT111_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT111_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT112_ADDR_LO
+#define PCIEMSIX_VECT112_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT112_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT112_ADDR_HI
+#define PCIEMSIX_VECT112_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT112_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT112_MSG_DATA
+#define PCIEMSIX_VECT112_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT112_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT112_CONTROL
+#define PCIEMSIX_VECT112_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT112_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT113_ADDR_LO
+#define PCIEMSIX_VECT113_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT113_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT113_ADDR_HI
+#define PCIEMSIX_VECT113_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT113_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT113_MSG_DATA
+#define PCIEMSIX_VECT113_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT113_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT113_CONTROL
+#define PCIEMSIX_VECT113_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT113_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT114_ADDR_LO
+#define PCIEMSIX_VECT114_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT114_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT114_ADDR_HI
+#define PCIEMSIX_VECT114_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT114_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT114_MSG_DATA
+#define PCIEMSIX_VECT114_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT114_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT114_CONTROL
+#define PCIEMSIX_VECT114_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT114_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT115_ADDR_LO
+#define PCIEMSIX_VECT115_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT115_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT115_ADDR_HI
+#define PCIEMSIX_VECT115_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT115_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT115_MSG_DATA
+#define PCIEMSIX_VECT115_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT115_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT115_CONTROL
+#define PCIEMSIX_VECT115_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT115_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT116_ADDR_LO
+#define PCIEMSIX_VECT116_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT116_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT116_ADDR_HI
+#define PCIEMSIX_VECT116_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT116_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT116_MSG_DATA
+#define PCIEMSIX_VECT116_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT116_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT116_CONTROL
+#define PCIEMSIX_VECT116_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT116_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT117_ADDR_LO
+#define PCIEMSIX_VECT117_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT117_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT117_ADDR_HI
+#define PCIEMSIX_VECT117_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT117_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT117_MSG_DATA
+#define PCIEMSIX_VECT117_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT117_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT117_CONTROL
+#define PCIEMSIX_VECT117_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT117_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT118_ADDR_LO
+#define PCIEMSIX_VECT118_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT118_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT118_ADDR_HI
+#define PCIEMSIX_VECT118_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT118_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT118_MSG_DATA
+#define PCIEMSIX_VECT118_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT118_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT118_CONTROL
+#define PCIEMSIX_VECT118_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT118_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT119_ADDR_LO
+#define PCIEMSIX_VECT119_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT119_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT119_ADDR_HI
+#define PCIEMSIX_VECT119_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT119_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT119_MSG_DATA
+#define PCIEMSIX_VECT119_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT119_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT119_CONTROL
+#define PCIEMSIX_VECT119_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT119_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT120_ADDR_LO
+#define PCIEMSIX_VECT120_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT120_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT120_ADDR_HI
+#define PCIEMSIX_VECT120_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT120_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT120_MSG_DATA
+#define PCIEMSIX_VECT120_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT120_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT120_CONTROL
+#define PCIEMSIX_VECT120_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT120_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT121_ADDR_LO
+#define PCIEMSIX_VECT121_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT121_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT121_ADDR_HI
+#define PCIEMSIX_VECT121_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT121_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT121_MSG_DATA
+#define PCIEMSIX_VECT121_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT121_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT121_CONTROL
+#define PCIEMSIX_VECT121_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT121_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT122_ADDR_LO
+#define PCIEMSIX_VECT122_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT122_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT122_ADDR_HI
+#define PCIEMSIX_VECT122_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT122_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT122_MSG_DATA
+#define PCIEMSIX_VECT122_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT122_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT122_CONTROL
+#define PCIEMSIX_VECT122_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT122_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT123_ADDR_LO
+#define PCIEMSIX_VECT123_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT123_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT123_ADDR_HI
+#define PCIEMSIX_VECT123_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT123_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT123_MSG_DATA
+#define PCIEMSIX_VECT123_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT123_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT123_CONTROL
+#define PCIEMSIX_VECT123_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT123_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT124_ADDR_LO
+#define PCIEMSIX_VECT124_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT124_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT124_ADDR_HI
+#define PCIEMSIX_VECT124_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT124_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT124_MSG_DATA
+#define PCIEMSIX_VECT124_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT124_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT124_CONTROL
+#define PCIEMSIX_VECT124_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT124_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT125_ADDR_LO
+#define PCIEMSIX_VECT125_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT125_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT125_ADDR_HI
+#define PCIEMSIX_VECT125_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT125_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT125_MSG_DATA
+#define PCIEMSIX_VECT125_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT125_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT125_CONTROL
+#define PCIEMSIX_VECT125_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT125_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT126_ADDR_LO
+#define PCIEMSIX_VECT126_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT126_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT126_ADDR_HI
+#define PCIEMSIX_VECT126_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT126_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT126_MSG_DATA
+#define PCIEMSIX_VECT126_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT126_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT126_CONTROL
+#define PCIEMSIX_VECT126_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT126_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT127_ADDR_LO
+#define PCIEMSIX_VECT127_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT127_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT127_ADDR_HI
+#define PCIEMSIX_VECT127_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT127_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT127_MSG_DATA
+#define PCIEMSIX_VECT127_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT127_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT127_CONTROL
+#define PCIEMSIX_VECT127_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT127_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT128_ADDR_LO
+#define PCIEMSIX_VECT128_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT128_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT128_ADDR_HI
+#define PCIEMSIX_VECT128_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT128_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT128_MSG_DATA
+#define PCIEMSIX_VECT128_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT128_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT128_CONTROL
+#define PCIEMSIX_VECT128_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT128_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT129_ADDR_LO
+#define PCIEMSIX_VECT129_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT129_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT129_ADDR_HI
+#define PCIEMSIX_VECT129_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT129_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT129_MSG_DATA
+#define PCIEMSIX_VECT129_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT129_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT129_CONTROL
+#define PCIEMSIX_VECT129_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT129_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT130_ADDR_LO
+#define PCIEMSIX_VECT130_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT130_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT130_ADDR_HI
+#define PCIEMSIX_VECT130_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT130_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT130_MSG_DATA
+#define PCIEMSIX_VECT130_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT130_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT130_CONTROL
+#define PCIEMSIX_VECT130_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT130_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT131_ADDR_LO
+#define PCIEMSIX_VECT131_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT131_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT131_ADDR_HI
+#define PCIEMSIX_VECT131_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT131_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT131_MSG_DATA
+#define PCIEMSIX_VECT131_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT131_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT131_CONTROL
+#define PCIEMSIX_VECT131_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT131_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT132_ADDR_LO
+#define PCIEMSIX_VECT132_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT132_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT132_ADDR_HI
+#define PCIEMSIX_VECT132_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT132_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT132_MSG_DATA
+#define PCIEMSIX_VECT132_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT132_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT132_CONTROL
+#define PCIEMSIX_VECT132_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT132_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT133_ADDR_LO
+#define PCIEMSIX_VECT133_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT133_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT133_ADDR_HI
+#define PCIEMSIX_VECT133_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT133_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT133_MSG_DATA
+#define PCIEMSIX_VECT133_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT133_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT133_CONTROL
+#define PCIEMSIX_VECT133_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT133_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT134_ADDR_LO
+#define PCIEMSIX_VECT134_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT134_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT134_ADDR_HI
+#define PCIEMSIX_VECT134_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT134_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT134_MSG_DATA
+#define PCIEMSIX_VECT134_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT134_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT134_CONTROL
+#define PCIEMSIX_VECT134_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT134_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT135_ADDR_LO
+#define PCIEMSIX_VECT135_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT135_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT135_ADDR_HI
+#define PCIEMSIX_VECT135_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT135_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT135_MSG_DATA
+#define PCIEMSIX_VECT135_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT135_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT135_CONTROL
+#define PCIEMSIX_VECT135_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT135_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT136_ADDR_LO
+#define PCIEMSIX_VECT136_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT136_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT136_ADDR_HI
+#define PCIEMSIX_VECT136_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT136_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT136_MSG_DATA
+#define PCIEMSIX_VECT136_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT136_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT136_CONTROL
+#define PCIEMSIX_VECT136_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT136_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT137_ADDR_LO
+#define PCIEMSIX_VECT137_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT137_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT137_ADDR_HI
+#define PCIEMSIX_VECT137_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT137_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT137_MSG_DATA
+#define PCIEMSIX_VECT137_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT137_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT137_CONTROL
+#define PCIEMSIX_VECT137_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT137_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT138_ADDR_LO
+#define PCIEMSIX_VECT138_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT138_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT138_ADDR_HI
+#define PCIEMSIX_VECT138_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT138_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT138_MSG_DATA
+#define PCIEMSIX_VECT138_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT138_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT138_CONTROL
+#define PCIEMSIX_VECT138_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT138_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT139_ADDR_LO
+#define PCIEMSIX_VECT139_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT139_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT139_ADDR_HI
+#define PCIEMSIX_VECT139_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT139_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT139_MSG_DATA
+#define PCIEMSIX_VECT139_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT139_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT139_CONTROL
+#define PCIEMSIX_VECT139_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT139_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT140_ADDR_LO
+#define PCIEMSIX_VECT140_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT140_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT140_ADDR_HI
+#define PCIEMSIX_VECT140_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT140_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT140_MSG_DATA
+#define PCIEMSIX_VECT140_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT140_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT140_CONTROL
+#define PCIEMSIX_VECT140_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT140_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT141_ADDR_LO
+#define PCIEMSIX_VECT141_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT141_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT141_ADDR_HI
+#define PCIEMSIX_VECT141_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT141_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT141_MSG_DATA
+#define PCIEMSIX_VECT141_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT141_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT141_CONTROL
+#define PCIEMSIX_VECT141_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT141_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT142_ADDR_LO
+#define PCIEMSIX_VECT142_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT142_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT142_ADDR_HI
+#define PCIEMSIX_VECT142_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT142_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT142_MSG_DATA
+#define PCIEMSIX_VECT142_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT142_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT142_CONTROL
+#define PCIEMSIX_VECT142_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT142_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT143_ADDR_LO
+#define PCIEMSIX_VECT143_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT143_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT143_ADDR_HI
+#define PCIEMSIX_VECT143_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT143_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT143_MSG_DATA
+#define PCIEMSIX_VECT143_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT143_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT143_CONTROL
+#define PCIEMSIX_VECT143_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT143_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT144_ADDR_LO
+#define PCIEMSIX_VECT144_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT144_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT144_ADDR_HI
+#define PCIEMSIX_VECT144_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT144_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT144_MSG_DATA
+#define PCIEMSIX_VECT144_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT144_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT144_CONTROL
+#define PCIEMSIX_VECT144_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT144_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT145_ADDR_LO
+#define PCIEMSIX_VECT145_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT145_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT145_ADDR_HI
+#define PCIEMSIX_VECT145_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT145_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT145_MSG_DATA
+#define PCIEMSIX_VECT145_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT145_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT145_CONTROL
+#define PCIEMSIX_VECT145_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT145_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT146_ADDR_LO
+#define PCIEMSIX_VECT146_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT146_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT146_ADDR_HI
+#define PCIEMSIX_VECT146_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT146_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT146_MSG_DATA
+#define PCIEMSIX_VECT146_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT146_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT146_CONTROL
+#define PCIEMSIX_VECT146_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT146_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT147_ADDR_LO
+#define PCIEMSIX_VECT147_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT147_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT147_ADDR_HI
+#define PCIEMSIX_VECT147_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT147_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT147_MSG_DATA
+#define PCIEMSIX_VECT147_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT147_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT147_CONTROL
+#define PCIEMSIX_VECT147_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT147_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT148_ADDR_LO
+#define PCIEMSIX_VECT148_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT148_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT148_ADDR_HI
+#define PCIEMSIX_VECT148_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT148_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT148_MSG_DATA
+#define PCIEMSIX_VECT148_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT148_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT148_CONTROL
+#define PCIEMSIX_VECT148_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT148_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT149_ADDR_LO
+#define PCIEMSIX_VECT149_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT149_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT149_ADDR_HI
+#define PCIEMSIX_VECT149_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT149_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT149_MSG_DATA
+#define PCIEMSIX_VECT149_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT149_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT149_CONTROL
+#define PCIEMSIX_VECT149_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT149_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT150_ADDR_LO
+#define PCIEMSIX_VECT150_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT150_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT150_ADDR_HI
+#define PCIEMSIX_VECT150_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT150_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT150_MSG_DATA
+#define PCIEMSIX_VECT150_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT150_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT150_CONTROL
+#define PCIEMSIX_VECT150_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT150_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT151_ADDR_LO
+#define PCIEMSIX_VECT151_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT151_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT151_ADDR_HI
+#define PCIEMSIX_VECT151_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT151_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT151_MSG_DATA
+#define PCIEMSIX_VECT151_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT151_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT151_CONTROL
+#define PCIEMSIX_VECT151_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT151_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT152_ADDR_LO
+#define PCIEMSIX_VECT152_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT152_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT152_ADDR_HI
+#define PCIEMSIX_VECT152_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT152_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT152_MSG_DATA
+#define PCIEMSIX_VECT152_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT152_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT152_CONTROL
+#define PCIEMSIX_VECT152_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT152_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT153_ADDR_LO
+#define PCIEMSIX_VECT153_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT153_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT153_ADDR_HI
+#define PCIEMSIX_VECT153_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT153_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT153_MSG_DATA
+#define PCIEMSIX_VECT153_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT153_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT153_CONTROL
+#define PCIEMSIX_VECT153_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT153_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT154_ADDR_LO
+#define PCIEMSIX_VECT154_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT154_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT154_ADDR_HI
+#define PCIEMSIX_VECT154_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT154_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT154_MSG_DATA
+#define PCIEMSIX_VECT154_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT154_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT154_CONTROL
+#define PCIEMSIX_VECT154_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT154_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT155_ADDR_LO
+#define PCIEMSIX_VECT155_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT155_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT155_ADDR_HI
+#define PCIEMSIX_VECT155_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT155_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT155_MSG_DATA
+#define PCIEMSIX_VECT155_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT155_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT155_CONTROL
+#define PCIEMSIX_VECT155_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT155_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT156_ADDR_LO
+#define PCIEMSIX_VECT156_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT156_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT156_ADDR_HI
+#define PCIEMSIX_VECT156_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT156_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT156_MSG_DATA
+#define PCIEMSIX_VECT156_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT156_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT156_CONTROL
+#define PCIEMSIX_VECT156_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT156_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT157_ADDR_LO
+#define PCIEMSIX_VECT157_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT157_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT157_ADDR_HI
+#define PCIEMSIX_VECT157_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT157_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT157_MSG_DATA
+#define PCIEMSIX_VECT157_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT157_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT157_CONTROL
+#define PCIEMSIX_VECT157_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT157_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT158_ADDR_LO
+#define PCIEMSIX_VECT158_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT158_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT158_ADDR_HI
+#define PCIEMSIX_VECT158_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT158_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT158_MSG_DATA
+#define PCIEMSIX_VECT158_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT158_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT158_CONTROL
+#define PCIEMSIX_VECT158_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT158_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT159_ADDR_LO
+#define PCIEMSIX_VECT159_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT159_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT159_ADDR_HI
+#define PCIEMSIX_VECT159_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT159_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT159_MSG_DATA
+#define PCIEMSIX_VECT159_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT159_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT159_CONTROL
+#define PCIEMSIX_VECT159_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT159_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT160_ADDR_LO
+#define PCIEMSIX_VECT160_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT160_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT160_ADDR_HI
+#define PCIEMSIX_VECT160_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT160_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT160_MSG_DATA
+#define PCIEMSIX_VECT160_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT160_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT160_CONTROL
+#define PCIEMSIX_VECT160_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT160_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT161_ADDR_LO
+#define PCIEMSIX_VECT161_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT161_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT161_ADDR_HI
+#define PCIEMSIX_VECT161_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT161_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT161_MSG_DATA
+#define PCIEMSIX_VECT161_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT161_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT161_CONTROL
+#define PCIEMSIX_VECT161_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT161_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT162_ADDR_LO
+#define PCIEMSIX_VECT162_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT162_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT162_ADDR_HI
+#define PCIEMSIX_VECT162_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT162_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT162_MSG_DATA
+#define PCIEMSIX_VECT162_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT162_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT162_CONTROL
+#define PCIEMSIX_VECT162_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT162_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT163_ADDR_LO
+#define PCIEMSIX_VECT163_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT163_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT163_ADDR_HI
+#define PCIEMSIX_VECT163_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT163_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT163_MSG_DATA
+#define PCIEMSIX_VECT163_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT163_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT163_CONTROL
+#define PCIEMSIX_VECT163_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT163_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT164_ADDR_LO
+#define PCIEMSIX_VECT164_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT164_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT164_ADDR_HI
+#define PCIEMSIX_VECT164_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT164_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT164_MSG_DATA
+#define PCIEMSIX_VECT164_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT164_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT164_CONTROL
+#define PCIEMSIX_VECT164_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT164_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT165_ADDR_LO
+#define PCIEMSIX_VECT165_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT165_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT165_ADDR_HI
+#define PCIEMSIX_VECT165_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT165_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT165_MSG_DATA
+#define PCIEMSIX_VECT165_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT165_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT165_CONTROL
+#define PCIEMSIX_VECT165_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT165_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT166_ADDR_LO
+#define PCIEMSIX_VECT166_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT166_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT166_ADDR_HI
+#define PCIEMSIX_VECT166_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT166_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT166_MSG_DATA
+#define PCIEMSIX_VECT166_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT166_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT166_CONTROL
+#define PCIEMSIX_VECT166_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT166_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT167_ADDR_LO
+#define PCIEMSIX_VECT167_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT167_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT167_ADDR_HI
+#define PCIEMSIX_VECT167_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT167_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT167_MSG_DATA
+#define PCIEMSIX_VECT167_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT167_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT167_CONTROL
+#define PCIEMSIX_VECT167_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT167_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT168_ADDR_LO
+#define PCIEMSIX_VECT168_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT168_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT168_ADDR_HI
+#define PCIEMSIX_VECT168_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT168_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT168_MSG_DATA
+#define PCIEMSIX_VECT168_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT168_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT168_CONTROL
+#define PCIEMSIX_VECT168_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT168_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT169_ADDR_LO
+#define PCIEMSIX_VECT169_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT169_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT169_ADDR_HI
+#define PCIEMSIX_VECT169_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT169_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT169_MSG_DATA
+#define PCIEMSIX_VECT169_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT169_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT169_CONTROL
+#define PCIEMSIX_VECT169_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT169_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT170_ADDR_LO
+#define PCIEMSIX_VECT170_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT170_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT170_ADDR_HI
+#define PCIEMSIX_VECT170_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT170_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT170_MSG_DATA
+#define PCIEMSIX_VECT170_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT170_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT170_CONTROL
+#define PCIEMSIX_VECT170_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT170_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT171_ADDR_LO
+#define PCIEMSIX_VECT171_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT171_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT171_ADDR_HI
+#define PCIEMSIX_VECT171_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT171_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT171_MSG_DATA
+#define PCIEMSIX_VECT171_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT171_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT171_CONTROL
+#define PCIEMSIX_VECT171_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT171_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT172_ADDR_LO
+#define PCIEMSIX_VECT172_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT172_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT172_ADDR_HI
+#define PCIEMSIX_VECT172_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT172_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT172_MSG_DATA
+#define PCIEMSIX_VECT172_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT172_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT172_CONTROL
+#define PCIEMSIX_VECT172_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT172_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT173_ADDR_LO
+#define PCIEMSIX_VECT173_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT173_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT173_ADDR_HI
+#define PCIEMSIX_VECT173_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT173_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT173_MSG_DATA
+#define PCIEMSIX_VECT173_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT173_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT173_CONTROL
+#define PCIEMSIX_VECT173_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT173_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT174_ADDR_LO
+#define PCIEMSIX_VECT174_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT174_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT174_ADDR_HI
+#define PCIEMSIX_VECT174_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT174_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT174_MSG_DATA
+#define PCIEMSIX_VECT174_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT174_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT174_CONTROL
+#define PCIEMSIX_VECT174_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT174_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT175_ADDR_LO
+#define PCIEMSIX_VECT175_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT175_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT175_ADDR_HI
+#define PCIEMSIX_VECT175_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT175_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT175_MSG_DATA
+#define PCIEMSIX_VECT175_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT175_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT175_CONTROL
+#define PCIEMSIX_VECT175_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT175_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT176_ADDR_LO
+#define PCIEMSIX_VECT176_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT176_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT176_ADDR_HI
+#define PCIEMSIX_VECT176_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT176_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT176_MSG_DATA
+#define PCIEMSIX_VECT176_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT176_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT176_CONTROL
+#define PCIEMSIX_VECT176_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT176_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT177_ADDR_LO
+#define PCIEMSIX_VECT177_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT177_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT177_ADDR_HI
+#define PCIEMSIX_VECT177_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT177_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT177_MSG_DATA
+#define PCIEMSIX_VECT177_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT177_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT177_CONTROL
+#define PCIEMSIX_VECT177_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT177_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT178_ADDR_LO
+#define PCIEMSIX_VECT178_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT178_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT178_ADDR_HI
+#define PCIEMSIX_VECT178_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT178_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT178_MSG_DATA
+#define PCIEMSIX_VECT178_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT178_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT178_CONTROL
+#define PCIEMSIX_VECT178_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT178_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT179_ADDR_LO
+#define PCIEMSIX_VECT179_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT179_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT179_ADDR_HI
+#define PCIEMSIX_VECT179_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT179_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT179_MSG_DATA
+#define PCIEMSIX_VECT179_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT179_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT179_CONTROL
+#define PCIEMSIX_VECT179_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT179_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT180_ADDR_LO
+#define PCIEMSIX_VECT180_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT180_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT180_ADDR_HI
+#define PCIEMSIX_VECT180_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT180_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT180_MSG_DATA
+#define PCIEMSIX_VECT180_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT180_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT180_CONTROL
+#define PCIEMSIX_VECT180_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT180_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT181_ADDR_LO
+#define PCIEMSIX_VECT181_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT181_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT181_ADDR_HI
+#define PCIEMSIX_VECT181_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT181_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT181_MSG_DATA
+#define PCIEMSIX_VECT181_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT181_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT181_CONTROL
+#define PCIEMSIX_VECT181_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT181_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT182_ADDR_LO
+#define PCIEMSIX_VECT182_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT182_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT182_ADDR_HI
+#define PCIEMSIX_VECT182_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT182_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT182_MSG_DATA
+#define PCIEMSIX_VECT182_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT182_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT182_CONTROL
+#define PCIEMSIX_VECT182_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT182_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT183_ADDR_LO
+#define PCIEMSIX_VECT183_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT183_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT183_ADDR_HI
+#define PCIEMSIX_VECT183_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT183_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT183_MSG_DATA
+#define PCIEMSIX_VECT183_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT183_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT183_CONTROL
+#define PCIEMSIX_VECT183_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT183_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT184_ADDR_LO
+#define PCIEMSIX_VECT184_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT184_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT184_ADDR_HI
+#define PCIEMSIX_VECT184_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT184_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT184_MSG_DATA
+#define PCIEMSIX_VECT184_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT184_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT184_CONTROL
+#define PCIEMSIX_VECT184_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT184_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT185_ADDR_LO
+#define PCIEMSIX_VECT185_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT185_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT185_ADDR_HI
+#define PCIEMSIX_VECT185_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT185_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT185_MSG_DATA
+#define PCIEMSIX_VECT185_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT185_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT185_CONTROL
+#define PCIEMSIX_VECT185_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT185_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT186_ADDR_LO
+#define PCIEMSIX_VECT186_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT186_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT186_ADDR_HI
+#define PCIEMSIX_VECT186_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT186_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT186_MSG_DATA
+#define PCIEMSIX_VECT186_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT186_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT186_CONTROL
+#define PCIEMSIX_VECT186_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT186_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT187_ADDR_LO
+#define PCIEMSIX_VECT187_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT187_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT187_ADDR_HI
+#define PCIEMSIX_VECT187_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT187_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT187_MSG_DATA
+#define PCIEMSIX_VECT187_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT187_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT187_CONTROL
+#define PCIEMSIX_VECT187_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT187_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT188_ADDR_LO
+#define PCIEMSIX_VECT188_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT188_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT188_ADDR_HI
+#define PCIEMSIX_VECT188_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT188_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT188_MSG_DATA
+#define PCIEMSIX_VECT188_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT188_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT188_CONTROL
+#define PCIEMSIX_VECT188_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT188_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT189_ADDR_LO
+#define PCIEMSIX_VECT189_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT189_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT189_ADDR_HI
+#define PCIEMSIX_VECT189_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT189_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT189_MSG_DATA
+#define PCIEMSIX_VECT189_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT189_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT189_CONTROL
+#define PCIEMSIX_VECT189_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT189_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT190_ADDR_LO
+#define PCIEMSIX_VECT190_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT190_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT190_ADDR_HI
+#define PCIEMSIX_VECT190_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT190_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT190_MSG_DATA
+#define PCIEMSIX_VECT190_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT190_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT190_CONTROL
+#define PCIEMSIX_VECT190_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT190_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT191_ADDR_LO
+#define PCIEMSIX_VECT191_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT191_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT191_ADDR_HI
+#define PCIEMSIX_VECT191_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT191_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT191_MSG_DATA
+#define PCIEMSIX_VECT191_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT191_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT191_CONTROL
+#define PCIEMSIX_VECT191_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT191_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT192_ADDR_LO
+#define PCIEMSIX_VECT192_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT192_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT192_ADDR_HI
+#define PCIEMSIX_VECT192_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT192_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT192_MSG_DATA
+#define PCIEMSIX_VECT192_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT192_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT192_CONTROL
+#define PCIEMSIX_VECT192_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT192_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT193_ADDR_LO
+#define PCIEMSIX_VECT193_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT193_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT193_ADDR_HI
+#define PCIEMSIX_VECT193_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT193_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT193_MSG_DATA
+#define PCIEMSIX_VECT193_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT193_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT193_CONTROL
+#define PCIEMSIX_VECT193_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT193_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT194_ADDR_LO
+#define PCIEMSIX_VECT194_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT194_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT194_ADDR_HI
+#define PCIEMSIX_VECT194_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT194_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT194_MSG_DATA
+#define PCIEMSIX_VECT194_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT194_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT194_CONTROL
+#define PCIEMSIX_VECT194_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT194_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT195_ADDR_LO
+#define PCIEMSIX_VECT195_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT195_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT195_ADDR_HI
+#define PCIEMSIX_VECT195_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT195_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT195_MSG_DATA
+#define PCIEMSIX_VECT195_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT195_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT195_CONTROL
+#define PCIEMSIX_VECT195_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT195_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT196_ADDR_LO
+#define PCIEMSIX_VECT196_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT196_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT196_ADDR_HI
+#define PCIEMSIX_VECT196_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT196_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT196_MSG_DATA
+#define PCIEMSIX_VECT196_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT196_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT196_CONTROL
+#define PCIEMSIX_VECT196_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT196_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT197_ADDR_LO
+#define PCIEMSIX_VECT197_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT197_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT197_ADDR_HI
+#define PCIEMSIX_VECT197_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT197_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT197_MSG_DATA
+#define PCIEMSIX_VECT197_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT197_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT197_CONTROL
+#define PCIEMSIX_VECT197_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT197_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT198_ADDR_LO
+#define PCIEMSIX_VECT198_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT198_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT198_ADDR_HI
+#define PCIEMSIX_VECT198_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT198_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT198_MSG_DATA
+#define PCIEMSIX_VECT198_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT198_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT198_CONTROL
+#define PCIEMSIX_VECT198_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT198_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT199_ADDR_LO
+#define PCIEMSIX_VECT199_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT199_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT199_ADDR_HI
+#define PCIEMSIX_VECT199_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT199_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT199_MSG_DATA
+#define PCIEMSIX_VECT199_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT199_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT199_CONTROL
+#define PCIEMSIX_VECT199_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT199_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT200_ADDR_LO
+#define PCIEMSIX_VECT200_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT200_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT200_ADDR_HI
+#define PCIEMSIX_VECT200_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT200_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT200_MSG_DATA
+#define PCIEMSIX_VECT200_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT200_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT200_CONTROL
+#define PCIEMSIX_VECT200_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT200_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT201_ADDR_LO
+#define PCIEMSIX_VECT201_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT201_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT201_ADDR_HI
+#define PCIEMSIX_VECT201_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT201_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT201_MSG_DATA
+#define PCIEMSIX_VECT201_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT201_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT201_CONTROL
+#define PCIEMSIX_VECT201_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT201_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT202_ADDR_LO
+#define PCIEMSIX_VECT202_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT202_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT202_ADDR_HI
+#define PCIEMSIX_VECT202_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT202_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT202_MSG_DATA
+#define PCIEMSIX_VECT202_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT202_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT202_CONTROL
+#define PCIEMSIX_VECT202_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT202_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT203_ADDR_LO
+#define PCIEMSIX_VECT203_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT203_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT203_ADDR_HI
+#define PCIEMSIX_VECT203_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT203_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT203_MSG_DATA
+#define PCIEMSIX_VECT203_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT203_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT203_CONTROL
+#define PCIEMSIX_VECT203_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT203_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT204_ADDR_LO
+#define PCIEMSIX_VECT204_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT204_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT204_ADDR_HI
+#define PCIEMSIX_VECT204_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT204_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT204_MSG_DATA
+#define PCIEMSIX_VECT204_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT204_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT204_CONTROL
+#define PCIEMSIX_VECT204_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT204_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT205_ADDR_LO
+#define PCIEMSIX_VECT205_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT205_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT205_ADDR_HI
+#define PCIEMSIX_VECT205_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT205_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT205_MSG_DATA
+#define PCIEMSIX_VECT205_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT205_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT205_CONTROL
+#define PCIEMSIX_VECT205_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT205_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT206_ADDR_LO
+#define PCIEMSIX_VECT206_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT206_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT206_ADDR_HI
+#define PCIEMSIX_VECT206_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT206_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT206_MSG_DATA
+#define PCIEMSIX_VECT206_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT206_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT206_CONTROL
+#define PCIEMSIX_VECT206_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT206_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT207_ADDR_LO
+#define PCIEMSIX_VECT207_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT207_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT207_ADDR_HI
+#define PCIEMSIX_VECT207_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT207_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT207_MSG_DATA
+#define PCIEMSIX_VECT207_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT207_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT207_CONTROL
+#define PCIEMSIX_VECT207_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT207_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT208_ADDR_LO
+#define PCIEMSIX_VECT208_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT208_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT208_ADDR_HI
+#define PCIEMSIX_VECT208_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT208_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT208_MSG_DATA
+#define PCIEMSIX_VECT208_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT208_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT208_CONTROL
+#define PCIEMSIX_VECT208_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT208_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT209_ADDR_LO
+#define PCIEMSIX_VECT209_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT209_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT209_ADDR_HI
+#define PCIEMSIX_VECT209_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT209_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT209_MSG_DATA
+#define PCIEMSIX_VECT209_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT209_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT209_CONTROL
+#define PCIEMSIX_VECT209_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT209_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT210_ADDR_LO
+#define PCIEMSIX_VECT210_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT210_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT210_ADDR_HI
+#define PCIEMSIX_VECT210_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT210_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT210_MSG_DATA
+#define PCIEMSIX_VECT210_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT210_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT210_CONTROL
+#define PCIEMSIX_VECT210_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT210_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT211_ADDR_LO
+#define PCIEMSIX_VECT211_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT211_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT211_ADDR_HI
+#define PCIEMSIX_VECT211_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT211_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT211_MSG_DATA
+#define PCIEMSIX_VECT211_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT211_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT211_CONTROL
+#define PCIEMSIX_VECT211_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT211_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT212_ADDR_LO
+#define PCIEMSIX_VECT212_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT212_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT212_ADDR_HI
+#define PCIEMSIX_VECT212_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT212_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT212_MSG_DATA
+#define PCIEMSIX_VECT212_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT212_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT212_CONTROL
+#define PCIEMSIX_VECT212_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT212_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT213_ADDR_LO
+#define PCIEMSIX_VECT213_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT213_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT213_ADDR_HI
+#define PCIEMSIX_VECT213_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT213_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT213_MSG_DATA
+#define PCIEMSIX_VECT213_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT213_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT213_CONTROL
+#define PCIEMSIX_VECT213_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT213_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT214_ADDR_LO
+#define PCIEMSIX_VECT214_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT214_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT214_ADDR_HI
+#define PCIEMSIX_VECT214_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT214_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT214_MSG_DATA
+#define PCIEMSIX_VECT214_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT214_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT214_CONTROL
+#define PCIEMSIX_VECT214_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT214_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT215_ADDR_LO
+#define PCIEMSIX_VECT215_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT215_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT215_ADDR_HI
+#define PCIEMSIX_VECT215_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT215_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT215_MSG_DATA
+#define PCIEMSIX_VECT215_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT215_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT215_CONTROL
+#define PCIEMSIX_VECT215_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT215_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT216_ADDR_LO
+#define PCIEMSIX_VECT216_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT216_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT216_ADDR_HI
+#define PCIEMSIX_VECT216_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT216_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT216_MSG_DATA
+#define PCIEMSIX_VECT216_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT216_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT216_CONTROL
+#define PCIEMSIX_VECT216_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT216_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT217_ADDR_LO
+#define PCIEMSIX_VECT217_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT217_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT217_ADDR_HI
+#define PCIEMSIX_VECT217_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT217_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT217_MSG_DATA
+#define PCIEMSIX_VECT217_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT217_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT217_CONTROL
+#define PCIEMSIX_VECT217_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT217_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT218_ADDR_LO
+#define PCIEMSIX_VECT218_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT218_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT218_ADDR_HI
+#define PCIEMSIX_VECT218_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT218_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT218_MSG_DATA
+#define PCIEMSIX_VECT218_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT218_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT218_CONTROL
+#define PCIEMSIX_VECT218_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT218_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT219_ADDR_LO
+#define PCIEMSIX_VECT219_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT219_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT219_ADDR_HI
+#define PCIEMSIX_VECT219_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT219_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT219_MSG_DATA
+#define PCIEMSIX_VECT219_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT219_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT219_CONTROL
+#define PCIEMSIX_VECT219_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT219_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT220_ADDR_LO
+#define PCIEMSIX_VECT220_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT220_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT220_ADDR_HI
+#define PCIEMSIX_VECT220_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT220_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT220_MSG_DATA
+#define PCIEMSIX_VECT220_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT220_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT220_CONTROL
+#define PCIEMSIX_VECT220_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT220_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT221_ADDR_LO
+#define PCIEMSIX_VECT221_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT221_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT221_ADDR_HI
+#define PCIEMSIX_VECT221_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT221_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT221_MSG_DATA
+#define PCIEMSIX_VECT221_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT221_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT221_CONTROL
+#define PCIEMSIX_VECT221_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT221_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT222_ADDR_LO
+#define PCIEMSIX_VECT222_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT222_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT222_ADDR_HI
+#define PCIEMSIX_VECT222_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT222_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT222_MSG_DATA
+#define PCIEMSIX_VECT222_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT222_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT222_CONTROL
+#define PCIEMSIX_VECT222_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT222_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT223_ADDR_LO
+#define PCIEMSIX_VECT223_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT223_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT223_ADDR_HI
+#define PCIEMSIX_VECT223_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT223_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT223_MSG_DATA
+#define PCIEMSIX_VECT223_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT223_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT223_CONTROL
+#define PCIEMSIX_VECT223_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT223_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT224_ADDR_LO
+#define PCIEMSIX_VECT224_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT224_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT224_ADDR_HI
+#define PCIEMSIX_VECT224_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT224_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT224_MSG_DATA
+#define PCIEMSIX_VECT224_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT224_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT224_CONTROL
+#define PCIEMSIX_VECT224_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT224_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT225_ADDR_LO
+#define PCIEMSIX_VECT225_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT225_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT225_ADDR_HI
+#define PCIEMSIX_VECT225_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT225_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT225_MSG_DATA
+#define PCIEMSIX_VECT225_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT225_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT225_CONTROL
+#define PCIEMSIX_VECT225_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT225_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT226_ADDR_LO
+#define PCIEMSIX_VECT226_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT226_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT226_ADDR_HI
+#define PCIEMSIX_VECT226_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT226_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT226_MSG_DATA
+#define PCIEMSIX_VECT226_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT226_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT226_CONTROL
+#define PCIEMSIX_VECT226_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT226_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT227_ADDR_LO
+#define PCIEMSIX_VECT227_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT227_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT227_ADDR_HI
+#define PCIEMSIX_VECT227_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT227_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT227_MSG_DATA
+#define PCIEMSIX_VECT227_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT227_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT227_CONTROL
+#define PCIEMSIX_VECT227_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT227_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT228_ADDR_LO
+#define PCIEMSIX_VECT228_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT228_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT228_ADDR_HI
+#define PCIEMSIX_VECT228_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT228_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT228_MSG_DATA
+#define PCIEMSIX_VECT228_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT228_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT228_CONTROL
+#define PCIEMSIX_VECT228_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT228_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT229_ADDR_LO
+#define PCIEMSIX_VECT229_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT229_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT229_ADDR_HI
+#define PCIEMSIX_VECT229_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT229_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT229_MSG_DATA
+#define PCIEMSIX_VECT229_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT229_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT229_CONTROL
+#define PCIEMSIX_VECT229_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT229_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT230_ADDR_LO
+#define PCIEMSIX_VECT230_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT230_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT230_ADDR_HI
+#define PCIEMSIX_VECT230_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT230_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT230_MSG_DATA
+#define PCIEMSIX_VECT230_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT230_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT230_CONTROL
+#define PCIEMSIX_VECT230_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT230_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT231_ADDR_LO
+#define PCIEMSIX_VECT231_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT231_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT231_ADDR_HI
+#define PCIEMSIX_VECT231_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT231_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT231_MSG_DATA
+#define PCIEMSIX_VECT231_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT231_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT231_CONTROL
+#define PCIEMSIX_VECT231_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT231_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT232_ADDR_LO
+#define PCIEMSIX_VECT232_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT232_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT232_ADDR_HI
+#define PCIEMSIX_VECT232_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT232_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT232_MSG_DATA
+#define PCIEMSIX_VECT232_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT232_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT232_CONTROL
+#define PCIEMSIX_VECT232_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT232_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT233_ADDR_LO
+#define PCIEMSIX_VECT233_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT233_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT233_ADDR_HI
+#define PCIEMSIX_VECT233_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT233_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT233_MSG_DATA
+#define PCIEMSIX_VECT233_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT233_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT233_CONTROL
+#define PCIEMSIX_VECT233_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT233_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT234_ADDR_LO
+#define PCIEMSIX_VECT234_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT234_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT234_ADDR_HI
+#define PCIEMSIX_VECT234_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT234_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT234_MSG_DATA
+#define PCIEMSIX_VECT234_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT234_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT234_CONTROL
+#define PCIEMSIX_VECT234_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT234_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT235_ADDR_LO
+#define PCIEMSIX_VECT235_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT235_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT235_ADDR_HI
+#define PCIEMSIX_VECT235_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT235_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT235_MSG_DATA
+#define PCIEMSIX_VECT235_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT235_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT235_CONTROL
+#define PCIEMSIX_VECT235_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT235_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT236_ADDR_LO
+#define PCIEMSIX_VECT236_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT236_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT236_ADDR_HI
+#define PCIEMSIX_VECT236_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT236_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT236_MSG_DATA
+#define PCIEMSIX_VECT236_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT236_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT236_CONTROL
+#define PCIEMSIX_VECT236_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT236_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT237_ADDR_LO
+#define PCIEMSIX_VECT237_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT237_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT237_ADDR_HI
+#define PCIEMSIX_VECT237_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT237_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT237_MSG_DATA
+#define PCIEMSIX_VECT237_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT237_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT237_CONTROL
+#define PCIEMSIX_VECT237_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT237_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT238_ADDR_LO
+#define PCIEMSIX_VECT238_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT238_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT238_ADDR_HI
+#define PCIEMSIX_VECT238_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT238_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT238_MSG_DATA
+#define PCIEMSIX_VECT238_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT238_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT238_CONTROL
+#define PCIEMSIX_VECT238_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT238_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT239_ADDR_LO
+#define PCIEMSIX_VECT239_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT239_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT239_ADDR_HI
+#define PCIEMSIX_VECT239_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT239_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT239_MSG_DATA
+#define PCIEMSIX_VECT239_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT239_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT239_CONTROL
+#define PCIEMSIX_VECT239_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT239_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT240_ADDR_LO
+#define PCIEMSIX_VECT240_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT240_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT240_ADDR_HI
+#define PCIEMSIX_VECT240_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT240_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT240_MSG_DATA
+#define PCIEMSIX_VECT240_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT240_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT240_CONTROL
+#define PCIEMSIX_VECT240_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT240_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT241_ADDR_LO
+#define PCIEMSIX_VECT241_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT241_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT241_ADDR_HI
+#define PCIEMSIX_VECT241_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT241_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT241_MSG_DATA
+#define PCIEMSIX_VECT241_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT241_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT241_CONTROL
+#define PCIEMSIX_VECT241_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT241_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT242_ADDR_LO
+#define PCIEMSIX_VECT242_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT242_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT242_ADDR_HI
+#define PCIEMSIX_VECT242_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT242_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT242_MSG_DATA
+#define PCIEMSIX_VECT242_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT242_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT242_CONTROL
+#define PCIEMSIX_VECT242_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT242_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT243_ADDR_LO
+#define PCIEMSIX_VECT243_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT243_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT243_ADDR_HI
+#define PCIEMSIX_VECT243_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT243_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT243_MSG_DATA
+#define PCIEMSIX_VECT243_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT243_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT243_CONTROL
+#define PCIEMSIX_VECT243_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT243_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT244_ADDR_LO
+#define PCIEMSIX_VECT244_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT244_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT244_ADDR_HI
+#define PCIEMSIX_VECT244_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT244_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT244_MSG_DATA
+#define PCIEMSIX_VECT244_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT244_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT244_CONTROL
+#define PCIEMSIX_VECT244_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT244_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT245_ADDR_LO
+#define PCIEMSIX_VECT245_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT245_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT245_ADDR_HI
+#define PCIEMSIX_VECT245_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT245_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT245_MSG_DATA
+#define PCIEMSIX_VECT245_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT245_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT245_CONTROL
+#define PCIEMSIX_VECT245_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT245_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT246_ADDR_LO
+#define PCIEMSIX_VECT246_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT246_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT246_ADDR_HI
+#define PCIEMSIX_VECT246_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT246_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT246_MSG_DATA
+#define PCIEMSIX_VECT246_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT246_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT246_CONTROL
+#define PCIEMSIX_VECT246_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT246_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT247_ADDR_LO
+#define PCIEMSIX_VECT247_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT247_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT247_ADDR_HI
+#define PCIEMSIX_VECT247_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT247_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT247_MSG_DATA
+#define PCIEMSIX_VECT247_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT247_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT247_CONTROL
+#define PCIEMSIX_VECT247_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT247_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT248_ADDR_LO
+#define PCIEMSIX_VECT248_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT248_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT248_ADDR_HI
+#define PCIEMSIX_VECT248_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT248_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT248_MSG_DATA
+#define PCIEMSIX_VECT248_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT248_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT248_CONTROL
+#define PCIEMSIX_VECT248_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT248_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT249_ADDR_LO
+#define PCIEMSIX_VECT249_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT249_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT249_ADDR_HI
+#define PCIEMSIX_VECT249_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT249_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT249_MSG_DATA
+#define PCIEMSIX_VECT249_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT249_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT249_CONTROL
+#define PCIEMSIX_VECT249_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT249_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT250_ADDR_LO
+#define PCIEMSIX_VECT250_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT250_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT250_ADDR_HI
+#define PCIEMSIX_VECT250_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT250_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT250_MSG_DATA
+#define PCIEMSIX_VECT250_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT250_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT250_CONTROL
+#define PCIEMSIX_VECT250_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT250_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT251_ADDR_LO
+#define PCIEMSIX_VECT251_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT251_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT251_ADDR_HI
+#define PCIEMSIX_VECT251_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT251_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT251_MSG_DATA
+#define PCIEMSIX_VECT251_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT251_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT251_CONTROL
+#define PCIEMSIX_VECT251_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT251_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT252_ADDR_LO
+#define PCIEMSIX_VECT252_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT252_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT252_ADDR_HI
+#define PCIEMSIX_VECT252_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT252_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT252_MSG_DATA
+#define PCIEMSIX_VECT252_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT252_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT252_CONTROL
+#define PCIEMSIX_VECT252_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT252_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT253_ADDR_LO
+#define PCIEMSIX_VECT253_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT253_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT253_ADDR_HI
+#define PCIEMSIX_VECT253_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT253_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT253_MSG_DATA
+#define PCIEMSIX_VECT253_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT253_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT253_CONTROL
+#define PCIEMSIX_VECT253_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT253_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT254_ADDR_LO
+#define PCIEMSIX_VECT254_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT254_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT254_ADDR_HI
+#define PCIEMSIX_VECT254_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT254_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT254_MSG_DATA
+#define PCIEMSIX_VECT254_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT254_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT254_CONTROL
+#define PCIEMSIX_VECT254_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT254_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT255_ADDR_LO
+#define PCIEMSIX_VECT255_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT255_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT255_ADDR_HI
+#define PCIEMSIX_VECT255_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT255_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT255_MSG_DATA
+#define PCIEMSIX_VECT255_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT255_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT255_CONTROL
+#define PCIEMSIX_VECT255_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT255_CONTROL__MASK_BIT_MASK 0x00000001L
+
+
+// addressBlock: aid_nbio_nbif0_pciemsix_0_usb_MSIXPDEC
+//PCIEMSIX_PBA_0
+#define PCIEMSIX_PBA_0__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_0__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_1
+#define PCIEMSIX_PBA_1__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_1__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_2
+#define PCIEMSIX_PBA_2__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_2__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_3
+#define PCIEMSIX_PBA_3__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_3__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_4
+#define PCIEMSIX_PBA_4__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_4__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_5
+#define PCIEMSIX_PBA_5__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_5__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_6
+#define PCIEMSIX_PBA_6__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_6__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_7
+#define PCIEMSIX_PBA_7__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_7__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_bif_swus_SUMDEC
+//SUM_INDEX
+#define SUM_INDEX__SUM_INDEX__SHIFT 0x0
+#define SUM_INDEX__SUM_INDEX_MASK 0xFFFFFFFFL
+//SUM_DATA
+#define SUM_DATA__SUM_DATA__SHIFT 0x0
+#define SUM_DATA__SUM_DATA_MASK 0xFFFFFFFFL
+//SUM_INDEX_HI
+#define SUM_INDEX_HI__SUM_INDEX_HI__SHIFT 0x0
+#define SUM_INDEX_HI__SUM_INDEX_HI_MASK 0x000000FFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_strap_rcc_strap_internal
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0_MASK 0x00E00000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0_MASK 0x0E000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK 0x70000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0__SHIFT 0x5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0__SHIFT 0xd
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0__SHIFT 0xf
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0_MASK 0x00000010L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0_MASK 0x00000020L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0_MASK 0x00000E00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0_MASK 0x00001000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0_MASK 0x00002000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0_MASK 0x00004000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0_MASK 0x00008000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0_MASK 0x00700000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0_MASK 0x03800000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0_MASK 0x1C000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0_MASK 0xE0000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0xb
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0_MASK 0x00000038L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0_MASK 0x00000600L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x00003800L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0_MASK 0x0003C000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x001C0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0_MASK 0x01E00000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0_MASK 0x06000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0_MASK 0x18000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0_MASK 0xFF000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0_MASK 0x00800000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0_MASK 0x02000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0__SHIFT 0x5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000010L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0_MASK 0x00000020L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x00000F00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0x0000F000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0_MASK 0x00030000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0_MASK 0x00E00000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x0F000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0xF0000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0_MASK 0x00000F00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0_MASK 0x0000F000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0_MASK 0x1F000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0_MASK 0xE0000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0_MASK 0xFF000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0__SHIFT 0x5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0_MASK 0x00000010L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0_MASK 0x00000020L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0_MASK 0x0007FFC0L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0_MASK 0x0FFF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0_MASK 0x00FFFFFFL
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0_MASK 0x000FFE00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0_MASK 0xFFF00000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0_MASK 0x00000010L
+//RCC_DEV1_PORT_STRAP0
+//RCC_DEV1_PORT_STRAP1
+//RCC_DEV1_PORT_STRAP2
+//RCC_DEV1_PORT_STRAP3
+//RCC_DEV1_PORT_STRAP4
+//RCC_DEV1_PORT_STRAP5
+//RCC_DEV1_PORT_STRAP6
+//RCC_DEV1_PORT_STRAP7
+//RCC_DEV1_PORT_STRAP8
+//RCC_DEV1_PORT_STRAP9
+//RCC_DEV1_PORT_STRAP10
+//RCC_DEV1_PORT_STRAP11
+//RCC_DEV1_PORT_STRAP12
+//RCC_DEV1_PORT_STRAP13
+//RCC_DEV1_PORT_STRAP14
+//RCC_DEV2_PORT_STRAP0
+//RCC_DEV2_PORT_STRAP1
+//RCC_DEV2_PORT_STRAP2
+//RCC_DEV2_PORT_STRAP3
+//RCC_DEV2_PORT_STRAP4
+//RCC_DEV2_PORT_STRAP5
+//RCC_DEV2_PORT_STRAP6
+//RCC_DEV2_PORT_STRAP7
+//RCC_DEV2_PORT_STRAP8
+//RCC_DEV2_PORT_STRAP9
+//RCC_DEV2_PORT_STRAP10
+//RCC_DEV2_PORT_STRAP11
+//RCC_DEV2_PORT_STRAP12
+//RCC_DEV2_PORT_STRAP13
+//RCC_DEV2_PORT_STRAP14
+//RCC_STRAP1_RCC_BIF_STRAP0
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN__SHIFT 0x2
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN__SHIFT 0x6
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN__SHIFT 0xb
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR__SHIFT 0xc
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR__SHIFT 0xd
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0xe
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0xf
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN__SHIFT 0x11
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_AUD_PIN__SHIFT 0x12
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN3_DIS__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4__SHIFT 0x19
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_QUICKSIM_START__SHIFT 0x1a
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1b
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS__SHIFT 0x1c
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN__SHIFT 0x1d
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE__SHIFT 0x1e
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN_MASK 0x00000004L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN_MASK 0x00000038L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN_MASK 0x00000040L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR_MASK 0x00000400L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN_MASK 0x00000800L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR_MASK 0x00001000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR_MASK 0x00002000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00004000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00008000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_MASK 0x00010000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN_MASK 0x00020000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_AUD_PIN_MASK 0x000C0000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN3_DIS_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4_MASK 0x02000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_QUICKSIM_START_MASK 0x04000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING_MASK 0x08000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS_MASK 0x10000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN_MASK 0x20000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE_MASK 0x40000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP1
+#define RCC_STRAP1_RCC_BIF_STRAP1__FUSESTRAP_VALID__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP1__ROMSTRAP_VALID__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP1__WRITE_DISABLE__SHIFT 0x2
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS__SHIFT 0x5
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x6
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_READY__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE__SHIFT 0xc
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_HWREV_LSB2__SHIFT 0xd
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWREV_LSB2__SHIFT 0xf
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY__SHIFT 0x11
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS__SHIFT 0x12
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN__SHIFT 0x13
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN__SHIFT 0x14
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGIN_EN__SHIFT 0x15
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN__SHIFT 0x16
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN__SHIFT 0x17
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN__SHIFT 0x19
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE__SHIFT 0x1a
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x1b
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN_EP__SHIFT 0x1d
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN__SHIFT 0x1e
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN_DN__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP1__FUSESTRAP_VALID_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP1__ROMSTRAP_VALID_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP1__WRITE_DISABLE_MASK 0x00000004L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN_MASK 0x00000008L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS_MASK 0x00000020L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00000040L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_READY_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE_MASK 0x00000C00L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE_MASK 0x00001000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_HWREV_LSB2_MASK 0x00006000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWREV_LSB2_MASK 0x00018000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY_MASK 0x00020000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS_MASK 0x00040000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN_MASK 0x00080000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN_MASK 0x00100000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGIN_EN_MASK 0x00200000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN_MASK 0x00400000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN_MASK 0x00800000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN_MASK 0x02000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE_MASK 0x04000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN_MASK 0x18000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN_EP_MASK 0x20000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN_MASK 0x40000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN_DN_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP2
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA__SHIFT 0x5
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA__SHIFT 0x6
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2__SHIFT 0xd
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS__SHIFT 0xe
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN__SHIFT 0xf
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS_MASK 0x00000008L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA_MASK 0x00000020L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA_MASK 0x00000040L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN_MASK 0x00000C00L
+#define RCC_STRAP1_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2_MASK 0x00002000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN_MASK 0x00008000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP3
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_BIF_STRAP4
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_BIF_STRAP5
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN__SHIFT 0x11
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS__SHIFT 0x12
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS__SHIFT 0x13
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS__SHIFT 0x14
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS__SHIFT 0x15
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE__SHIFT 0x16
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x19
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1b
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER__SHIFT 0x1c
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN_MASK 0x00010000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN_MASK 0x00020000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS_MASK 0x00040000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS_MASK 0x00080000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS_MASK 0x00100000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS_MASK 0x00200000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE_MASK 0x00C00000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED_MASK 0x06000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ_MASK 0x08000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER_MASK 0x70000000L
+//RCC_STRAP1_RCC_BIF_STRAP6
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GEN5_DIS__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN__SHIFT 0x2
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GEN5_DIS_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN_MASK 0x00000004L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP1
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP2
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0__SHIFT 0xf
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0_MASK 0x00003E00L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0_MASK 0x00004000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0_MASK 0x00008000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0_MASK 0x00800000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0_MASK 0x07000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP3
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0_MASK 0x00E00000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP4
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0__SHIFT 0xa
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0_MASK 0x000003FFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0_MASK 0x00000400L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0_MASK 0x0F800000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0_MASK 0x70000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP5
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00000007L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0_MASK 0x00000070L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0_MASK 0x00001E00L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0_MASK 0x0000E000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00070000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0_MASK 0x00780000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0_MASK 0x03800000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0_MASK 0xC0000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP9
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0_MASK 0x00C00000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0_MASK 0x0F000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0_MASK 0xFF000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0_MASK 0x0000FFFFL
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_INVALIDATE_QUEUE_DEPTH_DEV0_F0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_PAGE_ALIGNED_REQUEST_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0_MASK 0x00FFF000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_INVALIDATE_QUEUE_DEPTH_DEV0_F0_MASK 0x3E000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_PAGE_ALIGNED_REQUEST_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0_MASK 0x00FFF000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0_MASK 0x00001000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0_MASK 0x01FFE000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP26
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP26__STRAP_GPUIOV_VSEC_LENGTH_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP26__STRAP_GPUIOV_VSEC_LENGTH_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1_MASK 0x000F0000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1_MASK 0x00F00000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP2
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_VC_EN_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1_MASK 0x00003E00L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1_MASK 0x00004000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_VC_EN_DEV0_F1_MASK 0x00800000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1_MASK 0x07000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP3
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP4
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1_MASK 0x0F800000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1_MASK 0x70000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP5
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1_MASK 0x38000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP6
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_EN_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_EN_DEV0_F1_MASK 0x00000001L
+
+
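For reference: every field in the register blocks above is described by a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair, so a field is read by masking the register value and shifting it down, and written by the reverse. A minimal illustrative C sketch follows (not part of this patch; the helper names are hypothetical, and the two macros simply mirror the RCC_STRAP1_RCC_BIF_STRAP0 definitions above):

        #include <stdint.h>

        /* Mirrors the definitions in the RCC_STRAP1_RCC_BIF_STRAP0 block above. */
        #define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
        #define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK   0x00000001L

        /* Read a field: mask the register value, then shift it down to bit 0. */
        static inline uint32_t strap0_get_gen4_dis(uint32_t reg)
        {
                return (reg & RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK) >>
                        RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT;
        }

        /* Write a field: clear its bits, then OR in the shifted new value. */
        static inline uint32_t strap0_set_gen4_dis(uint32_t reg, uint32_t val)
        {
                reg &= ~(uint32_t)RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK;
                reg |= (val << RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT) &
                        RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK;
                return reg;
        }
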
+// addressBlock: aid_nbio_nbif0_bif_rst_bif_rst_regblk
+//HARD_RST_CTRL
+#define HARD_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0
+#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1
+#define HARD_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2
+#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3
+#define HARD_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4
+#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5
+#define HARD_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6
+#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7
+#define HARD_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x9
+#define HARD_RST_CTRL__SION_AON_RESET_EN__SHIFT 0xa
+#define HARD_RST_CTRL__STRAP_RST_EN__SHIFT 0x17
+#define HARD_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d
+#define HARD_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e
+#define HARD_RST_CTRL__CORE_RST_EN__SHIFT 0x1f
+#define HARD_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L
+#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L
+#define HARD_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L
+#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L
+#define HARD_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L
+#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L
+#define HARD_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L
+#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L
+#define HARD_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000200L
+#define HARD_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000400L
+#define HARD_RST_CTRL__STRAP_RST_EN_MASK 0x00800000L
+#define HARD_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L
+#define HARD_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L
+#define HARD_RST_CTRL__CORE_RST_EN_MASK 0x80000000L
+//SELF_SOFT_RST
+#define SELF_SOFT_RST__DSPT0_CFG_RST__SHIFT 0x0
+#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST__SHIFT 0x1
+#define SELF_SOFT_RST__DSPT0_PRV_RST__SHIFT 0x2
+#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST__SHIFT 0x3
+#define SELF_SOFT_RST__EP0_CFG_RST__SHIFT 0x4
+#define SELF_SOFT_RST__EP0_CFG_STICKY_RST__SHIFT 0x5
+#define SELF_SOFT_RST__EP0_PRV_RST__SHIFT 0x6
+#define SELF_SOFT_RST__EP0_PRV_STICKY_RST__SHIFT 0x7
+#define SELF_SOFT_RST__HRPU_SDP_PORT_RST__SHIFT 0x18
+#define SELF_SOFT_RST__GSID_SDP_PORT_RST__SHIFT 0x19
+#define SELF_SOFT_RST__GMIU_SDP_PORT_RST__SHIFT 0x1a
+#define SELF_SOFT_RST__GMID_SDP_PORT_RST__SHIFT 0x1b
+#define SELF_SOFT_RST__CORE_STICKY_RST__SHIFT 0x1d
+#define SELF_SOFT_RST__RELOAD_STRAP__SHIFT 0x1e
+#define SELF_SOFT_RST__CORE_RST__SHIFT 0x1f
+#define SELF_SOFT_RST__DSPT0_CFG_RST_MASK 0x00000001L
+#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST_MASK 0x00000002L
+#define SELF_SOFT_RST__DSPT0_PRV_RST_MASK 0x00000004L
+#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST_MASK 0x00000008L
+#define SELF_SOFT_RST__EP0_CFG_RST_MASK 0x00000010L
+#define SELF_SOFT_RST__EP0_CFG_STICKY_RST_MASK 0x00000020L
+#define SELF_SOFT_RST__EP0_PRV_RST_MASK 0x00000040L
+#define SELF_SOFT_RST__EP0_PRV_STICKY_RST_MASK 0x00000080L
+#define SELF_SOFT_RST__HRPU_SDP_PORT_RST_MASK 0x01000000L
+#define SELF_SOFT_RST__GSID_SDP_PORT_RST_MASK 0x02000000L
+#define SELF_SOFT_RST__GMIU_SDP_PORT_RST_MASK 0x04000000L
+#define SELF_SOFT_RST__GMID_SDP_PORT_RST_MASK 0x08000000L
+#define SELF_SOFT_RST__CORE_STICKY_RST_MASK 0x20000000L
+#define SELF_SOFT_RST__RELOAD_STRAP_MASK 0x40000000L
+#define SELF_SOFT_RST__CORE_RST_MASK 0x80000000L
+//BIF_GFX_DRV_VPU_RST
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST__SHIFT 0x0
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST__SHIFT 0x1
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST__SHIFT 0x2
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST__SHIFT 0x3
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST__SHIFT 0x4
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST__SHIFT 0x5
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST__SHIFT 0x6
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST__SHIFT 0x7
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST_MASK 0x00000001L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST_MASK 0x00000002L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST_MASK 0x00000004L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST_MASK 0x00000008L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST_MASK 0x00000010L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST_MASK 0x00000020L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST_MASK 0x00000040L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST_MASK 0x00000080L
+//BIF_RST_MISC_CTRL
+#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB__SHIFT 0x0
+#define BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT 0x2
+#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK__SHIFT 0x4
+#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR__SHIFT 0x5
+#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR__SHIFT 0x6
+#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN__SHIFT 0x8
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE__SHIFT 0x9
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT__SHIFT 0xa
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL__SHIFT 0xd
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL__SHIFT 0xf
+#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR__SHIFT 0x11
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS__SHIFT 0x17
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS__SHIFT 0x18
+#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB_MASK 0x00000001L
+#define BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK 0x0000000CL
+#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK_MASK 0x00000010L
+#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR_MASK 0x00000020L
+#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR_MASK 0x00000040L
+#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN_MASK 0x00000100L
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE_MASK 0x00000200L
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT_MASK 0x00001C00L
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL_MASK 0x00006000L
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL_MASK 0x00018000L
+#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR_MASK 0x000E0000L
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS_MASK 0x00800000L
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS_MASK 0x03000000L
+//BIF_RST_MISC_CTRL2
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_PROTECT__SHIFT 0x0
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_PROTECT__SHIFT 0x1
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_PROTECT__SHIFT 0x2
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT__SHIFT 0xf
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE__SHIFT 0x10
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE__SHIFT 0x11
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE__SHIFT 0x12
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT_DIS__SHIFT 0x1e
+#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE__SHIFT 0x1f
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_PROTECT_MASK 0x00000001L
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_PROTECT_MASK 0x00000002L
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_PROTECT_MASK 0x00000004L
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT_MASK 0x00008000L
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE_MASK 0x00010000L
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE_MASK 0x00020000L
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE_MASK 0x00040000L
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT_DIS_MASK 0x40000000L
+#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE_MASK 0x80000000L
+//BIF_RST_MISC_CTRL3
+#define BIF_RST_MISC_CTRL3__TIMER_SCALE__SHIFT 0x0
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT__SHIFT 0x4
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE__SHIFT 0x6
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_HARD__SHIFT 0x7
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SOFT__SHIFT 0xa
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SELF__SHIFT 0xd
+#define BIF_RST_MISC_CTRL3__TIMER_SCALE_MASK 0x0000000FL
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT_MASK 0x00000030L
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE_MASK 0x00000040L
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_HARD_MASK 0x00000380L
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SOFT_MASK 0x00001C00L
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SELF_MASK 0x0000E000L
+//DEV0_PF0_FLR_RST_CTRL
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN__SHIFT 0x5
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN__SHIFT 0x6
+#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN__SHIFT 0x7
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN__SHIFT 0x8
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN__SHIFT 0x9
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN__SHIFT 0xa
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN__SHIFT 0xb
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN__SHIFT 0xc
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN__SHIFT 0xd
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN__SHIFT 0xe
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN__SHIFT 0xf
+#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN__SHIFT 0x10
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN__SHIFT 0x1f
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN_MASK 0x00000020L
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN_MASK 0x00000040L
+#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN_MASK 0x00000080L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN_MASK 0x00000100L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN_MASK 0x00000200L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN_MASK 0x00000400L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN_MASK 0x00000800L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN_MASK 0x00001000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN_MASK 0x00002000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN_MASK 0x00004000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN_MASK 0x00008000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN_MASK 0x00010000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN_MASK 0x80000000L
+//DEV0_PF1_FLR_RST_CTRL
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+//BIF_INST_RESET_INTR_STS
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS__SHIFT 0x0
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS__SHIFT 0x1
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS__SHIFT 0x2
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS__SHIFT 0x3
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS__SHIFT 0x4
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS_MASK 0x00000001L
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS_MASK 0x00000002L
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS_MASK 0x00000004L
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS_MASK 0x00000008L
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS_MASK 0x00000010L
+//BIF_PF_FLR_INTR_STS
+#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS__SHIFT 0x0
+#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS__SHIFT 0x1
+#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS_MASK 0x00000001L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS_MASK 0x00000002L
+//BIF_D3HOTD0_INTR_STS
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS__SHIFT 0x0
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS__SHIFT 0x1
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS_MASK 0x00000001L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS_MASK 0x00000002L
+//BIF_POWER_INTR_STS
+#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS__SHIFT 0x0
+#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS__SHIFT 0x10
+#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS_MASK 0x00000001L
+#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS_MASK 0x00010000L
+//BIF_PF_DSTATE_INTR_STS
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS__SHIFT 0x0
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS__SHIFT 0x1
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS__SHIFT 0x2
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS__SHIFT 0x3
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS__SHIFT 0x4
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS__SHIFT 0x5
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS__SHIFT 0x6
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS__SHIFT 0x7
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS_MASK 0x00000001L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS_MASK 0x00000002L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS_MASK 0x00000004L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS_MASK 0x00000008L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS_MASK 0x00000010L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS_MASK 0x00000020L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS_MASK 0x00000040L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS_MASK 0x00000080L
+//SELF_SOFT_RST_2
+#define SELF_SOFT_RST_2__DSPT3_CFG_RST__SHIFT 0x0
+#define SELF_SOFT_RST_2__DSPT3_CFG_STICKY_RST__SHIFT 0x1
+#define SELF_SOFT_RST_2__DSPT3_PRV_RST__SHIFT 0x2
+#define SELF_SOFT_RST_2__DSPT3_PRV_STICKY_RST__SHIFT 0x3
+#define SELF_SOFT_RST_2__EP3_CFG_RST__SHIFT 0x4
+#define SELF_SOFT_RST_2__EP3_CFG_STICKY_RST__SHIFT 0x5
+#define SELF_SOFT_RST_2__EP3_PRV_RST__SHIFT 0x6
+#define SELF_SOFT_RST_2__EP3_PRV_STICKY_RST__SHIFT 0x7
+#define SELF_SOFT_RST_2__GMISP0_SDP_PORT_RST__SHIFT 0x18
+#define SELF_SOFT_RST_2__STRAP_RST__SHIFT 0x19
+#define SELF_SOFT_RST_2__DSPT3_CFG_RST_MASK 0x00000001L
+#define SELF_SOFT_RST_2__DSPT3_CFG_STICKY_RST_MASK 0x00000002L
+#define SELF_SOFT_RST_2__DSPT3_PRV_RST_MASK 0x00000004L
+#define SELF_SOFT_RST_2__DSPT3_PRV_STICKY_RST_MASK 0x00000008L
+#define SELF_SOFT_RST_2__EP3_CFG_RST_MASK 0x00000010L
+#define SELF_SOFT_RST_2__EP3_CFG_STICKY_RST_MASK 0x00000020L
+#define SELF_SOFT_RST_2__EP3_PRV_RST_MASK 0x00000040L
+#define SELF_SOFT_RST_2__EP3_PRV_STICKY_RST_MASK 0x00000080L
+#define SELF_SOFT_RST_2__GMISP0_SDP_PORT_RST_MASK 0x01000000L
+#define SELF_SOFT_RST_2__STRAP_RST_MASK 0x02000000L
+//BIF_INST_RESET_INTR_MASK
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK__SHIFT 0x0
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK__SHIFT 0x1
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK__SHIFT 0x2
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK__SHIFT 0x3
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK__SHIFT 0x4
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK_MASK 0x00000001L
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK_MASK 0x00000002L
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK_MASK 0x00000004L
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK_MASK 0x00000008L
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK_MASK 0x00000010L
+//BIF_PF_FLR_INTR_MASK
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK__SHIFT 0x0
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK__SHIFT 0x1
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK_MASK 0x00000001L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK_MASK 0x00000002L
+//BIF_D3HOTD0_INTR_MASK
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK__SHIFT 0x0
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK__SHIFT 0x1
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK_MASK 0x00000001L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK_MASK 0x00000002L
+//BIF_POWER_INTR_MASK
+#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK__SHIFT 0x0
+#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK__SHIFT 0x10
+#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK_MASK 0x00000001L
+#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK_MASK 0x00010000L
+//BIF_PF_DSTATE_INTR_MASK
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK__SHIFT 0x0
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK__SHIFT 0x1
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK__SHIFT 0x2
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK__SHIFT 0x3
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK__SHIFT 0x4
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK__SHIFT 0x5
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK__SHIFT 0x6
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK__SHIFT 0x7
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK_MASK 0x00000001L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK_MASK 0x00000002L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK_MASK 0x00000004L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK_MASK 0x00000008L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK_MASK 0x00000010L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK_MASK 0x00000020L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK_MASK 0x00000040L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK_MASK 0x00000080L
+//BIF_PF_FLR_RST
+#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0
+#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1
+#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L
+#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L
+//BIF_DEV0_PF0_DSTATE_VALUE
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF1_DSTATE_VALUE
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE_MASK 0x00030000L
+//DEV0_PF0_D3HOTD0_RST_CTRL
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF1_D3HOTD0_RST_CTRL
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//BIF_PORT0_DSTATE_VALUE
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE_MASK 0x00030000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_misc_bif_misc_regblk
+//REGS_ROM_OFFSET_CTRL
+#define REGS_ROM_OFFSET_CTRL__ROM_OFFSET__SHIFT 0x0
+#define REGS_ROM_OFFSET_CTRL__ROM_OFFSET_MASK 0x7FL
+//NBIF_STRAP_BIOS_CNTL
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_BIOS_EN__SHIFT 0x0
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_PCIE_ID_BIOS_EN__SHIFT 0x1
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_BIOS_EN_MASK 0x00000001L
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_PCIE_ID_BIOS_EN_MASK 0x00000002L
+//DOORBELL0_CTRL_ENTRY_0
+#define DOORBELL0_CTRL_ENTRY_0__BIF_DOORBELL0_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_0__BIF_DOORBELL0_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_0__DOORBELL0_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_0__BIF_DOORBELL0_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_0__BIF_DOORBELL0_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_0__DOORBELL0_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_1
+#define DOORBELL0_CTRL_ENTRY_1__BIF_DOORBELL1_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_1__BIF_DOORBELL1_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_1__DOORBELL1_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_1__BIF_DOORBELL1_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_1__BIF_DOORBELL1_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_1__DOORBELL1_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_2
+#define DOORBELL0_CTRL_ENTRY_2__BIF_DOORBELL2_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_2__BIF_DOORBELL2_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_2__DOORBELL2_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_2__BIF_DOORBELL2_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_2__BIF_DOORBELL2_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_2__DOORBELL2_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_3
+#define DOORBELL0_CTRL_ENTRY_3__BIF_DOORBELL3_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_3__BIF_DOORBELL3_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_3__DOORBELL3_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_3__BIF_DOORBELL3_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_3__BIF_DOORBELL3_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_3__DOORBELL3_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_4
+#define DOORBELL0_CTRL_ENTRY_4__BIF_DOORBELL4_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_4__BIF_DOORBELL4_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_4__DOORBELL4_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_4__BIF_DOORBELL4_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_4__BIF_DOORBELL4_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_4__DOORBELL4_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_5
+#define DOORBELL0_CTRL_ENTRY_5__BIF_DOORBELL5_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_5__BIF_DOORBELL5_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_5__DOORBELL5_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_5__BIF_DOORBELL5_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_5__BIF_DOORBELL5_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_5__DOORBELL5_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_6
+#define DOORBELL0_CTRL_ENTRY_6__BIF_DOORBELL6_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_6__BIF_DOORBELL6_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_6__DOORBELL6_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_6__BIF_DOORBELL6_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_6__BIF_DOORBELL6_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_6__DOORBELL6_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_7
+#define DOORBELL0_CTRL_ENTRY_7__BIF_DOORBELL7_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_7__BIF_DOORBELL7_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_7__DOORBELL7_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_7__BIF_DOORBELL7_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_7__BIF_DOORBELL7_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_7__DOORBELL7_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_8
+#define DOORBELL0_CTRL_ENTRY_8__BIF_DOORBELL8_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_8__BIF_DOORBELL8_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_8__DOORBELL8_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_8__BIF_DOORBELL8_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_8__BIF_DOORBELL8_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_8__DOORBELL8_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_9
+#define DOORBELL0_CTRL_ENTRY_9__BIF_DOORBELL9_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_9__BIF_DOORBELL9_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_9__DOORBELL9_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_9__BIF_DOORBELL9_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_9__BIF_DOORBELL9_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_9__DOORBELL9_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_10
+#define DOORBELL0_CTRL_ENTRY_10__BIF_DOORBELL10_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_10__BIF_DOORBELL10_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_10__DOORBELL10_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_10__BIF_DOORBELL10_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_10__BIF_DOORBELL10_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_10__DOORBELL10_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_11
+#define DOORBELL0_CTRL_ENTRY_11__BIF_DOORBELL11_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_11__BIF_DOORBELL11_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_11__DOORBELL11_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_11__BIF_DOORBELL11_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_11__BIF_DOORBELL11_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_11__DOORBELL11_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_12
+#define DOORBELL0_CTRL_ENTRY_12__BIF_DOORBELL12_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_12__BIF_DOORBELL12_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_12__DOORBELL12_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_12__BIF_DOORBELL12_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_12__BIF_DOORBELL12_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_12__DOORBELL12_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_13
+#define DOORBELL0_CTRL_ENTRY_13__BIF_DOORBELL13_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_13__BIF_DOORBELL13_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_13__DOORBELL13_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_13__BIF_DOORBELL13_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_13__BIF_DOORBELL13_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_13__DOORBELL13_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_14
+#define DOORBELL0_CTRL_ENTRY_14__BIF_DOORBELL14_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_14__BIF_DOORBELL14_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_14__DOORBELL14_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_14__BIF_DOORBELL14_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_14__BIF_DOORBELL14_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_14__DOORBELL14_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_15
+#define DOORBELL0_CTRL_ENTRY_15__BIF_DOORBELL15_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_15__BIF_DOORBELL15_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_15__DOORBELL15_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_15__BIF_DOORBELL15_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_15__BIF_DOORBELL15_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_15__DOORBELL15_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_16
+#define DOORBELL0_CTRL_ENTRY_16__BIF_DOORBELL16_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_16__BIF_DOORBELL16_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_16__DOORBELL16_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_16__BIF_DOORBELL16_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_16__BIF_DOORBELL16_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_16__DOORBELL16_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_17
+#define DOORBELL0_CTRL_ENTRY_17__BIF_DOORBELL17_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_17__BIF_DOORBELL17_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_17__DOORBELL17_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_17__BIF_DOORBELL17_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_17__BIF_DOORBELL17_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_17__DOORBELL17_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_18
+#define DOORBELL0_CTRL_ENTRY_18__BIF_DOORBELL18_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_18__BIF_DOORBELL18_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_18__DOORBELL18_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_18__BIF_DOORBELL18_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_18__BIF_DOORBELL18_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_18__DOORBELL18_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_19
+#define DOORBELL0_CTRL_ENTRY_19__BIF_DOORBELL19_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_19__BIF_DOORBELL19_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_19__DOORBELL19_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_19__BIF_DOORBELL19_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_19__BIF_DOORBELL19_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_19__DOORBELL19_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//DOORBELL0_CTRL_ENTRY_20
+#define DOORBELL0_CTRL_ENTRY_20__BIF_DOORBELL20_RANGE_OFFSET_ENTRY__SHIFT 0x0
+#define DOORBELL0_CTRL_ENTRY_20__BIF_DOORBELL20_RANGE_SIZE_ENTRY__SHIFT 0xa
+#define DOORBELL0_CTRL_ENTRY_20__DOORBELL20_FENCE_ENABLE_ENTRY__SHIFT 0x10
+#define DOORBELL0_CTRL_ENTRY_20__BIF_DOORBELL20_RANGE_OFFSET_ENTRY_MASK 0x000003FFL
+#define DOORBELL0_CTRL_ENTRY_20__BIF_DOORBELL20_RANGE_SIZE_ENTRY_MASK 0x00007C00L
+#define DOORBELL0_CTRL_ENTRY_20__DOORBELL20_FENCE_ENABLE_ENTRY_MASK 0x001F0000L
+//AID0_VF0_BASE_ADDR
+#define AID0_VF0_BASE_ADDR__AID0_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_VF0_BASE_ADDR__AID0_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF0_BASE_ADDR
+#define AID1_VF0_BASE_ADDR__AID1_VF0_BASE_ADDR__SHIFT 0x0
+#define AID1_VF0_BASE_ADDR__AID1_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF0_BASE_ADDR
+#define AID2_VF0_BASE_ADDR__AID2_VF0_BASE_ADDR__SHIFT 0x0
+#define AID2_VF0_BASE_ADDR__AID2_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF0_BASE_ADDR
+#define AID3_VF0_BASE_ADDR__AID3_VF0_BASE_ADDR__SHIFT 0x0
+#define AID3_VF0_BASE_ADDR__AID3_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF0_BASE_ADDR
+#define AID0_XCC0_VF0_BASE_ADDR__AID0_XCC0_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF0_BASE_ADDR__AID0_XCC0_VF0_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF0_BASE_ADDR
+#define AID0_XCC1_VF0_BASE_ADDR__AID0_XCC1_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF0_BASE_ADDR__AID0_XCC1_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF0_BASE_ADDR
+#define AID1_XCC0_VF0_BASE_ADDR__AID1_XCC0_VF0_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF0_BASE_ADDR__AID1_XCC0_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF0_BASE_ADDR
+#define AID1_XCC1_VF0_BASE_ADDR__AID1_XCC1_VF0_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF0_BASE_ADDR__AID1_XCC1_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF0_BASE_ADDR
+#define AID2_XCC0_VF0_BASE_ADDR__AID2_XCC0_VF0_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF0_BASE_ADDR__AID2_XCC0_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF0_BASE_ADDR
+#define AID2_XCC1_VF0_BASE_ADDR__AID2_XCC1_VF0_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF0_BASE_ADDR__AID2_XCC1_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF0_BASE_ADDR
+#define AID3_XCC0_VF0_BASE_ADDR__AID3_XCC0_VF0_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF0_BASE_ADDR__AID3_XCC0_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF0_BASE_ADDR
+#define AID3_XCC1_VF0_BASE_ADDR__AID3_XCC1_VF0_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF0_BASE_ADDR__AID3_XCC1_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF0_BASE_ADDR
+#define AID0_NBIF_VF0_BASE_ADDR__AID0_NBIF_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF0_BASE_ADDR__AID0_NBIF_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF0_BASE_ADDR
+#define AID0_ATHUB_VF0_BASE_ADDR__AID0_ATHUB_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF0_BASE_ADDR__AID0_ATHUB_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF0_BASE_ADDR
+#define AID0_IH_VF0_BASE_ADDR__AID0_IH_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF0_BASE_ADDR__AID0_IH_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF0_BASE_ADDR
+#define AID0_HDP_VF0_BASE_ADDR__AID0_HDP_VF0_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF0_BASE_ADDR__AID0_HDP_VF0_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF1_BASE_ADDR
+#define AID0_VF1_BASE_ADDR__AID0_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_VF1_BASE_ADDR__AID0_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF1_BASE_ADDR
+#define AID1_VF1_BASE_ADDR__AID1_VF1_BASE_ADDR__SHIFT 0x0
+#define AID1_VF1_BASE_ADDR__AID1_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF1_BASE_ADDR
+#define AID2_VF1_BASE_ADDR__AID2_VF1_BASE_ADDR__SHIFT 0x0
+#define AID2_VF1_BASE_ADDR__AID2_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF1_BASE_ADDR
+#define AID3_VF1_BASE_ADDR__AID3_VF1_BASE_ADDR__SHIFT 0x0
+#define AID3_VF1_BASE_ADDR__AID3_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF1_BASE_ADDR
+#define AID0_XCC0_VF1_BASE_ADDR__AID0_XCC0_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF1_BASE_ADDR__AID0_XCC0_VF1_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF1_BASE_ADDR
+#define AID0_XCC1_VF1_BASE_ADDR__AID0_XCC1_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF1_BASE_ADDR__AID0_XCC1_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF1_BASE_ADDR
+#define AID1_XCC0_VF1_BASE_ADDR__AID1_XCC0_VF1_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF1_BASE_ADDR__AID1_XCC0_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF1_BASE_ADDR
+#define AID1_XCC1_VF1_BASE_ADDR__AID1_XCC1_VF1_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF1_BASE_ADDR__AID1_XCC1_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF1_BASE_ADDR
+#define AID2_XCC0_VF1_BASE_ADDR__AID2_XCC0_VF1_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF1_BASE_ADDR__AID2_XCC0_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF1_BASE_ADDR
+#define AID2_XCC1_VF1_BASE_ADDR__AID2_XCC1_VF1_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF1_BASE_ADDR__AID2_XCC1_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF1_BASE_ADDR
+#define AID3_XCC0_VF1_BASE_ADDR__AID3_XCC0_VF1_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF1_BASE_ADDR__AID3_XCC0_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF1_BASE_ADDR
+#define AID3_XCC1_VF1_BASE_ADDR__AID3_XCC1_VF1_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF1_BASE_ADDR__AID3_XCC1_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF1_BASE_ADDR
+#define AID0_NBIF_VF1_BASE_ADDR__AID0_NBIF_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF1_BASE_ADDR__AID0_NBIF_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF1_BASE_ADDR
+#define AID0_ATHUB_VF1_BASE_ADDR__AID0_ATHUB_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF1_BASE_ADDR__AID0_ATHUB_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF1_BASE_ADDR
+#define AID0_IH_VF1_BASE_ADDR__AID0_IH_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF1_BASE_ADDR__AID0_IH_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF1_BASE_ADDR
+#define AID0_HDP_VF1_BASE_ADDR__AID0_HDP_VF1_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF1_BASE_ADDR__AID0_HDP_VF1_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF2_BASE_ADDR
+#define AID0_VF2_BASE_ADDR__AID0_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_VF2_BASE_ADDR__AID0_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF2_BASE_ADDR
+#define AID1_VF2_BASE_ADDR__AID1_VF2_BASE_ADDR__SHIFT 0x0
+#define AID1_VF2_BASE_ADDR__AID1_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF2_BASE_ADDR
+#define AID2_VF2_BASE_ADDR__AID2_VF2_BASE_ADDR__SHIFT 0x0
+#define AID2_VF2_BASE_ADDR__AID2_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF2_BASE_ADDR
+#define AID3_VF2_BASE_ADDR__AID3_VF2_BASE_ADDR__SHIFT 0x0
+#define AID3_VF2_BASE_ADDR__AID3_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF2_BASE_ADDR
+#define AID0_XCC0_VF2_BASE_ADDR__AID0_XCC0_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF2_BASE_ADDR__AID0_XCC0_VF2_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF2_BASE_ADDR
+#define AID0_XCC1_VF2_BASE_ADDR__AID0_XCC1_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF2_BASE_ADDR__AID0_XCC1_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF2_BASE_ADDR
+#define AID1_XCC0_VF2_BASE_ADDR__AID1_XCC0_VF2_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF2_BASE_ADDR__AID1_XCC0_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF2_BASE_ADDR
+#define AID1_XCC1_VF2_BASE_ADDR__AID1_XCC1_VF2_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF2_BASE_ADDR__AID1_XCC1_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF2_BASE_ADDR
+#define AID2_XCC0_VF2_BASE_ADDR__AID2_XCC0_VF2_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF2_BASE_ADDR__AID2_XCC0_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF2_BASE_ADDR
+#define AID2_XCC1_VF2_BASE_ADDR__AID2_XCC1_VF2_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF2_BASE_ADDR__AID2_XCC1_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF2_BASE_ADDR
+#define AID3_XCC0_VF2_BASE_ADDR__AID3_XCC0_VF2_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF2_BASE_ADDR__AID3_XCC0_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF2_BASE_ADDR
+#define AID3_XCC1_VF2_BASE_ADDR__AID3_XCC1_VF2_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF2_BASE_ADDR__AID3_XCC1_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF2_BASE_ADDR
+#define AID0_NBIF_VF2_BASE_ADDR__AID0_NBIF_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF2_BASE_ADDR__AID0_NBIF_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF2_BASE_ADDR
+#define AID0_ATHUB_VF2_BASE_ADDR__AID0_ATHUB_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF2_BASE_ADDR__AID0_ATHUB_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF2_BASE_ADDR
+#define AID0_IH_VF2_BASE_ADDR__AID0_IH_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF2_BASE_ADDR__AID0_IH_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF2_BASE_ADDR
+#define AID0_HDP_VF2_BASE_ADDR__AID0_HDP_VF2_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF2_BASE_ADDR__AID0_HDP_VF2_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF3_BASE_ADDR
+#define AID0_VF3_BASE_ADDR__AID0_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_VF3_BASE_ADDR__AID0_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF3_BASE_ADDR
+#define AID1_VF3_BASE_ADDR__AID1_VF3_BASE_ADDR__SHIFT 0x0
+#define AID1_VF3_BASE_ADDR__AID1_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF3_BASE_ADDR
+#define AID2_VF3_BASE_ADDR__AID2_VF3_BASE_ADDR__SHIFT 0x0
+#define AID2_VF3_BASE_ADDR__AID2_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF3_BASE_ADDR
+#define AID3_VF3_BASE_ADDR__AID3_VF3_BASE_ADDR__SHIFT 0x0
+#define AID3_VF3_BASE_ADDR__AID3_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF3_BASE_ADDR
+#define AID0_XCC0_VF3_BASE_ADDR__AID0_XCC0_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF3_BASE_ADDR__AID0_XCC0_VF3_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF3_BASE_ADDR
+#define AID0_XCC1_VF3_BASE_ADDR__AID0_XCC1_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF3_BASE_ADDR__AID0_XCC1_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF3_BASE_ADDR
+#define AID1_XCC0_VF3_BASE_ADDR__AID1_XCC0_VF3_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF3_BASE_ADDR__AID1_XCC0_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF3_BASE_ADDR
+#define AID1_XCC1_VF3_BASE_ADDR__AID1_XCC1_VF3_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF3_BASE_ADDR__AID1_XCC1_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF3_BASE_ADDR
+#define AID2_XCC0_VF3_BASE_ADDR__AID2_XCC0_VF3_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF3_BASE_ADDR__AID2_XCC0_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF3_BASE_ADDR
+#define AID2_XCC1_VF3_BASE_ADDR__AID2_XCC1_VF3_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF3_BASE_ADDR__AID2_XCC1_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF3_BASE_ADDR
+#define AID3_XCC0_VF3_BASE_ADDR__AID3_XCC0_VF3_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF3_BASE_ADDR__AID3_XCC0_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF3_BASE_ADDR
+#define AID3_XCC1_VF3_BASE_ADDR__AID3_XCC1_VF3_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF3_BASE_ADDR__AID3_XCC1_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF3_BASE_ADDR
+#define AID0_NBIF_VF3_BASE_ADDR__AID0_NBIF_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF3_BASE_ADDR__AID0_NBIF_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF3_BASE_ADDR
+#define AID0_ATHUB_VF3_BASE_ADDR__AID0_ATHUB_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF3_BASE_ADDR__AID0_ATHUB_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF3_BASE_ADDR
+#define AID0_IH_VF3_BASE_ADDR__AID0_IH_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF3_BASE_ADDR__AID0_IH_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF3_BASE_ADDR
+#define AID0_HDP_VF3_BASE_ADDR__AID0_HDP_VF3_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF3_BASE_ADDR__AID0_HDP_VF3_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF4_BASE_ADDR
+#define AID0_VF4_BASE_ADDR__AID0_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_VF4_BASE_ADDR__AID0_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF4_BASE_ADDR
+#define AID1_VF4_BASE_ADDR__AID1_VF4_BASE_ADDR__SHIFT 0x0
+#define AID1_VF4_BASE_ADDR__AID1_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF4_BASE_ADDR
+#define AID2_VF4_BASE_ADDR__AID2_VF4_BASE_ADDR__SHIFT 0x0
+#define AID2_VF4_BASE_ADDR__AID2_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF4_BASE_ADDR
+#define AID3_VF4_BASE_ADDR__AID3_VF4_BASE_ADDR__SHIFT 0x0
+#define AID3_VF4_BASE_ADDR__AID3_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF4_BASE_ADDR
+#define AID0_XCC0_VF4_BASE_ADDR__AID0_XCC0_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF4_BASE_ADDR__AID0_XCC0_VF4_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF4_BASE_ADDR
+#define AID0_XCC1_VF4_BASE_ADDR__AID0_XCC1_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF4_BASE_ADDR__AID0_XCC1_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF4_BASE_ADDR
+#define AID1_XCC0_VF4_BASE_ADDR__AID1_XCC0_VF4_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF4_BASE_ADDR__AID1_XCC0_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF4_BASE_ADDR
+#define AID1_XCC1_VF4_BASE_ADDR__AID1_XCC1_VF4_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF4_BASE_ADDR__AID1_XCC1_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF4_BASE_ADDR
+#define AID2_XCC0_VF4_BASE_ADDR__AID2_XCC0_VF4_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF4_BASE_ADDR__AID2_XCC0_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF4_BASE_ADDR
+#define AID2_XCC1_VF4_BASE_ADDR__AID2_XCC1_VF4_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF4_BASE_ADDR__AID2_XCC1_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF4_BASE_ADDR
+#define AID3_XCC0_VF4_BASE_ADDR__AID3_XCC0_VF4_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF4_BASE_ADDR__AID3_XCC0_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF4_BASE_ADDR
+#define AID3_XCC1_VF4_BASE_ADDR__AID3_XCC1_VF4_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF4_BASE_ADDR__AID3_XCC1_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF4_BASE_ADDR
+#define AID0_NBIF_VF4_BASE_ADDR__AID0_NBIF_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF4_BASE_ADDR__AID0_NBIF_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF4_BASE_ADDR
+#define AID0_ATHUB_VF4_BASE_ADDR__AID0_ATHUB_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF4_BASE_ADDR__AID0_ATHUB_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF4_BASE_ADDR
+#define AID0_IH_VF4_BASE_ADDR__AID0_IH_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF4_BASE_ADDR__AID0_IH_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF4_BASE_ADDR
+#define AID0_HDP_VF4_BASE_ADDR__AID0_HDP_VF4_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF4_BASE_ADDR__AID0_HDP_VF4_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF5_BASE_ADDR
+#define AID0_VF5_BASE_ADDR__AID0_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_VF5_BASE_ADDR__AID0_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF5_BASE_ADDR
+#define AID1_VF5_BASE_ADDR__AID1_VF5_BASE_ADDR__SHIFT 0x0
+#define AID1_VF5_BASE_ADDR__AID1_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF5_BASE_ADDR
+#define AID2_VF5_BASE_ADDR__AID2_VF5_BASE_ADDR__SHIFT 0x0
+#define AID2_VF5_BASE_ADDR__AID2_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF5_BASE_ADDR
+#define AID3_VF5_BASE_ADDR__AID3_VF5_BASE_ADDR__SHIFT 0x0
+#define AID3_VF5_BASE_ADDR__AID3_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF5_BASE_ADDR
+#define AID0_XCC0_VF5_BASE_ADDR__AID0_XCC0_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF5_BASE_ADDR__AID0_XCC0_VF5_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF5_BASE_ADDR
+#define AID0_XCC1_VF5_BASE_ADDR__AID0_XCC1_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF5_BASE_ADDR__AID0_XCC1_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF5_BASE_ADDR
+#define AID1_XCC0_VF5_BASE_ADDR__AID1_XCC0_VF5_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF5_BASE_ADDR__AID1_XCC0_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF5_BASE_ADDR
+#define AID1_XCC1_VF5_BASE_ADDR__AID1_XCC1_VF5_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF5_BASE_ADDR__AID1_XCC1_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF5_BASE_ADDR
+#define AID2_XCC0_VF5_BASE_ADDR__AID2_XCC0_VF5_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF5_BASE_ADDR__AID2_XCC0_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF5_BASE_ADDR
+#define AID2_XCC1_VF5_BASE_ADDR__AID2_XCC1_VF5_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF5_BASE_ADDR__AID2_XCC1_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF5_BASE_ADDR
+#define AID3_XCC0_VF5_BASE_ADDR__AID3_XCC0_VF5_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF5_BASE_ADDR__AID3_XCC0_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF5_BASE_ADDR
+#define AID3_XCC1_VF5_BASE_ADDR__AID3_XCC1_VF5_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF5_BASE_ADDR__AID3_XCC1_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF5_BASE_ADDR
+#define AID0_NBIF_VF5_BASE_ADDR__AID0_NBIF_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF5_BASE_ADDR__AID0_NBIF_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF5_BASE_ADDR
+#define AID0_ATHUB_VF5_BASE_ADDR__AID0_ATHUB_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF5_BASE_ADDR__AID0_ATHUB_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF5_BASE_ADDR
+#define AID0_IH_VF5_BASE_ADDR__AID0_IH_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF5_BASE_ADDR__AID0_IH_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF5_BASE_ADDR
+#define AID0_HDP_VF5_BASE_ADDR__AID0_HDP_VF5_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF5_BASE_ADDR__AID0_HDP_VF5_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF6_BASE_ADDR
+#define AID0_VF6_BASE_ADDR__AID0_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_VF6_BASE_ADDR__AID0_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF6_BASE_ADDR
+#define AID1_VF6_BASE_ADDR__AID1_VF6_BASE_ADDR__SHIFT 0x0
+#define AID1_VF6_BASE_ADDR__AID1_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF6_BASE_ADDR
+#define AID2_VF6_BASE_ADDR__AID2_VF6_BASE_ADDR__SHIFT 0x0
+#define AID2_VF6_BASE_ADDR__AID2_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF6_BASE_ADDR
+#define AID3_VF6_BASE_ADDR__AID3_VF6_BASE_ADDR__SHIFT 0x0
+#define AID3_VF6_BASE_ADDR__AID3_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF6_BASE_ADDR
+#define AID0_XCC0_VF6_BASE_ADDR__AID0_XCC0_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF6_BASE_ADDR__AID0_XCC0_VF6_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF6_BASE_ADDR
+#define AID0_XCC1_VF6_BASE_ADDR__AID0_XCC1_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF6_BASE_ADDR__AID0_XCC1_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF6_BASE_ADDR
+#define AID1_XCC0_VF6_BASE_ADDR__AID1_XCC0_VF6_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF6_BASE_ADDR__AID1_XCC0_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF6_BASE_ADDR
+#define AID1_XCC1_VF6_BASE_ADDR__AID1_XCC1_VF6_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF6_BASE_ADDR__AID1_XCC1_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF6_BASE_ADDR
+#define AID2_XCC0_VF6_BASE_ADDR__AID2_XCC0_VF6_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF6_BASE_ADDR__AID2_XCC0_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF6_BASE_ADDR
+#define AID2_XCC1_VF6_BASE_ADDR__AID2_XCC1_VF6_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF6_BASE_ADDR__AID2_XCC1_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF6_BASE_ADDR
+#define AID3_XCC0_VF6_BASE_ADDR__AID3_XCC0_VF6_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF6_BASE_ADDR__AID3_XCC0_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF6_BASE_ADDR
+#define AID3_XCC1_VF6_BASE_ADDR__AID3_XCC1_VF6_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF6_BASE_ADDR__AID3_XCC1_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF6_BASE_ADDR
+#define AID0_NBIF_VF6_BASE_ADDR__AID0_NBIF_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF6_BASE_ADDR__AID0_NBIF_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF6_BASE_ADDR
+#define AID0_ATHUB_VF6_BASE_ADDR__AID0_ATHUB_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF6_BASE_ADDR__AID0_ATHUB_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF6_BASE_ADDR
+#define AID0_IH_VF6_BASE_ADDR__AID0_IH_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF6_BASE_ADDR__AID0_IH_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF6_BASE_ADDR
+#define AID0_HDP_VF6_BASE_ADDR__AID0_HDP_VF6_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF6_BASE_ADDR__AID0_HDP_VF6_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_VF7_BASE_ADDR
+#define AID0_VF7_BASE_ADDR__AID0_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_VF7_BASE_ADDR__AID0_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_VF7_BASE_ADDR
+#define AID1_VF7_BASE_ADDR__AID1_VF7_BASE_ADDR__SHIFT 0x0
+#define AID1_VF7_BASE_ADDR__AID1_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_VF7_BASE_ADDR
+#define AID2_VF7_BASE_ADDR__AID2_VF7_BASE_ADDR__SHIFT 0x0
+#define AID2_VF7_BASE_ADDR__AID2_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_VF7_BASE_ADDR
+#define AID3_VF7_BASE_ADDR__AID3_VF7_BASE_ADDR__SHIFT 0x0
+#define AID3_VF7_BASE_ADDR__AID3_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_VF7_BASE_ADDR
+#define AID0_XCC0_VF7_BASE_ADDR__AID0_XCC0_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_VF7_BASE_ADDR__AID0_XCC0_VF7_BASE_ADDR_MASK 0x0001FFFFL
+//AID0_XCC1_VF7_BASE_ADDR
+#define AID0_XCC1_VF7_BASE_ADDR__AID0_XCC1_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_VF7_BASE_ADDR__AID0_XCC1_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_VF7_BASE_ADDR
+#define AID1_XCC0_VF7_BASE_ADDR__AID1_XCC0_VF7_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_VF7_BASE_ADDR__AID1_XCC0_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_VF7_BASE_ADDR
+#define AID1_XCC1_VF7_BASE_ADDR__AID1_XCC1_VF7_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_VF7_BASE_ADDR__AID1_XCC1_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_VF7_BASE_ADDR
+#define AID2_XCC0_VF7_BASE_ADDR__AID2_XCC0_VF7_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_VF7_BASE_ADDR__AID2_XCC0_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_VF7_BASE_ADDR
+#define AID2_XCC1_VF7_BASE_ADDR__AID2_XCC1_VF7_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_VF7_BASE_ADDR__AID2_XCC1_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_VF7_BASE_ADDR
+#define AID3_XCC0_VF7_BASE_ADDR__AID3_XCC0_VF7_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_VF7_BASE_ADDR__AID3_XCC0_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_VF7_BASE_ADDR
+#define AID3_XCC1_VF7_BASE_ADDR__AID3_XCC1_VF7_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_VF7_BASE_ADDR__AID3_XCC1_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_NBIF_VF7_BASE_ADDR
+#define AID0_NBIF_VF7_BASE_ADDR__AID0_NBIF_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_NBIF_VF7_BASE_ADDR__AID0_NBIF_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_ATHUB_VF7_BASE_ADDR
+#define AID0_ATHUB_VF7_BASE_ADDR__AID0_ATHUB_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_ATHUB_VF7_BASE_ADDR__AID0_ATHUB_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_IH_VF7_BASE_ADDR
+#define AID0_IH_VF7_BASE_ADDR__AID0_IH_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_IH_VF7_BASE_ADDR__AID0_IH_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_HDP_VF7_BASE_ADDR
+#define AID0_HDP_VF7_BASE_ADDR__AID0_HDP_VF7_BASE_ADDR__SHIFT 0x0
+#define AID0_HDP_VF7_BASE_ADDR__AID0_HDP_VF7_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_PF_BASE_ADDR
+#define AID0_PF_BASE_ADDR__AID0_PF_BASE_ADDR__SHIFT 0x0
+#define AID0_PF_BASE_ADDR__AID0_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC0_PF_BASE_ADDR
+#define AID0_XCC0_PF_BASE_ADDR__AID0_XCC0_PF_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC0_PF_BASE_ADDR__AID0_XCC0_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID0_XCC1_PF_BASE_ADDR
+#define AID0_XCC1_PF_BASE_ADDR__AID0_XCC1_PF_BASE_ADDR__SHIFT 0x0
+#define AID0_XCC1_PF_BASE_ADDR__AID0_XCC1_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_PF_BASE_ADDR
+#define AID1_PF_BASE_ADDR__AID1_PF_BASE_ADDR__SHIFT 0x0
+#define AID1_PF_BASE_ADDR__AID1_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC0_PF_BASE_ADDR
+#define AID1_XCC0_PF_BASE_ADDR__AID1_XCC0_PF_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC0_PF_BASE_ADDR__AID1_XCC0_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID1_XCC1_PF_BASE_ADDR
+#define AID1_XCC1_PF_BASE_ADDR__AID1_XCC1_PF_BASE_ADDR__SHIFT 0x0
+#define AID1_XCC1_PF_BASE_ADDR__AID1_XCC1_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_PF_BASE_ADDR
+#define AID2_PF_BASE_ADDR__AID2_PF_BASE_ADDR__SHIFT 0x0
+#define AID2_PF_BASE_ADDR__AID2_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC0_PF_BASE_ADDR
+#define AID2_XCC0_PF_BASE_ADDR__AID2_XCC0_PF_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC0_PF_BASE_ADDR__AID2_XCC0_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID2_XCC1_PF_BASE_ADDR
+#define AID2_XCC1_PF_BASE_ADDR__AID2_XCC1_PF_BASE_ADDR__SHIFT 0x0
+#define AID2_XCC1_PF_BASE_ADDR__AID2_XCC1_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_PF_BASE_ADDR
+#define AID3_PF_BASE_ADDR__AID3_PF_BASE_ADDR__SHIFT 0x0
+#define AID3_PF_BASE_ADDR__AID3_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC0_PF_BASE_ADDR
+#define AID3_XCC0_PF_BASE_ADDR__AID3_XCC0_PF_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC0_PF_BASE_ADDR__AID3_XCC0_PF_BASE_ADDR_MASK 0x0000FFFFL
+//AID3_XCC1_PF_BASE_ADDR
+#define AID3_XCC1_PF_BASE_ADDR__AID3_XCC1_PF_BASE_ADDR__SHIFT 0x0
+#define AID3_XCC1_PF_BASE_ADDR__AID3_XCC1_PF_BASE_ADDR_MASK 0x0000FFFFL
+//NBIF_RRMT_CNTL
+#define NBIF_RRMT_CNTL__PARTITION_MODE__SHIFT 0x0
+#define NBIF_RRMT_CNTL__AID_DIE_ID__SHIFT 0x4
+#define NBIF_RRMT_CNTL__RRMT_ENABLE__SHIFT 0x8
+#define NBIF_RRMT_CNTL__RRMT_Invalid_Address_H__SHIFT 0x18
+#define NBIF_RRMT_CNTL__PARTITION_MODE_MASK 0x00000007L
+#define NBIF_RRMT_CNTL__AID_DIE_ID_MASK 0x00000030L
+#define NBIF_RRMT_CNTL__RRMT_ENABLE_MASK 0x00000100L
+#define NBIF_RRMT_CNTL__RRMT_Invalid_Address_H_MASK 0xFF000000L
+//BIFC_DOORBELL_ACCESS_EN_PF
+#define BIFC_DOORBELL_ACCESS_EN_PF__BIFC_DOORBELL_ACCESS_EN_PF__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_PF__BIFC_DOORBELL_ACCESS_EN_PF_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF0
+#define BIFC_DOORBELL_ACCESS_EN_VF0__BIFC_DOORBELL_ACCESS_EN_VF0__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF0__BIFC_DOORBELL_ACCESS_EN_VF0_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF1
+#define BIFC_DOORBELL_ACCESS_EN_VF1__BIFC_DOORBELL_ACCESS_EN_VF1__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF1__BIFC_DOORBELL_ACCESS_EN_VF1_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF2
+#define BIFC_DOORBELL_ACCESS_EN_VF2__BIFC_DOORBELL_ACCESS_EN_VF2__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF2__BIFC_DOORBELL_ACCESS_EN_VF2_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF3
+#define BIFC_DOORBELL_ACCESS_EN_VF3__BIFC_DOORBELL_ACCESS_EN_VF3__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF3__BIFC_DOORBELL_ACCESS_EN_VF3_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF4
+#define BIFC_DOORBELL_ACCESS_EN_VF4__BIFC_DOORBELL_ACCESS_EN_VF4__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF4__BIFC_DOORBELL_ACCESS_EN_VF4_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF5
+#define BIFC_DOORBELL_ACCESS_EN_VF5__BIFC_DOORBELL_ACCESS_EN_VF5__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF5__BIFC_DOORBELL_ACCESS_EN_VF5_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF6
+#define BIFC_DOORBELL_ACCESS_EN_VF6__BIFC_DOORBELL_ACCESS_EN_VF6__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF6__BIFC_DOORBELL_ACCESS_EN_VF6_MASK 0x000FFFFFL
+//BIFC_DOORBELL_ACCESS_EN_VF7
+#define BIFC_DOORBELL_ACCESS_EN_VF7__BIFC_DOORBELL_ACCESS_EN_VF7__SHIFT 0x0
+#define BIFC_DOORBELL_ACCESS_EN_VF7__BIFC_DOORBELL_ACCESS_EN_VF7_MASK 0x000FFFFFL
+//MISC_SCRATCH
+#define MISC_SCRATCH__MISC_SCRATCH0__SHIFT 0x0
+#define MISC_SCRATCH__MISC_SCRATCH0_MASK 0xFFFFFFFFL
+//INTR_LINE_POLARITY
+#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0__SHIFT 0x0
+#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0_MASK 0x000000FFL
+//INTR_LINE_ENABLE
+#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0__SHIFT 0x0
+#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0_MASK 0x000000FFL
+//OUTSTANDING_VC_ALLOC
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC__SHIFT 0x0
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC__SHIFT 0x2
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC__SHIFT 0x4
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC__SHIFT 0x6
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC__SHIFT 0x8
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC__SHIFT 0xa
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC__SHIFT 0xc
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC__SHIFT 0xe
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD__SHIFT 0x10
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC__SHIFT 0x18
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC__SHIFT 0x1a
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD__SHIFT 0x1c
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC_MASK 0x00000003L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC_MASK 0x0000000CL
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC_MASK 0x00000030L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC_MASK 0x000000C0L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC_MASK 0x00000300L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC_MASK 0x00000C00L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC_MASK 0x00003000L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC_MASK 0x0000C000L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD_MASK 0x000F0000L
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC_MASK 0x03000000L
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC_MASK 0x0C000000L
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD_MASK 0xF0000000L
+//BIFC_MISC_CTRL0
+#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN__SHIFT 0x0
+#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN__SHIFT 0x1
+#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS__SHIFT 0x4
+#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE__SHIFT 0x8
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_P__SHIFT 0x9
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN__SHIFT 0xb
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS__SHIFT 0xc
+#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS__SHIFT 0xd
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_NP__SHIFT 0xe
+#define BIFC_MISC_CTRL0__HRP_CHAIN_DISABLE__SHIFT 0xf
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS__SHIFT 0x10
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL__SHIFT 0x11
+#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW__SHIFT 0x12
+#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH__SHIFT 0x13
+#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO__SHIFT 0x14
+#define BIFC_MISC_CTRL0__HST_FLUSH_DEFER_EN__SHIFT 0x15
+#define BIFC_MISC_CTRL0__HST_FLUSH_CLR_LOCK_EN__SHIFT 0x16
+#define BIFC_MISC_CTRL0__STFETCH_BLOCK_IN_RST__SHIFT 0x17
+#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS__SHIFT 0x18
+#define BIFC_MISC_CTRL0__ATS_MSG_BLOCK_IN_RST__SHIFT 0x19
+#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS__SHIFT 0x1a
+#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE__SHIFT 0x1b
+#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE__SHIFT 0x1c
+#define BIFC_MISC_CTRL0__DMA_ALL_RST_PROTECT_STS_SEL__SHIFT 0x1d
+#define BIFC_MISC_CTRL0__HDP_P2P_DIRECT_ADD_ADJUST__SHIFT 0x1e
+#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION__SHIFT 0x1f
+#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN_MASK 0x00000001L
+#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN_MASK 0x00000006L
+#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS_MASK 0x000000F0L
+#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE_MASK 0x00000100L
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_P_MASK 0x00000200L
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN_MASK 0x00000800L
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS_MASK 0x00001000L
+#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS_MASK 0x00002000L
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_NP_MASK 0x00004000L
+#define BIFC_MISC_CTRL0__HRP_CHAIN_DISABLE_MASK 0x00008000L
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS_MASK 0x00010000L
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL_MASK 0x00020000L
+#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW_MASK 0x00040000L
+#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH_MASK 0x00080000L
+#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO_MASK 0x00100000L
+#define BIFC_MISC_CTRL0__HST_FLUSH_DEFER_EN_MASK 0x00200000L
+#define BIFC_MISC_CTRL0__HST_FLUSH_CLR_LOCK_EN_MASK 0x00400000L
+#define BIFC_MISC_CTRL0__STFETCH_BLOCK_IN_RST_MASK 0x00800000L
+#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS_MASK 0x01000000L
+#define BIFC_MISC_CTRL0__ATS_MSG_BLOCK_IN_RST_MASK 0x02000000L
+#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS_MASK 0x04000000L
+#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE_MASK 0x08000000L
+#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE_MASK 0x10000000L
+#define BIFC_MISC_CTRL0__DMA_ALL_RST_PROTECT_STS_SEL_MASK 0x20000000L
+#define BIFC_MISC_CTRL0__HDP_P2P_DIRECT_ADD_ADJUST_MASK 0x40000000L
+#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION_MASK 0x80000000L
+//BIFC_MISC_CTRL1
+#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT__SHIFT 0x0
+#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT__SHIFT 0x1
+#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT__SHIFT 0x2
+#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT__SHIFT 0x3
+#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS__SHIFT 0x4
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR__SHIFT 0x5
+#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS__SHIFT 0x6
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP__SHIFT 0x7
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS__SHIFT 0x8
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS__SHIFT 0xa
+#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ__SHIFT 0xc
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE__SHIFT 0xd
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE__SHIFT 0xe
+#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1__SHIFT 0xf
+#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS__SHIFT 0x10
+#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS__SHIFT 0x11
+#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS__SHIFT 0x12
+#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS__SHIFT 0x13
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR__SHIFT 0x14
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE__SHIFT 0x15
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE_VALUE__SHIFT 0x16
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_FOR_AERLOG__SHIFT 0x17
+#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK__SHIFT 0x18
+#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK__SHIFT 0x19
+#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK__SHIFT 0x1a
+#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK__SHIFT 0x1b
+#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT__SHIFT 0x1c
+#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN__SHIFT 0x1d
+#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL__SHIFT 0x1e
+#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT_MASK 0x00000001L
+#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT_MASK 0x00000002L
+#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT_MASK 0x00000004L
+#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT_MASK 0x00000008L
+#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS_MASK 0x00000010L
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR_MASK 0x00000020L
+#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS_MASK 0x00000040L
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP_MASK 0x00000080L
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS_MASK 0x00000300L
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS_MASK 0x00000C00L
+#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ_MASK 0x00001000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_MASK 0x00002000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE_MASK 0x00004000L
+#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1_MASK 0x00008000L
+#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS_MASK 0x00010000L
+#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS_MASK 0x00020000L
+#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS_MASK 0x00040000L
+#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS_MASK 0x00080000L
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR_MASK 0x00100000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE_MASK 0x00200000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE_VALUE_MASK 0x00400000L
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_FOR_AERLOG_MASK 0x00800000L
+#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK_MASK 0x01000000L
+#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK_MASK 0x02000000L
+#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK_MASK 0x04000000L
+#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK_MASK 0x08000000L
+#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT_MASK 0x10000000L
+#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN_MASK 0x20000000L
+#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL_MASK 0xC0000000L
+//BIFC_BME_ERR_LOG_LB
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x0
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x1
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x10
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x11
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F0_MASK 0x00000001L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F1_MASK 0x00000002L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F0_MASK 0x00010000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F1_MASK 0x00020000L
+//BIFC_LC_TIMER_CTRL
+#define BIFC_LC_TIMER_CTRL__ASPM_IDLE_TIMER_SCALE__SHIFT 0x0
+#define BIFC_LC_TIMER_CTRL__L1_EXIT_TIMER_SCALE__SHIFT 0x10
+#define BIFC_LC_TIMER_CTRL__ASPM_IDLE_TIMER_SCALE_MASK 0x0000FFFFL
+#define BIFC_LC_TIMER_CTRL__L1_EXIT_TIMER_SCALE_MASK 0xFFFF0000L
+//BIFC_RCCBIH_BME_ERR_LOG0
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x0
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x1
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x10
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x11
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00000001L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00000002L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00010000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00020000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1_MASK 0xC0000000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3_MASK 0xC0000000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5_MASK 0xC0000000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7_MASK 0xC0000000L
+//BIFC_DMA_ATTR_CNTL2_DEV0
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0__SHIFT 0x0
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1__SHIFT 0x4
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2__SHIFT 0x8
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3__SHIFT 0xc
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4__SHIFT 0x10
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5__SHIFT 0x14
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6__SHIFT 0x18
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7__SHIFT 0x1c
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0_MASK 0x00000001L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1_MASK 0x00000010L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2_MASK 0x00000100L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3_MASK 0x00001000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4_MASK 0x00010000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5_MASK 0x00100000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6_MASK 0x01000000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7_MASK 0x10000000L
+//BME_DUMMY_CNTL_0
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0__SHIFT 0x0
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1__SHIFT 0x2
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2__SHIFT 0x4
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3__SHIFT 0x6
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4__SHIFT 0x8
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5__SHIFT 0xa
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6__SHIFT 0xc
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7__SHIFT 0xe
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0_MASK 0x00000003L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1_MASK 0x0000000CL
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2_MASK 0x00000030L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3_MASK 0x000000C0L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4_MASK 0x00000300L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5_MASK 0x00000C00L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6_MASK 0x00003000L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7_MASK 0x0000C000L
+//BIFC_THT_CNTL
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0__SHIFT 0x0
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0__SHIFT 0x4
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1__SHIFT 0x8
+#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN__SHIFT 0x10
+#define BIFC_THT_CNTL__THT_NTB_VC0_APER0_ADSC_PUSH_DIS__SHIFT 0x18
+#define BIFC_THT_CNTL__THT_NTB_VC0_OTHAPER_ADSC_PUSH_DIS__SHIFT 0x19
+#define BIFC_THT_CNTL__THT_NTB_VC1_APER0_ADSC_PUSH_DIS__SHIFT 0x1a
+#define BIFC_THT_CNTL__THT_NTB_VC1_OTHAPER_ADSC_PUSH_DIS__SHIFT 0x1b
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0_MASK 0x0000000FL
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0_MASK 0x000000F0L
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1_MASK 0x00000F00L
+#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN_MASK 0x00010000L
+#define BIFC_THT_CNTL__THT_NTB_VC0_APER0_ADSC_PUSH_DIS_MASK 0x01000000L
+#define BIFC_THT_CNTL__THT_NTB_VC0_OTHAPER_ADSC_PUSH_DIS_MASK 0x02000000L
+#define BIFC_THT_CNTL__THT_NTB_VC1_APER0_ADSC_PUSH_DIS_MASK 0x04000000L
+#define BIFC_THT_CNTL__THT_NTB_VC1_OTHAPER_ADSC_PUSH_DIS_MASK 0x08000000L
+//BIFC_HSTARB_CNTL
+#define BIFC_HSTARB_CNTL__SLVARB_MODE__SHIFT 0x0
+#define BIFC_HSTARB_CNTL__CFG_BLOCK_P_EN__SHIFT 0x8
+#define BIFC_HSTARB_CNTL__SLVARB_MODE_MASK 0x00000003L
+#define BIFC_HSTARB_CNTL__CFG_BLOCK_P_EN_MASK 0x00000100L
+//BIFC_GSI_CNTL
+#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE__SHIFT 0x0
+#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE__SHIFT 0x2
+#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN__SHIFT 0x5
+#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN__SHIFT 0x6
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN__SHIFT 0x7
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN__SHIFT 0x8
+#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE__SHIFT 0xa
+#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE__SHIFT 0xc
+#define BIFC_GSI_CNTL__GSI_SMN_PARITY_CHK_BE_MSK__SHIFT 0xf
+#define BIFC_GSI_CNTL__GSI_SMN_BURST_EN__SHIFT 0x10
+#define BIFC_GSI_CNTL__GSI_SMN_256B_SPLIT_64B_EN__SHIFT 0x11
+#define BIFC_GSI_CNTL__SMN_PP_PIPE_ENABLE__SHIFT 0x1b
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_FBFLUSH__SHIFT 0x1c
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPFLUSH__SHIFT 0x1d
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPRD__SHIFT 0x1e
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_ALL__SHIFT 0x1f
+#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE_MASK 0x00000003L
+#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE_MASK 0x0000001CL
+#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN_MASK 0x00000020L
+#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN_MASK 0x00000040L
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN_MASK 0x00000080L
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN_MASK 0x00000100L
+#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE_MASK 0x00000C00L
+#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE_MASK 0x00003000L
+#define BIFC_GSI_CNTL__GSI_SMN_PARITY_CHK_BE_MSK_MASK 0x00008000L
+#define BIFC_GSI_CNTL__GSI_SMN_BURST_EN_MASK 0x00010000L
+#define BIFC_GSI_CNTL__GSI_SMN_256B_SPLIT_64B_EN_MASK 0x00020000L
+#define BIFC_GSI_CNTL__SMN_PP_PIPE_ENABLE_MASK 0x08000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_FBFLUSH_MASK 0x10000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPFLUSH_MASK 0x20000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPRD_MASK 0x40000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_ALL_MASK 0x80000000L
+//BIFC_PCIEFUNC_CNTL
+#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC__SHIFT 0x0
+#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC_MASK 0x0000FFFFL
+//BIFC_PASID_CHECK_DIS
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0__SHIFT 0x0
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1__SHIFT 0x1
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0_MASK 0x00000001L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1_MASK 0x00000002L
+//BIFC_SDP_CNTL_0
+#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS__SHIFT 0x0
+#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS__SHIFT 0x8
+#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS__SHIFT 0x10
+#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS__SHIFT 0x18
+#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
+#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS_MASK 0x0000FF00L
+#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS_MASK 0x00FF0000L
+#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS_MASK 0xFF000000L
+//BIFC_SDP_CNTL_1
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS__SHIFT 0x0
+#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS__SHIFT 0x1
+#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS__SHIFT 0x2
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS__SHIFT 0x3
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x4
+#define BIFC_SDP_CNTL_1__NP_KEEP_GOING_STALL_P__SHIFT 0x5
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x7
+#define BIFC_SDP_CNTL_1__ATOMIC_STALL_BY_RDWR_EN__SHIFT 0x8
+#define BIFC_SDP_CNTL_1__POOL_CREDIT_ALLOC_OVERRIDE_DYNAMIC__SHIFT 0x9
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS_MASK 0x00000001L
+#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS_MASK 0x00000002L
+#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS_MASK 0x00000004L
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS_MASK 0x00000008L
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000010L
+#define BIFC_SDP_CNTL_1__NP_KEEP_GOING_STALL_P_MASK 0x00000020L
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000080L
+#define BIFC_SDP_CNTL_1__ATOMIC_STALL_BY_RDWR_EN_MASK 0x00000100L
+#define BIFC_SDP_CNTL_1__POOL_CREDIT_ALLOC_OVERRIDE_DYNAMIC_MASK 0x00000200L
+//BIFC_PASID_STS
+#define BIFC_PASID_STS__PASID_STS__SHIFT 0x0
+#define BIFC_PASID_STS__PASID_STS_MASK 0x0000000FL
+//BIFC_ATHUB_ACT_CNTL
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE__SHIFT 0x0
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_SLFR_DATAERR_RSP_STS_TYPE__SHIFT 0x3
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_REQ_DROP_DIS__SHIFT 0x8
+#define BIFC_ATHUB_ACT_CNTL__GSI_ATHUB_ACT_FLUSH_TRIGGER__SHIFT 0x9
+#define BIFC_ATHUB_ACT_CNTL__GMI_ATHUB_ACT_FLUSH_TRIGGER__SHIFT 0xa
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE_MASK 0x00000007L
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_SLFR_DATAERR_RSP_STS_TYPE_MASK 0x00000038L
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_REQ_DROP_DIS_MASK 0x00000100L
+#define BIFC_ATHUB_ACT_CNTL__GSI_ATHUB_ACT_FLUSH_TRIGGER_MASK 0x00000200L
+#define BIFC_ATHUB_ACT_CNTL__GMI_ATHUB_ACT_FLUSH_TRIGGER_MASK 0x00000400L
+//BIFC_PERF_CNTL_0
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN__SHIFT 0x0
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN__SHIFT 0x1
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET__SHIFT 0x8
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET__SHIFT 0x9
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT 0x10
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL__SHIFT 0x18
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN_MASK 0x00000001L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN_MASK 0x00000002L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET_MASK 0x00000100L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET_MASK 0x00000200L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK 0x007F0000L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL_MASK 0x7F000000L
+//BIFC_PERF_CNTL_1
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN__SHIFT 0x0
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN__SHIFT 0x1
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET__SHIFT 0x4
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET__SHIFT 0x5
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL__SHIFT 0x8
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL__SHIFT 0x10
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN_MASK 0x00000001L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN_MASK 0x00000002L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET_MASK 0x00000010L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET_MASK 0x00000020L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL_MASK 0x0000FF00L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL_MASK 0x01FF0000L
+//BIFC_PERF_CNT_MMIO_RD_L32BIT
+#define BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//BIFC_PERF_CNT_MMIO_WR_L32BIT
+#define BIFC_PERF_CNT_MMIO_WR_L32BIT__PERF_CNT_MMIO_WR_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_WR_L32BIT__PERF_CNT_MMIO_WR_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//BIFC_PERF_CNT_DMA_RD_L32BIT
+#define BIFC_PERF_CNT_DMA_RD_L32BIT__PERF_CNT_DMA_RD_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_RD_L32BIT__PERF_CNT_DMA_RD_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//BIFC_PERF_CNT_DMA_WR_L32BIT
+#define BIFC_PERF_CNT_DMA_WR_L32BIT__PERF_CNT_DMA_WR_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_WR_L32BIT__PERF_CNT_DMA_WR_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//NBIF_REGIF_ERRSET_CTRL
+#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
+#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
+//BIFC_SDP_CNTL_2
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS__SHIFT 0x0
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS_H__SHIFT 0x8
+#define BIFC_SDP_CNTL_2__HRP_SDP_DISCON_HYSTERESIS_H__SHIFT 0x10
+#define BIFC_SDP_CNTL_2__GSI_SDP_DISCON_HYSTERESIS_H__SHIFT 0x18
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS_MASK 0x000000FFL
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS_H_MASK 0x00000F00L
+#define BIFC_SDP_CNTL_2__HRP_SDP_DISCON_HYSTERESIS_H_MASK 0x000F0000L
+#define BIFC_SDP_CNTL_2__GSI_SDP_DISCON_HYSTERESIS_H_MASK 0x0F000000L
+//NBIF_PGMST_CTRL
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_HYSTERESIS__SHIFT 0x0
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_EN__SHIFT 0x8
+#define NBIF_PGMST_CTRL__NBIF_CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define NBIF_PGMST_CTRL__NBIF_CFG_FW_PG_EXIT_EN__SHIFT 0xe
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_EN_MASK 0x00000100L
+#define NBIF_PGMST_CTRL__NBIF_CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define NBIF_PGMST_CTRL__NBIF_CFG_FW_PG_EXIT_EN_MASK 0x0000C000L
+//NBIF_PGSLV_CTRL
+#define NBIF_PGSLV_CTRL__NBIF_CFG_IDLE_HYSTERESIS__SHIFT 0x0
+#define NBIF_PGSLV_CTRL__NBIF_CFG_IDLE_HYSTERESIS_MASK 0x0000001FL
+//NBIF_PG_MISC_CTRL
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_0_IDLE_HYSTERESIS__SHIFT 0x0
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_1_IDLE_HYSTERESIS__SHIFT 0x5
+#define NBIF_PG_MISC_CTRL__NBIF_PG_ENDP_D3_ONLY__SHIFT 0xa
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM1__SHIFT 0xd
+#define NBIF_PG_MISC_CTRL__NBIF_PG_DS_ALLOW_DIS__SHIFT 0xe
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM2__SHIFT 0x10
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_REFCLK_CYCLE_FOR_200NS__SHIFT 0x18
+#define NBIF_PG_MISC_CTRL__NBIF_PG_PCIE_NBIF_LD_MASK__SHIFT 0x1e
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_PG_EXIT_OVERRIDE__SHIFT 0x1f
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_0_IDLE_HYSTERESIS_MASK 0x0000001FL
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_1_IDLE_HYSTERESIS_MASK 0x000003E0L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_ENDP_D3_ONLY_MASK 0x00000400L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM1_MASK 0x00002000L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_DS_ALLOW_DIS_MASK 0x00004000L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM2_MASK 0x00010000L
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_REFCLK_CYCLE_FOR_200NS_MASK 0x3F000000L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_PCIE_NBIF_LD_MASK_MASK 0x40000000L
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_PG_EXIT_OVERRIDE_MASK 0x80000000L
+//SMN_MST_EP_CNTL3
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7_MASK 0x00000080L
+//SMN_MST_EP_CNTL4
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7_MASK 0x00000080L
+//SMN_MST_CNTL1
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS__SHIFT 0x0
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0__SHIFT 0x10
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS_MASK 0x00000001L
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0_MASK 0x00010000L
+//SMN_MST_EP_CNTL5
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7_MASK 0x00000080L
+//BIF_SELFRING_BUFFER_VID
+#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID__SHIFT 0x0
+#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID__SHIFT 0x8
+#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID__SHIFT 0x10
+#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID_MASK 0x000000FFL
+#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID_MASK 0x0000FF00L
+#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID_MASK 0x00FF0000L
+//BIF_SELFRING_VECTOR_CNTL
+#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS__SHIFT 0x0
+#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM__SHIFT 0x1
+#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS_MASK 0x00000001L
+#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM_MASK 0x00000002L
+//NBIF_STRAP_WRITE_CTRL
+#define NBIF_STRAP_WRITE_CTRL__NBIF_STRAP_WRITE_ONCE_ENABLE__SHIFT 0x0
+#define NBIF_STRAP_WRITE_CTRL__NBIF_STRAP_WRITE_ONCE_ENABLE_MASK 0x00000001L
+//NBIF_INTX_DSTATE_MISC_CNTL
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP__SHIFT 0x0
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN__SHIFT 0x1
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS__SHIFT 0x2
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP__SHIFT 0x3
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN__SHIFT 0x4
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP__SHIFT 0x5
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN__SHIFT 0x6
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS__SHIFT 0x7
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP_MASK 0x00000001L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN_MASK 0x00000002L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS_MASK 0x00000004L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP_MASK 0x00000008L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN_MASK 0x00000010L
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP_MASK 0x00000020L
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN_MASK 0x00000040L
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS_MASK 0x00000080L
+//NBIF_PENDING_MISC_CNTL
+#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS__SHIFT 0x0
+#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS__SHIFT 0x1
+#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS_MASK 0x00000001L
+#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS_MASK 0x00000002L
+//BIF_GMI_WRR_WEIGHT
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_COUNTER_MODE__SHIFT 0x1d
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_MODE__SHIFT 0x1e
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_SIZE_MODE__SHIFT 0x1f
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_COUNTER_MODE_MASK 0x20000000L
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_MODE_MASK 0x40000000L
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_SIZE_MODE_MASK 0x80000000L
+//BIF_GMI_WRR_WEIGHT2
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT__SHIFT 0x0
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT__SHIFT 0x8
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT__SHIFT 0x10
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT__SHIFT 0x18
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT_MASK 0x000000FFL
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT_MASK 0x0000FF00L
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT_MASK 0x00FF0000L
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT_MASK 0xFF000000L
+//BIF_GMI_WRR_WEIGHT3
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT__SHIFT 0x0
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT__SHIFT 0x8
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT__SHIFT 0x10
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT__SHIFT 0x18
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT_MASK 0x000000FFL
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT_MASK 0x0000FF00L
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT_MASK 0x00FF0000L
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT_MASK 0xFF000000L
+//NBIF_PWRBRK_REQUEST
+#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST__SHIFT 0x0
+#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST_MASK 0x00000001L
+//BIF_ATOMIC_ERR_LOG_DEV0_F0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1_MASK 0x00080000L
+//BIF_DMA_MP4_ERR_LOG
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x0
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x1
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x10
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x11
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR_MASK 0x00000001L
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00000002L
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR_MASK 0x00010000L
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00020000L
+//BIF_PASID_ERR_LOG
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0__SHIFT 0x0
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1__SHIFT 0x1
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0_MASK 0x00000001L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1_MASK 0x00000002L
+//BIF_PASID_ERR_CLR
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0__SHIFT 0x0
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1__SHIFT 0x1
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0_MASK 0x00000001L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1_MASK 0x00000002L
+//NBIF_VWIRE_CTRL
+#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS__SHIFT 0x0
+#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT__SHIFT 0x4
+#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED__SHIFT 0x8
+#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS__SHIFT 0x10
+#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT__SHIFT 0x14
+#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL__SHIFT 0x1a
+#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS_MASK 0x00000001L
+#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT_MASK 0x000000F0L
+#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED_MASK 0x00000100L
+#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS_MASK 0x00010000L
+#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT_MASK 0x00F00000L
+#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL_MASK 0x0C000000L
+//NBIF_SMN_VWR_VCHG_DIS_CTRL
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS_MASK 0x00000040L
+//NBIF_SMN_VWR_VCHG_RST_CTRL0
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV_MASK 0x00000040L
+//NBIF_SMN_VWR_VCHG_TRIG
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG_MASK 0x00000040L
+//NBIF_SMN_VWR_WTRIG_CNTL
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS__SHIFT 0x0
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS__SHIFT 0x1
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS__SHIFT 0x2
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS__SHIFT 0x3
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS__SHIFT 0x4
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS__SHIFT 0x5
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS__SHIFT 0x6
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS_MASK 0x00000001L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS_MASK 0x00000002L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS_MASK 0x00000004L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS_MASK 0x00000008L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS_MASK 0x00000010L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS_MASK 0x00000020L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS_MASK 0x00000040L
+//NBIF_SMN_VWR_VCHG_DIS_CTRL_1
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV_MASK 0x00000040L
+//NBIF_MGCG_CTRL_LCLK
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK__SHIFT 0x0
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK__SHIFT 0x1
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK__SHIFT 0x2
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK__SHIFT 0xa
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK__SHIFT 0xb
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK__SHIFT 0xc
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK__SHIFT 0xd
+#define NBIF_MGCG_CTRL_LCLK__NBIF_SRAM_FGCG_EN_LCLK__SHIFT 0xe
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK 0x00000001L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK_MASK 0x00000002L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK_MASK 0x000003FCL
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK_MASK 0x00000400L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK_MASK 0x00000800L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK_MASK 0x00002000L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_SRAM_FGCG_EN_LCLK_MASK 0x00004000L
+//NBIF_DS_CTRL_LCLK
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN__SHIFT 0x0
+#define NBIF_DS_CTRL_LCLK__ATHUB_LCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER__SHIFT 0x10
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN_MASK 0x00000001L
+#define NBIF_DS_CTRL_LCLK__ATHUB_LCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000002L
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER_MASK 0xFFFF0000L
+//SMN_MST_CNTL0
+#define SMN_MST_CNTL0__SMN_ARB_MODE__SHIFT 0x0
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS__SHIFT 0x8
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS__SHIFT 0x9
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS__SHIFT 0xa
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS__SHIFT 0xb
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0__SHIFT 0x10
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0__SHIFT 0x14
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0__SHIFT 0x18
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0__SHIFT 0x1c
+#define SMN_MST_CNTL0__SMN_ARB_MODE_MASK 0x00000003L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS_MASK 0x00000100L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS_MASK 0x00000200L
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS_MASK 0x00000400L
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS_MASK 0x00000800L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0_MASK 0x00010000L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0_MASK 0x00100000L
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0_MASK 0x01000000L
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0_MASK 0x10000000L
+//SMN_MST_EP_CNTL1
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7_MASK 0x00000080L
+//SMN_MST_EP_CNTL2
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7_MASK 0x00000080L
+//NBIF_SDP_VWR_VCHG_DIS_CTRL
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS_MASK 0x01000000L
+//NBIF_SDP_VWR_VCHG_RST_CTRL0
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN_MASK 0x01000000L
+//NBIF_SDP_VWR_VCHG_RST_CTRL1
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL_MASK 0x01000000L
+//NBIF_SDP_VWR_VCHG_TRIG
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG_MASK 0x01000000L
+//NBIF_SHUB_TODET_CTRL
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_EN__SHIFT 0x0
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_AER_LOG_EN__SHIFT 0x1
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_TIMER_UNIT__SHIFT 0x8
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TIMEOUT_COUNT__SHIFT 0x10
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_EN_MASK 0x00000001L
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_AER_LOG_EN_MASK 0x00000002L
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_TIMER_UNIT_MASK 0x00000700L
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TIMEOUT_COUNT_MASK 0xFFFF0000L
+//NBIF_SHUB_TODET_CLIENT_CTRL
+#define NBIF_SHUB_TODET_CLIENT_CTRL__NBIF_SHUB_TODET_SLVERR_EN__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_CTRL__NBIF_SHUB_TODET_SLVERR_EN_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_CLIENT_STATUS
+#define NBIF_SHUB_TODET_CLIENT_STATUS__NBIF_SHUB_TODET_CLIENT_STATUS__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_STATUS__NBIF_SHUB_TODET_CLIENT_STATUS_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_SYNCFLOOD_CTRL
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL__NBIF_SHUB_TODET_SYNCFLOOD_EN__SHIFT 0x0
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL__NBIF_SHUB_TODET_SYNCFLOOD_EN_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_CLIENT_CTRL2
+#define NBIF_SHUB_TODET_CLIENT_CTRL2__NBIF_SHUB_TODET_SLVERR_EN2__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_CTRL2__NBIF_SHUB_TODET_SLVERR_EN2_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_CLIENT_STATUS2
+#define NBIF_SHUB_TODET_CLIENT_STATUS2__NBIF_SHUB_TODET_CLIENT_STATUS2__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_STATUS2__NBIF_SHUB_TODET_CLIENT_STATUS2_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_SYNCFLOOD_CTRL2
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL2__NBIF_SHUB_TODET_SYNCFLOOD_EN2__SHIFT 0x0
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL2__NBIF_SHUB_TODET_SYNCFLOOD_EN2_MASK 0xFFFFFFFFL
+//BIFC_BME_ERR_LOG_HB
+//BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+//BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+//BIFC_GMI_SDP_REQ_POOLCRED_ALLOC
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC2_ALLOC__SHIFT 0x8
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC3_ALLOC__SHIFT 0xc
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC4_ALLOC__SHIFT 0x10
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC5_ALLOC__SHIFT 0x14
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC6_ALLOC__SHIFT 0x18
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC7_ALLOC__SHIFT 0x1c
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC2_ALLOC_MASK 0x00000F00L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC3_ALLOC_MASK 0x0000F000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC4_ALLOC_MASK 0x000F0000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC5_ALLOC_MASK 0x00F00000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC6_ALLOC_MASK 0x0F000000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC7_ALLOC_MASK 0xF0000000L
+//BIFC_GMI_SDP_DAT_POOLCRED_ALLOC
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC2_ALLOC__SHIFT 0x8
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC3_ALLOC__SHIFT 0xc
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC4_ALLOC__SHIFT 0x10
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC5_ALLOC__SHIFT 0x14
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC6_ALLOC__SHIFT 0x18
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC7_ALLOC__SHIFT 0x1c
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC2_ALLOC_MASK 0x00000F00L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC3_ALLOC_MASK 0x0000F000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC4_ALLOC_MASK 0x000F0000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC5_ALLOC_MASK 0x00F00000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC6_ALLOC_MASK 0x0F000000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC7_ALLOC_MASK 0xF0000000L
+//DISCON_HYSTERESIS_HEAD_CTRL
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_DNS_SDP_DISCON_HYSTERESIS_H__SHIFT 0x0
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_UPS_SDP_DISCON_HYSTERESIS_H__SHIFT 0x8
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_DNS_SDP_DISCON_HYSTERESIS_H_MASK 0x0000000FL
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_UPS_SDP_DISCON_HYSTERESIS_H_MASK 0x00000F00L
+//BIFC_EARLY_WAKEUP_CNTL
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_ACTIVE__SHIFT 0x0
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_DS_EXIT__SHIFT 0x1
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_ALLOW_AER_ACTIVE__SHIFT 0x2
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_ACTIVE_MASK 0x00000001L
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_DS_EXIT_MASK 0x00000002L
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_ALLOW_AER_ACTIVE_MASK 0x00000004L
+//BIFC_PERF_CNT_MMIO_RD_H16BIT
+#define BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT_MASK 0x0000FFFFL
+//BIFC_PERF_CNT_MMIO_WR_H16BIT
+#define BIFC_PERF_CNT_MMIO_WR_H16BIT__PERF_CNT_MMIO_WR_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_WR_H16BIT__PERF_CNT_MMIO_WR_VALUE_H16BIT_MASK 0x0000FFFFL
+//BIFC_PERF_CNT_DMA_RD_H16BIT
+#define BIFC_PERF_CNT_DMA_RD_H16BIT__PERF_CNT_DMA_RD_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_RD_H16BIT__PERF_CNT_DMA_RD_VALUE_H16BIT_MASK 0x0000FFFFL
+//BIFC_PERF_CNT_DMA_WR_H16BIT
+#define BIFC_PERF_CNT_DMA_WR_H16BIT__PERF_CNT_DMA_WR_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_WR_H16BIT__PERF_CNT_DMA_WR_VALUE_H16BIT_MASK 0x0000FFFFL
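[Editorial sketch, not part of the patch: the BIFC performance counters above are split into a *_L32BIT register and a *_H16BIT register. Assuming, as the field names suggest, that the H16BIT value holds bits 47:32 of the counter, a full 48-bit count is assembled from two reads. The raw register values below are hypothetical stand-ins for MMIO reads of the corresponding offsets; the shift/mask macros are copied verbatim from the definitions above.]

#include <stdint.h>
#include <stdio.h>

#define BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT__SHIFT 0x0
#define BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT_MASK 0xFFFFFFFFL
#define BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT__SHIFT 0x0
#define BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT_MASK 0x0000FFFFL

int main(void)
{
	/* Hypothetical raw register reads standing in for MMIO accesses. */
	uint32_t lo_reg = 0x89ABCDEFu;
	uint32_t hi_reg = 0x00001234u;

	/* Extract each field: mask, then shift down. */
	uint64_t lo = (lo_reg & BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT_MASK)
			>> BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT__SHIFT;
	uint64_t hi = (hi_reg & BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT_MASK)
			>> BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT__SHIFT;

	/* Assumed layout: H16BIT supplies bits 47:32 of the 48-bit count. */
	uint64_t count = (hi << 32) | lo;
	printf("mmio rd count = 0x%012llx\n", (unsigned long long)count);
	return 0;
}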
+//BIFC_A2S_SDP_PORT_CTRL
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_H__SHIFT 0x8
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_DIS__SHIFT 0xc
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_H_MASK 0x00000F00L
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_DIS_MASK 0x00001000L
+//BIFC_A2S_CNTL_SW0
+#define BIFC_A2S_CNTL_SW0__RDRSP_ERRMAP__SHIFT 0x0
+#define BIFC_A2S_CNTL_SW0__RDRSP_SEL_MODE__SHIFT 0x2
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_ENABLE__SHIFT 0x5
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_VALUE__SHIFT 0x6
+#define BIFC_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define BIFC_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define BIFC_A2S_CNTL_SW0__WRR_RD_WEIGHT__SHIFT 0x10
+#define BIFC_A2S_CNTL_SW0__WRR_WR_WEIGHT__SHIFT 0x18
+#define BIFC_A2S_CNTL_SW0__RDRSP_ERRMAP_MASK 0x00000003L
+#define BIFC_A2S_CNTL_SW0__RDRSP_SEL_MODE_MASK 0x0000001CL
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_ENABLE_MASK 0x00000020L
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_VALUE_MASK 0x000001C0L
+#define BIFC_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define BIFC_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define BIFC_A2S_CNTL_SW0__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define BIFC_A2S_CNTL_SW0__WRR_WR_WEIGHT_MASK 0xFF000000L
+//BIFC_A2S_MISC_CNTL
+#define BIFC_A2S_MISC_CNTL__BLKLVL_FOR_MSG__SHIFT 0x0
+#define BIFC_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS__SHIFT 0x2
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE__SHIFT 0x3
+#define BIFC_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN__SHIFT 0x4
+#define BIFC_A2S_MISC_CNTL__RSP_REORDER_DIS__SHIFT 0x5
+#define BIFC_A2S_MISC_CNTL__WRRSP_ACCUM_SEL__SHIFT 0x6
+#define BIFC_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x7
+#define BIFC_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x8
+#define BIFC_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY__SHIFT 0x9
+#define BIFC_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0xa
+#define BIFC_A2S_MISC_CNTL__WR_TAG_SET_MIN__SHIFT 0x10
+#define BIFC_A2S_MISC_CNTL__RD_TAG_SET_MIN__SHIFT 0x15
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_MODE__SHIFT 0x1a
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE__SHIFT 0x1b
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_EN__SHIFT 0x1c
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_VALUE__SHIFT 0x1d
+#define BIFC_A2S_MISC_CNTL__BLKLVL_FOR_MSG_MASK 0x00000003L
+#define BIFC_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS_MASK 0x00000004L
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE_MASK 0x00000008L
+#define BIFC_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN_MASK 0x00000010L
+#define BIFC_A2S_MISC_CNTL__RSP_REORDER_DIS_MASK 0x00000020L
+#define BIFC_A2S_MISC_CNTL__WRRSP_ACCUM_SEL_MASK 0x00000040L
+#define BIFC_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000080L
+#define BIFC_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000100L
+#define BIFC_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY_MASK 0x00000200L
+#define BIFC_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000400L
+#define BIFC_A2S_MISC_CNTL__WR_TAG_SET_MIN_MASK 0x001F0000L
+#define BIFC_A2S_MISC_CNTL__RD_TAG_SET_MIN_MASK 0x03E00000L
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_MODE_MASK 0x04000000L
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE_MASK 0x08000000L
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_EN_MASK 0x10000000L
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_VALUE_MASK 0x20000000L
+//BIFC_A2S_TAG_ALLOC_0
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR__SHIFT 0x0
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD__SHIFT 0x8
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR__SHIFT 0x10
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR_MASK 0x000000FFL
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD_MASK 0x0000FF00L
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR_MASK 0x00FF0000L
+//BIFC_A2S_TAG_ALLOC_1
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR__SHIFT 0x0
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR__SHIFT 0x10
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD__SHIFT 0x18
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR_MASK 0x000000FFL
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR_MASK 0x00FF0000L
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD_MASK 0xFF000000L
+//BIFC_A2S_CNTL_CL0
+#define BIFC_A2S_CNTL_CL0__NSNOOP_MAP__SHIFT 0x0
+#define BIFC_A2S_CNTL_CL0__REQPASSPW_VC0_MAP__SHIFT 0x2
+#define BIFC_A2S_CNTL_CL0__REQPASSPW_NVC0_MAP__SHIFT 0x4
+#define BIFC_A2S_CNTL_CL0__REQRSPPASSPW_VC0_MAP__SHIFT 0x6
+#define BIFC_A2S_CNTL_CL0__REQRSPPASSPW_NVC0_MAP__SHIFT 0x8
+#define BIFC_A2S_CNTL_CL0__BLKLVL_MAP__SHIFT 0xa
+#define BIFC_A2S_CNTL_CL0__DATERR_MAP__SHIFT 0xc
+#define BIFC_A2S_CNTL_CL0__EXOKAY_WR_MAP__SHIFT 0xe
+#define BIFC_A2S_CNTL_CL0__EXOKAY_RD_MAP__SHIFT 0x10
+#define BIFC_A2S_CNTL_CL0__RESP_WR_MAP__SHIFT 0x12
+#define BIFC_A2S_CNTL_CL0__RESP_RD_MAP__SHIFT 0x14
+#define BIFC_A2S_CNTL_CL0__NSNOOP_MAP_MASK 0x00000003L
+#define BIFC_A2S_CNTL_CL0__REQPASSPW_VC0_MAP_MASK 0x0000000CL
+#define BIFC_A2S_CNTL_CL0__REQPASSPW_NVC0_MAP_MASK 0x00000030L
+#define BIFC_A2S_CNTL_CL0__REQRSPPASSPW_VC0_MAP_MASK 0x000000C0L
+#define BIFC_A2S_CNTL_CL0__REQRSPPASSPW_NVC0_MAP_MASK 0x00000300L
+#define BIFC_A2S_CNTL_CL0__BLKLVL_MAP_MASK 0x00000C00L
+#define BIFC_A2S_CNTL_CL0__DATERR_MAP_MASK 0x00003000L
+#define BIFC_A2S_CNTL_CL0__EXOKAY_WR_MAP_MASK 0x0000C000L
+#define BIFC_A2S_CNTL_CL0__EXOKAY_RD_MAP_MASK 0x00030000L
+#define BIFC_A2S_CNTL_CL0__RESP_WR_MAP_MASK 0x000C0000L
+#define BIFC_A2S_CNTL_CL0__RESP_RD_MAP_MASK 0x00300000L
+
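[Editorial sketch, not part of the patch: each register above is described by a __SHIFT/_MASK pair per bit-field, which drivers use for read-modify-write of individual fields. The example below copies the BIFC_PERF_CNTL_0 macros verbatim from the definitions above and operates on a plain variable; in the driver the value would come from an MMIO read of the offset defined in the matching *_offset.h header.]

#include <stdint.h>
#include <stdio.h>

#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN__SHIFT 0x0
#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN_MASK 0x00000001L
#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT 0x10
#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK 0x007F0000L

int main(void)
{
	uint32_t val = 0;	/* would be an MMIO read in the driver */

	/* Set a one-bit field: shift the value into place. */
	val |= 1u << BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN__SHIFT;

	/* Update a multi-bit field: clear it via the mask, then OR in the
	 * new value shifted into position (0x12 is an arbitrary example). */
	val = (val & ~BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK) |
	      (0x12u << BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT);

	/* Read a field back: mask first, then shift down. */
	uint32_t sel = (val & BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK)
			>> BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT;
	printf("reg = 0x%08x, rd_sel = 0x%02x\n", val, sel);
	return 0;
}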
+
+// addressBlock: aid_nbio_nbif0_rcc_dwn_dev0_BIFDEC1
+//RCC_DWN_DEV0_2_DN_PCIE_RESERVED
+#define RCC_DWN_DEV0_2_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_2_DN_PCIE_SCRATCH
+#define RCC_DWN_DEV0_2_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_2_DN_PCIE_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define RCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+//RCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2
+#define RCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define RCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+//RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
+//RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+//RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
+//RCC_DWNP_DEV0_2_PCIE_ERR_CNTL
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR__SHIFT 0x12
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR__SHIFT 0x13
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR__SHIFT 0x14
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR_MASK 0x00040000L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR_MASK 0x00080000L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR_MASK 0x00100000L
+//RCC_DWNP_DEV0_2_PCIE_RX_CNTL
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+//RCC_DWNP_DEV0_2_PCIE_LC_CNTL2
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_2_PCIEP_STRAP_MISC
+#define RCC_DWNP_DEV0_2_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
+#define RCC_DWNP_DEV0_2_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
+//RCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP
+#define RCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_ep_dev0_BIFDEC1
+//RCC_EP_DEV0_2_EP_PCIE_SCRATCH
+#define RCC_EP_DEV0_2_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_2_EP_PCIE_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_EP_DEV0_2_EP_PCIE_INT_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+//RCC_EP_DEV0_2_EP_PCIE_INT_STATUS
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0_MASK 0x00000080L
+//RCC_EP_DEV0_2_EP_PCIE_RX_CNTL2
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+//RCC_EP_DEV0_2_EP_PCIE_BUS_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+//RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+//RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//RCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
+//RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_EP_PCIE_PME_CONTROL
+#define RCC_EP_DEV0_2_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
+//RCC_EP_DEV0_2_EP_PCIEP_RESERVED
+#define RCC_EP_DEV0_2_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_2_EP_PCIE_TX_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
+//RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
+//RCC_EP_DEV0_2_EP_PCIE_RX_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
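Editorial aside (not part of the patch): the four LC_GENn_EN_STRAP fields above are single-bit enables in bits 0-3, so their masks can simply be OR-ed together when composing a value. The snippet below only illustrates that bit composition under that assumption; it is not a programming sequence taken from the driver.

	/* advertise Gen2/Gen3/Gen4 straps, leave the Gen5 strap cleared */
	uint32_t lc_speed =
		RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK |
		RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK |
		RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK;
	/* lc_speed == 0x7: bits 0..2 set, bit 3 (Gen5) clear */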
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_BIFDEC1
+//RCC_DEV0_1_RCC_ERR_INT_CNTL
+#define RCC_DEV0_1_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN_MASK 0x00000001L
+//RCC_DEV0_1_RCC_BACO_CNTL_MISC
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS__SHIFT 0x1
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS_MASK 0x00000002L
+//RCC_DEV0_1_RCC_RESET_EN
+#define RCC_DEV0_1_RCC_RESET_EN__DB_APER_RESET_EN__SHIFT 0xf
+#define RCC_DEV0_1_RCC_RESET_EN__DB_APER_RESET_EN_MASK 0x00008000L
+//RCC_DEV0_2_RCC_VDM_SUPPORT
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
+//RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
+//RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
+//RCC_DEV0_1_RCC_GPUIOV_REGION
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__LFB_REGION__SHIFT 0x0
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__MAX_REGION__SHIFT 0x4
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__LFB_REGION_MASK 0x0000000FL
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__MAX_REGION_MASK 0x000000F0L
+//RCC_DEV0_1_RCC_GPU_HOSTVM_EN
+#define RCC_DEV0_1_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN_MASK 0x00000001L
+//RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN__SHIFT 0x1
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN_MASK 0x00000002L
+//RCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//RCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE_MASK 0xFFFFL
+//RCC_DEV0_1_RCC_PEER_REG_RANGE0
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_1_RCC_PEER_REG_RANGE1
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_2_RCC_BUS_CNTL
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
+#define RCC_DEV0_2_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT__SHIFT 0xc
+#define RCC_DEV0_2_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC__SHIFT 0xd
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
+#define RCC_DEV0_2_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT_MASK 0x00001000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC_MASK 0x00002000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
+//RCC_DEV0_1_RCC_CONFIG_CNTL
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x2
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x3
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
+//RCC_DEV0_1_RCC_CONFIG_F0_BASE
+#define RCC_DEV0_1_RCC_CONFIG_F0_BASE__F0_BASE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_F0_BASE__F0_BASE_MASK 0xFFFFFFFFL
+//RCC_DEV0_1_RCC_CONFIG_APER_SIZE
+#define RCC_DEV0_1_RCC_CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_APER_SIZE__APER_SIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE
+#define RCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x07FFFFFFL
+//RCC_DEV0_1_RCC_XDMA_LO
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x0
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x7FFFFFFFL
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_XDMA_HI
+#define RCC_DEV0_1_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x0
+#define RCC_DEV0_1_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x7FFFFFFFL
+//RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
+//RCC_DEV0_1_RCC_BUSNUM_CNTL1
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL1__ID_MASK__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL1__ID_MASK_MASK 0x000000FFL
+//RCC_DEV0_1_RCC_BUSNUM_LIST0
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID0__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID1__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID2__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID3__SHIFT 0x18
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID0_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID1_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID2_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID3_MASK 0xFF000000L
+//RCC_DEV0_1_RCC_BUSNUM_LIST1
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID4__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID5__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID6__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID7__SHIFT 0x18
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID4_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID5_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID6_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID7_MASK 0xFF000000L
+//RCC_DEV0_1_RCC_BUSNUM_CNTL2
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x11
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
+//RCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM
+#define RCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
+//RCC_DEV0_1_RCC_HOST_BUSNUM
+#define RCC_DEV0_1_RCC_HOST_BUSNUM__HOST_ID__SHIFT 0x0
+#define RCC_DEV0_1_RCC_HOST_BUSNUM__HOST_ID_MASK 0x0000FFFFL
+//RCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0__SHIFT 0x0
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1__SHIFT 0x8
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2__SHIFT 0x10
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3__SHIFT 0x18
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3_MASK 0xFF000000L
+//RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4__SHIFT 0x0
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5__SHIFT 0x8
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6__SHIFT 0x10
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7__SHIFT 0x18
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7_MASK 0xFF000000L
+//RCC_DEV0_2_RCC_DEV0_LINK_CNTL
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT__SHIFT 0x0
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY__SHIFT 0x8
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS__SHIFT 0x10
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS__SHIFT 0x11
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY_MASK 0x00000100L
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS_MASK 0x00010000L
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS_MASK 0x00020000L
+//RCC_DEV0_2_RCC_CMN_LINK_CNTL
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
+//RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
+//RCC_DEV0_2_RCC_LTR_LSWITCH_CNTL
+#define RCC_DEV0_2_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
+#define RCC_DEV0_2_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
+//RCC_DEV0_2_RCC_MH_ARB_CNTL
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_SYSDEC
+//BIF_BX1_PCIE_INDEX
+#define BIF_BX1_PCIE_INDEX__PCIE_INDEX__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX__PCIE_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_DATA
+#define BIF_BX1_PCIE_DATA__PCIE_DATA__SHIFT 0x0
+#define BIF_BX1_PCIE_DATA__PCIE_DATA_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_INDEX2
+#define BIF_BX1_PCIE_INDEX2__PCIE_INDEX2__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX2__PCIE_INDEX2_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_DATA2
+#define BIF_BX1_PCIE_DATA2__PCIE_DATA2__SHIFT 0x0
+#define BIF_BX1_PCIE_DATA2__PCIE_DATA2_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_INDEX_HI
+#define BIF_BX1_PCIE_INDEX_HI__PCIE_INDEX_HI__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX_HI__PCIE_INDEX_HI_MASK 0x000000FFL
+//BIF_BX1_PCIE_INDEX2_HI
+#define BIF_BX1_PCIE_INDEX2_HI__PCIE_INDEX2_HI__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX2_HI__PCIE_INDEX2_HI_MASK 0x000000FFL
+//BIF_BX1_SBIOS_SCRATCH_0
+#define BIF_BX1_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_1
+#define BIF_BX1_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_2
+#define BIF_BX1_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_3
+#define BIF_BX1_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_0
+#define BIF_BX1_BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_1
+#define BIF_BX1_BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_2
+#define BIF_BX1_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_3
+#define BIF_BX1_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_4
+#define BIF_BX1_BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_5
+#define BIF_BX1_BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_6
+#define BIF_BX1_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_7
+#define BIF_BX1_BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_8
+#define BIF_BX1_BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_9
+#define BIF_BX1_BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_10
+#define BIF_BX1_BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_11
+#define BIF_BX1_BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_12
+#define BIF_BX1_BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_13
+#define BIF_BX1_BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_14
+#define BIF_BX1_BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_15
+#define BIF_BX1_BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_RLC_INTR_CNTL
+//BIF_BX1_BIF_VCE_INTR_CNTL
+//BIF_BX1_BIF_UVD_INTR_CNTL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR1
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR2
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR3
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR4
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR5
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR6
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR7
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_CNTL
+#define BIF_BX1_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE_MASK 0x000000FFL
+//BIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL
+#define BIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL_MASK 0xFFFFFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ONE_CPL
+#define BIF_BX1_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL
+#define BIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_0
+#define BIF_BX1_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_1
+#define BIF_BX1_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_2
+#define BIF_BX1_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_3
+#define BIF_BX1_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_4
+#define BIF_BX1_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_5
+#define BIF_BX1_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_6
+#define BIF_BX1_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_7
+#define BIF_BX1_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_8
+#define BIF_BX1_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_9
+#define BIF_BX1_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_10
+#define BIF_BX1_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_11
+#define BIF_BX1_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_12
+#define BIF_BX1_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_13
+#define BIF_BX1_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_14
+#define BIF_BX1_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_15
+#define BIF_BX1_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_0
+#define BIF_BX1_FW_SCRATCH_0__FW_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_0__FW_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_1
+#define BIF_BX1_FW_SCRATCH_1__FW_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_1__FW_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_2
+#define BIF_BX1_FW_SCRATCH_2__FW_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_2__FW_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_3
+#define BIF_BX1_FW_SCRATCH_3__FW_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_3__FW_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_4
+#define BIF_BX1_FW_SCRATCH_4__FW_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_4__FW_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_5
+#define BIF_BX1_FW_SCRATCH_5__FW_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_5__FW_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_6
+#define BIF_BX1_FW_SCRATCH_6__FW_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_6__FW_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_7
+#define BIF_BX1_FW_SCRATCH_7__FW_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_7__FW_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_8
+#define BIF_BX1_FW_SCRATCH_8__FW_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_8__FW_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_9
+#define BIF_BX1_FW_SCRATCH_9__FW_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_9__FW_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_10
+#define BIF_BX1_FW_SCRATCH_10__FW_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_10__FW_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_11
+#define BIF_BX1_FW_SCRATCH_11__FW_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_11__FW_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_12
+#define BIF_BX1_FW_SCRATCH_12__FW_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_12__FW_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_13
+#define BIF_BX1_FW_SCRATCH_13__FW_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_13__FW_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_14
+#define BIF_BX1_FW_SCRATCH_14__FW_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_14__FW_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_15
+#define BIF_BX1_FW_SCRATCH_15__FW_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_15__FW_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_4
+#define BIF_BX1_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_5
+#define BIF_BX1_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_6
+#define BIF_BX1_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_7
+#define BIF_BX1_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_8
+#define BIF_BX1_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_9
+#define BIF_BX1_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_10
+#define BIF_BX1_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_11
+#define BIF_BX1_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_12
+#define BIF_BX1_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_13
+#define BIF_BX1_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_14
+#define BIF_BX1_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_15
+#define BIF_BX1_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_SYSPFVFDEC
+//BIF_BX_PF1_MM_INDEX
+#define BIF_BX_PF1_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_PF1_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_PF1_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_PF1_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_PF1_MM_DATA
+#define BIF_BX_PF1_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MM_INDEX_HI
+#define BIF_BX_PF1_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_PF1_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
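Editorial aside (not part of the patch): BIF_BX_PF1_MM_INDEX / BIF_BX_PF1_MM_DATA, like the BIF_BX1_PCIE_INDEX / BIF_BX1_PCIE_DATA pair earlier in this block set, form an index/data pair: software programs a target offset into the INDEX register and then accesses the DATA register to reach registers outside the directly mapped window, with MM_APER and MM_INDEX_HI qualifying and extending the index. A hedged sketch of that sequence is below; wreg32(), rreg32() and the *_offset constants are hypothetical stand-ins for the driver's real accessors and the matching offset header.

	#include <stdint.h>

	/* hypothetical direct MMIO accessors and register byte offsets */
	extern void wreg32(uint32_t byte_offset, uint32_t val);
	extern uint32_t rreg32(uint32_t byte_offset);
	extern const uint32_t mm_index_offset, mm_data_offset;

	/* read a register via the index/data pair: program its offset into
	 * MM_INDEX, then read the value back through MM_DATA */
	static uint32_t indirect_rreg(uint32_t target_offset)
	{
		wreg32(mm_index_offset,
		       (target_offset << BIF_BX_PF1_MM_INDEX__MM_OFFSET__SHIFT) &
		       BIF_BX_PF1_MM_INDEX__MM_OFFSET_MASK);
		return rreg32(mm_data_offset);
	}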
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_BIFDEC1
+//BIF_BX1_CC_BIF_BX_STRAP0
+#define BIF_BX1_CC_BIF_BX_STRAP0__STRAP_RESERVED__SHIFT 0x19
+#define BIF_BX1_CC_BIF_BX_STRAP0__STRAP_RESERVED_MASK 0xFE000000L
+//BIF_BX1_CC_BIF_BX_PINSTRAP0
+//BIF_BX1_BIF_MM_INDACCESS_CNTL
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__WRITE_DIS__SHIFT 0x0
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__WRITE_DIS_MASK 0x00000001L
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x00000002L
+//BIF_BX1_BUS_CNTL
+#define BIF_BX1_BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x6
+#define BIF_BX1_BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x7
+#define BIF_BX1_BUS_CNTL__SET_AZ_TC__SHIFT 0xa
+#define BIF_BX1_BUS_CNTL__SET_MC_TC__SHIFT 0xd
+#define BIF_BX1_BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x10
+#define BIF_BX1_BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x11
+#define BIF_BX1_BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x12
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS__SHIFT 0x18
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS__SHIFT 0x19
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS__SHIFT 0x1a
+#define BIF_BX1_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS__SHIFT 0x1b
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS__SHIFT 0x1c
+#define BIF_BX1_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN__SHIFT 0x1d
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_WR_EN__SHIFT 0x1e
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_RD_EN__SHIFT 0x1f
+#define BIF_BX1_BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
+#define BIF_BX1_BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
+#define BIF_BX1_BUS_CNTL__SET_AZ_TC_MASK 0x00001C00L
+#define BIF_BX1_BUS_CNTL__SET_MC_TC_MASK 0x0000E000L
+#define BIF_BX1_BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
+#define BIF_BX1_BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
+#define BIF_BX1_BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS_MASK 0x01000000L
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS_MASK 0x02000000L
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS_MASK 0x04000000L
+#define BIF_BX1_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS_MASK 0x08000000L
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS_MASK 0x10000000L
+#define BIF_BX1_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN_MASK 0x20000000L
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_WR_EN_MASK 0x40000000L
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_RD_EN_MASK 0x80000000L
+//BIF_BX1_BIF_SCRATCH0
+#define BIF_BX1_BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x0
+#define BIF_BX1_BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_SCRATCH1
+#define BIF_BX1_BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x0
+#define BIF_BX1_BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xFFFFFFFFL
+//BIF_BX1_BX_RESET_EN
+#define BIF_BX1_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN__SHIFT 0x10
+#define BIF_BX1_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN_MASK 0x00010000L
+//BIF_BX1_MM_CFGREGS_CNTL
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x0
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL__SHIFT 0x6
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x1f
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL_MASK 0x000000C0L
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x80000000L
+//BIF_BX1_BX_RESET_CNTL
+#define BIF_BX1_BX_RESET_CNTL__LINK_TRAIN_EN__SHIFT 0x0
+#define BIF_BX1_BX_RESET_CNTL__LINK_TRAIN_EN_MASK 0x00000001L
+//BIF_BX1_INTERRUPT_CNTL
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x0
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x1
+#define BIF_BX1_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x3
+#define BIF_BX1_INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x4
+#define BIF_BX1_INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x8
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN__SHIFT 0xf
+#define BIF_BX1_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN__SHIFT 0x10
+#define BIF_BX1_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS__SHIFT 0x11
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN__SHIFT 0x12
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
+#define BIF_BX1_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
+#define BIF_BX1_INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000F0L
+#define BIF_BX1_INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN_MASK 0x00008000L
+#define BIF_BX1_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN_MASK 0x00010000L
+#define BIF_BX1_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS_MASK 0x00020000L
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN_MASK 0x00040000L
+//BIF_BX1_INTERRUPT_CNTL2
+#define BIF_BX1_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x0
+#define BIF_BX1_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xFFFFFFFFL
+//BIF_BX1_CLKREQB_PAD_CNTL
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x0
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x1
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x2
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x3
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x5
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x6
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x7
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x8
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x9
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0xa
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0xb
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0xc
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y__SHIFT 0xd
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y_MASK 0x00002000L
+//BIF_BX1_BIF_FEATURES_CONTROL_MISC
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x0
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x1
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x2
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x3
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE__SHIFT 0xb
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN__SHIFT 0xc
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS__SHIFT 0xd
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS__SHIFT 0xe
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN__SHIFT 0xf
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT__SHIFT 0x10
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR__SHIFT 0x19
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE_MASK 0x00000800L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN_MASK 0x00001000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS_MASK 0x00002000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS_MASK 0x00004000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN_MASK 0x00008000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT_MASK 0x01FF0000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR_MASK 0x02000000L
+//BIF_BX1_HDP_ATOMIC_CONTROL_MISC
+#define BIF_BX1_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT__SHIFT 0x0
+#define BIF_BX1_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT_MASK 0x000000FFL
+//BIF_BX1_BIF_DOORBELL_CNTL
+#define BIF_BX1_BIF_DOORBELL_CNTL__SELF_RING_DIS__SHIFT 0x0
+#define BIF_BX1_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS__SHIFT 0x1
+#define BIF_BX1_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN__SHIFT 0x2
+#define BIF_BX1_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS__SHIFT 0x3
+#define BIF_BX1_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT 0x4
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS__SHIFT 0x18
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0__SHIFT 0x19
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1__SHIFT 0x1a
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2__SHIFT 0x1b
+#define BIF_BX1_BIF_DOORBELL_CNTL__SELF_RING_DIS_MASK 0x00000001L
+#define BIF_BX1_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS_MASK 0x00000002L
+#define BIF_BX1_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN_MASK 0x00000004L
+#define BIF_BX1_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS_MASK 0x00000008L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK 0x00000010L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS_MASK 0x01000000L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0_MASK 0x02000000L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1_MASK 0x04000000L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2_MASK 0x08000000L
+//BIF_BX1_BIF_DOORBELL_INT_CNTL
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS__SHIFT 0x0
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT 0x10
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x17
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE__SHIFT 0x19
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT 0x1a
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1c
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1d
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1e
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x1f
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK 0x00000001L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR_MASK 0x00010000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x00800000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE_MASK 0x02000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK 0x04000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x10000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x20000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x40000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x80000000L
+//BIF_BX1_BIF_FB_EN
+#define BIF_BX1_BIF_FB_EN__FB_READ_EN__SHIFT 0x0
+#define BIF_BX1_BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
+#define BIF_BX1_BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
+#define BIF_BX1_BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
+//BIF_BX1_BIF_INTR_CNTL
+#define BIF_BX1_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX1_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+//BIF_BX1_BIF_MST_TRANS_PENDING_VF
+#define BIF_BX1_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX1_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX1_BIF_SLV_TRANS_PENDING_VF
+#define BIF_BX1_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX1_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX1_BACO_CNTL
+#define BIF_BX1_BACO_CNTL__BACO_EN__SHIFT 0x0
+#define BIF_BX1_BACO_CNTL__BACO_DUMMY_EN__SHIFT 0x2
+#define BIF_BX1_BACO_CNTL__BACO_POWER_OFF__SHIFT 0x3
+#define BIF_BX1_BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT 0x5
+#define BIF_BX1_BACO_CNTL__BACO_RST_INTR_MASK__SHIFT 0x6
+#define BIF_BX1_BACO_CNTL__BACO_MODE__SHIFT 0x8
+#define BIF_BX1_BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x9
+#define BIF_BX1_BACO_CNTL__PWRGOOD_VDDSOC__SHIFT 0x10
+#define BIF_BX1_BACO_CNTL__BACO_AUTO_EXIT__SHIFT 0x1f
+#define BIF_BX1_BACO_CNTL__BACO_EN_MASK 0x00000001L
+#define BIF_BX1_BACO_CNTL__BACO_DUMMY_EN_MASK 0x00000004L
+#define BIF_BX1_BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
+#define BIF_BX1_BACO_CNTL__BACO_DSTATE_BYPASS_MASK 0x00000020L
+#define BIF_BX1_BACO_CNTL__BACO_RST_INTR_MASK_MASK 0x00000040L
+#define BIF_BX1_BACO_CNTL__BACO_MODE_MASK 0x00000100L
+#define BIF_BX1_BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000200L
+#define BIF_BX1_BACO_CNTL__PWRGOOD_VDDSOC_MASK 0x00010000L
+#define BIF_BX1_BACO_CNTL__BACO_AUTO_EXIT_MASK 0x80000000L
+//BIF_BX1_BIF_BACO_EXIT_TIME0
+#define BIF_BX1_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_BIF_BACO_EXIT_TIMER1
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN__SHIFT 0x18
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS__SHIFT 0x1a
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH__SHIFT 0x1b
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW__SHIFT 0x1c
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL__SHIFT 0x1d
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS__SHIFT 0x1f
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER_MASK 0x000FFFFFL
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN_MASK 0x01000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS_MASK 0x04000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH_MASK 0x08000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW_MASK 0x10000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL_MASK 0x60000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS_MASK 0x80000000L
+//BIF_BX1_BIF_BACO_EXIT_TIMER2
+#define BIF_BX1_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_BIF_BACO_EXIT_TIMER3
+#define BIF_BX1_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_BIF_BACO_EXIT_TIMER4
+#define BIF_BX1_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_MEM_TYPE_CNTL
+#define BIF_BX1_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3__SHIFT 0x0
+#define BIF_BX1_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3_MASK 0x00000001L
+//BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE__SHIFT 0x1
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL__LUT_BC_MODE__SHIFT 0x8
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE_MASK 0x00000001L
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE_MASK 0x00000002L
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_CNTL__LUT_BC_MODE_MASK 0x00000100L
+//BIF_BX1_NBIF_GFX_ADDR_LUT_0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_0__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_0__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_1
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_1__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_1__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_2
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_2__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_2__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_3
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_3__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_3__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_4
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_4__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_4__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_5
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_5__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_5__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_6
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_6__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_6__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_7
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_7__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_7__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_8
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_8__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_8__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_9
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_9__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_9__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_10
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_10__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_10__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_11
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_11__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_11__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_12
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_12__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_12__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_13
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_13__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_13__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_14
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_14__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_14__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_NBIF_GFX_ADDR_LUT_15
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_15__ADDR__SHIFT 0x0
+#define BIF_BX1_NBIF_GFX_ADDR_LUT_15__ADDR_MASK 0x00FFFFFFL
+//BIF_BX1_VF_REGWR_EN
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF0__SHIFT 0x0
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF1__SHIFT 0x1
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF2__SHIFT 0x2
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF3__SHIFT 0x3
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF4__SHIFT 0x4
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF5__SHIFT 0x5
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF6__SHIFT 0x6
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF7__SHIFT 0x7
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF8__SHIFT 0x8
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF9__SHIFT 0x9
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF10__SHIFT 0xa
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF11__SHIFT 0xb
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF12__SHIFT 0xc
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF13__SHIFT 0xd
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF14__SHIFT 0xe
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF15__SHIFT 0xf
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF16__SHIFT 0x10
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF17__SHIFT 0x11
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF18__SHIFT 0x12
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF19__SHIFT 0x13
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF20__SHIFT 0x14
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF21__SHIFT 0x15
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF22__SHIFT 0x16
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF23__SHIFT 0x17
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF24__SHIFT 0x18
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF25__SHIFT 0x19
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF30_MASK 0x40000000L
+//BIF_BX1_VF_DOORBELL_EN
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF0__SHIFT 0x0
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF1__SHIFT 0x1
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF2__SHIFT 0x2
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF3__SHIFT 0x3
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF4__SHIFT 0x4
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF5__SHIFT 0x5
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF6__SHIFT 0x6
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF7__SHIFT 0x7
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF8__SHIFT 0x8
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF9__SHIFT 0x9
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF10__SHIFT 0xa
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF11__SHIFT 0xb
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF12__SHIFT 0xc
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF13__SHIFT 0xd
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF14__SHIFT 0xe
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF15__SHIFT 0xf
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF16__SHIFT 0x10
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF17__SHIFT 0x11
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF18__SHIFT 0x12
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF19__SHIFT 0x13
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF20__SHIFT 0x14
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF21__SHIFT 0x15
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF22__SHIFT 0x16
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF23__SHIFT 0x17
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF24__SHIFT 0x18
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF25__SHIFT 0x19
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_RD_LOG_DIS__SHIFT 0x1f
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF30_MASK 0x40000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_RD_LOG_DIS_MASK 0x80000000L
+//BIF_BX1_VF_FB_EN
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF0__SHIFT 0x0
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF1__SHIFT 0x1
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF2__SHIFT 0x2
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF3__SHIFT 0x3
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF4__SHIFT 0x4
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF5__SHIFT 0x5
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF6__SHIFT 0x6
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF7__SHIFT 0x7
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF8__SHIFT 0x8
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF9__SHIFT 0x9
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF10__SHIFT 0xa
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF11__SHIFT 0xb
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF12__SHIFT 0xc
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF13__SHIFT 0xd
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF14__SHIFT 0xe
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF15__SHIFT 0xf
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF16__SHIFT 0x10
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF17__SHIFT 0x11
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF18__SHIFT 0x12
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF19__SHIFT 0x13
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF20__SHIFT 0x14
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF21__SHIFT 0x15
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF22__SHIFT 0x16
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF23__SHIFT 0x17
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF24__SHIFT 0x18
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF25__SHIFT 0x19
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF30_MASK 0x40000000L
+//BIF_BX1_VF_REGWR_STATUS
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF0__SHIFT 0x0
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF1__SHIFT 0x1
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF2__SHIFT 0x2
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF3__SHIFT 0x3
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF4__SHIFT 0x4
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF5__SHIFT 0x5
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF6__SHIFT 0x6
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF7__SHIFT 0x7
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF8__SHIFT 0x8
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF9__SHIFT 0x9
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF10__SHIFT 0xa
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF11__SHIFT 0xb
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF12__SHIFT 0xc
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF13__SHIFT 0xd
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF14__SHIFT 0xe
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF15__SHIFT 0xf
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF16__SHIFT 0x10
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF17__SHIFT 0x11
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF18__SHIFT 0x12
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF19__SHIFT 0x13
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF20__SHIFT 0x14
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF21__SHIFT 0x15
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF22__SHIFT 0x16
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF23__SHIFT 0x17
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF24__SHIFT 0x18
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF25__SHIFT 0x19
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF30_MASK 0x40000000L
+//BIF_BX1_VF_DOORBELL_STATUS
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF0__SHIFT 0x0
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF1__SHIFT 0x1
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF2__SHIFT 0x2
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF3__SHIFT 0x3
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF4__SHIFT 0x4
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF5__SHIFT 0x5
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF6__SHIFT 0x6
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF7__SHIFT 0x7
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF8__SHIFT 0x8
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF9__SHIFT 0x9
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF10__SHIFT 0xa
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF11__SHIFT 0xb
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF12__SHIFT 0xc
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF13__SHIFT 0xd
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF14__SHIFT 0xe
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF15__SHIFT 0xf
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF16__SHIFT 0x10
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF17__SHIFT 0x11
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF18__SHIFT 0x12
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF19__SHIFT 0x13
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF20__SHIFT 0x14
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF21__SHIFT 0x15
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF22__SHIFT 0x16
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF23__SHIFT 0x17
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF24__SHIFT 0x18
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF25__SHIFT 0x19
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF30_MASK 0x40000000L
+//BIF_BX1_VF_FB_STATUS
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF0__SHIFT 0x0
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF1__SHIFT 0x1
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF2__SHIFT 0x2
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF3__SHIFT 0x3
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF4__SHIFT 0x4
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF5__SHIFT 0x5
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF6__SHIFT 0x6
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF7__SHIFT 0x7
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF8__SHIFT 0x8
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF9__SHIFT 0x9
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF10__SHIFT 0xa
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF11__SHIFT 0xb
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF12__SHIFT 0xc
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF13__SHIFT 0xd
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF14__SHIFT 0xe
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF15__SHIFT 0xf
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF16__SHIFT 0x10
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF17__SHIFT 0x11
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF18__SHIFT 0x12
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF19__SHIFT 0x13
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF20__SHIFT 0x14
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF21__SHIFT 0x15
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF22__SHIFT 0x16
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF23__SHIFT 0x17
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF24__SHIFT 0x18
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF25__SHIFT 0x19
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF30_MASK 0x40000000L
+//BIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL
+#define BIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX1_REMAP_HDP_REG_FLUSH_CNTL
+#define BIF_BX1_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX1_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX1_BIF_RB_CNTL
+#define BIF_BX1_BIF_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define BIF_BX1_BIF_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x9
+#define BIF_BX1_BIF_RB_CNTL__BIF_RB_TRAN__SHIFT 0x11
+#define BIF_BX1_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL__SHIFT 0x19
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY__SHIFT 0x1a
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_ARB_MODE__SHIFT 0x1d
+#define BIF_BX1_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE__SHIFT 0x1e
+#define BIF_BX1_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define BIF_BX1_BIF_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define BIF_BX1_BIF_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003E00L
+#define BIF_BX1_BIF_RB_CNTL__BIF_RB_TRAN_MASK 0x00020000L
+#define BIF_BX1_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL_MASK 0x02000000L
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY_MASK 0x1C000000L
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_ARB_MODE_MASK 0x20000000L
+#define BIF_BX1_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE_MASK 0x40000000L
+#define BIF_BX1_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//BIF_BX1_BIF_RB_BASE
+#define BIF_BX1_BIF_RB_BASE__ADDR__SHIFT 0x0
+#define BIF_BX1_BIF_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_RB_RPTR
+#define BIF_BX1_BIF_RB_RPTR__OFFSET__SHIFT 0x2
+#define BIF_BX1_BIF_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX1_BIF_RB_WPTR
+#define BIF_BX1_BIF_RB_WPTR__BIF_RB_OVERFLOW__SHIFT 0x0
+#define BIF_BX1_BIF_RB_WPTR__OFFSET__SHIFT 0x2
+#define BIF_BX1_BIF_RB_WPTR__BIF_RB_OVERFLOW_MASK 0x00000001L
+#define BIF_BX1_BIF_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX1_BIF_RB_WPTR_ADDR_HI
+#define BIF_BX1_BIF_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define BIF_BX1_BIF_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000FFL
+//BIF_BX1_BIF_RB_WPTR_ADDR_LO
+#define BIF_BX1_BIF_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define BIF_BX1_BIF_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//BIF_BX1_MAILBOX_INDEX
+#define BIF_BX1_MAILBOX_INDEX__MAILBOX_INDEX__SHIFT 0x0
+#define BIF_BX1_MAILBOX_INDEX__MAILBOX_INDEX_MASK 0x0000001FL
+//BIF_BX1_BIF_MP1_INTR_CTRL
+#define BIF_BX1_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE__SHIFT 0x0
+#define BIF_BX1_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE_MASK 0x00000001L
+//BIF_BX1_BIF_PERSTB_PAD_CNTL
+#define BIF_BX1_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL_MASK 0x0000FFFFL
+//BIF_BX1_BIF_PX_EN_PAD_CNTL
+#define BIF_BX1_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL_MASK 0x00000FFFL
+//BIF_BX1_BIF_REFPADKIN_PAD_CNTL
+#define BIF_BX1_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL_MASK 0x000000FFL
+//BIF_BX1_BIF_CLKREQB_PAD_CNTL
+#define BIF_BX1_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_MASK 0x7FFFFFFFL
+//BIF_BX1_BIF_PWRBRK_PAD_CNTL
+#define BIF_BX1_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL_MASK 0x000000FFL
+//BIF_BX1_BIF_VCN0_GPUIOV_CFG_SIZE
+#define BIF_BX1_BIF_VCN0_GPUIOV_CFG_SIZE__VCN0_GPUIOV_CFG_SIZE__SHIFT 0x0
+#define BIF_BX1_BIF_VCN0_GPUIOV_CFG_SIZE__VCN0_GPUIOV_CFG_SIZE_MASK 0x0000000FL
+//BIF_BX1_BIF_VCN1_GPUIOV_CFG_SIZE
+#define BIF_BX1_BIF_VCN1_GPUIOV_CFG_SIZE__VCN1_GPUIOV_CFG_SIZE__SHIFT 0x0
+#define BIF_BX1_BIF_VCN1_GPUIOV_CFG_SIZE__VCN1_GPUIOV_CFG_SIZE_MASK 0x0000000FL
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_pf_BIFPFVFDEC1
+//BIF_BX_PF1_BIF_BME_STATUS
+#define BIF_BX_PF1_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_PF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_PF1_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_PF1_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_GPU_HDP_FLUSH_REQ
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF1_GPU_HDP_FLUSH_DONE
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF1_BIF_TRANS_PENDING
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_PF1_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_PF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_PF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_CONTROL
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_PF1_MAILBOX_INT_CNTL
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_PF1_BIF_VMHV_MAILBOX
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+//BIF_BX_PF1_PARTITION_COMPUTE_CAP
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__SPX_SUPPORT__SHIFT 0x0
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__DPX_SUPPORT__SHIFT 0x1
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__TPX_SUPPORT__SHIFT 0x2
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__QPX_SUPPORT__SHIFT 0x3
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__CPX_SUPPORT__SHIFT 0x4
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__CPX_AVAILABILITY__SHIFT 0xa
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__SPX_SUPPORT_MASK 0x00000001L
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__DPX_SUPPORT_MASK 0x00000002L
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__TPX_SUPPORT_MASK 0x00000004L
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__QPX_SUPPORT_MASK 0x00000008L
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__CPX_SUPPORT_MASK 0x00000010L
+#define BIF_BX_PF1_PARTITION_COMPUTE_CAP__CPX_AVAILABILITY_MASK 0x0003FC00L
+//BIF_BX_PF1_PARTITION_MEM_CAP
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS1_SUPPORT__SHIFT 0x0
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS2_SUPPORT__SHIFT 0x1
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS3_SUPPORT__SHIFT 0x2
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS4_SUPPORT__SHIFT 0x3
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS6_SUPPORT__SHIFT 0x5
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS8_SUPPORT__SHIFT 0x7
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS1_SUPPORT_MASK 0x00000001L
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS2_SUPPORT_MASK 0x00000002L
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS3_SUPPORT_MASK 0x00000004L
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS4_SUPPORT_MASK 0x00000008L
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS6_SUPPORT_MASK 0x00000020L
+#define BIF_BX_PF1_PARTITION_MEM_CAP__NPS8_SUPPORT_MASK 0x00000080L
+//BIF_BX_PF1_PARTITION_COMPUTE_STATUS
+#define BIF_BX_PF1_PARTITION_COMPUTE_STATUS__PARTITION_MODE__SHIFT 0x4
+#define BIF_BX_PF1_PARTITION_COMPUTE_STATUS__PARTITION_MODE_MASK 0x000000F0L
+//BIF_BX_PF1_PARTITION_MEM_STATUS
+#define BIF_BX_PF1_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0
+#define BIF_BX_PF1_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4
+#define BIF_BX_PF1_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL
+#define BIF_BX_PF1_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_strap_BIFDEC1:1
+//RCC_STRAP2_RCC_BIF_STRAP0
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN__SHIFT 0x2
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN__SHIFT 0x6
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN__SHIFT 0xb
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR__SHIFT 0xc
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR__SHIFT 0xd
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0xe
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0xf
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN__SHIFT 0x11
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_AUD_PIN__SHIFT 0x12
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN3_DIS__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4__SHIFT 0x19
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_QUICKSIM_START__SHIFT 0x1a
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1b
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS__SHIFT 0x1c
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN__SHIFT 0x1d
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE__SHIFT 0x1e
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN_MASK 0x00000004L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN_MASK 0x00000038L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN_MASK 0x00000040L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR_MASK 0x00000400L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN_MASK 0x00000800L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR_MASK 0x00001000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR_MASK 0x00002000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00004000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00008000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_MASK 0x00010000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN_MASK 0x00020000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_AUD_PIN_MASK 0x000C0000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN3_DIS_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4_MASK 0x02000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_QUICKSIM_START_MASK 0x04000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING_MASK 0x08000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS_MASK 0x10000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN_MASK 0x20000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE_MASK 0x40000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP1
+#define RCC_STRAP2_RCC_BIF_STRAP1__FUSESTRAP_VALID__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP1__ROMSTRAP_VALID__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP1__WRITE_DISABLE__SHIFT 0x2
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS__SHIFT 0x5
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x6
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_READY__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE__SHIFT 0xc
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_HWREV_LSB2__SHIFT 0xd
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWREV_LSB2__SHIFT 0xf
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY__SHIFT 0x11
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS__SHIFT 0x12
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN__SHIFT 0x13
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN__SHIFT 0x14
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGIN_EN__SHIFT 0x15
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN__SHIFT 0x16
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN__SHIFT 0x17
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN__SHIFT 0x19
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE__SHIFT 0x1a
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x1b
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN_EP__SHIFT 0x1d
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN__SHIFT 0x1e
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN_DN__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP1__FUSESTRAP_VALID_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP1__ROMSTRAP_VALID_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP1__WRITE_DISABLE_MASK 0x00000004L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN_MASK 0x00000008L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS_MASK 0x00000020L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00000040L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_READY_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE_MASK 0x00000C00L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE_MASK 0x00001000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_HWREV_LSB2_MASK 0x00006000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWREV_LSB2_MASK 0x00018000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY_MASK 0x00020000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS_MASK 0x00040000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN_MASK 0x00080000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN_MASK 0x00100000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGIN_EN_MASK 0x00200000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN_MASK 0x00400000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN_MASK 0x00800000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN_MASK 0x02000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE_MASK 0x04000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN_MASK 0x18000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN_EP_MASK 0x20000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN_MASK 0x40000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN_DN_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP2
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA__SHIFT 0x5
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA__SHIFT 0x6
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2__SHIFT 0xd
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS__SHIFT 0xe
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN__SHIFT 0xf
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS_MASK 0x00000008L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA_MASK 0x00000020L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA_MASK 0x00000040L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN_MASK 0x00000C00L
+#define RCC_STRAP2_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2_MASK 0x00002000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN_MASK 0x00008000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP3
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_BIF_STRAP4
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_BIF_STRAP5
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN__SHIFT 0x11
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS__SHIFT 0x12
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS__SHIFT 0x13
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS__SHIFT 0x14
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS__SHIFT 0x15
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE__SHIFT 0x16
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x19
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1b
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER__SHIFT 0x1c
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN_MASK 0x00010000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN_MASK 0x00020000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS_MASK 0x00040000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS_MASK 0x00080000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS_MASK 0x00100000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS_MASK 0x00200000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE_MASK 0x00C00000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED_MASK 0x06000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ_MASK 0x08000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER_MASK 0x70000000L
+//RCC_STRAP2_RCC_BIF_STRAP6
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GEN5_DIS__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN__SHIFT 0x2
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GEN5_DIS_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN_MASK 0x00000004L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0_MASK 0x00E00000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0_MASK 0x0E000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK 0x70000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0__SHIFT 0x5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0_MASK 0x00000010L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0_MASK 0x00000020L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0_MASK 0x0007FFC0L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0_MASK 0x0FFF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0_MASK 0x00FFFFFFL
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0_MASK 0x000FFE00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0_MASK 0xFFF00000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0_MASK 0x00000010L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0__SHIFT 0x5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0__SHIFT 0xd
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0__SHIFT 0xf
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0_MASK 0x00000010L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0_MASK 0x00000020L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0_MASK 0x00000E00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0_MASK 0x00001000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0_MASK 0x00002000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0_MASK 0x00004000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0_MASK 0x00008000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0_MASK 0x00700000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0_MASK 0x03800000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0_MASK 0x1C000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0_MASK 0xE0000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0xb
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0_MASK 0x00000038L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0_MASK 0x00000600L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x00003800L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0_MASK 0x0003C000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x001C0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0_MASK 0x01E00000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0_MASK 0x06000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0_MASK 0x18000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0_MASK 0xFF000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0_MASK 0x00800000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0_MASK 0x02000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0__SHIFT 0x5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000010L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0_MASK 0x00000020L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x00000F00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0x0000F000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0_MASK 0x00030000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0_MASK 0x00E00000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x0F000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0xF0000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0_MASK 0x00000F00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0_MASK 0x0000F000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0_MASK 0x1F000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0_MASK 0xE0000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0_MASK 0xFF000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP1
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0_MASK 0xFF000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0_MASK 0x0000FFFFL
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_INVALIDATE_QUEUE_DEPTH_DEV0_F0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_PAGE_ALIGNED_REQUEST_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0_MASK 0x00FFF000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_INVALIDATE_QUEUE_DEPTH_DEV0_F0_MASK 0x3E000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_ATS_PAGE_ALIGNED_REQUEST_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0_MASK 0x00FFF000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0_MASK 0x00001000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0_MASK 0x01FFE000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP2
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0__SHIFT 0xf
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0_MASK 0x00003E00L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0_MASK 0x00004000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0_MASK 0x00008000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0_MASK 0x00800000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0_MASK 0x07000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP26
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP26__STRAP_GPUIOV_VSEC_LENGTH_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP26__STRAP_GPUIOV_VSEC_LENGTH_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP3
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0_MASK 0x00E00000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP4
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0__SHIFT 0xa
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0_MASK 0x000003FFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0_MASK 0x00000400L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0_MASK 0x0F800000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0_MASK 0x70000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP5
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00000007L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0_MASK 0x00000070L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0_MASK 0x00001E00L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0_MASK 0x0000E000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00070000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0_MASK 0x00780000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0_MASK 0x03800000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0_MASK 0xC0000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP9
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0_MASK 0x00C00000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0_MASK 0x0F000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1_MASK 0x000F0000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1_MASK 0x00F00000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP2
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_VC_EN_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1_MASK 0x00003E00L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1_MASK 0x00004000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_VC_EN_DEV0_F1_MASK 0x00800000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1_MASK 0x07000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP20
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP21
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP22
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP23
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP24
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP25
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP3
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP4
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1_MASK 0x0F800000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1_MASK 0x70000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP5
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1_MASK 0x38000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP6
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_EN_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_EN_DEV0_F1_MASK 0x00000001L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP7
+
+
+// addressBlock: aid_nbio_nbif0_gdc_hst_sion_SIONDEC
+//S2A_DOORBELL_ENTRY_0_CTRL
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_1_CTRL
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_2_CTRL
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_3_CTRL
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_4_CTRL
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_5_CTRL
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_6_CTRL
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_7_CTRL
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_8_CTRL
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_9_CTRL
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_10_CTRL
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_11_CTRL
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_12_CTRL
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_13_CTRL
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_14_CTRL
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_ENTRY_15_CTRL
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_ENABLE__SHIFT 0x0
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWID__SHIFT 0x1
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_FENCE_ENABLE__SHIFT 0x6
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_OFFSET__SHIFT 0x7
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_SIZE__SHIFT 0x11
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_ENABLE_MASK 0x00000001L
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWID_MASK 0x0000003EL
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_FENCE_ENABLE_MASK 0x00000040L
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_OFFSET_MASK 0x0001FF80L
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_SIZE_MASK 0x01FE0000L
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//S2A_DOORBELL_COMMON_CTRL_REG
+#define S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x0
+#define S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00000001L
+
+
+// addressBlock: aid_nbio_nbif0_gdc_GDCDEC
+//GDC1_A2S_CNTL_CL0
+#define GDC1_A2S_CNTL_CL0__NSNOOP_MAP__SHIFT 0x0
+#define GDC1_A2S_CNTL_CL0__REQPASSPW_VC0_MAP__SHIFT 0x2
+#define GDC1_A2S_CNTL_CL0__REQPASSPW_NVC0_MAP__SHIFT 0x4
+#define GDC1_A2S_CNTL_CL0__REQRSPPASSPW_VC0_MAP__SHIFT 0x6
+#define GDC1_A2S_CNTL_CL0__REQRSPPASSPW_NVC0_MAP__SHIFT 0x8
+#define GDC1_A2S_CNTL_CL0__BLKLVL_MAP__SHIFT 0xa
+#define GDC1_A2S_CNTL_CL0__DATERR_MAP__SHIFT 0xc
+#define GDC1_A2S_CNTL_CL0__EXOKAY_WR_MAP__SHIFT 0xe
+#define GDC1_A2S_CNTL_CL0__EXOKAY_RD_MAP__SHIFT 0x10
+#define GDC1_A2S_CNTL_CL0__RESP_WR_MAP__SHIFT 0x12
+#define GDC1_A2S_CNTL_CL0__RESP_RD_MAP__SHIFT 0x14
+#define GDC1_A2S_CNTL_CL0__RDRSP_ERRMAP__SHIFT 0x16
+#define GDC1_A2S_CNTL_CL0__RDRSP_SEL_MODE__SHIFT 0x18
+#define GDC1_A2S_CNTL_CL0__STATIC_VC_ENABLE__SHIFT 0x1b
+#define GDC1_A2S_CNTL_CL0__STATIC_VC_VALUE__SHIFT 0x1c
+#define GDC1_A2S_CNTL_CL0__NSNOOP_MAP_MASK 0x00000003L
+#define GDC1_A2S_CNTL_CL0__REQPASSPW_VC0_MAP_MASK 0x0000000CL
+#define GDC1_A2S_CNTL_CL0__REQPASSPW_NVC0_MAP_MASK 0x00000030L
+#define GDC1_A2S_CNTL_CL0__REQRSPPASSPW_VC0_MAP_MASK 0x000000C0L
+#define GDC1_A2S_CNTL_CL0__REQRSPPASSPW_NVC0_MAP_MASK 0x00000300L
+#define GDC1_A2S_CNTL_CL0__BLKLVL_MAP_MASK 0x00000C00L
+#define GDC1_A2S_CNTL_CL0__DATERR_MAP_MASK 0x00003000L
+#define GDC1_A2S_CNTL_CL0__EXOKAY_WR_MAP_MASK 0x0000C000L
+#define GDC1_A2S_CNTL_CL0__EXOKAY_RD_MAP_MASK 0x00030000L
+#define GDC1_A2S_CNTL_CL0__RESP_WR_MAP_MASK 0x000C0000L
+#define GDC1_A2S_CNTL_CL0__RESP_RD_MAP_MASK 0x00300000L
+#define GDC1_A2S_CNTL_CL0__RDRSP_ERRMAP_MASK 0x00C00000L
+#define GDC1_A2S_CNTL_CL0__RDRSP_SEL_MODE_MASK 0x07000000L
+#define GDC1_A2S_CNTL_CL0__STATIC_VC_ENABLE_MASK 0x08000000L
+#define GDC1_A2S_CNTL_CL0__STATIC_VC_VALUE_MASK 0x70000000L
+//GDC1_A2S_CNTL_CL1
+#define GDC1_A2S_CNTL_CL1__NSNOOP_MAP__SHIFT 0x0
+#define GDC1_A2S_CNTL_CL1__REQPASSPW_VC0_MAP__SHIFT 0x2
+#define GDC1_A2S_CNTL_CL1__REQPASSPW_NVC0_MAP__SHIFT 0x4
+#define GDC1_A2S_CNTL_CL1__REQRSPPASSPW_VC0_MAP__SHIFT 0x6
+#define GDC1_A2S_CNTL_CL1__REQRSPPASSPW_NVC0_MAP__SHIFT 0x8
+#define GDC1_A2S_CNTL_CL1__BLKLVL_MAP__SHIFT 0xa
+#define GDC1_A2S_CNTL_CL1__DATERR_MAP__SHIFT 0xc
+#define GDC1_A2S_CNTL_CL1__EXOKAY_WR_MAP__SHIFT 0xe
+#define GDC1_A2S_CNTL_CL1__EXOKAY_RD_MAP__SHIFT 0x10
+#define GDC1_A2S_CNTL_CL1__RESP_WR_MAP__SHIFT 0x12
+#define GDC1_A2S_CNTL_CL1__RESP_RD_MAP__SHIFT 0x14
+#define GDC1_A2S_CNTL_CL1__RDRSP_ERRMAP__SHIFT 0x16
+#define GDC1_A2S_CNTL_CL1__RDRSP_SEL_MODE__SHIFT 0x18
+#define GDC1_A2S_CNTL_CL1__STATIC_VC_ENABLE__SHIFT 0x1b
+#define GDC1_A2S_CNTL_CL1__STATIC_VC_VALUE__SHIFT 0x1c
+#define GDC1_A2S_CNTL_CL1__NSNOOP_MAP_MASK 0x00000003L
+#define GDC1_A2S_CNTL_CL1__REQPASSPW_VC0_MAP_MASK 0x0000000CL
+#define GDC1_A2S_CNTL_CL1__REQPASSPW_NVC0_MAP_MASK 0x00000030L
+#define GDC1_A2S_CNTL_CL1__REQRSPPASSPW_VC0_MAP_MASK 0x000000C0L
+#define GDC1_A2S_CNTL_CL1__REQRSPPASSPW_NVC0_MAP_MASK 0x00000300L
+#define GDC1_A2S_CNTL_CL1__BLKLVL_MAP_MASK 0x00000C00L
+#define GDC1_A2S_CNTL_CL1__DATERR_MAP_MASK 0x00003000L
+#define GDC1_A2S_CNTL_CL1__EXOKAY_WR_MAP_MASK 0x0000C000L
+#define GDC1_A2S_CNTL_CL1__EXOKAY_RD_MAP_MASK 0x00030000L
+#define GDC1_A2S_CNTL_CL1__RESP_WR_MAP_MASK 0x000C0000L
+#define GDC1_A2S_CNTL_CL1__RESP_RD_MAP_MASK 0x00300000L
+#define GDC1_A2S_CNTL_CL1__RDRSP_ERRMAP_MASK 0x00C00000L
+#define GDC1_A2S_CNTL_CL1__RDRSP_SEL_MODE_MASK 0x07000000L
+#define GDC1_A2S_CNTL_CL1__STATIC_VC_ENABLE_MASK 0x08000000L
+#define GDC1_A2S_CNTL_CL1__STATIC_VC_VALUE_MASK 0x70000000L
+//GDC1_A2S_CNTL3_CL0
+#define GDC1_A2S_CNTL3_CL0__FORCE_WR_PH__SHIFT 0x0
+#define GDC1_A2S_CNTL3_CL0__FORCE_WR_STEERING__SHIFT 0x2
+#define GDC1_A2S_CNTL3_CL0__WR_ST_TAG_MODE__SHIFT 0x3
+#define GDC1_A2S_CNTL3_CL0__FORCE_WR_ST_ENTRY__SHIFT 0x4
+#define GDC1_A2S_CNTL3_CL0__FORCE_WR_PH_MASK 0x00000003L
+#define GDC1_A2S_CNTL3_CL0__FORCE_WR_STEERING_MASK 0x00000004L
+#define GDC1_A2S_CNTL3_CL0__WR_ST_TAG_MODE_MASK 0x00000008L
+#define GDC1_A2S_CNTL3_CL0__FORCE_WR_ST_ENTRY_MASK 0x000003F0L
+//GDC1_A2S_CNTL3_CL1
+#define GDC1_A2S_CNTL3_CL1__FORCE_WR_PH__SHIFT 0x0
+#define GDC1_A2S_CNTL3_CL1__FORCE_WR_STEERING__SHIFT 0x2
+#define GDC1_A2S_CNTL3_CL1__WR_ST_TAG_MODE__SHIFT 0x3
+#define GDC1_A2S_CNTL3_CL1__FORCE_WR_ST_ENTRY__SHIFT 0x4
+#define GDC1_A2S_CNTL3_CL1__FORCE_WR_PH_MASK 0x00000003L
+#define GDC1_A2S_CNTL3_CL1__FORCE_WR_STEERING_MASK 0x00000004L
+#define GDC1_A2S_CNTL3_CL1__WR_ST_TAG_MODE_MASK 0x00000008L
+#define GDC1_A2S_CNTL3_CL1__FORCE_WR_ST_ENTRY_MASK 0x000003F0L
+//GDC1_A2S_CNTL_SW0
+#define GDC1_A2S_CNTL_SW0__STATIC_VC_ENABLE__SHIFT 0x0
+#define GDC1_A2S_CNTL_SW0__STATIC_VC_VALUE__SHIFT 0x1
+#define GDC1_A2S_CNTL_SW0__FORCE_RSP_REORDER_EN__SHIFT 0x6
+#define GDC1_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define GDC1_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define GDC1_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define GDC1_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define GDC1_A2S_CNTL_SW0__WRR_RD_WEIGHT__SHIFT 0x10
+#define GDC1_A2S_CNTL_SW0__WRR_WR_WEIGHT__SHIFT 0x18
+#define GDC1_A2S_CNTL_SW0__STATIC_VC_ENABLE_MASK 0x00000001L
+#define GDC1_A2S_CNTL_SW0__STATIC_VC_VALUE_MASK 0x0000000EL
+#define GDC1_A2S_CNTL_SW0__FORCE_RSP_REORDER_EN_MASK 0x00000040L
+#define GDC1_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define GDC1_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define GDC1_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define GDC1_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define GDC1_A2S_CNTL_SW0__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define GDC1_A2S_CNTL_SW0__WRR_WR_WEIGHT_MASK 0xFF000000L
+//GDC1_A2S_CNTL_SW1
+#define GDC1_A2S_CNTL_SW1__STATIC_VC_ENABLE__SHIFT 0x0
+#define GDC1_A2S_CNTL_SW1__STATIC_VC_VALUE__SHIFT 0x1
+#define GDC1_A2S_CNTL_SW1__FORCE_RSP_REORDER_EN__SHIFT 0x6
+#define GDC1_A2S_CNTL_SW1__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define GDC1_A2S_CNTL_SW1__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define GDC1_A2S_CNTL_SW1__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define GDC1_A2S_CNTL_SW1__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define GDC1_A2S_CNTL_SW1__WRR_RD_WEIGHT__SHIFT 0x10
+#define GDC1_A2S_CNTL_SW1__WRR_WR_WEIGHT__SHIFT 0x18
+#define GDC1_A2S_CNTL_SW1__STATIC_VC_ENABLE_MASK 0x00000001L
+#define GDC1_A2S_CNTL_SW1__STATIC_VC_VALUE_MASK 0x0000000EL
+#define GDC1_A2S_CNTL_SW1__FORCE_RSP_REORDER_EN_MASK 0x00000040L
+#define GDC1_A2S_CNTL_SW1__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define GDC1_A2S_CNTL_SW1__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define GDC1_A2S_CNTL_SW1__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define GDC1_A2S_CNTL_SW1__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define GDC1_A2S_CNTL_SW1__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define GDC1_A2S_CNTL_SW1__WRR_WR_WEIGHT_MASK 0xFF000000L
+//GDC1_A2S_CNTL_SW2
+#define GDC1_A2S_CNTL_SW2__STATIC_VC_ENABLE__SHIFT 0x0
+#define GDC1_A2S_CNTL_SW2__STATIC_VC_VALUE__SHIFT 0x1
+#define GDC1_A2S_CNTL_SW2__FORCE_RSP_REORDER_EN__SHIFT 0x6
+#define GDC1_A2S_CNTL_SW2__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define GDC1_A2S_CNTL_SW2__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define GDC1_A2S_CNTL_SW2__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define GDC1_A2S_CNTL_SW2__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define GDC1_A2S_CNTL_SW2__WRR_RD_WEIGHT__SHIFT 0x10
+#define GDC1_A2S_CNTL_SW2__WRR_WR_WEIGHT__SHIFT 0x18
+#define GDC1_A2S_CNTL_SW2__STATIC_VC_ENABLE_MASK 0x00000001L
+#define GDC1_A2S_CNTL_SW2__STATIC_VC_VALUE_MASK 0x0000000EL
+#define GDC1_A2S_CNTL_SW2__FORCE_RSP_REORDER_EN_MASK 0x00000040L
+#define GDC1_A2S_CNTL_SW2__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define GDC1_A2S_CNTL_SW2__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define GDC1_A2S_CNTL_SW2__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define GDC1_A2S_CNTL_SW2__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define GDC1_A2S_CNTL_SW2__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define GDC1_A2S_CNTL_SW2__WRR_WR_WEIGHT_MASK 0xFF000000L
+//GDC1_A2S_TAG_ALLOC_0
+#define GDC1_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR__SHIFT 0x0
+#define GDC1_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD__SHIFT 0x8
+#define GDC1_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR__SHIFT 0x10
+#define GDC1_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR_MASK 0x000000FFL
+#define GDC1_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD_MASK 0x0000FF00L
+#define GDC1_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR_MASK 0x00FF0000L
+//GDC1_A2S_TAG_ALLOC_1
+#define GDC1_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR__SHIFT 0x0
+#define GDC1_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR__SHIFT 0x10
+#define GDC1_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD__SHIFT 0x18
+#define GDC1_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR_MASK 0x000000FFL
+#define GDC1_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR_MASK 0x00FF0000L
+#define GDC1_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD_MASK 0xFF000000L
+//GDC1_A2S_MISC_CNTL
+#define GDC1_A2S_MISC_CNTL__BLKLVL_FOR_MSG__SHIFT 0x0
+#define GDC1_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS__SHIFT 0x2
+#define GDC1_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE__SHIFT 0x3
+#define GDC1_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN__SHIFT 0x4
+#define GDC1_A2S_MISC_CNTL__RSP_REORDER_DIS__SHIFT 0x5
+#define GDC1_A2S_MISC_CNTL__WRRSP_ACCUM_SEL__SHIFT 0x6
+#define GDC1_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x7
+#define GDC1_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x8
+#define GDC1_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY__SHIFT 0x9
+#define GDC1_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0xa
+#define GDC1_A2S_MISC_CNTL__WR_TAG_SET_MIN__SHIFT 0x10
+#define GDC1_A2S_MISC_CNTL__RD_TAG_SET_MIN__SHIFT 0x15
+#define GDC1_A2S_MISC_CNTL__WRR_LRG_MODE__SHIFT 0x1a
+#define GDC1_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE__SHIFT 0x1b
+#define GDC1_A2S_MISC_CNTL__BLKLVL_FOR_MSG_MASK 0x00000003L
+#define GDC1_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS_MASK 0x00000004L
+#define GDC1_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE_MASK 0x00000008L
+#define GDC1_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN_MASK 0x00000010L
+#define GDC1_A2S_MISC_CNTL__RSP_REORDER_DIS_MASK 0x00000020L
+#define GDC1_A2S_MISC_CNTL__WRRSP_ACCUM_SEL_MASK 0x00000040L
+#define GDC1_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000080L
+#define GDC1_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000100L
+#define GDC1_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY_MASK 0x00000200L
+#define GDC1_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000400L
+#define GDC1_A2S_MISC_CNTL__WR_TAG_SET_MIN_MASK 0x001F0000L
+#define GDC1_A2S_MISC_CNTL__RD_TAG_SET_MIN_MASK 0x03E00000L
+#define GDC1_A2S_MISC_CNTL__WRR_LRG_MODE_MASK 0x04000000L
+#define GDC1_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE_MASK 0x08000000L
+//GDC1_SHUB_REGS_IF_CTL
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS__SHIFT 0x1
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS_MASK 0x00000002L
+//GDC1_NGDC_MGCG_CTRL
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd
+#define GDC1_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN__SHIFT 0xe
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN_MASK 0x00004000L
+//GDC1_NGDC_RESERVED_0
+#define GDC1_NGDC_RESERVED_0__RESERVED__SHIFT 0x0
+#define GDC1_NGDC_RESERVED_0__RESERVED_MASK 0xFFFFFFFFL
+//GDC1_NGDC_RESERVED_1
+#define GDC1_NGDC_RESERVED_1__RESERVED__SHIFT 0x0
+#define GDC1_NGDC_RESERVED_1__RESERVED_MASK 0xFFFFFFFFL
+//GDC1_NBIF_GFX_DOORBELL_STATUS
+#define GDC1_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT__SHIFT 0x0
+#define GDC1_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT_MASK 0x0000FFFFL
+//GDC1_ATDMA_MISC_CNTL
+#define GDC1_ATDMA_MISC_CNTL__WRR_ARB_MODE__SHIFT 0x0
+#define GDC1_ATDMA_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0x1
+#define GDC1_ATDMA_MISC_CNTL__RDRSP_ARB_MODE__SHIFT 0x2
+#define GDC1_ATDMA_MISC_CNTL__WRR_VC6_WEIGHT__SHIFT 0x8
+#define GDC1_ATDMA_MISC_CNTL__WRR_VC0_WEIGHT__SHIFT 0x10
+#define GDC1_ATDMA_MISC_CNTL__WRR_VC1_WEIGHT__SHIFT 0x18
+#define GDC1_ATDMA_MISC_CNTL__WRR_ARB_MODE_MASK 0x00000001L
+#define GDC1_ATDMA_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000002L
+#define GDC1_ATDMA_MISC_CNTL__RDRSP_ARB_MODE_MASK 0x0000000CL
+#define GDC1_ATDMA_MISC_CNTL__WRR_VC6_WEIGHT_MASK 0x0000FF00L
+#define GDC1_ATDMA_MISC_CNTL__WRR_VC0_WEIGHT_MASK 0x00FF0000L
+#define GDC1_ATDMA_MISC_CNTL__WRR_VC1_WEIGHT_MASK 0xFF000000L
+//GDC1_S2A_MISC_CNTL
+#define GDC1_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3
+#define GDC1_S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8
+#define GDC1_S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa
+#define GDC1_S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc
+#define GDC1_S2A_MISC_CNTL__HDP_PERF_ENH_DIS__SHIFT 0xf
+#define GDC1_S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10
+#define GDC1_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L
+#define GDC1_S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L
+#define GDC1_S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L
+#define GDC1_S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L
+#define GDC1_S2A_MISC_CNTL__HDP_PERF_ENH_DIS_MASK 0x00008000L
+#define GDC1_S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L
+//GDC1_NGDC_EARLY_WAKEUP_CTRL
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_ACTIVE__SHIFT 0x0
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_DS_EXIT__SHIFT 0x1
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_ALLOW_AER_ACTIVE__SHIFT 0x2
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_ACTIVE_MASK 0x00000001L
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_DS_EXIT_MASK 0x00000002L
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_ALLOW_AER_ACTIVE_MASK 0x00000004L
+//GDC1_NGDC_PG_MISC_CTRL
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY__SHIFT 0xa
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1__SHIFT 0xd
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS__SHIFT 0xe
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2__SHIFT 0x10
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS__SHIFT 0x18
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE__SHIFT 0x1f
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY_MASK 0x00000400L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1_MASK 0x00002000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS_MASK 0x00004000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2_MASK 0x00010000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS_MASK 0x3F000000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE_MASK 0x80000000L
+//GDC1_NGDC_PGMST_CTRL
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS__SHIFT 0x0
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN__SHIFT 0x8
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN__SHIFT 0xe
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN_MASK 0x00000100L
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN_MASK 0x0000C000L
+//GDC1_NGDC_PGSLV_CTRL
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS__SHIFT 0x0
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS__SHIFT 0x5
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS__SHIFT 0xa
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS_MASK 0x0000001FL
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS_MASK 0x000003E0L
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS_MASK 0x00007C00L
+
+
+// addressBlock: aid_nbio_nbif0_gdc_sec_GDCSEC_DEC
+//XCC_DOORBELL_FENCE
+#define XCC_DOORBELL_FENCE__XCC_0_DOORBELL_FENCE__SHIFT 0x0
+#define XCC_DOORBELL_FENCE__XCC_1_DOORBELL_FENCE__SHIFT 0x1
+#define XCC_DOORBELL_FENCE__XCC_2_DOORBELL_FENCE__SHIFT 0x2
+#define XCC_DOORBELL_FENCE__XCC_3_DOORBELL_FENCE__SHIFT 0x3
+#define XCC_DOORBELL_FENCE__XCC_4_DOORBELL_FENCE__SHIFT 0x4
+#define XCC_DOORBELL_FENCE__XCC_5_DOORBELL_FENCE__SHIFT 0x5
+#define XCC_DOORBELL_FENCE__XCC_6_DOORBELL_FENCE__SHIFT 0x6
+#define XCC_DOORBELL_FENCE__XCC_7_DOORBELL_FENCE__SHIFT 0x7
+#define XCC_DOORBELL_FENCE__SHUB_SLV_MODE__SHIFT 0x10
+#define XCC_DOORBELL_FENCE__RMOTE_CP_SENT__SHIFT 0x11
+#define XCC_DOORBELL_FENCE__CP_0_SENT__SHIFT 0x12
+#define XCC_DOORBELL_FENCE__CP_1_SENT__SHIFT 0x13
+#define XCC_DOORBELL_FENCE__CP_2_SENT__SHIFT 0x14
+#define XCC_DOORBELL_FENCE__CP_3_SENT__SHIFT 0x15
+#define XCC_DOORBELL_FENCE__CP_4_SENT__SHIFT 0x16
+#define XCC_DOORBELL_FENCE__CP_5_SENT__SHIFT 0x17
+#define XCC_DOORBELL_FENCE__CP_6_SENT__SHIFT 0x18
+#define XCC_DOORBELL_FENCE__CP_7_SENT__SHIFT 0x19
+#define XCC_DOORBELL_FENCE__REMOTE_CLIENT_SENT__SHIFT 0x1a
+#define XCC_DOORBELL_FENCE__REMOTE_CLIENT_CLR_PENDING__SHIFT 0x1b
+#define XCC_DOORBELL_FENCE__XCC_0_DOORBELL_FENCE_MASK 0x00000001L
+#define XCC_DOORBELL_FENCE__XCC_1_DOORBELL_FENCE_MASK 0x00000002L
+#define XCC_DOORBELL_FENCE__XCC_2_DOORBELL_FENCE_MASK 0x00000004L
+#define XCC_DOORBELL_FENCE__XCC_3_DOORBELL_FENCE_MASK 0x00000008L
+#define XCC_DOORBELL_FENCE__XCC_4_DOORBELL_FENCE_MASK 0x00000010L
+#define XCC_DOORBELL_FENCE__XCC_5_DOORBELL_FENCE_MASK 0x00000020L
+#define XCC_DOORBELL_FENCE__XCC_6_DOORBELL_FENCE_MASK 0x00000040L
+#define XCC_DOORBELL_FENCE__XCC_7_DOORBELL_FENCE_MASK 0x00000080L
+#define XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK 0x00010000L
+#define XCC_DOORBELL_FENCE__RMOTE_CP_SENT_MASK 0x00020000L
+#define XCC_DOORBELL_FENCE__CP_0_SENT_MASK 0x00040000L
+#define XCC_DOORBELL_FENCE__CP_1_SENT_MASK 0x00080000L
+#define XCC_DOORBELL_FENCE__CP_2_SENT_MASK 0x00100000L
+#define XCC_DOORBELL_FENCE__CP_3_SENT_MASK 0x00200000L
+#define XCC_DOORBELL_FENCE__CP_4_SENT_MASK 0x00400000L
+#define XCC_DOORBELL_FENCE__CP_5_SENT_MASK 0x00800000L
+#define XCC_DOORBELL_FENCE__CP_6_SENT_MASK 0x01000000L
+#define XCC_DOORBELL_FENCE__CP_7_SENT_MASK 0x02000000L
+#define XCC_DOORBELL_FENCE__REMOTE_CLIENT_SENT_MASK 0x04000000L
+#define XCC_DOORBELL_FENCE__REMOTE_CLIENT_CLR_PENDING_MASK 0x08000000L
+
+
+// addressBlock: aid_nbio_nbif0_gdc_rst_GDCRST_DEC
+//SHUB_PF_FLR_RST
+#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0
+#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1
+#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L
+#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L
+//SHUB_GFX_DRV_VPU_RST
+#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST__SHIFT 0x0
+#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST_MASK 0x00000001L
+//SHUB_LINK_RESET
+#define SHUB_LINK_RESET__LINK_P0_RESET__SHIFT 0x0
+#define SHUB_LINK_RESET__LINK_P1_RESET__SHIFT 0x1
+#define SHUB_LINK_RESET__LINK_P2_RESET__SHIFT 0x2
+#define SHUB_LINK_RESET__LINK_P3_RESET__SHIFT 0x3
+#define SHUB_LINK_RESET__LINK_P0_RESET_MASK 0x00000001L
+#define SHUB_LINK_RESET__LINK_P1_RESET_MASK 0x00000002L
+#define SHUB_LINK_RESET__LINK_P2_RESET_MASK 0x00000004L
+#define SHUB_LINK_RESET__LINK_P3_RESET_MASK 0x00000008L
+//SHUB_HARD_RST_CTRL
+#define SHUB_HARD_RST_CTRL__COR_RESET_EN__SHIFT 0x0
+#define SHUB_HARD_RST_CTRL__REG_RESET_EN__SHIFT 0x1
+#define SHUB_HARD_RST_CTRL__STY_RESET_EN__SHIFT 0x2
+#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3
+#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4
+#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5
+#define SHUB_HARD_RST_CTRL__COR_RESET_EN_MASK 0x00000001L
+#define SHUB_HARD_RST_CTRL__REG_RESET_EN_MASK 0x00000002L
+#define SHUB_HARD_RST_CTRL__STY_RESET_EN_MASK 0x00000004L
+#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L
+#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L
+#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L
+//SHUB_SOFT_RST_CTRL
+#define SHUB_SOFT_RST_CTRL__COR_RESET_EN__SHIFT 0x0
+#define SHUB_SOFT_RST_CTRL__REG_RESET_EN__SHIFT 0x1
+#define SHUB_SOFT_RST_CTRL__STY_RESET_EN__SHIFT 0x2
+#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3
+#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4
+#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5
+#define SHUB_SOFT_RST_CTRL__COR_RESET_EN_MASK 0x00000001L
+#define SHUB_SOFT_RST_CTRL__REG_RESET_EN_MASK 0x00000002L
+#define SHUB_SOFT_RST_CTRL__STY_RESET_EN_MASK 0x00000004L
+#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L
+#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L
+#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L
+//SHUB_SDP_PORT_RST
+#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST__SHIFT 0x0
+#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST__SHIFT 0x1
+#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST__SHIFT 0x2
+#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST__SHIFT 0x3
+#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST__SHIFT 0x4
+#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST__SHIFT 0x6
+#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST__SHIFT 0x8
+#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST__SHIFT 0x9
+#define SHUB_SDP_PORT_RST__MPDMATF_DMA_SDP_PORT_RST__SHIFT 0xa
+#define SHUB_SDP_PORT_RST__MPDMAPM_DMA_SDP_PORT_RST__SHIFT 0xb
+#define SHUB_SDP_PORT_RST__MPDMATF_HST_SDP_PORT_RST__SHIFT 0xc
+#define SHUB_SDP_PORT_RST__SHUB_CNDI_HST_SDP_PORT_RST__SHIFT 0xd
+#define SHUB_SDP_PORT_RST__SION_AON_RST__SHIFT 0x18
+#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST_MASK 0x00000001L
+#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST_MASK 0x00000002L
+#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST_MASK 0x00000004L
+#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST_MASK 0x00000008L
+#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST_MASK 0x00000010L
+#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST_MASK 0x00000040L
+#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST_MASK 0x00000100L
+#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST_MASK 0x00000200L
+#define SHUB_SDP_PORT_RST__MPDMATF_DMA_SDP_PORT_RST_MASK 0x00000400L
+#define SHUB_SDP_PORT_RST__MPDMAPM_DMA_SDP_PORT_RST_MASK 0x00000800L
+#define SHUB_SDP_PORT_RST__MPDMATF_HST_SDP_PORT_RST_MASK 0x00001000L
+#define SHUB_SDP_PORT_RST__SHUB_CNDI_HST_SDP_PORT_RST_MASK 0x00002000L
+#define SHUB_SDP_PORT_RST__SION_AON_RST_MASK 0x01000000L
+
+
+// addressBlock: aid_nbio_nbif0_syshub_mmreg_syshubdirect
+//HST_CLK0_SW0_CL0_CNTL
+#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//HST_CLK0_SW1_CL0_CNTL
+#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//HST_CLK0_SW1_CL1_CNTL
+#define HST_CLK0_SW1_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define HST_CLK0_SW1_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define HST_CLK0_SW1_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define HST_CLK0_SW1_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//HST_CLK0_SW1_CL2_CNTL
+#define HST_CLK0_SW1_CL2_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define HST_CLK0_SW1_CL2_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define HST_CLK0_SW1_CL2_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define HST_CLK0_SW1_CL2_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//DMA_CLK0_SW0_CL0_CNTL
+#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_EN__SHIFT 0x8
+#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_VALUE__SHIFT 0x9
+#define DMA_CLK0_SW0_CL0_CNTL__READ_WRR_WEIGHT__SHIFT 0x10
+#define DMA_CLK0_SW0_CL0_CNTL__WRITE_WRR_WEIGHT__SHIFT 0x18
+#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_EN_MASK 0x00000100L
+#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_VALUE_MASK 0x00001E00L
+#define DMA_CLK0_SW0_CL0_CNTL__READ_WRR_WEIGHT_MASK 0x00FF0000L
+#define DMA_CLK0_SW0_CL0_CNTL__WRITE_WRR_WEIGHT_MASK 0xFF000000L
+//DMA_CLK0_SW0_CL1_CNTL
+#define DMA_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define DMA_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_EN__SHIFT 0x8
+#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_VALUE__SHIFT 0x9
+#define DMA_CLK0_SW0_CL1_CNTL__READ_WRR_WEIGHT__SHIFT 0x10
+#define DMA_CLK0_SW0_CL1_CNTL__WRITE_WRR_WEIGHT__SHIFT 0x18
+#define DMA_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define DMA_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_EN_MASK 0x00000100L
+#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_VALUE_MASK 0x00001E00L
+#define DMA_CLK0_SW0_CL1_CNTL__READ_WRR_WEIGHT_MASK 0x00FF0000L
+#define DMA_CLK0_SW0_CL1_CNTL__WRITE_WRR_WEIGHT_MASK 0xFF000000L
+//NIC400_1_ASIB_0_FN_MOD
+#define NIC400_1_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_1_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_1_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_1_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L
+//NIC400_1_IB_0_FN_MOD
+#define NIC400_1_IB_0_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_1_IB_0_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_1_IB_0_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_1_IB_0_FN_MOD__write_iss_override_MASK 0x00000002L
+//NIC400_2_ASIB_0_FN_MOD
+#define NIC400_2_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_2_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_2_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_2_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L
+//NIC400_2_ASIB_0_QOS_CNTL
+#define NIC400_2_ASIB_0_QOS_CNTL__en_aw_rate__SHIFT 0x0
+#define NIC400_2_ASIB_0_QOS_CNTL__en_ar_rate__SHIFT 0x1
+#define NIC400_2_ASIB_0_QOS_CNTL__en_awar_rate__SHIFT 0x2
+#define NIC400_2_ASIB_0_QOS_CNTL__en_aw_fc__SHIFT 0x3
+#define NIC400_2_ASIB_0_QOS_CNTL__en_ar_fc__SHIFT 0x4
+#define NIC400_2_ASIB_0_QOS_CNTL__en_aw_ot__SHIFT 0x5
+#define NIC400_2_ASIB_0_QOS_CNTL__en_ar_ot__SHIFT 0x6
+#define NIC400_2_ASIB_0_QOS_CNTL__en_awar_ot__SHIFT 0x7
+#define NIC400_2_ASIB_0_QOS_CNTL__mode_aw_fc__SHIFT 0x10
+#define NIC400_2_ASIB_0_QOS_CNTL__mode_ar_fc__SHIFT 0x14
+#define NIC400_2_ASIB_0_QOS_CNTL__en_aw_rate_MASK 0x00000001L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_ar_rate_MASK 0x00000002L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_awar_rate_MASK 0x00000004L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_aw_fc_MASK 0x00000008L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_ar_fc_MASK 0x00000010L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_aw_ot_MASK 0x00000020L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_ar_ot_MASK 0x00000040L
+#define NIC400_2_ASIB_0_QOS_CNTL__en_awar_ot_MASK 0x00000080L
+#define NIC400_2_ASIB_0_QOS_CNTL__mode_aw_fc_MASK 0x00010000L
+#define NIC400_2_ASIB_0_QOS_CNTL__mode_ar_fc_MASK 0x00100000L
+//NIC400_2_ASIB_0_MAX_OT
+#define NIC400_2_ASIB_0_MAX_OT__aw_max_otf__SHIFT 0x0
+#define NIC400_2_ASIB_0_MAX_OT__aw_max_oti__SHIFT 0x8
+#define NIC400_2_ASIB_0_MAX_OT__ar_max_otf__SHIFT 0x10
+#define NIC400_2_ASIB_0_MAX_OT__ar_max_oti__SHIFT 0x18
+#define NIC400_2_ASIB_0_MAX_OT__aw_max_otf_MASK 0x000000FFL
+#define NIC400_2_ASIB_0_MAX_OT__aw_max_oti_MASK 0x00003F00L
+#define NIC400_2_ASIB_0_MAX_OT__ar_max_otf_MASK 0x00FF0000L
+#define NIC400_2_ASIB_0_MAX_OT__ar_max_oti_MASK 0x3F000000L
+//NIC400_2_ASIB_0_MAX_COMB_OT
+#define NIC400_2_ASIB_0_MAX_COMB_OT__awar_max_otf__SHIFT 0x0
+#define NIC400_2_ASIB_0_MAX_COMB_OT__awar_max_oti__SHIFT 0x8
+#define NIC400_2_ASIB_0_MAX_COMB_OT__awar_max_otf_MASK 0x000000FFL
+#define NIC400_2_ASIB_0_MAX_COMB_OT__awar_max_oti_MASK 0x00007F00L
+//NIC400_2_ASIB_0_AW_P
+#define NIC400_2_ASIB_0_AW_P__aw_p__SHIFT 0x18
+#define NIC400_2_ASIB_0_AW_P__aw_p_MASK 0xFF000000L
+//NIC400_2_ASIB_0_AW_B
+#define NIC400_2_ASIB_0_AW_B__aw_b__SHIFT 0x0
+#define NIC400_2_ASIB_0_AW_B__aw_b_MASK 0x0000FFFFL
+//NIC400_2_ASIB_0_AW_R
+#define NIC400_2_ASIB_0_AW_R__aw_r__SHIFT 0x14
+#define NIC400_2_ASIB_0_AW_R__aw_r_MASK 0xFFF00000L
+//NIC400_2_ASIB_0_AR_P
+#define NIC400_2_ASIB_0_AR_P__ar_p__SHIFT 0x18
+#define NIC400_2_ASIB_0_AR_P__ar_p_MASK 0xFF000000L
+//NIC400_2_ASIB_0_AR_B
+#define NIC400_2_ASIB_0_AR_B__ar_b__SHIFT 0x0
+#define NIC400_2_ASIB_0_AR_B__ar_b_MASK 0x0000FFFFL
+//NIC400_2_ASIB_0_AR_R
+#define NIC400_2_ASIB_0_AR_R__ar_r__SHIFT 0x14
+#define NIC400_2_ASIB_0_AR_R__ar_r_MASK 0xFFF00000L
+//NIC400_2_ASIB_0_TARGET_FC
+#define NIC400_2_ASIB_0_TARGET_FC__aw_tgt_latency__SHIFT 0x0
+#define NIC400_2_ASIB_0_TARGET_FC__ar_tgt_latency__SHIFT 0x10
+#define NIC400_2_ASIB_0_TARGET_FC__aw_tgt_latency_MASK 0x00000FFFL
+#define NIC400_2_ASIB_0_TARGET_FC__ar_tgt_latency_MASK 0x0FFF0000L
+//NIC400_2_ASIB_0_KI_FC
+#define NIC400_2_ASIB_0_KI_FC__aw_tgt_latency__SHIFT 0x0
+#define NIC400_2_ASIB_0_KI_FC__ar_tgt_latency__SHIFT 0x8
+#define NIC400_2_ASIB_0_KI_FC__aw_tgt_latency_MASK 0x00000007L
+#define NIC400_2_ASIB_0_KI_FC__ar_tgt_latency_MASK 0x00000700L
+//NIC400_2_ASIB_0_QOS_RANGE
+#define NIC400_2_ASIB_0_QOS_RANGE__aw_min_qos__SHIFT 0x0
+#define NIC400_2_ASIB_0_QOS_RANGE__aw_max_qos__SHIFT 0x8
+#define NIC400_2_ASIB_0_QOS_RANGE__ar_min_qos__SHIFT 0x10
+#define NIC400_2_ASIB_0_QOS_RANGE__ar_max_qos__SHIFT 0x18
+#define NIC400_2_ASIB_0_QOS_RANGE__aw_min_qos_MASK 0x0000000FL
+#define NIC400_2_ASIB_0_QOS_RANGE__aw_max_qos_MASK 0x00000F00L
+#define NIC400_2_ASIB_0_QOS_RANGE__ar_min_qos_MASK 0x000F0000L
+#define NIC400_2_ASIB_0_QOS_RANGE__ar_max_qos_MASK 0x0F000000L
+//NIC400_2_ASIB_1_FN_MOD
+#define NIC400_2_ASIB_1_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_2_ASIB_1_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_2_ASIB_1_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_2_ASIB_1_FN_MOD__write_iss_override_MASK 0x00000002L
+//NIC400_2_ASIB_1_QOS_CNTL
+#define NIC400_2_ASIB_1_QOS_CNTL__en_aw_rate__SHIFT 0x0
+#define NIC400_2_ASIB_1_QOS_CNTL__en_ar_rate__SHIFT 0x1
+#define NIC400_2_ASIB_1_QOS_CNTL__en_awar_rate__SHIFT 0x2
+#define NIC400_2_ASIB_1_QOS_CNTL__en_aw_fc__SHIFT 0x3
+#define NIC400_2_ASIB_1_QOS_CNTL__en_ar_fc__SHIFT 0x4
+#define NIC400_2_ASIB_1_QOS_CNTL__en_aw_ot__SHIFT 0x5
+#define NIC400_2_ASIB_1_QOS_CNTL__en_ar_ot__SHIFT 0x6
+#define NIC400_2_ASIB_1_QOS_CNTL__en_awar_ot__SHIFT 0x7
+#define NIC400_2_ASIB_1_QOS_CNTL__mode_aw_fc__SHIFT 0x10
+#define NIC400_2_ASIB_1_QOS_CNTL__mode_ar_fc__SHIFT 0x14
+#define NIC400_2_ASIB_1_QOS_CNTL__en_aw_rate_MASK 0x00000001L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_ar_rate_MASK 0x00000002L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_awar_rate_MASK 0x00000004L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_aw_fc_MASK 0x00000008L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_ar_fc_MASK 0x00000010L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_aw_ot_MASK 0x00000020L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_ar_ot_MASK 0x00000040L
+#define NIC400_2_ASIB_1_QOS_CNTL__en_awar_ot_MASK 0x00000080L
+#define NIC400_2_ASIB_1_QOS_CNTL__mode_aw_fc_MASK 0x00010000L
+#define NIC400_2_ASIB_1_QOS_CNTL__mode_ar_fc_MASK 0x00100000L
+//NIC400_2_ASIB_1_MAX_OT
+#define NIC400_2_ASIB_1_MAX_OT__aw_max_otf__SHIFT 0x0
+#define NIC400_2_ASIB_1_MAX_OT__aw_max_oti__SHIFT 0x8
+#define NIC400_2_ASIB_1_MAX_OT__ar_max_otf__SHIFT 0x10
+#define NIC400_2_ASIB_1_MAX_OT__ar_max_oti__SHIFT 0x18
+#define NIC400_2_ASIB_1_MAX_OT__aw_max_otf_MASK 0x000000FFL
+#define NIC400_2_ASIB_1_MAX_OT__aw_max_oti_MASK 0x00003F00L
+#define NIC400_2_ASIB_1_MAX_OT__ar_max_otf_MASK 0x00FF0000L
+#define NIC400_2_ASIB_1_MAX_OT__ar_max_oti_MASK 0x3F000000L
+//NIC400_2_ASIB_1_MAX_COMB_OT
+#define NIC400_2_ASIB_1_MAX_COMB_OT__awar_max_otf__SHIFT 0x0
+#define NIC400_2_ASIB_1_MAX_COMB_OT__awar_max_oti__SHIFT 0x8
+#define NIC400_2_ASIB_1_MAX_COMB_OT__awar_max_otf_MASK 0x000000FFL
+#define NIC400_2_ASIB_1_MAX_COMB_OT__awar_max_oti_MASK 0x00007F00L
+//NIC400_2_ASIB_1_AW_P
+#define NIC400_2_ASIB_1_AW_P__aw_p__SHIFT 0x18
+#define NIC400_2_ASIB_1_AW_P__aw_p_MASK 0xFF000000L
+//NIC400_2_ASIB_1_AW_B
+#define NIC400_2_ASIB_1_AW_B__aw_b__SHIFT 0x0
+#define NIC400_2_ASIB_1_AW_B__aw_b_MASK 0x0000FFFFL
+//NIC400_2_ASIB_1_AW_R
+#define NIC400_2_ASIB_1_AW_R__aw_r__SHIFT 0x14
+#define NIC400_2_ASIB_1_AW_R__aw_r_MASK 0xFFF00000L
+//NIC400_2_ASIB_1_AR_P
+#define NIC400_2_ASIB_1_AR_P__ar_p__SHIFT 0x18
+#define NIC400_2_ASIB_1_AR_P__ar_p_MASK 0xFF000000L
+//NIC400_2_ASIB_1_AR_B
+#define NIC400_2_ASIB_1_AR_B__ar_b__SHIFT 0x0
+#define NIC400_2_ASIB_1_AR_B__ar_b_MASK 0x0000FFFFL
+//NIC400_2_ASIB_1_AR_R
+#define NIC400_2_ASIB_1_AR_R__ar_r__SHIFT 0x14
+#define NIC400_2_ASIB_1_AR_R__ar_r_MASK 0xFFF00000L
+//NIC400_2_ASIB_1_TARGET_FC
+#define NIC400_2_ASIB_1_TARGET_FC__aw_tgt_latency__SHIFT 0x0
+#define NIC400_2_ASIB_1_TARGET_FC__ar_tgt_latency__SHIFT 0x10
+#define NIC400_2_ASIB_1_TARGET_FC__aw_tgt_latency_MASK 0x00000FFFL
+#define NIC400_2_ASIB_1_TARGET_FC__ar_tgt_latency_MASK 0x0FFF0000L
+//NIC400_2_ASIB_1_KI_FC
+#define NIC400_2_ASIB_1_KI_FC__aw_tgt_latency__SHIFT 0x0
+#define NIC400_2_ASIB_1_KI_FC__ar_tgt_latency__SHIFT 0x8
+#define NIC400_2_ASIB_1_KI_FC__aw_tgt_latency_MASK 0x00000007L
+#define NIC400_2_ASIB_1_KI_FC__ar_tgt_latency_MASK 0x00000700L
+//NIC400_2_ASIB_1_QOS_RANGE
+#define NIC400_2_ASIB_1_QOS_RANGE__aw_min_qos__SHIFT 0x0
+#define NIC400_2_ASIB_1_QOS_RANGE__aw_max_qos__SHIFT 0x8
+#define NIC400_2_ASIB_1_QOS_RANGE__ar_min_qos__SHIFT 0x10
+#define NIC400_2_ASIB_1_QOS_RANGE__ar_max_qos__SHIFT 0x18
+#define NIC400_2_ASIB_1_QOS_RANGE__aw_min_qos_MASK 0x0000000FL
+#define NIC400_2_ASIB_1_QOS_RANGE__aw_max_qos_MASK 0x00000F00L
+#define NIC400_2_ASIB_1_QOS_RANGE__ar_min_qos_MASK 0x000F0000L
+#define NIC400_2_ASIB_1_QOS_RANGE__ar_max_qos_MASK 0x0F000000L
+//NIC400_2_IB_0_FN_MOD
+#define NIC400_2_IB_0_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_2_IB_0_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_2_IB_0_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_2_IB_0_FN_MOD__write_iss_override_MASK 0x00000002L
+
+
+// addressBlock: aid_nbio_iohub_nb_nbcfg_nb_cfgdec
+//NB_NBCFG0_NBCFG_SCRATCH_4
+#define NB_NBCFG0_NBCFG_SCRATCH_4__NBCFG_SCRATCH_4__SHIFT 0x0
+#define NB_NBCFG0_NBCFG_SCRATCH_4__NBCFG_SCRATCH_4_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_misc_misc_cfgdec
+//NB_CNTL
+#define NB_CNTL__HWINIT_WR_LOCK__SHIFT 0x7
+#define NB_CNTL__HWINIT_WR_LOCK_MASK 0x00000080L
+//NB_SPARE1
+#define NB_SPARE1__NB_SPARE1_RW__SHIFT 0x0
+#define NB_SPARE1__NB_SPARE1_RW_MASK 0xFFFFFFFFL
+//NB_SPARE2
+#define NB_SPARE2__NB_SPARE2_RW1C_0__SHIFT 0x0
+#define NB_SPARE2__NB_SPARE2_RW1C_1__SHIFT 0x1
+#define NB_SPARE2__NB_SPARE2_RW1C_2__SHIFT 0x2
+#define NB_SPARE2__NB_SPARE2_RW1C_3__SHIFT 0x3
+#define NB_SPARE2__NB_SPARE2_RW1C_4__SHIFT 0x4
+#define NB_SPARE2__NB_SPARE2_RW1C_5__SHIFT 0x5
+#define NB_SPARE2__NB_SPARE2_RW1C_6__SHIFT 0x6
+#define NB_SPARE2__NB_SPARE2_RW1C_7__SHIFT 0x7
+#define NB_SPARE2__NB_SPARE2_RW1C_8__SHIFT 0x8
+#define NB_SPARE2__NB_SPARE2_RW1C_9__SHIFT 0x9
+#define NB_SPARE2__NB_SPARE2_RW1C_10__SHIFT 0xa
+#define NB_SPARE2__NB_SPARE2_RW1C_11__SHIFT 0xb
+#define NB_SPARE2__NB_SPARE2_RW1C_12__SHIFT 0xc
+#define NB_SPARE2__NB_SPARE2_RW1C_13__SHIFT 0xd
+#define NB_SPARE2__NB_SPARE2_RW1C_14__SHIFT 0xe
+#define NB_SPARE2__NB_SPARE2_RW1C_15__SHIFT 0xf
+#define NB_SPARE2__NB_SPARE2_RW1C_16__SHIFT 0x10
+#define NB_SPARE2__NB_SPARE2_RW1C_17__SHIFT 0x11
+#define NB_SPARE2__NB_SPARE2_RW1C_18__SHIFT 0x12
+#define NB_SPARE2__NB_SPARE2_RW1C_19__SHIFT 0x13
+#define NB_SPARE2__NB_SPARE2_RW1C_20__SHIFT 0x14
+#define NB_SPARE2__NB_SPARE2_RW1C_21__SHIFT 0x15
+#define NB_SPARE2__NB_SPARE2_RW1C_22__SHIFT 0x16
+#define NB_SPARE2__NB_SPARE2_RW1C_23__SHIFT 0x17
+#define NB_SPARE2__NB_SPARE2_RW1C_24__SHIFT 0x18
+#define NB_SPARE2__NB_SPARE2_RW1C_25__SHIFT 0x19
+#define NB_SPARE2__NB_SPARE2_RW1C_26__SHIFT 0x1a
+#define NB_SPARE2__NB_SPARE2_RW1C_27__SHIFT 0x1b
+#define NB_SPARE2__NB_SPARE2_RW1C_28__SHIFT 0x1c
+#define NB_SPARE2__NB_SPARE2_RW1C_29__SHIFT 0x1d
+#define NB_SPARE2__NB_SPARE2_RW1C_30__SHIFT 0x1e
+#define NB_SPARE2__NB_SPARE2_RW1C_31__SHIFT 0x1f
+#define NB_SPARE2__NB_SPARE2_RW1C_0_MASK 0x00000001L
+#define NB_SPARE2__NB_SPARE2_RW1C_1_MASK 0x00000002L
+#define NB_SPARE2__NB_SPARE2_RW1C_2_MASK 0x00000004L
+#define NB_SPARE2__NB_SPARE2_RW1C_3_MASK 0x00000008L
+#define NB_SPARE2__NB_SPARE2_RW1C_4_MASK 0x00000010L
+#define NB_SPARE2__NB_SPARE2_RW1C_5_MASK 0x00000020L
+#define NB_SPARE2__NB_SPARE2_RW1C_6_MASK 0x00000040L
+#define NB_SPARE2__NB_SPARE2_RW1C_7_MASK 0x00000080L
+#define NB_SPARE2__NB_SPARE2_RW1C_8_MASK 0x00000100L
+#define NB_SPARE2__NB_SPARE2_RW1C_9_MASK 0x00000200L
+#define NB_SPARE2__NB_SPARE2_RW1C_10_MASK 0x00000400L
+#define NB_SPARE2__NB_SPARE2_RW1C_11_MASK 0x00000800L
+#define NB_SPARE2__NB_SPARE2_RW1C_12_MASK 0x00001000L
+#define NB_SPARE2__NB_SPARE2_RW1C_13_MASK 0x00002000L
+#define NB_SPARE2__NB_SPARE2_RW1C_14_MASK 0x00004000L
+#define NB_SPARE2__NB_SPARE2_RW1C_15_MASK 0x00008000L
+#define NB_SPARE2__NB_SPARE2_RW1C_16_MASK 0x00010000L
+#define NB_SPARE2__NB_SPARE2_RW1C_17_MASK 0x00020000L
+#define NB_SPARE2__NB_SPARE2_RW1C_18_MASK 0x00040000L
+#define NB_SPARE2__NB_SPARE2_RW1C_19_MASK 0x00080000L
+#define NB_SPARE2__NB_SPARE2_RW1C_20_MASK 0x00100000L
+#define NB_SPARE2__NB_SPARE2_RW1C_21_MASK 0x00200000L
+#define NB_SPARE2__NB_SPARE2_RW1C_22_MASK 0x00400000L
+#define NB_SPARE2__NB_SPARE2_RW1C_23_MASK 0x00800000L
+#define NB_SPARE2__NB_SPARE2_RW1C_24_MASK 0x01000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_25_MASK 0x02000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_26_MASK 0x04000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_27_MASK 0x08000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_28_MASK 0x10000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_29_MASK 0x20000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_30_MASK 0x40000000L
+#define NB_SPARE2__NB_SPARE2_RW1C_31_MASK 0x80000000L
+//NB_REVID
+#define NB_REVID__REVISION_ID__SHIFT 0x0
+#define NB_REVID__REVISION_ID_MASK 0x000003FFL
+//NBIO_LCLK_DS_MASK
+#define NBIO_LCLK_DS_MASK__LCLK_DS_MASK__SHIFT 0x0
+#define NBIO_LCLK_DS_MASK__LCLK_DS_MASK_MASK 0xFFFFFFFFL
+//NB_BUS_NUM_CNTL
+#define NB_BUS_NUM_CNTL__NB_BUS_NUM__SHIFT 0x0
+#define NB_BUS_NUM_CNTL__NB_BUS_LAT_Mode__SHIFT 0x8
+#define NB_BUS_NUM_CNTL__NB_SEGMENT__SHIFT 0x10
+#define NB_BUS_NUM_CNTL__NB_BUS_NUM_MASK 0x000000FFL
+#define NB_BUS_NUM_CNTL__NB_BUS_LAT_Mode_MASK 0x00000100L
+#define NB_BUS_NUM_CNTL__NB_SEGMENT_MASK 0x00FF0000L
+//NB_MMIOBASE
+#define NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//NB_MMIOLIMIT
+#define NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//NB_LOWER_TOP_OF_DRAM2
+#define NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//NB_UPPER_TOP_OF_DRAM2
+#define NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x000001FFL
+//NB_LOWER_DRAM2_BASE
+#define NB_LOWER_DRAM2_BASE__LOWER_DRAM2_BASE__SHIFT 0x17
+#define NB_LOWER_DRAM2_BASE__LOWER_DRAM2_BASE_MASK 0xFF800000L
+//NB_UPPER_DRAM2_BASE
+#define NB_UPPER_DRAM2_BASE__UPPER_DRAM2_BASE__SHIFT 0x0
+#define NB_UPPER_DRAM2_BASE__UPPER_DRAM2_BASE_MASK 0x000001FFL
+//SB_LOCATION
+#define SB_LOCATION__SBlocated_Port__SHIFT 0x0
+#define SB_LOCATION__SBlocated_Core__SHIFT 0x10
+#define SB_LOCATION__SBlocated_Port_MASK 0x0000FFFFL
+#define SB_LOCATION__SBlocated_Core_MASK 0xFFFF0000L
+//SW_US_LOCATION
+#define SW_US_LOCATION__SW_USlocated_Port__SHIFT 0x0
+#define SW_US_LOCATION__SW_USlocated_Core__SHIFT 0x10
+#define SW_US_LOCATION__SW_USlocated_Port_MASK 0x0000FFFFL
+#define SW_US_LOCATION__SW_USlocated_Core_MASK 0xFFFF0000L
+//NB_PROG_DEVICE_REMAP_PBr0
+#define NB_PROG_DEVICE_REMAP_PBr0__PBr0_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr0__PBr0_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr1
+#define NB_PROG_DEVICE_REMAP_PBr1__PBr1_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr1__PBr1_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr2
+#define NB_PROG_DEVICE_REMAP_PBr2__PBr2_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr2__PBr2_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr3
+#define NB_PROG_DEVICE_REMAP_PBr3__PBr3_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr3__PBr3_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr4
+#define NB_PROG_DEVICE_REMAP_PBr4__PBr4_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr4__PBr4_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr5
+#define NB_PROG_DEVICE_REMAP_PBr5__PBr5_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr5__PBr5_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr6
+#define NB_PROG_DEVICE_REMAP_PBr6__PBr6_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr6__PBr6_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr7
+#define NB_PROG_DEVICE_REMAP_PBr7__PBr7_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr7__PBr7_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr8
+#define NB_PROG_DEVICE_REMAP_PBr8__PBr8_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr8__PBr8_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr10
+#define NB_PROG_DEVICE_REMAP_PBr10__PBr10_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr10__PBr10_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr11
+#define NB_PROG_DEVICE_REMAP_PBr11__PBr11_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr11__PBr11_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr12
+#define NB_PROG_DEVICE_REMAP_PBr12__PBr12_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr12__PBr12_DevFnMap_MASK 0x000000FFL
+//NB_PROG_DEVICE_REMAP_PBr13
+#define NB_PROG_DEVICE_REMAP_PBr13__PBr13_DevFnMap__SHIFT 0x0
+#define NB_PROG_DEVICE_REMAP_PBr13__PBr13_DevFnMap_MASK 0x000000FFL
+//SW_NMI_CNTL
+#define SW_NMI_CNTL__SW_NMI_Status__SHIFT 0x0
+#define SW_NMI_CNTL__SW_NMI_Status_MASK 0xFFFFFFFFL
+//SW_SMI_CNTL
+#define SW_SMI_CNTL__SW_SMI_Status__SHIFT 0x0
+#define SW_SMI_CNTL__SW_SMI_Status_MASK 0xFFFFFFFFL
+//SW_SCI_CNTL
+#define SW_SCI_CNTL__SW_SCI_Status__SHIFT 0x0
+#define SW_SCI_CNTL__SW_SCI_Status_MASK 0xFFFFFFFFL
+//APML_SW_STATUS
+#define APML_SW_STATUS__APML_NMI_STATUS__SHIFT 0x0
+#define APML_SW_STATUS__APML_NMI_STATUS_MASK 0x00000001L
+//SW_GIC_SPI_CNTL
+#define SW_GIC_SPI_CNTL__SW_NMI_GIC_SPI_Vector__SHIFT 0x0
+#define SW_GIC_SPI_CNTL__SW_SMI_GIC_SPI_Vector__SHIFT 0x8
+#define SW_GIC_SPI_CNTL__SW_SCI_GIC_SPI_Vector__SHIFT 0x10
+#define SW_GIC_SPI_CNTL__SW_NMI_GIC_SPI_Vector_MASK 0x000000FFL
+#define SW_GIC_SPI_CNTL__SW_SMI_GIC_SPI_Vector_MASK 0x0000FF00L
+#define SW_GIC_SPI_CNTL__SW_SCI_GIC_SPI_Vector_MASK 0x00FF0000L
+//SW_SYNCFLOOD_CNTL
+#define SW_SYNCFLOOD_CNTL__SW_SYNCFLOOD_PRIVATE__SHIFT 0x0
+#define SW_SYNCFLOOD_CNTL__SW_SYNCFLOOD_APML__SHIFT 0x1
+#define SW_SYNCFLOOD_CNTL__SW_SYNCFLOOD_PRIVATE_MASK 0x00000001L
+#define SW_SYNCFLOOD_CNTL__SW_SYNCFLOOD_APML_MASK 0x00000002L
+//NB_TOP_OF_DRAM3
+#define NB_TOP_OF_DRAM3__TOM3_LIMIT__SHIFT 0x0
+#define NB_TOP_OF_DRAM3__TOM3_ENABLE__SHIFT 0x1f
+#define NB_TOP_OF_DRAM3__TOM3_LIMIT_MASK 0x3FFFFFFFL
+#define NB_TOP_OF_DRAM3__TOM3_ENABLE_MASK 0x80000000L
+//CAM_CONTROL
+#define CAM_CONTROL__CAM_En__SHIFT 0x0
+#define CAM_CONTROL__Op__SHIFT 0x1
+#define CAM_CONTROL__AccessType__SHIFT 0x2
+#define CAM_CONTROL__DataMatchEn__SHIFT 0x3
+#define CAM_CONTROL__VC__SHIFT 0x4
+#define CAM_CONTROL__CrossTrigger__SHIFT 0x8
+#define CAM_CONTROL__CAM_En_MASK 0x00000001L
+#define CAM_CONTROL__Op_MASK 0x00000002L
+#define CAM_CONTROL__AccessType_MASK 0x00000004L
+#define CAM_CONTROL__DataMatchEn_MASK 0x00000008L
+#define CAM_CONTROL__VC_MASK 0x00000070L
+#define CAM_CONTROL__CrossTrigger_MASK 0x00000F00L
+//CAM_TARGET_INDEX_ADDR_BOTTOM
+#define CAM_TARGET_INDEX_ADDR_BOTTOM__IndexAddrBottom__SHIFT 0x0
+#define CAM_TARGET_INDEX_ADDR_BOTTOM__IndexAddrBottom_MASK 0xFFFFFFFFL
+//CAM_TARGET_INDEX_ADDR_TOP
+#define CAM_TARGET_INDEX_ADDR_TOP__IndexAddrTop__SHIFT 0x0
+#define CAM_TARGET_INDEX_ADDR_TOP__IndexAddrTop_MASK 0xFFFFFFFFL
+//CAM_TARGET_INDEX_DATA
+#define CAM_TARGET_INDEX_DATA__IndexData__SHIFT 0x0
+#define CAM_TARGET_INDEX_DATA__IndexData_MASK 0xFFFFFFFFL
+//CAM_TARGET_INDEX_DATA_MASK
+#define CAM_TARGET_INDEX_DATA_MASK__IndexDataMask__SHIFT 0x0
+#define CAM_TARGET_INDEX_DATA_MASK__IndexDataMask_MASK 0xFFFFFFFFL
+//CAM_TARGET_DATA_ADDR_BOTTOM
+#define CAM_TARGET_DATA_ADDR_BOTTOM__DataAddrBottom__SHIFT 0x0
+#define CAM_TARGET_DATA_ADDR_BOTTOM__DataAddrBottom_MASK 0xFFFFFFFFL
+//CAM_TARGET_DATA_ADDR_TOP
+#define CAM_TARGET_DATA_ADDR_TOP__DataAddrTop__SHIFT 0x0
+#define CAM_TARGET_DATA_ADDR_TOP__DataAddrTop_MASK 0xFFFFFFFFL
+//CAM_TARGET_DATA
+#define CAM_TARGET_DATA__Data__SHIFT 0x0
+#define CAM_TARGET_DATA__Data_MASK 0xFFFFFFFFL
+//CAM_TARGET_DATA_MASK
+#define CAM_TARGET_DATA_MASK__DataMask__SHIFT 0x0
+#define CAM_TARGET_DATA_MASK__DataMask_MASK 0xFFFFFFFFL
+//P_DMA_DROPPED_LOG_LOWER
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_0__SHIFT 0x0
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_1__SHIFT 0x1
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_2__SHIFT 0x2
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_3__SHIFT 0x3
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_4__SHIFT 0x4
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_5__SHIFT 0x5
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_6__SHIFT 0x6
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_7__SHIFT 0x7
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_8__SHIFT 0x8
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_9__SHIFT 0x9
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_10__SHIFT 0xa
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_11__SHIFT 0xb
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_12__SHIFT 0xc
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_13__SHIFT 0xd
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_14__SHIFT 0xe
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_15__SHIFT 0xf
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_16__SHIFT 0x10
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_17__SHIFT 0x11
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_18__SHIFT 0x12
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_19__SHIFT 0x13
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_20__SHIFT 0x14
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_21__SHIFT 0x15
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_22__SHIFT 0x16
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_23__SHIFT 0x17
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_24__SHIFT 0x18
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_25__SHIFT 0x19
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_26__SHIFT 0x1a
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_27__SHIFT 0x1b
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_28__SHIFT 0x1c
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_29__SHIFT 0x1d
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_30__SHIFT 0x1e
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_31__SHIFT 0x1f
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_0_MASK 0x00000001L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_1_MASK 0x00000002L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_2_MASK 0x00000004L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_3_MASK 0x00000008L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_4_MASK 0x00000010L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_5_MASK 0x00000020L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_6_MASK 0x00000040L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_7_MASK 0x00000080L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_8_MASK 0x00000100L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_9_MASK 0x00000200L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_10_MASK 0x00000400L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_11_MASK 0x00000800L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_12_MASK 0x00001000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_13_MASK 0x00002000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_14_MASK 0x00004000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_15_MASK 0x00008000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_16_MASK 0x00010000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_17_MASK 0x00020000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_18_MASK 0x00040000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_19_MASK 0x00080000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_20_MASK 0x00100000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_21_MASK 0x00200000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_22_MASK 0x00400000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_23_MASK 0x00800000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_24_MASK 0x01000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_25_MASK 0x02000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_26_MASK 0x04000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_27_MASK 0x08000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_28_MASK 0x10000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_29_MASK 0x20000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_30_MASK 0x40000000L
+#define P_DMA_DROPPED_LOG_LOWER__P_DMA_DROPPED_LOG_LOWER_31_MASK 0x80000000L
+//P_DMA_DROPPED_LOG_UPPER
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_0__SHIFT 0x0
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_1__SHIFT 0x1
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_2__SHIFT 0x2
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_3__SHIFT 0x3
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_4__SHIFT 0x4
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_5__SHIFT 0x5
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_6__SHIFT 0x6
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_7__SHIFT 0x7
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_8__SHIFT 0x8
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_9__SHIFT 0x9
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_10__SHIFT 0xa
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_11__SHIFT 0xb
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_12__SHIFT 0xc
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_13__SHIFT 0xd
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_14__SHIFT 0xe
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_15__SHIFT 0xf
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_16__SHIFT 0x10
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_17__SHIFT 0x11
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_18__SHIFT 0x12
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_19__SHIFT 0x13
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_20__SHIFT 0x14
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_21__SHIFT 0x15
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_22__SHIFT 0x16
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_23__SHIFT 0x17
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_24__SHIFT 0x18
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_25__SHIFT 0x19
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_26__SHIFT 0x1a
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_27__SHIFT 0x1b
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_28__SHIFT 0x1c
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_29__SHIFT 0x1d
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_30__SHIFT 0x1e
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_31__SHIFT 0x1f
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_0_MASK 0x00000001L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_1_MASK 0x00000002L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_2_MASK 0x00000004L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_3_MASK 0x00000008L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_4_MASK 0x00000010L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_5_MASK 0x00000020L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_6_MASK 0x00000040L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_7_MASK 0x00000080L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_8_MASK 0x00000100L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_9_MASK 0x00000200L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_10_MASK 0x00000400L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_11_MASK 0x00000800L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_12_MASK 0x00001000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_13_MASK 0x00002000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_14_MASK 0x00004000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_15_MASK 0x00008000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_16_MASK 0x00010000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_17_MASK 0x00020000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_18_MASK 0x00040000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_19_MASK 0x00080000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_20_MASK 0x00100000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_21_MASK 0x00200000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_22_MASK 0x00400000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_23_MASK 0x00800000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_24_MASK 0x01000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_25_MASK 0x02000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_26_MASK 0x04000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_27_MASK 0x08000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_28_MASK 0x10000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_29_MASK 0x20000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_30_MASK 0x40000000L
+#define P_DMA_DROPPED_LOG_UPPER__P_DMA_DROPPED_LOG_UPPER_31_MASK 0x80000000L
+//NP_DMA_DROPPED_LOG_LOWER
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_0__SHIFT 0x0
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_1__SHIFT 0x1
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_2__SHIFT 0x2
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_3__SHIFT 0x3
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_4__SHIFT 0x4
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_5__SHIFT 0x5
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_6__SHIFT 0x6
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_7__SHIFT 0x7
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_8__SHIFT 0x8
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_9__SHIFT 0x9
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_10__SHIFT 0xa
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_11__SHIFT 0xb
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_12__SHIFT 0xc
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_13__SHIFT 0xd
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_14__SHIFT 0xe
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_15__SHIFT 0xf
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_16__SHIFT 0x10
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_17__SHIFT 0x11
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_18__SHIFT 0x12
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_19__SHIFT 0x13
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_20__SHIFT 0x14
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_21__SHIFT 0x15
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_22__SHIFT 0x16
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_23__SHIFT 0x17
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_24__SHIFT 0x18
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_25__SHIFT 0x19
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_26__SHIFT 0x1a
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_27__SHIFT 0x1b
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_28__SHIFT 0x1c
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_29__SHIFT 0x1d
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_30__SHIFT 0x1e
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_31__SHIFT 0x1f
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_0_MASK 0x00000001L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_1_MASK 0x00000002L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_2_MASK 0x00000004L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_3_MASK 0x00000008L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_4_MASK 0x00000010L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_5_MASK 0x00000020L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_6_MASK 0x00000040L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_7_MASK 0x00000080L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_8_MASK 0x00000100L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_9_MASK 0x00000200L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_10_MASK 0x00000400L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_11_MASK 0x00000800L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_12_MASK 0x00001000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_13_MASK 0x00002000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_14_MASK 0x00004000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_15_MASK 0x00008000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_16_MASK 0x00010000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_17_MASK 0x00020000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_18_MASK 0x00040000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_19_MASK 0x00080000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_20_MASK 0x00100000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_21_MASK 0x00200000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_22_MASK 0x00400000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_23_MASK 0x00800000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_24_MASK 0x01000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_25_MASK 0x02000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_26_MASK 0x04000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_27_MASK 0x08000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_28_MASK 0x10000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_29_MASK 0x20000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_30_MASK 0x40000000L
+#define NP_DMA_DROPPED_LOG_LOWER__NP_DMA_DROPPED_LOG_LOWER_31_MASK 0x80000000L
+//NP_DMA_DROPPED_LOG_UPPER
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_0__SHIFT 0x0
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_1__SHIFT 0x1
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_2__SHIFT 0x2
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_3__SHIFT 0x3
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_4__SHIFT 0x4
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_5__SHIFT 0x5
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_6__SHIFT 0x6
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_7__SHIFT 0x7
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_8__SHIFT 0x8
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_9__SHIFT 0x9
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_10__SHIFT 0xa
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_11__SHIFT 0xb
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_12__SHIFT 0xc
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_13__SHIFT 0xd
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_14__SHIFT 0xe
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_15__SHIFT 0xf
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_16__SHIFT 0x10
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_17__SHIFT 0x11
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_18__SHIFT 0x12
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_19__SHIFT 0x13
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_20__SHIFT 0x14
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_21__SHIFT 0x15
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_22__SHIFT 0x16
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_23__SHIFT 0x17
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_24__SHIFT 0x18
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_25__SHIFT 0x19
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_26__SHIFT 0x1a
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_27__SHIFT 0x1b
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_28__SHIFT 0x1c
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_29__SHIFT 0x1d
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_30__SHIFT 0x1e
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_31__SHIFT 0x1f
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_0_MASK 0x00000001L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_1_MASK 0x00000002L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_2_MASK 0x00000004L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_3_MASK 0x00000008L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_4_MASK 0x00000010L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_5_MASK 0x00000020L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_6_MASK 0x00000040L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_7_MASK 0x00000080L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_8_MASK 0x00000100L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_9_MASK 0x00000200L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_10_MASK 0x00000400L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_11_MASK 0x00000800L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_12_MASK 0x00001000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_13_MASK 0x00002000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_14_MASK 0x00004000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_15_MASK 0x00008000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_16_MASK 0x00010000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_17_MASK 0x00020000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_18_MASK 0x00040000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_19_MASK 0x00080000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_20_MASK 0x00100000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_21_MASK 0x00200000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_22_MASK 0x00400000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_23_MASK 0x00800000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_24_MASK 0x01000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_25_MASK 0x02000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_26_MASK 0x04000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_27_MASK 0x08000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_28_MASK 0x10000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_29_MASK 0x20000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_30_MASK 0x40000000L
+#define NP_DMA_DROPPED_LOG_UPPER__NP_DMA_DROPPED_LOG_UPPER_31_MASK 0x80000000L
+//PCIE_VDM_NODE0_CTRL4
+#define PCIE_VDM_NODE0_CTRL4__BUS_RANGE_BASE__SHIFT 0x0
+#define PCIE_VDM_NODE0_CTRL4__BUS_RANGE_LIMIT__SHIFT 0x8
+#define PCIE_VDM_NODE0_CTRL4__NODE0_PRESENT__SHIFT 0x1f
+#define PCIE_VDM_NODE0_CTRL4__BUS_RANGE_BASE_MASK 0x000000FFL
+#define PCIE_VDM_NODE0_CTRL4__BUS_RANGE_LIMIT_MASK 0x0000FF00L
+#define PCIE_VDM_NODE0_CTRL4__NODE0_PRESENT_MASK 0x80000000L
+//PCIE_VDM_CNTL2
+#define PCIE_VDM_CNTL2__VdmP2pMode__SHIFT 0x0
+#define PCIE_VDM_CNTL2__MCTPEndpointEn__SHIFT 0x2
+#define PCIE_VDM_CNTL2__MCTPMultiSegEn__SHIFT 0x3
+#define PCIE_VDM_CNTL2__MCTPT2SMUEn__SHIFT 0x4
+#define PCIE_VDM_CNTL2__AMDVDM2SMUEn__SHIFT 0x5
+#define PCIE_VDM_CNTL2__OtherVDM2SMUEn__SHIFT 0x6
+#define PCIE_VDM_CNTL2__RouteAllToMCTPMaster__SHIFT 0x7
+#define PCIE_VDM_CNTL2__MCTPMasterSeg__SHIFT 0x8
+#define PCIE_VDM_CNTL2__MCTPMasterID__SHIFT 0x10
+#define PCIE_VDM_CNTL2__VdmP2pMode_MASK 0x00000003L
+#define PCIE_VDM_CNTL2__MCTPEndpointEn_MASK 0x00000004L
+#define PCIE_VDM_CNTL2__MCTPMultiSegEn_MASK 0x00000008L
+#define PCIE_VDM_CNTL2__MCTPT2SMUEn_MASK 0x00000010L
+#define PCIE_VDM_CNTL2__AMDVDM2SMUEn_MASK 0x00000020L
+#define PCIE_VDM_CNTL2__OtherVDM2SMUEn_MASK 0x00000040L
+#define PCIE_VDM_CNTL2__RouteAllToMCTPMaster_MASK 0x00000080L
+#define PCIE_VDM_CNTL2__MCTPMasterSeg_MASK 0x0000FF00L
+#define PCIE_VDM_CNTL2__MCTPMasterID_MASK 0xFFFF0000L
+//PCIE_VDM_CNTL3
+#define PCIE_VDM_CNTL3__APMTPMasterValid__SHIFT 0xf
+#define PCIE_VDM_CNTL3__APMTPMasterID__SHIFT 0x10
+#define PCIE_VDM_CNTL3__APMTPMasterValid_MASK 0x00008000L
+#define PCIE_VDM_CNTL3__APMTPMasterID_MASK 0xFFFF0000L
+//STALL_CONTROL_XBARPORT0_0
+#define STALL_CONTROL_XBARPORT0_0__StallVC0ReqEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT0_0__StallVC1ReqEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT0_0__StallVC2ReqEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT0_0__StallVC3ReqEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT0_0__StallVC4ReqEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT0_0__StallVC5ReqEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT0_0__StallVC7ReqEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT0_0__StallVC0ReqEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT0_0__StallVC1ReqEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT0_0__StallVC2ReqEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT0_0__StallVC3ReqEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT0_0__StallVC4ReqEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT0_0__StallVC5ReqEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT0_0__StallVC7ReqEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT0_1
+#define STALL_CONTROL_XBARPORT0_1__StallVC0RspEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT0_1__StallVC1RspEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT0_1__StallVC2RspEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT0_1__StallVC3RspEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT0_1__StallVC4RspEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT0_1__StallVC5RspEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT0_1__StallVC7RspEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT0_1__StallVC0RspEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT0_1__StallVC1RspEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT0_1__StallVC2RspEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT0_1__StallVC3RspEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT0_1__StallVC4RspEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT0_1__StallVC5RspEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT0_1__StallVC7RspEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT1_0
+#define STALL_CONTROL_XBARPORT1_0__StallVC0ReqEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT1_0__StallVC1ReqEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT1_0__StallVC2ReqEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT1_0__StallVC3ReqEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT1_0__StallVC4ReqEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT1_0__StallVC5ReqEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT1_0__StallVC7ReqEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT1_0__StallVC0ReqEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT1_0__StallVC1ReqEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT1_0__StallVC2ReqEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT1_0__StallVC3ReqEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT1_0__StallVC4ReqEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT1_0__StallVC5ReqEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT1_0__StallVC7ReqEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT1_1
+#define STALL_CONTROL_XBARPORT1_1__StallVC0RspEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT1_1__StallVC1RspEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT1_1__StallVC2RspEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT1_1__StallVC3RspEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT1_1__StallVC4RspEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT1_1__StallVC5RspEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT1_1__StallVC7RspEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT1_1__StallVC0RspEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT1_1__StallVC1RspEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT1_1__StallVC2RspEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT1_1__StallVC3RspEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT1_1__StallVC4RspEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT1_1__StallVC5RspEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT1_1__StallVC7RspEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT2_0
+#define STALL_CONTROL_XBARPORT2_0__StallVC0ReqEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT2_0__StallVC1ReqEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT2_0__StallVC2ReqEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT2_0__StallVC3ReqEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT2_0__StallVC4ReqEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT2_0__StallVC5ReqEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT2_0__StallVC7ReqEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT2_0__StallVC0ReqEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT2_0__StallVC1ReqEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT2_0__StallVC2ReqEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT2_0__StallVC3ReqEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT2_0__StallVC4ReqEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT2_0__StallVC5ReqEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT2_0__StallVC7ReqEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT2_1
+#define STALL_CONTROL_XBARPORT2_1__StallVC0RspEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT2_1__StallVC1RspEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT2_1__StallVC2RspEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT2_1__StallVC3RspEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT2_1__StallVC4RspEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT2_1__StallVC5RspEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT2_1__StallVC7RspEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT2_1__StallVC0RspEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT2_1__StallVC1RspEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT2_1__StallVC2RspEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT2_1__StallVC3RspEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT2_1__StallVC4RspEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT2_1__StallVC5RspEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT2_1__StallVC7RspEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT3_0
+#define STALL_CONTROL_XBARPORT3_0__StallVC0ReqEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT3_0__StallVC1ReqEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT3_0__StallVC2ReqEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT3_0__StallVC3ReqEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT3_0__StallVC4ReqEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT3_0__StallVC5ReqEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT3_0__StallVC7ReqEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT3_0__StallVC0ReqEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT3_0__StallVC1ReqEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT3_0__StallVC2ReqEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT3_0__StallVC3ReqEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT3_0__StallVC4ReqEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT3_0__StallVC5ReqEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT3_0__StallVC7ReqEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT3_1
+#define STALL_CONTROL_XBARPORT3_1__StallVC0RspEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT3_1__StallVC1RspEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT3_1__StallVC2RspEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT3_1__StallVC3RspEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT3_1__StallVC4RspEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT3_1__StallVC5RspEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT3_1__StallVC7RspEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT3_1__StallVC0RspEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT3_1__StallVC1RspEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT3_1__StallVC2RspEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT3_1__StallVC3RspEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT3_1__StallVC4RspEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT3_1__StallVC5RspEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT3_1__StallVC7RspEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT4_0
+#define STALL_CONTROL_XBARPORT4_0__StallVC0ReqEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT4_0__StallVC1ReqEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT4_0__StallVC2ReqEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT4_0__StallVC3ReqEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT4_0__StallVC4ReqEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT4_0__StallVC5ReqEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT4_0__StallVC7ReqEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT4_0__StallVC0ReqEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT4_0__StallVC1ReqEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT4_0__StallVC2ReqEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT4_0__StallVC3ReqEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT4_0__StallVC4ReqEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT4_0__StallVC5ReqEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT4_0__StallVC7ReqEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT4_1
+#define STALL_CONTROL_XBARPORT4_1__StallVC0RspEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT4_1__StallVC1RspEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT4_1__StallVC2RspEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT4_1__StallVC3RspEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT4_1__StallVC4RspEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT4_1__StallVC5RspEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT4_1__StallVC7RspEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT4_1__StallVC0RspEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT4_1__StallVC1RspEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT4_1__StallVC2RspEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT4_1__StallVC3RspEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT4_1__StallVC4RspEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT4_1__StallVC5RspEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT4_1__StallVC7RspEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT5_0
+#define STALL_CONTROL_XBARPORT5_0__StallVC0ReqEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT5_0__StallVC1ReqEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT5_0__StallVC2ReqEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT5_0__StallVC3ReqEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT5_0__StallVC4ReqEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT5_0__StallVC5ReqEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT5_0__StallVC7ReqEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT5_0__StallVC0ReqEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT5_0__StallVC1ReqEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT5_0__StallVC2ReqEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT5_0__StallVC3ReqEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT5_0__StallVC4ReqEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT5_0__StallVC5ReqEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT5_0__StallVC7ReqEn_MASK 0x30000000L
+//STALL_CONTROL_XBARPORT5_1
+#define STALL_CONTROL_XBARPORT5_1__StallVC0RspEn__SHIFT 0x0
+#define STALL_CONTROL_XBARPORT5_1__StallVC1RspEn__SHIFT 0x4
+#define STALL_CONTROL_XBARPORT5_1__StallVC2RspEn__SHIFT 0x8
+#define STALL_CONTROL_XBARPORT5_1__StallVC3RspEn__SHIFT 0xc
+#define STALL_CONTROL_XBARPORT5_1__StallVC4RspEn__SHIFT 0x10
+#define STALL_CONTROL_XBARPORT5_1__StallVC5RspEn__SHIFT 0x14
+#define STALL_CONTROL_XBARPORT5_1__StallVC7RspEn__SHIFT 0x1c
+#define STALL_CONTROL_XBARPORT5_1__StallVC0RspEn_MASK 0x00000003L
+#define STALL_CONTROL_XBARPORT5_1__StallVC1RspEn_MASK 0x00000030L
+#define STALL_CONTROL_XBARPORT5_1__StallVC2RspEn_MASK 0x00000300L
+#define STALL_CONTROL_XBARPORT5_1__StallVC3RspEn_MASK 0x00003000L
+#define STALL_CONTROL_XBARPORT5_1__StallVC4RspEn_MASK 0x00030000L
+#define STALL_CONTROL_XBARPORT5_1__StallVC5RspEn_MASK 0x00300000L
+#define STALL_CONTROL_XBARPORT5_1__StallVC7RspEn_MASK 0x30000000L
+//NB_DRAM3_BASE
+#define NB_DRAM3_BASE__DRAM3_BASE__SHIFT 0x0
+#define NB_DRAM3_BASE__DRAM3_BASE_MASK 0x3FFFFFFFL
+//PSP_BASE_ADDR_LO
+#define PSP_BASE_ADDR_LO__PSP_MMIO_EN__SHIFT 0x0
+#define PSP_BASE_ADDR_LO__PSP_MMIO_LOCK__SHIFT 0x8
+#define PSP_BASE_ADDR_LO__PSP_BASE_ADDR_LO__SHIFT 0x14
+#define PSP_BASE_ADDR_LO__PSP_MMIO_EN_MASK 0x00000001L
+#define PSP_BASE_ADDR_LO__PSP_MMIO_LOCK_MASK 0x00000100L
+#define PSP_BASE_ADDR_LO__PSP_BASE_ADDR_LO_MASK 0xFFF00000L
+//PSP_BASE_ADDR_HI
+#define PSP_BASE_ADDR_HI__PSP_BASE_ADDR_HI__SHIFT 0x0
+#define PSP_BASE_ADDR_HI__PSP_BASE_ADDR_HI_MASK 0x0000FFFFL
+//SMU_BASE_ADDR_LO
+#define SMU_BASE_ADDR_LO__SMU_MMIO_EN__SHIFT 0x0
+#define SMU_BASE_ADDR_LO__SMU_MMIO_LOCK__SHIFT 0x1
+#define SMU_BASE_ADDR_LO__SMU_BASE_ADDR_LO__SHIFT 0x14
+#define SMU_BASE_ADDR_LO__SMU_MMIO_EN_MASK 0x00000001L
+#define SMU_BASE_ADDR_LO__SMU_MMIO_LOCK_MASK 0x00000002L
+#define SMU_BASE_ADDR_LO__SMU_BASE_ADDR_LO_MASK 0xFFF00000L
+//SMU_BASE_ADDR_HI
+#define SMU_BASE_ADDR_HI__SMU_BASE_ADDR_HI__SHIFT 0x0
+#define SMU_BASE_ADDR_HI__SMU_BASE_ADDR_HI_MASK 0x0000FFFFL
+//SCRATCH_4
+#define SCRATCH_4__SCRATCH_4__SHIFT 0x0
+#define SCRATCH_4__SCRATCH_4_MASK 0xFFFFFFFFL
+//SCRATCH_5
+#define SCRATCH_5__SCRATCH_5__SHIFT 0x0
+#define SCRATCH_5__SCRATCH_5_MASK 0xFFFFFFFFL
+//SMU_BLOCK_CPU
+#define SMU_BLOCK_CPU__SMUBlockCPU_Valid__SHIFT 0x0
+#define SMU_BLOCK_CPU__SMUBlockCPU_Valid_MASK 0x00000001L
+//SMU_BLOCK_CPU_STATUS
+#define SMU_BLOCK_CPU_STATUS__SMUBlockCPU_Status__SHIFT 0x0
+#define SMU_BLOCK_CPU_STATUS__SMUBlockCPU_Status_MASK 0x00000001L
+//TRAP_STATUS
+#define TRAP_STATUS__TrapReqValid__SHIFT 0x0
+#define TRAP_STATUS__TrapNumber__SHIFT 0x8
+#define TRAP_STATUS__TrapS2Vld__SHIFT 0xc
+#define TRAP_STATUS__TrapS2Number__SHIFT 0x10
+#define TRAP_STATUS__TrapReqValid_MASK 0x00000001L
+#define TRAP_STATUS__TrapNumber_MASK 0x00000F00L
+#define TRAP_STATUS__TrapS2Vld_MASK 0x00001000L
+#define TRAP_STATUS__TrapS2Number_MASK 0x03FF0000L
+//TRAP_REQUEST0
+#define TRAP_REQUEST0__TrapReqAddrLo__SHIFT 0x2
+#define TRAP_REQUEST0__TrapReqAddrLo_MASK 0xFFFFFFFCL
+//TRAP_REQUEST1
+#define TRAP_REQUEST1__TrapReqAddrHi__SHIFT 0x0
+#define TRAP_REQUEST1__TrapReqAddrHi_MASK 0xFFFFFFFFL
+//TRAP_REQUEST2
+#define TRAP_REQUEST2__TrapReqCmd__SHIFT 0x0
+#define TRAP_REQUEST2__TrapAttr__SHIFT 0x8
+#define TRAP_REQUEST2__TrapReqLen__SHIFT 0x10
+#define TRAP_REQUEST2__TrapReqCmd_MASK 0x0000003FL
+#define TRAP_REQUEST2__TrapAttr_MASK 0x0000FF00L
+#define TRAP_REQUEST2__TrapReqLen_MASK 0x003F0000L
+//TRAP_REQUEST3
+#define TRAP_REQUEST3__TrapReqVC__SHIFT 0x0
+#define TRAP_REQUEST3__TrapReqBlockLevel__SHIFT 0x4
+#define TRAP_REQUEST3__TrapReqChain__SHIFT 0x6
+#define TRAP_REQUEST3__TrapReqIO__SHIFT 0x7
+#define TRAP_REQUEST3__TrapReqPassPW__SHIFT 0x8
+#define TRAP_REQUEST3__TrapReqRspPassPW__SHIFT 0x9
+#define TRAP_REQUEST3__TrapReqUnitID__SHIFT 0x10
+#define TRAP_REQUEST3__TrapReqVC_MASK 0x00000007L
+#define TRAP_REQUEST3__TrapReqBlockLevel_MASK 0x00000030L
+#define TRAP_REQUEST3__TrapReqChain_MASK 0x00000040L
+#define TRAP_REQUEST3__TrapReqIO_MASK 0x00000080L
+#define TRAP_REQUEST3__TrapReqPassPW_MASK 0x00000100L
+#define TRAP_REQUEST3__TrapReqRspPassPW_MASK 0x00000200L
+#define TRAP_REQUEST3__TrapReqUnitID_MASK 0x003F0000L
+//TRAP_REQUEST4
+#define TRAP_REQUEST4__TrapReqSecLevel__SHIFT 0x0
+#define TRAP_REQUEST4__TrapReqSecLevel_MASK 0x0000000FL
+//TRAP_REQUEST5
+#define TRAP_REQUEST5__TrapReqDataVC__SHIFT 0x0
+#define TRAP_REQUEST5__TrapReqDataErr__SHIFT 0x4
+#define TRAP_REQUEST5__TrapReqDataParity__SHIFT 0x8
+#define TRAP_REQUEST5__TrapReqDataVC_MASK 0x00000007L
+#define TRAP_REQUEST5__TrapReqDataErr_MASK 0x00000010L
+#define TRAP_REQUEST5__TrapReqDataParity_MASK 0x0000FF00L
+//TRAP_REQUEST_DATASTRB0
+#define TRAP_REQUEST_DATASTRB0__TrapReqDataBytEn0__SHIFT 0x0
+#define TRAP_REQUEST_DATASTRB0__TrapReqDataBytEn0_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATASTRB1
+#define TRAP_REQUEST_DATASTRB1__TrapReqDataBytEn1__SHIFT 0x0
+#define TRAP_REQUEST_DATASTRB1__TrapReqDataBytEn1_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA0
+#define TRAP_REQUEST_DATA0__TrapReqData0__SHIFT 0x0
+#define TRAP_REQUEST_DATA0__TrapReqData0_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA1
+#define TRAP_REQUEST_DATA1__TrapReqData1__SHIFT 0x0
+#define TRAP_REQUEST_DATA1__TrapReqData1_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA2
+#define TRAP_REQUEST_DATA2__TrapReqData2__SHIFT 0x0
+#define TRAP_REQUEST_DATA2__TrapReqData2_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA3
+#define TRAP_REQUEST_DATA3__TrapReqData3__SHIFT 0x0
+#define TRAP_REQUEST_DATA3__TrapReqData3_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA4
+#define TRAP_REQUEST_DATA4__TrapReqData4__SHIFT 0x0
+#define TRAP_REQUEST_DATA4__TrapReqData4_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA5
+#define TRAP_REQUEST_DATA5__TrapReqData5__SHIFT 0x0
+#define TRAP_REQUEST_DATA5__TrapReqData5_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA6
+#define TRAP_REQUEST_DATA6__TrapReqData6__SHIFT 0x0
+#define TRAP_REQUEST_DATA6__TrapReqData6_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA7
+#define TRAP_REQUEST_DATA7__TrapReqData7__SHIFT 0x0
+#define TRAP_REQUEST_DATA7__TrapReqData7_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA8
+#define TRAP_REQUEST_DATA8__TrapReqData8__SHIFT 0x0
+#define TRAP_REQUEST_DATA8__TrapReqData8_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA9
+#define TRAP_REQUEST_DATA9__TrapReqData9__SHIFT 0x0
+#define TRAP_REQUEST_DATA9__TrapReqData9_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA10
+#define TRAP_REQUEST_DATA10__TrapReqData10__SHIFT 0x0
+#define TRAP_REQUEST_DATA10__TrapReqData10_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA11
+#define TRAP_REQUEST_DATA11__TrapReqData11__SHIFT 0x0
+#define TRAP_REQUEST_DATA11__TrapReqData11_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA12
+#define TRAP_REQUEST_DATA12__TrapReqData12__SHIFT 0x0
+#define TRAP_REQUEST_DATA12__TrapReqData12_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA13
+#define TRAP_REQUEST_DATA13__TrapReqData13__SHIFT 0x0
+#define TRAP_REQUEST_DATA13__TrapReqData13_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA14
+#define TRAP_REQUEST_DATA14__TrapReqData14__SHIFT 0x0
+#define TRAP_REQUEST_DATA14__TrapReqData14_MASK 0xFFFFFFFFL
+//TRAP_REQUEST_DATA15
+#define TRAP_REQUEST_DATA15__TrapReqData15__SHIFT 0x0
+#define TRAP_REQUEST_DATA15__TrapReqData15_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_CONTROL
+#define TRAP_RESPONSE_CONTROL__TrapRspTrigger__SHIFT 0x0
+#define TRAP_RESPONSE_CONTROL__TrapRspReqPassthru__SHIFT 0x1
+#define TRAP_RESPONSE_CONTROL__TrapRspTrigger_MASK 0x00000001L
+#define TRAP_RESPONSE_CONTROL__TrapRspReqPassthru_MASK 0x00000002L
+//TRAP_RESPONSE0
+#define TRAP_RESPONSE0__TrapRspPassPW__SHIFT 0x0
+#define TRAP_RESPONSE0__TrapRspStatus__SHIFT 0x4
+#define TRAP_RESPONSE0__TrapRspDataStatus__SHIFT 0x10
+#define TRAP_RESPONSE0__TrapRspPassPW_MASK 0x00000001L
+#define TRAP_RESPONSE0__TrapRspStatus_MASK 0x000000F0L
+#define TRAP_RESPONSE0__TrapRspDataStatus_MASK 0x00FF0000L
+//TRAP_RESPONSE_DATA0
+#define TRAP_RESPONSE_DATA0__TrapRdRspData0__SHIFT 0x0
+#define TRAP_RESPONSE_DATA0__TrapRdRspData0_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA1
+#define TRAP_RESPONSE_DATA1__TrapRdRspData1__SHIFT 0x0
+#define TRAP_RESPONSE_DATA1__TrapRdRspData1_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA2
+#define TRAP_RESPONSE_DATA2__TrapRdRspData2__SHIFT 0x0
+#define TRAP_RESPONSE_DATA2__TrapRdRspData2_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA3
+#define TRAP_RESPONSE_DATA3__TrapRdRspData3__SHIFT 0x0
+#define TRAP_RESPONSE_DATA3__TrapRdRspData3_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA4
+#define TRAP_RESPONSE_DATA4__TrapRdRspData4__SHIFT 0x0
+#define TRAP_RESPONSE_DATA4__TrapRdRspData4_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA5
+#define TRAP_RESPONSE_DATA5__TrapRdRspData5__SHIFT 0x0
+#define TRAP_RESPONSE_DATA5__TrapRdRspData5_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA6
+#define TRAP_RESPONSE_DATA6__TrapRdRspData6__SHIFT 0x0
+#define TRAP_RESPONSE_DATA6__TrapRdRspData6_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA7
+#define TRAP_RESPONSE_DATA7__TrapRdRspData7__SHIFT 0x0
+#define TRAP_RESPONSE_DATA7__TrapRdRspData7_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA8
+#define TRAP_RESPONSE_DATA8__TrapRdRspData8__SHIFT 0x0
+#define TRAP_RESPONSE_DATA8__TrapRdRspData8_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA9
+#define TRAP_RESPONSE_DATA9__TrapRdRspData9__SHIFT 0x0
+#define TRAP_RESPONSE_DATA9__TrapRdRspData9_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA10
+#define TRAP_RESPONSE_DATA10__TrapRdRspData10__SHIFT 0x0
+#define TRAP_RESPONSE_DATA10__TrapRdRspData10_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA11
+#define TRAP_RESPONSE_DATA11__TrapRdRspData11__SHIFT 0x0
+#define TRAP_RESPONSE_DATA11__TrapRdRspData11_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA12
+#define TRAP_RESPONSE_DATA12__TrapRdRspData12__SHIFT 0x0
+#define TRAP_RESPONSE_DATA12__TrapRdRspData12_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA13
+#define TRAP_RESPONSE_DATA13__TrapRdRspData13__SHIFT 0x0
+#define TRAP_RESPONSE_DATA13__TrapRdRspData13_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA14
+#define TRAP_RESPONSE_DATA14__TrapRdRspData14__SHIFT 0x0
+#define TRAP_RESPONSE_DATA14__TrapRdRspData14_MASK 0xFFFFFFFFL
+//TRAP_RESPONSE_DATA15
+#define TRAP_RESPONSE_DATA15__TrapRdRspData15__SHIFT 0x0
+#define TRAP_RESPONSE_DATA15__TrapRdRspData15_MASK 0xFFFFFFFFL
+//TRAP0_CONTROL0
+#define TRAP0_CONTROL0__Trap0En__SHIFT 0x0
+#define TRAP0_CONTROL0__Trap0SMUIntr__SHIFT 0x3
+#define TRAP0_CONTROL0__Trap0Stage2Ptr__SHIFT 0xe
+#define TRAP0_CONTROL0__Trap0CrossTrigger__SHIFT 0x18
+#define TRAP0_CONTROL0__Trap0Stage2En__SHIFT 0x1f
+#define TRAP0_CONTROL0__Trap0En_MASK 0x00000001L
+#define TRAP0_CONTROL0__Trap0SMUIntr_MASK 0x00000008L
+#define TRAP0_CONTROL0__Trap0Stage2Ptr_MASK 0x00FFC000L
+#define TRAP0_CONTROL0__Trap0CrossTrigger_MASK 0x0F000000L
+#define TRAP0_CONTROL0__Trap0Stage2En_MASK 0x80000000L
+//TRAP0_ADDRESS_LO
+#define TRAP0_ADDRESS_LO__Trap0AddrLo__SHIFT 0x2
+#define TRAP0_ADDRESS_LO__Trap0AddrLo_MASK 0xFFFFFFFCL
+//TRAP0_ADDRESS_HI
+#define TRAP0_ADDRESS_HI__Trap0AddrHi__SHIFT 0x0
+#define TRAP0_ADDRESS_HI__Trap0AddrHi_MASK 0xFFFFFFFFL
+//TRAP0_COMMAND
+#define TRAP0_COMMAND__Trap0Cmd0__SHIFT 0x0
+#define TRAP0_COMMAND__Trap0Cmd1__SHIFT 0x8
+#define TRAP0_COMMAND__Trap0Cmd0_MASK 0x0000003FL
+#define TRAP0_COMMAND__Trap0Cmd1_MASK 0x00003F00L
+//TRAP0_ADDRESS_LO_MASK
+#define TRAP0_ADDRESS_LO_MASK__Trap0AddrLoMask__SHIFT 0x2
+#define TRAP0_ADDRESS_LO_MASK__Trap0AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP0_ADDRESS_HI_MASK
+#define TRAP0_ADDRESS_HI_MASK__Trap0AddrHiMask__SHIFT 0x0
+#define TRAP0_ADDRESS_HI_MASK__Trap0AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP0_COMMAND_MASK
+#define TRAP0_COMMAND_MASK__Trap0Cmd0Mask__SHIFT 0x0
+#define TRAP0_COMMAND_MASK__Trap0Cmd1Mask__SHIFT 0x8
+#define TRAP0_COMMAND_MASK__Trap0Cmd0Mask_MASK 0x0000003FL
+#define TRAP0_COMMAND_MASK__Trap0Cmd1Mask_MASK 0x00003F00L
+//TRAP1_CONTROL0
+#define TRAP1_CONTROL0__Trap1En__SHIFT 0x0
+#define TRAP1_CONTROL0__Trap1SMUIntr__SHIFT 0x3
+#define TRAP1_CONTROL0__Trap1Stage2Ptr__SHIFT 0xe
+#define TRAP1_CONTROL0__Trap1CrossTrigger__SHIFT 0x18
+#define TRAP1_CONTROL0__Trap1Stage2En__SHIFT 0x1f
+#define TRAP1_CONTROL0__Trap1En_MASK 0x00000001L
+#define TRAP1_CONTROL0__Trap1SMUIntr_MASK 0x00000008L
+#define TRAP1_CONTROL0__Trap1Stage2Ptr_MASK 0x00FFC000L
+#define TRAP1_CONTROL0__Trap1CrossTrigger_MASK 0x0F000000L
+#define TRAP1_CONTROL0__Trap1Stage2En_MASK 0x80000000L
+//TRAP1_ADDRESS_LO
+#define TRAP1_ADDRESS_LO__Trap1AddrLo__SHIFT 0x2
+#define TRAP1_ADDRESS_LO__Trap1AddrLo_MASK 0xFFFFFFFCL
+//TRAP1_ADDRESS_HI
+#define TRAP1_ADDRESS_HI__Trap1AddrHi__SHIFT 0x0
+#define TRAP1_ADDRESS_HI__Trap1AddrHi_MASK 0xFFFFFFFFL
+//TRAP1_COMMAND
+#define TRAP1_COMMAND__Trap1Cmd0__SHIFT 0x0
+#define TRAP1_COMMAND__Trap1Cmd1__SHIFT 0x8
+#define TRAP1_COMMAND__Trap1Cmd0_MASK 0x0000003FL
+#define TRAP1_COMMAND__Trap1Cmd1_MASK 0x00003F00L
+//TRAP1_ADDRESS_LO_MASK
+#define TRAP1_ADDRESS_LO_MASK__Trap1AddrLoMask__SHIFT 0x2
+#define TRAP1_ADDRESS_LO_MASK__Trap1AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP1_ADDRESS_HI_MASK
+#define TRAP1_ADDRESS_HI_MASK__Trap1AddrHiMask__SHIFT 0x0
+#define TRAP1_ADDRESS_HI_MASK__Trap1AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP1_COMMAND_MASK
+#define TRAP1_COMMAND_MASK__Trap1Cmd0Mask__SHIFT 0x0
+#define TRAP1_COMMAND_MASK__Trap1Cmd1Mask__SHIFT 0x8
+#define TRAP1_COMMAND_MASK__Trap1Cmd0Mask_MASK 0x0000003FL
+#define TRAP1_COMMAND_MASK__Trap1Cmd1Mask_MASK 0x00003F00L
+//TRAP2_CONTROL0
+#define TRAP2_CONTROL0__Trap2En__SHIFT 0x0
+#define TRAP2_CONTROL0__Trap2SMUIntr__SHIFT 0x3
+#define TRAP2_CONTROL0__Trap2Stage2Ptr__SHIFT 0xe
+#define TRAP2_CONTROL0__Trap2CrossTrigger__SHIFT 0x18
+#define TRAP2_CONTROL0__Trap2Stage2En__SHIFT 0x1f
+#define TRAP2_CONTROL0__Trap2En_MASK 0x00000001L
+#define TRAP2_CONTROL0__Trap2SMUIntr_MASK 0x00000008L
+#define TRAP2_CONTROL0__Trap2Stage2Ptr_MASK 0x00FFC000L
+#define TRAP2_CONTROL0__Trap2CrossTrigger_MASK 0x0F000000L
+#define TRAP2_CONTROL0__Trap2Stage2En_MASK 0x80000000L
+//TRAP2_ADDRESS_LO
+#define TRAP2_ADDRESS_LO__Trap2AddrLo__SHIFT 0x2
+#define TRAP2_ADDRESS_LO__Trap2AddrLo_MASK 0xFFFFFFFCL
+//TRAP2_ADDRESS_HI
+#define TRAP2_ADDRESS_HI__Trap2AddrHi__SHIFT 0x0
+#define TRAP2_ADDRESS_HI__Trap2AddrHi_MASK 0xFFFFFFFFL
+//TRAP2_COMMAND
+#define TRAP2_COMMAND__Trap2Cmd0__SHIFT 0x0
+#define TRAP2_COMMAND__Trap2Cmd1__SHIFT 0x8
+#define TRAP2_COMMAND__Trap2Cmd0_MASK 0x0000003FL
+#define TRAP2_COMMAND__Trap2Cmd1_MASK 0x00003F00L
+//TRAP2_ADDRESS_LO_MASK
+#define TRAP2_ADDRESS_LO_MASK__Trap2AddrLoMask__SHIFT 0x2
+#define TRAP2_ADDRESS_LO_MASK__Trap2AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP2_ADDRESS_HI_MASK
+#define TRAP2_ADDRESS_HI_MASK__Trap2AddrHiMask__SHIFT 0x0
+#define TRAP2_ADDRESS_HI_MASK__Trap2AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP2_COMMAND_MASK
+#define TRAP2_COMMAND_MASK__Trap2Cmd0Mask__SHIFT 0x0
+#define TRAP2_COMMAND_MASK__Trap2Cmd1Mask__SHIFT 0x8
+#define TRAP2_COMMAND_MASK__Trap2Cmd0Mask_MASK 0x0000003FL
+#define TRAP2_COMMAND_MASK__Trap2Cmd1Mask_MASK 0x00003F00L
+//TRAP3_CONTROL0
+#define TRAP3_CONTROL0__Trap3En__SHIFT 0x0
+#define TRAP3_CONTROL0__Trap3SMUIntr__SHIFT 0x3
+#define TRAP3_CONTROL0__Trap3Stage2Ptr__SHIFT 0xe
+#define TRAP3_CONTROL0__Trap3CrossTrigger__SHIFT 0x18
+#define TRAP3_CONTROL0__Trap3Stage2En__SHIFT 0x1f
+#define TRAP3_CONTROL0__Trap3En_MASK 0x00000001L
+#define TRAP3_CONTROL0__Trap3SMUIntr_MASK 0x00000008L
+#define TRAP3_CONTROL0__Trap3Stage2Ptr_MASK 0x00FFC000L
+#define TRAP3_CONTROL0__Trap3CrossTrigger_MASK 0x0F000000L
+#define TRAP3_CONTROL0__Trap3Stage2En_MASK 0x80000000L
+//TRAP3_ADDRESS_LO
+#define TRAP3_ADDRESS_LO__Trap3AddrLo__SHIFT 0x2
+#define TRAP3_ADDRESS_LO__Trap3AddrLo_MASK 0xFFFFFFFCL
+//TRAP3_ADDRESS_HI
+#define TRAP3_ADDRESS_HI__Trap3AddrHi__SHIFT 0x0
+#define TRAP3_ADDRESS_HI__Trap3AddrHi_MASK 0xFFFFFFFFL
+//TRAP3_COMMAND
+#define TRAP3_COMMAND__Trap3Cmd0__SHIFT 0x0
+#define TRAP3_COMMAND__Trap3Cmd1__SHIFT 0x8
+#define TRAP3_COMMAND__Trap3Cmd0_MASK 0x0000003FL
+#define TRAP3_COMMAND__Trap3Cmd1_MASK 0x00003F00L
+//TRAP3_ADDRESS_LO_MASK
+#define TRAP3_ADDRESS_LO_MASK__Trap3AddrLoMask__SHIFT 0x2
+#define TRAP3_ADDRESS_LO_MASK__Trap3AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP3_ADDRESS_HI_MASK
+#define TRAP3_ADDRESS_HI_MASK__Trap3AddrHiMask__SHIFT 0x0
+#define TRAP3_ADDRESS_HI_MASK__Trap3AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP3_COMMAND_MASK
+#define TRAP3_COMMAND_MASK__Trap3Cmd0Mask__SHIFT 0x0
+#define TRAP3_COMMAND_MASK__Trap3Cmd1Mask__SHIFT 0x8
+#define TRAP3_COMMAND_MASK__Trap3Cmd0Mask_MASK 0x0000003FL
+#define TRAP3_COMMAND_MASK__Trap3Cmd1Mask_MASK 0x00003F00L
+//TRAP4_CONTROL0
+#define TRAP4_CONTROL0__Trap4En__SHIFT 0x0
+#define TRAP4_CONTROL0__Trap4SMUIntr__SHIFT 0x3
+#define TRAP4_CONTROL0__Trap4Stage2Ptr__SHIFT 0xe
+#define TRAP4_CONTROL0__Trap4CrossTrigger__SHIFT 0x18
+#define TRAP4_CONTROL0__Trap4Stage2En__SHIFT 0x1f
+#define TRAP4_CONTROL0__Trap4En_MASK 0x00000001L
+#define TRAP4_CONTROL0__Trap4SMUIntr_MASK 0x00000008L
+#define TRAP4_CONTROL0__Trap4Stage2Ptr_MASK 0x00FFC000L
+#define TRAP4_CONTROL0__Trap4CrossTrigger_MASK 0x0F000000L
+#define TRAP4_CONTROL0__Trap4Stage2En_MASK 0x80000000L
+//TRAP4_ADDRESS_LO
+#define TRAP4_ADDRESS_LO__Trap4AddrLo__SHIFT 0x2
+#define TRAP4_ADDRESS_LO__Trap4AddrLo_MASK 0xFFFFFFFCL
+//TRAP4_ADDRESS_HI
+#define TRAP4_ADDRESS_HI__Trap4AddrHi__SHIFT 0x0
+#define TRAP4_ADDRESS_HI__Trap4AddrHi_MASK 0xFFFFFFFFL
+//TRAP4_COMMAND
+#define TRAP4_COMMAND__Trap4Cmd0__SHIFT 0x0
+#define TRAP4_COMMAND__Trap4Cmd1__SHIFT 0x8
+#define TRAP4_COMMAND__Trap4Cmd0_MASK 0x0000003FL
+#define TRAP4_COMMAND__Trap4Cmd1_MASK 0x00003F00L
+//TRAP4_ADDRESS_LO_MASK
+#define TRAP4_ADDRESS_LO_MASK__Trap4AddrLoMask__SHIFT 0x2
+#define TRAP4_ADDRESS_LO_MASK__Trap4AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP4_ADDRESS_HI_MASK
+#define TRAP4_ADDRESS_HI_MASK__Trap4AddrHiMask__SHIFT 0x0
+#define TRAP4_ADDRESS_HI_MASK__Trap4AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP4_COMMAND_MASK
+#define TRAP4_COMMAND_MASK__Trap4Cmd0Mask__SHIFT 0x0
+#define TRAP4_COMMAND_MASK__Trap4Cmd1Mask__SHIFT 0x8
+#define TRAP4_COMMAND_MASK__Trap4Cmd0Mask_MASK 0x0000003FL
+#define TRAP4_COMMAND_MASK__Trap4Cmd1Mask_MASK 0x00003F00L
+//TRAP5_CONTROL0
+#define TRAP5_CONTROL0__Trap5En__SHIFT 0x0
+#define TRAP5_CONTROL0__Trap5SMUIntr__SHIFT 0x3
+#define TRAP5_CONTROL0__Trap5Stage2Ptr__SHIFT 0xe
+#define TRAP5_CONTROL0__Trap5CrossTrigger__SHIFT 0x18
+#define TRAP5_CONTROL0__Trap5Stage2En__SHIFT 0x1f
+#define TRAP5_CONTROL0__Trap5En_MASK 0x00000001L
+#define TRAP5_CONTROL0__Trap5SMUIntr_MASK 0x00000008L
+#define TRAP5_CONTROL0__Trap5Stage2Ptr_MASK 0x00FFC000L
+#define TRAP5_CONTROL0__Trap5CrossTrigger_MASK 0x0F000000L
+#define TRAP5_CONTROL0__Trap5Stage2En_MASK 0x80000000L
+//TRAP5_ADDRESS_LO
+#define TRAP5_ADDRESS_LO__Trap5AddrLo__SHIFT 0x2
+#define TRAP5_ADDRESS_LO__Trap5AddrLo_MASK 0xFFFFFFFCL
+//TRAP5_ADDRESS_HI
+#define TRAP5_ADDRESS_HI__Trap5AddrHi__SHIFT 0x0
+#define TRAP5_ADDRESS_HI__Trap5AddrHi_MASK 0xFFFFFFFFL
+//TRAP5_COMMAND
+#define TRAP5_COMMAND__Trap5Cmd0__SHIFT 0x0
+#define TRAP5_COMMAND__Trap5Cmd1__SHIFT 0x8
+#define TRAP5_COMMAND__Trap5Cmd0_MASK 0x0000003FL
+#define TRAP5_COMMAND__Trap5Cmd1_MASK 0x00003F00L
+//TRAP5_ADDRESS_LO_MASK
+#define TRAP5_ADDRESS_LO_MASK__Trap5AddrLoMask__SHIFT 0x2
+#define TRAP5_ADDRESS_LO_MASK__Trap5AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP5_ADDRESS_HI_MASK
+#define TRAP5_ADDRESS_HI_MASK__Trap5AddrHiMask__SHIFT 0x0
+#define TRAP5_ADDRESS_HI_MASK__Trap5AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP5_COMMAND_MASK
+#define TRAP5_COMMAND_MASK__Trap5Cmd0Mask__SHIFT 0x0
+#define TRAP5_COMMAND_MASK__Trap5Cmd1Mask__SHIFT 0x8
+#define TRAP5_COMMAND_MASK__Trap5Cmd0Mask_MASK 0x0000003FL
+#define TRAP5_COMMAND_MASK__Trap5Cmd1Mask_MASK 0x00003F00L
+//TRAP6_CONTROL0
+#define TRAP6_CONTROL0__Trap6En__SHIFT 0x0
+#define TRAP6_CONTROL0__Trap6SMUIntr__SHIFT 0x3
+#define TRAP6_CONTROL0__Trap6Stage2Ptr__SHIFT 0xe
+#define TRAP6_CONTROL0__Trap6CrossTrigger__SHIFT 0x18
+#define TRAP6_CONTROL0__Trap6Stage2En__SHIFT 0x1f
+#define TRAP6_CONTROL0__Trap6En_MASK 0x00000001L
+#define TRAP6_CONTROL0__Trap6SMUIntr_MASK 0x00000008L
+#define TRAP6_CONTROL0__Trap6Stage2Ptr_MASK 0x00FFC000L
+#define TRAP6_CONTROL0__Trap6CrossTrigger_MASK 0x0F000000L
+#define TRAP6_CONTROL0__Trap6Stage2En_MASK 0x80000000L
+//TRAP6_ADDRESS_LO
+#define TRAP6_ADDRESS_LO__Trap6AddrLo__SHIFT 0x2
+#define TRAP6_ADDRESS_LO__Trap6AddrLo_MASK 0xFFFFFFFCL
+//TRAP6_ADDRESS_HI
+#define TRAP6_ADDRESS_HI__Trap6AddrHi__SHIFT 0x0
+#define TRAP6_ADDRESS_HI__Trap6AddrHi_MASK 0xFFFFFFFFL
+//TRAP6_COMMAND
+#define TRAP6_COMMAND__Trap6Cmd0__SHIFT 0x0
+#define TRAP6_COMMAND__Trap6Cmd1__SHIFT 0x8
+#define TRAP6_COMMAND__Trap6Cmd0_MASK 0x0000003FL
+#define TRAP6_COMMAND__Trap6Cmd1_MASK 0x00003F00L
+//TRAP6_ADDRESS_LO_MASK
+#define TRAP6_ADDRESS_LO_MASK__Trap6AddrLoMask__SHIFT 0x2
+#define TRAP6_ADDRESS_LO_MASK__Trap6AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP6_ADDRESS_HI_MASK
+#define TRAP6_ADDRESS_HI_MASK__Trap6AddrHiMask__SHIFT 0x0
+#define TRAP6_ADDRESS_HI_MASK__Trap6AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP6_COMMAND_MASK
+#define TRAP6_COMMAND_MASK__Trap6Cmd0Mask__SHIFT 0x0
+#define TRAP6_COMMAND_MASK__Trap6Cmd1Mask__SHIFT 0x8
+#define TRAP6_COMMAND_MASK__Trap6Cmd0Mask_MASK 0x0000003FL
+#define TRAP6_COMMAND_MASK__Trap6Cmd1Mask_MASK 0x00003F00L
+//TRAP7_CONTROL0
+#define TRAP7_CONTROL0__Trap7En__SHIFT 0x0
+#define TRAP7_CONTROL0__Trap7SMUIntr__SHIFT 0x3
+#define TRAP7_CONTROL0__Trap7Stage2Ptr__SHIFT 0xe
+#define TRAP7_CONTROL0__Trap7CrossTrigger__SHIFT 0x18
+#define TRAP7_CONTROL0__Trap7Stage2En__SHIFT 0x1f
+#define TRAP7_CONTROL0__Trap7En_MASK 0x00000001L
+#define TRAP7_CONTROL0__Trap7SMUIntr_MASK 0x00000008L
+#define TRAP7_CONTROL0__Trap7Stage2Ptr_MASK 0x00FFC000L
+#define TRAP7_CONTROL0__Trap7CrossTrigger_MASK 0x0F000000L
+#define TRAP7_CONTROL0__Trap7Stage2En_MASK 0x80000000L
+//TRAP7_ADDRESS_LO
+#define TRAP7_ADDRESS_LO__Trap7AddrLo__SHIFT 0x2
+#define TRAP7_ADDRESS_LO__Trap7AddrLo_MASK 0xFFFFFFFCL
+//TRAP7_ADDRESS_HI
+#define TRAP7_ADDRESS_HI__Trap7AddrHi__SHIFT 0x0
+#define TRAP7_ADDRESS_HI__Trap7AddrHi_MASK 0xFFFFFFFFL
+//TRAP7_COMMAND
+#define TRAP7_COMMAND__Trap7Cmd0__SHIFT 0x0
+#define TRAP7_COMMAND__Trap7Cmd1__SHIFT 0x8
+#define TRAP7_COMMAND__Trap7Cmd0_MASK 0x0000003FL
+#define TRAP7_COMMAND__Trap7Cmd1_MASK 0x00003F00L
+//TRAP7_ADDRESS_LO_MASK
+#define TRAP7_ADDRESS_LO_MASK__Trap7AddrLoMask__SHIFT 0x2
+#define TRAP7_ADDRESS_LO_MASK__Trap7AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP7_ADDRESS_HI_MASK
+#define TRAP7_ADDRESS_HI_MASK__Trap7AddrHiMask__SHIFT 0x0
+#define TRAP7_ADDRESS_HI_MASK__Trap7AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP7_COMMAND_MASK
+#define TRAP7_COMMAND_MASK__Trap7Cmd0Mask__SHIFT 0x0
+#define TRAP7_COMMAND_MASK__Trap7Cmd1Mask__SHIFT 0x8
+#define TRAP7_COMMAND_MASK__Trap7Cmd0Mask_MASK 0x0000003FL
+#define TRAP7_COMMAND_MASK__Trap7Cmd1Mask_MASK 0x00003F00L
+//TRAP8_CONTROL0
+#define TRAP8_CONTROL0__Trap8En__SHIFT 0x0
+#define TRAP8_CONTROL0__Trap8SMUIntr__SHIFT 0x3
+#define TRAP8_CONTROL0__Trap8Stage2Ptr__SHIFT 0xe
+#define TRAP8_CONTROL0__Trap8CrossTrigger__SHIFT 0x18
+#define TRAP8_CONTROL0__Trap8Stage2En__SHIFT 0x1f
+#define TRAP8_CONTROL0__Trap8En_MASK 0x00000001L
+#define TRAP8_CONTROL0__Trap8SMUIntr_MASK 0x00000008L
+#define TRAP8_CONTROL0__Trap8Stage2Ptr_MASK 0x00FFC000L
+#define TRAP8_CONTROL0__Trap8CrossTrigger_MASK 0x0F000000L
+#define TRAP8_CONTROL0__Trap8Stage2En_MASK 0x80000000L
+//TRAP8_ADDRESS_LO
+#define TRAP8_ADDRESS_LO__Trap8AddrLo__SHIFT 0x2
+#define TRAP8_ADDRESS_LO__Trap8AddrLo_MASK 0xFFFFFFFCL
+//TRAP8_ADDRESS_HI
+#define TRAP8_ADDRESS_HI__Trap8AddrHi__SHIFT 0x0
+#define TRAP8_ADDRESS_HI__Trap8AddrHi_MASK 0xFFFFFFFFL
+//TRAP8_COMMAND
+#define TRAP8_COMMAND__Trap8Cmd0__SHIFT 0x0
+#define TRAP8_COMMAND__Trap8Cmd1__SHIFT 0x8
+#define TRAP8_COMMAND__Trap8Cmd0_MASK 0x0000003FL
+#define TRAP8_COMMAND__Trap8Cmd1_MASK 0x00003F00L
+//TRAP8_ADDRESS_LO_MASK
+#define TRAP8_ADDRESS_LO_MASK__Trap8AddrLoMask__SHIFT 0x2
+#define TRAP8_ADDRESS_LO_MASK__Trap8AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP8_ADDRESS_HI_MASK
+#define TRAP8_ADDRESS_HI_MASK__Trap8AddrHiMask__SHIFT 0x0
+#define TRAP8_ADDRESS_HI_MASK__Trap8AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP8_COMMAND_MASK
+#define TRAP8_COMMAND_MASK__Trap8Cmd0Mask__SHIFT 0x0
+#define TRAP8_COMMAND_MASK__Trap8Cmd1Mask__SHIFT 0x8
+#define TRAP8_COMMAND_MASK__Trap8Cmd0Mask_MASK 0x0000003FL
+#define TRAP8_COMMAND_MASK__Trap8Cmd1Mask_MASK 0x00003F00L
+//TRAP9_CONTROL0
+#define TRAP9_CONTROL0__Trap9En__SHIFT 0x0
+#define TRAP9_CONTROL0__Trap9SMUIntr__SHIFT 0x3
+#define TRAP9_CONTROL0__Trap9Stage2Ptr__SHIFT 0xe
+#define TRAP9_CONTROL0__Trap9CrossTrigger__SHIFT 0x18
+#define TRAP9_CONTROL0__Trap9Stage2En__SHIFT 0x1f
+#define TRAP9_CONTROL0__Trap9En_MASK 0x00000001L
+#define TRAP9_CONTROL0__Trap9SMUIntr_MASK 0x00000008L
+#define TRAP9_CONTROL0__Trap9Stage2Ptr_MASK 0x00FFC000L
+#define TRAP9_CONTROL0__Trap9CrossTrigger_MASK 0x0F000000L
+#define TRAP9_CONTROL0__Trap9Stage2En_MASK 0x80000000L
+//TRAP9_ADDRESS_LO
+#define TRAP9_ADDRESS_LO__Trap9AddrLo__SHIFT 0x2
+#define TRAP9_ADDRESS_LO__Trap9AddrLo_MASK 0xFFFFFFFCL
+//TRAP9_ADDRESS_HI
+#define TRAP9_ADDRESS_HI__Trap9AddrHi__SHIFT 0x0
+#define TRAP9_ADDRESS_HI__Trap9AddrHi_MASK 0xFFFFFFFFL
+//TRAP9_COMMAND
+#define TRAP9_COMMAND__Trap9Cmd0__SHIFT 0x0
+#define TRAP9_COMMAND__Trap9Cmd1__SHIFT 0x8
+#define TRAP9_COMMAND__Trap9Cmd0_MASK 0x0000003FL
+#define TRAP9_COMMAND__Trap9Cmd1_MASK 0x00003F00L
+//TRAP9_ADDRESS_LO_MASK
+#define TRAP9_ADDRESS_LO_MASK__Trap9AddrLoMask__SHIFT 0x2
+#define TRAP9_ADDRESS_LO_MASK__Trap9AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP9_ADDRESS_HI_MASK
+#define TRAP9_ADDRESS_HI_MASK__Trap9AddrHiMask__SHIFT 0x0
+#define TRAP9_ADDRESS_HI_MASK__Trap9AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP9_COMMAND_MASK
+#define TRAP9_COMMAND_MASK__Trap9Cmd0Mask__SHIFT 0x0
+#define TRAP9_COMMAND_MASK__Trap9Cmd1Mask__SHIFT 0x8
+#define TRAP9_COMMAND_MASK__Trap9Cmd0Mask_MASK 0x0000003FL
+#define TRAP9_COMMAND_MASK__Trap9Cmd1Mask_MASK 0x00003F00L
+//TRAP10_CONTROL0
+#define TRAP10_CONTROL0__Trap10En__SHIFT 0x0
+#define TRAP10_CONTROL0__Trap10SMUIntr__SHIFT 0x3
+#define TRAP10_CONTROL0__Trap10Stage2Ptr__SHIFT 0xe
+#define TRAP10_CONTROL0__Trap10CrossTrigger__SHIFT 0x18
+#define TRAP10_CONTROL0__Trap10Stage2En__SHIFT 0x1f
+#define TRAP10_CONTROL0__Trap10En_MASK 0x00000001L
+#define TRAP10_CONTROL0__Trap10SMUIntr_MASK 0x00000008L
+#define TRAP10_CONTROL0__Trap10Stage2Ptr_MASK 0x00FFC000L
+#define TRAP10_CONTROL0__Trap10CrossTrigger_MASK 0x0F000000L
+#define TRAP10_CONTROL0__Trap10Stage2En_MASK 0x80000000L
+//TRAP10_ADDRESS_LO
+#define TRAP10_ADDRESS_LO__Trap10AddrLo__SHIFT 0x2
+#define TRAP10_ADDRESS_LO__Trap10AddrLo_MASK 0xFFFFFFFCL
+//TRAP10_ADDRESS_HI
+#define TRAP10_ADDRESS_HI__Trap10AddrHi__SHIFT 0x0
+#define TRAP10_ADDRESS_HI__Trap10AddrHi_MASK 0xFFFFFFFFL
+//TRAP10_COMMAND
+#define TRAP10_COMMAND__Trap10Cmd0__SHIFT 0x0
+#define TRAP10_COMMAND__Trap10Cmd1__SHIFT 0x8
+#define TRAP10_COMMAND__Trap10Cmd0_MASK 0x0000003FL
+#define TRAP10_COMMAND__Trap10Cmd1_MASK 0x00003F00L
+//TRAP10_ADDRESS_LO_MASK
+#define TRAP10_ADDRESS_LO_MASK__Trap10AddrLoMask__SHIFT 0x2
+#define TRAP10_ADDRESS_LO_MASK__Trap10AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP10_ADDRESS_HI_MASK
+#define TRAP10_ADDRESS_HI_MASK__Trap10AddrHiMask__SHIFT 0x0
+#define TRAP10_ADDRESS_HI_MASK__Trap10AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP10_COMMAND_MASK
+#define TRAP10_COMMAND_MASK__Trap10Cmd0Mask__SHIFT 0x0
+#define TRAP10_COMMAND_MASK__Trap10Cmd1Mask__SHIFT 0x8
+#define TRAP10_COMMAND_MASK__Trap10Cmd0Mask_MASK 0x0000003FL
+#define TRAP10_COMMAND_MASK__Trap10Cmd1Mask_MASK 0x00003F00L
+//TRAP11_CONTROL0
+#define TRAP11_CONTROL0__Trap11En__SHIFT 0x0
+#define TRAP11_CONTROL0__Trap11SMUIntr__SHIFT 0x3
+#define TRAP11_CONTROL0__Trap11Stage2Ptr__SHIFT 0xe
+#define TRAP11_CONTROL0__Trap11CrossTrigger__SHIFT 0x18
+#define TRAP11_CONTROL0__Trap11Stage2En__SHIFT 0x1f
+#define TRAP11_CONTROL0__Trap11En_MASK 0x00000001L
+#define TRAP11_CONTROL0__Trap11SMUIntr_MASK 0x00000008L
+#define TRAP11_CONTROL0__Trap11Stage2Ptr_MASK 0x00FFC000L
+#define TRAP11_CONTROL0__Trap11CrossTrigger_MASK 0x0F000000L
+#define TRAP11_CONTROL0__Trap11Stage2En_MASK 0x80000000L
+//TRAP11_ADDRESS_LO
+#define TRAP11_ADDRESS_LO__Trap11AddrLo__SHIFT 0x2
+#define TRAP11_ADDRESS_LO__Trap11AddrLo_MASK 0xFFFFFFFCL
+//TRAP11_ADDRESS_HI
+#define TRAP11_ADDRESS_HI__Trap11AddrHi__SHIFT 0x0
+#define TRAP11_ADDRESS_HI__Trap11AddrHi_MASK 0xFFFFFFFFL
+//TRAP11_COMMAND
+#define TRAP11_COMMAND__Trap11Cmd0__SHIFT 0x0
+#define TRAP11_COMMAND__Trap11Cmd1__SHIFT 0x8
+#define TRAP11_COMMAND__Trap11Cmd0_MASK 0x0000003FL
+#define TRAP11_COMMAND__Trap11Cmd1_MASK 0x00003F00L
+//TRAP11_ADDRESS_LO_MASK
+#define TRAP11_ADDRESS_LO_MASK__Trap11AddrLoMask__SHIFT 0x2
+#define TRAP11_ADDRESS_LO_MASK__Trap11AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP11_ADDRESS_HI_MASK
+#define TRAP11_ADDRESS_HI_MASK__Trap11AddrHiMask__SHIFT 0x0
+#define TRAP11_ADDRESS_HI_MASK__Trap11AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP11_COMMAND_MASK
+#define TRAP11_COMMAND_MASK__Trap11Cmd0Mask__SHIFT 0x0
+#define TRAP11_COMMAND_MASK__Trap11Cmd1Mask__SHIFT 0x8
+#define TRAP11_COMMAND_MASK__Trap11Cmd0Mask_MASK 0x0000003FL
+#define TRAP11_COMMAND_MASK__Trap11Cmd1Mask_MASK 0x00003F00L
+//TRAP12_CONTROL0
+#define TRAP12_CONTROL0__Trap12En__SHIFT 0x0
+#define TRAP12_CONTROL0__Trap12SMUIntr__SHIFT 0x3
+#define TRAP12_CONTROL0__Trap12Stage2Ptr__SHIFT 0xe
+#define TRAP12_CONTROL0__Trap12CrossTrigger__SHIFT 0x18
+#define TRAP12_CONTROL0__Trap12Stage2En__SHIFT 0x1f
+#define TRAP12_CONTROL0__Trap12En_MASK 0x00000001L
+#define TRAP12_CONTROL0__Trap12SMUIntr_MASK 0x00000008L
+#define TRAP12_CONTROL0__Trap12Stage2Ptr_MASK 0x00FFC000L
+#define TRAP12_CONTROL0__Trap12CrossTrigger_MASK 0x0F000000L
+#define TRAP12_CONTROL0__Trap12Stage2En_MASK 0x80000000L
+//TRAP12_ADDRESS_LO
+#define TRAP12_ADDRESS_LO__Trap12AddrLo__SHIFT 0x2
+#define TRAP12_ADDRESS_LO__Trap12AddrLo_MASK 0xFFFFFFFCL
+//TRAP12_ADDRESS_HI
+#define TRAP12_ADDRESS_HI__Trap12AddrHi__SHIFT 0x0
+#define TRAP12_ADDRESS_HI__Trap12AddrHi_MASK 0xFFFFFFFFL
+//TRAP12_COMMAND
+#define TRAP12_COMMAND__Trap12Cmd0__SHIFT 0x0
+#define TRAP12_COMMAND__Trap12Cmd1__SHIFT 0x8
+#define TRAP12_COMMAND__Trap12Cmd0_MASK 0x0000003FL
+#define TRAP12_COMMAND__Trap12Cmd1_MASK 0x00003F00L
+//TRAP12_ADDRESS_LO_MASK
+#define TRAP12_ADDRESS_LO_MASK__Trap12AddrLoMask__SHIFT 0x2
+#define TRAP12_ADDRESS_LO_MASK__Trap12AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP12_ADDRESS_HI_MASK
+#define TRAP12_ADDRESS_HI_MASK__Trap12AddrHiMask__SHIFT 0x0
+#define TRAP12_ADDRESS_HI_MASK__Trap12AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP12_COMMAND_MASK
+#define TRAP12_COMMAND_MASK__Trap12Cmd0Mask__SHIFT 0x0
+#define TRAP12_COMMAND_MASK__Trap12Cmd1Mask__SHIFT 0x8
+#define TRAP12_COMMAND_MASK__Trap12Cmd0Mask_MASK 0x0000003FL
+#define TRAP12_COMMAND_MASK__Trap12Cmd1Mask_MASK 0x00003F00L
+//TRAP13_CONTROL0
+#define TRAP13_CONTROL0__Trap13En__SHIFT 0x0
+#define TRAP13_CONTROL0__Trap13SMUIntr__SHIFT 0x3
+#define TRAP13_CONTROL0__Trap13Stage2Ptr__SHIFT 0xe
+#define TRAP13_CONTROL0__Trap13CrossTrigger__SHIFT 0x18
+#define TRAP13_CONTROL0__Trap13Stage2En__SHIFT 0x1f
+#define TRAP13_CONTROL0__Trap13En_MASK 0x00000001L
+#define TRAP13_CONTROL0__Trap13SMUIntr_MASK 0x00000008L
+#define TRAP13_CONTROL0__Trap13Stage2Ptr_MASK 0x00FFC000L
+#define TRAP13_CONTROL0__Trap13CrossTrigger_MASK 0x0F000000L
+#define TRAP13_CONTROL0__Trap13Stage2En_MASK 0x80000000L
+//TRAP13_ADDRESS_LO
+#define TRAP13_ADDRESS_LO__Trap13AddrLo__SHIFT 0x2
+#define TRAP13_ADDRESS_LO__Trap13AddrLo_MASK 0xFFFFFFFCL
+//TRAP13_ADDRESS_HI
+#define TRAP13_ADDRESS_HI__Trap13AddrHi__SHIFT 0x0
+#define TRAP13_ADDRESS_HI__Trap13AddrHi_MASK 0xFFFFFFFFL
+//TRAP13_COMMAND
+#define TRAP13_COMMAND__Trap13Cmd0__SHIFT 0x0
+#define TRAP13_COMMAND__Trap13Cmd1__SHIFT 0x8
+#define TRAP13_COMMAND__Trap13Cmd0_MASK 0x0000003FL
+#define TRAP13_COMMAND__Trap13Cmd1_MASK 0x00003F00L
+//TRAP13_ADDRESS_LO_MASK
+#define TRAP13_ADDRESS_LO_MASK__Trap13AddrLoMask__SHIFT 0x2
+#define TRAP13_ADDRESS_LO_MASK__Trap13AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP13_ADDRESS_HI_MASK
+#define TRAP13_ADDRESS_HI_MASK__Trap13AddrHiMask__SHIFT 0x0
+#define TRAP13_ADDRESS_HI_MASK__Trap13AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP13_COMMAND_MASK
+#define TRAP13_COMMAND_MASK__Trap13Cmd0Mask__SHIFT 0x0
+#define TRAP13_COMMAND_MASK__Trap13Cmd1Mask__SHIFT 0x8
+#define TRAP13_COMMAND_MASK__Trap13Cmd0Mask_MASK 0x0000003FL
+#define TRAP13_COMMAND_MASK__Trap13Cmd1Mask_MASK 0x00003F00L
+//TRAP14_CONTROL0
+#define TRAP14_CONTROL0__Trap14En__SHIFT 0x0
+#define TRAP14_CONTROL0__Trap14SMUIntr__SHIFT 0x3
+#define TRAP14_CONTROL0__Trap14Stage2Ptr__SHIFT 0xe
+#define TRAP14_CONTROL0__Trap14CrossTrigger__SHIFT 0x18
+#define TRAP14_CONTROL0__Trap14Stage2En__SHIFT 0x1f
+#define TRAP14_CONTROL0__Trap14En_MASK 0x00000001L
+#define TRAP14_CONTROL0__Trap14SMUIntr_MASK 0x00000008L
+#define TRAP14_CONTROL0__Trap14Stage2Ptr_MASK 0x00FFC000L
+#define TRAP14_CONTROL0__Trap14CrossTrigger_MASK 0x0F000000L
+#define TRAP14_CONTROL0__Trap14Stage2En_MASK 0x80000000L
+//TRAP14_ADDRESS_LO
+#define TRAP14_ADDRESS_LO__Trap14AddrLo__SHIFT 0x2
+#define TRAP14_ADDRESS_LO__Trap14AddrLo_MASK 0xFFFFFFFCL
+//TRAP14_ADDRESS_HI
+#define TRAP14_ADDRESS_HI__Trap14AddrHi__SHIFT 0x0
+#define TRAP14_ADDRESS_HI__Trap14AddrHi_MASK 0xFFFFFFFFL
+//TRAP14_COMMAND
+#define TRAP14_COMMAND__Trap14Cmd0__SHIFT 0x0
+#define TRAP14_COMMAND__Trap14Cmd1__SHIFT 0x8
+#define TRAP14_COMMAND__Trap14Cmd0_MASK 0x0000003FL
+#define TRAP14_COMMAND__Trap14Cmd1_MASK 0x00003F00L
+//TRAP14_ADDRESS_LO_MASK
+#define TRAP14_ADDRESS_LO_MASK__Trap14AddrLoMask__SHIFT 0x2
+#define TRAP14_ADDRESS_LO_MASK__Trap14AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP14_ADDRESS_HI_MASK
+#define TRAP14_ADDRESS_HI_MASK__Trap14AddrHiMask__SHIFT 0x0
+#define TRAP14_ADDRESS_HI_MASK__Trap14AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP14_COMMAND_MASK
+#define TRAP14_COMMAND_MASK__Trap14Cmd0Mask__SHIFT 0x0
+#define TRAP14_COMMAND_MASK__Trap14Cmd1Mask__SHIFT 0x8
+#define TRAP14_COMMAND_MASK__Trap14Cmd0Mask_MASK 0x0000003FL
+#define TRAP14_COMMAND_MASK__Trap14Cmd1Mask_MASK 0x00003F00L
+//TRAP15_CONTROL0
+#define TRAP15_CONTROL0__Trap15En__SHIFT 0x0
+#define TRAP15_CONTROL0__Trap15SMUIntr__SHIFT 0x3
+#define TRAP15_CONTROL0__Trap15Stage2Ptr__SHIFT 0xe
+#define TRAP15_CONTROL0__Trap15CrossTrigger__SHIFT 0x18
+#define TRAP15_CONTROL0__Trap15Stage2En__SHIFT 0x1f
+#define TRAP15_CONTROL0__Trap15En_MASK 0x00000001L
+#define TRAP15_CONTROL0__Trap15SMUIntr_MASK 0x00000008L
+#define TRAP15_CONTROL0__Trap15Stage2Ptr_MASK 0x00FFC000L
+#define TRAP15_CONTROL0__Trap15CrossTrigger_MASK 0x0F000000L
+#define TRAP15_CONTROL0__Trap15Stage2En_MASK 0x80000000L
+//TRAP15_ADDRESS_LO
+#define TRAP15_ADDRESS_LO__Trap15AddrLo__SHIFT 0x2
+#define TRAP15_ADDRESS_LO__Trap15AddrLo_MASK 0xFFFFFFFCL
+//TRAP15_ADDRESS_HI
+#define TRAP15_ADDRESS_HI__Trap15AddrHi__SHIFT 0x0
+#define TRAP15_ADDRESS_HI__Trap15AddrHi_MASK 0xFFFFFFFFL
+//TRAP15_COMMAND
+#define TRAP15_COMMAND__Trap15Cmd0__SHIFT 0x0
+#define TRAP15_COMMAND__Trap15Cmd1__SHIFT 0x8
+#define TRAP15_COMMAND__Trap15Cmd0_MASK 0x0000003FL
+#define TRAP15_COMMAND__Trap15Cmd1_MASK 0x00003F00L
+//TRAP15_ADDRESS_LO_MASK
+#define TRAP15_ADDRESS_LO_MASK__Trap15AddrLoMask__SHIFT 0x2
+#define TRAP15_ADDRESS_LO_MASK__Trap15AddrLoMask_MASK 0xFFFFFFFCL
+//TRAP15_ADDRESS_HI_MASK
+#define TRAP15_ADDRESS_HI_MASK__Trap15AddrHiMask__SHIFT 0x0
+#define TRAP15_ADDRESS_HI_MASK__Trap15AddrHiMask_MASK 0xFFFFFFFFL
+//TRAP15_COMMAND_MASK
+#define TRAP15_COMMAND_MASK__Trap15Cmd0Mask__SHIFT 0x0
+#define TRAP15_COMMAND_MASK__Trap15Cmd1Mask__SHIFT 0x8
+#define TRAP15_COMMAND_MASK__Trap15Cmd0Mask_MASK 0x0000003FL
+#define TRAP15_COMMAND_MASK__Trap15Cmd1Mask_MASK 0x00003F00L
+//SB_COMMAND
+#define SB_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define SB_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define SB_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define SB_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define SB_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define SB_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+//SB_SUB_BUS_NUMBER_LATENCY
+#define SB_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8
+#define SB_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10
+#define SB_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L
+#define SB_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L
+//SB_IO_BASE_LIMIT
+#define SB_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4
+#define SB_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc
+#define SB_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L
+#define SB_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L
+//SB_MEM_BASE_LIMIT
+#define SB_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4
+#define SB_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14
+#define SB_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L
+#define SB_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L
+//SB_PREF_BASE_LIMIT
+#define SB_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4
+#define SB_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14
+#define SB_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L
+#define SB_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L
+//SB_PREF_BASE_UPPER
+#define SB_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0
+#define SB_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL
+//SB_PREF_LIMIT_UPPER
+#define SB_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0
+#define SB_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL
+//SB_IO_BASE_LIMIT_HI
+#define SB_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0
+#define SB_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10
+#define SB_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL
+#define SB_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L
+//SB_IRQ_BRIDGE_CNTL
+#define SB_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
+#define SB_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
+#define SB_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
+#define SB_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
+#define SB_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
+#define SB_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
+//SB_EXT_BRIDGE_CNTL
+#define SB_EXT_BRIDGE_CNTL__IO_PORT_80_EN__SHIFT 0x0
+#define SB_EXT_BRIDGE_CNTL__IO_PORT_80_EN_MASK 0x01L
+//SB_PMI_STATUS_CNTL
+#define SB_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define SB_PMI_STATUS_CNTL__POWER_STATE_MASK 0x03L
+//SB_SLOT_CAP
+#define SB_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7
+#define SB_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf
+#define SB_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L
+#define SB_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L
+//SB_ROOT_CNTL
+#define SB_ROOT_CNTL__CRS_SOFTWARE_VISIBILITY_EN__SHIFT 0x4
+#define SB_ROOT_CNTL__CRS_SOFTWARE_VISIBILITY_EN_MASK 0x0010L
+//SB_DEVICE_CNTL2
+#define SB_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define SB_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+//MCA_SMN_INT_REQ_ADDR
+#define MCA_SMN_INT_REQ_ADDR__SMN_INT_REQ_ADDR__SHIFT 0x0
+#define MCA_SMN_INT_REQ_ADDR__SMN_INT_REQ_ADDR_MASK 0x000FFFFFL
+//MCA_SMN_INT_MCM_ADDR
+#define MCA_SMN_INT_MCM_ADDR__SMN_INT_MCM_ADDR__SHIFT 0x0
+#define MCA_SMN_INT_MCM_ADDR__SMN_INT_MCM_ADDR_MASK 0x000000FFL
+//MCA_SMN_INT_APERTUREID
+#define MCA_SMN_INT_APERTUREID__SMN_INT_APERTUREID__SHIFT 0x0
+#define MCA_SMN_INT_APERTUREID__SMN_INT_APERTUREID_MASK 0x00000FFFL
+//MCA_SMN_INT_CONTROL
+#define MCA_SMN_INT_CONTROL__MCACrossTrigger__SHIFT 0x0
+#define MCA_SMN_INT_CONTROL__MCACrossTrigger_MASK 0x0000000FL
+
+
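The SHIFT/MASK pairs above follow the usual generated-header convention: the `_MASK` macro selects a field's bits and the `__SHIFT` macro right-justifies them. As a minimal illustrative sketch (local copies of two of the TRAP_STATUS definitions above, reproduced here only so the snippet is self-contained), decoding a field from a raw register value looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the TRAP_STATUS field definitions shown above,
     * repeated here purely for illustration. */
    #define TRAP_STATUS__TrapNumber__SHIFT 0x8
    #define TRAP_STATUS__TrapNumber_MASK   0x00000F00L

    /* Extract a field: mask off its bits, then right-justify with the shift. */
    static inline uint32_t trap_status_trap_number(uint32_t reg)
    {
        return (reg & TRAP_STATUS__TrapNumber_MASK) >> TRAP_STATUS__TrapNumber__SHIFT;
    }

    int main(void)
    {
        uint32_t raw = 0x00000301; /* TrapReqValid = 1, TrapNumber = 3 */
        printf("trap number = %u\n", trap_status_trap_number(raw));
        return 0;
    }

Writing a field is the inverse: clear the bits with `~MASK`, then OR in `(value << SHIFT) & MASK`. The same pattern applies to every register/field pair in this header.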
+// addressBlock: aid_nbio_iohub_nb_rascfg_ras_cfgdec
+//PARITY_CONTROL_0
+#define PARITY_CONTROL_0__ParityCorrThreshold__SHIFT 0x0
+#define PARITY_CONTROL_0__ParityUCPThreshold__SHIFT 0x10
+#define PARITY_CONTROL_0__ParityCorrThreshold_MASK 0x0000FFFFL
+#define PARITY_CONTROL_0__ParityUCPThreshold_MASK 0xFFFF0000L
+//PARITY_CONTROL_1
+#define PARITY_CONTROL_1__ParityErrGenGroupSel__SHIFT 0x0
+#define PARITY_CONTROL_1__ParityErrGenGroupTypeSel__SHIFT 0x8
+#define PARITY_CONTROL_1__ParityErrGenIdSel__SHIFT 0xb
+#define PARITY_CONTROL_1__ParityErrGenCmd__SHIFT 0x10
+#define PARITY_CONTROL_1__ParityErrGenTrigger__SHIFT 0x1e
+#define PARITY_CONTROL_1__ParityErrGenInjectAllow__SHIFT 0x1f
+#define PARITY_CONTROL_1__ParityErrGenGroupSel_MASK 0x000000FFL
+#define PARITY_CONTROL_1__ParityErrGenGroupTypeSel_MASK 0x00000100L
+#define PARITY_CONTROL_1__ParityErrGenIdSel_MASK 0x0000F800L
+#define PARITY_CONTROL_1__ParityErrGenCmd_MASK 0x000F0000L
+#define PARITY_CONTROL_1__ParityErrGenTrigger_MASK 0x40000000L
+#define PARITY_CONTROL_1__ParityErrGenInjectAllow_MASK 0x80000000L
+//PARITY_SEVERITY_CONTROL_UNCORR_0
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp0__SHIFT 0x0
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp1__SHIFT 0x2
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp2__SHIFT 0x4
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp3__SHIFT 0x6
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp4__SHIFT 0x8
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp5__SHIFT 0xa
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp6__SHIFT 0xc
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp7__SHIFT 0xe
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp8__SHIFT 0x10
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp9__SHIFT 0x12
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp10__SHIFT 0x14
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp11__SHIFT 0x16
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp12__SHIFT 0x18
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp13__SHIFT 0x1a
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp14__SHIFT 0x1c
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp15__SHIFT 0x1e
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp0_MASK 0x00000003L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp1_MASK 0x0000000CL
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp2_MASK 0x00000030L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp3_MASK 0x000000C0L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp4_MASK 0x00000300L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp5_MASK 0x00000C00L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp6_MASK 0x00003000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp7_MASK 0x0000C000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp8_MASK 0x00030000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp9_MASK 0x000C0000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp10_MASK 0x00300000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp11_MASK 0x00C00000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp12_MASK 0x03000000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp13_MASK 0x0C000000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp14_MASK 0x30000000L
+#define PARITY_SEVERITY_CONTROL_UNCORR_0__ParityErrSevUnCorrGrp15_MASK 0xC0000000L
+//PARITY_SEVERITY_CONTROL_CORR_0
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp0__SHIFT 0x0
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp1__SHIFT 0x2
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp2__SHIFT 0x4
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp3__SHIFT 0x6
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp4__SHIFT 0x8
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp5__SHIFT 0xa
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp6__SHIFT 0xc
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp7__SHIFT 0xe
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp8__SHIFT 0x10
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp9__SHIFT 0x12
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp10__SHIFT 0x14
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp11__SHIFT 0x16
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp12__SHIFT 0x18
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp13__SHIFT 0x1a
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp14__SHIFT 0x1c
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp15__SHIFT 0x1e
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp0_MASK 0x00000003L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp1_MASK 0x0000000CL
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp2_MASK 0x00000030L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp3_MASK 0x000000C0L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp4_MASK 0x00000300L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp5_MASK 0x00000C00L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp6_MASK 0x00003000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp7_MASK 0x0000C000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp8_MASK 0x00030000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp9_MASK 0x000C0000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp10_MASK 0x00300000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp11_MASK 0x00C00000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp12_MASK 0x03000000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp13_MASK 0x0C000000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp14_MASK 0x30000000L
+#define PARITY_SEVERITY_CONTROL_CORR_0__ParityErrSevCorrGrp15_MASK 0xC0000000L
+//PARITY_SEVERITY_CONTROL_UCP_0
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp0__SHIFT 0x0
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp1__SHIFT 0x2
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp2__SHIFT 0x4
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp3__SHIFT 0x6
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp4__SHIFT 0x8
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp5__SHIFT 0xa
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp6__SHIFT 0xc
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp7__SHIFT 0xe
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp8__SHIFT 0x10
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp9__SHIFT 0x12
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp10__SHIFT 0x14
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp11__SHIFT 0x16
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp12__SHIFT 0x18
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp0_MASK 0x00000003L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp1_MASK 0x0000000CL
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp2_MASK 0x00000030L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp3_MASK 0x000000C0L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp4_MASK 0x00000300L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp5_MASK 0x00000C00L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp6_MASK 0x00003000L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp7_MASK 0x0000C000L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp8_MASK 0x00030000L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp9_MASK 0x000C0000L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp10_MASK 0x00300000L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp11_MASK 0x00C00000L
+#define PARITY_SEVERITY_CONTROL_UCP_0__ParityErrSevUCPGrp12_MASK 0x03000000L
+//RAS_GLOBAL_STATUS_LO
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L
+//RAS_GLOBAL_STATUS_HI
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortBErr__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortCErr__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortDErr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortEErr__SHIFT 0x4
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortFErr__SHIFT 0x5
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortGErr__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortHErr__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortIErr__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortAErr__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortBErr__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortCErr__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortDErr__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_HI__NBIF1PortAErr__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortBErr_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortCErr_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortDErr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortEErr_MASK 0x00000010L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortFErr_MASK 0x00000020L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortGErr_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortHErr_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortIErr_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortAErr_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortBErr_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortCErr_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_HI__PCIE1PortDErr_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_HI__NBIF1PortAErr_MASK 0x00002000L
+//PARITY_ERROR_STATUS_UNCORR_GRP0
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP0__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP1
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP1__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP2
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP2__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP3
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP3__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP4
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP4__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP5
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP5__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP6
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP6__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP7
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP7__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP10
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP10__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP11
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP11__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP12
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP12__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP13
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP13__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP14
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP14__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP15
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP15__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UNCORR_GRP16
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UNCORR_GRP16__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP0
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP0__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP1
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP1__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP2
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP2__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP3
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP3__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP4
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP4__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP5
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP5__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP6
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP6__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP7
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP7__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP10
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP10__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP11
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP11__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP12
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP12__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP13
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP13__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP14
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP14__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP15
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP15__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP16
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP16__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_CORR_GRP17
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_CORR_GRP17__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP0
+#define PARITY_COUNTER_CORR_GRP0__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP0__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP0__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP0__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP1
+#define PARITY_COUNTER_CORR_GRP1__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP1__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP1__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP1__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP2
+#define PARITY_COUNTER_CORR_GRP2__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP2__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP2__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP2__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP3
+#define PARITY_COUNTER_CORR_GRP3__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP3__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP3__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP3__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP4
+#define PARITY_COUNTER_CORR_GRP4__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP4__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP4__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP4__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP5
+#define PARITY_COUNTER_CORR_GRP5__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP5__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP5__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP5__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP6
+#define PARITY_COUNTER_CORR_GRP6__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP6__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP6__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP6__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP7
+#define PARITY_COUNTER_CORR_GRP7__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP7__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP7__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP7__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP10
+#define PARITY_COUNTER_CORR_GRP10__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP10__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP10__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP10__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP11
+#define PARITY_COUNTER_CORR_GRP11__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP11__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP11__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP11__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP12
+#define PARITY_COUNTER_CORR_GRP12__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP12__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP12__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP12__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP13
+#define PARITY_COUNTER_CORR_GRP13__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP13__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP13__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP13__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP14
+#define PARITY_COUNTER_CORR_GRP14__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP14__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP14__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP14__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP15
+#define PARITY_COUNTER_CORR_GRP15__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP15__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP15__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP15__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP16
+#define PARITY_COUNTER_CORR_GRP16__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP16__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP16__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP16__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_CORR_GRP17
+#define PARITY_COUNTER_CORR_GRP17__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_CORR_GRP17__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_CORR_GRP17__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_CORR_GRP17__ResetEn_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP0
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP0__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP1
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP1__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP2
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP2__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP3
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP3__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP4
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP4__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP5
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP5__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP6
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP6__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP7
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP7__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP10
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP10__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP11
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP11__ParityErrDetected_Id31_MASK 0x80000000L
+//PARITY_ERROR_STATUS_UCP_GRP12
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id0__SHIFT 0x0
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id1__SHIFT 0x1
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id2__SHIFT 0x2
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id3__SHIFT 0x3
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id4__SHIFT 0x4
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id5__SHIFT 0x5
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id6__SHIFT 0x6
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id7__SHIFT 0x7
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id8__SHIFT 0x8
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id9__SHIFT 0x9
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id10__SHIFT 0xa
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id11__SHIFT 0xb
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id12__SHIFT 0xc
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id13__SHIFT 0xd
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id14__SHIFT 0xe
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id15__SHIFT 0xf
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id16__SHIFT 0x10
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id17__SHIFT 0x11
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id18__SHIFT 0x12
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id19__SHIFT 0x13
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id20__SHIFT 0x14
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id21__SHIFT 0x15
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id22__SHIFT 0x16
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id23__SHIFT 0x17
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id24__SHIFT 0x18
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id25__SHIFT 0x19
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id26__SHIFT 0x1a
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id27__SHIFT 0x1b
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id28__SHIFT 0x1c
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id29__SHIFT 0x1d
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id30__SHIFT 0x1e
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id31__SHIFT 0x1f
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id0_MASK 0x00000001L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id1_MASK 0x00000002L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id2_MASK 0x00000004L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id3_MASK 0x00000008L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id4_MASK 0x00000010L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id5_MASK 0x00000020L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id6_MASK 0x00000040L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id7_MASK 0x00000080L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id8_MASK 0x00000100L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id9_MASK 0x00000200L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id10_MASK 0x00000400L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id11_MASK 0x00000800L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id12_MASK 0x00001000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id13_MASK 0x00002000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id14_MASK 0x00004000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id15_MASK 0x00008000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id16_MASK 0x00010000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id17_MASK 0x00020000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id18_MASK 0x00040000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id19_MASK 0x00080000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id20_MASK 0x00100000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id21_MASK 0x00200000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id22_MASK 0x00400000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id23_MASK 0x00800000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id24_MASK 0x01000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id25_MASK 0x02000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id26_MASK 0x04000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id27_MASK 0x08000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id28_MASK 0x10000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id29_MASK 0x20000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id30_MASK 0x40000000L
+#define PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id31_MASK 0x80000000L
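/*
 * Illustrative sketch only (editor's note, not part of this generated header):
 * each PARITY_ERROR_STATUS_UCP_GRPn register exposes one ParityErrDetected bit
 * per ID, so a given ID is tested with its generated _MASK macro. The u32
 * register value is assumed to come from whatever MMIO read helper the
 * surrounding driver already uses; no such helper is defined here.
 */
static inline int ucp_grp12_id0_parity_err(u32 status)
{
	/* ID 0 of group 12 occupies bit 0 of the status register. */
	return !!(status & PARITY_ERROR_STATUS_UCP_GRP12__ParityErrDetected_Id0_MASK);
}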
+//PARITY_COUNTER_UCP_GRP0
+#define PARITY_COUNTER_UCP_GRP0__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP0__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP0__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP0__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP1
+#define PARITY_COUNTER_UCP_GRP1__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP1__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP1__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP1__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP2
+#define PARITY_COUNTER_UCP_GRP2__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP2__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP2__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP2__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP3
+#define PARITY_COUNTER_UCP_GRP3__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP3__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP3__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP3__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP4
+#define PARITY_COUNTER_UCP_GRP4__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP4__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP4__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP4__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP5
+#define PARITY_COUNTER_UCP_GRP5__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP5__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP5__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP5__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP6
+#define PARITY_COUNTER_UCP_GRP6__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP6__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP6__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP6__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP7
+#define PARITY_COUNTER_UCP_GRP7__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP7__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP7__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP7__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP10
+#define PARITY_COUNTER_UCP_GRP10__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP10__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP10__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP10__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP11
+#define PARITY_COUNTER_UCP_GRP11__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP11__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP11__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP11__ResetEn_MASK 0x80000000L
+//PARITY_COUNTER_UCP_GRP12
+#define PARITY_COUNTER_UCP_GRP12__ThresholdCounter__SHIFT 0x0
+#define PARITY_COUNTER_UCP_GRP12__ResetEn__SHIFT 0x1f
+#define PARITY_COUNTER_UCP_GRP12__ThresholdCounter_MASK 0x0000FFFFL
+#define PARITY_COUNTER_UCP_GRP12__ResetEn_MASK 0x80000000L
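/*
 * Illustrative sketch only (editor's note, not part of this generated header):
 * the PARITY_COUNTER_UCP_GRPn registers pair a 16-bit ThresholdCounter field
 * (bits 15:0) with a ResetEn control bit (bit 31). Fields are decoded with the
 * usual (val & MASK) >> SHIFT pattern; the register access itself is left to
 * the caller.
 */
static inline u32 ucp_grp0_threshold_counter(u32 reg_val)
{
	/* Extract the 16-bit threshold counter from bits 15:0. */
	return (reg_val & PARITY_COUNTER_UCP_GRP0__ThresholdCounter_MASK) >>
	       PARITY_COUNTER_UCP_GRP0__ThresholdCounter__SHIFT;
}

static inline u32 ucp_grp0_arm_reset(u32 reg_val)
{
	/* Setting bit 31 (ResetEn) arms the counter reset. */
	return reg_val | PARITY_COUNTER_UCP_GRP0__ResetEn_MASK;
}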
+//MISC_SEVERITY_CONTROL
+#define MISC_SEVERITY_CONTROL__ErrEventErrSev__SHIFT 0x4
+#define MISC_SEVERITY_CONTROL__PcieParityErrSev__SHIFT 0x6
+#define MISC_SEVERITY_CONTROL__ErrEventErrSev_MASK 0x00000030L
+#define MISC_SEVERITY_CONTROL__PcieParityErrSev_MASK 0x000000C0L
+//MISC_RAS_CONTROL
+#define MISC_RAS_CONTROL__PIN_NMI_SyncFlood_En__SHIFT 0x2
+#define MISC_RAS_CONTROL__GNB_SB_LinkNeverDis__SHIFT 0x3
+#define MISC_RAS_CONTROL__InterruptOutputDis__SHIFT 0x9
+#define MISC_RAS_CONTROL__LinkDisOutputDis__SHIFT 0xa
+#define MISC_RAS_CONTROL__SyncFldOutputDis__SHIFT 0xb
+#define MISC_RAS_CONTROL__PCIe_NMI_En__SHIFT 0xc
+#define MISC_RAS_CONTROL__PCIe_SCI_En__SHIFT 0xd
+#define MISC_RAS_CONTROL__PCIe_SMI_En__SHIFT 0xe
+#define MISC_RAS_CONTROL__SW_SCI_En__SHIFT 0xf
+#define MISC_RAS_CONTROL__SW_SMI_En__SHIFT 0x10
+#define MISC_RAS_CONTROL__SW_NMI_En__SHIFT 0x11
+#define MISC_RAS_CONTROL__PIN_NMI_SyncFlood_En_MASK 0x00000004L
+#define MISC_RAS_CONTROL__GNB_SB_LinkNeverDis_MASK 0x00000008L
+#define MISC_RAS_CONTROL__InterruptOutputDis_MASK 0x00000200L
+#define MISC_RAS_CONTROL__LinkDisOutputDis_MASK 0x00000400L
+#define MISC_RAS_CONTROL__SyncFldOutputDis_MASK 0x00000800L
+#define MISC_RAS_CONTROL__PCIe_NMI_En_MASK 0x00001000L
+#define MISC_RAS_CONTROL__PCIe_SCI_En_MASK 0x00002000L
+#define MISC_RAS_CONTROL__PCIe_SMI_En_MASK 0x00004000L
+#define MISC_RAS_CONTROL__SW_SCI_En_MASK 0x00008000L
+#define MISC_RAS_CONTROL__SW_SMI_En_MASK 0x00010000L
+#define MISC_RAS_CONTROL__SW_NMI_En_MASK 0x00020000L
+//RAS_SCRATCH_0
+#define RAS_SCRATCH_0__SCRATCH_0__SHIFT 0x0
+#define RAS_SCRATCH_0__SCRATCH_0_MASK 0xFFFFFFFFL
+//RAS_SCRATCH_1
+#define RAS_SCRATCH_1__SCRATCH_1__SHIFT 0x0
+#define RAS_SCRATCH_1__SCRATCH_1_MASK 0xFFFFFFFFL
+//ErrEvent_ACTION_CONTROL
+#define ErrEvent_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define ErrEvent_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define ErrEvent_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define ErrEvent_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define ErrEvent_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define ErrEvent_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define ErrEvent_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define ErrEvent_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
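/*
 * Illustrative sketch only (editor's note, not part of this generated header):
 * every *_ACTION_CONTROL register below repeats the same layout (APML_ERR_En
 * at bit 0, a two-bit IntrGenSel field at bits 2:1, LinkDis_En at bit 3,
 * SyncFlood_En at bit 4), so a value can be composed from the SHIFT/MASK
 * macros as shown. The IntrGenSel encoding is not documented in this header,
 * so the value 0x1 used here is a placeholder, not a recommended setting.
 */
static inline u32 err_event_action_control_value(void)
{
	u32 val = 0;

	val |= ErrEvent_ACTION_CONTROL__APML_ERR_En_MASK;	/* report via APML_ERR pin */
	val |= (0x1 << ErrEvent_ACTION_CONTROL__IntrGenSel__SHIFT) &
	       ErrEvent_ACTION_CONTROL__IntrGenSel_MASK;	/* placeholder interrupt select */
	val |= ErrEvent_ACTION_CONTROL__SyncFlood_En_MASK;	/* assert sync flood on error */

	return val;
}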
+//ParitySerr_ACTION_CONTROL
+#define ParitySerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define ParitySerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define ParitySerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define ParitySerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define ParitySerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define ParitySerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define ParitySerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define ParitySerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//ParityFatal_ACTION_CONTROL
+#define ParityFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define ParityFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define ParityFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define ParityFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define ParityFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define ParityFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define ParityFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define ParityFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//ParityNonFatal_ACTION_CONTROL
+#define ParityNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define ParityNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define ParityNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define ParityNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define ParityNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define ParityNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define ParityNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define ParityNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//ParityCorr_ACTION_CONTROL
+#define ParityCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define ParityCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define ParityCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define ParityCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define ParityCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define ParityCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define ParityCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define ParityCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortASerr_ACTION_CONTROL
+#define PCIE0PortASerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortASerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortASerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortASerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortASerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortASerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortASerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortASerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAIntFatal_ACTION_CONTROL
+#define PCIE0PortAIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAIntNonFatal_ACTION_CONTROL
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAIntCorr_ACTION_CONTROL
+#define PCIE0PortAIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAExtFatal_ACTION_CONTROL
+#define PCIE0PortAExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAExtNonFatal_ACTION_CONTROL
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAExtCorr_ACTION_CONTROL
+#define PCIE0PortAExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortAParityErr_ACTION_CONTROL
+#define PCIE0PortAParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortAParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortAParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortAParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortAParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortAParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortAParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortAParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBSerr_ACTION_CONTROL
+#define PCIE0PortBSerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBSerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBSerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBSerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBSerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBSerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBSerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBSerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBIntFatal_ACTION_CONTROL
+#define PCIE0PortBIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBIntNonFatal_ACTION_CONTROL
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBIntCorr_ACTION_CONTROL
+#define PCIE0PortBIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBExtFatal_ACTION_CONTROL
+#define PCIE0PortBExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBExtNonFatal_ACTION_CONTROL
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBExtCorr_ACTION_CONTROL
+#define PCIE0PortBExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortBParityErr_ACTION_CONTROL
+#define PCIE0PortBParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortBParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortBParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortBParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortBParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortBParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortBParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortBParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCSerr_ACTION_CONTROL
+#define PCIE0PortCSerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCSerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCSerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCSerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCSerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCSerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCSerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCSerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCIntFatal_ACTION_CONTROL
+#define PCIE0PortCIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCIntNonFatal_ACTION_CONTROL
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCIntCorr_ACTION_CONTROL
+#define PCIE0PortCIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCExtFatal_ACTION_CONTROL
+#define PCIE0PortCExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCExtNonFatal_ACTION_CONTROL
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCExtCorr_ACTION_CONTROL
+#define PCIE0PortCExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortCParityErr_ACTION_CONTROL
+#define PCIE0PortCParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortCParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortCParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortCParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortCParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortCParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortCParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortCParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDSerr_ACTION_CONTROL
+#define PCIE0PortDSerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDSerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDSerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDSerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDSerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDSerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDSerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDSerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDIntFatal_ACTION_CONTROL
+#define PCIE0PortDIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDIntNonFatal_ACTION_CONTROL
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDIntCorr_ACTION_CONTROL
+#define PCIE0PortDIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDExtFatal_ACTION_CONTROL
+#define PCIE0PortDExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDExtNonFatal_ACTION_CONTROL
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDExtCorr_ACTION_CONTROL
+#define PCIE0PortDExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortDParityErr_ACTION_CONTROL
+#define PCIE0PortDParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortDParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortDParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortDParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortDParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortDParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortDParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortDParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortESerr_ACTION_CONTROL
+#define PCIE0PortESerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortESerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortESerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortESerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortESerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortESerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortESerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortESerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEIntFatal_ACTION_CONTROL
+#define PCIE0PortEIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEIntNonFatal_ACTION_CONTROL
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEIntCorr_ACTION_CONTROL
+#define PCIE0PortEIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEExtFatal_ACTION_CONTROL
+#define PCIE0PortEExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEExtNonFatal_ACTION_CONTROL
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEExtCorr_ACTION_CONTROL
+#define PCIE0PortEExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortEParityErr_ACTION_CONTROL
+#define PCIE0PortEParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortEParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortEParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortEParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortEParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortEParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortEParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortEParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFSerr_ACTION_CONTROL
+#define PCIE0PortFSerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFSerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFSerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFSerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFSerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFSerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFSerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFSerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFIntFatal_ACTION_CONTROL
+#define PCIE0PortFIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFIntNonFatal_ACTION_CONTROL
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFIntCorr_ACTION_CONTROL
+#define PCIE0PortFIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFExtFatal_ACTION_CONTROL
+#define PCIE0PortFExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFExtNonFatal_ACTION_CONTROL
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFExtCorr_ACTION_CONTROL
+#define PCIE0PortFExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortFParityErr_ACTION_CONTROL
+#define PCIE0PortFParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortFParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortFParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortFParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortFParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortFParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortFParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortFParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGSerr_ACTION_CONTROL
+#define PCIE0PortGSerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGSerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGSerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGSerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGSerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGSerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGSerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGSerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGIntFatal_ACTION_CONTROL
+#define PCIE0PortGIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGIntNonFatal_ACTION_CONTROL
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGIntCorr_ACTION_CONTROL
+#define PCIE0PortGIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGExtFatal_ACTION_CONTROL
+#define PCIE0PortGExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGExtNonFatal_ACTION_CONTROL
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGExtCorr_ACTION_CONTROL
+#define PCIE0PortGExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//PCIE0PortGParityErr_ACTION_CONTROL
+#define PCIE0PortGParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define PCIE0PortGParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define PCIE0PortGParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define PCIE0PortGParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define PCIE0PortGParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define PCIE0PortGParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define PCIE0PortGParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define PCIE0PortGParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortASerr_ACTION_CONTROL
+#define NBIF1PortASerr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortASerr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortASerr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortASerr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortASerr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortASerr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortASerr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortASerr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAIntFatal_ACTION_CONTROL
+#define NBIF1PortAIntFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAIntFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAIntFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAIntFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAIntFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAIntFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAIntFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAIntFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAIntNonFatal_ACTION_CONTROL
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAIntNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAIntCorr_ACTION_CONTROL
+#define NBIF1PortAIntCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAIntCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAIntCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAIntCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAIntCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAIntCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAIntCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAIntCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAExtFatal_ACTION_CONTROL
+#define NBIF1PortAExtFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAExtFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAExtFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAExtFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAExtFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAExtFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAExtFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAExtFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAExtNonFatal_ACTION_CONTROL
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAExtNonFatal_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAExtCorr_ACTION_CONTROL
+#define NBIF1PortAExtCorr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAExtCorr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAExtCorr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAExtCorr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAExtCorr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAExtCorr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAExtCorr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAExtCorr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//NBIF1PortAParityErr_ACTION_CONTROL
+#define NBIF1PortAParityErr_ACTION_CONTROL__APML_ERR_En__SHIFT 0x0
+#define NBIF1PortAParityErr_ACTION_CONTROL__IntrGenSel__SHIFT 0x1
+#define NBIF1PortAParityErr_ACTION_CONTROL__LinkDis_En__SHIFT 0x3
+#define NBIF1PortAParityErr_ACTION_CONTROL__SyncFlood_En__SHIFT 0x4
+#define NBIF1PortAParityErr_ACTION_CONTROL__APML_ERR_En_MASK 0x00000001L
+#define NBIF1PortAParityErr_ACTION_CONTROL__IntrGenSel_MASK 0x00000006L
+#define NBIF1PortAParityErr_ACTION_CONTROL__LinkDis_En_MASK 0x00000008L
+#define NBIF1PortAParityErr_ACTION_CONTROL__SyncFlood_En_MASK 0x00000010L
+//SYNCFLOOD_STATUS
+#define SYNCFLOOD_STATUS__SyncfloodFromRASCntl__SHIFT 0x0
+#define SYNCFLOOD_STATUS__SyncfloodFromAPML__SHIFT 0x1
+#define SYNCFLOOD_STATUS__SyncfloodFromPin__SHIFT 0x2
+#define SYNCFLOOD_STATUS__SyncfloodFromPrivate__SHIFT 0x4
+#define SYNCFLOOD_STATUS__SyncfloodFromMCA__SHIFT 0x5
+#define SYNCFLOOD_STATUS__SyncfloodFromRASCntl_MASK 0x00000001L
+#define SYNCFLOOD_STATUS__SyncfloodFromAPML_MASK 0x00000002L
+#define SYNCFLOOD_STATUS__SyncfloodFromPin_MASK 0x00000004L
+#define SYNCFLOOD_STATUS__SyncfloodFromPrivate_MASK 0x00000010L
+#define SYNCFLOOD_STATUS__SyncfloodFromMCA_MASK 0x00000020L
+//NMI_STATUS
+#define NMI_STATUS__NMIFromPin__SHIFT 0x0
+#define NMI_STATUS__NMIFromPin_MASK 0x00000001L
+//POISON_ACTION_CONTROL
+#define POISON_ACTION_CONTROL__IntPoisonAPMLErrEn__SHIFT 0x0
+#define POISON_ACTION_CONTROL__IntPoisonIntrGenSel__SHIFT 0x1
+#define POISON_ACTION_CONTROL__IntPoisonLinkDisEn__SHIFT 0x3
+#define POISON_ACTION_CONTROL__IntPoisonSyncFloodEn__SHIFT 0x4
+#define POISON_ACTION_CONTROL__EgressPoisonLSAPMLErrEn__SHIFT 0x8
+#define POISON_ACTION_CONTROL__EgressPoisonLSIntrGenSel__SHIFT 0x9
+#define POISON_ACTION_CONTROL__EgressPoisonLSLinkDisEn__SHIFT 0xb
+#define POISON_ACTION_CONTROL__EgressPoisonLSSyncFloodEn__SHIFT 0xc
+#define POISON_ACTION_CONTROL__EgressPoisonHSAPMLErrEn__SHIFT 0x10
+#define POISON_ACTION_CONTROL__EgressPoisonHSIntrGenSel__SHIFT 0x11
+#define POISON_ACTION_CONTROL__EgressPoisonHSLinkDisEn__SHIFT 0x13
+#define POISON_ACTION_CONTROL__EgressPoisonHSSyncFloodEn__SHIFT 0x14
+#define POISON_ACTION_CONTROL__IntPoisonAPMLErrEn_MASK 0x00000001L
+#define POISON_ACTION_CONTROL__IntPoisonIntrGenSel_MASK 0x00000006L
+#define POISON_ACTION_CONTROL__IntPoisonLinkDisEn_MASK 0x00000008L
+#define POISON_ACTION_CONTROL__IntPoisonSyncFloodEn_MASK 0x00000010L
+#define POISON_ACTION_CONTROL__EgressPoisonLSAPMLErrEn_MASK 0x00000100L
+#define POISON_ACTION_CONTROL__EgressPoisonLSIntrGenSel_MASK 0x00000600L
+#define POISON_ACTION_CONTROL__EgressPoisonLSLinkDisEn_MASK 0x00000800L
+#define POISON_ACTION_CONTROL__EgressPoisonLSSyncFloodEn_MASK 0x00001000L
+#define POISON_ACTION_CONTROL__EgressPoisonHSAPMLErrEn_MASK 0x00010000L
+#define POISON_ACTION_CONTROL__EgressPoisonHSIntrGenSel_MASK 0x00060000L
+#define POISON_ACTION_CONTROL__EgressPoisonHSLinkDisEn_MASK 0x00080000L
+#define POISON_ACTION_CONTROL__EgressPoisonHSSyncFloodEn_MASK 0x00100000L
+//INTERNAL_POISON_STATUS
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_0__SHIFT 0x0
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_1__SHIFT 0x1
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_2__SHIFT 0x2
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_3__SHIFT 0x3
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_4__SHIFT 0x4
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_5__SHIFT 0x5
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_6__SHIFT 0x6
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_7__SHIFT 0x7
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_0_MASK 0x00000001L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_1_MASK 0x00000002L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_2_MASK 0x00000004L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_3_MASK 0x00000008L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_4_MASK 0x00000010L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_5_MASK 0x00000020L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_6_MASK 0x00000040L
+#define INTERNAL_POISON_STATUS__IntPoisonStatus_7_MASK 0x00000080L
+//INTERNAL_POISON_MASK
+#define INTERNAL_POISON_MASK__IntPoisonMask__SHIFT 0x0
+#define INTERNAL_POISON_MASK__IntPoisonMask_MASK 0x000000FFL
+//EGRESS_POISON_STATUS_LO
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_0__SHIFT 0x0
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_1__SHIFT 0x1
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_2__SHIFT 0x2
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_3__SHIFT 0x3
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_4__SHIFT 0x4
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_5__SHIFT 0x5
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_6__SHIFT 0x6
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_7__SHIFT 0x7
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_8__SHIFT 0x8
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_9__SHIFT 0x9
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_10__SHIFT 0xa
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_11__SHIFT 0xb
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_12__SHIFT 0xc
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_13__SHIFT 0xd
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_14__SHIFT 0xe
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_15__SHIFT 0xf
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_16__SHIFT 0x10
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_17__SHIFT 0x11
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_18__SHIFT 0x12
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_19__SHIFT 0x13
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_20__SHIFT 0x14
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_21__SHIFT 0x15
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_22__SHIFT 0x16
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_23__SHIFT 0x17
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_24__SHIFT 0x18
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_25__SHIFT 0x19
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_26__SHIFT 0x1a
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_27__SHIFT 0x1b
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_28__SHIFT 0x1c
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_29__SHIFT 0x1d
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_30__SHIFT 0x1e
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_31__SHIFT 0x1f
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_0_MASK 0x00000001L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_1_MASK 0x00000002L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_2_MASK 0x00000004L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_3_MASK 0x00000008L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_4_MASK 0x00000010L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_5_MASK 0x00000020L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_6_MASK 0x00000040L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_7_MASK 0x00000080L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_8_MASK 0x00000100L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_9_MASK 0x00000200L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_10_MASK 0x00000400L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_11_MASK 0x00000800L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_12_MASK 0x00001000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_13_MASK 0x00002000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_14_MASK 0x00004000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_15_MASK 0x00008000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_16_MASK 0x00010000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_17_MASK 0x00020000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_18_MASK 0x00040000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_19_MASK 0x00080000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_20_MASK 0x00100000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_21_MASK 0x00200000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_22_MASK 0x00400000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_23_MASK 0x00800000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_24_MASK 0x01000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_25_MASK 0x02000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_26_MASK 0x04000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_27_MASK 0x08000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_28_MASK 0x10000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_29_MASK 0x20000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_30_MASK 0x40000000L
+#define EGRESS_POISON_STATUS_LO__EgressPoisonStatusLo_31_MASK 0x80000000L
+//EGRESS_POISON_STATUS_HI
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_0__SHIFT 0x0
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_1__SHIFT 0x1
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_2__SHIFT 0x2
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_3__SHIFT 0x3
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_4__SHIFT 0x4
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_5__SHIFT 0x5
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_6__SHIFT 0x6
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_7__SHIFT 0x7
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_8__SHIFT 0x8
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_9__SHIFT 0x9
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_10__SHIFT 0xa
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_11__SHIFT 0xb
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_12__SHIFT 0xc
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_13__SHIFT 0xd
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_14__SHIFT 0xe
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_15__SHIFT 0xf
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_16__SHIFT 0x10
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_17__SHIFT 0x11
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_18__SHIFT 0x12
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_19__SHIFT 0x13
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_20__SHIFT 0x14
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_21__SHIFT 0x15
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_22__SHIFT 0x16
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_23__SHIFT 0x17
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_24__SHIFT 0x18
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_25__SHIFT 0x19
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_26__SHIFT 0x1a
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_27__SHIFT 0x1b
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_28__SHIFT 0x1c
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_29__SHIFT 0x1d
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_30__SHIFT 0x1e
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_31__SHIFT 0x1f
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_0_MASK 0x00000001L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_1_MASK 0x00000002L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_2_MASK 0x00000004L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_3_MASK 0x00000008L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_4_MASK 0x00000010L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_5_MASK 0x00000020L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_6_MASK 0x00000040L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_7_MASK 0x00000080L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_8_MASK 0x00000100L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_9_MASK 0x00000200L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_10_MASK 0x00000400L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_11_MASK 0x00000800L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_12_MASK 0x00001000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_13_MASK 0x00002000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_14_MASK 0x00004000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_15_MASK 0x00008000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_16_MASK 0x00010000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_17_MASK 0x00020000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_18_MASK 0x00040000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_19_MASK 0x00080000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_20_MASK 0x00100000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_21_MASK 0x00200000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_22_MASK 0x00400000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_23_MASK 0x00800000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_24_MASK 0x01000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_25_MASK 0x02000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_26_MASK 0x04000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_27_MASK 0x08000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_28_MASK 0x10000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_29_MASK 0x20000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_30_MASK 0x40000000L
+#define EGRESS_POISON_STATUS_HI__EgressPoisonStatusHi_31_MASK 0x80000000L
+//EGRESS_POISON_MASK_LO
+#define EGRESS_POISON_MASK_LO__EgressPoisonMaskLo__SHIFT 0x0
+#define EGRESS_POISON_MASK_LO__EgressPoisonMaskLo_MASK 0xFFFFFFFFL
+//EGRESS_POISON_MASK_HI
+#define EGRESS_POISON_MASK_HI__EgressPoisonMaskHi__SHIFT 0x0
+#define EGRESS_POISON_MASK_HI__EgressPoisonMaskHi_MASK 0xFFFFFFFFL
+//EGRESS_POISON_SEVERITY_DOWN
+#define EGRESS_POISON_SEVERITY_DOWN__EgressPoisonSeverityDown__SHIFT 0x0
+#define EGRESS_POISON_SEVERITY_DOWN__EgressPoisonSeverityDown_MASK 0xFFFFFFFFL
+//EGRESS_POISON_SEVERITY_UPPER
+#define EGRESS_POISON_SEVERITY_UPPER__EgressPoisonSeverityUpper__SHIFT 0x0
+#define EGRESS_POISON_SEVERITY_UPPER__EgressPoisonSeverityUpper_MASK 0xFFFFFFFFL
+//APML_STATUS
+#define APML_STATUS__APML_Corr__SHIFT 0x0
+#define APML_STATUS__APML_NonFatal__SHIFT 0x1
+#define APML_STATUS__APML_Fatal__SHIFT 0x2
+#define APML_STATUS__APML_Serr__SHIFT 0x3
+#define APML_STATUS__APML_IntPoisonErr__SHIFT 0x4
+#define APML_STATUS__APML_EgressPoisonErrLo__SHIFT 0x5
+#define APML_STATUS__APML_EgressPoisonErrHi__SHIFT 0x6
+#define APML_STATUS__APML_Corr_MASK 0x00000001L
+#define APML_STATUS__APML_NonFatal_MASK 0x00000002L
+#define APML_STATUS__APML_Fatal_MASK 0x00000004L
+#define APML_STATUS__APML_Serr_MASK 0x00000008L
+#define APML_STATUS__APML_IntPoisonErr_MASK 0x00000010L
+#define APML_STATUS__APML_EgressPoisonErrLo_MASK 0x00000020L
+#define APML_STATUS__APML_EgressPoisonErrHi_MASK 0x00000040L
+//APML_CONTROL
+#define APML_CONTROL__APML_NMI_En__SHIFT 0x0
+#define APML_CONTROL__APML_SyncFlood_En__SHIFT 0x1
+#define APML_CONTROL__APML_OutputDis__SHIFT 0x8
+#define APML_CONTROL__APML_NMI_En_MASK 0x00000001L
+#define APML_CONTROL__APML_SyncFlood_En_MASK 0x00000002L
+#define APML_CONTROL__APML_OutputDis_MASK 0x00000100L
+//APML_TRIGGER
+#define APML_TRIGGER__APML_NMI_TRIGGER__SHIFT 0x0
+#define APML_TRIGGER__APML_NMI_TRIGGER_MASK 0x00000001L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg0_devind_cfgdecp
+//NB_PCIE0DEVINDCFG0_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG0_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG0_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG0_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG0_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg1_devind_cfgdecp
+//NB_PCIE0DEVINDCFG1_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG1_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG1_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG1_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG1_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg2_devind_cfgdecp
+//NB_PCIE0DEVINDCFG2_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG2_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG2_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG2_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG2_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg3_devind_cfgdecp
+//NB_PCIE0DEVINDCFG3_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG3_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG3_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG3_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG3_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg4_devind_cfgdecp
+//NB_PCIE0DEVINDCFG4_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG4_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG4_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG4_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG4_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg5_devind_cfgdecp
+//NB_PCIE0DEVINDCFG5_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG5_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG5_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG5_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG5_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0devindcfg6_devind_cfgdecp
+//NB_PCIE0DEVINDCFG6_STEERING_CNTL
+#define NB_PCIE0DEVINDCFG6_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_PCIE0DEVINDCFG6_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_PCIE0DEVINDCFG6_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_PCIE0DEVINDCFG6_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_NBIF1devindcfg0_devind_cfgdecp
+//NB_NBIF1DEVINDCFG0_STEERING_CNTL
+#define NB_NBIF1DEVINDCFG0_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_NBIF1DEVINDCFG0_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_NBIF1DEVINDCFG0_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_NBIF1DEVINDCFG0_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_intSBdevindcfg0_devind_cfgdecp
+//NB_INTSBDEVINDCFG0_STEERING_CNTL
+#define NB_INTSBDEVINDCFG0_STEERING_CNTL__ForceSteering__SHIFT 0x0
+#define NB_INTSBDEVINDCFG0_STEERING_CNTL__SteeringValue__SHIFT 0x8
+#define NB_INTSBDEVINDCFG0_STEERING_CNTL__ForceSteering_MASK 0x00000001L
+#define NB_INTSBDEVINDCFG0_STEERING_CNTL__SteeringValue_MASK 0x0000FF00L
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg0_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG0_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG0_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG0_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG0_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg1_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG1_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG1_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG1_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG1_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg2_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG2_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG2_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG2_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG2_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg3_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG3_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG3_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG3_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG3_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg4_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG4_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG4_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG4_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG4_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg5_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG5_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG5_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG5_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG5_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_PCIE0rcbdg_indcfg6_pciercbdgind_cfgdec
+//NB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX_EXTENSION
+#define NB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX
+#define NB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG6_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_PCIE0RCBDG_INDCFG6_RC_SMN_DATA
+#define NB_PCIE0RCBDG_INDCFG6_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_PCIE0RCBDG_INDCFG6_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_nb_NBIF1rcbdg_indcfg0_pciercbdgind_cfgdec
+//NB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION
+#define NB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION__SHIFT 0x0
+#define NB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX_EXTENSION__RC_SMN_INDEX_EXTENSION_MASK 0x000000FFL
+//NB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX
+#define NB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX__RC_SMN_INDEX__SHIFT 0x0
+#define NB_NBIF1RCBDG_INDCFG0_RC_SMN_INDEX__RC_SMN_INDEX_MASK 0xFFFFFFFFL
+//NB_NBIF1RCBDG_INDCFG0_RC_SMN_DATA
+#define NB_NBIF1RCBDG_INDCFG0_RC_SMN_DATA__RC_SMN_DATA__SHIFT 0x0
+#define NB_NBIF1RCBDG_INDCFG0_RC_SMN_DATA__RC_SMN_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_iommu_l2a_l2acfg
+//L2_PERF_CNTL_0
+#define L2_PERF_CNTL_0__L2PerfEvent0__SHIFT 0x0
+#define L2_PERF_CNTL_0__L2PerfEvent1__SHIFT 0x8
+#define L2_PERF_CNTL_0__L2PerfCountUpper0__SHIFT 0x10
+#define L2_PERF_CNTL_0__L2PerfCountUpper1__SHIFT 0x18
+#define L2_PERF_CNTL_0__L2PerfEvent0_MASK 0x000000FFL
+#define L2_PERF_CNTL_0__L2PerfEvent1_MASK 0x0000FF00L
+#define L2_PERF_CNTL_0__L2PerfCountUpper0_MASK 0x00FF0000L
+#define L2_PERF_CNTL_0__L2PerfCountUpper1_MASK 0xFF000000L
+//L2_PERF_COUNT_0
+#define L2_PERF_COUNT_0__L2PerfCount0__SHIFT 0x0
+#define L2_PERF_COUNT_0__L2PerfCount0_MASK 0xFFFFFFFFL
+//L2_PERF_COUNT_1
+#define L2_PERF_COUNT_1__L2PerfCount1__SHIFT 0x0
+#define L2_PERF_COUNT_1__L2PerfCount1_MASK 0xFFFFFFFFL
+//L2_PERF_CNTL_1
+#define L2_PERF_CNTL_1__L2PerfEvent2__SHIFT 0x0
+#define L2_PERF_CNTL_1__L2PerfEvent3__SHIFT 0x8
+#define L2_PERF_CNTL_1__L2PerfCountUpper2__SHIFT 0x10
+#define L2_PERF_CNTL_1__L2PerfCountUpper3__SHIFT 0x18
+#define L2_PERF_CNTL_1__L2PerfEvent2_MASK 0x000000FFL
+#define L2_PERF_CNTL_1__L2PerfEvent3_MASK 0x0000FF00L
+#define L2_PERF_CNTL_1__L2PerfCountUpper2_MASK 0x00FF0000L
+#define L2_PERF_CNTL_1__L2PerfCountUpper3_MASK 0xFF000000L
+//L2_PERF_COUNT_2
+#define L2_PERF_COUNT_2__L2PerfCount2__SHIFT 0x0
+#define L2_PERF_COUNT_2__L2PerfCount2_MASK 0xFFFFFFFFL
+//L2_PERF_COUNT_3
+#define L2_PERF_COUNT_3__L2PerfCount3__SHIFT 0x0
+#define L2_PERF_COUNT_3__L2PerfCount3_MASK 0xFFFFFFFFL
+//L2_STATUS_0
+#define L2_STATUS_0__L2STATUS0__SHIFT 0x0
+#define L2_STATUS_0__L2STATUS0_MASK 0xFFFFFFFFL
+//L2_CONTROL_0
+#define L2_CONTROL_0__AllowL1CacheVZero__SHIFT 0x1
+#define L2_CONTROL_0__AllowL1CacheATSRsp__SHIFT 0x2
+#define L2_CONTROL_0__DTCHitVZeroOrIVZero__SHIFT 0x3
+#define L2_CONTROL_0__SIDEPTEOnUntransExcl__SHIFT 0xa
+#define L2_CONTROL_0__SIDEPTEOnAddrTransExcl__SHIFT 0xb
+#define L2_CONTROL_0__AllowL1CacheLargePagemode0__SHIFT 0x13
+#define L2_CONTROL_0__IFifoBurstLength__SHIFT 0x14
+#define L2_CONTROL_0__IFifoClientPriority__SHIFT 0x18
+#define L2_CONTROL_0__AllowL1CacheVZero_MASK 0x00000002L
+#define L2_CONTROL_0__AllowL1CacheATSRsp_MASK 0x00000004L
+#define L2_CONTROL_0__DTCHitVZeroOrIVZero_MASK 0x00000008L
+#define L2_CONTROL_0__SIDEPTEOnUntransExcl_MASK 0x00000400L
+#define L2_CONTROL_0__SIDEPTEOnAddrTransExcl_MASK 0x00000800L
+#define L2_CONTROL_0__AllowL1CacheLargePagemode0_MASK 0x00080000L
+#define L2_CONTROL_0__IFifoBurstLength_MASK 0x00F00000L
+#define L2_CONTROL_0__IFifoClientPriority_MASK 0xFF000000L
+//L2_CONTROL_1
+#define L2_CONTROL_1__SeqInvBurstLimitInv__SHIFT 0x0
+#define L2_CONTROL_1__SeqInvBurstLimitL2Req__SHIFT 0x8
+#define L2_CONTROL_1__SeqInvBurstLimitEn__SHIFT 0x10
+#define L2_CONTROL_1__DBUSDis__SHIFT 0x11
+#define L2_CONTROL_1__PerfThreshold__SHIFT 0x18
+#define L2_CONTROL_1__SeqInvBurstLimitInv_MASK 0x000000FFL
+#define L2_CONTROL_1__SeqInvBurstLimitL2Req_MASK 0x0000FF00L
+#define L2_CONTROL_1__SeqInvBurstLimitEn_MASK 0x00010000L
+#define L2_CONTROL_1__DBUSDis_MASK 0x00020000L
+#define L2_CONTROL_1__PerfThreshold_MASK 0xFF000000L
+//L2_DTC_CONTROL
+#define L2_DTC_CONTROL__DTCLRUUpdatePri__SHIFT 0x3
+#define L2_DTC_CONTROL__DTCParityEn__SHIFT 0x4
+#define L2_DTC_CONTROL__DTCInvalidationSel__SHIFT 0x8
+#define L2_DTC_CONTROL__DTCSoftInvalidate__SHIFT 0xa
+#define L2_DTC_CONTROL__DTCBypass__SHIFT 0xd
+#define L2_DTC_CONTROL__DTCParitySupport__SHIFT 0xf
+#define L2_DTC_CONTROL__DTCWays__SHIFT 0x10
+#define L2_DTC_CONTROL__DTCEntries__SHIFT 0x1c
+#define L2_DTC_CONTROL__DTCLRUUpdatePri_MASK 0x00000008L
+#define L2_DTC_CONTROL__DTCParityEn_MASK 0x00000010L
+#define L2_DTC_CONTROL__DTCInvalidationSel_MASK 0x00000300L
+#define L2_DTC_CONTROL__DTCSoftInvalidate_MASK 0x00000400L
+#define L2_DTC_CONTROL__DTCBypass_MASK 0x00002000L
+#define L2_DTC_CONTROL__DTCParitySupport_MASK 0x00008000L
+#define L2_DTC_CONTROL__DTCWays_MASK 0x00FF0000L
+#define L2_DTC_CONTROL__DTCEntries_MASK 0xF0000000L
+//L2_DTC_HASH_CONTROL
+#define L2_DTC_HASH_CONTROL__DTCAddressMask__SHIFT 0x10
+#define L2_DTC_HASH_CONTROL__DTCAddressMask_MASK 0xFFFF0000L
+//L2_DTC_WAY_CONTROL
+#define L2_DTC_WAY_CONTROL__DTCWayDisable__SHIFT 0x0
+#define L2_DTC_WAY_CONTROL__DTCWayAccessDisable__SHIFT 0x10
+#define L2_DTC_WAY_CONTROL__DTCWayDisable_MASK 0x0000FFFFL
+#define L2_DTC_WAY_CONTROL__DTCWayAccessDisable_MASK 0xFFFF0000L
+//L2_ITC_CONTROL
+#define L2_ITC_CONTROL__ITCLRUUpdatePri__SHIFT 0x3
+#define L2_ITC_CONTROL__ITCParityEn__SHIFT 0x4
+#define L2_ITC_CONTROL__ITCInvalidationSel__SHIFT 0x8
+#define L2_ITC_CONTROL__ITCSoftInvalidate__SHIFT 0xa
+#define L2_ITC_CONTROL__ITCBypass__SHIFT 0xd
+#define L2_ITC_CONTROL__ITCParitySupport__SHIFT 0xf
+#define L2_ITC_CONTROL__ITCWays__SHIFT 0x10
+#define L2_ITC_CONTROL__ITCEntries__SHIFT 0x1c
+#define L2_ITC_CONTROL__ITCLRUUpdatePri_MASK 0x00000008L
+#define L2_ITC_CONTROL__ITCParityEn_MASK 0x00000010L
+#define L2_ITC_CONTROL__ITCInvalidationSel_MASK 0x00000300L
+#define L2_ITC_CONTROL__ITCSoftInvalidate_MASK 0x00000400L
+#define L2_ITC_CONTROL__ITCBypass_MASK 0x00002000L
+#define L2_ITC_CONTROL__ITCParitySupport_MASK 0x00008000L
+#define L2_ITC_CONTROL__ITCWays_MASK 0x00FF0000L
+#define L2_ITC_CONTROL__ITCEntries_MASK 0xF0000000L
+//L2_ITC_HASH_CONTROL
+#define L2_ITC_HASH_CONTROL__ITCAddressMask__SHIFT 0x10
+#define L2_ITC_HASH_CONTROL__ITCAddressMask_MASK 0xFFFF0000L
+//L2_ITC_WAY_CONTROL
+#define L2_ITC_WAY_CONTROL__ITCWayDisable__SHIFT 0x0
+#define L2_ITC_WAY_CONTROL__ITCWayAccessDisable__SHIFT 0x10
+#define L2_ITC_WAY_CONTROL__ITCWayDisable_MASK 0x0000FFFFL
+#define L2_ITC_WAY_CONTROL__ITCWayAccessDisable_MASK 0xFFFF0000L
+//L2_PTC_A_CONTROL
+#define L2_PTC_A_CONTROL__PTCAStoreFinalATSeperate__SHIFT 0x1
+#define L2_PTC_A_CONTROL__PTCAStorePartialATSeperate__SHIFT 0x2
+#define L2_PTC_A_CONTROL__PTCALRUUpdatePri__SHIFT 0x3
+#define L2_PTC_A_CONTROL__PTCAParityEn__SHIFT 0x4
+#define L2_PTC_A_CONTROL__PTCAInvalidationSel__SHIFT 0x8
+#define L2_PTC_A_CONTROL__PTCASoftInvalidate__SHIFT 0xa
+#define L2_PTC_A_CONTROL__PTCA2MMode__SHIFT 0xb
+#define L2_PTC_A_CONTROL__PCTA_Inv_Overlapping_Pages__SHIFT 0xc
+#define L2_PTC_A_CONTROL__PTCABypass__SHIFT 0xd
+#define L2_PTC_A_CONTROL__PTCAFastInvalidateGuest__SHIFT 0xe
+#define L2_PTC_A_CONTROL__PTCAParitySupport__SHIFT 0xf
+#define L2_PTC_A_CONTROL__PTCAWays__SHIFT 0x10
+#define L2_PTC_A_CONTROL__PTCAEntries__SHIFT 0x1c
+#define L2_PTC_A_CONTROL__PTCAStoreFinalATSeperate_MASK 0x00000002L
+#define L2_PTC_A_CONTROL__PTCAStorePartialATSeperate_MASK 0x00000004L
+#define L2_PTC_A_CONTROL__PTCALRUUpdatePri_MASK 0x00000008L
+#define L2_PTC_A_CONTROL__PTCAParityEn_MASK 0x00000010L
+#define L2_PTC_A_CONTROL__PTCAInvalidationSel_MASK 0x00000300L
+#define L2_PTC_A_CONTROL__PTCASoftInvalidate_MASK 0x00000400L
+#define L2_PTC_A_CONTROL__PTCA2MMode_MASK 0x00000800L
+#define L2_PTC_A_CONTROL__PCTA_Inv_Overlapping_Pages_MASK 0x00001000L
+#define L2_PTC_A_CONTROL__PTCABypass_MASK 0x00002000L
+#define L2_PTC_A_CONTROL__PTCAFastInvalidateGuest_MASK 0x00004000L
+#define L2_PTC_A_CONTROL__PTCAParitySupport_MASK 0x00008000L
+#define L2_PTC_A_CONTROL__PTCAWays_MASK 0x00FF0000L
+#define L2_PTC_A_CONTROL__PTCAEntries_MASK 0xF0000000L
+//L2_PTC_A_HASH_CONTROL
+#define L2_PTC_A_HASH_CONTROL__PTCAAddressMask__SHIFT 0x10
+#define L2_PTC_A_HASH_CONTROL__PTCAAddressMask_MASK 0xFFFF0000L
+//L2_PTC_A_WAY_CONTROL
+#define L2_PTC_A_WAY_CONTROL__PTCAWayDisable__SHIFT 0x0
+#define L2_PTC_A_WAY_CONTROL__PTCAWayAccessDisable__SHIFT 0x10
+#define L2_PTC_A_WAY_CONTROL__PTCAWayDisable_MASK 0x0000FFFFL
+#define L2_PTC_A_WAY_CONTROL__PTCAWayAccessDisable_MASK 0xFFFF0000L
+//L2A_UPDATE_FILTER_CNTL
+#define L2A_UPDATE_FILTER_CNTL__L2a_Update_Filter_Bypass__SHIFT 0x0
+#define L2A_UPDATE_FILTER_CNTL__L2a_Update_Filter_RdLatency__SHIFT 0x1
+#define L2A_UPDATE_FILTER_CNTL__L2a_Update_Filter_Bypass_MASK 0x00000001L
+#define L2A_UPDATE_FILTER_CNTL__L2a_Update_Filter_RdLatency_MASK 0x0000001EL
+//L2_ERR_RULE_CONTROL_3
+#define L2_ERR_RULE_CONTROL_3__ERRRuleLock1__SHIFT 0x0
+#define L2_ERR_RULE_CONTROL_3__ERRRuleDisable3__SHIFT 0x4
+#define L2_ERR_RULE_CONTROL_3__ERRRuleLock1_MASK 0x00000001L
+#define L2_ERR_RULE_CONTROL_3__ERRRuleDisable3_MASK 0xFFFFFFF0L
+//L2_ERR_RULE_CONTROL_4
+#define L2_ERR_RULE_CONTROL_4__ERRRuleDisable4__SHIFT 0x0
+#define L2_ERR_RULE_CONTROL_4__ERRRuleDisable4_MASK 0xFFFFFFFFL
+//L2_ERR_RULE_CONTROL_5
+#define L2_ERR_RULE_CONTROL_5__ERRRuleDisable5__SHIFT 0x0
+#define L2_ERR_RULE_CONTROL_5__ERRRuleDisable5_MASK 0xFFFFFFFFL
+//L2_L2A_CK_GATE_CONTROL
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ARegsDisable__SHIFT 0x0
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ADynamicDisable__SHIFT 0x1
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ACacheDisable__SHIFT 0x2
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2APerfDisable__SHIFT 0x3
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ALength__SHIFT 0x10
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2AStop__SHIFT 0x12
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2APortClkDis__SHIFT 0x14
+#define L2_L2A_CK_GATE_CONTROL__Reserved__SHIFT 0x15
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ARegsDisable_MASK 0x00000001L
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ADynamicDisable_MASK 0x00000002L
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ACacheDisable_MASK 0x00000004L
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2APerfDisable_MASK 0x00000008L
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2ALength_MASK 0x00030000L
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2AStop_MASK 0x000C0000L
+#define L2_L2A_CK_GATE_CONTROL__CKGateL2APortClkDis_MASK 0x00100000L
+#define L2_L2A_CK_GATE_CONTROL__Reserved_MASK 0xFFE00000L
+//L2_L2A_PGSIZE_CONTROL
+#define L2_L2A_PGSIZE_CONTROL__L2AREG_GST_PGSIZE__SHIFT 0x0
+#define L2_L2A_PGSIZE_CONTROL__L2AREG_HOST_PGSIZE__SHIFT 0x8
+#define L2_L2A_PGSIZE_CONTROL__L2AREG_PTCSCAN_MODE__SHIFT 0x11
+#define L2_L2A_PGSIZE_CONTROL__L2AREG_GST_PGSIZE_MASK 0x0000007FL
+#define L2_L2A_PGSIZE_CONTROL__L2AREG_HOST_PGSIZE_MASK 0x00007F00L
+#define L2_L2A_PGSIZE_CONTROL__L2AREG_PTCSCAN_MODE_MASK 0x000E0000L
+//L2_PWRGATE_CNTRL_REG_0
+#define L2_PWRGATE_CNTRL_REG_0__IP_PG_thres__SHIFT 0x0
+#define L2_PWRGATE_CNTRL_REG_0__IP_PG_thres_MASK 0xFFFFFFFFL
+//L2_PWRGATE_CNTRL_REG_3
+#define L2_PWRGATE_CNTRL_REG_3__IP_PG_en__SHIFT 0x0
+#define L2_PWRGATE_CNTRL_REG_3__IP_PG_busy__SHIFT 0x1
+#define L2_PWRGATE_CNTRL_REG_3__L2_PG_STATUS__SHIFT 0x2
+#define L2_PWRGATE_CNTRL_REG_3__CFG_FW_PG_EXIT_EN__SHIFT 0x3
+#define L2_PWRGATE_CNTRL_REG_3__IP_PG_en_MASK 0x00000001L
+#define L2_PWRGATE_CNTRL_REG_3__IP_PG_busy_MASK 0x00000002L
+#define L2_PWRGATE_CNTRL_REG_3__L2_PG_STATUS_MASK 0x00000004L
+#define L2_PWRGATE_CNTRL_REG_3__CFG_FW_PG_EXIT_EN_MASK 0x00000018L
+//L2_ECO_CNTRL_0
+#define L2_ECO_CNTRL_0__L2_ECO_0__SHIFT 0x0
+#define L2_ECO_CNTRL_0__L2_ECO_0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_iohub_iommu_l2indx_l2indxcfg
+//L2_STATUS_1
+#define L2_STATUS_1__L2STATUS1__SHIFT 0x0
+#define L2_STATUS_1__L2STATUS1_MASK 0xFFFFFFFFL
+//L2_SB_LOCATION
+#define L2_SB_LOCATION__SBlocated_Port__SHIFT 0x0
+#define L2_SB_LOCATION__SBlocated_Core__SHIFT 0x10
+#define L2_SB_LOCATION__SBlocated_Port_MASK 0x0000FFFFL
+#define L2_SB_LOCATION__SBlocated_Core_MASK 0xFFFF0000L
+//L2_CONTROL_5
+#define L2_CONTROL_5__QueueArbFBPri__SHIFT 0x0
+#define L2_CONTROL_5__FC1Dis__SHIFT 0x2
+#define L2_CONTROL_5__DTCUpdateVOneIVZero__SHIFT 0x3
+#define L2_CONTROL_5__DTCUpdateVZeroIVOne__SHIFT 0x4
+#define L2_CONTROL_5__FC3Dis__SHIFT 0x6
+#define L2_CONTROL_5__ForceTWonVCQoS__SHIFT 0xb
+#define L2_CONTROL_5__GST_partial_ptc_cntrl__SHIFT 0xc
+#define L2_CONTROL_5__STORE_PDPE_QOS_PTC__SHIFT 0x14
+#define L2_CONTROL_5__DTCUpdatePri__SHIFT 0x19
+#define L2_CONTROL_5__L2B_L2A_v1_trans_credits__SHIFT 0x1a
+#define L2_CONTROL_5__QueueArbFBPri_MASK 0x00000001L
+#define L2_CONTROL_5__FC1Dis_MASK 0x00000004L
+#define L2_CONTROL_5__DTCUpdateVOneIVZero_MASK 0x00000008L
+#define L2_CONTROL_5__DTCUpdateVZeroIVOne_MASK 0x00000010L
+#define L2_CONTROL_5__FC3Dis_MASK 0x00000040L
+#define L2_CONTROL_5__ForceTWonVCQoS_MASK 0x00000800L
+#define L2_CONTROL_5__GST_partial_ptc_cntrl_MASK 0x0007F000L
+#define L2_CONTROL_5__STORE_PDPE_QOS_PTC_MASK 0x00100000L
+#define L2_CONTROL_5__DTCUpdatePri_MASK 0x02000000L
+#define L2_CONTROL_5__L2B_L2A_v1_trans_credits_MASK 0xFC000000L
+//L2_CONTROL_6
+#define L2_CONTROL_6__SeqInvBurstLimitInv__SHIFT 0x0
+#define L2_CONTROL_6__SeqInvBurstLimitPDCReq__SHIFT 0x8
+#define L2_CONTROL_6__SeqInvBurstLimitEn__SHIFT 0x10
+#define L2_CONTROL_6__Perf2Threshold__SHIFT 0x18
+#define L2_CONTROL_6__SeqInvBurstLimitInv_MASK 0x000000FFL
+#define L2_CONTROL_6__SeqInvBurstLimitPDCReq_MASK 0x0000FF00L
+#define L2_CONTROL_6__SeqInvBurstLimitEn_MASK 0x00010000L
+#define L2_CONTROL_6__Perf2Threshold_MASK 0xFF000000L
+//L2_PDC_CONTROL
+#define L2_PDC_CONTROL__PDCLRUUpdatePri__SHIFT 0x3
+#define L2_PDC_CONTROL__PDCParityEn__SHIFT 0x4
+#define L2_PDC_CONTROL__PDCInvalidationSel__SHIFT 0x8
+#define L2_PDC_CONTROL__PDCSoftInvalidate__SHIFT 0xa
+#define L2_PDC_CONTROL__PDC_Inv_Overlapping_Pages__SHIFT 0xb
+#define L2_PDC_CONTROL__PDCSearchDirection__SHIFT 0xc
+#define L2_PDC_CONTROL__PDCBypass__SHIFT 0xd
+#define L2_PDC_CONTROL__PDCModeLookupFix__SHIFT 0xe
+#define L2_PDC_CONTROL__PDCParitySupport__SHIFT 0xf
+#define L2_PDC_CONTROL__PDCWays__SHIFT 0x10
+#define L2_PDC_CONTROL__PDCEntries__SHIFT 0x1c
+#define L2_PDC_CONTROL__PDCLRUUpdatePri_MASK 0x00000008L
+#define L2_PDC_CONTROL__PDCParityEn_MASK 0x00000010L
+#define L2_PDC_CONTROL__PDCInvalidationSel_MASK 0x00000300L
+#define L2_PDC_CONTROL__PDCSoftInvalidate_MASK 0x00000400L
+#define L2_PDC_CONTROL__PDC_Inv_Overlapping_Pages_MASK 0x00000800L
+#define L2_PDC_CONTROL__PDCSearchDirection_MASK 0x00001000L
+#define L2_PDC_CONTROL__PDCBypass_MASK 0x00002000L
+#define L2_PDC_CONTROL__PDCModeLookupFix_MASK 0x00004000L
+#define L2_PDC_CONTROL__PDCParitySupport_MASK 0x00008000L
+#define L2_PDC_CONTROL__PDCWays_MASK 0x00FF0000L
+#define L2_PDC_CONTROL__PDCEntries_MASK 0xF0000000L
+//L2_PDC_HASH_CONTROL
+#define L2_PDC_HASH_CONTROL__PDCAddressMask__SHIFT 0x10
+#define L2_PDC_HASH_CONTROL__PDCAddressMask_MASK 0xFFFF0000L
+//L2_PDC_WAY_CONTROL
+#define L2_PDC_WAY_CONTROL__PDCWayDisable__SHIFT 0x0
+#define L2_PDC_WAY_CONTROL__PDCWayAccessDisable__SHIFT 0x10
+#define L2_PDC_WAY_CONTROL__PDCWayDisable_MASK 0x0000FFFFL
+#define L2_PDC_WAY_CONTROL__PDCWayAccessDisable_MASK 0xFFFF0000L
+//L2B_UPDATE_FILTER_CNTL
+#define L2B_UPDATE_FILTER_CNTL__L2b_Update_Filter_Bypass__SHIFT 0x0
+#define L2B_UPDATE_FILTER_CNTL__L2b_Update_Filter_RdLatency__SHIFT 0x1
+#define L2B_UPDATE_FILTER_CNTL__L2b_Update_Filter_Bypass_MASK 0x00000001L
+#define L2B_UPDATE_FILTER_CNTL__L2b_Update_Filter_RdLatency_MASK 0x0000001EL
+//L2_TW_CONTROL
+#define L2_TW_CONTROL__RESERVED__SHIFT 0x0
+#define L2_TW_CONTROL__TWForceCoherent__SHIFT 0x6
+#define L2_TW_CONTROL__TWPrefetchEn__SHIFT 0x8
+#define L2_TW_CONTROL__TWPrefetchOnly4KDis__SHIFT 0x9
+#define L2_TW_CONTROL__TWPTEOnUntransExcl__SHIFT 0xa
+#define L2_TW_CONTROL__TWPTEOnAddrTransExcl__SHIFT 0xb
+#define L2_TW_CONTROL__TWPrefetchRange__SHIFT 0xc
+#define L2_TW_CONTROL__TWFilter_Dis__SHIFT 0x10
+#define L2_TW_CONTROL__TWFilter_64B_Dis__SHIFT 0x11
+#define L2_TW_CONTROL__TWContWalkOnPErrDis__SHIFT 0x12
+#define L2_TW_CONTROL__TWSetAccessBit_Dis__SHIFT 0x13
+#define L2_TW_CONTROL__TWClearAPBit_Dis__SHIFT 0x14
+#define L2_TW_CONTROL__TWCacheNestedPTE__SHIFT 0x19
+#define L2_TW_CONTROL__RESERVED_MASK 0x0000003FL
+#define L2_TW_CONTROL__TWForceCoherent_MASK 0x00000040L
+#define L2_TW_CONTROL__TWPrefetchEn_MASK 0x00000100L
+#define L2_TW_CONTROL__TWPrefetchOnly4KDis_MASK 0x00000200L
+#define L2_TW_CONTROL__TWPTEOnUntransExcl_MASK 0x00000400L
+#define L2_TW_CONTROL__TWPTEOnAddrTransExcl_MASK 0x00000800L
+#define L2_TW_CONTROL__TWPrefetchRange_MASK 0x00007000L
+#define L2_TW_CONTROL__TWFilter_Dis_MASK 0x00010000L
+#define L2_TW_CONTROL__TWFilter_64B_Dis_MASK 0x00020000L
+#define L2_TW_CONTROL__TWContWalkOnPErrDis_MASK 0x00040000L
+#define L2_TW_CONTROL__TWSetAccessBit_Dis_MASK 0x00080000L
+#define L2_TW_CONTROL__TWClearAPBit_Dis_MASK 0x00100000L
+#define L2_TW_CONTROL__TWCacheNestedPTE_MASK 0x02000000L
+//L2_CP_CONTROL
+#define L2_CP_CONTROL__CPPrefetchDis__SHIFT 0x0
+#define L2_CP_CONTROL__CPFlushOnWait__SHIFT 0x1
+#define L2_CP_CONTROL__CPFlushOnInv__SHIFT 0x2
+#define L2_CP_CONTROL__CPStallCmdErrForIOTLBCmpl__SHIFT 0x3
+#define L2_CP_CONTROL__CPForceReqPassPW__SHIFT 0x4
+#define L2_CP_CONTROL__CPForceOneOutstandingCommand__SHIFT 0x5
+#define L2_CP_CONTROL__CPRdDelay__SHIFT 0x10
+#define L2_CP_CONTROL__CPPrefetchDis_MASK 0x00000001L
+#define L2_CP_CONTROL__CPFlushOnWait_MASK 0x00000002L
+#define L2_CP_CONTROL__CPFlushOnInv_MASK 0x00000004L
+#define L2_CP_CONTROL__CPStallCmdErrForIOTLBCmpl_MASK 0x00000008L
+#define L2_CP_CONTROL__CPForceReqPassPW_MASK 0x00000010L
+#define L2_CP_CONTROL__CPForceOneOutstandingCommand_MASK 0x00000020L
+#define L2_CP_CONTROL__CPRdDelay_MASK 0xFFFF0000L
+//L2_CP_CONTROL_1
+#define L2_CP_CONTROL_1__CPL1Off__SHIFT 0x0
+#define L2_CP_CONTROL_1__Reserved__SHIFT 0x10
+#define L2_CP_CONTROL_1__CPL1Off_MASK 0x0000FFFFL
+#define L2_CP_CONTROL_1__Reserved_MASK 0xFFFF0000L
+//L2_TW_CONTROL_1
+#define L2_TW_CONTROL_1__TWTraceEn__SHIFT 0x0
+#define L2_TW_CONTROL_1__TWTraceNoWrap__SHIFT 0x1
+#define L2_TW_CONTROL_1__TWTraceForceDisable__SHIFT 0x2
+#define L2_TW_CONTROL_1__TWTraceMask__SHIFT 0xf
+#define L2_TW_CONTROL_1__TWTraceEn_MASK 0x00000001L
+#define L2_TW_CONTROL_1__TWTraceNoWrap_MASK 0x00000002L
+#define L2_TW_CONTROL_1__TWTraceForceDisable_MASK 0x00000004L
+#define L2_TW_CONTROL_1__TWTraceMask_MASK 0xFFFF8000L
+//L2_TW_CONTROL_2
+#define L2_TW_CONTROL_2__TWTraceAddrLo__SHIFT 0xc
+#define L2_TW_CONTROL_2__TWTraceAddrLo_MASK 0xFFFFF000L
+//L2_TW_CONTROL_3
+#define L2_TW_CONTROL_3__TWTraceAddrHi__SHIFT 0x0
+#define L2_TW_CONTROL_3__TWTraceAddrHi_MASK 0xFFFFFFFFL
+//L2_CREDIT_CONTROL_0
+#define L2_CREDIT_CONTROL_0__FC1Credits__SHIFT 0x0
+#define L2_CREDIT_CONTROL_0__FC1Override__SHIFT 0x7
+#define L2_CREDIT_CONTROL_0__FC3Credits__SHIFT 0xf
+#define L2_CREDIT_CONTROL_0__FC3Override__SHIFT 0x15
+#define L2_CREDIT_CONTROL_0__FC1Credits_MASK 0x0000007FL
+#define L2_CREDIT_CONTROL_0__FC1Override_MASK 0x00000080L
+#define L2_CREDIT_CONTROL_0__FC3Credits_MASK 0x001F8000L
+#define L2_CREDIT_CONTROL_0__FC3Override_MASK 0x00200000L
+//L2_CREDIT_CONTROL_1
+#define L2_CREDIT_CONTROL_1__CP_PREFETCH_credits__SHIFT 0x10
+#define L2_CREDIT_CONTROL_1__PPR_MCIF_credits__SHIFT 0x14
+#define L2_CREDIT_CONTROL_1__CP_PREFETCH_credits_MASK 0x000F0000L
+#define L2_CREDIT_CONTROL_1__PPR_MCIF_credits_MASK 0x00F00000L
+//L2_ERR_RULE_CONTROL_0
+#define L2_ERR_RULE_CONTROL_0__ERRRuleLock0__SHIFT 0x0
+#define L2_ERR_RULE_CONTROL_0__ERRRuleDisable0__SHIFT 0x1
+#define L2_ERR_RULE_CONTROL_0__ERRRuleLock0_MASK 0x00000001L
+#define L2_ERR_RULE_CONTROL_0__ERRRuleDisable0_MASK 0xFFFFFFFEL
+//L2_ERR_RULE_CONTROL_1
+#define L2_ERR_RULE_CONTROL_1__ERRRuleDisable1__SHIFT 0x0
+#define L2_ERR_RULE_CONTROL_1__ERRRuleDisable1_MASK 0xFFFFFFFFL
+//L2_ERR_RULE_CONTROL_2
+#define L2_ERR_RULE_CONTROL_2__ERRRuleDisable2__SHIFT 0x0
+#define L2_ERR_RULE_CONTROL_2__ERRRuleDisable2_MASK 0xFFFFFFFFL
+//L2_L2B_CK_GATE_CONTROL
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BRegsDisable__SHIFT 0x0
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BDynamicDisable__SHIFT 0x1
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BMiscDisable__SHIFT 0x2
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BCacheDisable__SHIFT 0x3
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BAPCDisable__SHIFT 0x4
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BPerfDisable__SHIFT 0x5
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BPortClkDis__SHIFT 0x6
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BLength__SHIFT 0x10
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BStop__SHIFT 0x12
+#define L2_L2B_CK_GATE_CONTROL__Reserved__SHIFT 0x14
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BRegsDisable_MASK 0x00000001L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BDynamicDisable_MASK 0x00000002L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BMiscDisable_MASK 0x00000004L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BCacheDisable_MASK 0x00000008L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BAPCDisable_MASK 0x00000010L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BPerfDisable_MASK 0x00000020L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BPortClkDis_MASK 0x00000040L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BLength_MASK 0x00030000L
+#define L2_L2B_CK_GATE_CONTROL__CKGateL2BStop_MASK 0x000C0000L
+#define L2_L2B_CK_GATE_CONTROL__Reserved_MASK 0xFFF00000L
+//PPR_CONTROL
+#define PPR_CONTROL__PPR_IntTimeDelay__SHIFT 0x0
+#define PPR_CONTROL__PPR_IntReqDelay__SHIFT 0x8
+#define PPR_CONTROL__PPR_IntCoallesce_En__SHIFT 0x10
+#define PPR_CONTROL__PPR_IntTimeDelay_MASK 0x000000FFL
+#define PPR_CONTROL__PPR_IntReqDelay_MASK 0x0000FF00L
+#define PPR_CONTROL__PPR_IntCoallesce_En_MASK 0x00010000L
+//L2_L2B_PGSIZE_CONTROL
+#define L2_L2B_PGSIZE_CONTROL__L2BREG_GST_PGSIZE__SHIFT 0x0
+#define L2_L2B_PGSIZE_CONTROL__L2BREG_HOST_PGSIZE__SHIFT 0x8
+#define L2_L2B_PGSIZE_CONTROL__L2BREG_GST_PGSIZE_MASK 0x0000007FL
+#define L2_L2B_PGSIZE_CONTROL__L2BREG_HOST_PGSIZE_MASK 0x00007F00L
+//L2_PERF_CNTL_2
+#define L2_PERF_CNTL_2__L2PerfEvent4__SHIFT 0x0
+#define L2_PERF_CNTL_2__L2PerfEvent5__SHIFT 0x8
+#define L2_PERF_CNTL_2__L2PerfCountUpper4__SHIFT 0x10
+#define L2_PERF_CNTL_2__L2PerfCountUpper5__SHIFT 0x18
+#define L2_PERF_CNTL_2__L2PerfEvent4_MASK 0x000000FFL
+#define L2_PERF_CNTL_2__L2PerfEvent5_MASK 0x0000FF00L
+#define L2_PERF_CNTL_2__L2PerfCountUpper4_MASK 0x00FF0000L
+#define L2_PERF_CNTL_2__L2PerfCountUpper5_MASK 0xFF000000L
+//L2_PERF_COUNT_4
+#define L2_PERF_COUNT_4__L2PerfCount4__SHIFT 0x0
+#define L2_PERF_COUNT_4__L2PerfCount4_MASK 0xFFFFFFFFL
+//L2_PERF_COUNT_5
+#define L2_PERF_COUNT_5__L2PerfCount5__SHIFT 0x0
+#define L2_PERF_COUNT_5__L2PerfCount5_MASK 0xFFFFFFFFL
+//L2_PERF_CNTL_3
+#define L2_PERF_CNTL_3__L2PerfEvent6__SHIFT 0x0
+#define L2_PERF_CNTL_3__L2PerfEvent7__SHIFT 0x8
+#define L2_PERF_CNTL_3__L2PerfCountUpper6__SHIFT 0x10
+#define L2_PERF_CNTL_3__L2PerfCountUpper7__SHIFT 0x18
+#define L2_PERF_CNTL_3__L2PerfEvent6_MASK 0x000000FFL
+#define L2_PERF_CNTL_3__L2PerfEvent7_MASK 0x0000FF00L
+#define L2_PERF_CNTL_3__L2PerfCountUpper6_MASK 0x00FF0000L
+#define L2_PERF_CNTL_3__L2PerfCountUpper7_MASK 0xFF000000L
+//L2_PERF_COUNT_6
+#define L2_PERF_COUNT_6__L2PerfCount6__SHIFT 0x0
+#define L2_PERF_COUNT_6__L2PerfCount6_MASK 0xFFFFFFFFL
+//L2_PERF_COUNT_7
+#define L2_PERF_COUNT_7__L2PerfCount7__SHIFT 0x0
+#define L2_PERF_COUNT_7__L2PerfCount7_MASK 0xFFFFFFFFL
+//L2B_SDP_PARITY_ERROR_EN
+#define L2B_SDP_PARITY_ERROR_EN__DVM_PARITY_ERROR_EN__SHIFT 0x0
+#define L2B_SDP_PARITY_ERROR_EN__CP_PARITY_ERROR_EN__SHIFT 0x1
+#define L2B_SDP_PARITY_ERROR_EN__TWW_PARITY_ERROR_EN__SHIFT 0x2
+#define L2B_SDP_PARITY_ERROR_EN__VFMMIO_PARITY_ERROR_EN__SHIFT 0x3
+#define L2B_SDP_PARITY_ERROR_EN__MSG_PARITY_ERROR_EN__SHIFT 0x4
+#define L2B_SDP_PARITY_ERROR_EN__DVM_PARITY_ERROR_EN_MASK 0x00000001L
+#define L2B_SDP_PARITY_ERROR_EN__CP_PARITY_ERROR_EN_MASK 0x00000002L
+#define L2B_SDP_PARITY_ERROR_EN__TWW_PARITY_ERROR_EN_MASK 0x00000004L
+#define L2B_SDP_PARITY_ERROR_EN__VFMMIO_PARITY_ERROR_EN_MASK 0x00000008L
+#define L2B_SDP_PARITY_ERROR_EN__MSG_PARITY_ERROR_EN_MASK 0x00000010L
+//L2_ECO_CNTRL_1
+#define L2_ECO_CNTRL_1__L2_ECO_1__SHIFT 0x0
+#define L2_ECO_CNTRL_1__L2_ECO_1_MASK 0xFFFFFFFFL
+//L2_CP_CONTROL_2
+#define L2_CP_CONTROL_2__OVERCONSTRAIN_CMD_WQ_ON_INV__SHIFT 0x0
+#define L2_CP_CONTROL_2__LEGACY_CWWB_PATH__SHIFT 0x1
+#define L2_CP_CONTROL_2__gstcp_always_refetch_v1__SHIFT 0x2
+#define L2_CP_CONTROL_2__inv_waitcmpl_mode__SHIFT 0x16
+#define L2_CP_CONTROL_2__inv_dvmsync_mode__SHIFT 0x18
+#define L2_CP_CONTROL_2__inv_pspflush_mode__SHIFT 0x1a
+#define L2_CP_CONTROL_2__wqmask_propagation_latency__SHIFT 0x1c
+#define L2_CP_CONTROL_2__OVERCONSTRAIN_CMD_WQ_ON_INV_MASK 0x00000001L
+#define L2_CP_CONTROL_2__LEGACY_CWWB_PATH_MASK 0x00000002L
+#define L2_CP_CONTROL_2__gstcp_always_refetch_v1_MASK 0x00000004L
+#define L2_CP_CONTROL_2__inv_waitcmpl_mode_MASK 0x00C00000L
+#define L2_CP_CONTROL_2__inv_dvmsync_mode_MASK 0x03000000L
+#define L2_CP_CONTROL_2__inv_pspflush_mode_MASK 0x0C000000L
+#define L2_CP_CONTROL_2__wqmask_propagation_latency_MASK 0xF0000000L
+//L2_CP_CONTROL_3
+#define L2_CP_CONTROL_3__INV_CMD_PRIORITY__SHIFT 0x0
+#define L2_CP_CONTROL_3__WAIT_CMD_PRIORITY__SHIFT 0x4
+#define L2_CP_CONTROL_3__SYNC_CMD_PRIORITY__SHIFT 0x8
+#define L2_CP_CONTROL_3__PSP_CMD_PRIORITY__SHIFT 0xc
+#define L2_CP_CONTROL_3__INV_CMD_PRIORITY_MASK 0x0000000FL
+#define L2_CP_CONTROL_3__WAIT_CMD_PRIORITY_MASK 0x000000F0L
+#define L2_CP_CONTROL_3__SYNC_CMD_PRIORITY_MASK 0x00000F00L
+#define L2_CP_CONTROL_3__PSP_CMD_PRIORITY_MASK 0x0000F000L
+
+
+// addressBlock: aid_nbio_iohub_nb_ioapiccfg_ioapic_cfgdec
+//FEATURES_ENABLE
+#define FEATURES_ENABLE__Ioapic_id_ext_en__SHIFT 0x2
+#define FEATURES_ENABLE__Ioapic_sb_feature_en__SHIFT 0x4
+#define FEATURES_ENABLE__Ioapic_secondary_en__SHIFT 0x5
+#define FEATURES_ENABLE__Ioapic_processor_mode__SHIFT 0x8
+#define FEATURES_ENABLE__INTx_LevelOnlyMode__SHIFT 0x9
+#define FEATURES_ENABLE__Ioapic_id_ext_en_MASK 0x00000004L
+#define FEATURES_ENABLE__Ioapic_sb_feature_en_MASK 0x00000010L
+#define FEATURES_ENABLE__Ioapic_secondary_en_MASK 0x00000020L
+#define FEATURES_ENABLE__Ioapic_processor_mode_MASK 0x00000100L
+#define FEATURES_ENABLE__INTx_LevelOnlyMode_MASK 0x00000200L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
+//BIF_CFG_DEV0_RC_VENDOR_ID
+#define BIF_CFG_DEV0_RC_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_DEVICE_ID
+#define BIF_CFG_DEV0_RC_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_COMMAND
+#define BIF_CFG_DEV0_RC_COMMAND__IOEN_DN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_COMMAND__MEMEN_DN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_COMMAND__IOEN_DN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_COMMAND__MEMEN_DN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_RC_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_RC_STATUS
+#define BIF_CFG_DEV0_RC_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_RC_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_RC_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_RC_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_RC_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_RC_REVISION_ID
+#define BIF_CFG_DEV0_RC_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_RC_PROG_INTERFACE
+#define BIF_CFG_DEV0_RC_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_RC_SUB_CLASS
+#define BIF_CFG_DEV0_RC_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_RC_BASE_CLASS
+#define BIF_CFG_DEV0_RC_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_RC_CACHE_LINE
+#define BIF_CFG_DEV0_RC_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_RC_LATENCY
+#define BIF_CFG_DEV0_RC_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_RC_HEADER
+#define BIF_CFG_DEV0_RC_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_RC_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_RC_BIST
+#define BIF_CFG_DEV0_RC_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_RC_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_RC_BASE_ADDR_1
+#define BIF_CFG_DEV0_RC_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_BASE_ADDR_2
+#define BIF_CFG_DEV0_RC_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_RC_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC_IO_BASE_LIMIT
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L
+//BIF_CFG_DEV0_RC_SECONDARY_STATUS
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_RC_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_RC_MEM_BASE_LIMIT
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PREF_BASE_LIMIT
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PREF_BASE_UPPER
+#define BIF_CFG_DEV0_RC_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PREF_LIMIT_UPPER
+#define BIF_CFG_DEV0_RC_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_IO_BASE_LIMIT_HI
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_RC_CAP_PTR
+#define BIF_CFG_DEV0_RC_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_RC_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_RC_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_RC_INTERRUPT_LINE
+#define BIF_CFG_DEV0_RC_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_RC_INTERRUPT_PIN
+#define BIF_CFG_DEV0_RC_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//IRQ_BRIDGE_CNTL
+#define IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0
+#define IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1
+#define IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
+#define IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
+#define IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
+#define IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5
+#define IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6
+#define IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7
+#define IRQ_BRIDGE_CNTL__PRIMARY_DISCARD_TIMER__SHIFT 0x8
+#define IRQ_BRIDGE_CNTL__SECONDARY_DISCARD_TIMER__SHIFT 0x9
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_STATUS__SHIFT 0xa
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_SERR_ENABLE__SHIFT 0xb
+#define IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L
+#define IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L
+#define IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
+#define IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
+#define IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
+#define IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L
+#define IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L
+#define IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L
+#define IRQ_BRIDGE_CNTL__PRIMARY_DISCARD_TIMER_MASK 0x0100L
+#define IRQ_BRIDGE_CNTL__SECONDARY_DISCARD_TIMER_MASK 0x0200L
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_STATUS_MASK 0x0400L
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_SERR_ENABLE_MASK 0x0800L
+//BIF_CFG_DEV0_RC_EXT_BRIDGE_CNTL
+#define BIF_CFG_DEV0_RC_EXT_BRIDGE_CNTL__IO_PORT_80_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_EXT_BRIDGE_CNTL__IO_PORT_80_EN_MASK 0x01L
+//BIF_CFG_DEV0_RC_PMI_CAP_LIST
+#define BIF_CFG_DEV0_RC_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_PMI_CAP
+#define BIF_CFG_DEV0_RC_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_RC_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_RC_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_RC_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_RC_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_PCIE_CAP
+#define BIF_CFG_DEV0_RC_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_RC_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_RC_DEVICE_CAP
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_RC_DEVICE_CNTL
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L
+//BIF_CFG_DEV0_RC_DEVICE_STATUS
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_RC_LINK_CAP
+#define BIF_CFG_DEV0_RC_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_RC_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_RC_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_RC_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_RC_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_RC_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC_LINK_CNTL
+#define BIF_CFG_DEV0_RC_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_RC_LINK_STATUS
+#define BIF_CFG_DEV0_RC_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_RC_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_RC_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_RC_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_RC_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_RC_SLOT_CAP
+#define BIF_CFG_DEV0_RC_SLOT_CAP__ATTN_BUTTON_PRESENT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SLOT_CAP__PWR_CONTROLLER_PRESENT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_SLOT_CAP__MRL_SENSOR_PRESENT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_SLOT_CAP__ATTN_INDICATOR_PRESENT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_SLOT_CAP__PWR_INDICATOR_PRESENT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_SLOT_CAP__HOTPLUG_SURPRISE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_SLOT_CAP__HOTPLUG_CAPABLE__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_SLOT_CAP__PHYSICAL_SLOT_NUM__SHIFT 0x13
+#define BIF_CFG_DEV0_RC_SLOT_CAP__ATTN_BUTTON_PRESENT_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__PWR_CONTROLLER_PRESENT_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__MRL_SENSOR_PRESENT_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__ATTN_INDICATOR_PRESENT_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__PWR_INDICATOR_PRESENT_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__HOTPLUG_SURPRISE_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__HOTPLUG_CAPABLE_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC_SLOT_CAP__PHYSICAL_SLOT_NUM_MASK 0xFFF80000L
+//BIF_CFG_DEV0_RC_SLOT_CNTL
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PWR_FAULT_DETECTED_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__MRL_SENSOR_CHANGED_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__HOTPLUG_INTR_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__ATTN_INDICATOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PWR_INDICATOR_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PWR_CONTROLLER_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__DL_STATE_CHANGED_EN__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__AUTO_SLOT_PWR_LIMIT_DISABLE__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__INBAND_PD_DISABLE__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PWR_FAULT_DETECTED_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__MRL_SENSOR_CHANGED_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__HOTPLUG_INTR_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__ATTN_INDICATOR_CNTL_MASK 0x00C0L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PWR_INDICATOR_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__PWR_CONTROLLER_CNTL_MASK 0x0400L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__DL_STATE_CHANGED_EN_MASK 0x1000L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__AUTO_SLOT_PWR_LIMIT_DISABLE_MASK 0x2000L
+#define BIF_CFG_DEV0_RC_SLOT_CNTL__INBAND_PD_DISABLE_MASK 0x4000L
+//BIF_CFG_DEV0_RC_SLOT_STATUS
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__ATTN_BUTTON_PRESSED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__PWR_FAULT_DETECTED__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__MRL_SENSOR_CHANGED__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__PRESENCE_DETECT_CHANGED__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__COMMAND_COMPLETED__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__MRL_SENSOR_STATE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__PRESENCE_DETECT_STATE__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__DL_STATE_CHANGED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__ATTN_BUTTON_PRESSED_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__PWR_FAULT_DETECTED_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__MRL_SENSOR_CHANGED_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__PRESENCE_DETECT_CHANGED_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__COMMAND_COMPLETED_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__MRL_SENSOR_STATE_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__PRESENCE_DETECT_STATE_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_SLOT_STATUS__DL_STATE_CHANGED_MASK 0x0100L
+//BIF_CFG_DEV0_RC_ROOT_CNTL
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__SERR_ON_CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__SERR_ON_NONFATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__SERR_ON_FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__PM_INTERRUPT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__CRS_SOFTWARE_VISIBILITY_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__SERR_ON_CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__SERR_ON_NONFATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__SERR_ON_FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__PM_INTERRUPT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_ROOT_CNTL__CRS_SOFTWARE_VISIBILITY_EN_MASK 0x0010L
+//BIF_CFG_DEV0_RC_ROOT_CAP
+#define BIF_CFG_DEV0_RC_ROOT_CAP__CRS_SOFTWARE_VISIBILITY__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_ROOT_CAP__CRS_SOFTWARE_VISIBILITY_MASK 0x0001L
+//BIF_CFG_DEV0_RC_ROOT_STATUS
+#define BIF_CFG_DEV0_RC_ROOT_STATUS__PME_REQUESTOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_ROOT_STATUS__PME_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_ROOT_STATUS__PME_PENDING__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_ROOT_STATUS__PME_REQUESTOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_ROOT_STATUS__PME_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_ROOT_STATUS__PME_PENDING_MASK 0x00020000L
+//BIF_CFG_DEV0_RC_DEVICE_CAP2
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_RC_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_RC_DEVICE_CNTL2
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_RC_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_RC_DEVICE_STATUS2
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_LINK_CAP2
+#define BIF_CFG_DEV0_RC_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_RC_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_RC_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_RC_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_RC_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_RC_LINK_CNTL2
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_RC_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_RC_LINK_STATUS2
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_RC_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_RC_SLOT_CAP2
+#define BIF_CFG_DEV0_RC_SLOT_CAP2__INBAND_PD_DISABLE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SLOT_CAP2__INBAND_PD_DISABLE_SUPPORTED_MASK 0x00000001L
+//BIF_CFG_DEV0_RC_SLOT_CNTL2
+#define BIF_CFG_DEV0_RC_SLOT_CNTL2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_SLOT_STATUS2
+#define BIF_CFG_DEV0_RC_SLOT_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_MSI_CAP_LIST
+#define BIF_CFG_DEV0_RC_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_RC_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_RC_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_RC_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_RC_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_RC_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_MSI_MSG_DATA
+#define BIF_CFG_DEV0_RC_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_RC_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_RC_SSID_CAP_LIST
+#define BIF_CFG_DEV0_RC_SSID_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_RC_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_SSID_CAP
+#define BIF_CFG_DEV0_RC_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
+//BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG2
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC_PCIE_PORT_VC_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
+//BIF_CFG_DEV0_RC_PCIE_PORT_VC_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x007F0000L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW1
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW2
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_RC_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_RC_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_RC_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_RC_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD__CORR_ERR_REP_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD__NONFATAL_ERR_REP_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD__FATAL_ERR_REP_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD__CORR_ERR_REP_EN_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD__NONFATAL_ERR_REP_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_CMD__FATAL_ERR_REP_EN_MASK 0x00000004L
+//BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ERR_CORR_RCVD__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__MULT_ERR_CORR_RCVD__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ERR_FATAL_NONFATAL_RCVD__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__MULT_ERR_FATAL_NONFATAL_RCVD__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__FIRST_UNCORRECTABLE_FATAL__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__NONFATAL_ERROR_MSG_RCVD__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__FATAL_ERROR_MSG_RCVD__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ERR_COR_SUBCLASS__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ADV_ERR_INT_MSG_NUM__SHIFT 0x1b
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ERR_CORR_RCVD_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__MULT_ERR_CORR_RCVD_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ERR_FATAL_NONFATAL_RCVD_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__MULT_ERR_FATAL_NONFATAL_RCVD_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__FIRST_UNCORRECTABLE_FATAL_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__NONFATAL_ERROR_MSG_RCVD_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__FATAL_ERROR_MSG_RCVD_MASK 0x00000040L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ERR_COR_SUBCLASS_MASK 0x00000180L
+#define BIF_CFG_DEV0_RC_PCIE_ROOT_ERR_STATUS__ADV_ERR_INT_MSG_NUM_MASK 0xF8000000L
+//BIF_CFG_DEV0_RC_PCIE_ERR_SRC_ID
+#define BIF_CFG_DEV0_RC_PCIE_ERR_SRC_ID__ERR_CORR_SRC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ERR_SRC_ID__ERR_FATAL_NONFATAL_SRC_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_ERR_SRC_ID__ERR_CORR_SRC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_ERR_SRC_ID__ERR_FATAL_NONFATAL_SRC_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3
+#define BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
+//BIF_CFG_DEV0_RC_PCIE_LANE_ERROR_STATUS
+#define BIF_CFG_DEV0_RC_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_RC_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_RC_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_CAP
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_STATUS
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_RC_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_LINK_CAP_16GT
+#define BIF_CFG_DEV0_RC_LINK_CAP_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_LINK_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_RC_LINK_STATUS_16GT
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
+//BIF_CFG_DEV0_RC_LOCAL_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_RC_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC_RTM1_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_RC_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC_RTM2_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_RC_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_RC_LANE_0_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_1_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_2_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_3_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_4_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_5_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_6_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_7_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_8_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_9_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_10_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_11_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_12_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_13_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_14_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_LANE_15_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_RC_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_RC_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST
+#define BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_RC_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_RC_MARGINING_PORT_CAP
+#define BIF_CFG_DEV0_RC_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
+//BIF_CFG_DEV0_RC_MARGINING_PORT_STATUS
+#define BIF_CFG_DEV0_RC_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
+#define BIF_CFG_DEV0_RC_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
+//BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_RC_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_RC_LINK_CAP_32GT
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES__SHIFT 0xb
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED_MASK 0x00000400L
+#define BIF_CFG_DEV0_RC_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES_MASK 0x0000F800L
+//BIF_CFG_DEV0_RC_LINK_CNTL_32GT
+#define BIF_CFG_DEV0_RC_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL_MASK 0x00000700L
+//BIF_CFG_DEV0_RC_LINK_STATUS_32GT
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT__SHIFT 0x0
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT__SHIFT 0x1
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT__SHIFT 0x2
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT__SHIFT 0x3
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT__SHIFT 0x4
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED__SHIFT 0x5
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON__SHIFT 0x8
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST__SHIFT 0x9
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED__SHIFT 0xa
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT_MASK 0x00000010L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED_MASK 0x00000020L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL_MASK 0x000000C0L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON_MASK 0x00000100L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST_MASK 0x00000200L
+#define BIF_CFG_DEV0_RC_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED_MASK 0x00000400L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF0_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_HEADER
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF0_BIST
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF0_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF0_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
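Editor's note, not part of the patch: every register in these address blocks is described by a __SHIFT/_MASK macro pair, and such pairs are conventionally consumed by masking the 32-bit register value and shifting the field down to bit 0. The sketch below illustrates this with the MSI-X message-control macros defined above; the helper names are hypothetical and do not exist in the header.

#include <stdint.h>

/*
 * Illustrative sketch only: read the MSI-X table size field
 * (bits 10:0) out of a BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL value.
 */
static inline uint32_t vf0_msix_table_size(uint32_t msg_cntl)
{
	return (msg_cntl & BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK) >>
	       BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT;
}

/*
 * Illustrative sketch only: clear and optionally set the MSIX_EN bit
 * (bit 15) in the same register value.
 */
static inline uint32_t vf0_msix_set_enable(uint32_t msg_cntl, int enable)
{
	msg_cntl &= ~BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_EN_MASK;
	if (enable)
		msg_cntl |= 1u << BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_EN__SHIFT;
	return msg_cntl;
}

The same mask-then-shift pattern applies to every field defined in this file; only the macro names differ per register and per VF.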
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF1_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF1_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF1_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF1_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF1_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF1_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_HEADER
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF1_BIST
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF1_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF1_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF1_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF2_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF2_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF2_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF2_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF2_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF2_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_HEADER
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF2_BIST
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF2_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF2_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF2_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
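(Hedged aside, not part of the generated header or this patch hunk: the blocks above follow the usual amdgpu convention of paired <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK macros. The sketch below shows how such pairs are typically consumed to read or update a single field; the GET_FIELD/SET_FIELD helpers and the vf2_ari_next_func() wrapper are illustrative names only — the in-tree driver uses its own equivalents, e.g. REG_GET_FIELD/REG_SET_FIELD.)

/* Minimal field-access sketch built on the __SHIFT/_MASK pairs above. */
#include <stdint.h>

/* Extract a named field from a raw register value. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Write a new value into a named field of a raw register value. */
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: read ARI_NEXT_FUNC_NUM (bits 15:8) from the VF2 ARI capability register value. */
static inline uint32_t vf2_ari_next_func(uint32_t ari_cap_val)
{
	return GET_FIELD(ari_cap_val, BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP, ARI_NEXT_FUNC_NUM);
}
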
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF3_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF3_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF3_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF3_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF3_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF3_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_HEADER
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF3_BIST
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF3_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF3_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF3_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF4_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF4_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF4_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF4_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF4_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF4_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_HEADER
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF4_BIST
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF4_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF4_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF4_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF5_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF5_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF5_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF5_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF5_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF5_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_HEADER
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF5_BIST
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF5_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF5_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF5_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF6_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF6_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF6_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF6_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF6_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF6_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_HEADER
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF6_BIST
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF6_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF6_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF6_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF7_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF7_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF7_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF7_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF7_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF7_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_HEADER
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF7_BIST
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF7_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF7_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF7_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CAP__RELAXED_ORDERING_SUPPORTED_MASK 0x0080L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CNTL__STU__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CNTL__STU_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
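The GPU_HDP_FLUSH_REQ/GPU_HDP_FLUSH_DONE pair above dedicates one bit per requesting engine: an engine sets its bit in the REQ register and then polls for the same bit in DONE before treating the HDP flush as complete. A minimal sketch of that handshake, assuming hypothetical reg_read32/reg_write32 accessors and register offsets that are not part of this header:

#include <stdint.h>

/* Hypothetical MMIO helpers and register offsets -- illustration only. */
extern uint32_t reg_read32(uint32_t reg);
extern void reg_write32(uint32_t reg, uint32_t val);
#define regGPU_HDP_FLUSH_REQ  0x0
#define regGPU_HDP_FLUSH_DONE 0x1

/*
 * Request an HDP flush on behalf of SDMA0 and poll until the matching
 * bit shows up in the DONE register.  Returns 0 on success, -1 on timeout.
 */
static int hdp_flush_sdma0(void)
{
	int timeout = 1000;

	reg_write32(regGPU_HDP_FLUSH_REQ,
		    BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK);

	while (timeout--) {
		uint32_t done = reg_read32(regGPU_HDP_FLUSH_DONE);

		if (done & BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK)
			return 0;
	}
	return -1;
}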
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf0_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF0_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF0_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
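MM_INDEX/MM_DATA (with MM_INDEX_HI carrying the upper offset bits) form the usual index/data pair for indirect register access: the target offset and the aperture select bit go into MM_INDEX, and the value is then read or written through MM_DATA. A sketch under the assumption of hypothetical mmio_read32/mmio_write32 helpers and register offsets:

#include <stdint.h>

/* Hypothetical MMIO helpers and MM_INDEX/MM_DATA offsets -- illustration only. */
extern uint32_t mmio_read32(uint32_t reg);
extern void mmio_write32(uint32_t reg, uint32_t val);
#define regMM_INDEX 0x0
#define regMM_DATA  0x1

/* Read a register indirectly through the MM_INDEX/MM_DATA aperture. */
static uint32_t mm_indirect_read(uint32_t offset, int high_aperture)
{
	uint32_t index;

	index = (offset << BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET__SHIFT) &
		BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET_MASK;
	if (high_aperture)
		index |= BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER_MASK;

	mmio_write32(regMM_INDEX, index);
	return mmio_read32(regMM_DATA);
}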
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf0_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF0_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf0_BIFDEC2
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
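Each GFXMSIX_VECTn group above is a standard MSI-X table entry: a 64-bit message address split across ADDR_LO/ADDR_HI (ADDR_LO starts at bit 2, so it holds a DWORD-aligned address), a 32-bit message data word, and a per-vector mask bit in CONTROL. A hedged sketch of programming one entry, again with hypothetical accessors and offsets:

#include <stdint.h>

/* Hypothetical MMIO helper and MSI-X table offsets -- illustration only. */
extern void mmio_write32(uint32_t reg, uint32_t val);
#define regGFXMSIX_VECT0_ADDR_LO  0x0
#define regGFXMSIX_VECT0_ADDR_HI  0x1
#define regGFXMSIX_VECT0_MSG_DATA 0x2
#define regGFXMSIX_VECT0_CONTROL  0x3

/* Program MSI-X vector 0 and leave it unmasked. */
static void gfxmsix_setup_vect0(uint64_t msg_addr, uint32_t msg_data)
{
	/* MSG_ADDR_LO occupies bits [31:2], so the low two address bits must be zero. */
	mmio_write32(regGFXMSIX_VECT0_ADDR_LO,
		     (uint32_t)msg_addr &
		     RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK);
	mmio_write32(regGFXMSIX_VECT0_ADDR_HI, (uint32_t)(msg_addr >> 32));
	mmio_write32(regGFXMSIX_VECT0_MSG_DATA, msg_data);
	/* Clear MASK_BIT (bit 0) to unmask the vector. */
	mmio_write32(regGFXMSIX_VECT0_CONTROL, 0);
}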
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
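The MAILBOX_MSGBUF_TRN/RCV data words together with MAILBOX_CONTROL implement the per-VF mailbox handshake: the sender fills TRN_DW0..DW3, raises TRN_MSG_VALID, and waits for TRN_MSG_ACK, while on the receive side RCV_MSG_VALID flags a pending message that is acknowledged with RCV_MSG_ACK. A sketch of the transmit half, assuming hypothetical helpers and consecutive DW register offsets:

#include <stdint.h>

/* Hypothetical MMIO helpers and mailbox register offsets -- illustration only. */
extern uint32_t mbox_read32(uint32_t reg);
extern void mbox_write32(uint32_t reg, uint32_t val);
#define regMAILBOX_MSGBUF_TRN_DW0 0x0
#define regMAILBOX_CONTROL        0x8

/* Send one four-DWORD mailbox message and wait for the acknowledge bit. */
static int mailbox_send(const uint32_t msg[4])
{
	int i, timeout = 1000;

	for (i = 0; i < 4; i++)
		mbox_write32(regMAILBOX_MSGBUF_TRN_DW0 + i, msg[i]);

	/* Raise TRN_MSG_VALID to hand the message to the other side. */
	mbox_write32(regMAILBOX_CONTROL,
		     BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK);

	while (timeout--) {
		if (mbox_read32(regMAILBOX_CONTROL) &
		    BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK)
			return 0;
	}
	return -1;
}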
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf1_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF1_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF1_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf1_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF1_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf1_BIFDEC2
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf2_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF2_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF2_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf2_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF2_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf2_BIFDEC2
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf3_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF3_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF3_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf3_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF3_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf3_BIFDEC2
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf4_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF4_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF4_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf4_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF4_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf4_BIFDEC2
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf5_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF5_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF5_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf5_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF5_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf5_BIFDEC2
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
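Editor's note (not part of the patch): the __SHIFT/__MASK pairs above are meant to be consumed together — mask the register value to isolate a field, then shift it down, or do the reverse to update it. The minimal standalone sketch below illustrates that pattern using one field copied from the VF5 MAILBOX_CONTROL block above; the FIELD_GET_EX/FIELD_SET_EX helpers are hypothetical stand-ins for the driver's usual field-access macros, shown here only so the example compiles on its own.

/* Illustrative sketch only; helper macro names are hypothetical. */
#include <stdint.h>
#include <stdio.h>

/* Copied from the VF5 MAILBOX_CONTROL definitions above. */
#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID_MASK   0x00000100L

/* Extract a field: mask the register value, then shift it down. */
#define FIELD_GET_EX(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Update a field: clear its bits, then OR in the shifted new value. */
#define FIELD_SET_EX(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t mailbox = 0;

	/* Pretend the hardware raised RCV_MSG_VALID (bit 8). */
	mailbox = FIELD_SET_EX(mailbox, BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL,
			       RCV_MSG_VALID, 1);
	printf("RCV_MSG_VALID = %lu\n",
	       (unsigned long)FIELD_GET_EX(mailbox,
					   BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL,
					   RCV_MSG_VALID));
	return 0;
}
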
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf6_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF6_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF6_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf6_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF6_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf6_BIFDEC2
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS
+#define BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: aid_nbio_nbif0_bif_bx_dev0_epf0_vf7_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF7_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF7_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf7_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF7_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: aid_nbio_nbif0_rcc_dev0_epf0_vf7_BIFDEC2
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
index bd129266ebfd..a84a7cfaf71e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
@@ -135,6 +135,8 @@
#define mmIH_RB_WPTR_ADDR_LO_BASE_IDX 0
#define mmIH_DOORBELL_RPTR 0x0087
#define mmIH_DOORBELL_RPTR_BASE_IDX 0
+#define mmIH_DOORBELL_RETRY_CAM 0x0088
+#define mmIH_DOORBELL_RETRY_CAM_BASE_IDX 0
#define mmIH_RB_CNTL_RING1 0x008c
#define mmIH_RB_CNTL_RING1_BASE_IDX 0
#define mmIH_RB_BASE_RING1 0x008d
@@ -159,6 +161,8 @@
#define mmIH_RB_WPTR_RING2_BASE_IDX 0
#define mmIH_DOORBELL_RPTR_RING2 0x009f
#define mmIH_DOORBELL_RPTR_RING2_BASE_IDX 0
+#define mmIH_RETRY_CAM_ACK 0x00a4
+#define mmIH_RETRY_CAM_ACK_BASE_IDX 0
#define mmIH_VERSION 0x00a5
#define mmIH_VERSION_BASE_IDX 0
#define mmIH_CNTL 0x00c0
@@ -235,6 +239,8 @@
#define mmIH_MMHUB_ERROR_BASE_IDX 0
#define mmIH_MEM_POWER_CTRL 0x00e8
#define mmIH_MEM_POWER_CTRL_BASE_IDX 0
+#define mmIH_RETRY_INT_CAM_CNTL 0x00e9
+#define mmIH_RETRY_INT_CAM_CNTL_BASE_IDX 0
#define mmIH_REGISTER_LAST_PART2 0x00ff
#define mmIH_REGISTER_LAST_PART2_BASE_IDX 0
#define mmSEM_CLK_CTRL 0x0100
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
index 3ea83ea9ce3a..75c04fc275a0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
@@ -349,6 +349,17 @@
#define IH_DOORBELL_RPTR_RING2__ENABLE__SHIFT 0x1c
#define IH_DOORBELL_RPTR_RING2__OFFSET_MASK 0x03FFFFFFL
#define IH_DOORBELL_RPTR_RING2__ENABLE_MASK 0x10000000L
+//IH_RETRY_INT_CAM_CNTL
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8
+#define IH_RETRY_INT_CAM_CNTL__ENABLE__SHIFT 0x10
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_ENABLE__SHIFT 0x11
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00003F00L
+#define IH_RETRY_INT_CAM_CNTL__ENABLE_MASK 0x00010000L
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_ENABLE_MASK 0x00020000L
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L
//IH_VERSION
#define IH_VERSION__MINVER__SHIFT 0x0
#define IH_VERSION__MAJVER__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_offset.h
new file mode 100644
index 000000000000..bd30bd818a58
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_offset.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_4_2_OFFSET_HEADER
+#define _osssys_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_osssys_osssysdec
+// base address: 0x4280
+#define regIH_VMID_0_LUT 0x0000
+#define regIH_VMID_0_LUT_BASE_IDX 0
+#define regIH_VMID_1_LUT 0x0001
+#define regIH_VMID_1_LUT_BASE_IDX 0
+#define regIH_VMID_2_LUT 0x0002
+#define regIH_VMID_2_LUT_BASE_IDX 0
+#define regIH_VMID_3_LUT 0x0003
+#define regIH_VMID_3_LUT_BASE_IDX 0
+#define regIH_VMID_4_LUT 0x0004
+#define regIH_VMID_4_LUT_BASE_IDX 0
+#define regIH_VMID_5_LUT 0x0005
+#define regIH_VMID_5_LUT_BASE_IDX 0
+#define regIH_VMID_6_LUT 0x0006
+#define regIH_VMID_6_LUT_BASE_IDX 0
+#define regIH_VMID_7_LUT 0x0007
+#define regIH_VMID_7_LUT_BASE_IDX 0
+#define regIH_VMID_8_LUT 0x0008
+#define regIH_VMID_8_LUT_BASE_IDX 0
+#define regIH_VMID_9_LUT 0x0009
+#define regIH_VMID_9_LUT_BASE_IDX 0
+#define regIH_VMID_10_LUT 0x000a
+#define regIH_VMID_10_LUT_BASE_IDX 0
+#define regIH_VMID_11_LUT 0x000b
+#define regIH_VMID_11_LUT_BASE_IDX 0
+#define regIH_VMID_12_LUT 0x000c
+#define regIH_VMID_12_LUT_BASE_IDX 0
+#define regIH_VMID_13_LUT 0x000d
+#define regIH_VMID_13_LUT_BASE_IDX 0
+#define regIH_VMID_14_LUT 0x000e
+#define regIH_VMID_14_LUT_BASE_IDX 0
+#define regIH_VMID_15_LUT 0x000f
+#define regIH_VMID_15_LUT_BASE_IDX 0
+#define regIH_VMID_0_LUT_MM 0x0010
+#define regIH_VMID_0_LUT_MM_BASE_IDX 0
+#define regIH_VMID_1_LUT_MM 0x0011
+#define regIH_VMID_1_LUT_MM_BASE_IDX 0
+#define regIH_VMID_2_LUT_MM 0x0012
+#define regIH_VMID_2_LUT_MM_BASE_IDX 0
+#define regIH_VMID_3_LUT_MM 0x0013
+#define regIH_VMID_3_LUT_MM_BASE_IDX 0
+#define regIH_VMID_4_LUT_MM 0x0014
+#define regIH_VMID_4_LUT_MM_BASE_IDX 0
+#define regIH_VMID_5_LUT_MM 0x0015
+#define regIH_VMID_5_LUT_MM_BASE_IDX 0
+#define regIH_VMID_6_LUT_MM 0x0016
+#define regIH_VMID_6_LUT_MM_BASE_IDX 0
+#define regIH_VMID_7_LUT_MM 0x0017
+#define regIH_VMID_7_LUT_MM_BASE_IDX 0
+#define regIH_VMID_8_LUT_MM 0x0018
+#define regIH_VMID_8_LUT_MM_BASE_IDX 0
+#define regIH_VMID_9_LUT_MM 0x0019
+#define regIH_VMID_9_LUT_MM_BASE_IDX 0
+#define regIH_VMID_10_LUT_MM 0x001a
+#define regIH_VMID_10_LUT_MM_BASE_IDX 0
+#define regIH_VMID_11_LUT_MM 0x001b
+#define regIH_VMID_11_LUT_MM_BASE_IDX 0
+#define regIH_VMID_12_LUT_MM 0x001c
+#define regIH_VMID_12_LUT_MM_BASE_IDX 0
+#define regIH_VMID_13_LUT_MM 0x001d
+#define regIH_VMID_13_LUT_MM_BASE_IDX 0
+#define regIH_VMID_14_LUT_MM 0x001e
+#define regIH_VMID_14_LUT_MM_BASE_IDX 0
+#define regIH_VMID_15_LUT_MM 0x001f
+#define regIH_VMID_15_LUT_MM_BASE_IDX 0
+#define regIH_COOKIE_0 0x0020
+#define regIH_COOKIE_0_BASE_IDX 0
+#define regIH_COOKIE_1 0x0021
+#define regIH_COOKIE_1_BASE_IDX 0
+#define regIH_COOKIE_2 0x0022
+#define regIH_COOKIE_2_BASE_IDX 0
+#define regIH_COOKIE_3 0x0023
+#define regIH_COOKIE_3_BASE_IDX 0
+#define regIH_COOKIE_4 0x0024
+#define regIH_COOKIE_4_BASE_IDX 0
+#define regIH_COOKIE_5 0x0025
+#define regIH_COOKIE_5_BASE_IDX 0
+#define regIH_COOKIE_6 0x0026
+#define regIH_COOKIE_6_BASE_IDX 0
+#define regIH_COOKIE_7 0x0027
+#define regIH_COOKIE_7_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART0 0x003f
+#define regIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define regIH_RB_CNTL 0x0080
+#define regIH_RB_CNTL_BASE_IDX 0
+#define regIH_RB_BASE 0x0081
+#define regIH_RB_BASE_BASE_IDX 0
+#define regIH_RB_BASE_HI 0x0082
+#define regIH_RB_BASE_HI_BASE_IDX 0
+#define regIH_RB_RPTR 0x0083
+#define regIH_RB_RPTR_BASE_IDX 0
+#define regIH_RB_WPTR 0x0084
+#define regIH_RB_WPTR_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_HI 0x0085
+#define regIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_LO 0x0086
+#define regIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define regIH_DOORBELL_RPTR 0x0087
+#define regIH_DOORBELL_RPTR_BASE_IDX 0
+#define regIH_DOORBELL_RETRY_CAM 0x0088
+#define regIH_DOORBELL_RETRY_CAM_BASE_IDX 0
+#define regIH_RB_CNTL_RING1 0x008c
+#define regIH_RB_CNTL_RING1_BASE_IDX 0
+#define regIH_RB_BASE_RING1 0x008d
+#define regIH_RB_BASE_RING1_BASE_IDX 0
+#define regIH_RB_BASE_HI_RING1 0x008e
+#define regIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define regIH_RB_RPTR_RING1 0x008f
+#define regIH_RB_RPTR_RING1_BASE_IDX 0
+#define regIH_RB_WPTR_RING1 0x0090
+#define regIH_RB_WPTR_RING1_BASE_IDX 0
+#define regIH_DOORBELL_RPTR_RING1 0x0093
+#define regIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define regIH_RETRY_CAM_ACK 0x00a4
+#define regIH_RETRY_CAM_ACK_BASE_IDX 0
+#define regIH_VERSION 0x00a5
+#define regIH_VERSION_BASE_IDX 0
+#define regIH_CNTL 0x00c0
+#define regIH_CNTL_BASE_IDX 0
+#define regIH_CNTL2 0x00c1
+#define regIH_CNTL2_BASE_IDX 0
+#define regIH_STATUS 0x00c2
+#define regIH_STATUS_BASE_IDX 0
+#define regIH_PERFMON_CNTL 0x00c3
+#define regIH_PERFMON_CNTL_BASE_IDX 0
+#define regIH_PERFCOUNTER0_RESULT 0x00c4
+#define regIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define regIH_PERFCOUNTER1_RESULT 0x00c5
+#define regIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define regIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define regIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define regIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define regIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define regIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define regIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_FCN_ID 0x00cc
+#define regIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define regIH_LIMIT_INT_RATE_CNTL 0x00cd
+#define regIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define regIH_VF_RB_STATUS 0x00ce
+#define regIH_VF_RB_STATUS_BASE_IDX 0
+#define regIH_VF_RB_STATUS2 0x00cf
+#define regIH_VF_RB_STATUS2_BASE_IDX 0
+#define regIH_VF_RB1_STATUS 0x00d0
+#define regIH_VF_RB1_STATUS_BASE_IDX 0
+#define regIH_VF_RB1_STATUS2 0x00d1
+#define regIH_VF_RB1_STATUS2_BASE_IDX 0
+#define regIH_INT_FLOOD_CNTL 0x00d5
+#define regIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define regIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define regIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define regIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_STATUS 0x00d9
+#define regIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_STORM_CLIENT_LIST_CNTL 0x00da
+#define regIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define regIH_CLK_CTRL 0x00db
+#define regIH_CLK_CTRL_BASE_IDX 0
+#define regIH_INT_FLAGS 0x00dc
+#define regIH_INT_FLAGS_BASE_IDX 0
+#define regIH_LAST_INT_INFO0 0x00dd
+#define regIH_LAST_INT_INFO0_BASE_IDX 0
+#define regIH_LAST_INT_INFO1 0x00de
+#define regIH_LAST_INT_INFO1_BASE_IDX 0
+#define regIH_LAST_INT_INFO2 0x00df
+#define regIH_LAST_INT_INFO2_BASE_IDX 0
+#define regIH_SCRATCH 0x00e0
+#define regIH_SCRATCH_BASE_IDX 0
+#define regIH_CLIENT_CREDIT_ERROR 0x00e1
+#define regIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define regIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG2 0x00e3
+#define regIH_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regIH_COOKIE_REC_VIOLATION_LOG 0x00e4
+#define regIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define regIH_CREDIT_STATUS 0x00e5
+#define regIH_CREDIT_STATUS_BASE_IDX 0
+#define regIH_MMHUB_ERROR 0x00e6
+#define regIH_MMHUB_ERROR_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL 0x00e9
+#define regIH_MEM_POWER_CTRL_BASE_IDX 0
+#define regIH_RETRY_INT_CAM_CNTL 0x00ea
+#define regIH_RETRY_INT_CAM_CNTL_BASE_IDX 0
+#define regIH_VMID_LUT_INDEX 0x00ec
+#define regIH_VMID_LUT_INDEX_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL2 0x00f1
+#define regIH_MEM_POWER_CTRL2_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART2 0x00ff
+#define regIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define regSEM_MAILBOX 0x010a
+#define regSEM_MAILBOX_BASE_IDX 0
+#define regSEM_MAILBOX_CLEAR 0x010b
+#define regSEM_MAILBOX_CLEAR_BASE_IDX 0
+#define regSEM_REGISTER_LAST_PART2 0x017f
+#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define regIH_ACTIVE_FCN_ID 0x0180
+#define regIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define regIH_VIRT_RESET_REQ 0x0181
+#define regIH_VIRT_RESET_REQ_BASE_IDX 0
+#define regIH_CLIENT_CFG 0x0184
+#define regIH_CLIENT_CFG_BASE_IDX 0
+#define regIH_CLIENT_CFG_INDEX 0x0188
+#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA 0x0189
+#define regIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA2 0x018a
+#define regIH_CLIENT_CFG_DATA2_BASE_IDX 0
+#define regIH_CID_REMAP_INDEX 0x018b
+#define regIH_CID_REMAP_INDEX_BASE_IDX 0
+#define regIH_CID_REMAP_DATA 0x018c
+#define regIH_CID_REMAP_DATA_BASE_IDX 0
+#define regIH_CHICKEN 0x018d
+#define regIH_CHICKEN_BASE_IDX 0
+#define regIH_INT_DROP_CNTL 0x018f
+#define regIH_INT_DROP_CNTL_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE0 0x0190
+#define regIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE1 0x0191
+#define regIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK0 0x0192
+#define regIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK1 0x0193
+#define regIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define regIH_MMHUB_CNTL 0x019e
+#define regIH_MMHUB_CNTL_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART1 0x019f
+#define regIH_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..7d1cf3bac801
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_sh_mask.h
@@ -0,0 +1,995 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_4_2_SH_MASK_HEADER
+#define _osssys_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_DOORBELL_RETRY_CAM
+#define IH_DOORBELL_RETRY_CAM__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RETRY_CAM__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RETRY_CAM__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RETRY_CAM__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RETRY_CAM_ACK
+#define IH_RETRY_CAM_ACK__INDEX__SHIFT 0x0
+#define IH_RETRY_CAM_ACK__INDEX_MASK 0x000003FFL
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__SRAM_ECC_ENABLE__SHIFT 0x19
+#define IH_CNTL__FED_ENABLE__SHIFT 0x1a
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+#define IH_CNTL__SRAM_ECC_ENABLE_MASK 0x02000000L
+#define IH_CNTL__FED_ENABLE_MASK 0x04000000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__RETRY_INT_CAM_IDLE__SHIFT 0x13
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED__SHIFT 0x14
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED__SHIFT 0x15
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED__SHIFT 0x16
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+#define IH_STATUS__RETRY_INT_CAM_IDLE_MASK 0x00080000L
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED_MASK 0x00100000L
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED_MASK 0x00200000L
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED_MASK 0x00400000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x00000FFCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0FFC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN__SHIFT 0x7
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN_MASK 0x00000080L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x1
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000001L
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000000EL
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x000000FFL
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF_MASK 0x00FF0000L
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x000000FFL
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF_MASK 0x00FF0000L
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x000000FFL
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF_MASK 0x00FF0000L
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x000000FFL
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x000000FFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x000000FFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1c
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x07000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x10000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x17
+#define IH_CLK_CTRL__IH_RAS_CLK_SOFT_OVERRIDE__SHIFT 0x18
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE_MASK 0x00800000L
+#define IH_CLK_CTRL__IH_RAS_CLK_SOFT_OVERRIDE_MASK 0x01000000L
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x14
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x00070000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00100000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00700000L
+//IH_GPU_IOV_VIOLATION_LOG2
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x8
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID__SHIFT 0x1a
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x0000FF00L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0x03FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID_MASK 0x3C000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BUSER_FED__SHIFT 0x8
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+#define IH_MMHUB_ERROR__IH_BUSER_FED_MASK 0x00000100L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN__SHIFT 0x11
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN__SHIFT 0x12
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN__SHIFT 0x13
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0x1e
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN_MASK 0x00020000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN_MASK 0x00040000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN_MASK 0x00080000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0xC0000000L
+//IH_RETRY_INT_CAM_CNTL
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8
+#define IH_RETRY_INT_CAM_CNTL__ENABLE__SHIFT 0x10
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_ENABLE__SHIFT 0x11
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00007F00L
+#define IH_RETRY_INT_CAM_CNTL__ENABLE_MASK 0x00010000L
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_ENABLE_MASK 0x00020000L
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L
+//IH_VMID_LUT_INDEX
+#define IH_VMID_LUT_INDEX__INDEX__SHIFT 0x0
+#define IH_VMID_LUT_INDEX__INDEX_MASK 0x0000000FL
+//IH_MEM_POWER_CTRL2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CLEAR
+#define SEM_MAILBOX_CLEAR__CLEAR__SHIFT 0x0
+#define SEM_MAILBOX_CLEAR__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CLEAR__CLEAR_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CLEAR__RESERVED_MASK 0xFFFF0000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x3
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x00000007L
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF8L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x000000FFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000007FL
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x13
+#define IH_CLIENT_CFG_DATA__RING_ID__SHIFT 0x14
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE__SHIFT 0x19
+#define IH_CLIENT_CFG_DATA__DIE_TYPE__SHIFT 0x1a
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x00080000L
+#define IH_CLIENT_CFG_DATA__RING_ID_MASK 0x00300000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE_MASK 0x02000000L
+#define IH_CLIENT_CFG_DATA__DIE_TYPE_MASK 0x04000000L
+//IH_CLIENT_CFG_DATA2
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR_MASK 0xFFFFFFFFL
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000007L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x18
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0003FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0xFF000000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__REG_FIREWALL_ENABLE__SHIFT 0x5
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+#define IH_CHICKEN__REG_FIREWALL_ENABLE_MASK 0x00000020L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000F00L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x0000F000L
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
new file mode 100644
index 000000000000..31bef0776ded
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
@@ -0,0 +1,1109 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _sdma_4_4_2_OFFSET_HEADER
+#define _sdma_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_sdma_insts_sdma0_sdmadec
+// base address: 0x4980
+#define regSDMA_UCODE_ADDR 0x0000
+#define regSDMA_UCODE_ADDR_BASE_IDX 0
+#define regSDMA_UCODE_DATA 0x0001
+#define regSDMA_UCODE_DATA_BASE_IDX 0
+#define regSDMA_F32_CNTL 0x0002
+#define regSDMA_F32_CNTL_BASE_IDX 0
+#define regSDMA_MMHUB_CNTL 0x0005
+#define regSDMA_MMHUB_CNTL_BASE_IDX 0
+#define regSDMA_MMHUB_TRUSTLVL 0x0006
+#define regSDMA_MMHUB_TRUSTLVL_BASE_IDX 0
+#define regSDMA_VM_CNTL 0x0010
+#define regSDMA_VM_CNTL_BASE_IDX 0
+#define regSDMA_VM_CTX_LO 0x0011
+#define regSDMA_VM_CTX_LO_BASE_IDX 0
+#define regSDMA_VM_CTX_HI 0x0012
+#define regSDMA_VM_CTX_HI_BASE_IDX 0
+#define regSDMA_ACTIVE_FCN_ID 0x0013
+#define regSDMA_ACTIVE_FCN_ID_BASE_IDX 0
+#define regSDMA_VM_CTX_CNTL 0x0014
+#define regSDMA_VM_CTX_CNTL_BASE_IDX 0
+#define regSDMA_VIRT_RESET_REQ 0x0015
+#define regSDMA_VIRT_RESET_REQ_BASE_IDX 0
+#define regSDMA_VF_ENABLE 0x0016
+#define regSDMA_VF_ENABLE_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE0 0x0017
+#define regSDMA_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE1 0x0018
+#define regSDMA_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE2 0x0019
+#define regSDMA_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE3 0x001a
+#define regSDMA_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE0 0x001b
+#define regSDMA_PUB_REG_TYPE0_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE1 0x001c
+#define regSDMA_PUB_REG_TYPE1_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE2 0x001d
+#define regSDMA_PUB_REG_TYPE2_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE3 0x001e
+#define regSDMA_PUB_REG_TYPE3_BASE_IDX 0
+#define regSDMA_CONTEXT_GROUP_BOUNDARY 0x001f
+#define regSDMA_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define regSDMA_RB_RPTR_FETCH_HI 0x0020
+#define regSDMA_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define regSDMA_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA_RB_RPTR_FETCH 0x0022
+#define regSDMA_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA_IB_OFFSET_FETCH 0x0023
+#define regSDMA_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA_PROGRAM 0x0024
+#define regSDMA_PROGRAM_BASE_IDX 0
+#define regSDMA_STATUS_REG 0x0025
+#define regSDMA_STATUS_REG_BASE_IDX 0
+#define regSDMA_STATUS1_REG 0x0026
+#define regSDMA_STATUS1_REG_BASE_IDX 0
+#define regSDMA_RD_BURST_CNTL 0x0027
+#define regSDMA_RD_BURST_CNTL_BASE_IDX 0
+#define regSDMA_HBM_PAGE_CONFIG 0x0028
+#define regSDMA_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA_UCODE_CHECKSUM 0x0029
+#define regSDMA_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA_FREEZE 0x002b
+#define regSDMA_FREEZE_BASE_IDX 0
+#define regSDMA_PHASE0_QUANTUM 0x002c
+#define regSDMA_PHASE0_QUANTUM_BASE_IDX 0
+#define regSDMA_PHASE1_QUANTUM 0x002d
+#define regSDMA_PHASE1_QUANTUM_BASE_IDX 0
+#define regSDMA_POWER_GATING 0x002e
+#define regSDMA_POWER_GATING_BASE_IDX 0
+#define regSDMA_PGFSM_CONFIG 0x002f
+#define regSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define regSDMA_PGFSM_WRITE 0x0030
+#define regSDMA_PGFSM_WRITE_BASE_IDX 0
+#define regSDMA_PGFSM_READ 0x0031
+#define regSDMA_PGFSM_READ_BASE_IDX 0
+#define regCC_SDMA_EDC_CONFIG 0x0032
+#define regCC_SDMA_EDC_CONFIG_BASE_IDX 0
+#define regSDMA_BA_THRESHOLD 0x0033
+#define regSDMA_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA_ID 0x0034
+#define regSDMA_ID_BASE_IDX 0
+#define regSDMA_VERSION 0x0035
+#define regSDMA_VERSION_BASE_IDX 0
+#define regSDMA_EDC_COUNTER 0x0036
+#define regSDMA_EDC_COUNTER_BASE_IDX 0
+#define regSDMA_EDC_COUNTER2 0x0037
+#define regSDMA_EDC_COUNTER2_BASE_IDX 0
+#define regSDMA_STATUS2_REG 0x0038
+#define regSDMA_STATUS2_REG_BASE_IDX 0
+#define regSDMA_ATOMIC_CNTL 0x0039
+#define regSDMA_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA_ATOMIC_PREOP_LO 0x003a
+#define regSDMA_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA_ATOMIC_PREOP_HI 0x003b
+#define regSDMA_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA_UTCL1_CNTL 0x003c
+#define regSDMA_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA_UTCL1_WATERMK 0x003d
+#define regSDMA_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA_UTCL1_RD_STATUS 0x003e
+#define regSDMA_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA_UTCL1_WR_STATUS 0x003f
+#define regSDMA_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA_UTCL1_INV0 0x0040
+#define regSDMA_UTCL1_INV0_BASE_IDX 0
+#define regSDMA_UTCL1_INV1 0x0041
+#define regSDMA_UTCL1_INV1_BASE_IDX 0
+#define regSDMA_UTCL1_INV2 0x0042
+#define regSDMA_UTCL1_INV2_BASE_IDX 0
+#define regSDMA_UTCL1_RD_XNACK0 0x0043
+#define regSDMA_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA_UTCL1_RD_XNACK1 0x0044
+#define regSDMA_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA_UTCL1_WR_XNACK0 0x0045
+#define regSDMA_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA_UTCL1_WR_XNACK1 0x0046
+#define regSDMA_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA_UTCL1_TIMEOUT 0x0047
+#define regSDMA_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA_UTCL1_PAGE 0x0048
+#define regSDMA_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA_POWER_CNTL_IDLE 0x0049
+#define regSDMA_POWER_CNTL_IDLE_BASE_IDX 0
+#define regSDMA_RELAX_ORDERING_LUT 0x004a
+#define regSDMA_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA_CHICKEN_BITS_2 0x004b
+#define regSDMA_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA_STATUS3_REG 0x004c
+#define regSDMA_STATUS3_REG_BASE_IDX 0
+#define regSDMA_PHYSICAL_ADDR_LO 0x004d
+#define regSDMA_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA_PHYSICAL_ADDR_HI 0x004e
+#define regSDMA_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA_PHASE2_QUANTUM 0x004f
+#define regSDMA_PHASE2_QUANTUM_BASE_IDX 0
+#define regSDMA_ERROR_LOG 0x0050
+#define regSDMA_ERROR_LOG_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG0 0x0051
+#define regSDMA_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG1 0x0052
+#define regSDMA_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG2 0x0053
+#define regSDMA_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG3 0x0054
+#define regSDMA_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA_F32_COUNTER 0x0055
+#define regSDMA_F32_COUNTER_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER0_CFG 0x0057
+#define regSDMA_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER1_CFG 0x0058
+#define regSDMA_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x0059
+#define regSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regSDMA_PERFCNT_MISC_CNTL 0x005a
+#define regSDMA_PERFCNT_MISC_CNTL_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_LO 0x005b
+#define regSDMA_PERFCNT_PERFCOUNTER_LO_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_HI 0x005c
+#define regSDMA_PERFCNT_PERFCOUNTER_HI_BASE_IDX 0
+#define regSDMA_CRD_CNTL 0x005d
+#define regSDMA_CRD_CNTL_BASE_IDX 0
+#define regSDMA_GPU_IOV_VIOLATION_LOG 0x005e
+#define regSDMA_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA_ULV_CNTL 0x005f
+#define regSDMA_ULV_CNTL_BASE_IDX 0
+#define regSDMA_EA_DBIT_ADDR_DATA 0x0060
+#define regSDMA_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA_EA_DBIT_ADDR_INDEX 0x0061
+#define regSDMA_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA_GPU_IOV_VIOLATION_LOG2 0x0062
+#define regSDMA_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA_STATUS4_REG 0x0063
+#define regSDMA_STATUS4_REG_BASE_IDX 0
+#define regSDMA_SCRATCH_RAM_DATA 0x0064
+#define regSDMA_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA_SCRATCH_RAM_ADDR 0x0065
+#define regSDMA_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA_CE_CTRL 0x0066
+#define regSDMA_CE_CTRL_BASE_IDX 0
+#define regSDMA_RAS_STATUS 0x0067
+#define regSDMA_RAS_STATUS_BASE_IDX 0
+#define regSDMA_CLK_STATUS 0x0068
+#define regSDMA_CLK_STATUS_BASE_IDX 0
+#define regSDMA_POWER_CNTL 0x006b
+#define regSDMA_POWER_CNTL_BASE_IDX 0
+#define regSDMA_CLK_CTRL 0x006c
+#define regSDMA_CLK_CTRL_BASE_IDX 0
+#define regSDMA_CNTL 0x006d
+#define regSDMA_CNTL_BASE_IDX 0
+#define regSDMA_CHICKEN_BITS 0x006e
+#define regSDMA_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA_GB_ADDR_CONFIG 0x006f
+#define regSDMA_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA_GB_ADDR_CONFIG_READ 0x0070
+#define regSDMA_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA_GFX_RB_CNTL 0x0080
+#define regSDMA_GFX_RB_CNTL_BASE_IDX 0
+#define regSDMA_GFX_RB_BASE 0x0081
+#define regSDMA_GFX_RB_BASE_BASE_IDX 0
+#define regSDMA_GFX_RB_BASE_HI 0x0082
+#define regSDMA_GFX_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR 0x0083
+#define regSDMA_GFX_RB_RPTR_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_HI 0x0084
+#define regSDMA_GFX_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR 0x0085
+#define regSDMA_GFX_RB_WPTR_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_HI 0x0086
+#define regSDMA_GFX_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define regSDMA_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_ADDR_HI 0x0088
+#define regSDMA_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_ADDR_LO 0x0089
+#define regSDMA_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_IB_CNTL 0x008a
+#define regSDMA_GFX_IB_CNTL_BASE_IDX 0
+#define regSDMA_GFX_IB_RPTR 0x008b
+#define regSDMA_GFX_IB_RPTR_BASE_IDX 0
+#define regSDMA_GFX_IB_OFFSET 0x008c
+#define regSDMA_GFX_IB_OFFSET_BASE_IDX 0
+#define regSDMA_GFX_IB_BASE_LO 0x008d
+#define regSDMA_GFX_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_GFX_IB_BASE_HI 0x008e
+#define regSDMA_GFX_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_GFX_IB_SIZE 0x008f
+#define regSDMA_GFX_IB_SIZE_BASE_IDX 0
+#define regSDMA_GFX_SKIP_CNTL 0x0090
+#define regSDMA_GFX_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_GFX_CONTEXT_STATUS 0x0091
+#define regSDMA_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL 0x0092
+#define regSDMA_GFX_DOORBELL_BASE_IDX 0
+#define regSDMA_GFX_CONTEXT_CNTL 0x0093
+#define regSDMA_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define regSDMA_GFX_STATUS 0x00a8
+#define regSDMA_GFX_STATUS_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL_LOG 0x00a9
+#define regSDMA_GFX_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_GFX_WATERMARK 0x00aa
+#define regSDMA_GFX_WATERMARK_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL_OFFSET 0x00ab
+#define regSDMA_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_GFX_CSA_ADDR_LO 0x00ac
+#define regSDMA_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_CSA_ADDR_HI 0x00ad
+#define regSDMA_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_IB_SUB_REMAIN 0x00af
+#define regSDMA_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_GFX_PREEMPT 0x00b0
+#define regSDMA_GFX_PREEMPT_BASE_IDX 0
+#define regSDMA_GFX_DUMMY_REG 0x00b1
+#define regSDMA_GFX_DUMMY_REG_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_RB_AQL_CNTL 0x00b4
+#define regSDMA_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_GFX_MINOR_PTR_UPDATE 0x00b5
+#define regSDMA_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA0 0x00c0
+#define regSDMA_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA1 0x00c1
+#define regSDMA_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA2 0x00c2
+#define regSDMA_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA3 0x00c3
+#define regSDMA_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA4 0x00c4
+#define regSDMA_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA5 0x00c5
+#define regSDMA_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA6 0x00c6
+#define regSDMA_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA7 0x00c7
+#define regSDMA_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA8 0x00c8
+#define regSDMA_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA9 0x00c9
+#define regSDMA_GFX_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA10 0x00ca
+#define regSDMA_GFX_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_CNTL 0x00cb
+#define regSDMA_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_CNTL 0x00d8
+#define regSDMA_PAGE_RB_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_BASE 0x00d9
+#define regSDMA_PAGE_RB_BASE_BASE_IDX 0
+#define regSDMA_PAGE_RB_BASE_HI 0x00da
+#define regSDMA_PAGE_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR 0x00db
+#define regSDMA_PAGE_RB_RPTR_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_HI 0x00dc
+#define regSDMA_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR 0x00dd
+#define regSDMA_PAGE_RB_WPTR_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_HI 0x00de
+#define regSDMA_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define regSDMA_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define regSDMA_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define regSDMA_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_IB_CNTL 0x00e2
+#define regSDMA_PAGE_IB_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_IB_RPTR 0x00e3
+#define regSDMA_PAGE_IB_RPTR_BASE_IDX 0
+#define regSDMA_PAGE_IB_OFFSET 0x00e4
+#define regSDMA_PAGE_IB_OFFSET_BASE_IDX 0
+#define regSDMA_PAGE_IB_BASE_LO 0x00e5
+#define regSDMA_PAGE_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_PAGE_IB_BASE_HI 0x00e6
+#define regSDMA_PAGE_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_PAGE_IB_SIZE 0x00e7
+#define regSDMA_PAGE_IB_SIZE_BASE_IDX 0
+#define regSDMA_PAGE_SKIP_CNTL 0x00e8
+#define regSDMA_PAGE_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_CONTEXT_STATUS 0x00e9
+#define regSDMA_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL 0x00ea
+#define regSDMA_PAGE_DOORBELL_BASE_IDX 0
+#define regSDMA_PAGE_STATUS 0x0100
+#define regSDMA_PAGE_STATUS_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL_LOG 0x0101
+#define regSDMA_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_PAGE_WATERMARK 0x0102
+#define regSDMA_PAGE_WATERMARK_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL_OFFSET 0x0103
+#define regSDMA_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_PAGE_CSA_ADDR_LO 0x0104
+#define regSDMA_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_CSA_ADDR_HI 0x0105
+#define regSDMA_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_IB_SUB_REMAIN 0x0107
+#define regSDMA_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_PAGE_PREEMPT 0x0108
+#define regSDMA_PAGE_PREEMPT_BASE_IDX 0
+#define regSDMA_PAGE_DUMMY_REG 0x0109
+#define regSDMA_PAGE_DUMMY_REG_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_RB_AQL_CNTL 0x010c
+#define regSDMA_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_MINOR_PTR_UPDATE 0x010d
+#define regSDMA_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA0 0x0118
+#define regSDMA_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA1 0x0119
+#define regSDMA_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA2 0x011a
+#define regSDMA_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA3 0x011b
+#define regSDMA_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA4 0x011c
+#define regSDMA_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA5 0x011d
+#define regSDMA_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA6 0x011e
+#define regSDMA_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA7 0x011f
+#define regSDMA_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA8 0x0120
+#define regSDMA_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA9 0x0121
+#define regSDMA_PAGE_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA10 0x0122
+#define regSDMA_PAGE_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_CNTL 0x0123
+#define regSDMA_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_CNTL 0x0130
+#define regSDMA_RLC0_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_BASE 0x0131
+#define regSDMA_RLC0_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC0_RB_BASE_HI 0x0132
+#define regSDMA_RLC0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR 0x0133
+#define regSDMA_RLC0_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_HI 0x0134
+#define regSDMA_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR 0x0135
+#define regSDMA_RLC0_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_HI 0x0136
+#define regSDMA_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define regSDMA_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define regSDMA_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define regSDMA_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_IB_CNTL 0x013a
+#define regSDMA_RLC0_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_IB_RPTR 0x013b
+#define regSDMA_RLC0_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC0_IB_OFFSET 0x013c
+#define regSDMA_RLC0_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC0_IB_BASE_LO 0x013d
+#define regSDMA_RLC0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC0_IB_BASE_HI 0x013e
+#define regSDMA_RLC0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC0_IB_SIZE 0x013f
+#define regSDMA_RLC0_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC0_SKIP_CNTL 0x0140
+#define regSDMA_RLC0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_CONTEXT_STATUS 0x0141
+#define regSDMA_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL 0x0142
+#define regSDMA_RLC0_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC0_STATUS 0x0158
+#define regSDMA_RLC0_STATUS_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL_LOG 0x0159
+#define regSDMA_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC0_WATERMARK 0x015a
+#define regSDMA_RLC0_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL_OFFSET 0x015b
+#define regSDMA_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC0_CSA_ADDR_LO 0x015c
+#define regSDMA_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_CSA_ADDR_HI 0x015d
+#define regSDMA_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_IB_SUB_REMAIN 0x015f
+#define regSDMA_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC0_PREEMPT 0x0160
+#define regSDMA_RLC0_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC0_DUMMY_REG 0x0161
+#define regSDMA_RLC0_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_RB_AQL_CNTL 0x0164
+#define regSDMA_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_MINOR_PTR_UPDATE 0x0165
+#define regSDMA_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA0 0x0170
+#define regSDMA_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA1 0x0171
+#define regSDMA_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA2 0x0172
+#define regSDMA_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA3 0x0173
+#define regSDMA_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA4 0x0174
+#define regSDMA_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA5 0x0175
+#define regSDMA_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA6 0x0176
+#define regSDMA_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA7 0x0177
+#define regSDMA_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA8 0x0178
+#define regSDMA_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA9 0x0179
+#define regSDMA_RLC0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA10 0x017a
+#define regSDMA_RLC0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_CNTL 0x017b
+#define regSDMA_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_CNTL 0x0188
+#define regSDMA_RLC1_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_BASE 0x0189
+#define regSDMA_RLC1_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC1_RB_BASE_HI 0x018a
+#define regSDMA_RLC1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR 0x018b
+#define regSDMA_RLC1_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_HI 0x018c
+#define regSDMA_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR 0x018d
+#define regSDMA_RLC1_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_HI 0x018e
+#define regSDMA_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define regSDMA_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define regSDMA_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define regSDMA_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_IB_CNTL 0x0192
+#define regSDMA_RLC1_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_IB_RPTR 0x0193
+#define regSDMA_RLC1_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC1_IB_OFFSET 0x0194
+#define regSDMA_RLC1_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC1_IB_BASE_LO 0x0195
+#define regSDMA_RLC1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC1_IB_BASE_HI 0x0196
+#define regSDMA_RLC1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC1_IB_SIZE 0x0197
+#define regSDMA_RLC1_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC1_SKIP_CNTL 0x0198
+#define regSDMA_RLC1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_CONTEXT_STATUS 0x0199
+#define regSDMA_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL 0x019a
+#define regSDMA_RLC1_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC1_STATUS 0x01b0
+#define regSDMA_RLC1_STATUS_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL_LOG 0x01b1
+#define regSDMA_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC1_WATERMARK 0x01b2
+#define regSDMA_RLC1_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL_OFFSET 0x01b3
+#define regSDMA_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC1_CSA_ADDR_LO 0x01b4
+#define regSDMA_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_CSA_ADDR_HI 0x01b5
+#define regSDMA_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_IB_SUB_REMAIN 0x01b7
+#define regSDMA_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC1_PREEMPT 0x01b8
+#define regSDMA_RLC1_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC1_DUMMY_REG 0x01b9
+#define regSDMA_RLC1_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_RB_AQL_CNTL 0x01bc
+#define regSDMA_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define regSDMA_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA0 0x01c8
+#define regSDMA_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA1 0x01c9
+#define regSDMA_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA2 0x01ca
+#define regSDMA_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA3 0x01cb
+#define regSDMA_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA4 0x01cc
+#define regSDMA_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA5 0x01cd
+#define regSDMA_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA6 0x01ce
+#define regSDMA_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA7 0x01cf
+#define regSDMA_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA8 0x01d0
+#define regSDMA_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA9 0x01d1
+#define regSDMA_RLC1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA10 0x01d2
+#define regSDMA_RLC1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_CNTL 0x01d3
+#define regSDMA_RLC1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_CNTL 0x01e0
+#define regSDMA_RLC2_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_BASE 0x01e1
+#define regSDMA_RLC2_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC2_RB_BASE_HI 0x01e2
+#define regSDMA_RLC2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR 0x01e3
+#define regSDMA_RLC2_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_HI 0x01e4
+#define regSDMA_RLC2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR 0x01e5
+#define regSDMA_RLC2_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_HI 0x01e6
+#define regSDMA_RLC2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define regSDMA_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define regSDMA_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define regSDMA_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_IB_CNTL 0x01ea
+#define regSDMA_RLC2_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_IB_RPTR 0x01eb
+#define regSDMA_RLC2_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC2_IB_OFFSET 0x01ec
+#define regSDMA_RLC2_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC2_IB_BASE_LO 0x01ed
+#define regSDMA_RLC2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC2_IB_BASE_HI 0x01ee
+#define regSDMA_RLC2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC2_IB_SIZE 0x01ef
+#define regSDMA_RLC2_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC2_SKIP_CNTL 0x01f0
+#define regSDMA_RLC2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_CONTEXT_STATUS 0x01f1
+#define regSDMA_RLC2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL 0x01f2
+#define regSDMA_RLC2_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC2_STATUS 0x0208
+#define regSDMA_RLC2_STATUS_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL_LOG 0x0209
+#define regSDMA_RLC2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC2_WATERMARK 0x020a
+#define regSDMA_RLC2_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL_OFFSET 0x020b
+#define regSDMA_RLC2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC2_CSA_ADDR_LO 0x020c
+#define regSDMA_RLC2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_CSA_ADDR_HI 0x020d
+#define regSDMA_RLC2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_IB_SUB_REMAIN 0x020f
+#define regSDMA_RLC2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC2_PREEMPT 0x0210
+#define regSDMA_RLC2_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC2_DUMMY_REG 0x0211
+#define regSDMA_RLC2_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_RB_AQL_CNTL 0x0214
+#define regSDMA_RLC2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_MINOR_PTR_UPDATE 0x0215
+#define regSDMA_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA0 0x0220
+#define regSDMA_RLC2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA1 0x0221
+#define regSDMA_RLC2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA2 0x0222
+#define regSDMA_RLC2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA3 0x0223
+#define regSDMA_RLC2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA4 0x0224
+#define regSDMA_RLC2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA5 0x0225
+#define regSDMA_RLC2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA6 0x0226
+#define regSDMA_RLC2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA7 0x0227
+#define regSDMA_RLC2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA8 0x0228
+#define regSDMA_RLC2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA9 0x0229
+#define regSDMA_RLC2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA10 0x022a
+#define regSDMA_RLC2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_CNTL 0x022b
+#define regSDMA_RLC2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_CNTL 0x0238
+#define regSDMA_RLC3_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_BASE 0x0239
+#define regSDMA_RLC3_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC3_RB_BASE_HI 0x023a
+#define regSDMA_RLC3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR 0x023b
+#define regSDMA_RLC3_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_HI 0x023c
+#define regSDMA_RLC3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR 0x023d
+#define regSDMA_RLC3_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_HI 0x023e
+#define regSDMA_RLC3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define regSDMA_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define regSDMA_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define regSDMA_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_IB_CNTL 0x0242
+#define regSDMA_RLC3_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_IB_RPTR 0x0243
+#define regSDMA_RLC3_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC3_IB_OFFSET 0x0244
+#define regSDMA_RLC3_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC3_IB_BASE_LO 0x0245
+#define regSDMA_RLC3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC3_IB_BASE_HI 0x0246
+#define regSDMA_RLC3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC3_IB_SIZE 0x0247
+#define regSDMA_RLC3_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC3_SKIP_CNTL 0x0248
+#define regSDMA_RLC3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_CONTEXT_STATUS 0x0249
+#define regSDMA_RLC3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL 0x024a
+#define regSDMA_RLC3_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC3_STATUS 0x0260
+#define regSDMA_RLC3_STATUS_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL_LOG 0x0261
+#define regSDMA_RLC3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC3_WATERMARK 0x0262
+#define regSDMA_RLC3_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL_OFFSET 0x0263
+#define regSDMA_RLC3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC3_CSA_ADDR_LO 0x0264
+#define regSDMA_RLC3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_CSA_ADDR_HI 0x0265
+#define regSDMA_RLC3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_IB_SUB_REMAIN 0x0267
+#define regSDMA_RLC3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC3_PREEMPT 0x0268
+#define regSDMA_RLC3_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC3_DUMMY_REG 0x0269
+#define regSDMA_RLC3_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_RB_AQL_CNTL 0x026c
+#define regSDMA_RLC3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_MINOR_PTR_UPDATE 0x026d
+#define regSDMA_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA0 0x0278
+#define regSDMA_RLC3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA1 0x0279
+#define regSDMA_RLC3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA2 0x027a
+#define regSDMA_RLC3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA3 0x027b
+#define regSDMA_RLC3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA4 0x027c
+#define regSDMA_RLC3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA5 0x027d
+#define regSDMA_RLC3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA6 0x027e
+#define regSDMA_RLC3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA7 0x027f
+#define regSDMA_RLC3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA8 0x0280
+#define regSDMA_RLC3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA9 0x0281
+#define regSDMA_RLC3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA10 0x0282
+#define regSDMA_RLC3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_CNTL 0x0283
+#define regSDMA_RLC3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_CNTL 0x0290
+#define regSDMA_RLC4_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_BASE 0x0291
+#define regSDMA_RLC4_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC4_RB_BASE_HI 0x0292
+#define regSDMA_RLC4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR 0x0293
+#define regSDMA_RLC4_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_HI 0x0294
+#define regSDMA_RLC4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR 0x0295
+#define regSDMA_RLC4_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_HI 0x0296
+#define regSDMA_RLC4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define regSDMA_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define regSDMA_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define regSDMA_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_IB_CNTL 0x029a
+#define regSDMA_RLC4_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_IB_RPTR 0x029b
+#define regSDMA_RLC4_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC4_IB_OFFSET 0x029c
+#define regSDMA_RLC4_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC4_IB_BASE_LO 0x029d
+#define regSDMA_RLC4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC4_IB_BASE_HI 0x029e
+#define regSDMA_RLC4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC4_IB_SIZE 0x029f
+#define regSDMA_RLC4_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC4_SKIP_CNTL 0x02a0
+#define regSDMA_RLC4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_CONTEXT_STATUS 0x02a1
+#define regSDMA_RLC4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL 0x02a2
+#define regSDMA_RLC4_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC4_STATUS 0x02b8
+#define regSDMA_RLC4_STATUS_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL_LOG 0x02b9
+#define regSDMA_RLC4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC4_WATERMARK 0x02ba
+#define regSDMA_RLC4_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL_OFFSET 0x02bb
+#define regSDMA_RLC4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC4_CSA_ADDR_LO 0x02bc
+#define regSDMA_RLC4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_CSA_ADDR_HI 0x02bd
+#define regSDMA_RLC4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_IB_SUB_REMAIN 0x02bf
+#define regSDMA_RLC4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC4_PREEMPT 0x02c0
+#define regSDMA_RLC4_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC4_DUMMY_REG 0x02c1
+#define regSDMA_RLC4_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_RB_AQL_CNTL 0x02c4
+#define regSDMA_RLC4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define regSDMA_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA0 0x02d0
+#define regSDMA_RLC4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA1 0x02d1
+#define regSDMA_RLC4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA2 0x02d2
+#define regSDMA_RLC4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA3 0x02d3
+#define regSDMA_RLC4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA4 0x02d4
+#define regSDMA_RLC4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA5 0x02d5
+#define regSDMA_RLC4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA6 0x02d6
+#define regSDMA_RLC4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA7 0x02d7
+#define regSDMA_RLC4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA8 0x02d8
+#define regSDMA_RLC4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA9 0x02d9
+#define regSDMA_RLC4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA10 0x02da
+#define regSDMA_RLC4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_CNTL 0x02db
+#define regSDMA_RLC4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_CNTL 0x02e8
+#define regSDMA_RLC5_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_BASE 0x02e9
+#define regSDMA_RLC5_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC5_RB_BASE_HI 0x02ea
+#define regSDMA_RLC5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR 0x02eb
+#define regSDMA_RLC5_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_HI 0x02ec
+#define regSDMA_RLC5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR 0x02ed
+#define regSDMA_RLC5_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_HI 0x02ee
+#define regSDMA_RLC5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define regSDMA_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define regSDMA_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define regSDMA_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_IB_CNTL 0x02f2
+#define regSDMA_RLC5_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_IB_RPTR 0x02f3
+#define regSDMA_RLC5_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC5_IB_OFFSET 0x02f4
+#define regSDMA_RLC5_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC5_IB_BASE_LO 0x02f5
+#define regSDMA_RLC5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC5_IB_BASE_HI 0x02f6
+#define regSDMA_RLC5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC5_IB_SIZE 0x02f7
+#define regSDMA_RLC5_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC5_SKIP_CNTL 0x02f8
+#define regSDMA_RLC5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_CONTEXT_STATUS 0x02f9
+#define regSDMA_RLC5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL 0x02fa
+#define regSDMA_RLC5_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC5_STATUS 0x0310
+#define regSDMA_RLC5_STATUS_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL_LOG 0x0311
+#define regSDMA_RLC5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC5_WATERMARK 0x0312
+#define regSDMA_RLC5_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL_OFFSET 0x0313
+#define regSDMA_RLC5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC5_CSA_ADDR_LO 0x0314
+#define regSDMA_RLC5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_CSA_ADDR_HI 0x0315
+#define regSDMA_RLC5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_IB_SUB_REMAIN 0x0317
+#define regSDMA_RLC5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC5_PREEMPT 0x0318
+#define regSDMA_RLC5_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC5_DUMMY_REG 0x0319
+#define regSDMA_RLC5_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_RB_AQL_CNTL 0x031c
+#define regSDMA_RLC5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_MINOR_PTR_UPDATE 0x031d
+#define regSDMA_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA0 0x0328
+#define regSDMA_RLC5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA1 0x0329
+#define regSDMA_RLC5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA2 0x032a
+#define regSDMA_RLC5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA3 0x032b
+#define regSDMA_RLC5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA4 0x032c
+#define regSDMA_RLC5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA5 0x032d
+#define regSDMA_RLC5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA6 0x032e
+#define regSDMA_RLC5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA7 0x032f
+#define regSDMA_RLC5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA8 0x0330
+#define regSDMA_RLC5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA9 0x0331
+#define regSDMA_RLC5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA10 0x0332
+#define regSDMA_RLC5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_CNTL 0x0333
+#define regSDMA_RLC5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_CNTL 0x0340
+#define regSDMA_RLC6_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_BASE 0x0341
+#define regSDMA_RLC6_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC6_RB_BASE_HI 0x0342
+#define regSDMA_RLC6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR 0x0343
+#define regSDMA_RLC6_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_HI 0x0344
+#define regSDMA_RLC6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR 0x0345
+#define regSDMA_RLC6_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_HI 0x0346
+#define regSDMA_RLC6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define regSDMA_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define regSDMA_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define regSDMA_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_IB_CNTL 0x034a
+#define regSDMA_RLC6_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_IB_RPTR 0x034b
+#define regSDMA_RLC6_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC6_IB_OFFSET 0x034c
+#define regSDMA_RLC6_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC6_IB_BASE_LO 0x034d
+#define regSDMA_RLC6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC6_IB_BASE_HI 0x034e
+#define regSDMA_RLC6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC6_IB_SIZE 0x034f
+#define regSDMA_RLC6_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC6_SKIP_CNTL 0x0350
+#define regSDMA_RLC6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_CONTEXT_STATUS 0x0351
+#define regSDMA_RLC6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL 0x0352
+#define regSDMA_RLC6_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC6_STATUS 0x0368
+#define regSDMA_RLC6_STATUS_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL_LOG 0x0369
+#define regSDMA_RLC6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC6_WATERMARK 0x036a
+#define regSDMA_RLC6_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL_OFFSET 0x036b
+#define regSDMA_RLC6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC6_CSA_ADDR_LO 0x036c
+#define regSDMA_RLC6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_CSA_ADDR_HI 0x036d
+#define regSDMA_RLC6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_IB_SUB_REMAIN 0x036f
+#define regSDMA_RLC6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC6_PREEMPT 0x0370
+#define regSDMA_RLC6_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC6_DUMMY_REG 0x0371
+#define regSDMA_RLC6_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_RB_AQL_CNTL 0x0374
+#define regSDMA_RLC6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_MINOR_PTR_UPDATE 0x0375
+#define regSDMA_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA0 0x0380
+#define regSDMA_RLC6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA1 0x0381
+#define regSDMA_RLC6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA2 0x0382
+#define regSDMA_RLC6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA3 0x0383
+#define regSDMA_RLC6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA4 0x0384
+#define regSDMA_RLC6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA5 0x0385
+#define regSDMA_RLC6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA6 0x0386
+#define regSDMA_RLC6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA7 0x0387
+#define regSDMA_RLC6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA8 0x0388
+#define regSDMA_RLC6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA9 0x0389
+#define regSDMA_RLC6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA10 0x038a
+#define regSDMA_RLC6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_CNTL 0x038b
+#define regSDMA_RLC6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_CNTL 0x0398
+#define regSDMA_RLC7_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_BASE 0x0399
+#define regSDMA_RLC7_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC7_RB_BASE_HI 0x039a
+#define regSDMA_RLC7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR 0x039b
+#define regSDMA_RLC7_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_HI 0x039c
+#define regSDMA_RLC7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR 0x039d
+#define regSDMA_RLC7_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_HI 0x039e
+#define regSDMA_RLC7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define regSDMA_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define regSDMA_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define regSDMA_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_IB_CNTL 0x03a2
+#define regSDMA_RLC7_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_IB_RPTR 0x03a3
+#define regSDMA_RLC7_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC7_IB_OFFSET 0x03a4
+#define regSDMA_RLC7_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC7_IB_BASE_LO 0x03a5
+#define regSDMA_RLC7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC7_IB_BASE_HI 0x03a6
+#define regSDMA_RLC7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC7_IB_SIZE 0x03a7
+#define regSDMA_RLC7_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC7_SKIP_CNTL 0x03a8
+#define regSDMA_RLC7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_CONTEXT_STATUS 0x03a9
+#define regSDMA_RLC7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL 0x03aa
+#define regSDMA_RLC7_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC7_STATUS 0x03c0
+#define regSDMA_RLC7_STATUS_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL_LOG 0x03c1
+#define regSDMA_RLC7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC7_WATERMARK 0x03c2
+#define regSDMA_RLC7_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL_OFFSET 0x03c3
+#define regSDMA_RLC7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC7_CSA_ADDR_LO 0x03c4
+#define regSDMA_RLC7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_CSA_ADDR_HI 0x03c5
+#define regSDMA_RLC7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_IB_SUB_REMAIN 0x03c7
+#define regSDMA_RLC7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC7_PREEMPT 0x03c8
+#define regSDMA_RLC7_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC7_DUMMY_REG 0x03c9
+#define regSDMA_RLC7_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_RB_AQL_CNTL 0x03cc
+#define regSDMA_RLC7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define regSDMA_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA0 0x03d8
+#define regSDMA_RLC7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA1 0x03d9
+#define regSDMA_RLC7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA2 0x03da
+#define regSDMA_RLC7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA3 0x03db
+#define regSDMA_RLC7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA4 0x03dc
+#define regSDMA_RLC7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA5 0x03dd
+#define regSDMA_RLC7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA6 0x03de
+#define regSDMA_RLC7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA7 0x03df
+#define regSDMA_RLC7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA8 0x03e0
+#define regSDMA_RLC7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA9 0x03e1
+#define regSDMA_RLC7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA10 0x03e2
+#define regSDMA_RLC7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_CNTL 0x03e3
+#define regSDMA_RLC7_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..e46cb3339355
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
@@ -0,0 +1,3276 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _sdma_4_4_2_SH_MASK_HEADER
+#define _sdma_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_sdma_insts_sdma0_sdmadec
+//SDMA_UCODE_ADDR
+#define SDMA_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA_UCODE_ADDR__VALUE_MASK 0x00003FFFL
+//SDMA_UCODE_DATA
+#define SDMA_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA_F32_CNTL
+#define SDMA_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA_F32_CNTL__RESET__SHIFT 0x8
+#define SDMA_F32_CNTL__CHECKSUM_CLR__SHIFT 0x9
+#define SDMA_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA_F32_CNTL__STEP_MASK 0x00000002L
+#define SDMA_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA_F32_CNTL__RESET_MASK 0x00000100L
+#define SDMA_F32_CNTL__CHECKSUM_CLR_MASK 0x00000200L
+//SDMA_MMHUB_CNTL
+#define SDMA_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA_MMHUB_TRUSTLVL
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x4
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x8
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0xc
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0x10
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0x14
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x18
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x1c
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x0000000FL
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x000000F0L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x00000F00L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x0000F000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x000F0000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00F00000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x0F000000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG7_MASK 0xF0000000L
+//SDMA_VM_CNTL
+#define SDMA_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA_VM_CTX_LO
+#define SDMA_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_VM_CTX_HI
+#define SDMA_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_ACTIVE_FCN_ID
+#define SDMA_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA_VM_CTX_CNTL
+#define SDMA_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA_VIRT_RESET_REQ
+#define SDMA_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA_VF_ENABLE
+#define SDMA_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA_CONTEXT_REG_TYPE0
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE__SHIFT 0x1
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_DOORBELL__SHIFT 0x12
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA_CONTEXT_REG_TYPE1
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_STATUS__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_WATERMARK__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_PREEMPT__SHIFT 0x10
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_STATUS_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA_CONTEXT_REG_TYPE2
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA_CONTEXT_REG_TYPE3
+#define SDMA_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA_PUB_REG_TYPE0
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_ADDR__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_DATA__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE0__SDMA_F32_CNTL__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_CNTL__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_TRUSTLVL__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE0__RESERVED_14_10__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CNTL__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_LO__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_HI__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE0__SDMA_ACTIVE_FCN_ID__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_CNTL__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE0__SDMA_VIRT_RESET_REQ__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE0__SDMA_VF_ENABLE__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE0__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE1__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE2__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE3__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE0__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE1__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE2__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE3__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_GROUP_BOUNDARY__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_ADDR_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_DATA_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE0__SDMA_F32_CNTL_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_CNTL_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_TRUSTLVL_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE0__RESERVED_14_10_MASK 0x00007C00L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CNTL_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_LO_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_HI_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE0__SDMA_ACTIVE_FCN_ID_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_CNTL_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VIRT_RESET_REQ_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VF_ENABLE_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE0_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE1_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE2_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE3_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE0_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE1_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE2_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE3_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_GROUP_BOUNDARY_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE1
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE1__SDMA_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE1__SDMA_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE1__SDMA_PROGRAM__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS_REG__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS1_REG__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE1__SDMA_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE1__SDMA_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE1__SDMA_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA_PUB_REG_TYPE1__RESERVED_10_10__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE1__SDMA_FREEZE__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE1__CC_SDMA_EDC_CONFIG__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE1__SDMA_BA_THRESHOLD__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE1__SDMA_ID__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE1__SDMA_VERSION__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER2__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS2_REG__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE1__SDMA_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE1__SDMA_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE1__SDMA_PROGRAM_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS_REG_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS1_REG_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE1__SDMA_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE1__SDMA_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE1__SDMA_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA_PUB_REG_TYPE1__RESERVED_10_10_MASK 0x00000400L
+#define SDMA_PUB_REG_TYPE1__SDMA_FREEZE_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE1__CC_SDMA_EDC_CONFIG_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE1__SDMA_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ID_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE1__SDMA_VERSION_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER2_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS2_REG_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE2
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV0__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV1__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV2__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_PAGE__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE2__SDMA_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA_PUB_REG_TYPE2__SDMA_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE2__SDMA_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE2__SDMA_STATUS3_REG__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE2__SDMA_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE2__SDMA_ERROR_LOG__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE2__SDMA_F32_COUNTER__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE2__RESERVED_22_22__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER0_CFG__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER1_CFG__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_MISC_CNTL__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_LO__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_HI__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE2__SDMA_CRD_CNTL__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE2__SDMA_GPU_IOV_VIOLATION_LOG__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE2__SDMA_ULV_CNTL__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV0_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV1_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV2_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE2__SDMA_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA_PUB_REG_TYPE2__SDMA_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA_PUB_REG_TYPE2__SDMA_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE2__SDMA_STATUS3_REG_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE2__SDMA_ERROR_LOG_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE2__SDMA_F32_COUNTER_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE2__RESERVED_22_22_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER0_CFG_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER1_CFG_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_MISC_CNTL_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_LO_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_HI_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_CRD_CNTL_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_GPU_IOV_VIOLATION_LOG_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_ULV_CNTL_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE3
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE3__SDMA_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE3__SDMA_STATUS4_REG__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_DATA__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_ADDR__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE3__SDMA_CE_CTRL__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE3__SDMA_RAS_STATUS__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_STATUS__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE3__SDMA_POWER_CNTL__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_CTRL__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE3__SDMA_CNTL__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE3__SDMA_CHICKEN_BITS__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_READ__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE3__RESERVED__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE3__SDMA_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE3__SDMA_STATUS4_REG_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_DATA_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_ADDR_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE3__SDMA_CE_CTRL_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE3__SDMA_RAS_STATUS_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_STATUS_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE3__SDMA_POWER_CNTL_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_CTRL_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE3__SDMA_CNTL_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE3__SDMA_CHICKEN_BITS_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_READ_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE3__RESERVED_MASK 0xFFF80000L
+//SDMA_CONTEXT_GROUP_BOUNDARY
+#define SDMA_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA_RB_RPTR_FETCH_HI
+#define SDMA_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA_RB_RPTR_FETCH
+#define SDMA_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA_IB_OFFSET_FETCH
+#define SDMA_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA_PROGRAM
+#define SDMA_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA_STATUS_REG
+#define SDMA_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA_STATUS1_REG
+#define SDMA_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA_STATUS1_REG__CE_DRM_FULL__SHIFT 0xb
+#define SDMA_STATUS1_REG__CE_DRM1_FULL__SHIFT 0xc
+#define SDMA_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0x10
+#define SDMA_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA_STATUS1_REG__CE_DRM_FULL_MASK 0x00000800L
+#define SDMA_STATUS1_REG__CE_DRM1_FULL_MASK 0x00001000L
+#define SDMA_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00010000L
+#define SDMA_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA_RD_BURST_CNTL
+#define SDMA_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA_HBM_PAGE_CONFIG
+#define SDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA_UCODE_CHECKSUM
+#define SDMA_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA_FREEZE
+#define SDMA_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA_PHASE0_QUANTUM
+#define SDMA_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_PHASE1_QUANTUM
+#define SDMA_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_POWER_GATING
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_CONDITION__SHIFT 0x0
+#define SDMA_POWER_GATING__SDMA_POWER_ON_CONDITION__SHIFT 0x1
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_GATING__SDMA_POWER_ON_REQ__SHIFT 0x3
+#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_CONDITION_MASK 0x00000001L
+#define SDMA_POWER_GATING__SDMA_POWER_ON_CONDITION_MASK 0x00000002L
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_GATING__SDMA_POWER_ON_REQ_MASK 0x00000008L
+#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//SDMA_PGFSM_CONFIG
+#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//SDMA_PGFSM_WRITE
+#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PGFSM_READ
+#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//CC_SDMA_EDC_CONFIG
+#define CC_SDMA_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_SDMA_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_SDMA_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_SDMA_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SDMA_BA_THRESHOLD
+#define SDMA_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA_ID
+#define SDMA_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA_VERSION
+#define SDMA_VERSION__MINVER__SHIFT 0x0
+#define SDMA_VERSION__MAJVER__SHIFT 0x8
+#define SDMA_VERSION__REV__SHIFT 0x10
+#define SDMA_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA_VERSION__REV_MASK 0x003F0000L
+//SDMA_EDC_COUNTER
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x0
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x2
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x4
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0x6
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0x8
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xa
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xc
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0x10
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x12
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x14
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x16
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x18
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x1a
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x1c
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x1e
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000003L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x0000000CL
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000030L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x000000C0L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000300L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00000C00L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00003000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x0000C000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00030000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x000C0000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00300000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00C00000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x03000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x0C000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x30000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0xC0000000L
+//SDMA_EDC_COUNTER2
+#define SDMA_EDC_COUNTER2__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA_EDC_COUNTER2__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA_EDC_COUNTER2__SDMA_IB_CMD_BUF_SED__SHIFT 0x4
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x6
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x8
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_WR_FIFO_SED__SHIFT 0xa
+#define SDMA_EDC_COUNTER2__SDMA_DATA_LUT_FIFO_SED__SHIFT 0xc
+#define SDMA_EDC_COUNTER2__SDMA_SPLIT_DATA_BUF_SED__SHIFT 0xe
+#define SDMA_EDC_COUNTER2__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA_EDC_COUNTER2__SDMA_MC_RDRET_BUF_SED__SHIFT 0x12
+#define SDMA_EDC_COUNTER2__SDMA_UCODE_BUF_SED_MASK 0x00000003L
+#define SDMA_EDC_COUNTER2__SDMA_RB_CMD_BUF_SED_MASK 0x0000000CL
+#define SDMA_EDC_COUNTER2__SDMA_IB_CMD_BUF_SED_MASK 0x00000030L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RD_FIFO_SED_MASK 0x000000C0L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000300L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_WR_FIFO_SED_MASK 0x00000C00L
+#define SDMA_EDC_COUNTER2__SDMA_DATA_LUT_FIFO_SED_MASK 0x00003000L
+#define SDMA_EDC_COUNTER2__SDMA_SPLIT_DATA_BUF_SED_MASK 0x0000C000L
+#define SDMA_EDC_COUNTER2__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00030000L
+#define SDMA_EDC_COUNTER2__SDMA_MC_RDRET_BUF_SED_MASK 0x000C0000L
+//SDMA_STATUS2_REG
+#define SDMA_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA_ATOMIC_CNTL
+#define SDMA_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA_ATOMIC_PREOP_LO
+#define SDMA_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA_ATOMIC_PREOP_HI
+#define SDMA_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_CNTL
+#define SDMA_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA_UTCL1_WATERMK
+#define SDMA_UTCL1_WATERMK__REQ_WATERMK__SHIFT 0x0
+#define SDMA_UTCL1_WATERMK__REQ_DEPTH__SHIFT 0x3
+#define SDMA_UTCL1_WATERMK__PAGE_WATERMK__SHIFT 0x5
+#define SDMA_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x8
+#define SDMA_UTCL1_WATERMK__RESERVED__SHIFT 0x10
+#define SDMA_UTCL1_WATERMK__REQ_WATERMK_MASK 0x00000007L
+#define SDMA_UTCL1_WATERMK__REQ_DEPTH_MASK 0x00000018L
+#define SDMA_UTCL1_WATERMK__PAGE_WATERMK_MASK 0x000000E0L
+#define SDMA_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x0000FF00L
+#define SDMA_UTCL1_WATERMK__RESERVED_MASK 0xFFFF0000L
+//SDMA_UTCL1_RD_STATUS
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA_UTCL1_WR_STATUS
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_EMPTY__SHIFT 0x7
+#define SDMA_UTCL1_WR_STATUS__RESERVED_8__SHIFT 0x8
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_FULL__SHIFT 0x10
+#define SDMA_UTCL1_WR_STATUS__RESERVED_17__SHIFT 0x11
+#define SDMA_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_EMPTY_MASK 0x00000080L
+#define SDMA_UTCL1_WR_STATUS__RESERVED_8_MASK 0x00000100L
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_FULL_MASK 0x00010000L
+#define SDMA_UTCL1_WR_STATUS__RESERVED_17_MASK 0x00020000L
+#define SDMA_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA_UTCL1_INV0
+#define SDMA_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA_UTCL1_INV1
+#define SDMA_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_INV2
+#define SDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_RD_XNACK0
+#define SDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_RD_XNACK1
+#define SDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA_UTCL1_WR_XNACK0
+#define SDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_WR_XNACK1
+#define SDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA_UTCL1_TIMEOUT
+#define SDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA_UTCL1_PAGE
+#define SDMA_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0xa
+#define SDMA_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+#define SDMA_UTCL1_PAGE__LLC_NOALLOC_MASK 0x00000400L
+//SDMA_POWER_CNTL_IDLE
+#define SDMA_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA_RELAX_ORDERING_LUT
+#define SDMA_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA_CHICKEN_BITS_2
+#define SDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+//SDMA_STATUS3_REG
+#define SDMA_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA_PHYSICAL_ADDR_LO
+#define SDMA_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA_PHYSICAL_ADDR_HI
+#define SDMA_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA_PHASE2_QUANTUM
+#define SDMA_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_ERROR_LOG
+#define SDMA_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA_PUB_DUMMY_REG0
+#define SDMA_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG1
+#define SDMA_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG2
+#define SDMA_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG3
+#define SDMA_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA_F32_COUNTER
+#define SDMA_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA_PERFCNT_MISC_CNTL
+#define SDMA_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT__SHIFT 0x10
+#define SDMA_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+#define SDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT_MASK 0x00010000L
+//SDMA_PERFCNT_PERFCOUNTER_LO
+#define SDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA_PERFCNT_PERFCOUNTER_HI
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA_CRD_CNTL
+#define SDMA_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA_GPU_IOV_VIOLATION_LOG
+#define SDMA_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA_ULV_CNTL
+#define SDMA_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA_EA_DBIT_ADDR_DATA
+#define SDMA_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA_EA_DBIT_ADDR_INDEX
+#define SDMA_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA_GPU_IOV_VIOLATION_LOG2
+#define SDMA_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA_STATUS4_REG
+#define SDMA_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x6
+#define SDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x7
+#define SDMA_STATUS4_REG__REG_POLLING__SHIFT 0x8
+#define SDMA_STATUS4_REG__MEM_POLLING__SHIFT 0x9
+#define SDMA_STATUS4_REG__UTCL2_RD_XNACK__SHIFT 0xa
+#define SDMA_STATUS4_REG__UTCL2_WR_XNACK__SHIFT 0xc
+#define SDMA_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0xe
+#define SDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x12
+#define SDMA_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x13
+#define SDMA_STATUS4_REG__VM_HOLE_STATUS__SHIFT 0x14
+#define SDMA_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000040L
+#define SDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000080L
+#define SDMA_STATUS4_REG__REG_POLLING_MASK 0x00000100L
+#define SDMA_STATUS4_REG__MEM_POLLING_MASK 0x00000200L
+#define SDMA_STATUS4_REG__UTCL2_RD_XNACK_MASK 0x00000C00L
+#define SDMA_STATUS4_REG__UTCL2_WR_XNACK_MASK 0x00003000L
+#define SDMA_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x0003C000L
+#define SDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00040000L
+#define SDMA_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00080000L
+#define SDMA_STATUS4_REG__VM_HOLE_STATUS_MASK 0x00100000L
+//SDMA_SCRATCH_RAM_DATA
+#define SDMA_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA_SCRATCH_RAM_ADDR
+#define SDMA_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA_CE_CTRL
+#define SDMA_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA_CE_CTRL__RESERVED__SHIFT 0x8
+#define SDMA_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA_CE_CTRL__RESERVED_MASK 0xFFFFFF00L
+//SDMA_RAS_STATUS
+#define SDMA_RAS_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA_RAS_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA_RAS_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA_RAS_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA_RAS_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA_RAS_STATUS__SRAM_ECC__SHIFT 0x5
+#define SDMA_RAS_STATUS__RB_FETCH_NACK_GEN_ERR__SHIFT 0x8
+#define SDMA_RAS_STATUS__IB_FETCH_NACK_GEN_ERR__SHIFT 0x9
+#define SDMA_RAS_STATUS__F32_DATA_NACK_GEN_ERR__SHIFT 0xa
+#define SDMA_RAS_STATUS__COPY_DATA_NACK_GEN_ERR__SHIFT 0xb
+#define SDMA_RAS_STATUS__WRRET_DATA_NACK_GEN_ERR__SHIFT 0xc
+#define SDMA_RAS_STATUS__WPTR_RPTR_ATOMIC_NACK_GEN_ERR__SHIFT 0xd
+#define SDMA_RAS_STATUS__ECC_PWRMGT_INT_BUSY__SHIFT 0xe
+#define SDMA_RAS_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA_RAS_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA_RAS_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA_RAS_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA_RAS_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA_RAS_STATUS__SRAM_ECC_MASK 0x00000020L
+#define SDMA_RAS_STATUS__RB_FETCH_NACK_GEN_ERR_MASK 0x00000100L
+#define SDMA_RAS_STATUS__IB_FETCH_NACK_GEN_ERR_MASK 0x00000200L
+#define SDMA_RAS_STATUS__F32_DATA_NACK_GEN_ERR_MASK 0x00000400L
+#define SDMA_RAS_STATUS__COPY_DATA_NACK_GEN_ERR_MASK 0x00000800L
+#define SDMA_RAS_STATUS__WRRET_DATA_NACK_GEN_ERR_MASK 0x00001000L
+#define SDMA_RAS_STATUS__WPTR_RPTR_ATOMIC_NACK_GEN_ERR_MASK 0x00002000L
+#define SDMA_RAS_STATUS__ECC_PWRMGT_INT_BUSY_MASK 0x00004000L
+//SDMA_CLK_STATUS
+#define SDMA_CLK_STATUS__DYN_CLK__SHIFT 0x0
+#define SDMA_CLK_STATUS__PTR_CLK__SHIFT 0x1
+#define SDMA_CLK_STATUS__REG_CLK__SHIFT 0x2
+#define SDMA_CLK_STATUS__F32_CLK__SHIFT 0x3
+#define SDMA_CLK_STATUS__CE_CLK__SHIFT 0x4
+#define SDMA_CLK_STATUS__PERF_CLK__SHIFT 0x5
+#define SDMA_CLK_STATUS__DYN_CLK_MASK 0x00000001L
+#define SDMA_CLK_STATUS__PTR_CLK_MASK 0x00000002L
+#define SDMA_CLK_STATUS__REG_CLK_MASK 0x00000004L
+#define SDMA_CLK_STATUS__F32_CLK_MASK 0x00000008L
+#define SDMA_CLK_STATUS__CE_CLK_MASK 0x00000010L
+#define SDMA_CLK_STATUS__PERF_CLK_MASK 0x00000020L
+//SDMA_POWER_CNTL
+#define SDMA_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
+#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
+#define SDMA_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
+#define SDMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
+#define SDMA_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
+#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
+#define SDMA_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+#define SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+#define SDMA_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+//SDMA_CLK_CTRL
+#define SDMA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA_CNTL
+#define SDMA_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x6
+#define SDMA_CNTL__REG_WRITE_PROTECT_INT_ENABLE__SHIFT 0x7
+#define SDMA_CNTL__INVALID_DOORBELL_INT_ENABLE__SHIFT 0x8
+#define SDMA_CNTL__VM_HOLE_INT_ENABLE__SHIFT 0x9
+#define SDMA_CNTL__DRAM_ECC_INT_ENABLE__SHIFT 0xa
+#define SDMA_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA_CNTL__NACK_GEN_ERR_INT_ENABLE__SHIFT 0xe
+#define SDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000040L
+#define SDMA_CNTL__REG_WRITE_PROTECT_INT_ENABLE_MASK 0x00000080L
+#define SDMA_CNTL__INVALID_DOORBELL_INT_ENABLE_MASK 0x00000100L
+#define SDMA_CNTL__VM_HOLE_INT_ENABLE_MASK 0x00000200L
+#define SDMA_CNTL__DRAM_ECC_INT_ENABLE_MASK 0x00000400L
+#define SDMA_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA_CNTL__NACK_GEN_ERR_INT_ENABLE_MASK 0x00004000L
+#define SDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA_CHICKEN_BITS
+#define SDMA_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA_CHICKEN_BITS__F32_MGCG_ENABLE__SHIFT 0x3
+#define SDMA_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA_CHICKEN_BITS__SRAM_FGCG_ENABLE__SHIFT 0x1a
+#define SDMA_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA_CHICKEN_BITS__F32_MGCG_ENABLE_MASK 0x00000008L
+#define SDMA_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA_CHICKEN_BITS__SRAM_FGCG_ENABLE_MASK 0x04000000L
+#define SDMA_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA_GB_ADDR_CONFIG
+#define SDMA_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA_GB_ADDR_CONFIG_READ
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA_GFX_RB_CNTL
+#define SDMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_GFX_RB_BASE
+#define SDMA_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_BASE_HI
+#define SDMA_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_GFX_RB_RPTR
+#define SDMA_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_RPTR_HI
+#define SDMA_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR
+#define SDMA_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_HI
+#define SDMA_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_CNTL
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_GFX_RB_RPTR_ADDR_HI
+#define SDMA_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_RPTR_ADDR_LO
+#define SDMA_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_IB_CNTL
+#define SDMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_GFX_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_GFX_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_GFX_IB_RPTR
+#define SDMA_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_GFX_IB_OFFSET
+#define SDMA_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_GFX_IB_BASE_LO
+#define SDMA_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_GFX_IB_BASE_HI
+#define SDMA_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_IB_SIZE
+#define SDMA_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_GFX_SKIP_CNTL
+#define SDMA_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_GFX_CONTEXT_STATUS
+#define SDMA_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_GFX_DOORBELL
+#define SDMA_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_GFX_CONTEXT_CNTL
+#define SDMA_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA_GFX_CONTEXT_CNTL__SESSION_SEL__SHIFT 0x18
+#define SDMA_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+#define SDMA_GFX_CONTEXT_CNTL__SESSION_SEL_MASK 0x0F000000L
+//SDMA_GFX_STATUS
+#define SDMA_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_GFX_DOORBELL_LOG
+#define SDMA_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_GFX_WATERMARK
+#define SDMA_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_GFX_DOORBELL_OFFSET
+#define SDMA_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_GFX_CSA_ADDR_LO
+#define SDMA_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_CSA_ADDR_HI
+#define SDMA_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_IB_SUB_REMAIN
+#define SDMA_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_GFX_PREEMPT
+#define SDMA_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_GFX_DUMMY_REG
+#define SDMA_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_RB_AQL_CNTL
+#define SDMA_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_GFX_MINOR_PTR_UPDATE
+#define SDMA_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_GFX_MIDCMD_DATA0
+#define SDMA_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA1
+#define SDMA_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA2
+#define SDMA_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA3
+#define SDMA_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA4
+#define SDMA_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA5
+#define SDMA_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA6
+#define SDMA_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA7
+#define SDMA_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA8
+#define SDMA_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA9
+#define SDMA_GFX_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA10
+#define SDMA_GFX_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_CNTL
+#define SDMA_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_PAGE_RB_CNTL
+#define SDMA_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_PAGE_RB_BASE
+#define SDMA_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_BASE_HI
+#define SDMA_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_PAGE_RB_RPTR
+#define SDMA_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_RPTR_HI
+#define SDMA_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR
+#define SDMA_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_HI
+#define SDMA_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_PAGE_RB_RPTR_ADDR_HI
+#define SDMA_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_RPTR_ADDR_LO
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_IB_CNTL
+#define SDMA_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_PAGE_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_PAGE_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_PAGE_IB_RPTR
+#define SDMA_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_PAGE_IB_OFFSET
+#define SDMA_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_PAGE_IB_BASE_LO
+#define SDMA_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_PAGE_IB_BASE_HI
+#define SDMA_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_IB_SIZE
+#define SDMA_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_PAGE_SKIP_CNTL
+#define SDMA_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_PAGE_CONTEXT_STATUS
+#define SDMA_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_PAGE_DOORBELL
+#define SDMA_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_PAGE_STATUS
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_PAGE_DOORBELL_LOG
+#define SDMA_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_PAGE_WATERMARK
+#define SDMA_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_PAGE_DOORBELL_OFFSET
+#define SDMA_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_PAGE_CSA_ADDR_LO
+#define SDMA_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_CSA_ADDR_HI
+#define SDMA_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_IB_SUB_REMAIN
+#define SDMA_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_PAGE_PREEMPT
+#define SDMA_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_PAGE_DUMMY_REG
+#define SDMA_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_RB_AQL_CNTL
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_PAGE_MINOR_PTR_UPDATE
+#define SDMA_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_PAGE_MIDCMD_DATA0
+#define SDMA_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA1
+#define SDMA_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA2
+#define SDMA_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA3
+#define SDMA_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA4
+#define SDMA_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA5
+#define SDMA_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA6
+#define SDMA_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA7
+#define SDMA_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA8
+#define SDMA_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA9
+#define SDMA_PAGE_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA10
+#define SDMA_PAGE_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_CNTL
+#define SDMA_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC0_RB_CNTL
+#define SDMA_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC0_RB_BASE
+#define SDMA_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_BASE_HI
+#define SDMA_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC0_RB_RPTR
+#define SDMA_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_RPTR_HI
+#define SDMA_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR
+#define SDMA_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_HI
+#define SDMA_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC0_RB_RPTR_ADDR_HI
+#define SDMA_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_RPTR_ADDR_LO
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_IB_CNTL
+#define SDMA_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC0_IB_RPTR
+#define SDMA_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC0_IB_OFFSET
+#define SDMA_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC0_IB_BASE_LO
+#define SDMA_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC0_IB_BASE_HI
+#define SDMA_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_IB_SIZE
+#define SDMA_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC0_SKIP_CNTL
+#define SDMA_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC0_CONTEXT_STATUS
+#define SDMA_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC0_DOORBELL
+#define SDMA_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC0_STATUS
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC0_DOORBELL_LOG
+#define SDMA_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC0_WATERMARK
+#define SDMA_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC0_DOORBELL_OFFSET
+#define SDMA_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC0_CSA_ADDR_LO
+#define SDMA_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_CSA_ADDR_HI
+#define SDMA_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_IB_SUB_REMAIN
+#define SDMA_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC0_PREEMPT
+#define SDMA_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC0_DUMMY_REG
+#define SDMA_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_RB_AQL_CNTL
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC0_MINOR_PTR_UPDATE
+#define SDMA_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC0_MIDCMD_DATA0
+#define SDMA_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA1
+#define SDMA_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA2
+#define SDMA_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA3
+#define SDMA_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA4
+#define SDMA_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA5
+#define SDMA_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA6
+#define SDMA_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA7
+#define SDMA_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA8
+#define SDMA_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA9
+#define SDMA_RLC0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA10
+#define SDMA_RLC0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_CNTL
+#define SDMA_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC1_RB_CNTL
+#define SDMA_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC1_RB_BASE
+#define SDMA_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_BASE_HI
+#define SDMA_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC1_RB_RPTR
+#define SDMA_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_RPTR_HI
+#define SDMA_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR
+#define SDMA_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_HI
+#define SDMA_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC1_RB_RPTR_ADDR_HI
+#define SDMA_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_RPTR_ADDR_LO
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_IB_CNTL
+#define SDMA_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC1_IB_RPTR
+#define SDMA_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC1_IB_OFFSET
+#define SDMA_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC1_IB_BASE_LO
+#define SDMA_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC1_IB_BASE_HI
+#define SDMA_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_IB_SIZE
+#define SDMA_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC1_SKIP_CNTL
+#define SDMA_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC1_CONTEXT_STATUS
+#define SDMA_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC1_DOORBELL
+#define SDMA_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC1_STATUS
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC1_DOORBELL_LOG
+#define SDMA_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC1_WATERMARK
+#define SDMA_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC1_DOORBELL_OFFSET
+#define SDMA_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC1_CSA_ADDR_LO
+#define SDMA_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_CSA_ADDR_HI
+#define SDMA_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_IB_SUB_REMAIN
+#define SDMA_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC1_PREEMPT
+#define SDMA_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC1_DUMMY_REG
+#define SDMA_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_RB_AQL_CNTL
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC1_MINOR_PTR_UPDATE
+#define SDMA_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC1_MIDCMD_DATA0
+#define SDMA_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA1
+#define SDMA_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA2
+#define SDMA_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA3
+#define SDMA_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA4
+#define SDMA_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA5
+#define SDMA_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA6
+#define SDMA_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA7
+#define SDMA_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA8
+#define SDMA_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA9
+#define SDMA_RLC1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA10
+#define SDMA_RLC1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_CNTL
+#define SDMA_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC2_RB_CNTL
+#define SDMA_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC2_RB_BASE
+#define SDMA_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_BASE_HI
+#define SDMA_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC2_RB_RPTR
+#define SDMA_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_RPTR_HI
+#define SDMA_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR
+#define SDMA_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_HI
+#define SDMA_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC2_RB_RPTR_ADDR_HI
+#define SDMA_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_RPTR_ADDR_LO
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_IB_CNTL
+#define SDMA_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC2_IB_RPTR
+#define SDMA_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC2_IB_OFFSET
+#define SDMA_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC2_IB_BASE_LO
+#define SDMA_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC2_IB_BASE_HI
+#define SDMA_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_IB_SIZE
+#define SDMA_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC2_SKIP_CNTL
+#define SDMA_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC2_CONTEXT_STATUS
+#define SDMA_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC2_DOORBELL
+#define SDMA_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC2_STATUS
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC2_DOORBELL_LOG
+#define SDMA_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC2_WATERMARK
+#define SDMA_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC2_DOORBELL_OFFSET
+#define SDMA_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC2_CSA_ADDR_LO
+#define SDMA_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_CSA_ADDR_HI
+#define SDMA_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_IB_SUB_REMAIN
+#define SDMA_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC2_PREEMPT
+#define SDMA_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC2_DUMMY_REG
+#define SDMA_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_RB_AQL_CNTL
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC2_MINOR_PTR_UPDATE
+#define SDMA_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC2_MIDCMD_DATA0
+#define SDMA_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA1
+#define SDMA_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA2
+#define SDMA_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA3
+#define SDMA_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA4
+#define SDMA_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA5
+#define SDMA_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA6
+#define SDMA_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA7
+#define SDMA_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA8
+#define SDMA_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA9
+#define SDMA_RLC2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA10
+#define SDMA_RLC2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_CNTL
+#define SDMA_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC3_RB_CNTL
+#define SDMA_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC3_RB_BASE
+#define SDMA_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_BASE_HI
+#define SDMA_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC3_RB_RPTR
+#define SDMA_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_RPTR_HI
+#define SDMA_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR
+#define SDMA_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_HI
+#define SDMA_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC3_RB_RPTR_ADDR_HI
+#define SDMA_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_RPTR_ADDR_LO
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_IB_CNTL
+#define SDMA_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC3_IB_RPTR
+#define SDMA_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC3_IB_OFFSET
+#define SDMA_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC3_IB_BASE_LO
+#define SDMA_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC3_IB_BASE_HI
+#define SDMA_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_IB_SIZE
+#define SDMA_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC3_SKIP_CNTL
+#define SDMA_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC3_CONTEXT_STATUS
+#define SDMA_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC3_DOORBELL
+#define SDMA_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC3_STATUS
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC3_DOORBELL_LOG
+#define SDMA_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC3_WATERMARK
+#define SDMA_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC3_DOORBELL_OFFSET
+#define SDMA_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC3_CSA_ADDR_LO
+#define SDMA_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_CSA_ADDR_HI
+#define SDMA_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_IB_SUB_REMAIN
+#define SDMA_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC3_PREEMPT
+#define SDMA_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC3_DUMMY_REG
+#define SDMA_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_RB_AQL_CNTL
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC3_MINOR_PTR_UPDATE
+#define SDMA_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC3_MIDCMD_DATA0
+#define SDMA_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA1
+#define SDMA_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA2
+#define SDMA_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA3
+#define SDMA_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA4
+#define SDMA_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA5
+#define SDMA_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA6
+#define SDMA_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA7
+#define SDMA_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA8
+#define SDMA_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA9
+#define SDMA_RLC3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA10
+#define SDMA_RLC3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_CNTL
+#define SDMA_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC4_RB_CNTL
+#define SDMA_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC4_RB_BASE
+#define SDMA_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_BASE_HI
+#define SDMA_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC4_RB_RPTR
+#define SDMA_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_RPTR_HI
+#define SDMA_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR
+#define SDMA_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_HI
+#define SDMA_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC4_RB_RPTR_ADDR_HI
+#define SDMA_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_RPTR_ADDR_LO
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_IB_CNTL
+#define SDMA_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC4_IB_RPTR
+#define SDMA_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC4_IB_OFFSET
+#define SDMA_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC4_IB_BASE_LO
+#define SDMA_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC4_IB_BASE_HI
+#define SDMA_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_IB_SIZE
+#define SDMA_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC4_SKIP_CNTL
+#define SDMA_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC4_CONTEXT_STATUS
+#define SDMA_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC4_DOORBELL
+#define SDMA_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC4_STATUS
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC4_DOORBELL_LOG
+#define SDMA_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC4_WATERMARK
+#define SDMA_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC4_DOORBELL_OFFSET
+#define SDMA_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC4_CSA_ADDR_LO
+#define SDMA_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_CSA_ADDR_HI
+#define SDMA_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_IB_SUB_REMAIN
+#define SDMA_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC4_PREEMPT
+#define SDMA_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC4_DUMMY_REG
+#define SDMA_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_RB_AQL_CNTL
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC4_MINOR_PTR_UPDATE
+#define SDMA_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC4_MIDCMD_DATA0
+#define SDMA_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA1
+#define SDMA_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA2
+#define SDMA_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA3
+#define SDMA_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA4
+#define SDMA_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA5
+#define SDMA_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA6
+#define SDMA_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA7
+#define SDMA_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA8
+#define SDMA_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA9
+#define SDMA_RLC4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA10
+#define SDMA_RLC4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_CNTL
+#define SDMA_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC5_RB_CNTL
+#define SDMA_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC5_RB_BASE
+#define SDMA_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_BASE_HI
+#define SDMA_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC5_RB_RPTR
+#define SDMA_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_RPTR_HI
+#define SDMA_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR
+#define SDMA_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_HI
+#define SDMA_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC5_RB_RPTR_ADDR_HI
+#define SDMA_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_RPTR_ADDR_LO
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_IB_CNTL
+#define SDMA_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC5_IB_RPTR
+#define SDMA_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC5_IB_OFFSET
+#define SDMA_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC5_IB_BASE_LO
+#define SDMA_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC5_IB_BASE_HI
+#define SDMA_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_IB_SIZE
+#define SDMA_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC5_SKIP_CNTL
+#define SDMA_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC5_CONTEXT_STATUS
+#define SDMA_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC5_DOORBELL
+#define SDMA_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC5_STATUS
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC5_DOORBELL_LOG
+#define SDMA_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC5_WATERMARK
+#define SDMA_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC5_DOORBELL_OFFSET
+#define SDMA_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC5_CSA_ADDR_LO
+#define SDMA_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_CSA_ADDR_HI
+#define SDMA_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_IB_SUB_REMAIN
+#define SDMA_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC5_PREEMPT
+#define SDMA_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC5_DUMMY_REG
+#define SDMA_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_RB_AQL_CNTL
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC5_MINOR_PTR_UPDATE
+#define SDMA_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC5_MIDCMD_DATA0
+#define SDMA_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA1
+#define SDMA_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA2
+#define SDMA_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA3
+#define SDMA_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA4
+#define SDMA_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA5
+#define SDMA_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA6
+#define SDMA_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA7
+#define SDMA_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA8
+#define SDMA_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA9
+#define SDMA_RLC5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA10
+#define SDMA_RLC5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_CNTL
+#define SDMA_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC6_RB_CNTL
+#define SDMA_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC6_RB_BASE
+#define SDMA_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_BASE_HI
+#define SDMA_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC6_RB_RPTR
+#define SDMA_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_RPTR_HI
+#define SDMA_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR
+#define SDMA_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_HI
+#define SDMA_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC6_RB_RPTR_ADDR_HI
+#define SDMA_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_RPTR_ADDR_LO
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_IB_CNTL
+#define SDMA_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC6_IB_RPTR
+#define SDMA_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC6_IB_OFFSET
+#define SDMA_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC6_IB_BASE_LO
+#define SDMA_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC6_IB_BASE_HI
+#define SDMA_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_IB_SIZE
+#define SDMA_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC6_SKIP_CNTL
+#define SDMA_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC6_CONTEXT_STATUS
+#define SDMA_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC6_DOORBELL
+#define SDMA_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC6_STATUS
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC6_DOORBELL_LOG
+#define SDMA_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC6_WATERMARK
+#define SDMA_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC6_DOORBELL_OFFSET
+#define SDMA_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC6_CSA_ADDR_LO
+#define SDMA_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_CSA_ADDR_HI
+#define SDMA_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_IB_SUB_REMAIN
+#define SDMA_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC6_PREEMPT
+#define SDMA_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC6_DUMMY_REG
+#define SDMA_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_RB_AQL_CNTL
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC6_MINOR_PTR_UPDATE
+#define SDMA_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC6_MIDCMD_DATA0
+#define SDMA_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA1
+#define SDMA_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA2
+#define SDMA_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA3
+#define SDMA_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA4
+#define SDMA_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA5
+#define SDMA_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA6
+#define SDMA_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA7
+#define SDMA_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA8
+#define SDMA_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA9
+#define SDMA_RLC6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA10
+#define SDMA_RLC6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_CNTL
+#define SDMA_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC7_RB_CNTL
+#define SDMA_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC7_RB_BASE
+#define SDMA_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_BASE_HI
+#define SDMA_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC7_RB_RPTR
+#define SDMA_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_RPTR_HI
+#define SDMA_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR
+#define SDMA_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_HI
+#define SDMA_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC7_RB_RPTR_ADDR_HI
+#define SDMA_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_RPTR_ADDR_LO
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_IB_CNTL
+#define SDMA_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC7_IB_RPTR
+#define SDMA_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC7_IB_OFFSET
+#define SDMA_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC7_IB_BASE_LO
+#define SDMA_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC7_IB_BASE_HI
+#define SDMA_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_IB_SIZE
+#define SDMA_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC7_SKIP_CNTL
+#define SDMA_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC7_CONTEXT_STATUS
+#define SDMA_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC7_DOORBELL
+#define SDMA_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC7_STATUS
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC7_DOORBELL_LOG
+#define SDMA_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC7_WATERMARK
+#define SDMA_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC7_DOORBELL_OFFSET
+#define SDMA_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC7_CSA_ADDR_LO
+#define SDMA_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_CSA_ADDR_HI
+#define SDMA_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_IB_SUB_REMAIN
+#define SDMA_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC7_PREEMPT
+#define SDMA_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC7_DUMMY_REG
+#define SDMA_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_RB_AQL_CNTL
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC7_MINOR_PTR_UPDATE
+#define SDMA_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC7_MIDCMD_DATA0
+#define SDMA_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA1
+#define SDMA_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA2
+#define SDMA_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA3
+#define SDMA_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA4
+#define SDMA_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA5
+#define SDMA_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA6
+#define SDMA_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA7
+#define SDMA_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA8
+#define SDMA_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA9
+#define SDMA_RLC7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA10
+#define SDMA_RLC7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_CNTL
+#define SDMA_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
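The __SHIFT/_MASK pairs generated above are consumed by field accessors in the driver. A minimal sketch of that pattern follows; EX_FIELD_GET/EX_FIELD_SET are illustrative stand-ins (amdgpu provides similar REG_GET_FIELD()/REG_SET_FIELD() helpers) and are not part of this header.

#include <linux/types.h>

/* Illustrative only: stand-ins for the field helpers the driver builds on
 * top of the generated __SHIFT/_MASK pairs; not part of this patch. */
#define EX_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define EX_FIELD_SET(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((u32)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: enable the RLC7 ring buffer and give it VMID 3. */
static u32 sdma_rlc7_rb_cntl_enable(u32 rb_cntl)
{
	rb_cntl = EX_FIELD_SET(rb_cntl, SDMA_RLC7_RB_CNTL, RB_ENABLE, 1);
	rb_cntl = EX_FIELD_SET(rb_cntl, SDMA_RLC7_RB_CNTL, RB_VMID, 3);
	return rb_cntl;
}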
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h
new file mode 100644
index 000000000000..c6c0cf1376a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_6_1_0_SH_MASK_HEADER
+#define _xgmi_6_1_0_SH_MASK_HEADER
+
+//PCS_XGMI3X16_PCS_ERROR_STATUS
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr__SHIFT 0x2
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr__SHIFT 0x3
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr__SHIFT 0x4
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr__SHIFT 0x7
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr__SHIFT 0xe
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr__SHIFT 0x16
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr__SHIFT 0x17
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr__SHIFT 0x18
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr__SHIFT 0x19
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr__SHIFT 0x1a
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr__SHIFT 0x1b
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr__SHIFT 0x1c
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr_MASK 0x00000004L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr_MASK 0x00000008L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr_MASK 0x00000010L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr_MASK 0x00000080L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr_MASK 0x00004000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr_MASK 0x00400000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr_MASK 0x00800000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr_MASK 0x01000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr_MASK 0x02000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr_MASK 0x04000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr_MASK 0x08000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr_MASK 0x10000000L
+
+#endif
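The new xgmi_6_1_0 masks describe the individual error bits of PCS_XGMI3X16_PCS_ERROR_STATUS. Below is a hedged sketch of how such a status word might be decoded; the helper and the mask subset it walks are illustrative only (the real consumer is the amdgpu XGMI RAS code, not shown here).

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>

/* Illustrative only, not part of the patch: count flagged PCS link errors. */
static unsigned int xgmi3x16_count_pcs_errors(u32 status)
{
	static const u32 err_masks[] = {
		PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr_MASK,
		PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr_MASK,
		PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr_MASK,
		PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK,
		PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr_MASK,
	};
	unsigned int i, n = 0;

	for (i = 0; i < ARRAY_SIZE(err_masks); i++)
		if (status & err_masks[i])
			n++;
	return n;
}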
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
index 9e8ed9f4bb15..3a4670bc4449 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
@@ -49,6 +49,8 @@
#define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
+
#define GFX_11_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int
#define GFX_11_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error
#define GFX_11_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index d18162e9ed1d..9f542f6e19ed 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -104,7 +104,9 @@ enum pp_clock_type {
PP_FCLK,
PP_DCEFCLK,
PP_VCLK,
+ PP_VCLK1,
PP_DCLK,
+ PP_DCLK1,
OD_SCLK,
OD_MCLK,
OD_VDDC_CURVE,
@@ -139,6 +141,8 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
AMDGPU_PP_SENSOR_VCN_POWER_STATE,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
};
enum amd_pp_task {
@@ -158,6 +162,8 @@ enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
PP_SMC_POWER_PROFILE_WINDOW3D = 0x7,
+ PP_SMC_POWER_PROFILE_CAPPED = 0x8,
+ PP_SMC_POWER_PROFILE_UNCAPPED = 0x9,
PP_SMC_POWER_PROFILE_COUNT,
};
@@ -329,6 +335,8 @@ struct amd_pm_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, void *value, int *size);
+ int (*get_apu_thermal_limit)(void *handle, uint32_t *limit);
+ int (*set_apu_thermal_limit)(void *handle, uint32_t limit);
enum amd_dpm_forced_level (*get_performance_level)(void *handle);
enum amd_pm_state_type (*get_current_power_state)(void *handle);
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
@@ -395,6 +403,7 @@ struct amd_pm_funcs {
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
int (*asic_reset_mode_2)(void *handle);
+ int (*asic_reset_enable_gfx_features)(void *handle);
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h
index b8ff7456ae0b..f8008270f813 100644
--- a/drivers/gpu/drm/amd/include/v11_structs.h
+++ b/drivers/gpu/drm/amd/include/v11_structs.h
@@ -25,14 +25,14 @@
#define V11_STRUCTS_H_
struct v11_gfx_mqd {
- uint32_t reserved_0; // offset: 0 (0x0)
- uint32_t reserved_1; // offset: 1 (0x1)
- uint32_t reserved_2; // offset: 2 (0x2)
- uint32_t reserved_3; // offset: 3 (0x3)
- uint32_t reserved_4; // offset: 4 (0x4)
- uint32_t reserved_5; // offset: 5 (0x5)
- uint32_t reserved_6; // offset: 6 (0x6)
- uint32_t reserved_7; // offset: 7 (0x7)
+ uint32_t shadow_base_lo; // offset: 0 (0x0)
+ uint32_t shadow_base_hi; // offset: 1 (0x1)
+ uint32_t gds_bkup_base_lo; // offset: 2 (0x2)
+ uint32_t gds_bkup_base_hi; // offset: 3 (0x3)
+ uint32_t fw_work_area_base_lo; // offset: 4 (0x4)
+ uint32_t fw_work_area_base_hi; // offset: 5 (0x5)
+ uint32_t shadow_initialized; // offset: 6 (0x6)
+ uint32_t ib_vmid; // offset: 7 (0x7)
uint32_t reserved_8; // offset: 8 (0x8)
uint32_t reserved_9; // offset: 9 (0x9)
uint32_t reserved_10; // offset: 10 (0xA)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 1b300c569faf..078aaaa53162 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -36,6 +36,8 @@
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
+#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
+
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -227,6 +229,24 @@ int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
return ret;
}
+int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
+ return -ENOENT;
+
+ mutex_lock(&adev->pm.mutex);
+
+ ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -438,6 +458,34 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
return ret;
}
+int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = -EINVAL;
+
+ if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = -EINVAL;
+
+ if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1414,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- struct smu_context *smu = adev->powerplay.pp_handle;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return (smu->od_enabled || smu->is_apu);
+ } else {
+ struct pp_hwmgr *hwmgr;
- if ((is_support_sw_smu(adev) && smu->od_enabled) ||
- (is_support_sw_smu(adev) && smu->is_apu) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- return true;
+ /*
+	 * dpm on some legacy asics doesn't carry an od_enabled member
+	 * as its pp_handle is cast directly from adev.
+ */
+ if (amdgpu_dpm_is_legacy_dpm(adev))
+ return false;
- return false;
+ hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+ return hwmgr->od_enabled;
+ }
}
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
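For context on the amdgpu_dpm_is_legacy_dpm() test above: the legacy (si/kv) dpm paths register the amdgpu_device itself as the powerplay handle instead of allocating a pp_hwmgr, roughly as sketched below. The function and funcs-table names are invented for illustration; the actual legacy init code is not part of this patch.

/* Hypothetical sketch, not part of this patch: why pp_handle == adev
 * identifies the legacy dpm paths. */
static int legacy_dpm_early_init_sketch(struct amdgpu_device *adev)
{
	adev->powerplay.pp_handle = adev;		/* no pp_hwmgr backend */
	adev->powerplay.pp_funcs = &legacy_dpm_funcs;	/* assumed funcs table */
	return 0;
}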
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 236657eece47..58c2246918fd 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -91,6 +91,8 @@ const char * const amdgpu_pp_profile_name[] = {
"COMPUTE",
"CUSTOM",
"WINDOW_3D",
+ "CAPPED",
+ "UNCAPPED",
};
/**
@@ -1178,6 +1180,21 @@ static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}
+static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
+}
+
+static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
+}
+
static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1193,6 +1210,21 @@ static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}
+static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
+}
+
+static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
+}
+
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1686,6 +1718,82 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
}
/**
+ * DOC: apu_thermal_cap
+ *
+ * The amdgpu driver provides a sysfs API for retrieving/updating the
+ * thermal limit temperature in millidegrees Celsius.
+ *
+ * Reading back the file shows the current thermal limit value.
+ *
+ * Writing an integer to the file sets a new thermal limit. The value
+ * must be between 0 and 100; a value outside this range is rejected
+ * and the write returns -EINVAL.
+ */
+static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, size;
+ u32 limit;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
+ if (!ret)
+ size = sysfs_emit(buf, "%u\n", limit);
+ else
+ size = sysfs_emit(buf, "failed to get thermal limit\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ u32 value;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ ret = kstrtou32(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > 100) {
+		dev_err(dev, "Invalid argument!\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
+	if (ret) {
+		dev_err(dev, "failed to update thermal limit\n");
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return count;
+}
+
+/**
* DOC: gpu_metrics
*
* The amdgpu driver provides a sysfs API for retrieving current gpu
@@ -1924,7 +2032,9 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
@@ -1937,6 +2047,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
@@ -1991,6 +2102,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2007,14 +2120,28 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
@@ -3059,7 +3186,7 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* hwmon interfaces for GPU power:
*
- * - power1_average: average power used by the GPU in microWatts
+ * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
*
* - power1_cap_min: minimum cap supported in microWatts
*
@@ -3268,7 +3395,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
- /* not implemented yet for GC 10.3.1 APUs */
+ /* In the case of APUs, this is only implemented on Vangogh */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
@@ -3277,7 +3404,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
return 0;
- /* not implemented yet for APUs having <= GC 9.3.0 */
+ /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
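A userspace sketch of the apu_thermal_cap attribute documented earlier in this file: the sysfs path below (card0 under /sys/class/drm) is the usual location but is an assumption here, and the snippet is not part of the patch.

#include <stdio.h>

int main(void)
{
	/* Assumed path; adjust the card index for the target device. */
	const char *path = "/sys/class/drm/card0/device/apu_thermal_cap";
	unsigned int cap;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &cap) != 1)
		return 1;
	fclose(f);
	printf("current thermal cap: %u\n", cap);

	f = fopen(path, "w");		/* requires write permission (root) */
	if (!f)
		return 1;
	fprintf(f, "%u\n", 95u);	/* must be 0..100, see the DOC comment */
	fclose(f);
	return 0;
}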
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index cb5b9df78b4d..d178f3f44081 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -369,6 +369,9 @@ struct amdgpu_pm {
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
+int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit);
+int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
+
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
uint32_t block_type, bool gate);
@@ -386,6 +389,7 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 49c398ec0aaf..d6d9e3b1b2c0 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7714,20 +7714,13 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
err, fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
}
return err;
-
}
static int si_dpm_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 304190d5c9d2..11b7b4cffaae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -111,8 +111,7 @@ static int pp_sw_fini(void *handle)
hwmgr_sw_fini(hwmgr);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return 0;
}
@@ -769,10 +768,16 @@ static int pp_dpm_read_sensor(void *handle, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
- *((uint32_t *)value) = hwmgr->pstate_sclk;
+ *((uint32_t *)value) = hwmgr->pstate_sclk * 100;
return 0;
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
- *((uint32_t *)value) = hwmgr->pstate_mclk;
+ *((uint32_t *)value) = hwmgr->pstate_mclk * 100;
+ return 0;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
+ *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
+ return 0;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
+ *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
return 0;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
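The hunk above keeps hwmgr->pstate_* in MHz and multiplies by 100 when reporting, so the stable/peak pstate sensors are returned in 10 kHz units like the other clock sensors. A caller-side sketch of converting the reading back to MHz (the helper is illustrative, not part of the patch):

/* Illustrative only: read the stable-pstate SCLK and convert to MHz. */
static int query_stable_pstate_sclk_mhz(struct amdgpu_device *adev, u32 *mhz)
{
	u32 val, size = sizeof(val);
	int ret;

	ret = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
				     &val, &size);
	if (ret)
		return ret;

	*mhz = val / 100;	/* sensor reports 10 kHz units */
	return 0;
}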
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index ede71de2343d..86d6e88c7386 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
return 0;
}
+static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxGfxclkFrequency,
+ &hwmgr->pstate_sclk_peak);
+ hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
+}
+
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return ret;
}
+ smu10_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
- hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
-
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
/* disabled fine grain tuning function by default */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 7ef7e81525a3..e10cc5e7928e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include "pp_debug.h"
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -1501,6 +1500,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
return ret;
}
+static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ int32_t tmp_sclk, count, percentage;
+
+ if (golden_dpm_table->mclk_table.count == 1) {
+ percentage = 70;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
+ } else {
+ percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
+ }
+
+ tmp_sclk = hwmgr->pstate_mclk * percentage / 100;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
+ table_info->vdd_dep_on_sclk;
+
+ for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
+ }
+
+ hwmgr->pstate_mclk_peak =
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+
+	/* make sure the output is in MHz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
@@ -1625,6 +1685,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"pcie performance request failed!", result = tmp_result);
+ smu7_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -3143,15 +3205,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
count >= 0; count--) {
if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -3161,15 +3220,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -3181,8 +3237,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
- hwmgr->pstate_sclk = tmp_sclk;
- hwmgr->pstate_mclk = tmp_mclk;
return 0;
}
@@ -3195,9 +3249,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
@@ -4153,7 +4204,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
@@ -4210,7 +4261,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
result = smum_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -4218,7 +4269,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
/*populate MCLK dpm table to SMU7 */
result = smum_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
@@ -4309,7 +4360,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index b50fd4a4a3d1..b015a601b385 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
data->acp_boot_level = 0xff;
}
+static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ hwmgr->pstate_sclk = table->entries[0].clk / 100;
+ hwmgr->pstate_mclk = 0;
+
+ hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
+ hwmgr->pstate_mclk_peak = 0;
+}
+
static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
smu8_program_voting_clients(hwmgr);
@@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
smu8_program_bootup_state(hwmgr);
smu8_reset_acp_boot_level(hwmgr);
+ smu8_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
- hwmgr->pstate_sclk = table->entries[0].clk;
- hwmgr->pstate_mclk = 0;
level = smu8_get_max_sclk_level(hwmgr) - 1;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c8c9fb827bda..99cd2e63afdd 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -3008,6 +3007,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool
return 0;
}
+static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
+ table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
+ } else {
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
+ }
+
+ hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
+ hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;
+
+	/* make sure the output is in MHz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -3082,6 +3105,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
result = tmp_result);
}
+ vega10_populate_umdpstate_clocks(hwmgr);
+
return result;
}
@@ -4169,8 +4194,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
- hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
- hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -4281,9 +4304,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
index 95b988823f50..bb90d8abf79b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "vega10_processpptables.h"
#include "ppatomfwctrl.h"
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index a2f4d6773d45..e9db137cd1c6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -1026,6 +1025,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
return 0;
}
+static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ } else {
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
+ }
+
+	hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
+	hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -1077,6 +1095,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to setup default DPM tables!",
return result);
+
+ vega12_populate_umdpstate_clocks(hwmgr);
+
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
index bd54fbd393b9..89148f73b514 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "vega12/smu9_driver_if.h"
#include "vega12_processpptables.h"
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index b30684c84e20..0d4d4811527c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -1555,26 +1554,23 @@ static int vega20_set_mclk_od(
return 0;
}
-static int vega20_populate_umdpstate_clocks(
- struct pp_hwmgr *hwmgr)
+static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
- hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
- hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
-
if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ } else {
+ hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
+ hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
}
- hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
- hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
-
- return 0;
+ hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
+ hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
}
static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
@@ -1753,10 +1749,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to initialize odn settings!",
return result);
- result = vega20_populate_umdpstate_clocks(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "[EnableDPMTasks] Failed to populate umdpstate clocks!",
- return result);
+ vega20_populate_umdpstate_clocks(hwmgr);
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
index 1f9082539457..79c817752a33 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "smu11_driver_if.h"
#include "vega20_processpptables.h"
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 27f8d0e0e6a8..5ce433e2c16a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -809,6 +809,8 @@ struct pp_hwmgr {
uint32_t workload_prority[Workload_Policy_Max];
uint32_t workload_setting[Workload_Policy_Max];
bool gfxoff_state_changed_by_workload;
+ uint32_t pstate_sclk_peak;
+ uint32_t pstate_mclk_peak;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
index fdc6b7a57bc9..c2efc70ef288 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
@@ -358,6 +358,7 @@ typedef struct {
QuadraticInt_t SsCurve;
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -609,6 +610,7 @@ typedef struct {
uint32_t MmHubPadding[8];
} PPTable_t;
+#pragma pack(pop)
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
index 2818c98ff5ca..faae4b918d90 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
@@ -122,6 +122,7 @@ typedef struct {
uint16_t Vid; /* min voltage in SVI2 VID */
} DisplayClockTable_t;
+#pragma pack(push, 1)
typedef struct {
/* PowerTune */
uint16_t SocketPowerLimit; /* Watts */
@@ -323,6 +324,7 @@ typedef struct {
uint32_t MmHubPadding[3]; /* SMU internal use */
} PPTable_t;
+#pragma pack(pop)
typedef struct {
uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
index b6ffd08784e7..6456bea5d2d5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
@@ -245,6 +245,7 @@ typedef struct {
QuadraticInt_t SsCurve;
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -508,6 +509,7 @@ typedef struct {
uint32_t MmHubPadding[7];
} PPTable_t;
+#pragma pack(pop)
typedef struct {
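The driver-interface headers above now wrap PPTable_t in #pragma pack(push, 1)/#pragma pack(pop) so the table shared with the PMFW carries no compiler-inserted padding and its layout matches the firmware byte for byte. A stand-alone illustration of the effect (the example structs are not from those headers):

#include <linux/types.h>

/* Illustrative only, not part of the patch. */
struct example_unpacked {
	u16 a;
	u32 b;		/* typically preceded by 2 bytes of padding */
};			/* sizeof() is usually 8 */

#pragma pack(push, 1)
struct example_packed {
	u16 a;
	u32 b;		/* offset 2, no padding */
};			/* sizeof() is 6 */
#pragma pack(pop)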
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 5ca3c422f7d4..4bc8db1be738 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "linux/delay.h"
#include <linux/types.h>
#include <linux/pci.h>
@@ -2203,7 +2202,7 @@ static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
return ci_program_memory_timing_parameters(hwmgr);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 03df35dee8ba..060fc140c574 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2165,7 +2165,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
return iceland_program_memory_timing_parameters(hwmgr);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index 88a5641465dc..7eeab84d421a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -250,9 +250,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
/* allocate space for watermarks table */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(Watermarks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ sizeof(Watermarks_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
&priv->smu_tables.entry[SMU10_WMTABLE].handle,
&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -266,9 +265,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
/* allocate space for watermarks table */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(DpmClocks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ sizeof(DpmClocks_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 04b561f5d932..acbe41174d7e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -2554,7 +2554,7 @@ static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
return tonga_program_memory_timing_parameters(hwmgr);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ca3beb5d8f27..5633c5797e85 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -40,6 +40,7 @@
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
+#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"
@@ -161,10 +162,15 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
- if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
- return -EOPNOTSUPP;
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
- return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+ if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
+ ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+ if (ret)
+ dev_err(adev->dev, "Failed to enable gfx imu!\n");
+ }
+ return ret;
}
static u32 smu_get_mclk(void *handle, bool low)
@@ -195,6 +201,19 @@ static u32 smu_get_sclk(void *handle, bool low)
return clk_freq * 100;
}
+static int smu_set_gfx_imu_enable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
+ return 0;
+
+ return smu_set_gfx_power_up_by_imu(smu);
+}
+
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
bool enable)
{
@@ -609,6 +628,11 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
smu_v13_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(13, 0, 6):
+ smu_v13_0_6_set_ppt_funcs(smu);
+ /* Enable pp_od_clk_voltage node */
+ smu->od_enabled = true;
+ break;
case IP_VERSION(13, 0, 7):
smu_v13_0_7_set_ppt_funcs(smu);
break;
@@ -623,6 +647,7 @@ static int smu_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu;
+ int r;
smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
if (!smu)
@@ -640,7 +665,10 @@ static int smu_early_init(void *handle)
adev->powerplay.pp_handle = smu;
adev->powerplay.pp_funcs = &swsmu_pm_funcs;
- return smu_set_funcs(adev);
+ r = smu_set_funcs(adev);
+ if (r)
+ return r;
+ return smu_init_microcode(smu);
}
static int smu_set_default_dpm_table(struct smu_context *smu)
@@ -900,9 +928,8 @@ static int smu_alloc_dummy_read_table(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- dummy_read_1_table->size = 0x40000;
- dummy_read_1_table->align = PAGE_SIZE;
- dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (!dummy_read_1_table->size)
+ return 0;
ret = amdgpu_bo_create_kernel(adev,
dummy_read_1_table->size,
@@ -1067,12 +1094,6 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
- ret = smu_init_microcode(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to load smu firmware!\n");
- return ret;
- }
-
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1205,10 +1226,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- ret = smu_setup_pptable(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup pptable!\n");
- return ret;
+ /*
+ * It is assumed the pptable used before runpm is the same as
+ * the one used afterwards. Thus, we can reuse the stored
+ * copy and do not need to set up the pptable again.
+ */
+ if (!adev->in_runpm) {
+ ret = smu_setup_pptable(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup pptable!\n");
+ return ret;
+ }
}
/* smu_dump_pptable(smu); */
@@ -1386,15 +1414,9 @@ static int smu_hw_init(void *handle)
}
if (smu->is_apu) {
- if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
- likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to Enable gfx imu!\n");
- return ret;
- }
- }
-
+ ret = smu_set_gfx_imu_enable(smu);
+ if (ret)
+ return ret;
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(smu, true);
@@ -1500,6 +1522,20 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
+ * For SMU 13.0.4/11, PMFW will handle the feature disablement properly
+ * for the gpu reset case. Driver involvement is unnecessary.
+ */
+ if (amdgpu_in_reset(adev)) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 11):
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ /*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
*/
@@ -1657,6 +1693,10 @@ static int smu_resume(void *handle)
return ret;
}
+ ret = smu_set_gfx_imu_enable(smu);
+ if (ret)
+ return ret;
+
smu_set_gfx_cgpg(smu, true);
smu->disable_uclk_switch = 0;
@@ -1672,8 +1712,6 @@ static int smu_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
struct smu_context *smu = handle;
- int index = 0;
- int num_of_active_display = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -1684,11 +1722,6 @@ static int smu_display_configuration_change(void *handle,
smu_set_min_dcef_deep_sleep(smu,
display_config->min_dcef_deep_sleep_set_clk / 100);
- for (index = 0; index < display_config->num_path_including_non_display; index++) {
- if (display_config->displays[index].controller_id != 0)
- num_of_active_display++;
- }
-
return 0;
}
@@ -1982,8 +2015,12 @@ static int smu_force_ppclk_levels(void *handle,
clk_type = SMU_DCEFCLK; break;
case PP_VCLK:
clk_type = SMU_VCLK; break;
+ case PP_VCLK1:
+ clk_type = SMU_VCLK1; break;
case PP_DCLK:
clk_type = SMU_DCLK; break;
+ case PP_DCLK1:
+ clk_type = SMU_DCLK1; break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
@@ -2369,8 +2406,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_DCEFCLK; break;
case PP_VCLK:
clk_type = SMU_VCLK; break;
+ case PP_VCLK1:
+ clk_type = SMU_VCLK1; break;
case PP_DCLK:
clk_type = SMU_DCLK; break;
+ case PP_DCLK1:
+ clk_type = SMU_DCLK1; break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
@@ -2473,6 +2514,14 @@ static int smu_read_sensor(void *handle,
*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
+ *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
+ *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
*size = 8;
@@ -2506,6 +2555,28 @@ unlock:
return ret;
}
+static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = handle;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
+ ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
+
+ return ret;
+}
+
+static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = handle;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
+ ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
+
+ return ret;
+}
+
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
@@ -2839,6 +2910,23 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+static int smu_enable_gfx_features(void *handle)
+{
+ struct smu_context *smu = handle;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->enable_gfx_features)
+ ret = smu->ppt_funcs->enable_gfx_features(smu);
+
+ if (ret)
+ dev_err(smu->adev->dev, "enable gfx features failed!\n");
+
+ return ret;
+}
+
static int smu_get_max_sustainable_clocks_by_dc(void *handle,
struct pp_smu_nv_clock_table *max_clocks)
{
@@ -2990,6 +3078,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.emit_clock_levels = smu_emit_ppclk_levels,
.force_performance_level = smu_force_performance_level,
.read_sensor = smu_read_sensor,
+ .get_apu_thermal_limit = smu_get_apu_thermal_limit,
+ .set_apu_thermal_limit = smu_set_apu_thermal_limit,
.get_performance_level = smu_get_performance_level,
.get_current_power_state = smu_get_current_power_state,
.get_fan_speed_rpm = smu_get_fan_speed_rpm,
@@ -3023,6 +3113,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_ppfeature_status = smu_sys_get_pp_feature_mask,
.set_ppfeature_status = smu_sys_set_pp_feature_mask,
.asic_reset_mode_2 = smu_mode2_reset,
+ .asic_reset_enable_gfx_features = smu_enable_gfx_features,
.set_df_cstate = smu_set_df_cstate,
.set_xgmi_pstate = smu_set_xgmi_pstate,
.get_gpu_metrics = smu_sys_get_gpu_metrics,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3bc4128a22ac..09469c750a96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -722,6 +722,18 @@ struct pptable_funcs {
void *data, uint32_t *size);
/**
+ * @get_apu_thermal_limit: get apu core limit from smu
+ * &limit: current limit temperature in millidegrees Celsius
+ */
+ int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);
+
+ /**
+ * @set_apu_thermal_limit: update all controllers with new limit
+ * &limit: limit temperature to be set, in millidegrees Celsius
+ */
+ int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);
+
+ /**
* @pre_display_config_changed: Prepare GPU for a display configuration
* change.
*
@@ -1201,6 +1213,8 @@ struct pptable_funcs {
* IPs reset varies by asic.
*/
int (*mode2_reset)(struct smu_context *smu);
+ /* for gfx feature enablement after mode2 reset */
+ int (*enable_gfx_features)(struct smu_context *smu);
/**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
index 43d43d6addc0..d518dee18e1b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
@@ -464,6 +464,7 @@ typedef struct {
uint16_t Padding16;
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -733,6 +734,7 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
index 04752ade1016..c5c1943fb6a1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
@@ -515,6 +515,7 @@ typedef struct {
uint32_t BoardLevelEnergyAccumulator;
} OutOfBandMonitor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -814,6 +815,7 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 351a4af429b3..aa6d29de4002 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
@@ -599,6 +599,7 @@ typedef struct {
uint16_t Fmax;
} UclkDpmChangeRange_t;
+#pragma pack(push, 1)
typedef struct {
// MAJOR SECTION: SKU PARAMETERS
@@ -957,6 +958,7 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// MAJOR SECTION: SKU PARAMETERS
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
index 8361ebd8d876..21e6028a49e6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
@@ -238,7 +238,9 @@ typedef struct {
#define WORKLOAD_PPLIB_VR_BIT 3
#define WORKLOAD_PPLIB_COMPUTE_BIT 4
#define WORKLOAD_PPLIB_CUSTOM_BIT 5
-#define WORKLOAD_PPLIB_COUNT 6
+#define WORKLOAD_PPLIB_CAPPED_BIT 6
+#define WORKLOAD_PPLIB_UNCAPPED_BIT 7
+#define WORKLOAD_PPLIB_COUNT 8
#define TABLE_BIOS_IF 0 // Called by BIOS
#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 7a6075daa7b2..90200f31ff52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
@@ -267,6 +267,7 @@ typedef struct {
QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V)
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -448,6 +449,7 @@ typedef struct {
uint32_t reserved[14];
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index d6b964cf73bd..b686fb68a6e7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -123,7 +123,8 @@
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT))
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -522,9 +523,9 @@ typedef enum {
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
- TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
+ TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
@@ -1346,10 +1347,12 @@ typedef struct {
uint32_t MmHubPadding[8];
} BoardTable_t;
+#pragma pack(push, 1)
typedef struct {
SkuTable_t SkuTable;
BoardTable_t BoardTable;
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index f77401709d83..2162ecd1057d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
+#define PMFW_DRIVER_IF_VERSION 8
typedef struct {
int32_t value;
@@ -198,7 +198,7 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
- uint16_t spare2;
+ uint16_t FilterAlphaValue;
uint16_t AverageGfxclkFrequency;
uint16_t AverageFclkFrequency;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
new file mode 100644
index 000000000000..be596777cd2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_DRIVER_IF_H
+#define SMU_13_0_6_DRIVER_IF_H
+
+// *** IMPORTANT ***
+// PMFW TEAM: Always increment the interface version if
+// anything is changed in this file
+#define SMU13_0_6_DRIVER_IF_VERSION 0x08042022
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ UNSUPPORTED_1, //50 Kbits/s not supported anymore!
+ I2C_SPEED_STANDARD_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ UNSUPPORTED_2, //1 Mbits/s (in high speed mode) not supported anymore!
+ UNSUPPORTED_3, //2.3 Mbits/s not supported anymore!
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+typedef struct {
+ uint8_t ReadWriteData; //Return data for read. Data to send for write
+ uint8_t CmdConfig; //Includes whether the associated command should have a stop or restart command, and whether it is a read or a write
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
+ uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
+ uint8_t SlaveAddress; //Slave address of device
+ uint8_t NumCmds; //Number of commands
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t; // SW I2C Request Table
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+ uint32_t Spare[8];
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SwI2cRequestExternal_t;
+
+typedef enum {
+ PPCLK_VCLK,
+ PPCLK_DCLK,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_LCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+//TODO confirm if this is used in SMU_13_0_6 PPSMC_MSG_SetUclkDpmMode
+typedef enum {
+ UCLK_DPM_MODE_BANDWIDTH,
+ UCLK_DPM_MODE_LATENCY,
+} UCLK_DPM_MODE_e;
+
+typedef struct {
+ //0-26 SOC, 27-29 SOCIO
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableAid_t;
+
+typedef struct {
+ //0-27 GFX, 28-29 SOC
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableXcd_t;
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+// #define TABLE_PPTABLE 0
+// #define TABLE_AVFS_PSM_DEBUG 1
+// #define TABLE_AVFS_FUSE_OVERRIDE 2
+// #define TABLE_PMSTATUSLOG 3
+// #define TABLE_SMU_METRICS 4
+// #define TABLE_DRIVER_SMU_CONFIG 5
+// #define TABLE_I2C_COMMANDS 6
+// #define TABLE_COUNT 7
+
+// // Table transfer status
+// #define TABLE_TRANSFER_OK 0x0
+// #define TABLE_TRANSFER_FAILED 0xFF
+// #define TABLE_TRANSFER_PENDING 0xAB
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d6b13933a98f..4c46a0392451 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -113,20 +113,21 @@
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
- (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
- (1 << FEATURE_DPM_UCLK_BIT) | \
- (1 << FEATURE_DPM_FCLK_BIT) | \
- (1 << FEATURE_DPM_SOCCLK_BIT) | \
- (1 << FEATURE_DPM_MP0CLK_BIT) | \
- (1 << FEATURE_DPM_LINK_BIT) | \
- (1 << FEATURE_DPM_DCN_BIT) | \
- (1 << FEATURE_DS_GFXCLK_BIT) | \
- (1 << FEATURE_DS_SOCCLK_BIT) | \
- (1 << FEATURE_DS_FCLK_BIT) | \
- (1 << FEATURE_DS_LCLK_BIT) | \
- (1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -1379,10 +1380,12 @@ typedef struct {
uint32_t MmHubPadding[8];
} BoardTable_t;
+#pragma pack(push, 1)
typedef struct {
SkuTable_t SkuTable;
BoardTable_t BoardTable;
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index 8b8266890a10..10cff75b44d5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -94,6 +94,7 @@
//Resets
#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
#define PPSMC_MSG_Mode1Reset 0x2F
+#define PPSMC_MSG_Mode2Reset 0x4F
//Set SystemVirtual DramAddrHigh
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
new file mode 100644
index 000000000000..bdccbb4a6276
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_PMFW_H
+#define SMU_13_0_6_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_CXL_BITRATES 4
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+
+typedef enum {
+/*0*/ FEATURE_DATA_CALCULATION = 0,
+/*1*/ FEATURE_DPM_CCLK = 1,
+/*2*/ FEATURE_DPM_FCLK = 2,
+/*3*/ FEATURE_DPM_GFXCLK = 3,
+/*4*/ FEATURE_DPM_LCLK = 4,
+/*5*/ FEATURE_DPM_SOCCLK = 5,
+/*6*/ FEATURE_DPM_UCLK = 6,
+/*7*/ FEATURE_DPM_VCN = 7,
+/*8*/ FEATURE_DPM_XGMI = 8,
+/*9*/ FEATURE_DS_FCLK = 9,
+/*10*/ FEATURE_DS_GFXCLK = 10,
+/*11*/ FEATURE_DS_LCLK = 11,
+/*12*/ FEATURE_DS_MP0CLK = 12,
+/*13*/ FEATURE_DS_MP1CLK = 13,
+/*14*/ FEATURE_DS_MPIOCLK = 14,
+/*15*/ FEATURE_DS_SOCCLK = 15,
+/*16*/ FEATURE_DS_VCN = 16,
+/*17*/ FEATURE_APCC_DFLL = 17,
+/*18*/ FEATURE_APCC_PLUS = 18,
+/*19*/ FEATURE_DF_CSTATE = 19,
+/*20*/ FEATURE_CC6 = 20,
+/*21*/ FEATURE_PC6 = 21,
+/*22*/ FEATURE_CPPC = 22,
+/*23*/ FEATURE_PPT = 23,
+/*24*/ FEATURE_TDC = 24,
+/*25*/ FEATURE_THERMAL = 25,
+/*26*/ FEATURE_SOC_PCC = 26,
+/*27*/ FEATURE_CCD_PCC = 27,
+/*28*/ FEATURE_CCD_EDC = 28,
+/*29*/ FEATURE_PROCHOT = 29,
+/*30*/ FEATURE_DVO_CCLK = 30,
+/*31*/ FEATURE_FDD_AID_HBM = 31,
+/*32*/ FEATURE_FDD_AID_SOC = 32,
+/*33*/ FEATURE_FDD_XCD_EDC = 33,
+/*34*/ FEATURE_FDD_XCD_XVMIN = 34,
+/*35*/ FEATURE_FW_CTF = 35,
+/*36*/ FEATURE_GFXOFF = 36,
+/*37*/ FEATURE_SMU_CG = 37,
+/*38*/ FEATURE_PSI7 = 38,
+/*39*/ FEATURE_CSTATE_BOOST = 39,
+/*40*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 40,
+/*41*/ FEATURE_CXL_QOS = 41,
+/*42*/ FEATURE_SOC_DC_RTC = 42,
+/*43*/ FEATURE_GFX_DC_RTC = 43,
+
+/*44*/ NUM_FEATURES = 44
+} FEATURE_LIST_e;
+
+//enum for MPIO PCIe gen speed msgs
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ VOLTAGE_COLD_0,
+ VOLTAGE_COLD_1,
+ VOLTAGE_COLD_2,
+ VOLTAGE_COLD_3,
+ VOLTAGE_COLD_4,
+ VOLTAGE_COLD_5,
+ VOLTAGE_COLD_6,
+ VOLTAGE_COLD_7,
+ VOLTAGE_MID_0,
+ VOLTAGE_MID_1,
+ VOLTAGE_MID_2,
+ VOLTAGE_MID_3,
+ VOLTAGE_MID_4,
+ VOLTAGE_MID_5,
+ VOLTAGE_MID_6,
+ VOLTAGE_MID_7,
+ VOLTAGE_HOT_0,
+ VOLTAGE_HOT_1,
+ VOLTAGE_HOT_2,
+ VOLTAGE_HOT_3,
+ VOLTAGE_HOT_4,
+ VOLTAGE_HOT_5,
+ VOLTAGE_HOT_6,
+ VOLTAGE_HOT_7,
+ VOLTAGE_GUARDBAND_COUNT
+} GFX_GUARDBAND_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x1
+
+typedef struct {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+} MetricsTable_t;
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x1
+
+typedef struct {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+} VfMetricsTable_t;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
new file mode 100644
index 000000000000..b838e8db395a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_PPSMC_H
+#define SMU_13_0_6_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_DisableAllSmuFeatures 0x6
+#define PPSMC_MSG_RequestI2cTransaction 0x7
+#define PPSMC_MSG_GetMetricsVersion 0x8
+#define PPSMC_MSG_GetMetricsTable 0x9
+#define PPSMC_MSG_GetEccInfoTable 0xA
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrLow 0xE
+#define PPSMC_MSG_SetToolsDramAddrHigh 0xF
+#define PPSMC_MSG_SetToolsDramAddrLow 0x10
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12
+#define PPSMC_MSG_SetSoftMinByFreq 0x13
+#define PPSMC_MSG_SetSoftMaxByFreq 0x14
+#define PPSMC_MSG_GetMinDpmFreq 0x15
+#define PPSMC_MSG_GetMaxDpmFreq 0x16
+#define PPSMC_MSG_GetDpmFreqByIndex 0x17
+#define PPSMC_MSG_SetPptLimit 0x18
+#define PPSMC_MSG_GetPptLimit 0x19
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B
+#define PPSMC_MSG_DramLogSetDramSize 0x1C
+#define PPSMC_MSG_GetDebugData 0x1D
+#define PPSMC_MSG_HeavySBR 0x1E
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F
+#define PPSMC_MSG_DFCstateControl 0x20
+#define PPSMC_MSG_GetGmiPwrDnHyst 0x21
+#define PPSMC_MSG_SetGmiPwrDnHyst 0x22
+#define PPSMC_MSG_GmiPwrDnControl 0x23
+#define PPSMC_MSG_EnterGfxoff 0x24
+#define PPSMC_MSG_ExitGfxoff 0x25
+#define PPSMC_MSG_EnableDeterminism 0x26
+#define PPSMC_MSG_DisableDeterminism 0x27
+#define PPSMC_MSG_DumpSTBtoDram 0x28
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D
+#define PPSMC_MSG_GfxDriverResetRecovery 0x2E
+#define PPSMC_MSG_TriggerVFFLR 0x2F
+#define PPSMC_MSG_SetSoftMinGfxClk 0x30
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
+#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
+#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
+#define PPSMC_Message_Count 0x34
+
+//PPSMC Reset Types for driver msg argument
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 4180c71d930f..297b70b9388f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -242,7 +242,10 @@
__SMU_DUMMY_MAP(LogGfxOffResidency), \
__SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \
__SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
- __SMU_DUMMY_MAP(AllowGpo),
+ __SMU_DUMMY_MAP(AllowGpo), \
+ __SMU_DUMMY_MAP(Mode2Reset), \
+ __SMU_DUMMY_MAP(RequestI2cTransaction), \
+ __SMU_DUMMY_MAP(GetMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6febb8b64..df3baaab0037 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,12 +28,13 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_6 0x0
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -61,6 +62,12 @@
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_MEM 5
+extern const int pmfw_decoded_link_speed[5];
+extern const int pmfw_decoded_link_width[7];
+
+#define DECODE_GEN_SPEED(gen_speed_idx) (pmfw_decoded_link_speed[gen_speed_idx])
+#define DECODE_LANE_WIDTH(lane_width_idx) (pmfw_decoded_link_width[lane_width_idx])
+
struct smu_13_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
@@ -244,10 +251,9 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_13_0_dpm_table *single_dpm_table);
-int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint16_t level,
+ uint32_t *value);
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 0bcd4fe0ef17..c4000518dc56 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -304,7 +304,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
| FEATURE_MASK(FEATURE_FW_CTF_BIT)
- | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT)
+ | FEATURE_MASK(FEATURE_TEMP_DEPENDENT_VMIN_BIT);
if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
@@ -494,6 +495,8 @@ static int navi10_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_table *dummy_read_1_table =
+ &smu_table->dummy_read_1_table;
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -513,6 +516,10 @@ static int navi10_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ dummy_read_1_table->size = 0x40000;
+ dummy_read_1_table->align = PAGE_SIZE;
+ dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
GFP_KERNEL);
if (!smu_table->metrics_table)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 697e98a0a20a..75f18681e984 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
OverDriveTable_t *user_od_table =
(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTable_t user_od_table_bak;
int ret = 0;
- /*
- * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
- * - either they already have the default OD settings got during cold bootup
- * - or they have some user customized OD settings which cannot be overwritten
- */
- if (smu->adev->in_suspend)
- return 0;
-
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
0, (void *)boot_od_table, false);
if (ret) {
@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
sienna_cichlid_dump_od_table(smu, boot_od_table);
memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
- memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
+ * but we have to preserve user defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+ user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+ user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+ user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+ user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
+ }
return 0;
}
@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
return ret;
}
+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = table_context->overdrive_table;
+ OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ res = smu_v11_0_restore_user_od_settings(smu);
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
+
+ return res;
+}
+
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
int res;
@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.set_default_od_settings = sienna_cichlid_set_default_od_settings,
.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
- .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
+ .restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index ad66d57aa102..e1ef88ee1ed3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- const char *chip_name;
+ char ucode_prefix[30];
char fw_name[SMU_FW_NAME_LEN];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -105,43 +105,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
return 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(11, 0, 5):
- chip_name = "navi14";
- break;
- case IP_VERSION(11, 0, 9):
- chip_name = "navi12";
- break;
- case IP_VERSION(11, 0, 7):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(11, 0, 11):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(11, 0, 12):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(11, 0, 13):
- chip_name = "beige_goby";
- break;
- case IP_VERSION(11, 0, 2):
- chip_name = "arcturus";
- break;
- default:
- dev_err(adev->dev, "Unsupported IP version 0x%x\n",
- adev->ip_versions[MP1_HWIP][0]);
- return -EINVAL;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err)
goto out;
@@ -159,12 +127,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
}
out:
- if (err) {
- DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -172,8 +136,7 @@ void smu_v11_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
@@ -293,7 +256,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
- * Considering above, we just leave user a warning message instead
+ * Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -301,7 +264,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+ dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index cb10c7e31264..7433dcaa16e0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -203,6 +203,8 @@ static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT]
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT),
};
static const uint8_t vangogh_throttler_map[] = {
@@ -1046,7 +1048,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
if (!buf)
return -EINVAL;
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on vangogh.
@@ -1070,7 +1072,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
int workload_type, ret;
uint32_t profile_mode = input[size];
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
@@ -1590,6 +1592,21 @@ static int vangogh_read_sensor(struct smu_context *smu,
return ret;
}
+static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetThermalLimit,
+ 0, limit);
+}
+
+static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetReducedThermalLimit,
+ limit, NULL);
+}
+
+
static int vangogh_set_watermarks_table(struct smu_context *smu,
struct pp_smu_wm_range_sets *clock_ranges)
{
@@ -2372,6 +2389,7 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
* vangogh_get_gfxoff_residency
*
* @smu: amdgpu_device pointer
+ * @residency: placeholder for return value
*
* This function will be used to get gfxoff residency.
*
@@ -2390,6 +2408,7 @@ static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *resid
* vangogh_get_gfxoff_entrycount - get gfxoff entry count
*
* @smu: amdgpu_device pointer
+ * @entrycount: placeholder for return value
*
* This function will be used to get gfxoff entry count
*
@@ -2425,6 +2444,8 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
.is_dpm_running = vangogh_is_dpm_running,
.read_sensor = vangogh_read_sensor,
+ .get_apu_thermal_limit = vangogh_get_apu_thermal_limit,
+ .set_apu_thermal_limit = vangogh_set_apu_thermal_limit,
.get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_watermarks_table = vangogh_set_watermarks_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 85e22210963f..5cdc07165480 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1171,6 +1171,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
int ret = 0;
uint32_t apu_percent = 0;
uint32_t dgpu_percent = 0;
+ struct amdgpu_device *adev = smu->adev;
ret = smu_cmn_get_metrics_table(smu,
@@ -1196,7 +1197,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageUvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = (metrics->CurrentSocketPower << 8) / 1000;
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
+ *value = metrics->CurrentSocketPower << 8;
+ else
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = (metrics->GfxTemperature / 100) *
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 56a02bc60cee..c788aa7a99a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -93,7 +93,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
- * Considering above, we just leave user a warning message instead
+ * Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -101,7 +101,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+ dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 9043f6ef1aee..7f3493b6c53c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
# It provides the smu management services for the driver.
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
- smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o
+ smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e54b760b875b..393c6a7b9609 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -85,10 +85,12 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
+const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- const char *chip_name;
char fw_name[30];
char ucode_prefix[30];
int err = 0;
@@ -100,21 +102,11 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- chip_name = "aldebaran_smc";
- break;
- default:
- amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err)
goto out;
@@ -132,12 +124,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
}
out:
- if (err) {
- DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -145,8 +133,7 @@ void smu_v13_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
@@ -310,6 +297,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
+ case IP_VERSION(13, 0, 6):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_6;
+ adev->pm.fw_version = smu_version;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -327,7 +318,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
- * Considering above, we just leave user a warning message instead
+ * Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -335,7 +326,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(adev->dev, "SMU driver if version not matched\n");
+ dev_info(adev->dev, "SMU driver if version not matched\n");
}
return ret;
@@ -1261,7 +1252,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t tach_period, crystal_clock_freq;
+ uint32_t crystal_clock_freq = 2500;
+ uint32_t tach_period;
int ret;
if (!speed)
@@ -1271,7 +1263,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
if (ret)
return ret;
- crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
@@ -1930,10 +1921,9 @@ int smu_v13_0_set_power_source(struct smu_context *smu,
NULL);
}
-static int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint16_t level,
- uint32_t *value)
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint16_t level,
+ uint32_t *value)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -2064,45 +2054,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v13_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2284,10 +2235,23 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq)
{
- return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_ArmD3,
- baco_seq,
- NULL);
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (baco_seq == BACO_SEQ_BAMACO ||
+ baco_seq == BACO_SEQ_BACO)
+ smu_baco->state = SMU_BACO_STATE_ENTER;
+ else
+ smu_baco->state = SMU_BACO_STATE_EXIT;
+
+ return 0;
}
bool smu_v13_0_baco_is_support(struct smu_context *smu)
@@ -2298,6 +2262,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
!smu_baco->platform_support)
return false;
+ /* return true if ASIC is in BACO state already */
+ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return true;
+
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9643b21c636a..09405ef1e3c8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -46,6 +46,7 @@
#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
#include "smu_cmn.h"
#include "amdgpu_ras.h"
+#include "umc_v8_10.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -90,6 +91,12 @@
#define DEBUGSMC_MSG_Mode1Reset 2
+/*
+ * SMU_v13_0_10 supports ECCTABLE since version 80.34.0,
+ * use this to check whether the ECCTABLE feature is supported
+ */
+#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
+
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -138,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(Mode2Reset, PPSMC_MSG_Mode2Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
@@ -145,6 +153,8 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -213,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
@@ -225,6 +236,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -240,6 +252,7 @@ static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COU
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
static const uint8_t smu_v13_0_0_throttler_map[] = {
@@ -405,6 +418,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
@@ -454,6 +470,8 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -469,8 +487,14 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
return 0;
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -1120,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
- ((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
- (lane_width == link_width[pcie_table->pcie_lane[i]]) ?
+ (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
"*" : "");
break;
@@ -1255,6 +1279,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!range)
return -EINVAL;
@@ -1555,12 +1582,14 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9]);
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0)
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0)
return -EINVAL;
result = smu_cmn_update_table(smu,
@@ -1617,7 +1646,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
smu->power_profile_mode = input[size];
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
}
@@ -1902,15 +1931,51 @@ static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
NULL);
}
+static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
+ uint32_t supported_version,
+ uint32_t *param)
+{
+ uint32_t smu_version;
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ if ((smu_version >= supported_version) &&
+ ras && atomic_read(&ras->in_recovery))
+ /* Set RAS fatal error reset flag */
+ *param = 1 << 16;
+ else
+ *param = 0;
+}
+
static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
{
int ret;
+ uint32_t param;
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
- ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
- else
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */
+ smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_Mode1Reset, param, NULL);
+ break;
+
+ case IP_VERSION(13, 0, 10):
+ /* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */
+ smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, &param);
+
+ ret = smu_cmn_send_debug_smc_msg_with_param(smu,
+ DEBUGSMC_MSG_Mode1Reset, param);
+ break;
+
+ default:
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ break;
+ }
if (!ret)
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
@@ -1918,6 +1983,30 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_0_mode2_reset(struct smu_context *smu)
+{
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);
+ else
+ return -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
+ FEATURE_PWR_GFX, NULL);
+ else
+ return -EOPNOTSUPP;
+}
+
static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1965,6 +2054,64 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return -EOPNOTSUPP;
+
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
+ (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+ return ret;
+ else
+ return -EOPNOTSUPP;
+}
+
+static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = smu_v13_0_0_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
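+	/* Copy the per-channel CE counts and MCA status/address reported by PMFW into the driver's UMC ECC info */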
+ for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2033,11 +2180,14 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_exit = smu_v13_0_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_0_mode1_reset,
+ .mode2_reset = smu_v13_0_0_mode2_reset,
+ .enable_gfx_features = smu_v13_0_0_enable_gfx_features,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
.set_df_cstate = smu_v13_0_0_set_df_cstate,
.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
+ .get_ecc_info = smu_v13_0_0_get_ecc_info,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
new file mode 100644
index 000000000000..ea8f3d6fb98b
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -0,0 +1,2069 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v13_0_6_pmfw.h"
+#include "smu13_driver_if_v13_0_6.h"
+#include "smu_v13_0_6_ppsmc.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "smu_v13_0.h"
+#include "smu_v13_0_6_ppt.h"
+#include "nbio/nbio_7_4_offset.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+#include "thm/thm_11_0_2_offset.h"
+#include "thm/thm_11_0_2_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+#include "mp/mp_13_0_6_offset.h"
+#include "mp/mp_13_0_6_sh_mask.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/* TODO: Check final register offsets */
+#define MP1_Public 0x03b00000
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
+ [smu_feature] = { 1, (smu_13_0_6_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE \
+ (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
+ FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
+ FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
+ FEATURE_MASK(FEATURE_DPM_VCN))
+
+/* possible frequency drift (1Mhz) */
+#define EPSILON 1
+
+#define smnPCIE_ESM_CTRL 0x111003D0
+
+static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
+ MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
+ MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
+ MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
+ MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
+ MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
+ MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
+ MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+};
+
+static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(DCLK, PPCLK_DCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK),
+ CLK_MAP(LCLK, PPCLK_LCLK),
+};
+
+static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+};
+
+#define TABLE_PMSTATUSLOG 0
+#define TABLE_SMU_METRICS 1
+#define TABLE_I2C_COMMANDS 2
+#define TABLE_COUNT 3
+
+static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(I2C_COMMANDS),
+};
+
+#define THROTTLER_PROCHOT_GFX_BIT 0
+#define THROTTLER_PPT_BIT 1
+#define THROTTLER_TEMP_SOC_BIT 2
+#define THROTTLER_TEMP_VR_GFX_BIT 3
+#define THROTTLER_TEMP_HBM_BIT 4
+
+static const uint8_t smu_v13_0_6_throttler_map[] = {
+ [THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_TEMP_SOC_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_TEMP_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_PROCHOT_GFX_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
+};
+
+struct PPTable_t {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+ bool Init;
+};
+
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
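+/* Convert an unsigned Q10 fixed-point value (10 fractional bits) to an integer, dropping the fractional part */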
+
+struct smu_v13_0_6_dpm_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature_num;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t *freq_table;
+};
+
+static int smu_v13_0_6_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->flags & AMD_IS_APU))
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table =
+ kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table) {
+ kfree(smu_table->metrics_table);
+ return -ENOMEM;
+ }
+
+ smu_table->driver_pptable =
+ kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->gpu_metrics_table);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ smu_dpm->dpm_context =
+ kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+ smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v13_0_6_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v13_0_6_allocate_dpm_context(smu);
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask,
+ uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ /* pptable will handle the features to enable */
+ memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
+ void *metrics_table, bool bypass_cache)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
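+	/*
+	 * Refresh the cached copy only if the caller bypasses the cache or the
+	 * cached table is more than 1 ms old.
+	 */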
+ if (bypass_cache || !smu_table->metrics_time ||
+ time_after(jiffies,
+ smu_table->metrics_time + msecs_to_jiffies(1))) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export SMU metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ smu_table->metrics_time = jiffies;
+ }
+
+ if (metrics_table)
+ memcpy(metrics_table, smu_table->metrics_table, table_size);
+
+ return 0;
+}
+
+static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret;
+ int i;
+
+ /* Store one-time values in driver PPTable */
+ if (!pptable->Init) {
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);
+
+ for (i = 0; i < 4; ++i) {
+ pptable->FclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
+ pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
+ metrics->SocclkFrequencyTable[i]);
+ pptable->VclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
+ pptable->DclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
+ pptable->LclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
+ }
+
+ pptable->Init = true;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t clock_limit = 0, param;
+ int ret = 0, clk_id = 0;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ if (pptable->Init)
+ clock_limit = pptable->UclkFrequencyTable[0];
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (pptable->Init)
+ clock_limit = pptable->MinGfxclkFrequency;
+ break;
+ case SMU_SOCCLK:
+ if (pptable->Init)
+ clock_limit = pptable->UclkFrequencyTable[0];
+ break;
+ case SMU_FCLK:
+ if (pptable->Init)
+ clock_limit = pptable->FclkFrequencyTable[0];
+ break;
+ case SMU_VCLK:
+ if (pptable->Init)
+ clock_limit = pptable->VclkFrequencyTable[0];
+ break;
+ case SMU_DCLK:
+ if (pptable->Init)
+ clock_limit = pptable->DclkFrequencyTable[0];
+ break;
+ default:
+ break;
+ }
+
+ if (min)
+ *min = clock_limit;
+
+ if (max)
+ *max = clock_limit;
+
+ return 0;
+ }
+
+ if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
+ clk_id = smu_cmn_to_asic_specific_index(
+ smu, CMN2ASIC_MAPPING_CLK, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
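+		/* GetMin/GetMaxDpmFreq take the clock id in the upper 16 bits of the argument */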
+ param = (clk_id & 0xffff) << 16;
+ }
+
+ if (max) {
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+ ret = smu_cmn_send_smc_msg(
+ smu, SMU_MSG_GetMaxGfxclkFrequency, max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_GetMaxDpmFreq, param, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+ ret = smu_cmn_send_smc_msg(
+ smu, SMU_MSG_GetMinGfxclkFrequency, min);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_GetMinDpmFreq, param, min);
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *levels)
+{
+ int ret;
+
+ ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
+ if (!ret)
+ ++(*levels);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t gfxclkmin, gfxclkmax, levels;
+ int ret = 0, i, j;
+ struct smu_v13_0_6_dpm_map dpm_map[] = {
+ { SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
+ &dpm_context->dpm_tables.soc_table,
+ pptable->SocclkFrequencyTable },
+ { SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
+ &dpm_context->dpm_tables.uclk_table,
+ pptable->UclkFrequencyTable },
+ { SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
+ &dpm_context->dpm_tables.fclk_table,
+ pptable->FclkFrequencyTable },
+ { SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
+ &dpm_context->dpm_tables.vclk_table,
+ pptable->VclkFrequencyTable },
+ { SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
+ &dpm_context->dpm_tables.dclk_table,
+ pptable->DclkFrequencyTable },
+ };
+
+ smu_v13_0_6_setup_driver_pptable(smu);
+
+ /* gfxclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ /* In the case of gfxclk, only fine-grained dpm is honored.
+ * Get min/max values from FW.
+ */
+ ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
+ &gfxclkmin, &gfxclkmax);
+ if (ret)
+ return ret;
+
+ dpm_table->count = 2;
+ dpm_table->dpm_levels[0].value = gfxclkmin;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->dpm_levels[1].value = gfxclkmax;
+ dpm_table->dpm_levels[1].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[1].value;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
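+	/*
+	 * Populate the remaining clock tables from the cached frequency tables;
+	 * a clock without DPM enabled gets a single level.
+	 */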
+ for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
+ dpm_table = dpm_map[j].dpm_table;
+ levels = 1;
+ if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
+ ret = smu_v13_0_6_get_dpm_level_count(
+ smu, dpm_map[j].clk_type, &levels);
+ if (ret)
+ return ret;
+ }
+ dpm_table->count = levels;
+ for (i = 0; i < dpm_table->count; ++i) {
+ dpm_table->dpm_levels[i].value =
+ dpm_map[j].freq_table[i];
+ dpm_table->dpm_levels[i].enabled = true;
+
+ }
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
+
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ /* TODO: PPTable is not available.
+ * 1) Find an alternate way to get 'PPTable values' here.
+ * 2) Check if there is SW CTF
+ */
+ table_context->thermal_controller_type = 0;
+
+ return 0;
+}
+
+static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags =
+ RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_13_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_13_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+
+ pstate_table->uclk_pstate.min = mem_table->min;
+ pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.curr.min = mem_table->min;
+ pstate_table->uclk_pstate.curr.max = mem_table->max;
+
+ pstate_table->socclk_pstate.min = soc_table->min;
+ pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.curr.min = soc_table->min;
+ pstate_table->socclk_pstate.curr.max = soc_table->max;
+
+ if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
+ soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
+ pstate_table->gfxclk_pstate.standard =
+ gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
+ pstate_table->uclk_pstate.standard =
+ mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
+ pstate_table->socclk_pstate.standard =
+ soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
+ } else {
+ pstate_table->gfxclk_pstate.standard =
+ pstate_table->gfxclk_pstate.min;
+ pstate_table->uclk_pstate.standard =
+ pstate_table->uclk_pstate.min;
+ pstate_table->socclk_pstate.standard =
+ pstate_table->socclk_pstate.min;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_13_0_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
+ dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
+ int32_t frequency2)
+{
+ return (abs(frequency1 - frequency2) <= EPSILON);
+}
+
+static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu,
+ MetricsTable_t *metrics)
+{
+ uint32_t throttler_status = 0;
+
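+	/* A non-zero residency accumulator means the corresponding throttler has been active */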
+ throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0;
+ throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0;
+ throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_SOC_BIT : 0;
+ throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0;
+ throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0;
+
+ return throttler_status;
+}
+
+static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ /* For clocks with multiple instances, only report the first one */
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ *value = 0;
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_TO_UINT(metrics->UclkFrequency);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_TO_UINT(metrics->FclkFrequency);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ break;
+ /* This is the max of all VRs and not just SOC VR.
+ * No need to define another data type for the same.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = smu_v13_0_6_get_throttler_status(smu, metrics);
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_UCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ case SMU_FCLK:
+ member_type = METRICS_CURR_FCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct pp_clock_levels_with_latency clocks;
+ struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_13_0_dpm_context *dpm_context = NULL;
+ uint32_t display_levels;
+ uint32_t freq_values[3] = { 0 };
+ uint32_t min_clk, max_clk;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
+
+ dpm_context = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
+ fallthrough;
+ case SMU_SCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
+ display_levels = clocks.num_levels;
+
+ min_clk = pstate_table->gfxclk_pstate.curr.min;
+ max_clk = pstate_table->gfxclk_pstate.curr.max;
+
+ freq_values[0] = min_clk;
+ freq_values[1] = max_clk;
+
+ /* fine-grained dpm has only 2 levels */
+ if (now > min_clk && now < max_clk) {
+ display_levels = clocks.num_levels + 1;
+ freq_values[2] = max_clk;
+ freq_values[1] = now;
+ }
+
+ /*
+ * For DPM disabled case, there will be only one clock level.
+ * And it's safe to assume that is always the current clock.
+ */
+ if (display_levels == clocks.num_levels) {
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ freq_values[i], now) ?
+ "*" :
+ ""));
+ } else {
+ for (i = 0; i < display_levels; i++)
+ size += sysfs_emit_at(buf, size,
+ "%d: %uMhz %s\n", i,
+ freq_values[i],
+ i == 1 ? "*" : "");
+ }
+
+ break;
+
+ case SMU_OD_MCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
+ fallthrough;
+ case SMU_MCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_SOCCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_FCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get fclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_VCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current vclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get vclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_DCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current dclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get dclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask, uint32_t level)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ uint32_t freq;
+ int ret = 0;
+
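+	/*
+	 * The by-freq soft limit messages encode the clock id in the upper 16 bits
+	 * and the frequency in the lower 16 bits; gfxclk uses dedicated messages
+	 * that take only the frequency.
+	 */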
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
+ freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxGfxClk :
+ SMU_MSG_SetSoftMinGfxclk),
+ freq & 0xffff, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
+ freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
+ .value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq :
+ SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
+ freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq :
+ SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, uint32_t mask)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
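+	/* The mask is a bitmap of DPM levels; use its lowest/highest set bits as the soft min/max levels */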
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ if (soft_max_level >= single_dpm_table->count) {
+ dev_err(smu->adev->dev,
+ "Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = smu_v13_0_6_upload_dpm_level(
+ smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
+ soft_min_level);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = smu_v13_0_6_upload_dpm_level(
+ smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
+ soft_max_level);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ /*
+ * Should not arrive here since smu_13_0_6 does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_AVERAGE_GFXACTIVITY, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_AVERAGE_MEMACTIVITY, value);
+ break;
+ default:
+ dev_err(smu->adev->dev,
+ "Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ if (!value)
+ return -EINVAL;
+
+ return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
+ value);
+}
+
+static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_TEMPERATURE_HOTSPOT, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_TEMPERATURE_MEM, value);
+ break;
+ default:
+ dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor, void *data,
+ uint32_t *size)
+{
+ int ret = 0;
+
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(
+ smu, SMU_UCLK, (uint32_t *)data);
+		/* the output clock frequency is in units of 10 KHz */
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(
+ smu, SMU_GFXCLK, (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t power_limit = 0;
+ int ret;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (current_power_limit)
+ *current_power_limit = 0;
+ if (default_power_limit)
+ *default_power_limit = 0;
+ if (max_power_limit)
+ *max_power_limit = 0;
+
+ dev_warn(
+ smu->adev->dev,
+ "PPT feature is not enabled, power values can't be fetched.");
+
+ return 0;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
+
+ if (ret) {
+ dev_err(smu->adev->dev, "Couldn't get PPT limit");
+ return -EINVAL;
+ }
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit) {
+ *max_power_limit = pptable->MaxSocketPowerLimit;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+}
+
+static int smu_v13_0_6_system_features_control(struct smu_context *smu,
+ bool enable)
+{
+ int ret;
+
+ /* Nothing to be done for APU */
+ if (smu->adev->flags & AMD_IS_APU)
+ return 0;
+
+ ret = smu_v13_0_system_features_control(smu, enable);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ max & 0xffff, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
+ min & 0xffff, NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_13_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret;
+
+ /* Disable determinism if switching to another mode */
+ if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
+ (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
+ smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+ return 0;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
+ (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, gfx_table->min, gfx_table->max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ return 0;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+ return -EINVAL;
+
+ if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
+ (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ return -EINVAL;
+
+ if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ if (min >= max) {
+ dev_err(smu->adev->dev,
+ "Minimum GFX clk should be less than the maximum allowed clock\n");
+ return -EINVAL;
+ }
+
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+
+ return ret;
+ }
+
+ if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
+ (max > dpm_context->dpm_tables.gfx_table.max)) {
+ dev_warn(
+ adev->dev,
+ "Invalid max frequency %d MHz specified for determinism\n",
+ max);
+ return -EINVAL;
+ }
+
+ /* Restore default min/max clocks and enable determinism */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
+ max_clk);
+ if (!ret) {
+ usleep_range(500, 1000);
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_EnableDeterminism, max, NULL);
+ if (ret) {
+ dev_err(adev->dev,
+ "Failed to enable determinism at GFX clock %d MHz\n",
+ max);
+ } else {
+ pstate_table->gfxclk_pstate.curr.min = min_clk;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ /* Only allowed in manual or determinism mode */
+ if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
+ (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
+ dev_warn(
+ smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.gfx_table.min);
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.min = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.gfx_table.max);
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ /* Use the default frequencies for manual and determinism mode */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk);
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (!pstate_table->gfxclk_pstate.custom.min)
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+
+ if (!pstate_table->gfxclk_pstate.custom.max)
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+
+ min_clk = pstate_table->gfxclk_pstate.custom.min;
+ max_clk = pstate_table->gfxclk_pstate.custom.max;
+
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk);
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ uint32_t smu_version;
+ int ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
+ if (ret == -EIO && smu_version < 0x552F00) {
+ *feature_mask = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
+{
+ int ret;
+ uint64_t feature_enabled;
+
+ ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
+ void *table_data)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t table_size;
+ int ret = 0;
+
+ if (!table_data)
+ return -EINVAL;
+
+ table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
+
+ memcpy(table->cpu_addr, table_data, table_size);
+ /* Flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
+ NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msg, int num_msgs)
+{
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
+ int i, j, r, c;
+ u16 dir;
+
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->I2CcontrollerPort = smu_i2c->port;
+ req->I2CSpeed = I2C_SPEED_FAST_400K;
+ req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
+ dir = msg[0].flags & I2C_M_RD;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
+
+ if (!(msg[i].flags & I2C_M_RD)) {
+ /* write */
+ cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
+ cmd->ReadWriteData = msg[i].buf[j];
+ }
+
+ if ((dir ^ msg[i].flags) & I2C_M_RD) {
+				/* The direction changes. */
+ dir = msg[i].flags & I2C_M_RD;
+ cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
+ }
+
+ req->NumCmds++;
+
+ /*
+ * Insert STOP if we are at the last byte of either last
+ * message for the transaction or the client explicitly
+ * requires a STOP at this particular message.
+ */
+ if ((j == msg[i].len - 1) &&
+ ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
+ cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
+ cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
+ }
+ }
+ }
+ mutex_lock(&adev->pm.mutex);
+ r = smu_v13_0_6_request_i2c_xfer(smu, req);
+ mutex_unlock(&adev->pm.mutex);
+ if (r)
+ goto fail;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ if (!(msg[i].flags & I2C_M_RD)) {
+ c += msg[i].len;
+ continue;
+ }
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
+
+ msg[i].buf[j] = cmd->ReadWriteData;
+ }
+ }
+ r = num_msgs;
+fail:
+ kfree(req);
+ return r;
+}
+
+static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
+ .master_xfer = smu_v13_0_6_i2c_xfer,
+ .functionality = smu_v13_0_6_i2c_func,
+};
+
+static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = MAX_SW_I2C_COMMANDS,
+ .max_write_len = MAX_SW_I2C_COMMANDS,
+ .max_comb_1st_msg_len = 2,
+ .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
+};
+
+static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v13_0_6_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &smu_v13_0_6_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ return res;
+}
+
+static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
+}
+
+static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ //SmuMetrics_t *metrics = smu->smu_table.metrics_table;
+ uint32_t upper32 = 0, lower32 = 0;
+ int ret;
+
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ goto out;
+
+ //upper32 = metrics->PublicSerialNumUpper32;
+ //lower32 = metrics->PublicSerialNumLower32;
+
+out:
+ adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+ if (adev->serial[0] == '\0')
+ sprintf(adev->serial, "%016llx", adev->unique_id);
+}
+
+static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
+{
+ /* smu_13_0_6 does not support baco */
+
+ return false;
+}
+
+static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
+ state, NULL);
+}
+
+static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
+ en ? 0 : 1, NULL);
+}
+
+static const struct throttling_logging_label {
+ uint32_t feature_mask;
+ const char *label;
+} logging_label[] = {
+ { (1U << THROTTLER_TEMP_HBM_BIT), "HBM" },
+ { (1U << THROTTLER_TEMP_SOC_BIT), "SOC" },
+ { (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" },
+};
+static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
+{
+ int ret;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t throttler_status;
+ char log_buf[256];
+
+ ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS,
+ &throttler_status);
+ if (ret)
+ return;
+
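+ /* Build a human-readable list of the throttler domains currently active */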
+ memset(log_buf, 0, sizeof(log_buf));
+ for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
+ throttler_idx++) {
+ if (throttler_status &
+ logging_label[throttler_idx].feature_mask) {
+ throttling_events++;
+ buf_idx += snprintf(log_buf + buf_idx,
+ sizeof(log_buf) - buf_idx, "%s%s",
+ throttling_events > 1 ? " and " : "",
+ logging_label[throttler_idx].label);
+ if (buf_idx >= sizeof(log_buf)) {
+ dev_err(adev->dev, "buffer overflow!\n");
+ log_buf[sizeof(log_buf) - 1] = '\0';
+ break;
+ }
+ }
+ }
+
+ dev_warn(
+ adev->dev,
+ "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
+ log_buf);
+ kgd2kfd_smi_event_throttle(
+ smu->adev->kfd.dev,
+ smu_cmn_get_indep_throttler_status(throttler_status,
+ smu_v13_0_6_throttler_map));
+}
+
+static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t esm_ctrl;
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+ if ((esm_ctrl >> 15) & 0x1FFFF)
+ return (((esm_ctrl >> 8) & 0x3F) + 128);
+
+ return smu_v13_0_get_current_pcie_link_speed(smu);
+}
+
+static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ MetricsTable_t *metrics;
+ int i, ret = 0;
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!metrics)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+ /* TODO: Decide on how to fill in zero value fields */
+ gpu_metrics->temperature_edge = 0;
+ gpu_metrics->temperature_hotspot = 0;
+ gpu_metrics->temperature_mem = 0;
+ gpu_metrics->temperature_vrgfx = 0;
+ gpu_metrics->temperature_vrsoc = 0;
+ gpu_metrics->temperature_vrmem = 0;
+
+ gpu_metrics->average_gfx_activity = 0;
+ gpu_metrics->average_umc_activity = 0;
+ gpu_metrics->average_mm_activity = 0;
+
+ gpu_metrics->average_socket_power = 0;
+ gpu_metrics->energy_accumulator = 0;
+
+ gpu_metrics->average_gfxclk_frequency = 0;
+ gpu_metrics->average_socclk_frequency = 0;
+ gpu_metrics->average_uclk_frequency = 0;
+ gpu_metrics->average_vclk0_frequency = 0;
+ gpu_metrics->average_dclk0_frequency = 0;
+
+ gpu_metrics->current_gfxclk = 0;
+ gpu_metrics->current_socclk = 0;
+ gpu_metrics->current_uclk = 0;
+ gpu_metrics->current_vclk0 = 0;
+ gpu_metrics->current_dclk0 = 0;
+
+ gpu_metrics->throttle_status = 0;
+ gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(
+ gpu_metrics->throttle_status, smu_v13_0_6_throttler_map);
+
+ gpu_metrics->current_fan_speed = 0;
+
+ gpu_metrics->pcie_link_width = 0;
+ gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->gfx_activity_acc = 0;
+ gpu_metrics->mem_activity_acc = 0;
+
+ for (i = 0; i < NUM_HBM_INSTANCES; i++)
+ gpu_metrics->temperature_hbm[i] = 0;
+
+ gpu_metrics->firmware_timestamp = 0;
+
+ *table = (void *)gpu_metrics;
+ kfree(metrics);
+
+ return sizeof(struct gpu_metrics_v1_3);
+}
+
+static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 10;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+
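+ /* Hold the message lock across the reset request and the ACK polling below */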
+ mutex_lock(&smu->message_lock);
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
+ SMU_RESET_MODE_2);
+ /* This is similar to FLR, wait till max FLR timeout */
+ msleep(100);
+ dev_dbg(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ while (ret == -ETIME && timeout) {
+ ret = smu_cmn_wait_for_response(smu);
+ /* Wait a bit more time for getting ACK */
+ if (ret == -ETIME) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (ret != 1) {
+ dev_err(adev->dev,
+ "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+ }
+
+ if (ret == 1)
+ ret = 0;
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
+
+static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras;
+ u32 fatal_err, param;
+ int ret = 0;
+
+ ras = amdgpu_ras_get_context(adev);
+ fatal_err = 0;
+ param = SMU_RESET_MODE_1;
+
+ /* fatal error triggered by ras, PMFW supports the flag */
+ if (ras && atomic_read(&ras->in_recovery))
+ fatal_err = 1;
+
+ param |= (fatal_err << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ param, NULL);
+
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
+static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
+{
+ /* TODO: Enable this when FW support is added */
+ return false;
+}
+
+static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+{
+ return true;
+}
+
+static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message SMU to update the bad page number on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message SMU to update HBM bad pages number\n",
+ __func__);
+
+ return ret;
+}
+
+static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
+ /* init dpm */
+ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
+ /* dpm/clk tables */
+ .set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
+ .populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
+ .print_clk_levels = smu_v13_0_6_print_clk_levels,
+ .force_clk_levels = smu_v13_0_6_force_clk_levels,
+ .read_sensor = smu_v13_0_6_read_sensor,
+ .set_performance_level = smu_v13_0_6_set_performance_level,
+ .get_power_limit = smu_v13_0_6_get_power_limit,
+ .is_dpm_running = smu_v13_0_6_is_dpm_running,
+ .get_unique_id = smu_v13_0_6_get_unique_id,
+ .init_smc_tables = smu_v13_0_6_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .init_power = smu_v13_0_init_power,
+ .fini_power = smu_v13_0_fini_power,
+ .check_fw_status = smu_v13_0_6_check_fw_status,
+ /* pptable related */
+ .check_fw_version = smu_v13_0_check_fw_version,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .set_tool_table_location = smu_v13_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
+ .system_features_control = smu_v13_0_6_system_features_control,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
+ .get_enabled_mask = smu_v13_0_6_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_limit = smu_v13_0_6_set_power_limit,
+ .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
+ /* TODO: Thermal limits unknown, skip these for now
+ .register_irq_handler = smu_v13_0_register_irq_handler,
+ .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
+ .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
+ */
+ .setup_pptable = smu_v13_0_6_setup_pptable,
+ .baco_is_support = smu_v13_0_6_is_baco_supported,
+ .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
+ .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
+ .set_df_cstate = smu_v13_0_6_set_df_cstate,
+ .allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
+ .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
+ .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .mode1_reset = smu_v13_0_6_mode1_reset,
+ .mode2_reset = smu_v13_0_6_mode2_reset,
+ .wait_for_event = smu_v13_0_wait_for_event,
+ .i2c_init = smu_v13_0_6_i2c_control_init,
+ .i2c_fini = smu_v13_0_6_i2c_control_fini,
+ .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+};
+
+void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
+ smu->message_map = smu_v13_0_6_message_map;
+ smu->clock_map = smu_v13_0_6_clk_map;
+ smu->feature_map = smu_v13_0_6_feature_mask_map;
+ smu->table_map = smu_v13_0_6_table_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
new file mode 100644
index 000000000000..f0fa42a645c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_13_0_6_PPT_H__
+#define __SMU_13_0_6_PPT_H__
+
+#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
+#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
+#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+
+extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 5c6c6ad011ca..3d9ff46706fb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -192,6 +193,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
@@ -573,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table);
if (ret)
return ret;
+
+ if (skutable->DriverReportedClocks.GameClockAc &&
+ (dpm_table->dpm_levels[dpm_table->count - 1].value >
+ skutable->DriverReportedClocks.GameClockAc)) {
+ dpm_table->dpm_levels[dpm_table->count - 1].value =
+ skutable->DriverReportedClocks.GameClockAc;
+ dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+ }
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@@ -826,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+
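+ /* Select the DPM table that corresponds to the requested clock domain */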
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ /* uclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* gfxclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ /* socclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ /* fclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ /* vclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ /* dclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Unsupported clock type!\n");
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = dpm_table->min;
+ if (max)
+ *max = dpm_table->max;
+
+ return 0;
+}
+
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
@@ -1072,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
- (gen_speed == pcie_table->pcie_gen[i]) &&
- (lane_width == pcie_table->pcie_lane[i]) ?
+ (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
"*" : "");
break;
@@ -1327,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ DriverReportedClocks_t driver_clocks =
+ pptable->SkuTable.DriverReportedClocks;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ if (driver_clocks.GameClockAc &&
+ (driver_clocks.GameClockAc < gfx_table->max))
+ pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+ else
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1346,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
pstate_table->fclk_pstate.min = fclk_table->min;
pstate_table->fclk_pstate.peak = fclk_table->max;
- /*
- * For now, just use the mininum clock frequency.
- * TODO: update them when the real pstate settings available
- */
- pstate_table->gfxclk_pstate.standard = gfx_table->min;
- pstate_table->uclk_pstate.standard = mem_table->min;
+ if (driver_clocks.BaseClockAc &&
+ driver_clocks.BaseClockAc < gfx_table->max)
+ pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+ else
+ pstate_table->gfxclk_pstate.standard = gfx_table->max;
+ pstate_table->uclk_pstate.standard = mem_table->max;
pstate_table->socclk_pstate.standard = soc_table->min;
pstate_table->vclk_pstate.standard = vclk_table->min;
pstate_table->dclk_pstate.standard = dclk_table->min;
@@ -1477,7 +1546,9 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0) {
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0) {
result = -EINVAL;
goto out;
}
@@ -1674,7 +1745,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.read_sensor = smu_v13_0_7_read_sensor,
.feature_is_enabled = smu_cmn_feature_is_enabled,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 768b6e7dbd77..3ecb900e6ecd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -404,6 +404,12 @@ int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
return __smu_cmn_send_debug_msg(smu, msg, 0);
}
+int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
+ uint32_t msg, uint32_t param)
+{
+ return __smu_cmn_send_debug_msg(smu, msg, param);
+}
+
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index)
@@ -472,13 +478,13 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_WORKLOAD:
- if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
+ if (index >= PP_SMC_POWER_PROFILE_COUNT ||
!smu->workload_map)
return -EINVAL;
mapping = smu->workload_map[index];
if (!mapping.valid_mapping)
- return -EINVAL;
+ return -ENOTSUPP;
return mapping.map_to;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index f82cf76dd3a4..d7cd358a53bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -45,6 +45,9 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
uint32_t msg);
+int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
+ uint32_t msg, uint32_t param);
+
int smu_cmn_wait_for_response(struct smu_context *smu);
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 4cc07d6bb9d8..cea3fd5772b5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 3f4e719eebd8..28f76e07dd95 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 7339339ef6b8..3a872c292091 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 7043d1c9ed8f..9020bf820bc8 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -26,7 +26,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -100,7 +100,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
{
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct platform_device *pdev = to_platform_device(drm->dev);
- struct resource *res;
u32 version;
int ret;
@@ -115,8 +114,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
atomic_set(&hdlcd->dma_end_count, 0);
#endif
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
+ hdlcd->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdlcd->mmio)) {
DRM_ERROR("failed to map control registers area\n");
ret = PTR_ERR(hdlcd->mmio);
@@ -195,8 +193,8 @@ static int hdlcd_setup_mode_config(struct drm_device *drm)
#ifdef CONFIG_DEBUG_FS
static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
@@ -208,8 +206,8 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
unsigned long clkrate = clk_get_rate(hdlcd->clk);
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
@@ -219,17 +217,10 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
return 0;
}
-static struct drm_info_list hdlcd_debugfs_list[] = {
+static struct drm_debugfs_info hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
};
-
-static void hdlcd_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list),
- minor->debugfs_root, minor);
-}
#endif
DEFINE_DRM_GEM_DMA_FOPS(fops);
@@ -237,9 +228,6 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = hdlcd_debugfs_init,
-#endif
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
@@ -303,11 +291,15 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
+#ifdef CONFIG_DEBUG_FS
+ drm_debugfs_add_files(drm, hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list));
+#endif
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_register;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 589c1c66a6dc..c03cfd57b752 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -649,7 +649,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
- return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
+ return sysfs_emit(buf, "%08x\n", malidp->core_id);
}
static DEVICE_ATTR_RO(core_id);
@@ -724,8 +724,7 @@ static int malidp_bind(struct device *dev)
hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
malidp->dev = hwdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hwdev->regs = devm_ioremap_resource(dev, res);
+ hwdev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hwdev->regs))
return PTR_ERR(hwdev->regs);
@@ -852,7 +851,7 @@ static int malidp_bind(struct device *dev)
if (ret)
goto register_fail;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 0643887800b4..142668cd6d7c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
- kfree(priv);
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 584cee123bd8..0e44f53e9fa4 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,7 +129,7 @@ int armada_fbdev_init(struct drm_device *dev)
priv->fbdev = fbh;
- drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs);
ret = drm_fb_helper_init(dev, fbh);
if (ret) {
@@ -137,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev)
goto err_fb_helper;
}
- ret = drm_fb_helper_initial_config(fbh, 32);
+ ret = drm_fb_helper_initial_config(fbh);
if (ret) {
DRM_ERROR("failed to set initial config\n");
goto err_fb_setup;
@@ -147,6 +147,7 @@ int armada_fbdev_init(struct drm_device *dev)
err_fb_setup:
drm_fb_helper_fini(fbh);
err_fb_helper:
+ drm_fb_helper_unprepare(fbh);
priv->fbdev = NULL;
return ret;
}
@@ -164,6 +165,8 @@ void armada_fbdev_fini(struct drm_device *dev)
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
+ drm_fb_helper_unprepare(fbh);
+
priv->fbdev = NULL;
}
}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 55a3444a51d8..7877a57b8e26 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -5,7 +5,6 @@
#include <linux/reset.h>
#include <linux/regmap.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 718119e168a6..c8c7f8215155 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -14,9 +14,8 @@
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -342,7 +341,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(&priv->drm, 32);
+ drm_fbdev_dma_setup(&priv->drm, 32);
return 0;
err_unload:
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
index 4f2187025a21..78775e0c853f 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -3,7 +3,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index d367a90cd3de..563fa7a3b546 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 56483860306b..fbb070f63e36 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -9,7 +9,7 @@
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 i = 0, j = 0;
/*
@@ -125,7 +125,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
u8 bDPTX = 0;
u8 bDPExecute = 1;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
// S3 come back, need more time to wait BMC ready.
if (bPower)
WaitCount = 300;
@@ -172,7 +172,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE);
@@ -188,7 +188,7 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
// Video On/Off
@@ -208,7 +208,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
u32 ulRefreshRateIndex;
u8 ModeIdx;
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 4f75a9efb610..1bc35a992369 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -10,7 +10,7 @@ MODULE_FIRMWARE("ast_dp501_fw.bin");
static void ast_release_firmware(void *data)
{
- struct ast_private *ast = data;
+ struct ast_device *ast = data;
release_firmware(ast->dp501_fw);
ast->dp501_fw = NULL;
@@ -18,7 +18,7 @@ static void ast_release_firmware(void *data)
static int ast_load_dp501_microcode(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
@@ -28,7 +28,7 @@ static int ast_load_dp501_microcode(struct drm_device *dev)
return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
-static void send_ack(struct ast_private *ast)
+static void send_ack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -36,7 +36,7 @@ static void send_ack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
-static void send_nack(struct ast_private *ast)
+static void send_nack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -44,7 +44,7 @@ static void send_nack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
-static bool wait_ack(struct ast_private *ast)
+static bool wait_ack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -60,7 +60,7 @@ static bool wait_ack(struct ast_private *ast)
return false;
}
-static bool wait_nack(struct ast_private *ast)
+static bool wait_nack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -76,18 +76,18 @@ static bool wait_nack(struct ast_private *ast)
return false;
}
-static void set_cmd_trigger(struct ast_private *ast)
+static void set_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
-static void clear_cmd_trigger(struct ast_private *ast)
+static void clear_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
#if 0
-static bool wait_fw_ready(struct ast_private *ast)
+static bool wait_fw_ready(struct ast_device *ast)
{
u8 waitready;
u32 retry = 0;
@@ -106,7 +106,7 @@ static bool wait_fw_ready(struct ast_private *ast)
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
@@ -128,7 +128,7 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data)
static bool ast_write_data(struct drm_device *dev, u8 data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
if (wait_nack(ast)) {
send_nack(ast);
@@ -146,7 +146,7 @@ static bool ast_write_data(struct drm_device *dev, u8 data)
#if 0
static bool ast_read_data(struct drm_device *dev, u8 *data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 tmp;
*data = 0;
@@ -163,7 +163,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data)
return true;
}
-static void clear_cmd(struct ast_private *ast)
+static void clear_cmd(struct ast_device *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
@@ -178,14 +178,14 @@ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
msleep(10);
}
-static u32 get_fw_base(struct ast_private *ast)
+static u32 get_fw_base(struct ast_device *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, data;
u32 boot_address;
@@ -204,7 +204,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
static bool ast_launch_m68k(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
@@ -274,7 +274,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
@@ -334,7 +334,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
static bool ast_init_dvo(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
@@ -407,7 +407,7 @@ static bool ast_init_dvo(struct drm_device *dev)
static void ast_init_analog(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 data;
/*
@@ -434,7 +434,7 @@ static void ast_init_analog(struct drm_device *dev)
void ast_init_3rdtx(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 jreg;
if (ast->chip == AST2300 || ast->chip == AST2400) {
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 420fc75c240e..e1224ef4ad83 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -31,7 +31,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -90,27 +89,13 @@ static const struct pci_device_id ast_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
-static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
-{
- bool primary = false;
- resource_size_t base, size;
-
- base = pci_resource_start(pdev, 0);
- size = pci_resource_len(pdev, 0);
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
-
- return drm_aperture_remove_conflicting_framebuffers(base, size, primary, &ast_driver);
-}
-
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ast_private *ast;
+ struct ast_device *ast;
struct drm_device *dev;
int ret;
- ret = ast_remove_conflicting_framebuffers(pdev);
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d51b81fea9c8..a501169cddad 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -157,7 +157,7 @@ to_ast_sil164_connector(struct drm_connector *connector)
* Device
*/
-struct ast_private {
+struct ast_device {
struct drm_device base;
struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
@@ -210,14 +210,14 @@ struct ast_private {
const struct firmware *dp501_fw; /* dp501 fw */
};
-static inline struct ast_private *to_ast_private(struct drm_device *dev)
+static inline struct ast_device *to_ast_device(struct drm_device *dev)
{
- return container_of(dev, struct ast_private, base);
+ return container_of(dev, struct ast_device, base);
}
-struct ast_private *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags);
+struct ast_device *ast_device_create(const struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags);
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
@@ -238,62 +238,44 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
-#define __ast_read(x) \
-static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
-u##x val = 0;\
-val = ioread##x(ast->regs + reg); \
-return val;\
+static inline u32 ast_read32(struct ast_device *ast, u32 reg)
+{
+ return ioread32(ast->regs + reg);
}
-__ast_read(8);
-__ast_read(16);
-__ast_read(32)
-
-#define __ast_io_read(x) \
-static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
-u##x val = 0;\
-val = ioread##x(ast->ioregs + reg); \
-return val;\
+static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
+{
+ iowrite32(val, ast->regs + reg);
}
-__ast_io_read(8);
-__ast_io_read(16);
-__ast_io_read(32);
-
-#define __ast_write(x) \
-static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
- iowrite##x(val, ast->regs + reg);\
- }
-
-__ast_write(8);
-__ast_write(16);
-__ast_write(32);
-
-#define __ast_io_write(x) \
-static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
- iowrite##x(val, ast->ioregs + reg);\
- }
+static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
+{
+ return ioread8(ast->ioregs + reg);
+}
-__ast_io_write(8);
-__ast_io_write(16);
-#undef __ast_io_write
+static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
+{
+ iowrite8(val, ast->ioregs + reg);
+}
-static inline void ast_set_index_reg(struct ast_private *ast,
+static inline void ast_set_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t val)
{
- ast_io_write16(ast, base, ((u16)val << 8) | index);
+ ast_io_write8(ast, base, index);
+ ++base;
+ ast_io_write8(ast, base, val);
}
-void ast_set_index_reg_mask(struct ast_private *ast,
+void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val);
-uint8_t ast_get_index_reg(struct ast_private *ast,
+uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index);
-uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask);
-static inline void ast_open_key(struct ast_private *ast)
+static inline void ast_open_key(struct ast_device *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
@@ -352,7 +334,7 @@ struct ast_crtc_state {
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_mode_config_init(struct ast_private *ast);
+int ast_mode_config_init(struct ast_device *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -476,16 +458,16 @@ int ast_mode_config_init(struct ast_private *ast);
#define ASTDP_1366x768_60 0x1E
#define ASTDP_1152x864_75 0x1F
-int ast_mm_init(struct ast_private *ast);
+int ast_mm_init(struct ast_device *ast);
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
-u32 ast_mindwm(struct ast_private *ast, u32 r);
-void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_private *ast);
+u32 ast_mindwm(struct ast_device *ast, u32 r);
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
+void ast_patch_ahb_2500(struct ast_device *ast);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
index 93e91c36d649..d64045c0b849 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -29,7 +29,7 @@
static void ast_i2c_setsda(void *i2c_priv, int data)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -45,7 +45,7 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
static void ast_i2c_setscl(void *i2c_priv, int clock)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -61,7 +61,7 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
static int ast_i2c_getsda(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
@@ -83,7 +83,7 @@ static int ast_i2c_getsda(void *i2c_priv)
static int ast_i2c_getscl(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index bffa310a0431..f32ce29edba7 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -29,14 +29,13 @@
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include "ast_drv.h"
-void ast_set_index_reg_mask(struct ast_private *ast,
+void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val)
{
@@ -46,7 +45,7 @@ void ast_set_index_reg_mask(struct ast_private *ast,
ast_set_index_reg(ast, base, index, tmp);
}
-uint8_t ast_get_index_reg(struct ast_private *ast,
+uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index)
{
uint8_t ret;
@@ -55,7 +54,7 @@ uint8_t ast_get_index_reg(struct ast_private *ast,
return ret;
}
-uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask)
{
uint8_t ret;
@@ -67,7 +66,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
struct device_node *np = dev->dev->of_node;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
@@ -123,7 +122,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
@@ -272,7 +271,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->dev->of_node;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -395,22 +394,22 @@ static int ast_get_dram_info(struct drm_device *dev)
*/
static void ast_device_release(void *data)
{
- struct ast_private *ast = data;
+ struct ast_device *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
-struct ast_private *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags)
+struct ast_device *ast_device_create(const struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags)
{
struct drm_device *dev;
- struct ast_private *ast;
+ struct ast_device *ast;
bool need_post;
int ret = 0;
- ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base);
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
return ast;
dev = &ast->base;
@@ -426,11 +425,12 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
return ERR_PTR(-EIO);
/*
- * If we don't have IO space at all, use MMIO now and
- * assume the chip has MMIO enabled by default (rev 0x20
- * and higher).
+ * After AST2500, MMIO is enabled by default, and it should be adopted
+ * to be compatible with Arm.
*/
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
+ if (pdev->revision >= 0x40) {
+ ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
+ } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 248284a4b3ff..e16af60deef9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -33,7 +33,7 @@
#include "ast_drv.h"
-static u32 ast_get_vram_size(struct ast_private *ast)
+static u32 ast_get_vram_size(struct ast_device *ast)
{
u8 jreg;
u32 vram_size;
@@ -73,7 +73,7 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
-int ast_mm_init(struct ast_private *ast)
+int ast_mm_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c7443317c747..36374828f6c8 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -35,7 +35,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@@ -52,7 +51,7 @@
#define AST_LUT_SIZE 256
-static inline void ast_load_palette_index(struct ast_private *ast,
+static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
{
@@ -66,7 +65,7 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
-static void ast_crtc_set_gamma_linear(struct ast_private *ast,
+static void ast_crtc_set_gamma_linear(struct ast_device *ast,
const struct drm_format_info *format)
{
int i;
@@ -85,7 +84,7 @@ static void ast_crtc_set_gamma_linear(struct ast_private *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_private *ast,
+static void ast_crtc_set_gamma(struct ast_device *ast,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
@@ -233,7 +232,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return true;
}
-static void ast_set_vbios_color_reg(struct ast_private *ast,
+static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -264,7 +263,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
}
}
-static void ast_set_vbios_mode_reg(struct ast_private *ast,
+static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -288,7 +287,7 @@ static void ast_set_vbios_mode_reg(struct ast_private *ast,
}
}
-static void ast_set_std_reg(struct ast_private *ast,
+static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -336,7 +335,7 @@ static void ast_set_std_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct ast_private *ast,
+static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -451,7 +450,7 @@ static void ast_set_crtc_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
-static void ast_set_offset_reg(struct ast_private *ast,
+static void ast_set_offset_reg(struct ast_device *ast,
struct drm_framebuffer *fb)
{
u16 offset;
@@ -461,7 +460,7 @@ static void ast_set_offset_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
-static void ast_set_dclk_reg(struct ast_private *ast,
+static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -479,7 +478,7 @@ static void ast_set_dclk_reg(struct ast_private *ast,
((clk_info->param3 & 0x3) << 4));
}
-static void ast_set_color_reg(struct ast_private *ast,
+static void ast_set_color_reg(struct ast_device *ast,
const struct drm_format_info *format)
{
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
@@ -508,7 +507,7 @@ static void ast_set_color_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
}
-static void ast_set_crtthd_reg(struct ast_private *ast)
+static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
if (ast->chip == AST2600) {
@@ -530,7 +529,7 @@ static void ast_set_crtthd_reg(struct ast_private *ast)
}
}
-static void ast_set_sync_reg(struct ast_private *ast,
+static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -545,7 +544,7 @@ static void ast_set_sync_reg(struct ast_private *ast,
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-static void ast_set_start_address_crt1(struct ast_private *ast,
+static void ast_set_start_address_crt1(struct ast_device *ast,
unsigned int offset)
{
u32 addr;
@@ -557,7 +556,7 @@ static void ast_set_start_address_crt1(struct ast_private *ast,
}
-static void ast_wait_for_vretrace(struct ast_private *ast)
+static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
u8 vgair1;
@@ -636,7 +635,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -646,7 +645,7 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
@@ -673,23 +672,34 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
/*
* Some BMCs stop scanning out the video signal after the driver
- * reprogrammed the offset or scanout address. This stalls display
- * output for several seconds and makes the display unusable.
- * Therefore only update the offset if it changes and reprogram the
- * address after enabling the plane.
+ * reprogrammed the offset. This stalls display output for several
+ * seconds and makes the display unusable. Therefore only update
+ * the offset if it changes.
*/
if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
ast_set_offset_reg(ast, fb);
- if (!old_fb) {
- ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
- }
+}
+
+static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_device *ast = to_ast_device(plane->dev);
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+
+ /*
+ * Some BMCs stop scanning out the video signal after the driver
+ * reprogrammed the scanout address. This stalls display
+ * output for several seconds and makes the display unusable.
+ * Therefore only reprogram the address after enabling the plane.
+ */
+ ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
}
@@ -698,6 +708,7 @@ static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
+ .atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
};
@@ -708,13 +719,13 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static int ast_primary_plane_init(struct ast_private *ast)
+static int ast_primary_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram;
- u64 offset = ast->vram_base;
+ u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
@@ -801,7 +812,7 @@ static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, i
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
-static void ast_set_cursor_base(struct ast_private *ast, u64 address)
+static void ast_set_cursor_base(struct ast_device *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
@@ -812,7 +823,7 @@ static void ast_set_cursor_base(struct ast_private *ast, u64 address)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
-static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
+static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
@@ -828,7 +839,7 @@ static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
-static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled)
+static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
{
static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
AST_IO_VGACRCB_HWC_ENABLED);
@@ -877,7 +888,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
struct iosys_map src_map = shadow_plane_state->data[0];
struct drm_rect damage;
const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -932,7 +943,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
ast_set_cursor_enabled(ast, false);
}
@@ -951,7 +962,7 @@ static const struct drm_plane_funcs ast_cursor_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static int ast_cursor_plane_init(struct ast_private *ast)
+static int ast_cursor_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
@@ -972,7 +983,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
return -ENOMEM;
vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_base + ast->vram_fb_available - size;
+ offset = ast->vram_fb_available - size;
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs,
@@ -996,7 +1007,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
@@ -1053,7 +1064,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
enum drm_mode_status status;
uint32_t jtemp;
@@ -1178,7 +1189,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@@ -1203,7 +1214,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
@@ -1225,7 +1236,7 @@ static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -1313,7 +1324,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_crtc *crtc = &ast->crtc;
int ret;
@@ -1339,7 +1350,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
struct drm_device *dev = connector->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1412,7 +1423,7 @@ static int ast_vga_connector_init(struct drm_device *dev,
return 0;
}
-static int ast_vga_output_init(struct ast_private *ast)
+static int ast_vga_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1445,7 +1456,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
{
struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
struct drm_device *dev = connector->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1518,7 +1529,7 @@ static int ast_sil164_connector_init(struct drm_device *dev,
return 0;
}
-static int ast_sil164_output_init(struct ast_private *ast)
+static int ast_sil164_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1605,7 +1616,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
-static int ast_dp501_output_init(struct ast_private *ast)
+static int ast_dp501_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1692,7 +1703,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
-static int ast_astdp_output_init(struct ast_private *ast)
+static int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1722,7 +1733,7 @@ static int ast_astdp_output_init(struct ast_private *ast)
static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(state->dev);
+ struct ast_device *ast = to_ast_device(state->dev);
/*
* Concurrent operations could possibly trigger a call to
@@ -1743,7 +1754,7 @@ static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
unsigned long fbsize, fbpages, max_fbpages;
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
@@ -1764,7 +1775,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int ast_mode_config_init(struct ast_private *ast)
+int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
int ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 82fd3c8adee1..71bb36b865fd 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -39,7 +39,7 @@ static void ast_post_chip_2500(struct drm_device *dev);
void ast_enable_vga(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
@@ -47,7 +47,7 @@ void ast_enable_vga(struct drm_device *dev)
void ast_enable_mmio(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
@@ -55,7 +55,7 @@ void ast_enable_mmio(struct drm_device *dev)
bool ast_is_vga_enabled(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 ch;
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
@@ -70,7 +70,7 @@ static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -110,7 +110,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
-u32 ast_mindwm(struct ast_private *ast, u32 r)
+u32 ast_mindwm(struct ast_device *ast, u32 r)
{
uint32_t data;
@@ -123,7 +123,7 @@ u32 ast_mindwm(struct ast_private *ast, u32 r)
return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
-void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
{
uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
@@ -162,7 +162,7 @@ static const u32 pattern_AST2150[14] = {
0x20F050E0
};
-static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -192,7 +192,7 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
}
#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -212,7 +212,7 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
}
#endif
-static int cbrtest_ast2150(struct ast_private *ast)
+static int cbrtest_ast2150(struct ast_device *ast)
{
int i;
@@ -222,7 +222,7 @@ static int cbrtest_ast2150(struct ast_private *ast)
return 1;
}
-static int cbrscan_ast2150(struct ast_private *ast, int busw)
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
{
u32 patcnt, loop;
@@ -239,7 +239,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw)
}
-static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
{
u32 dll_min[4], dll_max[4], dlli, data, passcnt;
@@ -273,7 +273,7 @@ cbr_start:
static void ast_init_dram_reg(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
@@ -366,7 +366,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
@@ -449,7 +449,7 @@ static const u32 pattern[8] = {
0x7C61D253
};
-static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
+static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -469,7 +469,7 @@ static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -490,32 +490,32 @@ static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
}
-static bool mmc_test_burst(struct ast_private *ast, u32 datagen)
+static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
-static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen)
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x41);
}
-static bool mmc_test_single(struct ast_private *ast, u32 datagen)
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc5);
}
-static u32 mmc_test_single2(struct ast_private *ast, u32 datagen)
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x05);
}
-static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen)
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0x85);
}
-static int cbr_test(struct ast_private *ast)
+static int cbr_test(struct ast_device *ast)
{
u32 data;
int i;
@@ -534,7 +534,7 @@ static int cbr_test(struct ast_private *ast)
return 1;
}
-static int cbr_scan(struct ast_private *ast)
+static int cbr_scan(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -555,7 +555,7 @@ static int cbr_scan(struct ast_private *ast)
return data2;
}
-static u32 cbr_test2(struct ast_private *ast)
+static u32 cbr_test2(struct ast_device *ast)
{
u32 data;
@@ -569,7 +569,7 @@ static u32 cbr_test2(struct ast_private *ast)
return ~data & 0xffff;
}
-static u32 cbr_scan2(struct ast_private *ast)
+static u32 cbr_scan2(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -590,7 +590,7 @@ static u32 cbr_scan2(struct ast_private *ast)
return data2;
}
-static bool cbr_test3(struct ast_private *ast)
+static bool cbr_test3(struct ast_device *ast)
{
if (!mmc_test_burst(ast, 0))
return false;
@@ -599,7 +599,7 @@ static bool cbr_test3(struct ast_private *ast)
return true;
}
-static bool cbr_scan3(struct ast_private *ast)
+static bool cbr_scan3(struct ast_device *ast)
{
u32 patcnt, loop;
@@ -615,7 +615,7 @@ static bool cbr_scan3(struct ast_private *ast)
return true;
}
-static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
bool status = false;
@@ -714,7 +714,7 @@ FINETUNE_DONE:
return status;
} /* finetuneDQI_L */
-static void finetuneDQSI(struct ast_private *ast)
+static void finetuneDQSI(struct ast_device *ast)
{
u32 dlli, dqsip, dqidly;
u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
@@ -804,7 +804,7 @@ static void finetuneDQSI(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
}
-static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
bool status = false;
@@ -860,7 +860,7 @@ CBR_DONE2:
return status;
} /* CBRDLL2 */
-static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1102,7 +1102,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
-static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1225,7 +1225,7 @@ ddr3_init_start:
}
-static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1472,7 +1472,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
}
-static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1600,7 +1600,7 @@ ddr2_init_start:
static void ast_post_chip_2300(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct ast2300_dram_param param;
u32 temp;
u8 reg;
@@ -1681,7 +1681,7 @@ static void ast_post_chip_2300(struct drm_device *dev)
} while ((reg & 0x40) == 0);
}
-static bool cbr_test_2500(struct ast_private *ast)
+static bool cbr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1692,7 +1692,7 @@ static bool cbr_test_2500(struct ast_private *ast)
return true;
}
-static bool ddr_test_2500(struct ast_private *ast)
+static bool ddr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1709,7 +1709,7 @@ static bool ddr_test_2500(struct ast_private *ast)
return true;
}
-static void ddr_init_common_2500(struct ast_private *ast)
+static void ddr_init_common_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
@@ -1732,7 +1732,7 @@ static void ddr_init_common_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
}
-static void ddr_phy_init_2500(struct ast_private *ast)
+static void ddr_phy_init_2500(struct ast_device *ast)
{
u32 data, pass, timecnt;
@@ -1766,7 +1766,7 @@ static void ddr_phy_init_2500(struct ast_private *ast)
* 4Gb : 0x80000000 ~ 0x9FFFFFFF
* 8Gb : 0x80000000 ~ 0xBFFFFFFF
*/
-static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
{
u32 reg_04, reg_14;
@@ -1797,7 +1797,7 @@ static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
ast_moutdwm(ast, 0x1E6E0014, reg_14);
}
-static void enable_cache_2500(struct ast_private *ast)
+static void enable_cache_2500(struct ast_device *ast)
{
u32 reg_04, data;
@@ -1810,7 +1810,7 @@ static void enable_cache_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
}
-static void set_mpll_2500(struct ast_private *ast)
+static void set_mpll_2500(struct ast_device *ast)
{
u32 addr, data, param;
@@ -1837,7 +1837,7 @@ static void set_mpll_2500(struct ast_private *ast)
udelay(100);
}
-static void reset_mmc_2500(struct ast_private *ast)
+static void reset_mmc_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E78505C, 0x00000004);
ast_moutdwm(ast, 0x1E785044, 0x00000001);
@@ -1848,7 +1848,7 @@ static void reset_mmc_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
}
-static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
@@ -1892,7 +1892,7 @@ static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
-static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
u32 data, data2, pass, retrycnt;
u32 ddr_vref, phy_vref;
@@ -2002,7 +2002,7 @@ static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
-static bool ast_dram_init_2500(struct ast_private *ast)
+static bool ast_dram_init_2500(struct ast_device *ast)
{
u32 data;
u32 max_tries = 5;
@@ -2030,7 +2030,7 @@ static bool ast_dram_init_2500(struct ast_private *ast)
return true;
}
-void ast_patch_ahb_2500(struct ast_private *ast)
+void ast_patch_ahb_2500(struct ast_device *ast)
{
u32 data;
@@ -2066,7 +2066,7 @@ void ast_patch_ahb_2500(struct ast_private *ast)
void ast_post_chip_2500(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 temp;
u8 reg;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index a2bb5b916235..29603561d501 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -760,7 +760,7 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(ddev, 24);
+ drm_fbdev_dma_setup(ddev, 24);
return 0;
@@ -784,7 +784,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -815,10 +814,10 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
return drm_atomic_helper_resume(drm_dev, dc->suspend.state);
}
-#endif
-static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
- atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
+ atmel_hlcdc_dc_drm_suspend,
+ atmel_hlcdc_dc_drm_resume);
static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
{ .compatible = "atmel,hlcdc-display-controller" },
@@ -830,7 +829,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.remove = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
- .pm = &atmel_hlcdc_dc_drm_pm_ops,
+ .pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 57946d80b02d..f076a09afac0 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -15,17 +15,6 @@ config DRM_PANEL_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
-config DRM_CDNS_DSI
- tristate "Cadence DPI/DSI bridge"
- select DRM_KMS_HELPER
- select DRM_MIPI_DSI
- select DRM_PANEL_BRIDGE
- select GENERIC_PHY_MIPI_DPHY
- depends on OF
- help
- Support Cadence DPI to DSI bridge. This is an internal
- bridge and is meant to be directly embedded in a SoC.
-
config DRM_CHIPONE_ICN6211
tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge"
depends on OF
@@ -231,6 +220,18 @@ config DRM_PARADE_PS8640
The PS8640 is a high-performance and low-power
MIPI DSI to eDP converter
+config DRM_SAMSUNG_DSIM
+ tristate "Samsung MIPI DSIM bridge driver"
+ depends on COMMON_CLK
+ depends on OF && HAS_IOMEM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ help
+ The Samsung MIPI DSIM bridge controller driver.
+ This MIPI DSIM bridge can be found on Exynos SoCs and
+ NXP's i.MX8M Mini/Nano.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
@@ -337,7 +338,7 @@ config DRM_TI_DLPC3433
input that produces a DMD output in RGB565, RGB666, RGB888
formats.
- It supports upto 720p resolution with 60 and 120 Hz refresh
+ It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
config DRM_TI_TFP410
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 1884803c6860..2b892b7ed59e 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
@@ -15,6 +14,7 @@ obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
+obj-$(CONFIG_DRM_SAMSUNG_DSIM) += samsung-dsim.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index e7a6e456ed0d..ddceafa7b637 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1185,8 +1185,9 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+static int adv7511_probe(struct i2c_client *i2c)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
struct adv7511_link_config link_config;
struct adv7511 *adv7511;
struct device *dev = &i2c->dev;
@@ -1392,7 +1393,7 @@ static struct i2c_driver adv7511_driver = {
.of_match_table = adv7511_of_ids,
},
.id_table = adv7511_i2c_ids,
- .probe = adv7511_probe,
+ .probe_new = adv7511_probe,
.remove = adv7511_remove,
};
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index fdfeadcefe80..7e3e56441aed 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -103,22 +103,19 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode)
{
- int lanes;
+ unsigned long max_lane_freq;
struct mipi_dsi_device *dsi = adv->dsi;
+ u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- if (mode->clock > 80000)
- lanes = 4;
- else
- lanes = 3;
-
- /*
- * TODO: add support for dynamic switching of lanes
- * by using the bridge pre_enable() op . Till then filter
- * out the modes which shall need different number of lanes
- * than what was configured in the device tree.
- */
- if (lanes != dsi->lanes)
- return MODE_BAD;
+ /* Check max clock for either ADV7533 or ADV7535 */
+ if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500))
+ return MODE_CLOCK_HIGH;
+
+ /* Check max clock for each lane */
+ max_lane_freq = (adv->type == ADV7533 ? 800000 : 891000);
+
+ if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes)
+ return MODE_CLOCK_HIGH;
return MODE_OK;
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 660a54857929..3577c532abb4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -22,7 +22,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -692,8 +691,7 @@ static bool anx6345_get_chip_id(struct anx6345 *anx6345)
return false;
}
-static int anx6345_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx6345_i2c_probe(struct i2c_client *client)
{
struct anx6345 *anx6345;
struct device *dev;
@@ -817,7 +815,7 @@ static struct i2c_driver anx6345_driver = {
.name = "anx6345",
.of_match_table = of_match_ptr(anx6345_match_table),
},
- .probe = anx6345_i2c_probe,
+ .probe_new = anx6345_i2c_probe,
.remove = anx6345_i2c_remove,
.id_table = anx6345_id,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 5997049fde5b..a3a38bbe2786 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1214,8 +1214,7 @@ static const u16 anx78xx_chipid_list[] = {
0x7818,
};
-static int anx78xx_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx78xx_i2c_probe(struct i2c_client *client)
{
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
@@ -1390,7 +1389,7 @@ static struct i2c_driver anx78xx_driver = {
.name = "anx7814",
.of_match_table = of_match_ptr(anx78xx_match_table),
},
- .probe = anx78xx_i2c_probe,
+ .probe_new = anx78xx_i2c_probe,
.remove = anx78xx_i2c_remove,
.id_table = anx78xx_id,
};
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index b0ff1ecb80a5..6846199a2ee1 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -26,7 +26,6 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1403,7 +1402,6 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx)
{
ctx->hpd_status = 0;
ctx->hpd_high_cnt = 0;
- ctx->display_timing_valid = 0;
}
static void anx7625_start_dp_work(struct anx7625_data *ctx)
@@ -2562,8 +2560,7 @@ static void anx7625_runtime_disable(void *data)
pm_runtime_disable(data);
}
-static int anx7625_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx7625_i2c_probe(struct i2c_client *client)
{
struct anx7625_data *platform;
struct anx7625_platform_data *pdata;
@@ -2756,7 +2753,7 @@ static struct i2c_driver anx7625_driver = {
.of_match_table = anx_match_table,
.pm = &anx7625_pm_ops,
},
- .probe = anx7625_i2c_probe,
+ .probe_new = anx7625_i2c_probe,
.remove = anx7625_i2c_remove,
.id_table = anx7625_id,
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index 1d06182bea71..ec35215a2003 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -1,4 +1,25 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CDNS_DSI
+ tristate "Cadence DPI/DSI bridge"
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ select GENERIC_PHY_MIPI_DPHY
+ depends on OF
+ help
+ Support Cadence DPI to DSI bridge. This is an internal
+ bridge and is meant to be directly embedded in a SoC.
+
+if DRM_CDNS_DSI
+
+config DRM_CDNS_DSI_J721E
+ bool "J721E Cadence DSI wrapper support"
+ default y
+ help
+ Support J721E Cadence DSI wrapper. The wrapper manages
+ the routing of the DSS DPI signal to the Cadence DSI.
+endif
+
config DRM_CDNS_MHDP8546
tristate "Cadence DPI/DP bridge"
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile
index 4d2db8df1bc6..c95fd5b81d13 100644
--- a/drivers/gpu/drm/bridge/cadence/Makefile
+++ b/drivers/gpu/drm/bridge/cadence/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+cdns-dsi-y := cdns-dsi-core.o
+cdns-dsi-$(CONFIG_DRM_CDNS_DSI_J721E) += cdns-dsi-j721e.o
obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o
cdns-mhdp8546-y := cdns-mhdp8546-core.o cdns-mhdp8546-hdcp.o
cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index 20bece84ff8c..f50d65f54314 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -6,10 +6,7 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
@@ -18,14 +15,19 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
+#include "cdns-dsi-core.h"
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+#include "cdns-dsi-j721e.h"
+#endif
+
#define IP_CONF 0x0
#define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26)
#define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21)
@@ -424,48 +426,6 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
-struct cdns_dsi_output {
- struct mipi_dsi_device *dev;
- struct drm_panel *panel;
- struct drm_bridge *bridge;
- union phy_configure_opts phy_opts;
-};
-
-enum cdns_dsi_input_id {
- CDNS_SDI_INPUT,
- CDNS_DPI_INPUT,
- CDNS_DSC_INPUT,
-};
-
-struct cdns_dsi_cfg {
- unsigned int hfp;
- unsigned int hsa;
- unsigned int hbp;
- unsigned int hact;
- unsigned int htotal;
-};
-
-struct cdns_dsi_input {
- enum cdns_dsi_input_id id;
- struct drm_bridge bridge;
-};
-
-struct cdns_dsi {
- struct mipi_dsi_host base;
- void __iomem *regs;
- struct cdns_dsi_input input;
- struct cdns_dsi_output output;
- unsigned int direct_cmd_fifo_depth;
- unsigned int rx_fifo_depth;
- struct completion direct_cmd_comp;
- struct clk *dsi_p_clk;
- struct reset_control *dsi_p_rst;
- struct clk *dsi_sys_clk;
- bool link_initialized;
- bool phy_initialized;
- struct phy *dphy;
-};
-
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -709,6 +669,10 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
writel(val, dsi->regs + MCTL_MAIN_EN);
+
+ if (dsi->platform_ops && dsi->platform_ops->disable)
+ dsi->platform_ops->disable(dsi);
+
pm_runtime_put(dsi->base.dev);
}
@@ -804,6 +768,9 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ if (dsi->platform_ops && dsi->platform_ops->enable)
+ dsi->platform_ops->enable(dsi);
+
mode = &bridge->encoder->crtc->state->adjusted_mode;
nlanes = output->dev->lanes;
@@ -1244,6 +1211,8 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
goto err_disable_pclk;
}
+ dsi->platform_ops = of_device_get_match_data(&pdev->dev);
+
val = readl(dsi->regs + IP_CONF);
dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
@@ -1279,14 +1248,27 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
dsi->base.dev = &pdev->dev;
dsi->base.ops = &cdns_dsi_ops;
+ if (dsi->platform_ops && dsi->platform_ops->init) {
+ ret = dsi->platform_ops->init(dsi);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "platform initialization failed: %d\n",
+ ret);
+ goto err_disable_runtime_pm;
+ }
+ }
+
ret = mipi_dsi_host_register(&dsi->base);
if (ret)
- goto err_disable_runtime_pm;
+ goto err_deinit_platform;
clk_disable_unprepare(dsi->dsi_p_clk);
return 0;
+err_deinit_platform:
+ if (dsi->platform_ops && dsi->platform_ops->deinit)
+ dsi->platform_ops->deinit(dsi);
+
err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
@@ -1296,25 +1278,30 @@ err_disable_pclk:
return ret;
}
-static int cdns_dsi_drm_remove(struct platform_device *pdev)
+static void cdns_dsi_drm_remove(struct platform_device *pdev)
{
struct cdns_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->base);
- pm_runtime_disable(&pdev->dev);
- return 0;
+ if (dsi->platform_ops && dsi->platform_ops->deinit)
+ dsi->platform_ops->deinit(dsi);
+
+ pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id cdns_dsi_of_match[] = {
{ .compatible = "cdns,dsi" },
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+ { .compatible = "ti,j721e-dsi", .data = &dsi_ti_j721e_ops, },
+#endif
{ },
};
MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
static struct platform_driver cdns_dsi_platform_driver = {
.probe = cdns_dsi_drm_probe,
- .remove = cdns_dsi_drm_remove,
+ .remove_new = cdns_dsi_drm_remove,
.driver = {
.name = "cdns-dsi",
.of_match_table = cdns_dsi_of_match,
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
new file mode 100644
index 000000000000..ca7ea2da635c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright: 2017 Cadence Design Systems, Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef __CDNS_DSI_H__
+#define __CDNS_DSI_H__
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/phy/phy.h>
+
+struct clk;
+struct reset_control;
+
+struct cdns_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ union phy_configure_opts phy_opts;
+};
+
+enum cdns_dsi_input_id {
+ CDNS_SDI_INPUT,
+ CDNS_DPI_INPUT,
+ CDNS_DSC_INPUT,
+};
+
+struct cdns_dsi_cfg {
+ unsigned int hfp;
+ unsigned int hsa;
+ unsigned int hbp;
+ unsigned int hact;
+ unsigned int htotal;
+};
+
+struct cdns_dsi_input {
+ enum cdns_dsi_input_id id;
+ struct drm_bridge bridge;
+};
+
+struct cdns_dsi;
+
+/**
+ * struct cdns_dsi_platform_ops - CDNS DSI Platform operations
+ * @init: Called in the CDNS DSI probe
+ * @deinit: Called in the CDNS DSI remove
+ * @enable: Called at the beginning of CDNS DSI bridge enable
+ * @disable: Called at the end of CDNS DSI bridge disable
+ */
+struct cdns_dsi_platform_ops {
+ int (*init)(struct cdns_dsi *dsi);
+ void (*deinit)(struct cdns_dsi *dsi);
+ void (*enable)(struct cdns_dsi *dsi);
+ void (*disable)(struct cdns_dsi *dsi);
+};
+
+struct cdns_dsi {
+ struct mipi_dsi_host base;
+ void __iomem *regs;
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+ void __iomem *j721e_regs;
+#endif
+ const struct cdns_dsi_platform_ops *platform_ops;
+ struct cdns_dsi_input input;
+ struct cdns_dsi_output output;
+ unsigned int direct_cmd_fifo_depth;
+ unsigned int rx_fifo_depth;
+ struct completion direct_cmd_comp;
+ struct clk *dsi_p_clk;
+ struct reset_control *dsi_p_rst;
+ struct clk *dsi_sys_clk;
+ bool link_initialized;
+ bool phy_initialized;
+ struct phy *dphy;
+};
+
+#endif /* !__CDNS_DSI_H__ */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
new file mode 100644
index 000000000000..b654d4b3cb5c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI j721e Cadence DSI wrapper
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rahul T R <r-ravikumar@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "cdns-dsi-j721e.h"
+
+#define DSI_WRAP_REVISION 0x0
+#define DSI_WRAP_DPI_CONTROL 0x4
+#define DSI_WRAP_DSC_CONTROL 0x8
+#define DSI_WRAP_DPI_SECURE 0xc
+#define DSI_WRAP_DSI_0_ASF_STATUS 0x10
+
+#define DSI_WRAP_DPI_0_EN BIT(0)
+#define DSI_WRAP_DSI2_MUX_SEL BIT(4)
+
+static int cdns_dsi_j721e_init(struct cdns_dsi *dsi)
+{
+ struct platform_device *pdev = to_platform_device(dsi->base.dev);
+
+ dsi->j721e_regs = devm_platform_ioremap_resource(pdev, 1);
+ return PTR_ERR_OR_ZERO(dsi->j721e_regs);
+}
+
+static void cdns_dsi_j721e_enable(struct cdns_dsi *dsi)
+{
+ /*
+ * Enable DPI0 as its input. DSS0 DPI2 is connected
+ * to DSI DPI0. This is the only supported configuration on
+ * J721E.
+ */
+ writel(DSI_WRAP_DPI_0_EN, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL);
+}
+
+static void cdns_dsi_j721e_disable(struct cdns_dsi *dsi)
+{
+ /* Put everything to defaults */
+ writel(0, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL);
+}
+
+const struct cdns_dsi_platform_ops dsi_ti_j721e_ops = {
+ .init = cdns_dsi_j721e_init,
+ .enable = cdns_dsi_j721e_enable,
+ .disable = cdns_dsi_j721e_disable,
+};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h
new file mode 100644
index 000000000000..275e5e8e7583
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI j721e Cadence DSI wrapper
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rahul T R <r-ravikumar@ti.com>
+ */
+
+#ifndef __CDNS_DSI_J721E_H__
+#define __CDNS_DSI_J721E_H__
+
+#include "cdns-dsi-core.h"
+
+extern const struct cdns_dsi_platform_ops dsi_ti_j721e_ops;
+
+#endif /* !__CDNS_DSI_J721E_H__ */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 31442a922502..f6822dfa3805 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -43,7 +43,6 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index bf920c3503aa..0e37840cd7a8 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -740,8 +740,7 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int chipone_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int chipone_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct chipone *icn;
@@ -796,7 +795,7 @@ static struct i2c_device_id chipone_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, chipone_i2c_id);
static struct i2c_driver chipone_i2c_driver = {
- .probe = chipone_i2c_probe,
+ .probe_new = chipone_i2c_probe,
.id_table = chipone_i2c_id,
.driver = {
.name = "chipone-icn6211-i2c",
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index b94f39a86846..339b759e4c81 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -528,8 +528,7 @@ static const struct regmap_config ch7033_regmap_config = {
.max_register = 0x7f,
};
-static int ch7033_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ch7033_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv;
@@ -604,7 +603,7 @@ static const struct i2c_device_id ch7033_ids[] = {
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
static struct i2c_driver ch7033_driver = {
- .probe = ch7033_probe,
+ .probe_new = ch7033_probe,
.remove = ch7033_remove,
.driver = {
.name = "ch7033",
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 9a12449ad7b8..56ae511367b1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -271,12 +271,9 @@ static int display_connector_probe(struct platform_device *pdev)
type == DRM_MODE_CONNECTOR_DisplayPort) {
conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
GPIOD_IN);
- if (IS_ERR(conn->hpd_gpio)) {
- if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Unable to retrieve HPD GPIO\n");
- return PTR_ERR(conn->hpd_gpio);
- }
+ if (IS_ERR(conn->hpd_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(conn->hpd_gpio),
+ "Unable to retrieve HPD GPIO\n");
conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
} else {
@@ -382,7 +379,7 @@ static int display_connector_probe(struct platform_device *pdev)
return 0;
}
-static int display_connector_remove(struct platform_device *pdev)
+static void display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
@@ -396,8 +393,6 @@ static int display_connector_remove(struct platform_device *pdev)
if (!IS_ERR(conn->bridge.ddc))
i2c_put_adapter(conn->bridge.ddc);
-
- return 0;
}
static const struct of_device_id display_connector_match[] = {
@@ -426,7 +421,7 @@ MODULE_DEVICE_TABLE(of, display_connector_match);
static struct platform_driver display_connector_driver = {
.probe = display_connector_probe,
- .remove = display_connector_remove,
+ .remove_new = display_connector_remove,
.driver = {
.name = "display-connector",
.of_match_table = display_connector_match,
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index f9e0f8d99268..682623369498 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -18,7 +18,6 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
-#define LDB_CTRL 0x5c
#define LDB_CTRL_CH0_ENABLE BIT(0)
#define LDB_CTRL_CH0_DI_SELECT BIT(1)
#define LDB_CTRL_CH1_ENABLE BIT(2)
@@ -35,9 +34,13 @@
#define LDB_CTRL_ASYNC_FIFO_ENABLE BIT(24)
#define LDB_CTRL_ASYNC_FIFO_THRESHOLD_MASK GENMASK(27, 25)
-#define LVDS_CTRL 0x128
#define LVDS_CTRL_CH0_EN BIT(0)
#define LVDS_CTRL_CH1_EN BIT(1)
+/*
+ * The LVDS_CTRL_LVDS_EN bit is poorly named in the i.MX93 reference manual.
+ * Clear it to enable LVDS and set it to disable LVDS.
+ */
+#define LVDS_CTRL_LVDS_EN BIT(1)
#define LVDS_CTRL_VBG_EN BIT(2)
#define LVDS_CTRL_HS_EN BIT(3)
#define LVDS_CTRL_PRE_EMPH_EN BIT(4)
@@ -52,20 +55,58 @@
#define LVDS_CTRL_VBG_ADJ(n) (((n) & 0x7) << 17)
#define LVDS_CTRL_VBG_ADJ_MASK GENMASK(19, 17)
+enum fsl_ldb_devtype {
+ IMX8MP_LDB,
+ IMX93_LDB,
+};
+
+struct fsl_ldb_devdata {
+ u32 ldb_ctrl;
+ u32 lvds_ctrl;
+ bool lvds_en_bit;
+};
+
+static const struct fsl_ldb_devdata fsl_ldb_devdata[] = {
+ [IMX8MP_LDB] = {
+ .ldb_ctrl = 0x5c,
+ .lvds_ctrl = 0x128,
+ },
+ [IMX93_LDB] = {
+ .ldb_ctrl = 0x20,
+ .lvds_ctrl = 0x24,
+ .lvds_en_bit = true,
+ },
+};
+
struct fsl_ldb {
struct device *dev;
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
struct clk *clk;
struct regmap *regmap;
- bool lvds_dual_link;
+ const struct fsl_ldb_devdata *devdata;
+ bool ch0_enabled;
+ bool ch1_enabled;
};
+static bool fsl_ldb_is_dual(const struct fsl_ldb *fsl_ldb)
+{
+ return (fsl_ldb->ch0_enabled && fsl_ldb->ch1_enabled);
+}
+
static inline struct fsl_ldb *to_fsl_ldb(struct drm_bridge *bridge)
{
return container_of(bridge, struct fsl_ldb, bridge);
}
+static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
+{
+ if (fsl_ldb_is_dual(fsl_ldb))
+ return clock * 3500;
+ else
+ return clock * 7000;
+}
+
static int fsl_ldb_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -85,6 +126,8 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
const struct drm_display_mode *mode;
struct drm_connector *connector;
struct drm_crtc *crtc;
+ unsigned long configured_link_freq;
+ unsigned long requested_link_freq;
bool lvds_format_24bpp;
bool lvds_format_jeida;
u32 reg;
@@ -128,51 +171,48 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
mode = &crtc_state->adjusted_mode;
- if (fsl_ldb->lvds_dual_link)
- clk_set_rate(fsl_ldb->clk, mode->clock * 3500);
- else
- clk_set_rate(fsl_ldb->clk, mode->clock * 7000);
+ requested_link_freq = fsl_ldb_link_frequency(fsl_ldb, mode->clock);
+ clk_set_rate(fsl_ldb->clk, requested_link_freq);
+
+ configured_link_freq = clk_get_rate(fsl_ldb->clk);
+ if (configured_link_freq != requested_link_freq)
+ dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ configured_link_freq,
+ requested_link_freq);
+
clk_prepare_enable(fsl_ldb->clk);
/* Program LDB_CTRL */
- reg = LDB_CTRL_CH0_ENABLE;
+ reg = (fsl_ldb->ch0_enabled ? LDB_CTRL_CH0_ENABLE : 0) |
+ (fsl_ldb->ch1_enabled ? LDB_CTRL_CH1_ENABLE : 0) |
+ (fsl_ldb_is_dual(fsl_ldb) ? LDB_CTRL_SPLIT_MODE : 0);
- if (fsl_ldb->lvds_dual_link)
- reg |= LDB_CTRL_CH1_ENABLE | LDB_CTRL_SPLIT_MODE;
+ if (lvds_format_24bpp)
+ reg |= (fsl_ldb->ch0_enabled ? LDB_CTRL_CH0_DATA_WIDTH : 0) |
+ (fsl_ldb->ch1_enabled ? LDB_CTRL_CH1_DATA_WIDTH : 0);
- if (lvds_format_24bpp) {
- reg |= LDB_CTRL_CH0_DATA_WIDTH;
- if (fsl_ldb->lvds_dual_link)
- reg |= LDB_CTRL_CH1_DATA_WIDTH;
- }
+ if (lvds_format_jeida)
+ reg |= (fsl_ldb->ch0_enabled ? LDB_CTRL_CH0_BIT_MAPPING : 0) |
+ (fsl_ldb->ch1_enabled ? LDB_CTRL_CH1_BIT_MAPPING : 0);
- if (lvds_format_jeida) {
- reg |= LDB_CTRL_CH0_BIT_MAPPING;
- if (fsl_ldb->lvds_dual_link)
- reg |= LDB_CTRL_CH1_BIT_MAPPING;
- }
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ reg |= (fsl_ldb->ch0_enabled ? LDB_CTRL_DI0_VSYNC_POLARITY : 0) |
+ (fsl_ldb->ch1_enabled ? LDB_CTRL_DI1_VSYNC_POLARITY : 0);
- if (mode->flags & DRM_MODE_FLAG_PVSYNC) {
- reg |= LDB_CTRL_DI0_VSYNC_POLARITY;
- if (fsl_ldb->lvds_dual_link)
- reg |= LDB_CTRL_DI1_VSYNC_POLARITY;
- }
-
- regmap_write(fsl_ldb->regmap, LDB_CTRL, reg);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->ldb_ctrl, reg);
/* Program LVDS_CTRL */
reg = LVDS_CTRL_CC_ADJ(2) | LVDS_CTRL_PRE_EMPH_EN |
LVDS_CTRL_PRE_EMPH_ADJ(3) | LVDS_CTRL_VBG_EN;
- regmap_write(fsl_ldb->regmap, LVDS_CTRL, reg);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);
/* Wait for VBG to stabilize. */
usleep_range(15, 20);
- reg |= LVDS_CTRL_CH0_EN;
- if (fsl_ldb->lvds_dual_link)
- reg |= LVDS_CTRL_CH1_EN;
+ reg |= (fsl_ldb->ch0_enabled ? LVDS_CTRL_CH0_EN : 0) |
+ (fsl_ldb->ch1_enabled ? LVDS_CTRL_CH1_EN : 0);
- regmap_write(fsl_ldb->regmap, LVDS_CTRL, reg);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);
}
static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
@@ -180,9 +220,14 @@ static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- /* Stop both channels. */
- regmap_write(fsl_ldb->regmap, LVDS_CTRL, 0);
- regmap_write(fsl_ldb->regmap, LDB_CTRL, 0);
+ /* Stop channel(s). */
+ if (fsl_ldb->devdata->lvds_en_bit)
+ /* Set LVDS_CTRL_LVDS_EN bit to disable. */
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl,
+ LVDS_CTRL_LVDS_EN);
+ else
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, 0);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->ldb_ctrl, 0);
clk_disable_unprepare(fsl_ldb->clk);
}
@@ -218,7 +263,7 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- if (mode->clock > (fsl_ldb->lvds_dual_link ? 160000 : 80000))
+ if (mode->clock > (fsl_ldb_is_dual(fsl_ldb) ? 160000 : 80000))
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -239,7 +284,7 @@ static int fsl_ldb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *panel_node;
- struct device_node *port1, *port2;
+ struct device_node *remote1, *remote2;
struct drm_panel *panel;
struct fsl_ldb *fsl_ldb;
int dual_link;
@@ -248,6 +293,10 @@ static int fsl_ldb_probe(struct platform_device *pdev)
if (!fsl_ldb)
return -ENOMEM;
+ fsl_ldb->devdata = of_device_get_match_data(dev);
+ if (!fsl_ldb->devdata)
+ return -EINVAL;
+
fsl_ldb->dev = &pdev->dev;
fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
@@ -260,10 +309,23 @@ static int fsl_ldb_probe(struct platform_device *pdev)
if (IS_ERR(fsl_ldb->regmap))
return PTR_ERR(fsl_ldb->regmap);
- /* Locate the panel DT node. */
- panel_node = of_graph_get_remote_node(dev->of_node, 1, 0);
- if (!panel_node)
- return -ENXIO;
+ /* Locate the remote ports and the panel node */
+ remote1 = of_graph_get_remote_node(dev->of_node, 1, 0);
+ remote2 = of_graph_get_remote_node(dev->of_node, 2, 0);
+ fsl_ldb->ch0_enabled = (remote1 != NULL);
+ fsl_ldb->ch1_enabled = (remote2 != NULL);
+ panel_node = of_node_get(remote1 ? remote1 : remote2);
+ of_node_put(remote1);
+ of_node_put(remote2);
+
+ if (!fsl_ldb->ch0_enabled && !fsl_ldb->ch1_enabled) {
+ of_node_put(panel_node);
+ return dev_err_probe(dev, -ENXIO, "No panel node found");
+ }
+
+ dev_dbg(dev, "Using %s\n",
+ fsl_ldb_is_dual(fsl_ldb) ? "dual-link mode" :
+ fsl_ldb->ch0_enabled ? "channel 0" : "channel 1");
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
@@ -274,20 +336,26 @@ static int fsl_ldb_probe(struct platform_device *pdev)
if (IS_ERR(fsl_ldb->panel_bridge))
return PTR_ERR(fsl_ldb->panel_bridge);
- /* Determine whether this is dual-link configuration */
- port1 = of_graph_get_port_by_id(dev->of_node, 1);
- port2 = of_graph_get_port_by_id(dev->of_node, 2);
- dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
- of_node_put(port1);
- of_node_put(port2);
- if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
- dev_err(dev, "LVDS channel pixel swap not supported.\n");
- return -EINVAL;
- }
+ if (fsl_ldb_is_dual(fsl_ldb)) {
+ struct device_node *port1, *port2;
+
+ port1 = of_graph_get_port_by_id(dev->of_node, 1);
+ port2 = of_graph_get_port_by_id(dev->of_node, 2);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
+ of_node_put(port1);
+ of_node_put(port2);
- if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS)
- fsl_ldb->lvds_dual_link = true;
+ if (dual_link < 0)
+ return dev_err_probe(dev, dual_link,
+ "Error getting dual link configuration\n");
+
+ /* Only DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS is supported */
+ if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
+ dev_err(dev, "LVDS channel pixel swap not supported.\n");
+ return -EINVAL;
+ }
+ }
platform_set_drvdata(pdev, fsl_ldb);
@@ -296,24 +364,25 @@ static int fsl_ldb_probe(struct platform_device *pdev)
return 0;
}
-static int fsl_ldb_remove(struct platform_device *pdev)
+static void fsl_ldb_remove(struct platform_device *pdev)
{
struct fsl_ldb *fsl_ldb = platform_get_drvdata(pdev);
drm_bridge_remove(&fsl_ldb->bridge);
-
- return 0;
}
static const struct of_device_id fsl_ldb_match[] = {
- { .compatible = "fsl,imx8mp-ldb", },
+ { .compatible = "fsl,imx8mp-ldb",
+ .data = &fsl_ldb_devdata[IMX8MP_LDB], },
+ { .compatible = "fsl,imx93-ldb",
+ .data = &fsl_ldb_devdata[IMX93_LDB], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, fsl_ldb_match);
static struct platform_driver fsl_ldb_driver = {
.probe = fsl_ldb_probe,
- .remove = fsl_ldb_remove,
+ .remove_new = fsl_ldb_remove,
.driver = {
.name = "fsl-ldb",
.of_match_table = fsl_ldb_match,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
index 178af8d2d80b..386032a02599 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
@@ -532,7 +532,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qm_ldb_remove(struct platform_device *pdev)
+static void imx8qm_ldb_remove(struct platform_device *pdev)
{
struct imx8qm_ldb *imx8qm_ldb = platform_get_drvdata(pdev);
struct ldb *ldb = &imx8qm_ldb->base;
@@ -540,8 +540,6 @@ static int imx8qm_ldb_remove(struct platform_device *pdev)
ldb_remove_bridge_helper(ldb);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev)
@@ -573,7 +571,7 @@ MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids);
static struct platform_driver imx8qm_ldb_driver = {
.probe = imx8qm_ldb_probe,
- .remove = imx8qm_ldb_remove,
+ .remove_new = imx8qm_ldb_remove,
.driver = {
.pm = &imx8qm_ldb_pm_ops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
index 63948d5d20fd..c806576b1e22 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
@@ -667,7 +667,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qxp_ldb_remove(struct platform_device *pdev)
+static void imx8qxp_ldb_remove(struct platform_device *pdev)
{
struct imx8qxp_ldb *imx8qxp_ldb = platform_get_drvdata(pdev);
struct ldb *ldb = &imx8qxp_ldb->base;
@@ -675,8 +675,6 @@ static int imx8qxp_ldb_remove(struct platform_device *pdev)
ldb_remove_bridge_helper(ldb);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev)
@@ -708,7 +706,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids);
static struct platform_driver imx8qxp_ldb_driver = {
.probe = imx8qxp_ldb_probe,
- .remove = imx8qxp_ldb_remove,
+ .remove_new = imx8qxp_ldb_remove,
.driver = {
.pm = &imx8qxp_ldb_pm_ops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 503bd8db8afe..d0868a6ac6c9 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -357,7 +357,7 @@ free_child:
return ret;
}
-static int imx8qxp_pc_bridge_remove(struct platform_device *pdev)
+static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
{
struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
struct imx8qxp_pc_channel *ch;
@@ -374,8 +374,6 @@ static int imx8qxp_pc_bridge_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev)
@@ -435,7 +433,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids);
static struct platform_driver imx8qxp_pc_bridge_driver = {
.probe = imx8qxp_pc_bridge_probe,
- .remove = imx8qxp_pc_bridge_remove,
+ .remove_new = imx8qxp_pc_bridge_remove,
.driver = {
.pm = &imx8qxp_pc_pm_ops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index 9e5f2b4dc2e5..ed8b7a4e0e11 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -313,7 +313,7 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl)
}
/* specially select the next bridge with companion PXL2DPI */
- if (of_find_property(remote, "fsl,companion-pxl2dpi", NULL))
+ if (of_property_present(remote, "fsl,companion-pxl2dpi"))
bridge_sel = ep_cnt;
ep_cnt++;
@@ -398,13 +398,11 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev)
+static void imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev)
{
struct imx8qxp_pixel_link *pl = platform_get_drvdata(pdev);
drm_bridge_remove(&pl->bridge);
-
- return 0;
}
static const struct of_device_id imx8qxp_pixel_link_dt_ids[] = {
@@ -416,7 +414,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids);
static struct platform_driver imx8qxp_pixel_link_bridge_driver = {
.probe = imx8qxp_pixel_link_bridge_probe,
- .remove = imx8qxp_pixel_link_bridge_remove,
+ .remove_new = imx8qxp_pixel_link_bridge_remove,
.driver = {
.of_match_table = imx8qxp_pixel_link_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index d0fec82f0cf8..4a886cb808ca 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -455,15 +455,13 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev)
+static void imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev)
{
struct imx8qxp_pxl2dpi *p2d = platform_get_drvdata(pdev);
drm_bridge_remove(&p2d->bridge);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id imx8qxp_pxl2dpi_dt_ids[] = {
@@ -474,7 +472,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids);
static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = {
.probe = imx8qxp_pxl2dpi_bridge_probe,
- .remove = imx8qxp_pxl2dpi_bridge_remove,
+ .remove_new = imx8qxp_pxl2dpi_bridge_remove,
.driver = {
.of_match_table = imx8qxp_pxl2dpi_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 21a9b8422bda..abaf6e23775e 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -26,7 +26,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -259,12 +258,12 @@
#define REG_AUD_INFOFRAM_SUM 0xFB
/* the following six registers are in bank1 */
-#define REG_DRV_0_DB_800_MV 0x7E
-#define REG_PRE_0_DB_800_MV 0x7F
-#define REG_PRE_3P5_DB_800_MV 0x81
-#define REG_SSC_CTRL0 0x88
-#define REG_SSC_CTRL1 0x89
-#define REG_SSC_CTRL2 0x8A
+#define REG_DRV_0_DB_800_MV 0x17E
+#define REG_PRE_0_DB_800_MV 0x17F
+#define REG_PRE_3P5_DB_800_MV 0x181
+#define REG_SSC_CTRL0 0x188
+#define REG_SSC_CTRL1 0x189
+#define REG_SSC_CTRL2 0x18A
#define RBR DP_LINK_BW_1_62
#define HBR DP_LINK_BW_2_7
@@ -412,7 +411,6 @@ struct it6505 {
* Mutex protects extcon and interrupt functions from interfering
* each other.
*/
- struct mutex irq_lock;
struct mutex extcon_lock;
struct mutex mode_lock; /* used to bridge_detect */
struct mutex aux_lock; /* used to aux data transfers */
@@ -438,6 +436,8 @@ struct it6505 {
bool powered;
bool hpd_state;
u32 afe_setting;
+ u32 max_dpi_pixel_clock;
+ u32 max_lane_count;
enum hdcp_state hdcp_status;
struct delayed_work hdcp_work;
struct work_struct hdcp_wait_ksv_list;
@@ -457,6 +457,8 @@ struct it6505 {
/* it6505 driver hold option */
bool enable_drv_hold;
+
+ struct edid *cached_edid;
};
struct it6505_step_train_para {
@@ -487,7 +489,7 @@ static const struct it6505_audio_sample_rate_map audio_sample_rate_map[] = {
};
static const struct regmap_range it6505_bridge_volatile_ranges[] = {
- { .range_min = 0, .range_max = 0xFF },
+ { .range_min = 0, .range_max = 0x1FF },
};
static const struct regmap_access_table it6505_bridge_volatile_table = {
@@ -495,11 +497,27 @@ static const struct regmap_access_table it6505_bridge_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(it6505_bridge_volatile_ranges),
};
+static const struct regmap_range_cfg it6505_regmap_banks[] = {
+ {
+ .name = "it6505",
+ .range_min = 0x00,
+ .range_max = 0x1FF,
+ .selector_reg = REG_BANK_SEL,
+ .selector_mask = 0x1,
+ .selector_shift = 0,
+ .window_start = 0x00,
+ .window_len = 0x100,
+ },
+};
+
static const struct regmap_config it6505_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &it6505_bridge_volatile_table,
.cache_type = REGCACHE_NONE,
+ .ranges = it6505_regmap_banks,
+ .num_ranges = ARRAY_SIZE(it6505_regmap_banks),
+ .max_register = 0x1FF,
};
static int it6505_read(struct it6505 *it6505, unsigned int reg_addr)
@@ -1265,7 +1283,6 @@ static void it6505_init(struct it6505 *it6505)
it6505_write(it6505, REG_TIME_STMP_CTRL,
EN_SSC_GAT | EN_ENHANCE_VID_STMP | EN_ENHANCE_AUD_STMP);
it6505_write(it6505, REG_INFOFRAME_CTRL, 0x00);
- it6505_write(it6505, REG_BANK_SEL, 0x01);
it6505_write(it6505, REG_DRV_0_DB_800_MV,
afe_setting_table[it6505->afe_setting][0]);
it6505_write(it6505, REG_PRE_0_DB_800_MV,
@@ -1275,7 +1292,6 @@ static void it6505_init(struct it6505 *it6505)
it6505_write(it6505, REG_SSC_CTRL0, 0x9E);
it6505_write(it6505, REG_SSC_CTRL1, 0x1C);
it6505_write(it6505, REG_SSC_CTRL2, 0x42);
- it6505_write(it6505, REG_BANK_SEL, 0x00);
}
static void it6505_video_disable(struct it6505 *it6505)
@@ -1463,7 +1479,8 @@ static void it6505_parse_link_capabilities(struct it6505 *it6505)
it6505->lane_count = link->num_lanes;
DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training",
it6505->lane_count);
- it6505->lane_count = min_t(int, it6505->lane_count, MAX_LANE_COUNT);
+ it6505->lane_count = min_t(int, it6505->lane_count,
+ it6505->max_lane_count);
it6505->branch_device = drm_dp_is_branch(it6505->dpcd);
DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device",
@@ -1503,11 +1520,9 @@ static void it6505_setup_ssc(struct it6505 *it6505)
it6505_set_bits(it6505, REG_TRAIN_CTRL0, SPREAD_AMP_5,
it6505->enable_ssc ? SPREAD_AMP_5 : 0x00);
if (it6505->enable_ssc) {
- it6505_write(it6505, REG_BANK_SEL, 0x01);
it6505_write(it6505, REG_SSC_CTRL0, 0x9E);
it6505_write(it6505, REG_SSC_CTRL1, 0x1C);
it6505_write(it6505, REG_SSC_CTRL2, 0x42);
- it6505_write(it6505, REG_BANK_SEL, 0x00);
it6505_write(it6505, REG_SP_CTRL0, 0x07);
it6505_write(it6505, REG_IP_CTRL1, 0x29);
it6505_write(it6505, REG_IP_CTRL2, 0x03);
@@ -2244,6 +2259,12 @@ static void it6505_plugged_status_to_codec(struct it6505 *it6505)
status == connector_status_connected);
}
+static void it6505_remove_edid(struct it6505 *it6505)
+{
+ kfree(it6505->cached_edid);
+ it6505->cached_edid = NULL;
+}
+
static int it6505_process_hpd_irq(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
@@ -2270,6 +2291,7 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
it6505_reset_logic(it6505);
it6505_int_mask_enable(it6505);
it6505_init(it6505);
+ it6505_remove_edid(it6505);
return 0;
}
@@ -2353,6 +2375,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
it6505_video_reset(it6505);
} else {
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ it6505_remove_edid(it6505);
if (it6505->hdcp_desired)
it6505_stop_hdcp(it6505);
@@ -2494,10 +2517,8 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
- mutex_lock(&it6505->irq_lock);
-
- if (it6505->enable_drv_hold || !it6505->powered)
- goto unlock;
+ if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+ return IRQ_HANDLED;
int_status[0] = it6505_read(it6505, INT_STATUS_01);
int_status[1] = it6505_read(it6505, INT_STATUS_02);
@@ -2515,16 +2536,14 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status))
irq_vec[0].handler(it6505);
- if (!it6505->hpd_state)
- goto unlock;
-
- for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
- if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
- irq_vec[i].handler(it6505);
+ if (it6505->hpd_state) {
+ for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
+ if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
+ irq_vec[i].handler(it6505);
+ }
}
-unlock:
- mutex_unlock(&it6505->irq_lock);
+ pm_runtime_put_sync(dev);
return IRQ_HANDLED;
}
@@ -2902,7 +2921,7 @@ it6505_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- if (mode->clock > DPI_PIXEL_CLK_MAX)
+ if (mode->clock > it6505->max_dpi_pixel_clock)
return MODE_CLOCK_HIGH;
it6505->video_info.clock = mode->clock;
@@ -3016,16 +3035,18 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = &it6505->client->dev;
- struct edid *edid;
- edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505);
+ if (!it6505->cached_edid) {
+ it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
+ it6505);
- if (!edid) {
- DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
- return NULL;
+ if (!it6505->cached_edid) {
+ DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
+ return NULL;
+ }
}
- return edid;
+ return drm_edid_duplicate(it6505->cached_edid);
}
static const struct drm_bridge_funcs it6505_bridge_funcs = {
@@ -3089,10 +3110,32 @@ static int it6505_init_pdata(struct it6505 *it6505)
return 0;
}
+static int it6505_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min,
+ const unsigned int max)
+{
+ int ret;
+
+ ret = of_property_count_u32_elems(endpoint, "data-lanes");
+ if (ret < 0)
+ return ret;
+
+ if (ret < min || ret > max)
+ return -EINVAL;
+
+ return ret;
+}
+
static void it6505_parse_dt(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
+ struct device_node *np = dev->of_node, *ep = NULL;
+ int len;
+ u64 link_frequencies;
+ u32 data_lanes[4];
u32 *afe_setting = &it6505->afe_setting;
+ u32 *max_lane_count = &it6505->max_lane_count;
+ u32 *max_dpi_pixel_clock = &it6505->max_dpi_pixel_clock;
it6505->lane_swap_disabled =
device_property_read_bool(dev, "no-laneswap");
@@ -3108,7 +3151,56 @@ static void it6505_parse_dt(struct it6505 *it6505)
} else {
*afe_setting = 0;
}
- DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %d", *afe_setting);
+
+ ep = of_graph_get_endpoint_by_regs(np, 1, 0);
+ of_node_put(ep);
+
+ if (ep) {
+ len = it6505_get_data_lanes_count(ep, 1, 4);
+
+ if (len > 0 && len != 3) {
+ of_property_read_u32_array(ep, "data-lanes",
+ data_lanes, len);
+ *max_lane_count = len;
+ } else {
+ *max_lane_count = MAX_LANE_COUNT;
+ dev_err(dev, "error data-lanes, use default");
+ }
+ } else {
+ *max_lane_count = MAX_LANE_COUNT;
+ dev_err(dev, "error endpoint, use default");
+ }
+
+ ep = of_graph_get_endpoint_by_regs(np, 0, 0);
+ of_node_put(ep);
+
+ if (ep) {
+ len = of_property_read_variable_u64_array(ep,
+ "link-frequencies",
+ &link_frequencies, 0,
+ 1);
+ if (len >= 0) {
+ do_div(link_frequencies, 1000);
+ if (link_frequencies > 297000) {
+ dev_err(dev,
+ "max pixel clock error, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ } else {
+ *max_dpi_pixel_clock = link_frequencies;
+ }
+ } else {
+ dev_err(dev, "error link frequencies, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ }
+ } else {
+ dev_err(dev, "error endpoint, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %u, max_lane_count: %u",
+ it6505->afe_setting, it6505->max_lane_count);
+ DRM_DEV_DEBUG_DRIVER(dev, "using max_dpi_pixel_clock: %u kHz",
+ it6505->max_dpi_pixel_clock);
}
static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
@@ -3265,8 +3357,7 @@ static void it6505_shutdown(struct i2c_client *client)
it6505_lane_off(it6505);
}
-static int it6505_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int it6505_i2c_probe(struct i2c_client *client)
{
struct it6505 *it6505;
struct device *dev = &client->dev;
@@ -3277,7 +3368,6 @@ static int it6505_i2c_probe(struct i2c_client *client,
if (!it6505)
return -ENOMEM;
- mutex_init(&it6505->irq_lock);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
mutex_init(&it6505->aux_lock);
@@ -3367,6 +3457,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
+ it6505_remove_edid(it6505);
}
static const struct i2c_device_id it6505_id[] = {
@@ -3387,7 +3478,7 @@ static struct i2c_driver it6505_i2c_driver = {
.of_match_table = it6505_of_match,
.pm = &it6505_bridge_pm_ops,
},
- .probe = it6505_i2c_probe,
+ .probe_new = it6505_i2c_probe,
.remove = it6505_i2c_remove,
.shutdown = it6505_shutdown,
.id_table = it6505_id,
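
Much of the it6505 churn above comes from putting the bank-1 registers behind a regmap_range_cfg: 0x100-0x1FF are now virtual addresses that regmap folds back into the 0x00-0xFF window by writing the page number to REG_BANK_SEL first, which is why the manual REG_BANK_SEL 0x01/0x00 writes around the AFE and SSC setup can be dropped and why REG_DRV_0_DB_800_MV and friends move from 0x7E to 0x17E. A rough sketch of the mechanism for a hypothetical two-bank device (the 0x0f selector register below is an assumption, not the it6505 layout):

#include <linux/kernel.h>
#include <linux/regmap.h>

#define FOO_BANK_SEL    0x0f    /* hypothetical bank-select register */

static const struct regmap_range_cfg foo_ranges[] = {
        {
                .range_min      = 0x000,
                .range_max      = 0x1ff,
                .selector_reg   = FOO_BANK_SEL,
                .selector_mask  = 0x1,
                .selector_shift = 0,
                .window_start   = 0x00,
                .window_len     = 0x100,
        },
};

static const struct regmap_config foo_regmap_config = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .max_register   = 0x1ff,
        .ranges         = foo_ranges,
        .num_ranges     = ARRAY_SIZE(foo_ranges),
};

/*
 * With this table, regmap_write(map, 0x17e, val) first selects page 1 via
 * FOO_BANK_SEL and then writes val at offset 0x7e of the window, so callers
 * no longer have to toggle the bank by hand around every access.
 */
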
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 4f6f1deba28c..a2d723d6a4be 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -22,7 +22,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
@@ -35,10 +34,6 @@
#define IT66121_DEVICE_ID0_REG 0x02
#define IT66121_DEVICE_ID1_REG 0x03
-#define IT66121_VENDOR_ID0 0x54
-#define IT66121_VENDOR_ID1 0x49
-#define IT66121_DEVICE_ID0 0x12
-#define IT66121_DEVICE_ID1 0x06
#define IT66121_REVISION_MASK GENMASK(7, 4)
#define IT66121_DEVICE_ID1_MASK GENMASK(3, 0)
@@ -72,6 +67,7 @@
#define IT66121_AFE_XP_ENO BIT(4)
#define IT66121_AFE_XP_RESETB BIT(3)
#define IT66121_AFE_XP_PWDI BIT(2)
+#define IT6610_AFE_XP_BYPASS BIT(0)
#define IT66121_AFE_IP_REG 0x64
#define IT66121_AFE_IP_GAINBIT BIT(7)
@@ -286,13 +282,18 @@
#define IT66121_AUD_SWL_16BIT 0x2
#define IT66121_AUD_SWL_NOT_INDICATED 0x0
-#define IT66121_VENDOR_ID0 0x54
-#define IT66121_VENDOR_ID1 0x49
-#define IT66121_DEVICE_ID0 0x12
-#define IT66121_DEVICE_ID1 0x06
-#define IT66121_DEVICE_MASK 0x0F
#define IT66121_AFE_CLK_HIGH 80000 /* Khz */
+enum chip_id {
+ ID_IT6610,
+ ID_IT66121,
+};
+
+struct it66121_chip_info {
+ enum chip_id id;
+ u16 vid, pid;
+};
+
struct it66121_ctx {
struct regmap *regmap;
struct drm_bridge bridge;
@@ -301,7 +302,6 @@ struct it66121_ctx {
struct device *dev;
struct gpio_desc *gpio_reset;
struct i2c_client *client;
- struct regulator_bulk_data supplies[3];
u32 bus_width;
struct mutex lock; /* Protects fields below and device registers */
struct hdmi_avi_infoframe hdmi_avi_infoframe;
@@ -312,6 +312,7 @@ struct it66121_ctx {
u8 swl;
bool auto_cts;
} audio;
+ const struct it66121_chip_info *info;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@@ -342,16 +343,6 @@ static void it66121_hw_reset(struct it66121_ctx *ctx)
gpiod_set_value(ctx->gpio_reset, 0);
}
-static inline int ite66121_power_on(struct it66121_ctx *ctx)
-{
- return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static inline int ite66121_power_off(struct it66121_ctx *ctx)
-{
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
static inline int it66121_preamble_ddc(struct it66121_ctx *ctx)
{
return regmap_write(ctx->regmap, IT66121_MASTER_SEL_REG, IT66121_MASTER_SEL_HOST);
@@ -406,16 +397,22 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_GAINBIT |
- IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_ER0,
IT66121_AFE_IP_GAINBIT);
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
- IT66121_AFE_XP_EC1_LOWCLK, 0x80);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_EC1, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK, 0x80);
+ if (ret)
+ return ret;
+ }
} else {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
IT66121_AFE_XP_GAINBIT |
@@ -426,17 +423,24 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_GAINBIT |
- IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1, IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1);
+ IT66121_AFE_IP_ER0,
+ IT66121_AFE_IP_ER0);
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
- IT66121_AFE_XP_EC1_LOWCLK,
- IT66121_AFE_XP_EC1_LOWCLK);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_EC1);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK,
+ IT66121_AFE_XP_EC1_LOWCLK);
+ if (ret)
+ return ret;
+ }
}
/* Clear reset flags */
@@ -445,38 +449,36 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
+ if (ctx->info->id == ID_IT6610) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT6610_AFE_XP_BYPASS,
+ IT6610_AFE_XP_BYPASS);
+ if (ret)
+ return ret;
+ }
+
return it66121_fire_afe(ctx);
}
static inline int it66121_wait_ddc_ready(struct it66121_ctx *ctx)
{
int ret, val;
- u32 busy = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
- IT66121_DDC_STATUS_ARBI_LOSE;
+ u32 error = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
+ IT66121_DDC_STATUS_ARBI_LOSE;
+ u32 done = IT66121_DDC_STATUS_TX_DONE;
- ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val, true,
- IT66121_EDID_SLEEP_US, IT66121_EDID_TIMEOUT_US);
+ ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val,
+ val & (error | done), IT66121_EDID_SLEEP_US,
+ IT66121_EDID_TIMEOUT_US);
if (ret)
return ret;
- if (val & busy)
+ if (val & error)
return -EAGAIN;
return 0;
}
-static int it66121_clear_ddc_fifo(struct it66121_ctx *ctx)
-{
- int ret;
-
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
-
- return regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
- IT66121_DDC_COMMAND_FIFO_CLR);
-}
-
static int it66121_abort_ddc_ops(struct it66121_ctx *ctx)
{
int ret;
@@ -516,7 +518,6 @@ static int it66121_get_edid_block(void *context, u8 *buf,
unsigned int block, size_t len)
{
struct it66121_ctx *ctx = context;
- unsigned int val;
int remain = len;
int offset = 0;
int ret, cnt;
@@ -524,26 +525,9 @@ static int it66121_get_edid_block(void *context, u8 *buf,
offset = (block % 2) * len;
block = block / 2;
- ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
- if (ret)
- return ret;
-
- if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
- ret = it66121_abort_ddc_ops(ctx);
- if (ret)
- return ret;
- }
-
- ret = it66121_clear_ddc_fifo(ctx);
- if (ret)
- return ret;
-
while (remain > 0) {
cnt = (remain > IT66121_EDID_FIFO_SIZE) ?
IT66121_EDID_FIFO_SIZE : remain;
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
IT66121_DDC_COMMAND_FIFO_CLR);
@@ -554,25 +538,6 @@ static int it66121_get_edid_block(void *context, u8 *buf,
if (ret)
return ret;
- ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
- if (ret)
- return ret;
-
- if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
- ret = it66121_abort_ddc_ops(ctx);
- if (ret)
- return ret;
- }
-
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
-
- ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
- IT66121_DDC_HEADER_EDID);
- if (ret)
- return ret;
-
ret = regmap_write(ctx->regmap, IT66121_DDC_OFFSET_REG, offset);
if (ret)
return ret;
@@ -593,20 +558,18 @@ static int it66121_get_edid_block(void *context, u8 *buf,
offset += cnt;
remain -= cnt;
- /* Per programming manual, sleep here before emptying the FIFO */
- msleep(20);
-
ret = it66121_wait_ddc_ready(ctx);
+ if (ret) {
+ it66121_abort_ddc_ops(ctx);
+ return ret;
+ }
+
+ ret = regmap_noinc_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG,
+ buf, cnt);
if (ret)
return ret;
- do {
- ret = regmap_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG, &val);
- if (ret)
- return ret;
- *(buf++) = val;
- cnt--;
- } while (cnt > 0);
+ buf += cnt;
}
return 0;
@@ -635,10 +598,12 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
- IT66121_CLK_BANK_PWROFF_RCLK, 0);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_RCLK, 0);
+ if (ret)
+ return ret;
+ }
ret = regmap_write_bits(ctx->regmap, IT66121_INT_REG,
IT66121_INT_TX_CLK_OFF, 0);
@@ -684,11 +649,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
/* Per programming manual, sleep here for bridge to settle */
msleep(50);
- /* Start interrupts */
- return regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
- IT66121_INT_MASK1_DDC_NOACK |
- IT66121_INT_MASK1_DDC_FIFOERR |
- IT66121_INT_MASK1_DDC_BUSHANG, 0);
+ return 0;
}
static int it66121_set_mute(struct it66121_ctx *ctx, bool mute)
@@ -780,29 +741,32 @@ static void it66121_bridge_disable(struct drm_bridge *bridge,
ctx->connector = NULL;
}
+static int it66121_bridge_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+
+ if (ctx->info->id == ID_IT6610) {
+ /* The IT6610 only supports these settings */
+ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+ bridge_state->input_bus_cfg.flags &=
+ ~DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+ }
+
+ return 0;
+}
+
static
void it66121_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- int ret, i;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- const u16 aviinfo_reg[HDMI_AVI_INFOFRAME_SIZE] = {
- IT66121_AVIINFO_DB1_REG,
- IT66121_AVIINFO_DB2_REG,
- IT66121_AVIINFO_DB3_REG,
- IT66121_AVIINFO_DB4_REG,
- IT66121_AVIINFO_DB5_REG,
- IT66121_AVIINFO_DB6_REG,
- IT66121_AVIINFO_DB7_REG,
- IT66121_AVIINFO_DB8_REG,
- IT66121_AVIINFO_DB9_REG,
- IT66121_AVIINFO_DB10_REG,
- IT66121_AVIINFO_DB11_REG,
- IT66121_AVIINFO_DB12_REG,
- IT66121_AVIINFO_DB13_REG
- };
+ int ret;
mutex_lock(&ctx->lock);
@@ -822,10 +786,12 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
}
/* Write new AVI infoframe packet */
- for (i = 0; i < HDMI_AVI_INFOFRAME_SIZE; i++) {
- if (regmap_write(ctx->regmap, aviinfo_reg[i], buf[i + HDMI_INFOFRAME_HEADER_SIZE]))
- goto unlock;
- }
+ ret = regmap_bulk_write(ctx->regmap, IT66121_AVIINFO_DB1_REG,
+ &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_INFOFRAME_SIZE);
+ if (ret)
+ goto unlock;
+
if (regmap_write(ctx->regmap, IT66121_AVIINFO_CSUM_REG, buf[3]))
goto unlock;
@@ -838,9 +804,12 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
goto unlock;
- if (regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
- IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK))
+ if (ctx->info->id == ID_IT66121 &&
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK,
+ IT66121_CLK_BANK_PWROFF_TXCLK)) {
goto unlock;
+ }
if (it66121_configure_input(ctx))
goto unlock;
@@ -848,7 +817,11 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (it66121_configure_afe(ctx, adjusted_mode))
goto unlock;
- regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0);
+ if (ctx->info->id == ID_IT66121 &&
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK, 0)) {
+ goto unlock;
+ }
unlock:
mutex_unlock(&ctx->lock);
@@ -906,9 +879,25 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
struct edid *edid;
+ int ret;
mutex_lock(&ctx->lock);
+ ret = it66121_preamble_ddc(ctx);
+ if (ret) {
+ edid = ERR_PTR(ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret) {
+ edid = ERR_PTR(ret);
+ goto out_unlock;
+ }
+
edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+
+out_unlock:
mutex_unlock(&ctx->lock);
return edid;
@@ -923,6 +912,7 @@ static const struct drm_bridge_funcs it66121_bridge_funcs = {
.atomic_get_input_bus_fmts = it66121_bridge_atomic_get_input_bus_fmts,
.atomic_enable = it66121_bridge_enable,
.atomic_disable = it66121_bridge_disable,
+ .atomic_check = it66121_bridge_check,
.mode_set = it66121_bridge_mode_set,
.mode_valid = it66121_bridge_mode_valid,
.detect = it66121_bridge_detect,
@@ -952,21 +942,14 @@ static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id)
ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
if (ret) {
dev_err(dev, "Cannot read STATUS1_REG %d\n", ret);
- } else {
- if (val & IT66121_INT_STATUS1_DDC_FIFOERR)
- it66121_clear_ddc_fifo(ctx);
- if (val & (IT66121_INT_STATUS1_DDC_BUSHANG |
- IT66121_INT_STATUS1_DDC_NOACK))
- it66121_abort_ddc_ops(ctx);
- if (val & IT66121_INT_STATUS1_HPD_STATUS) {
- regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
- IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
+ } else if (val & IT66121_INT_STATUS1_HPD_STATUS) {
+ regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
+ IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
- status = it66121_is_hpd_detect(ctx) ? connector_status_connected
- : connector_status_disconnected;
+ status = it66121_is_hpd_detect(ctx) ? connector_status_connected
+ : connector_status_disconnected;
- event = true;
- }
+ event = true;
}
regmap_write_bits(ctx->regmap, IT66121_SYS_STATUS_REG,
@@ -1512,9 +1495,13 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
return PTR_ERR_OR_ZERO(ctx->audio.pdev);
}
-static int it66121_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const char * const it66121_supplies[] = {
+ "vcn33", "vcn18", "vrf12"
+};
+
+static int it66121_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
@@ -1536,6 +1523,7 @@ static int it66121_probe(struct i2c_client *client,
ctx->dev = dev;
ctx->client = client;
+ ctx->info = (const struct it66121_chip_info *) id->driver_data;
of_property_read_u32(ep, "bus-width", &ctx->bus_width);
of_node_put(ep);
@@ -1565,26 +1553,18 @@ static int it66121_probe(struct i2c_client *client,
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
- ctx->supplies[0].supply = "vcn33";
- ctx->supplies[1].supply = "vcn18";
- ctx->supplies[2].supply = "vrf12";
- ret = devm_regulator_bulk_get(ctx->dev, 3, ctx->supplies);
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it66121_supplies),
+ it66121_supplies);
if (ret) {
- dev_err(ctx->dev, "regulator_bulk failed\n");
+ dev_err(dev, "Failed to enable power supplies\n");
return ret;
}
- ret = ite66121_power_on(ctx);
- if (ret)
- return ret;
-
it66121_hw_reset(ctx);
ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
- if (IS_ERR(ctx->regmap)) {
- ite66121_power_off(ctx);
+ if (IS_ERR(ctx->regmap))
return PTR_ERR(ctx->regmap);
- }
regmap_read(ctx->regmap, IT66121_VENDOR_ID0_REG, &vendor_ids[0]);
regmap_read(ctx->regmap, IT66121_VENDOR_ID1_REG, &vendor_ids[1]);
@@ -1595,9 +1575,8 @@ static int it66121_probe(struct i2c_client *client,
revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
device_ids[1] &= IT66121_DEVICE_ID1_MASK;
- if (vendor_ids[0] != IT66121_VENDOR_ID0 || vendor_ids[1] != IT66121_VENDOR_ID1 ||
- device_ids[0] != IT66121_DEVICE_ID0 || device_ids[1] != IT66121_DEVICE_ID1) {
- ite66121_power_off(ctx);
+ if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid ||
+ (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) {
return -ENODEV;
}
@@ -1610,7 +1589,6 @@ static int it66121_probe(struct i2c_client *client,
IRQF_ONESHOT, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
- ite66121_power_off(ctx);
return ret;
}
@@ -1627,19 +1605,32 @@ static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
- ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
}
static const struct of_device_id it66121_dt_match[] = {
{ .compatible = "ite,it66121" },
+ { .compatible = "ite,it6610" },
{ }
};
MODULE_DEVICE_TABLE(of, it66121_dt_match);
+static const struct it66121_chip_info it66121_chip_info = {
+ .id = ID_IT66121,
+ .vid = 0x4954,
+ .pid = 0x0612,
+};
+
+static const struct it66121_chip_info it6610_chip_info = {
+ .id = ID_IT6610,
+ .vid = 0xca00,
+ .pid = 0x0611,
+};
+
static const struct i2c_device_id it66121_id[] = {
- { "it66121", 0 },
+ { "it66121", (kernel_ulong_t) &it66121_chip_info },
+ { "it6610", (kernel_ulong_t) &it6610_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, it66121_id);
@@ -1649,7 +1640,7 @@ static struct i2c_driver it66121_driver = {
.name = "it66121",
.of_match_table = it66121_dt_match,
},
- .probe = it66121_probe,
+ .probe_new = it66121_probe,
.remove = it66121_remove,
.id_table = it66121_id,
};
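
The it66121 probe conversion above combines two changes that recur through this series: the I2C probe callback drops its second argument (the matched id is fetched with i2c_client_get_device_id() where still needed), and per-chip differences move into an info struct carried in driver_data instead of scattered #defines. Roughly, for a hypothetical two-variant driver with invented IDs:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

struct foo_chip_info {
        u16 vid, pid;           /* expected ID registers for this variant */
};

static const struct foo_chip_info foo_a_info = { .vid = 0x1234, .pid = 0x0001 };
static const struct foo_chip_info foo_b_info = { .vid = 0x1234, .pid = 0x0002 };

static const struct i2c_device_id foo_i2c_ids[] = {
        { "foo-a", (kernel_ulong_t)&foo_a_info },
        { "foo-b", (kernel_ulong_t)&foo_b_info },
        { }
};
MODULE_DEVICE_TABLE(i2c, foo_i2c_ids);

static int foo_probe(struct i2c_client *client)
{
        /* assumes the device matched through the id table, as the bridges here do */
        const struct i2c_device_id *id = i2c_client_get_device_id(client);
        const struct foo_chip_info *info;

        info = (const struct foo_chip_info *)id->driver_data;
        /* compare info->vid / info->pid against what the chip reports, ... */
        return 0;
}

static struct i2c_driver foo_driver = {
        .driver = {
                .name = "foo",
        },
        .probe_new = foo_probe,
        .id_table = foo_i2c_ids,
};
module_i2c_driver(foo_driver);
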
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index a98efef0ba0e..13c131ade268 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -504,7 +504,6 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
- MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM |
MIPI_DSI_MODE_NO_EOT_PACKET;
@@ -517,14 +516,27 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
return 0;
}
+static void lt8912_bridge_hpd_cb(void *data, enum drm_connector_status status)
+{
+ struct lt8912 *lt = data;
+
+ if (lt->bridge.dev)
+ drm_helper_hpd_irq_event(lt->bridge.dev);
+}
+
static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
{
int ret;
struct lt8912 *lt = bridge_to_lt8912(bridge);
struct drm_connector *connector = &lt->connector;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_enable(lt->hdmi_port, lt8912_bridge_hpd_cb, lt);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
ret = drm_connector_init(bridge->dev, connector,
&lt8912_connector_funcs,
@@ -578,6 +590,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
if (lt->is_attached) {
lt8912_hard_power_off(lt);
+
+ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
+ drm_bridge_hpd_disable(lt->hdmi_port);
+
drm_connector_unregister(&lt->connector);
drm_connector_cleanup(&lt->connector);
}
@@ -659,8 +675,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)
lt->hdmi_port = of_drm_find_bridge(port_node);
if (!lt->hdmi_port) {
- dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
- ret = -ENODEV;
+ ret = -EPROBE_DEFER;
+ dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
goto err_free_host_node;
}
@@ -685,8 +701,7 @@ static int lt8912_put_dt(struct lt8912 *lt)
return 0;
}
-static int lt8912_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt8912_probe(struct i2c_client *client)
{
static struct lt8912 *lt;
int ret = 0;
@@ -758,7 +773,7 @@ static struct i2c_driver lt8912_i2c_driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
},
- .probe = lt8912_probe,
+ .probe_new = lt8912_probe,
.remove = lt8912_remove,
.id_table = lt8912_id,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 933ca028d612..3e19fff6547a 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -720,8 +720,7 @@ static int lt9211_host_attach(struct lt9211 *ctx)
return 0;
}
-static int lt9211_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9211_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lt9211 *ctx;
@@ -786,7 +785,7 @@ static const struct of_device_id lt9211_match_table[] = {
MODULE_DEVICE_TABLE(of, lt9211_match_table);
static struct i2c_driver lt9211_driver = {
- .probe = lt9211_probe,
+ .probe_new = lt9211_probe,
.remove = lt9211_remove,
.id_table = lt9211_id,
.driver = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 7c0a99173b39..a25d21a7d5c1 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -33,7 +34,7 @@
struct lt9611 {
struct device *dev;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_bridge *next_bridge;
struct regmap *regmap;
@@ -58,7 +59,6 @@ struct lt9611 {
enum drm_connector_status status;
u8 edid_buf[EDID_SEG_SIZE];
- u32 vic;
};
#define LT9611_PAGE_CONTROL 0xff
@@ -84,34 +84,11 @@ static const struct regmap_config lt9611_regmap_config = {
.num_ranges = ARRAY_SIZE(lt9611_ranges),
};
-struct lt9611_mode {
- u16 hdisplay;
- u16 vdisplay;
- u8 vrefresh;
- u8 lanes;
- u8 intfs;
-};
-
-static struct lt9611_mode lt9611_modes[] = {
- { 3840, 2160, 30, 4, 2 }, /* 3840x2160 24bit 30Hz 4Lane 2ports */
- { 1920, 1080, 60, 4, 1 }, /* 1080P 24bit 60Hz 4lane 1port */
- { 1920, 1080, 30, 3, 1 }, /* 1080P 24bit 30Hz 3lane 1port */
- { 1920, 1080, 24, 3, 1 },
- { 720, 480, 60, 4, 1 },
- { 720, 576, 50, 2, 1 },
- { 640, 480, 60, 2, 1 },
-};
-
static struct lt9611 *bridge_to_lt9611(struct drm_bridge *bridge)
{
return container_of(bridge, struct lt9611, bridge);
}
-static struct lt9611 *connector_to_lt9611(struct drm_connector *connector)
-{
- return container_of(connector, struct lt9611, connector);
-}
-
static int lt9611_mipi_input_analog(struct lt9611 *lt9611)
{
const struct reg_sequence reg_cfg[] = {
@@ -141,7 +118,7 @@ static int lt9611_mipi_input_digital(struct lt9611 *lt9611,
{ 0x8306, 0x0a },
};
- if (mode->hdisplay == 3840)
+ if (lt9611->dsi1_node)
reg_cfg[1].def = 0x03;
return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
@@ -159,12 +136,12 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
hactive = mode->hdisplay;
hsync_len = mode->hsync_end - mode->hsync_start;
hfront_porch = mode->hsync_start - mode->hdisplay;
- hsync_porch = hsync_len + mode->htotal - mode->hsync_end;
+ hsync_porch = mode->htotal - mode->hsync_start;
vactive = mode->vdisplay;
vsync_len = mode->vsync_end - mode->vsync_start;
vfront_porch = mode->vsync_start - mode->vdisplay;
- vsync_porch = vsync_len + mode->vtotal - mode->vsync_end;
+ vsync_porch = mode->vtotal - mode->vsync_start;
regmap_write(lt9611->regmap, 0x830d, (u8)(v_total / 256));
regmap_write(lt9611->regmap, 0x830e, (u8)(v_total % 256));
@@ -187,12 +164,14 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256));
- regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256));
+ regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256) |
+ ((hfront_porch / 256) << 4));
regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256));
}
-static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
+static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int postdiv)
{
+ unsigned int pcr_m = mode->clock * 5 * postdiv / 27000;
const struct reg_sequence reg_cfg[] = {
{ 0x830b, 0x01 },
{ 0x830c, 0x10 },
@@ -207,45 +186,40 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
/* stage 2 */
{ 0x834a, 0x40 },
- { 0x831d, 0x10 },
/* MK limit */
{ 0x832d, 0x38 },
{ 0x8331, 0x08 },
};
- const struct reg_sequence reg_cfg2[] = {
- { 0x830b, 0x03 },
- { 0x830c, 0xd0 },
- { 0x8348, 0x03 },
- { 0x8349, 0xe0 },
- { 0x8324, 0x72 },
- { 0x8325, 0x00 },
- { 0x832a, 0x01 },
- { 0x834a, 0x10 },
- { 0x831d, 0x10 },
- { 0x8326, 0x37 },
- };
+ u8 pol = 0x10;
- regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ pol |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ pol |= 0x1;
+ regmap_write(lt9611->regmap, 0x831d, pol);
- switch (mode->hdisplay) {
- case 640:
- regmap_write(lt9611->regmap, 0x8326, 0x14);
- break;
- case 1920:
- regmap_write(lt9611->regmap, 0x8326, 0x37);
- break;
- case 3840:
- regmap_multi_reg_write(lt9611->regmap, reg_cfg2, ARRAY_SIZE(reg_cfg2));
- break;
+ regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+ if (lt9611->dsi1_node) {
+ unsigned int hact = mode->hdisplay;
+
+ hact >>= 2;
+ hact += 0x50;
+ hact = min(hact, 0x3e0U);
+ regmap_write(lt9611->regmap, 0x830b, hact / 256);
+ regmap_write(lt9611->regmap, 0x830c, hact % 256);
+ regmap_write(lt9611->regmap, 0x8348, hact / 256);
+ regmap_write(lt9611->regmap, 0x8349, hact % 256);
}
+ regmap_write(lt9611->regmap, 0x8326, pcr_m);
+
/* pcr rst */
regmap_write(lt9611->regmap, 0x8011, 0x5a);
regmap_write(lt9611->regmap, 0x8011, 0xfa);
}
-static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
+static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int *postdiv)
{
unsigned int pclk = mode->clock;
const struct reg_sequence reg_cfg[] = {
@@ -259,16 +233,21 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
{ 0x8126, 0x55 },
{ 0x8127, 0x66 },
{ 0x8128, 0x88 },
+ { 0x812a, 0x20 },
};
regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
- if (pclk > 150000)
+ if (pclk > 150000) {
regmap_write(lt9611->regmap, 0x812d, 0x88);
- else if (pclk > 70000)
+ *postdiv = 1;
+ } else if (pclk > 70000) {
regmap_write(lt9611->regmap, 0x812d, 0x99);
- else
+ *postdiv = 2;
+ } else {
regmap_write(lt9611->regmap, 0x812d, 0xaa);
+ *postdiv = 4;
+ }
/*
* first divide pclk by 2 first
@@ -353,13 +332,55 @@ end:
return temp;
}
-static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611)
+static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- regmap_write(lt9611->regmap, 0x8443, 0x46 - lt9611->vic);
- regmap_write(lt9611->regmap, 0x8447, lt9611->vic);
- regmap_write(lt9611->regmap, 0x843d, 0x0a); /* UD1 infoframe */
+ union hdmi_infoframe infoframe;
+ ssize_t len;
+ u8 iframes = 0x0a; /* UD1 infoframe */
+ u8 buf[32];
+ int ret;
+ int i;
- regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi,
+ connector,
+ mode);
+ if (ret < 0)
+ goto out;
+
+ len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf));
+ if (len < 0)
+ goto out;
+
+ for (i = 0; i < len; i++)
+ regmap_write(lt9611->regmap, 0x8440 + i, buf[i]);
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
+ connector,
+ mode);
+ if (ret < 0)
+ goto out;
+
+ len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf));
+ if (len < 0)
+ goto out;
+
+ for (i = 0; i < len; i++)
+ regmap_write(lt9611->regmap, 0x8474 + i, buf[i]);
+
+ iframes |= 0x20;
+
+out:
+ regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */
+}
+
+static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi)
+{
+ if (is_hdmi)
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ else
+ regmap_write(lt9611->regmap, 0x82d6, 0x0c);
regmap_write(lt9611->regmap, 0x82d7, 0x04);
}
@@ -448,12 +469,11 @@ static void lt9611_sleep_setup(struct lt9611 *lt9611)
{ 0x8023, 0x01 },
{ 0x8157, 0x03 }, /* set addr pin as output */
{ 0x8149, 0x0b },
- { 0x8151, 0x30 }, /* disable IRQ */
+
{ 0x8102, 0x48 }, /* MIPI Rx power down */
{ 0x8123, 0x80 },
{ 0x8130, 0x00 },
- { 0x8100, 0x01 }, /* bandgap power down */
- { 0x8101, 0x00 }, /* system clk power down */
+ { 0x8011, 0x0a },
};
regmap_multi_reg_write(lt9611->regmap,
@@ -564,24 +584,9 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611)
return 0;
}
-static struct lt9611_mode *lt9611_find_mode(const struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lt9611_modes); i++) {
- if (lt9611_modes[i].hdisplay == mode->hdisplay &&
- lt9611_modes[i].vdisplay == mode->vdisplay &&
- lt9611_modes[i].vrefresh == drm_mode_vrefresh(mode)) {
- return &lt9611_modes[i];
- }
- }
-
- return NULL;
-}
-
-/* connector funcs */
-static enum drm_connector_status __lt9611_detect(struct lt9611 *lt9611)
+static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
int connected = 0;
@@ -594,12 +599,6 @@ static enum drm_connector_status __lt9611_detect(struct lt9611 *lt9611)
return lt9611->status;
}
-static enum drm_connector_status
-lt9611_connector_detect(struct drm_connector *connector, bool force)
-{
- return __lt9611_detect(connector_to_lt9611(connector));
-}
-
static int lt9611_read_edid(struct lt9611 *lt9611)
{
unsigned int temp;
@@ -681,36 +680,37 @@ lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static int lt9611_connector_get_modes(struct drm_connector *connector)
-{
- struct lt9611 *lt9611 = connector_to_lt9611(connector);
- unsigned int count;
- struct edid *edid;
-
- lt9611_power_on(lt9611);
- edid = drm_do_get_edid(connector, lt9611_get_edid_block, lt9611);
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-}
-
-static enum drm_mode_status
-lt9611_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
-
- return lt9611_mode ? MODE_OK : MODE_BAD;
-}
-
/* bridge funcs */
static void
lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ unsigned int postdiv;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ lt9611_mipi_input_digital(lt9611, mode);
+ lt9611_pll_setup(lt9611, mode, &postdiv);
+ lt9611_mipi_video_setup(lt9611, mode);
+ lt9611_pcr_setup(lt9611, mode, postdiv);
if (lt9611_power_on(lt9611)) {
dev_err(lt9611->dev, "power on failed\n");
@@ -718,7 +718,8 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
}
lt9611_mipi_input_analog(lt9611);
- lt9611_hdmi_tx_digital(lt9611);
+ lt9611_hdmi_set_infoframes(lt9611, connector, mode);
+ lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi);
lt9611_hdmi_tx_phy(lt9611);
msleep(500);
@@ -749,25 +750,10 @@ lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
}
}
-static struct
-drm_connector_helper_funcs lt9611_bridge_connector_helper_funcs = {
- .get_modes = lt9611_connector_get_modes,
- .mode_valid = lt9611_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs lt9611_bridge_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = lt9611_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
struct device_node *dsi_node)
{
- const struct mipi_dsi_device_info info = { "lt9611", 0, NULL };
+ const struct mipi_dsi_device_info info = { "lt9611", 0, lt9611->dev->of_node};
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
struct device *dev = lt9611->dev;
@@ -799,70 +785,54 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
return dsi;
}
-static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt9611)
-{
- int ret;
-
- ret = drm_connector_init(bridge->dev, &lt9611->connector,
- &lt9611_bridge_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(&lt9611->connector,
- &lt9611_bridge_connector_helper_funcs);
-
- if (!bridge->encoder) {
- DRM_ERROR("Parent encoder object not found");
- return -ENODEV;
- }
-
- drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
-
- return 0;
-}
-
static int lt9611_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- int ret;
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = lt9611_connector_init(bridge, lt9611);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ bridge, flags);
}
static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
- struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- if (!lt9611_mode)
- return MODE_BAD;
- else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
+ if (mode->hdisplay > 3840)
+ return MODE_BAD_HVALUE;
+
+ if (mode->vdisplay > 2160)
+ return MODE_BAD_VVALUE;
+
+ if (mode->hdisplay == 3840 &&
+ mode->vdisplay == 2160 &&
+ drm_mode_vrefresh(mode) > 30)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
return MODE_PANEL;
else
return MODE_OK;
}
-static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ static const struct reg_sequence reg_cfg[] = {
+ { 0x8102, 0x12 },
+ { 0x8123, 0x40 },
+ { 0x8130, 0xea },
+ { 0x8011, 0xfa },
+ };
if (!lt9611->sleep)
return;
- lt9611_reset(lt9611);
- regmap_write(lt9611->regmap, 0x80ee, 0x01);
+ regmap_multi_reg_write(lt9611->regmap,
+ reg_cfg, ARRAY_SIZE(reg_cfg));
lt9611->sleep = false;
}
@@ -876,33 +846,6 @@ lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
lt9611_sleep_setup(lt9611);
}
-static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
-{
- struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- struct hdmi_avi_infoframe avi_frame;
- int ret;
-
- lt9611_bridge_pre_enable(bridge);
-
- lt9611_mipi_input_digital(lt9611, mode);
- lt9611_pll_setup(lt9611, mode);
- lt9611_mipi_video_setup(lt9611, mode);
- lt9611_pcr_setup(lt9611, mode);
-
- ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
- &lt9611->connector,
- mode);
- if (!ret)
- lt9611->vic = avi_frame.video_code;
-}
-
-static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
-{
- return __lt9611_detect(bridge_to_lt9611(bridge));
-}
-
static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -948,11 +891,11 @@ lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
- .mode_set = lt9611_bridge_mode_set,
.detect = lt9611_bridge_detect,
.get_edid = lt9611_bridge_get_edid,
.hpd_enable = lt9611_bridge_hpd_enable,
+ .atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
.atomic_enable = lt9611_bridge_atomic_enable,
.atomic_disable = lt9611_bridge_atomic_disable,
.atomic_post_disable = lt9611_bridge_atomic_post_disable,
@@ -975,7 +918,7 @@ static int lt9611_parse_dt(struct device *dev,
lt9611->ac_mode = of_property_read_bool(dev->of_node, "lt,ac-mode");
- return 0;
+ return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611->next_bridge);
}
static int lt9611_gpio_init(struct lt9611 *lt9611)
@@ -1108,8 +1051,7 @@ static void lt9611_audio_exit(struct lt9611 *lt9611)
}
}
-static int lt9611_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9611_probe(struct i2c_client *client)
{
struct lt9611 *lt9611;
struct device *dev = &client->dev;
@@ -1248,7 +1190,7 @@ static struct i2c_driver lt9611_driver = {
.name = "lt9611",
.of_match_table = lt9611_match_table,
},
- .probe = lt9611_probe,
+ .probe_new = lt9611_probe,
.remove = lt9611_remove,
.id_table = lt9611_id,
};
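
A quick worked example for the new lt9611_pll_setup()/lt9611_pcr_setup() pair above, assuming the 27 MHz reference implied by the /27000 divisor: a 1080p60 mode has mode->clock = 148500 kHz, which falls in the 70-150 MHz branch and selects postdiv = 2, so pcr_m = 148500 * 5 * 2 / 27000 = 55; a 4K30 mode at 297000 kHz takes the >150 MHz branch (postdiv = 1) and gives pcr_m = 297000 * 5 * 1 / 27000 = 55 as well, since clock * postdiv is the same in both cases.
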
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fa1ee6264d92..583daacf3705 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -844,8 +844,7 @@ static const struct attribute_group *lt9611uxc_attr_groups[] = {
NULL,
};
-static int lt9611uxc_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9611uxc_probe(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc;
struct device *dev = &client->dev;
@@ -1012,7 +1011,7 @@ static struct i2c_driver lt9611uxc_driver = {
.of_match_table = lt9611uxc_match_table,
.dev_groups = lt9611uxc_attr_groups,
},
- .probe = lt9611uxc_probe,
+ .probe_new = lt9611uxc_probe,
.remove = lt9611uxc_remove,
.id_table = lt9611uxc_id,
};
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 39e7004de720..67368f23d4aa 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -215,13 +215,11 @@ static int lvds_codec_probe(struct platform_device *pdev)
return 0;
}
-static int lvds_codec_remove(struct platform_device *pdev)
+static void lvds_codec_remove(struct platform_device *pdev)
{
struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);
drm_bridge_remove(&lvds_codec->bridge);
-
- return 0;
}
static const struct of_device_id lvds_codec_match[] = {
@@ -243,7 +241,7 @@ MODULE_DEVICE_TABLE(of, lvds_codec_match);
static struct platform_driver lvds_codec_driver = {
.probe = lvds_codec_probe,
- .remove = lvds_codec_remove,
+ .remove_new = lvds_codec_remove,
.driver = {
.name = "lvds-codec",
.of_match_table = lvds_codec_match,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 97359f807bfc..4fc494d9084b 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -336,8 +336,7 @@ static int ge_b850v3_register(void)
"ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr);
}
-static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
- const struct i2c_device_id *id)
+static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c)
{
struct device *dev = &stdp4028_i2c->dev;
int ret;
@@ -376,7 +375,7 @@ MODULE_DEVICE_TABLE(of, stdp4028_ge_b850v3_fw_match);
static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
.id_table = stdp4028_ge_b850v3_fw_i2c_table,
- .probe = stdp4028_ge_b850v3_fw_probe,
+ .probe_new = stdp4028_ge_b850v3_fw_probe,
.remove = stdp4028_ge_b850v3_fw_remove,
.driver = {
.name = "stdp4028-ge-b850v3-fw",
@@ -384,8 +383,7 @@ static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
},
};
-static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
- const struct i2c_device_id *id)
+static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c)
{
struct device *dev = &stdp2690_i2c->dev;
int ret;
@@ -424,7 +422,7 @@ MODULE_DEVICE_TABLE(of, stdp2690_ge_b850v3_fw_match);
static struct i2c_driver stdp2690_ge_b850v3_fw_driver = {
.id_table = stdp2690_ge_b850v3_fw_i2c_table,
- .probe = stdp2690_ge_b850v3_fw_probe,
+ .probe_new = stdp2690_ge_b850v3_fw_probe,
.remove = stdp2690_ge_b850v3_fw_remove,
.driver = {
.name = "stdp2690-ge-b850v3-fw",
@@ -440,7 +438,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
if (ret)
return ret;
- return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ if (ret)
+ i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
+
+ return ret;
}
module_init(stdpxxxx_ge_b850v3_init);
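
The stdpxxxx module-init fix above is the usual unwind pattern for a module that registers two drivers: if the second i2c_add_driver() fails, the first one must be deleted again so the failed module load does not leave a driver registered behind. In generic form, with placeholder driver names:

static int __init foo_init(void)
{
        int ret;

        ret = i2c_add_driver(&foo_first_driver);
        if (ret)
                return ret;

        ret = i2c_add_driver(&foo_second_driver);
        if (ret)
                i2c_del_driver(&foo_first_driver);      /* unwind on failure */

        return ret;
}
module_init(foo_init);
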
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 6dc2a4e191d7..4a5f5c4f5dcc 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1199,7 +1199,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
return 0;
}
-static int nwl_dsi_remove(struct platform_device *pdev)
+static void nwl_dsi_remove(struct platform_device *pdev)
{
struct nwl_dsi *dsi = platform_get_drvdata(pdev);
@@ -1207,12 +1207,11 @@ static int nwl_dsi_remove(struct platform_device *pdev)
mipi_dsi_host_unregister(&dsi->dsi_host);
drm_bridge_remove(&dsi->bridge);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static struct platform_driver nwl_dsi_driver = {
.probe = nwl_dsi_probe,
- .remove = nwl_dsi_remove,
+ .remove_new = nwl_dsi_remove,
.driver = {
.of_match_table = nwl_dsi_dt_ids,
.name = DRV_NAME,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 0851101a8c72..cd292a2f894c 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -257,8 +257,7 @@ static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.get_edid = ptn3460_get_edid,
};
-static int ptn3460_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ptn3460_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
@@ -336,7 +335,7 @@ MODULE_DEVICE_TABLE(of, ptn3460_match);
static struct i2c_driver ptn3460_driver = {
.id_table = ptn3460_i2c_table,
- .probe = ptn3460_probe,
+ .probe_new = ptn3460_probe,
.remove = ptn3460_remove,
.driver = {
.name = "nxp,ptn3460",
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 216af76d0042..9316384b4474 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -81,6 +81,8 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
return ret;
}
+ drm_panel_bridge_set_orientation(connector, bridge);
+
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
@@ -109,30 +111,82 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
drm_connector_cleanup(connector);
}
-static void panel_bridge_pre_enable(struct drm_bridge *bridge)
+static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+
+ crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, encoder);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
+ if (old_crtc_state && old_crtc_state->self_refresh_active)
+ return;
drm_panel_prepare(panel_bridge->panel);
}
-static void panel_bridge_enable(struct drm_bridge *bridge)
+static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+
+ crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, encoder);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
+ if (old_crtc_state && old_crtc_state->self_refresh_active)
+ return;
drm_panel_enable(panel_bridge->panel);
}
-static void panel_bridge_disable(struct drm_bridge *bridge)
+static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+
+ crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, encoder);
+ if (!crtc)
+ return;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc);
+ if (new_crtc_state && new_crtc_state->self_refresh_active)
+ return;
drm_panel_disable(panel_bridge->panel);
}
-static void panel_bridge_post_disable(struct drm_bridge *bridge)
+static void panel_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+
+ crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, encoder);
+ if (!crtc)
+ return;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc);
+ if (new_crtc_state && new_crtc_state->self_refresh_active)
+ return;
drm_panel_unprepare(panel_bridge->panel);
}
@@ -159,10 +213,10 @@ static void panel_bridge_debugfs_init(struct drm_bridge *bridge,
static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.attach = panel_bridge_attach,
.detach = panel_bridge_detach,
- .pre_enable = panel_bridge_pre_enable,
- .enable = panel_bridge_enable,
- .disable = panel_bridge_disable,
- .post_disable = panel_bridge_post_disable,
+ .atomic_pre_enable = panel_bridge_atomic_pre_enable,
+ .atomic_enable = panel_bridge_atomic_enable,
+ .atomic_disable = panel_bridge_atomic_disable,
+ .atomic_post_disable = panel_bridge_atomic_post_disable,
.get_modes = panel_bridge_get_modes,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -357,13 +411,16 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
return ERR_PTR(-ENOMEM);
bridge = drm_panel_bridge_add_typed(panel, connector_type);
- if (!IS_ERR(bridge)) {
- *ptr = bridge;
- devres_add(dev, ptr);
- } else {
+ if (IS_ERR(bridge)) {
devres_free(ptr);
+ return bridge;
}
+ bridge->pre_enable_prev_first = panel->prepare_prev_first;
+
+ *ptr = bridge;
+ devres_add(dev, ptr);
+
return bridge;
}
EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
@@ -402,6 +459,8 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
+ bridge->pre_enable_prev_first = panel->prepare_prev_first;
+
return bridge;
}
EXPORT_SYMBOL(drmm_panel_bridge_add);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 309de802863d..efa80e309b98 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -442,9 +442,9 @@ static const struct of_device_id ps8622_devices[] = {
};
MODULE_DEVICE_TABLE(of, ps8622_devices);
-static int ps8622_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ps8622_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct ps8622_bridge *ps8622;
struct drm_bridge *panel_bridge;
@@ -496,7 +496,7 @@ static int ps8622_probe(struct i2c_client *client,
ps8622->lane_count = ps8622->max_lane_count;
}
- if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) {
+ if (!of_property_read_bool(dev->of_node, "use-external-pwm")) {
ps8622->bl = backlight_device_register("ps8622-backlight",
dev, ps8622, &ps8622_backlight_ops,
NULL);
@@ -538,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
static struct i2c_driver ps8622_driver = {
.id_table = ps8622_i2c_table,
- .probe = ps8622_probe,
+ .probe_new = ps8622_probe,
.remove = ps8622_remove,
.driver = {
.name = "ps8622",
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 6a614e54b383..c3eb45179405 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -15,6 +15,7 @@
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
@@ -104,6 +105,7 @@ struct ps8640 {
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
struct device_link *link;
+ struct edid *edid;
bool pre_enabled;
bool need_post_hpd_delay;
};
@@ -182,7 +184,7 @@ static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wai
* actually connected to GPIO9).
*/
ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
- status & PS_GPIO9, wait_us / 10, wait_us);
+ status & PS_GPIO9, 20000, wait_us);
/*
* The first time we see HPD go high after a reset we delay an extra
@@ -442,7 +444,8 @@ static const struct dev_pm_ops ps8640_pm_ops = {
pm_runtime_force_resume)
};
-static void ps8640_pre_enable(struct drm_bridge *bridge)
+static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -476,7 +479,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
ps_bridge->pre_enabled = true;
}
-static void ps8640_post_disable(struct drm_bridge *bridge)
+static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -539,34 +543,44 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
bool poweroff = !ps_bridge->pre_enabled;
- struct edid *edid;
- /*
- * When we end calling get_edid() triggered by an ioctl, i.e
- *
- * drm_mode_getconnector (ioctl)
- * -> drm_helper_probe_single_connector_modes
- * -> drm_bridge_connector_get_modes
- * -> ps8640_bridge_get_edid
- *
- * We need to make sure that what we need is enabled before reading
- * EDID, for this chip, we need to do a full poweron, otherwise it will
- * fail.
- */
- drm_bridge_chain_pre_enable(bridge);
+ if (!ps_bridge->edid) {
+ /*
+ * When get_edid() ends up being called from an ioctl, i.e.
+ *
+ * drm_mode_getconnector (ioctl)
+ * -> drm_helper_probe_single_connector_modes
+ * -> drm_bridge_connector_get_modes
+ * -> ps8640_bridge_get_edid
+ *
+ * we need to make sure that what we need is enabled before
+ * reading the EDID; for this chip that means a full power-on,
+ * otherwise the read will fail.
+ */
+ if (poweroff)
+ drm_atomic_bridge_chain_pre_enable(bridge,
+ connector->state->state);
- edid = drm_get_edid(connector,
- ps_bridge->page[PAGE0_DP_CNTL]->adapter);
+ ps_bridge->edid = drm_get_edid(connector,
+ ps_bridge->page[PAGE0_DP_CNTL]->adapter);
- /*
- * If we call the get_edid() function without having enabled the chip
- * before, return the chip to its original power state.
- */
- if (poweroff)
- drm_bridge_chain_post_disable(bridge);
+ /*
+ * If we call the get_edid() function without having enabled the
+ * chip before, return the chip to its original power state.
+ */
+ if (poweroff)
+ drm_atomic_bridge_chain_post_disable(bridge,
+ connector->state->state);
+ }
+
+ if (!ps_bridge->edid) {
+ dev_err(dev, "Failed to get EDID\n");
+ return NULL;
+ }
- return edid;
+ return drm_edid_duplicate(ps_bridge->edid);
}
static void ps8640_runtime_disable(void *data)
@@ -579,8 +593,11 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
.detach = ps8640_bridge_detach,
.get_edid = ps8640_bridge_get_edid,
- .post_disable = ps8640_post_disable,
- .pre_enable = ps8640_pre_enable,
+ .atomic_post_disable = ps8640_atomic_post_disable,
+ .atomic_pre_enable = ps8640_atomic_pre_enable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
static int ps8640_bridge_get_dsi_resources(struct device *dev, struct ps8640 *ps_bridge)
@@ -734,13 +751,13 @@ static int ps8640_probe(struct i2c_client *client)
pm_runtime_enable(dev);
/*
* Powering on ps8640 takes ~300ms. To avoid wasting time on power
- * cycling ps8640 too often, set autosuspend_delay to 1000ms to ensure
+ * cycling ps8640 too often, set autosuspend_delay to 2000ms to ensure
* the bridge wouldn't suspend in between each _aux_transfer_msg() call
* during EDID read (~20ms in my experiment) and in between the last
* _aux_transfer_msg() call during EDID read and the _pre_enable() call
* (~100ms in my experiment).
*/
- pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev);
@@ -760,6 +777,13 @@ static int ps8640_probe(struct i2c_client *client)
return ret;
}
+static void ps8640_remove(struct i2c_client *client)
+{
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+
+ kfree(ps_bridge->edid);
+}
+
static const struct of_device_id ps8640_match[] = {
{ .compatible = "parade,ps8640" },
{ }
@@ -768,6 +792,7 @@ MODULE_DEVICE_TABLE(of, ps8640_match);
static struct i2c_driver ps8640_driver = {
.probe_new = ps8640_probe,
+ .remove = ps8640_remove,
.driver = {
.name = "ps8640",
.of_match_table = ps8640_match,
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
new file mode 100644
index 000000000000..e0a402a85787
--- /dev/null
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -0,0 +1,1967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung MIPI DSIM bridge driver.
+ *
+ * Copyright (C) 2021 Amarula Solutions(India)
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ *
+ * Based on exynos_drm_dsi from
+ * Tomasz Figa <t.figa@samsung.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/samsung-dsim.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* returns true iff both arguments logically differ */
+#define NEQV(a, b) (!(a) ^ !(b))
+
+/* DSIM_STATUS */
+#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
+#define DSIM_STOP_STATE_CLK BIT(8)
+#define DSIM_TX_READY_HS_CLK BIT(10)
+#define DSIM_PLL_STABLE BIT(31)
+
+/* DSIM_SWRST */
+#define DSIM_FUNCRST BIT(16)
+#define DSIM_SWRST BIT(0)
+
+/* DSIM_TIMEOUT */
+#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
+#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
+
+/* DSIM_CLKCTRL */
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
+#define DSIM_BYTE_CLKEN BIT(24)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
+#define DSIM_ESC_CLKEN BIT(28)
+#define DSIM_TX_REQUEST_HSCLK BIT(31)
+
+/* DSIM_CONFIG */
+#define DSIM_LANE_EN_CLK BIT(0)
+#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
+#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
+#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
+#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
+#define DSIM_SUB_VC (((x) & 0x3) << 16)
+#define DSIM_MAIN_VC (((x) & 0x3) << 18)
+#define DSIM_HSA_DISABLE_MODE BIT(20)
+#define DSIM_HBP_DISABLE_MODE BIT(21)
+#define DSIM_HFP_DISABLE_MODE BIT(22)
+/*
+ * The i.MX 8M Mini Applications Processor Reference Manual,
+ * Rev. 3, 11/2020 Page 4091
+ * The i.MX 8M Nano Applications Processor Reference Manual,
+ * Rev. 2, 07/2022 Page 3058
+ * The i.MX 8M Plus Applications Processor Reference Manual,
+ * Rev. 1, 06/2021 Page 5436
+ * all claim this bit is 'HseDisableMode' with the definition
+ * 0 = Disables transfer
+ * 1 = Enables transfer
+ *
+ * This clearly states that HSE is not a disable bit.
+ *
+ * The name is kept as in the manual, and the driver logic is based
+ * on the MIPI_DSI_MODE_VIDEO_HSE flag.
+ */
+#define DSIM_HSE_DISABLE_MODE BIT(23)
+#define DSIM_AUTO_MODE BIT(24)
+#define DSIM_VIDEO_MODE BIT(25)
+#define DSIM_BURST_MODE BIT(26)
+#define DSIM_SYNC_INFORM BIT(27)
+#define DSIM_EOT_DISABLE BIT(28)
+#define DSIM_MFLUSH_VS BIT(29)
+/* This flag is valid only for exynos3250/3472/5260/5430 */
+#define DSIM_CLKLANE_STOP BIT(30)
+
+/* DSIM_ESCMODE */
+#define DSIM_TX_TRIGGER_RST BIT(4)
+#define DSIM_TX_LPDT_LP BIT(6)
+#define DSIM_CMD_LPDT_LP BIT(7)
+#define DSIM_FORCE_BTA BIT(16)
+#define DSIM_FORCE_STOP_STATE BIT(20)
+#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
+#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
+
+/* DSIM_MDRESOL */
+#define DSIM_MAIN_STAND_BY BIT(31)
+#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
+#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
+
+/* DSIM_MVPORCH */
+#define DSIM_CMD_ALLOW(x) ((x) << 28)
+#define DSIM_STABLE_VFP(x) ((x) << 16)
+#define DSIM_MAIN_VBP(x) ((x) << 0)
+#define DSIM_CMD_ALLOW_MASK (0xf << 28)
+#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
+#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
+
+/* DSIM_MHPORCH */
+#define DSIM_MAIN_HFP(x) ((x) << 16)
+#define DSIM_MAIN_HBP(x) ((x) << 0)
+#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
+#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
+
+/* DSIM_MSYNC */
+#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_HSA(x) ((x) << 0)
+#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
+
+/* DSIM_SDRESOL */
+#define DSIM_SUB_STANDY(x) ((x) << 31)
+#define DSIM_SUB_VRESOL(x) ((x) << 16)
+#define DSIM_SUB_HRESOL(x) ((x) << 0)
+#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
+#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
+#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
+
+/* DSIM_INTSRC */
+#define DSIM_INT_PLL_STABLE BIT(31)
+#define DSIM_INT_SW_RST_RELEASE BIT(30)
+#define DSIM_INT_SFR_FIFO_EMPTY BIT(29)
+#define DSIM_INT_SFR_HDR_FIFO_EMPTY BIT(28)
+#define DSIM_INT_BTA BIT(25)
+#define DSIM_INT_FRAME_DONE BIT(24)
+#define DSIM_INT_RX_TIMEOUT BIT(21)
+#define DSIM_INT_BTA_TIMEOUT BIT(20)
+#define DSIM_INT_RX_DONE BIT(18)
+#define DSIM_INT_RX_TE BIT(17)
+#define DSIM_INT_RX_ACK BIT(16)
+#define DSIM_INT_RX_ECC_ERR BIT(15)
+#define DSIM_INT_RX_CRC_ERR BIT(14)
+
+/* DSIM_FIFOCTRL */
+#define DSIM_RX_DATA_FULL BIT(25)
+#define DSIM_RX_DATA_EMPTY BIT(24)
+#define DSIM_SFR_HEADER_FULL BIT(23)
+#define DSIM_SFR_HEADER_EMPTY BIT(22)
+#define DSIM_SFR_PAYLOAD_FULL BIT(21)
+#define DSIM_SFR_PAYLOAD_EMPTY BIT(20)
+#define DSIM_I80_HEADER_FULL BIT(19)
+#define DSIM_I80_HEADER_EMPTY BIT(18)
+#define DSIM_I80_PAYLOAD_FULL BIT(17)
+#define DSIM_I80_PAYLOAD_EMPTY BIT(16)
+#define DSIM_SD_HEADER_FULL BIT(15)
+#define DSIM_SD_HEADER_EMPTY BIT(14)
+#define DSIM_SD_PAYLOAD_FULL BIT(13)
+#define DSIM_SD_PAYLOAD_EMPTY BIT(12)
+#define DSIM_MD_HEADER_FULL BIT(11)
+#define DSIM_MD_HEADER_EMPTY BIT(10)
+#define DSIM_MD_PAYLOAD_FULL BIT(9)
+#define DSIM_MD_PAYLOAD_EMPTY BIT(8)
+#define DSIM_RX_FIFO BIT(4)
+#define DSIM_SFR_FIFO BIT(3)
+#define DSIM_I80_FIFO BIT(2)
+#define DSIM_SD_FIFO BIT(1)
+#define DSIM_MD_FIFO BIT(0)
+
+/* DSIM_PHYACCHR */
+#define DSIM_AFC_EN BIT(14)
+#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
+
+/* DSIM_PLLCTRL */
+#define DSIM_FREQ_BAND(x) ((x) << 24)
+#define DSIM_PLL_EN BIT(23)
+#define DSIM_PLL_P(x, offset) ((x) << (offset))
+#define DSIM_PLL_M(x) ((x) << 4)
+#define DSIM_PLL_S(x) ((x) << 1)
+
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
+#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP BIT(30)
+#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP BIT(14)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
+
+#define DSI_MAX_BUS_WIDTH 4
+#define DSI_NUM_VIRTUAL_CHANNELS 4
+#define DSI_TX_FIFO_SIZE 2048
+#define DSI_RX_FIFO_SIZE 256
+#define DSI_XFER_TIMEOUT_MS 100
+#define DSI_RX_FIFO_EMPTY 0x30800002
+
+#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
+
+static const char *const clk_names[5] = {
+ "bus_clk",
+ "sclk_mipi",
+ "phyclk_mipidphy0_bitclkdiv8",
+ "phyclk_mipidphy0_rxclkesc0",
+ "sclk_rgb_vclk_to_dsim0"
+};
+
+enum samsung_dsim_transfer_type {
+ EXYNOS_DSI_TX,
+ EXYNOS_DSI_RX,
+};
+
+enum reg_idx {
+ DSIM_STATUS_REG, /* Status register */
+ DSIM_SWRST_REG, /* Software reset register */
+ DSIM_CLKCTRL_REG, /* Clock control register */
+ DSIM_TIMEOUT_REG, /* Time out register */
+ DSIM_CONFIG_REG, /* Configuration register */
+ DSIM_ESCMODE_REG, /* Escape mode register */
+ DSIM_MDRESOL_REG,
+ DSIM_MVPORCH_REG, /* Main display Vporch register */
+ DSIM_MHPORCH_REG, /* Main display Hporch register */
+ DSIM_MSYNC_REG, /* Main display sync area register */
+ DSIM_INTSRC_REG, /* Interrupt source register */
+ DSIM_INTMSK_REG, /* Interrupt mask register */
+ DSIM_PKTHDR_REG, /* Packet Header FIFO register */
+ DSIM_PAYLOAD_REG, /* Payload FIFO register */
+ DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_FIFOCTRL_REG, /* FIFO status and control register */
+ DSIM_PLLCTRL_REG, /* PLL control register */
+ DSIM_PHYCTRL_REG,
+ DSIM_PHYTIMING_REG,
+ DSIM_PHYTIMING1_REG,
+ DSIM_PHYTIMING2_REG,
+ NUM_REGS
+};
+
+static const unsigned int exynos_reg_ofs[] = {
+ [DSIM_STATUS_REG] = 0x00,
+ [DSIM_SWRST_REG] = 0x04,
+ [DSIM_CLKCTRL_REG] = 0x08,
+ [DSIM_TIMEOUT_REG] = 0x0c,
+ [DSIM_CONFIG_REG] = 0x10,
+ [DSIM_ESCMODE_REG] = 0x14,
+ [DSIM_MDRESOL_REG] = 0x18,
+ [DSIM_MVPORCH_REG] = 0x1c,
+ [DSIM_MHPORCH_REG] = 0x20,
+ [DSIM_MSYNC_REG] = 0x24,
+ [DSIM_INTSRC_REG] = 0x2c,
+ [DSIM_INTMSK_REG] = 0x30,
+ [DSIM_PKTHDR_REG] = 0x34,
+ [DSIM_PAYLOAD_REG] = 0x38,
+ [DSIM_RXFIFO_REG] = 0x3c,
+ [DSIM_FIFOCTRL_REG] = 0x44,
+ [DSIM_PLLCTRL_REG] = 0x4c,
+ [DSIM_PHYCTRL_REG] = 0x5c,
+ [DSIM_PHYTIMING_REG] = 0x64,
+ [DSIM_PHYTIMING1_REG] = 0x68,
+ [DSIM_PHYTIMING2_REG] = 0x6c,
+};
+
+static const unsigned int exynos5433_reg_ofs[] = {
+ [DSIM_STATUS_REG] = 0x04,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_CONFIG_REG] = 0x18,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
+enum reg_value_idx {
+ RESET_TYPE,
+ PLL_TIMER,
+ STOP_STATE_CNT,
+ PHYCTRL_ULPS_EXIT,
+ PHYCTRL_VREG_LP,
+ PHYCTRL_SLEW_UP,
+ PHYTIMING_LPX,
+ PHYTIMING_HS_EXIT,
+ PHYTIMING_CLK_PREPARE,
+ PHYTIMING_CLK_ZERO,
+ PHYTIMING_CLK_POST,
+ PHYTIMING_CLK_TRAIL,
+ PHYTIMING_HS_PREPARE,
+ PHYTIMING_HS_ZERO,
+ PHYTIMING_HS_TRAIL
+};
+
+static const unsigned int reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
+};
+
+static const unsigned int exynos5422_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
+};
+
+static const unsigned int exynos5433_reg_values[] = {
+ [RESET_TYPE] = DSIM_FUNCRST,
+ [PLL_TIMER] = 22200,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
+ [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
+ [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
+static const unsigned int imx8mm_dsim_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = 0,
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x26),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x08),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
+};
+
+static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x58,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 5,
+ .max_freq = 1500,
+ .wait_for_reset = 0,
+ .num_bits_resol = 12,
+ .pll_p_offset = 13,
+ .reg_values = exynos5433_reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1500,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .pll_p_offset = 13,
+ .reg_values = exynos5422_reg_values,
+};
+
+static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 2100,
+ .wait_for_reset = 0,
+ .num_bits_resol = 12,
+ /*
+ * Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus
+ * downstream driver - drivers/gpu/drm/bridge/sec-dsim.c
+ */
+ .pll_p_offset = 14,
+ .reg_values = imx8mm_dsim_reg_values,
+};
+
+static const struct samsung_dsim_driver_data *
+samsung_dsim_types[DSIM_TYPE_COUNT] = {
+ [DSIM_TYPE_EXYNOS3250] = &exynos3_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS4210] = &exynos4_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
+ [DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
+};
+
+static inline struct samsung_dsim *host_to_dsi(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct samsung_dsim, dsi_host);
+}
+
+static inline struct samsung_dsim *bridge_to_dsi(struct drm_bridge *b)
+{
+ return container_of(b, struct samsung_dsim, bridge);
+}
+
+static inline void samsung_dsim_write(struct samsung_dsim *dsi,
+ enum reg_idx idx, u32 val)
+{
+ writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static inline u32 samsung_dsim_read(struct samsung_dsim *dsi, enum reg_idx idx)
+{
+ return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static void samsung_dsim_wait_for_reset(struct samsung_dsim *dsi)
+{
+ if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
+ return;
+
+ dev_err(dsi->dev, "timeout waiting for reset\n");
+}
+
+static void samsung_dsim_reset(struct samsung_dsim *dsi)
+{
+ u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
+
+ reinit_completion(&dsi->completed);
+ samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
+}
+
+#ifndef MHZ
+#define MHZ (1000 * 1000)
+#endif
+
+static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
+ unsigned long fin,
+ unsigned long fout,
+ u8 *p, u16 *m, u8 *s)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ unsigned long best_freq = 0;
+ u32 min_delta = 0xffffffff;
+ u8 p_min, p_max;
+ u8 _p, best_p;
+ u16 _m, best_m;
+ u8 _s, best_s;
+
+ p_min = DIV_ROUND_UP(fin, (12 * MHZ));
+ p_max = fin / (6 * MHZ);
+
+ for (_p = p_min; _p <= p_max; ++_p) {
+ for (_s = 0; _s <= 5; ++_s) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * (_p << _s);
+ do_div(tmp, fin);
+ _m = tmp;
+ if (_m < 41 || _m > 125)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p);
+ if (tmp < 500 * MHZ ||
+ tmp > driver_data->max_freq * MHZ)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p << _s);
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_p = _p;
+ best_m = _m;
+ best_s = _s;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
+ }
+
+ if (best_freq) {
+ *p = best_p;
+ *m = best_m;
+ *s = best_s;
+ }
+
+ return best_freq;
+}
+
+static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
+ unsigned long freq)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ unsigned long fin, fout;
+ int timeout;
+ u8 p, s;
+ u16 m;
+ u32 reg;
+
+ fin = dsi->pll_clk_rate;
+ fout = samsung_dsim_pll_find_pms(dsi, fin, freq, &p, &m, &s);
+ if (!fout) {
+ dev_err(dsi->dev,
+ "failed to find PLL PMS for requested frequency\n");
+ return 0;
+ }
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
+
+ writel(driver_data->reg_values[PLL_TIMER],
+ dsi->reg_base + driver_data->plltmr_reg);
+
+ reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
+ DSIM_PLL_M(m) | DSIM_PLL_S(s);
+
+ if (driver_data->has_freqband) {
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ int band;
+
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "band %d\n", band);
+
+ reg |= DSIM_FREQ_BAND(band);
+ }
+
+ samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
+
+ timeout = 1000;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "PLL failed to stabilize\n");
+ return 0;
+ }
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ } while ((reg & DSIM_PLL_STABLE) == 0);
+
+ return fout;
+}
+
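+/*
+ * Program the HS PLL and derive the byte and escape clocks from it: the byte
+ * clock is the HS clock divided by 8, and the escape clock prescaler is
+ * chosen so that the escape clock does not exceed 20 MHz.
+ */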
+static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
+{
+ unsigned long hs_clk, byte_clk, esc_clk;
+ unsigned long esc_div;
+ u32 reg;
+
+ hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate);
+ if (!hs_clk) {
+ dev_err(dsi->dev, "failed to configure DSI PLL\n");
+ return -EFAULT;
+ }
+
+ byte_clk = hs_clk / 8;
+ esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
+ esc_clk = byte_clk / esc_div;
+
+ if (esc_clk > 20 * MHZ) {
+ ++esc_div;
+ esc_clk = byte_clk / esc_div;
+ }
+
+ dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
+ hs_clk, byte_clk, esc_clk);
+
+ reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
+ | DSIM_ESC_PRESCALER(esc_div)
+ | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
+ | DSIM_BYTE_CLK_SRC(0)
+ | DSIM_TX_REQUEST_HSCLK;
+ samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
+
+ return 0;
+}
+
+static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ const unsigned int *reg_values = driver_data->reg_values;
+ u32 reg;
+
+ if (driver_data->has_freqband)
+ return;
+
+ /* B D-PHY: D-PHY Master & Slave Analog Block control */
+ reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
+ reg_values[PHYCTRL_SLEW_UP];
+ samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg);
+
+ /*
+ * T LPX: Transmitted length of any Low-Power state period
+ * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+ * burst
+ */
+ reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
+ samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg);
+
+ /*
+ * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Clock.
+ * T CLK_POST: Time that the transmitter continues to send HS clock
+ * after the last associated Data Lane has transitioned to LP Mode
+ * Interval is defined as the period from the end of T HS-TRAIL to
+ * the beginning of T CLK-TRAIL
+ * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+ * the last payload clock bit of a HS transmission burst
+ */
+ reg = reg_values[PHYTIMING_CLK_PREPARE] |
+ reg_values[PHYTIMING_CLK_ZERO] |
+ reg_values[PHYTIMING_CLK_POST] |
+ reg_values[PHYTIMING_CLK_TRAIL];
+
+ samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg);
+
+ /*
+ * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Sync sequence.
+ * T HS-TRAIL: Time that the transmitter drives the flipped differential
+ * state after last payload data bit of a HS transmission burst
+ */
+ reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
+ reg_values[PHYTIMING_HS_TRAIL];
+ samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg);
+}
+
+static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
+{
+ u32 reg;
+
+ reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
+ | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
+
+ reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
+ reg &= ~DSIM_PLL_EN;
+ samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
+}
+
+static void samsung_dsim_enable_lane(struct samsung_dsim *dsi, u32 lane)
+{
+ u32 reg = samsung_dsim_read(dsi, DSIM_CONFIG_REG);
+
+ reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
+ DSIM_LANE_EN(lane));
+ samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
+}
+
+static int samsung_dsim_init_link(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int timeout;
+ u32 reg;
+ u32 lanes_mask;
+
+ /* Initialize FIFO pointers */
+ reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+ reg &= ~0x1f;
+ samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
+
+ usleep_range(9000, 11000);
+
+ reg |= 0x1f;
+ samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
+ usleep_range(9000, 11000);
+
+ /* DSI configuration */
+ reg = 0;
+
+ /*
+ * The first bit of mode_flags specifies display configuration.
+ * If this bit is set (MIPI_DSI_MODE_VIDEO), the host operates in video
+ * mode, otherwise it operates in command mode.
+ */
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg |= DSIM_VIDEO_MODE;
+
+ /*
+ * The user manual states that the following bits are ignored in
+ * command mode.
+ */
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
+ reg |= DSIM_MFLUSH_VS;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ reg |= DSIM_SYNC_INFORM;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ reg |= DSIM_BURST_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
+ reg |= DSIM_AUTO_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ reg |= DSIM_HSE_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ reg |= DSIM_HFP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ reg |= DSIM_HBP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ reg |= DSIM_HSA_DISABLE_MODE;
+ }
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ reg |= DSIM_EOT_DISABLE;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
+ break;
+ default:
+ dev_err(dsi->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Use non-continuous clock mode if the peripheral requests it and the
+ * host controller supports it.
+ *
+ * In non-continuous clock mode, the host controller turns off the
+ * HS clock between high-speed transmissions to reduce power
+ * consumption.
+ */
+ if (driver_data->has_clklane_stop &&
+ dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ reg |= DSIM_CLKLANE_STOP;
+ samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
+
+ lanes_mask = BIT(dsi->lanes) - 1;
+ samsung_dsim_enable_lane(dsi, lanes_mask);
+
+ /* Check that the clock and data lanes are in the stop state */
+ timeout = 100;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "waiting for bus lanes timed out\n");
+ return -EFAULT;
+ }
+
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
+ != DSIM_STOP_STATE_DAT(lanes_mask))
+ continue;
+ } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
+
+ reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+ samsung_dsim_write(dsi, DSIM_TIMEOUT_REG, reg);
+
+ return 0;
+}
+
+static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
+{
+ struct drm_display_mode *m = &dsi->mode;
+ unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ u32 reg;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg = DSIM_CMD_ALLOW(0xf)
+ | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
+ | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
+ samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg);
+
+ reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
+ | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
+ samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
+
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
+ samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
+ }
+ reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
+ DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
+
+ samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
+}
+
+static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
+{
+ u32 reg;
+
+ reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
+ if (enable)
+ reg |= DSIM_MAIN_STAND_BY;
+ else
+ reg &= ~DSIM_MAIN_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+}
+
+static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+{
+ int timeout = 2000;
+
+ do {
+ u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+
+ if (!(reg & DSIM_SFR_HEADER_FULL))
+ return 0;
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+ } while (--timeout);
+
+ return -ETIMEDOUT;
+}
+
+static void samsung_dsim_set_cmd_lpm(struct samsung_dsim *dsi, bool lpm)
+{
+ u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ if (lpm)
+ v |= DSIM_CMD_LPDT_LP;
+ else
+ v &= ~DSIM_CMD_LPDT_LP;
+
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v);
+}
+
+static void samsung_dsim_force_bta(struct samsung_dsim *dsi)
+{
+ u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ v |= DSIM_FORCE_BTA;
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v);
+}
+
+static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ struct device *dev = dsi->dev;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload = pkt->payload + xfer->tx_done;
+ u16 length = pkt->payload_length - xfer->tx_done;
+ bool first = !xfer->tx_done;
+ u32 reg;
+
+ dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
+ xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (length > DSI_TX_FIFO_SIZE)
+ length = DSI_TX_FIFO_SIZE;
+
+ xfer->tx_done += length;
+
+ /* Send payload */
+ while (length >= 4) {
+ reg = get_unaligned_le32(payload);
+ samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg);
+ payload += 4;
+ length -= 4;
+ }
+
+ reg = 0;
+ switch (length) {
+ case 3:
+ reg |= payload[2] << 16;
+ fallthrough;
+ case 2:
+ reg |= payload[1] << 8;
+ fallthrough;
+ case 1:
+ reg |= payload[0];
+ samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg);
+ break;
+ }
+
+ /* Send packet header */
+ if (!first)
+ return;
+
+ reg = get_unaligned_le32(pkt->header);
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
+
+ if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
+ dsi->state & DSIM_STATE_CMD_LPM)) {
+ samsung_dsim_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
+ dsi->state ^= DSIM_STATE_CMD_LPM;
+ }
+
+ samsung_dsim_write(dsi, DSIM_PKTHDR_REG, reg);
+
+ if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
+ samsung_dsim_force_bta(dsi);
+}
+
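+/*
+ * Drain the RX FIFO into the transfer's rx_payload buffer. The first FIFO
+ * word carries the response header: short read responses are handled inline,
+ * long responses provide the payload length for the reads that follow.
+ */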
+static void samsung_dsim_read_from_fifo(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ u8 *payload = xfer->rx_payload + xfer->rx_done;
+ bool first = !xfer->rx_done;
+ struct device *dev = dsi->dev;
+ u16 length;
+ u32 reg;
+
+ if (first) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+
+ switch (reg & 0x3f) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->rx_len >= 2) {
+ payload[1] = reg >> 16;
+ ++xfer->rx_done;
+ }
+ fallthrough;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ payload[0] = reg >> 8;
+ ++xfer->rx_done;
+ xfer->rx_len = xfer->rx_done;
+ xfer->result = 0;
+ goto clear_fifo;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_err(dev, "DSI Error Report: 0x%04x\n", (reg >> 8) & 0xffff);
+ xfer->result = 0;
+ goto clear_fifo;
+ }
+
+ length = (reg >> 8) & 0xffff;
+ if (length > xfer->rx_len) {
+ dev_err(dev,
+ "response too long (%u > %u bytes), stripping\n",
+ length, xfer->rx_len);
+ length = xfer->rx_len;
+ } else if (length < xfer->rx_len) {
+ xfer->rx_len = length;
+ }
+ }
+
+ length = xfer->rx_len - xfer->rx_done;
+ xfer->rx_done += length;
+
+ /* Receive payload */
+ while (length >= 4) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ payload[0] = (reg >> 0) & 0xff;
+ payload[1] = (reg >> 8) & 0xff;
+ payload[2] = (reg >> 16) & 0xff;
+ payload[3] = (reg >> 24) & 0xff;
+ payload += 4;
+ length -= 4;
+ }
+
+ if (length) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ switch (length) {
+ case 3:
+ payload[2] = (reg >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ payload[1] = (reg >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ payload[0] = reg & 0xff;
+ }
+ }
+
+ if (xfer->rx_done == xfer->rx_len)
+ xfer->result = 0;
+
+clear_fifo:
+ length = DSI_RX_FIFO_SIZE / 4;
+ do {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ if (reg == DSI_RX_FIFO_EMPTY)
+ break;
+ } while (--length);
+}
+
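+/*
+ * Push the transfer at the head of the queue into the TX FIFO. Transfers
+ * with no payload and no RX data are completed immediately and the next
+ * queued transfer is started.
+ */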
+static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
+{
+ unsigned long flags;
+ struct samsung_dsim_transfer *xfer;
+ bool start = false;
+
+again:
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
+
+ samsung_dsim_send_to_fifo(dsi, xfer);
+
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
+
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (start)
+ goto again;
+}
+
+static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
+{
+ struct samsung_dsim_transfer *xfer;
+ unsigned long flags;
+ bool start = true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return false;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ dev_dbg(dsi->dev,
+ "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
+ xfer->rx_done);
+
+ if (xfer->tx_done != xfer->packet.payload_length)
+ return true;
+
+ if (xfer->rx_done != xfer->rx_len)
+ samsung_dsim_read_from_fifo(dsi, xfer);
+
+ if (xfer->rx_done != xfer->rx_len)
+ return true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (!xfer->rx_len)
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ return start;
+}
+
+static void samsung_dsim_remove_transfer(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ unsigned long flags;
+ bool start;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (!list_empty(&dsi->transfer_list) &&
+ xfer == list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list)) {
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ if (start)
+ samsung_dsim_transfer_start(dsi);
+ return;
+ }
+
+ list_del_init(&xfer->list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+}
+
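+/*
+ * Queue a transfer and, if the queue was idle, kick off the FIFO write; the
+ * interrupt handler completes the transfer, and a DSI_XFER_TIMEOUT_MS
+ * timeout removes it from the queue if the hardware never responds.
+ */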
+static int samsung_dsim_transfer(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ unsigned long flags;
+ bool stopped;
+
+ xfer->tx_done = 0;
+ xfer->rx_done = 0;
+ xfer->result = -ETIMEDOUT;
+ init_completion(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ stopped = list_empty(&dsi->transfer_list);
+ list_add_tail(&xfer->list, &dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (stopped)
+ samsung_dsim_transfer_start(dsi);
+
+ wait_for_completion_timeout(&xfer->completed,
+ msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
+ if (xfer->result == -ETIMEDOUT) {
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+
+ samsung_dsim_remove_transfer(dsi, xfer);
+ dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
+ (int)pkt->payload_length, pkt->payload);
+ return -ETIMEDOUT;
+ }
+
+ /* Also covers hardware timeout condition */
+ return xfer->result;
+}
+
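+/*
+ * Interrupt handler: acknowledge the status bits, unmask the transfer
+ * interrupts once the software reset has completed, and advance the
+ * transfer queue on RX-done / FIFO-empty / PLL-stable events.
+ */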
+static irqreturn_t samsung_dsim_irq(int irq, void *dev_id)
+{
+ struct samsung_dsim *dsi = dev_id;
+ u32 status;
+
+ status = samsung_dsim_read(dsi, DSIM_INTSRC_REG);
+ if (!status) {
+ static unsigned long j;
+
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(dsi->dev, "spurious interrupt\n");
+ return IRQ_HANDLED;
+ }
+ samsung_dsim_write(dsi, DSIM_INTSRC_REG, status);
+
+ if (status & DSIM_INT_SW_RST_RELEASE) {
+ unsigned long mask = ~(DSIM_INT_RX_DONE |
+ DSIM_INT_SFR_FIFO_EMPTY |
+ DSIM_INT_SFR_HDR_FIFO_EMPTY |
+ DSIM_INT_RX_ECC_ERR |
+ DSIM_INT_SW_RST_RELEASE);
+ samsung_dsim_write(dsi, DSIM_INTMSK_REG, mask);
+ complete(&dsi->completed);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
+ DSIM_INT_PLL_STABLE)))
+ return IRQ_HANDLED;
+
+ if (samsung_dsim_transfer_finish(dsi))
+ samsung_dsim_transfer_start(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static void samsung_dsim_enable_irq(struct samsung_dsim *dsi)
+{
+ enable_irq(dsi->irq);
+
+ if (dsi->te_gpio)
+ enable_irq(gpiod_to_irq(dsi->te_gpio));
+}
+
+static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
+{
+ if (dsi->te_gpio)
+ disable_irq(gpiod_to_irq(dsi->te_gpio));
+
+ disable_irq(dsi->irq);
+}
+
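+/*
+ * One-time link bring-up: reset the IP, enable interrupts, start the PLL and
+ * clocks, and program the D-PHY timings and link configuration. Further
+ * calls are no-ops while DSIM_STATE_INITIALIZED is set.
+ */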
+static int samsung_dsim_init(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+
+ if (dsi->state & DSIM_STATE_INITIALIZED)
+ return 0;
+
+ samsung_dsim_reset(dsi);
+ samsung_dsim_enable_irq(dsi);
+
+ if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
+ samsung_dsim_enable_lane(dsi, BIT(dsi->lanes) - 1);
+
+ samsung_dsim_enable_clock(dsi);
+ if (driver_data->wait_for_reset)
+ samsung_dsim_wait_for_reset(dsi);
+ samsung_dsim_set_phy_ctrl(dsi);
+ samsung_dsim_init_link(dsi);
+
+ dsi->state |= DSIM_STATE_INITIALIZED;
+
+ return 0;
+}
+
+static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dsi->dev);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable DSI device.\n");
+ return;
+ }
+
+ dsi->state |= DSIM_STATE_ENABLED;
+
+ /*
+ * For Exynos-DSIM, the downstream bridge or panel expects the host
+ * initialization to happen during the DSI transfer.
+ */
+ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return;
+ }
+}
+
+static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ samsung_dsim_set_display_mode(dsi);
+ samsung_dsim_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+}
+
+static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+ dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
+}
+
+static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ samsung_dsim_set_display_enable(dsi, false);
+
+ dsi->state &= ~DSIM_STATE_ENABLED;
+ pm_runtime_put_sync(dsi->dev);
+}
+
+/*
+ * This pixel output format list is referenced from
+ * AN13573 i.MX 8/RT MIPI DSI/CSI-2, Rev. 0, 21 March 2022
+ * 3.7.4 Pixel formats
+ * Table 14. DSI pixel packing formats
+ */
+static const u32 samsung_dsim_pixel_output_fmts[] = {
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static bool samsung_dsim_pixel_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ if (fmt == MEDIA_BUS_FMT_FIXED)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(samsung_dsim_pixel_output_fmts); i++) {
+ if (samsung_dsim_pixel_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+samsung_dsim_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ if (!samsung_dsim_pixel_output_fmt_supported(output_fmt))
+ /*
+ * Some bridge/display drivers are still not able to pass the
+ * correct format, so handle those pipelines by falling back
+ * to the default format until the supported formats are finalized.
+ */
+ output_fmt = MEDIA_BUS_FMT_RGB888_1X24;
+
+ input_fmts[0] = output_fmt;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int samsung_dsim_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+
+ /*
+ * The i.MX8M Mini/Nano glue logic between LCDIF and DSIM
+ * inverts the HS/VS/DE sync signal polarity, therefore, while
+ * i.MX 8M Mini Applications Processor Reference Manual Rev. 3, 11/2020
+ * 13.6.3.5.2 RGB interface
+ * i.MX 8M Nano Applications Processor Reference Manual Rev. 2, 07/2022
+ * 13.6.2.7.2 RGB interface
+ * both claim "Vsync, Hsync, and VDEN are active high signals.", the
+ * LCDIF must generate inverted HS/VS/DE signals, i.e. active LOW.
+ *
+ * The i.MX8M Plus glue logic between LCDIFv3 and DSIM does not
+ * implement the same behavior, therefore LCDIFv3 must generate
+ * HS/VS/DE signals active HIGH.
+ */
+ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) {
+ adjusted_mode->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ } else if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MP) {
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ }
+
+ return 0;
+}
+
+static void samsung_dsim_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ drm_mode_copy(&dsi->mode, adjusted_mode);
+}
+
+static int samsung_dsim_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ flags);
+}
+
+static const struct drm_bridge_funcs samsung_dsim_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = samsung_dsim_atomic_get_input_bus_fmts,
+ .atomic_check = samsung_dsim_atomic_check,
+ .atomic_pre_enable = samsung_dsim_atomic_pre_enable,
+ .atomic_enable = samsung_dsim_atomic_enable,
+ .atomic_disable = samsung_dsim_atomic_disable,
+ .atomic_post_disable = samsung_dsim_atomic_post_disable,
+ .mode_set = samsung_dsim_mode_set,
+ .attach = samsung_dsim_attach,
+};
+
+static irqreturn_t samsung_dsim_te_irq_handler(int irq, void *dev_id)
+{
+ struct samsung_dsim *dsi = (struct samsung_dsim *)dev_id;
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+
+ if (pdata->host_ops && pdata->host_ops->te_irq_handler)
+ return pdata->host_ops->te_irq_handler(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static int samsung_dsim_register_te_irq(struct samsung_dsim *dsi, struct device *dev)
+{
+ int te_gpio_irq;
+ int ret;
+
+ dsi->te_gpio = devm_gpiod_get_optional(dev, "te", GPIOD_IN);
+ if (!dsi->te_gpio)
+ return 0;
+ else if (IS_ERR(dsi->te_gpio))
+ return dev_err_probe(dev, PTR_ERR(dsi->te_gpio), "failed to get te GPIO\n");
+
+ te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
+
+ ret = request_threaded_irq(te_gpio_irq, samsung_dsim_te_irq_handler, NULL,
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
+ if (ret) {
+ dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+ gpiod_put(dsi->te_gpio);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+ struct device *dev = dsi->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote;
+ struct drm_panel *panel;
+ int ret;
+
+ /*
+ * Devices can also be child nodes when we also control that device
+ * through the upstream device (i.e. MIPI-DCS for a MIPI-DSI device).
+ *
+ * Look up a child node of the given parent that isn't either port
+ * or ports.
+ */
+ for_each_available_child_of_node(np, remote) {
+ if (of_node_name_eq(remote, "port") ||
+ of_node_name_eq(remote, "ports"))
+ continue;
+
+ goto of_find_panel_or_bridge;
+ }
+
+ /*
+ * of_graph_get_remote_node() produces a noisy error message if the port
+ * node isn't found and the absence of the port is a legitimate case here,
+ * so first we silently check whether a graph is present in the
+ * device-tree node.
+ */
+ if (!of_graph_is_present(np))
+ return -ENODEV;
+
+ remote = of_graph_get_remote_node(np, 1, 0);
+
+of_find_panel_or_bridge:
+ if (!remote)
+ return -ENODEV;
+
+ panel = of_drm_find_panel(remote);
+ if (!IS_ERR(panel)) {
+ dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel);
+ } else {
+ dsi->out_bridge = of_drm_find_bridge(remote);
+ if (!dsi->out_bridge)
+ dsi->out_bridge = ERR_PTR(-EINVAL);
+ }
+
+ of_node_put(remote);
+
+ if (IS_ERR(dsi->out_bridge)) {
+ ret = PTR_ERR(dsi->out_bridge);
+ DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
+ return ret;
+ }
+
+ DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
+
+ drm_bridge_add(&dsi->bridge);
+
+ /*
+ * This is a temporary solution and should be replaced by a more
+ * generic mechanism.
+ *
+ * If the attached panel device is a command-mode one, the DSI host
+ * should register a TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ ret = samsung_dsim_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(dsi, device);
+ if (ret)
+ return ret;
+ }
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ return 0;
+}
+
+static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
+{
+ if (dsi->te_gpio) {
+ free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+ gpiod_put(dsi->te_gpio);
+ }
+}
+
+static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+
+ dsi->out_bridge = NULL;
+
+ if (pdata->host_ops && pdata->host_ops->detach)
+ pdata->host_ops->detach(dsi, device);
+
+ samsung_dsim_unregister_te_irq(dsi);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ struct samsung_dsim_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = samsung_dsim_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops samsung_dsim_ops = {
+ .attach = samsung_dsim_host_attach,
+ .detach = samsung_dsim_host_detach,
+ .transfer = samsung_dsim_host_transfer,
+};
+
+static int samsung_dsim_of_read_u32(const struct device_node *np,
+ const char *propname, u32 *out_value)
+{
+ int ret = of_property_read_u32(np, propname, out_value);
+
+ if (ret < 0)
+ pr_err("%pOF: failed to get '%s' property\n", np, propname);
+
+ return ret;
+}
+
+static int samsung_dsim_parse_dt(struct samsung_dsim *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency",
+ &dsi->pll_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency",
+ &dsi->burst_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency",
+ &dsi->esc_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int generic_dsim_register_host(struct samsung_dsim *dsi)
+{
+ return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static void generic_dsim_unregister_host(struct samsung_dsim *dsi)
+{
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+}
+
+static const struct samsung_dsim_host_ops generic_dsim_host_ops = {
+ .register_host = generic_dsim_register_host,
+ .unregister_host = generic_dsim_unregister_host,
+};
+
+static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_high = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
+
+static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_low = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+int samsung_dsim_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct samsung_dsim *dsi;
+ int ret, i;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ init_completion(&dsi->completed);
+ spin_lock_init(&dsi->transfer_lock);
+ INIT_LIST_HEAD(&dsi->transfer_list);
+
+ dsi->dsi_host.ops = &samsung_dsim_ops;
+ dsi->dsi_host.dev = dev;
+
+ dsi->dev = dev;
+ dsi->plat_data = of_device_get_match_data(dev);
+ dsi->driver_data = samsung_dsim_types[dsi->plat_data->hw_type];
+
+ dsi->supplies[0].supply = "vddcore";
+ dsi->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
+ dsi->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
+ sizeof(*dsi->clks), GFP_KERNEL);
+ if (!dsi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < dsi->driver_data->num_clks; i++) {
+ dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
+ if (IS_ERR(dsi->clks[i])) {
+ if (strcmp(clk_names[i], "sclk_mipi") == 0) {
+ dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
+ }
+
+ dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
+ return PTR_ERR(dsi->clks[i]);
+ }
+ }
+
+ dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dsi->reg_base))
+ return PTR_ERR(dsi->reg_base);
+
+ dsi->phy = devm_phy_optional_get(dev, "dsim");
+ if (IS_ERR(dsi->phy)) {
+ dev_info(dev, "failed to get dsim phy\n");
+ return PTR_ERR(dsi->phy);
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0)
+ return dsi->irq;
+
+ ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
+ samsung_dsim_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(dev), dsi);
+ if (ret) {
+ dev_err(dev, "failed to request dsi irq\n");
+ return ret;
+ }
+
+ ret = samsung_dsim_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dsi);
+
+ pm_runtime_enable(dev);
+
+ dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ /* DE_LOW: i.MX8M Mini/Nano LCDIF-DSIM glue logic inverts HS/VS/DE */
+ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM)
+ dsi->bridge.timings = &samsung_dsim_bridge_timings_de_low;
+ else
+ dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high;
+
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host)
+ ret = dsi->plat_data->host_ops->register_host(dsi);
+
+ if (ret)
+ goto err_disable_runtime;
+
+ return 0;
+
+err_disable_runtime:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(samsung_dsim_probe);
+
+int samsung_dsim_remove(struct platform_device *pdev)
+{
+ struct samsung_dsim *dsi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->unregister_host)
+ dsi->plat_data->host_ops->unregister_host(dsi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(samsung_dsim_remove);
+
+static int __maybe_unused samsung_dsim_suspend(struct device *dev)
+{
+ struct samsung_dsim *dsi = dev_get_drvdata(dev);
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ usleep_range(10000, 20000);
+
+ if (dsi->state & DSIM_STATE_INITIALIZED) {
+ dsi->state &= ~DSIM_STATE_INITIALIZED;
+
+ samsung_dsim_disable_clock(dsi);
+
+ samsung_dsim_disable_irq(dsi);
+ }
+
+ dsi->state &= ~DSIM_STATE_CMD_LPM;
+
+ phy_power_off(dsi->phy);
+
+ for (i = driver_data->num_clks - 1; i > -1; i--)
+ clk_disable_unprepare(dsi->clks[i]);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0)
+ dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+
+ return 0;
+}
+
+static int __maybe_unused samsung_dsim_resume(struct device *dev)
+{
+ struct samsung_dsim *dsi = dev_get_drvdata(dev);
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < driver_data->num_clks; i++) {
+ ret = clk_prepare_enable(dsi->clks[i]);
+ if (ret < 0)
+ goto err_clk;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable phy %d\n", ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ while (--i > -1)
+ clk_disable_unprepare(dsi->clks[i]);
+ regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+
+ return ret;
+}
+
+const struct dev_pm_ops samsung_dsim_pm_ops = {
+ SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+EXPORT_SYMBOL_GPL(samsung_dsim_pm_ops);
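
The probe/remove helpers and PM ops are exported so that SoC glue code can reuse them with its own plat_data. A hedged sketch of such a wrapper follows; all "example_*" names and the compatible string are hypothetical, and an out-of-file glue would define its own host ops rather than reference the static generic_dsim_host_ops used here for brevity:

static const struct samsung_dsim_plat_data example_dsim_pdata = {
	.hw_type = DSIM_TYPE_IMX8MM,		/* any supported hw_type */
	.host_ops = &generic_dsim_host_ops,	/* or the glue's own host_ops */
};

static const struct of_device_id example_dsim_of_match[] = {
	{ .compatible = "vendor,example-mipi-dsim", .data = &example_dsim_pdata },
	{ /* sentinel */ }
};

static struct platform_driver example_dsim_driver = {
	.probe = samsung_dsim_probe,
	.remove = samsung_dsim_remove,
	.driver = {
		.name = "example-mipi-dsim",
		.pm = &samsung_dsim_pm_ops,
		.of_match_table = example_dsim_of_match,
	},
};
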
+
+static const struct samsung_dsim_plat_data samsung_dsim_imx8mm_pdata = {
+ .hw_type = DSIM_TYPE_IMX8MM,
+ .host_ops = &generic_dsim_host_ops,
+};
+
+static const struct samsung_dsim_plat_data samsung_dsim_imx8mp_pdata = {
+ .hw_type = DSIM_TYPE_IMX8MP,
+ .host_ops = &generic_dsim_host_ops,
+};
+
+static const struct of_device_id samsung_dsim_of_match[] = {
+ {
+ .compatible = "fsl,imx8mm-mipi-dsim",
+ .data = &samsung_dsim_imx8mm_pdata,
+ },
+ {
+ .compatible = "fsl,imx8mp-mipi-dsim",
+ .data = &samsung_dsim_imx8mp_pdata,
+ },
+ { /* sentinel. */ }
+};
+MODULE_DEVICE_TABLE(of, samsung_dsim_of_match);
+
+static struct platform_driver samsung_dsim_driver = {
+ .probe = samsung_dsim_probe,
+ .remove = samsung_dsim_remove,
+ .driver = {
+ .name = "samsung-dsim",
+ .owner = THIS_MODULE,
+ .pm = &samsung_dsim_pm_ops,
+ .of_match_table = samsung_dsim_of_match,
+ },
+};
+
+module_platform_driver(samsung_dsim_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Samsung MIPI DSIM controller bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 878fb7d3732b..ef66461e7f7c 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -171,7 +171,6 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
- struct regulator_bulk_data supplies[2];
bool sink_is_hdmi;
/*
* Mutex protects audio and video functions from interfering
@@ -240,12 +239,12 @@ static void sii902x_reset(struct sii902x *sii902x)
if (!sii902x->reset_gpio)
return;
- gpiod_set_value(sii902x->reset_gpio, 1);
+ gpiod_set_value_cansleep(sii902x->reset_gpio, 1);
/* The datasheet says treset-min = 100us. Make it 150us to be sure. */
usleep_range(150, 200);
- gpiod_set_value(sii902x->reset_gpio, 0);
+ gpiod_set_value_cansleep(sii902x->reset_gpio, 0);
}
static enum drm_connector_status sii902x_detect(struct sii902x *sii902x)
@@ -1066,12 +1065,12 @@ static int sii902x_init(struct sii902x *sii902x)
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
-static int sii902x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii902x_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *endpoint;
struct sii902x *sii902x;
+ static const char * const supplies[] = {"iovcc", "cvcc12"};
int ret;
ret = i2c_check_functionality(client->adapter,
@@ -1117,32 +1116,17 @@ static int sii902x_probe(struct i2c_client *client,
sii902x->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
if (!sii902x->next_bridge)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Failed to find remote bridge\n");
}
mutex_init(&sii902x->mutex);
- sii902x->supplies[0].supply = "iovcc";
- sii902x->supplies[1].supply = "cvcc12";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies), supplies);
if (ret < 0)
- return ret;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
- if (ret < 0) {
- dev_err_probe(dev, ret, "Failed to enable supplies");
- return ret;
- }
+ return dev_err_probe(dev, ret, "Failed to enable supplies");
- ret = sii902x_init(sii902x);
- if (ret < 0) {
- regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
- }
-
- return ret;
+ return sii902x_init(sii902x);
}
static void sii902x_remove(struct i2c_client *client)
@@ -1152,8 +1136,6 @@ static void sii902x_remove(struct i2c_client *client)
i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
- regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
}
static const struct of_device_id sii902x_dt_ids[] = {
@@ -1169,7 +1151,7 @@ static const struct i2c_device_id sii902x_i2c_ids[] = {
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
static struct i2c_driver sii902x_driver = {
- .probe = sii902x_probe,
+ .probe_new = sii902x_probe,
.remove = sii902x_remove,
.driver = {
.name = "sii902x",
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 5b3061d4b5c3..2d17f227867b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -867,11 +867,6 @@ static int sii9234_init_resources(struct sii9234 *ctx,
return 0;
}
-static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct sii9234, bridge);
-}
-
static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
@@ -886,8 +881,7 @@ static const struct drm_bridge_funcs sii9234_bridge_funcs = {
.mode_valid = sii9234_mode_valid,
};
-static int sii9234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii9234_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct sii9234 *ctx;
@@ -961,7 +955,7 @@ static struct i2c_driver sii9234_driver = {
.name = "sii9234",
.of_match_table = sii9234_dt_match,
},
- .probe = sii9234_probe,
+ .probe_new = sii9234_probe,
.remove = sii9234_remove,
.id_table = sii9234_id,
};
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 511982a1cedb..b96d03cd878d 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2284,8 +2284,7 @@ static const struct drm_bridge_funcs sii8620_bridge_funcs = {
.mode_valid = sii8620_mode_valid,
};
-static int sii8620_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii8620_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct sii8620 *ctx;
@@ -2379,7 +2378,7 @@ static struct i2c_driver sii8620_driver = {
.name = "sii8620",
.of_match_table = of_match_ptr(sii8620_dt_match),
},
- .probe = sii8620_probe,
+ .probe_new = sii8620_probe,
.remove = sii8620_remove,
.id_table = sii8620_id,
};
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 2c5c5211bdab..d85d9ee463b8 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -202,11 +202,9 @@ static int simple_bridge_probe(struct platform_device *pdev)
sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(sbridge->enable)) {
- if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
- return PTR_ERR(sbridge->enable);
- }
+ if (IS_ERR(sbridge->enable))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sbridge->enable),
+ "Unable to retrieve enable GPIO\n");
/* Register the bridge. */
sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
@@ -218,13 +216,11 @@ static int simple_bridge_probe(struct platform_device *pdev)
return 0;
}
-static int simple_bridge_remove(struct platform_device *pdev)
+static void simple_bridge_remove(struct platform_device *pdev)
{
struct simple_bridge *sbridge = platform_get_drvdata(pdev);
drm_bridge_remove(&sbridge->bridge);
-
- return 0;
}
/*
@@ -301,7 +297,7 @@ MODULE_DEVICE_TABLE(of, simple_bridge_match);
static struct platform_driver simple_bridge_driver = {
.probe = simple_bridge_probe,
- .remove = simple_bridge_remove,
+ .remove_new = simple_bridge_remove,
.driver = {
.name = "simple-bridge",
.of_match_table = simple_bridge_match,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 4efb62bcdb63..67b8d17a722a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -584,13 +584,11 @@ err:
return ret;
}
-static int snd_dw_hdmi_remove(struct platform_device *pdev)
+static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
snd_card_free(dw->card);
-
- return 0;
}
#if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN)
@@ -625,7 +623,7 @@ static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove = snd_dw_hdmi_remove,
+ .remove_new = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
.pm = PM_OPS,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index c8f44bcb298a..9389ce526eb1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -296,19 +296,17 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
return 0;
}
-static int dw_hdmi_cec_remove(struct platform_device *pdev)
+static void dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
cec_unregister_adapter(cec->adap);
-
- return 0;
}
static struct platform_driver dw_hdmi_cec_driver = {
.probe = dw_hdmi_cec_probe,
- .remove = dw_hdmi_cec_remove,
+ .remove_new = dw_hdmi_cec_remove,
.driver = {
.name = "dw-hdmi-cec",
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
index 557966239677..423762da2ab4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -172,18 +172,16 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(dw->audio_pdev);
}
-static int snd_dw_hdmi_remove(struct platform_device *pdev)
+static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
platform_device_unregister(dw->audio_pdev);
-
- return 0;
}
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove = snd_dw_hdmi_remove,
+ .remove_new = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index a2f0860b20bb..26c187d20d97 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -193,6 +193,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
struct hdmi_codec_pdata pdata;
struct platform_device *platform;
+ memset(&pdata, 0, sizeof(pdata));
pdata.ops = &dw_hdmi_i2s_ops;
pdata.i2s = 1;
pdata.max_i2s_channels = 8;
@@ -215,18 +216,16 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int snd_dw_hdmi_remove(struct platform_device *pdev)
+static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct platform_device *platform = dev_get_drvdata(&pdev->dev);
platform_device_unregister(platform);
-
- return 0;
}
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove = snd_dw_hdmi_remove,
+ .remove_new = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index aa51c61a78c7..603bb3c51027 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1426,9 +1426,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
if (dw_hdmi_support_scdc(hdmi, display)) {
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
- drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
+ drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
else
- drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 0);
+ drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
}
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
@@ -2116,7 +2116,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
/* Enabled Scrambling in the Sink */
- drm_scdc_set_scrambling(hdmi->ddc, 1);
+ drm_scdc_set_scrambling(&hdmi->connector, 1);
/*
* To activate the scrambler feature, you must ensure
@@ -2132,7 +2132,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
HDMI_MC_SWRSTZ);
- drm_scdc_set_scrambling(hdmi->ddc, 0);
+ drm_scdc_set_scrambling(&hdmi->connector, 0);
}
}
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 0b6a28436885..77f7f7f54757 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -229,6 +229,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
+ ctx->bridge.pre_enable_prev_first = true;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 53259c12d777..f85654f1b104 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -369,6 +369,7 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
+ ctx->bridge.pre_enable_prev_first = true;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 2a58eb271f70..91f7cb56a654 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1264,10 +1264,10 @@ static int tc_dsi_rx_enable(struct tc_data *tc)
u32 value;
int ret;
- regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 5);
- regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 5);
- regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 5);
- regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 25);
+ regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 25);
+ regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 25);
+ regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 25);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
@@ -1896,10 +1896,10 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
"failed to create dsi device\n");
tc->dsi = dsi;
-
dsi->lanes = dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
@@ -2029,7 +2029,7 @@ static void tc_clk_disable(void *data)
clk_disable_unprepare(refclk);
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc_data *tc;
@@ -2209,7 +2209,7 @@ static struct i2c_driver tc358767_driver = {
.of_match_table = tc358767_of_ids,
},
.id_table = tc358767_i2c_ids,
- .probe = tc_probe,
+ .probe_new = tc_probe,
.remove = tc_remove,
};
module_i2c_driver(tc358767_driver);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 4c4b77ce8aba..7c0cbe84611b 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1018,8 +1017,7 @@ static int tc358768_get_regulators(struct tc358768_priv *priv)
return ret;
}
-static int tc358768_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tc358768_i2c_probe(struct i2c_client *client)
{
struct tc358768_priv *priv;
struct device *dev = &client->dev;
@@ -1085,7 +1083,7 @@ static struct i2c_driver tc358768_driver = {
.of_match_table = tc358768_of_ids,
},
.id_table = tc358768_i2c_ids,
- .probe = tc358768_i2c_probe,
+ .probe_new = tc358768_i2c_probe,
.remove = tc358768_i2c_remove,
};
module_i2c_driver(tc358768_driver);
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 3ceb0e9f9bdc..19316994ddd1 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -23,7 +23,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -637,7 +636,7 @@ static int tc_attach_host(struct tc_data *tc)
return 0;
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc_data *tc;
@@ -729,7 +728,7 @@ static struct i2c_driver tc358775_driver = {
.of_match_table = tc358775_of_ids,
},
.id_table = tc358775_i2c_ids,
- .probe = tc_probe,
+ .probe_new = tc_probe,
.remove = tc_remove,
};
module_i2c_driver(tc358775_driver);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index e21078b2f8b5..d4c1a601bbb5 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -230,13 +230,11 @@ static int thc63_probe(struct platform_device *pdev)
return 0;
}
-static int thc63_remove(struct platform_device *pdev)
+static void thc63_remove(struct platform_device *pdev)
{
struct thc63_dev *thc63 = platform_get_drvdata(pdev);
drm_bridge_remove(&thc63->bridge);
-
- return 0;
}
static const struct of_device_id thc63_match[] = {
@@ -247,7 +245,7 @@ MODULE_DEVICE_TABLE(of, thc63_match);
static struct platform_driver thc63_driver = {
.probe = thc63_probe,
- .remove = thc63_remove,
+ .remove_new = thc63_remove,
.driver = {
.name = "thc63lvd1024",
.of_match_table = thc63_match,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 7ba9467fff12..75286c9afbb9 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -346,7 +346,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
/* Deassert reset */
gpiod_set_value_cansleep(ctx->enable_gpio, 1);
- usleep_range(1000, 1100);
+ usleep_range(10000, 11000);
/* Get the LVDS format from the bridge state. */
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -642,7 +642,9 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
dsi->lanes = dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
+ MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
@@ -653,9 +655,9 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
return 0;
}
-static int sn65dsi83_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sn65dsi83_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
enum sn65dsi83_model model;
struct sn65dsi83 *ctx;
@@ -698,8 +700,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
drm_bridge_add(&ctx->bridge);
ret = sn65dsi83_host_attach(ctx);
- if (ret)
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to attach DSI host\n");
goto err_remove_bridge;
+ }
return 0;
@@ -730,7 +734,7 @@ static const struct of_device_id sn65dsi83_match_table[] = {
MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
static struct i2c_driver sn65dsi83_driver = {
- .probe = sn65dsi83_probe,
+ .probe_new = sn65dsi83_probe,
.remove = sn65dsi83_remove,
.id_table = sn65dsi83_id,
.driver = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 05f8756d1aaf..7a748785c545 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -363,7 +363,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
/* td2: min 100 us after regulators before enabling the GPIO */
usleep_range(100, 110);
- gpiod_set_value(pdata->enable_gpio, 1);
+ gpiod_set_value_cansleep(pdata->enable_gpio, 1);
/*
* If we have a reference clock we can enable communication w/ the
@@ -386,7 +386,7 @@ static int __maybe_unused ti_sn65dsi86_suspend(struct device *dev)
if (pdata->refclk)
ti_sn65dsi86_disable_comms(pdata);
- gpiod_set_value(pdata->enable_gpio, 0);
+ gpiod_set_value_cansleep(pdata->enable_gpio, 0);
ret = regulator_bulk_disable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
if (ret)
@@ -1852,8 +1852,7 @@ static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata)
pdata->supplies);
}
-static int ti_sn65dsi86_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
@@ -1952,7 +1951,7 @@ static struct i2c_driver ti_sn65dsi86_driver = {
.of_match_table = ti_sn65dsi86_match_table,
.pm = &ti_sn65dsi86_pm_ops,
},
- .probe = ti_sn65dsi86_probe,
+ .probe_new = ti_sn65dsi86_probe,
.id_table = ti_sn65dsi86_id,
};
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index b9635abbad16..ab63225cd635 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -355,11 +355,9 @@ static int tfp410_probe(struct platform_device *pdev)
return tfp410_init(&pdev->dev, false);
}
-static int tfp410_remove(struct platform_device *pdev)
+static void tfp410_remove(struct platform_device *pdev)
{
tfp410_fini(&pdev->dev);
-
- return 0;
}
static const struct of_device_id tfp410_match[] = {
@@ -370,7 +368,7 @@ MODULE_DEVICE_TABLE(of, tfp410_match);
static struct platform_driver tfp410_platform_driver = {
.probe = tfp410_probe,
- .remove = tfp410_remove,
+ .remove_new = tfp410_remove,
.driver = {
.name = "tfp410-bridge",
.of_match_table = tfp410_match,
@@ -379,8 +377,7 @@ static struct platform_driver tfp410_platform_driver = {
#if IS_ENABLED(CONFIG_I2C)
/* There is currently no i2c functionality. */
-static int tfp410_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tfp410_i2c_probe(struct i2c_client *client)
{
int reg;
@@ -411,7 +408,7 @@ static struct i2c_driver tfp410_i2c_driver = {
.of_match_table = of_match_ptr(tfp410_match),
},
.id_table = tfp410_i2c_ids,
- .probe = tfp410_i2c_probe,
+ .probe_new = tfp410_i2c_probe,
.remove = tfp410_i2c_remove,
};
#endif /* IS_ENABLED(CONFIG_I2C) */
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index f5741b45ca07..8a165be1a821 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -161,9 +161,14 @@ static void dp_aux_ep_dev_release(struct device *dev)
kfree(aux_ep_with_data);
}
+static int dp_aux_ep_dev_modalias(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return of_device_uevent_modalias(dev, env);
+}
+
static struct device_type dp_aux_device_type_type = {
.groups = dp_aux_ep_dev_groups,
- .uevent = of_device_uevent_modalias,
+ .uevent = dp_aux_ep_dev_modalias,
.release = dp_aux_ep_dev_release,
};
diff --git a/drivers/gpu/drm/display/drm_dp_aux_dev.c b/drivers/gpu/drm/display/drm_dp_aux_dev.c
index 098e482e65a2..29555b9f03c8 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_dev.c
@@ -330,7 +330,7 @@ int drm_dp_aux_dev_init(void)
{
int res;
- drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
+ drm_dp_aux_dev_class = class_create("drm_dp_aux_dev");
if (IS_ERR(drm_dp_aux_dev_class)) {
return PTR_ERR(drm_dp_aux_dev_class);
}
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 51a46689cda7..38dab76ae69e 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
- if (!port)
+ if (!port) {
+ drm_dbg_kms(mgr->dev,
+ "VCPI %d for port %p not in topology, not creating a payload\n",
+ payload->vcpi, payload->port);
+ payload->vc_start_slot = -1;
return 0;
+ }
if (mgr->payload_count == 0)
mgr->next_start_slot = mst_state->start_slot;
@@ -3337,7 +3342,8 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
* drm_dp_remove_payload() - Remove an MST payload
* @mgr: Manager to use.
* @mst_state: The MST atomic state
- * @payload: The payload to write
+ * @old_payload: The payload with its old state
+ * @new_payload: The payload to write
*
* Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
* the starting time slots of all other payloads which would have been shifted towards the start of
@@ -3345,33 +3351,37 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
*/
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
- struct drm_dp_mst_atomic_payload *payload)
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload)
{
struct drm_dp_mst_atomic_payload *pos;
bool send_remove = false;
/* We failed to make the payload, so nothing to do */
- if (payload->vc_start_slot == -1)
+ if (new_payload->vc_start_slot == -1)
return;
mutex_lock(&mgr->lock);
- send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
+ send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
mutex_unlock(&mgr->lock);
if (send_remove)
- drm_dp_destroy_payload_step1(mgr, mst_state, payload);
+ drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
else
drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
- payload->vcpi);
+ new_payload->vcpi);
list_for_each_entry(pos, &mst_state->payloads, next) {
- if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
- pos->vc_start_slot -= payload->time_slots;
+ if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
+ pos->vc_start_slot -= old_payload->time_slots;
}
- payload->vc_start_slot = -1;
+ new_payload->vc_start_slot = -1;
mgr->payload_count--;
- mgr->next_start_slot -= payload->time_slots;
+ mgr->next_start_slot -= old_payload->time_slots;
+
+ if (new_payload->delete)
+ drm_dp_mst_put_port_malloc(new_payload->port);
}
EXPORT_SYMBOL(drm_dp_remove_payload);
@@ -3641,6 +3651,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
+
+ memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
+ memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
}
out_unlock:
@@ -3853,7 +3866,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
- goto out;
+ goto out_clear_reply;
/* Multi-packet message transmission, don't clear the reply */
if (!msg->have_eomt)
@@ -4327,7 +4340,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
if (!payload->delete) {
- drm_dp_mst_put_port_malloc(port);
payload->pbn = 0;
payload->delete = true;
topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
@@ -5353,27 +5365,52 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
+ * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
+ * state vtable so that the private object state returned is that of a MST
+ * topology object.
+ *
+ * Returns:
+ *
+ * The old MST topology state, or NULL if there's no topology state for this MST mgr
+ * in the global atomic state
+ */
+struct drm_dp_mst_topology_state *
+drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_private_state *old_priv_state =
+ drm_atomic_get_old_private_obj_state(state, &mgr->base);
+
+ return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
+
+/**
* drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
- * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
+ * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* Returns:
*
- * The MST topology state, or NULL if there's no topology state for this MST mgr
+ * The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_private_state *priv_state =
+ struct drm_private_state *new_priv_state =
drm_atomic_get_new_private_obj_state(state, &mgr->base);
- return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
+ return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
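
A hedged sketch of how a driver is expected to pair the old/new topology-state getters around the reworked drm_dp_remove_payload(); the caller-side names are assumptions, and drm_atomic_get_mst_payload_state() is assumed to be the existing per-port payload lookup helper:

/*
 * Illustrative only: removing an MST payload from a driver's disable path.
 * "mgr" and "port" come from the caller.
 */
static void example_mst_remove_payload(struct drm_atomic_state *state,
				       struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(state, mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(state, mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, port);

	/* Time slots to shift the remaining payloads by come from the old payload. */
	drm_dp_remove_payload(mgr, new_mst_state, old_payload, new_payload);
}
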
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 0264abe55278..faf5e9efa7d3 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
- connector->hdr_sink_metadata.hdmi_type1.eotf)) {
- DRM_DEBUG_KMS("EOTF Not Supported\n");
- return -EINVAL;
- }
+ connector->hdr_sink_metadata.hdmi_type1.eotf))
+ DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
if (err < 0)
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index c3ad4ab2b456..6d2f244e5830 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -26,6 +26,8 @@
#include <linux/delay.h>
#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
#include <drm/drm_print.h>
/**
@@ -140,7 +142,7 @@ EXPORT_SYMBOL(drm_scdc_write);
/**
* drm_scdc_get_scrambling_status - what is status of scrambling?
- * @adapter: I2C adapter for DDC channel
+ * @connector: connector
*
* Reads the scrambler status over SCDC, and checks the
* scrambling status.
@@ -148,14 +150,16 @@ EXPORT_SYMBOL(drm_scdc_write);
* Returns:
* True if the scrambling is enabled, false otherwise.
*/
-bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
+bool drm_scdc_get_scrambling_status(struct drm_connector *connector)
{
u8 status;
int ret;
- ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
+ ret = drm_scdc_readb(connector->ddc, SCDC_SCRAMBLER_STATUS, &status);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to read scrambling status: %d\n", ret);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Failed to read scrambling status: %d\n",
+ connector->base.id, connector->name, ret);
return false;
}
@@ -165,7 +169,7 @@ EXPORT_SYMBOL(drm_scdc_get_scrambling_status);
/**
* drm_scdc_set_scrambling - enable scrambling
- * @adapter: I2C adapter for DDC channel
+ * @connector: connector
* @enable: bool to indicate if scrambling is to be enabled/disabled
*
* Writes the TMDS config register over SCDC channel, and:
@@ -175,14 +179,17 @@ EXPORT_SYMBOL(drm_scdc_get_scrambling_status);
* Returns:
* True if scrambling is set/reset successfully, false otherwise.
*/
-bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
+bool drm_scdc_set_scrambling(struct drm_connector *connector,
+ bool enable)
{
u8 config;
int ret;
- ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
+ ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ connector->base.id, connector->name, ret);
return false;
}
@@ -191,9 +198,11 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
else
config &= ~SCDC_SCRAMBLING_ENABLE;
- ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
+ ret = drm_scdc_writeb(connector->ddc, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to enable scrambling: %d\n", ret);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Failed to enable scrambling: %d\n",
+ connector->base.id, connector->name, ret);
return false;
}
@@ -203,7 +212,7 @@ EXPORT_SYMBOL(drm_scdc_set_scrambling);
/**
* drm_scdc_set_high_tmds_clock_ratio - set TMDS clock ratio
- * @adapter: I2C adapter for DDC channel
+ * @connector: connector
 * @set: set or reset the high clock ratio
*
*
@@ -230,14 +239,17 @@ EXPORT_SYMBOL(drm_scdc_set_scrambling);
* Returns:
* True if write is successful, false otherwise.
*/
-bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
+bool drm_scdc_set_high_tmds_clock_ratio(struct drm_connector *connector,
+ bool set)
{
u8 config;
int ret;
- ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
+ ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ connector->base.id, connector->name, ret);
return false;
}
@@ -246,9 +258,11 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
else
config &= ~SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
- ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
+ ret = drm_scdc_writeb(connector->ddc, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to set TMDS clock ratio: %d\n", ret);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Failed to set TMDS clock ratio: %d\n",
+ connector->base.id, connector->name, ret);
return false;
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f197f59f6d99..b4c6ffc438da 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -880,7 +880,7 @@ EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
@@ -902,7 +902,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* not connected.
*/
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
@@ -968,7 +968,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* not connected.
*/
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
@@ -985,6 +985,66 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
+ * @state: Atomic state
+ * @encoder: The encoder to fetch the crtc state for
+ *
+ * This function finds and returns the crtc that was connected to @encoder
+ * as specified by the @state.
+ *
+ * Returns: The old crtc connected to @encoder, or NULL if the encoder is
+ * not connected.
+ */
+struct drm_crtc *
+drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_old_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_crtc_for_encoder);
+
+/**
+ * drm_atomic_get_new_crtc_for_encoder - Get new crtc for an encoder
+ * @state: Atomic state
+ * @encoder: The encoder to fetch the crtc state for
+ *
+ * This function finds and returns the crtc that will be connected to @encoder
+ * as specified by the @state.
+ *
+ * Returns: The new crtc connected to @encoder, or NULL if the encoder is
+ * not connected.
+ */
+struct drm_crtc *
+drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder);
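
A minimal sketch of the intended use from a bridge atomic hook; the callback and its owner are hypothetical:

/*
 * Illustrative only: looking up the CRTC that will drive this encoder
 * from an atomic_enable callback.
 */
static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_bridge_state *old_bridge_state)
{
	struct drm_atomic_state *state = old_bridge_state->base.state;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	crtc = drm_atomic_get_new_crtc_for_encoder(state, bridge->encoder);
	if (!crtc)
		return;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state))
		return;

	/* Program hardware timings from crtc_state->adjusted_mode here. */
}
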
+
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -1070,6 +1130,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+ drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)
@@ -1117,7 +1178,7 @@ EXPORT_SYMBOL(drm_atomic_get_bridge_state);
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
@@ -1139,7 +1200,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
@@ -1756,8 +1817,8 @@ EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
__drm_state_dump(dev, &p, true);
@@ -1766,14 +1827,13 @@ static int drm_state_info(struct seq_file *m, void *data)
}
/* any use in debugfs files to dump individual planes/crtc/etc? */
-static const struct drm_info_list drm_atomic_debugfs_list[] = {
+static const struct drm_debugfs_info drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
void drm_atomic_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d579fd8f7cb8..2c2c9caf0be5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1511,6 +1511,47 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
+/*
+ * For atomic updates which touch just a single CRTC, calculate the time of the
+ * next vblank, and inform all the fences of the deadline.
+ */
+static void set_fence_deadline(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ ktime_t vbltime = 0;
+ int i;
+
+ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ ktime_t v;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!new_crtc_state->active)
+ continue;
+
+ if (drm_crtc_next_vblank_start(crtc, &v))
+ continue;
+
+ if (!vbltime || ktime_before(v, vbltime))
+ vbltime = v;
+ }
+
+ /* If no CRTCs updated, then nothing to do: */
+ if (!vbltime)
+ return;
+
+ for_each_new_plane_in_state (state, plane, new_plane_state, i) {
+ if (!new_plane_state->fence)
+ continue;
+ dma_fence_set_deadline(new_plane_state->fence, vbltime);
+ }
+}
+
/**
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
@@ -1540,6 +1581,8 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_plane_state *new_plane_state;
int i, ret;
+ set_fence_deadline(dev, state);
+
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
if (!new_plane_state->fence)
continue;
@@ -2702,6 +2745,11 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
funcs->atomic_update(plane, old_state);
+
+ if (!disabling && funcs->atomic_enable) {
+ if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
+ funcs->atomic_enable(plane, old_state);
+ }
}
}
@@ -2762,6 +2810,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
struct drm_plane_state *new_plane_state =
drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
+ bool disabling;
plane_funcs = plane->helper_private;
@@ -2771,12 +2820,18 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
WARN_ON(new_plane_state->crtc &&
new_plane_state->crtc != crtc);
- if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
- plane_funcs->atomic_disable)
+ disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
+
+ if (disabling && plane_funcs->atomic_disable) {
plane_funcs->atomic_disable(plane, old_state);
- else if (new_plane_state->crtc ||
- drm_atomic_plane_disabling(old_plane_state, new_plane_state))
+ } else if (new_plane_state->crtc || disabling) {
plane_funcs->atomic_update(plane, old_state);
+
+ if (!disabling && plane_funcs->atomic_enable) {
+ if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
+ plane_funcs->atomic_enable(plane, old_state);
+ }
+ }
}
if (crtc_funcs && crtc_funcs->atomic_flush)
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index dfb57217253b..784e63d70a42 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -482,6 +482,130 @@ void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connecto
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_margins_reset);
/**
+ * drm_atomic_helper_connector_tv_reset - Resets Analog TV connector properties
+ * @connector: DRM connector
+ *
+ * Resets the analog TV properties attached to a connector
+ */
+void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ struct drm_connector_state *state = connector->state;
+ struct drm_property *prop;
+ uint64_t val;
+
+ prop = dev->mode_config.tv_mode_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.mode = val;
+
+ if (cmdline->tv_mode_specified)
+ state->tv.mode = cmdline->tv_mode;
+
+ prop = dev->mode_config.tv_select_subconnector_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.select_subconnector = val;
+
+ prop = dev->mode_config.tv_subconnector_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.subconnector = val;
+
+ prop = dev->mode_config.tv_brightness_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.brightness = val;
+
+ prop = dev->mode_config.tv_contrast_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.contrast = val;
+
+ prop = dev->mode_config.tv_flicker_reduction_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.flicker_reduction = val;
+
+ prop = dev->mode_config.tv_overscan_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.overscan = val;
+
+ prop = dev->mode_config.tv_saturation_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.saturation = val;
+
+ prop = dev->mode_config.tv_hue_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.hue = val;
+
+ drm_atomic_helper_connector_tv_margins_reset(connector);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset);
+
+/**
+ * drm_atomic_helper_connector_tv_check - Validate an analog TV connector state
+ * @connector: DRM Connector
+ * @state: the DRM State object
+ *
+ * Checks the state object to see if the requested state is valid for an
+ * analog TV connector.
+ *
+ * Return:
+ * %0 for success, a negative error code on error.
+ */
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+
+ crtc = new_conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state)
+ return -EINVAL;
+
+ if (old_conn_state->tv.mode != new_conn_state->tv.mode)
+ crtc_state->mode_changed = true;
+
+ if (old_conn_state->tv.margins.left != new_conn_state->tv.margins.left ||
+ old_conn_state->tv.margins.right != new_conn_state->tv.margins.right ||
+ old_conn_state->tv.margins.top != new_conn_state->tv.margins.top ||
+ old_conn_state->tv.margins.bottom != new_conn_state->tv.margins.bottom ||
+ old_conn_state->tv.mode != new_conn_state->tv.mode ||
+ old_conn_state->tv.brightness != new_conn_state->tv.brightness ||
+ old_conn_state->tv.contrast != new_conn_state->tv.contrast ||
+ old_conn_state->tv.flicker_reduction != new_conn_state->tv.flicker_reduction ||
+ old_conn_state->tv.overscan != new_conn_state->tv.overscan ||
+ old_conn_state->tv.saturation != new_conn_state->tv.saturation ||
+ old_conn_state->tv.hue != new_conn_state->tv.hue)
+ crtc_state->connectors_changed = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_check);
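
Since the helper's signature matches &drm_connector_helper_funcs.atomic_check, a driver without extra constraints can plug it in directly; a hedged sketch with a hypothetical driver name:

/* Illustrative only: analog TV connector using the new helper as its atomic_check. */
static const struct drm_connector_helper_funcs example_tv_connector_helper_funcs = {
	.atomic_check = drm_atomic_helper_connector_tv_check,
	/* .get_modes and friends as usual for the driver */
};
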
+
+/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
* @connector: connector object
* @state: atomic connector state
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c06d0639d552..d867e7f9f2cd 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -698,6 +698,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->tv.margins.top = val;
} else if (property == config->tv_bottom_margin_property) {
state->tv.margins.bottom = val;
+ } else if (property == config->legacy_tv_mode_property) {
+ state->tv.legacy_mode = val;
} else if (property == config->tv_mode_property) {
state->tv.mode = val;
} else if (property == config->tv_brightness_property) {
@@ -808,6 +810,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->tv.margins.top;
} else if (property == config->tv_bottom_margin_property) {
*val = state->tv.margins.bottom;
+ } else if (property == config->legacy_tv_mode_property) {
+ *val = state->tv.legacy_mode;
} else if (property == config->tv_mode_property) {
*val = state->tv.mode;
} else if (property == config->tv_brightness_property) {
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index b4c8cab7158c..6e74de833466 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -450,8 +450,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
int i, n = 0;
int ret = 0;
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] calculating normalized zpos values\n",
+ crtc->base.id, crtc->name);
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
@@ -469,9 +469,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
goto done;
}
states[n++] = plane_state;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] processing zpos value %d\n",
- plane->base.id, plane->name,
- plane_state->zpos);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] processing zpos value %d\n",
+ plane->base.id, plane->name, plane_state->zpos);
}
sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);
@@ -480,8 +479,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
plane = states[i]->plane;
states[i]->normalized_zpos = i;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] normalized zpos value %d\n",
- plane->base.id, plane->name, i);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] normalized zpos value %d\n",
+ plane->base.id, plane->name, i);
}
crtc_state->zpos_changed = true;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 1545c50fd1c8..c3d69af02e79 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -153,6 +153,45 @@
* situation when probing.
*/
+/**
+ * DOC: dsi bridge operations
+ *
+ * DSI host interfaces are expected to be implemented as bridges rather than
+ * encoders, however there are a few aspects of their operation that need to
+ * be defined in order to provide a consistent interface.
+ *
+ * A DSI host should keep the PHY powered down until the pre_enable operation is
+ * called. All lanes are in an undefined idle state up to this point, and it
+ * must not be assumed that it is LP-11.
+ * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
+ * clock lane to either LP-11 or HS depending on the mode_flag
+ * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
+ *
+ * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
+ * called before the DSI host. If the DSI peripheral requires LP-11 and/or
+ * the clock lane to be in HS mode prior to pre_enable, then it can set the
+ * &pre_enable_prev_first flag to request the pre_enable (and
+ * post_disable) order to be altered to enable the DSI host first.
+ *
+ * Either the CRTC being enabled, or the DSI host enable operation should switch
+ * the host to actively transmitting video on the data lanes.
+ *
+ * The reverse also applies. The DSI host disable operation or stopping the CRTC
+ * should stop transmitting video, and the data lanes should return to the LP-11
+ * state. The DSI host &post_disable operation should disable the PHY.
+ * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
+ * bridge &post_disable will be called before the DSI host's post_disable.
+ *
+ * Whilst it is valid to call &host_transfer prior to pre_enable or after
+ * post_disable, the exact state of the lanes is undefined at this point. The
+ * DSI host should initialise the interface, transmit the data, and then disable
+ * the interface again.
+ *
+ * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
+ * implemented, it therefore needs to be handled entirely within the DSI Host
+ * driver.
+ */
+
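A minimal sketch of how a downstream DSI peripheral driver might use the pre_enable_prev_first flag described above; the hypothetical_* names are illustrative only and error handling is trimmed:

static int hypothetical_dsi_panel_probe(struct mipi_dsi_device *dsi)
{
	struct hypothetical_panel *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->bridge.funcs = &hypothetical_panel_bridge_funcs;
	ctx->bridge.of_node = dsi->dev.of_node;
	/* Ask the core to pre_enable the DSI host (the previous bridge)
	 * first, so the lanes are already in LP-11 when this bridge's
	 * pre_enable runs.
	 */
	ctx->bridge.pre_enable_prev_first = true;
	drm_bridge_add(&ctx->bridge);

	return mipi_dsi_attach(dsi);
}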
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
@@ -510,61 +549,6 @@ drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
- * drm_bridge_chain_disable - disables all bridges in the encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
- * chain, starting from the last bridge to the first. These are called before
- * calling the encoder's prepare op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_disable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *iter;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->disable)
- iter->funcs->disable(iter);
-
- if (iter == bridge)
- break;
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_disable);
-
-/**
- * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
- * encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.post_disable op for all the bridges in the
- * encoder chain, starting from the first bridge to the last. These are called
- * after completing the encoder's prepare op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_post_disable);
-
-/**
* drm_bridge_chain_mode_set - set proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
@@ -594,61 +578,6 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
- * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
- * encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
- * chain, starting from the last bridge to the first. These are called
- * before calling the encoder's commit op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *iter;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->pre_enable)
- iter->funcs->pre_enable(iter);
-
- if (iter == bridge)
- break;
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
-
-/**
- * drm_bridge_chain_enable - enables all bridges in the encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
- * chain, starting from the first bridge to the last. These are called
- * after completing the encoder's commit op.
- *
- * Note that the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_enable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_enable);
-
-/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
@@ -691,6 +620,25 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
+static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
+{
+ if (old_state && bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
+ bridge->funcs->post_disable(bridge);
+ }
+}
+
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
@@ -702,36 +650,86 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
* starting from the first bridge to the last. These are called after completing
* &drm_encoder_helper_funcs.atomic_disable
*
+ * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
+ * bridge will be called before the previous one to reverse the @pre_enable
+ * calling direction.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
+ struct drm_bridge *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
+
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable) {
- struct drm_bridge_state *old_bridge_state;
+ limit = NULL;
+
+ if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
+ next = list_next_entry(bridge, chain_node);
+
+ if (next->pre_enable_prev_first) {
+ /* The next bridge requested that the previous one
+ * be enabled first, so it must be disabled last.
+ */
+ limit = next;
+
+ /* Find the next bridge that has NOT requested
+ * prev to be enabled first / disabled last
+ */
+ list_for_each_entry_from(next, &encoder->bridge_chain,
+ chain_node) {
+ if (next->pre_enable_prev_first) {
+ next = list_prev_entry(next, chain_node);
+ limit = next;
+ break;
+ }
+ }
+
+ /* Call these bridges in reverse order */
+ list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
+ chain_node) {
+ if (next == bridge)
+ break;
+
+ drm_atomic_bridge_call_post_disable(next,
+ old_state);
+ }
+ }
+ }
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
+ drm_atomic_bridge_call_post_disable(bridge, old_state);
- bridge->funcs->atomic_post_disable(bridge,
- old_bridge_state);
- } else if (bridge->funcs->post_disable) {
- bridge->funcs->post_disable(bridge);
- }
+ if (limit)
+ /* Jump all bridges that we have already post_disabled */
+ bridge = limit;
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
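As an illustration (not part of the patch), consider a chain encoder -> A -> B -> C where only B sets pre_enable_prev_first, for example A being a DSI host and B its peripheral:

/*
 * Default order                 With B.pre_enable_prev_first set
 * pre_enable:   C, B, A         pre_enable:   C, A, B
 * post_disable: A, B, C         post_disable: B, A, C
 *
 * A is brought up before B and torn down after it; C is unaffected.
 */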
+static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
+{
+ if (old_state && bridge->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->pre_enable) {
+ bridge->funcs->pre_enable(bridge);
+ }
+}
+
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
@@ -743,32 +741,60 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
* starting from the last bridge to the first. These are called before calling
* &drm_encoder_helper_funcs.atomic_enable
*
+ * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
+ * previous bridge will be called before this bridge's pre_enable.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
- struct drm_bridge *iter;
+ struct drm_bridge *iter, *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
+
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable) {
- struct drm_bridge_state *old_bridge_state;
+ if (iter->pre_enable_prev_first) {
+ next = iter;
+ limit = bridge;
+ list_for_each_entry_from_reverse(next,
+ &encoder->bridge_chain,
+ chain_node) {
+ if (next == bridge)
+ break;
+
+ if (!next->pre_enable_prev_first) {
+ /* Found first bridge that does NOT
+ * request prev to be enabled first
+ */
+ limit = list_prev_entry(next, chain_node);
+ break;
+ }
+ }
+
+ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
+ /* Call pre_enable on the requested previous
+ * bridges, in chain order.
+ */
+ if (next == iter)
+ /* Reached the first bridge that requested
+ * previous bridges to be called first.
+ */
+ break;
+
+ drm_atomic_bridge_call_pre_enable(next, old_state);
+ }
+ }
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- iter);
- if (WARN_ON(!old_bridge_state))
- return;
+ drm_atomic_bridge_call_pre_enable(iter, old_state);
- iter->funcs->atomic_pre_enable(iter, old_bridge_state);
- } else if (iter->funcs->pre_enable) {
- iter->funcs->pre_enable(iter);
- }
+ if (iter->pre_enable_prev_first)
+ /* Jump all bridges that we have already pre_enabled */
+ iter = limit;
if (iter == bridge)
break;
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 1c7d936523df..19ae4a177ac3 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -128,14 +128,7 @@ static void drm_bridge_connector_hpd_cb(void *cb_data,
drm_kms_helper_hotplug_event(dev);
}
-/**
- * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
- * @connector: The DRM bridge connector
- *
- * This function enables hot-plug detection for the given bridge connector.
- * This is typically used by display drivers in their resume handler.
- */
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -145,17 +138,8 @@ void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
bridge_connector);
}
-EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
-/**
- * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
- * connector
- * @connector: The DRM bridge connector
- *
- * This function disables hot-plug detection for the given bridge connector.
- * This is typically used by display drivers in their suspend handler.
- */
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+static void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -164,7 +148,6 @@ void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
if (hpd)
drm_bridge_hpd_disable(hpd);
}
-EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
/* -----------------------------------------------------------------------------
* Bridge Connector Functions
@@ -305,6 +288,8 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
/* No need for .mode_valid(), the bridges are checked by the core. */
+ .enable_hpd = drm_bridge_connector_enable_hpd,
+ .disable_hpd = drm_bridge_connector_disable_hpd,
};
/* -----------------------------------------------------------------------------
@@ -387,10 +372,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc);
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
- if (bridge_connector->bridge_hpd) {
+ if (bridge_connector->bridge_hpd)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- drm_bridge_connector_enable_hpd(connector);
- }
else if (bridge_connector->bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 11bb59399471..7098f125b54a 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
kmem_cache_free(slab_blocks, block);
}
+static void list_insert_sorted(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *node;
+ struct list_head *head;
+
+ head = &mm->free_list[drm_buddy_block_order(block)];
+ if (list_empty(head)) {
+ list_add(&block->link, head);
+ return;
+ }
+
+ list_for_each_entry(node, head, link)
+ if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+ break;
+
+ __list_add(&block->link, node->link.prev, &node->link);
+}
+
static void mark_allocated(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
- list_add(&block->link,
- &mm->free_list[drm_buddy_block_order(block)]);
+ list_insert_sorted(mm, block);
}
static void mark_split(struct drm_buddy_block *block)
@@ -128,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
unsigned int order;
u64 root_size;
- root_size = rounddown_pow_of_two(size);
- order = ilog2(root_size) - ilog2(chunk_size);
+ order = ilog2(size) - ilog2(chunk_size);
+ root_size = chunk_size << order;
root = drm_block_alloc(mm, NULL, order, offset);
if (!root)
@@ -387,20 +405,26 @@ err_undo:
}
static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
{
struct drm_buddy_block *max_block = NULL, *node;
+ unsigned int i;
- max_block = list_first_entry_or_null(head,
- struct drm_buddy_block,
- link);
- if (!max_block)
- return NULL;
+ for (i = order; i <= mm->max_order; ++i) {
+ if (!list_empty(&mm->free_list[i])) {
+ node = list_last_entry(&mm->free_list[i],
+ struct drm_buddy_block,
+ link);
+ if (!max_block) {
+ max_block = node;
+ continue;
+ }
- list_for_each_entry(node, head, link) {
- if (drm_buddy_block_offset(node) >
- drm_buddy_block_offset(max_block))
- max_block = node;
+ if (drm_buddy_block_offset(node) >
+ drm_buddy_block_offset(max_block)) {
+ max_block = node;
+ }
+ }
}
return max_block;
@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
- unsigned int i;
+ unsigned int tmp;
int err;
- for (i = order; i <= mm->max_order; ++i) {
- if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(&mm->free_list[i]);
- if (block)
- break;
- } else {
- block = list_first_entry_or_null(&mm->free_list[i],
- struct drm_buddy_block,
- link);
- if (block)
- break;
+ if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+ block = get_maxblock(mm, order);
+ if (block)
+ /* Store the obtained block order */
+ tmp = drm_buddy_block_order(block);
+ } else {
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+ if (!list_empty(&mm->free_list[tmp])) {
+ block = list_last_entry(&mm->free_list[tmp],
+ struct drm_buddy_block,
+ link);
+ if (block)
+ break;
+ }
}
}
@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
BUG_ON(!drm_buddy_block_is_free(block));
- while (i != order) {
+ while (tmp != order) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
block = block->right;
- i--;
+ tmp--;
}
return block;
err_undo:
- if (i != order)
+ if (tmp != order)
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index fcca21e8efac..86700560fea2 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -423,8 +423,7 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -469,8 +468,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int idx;
int i;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
idx = map->offset;
@@ -570,8 +568,7 @@ EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -628,8 +625,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_map_list *r_list;
int ret;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fd67efe37c63..f6292ba0e6fc 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -198,13 +198,23 @@ void drm_client_dev_hotplug(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ if (!dev->mode_config.num_connector) {
+ drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
+ return;
+ }
+
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->hotplug)
continue;
+ if (client->hotplug_failed)
+ continue;
+
ret = client->funcs->hotplug(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (ret)
+ client->hotplug_failed = true;
}
mutex_unlock(&dev->clientlist_mutex);
}
@@ -233,21 +243,17 @@ void drm_client_dev_restore(struct drm_device *dev)
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- struct drm_device *dev = buffer->client->dev;
-
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
- if (buffer->handle)
- drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
kfree(buffer);
}
static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +275,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
if (ret)
goto err_delete;
- buffer->handle = dumb_args.handle;
- buffer->pitch = dumb_args.pitch;
-
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
+ buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
+ *handle = dumb_args.handle;
return buffer;
@@ -365,7 +370,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format)
+ u32 width, u32 height, u32 format,
+ u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +383,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
- fb_req.handle = buffer->handle;
+ fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +420,24 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
+ u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ &handle);
if (IS_ERR(buffer))
return buffer;
- ret = drm_client_buffer_addfb(buffer, width, height, format);
+ ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+ /*
+ * The handle is only needed for creating the framebuffer; destroy it
+ * again to avoid a circular dependency should anybody export the GEM
+ * object as DMA-buf. The framebuffer and our buffer structure are still
+ * holding references to the GEM object to prevent its destruction.
+ */
+ drm_mode_destroy_dumb(client->dev, handle, client->file);
+
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
@@ -480,8 +497,8 @@ EXPORT_SYMBOL(drm_client_framebuffer_flush);
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_client_dev *client;
@@ -493,14 +510,13 @@ static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_client_debugfs_list[] = {
+static const struct drm_debugfs_info drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
void drm_client_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index d553e793e673..1b12a3c201a3 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -188,10 +188,6 @@ static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_conne
prefer_non_interlace = !cmdline_mode->interlace;
again:
list_for_each_entry(mode, &connector->modes, head) {
- /* Check (optional) mode name first */
- if (!strcmp(mode->name, cmdline_mode->name))
- return mode;
-
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 547356e00341..48df7a5ea503 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,9 +33,11 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
-#include <linux/fb.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
+#include <video/cmdline.h>
+
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -154,9 +156,10 @@ EXPORT_SYMBOL(drm_get_connector_type_name);
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
- char *option = NULL;
+ const char *option;
- if (fb_get_options(connector->name, &option))
+ option = video_get_options(connector->name);
+ if (!option)
return;
if (!drm_mode_parse_command_line_for_connector(option,
@@ -565,6 +568,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
ida_free(&dev->mode_config.connector_ida, connector->index);
kfree(connector->display_info.bus_formats);
+ kfree(connector->display_info.vics);
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
@@ -984,6 +988,41 @@ static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
+static const struct drm_prop_enum_list drm_tv_mode_enum_list[] = {
+ { DRM_MODE_TV_MODE_NTSC, "NTSC" },
+ { DRM_MODE_TV_MODE_NTSC_443, "NTSC-443" },
+ { DRM_MODE_TV_MODE_NTSC_J, "NTSC-J" },
+ { DRM_MODE_TV_MODE_PAL, "PAL" },
+ { DRM_MODE_TV_MODE_PAL_M, "PAL-M" },
+ { DRM_MODE_TV_MODE_PAL_N, "PAL-N" },
+ { DRM_MODE_TV_MODE_SECAM, "SECAM" },
+};
+DRM_ENUM_NAME_FN(drm_get_tv_mode_name, drm_tv_mode_enum_list)
+
+/**
+ * drm_get_tv_mode_from_name - Translates a TV mode name into its enum value
+ * @name: TV Mode name we want to convert
+ * @len: Length of @name
+ *
+ * Translates @name into an enum drm_connector_tv_mode.
+ *
+ * Returns: the enum value on success, a negative errno otherwise.
+ */
+int drm_get_tv_mode_from_name(const char *name, size_t len)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_tv_mode_enum_list); i++) {
+ const struct drm_prop_enum_list *item = &drm_tv_mode_enum_list[i];
+
+ if (strlen(item->name) == len && !strncmp(item->name, name, len))
+ return item->type;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_get_tv_mode_from_name);
+
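A small usage sketch for the new helper; the surrounding function is hypothetical, only drm_get_tv_mode_from_name() comes from this patch:

static int hypothetical_parse_tv_mode_token(const char *token)
{
	int mode = drm_get_tv_mode_from_name(token, strlen(token));

	if (mode < 0)
		return mode;	/* -EINVAL for an unknown name */

	return mode;		/* e.g. DRM_MODE_TV_MODE_PAL_M for "PAL-M" */
}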
static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -1410,6 +1449,20 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* a firmware handled hotkey. Therefore userspace must not include the
* privacy-screen sw-state in an atomic commit unless it wants to change
* its value.
+ *
+ * left margin, right margin, top margin, bottom margin:
+ * Add margins to the connector's viewport. This is typically used to
+ * mitigate overscan on TVs.
+ *
+ * The value is the size in pixels of the black border which will be
+ * added. The attached CRTC's content will be scaled to fill the whole
+ * area inside the margin.
+ *
+ * The margins configuration might be sent to the sink, e.g. via HDMI AVI
+ * InfoFrames.
+ *
+ * Drivers can set up these properties by calling
+ * drm_mode_create_tv_margin_properties().
*/
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -1552,6 +1605,66 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* infoframe values is done through drm_hdmi_avi_infoframe_content_type().
*/
+/*
+ * TODO: Document the properties:
+ * - brightness
+ * - contrast
+ * - flicker reduction
+ * - hue
+ * - mode
+ * - overscan
+ * - saturation
+ * - select subconnector
+ */
+/**
+ * DOC: Analog TV Connector Properties
+ *
+ * TV Mode:
+ * Indicates the TV Mode used on an analog TV connector. The value
+ * of this property can be one of the following:
+ *
+ * NTSC:
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding.
+ *
+ * NTSC-443:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding, but with a color subcarrier
+ * frequency of 4.43MHz.
+ *
+ * NTSC-J:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding, but with a black level equal to
+ * the blanking level.
+ *
+ * PAL:
+ *
+ * TV Mode is CCIR System B (aka 625-lines) together with
+ * the PAL Color Encoding.
+ *
+ * PAL-M:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the PAL Color Encoding.
+ *
+ * PAL-N:
+ *
+ * TV Mode is CCIR System N together with the PAL Color
+ * Encoding, a color subcarrier frequency of 3.58MHz, the
+ * SECAM color space, and narrower channels than other PAL
+ * variants.
+ *
+ * SECAM:
+ *
+ * TV Mode is CCIR System B (aka 625-lines) together with
+ * the SECAM Color Encoding.
+ *
+ * Drivers can set up this property by calling
+ * drm_mode_create_tv_properties().
+ */
+
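A hedged sketch of how a driver could consume the property through the connector state; the enum values and state->tv.mode come from this series, the helper itself is hypothetical:

static bool hypothetical_tv_mode_is_525_lines(const struct drm_connector_state *state)
{
	switch (state->tv.mode) {
	case DRM_MODE_TV_MODE_NTSC:
	case DRM_MODE_TV_MODE_NTSC_443:
	case DRM_MODE_TV_MODE_NTSC_J:
	case DRM_MODE_TV_MODE_PAL_M:
		return true;	/* CCIR System M, 525 lines */
	default:
		return false;	/* PAL, PAL-N, SECAM: 625 lines */
	}
}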
/**
* drm_connector_attach_content_type_property - attach content-type property
* @connector: connector to attach content type property on.
@@ -1604,7 +1717,7 @@ EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
* Called by a driver's HDMI connector initialization routine, this function
* creates the TV margin properties for a given device. No need to call this
* function for an SDTV connector, it's already called from
- * drm_mode_create_tv_properties().
+ * drm_mode_create_tv_properties_legacy().
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -1639,7 +1752,7 @@ int drm_mode_create_tv_margin_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
/**
- * drm_mode_create_tv_properties - create TV specific connector properties
+ * drm_mode_create_tv_properties_legacy - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
* @modes: array of pointers to strings containing name of each format
@@ -1649,12 +1762,16 @@ EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
* responsible for allocating a list of format names and passing them to
* this routine.
*
+ * NOTE: This function registers the deprecated "mode" connector
+ * property to select the analog TV mode (i.e., NTSC, PAL, etc.). New
+ * drivers must use drm_mode_create_tv_properties() instead.
+ *
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[])
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
@@ -1690,15 +1807,17 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
if (drm_mode_create_tv_margin_properties(dev))
goto nomem;
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- if (!dev->mode_config.tv_mode_property)
- goto nomem;
+ if (num_modes) {
+ dev->mode_config.legacy_tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ if (!dev->mode_config.legacy_tv_mode_property)
+ goto nomem;
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property,
- i, modes[i]);
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.legacy_tv_mode_property,
+ i, modes[i]);
+ }
dev->mode_config.tv_brightness_property =
drm_property_create_range(dev, 0, "brightness", 0, 100);
@@ -1734,6 +1853,47 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
nomem:
return -ENOMEM;
}
+EXPORT_SYMBOL(drm_mode_create_tv_properties_legacy);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @supported_tv_modes: Bitmask of TV modes supported (See DRM_MODE_TV_MODE_*)
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int supported_tv_modes)
+{
+ struct drm_prop_enum_list tv_mode_list[DRM_MODE_TV_MODE_MAX];
+ struct drm_property *tv_mode;
+ unsigned int i, len = 0;
+
+ if (dev->mode_config.tv_mode_property)
+ return 0;
+
+ for (i = 0; i < DRM_MODE_TV_MODE_MAX; i++) {
+ if (!(supported_tv_modes & BIT(i)))
+ continue;
+
+ tv_mode_list[len].type = i;
+ tv_mode_list[len].name = drm_get_tv_mode_name(i);
+ len++;
+ }
+
+ tv_mode = drm_property_create_enum(dev, 0, "TV mode",
+ tv_mode_list, len);
+ if (!tv_mode)
+ return -ENOMEM;
+
+ dev->mode_config.tv_mode_property = tv_mode;
+
+ return drm_mode_create_tv_properties_legacy(dev, 0, NULL);
+}
EXPORT_SYMBOL(drm_mode_create_tv_properties);
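A minimal connector-init sketch using the new entry point; attaching via drm_object_attach_property() is the conventional follow-up and not something this hunk adds:

/* Hypothetical fragment, error handling trimmed. */
ret = drm_mode_create_tv_properties(dev,
				    BIT(DRM_MODE_TV_MODE_NTSC) |
				    BIT(DRM_MODE_TV_MODE_PAL));
if (ret)
	return ret;

drm_object_attach_property(&connector->base,
			   dev->mode_config.tv_mode_property,
			   DRM_MODE_TV_MODE_PAL);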
/**
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index c6e6a3e7219a..a0fc779e5e1e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -59,8 +59,7 @@ struct drm_ctx_list {
*/
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -97,8 +96,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
*/
void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
idr_init(&dev->ctx_idr);
@@ -114,8 +112,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
*/
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -136,8 +133,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
struct drm_ctx_list *pos, *tmp;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->ctxlist_mutex);
@@ -182,8 +178,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
@@ -230,8 +225,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
@@ -335,8 +329,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (res->count >= DRM_RESERVED_CONTEXTS) {
@@ -370,8 +363,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
int tmp_handle;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
tmp_handle = drm_legacy_ctxbitmap_next(dev);
@@ -419,8 +411,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
/* This is 0, because we don't handle any context flags */
@@ -445,8 +436,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
@@ -469,8 +459,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
@@ -495,8 +484,7 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index ee445f4605ba..4855230ba2c6 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -50,9 +51,8 @@
static int drm_name_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
@@ -72,15 +72,15 @@ static int drm_name_info(struct seq_file *m, void *data)
static int drm_clients_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_file *priv;
kuid_t uid;
seq_printf(m,
"%20s %5s %3s master a %5s %10s\n",
"command",
- "pid",
+ "tgid",
"dev",
"uid",
"magic");
@@ -94,7 +94,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
bool is_current_master = drm_is_current_master(priv);
rcu_read_lock(); /* locks pid_task()->comm */
- task = pid_task(priv->pid, PIDTYPE_PID);
+ task = pid_task(priv->pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
task ? task->comm : "<unknown>",
@@ -124,8 +124,8 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
static int drm_gem_name_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
seq_printf(m, " name size handles refcount\n");
@@ -136,7 +136,7 @@ static int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_debugfs_info drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
@@ -151,6 +151,21 @@ static int drm_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, node->info_ent->show, node);
}
+static int drm_debugfs_entry_open(struct inode *inode, struct file *file)
+{
+ struct drm_debugfs_entry *entry = inode->i_private;
+ struct drm_debugfs_info *node = &entry->file;
+
+ return single_open(file, node->show, entry);
+}
+
+static const struct file_operations drm_debugfs_entry_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_entry_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static const struct file_operations drm_debugfs_fops = {
.owner = THIS_MODULE,
@@ -192,7 +207,7 @@ void drm_debugfs_create_files(const struct drm_info_list *files, int count,
tmp->minor = minor;
tmp->dent = debugfs_create_file(files[i].name,
- S_IFREG | S_IRUGO, root, tmp,
+ 0444, root, tmp,
&drm_debugfs_fops);
tmp->info_ent = &files[i];
@@ -207,6 +222,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
{
struct drm_device *dev = minor->dev;
+ struct drm_debugfs_entry *entry, *tmp;
char name[64];
INIT_LIST_HEAD(&minor->debugfs_list);
@@ -214,8 +230,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_debugfs_list, DRM_DEBUGFS_ENTRIES);
if (drm_drv_uses_atomic_modeset(dev)) {
drm_atomic_debugfs_init(minor);
@@ -230,9 +245,29 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
if (dev->driver->debugfs_init)
dev->driver->debugfs_init(minor);
+ list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
+ debugfs_create_file(entry->file.name, 0444,
+ minor->debugfs_root, entry, &drm_debugfs_entry_fops);
+ list_del(&entry->list);
+ }
+
return 0;
}
+void drm_debugfs_late_register(struct drm_device *dev)
+{
+ struct drm_minor *minor = dev->primary;
+ struct drm_debugfs_entry *entry, *tmp;
+
+ if (!minor)
+ return;
+
+ list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
+ debugfs_create_file(entry->file.name, 0444,
+ minor->debugfs_root, entry, &drm_debugfs_entry_fops);
+ list_del(&entry->list);
+ }
+}
int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
@@ -281,6 +316,53 @@ void drm_debugfs_cleanup(struct drm_minor *minor)
minor->debugfs_root = NULL;
}
+/**
+ * drm_debugfs_add_file - Add a given file to the DRM device debugfs file list
+ * @dev: drm device for the ioctl
+ * @name: debugfs file name
+ * @show: show callback
+ * @data: driver-private data, should not be device-specific
+ *
+ * Add a given file entry to the DRM device debugfs file list; the file is
+ * created when drm_debugfs_init() runs.
+ */
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data)
+{
+ struct drm_debugfs_entry *entry = drmm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return;
+
+ entry->file.name = name;
+ entry->file.show = show;
+ entry->file.data = data;
+ entry->dev = dev;
+
+ mutex_lock(&dev->debugfs_mutex);
+ list_add(&entry->list, &dev->debugfs_list);
+ mutex_unlock(&dev->debugfs_mutex);
+}
+EXPORT_SYMBOL(drm_debugfs_add_file);
+
+/**
+ * drm_debugfs_add_files - Add an array of files to the DRM device debugfs file list
+ * @dev: drm device for the ioctl
+ * @files: The array of files to create
+ * @count: The number of files given
+ *
+ * Add a given set of debugfs files represented by an array of
+ * &struct drm_debugfs_info to the DRM device debugfs file list.
+ */
+void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ drm_debugfs_add_file(dev, files[i].name, files[i].show, files[i].data);
+}
+EXPORT_SYMBOL(drm_debugfs_add_files);
+
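A short driver-side usage sketch; the hypothetical_* names are made up. The show callback receives the &struct drm_debugfs_entry via the seq_file private pointer, and the file itself is created later by drm_debugfs_init() or drm_debugfs_late_register():

static int hypothetical_status_show(struct seq_file *m, void *unused)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;

	seq_printf(m, "device: %s\n", dev->unique);
	return 0;
}

/* e.g. called from the driver's probe path */
drm_debugfs_add_file(dev, "hypothetical_status", hypothetical_status_show, NULL);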
static int connector_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -426,15 +508,15 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
connector->debugfs_entry = root;
/* force */
- debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+ debugfs_create_file("force", 0644, root, connector,
&drm_connector_fops);
/* edid */
- debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, connector,
+ debugfs_create_file("edid_override", 0644, root, connector,
&drm_edid_fops);
/* vrr range */
- debugfs_create_file("vrr_range", S_IRUGO, root, connector,
+ debugfs_create_file("vrr_range", 0444, root, connector,
&vrr_range_fops);
/* max bpc */
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 38ea8203df45..9edc111be7ee 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -7,13 +7,29 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-static int validate_displayid(const u8 *displayid, int length, int idx)
+static const struct displayid_header *
+displayid_get_header(const u8 *displayid, int length, int index)
+{
+ const struct displayid_header *base;
+
+ if (sizeof(*base) > length - index)
+ return ERR_PTR(-EINVAL);
+
+ base = (const struct displayid_header *)&displayid[index];
+
+ return base;
+}
+
+static const struct displayid_header *
+validate_displayid(const u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
const struct displayid_header *base;
- base = (const struct displayid_header *)&displayid[idx];
+ base = displayid_get_header(displayid, length, idx);
+ if (IS_ERR(base))
+ return base;
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
@@ -21,16 +37,16 @@ static int validate_displayid(const u8 *displayid, int length, int idx)
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- return 0;
+ return base;
}
static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
@@ -39,7 +55,6 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
{
const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
- int ret;
if (!displayid)
return NULL;
@@ -48,11 +63,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
*length = EDID_LENGTH - 1;
*idx = 1;
- ret = validate_displayid(displayid, *length, *idx);
- if (ret)
+ base = validate_displayid(displayid, *length, *idx);
+ if (IS_ERR(base))
return NULL;
- base = (const struct displayid_header *)&displayid[*idx];
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
@@ -109,6 +123,9 @@ __displayid_iter_next(struct displayid_iter *iter)
}
for (;;) {
+ /* The first section we encounter is the base section */
+ bool base_section = !iter->section;
+
iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
@@ -118,6 +135,18 @@ __displayid_iter_next(struct displayid_iter *iter)
return NULL;
}
+ /* Save the structure version and primary use case. */
+ if (base_section) {
+ const struct displayid_header *base;
+
+ base = displayid_get_header(iter->section, iter->length,
+ iter->idx);
+ if (!IS_ERR(base)) {
+ iter->version = base->rev;
+ iter->primary_use = base->prod_id;
+ }
+ }
+
iter->idx += sizeof(struct displayid_header);
block = displayid_iter_block(iter);
@@ -130,3 +159,18 @@ void displayid_iter_end(struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}
+
+/* DisplayID Structure Version/Revision from the Base Section. */
+u8 displayid_version(const struct displayid_iter *iter)
+{
+ return iter->version;
+}
+
+/*
+ * DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from
+ * the Base Section.
+ */
+u8 displayid_primary_use(const struct displayid_iter *iter)
+{
+ return iter->primary_use;
+}
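A hedged sketch of a caller reading the new accessors, assuming a &struct displayid_iter that has already been advanced by the existing iteration helpers in drm_edid.c:

/* Illustrative only: iter and dev are assumed to be in scope. */
drm_dbg_kms(dev, "DisplayID structure version 0x%02x, primary use case %u\n",
	    displayid_version(&iter), displayid_primary_use(&iter));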
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 73b845a75d52..cee0cc522ed9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -598,6 +598,7 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ mutex_destroy(&dev->debugfs_mutex);
drm_legacy_destroy_members(dev);
}
@@ -638,12 +639,14 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->vblank_event_list);
+ INIT_LIST_HEAD(&dev->debugfs_list);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ mutex_init(&dev->debugfs_mutex);
ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
@@ -688,9 +691,11 @@ static int drm_dev_init(struct drm_device *dev,
}
}
- ret = drm_dev_set_unique(dev, dev_name(parent));
- if (ret)
+ dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
+ if (!dev->unique) {
+ ret = -ENOMEM;
goto err;
+ }
return 0;
@@ -929,8 +934,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
dev->registered = true;
- if (dev->driver->load) {
- ret = dev->driver->load(dev, flags);
+ if (driver->load) {
+ ret = driver->load(dev, flags);
if (ret)
goto err_minors;
}
@@ -997,26 +1002,6 @@ void drm_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unregister);
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @name: unique name
- *
- * Sets the unique name of a DRM device using the specified string. This is
- * already done by drm_dev_init(), drivers should only override the default
- * unique name for backwards compatibility reasons.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- drmm_kfree(dev, dev->unique);
- dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
-
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index ad17fa21cebb..70032bba1c97 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -139,10 +139,7 @@ int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
if (!dev->driver->dumb_create)
return -ENOSYS;
- if (dev->driver->dumb_destroy)
- return dev->driver->dumb_destroy(file_priv, dev, handle);
- else
- return drm_gem_dumb_destroy(file_priv, dev, handle);
+ return drm_gem_handle_delete(file_priv, handle);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3841aba17abd..0454da505687 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -96,7 +96,6 @@ struct detailed_mode_closure {
struct drm_connector *connector;
const struct drm_edid *drm_edid;
bool preferred;
- u32 quirks;
int modes;
};
@@ -2797,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
* the EDID then we'll just return 0.
*/
- base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
return 0;
@@ -2887,9 +2886,9 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
* Walk the mode list for connector, clearing the preferred status on existing
* modes and setting it anew for the right mode ala quirks.
*/
-static void edid_fixup_preferred(struct drm_connector *connector,
- u32 quirks)
+static void edid_fixup_preferred(struct drm_connector *connector)
{
+ const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
@@ -2897,9 +2896,9 @@ static void edid_fixup_preferred(struct drm_connector *connector,
if (list_empty(&connector->probed_modes))
return;
- if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
target_refresh = 60;
- if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
@@ -3401,9 +3400,9 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
*/
static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
const struct drm_edid *drm_edid,
- const struct detailed_timing *timing,
- u32 quirks)
+ const struct detailed_timing *timing)
{
+ const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
@@ -3425,10 +3424,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
connector->base.id, connector->name);
return NULL;
}
- if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
- connector->base.id, connector->name);
- }
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3437,7 +3432,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
return NULL;
}
- if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
@@ -3449,7 +3444,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (!mode)
return NULL;
- if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
@@ -3472,25 +3467,42 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
drm_mode_do_interlace_quirk(mode, pt);
- if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
+ case DRM_EDID_PT_ANALOG_CSYNC:
+ case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
+ connector->base.id, connector->name);
+ mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
+ break;
+ case DRM_EDID_PT_DIGITAL_CSYNC:
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
+ connector->base.id, connector->name);
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
+ break;
+ case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ break;
+ }
}
set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
- if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
@@ -4003,8 +4015,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
return;
newmode = drm_mode_detailed(closure->connector,
- closure->drm_edid, timing,
- closure->quirks);
+ closure->drm_edid, timing);
if (!newmode)
return;
@@ -4027,15 +4038,13 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
* add_detailed_modes - Add modes from detailed timings
* @connector: attached connector
* @drm_edid: EDID block to scan
- * @quirks: quirks to apply
*/
static int add_detailed_modes(struct drm_connector *connector,
- const struct drm_edid *drm_edid, u32 quirks)
+ const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
- .quirks = quirks,
};
if (drm_edid->edid->revision >= 4)
@@ -4468,28 +4477,20 @@ static u8 svd_to_vic(u8 svd)
return svd;
}
+/*
+ * Return a display mode for the 0-based vic_index'th VIC across all CTA VDBs in
+ * the EDID, or NULL on errors.
+ */
static struct drm_display_mode *
-drm_display_mode_from_vic_index(struct drm_connector *connector,
- const u8 *video_db, u8 video_len,
- u8 video_index)
+drm_display_mode_from_vic_index(struct drm_connector *connector, int vic_index)
{
+ const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
- struct drm_display_mode *newmode;
- u8 vic;
- if (video_db == NULL || video_index >= video_len)
+ if (!info->vics || vic_index >= info->vics_len || !info->vics[vic_index])
return NULL;
- /* CEA modes are numbered 1..127 */
- vic = svd_to_vic(video_db[video_index]);
- if (!drm_valid_cea_vic(vic))
- return NULL;
-
- newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
- if (!newmode)
- return NULL;
-
- return newmode;
+ return drm_display_mode_from_cea_vic(dev, info->vics[vic_index]);
}
/*
@@ -4505,10 +4506,8 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
static int do_y420vdb_modes(struct drm_connector *connector,
const u8 *svds, u8 svds_len)
{
- int modes = 0, i;
struct drm_device *dev = connector->dev;
- struct drm_display_info *info = &connector->display_info;
- struct drm_hdmi_info *hdmi = &info->hdmi;
+ int modes = 0, i;
for (i = 0; i < svds_len; i++) {
u8 vic = svd_to_vic(svds[i]);
@@ -4520,35 +4519,13 @@ static int do_y420vdb_modes(struct drm_connector *connector,
newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
break;
- bitmap_set(hdmi->y420_vdb_modes, vic, 1);
drm_mode_probed_add(connector, newmode);
modes++;
}
- if (modes > 0)
- info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
return modes;
}
-/*
- * drm_add_cmdb_modes - Add a YCBCR 420 mode into bitmap
- * @connector: connector corresponding to the HDMI sink
- * @vic: CEA vic for the video mode to be added in the map
- *
- * Makes an entry for a videomode in the YCBCR 420 bitmap
- */
-static void
-drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
-{
- u8 vic = svd_to_vic(svd);
- struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
-
- if (!drm_valid_cea_vic(vic))
- return;
-
- bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
-}
-
/**
* drm_display_mode_from_cea_vic() - return a mode for CEA VIC
* @dev: DRM device
@@ -4577,29 +4554,20 @@ drm_display_mode_from_cea_vic(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_display_mode_from_cea_vic);
-static int
-do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+/* Add modes based on VICs parsed in parse_cta_vdb() */
+static int add_cta_vdb_modes(struct drm_connector *connector)
{
+ const struct drm_display_info *info = &connector->display_info;
int i, modes = 0;
- struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
- for (i = 0; i < len; i++) {
+ if (!info->vics)
+ return 0;
+
+ for (i = 0; i < info->vics_len; i++) {
struct drm_display_mode *mode;
- mode = drm_display_mode_from_vic_index(connector, db, len, i);
+ mode = drm_display_mode_from_vic_index(connector, i);
if (mode) {
- /*
- * YCBCR420 capability block contains a bitmap which
- * gives the index of CEA modes from CEA VDB, which
- * can support YCBCR 420 sampling output also (apart
- * from RGB/YCBCR444 etc).
- * For example, if the bit 0 in bitmap is set,
- * first mode in VDB can support YCBCR420 output too.
- * Add YCBCR420 modes only if sink is HDMI 2.0 capable.
- */
- if (i < 64 && hdmi->y420_cmdb_map & (1ULL << i))
- drm_add_cmdb_modes(connector, db[i]);
-
drm_mode_probed_add(connector, mode);
modes++;
}
@@ -4693,15 +4661,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
}
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
- const u8 *video_db, u8 video_len, u8 video_index)
+ int vic_index)
{
struct drm_display_mode *newmode;
int modes = 0;
if (structure & (1 << 0)) {
- newmode = drm_display_mode_from_vic_index(connector, video_db,
- video_len,
- video_index);
+ newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
@@ -4709,9 +4675,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 6)) {
- newmode = drm_display_mode_from_vic_index(connector, video_db,
- video_len,
- video_index);
+ newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
@@ -4719,9 +4683,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 8)) {
- newmode = drm_display_mode_from_vic_index(connector, video_db,
- video_len,
- video_index);
+ newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
@@ -4732,6 +4694,26 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
return modes;
}
+static bool hdmi_vsdb_latency_present(const u8 *db)
+{
+ return db[8] & BIT(7);
+}
+
+static bool hdmi_vsdb_i_latency_present(const u8 *db)
+{
+ return hdmi_vsdb_latency_present(db) && db[8] & BIT(6);
+}
+
+static int hdmi_vsdb_latency_length(const u8 *db)
+{
+ if (hdmi_vsdb_i_latency_present(db))
+ return 4;
+ else if (hdmi_vsdb_latency_present(db))
+ return 2;
+ else
+ return 0;
+}
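
For illustration only, not part of the patch: a minimal standalone sketch of the byte-8 flag decoding implemented by the three helpers above, run on a made-up HDMI VSDB flag byte. The value 0xC0 is an assumption chosen so that both latency flag bits are set.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool latency_present(uint8_t b8)   { return b8 & 0x80; }	/* bit 7: Latency_Fields_Present */
static bool i_latency_present(uint8_t b8) { return latency_present(b8) && (b8 & 0x40); }	/* bit 6 */

int main(void)
{
	uint8_t b8 = 0xC0;	/* hypothetical byte 8 of an HDMI VSDB */
	int skip = i_latency_present(b8) ? 4 : latency_present(b8) ? 2 : 0;

	printf("skip %d latency bytes before the video format fields\n", skip);	/* prints 4 */
	return 0;
}
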
+
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
@@ -4742,10 +4724,8 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
* also adds the stereo 3d modes when applicable.
*/
static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
- const u8 *video_db, u8 video_len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
- struct drm_display_info *info = &connector->display_info;
int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
@@ -4758,13 +4738,7 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
if (!(db[8] & (1 << 5)))
goto out;
- /* Latency_Fields_Present */
- if (db[8] & (1 << 7))
- offset += 2;
-
- /* I_Latency_Fields_Present */
- if (db[8] & (1 << 6))
- offset += 2;
+ offset += hdmi_vsdb_latency_length(db);
/* the declared length is not long enough for the first 2 bytes
* of additional video format capabilities */
@@ -4818,9 +4792,7 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
for (i = 0; i < 16; i++) {
if (mask & (1 << i))
modes += add_3d_struct_modes(connector,
- structure_all,
- video_db,
- video_len, i);
+ structure_all, i);
}
}
@@ -4857,8 +4829,6 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
if (newflag != 0) {
newmode = drm_display_mode_from_vic_index(connector,
- video_db,
- video_len,
vic_index);
if (newmode) {
@@ -4873,8 +4843,6 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
}
out:
- if (modes > 0)
- info->has_hdmi_infoframe = true;
return modes;
}
@@ -5204,20 +5172,26 @@ static int edid_hfeeodb_extension_block_count(const struct edid *edid)
return cta[4 + 2];
}
-static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
- const u8 *db)
+/*
+ * CTA-861 YCbCr 4:2:0 Capability Map Data Block (CTA Y420CMDB)
+ *
+ * Y420CMDB contains a bitmap which gives the index of CTA modes from CTA VDB,
+ * which can also support YCBCR 420 sampling output (apart from RGB/YCBCR444
+ * etc.). For example, if bit 0 in the bitmap is set, the first mode in the VDB
+ * can support YCBCR420 output too.
+ */
+static void parse_cta_y420cmdb(struct drm_connector *connector,
+ const struct cea_db *db, u64 *y420cmdb_map)
{
struct drm_display_info *info = &connector->display_info;
- struct drm_hdmi_info *hdmi = &info->hdmi;
- u8 map_len = cea_db_payload_len(db) - 1;
- u8 count;
+ int i, map_len = cea_db_payload_len(db) - 1;
+ const u8 *data = cea_db_data(db) + 1;
u64 map = 0;
if (map_len == 0) {
/* All CEA modes support ycbcr420 sampling also. */
- hdmi->y420_cmdb_map = U64_MAX;
- info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
- return;
+ map = U64_MAX;
+ goto out;
}
/*
@@ -5235,13 +5209,14 @@ static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
if (WARN_ON_ONCE(map_len > 8))
map_len = 8;
- for (count = 0; count < map_len; count++)
- map |= (u64)db[2 + count] << (8 * count);
+ for (i = 0; i < map_len; i++)
+ map |= (u64)data[i] << (8 * i);
+out:
if (map)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
- hdmi->y420_cmdb_map = map;
+ *y420cmdb_map = map;
}
static int add_cea_modes(struct drm_connector *connector,
@@ -5249,21 +5224,16 @@ static int add_cea_modes(struct drm_connector *connector,
{
const struct cea_db *db;
struct cea_db_iter iter;
- int modes = 0;
+ int modes;
+
+ /* CTA VDB block VICs parsed earlier */
+ modes = add_cta_vdb_modes(connector);
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
- const u8 *hdmi = NULL, *video = NULL;
- u8 hdmi_len = 0, video_len = 0;
-
- if (cea_db_tag(db) == CTA_DB_VIDEO) {
- video = cea_db_data(db);
- video_len = cea_db_payload_len(db);
- modes += do_cea_modes(connector, video, video_len);
- } else if (cea_db_is_hdmi_vsdb(db)) {
- /* FIXME: Switch to use cea_db_data() */
- hdmi = (const u8 *)db;
- hdmi_len = cea_db_payload_len(db);
+ if (cea_db_is_hdmi_vsdb(db)) {
+ modes += do_hdmi_vsdb_modes(connector, (const u8 *)db,
+ cea_db_payload_len(db));
} else if (cea_db_is_y420vdb(db)) {
const u8 *vdb420 = cea_db_data(db) + 1;
@@ -5271,15 +5241,6 @@ static int add_cea_modes(struct drm_connector *connector,
modes += do_y420vdb_modes(connector, vdb420,
cea_db_payload_len(db) - 1);
}
-
- /*
- * We parse the HDMI VSDB after having added the cea modes as we
- * will be patching their flags when the sink supports stereo
- * 3D.
- */
- if (hdmi)
- modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
- video, video_len);
}
cea_db_iter_end(&iter);
@@ -5416,6 +5377,7 @@ drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
}
}
+/* HDMI Vendor-Specific Data Block (HDMI VSDB, H14b-VSDB) */
static void
drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
{
@@ -5423,18 +5385,18 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
if (len >= 6 && (db[6] & (1 << 7)))
connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_SUPPORTS_AI;
- if (len >= 8) {
- connector->latency_present[0] = db[8] >> 7;
- connector->latency_present[1] = (db[8] >> 6) & 1;
- }
- if (len >= 9)
+
+ if (len >= 10 && hdmi_vsdb_latency_present(db)) {
+ connector->latency_present[0] = true;
connector->video_latency[0] = db[9];
- if (len >= 10)
connector->audio_latency[0] = db[10];
- if (len >= 11)
+ }
+
+ if (len >= 12 && hdmi_vsdb_i_latency_present(db)) {
+ connector->latency_present[1] = true;
connector->video_latency[1] = db[11];
- if (len >= 12)
connector->audio_latency[1] = db[12];
+ }
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] HDMI: latency present %d %d, video latency %d %d, audio latency %d %d\n",
@@ -5533,8 +5495,6 @@ static void drm_edid_to_eld(struct drm_connector *connector,
int total_sad_count = 0;
int mnl;
- clear_eld(connector);
-
if (!drm_edid)
return;
@@ -5864,6 +5824,92 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
+/* CTA-861 Video Data Block (CTA VDB) */
+static void parse_cta_vdb(struct drm_connector *connector, const struct cea_db *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ int i, vic_index, len = cea_db_payload_len(db);
+ const u8 *svds = cea_db_data(db);
+ u8 *vics;
+
+ if (!len)
+ return;
+
+ /* Gracefully handle multiple VDBs, however unlikely that is */
+ vics = krealloc(info->vics, info->vics_len + len, GFP_KERNEL);
+ if (!vics)
+ return;
+
+ vic_index = info->vics_len;
+ info->vics_len += len;
+ info->vics = vics;
+
+ for (i = 0; i < len; i++) {
+ u8 vic = svd_to_vic(svds[i]);
+
+ if (!drm_valid_cea_vic(vic))
+ vic = 0;
+
+ info->vics[vic_index++] = vic;
+ }
+}
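
For illustration only, not part of the patch: a standalone sketch of how parse_cta_vdb() above turns a VDB payload of SVDs into plain VICs. The svd_to_vic() copy mirrors the helper defined earlier in this file; the three-byte payload is made up (VIC 16 flagged native, then VICs 4 and 95), and invalid VICs would be stored as 0 by the real function.

#include <stdint.h>
#include <stdio.h>

static uint8_t svd_to_vic(uint8_t svd)
{
	/* SVDs 1..64 and 129..192 encode VICs 1..64, the high range carrying the native bit */
	if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192))
		return svd & 127;
	return svd;
}

int main(void)
{
	uint8_t svds[] = { 0x90, 0x04, 0x5f };	/* hypothetical CTA VDB payload */

	for (unsigned int i = 0; i < sizeof(svds); i++)
		printf("vics[%u] = %u\n", i, svd_to_vic(svds[i]));	/* 16, 4, 95 */
	return 0;
}
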
+
+/*
+ * Update y420_cmdb_modes based on previously parsed CTA VDB and Y420CMDB.
+ *
+ * Translate the y420cmdb_map based on VIC indexes to y420_cmdb_modes indexed
+ * using the VICs themselves.
+ */
+static void update_cta_y420cmdb(struct drm_connector *connector, u64 y420cmdb_map)
+{
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+ int i, len = min_t(int, info->vics_len, BITS_PER_TYPE(y420cmdb_map));
+
+ for (i = 0; i < len; i++) {
+ u8 vic = info->vics[i];
+
+ if (vic && y420cmdb_map & BIT_ULL(i))
+ bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
+ }
+}
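
For illustration only, not part of the patch: a standalone sketch of the index-to-VIC translation done by update_cta_y420cmdb() above. The vics[] contents and the bitmap value are assumptions: with only bit 2 of the Y420CMDB map set, only the third VDB entry (VIC 95) is marked as supporting YCbCr 4:2:0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vics[] = { 16, 4, 95 };		/* hypothetical result of parse_cta_vdb() */
	uint64_t y420cmdb_map = 1ULL << 2;	/* bit i refers to the i'th VDB entry */

	for (unsigned int i = 0; i < sizeof(vics); i++)
		if (vics[i] && (y420cmdb_map & (1ULL << i)))
			printf("VIC %u also supports YCbCr 4:2:0\n", vics[i]);	/* VIC 95 */
	return 0;
}
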
+
+static bool cta_vdb_has_vic(const struct drm_connector *connector, u8 vic)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ int i;
+
+ if (!vic || !info->vics)
+ return false;
+
+ for (i = 0; i < info->vics_len; i++) {
+ if (info->vics[i] == vic)
+ return true;
+ }
+
+ return false;
+}
+
+/* CTA-861-H YCbCr 4:2:0 Video Data Block (CTA Y420VDB) */
+static void parse_cta_y420vdb(struct drm_connector *connector,
+ const struct cea_db *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+ const u8 *svds = cea_db_data(db) + 1;
+ int i;
+
+ for (i = 0; i < cea_db_payload_len(db) - 1; i++) {
+ u8 vic = svd_to_vic(svds[i]);
+
+ if (!drm_valid_cea_vic(vic))
+ continue;
+
+ bitmap_set(hdmi->y420_vdb_modes, vic, 1);
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
+ }
+}
+
static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
@@ -5995,14 +6041,14 @@ static void drm_parse_dsc_info(struct drm_hdmi_dsc_cap *hdmi_dsc,
static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
const u8 *hf_scds)
{
- struct drm_display_info *display = &connector->display_info;
- struct drm_hdmi_info *hdmi = &display->hdmi;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
int max_tmds_clock = 0;
u8 max_frl_rate = 0;
bool dsc_support = false;
- display->has_hdmi_infoframe = true;
+ info->has_hdmi_infoframe = true;
if (hf_scds[6] & 0x80) {
hdmi->scdc.supported = true;
@@ -6026,7 +6072,7 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
max_tmds_clock = hf_scds[5] * 5000;
if (max_tmds_clock > 340000) {
- display->max_tmds_clock = max_tmds_clock;
+ info->max_tmds_clock = max_tmds_clock;
}
if (scdc->supported) {
@@ -6117,6 +6163,7 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
}
}
+/* HDMI Vendor-Specific Data Block (HDMI VSDB, H14b-VSDB) */
static void
drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
{
@@ -6130,6 +6177,15 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
if (len >= 7)
info->max_tmds_clock = db[7] * 5000;
+ /*
+ * Try to infer whether the sink supports HDMI infoframes.
+ *
+ * HDMI infoframe support was first added in HDMI 1.4. Assume the sink
+ * supports infoframes if HDMI_Video_present is set.
+ */
+ if (len >= 8 && db[8] & BIT(5))
+ info->has_hdmi_infoframe = true;
+
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI: DVI dual %d, max TMDS clock %d kHz\n",
connector->base.id, connector->name,
info->dvi_dual, info->max_tmds_clock);
@@ -6165,6 +6221,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
const struct cea_db *db;
struct cea_db_iter iter;
const u8 *edid_ext;
+ u64 y420cmdb_map = 0;
drm_edid_iter_begin(drm_edid, &edid_iter);
drm_edid_iter_for_each(edid_ext, &edid_iter) {
@@ -6202,13 +6259,20 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
else if (cea_db_is_microsoft_vsdb(db))
drm_parse_microsoft_vsdb(connector, data);
else if (cea_db_is_y420cmdb(db))
- drm_parse_y420cmdb_bitmap(connector, data);
+ parse_cta_y420cmdb(connector, db, &y420cmdb_map);
+ else if (cea_db_is_y420vdb(db))
+ parse_cta_y420vdb(connector, db);
else if (cea_db_is_vcdb(db))
drm_parse_vcdb(connector, data);
else if (cea_db_is_hdmi_hdr_metadata_block(db))
drm_parse_hdr_metadata_block(connector, data);
+ else if (cea_db_tag(db) == CTA_DB_VIDEO)
+ parse_cta_vdb(connector, db);
}
cea_db_iter_end(&iter);
+
+ if (y420cmdb_map)
+ update_cta_y420cmdb(connector, y420cmdb_map);
}
static
@@ -6374,17 +6438,52 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->mso_stream_count = 0;
info->mso_pixel_overlap = 0;
info->max_dsc_bpp = 0;
+
+ kfree(info->vics);
+ info->vics = NULL;
+ info->vics_len = 0;
+
+ info->quirks = 0;
}
-static u32 update_display_info(struct drm_connector *connector,
- const struct drm_edid *drm_edid)
+static void update_displayid_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
- const struct edid *edid = drm_edid->edid;
+ const struct displayid_block *block;
+ struct displayid_iter iter;
- u32 quirks = edid_get_quirks(drm_edid);
+ displayid_iter_edid_begin(drm_edid, &iter);
+ displayid_iter_for_each(block, &iter) {
+ if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ (displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
+ displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
+ info->non_desktop = true;
+
+ /*
+ * We're only interested in the base section here, no need to
+ * iterate further.
+ */
+ break;
+ }
+ displayid_iter_end(&iter);
+}
+
+static void update_display_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+ const struct edid *edid;
drm_reset_display_info(connector);
+ clear_eld(connector);
+
+ if (!drm_edid)
+ return;
+
+ edid = drm_edid->edid;
+
+ info->quirks = edid_get_quirks(drm_edid);
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -6400,6 +6499,8 @@ static u32 update_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, drm_edid);
+ update_displayid_info(connector, drm_edid);
+
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
*
@@ -6456,17 +6557,30 @@ static u32 update_display_info(struct drm_connector *connector,
drm_update_mso(connector, drm_edid);
out:
- if (quirks & EDID_QUIRK_NON_DESKTOP) {
+ if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
- if (quirks & EDID_QUIRK_CAP_DSC_15BPP)
+ if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
info->max_dsc_bpp = 15;
- return quirks;
+ if (info->quirks & EDID_QUIRK_FORCE_6BPC)
+ info->bpc = 6;
+
+ if (info->quirks & EDID_QUIRK_FORCE_8BPC)
+ info->bpc = 8;
+
+ if (info->quirks & EDID_QUIRK_FORCE_10BPC)
+ info->bpc = 10;
+
+ if (info->quirks & EDID_QUIRK_FORCE_12BPC)
+ info->bpc = 12;
+
+ /* Depends on info->cea_rev set by drm_parse_cea_ext() above */
+ drm_edid_to_eld(connector, drm_edid);
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
@@ -6561,27 +6675,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
return num_modes;
}
-static int _drm_edid_connector_update(struct drm_connector *connector,
- const struct drm_edid *drm_edid)
+static int _drm_edid_connector_add_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
+ const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
- u32 quirks;
- if (!drm_edid) {
- drm_reset_display_info(connector);
- clear_eld(connector);
+ if (!drm_edid)
return 0;
- }
-
- /*
- * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
- * To avoid multiple parsing of same block, lets parse that map
- * from sink info, before parsing CEA modes.
- */
- quirks = update_display_info(connector, drm_edid);
-
- /* Depends on info->cea_rev set by update_display_info() above */
- drm_edid_to_eld(connector, drm_edid);
/*
* EDID spec says modes should be preferred in this order:
@@ -6597,7 +6698,7 @@ static int _drm_edid_connector_update(struct drm_connector *connector,
*
* XXX order for additional mode types in extension blocks?
*/
- num_modes += add_detailed_modes(connector, drm_edid, quirks);
+ num_modes += add_detailed_modes(connector, drm_edid);
num_modes += add_cvt_modes(connector, drm_edid);
num_modes += add_standard_modes(connector, drm_edid);
num_modes += add_established_modes(connector, drm_edid);
@@ -6607,20 +6708,8 @@ static int _drm_edid_connector_update(struct drm_connector *connector,
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
- if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
- edid_fixup_preferred(connector, quirks);
-
- if (quirks & EDID_QUIRK_FORCE_6BPC)
- connector->display_info.bpc = 6;
-
- if (quirks & EDID_QUIRK_FORCE_8BPC)
- connector->display_info.bpc = 8;
-
- if (quirks & EDID_QUIRK_FORCE_10BPC)
- connector->display_info.bpc = 10;
-
- if (quirks & EDID_QUIRK_FORCE_12BPC)
- connector->display_info.bpc = 12;
+ if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector);
return num_modes;
}
@@ -6684,49 +6773,54 @@ out:
* @connector: Connector
* @drm_edid: EDID
*
- * Update the connector mode list, display info, ELD, HDR metadata, relevant
- * properties, etc. from the passed in EDID.
+ * Update the connector display info, ELD, HDR metadata, relevant properties,
+ * etc. from the passed in EDID.
*
* If EDID is NULL, reset the information.
*
- * Return: The number of modes added or 0 if we couldn't find any.
+ * Must be called before calling drm_edid_connector_add_modes().
+ *
+ * Return: 0 on success, negative error on errors.
*/
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- int count;
-
- count = _drm_edid_connector_update(connector, drm_edid);
+ update_display_info(connector, drm_edid);
_drm_update_tile_info(connector, drm_edid);
- /* Note: Ignore errors for now. */
- _drm_edid_connector_property_update(connector, drm_edid);
-
- return count;
+ return _drm_edid_connector_property_update(connector, drm_edid);
}
EXPORT_SYMBOL(drm_edid_connector_update);
-static int _drm_connector_update_edid_property(struct drm_connector *connector,
- const struct drm_edid *drm_edid)
+/**
+ * drm_edid_connector_add_modes - Update probed modes from the EDID property
+ * @connector: Connector
+ *
+ * Add the modes from the previously updated EDID property to the connector
+ * probed modes list.
+ *
+ * drm_edid_connector_update() must have been called before this to update the
+ * EDID property.
+ *
+ * Return: The number of modes added, or 0 if we couldn't find any.
+ */
+int drm_edid_connector_add_modes(struct drm_connector *connector)
{
- /*
- * Set the display info, using edid if available, otherwise resetting
- * the values to defaults. This duplicates the work done in
- * drm_add_edid_modes, but that function is not consistently called
- * before this one in all drivers and the computation is cheap enough
- * that it seems better to duplicate it rather than attempt to ensure
- * some arbitrary ordering of calls.
- */
- if (drm_edid)
- update_display_info(connector, drm_edid);
- else
- drm_reset_display_info(connector);
+ const struct drm_edid *drm_edid = NULL;
+ int count;
- _drm_update_tile_info(connector, drm_edid);
+ if (connector->edid_blob_ptr)
+ drm_edid = drm_edid_alloc(connector->edid_blob_ptr->data,
+ connector->edid_blob_ptr->length);
- return _drm_edid_connector_property_update(connector, drm_edid);
+ count = _drm_edid_connector_add_modes(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ return count;
}
+EXPORT_SYMBOL(drm_edid_connector_add_modes);
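
For illustration only, not part of the patch: a sketch of how a driver's .get_modes hook might use the split API documented above, first updating the EDID property and display info, then adding probed modes from it. The example_connector_get_modes name is made up; drm_edid_read() and drm_edid_free() are the existing struct drm_edid accessors.

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static int example_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_edid *drm_edid;
	int count;

	drm_edid = drm_edid_read(connector);		/* may return NULL */

	/* Updates display info, ELD and the EDID property; handles NULL by resetting. */
	drm_edid_connector_update(connector, drm_edid);

	/* Adds probed modes from the EDID property updated above. */
	count = drm_edid_connector_add_modes(connector);

	drm_edid_free(drm_edid);

	return count;
}
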
/**
* drm_connector_update_edid_property - update the edid property of a connector
@@ -6749,8 +6843,7 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
{
struct drm_edid drm_edid;
- return _drm_connector_update_edid_property(connector,
- drm_edid_legacy_init(&drm_edid, edid));
+ return drm_edid_connector_update(connector, drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_connector_update_edid_property);
@@ -6763,13 +6856,14 @@ EXPORT_SYMBOL(drm_connector_update_edid_property);
* &drm_display_info structure and ELD in @connector with any information which
* can be derived from the edid.
*
- * This function is deprecated. Use drm_edid_connector_update() instead.
+ * This function is deprecated. Use drm_edid_connector_add_modes() instead.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_edid drm_edid;
+ struct drm_edid _drm_edid;
+ const struct drm_edid *drm_edid;
if (edid && !drm_edid_is_valid(edid)) {
drm_warn(connector->dev, "[CONNECTOR:%d:%s] EDID invalid.\n",
@@ -6777,8 +6871,11 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
edid = NULL;
}
- return _drm_edid_connector_update(connector,
- drm_edid_legacy_init(&drm_edid, edid));
+ drm_edid = drm_edid_legacy_init(&_drm_edid, edid);
+
+ update_display_info(connector, drm_edid);
+
+ return _drm_edid_connector_add_modes(connector, drm_edid);
}
EXPORT_SYMBOL(drm_add_edid_modes);
@@ -6885,8 +6982,6 @@ static u8 drm_mode_hdmi_vic(const struct drm_connector *connector,
static u8 drm_mode_cea_vic(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
- u8 vic;
-
/*
* HDMI spec says if a mode is found in HDMI 1.4b 4K modes
* we should send its VIC in vendor infoframes, else send the
@@ -6896,14 +6991,23 @@ static u8 drm_mode_cea_vic(const struct drm_connector *connector,
if (drm_mode_hdmi_vic(connector, mode))
return 0;
- vic = drm_match_cea_mode(mode);
+ return drm_match_cea_mode(mode);
+}
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && vic > 64)
+/*
+ * Avoid sending VICs defined in HDMI 2.0 in AVI infoframes to sinks that
+ * conform to HDMI 1.4.
+ *
+ * HDMI 1.4 (CTA-861-D) VIC range: [1..64]
+ * HDMI 2.0 (CTA-861-F) VIC range: [1..107]
+ *
+ * If the sink lists the VIC in CTA VDB, assume it's fine, regardless of HDMI
+ * version.
+ */
+static u8 vic_for_avi_infoframe(const struct drm_connector *connector, u8 vic)
+{
+ if (!is_hdmi2_sink(connector) && vic > 64 &&
+ !cta_vdb_has_vic(connector, vic))
return 0;
return vic;
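
For illustration only, not part of the patch: a standalone sketch of the rule spelled out in the comment above, with made-up sink capabilities. A plain HDMI 1.4 sink would get VIC 0 for a CTA-861-F mode, unless its CTA VDB explicitly listed that VIC.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	bool is_hdmi2_sink = false;	/* hypothetical: HDMI 1.4 sink */
	bool vdb_lists_vic = true;	/* hypothetical: VIC appears in its CTA VDB */
	uint8_t vic = 95;		/* 3840x2160@30, above the HDMI 1.4 range */
	uint8_t avi_vic;

	if (!is_hdmi2_sink && vic > 64 && !vdb_lists_vic)
		avi_vic = 0;		/* suppress the out-of-range VIC */
	else
		avi_vic = vic;		/* sink advertised it, keep it */

	printf("VIC in AVI infoframe: %u\n", avi_vic);	/* prints 95 */
	return 0;
}
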
@@ -6978,7 +7082,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
picture_aspect = HDMI_PICTURE_ASPECT_NONE;
}
- frame->video_code = vic;
+ frame->video_code = vic_for_avi_infoframe(connector, vic);
frame->picture_aspect = picture_aspect;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
@@ -7176,6 +7280,15 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
}
}
+static bool displayid_is_tiled_block(const struct displayid_iter *iter,
+ const struct displayid_block *block)
+{
+ return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+ block->tag == DATA_BLOCK_TILED_DISPLAY) ||
+ (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
+}
+
static void _drm_update_tile_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -7186,7 +7299,7 @@ static void _drm_update_tile_info(struct drm_connector *connector,
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
- if (block->tag == DATA_BLOCK_TILED_DISPLAY)
+ if (displayid_is_tiled_block(&iter, block))
drm_parse_tiled_block(connector, block);
}
displayid_iter_end(&iter);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b3a731b9170a..6bb1b8b27d7a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,7 +30,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/pci.h>
#include <linux/sysrq.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
@@ -58,16 +60,17 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
* In order to keep user-space compatibility, we want in certain use-cases
* to keep leaking the fbdev physical address to the user-space program
* handling the fbdev buffer.
- * This is a bad habit essentially kept into closed source opengl driver
- * that should really be moved into open-source upstream projects instead
- * of using legacy physical addresses in user space to communicate with
- * other out-of-tree kernel modules.
+ *
+ * This is a bad habit, essentially kept to support closed-source OpenGL
+ * drivers that should really be moved into open-source upstream projects
+ * instead of using legacy physical addresses in user space to communicate
+ * with other out-of-tree kernel modules.
*
* This module_param *should* be removed as soon as possible and be
* considered as a broken and legacy behaviour from a modern fbdev device.
*/
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem;
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
@@ -414,14 +417,30 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
* @helper: driver-allocated fbdev helper structure to set up
+ * @preferred_bpp: Preferred bits per pixel for the device.
* @funcs: pointer to structure of functions associate with this helper
*
* Sets up the bare minimum to make the framebuffer helper usable. This is
* useful to implement race-free initialization of the polling helpers.
*/
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
const struct drm_fb_helper_funcs *funcs)
{
+ /*
+ * Pick a preferred bpp of 32 if no value has been given. This
+ * will select XRGB8888 as the framebuffer format. All drivers
+ * have to support XRGB8888 for backwards compatibility with legacy
+ * userspace, so it's the safe choice here.
+ *
+ * TODO: Replace struct drm_mode_config.preferred_depth and this
+ * bpp value with a preferred format that is given as struct
+ * drm_format_info. Then derive all other values from the
+ * format.
+ */
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
@@ -430,10 +449,23 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
+ helper->preferred_bpp = preferred_bpp;
}
EXPORT_SYMBOL(drm_fb_helper_prepare);
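
For illustration only, not part of the patch: a sketch of driver-side setup with the new prepare/unprepare pairing, assuming a hypothetical driver (the example_* names are made up). Passing 0 as preferred_bpp falls back to the 32 bpp / XRGB8888 default added above.

#include <drm/drm_fb_helper.h>

static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
	/* .fb_probe = example_fb_probe, */
};

static int example_fbdev_setup(struct drm_device *dev, struct drm_fb_helper *helper)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, 0, &example_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, helper);
	if (ret) {
		drm_fb_helper_unprepare(helper);	/* undo drm_fb_helper_prepare() */
		return ret;
	}

	return drm_fb_helper_initial_config(helper);
}
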
/**
+ * drm_fb_helper_unprepare - clean up a drm_fb_helper structure
+ * @fb_helper: driver-allocated fbdev helper structure to clean up
+ *
+ * Cleans up the framebuffer helper. Inverse of drm_fb_helper_prepare().
+ */
+void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
+{
+ mutex_destroy(&fb_helper->lock);
+}
+EXPORT_SYMBOL(drm_fb_helper_unprepare);
+
+/**
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
@@ -473,8 +505,8 @@ EXPORT_SYMBOL(drm_fb_helper_init);
* drm_fb_helper_alloc_info - allocate fb_info and some of its members
* @fb_helper: driver-allocated fbdev helper
*
- * A helper to alloc fb_info and the members cmap and apertures. Called
- * by the driver within the fb_probe fb_helper callback function. Drivers do not
+ * A helper to alloc fb_info and the member cmap. Called by the driver
+ * within the fb_probe fb_helper callback function. Drivers do not
* need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*
@@ -496,27 +528,11 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
if (ret)
goto err_release;
- /*
- * TODO: We really should be smarter here and alloc an aperture
- * for each IORESOURCE_MEM resource helper->dev->dev has and also
- * init the ranges of the appertures based on the resources.
- * Note some drivers currently count on there being only 1 empty
- * aperture and fill this themselves, these will need to be dealt
- * with somehow when fixing this.
- */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto err_free_cmap;
- }
-
fb_helper->info = info;
info->skip_vt_switch = true;
return info;
-err_free_cmap:
- fb_dealloc_cmap(&info->cmap);
err_release:
framebuffer_release(info);
return ERR_PTR(ret);
@@ -524,6 +540,29 @@ err_release:
EXPORT_SYMBOL(drm_fb_helper_alloc_info);
/**
+ * drm_fb_helper_release_info - release fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to release fb_info and the member cmap. Drivers do not
+ * need to release the allocated fb_info structure themselves, this is
+ * automatically done when calling drm_fb_helper_fini().
+ */
+void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *info = fb_helper->info;
+
+ if (!info)
+ return;
+
+ fb_helper->info = NULL;
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+}
+EXPORT_SYMBOL(drm_fb_helper_release_info);
+
+/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
@@ -546,8 +585,6 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_info);
*/
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
- struct fb_info *info;
-
if (!fb_helper)
return;
@@ -559,13 +596,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
cancel_work_sync(&fb_helper->resume_work);
cancel_work_sync(&fb_helper->damage_work);
- info = fb_helper->info;
- if (info) {
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
- fb_helper->info = NULL;
+ drm_fb_helper_release_info(fb_helper);
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
@@ -575,8 +606,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&kernel_fb_helper_lock);
- mutex_destroy(&fb_helper->lock);
-
if (!fb_helper->client.funcs)
drm_client_release(&fb_helper->client);
}
@@ -612,19 +641,27 @@ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
+ u32 line_length = info->fix.line_length;
+ u32 fb_height = info->var.yres;
off_t end = off + len;
u32 x1 = 0;
- u32 y1 = off / info->fix.line_length;
+ u32 y1 = off / line_length;
u32 x2 = info->var.xres;
- u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
+ u32 y2 = DIV_ROUND_UP(end, line_length);
+
+ /* Don't allow any of them beyond the bottom bound of display area */
+ if (y1 > fb_height)
+ y1 = fb_height;
+ if (y2 > fb_height)
+ y2 = fb_height;
if ((y2 - y1) == 1) {
/*
* We've only written to a single scanline. Try to reduce
* the number of horizontal pixels that need an update.
*/
- off_t bit_off = (off % info->fix.line_length) * 8;
- off_t bit_end = (end % info->fix.line_length) * 8;
+ off_t bit_off = (off % line_length) * 8;
+ off_t bit_end = (end % line_length) * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
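
For illustration only, not part of the patch: a standalone worked example of the clip computation above, using made-up framebuffer geometry (1024x768 XRGB8888, 4096-byte lines) and a 128-byte write starting 512 bytes into scanline 10. The single-scanline branch narrows the horizontal span as well.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long line_length = 4096, bpp = 32, xres = 1024, yres = 768;
	unsigned long off = 10 * line_length + 512, len = 128, end = off + len;
	unsigned long x1 = 0, x2 = xres;
	unsigned long y1 = off / line_length;
	unsigned long y2 = DIV_ROUND_UP(end, line_length);

	/* clamp to the visible area, as the new bound checks above do */
	if (y1 > yres)
		y1 = yres;
	if (y2 > yres)
		y2 = yres;

	if (y2 - y1 == 1) {	/* single scanline: also narrow the horizontal span */
		x1 = (off % line_length) * 8 / bpp;
		x2 = DIV_ROUND_UP((end % line_length) * 8, bpp);
	}

	printf("damage clip: (%lu, %lu)-(%lu, %lu)\n", x1, y1, x2, y2);	/* (128, 10)-(160, 11) */
	return 0;
}
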
@@ -644,7 +681,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
struct drm_fb_helper *helper = info->par;
- unsigned long start, end, min_off, max_off;
+ unsigned long start, end, min_off, max_off, total_size;
struct fb_deferred_io_pageref *pageref;
struct drm_rect damage_area;
@@ -662,7 +699,11 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
* of the screen and account for non-existing scanlines. Hence,
* keep the covered memory area within the screen buffer.
*/
- max_off = min(max_off, info->screen_size);
+ if (info->screen_size)
+ total_size = info->screen_size;
+ else
+ total_size = info->fix.smem_len;
+ max_off = min(max_off, total_size);
if (min_off < max_off) {
drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
@@ -1504,6 +1545,27 @@ static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
}
}
+static void __fill_var(struct fb_var_screeninfo *var,
+ struct drm_framebuffer *fb)
+{
+ int i;
+
+ var->xres_virtual = fb->width;
+ var->yres_virtual = fb->height;
+ var->accel_flags = FB_ACCELF_TEXT;
+ var->bits_per_pixel = drm_format_info_bpp(fb->format, 0);
+
+ var->height = var->width = 0;
+ var->left_margin = var->right_margin = 0;
+ var->upper_margin = var->lower_margin = 0;
+ var->hsync_len = var->vsync_len = 0;
+ var->sync = var->vmode = 0;
+ var->rotate = 0;
+ var->colorspace = 0;
+ for (i = 0; i < 4; i++)
+ var->reserved[i] = 0;
+}
+
/**
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
@@ -1556,6 +1618,23 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
+ __fill_var(var, fb);
+
+ /*
+ * fb_pan_display() validates this, but fb_set_par() doesn't and just
+ * falls over. Note that __fill_var above adjusts y/res_virtual.
+ */
+ if (var->yoffset > var->yres_virtual - var->yres ||
+ var->xoffset > var->xres_virtual - var->xres)
+ return -EINVAL;
+
+ /* We neither support grayscale nor FOURCC (also stored in here). */
+ if (var->grayscale > 0)
+ return -EINVAL;
+
+ if (var->nonstd)
+ return -EINVAL;
+
/*
* Workaround for SDL 1.2, which is known to be setting all pixel format
* fields values to zero in some cases. We treat this situation as a
@@ -1571,11 +1650,6 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
- * Likewise, bits_per_pixel should be rounded up to a supported value.
- */
- var->bits_per_pixel = bpp;
-
- /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
@@ -1605,11 +1679,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
if (oops_in_progress)
return -EBUSY;
- if (var->pixclock != 0) {
- drm_err(fb_helper->dev, "PIXEL CLOCK SET\n");
- return -EINVAL;
- }
-
/*
* Normally we want to make sure that a kms master takes precedence over
* fbdev, to avoid fbdev flickering and occasionally stealing the
@@ -1726,117 +1795,132 @@ unlock:
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-/*
- * Allocates the backing storage and sets up the fbdev info structure through
- * the ->fb_probe callback.
- */
-static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats,
+ size_t format_count, uint32_t bpp, uint32_t depth)
{
- struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- struct drm_mode_config *config = &dev->mode_config;
- int ret = 0;
- int crtc_count = 0;
- struct drm_connector_list_iter conn_iter;
- struct drm_fb_helper_surface_size sizes;
- struct drm_connector *connector;
- struct drm_mode_set *mode_set;
- int best_depth = 0;
-
- memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- sizes.fb_width = (u32)-1;
- sizes.fb_height = (u32)-1;
+ uint32_t format;
+ size_t i;
/*
- * If driver picks 8 or 16 by default use that for both depth/bpp
- * to begin with
+ * Do not consider YUV or other complicated formats
+ * for framebuffers. This means only legacy formats
+ * are supported (fmt->depth is a legacy field), but
+ * the framebuffer emulation can only deal with such
+ * formats, specifically RGB/BGA formats.
*/
- if (preferred_bpp != sizes.surface_bpp)
- sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+ format = drm_mode_legacy_fb_format(bpp, depth);
+ if (!format)
+ goto err;
- drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
- drm_client_for_each_connector_iter(connector, &conn_iter) {
- struct drm_cmdline_mode *cmdline_mode;
+ for (i = 0; i < format_count; ++i) {
+ if (formats[i] == format)
+ return format;
+ }
- cmdline_mode = &connector->cmdline_mode;
+err:
+ /* We found nothing. */
+ drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth);
- if (cmdline_mode->bpp_specified) {
- switch (cmdline_mode->bpp) {
- case 8:
- sizes.surface_depth = sizes.surface_bpp = 8;
- break;
- case 15:
- sizes.surface_depth = 15;
- sizes.surface_bpp = 16;
- break;
- case 16:
- sizes.surface_depth = sizes.surface_bpp = 16;
- break;
- case 24:
- sizes.surface_depth = sizes.surface_bpp = 24;
- break;
- case 32:
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- break;
- }
- break;
- }
+ return DRM_FORMAT_INVALID;
+}
+
+static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper,
+ const uint32_t *formats, size_t format_count,
+ unsigned int color_mode)
+{
+ struct drm_device *dev = fb_helper->dev;
+ uint32_t bpp, depth;
+
+ switch (color_mode) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 24:
+ bpp = depth = color_mode;
+ break;
+ case 15:
+ bpp = 16;
+ depth = 15;
+ break;
+ case 32:
+ bpp = 32;
+ depth = 24;
+ break;
+ default:
+ drm_info(dev, "unsupported color mode of %d\n", color_mode);
+ return DRM_FORMAT_INVALID;
}
- drm_connector_list_iter_end(&conn_iter);
- /*
- * If we run into a situation where, for example, the primary plane
- * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
- * 16) we need to scale down the depth of the sizes we request.
- */
- mutex_lock(&client->modeset_mutex);
+ return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth);
+}
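
For illustration only, not part of the patch: a standalone sketch of the color-mode translation done by drm_fb_helper_find_color_mode_format() above, without the format lookup. The two sample modes (15 and 32) are the cases that do not map one-to-one to bpp/depth.

#include <stdio.h>

static int color_mode_to_bpp_depth(unsigned int color_mode, unsigned int *bpp, unsigned int *depth)
{
	switch (color_mode) {
	case 1: case 2: case 4: case 8: case 16: case 24:
		*bpp = *depth = color_mode;
		return 0;
	case 15:
		*bpp = 16;	/* XRGB1555-style: 16 bpp, depth 15 */
		*depth = 15;
		return 0;
	case 32:
		*bpp = 32;	/* XRGB8888-style: 32 bpp, depth 24 */
		*depth = 24;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	unsigned int modes[] = { 15, 32 }, bpp, depth;

	for (unsigned int i = 0; i < 2; i++)
		if (!color_mode_to_bpp_depth(modes[i], &bpp, &depth))
			printf("color mode %u -> %u bpp, depth %u\n", modes[i], bpp, depth);
	return 0;
}
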
+
+static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ int crtc_count = 0;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ struct drm_mode_set *mode_set;
+ uint32_t surface_format = DRM_FORMAT_INVALID;
+ const struct drm_format_info *info;
+
+ memset(sizes, 0, sizeof(*sizes));
+ sizes->fb_width = (u32)-1;
+ sizes->fb_height = (u32)-1;
+
drm_client_for_each_modeset(mode_set, client) {
struct drm_crtc *crtc = mode_set->crtc;
struct drm_plane *plane = crtc->primary;
- int j;
drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
- for (j = 0; j < plane->format_count; j++) {
- const struct drm_format_info *fmt;
+ drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
+ drm_client_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- fmt = drm_format_info(plane->format_types[j]);
-
- /*
- * Do not consider YUV or other complicated formats
- * for framebuffers. This means only legacy formats
- * are supported (fmt->depth is a legacy field) but
- * the framebuffer emulation can only deal with such
- * formats, specifically RGB/BGA formats.
- */
- if (fmt->depth == 0)
- continue;
-
- /* We found a perfect fit, great */
- if (fmt->depth == sizes.surface_depth) {
- best_depth = fmt->depth;
- break;
- }
-
- /* Skip depths above what we're looking for */
- if (fmt->depth > sizes.surface_depth)
+ if (!cmdline_mode->bpp_specified)
continue;
- /* Best depth found so far */
- if (fmt->depth > best_depth)
- best_depth = fmt->depth;
+ surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ cmdline_mode->bpp);
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
+
+ /* try preferred color mode */
+ surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ fb_helper->preferred_bpp);
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
}
- if (sizes.surface_depth != best_depth && best_depth) {
- drm_info(dev, "requested bpp %d, scaled depth down to %d",
- sizes.surface_bpp, best_depth);
- sizes.surface_depth = best_depth;
+
+ if (surface_format == DRM_FORMAT_INVALID) {
+ /*
+ * If none of the given color modes works, fall back
+ * to XRGB8888. Drivers are expected to provide this
+ * format for compatibility with legacy applications.
+ */
+ drm_warn(dev, "No compatible format found\n");
+ surface_format = drm_driver_legacy_fb_format(dev, 32, 24);
}
+ info = drm_format_info(surface_format);
+ sizes->surface_bpp = drm_format_info_bpp(info, 0);
+ sizes->surface_depth = info->depth;
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
drm_client_for_each_modeset(mode_set, client) {
@@ -1858,8 +1942,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
x = mode_set->x;
y = mode_set->y;
- sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
- sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+ sizes->surface_width =
+ max_t(u32, desired_mode->hdisplay + x, sizes->surface_width);
+ sizes->surface_height =
+ max_t(u32, desired_mode->vdisplay + y, sizes->surface_height);
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
@@ -1875,33 +1961,64 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
if (lasth)
- sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+ sizes->fb_width = min_t(u32, desired_mode->hdisplay + x, sizes->fb_width);
if (lastv)
- sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
+ sizes->fb_height = min_t(u32, desired_mode->vdisplay + y, sizes->fb_height);
}
- mutex_unlock(&client->modeset_mutex);
- if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+ if (crtc_count == 0 || sizes->fb_width == -1 || sizes->fb_height == -1) {
drm_info(dev, "Cannot find any crtc or sizes\n");
-
- /* First time: disable all crtc's.. */
- if (!fb_helper->deferred_setup)
- drm_client_modeset_commit(client);
return -EAGAIN;
}
+ return 0;
+}
+
+static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+
+ mutex_lock(&client->modeset_mutex);
+ ret = __drm_fb_helper_find_sizes(fb_helper, sizes);
+ mutex_unlock(&client->modeset_mutex);
+
+ if (ret)
+ return ret;
+
/* Handle our overallocation */
- sizes.surface_height *= drm_fbdev_overalloc;
- sizes.surface_height /= 100;
- if (sizes.surface_height > config->max_height) {
+ sizes->surface_height *= drm_fbdev_overalloc;
+ sizes->surface_height /= 100;
+ if (sizes->surface_height > config->max_height) {
drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
config->max_height);
- sizes.surface_height = config->max_height;
+ sizes->surface_height = config->max_height;
}
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- fb_helper->hint_leak_smem_start = drm_leak_fbdev_smem;
-#endif
+ return 0;
+}
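
For illustration only, not part of the patch: a standalone sketch of the over-allocation step above, assuming a 1080-line surface, an overallocation parameter of 200 percent, and a made-up 8192-line mode_config limit.

#include <stdio.h>

int main(void)
{
	unsigned int surface_height = 1080, overalloc = 200, max_height = 8192;

	surface_height = surface_height * overalloc / 100;	/* 2160 */
	if (surface_height > max_height)
		surface_height = max_height;

	printf("over-allocated fbdev height: %u lines\n", surface_height);
	return 0;
}
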
+
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_surface_size sizes;
+ int ret;
+
+ ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
+ if (ret) {
+ /* First time: disable all crtc's.. */
+ if (!fb_helper->deferred_setup)
+ drm_client_modeset_commit(client);
+ return ret;
+ }
/* push down into drivers */
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
@@ -1909,6 +2026,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
return ret;
strcpy(fb_helper->fb->comm, "[fbcon]");
+
+ /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
+
return 0;
}
@@ -1950,12 +2072,9 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
}
info->pseudo_palette = fb_helper->pseudo_palette;
- info->var.xres_virtual = fb->width;
- info->var.yres_virtual = fb->height;
- info->var.bits_per_pixel = drm_format_info_bpp(format, 0);
- info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
+ __fill_var(&info->var, fb);
info->var.activate = FB_ACTIVATE_NOW;
drm_fb_helper_fill_pixel_fmt(&info->var, format);
@@ -2069,8 +2188,7 @@ static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
/* Note: Drops fb_helper->lock before returning. */
static int
-__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
- int bpp_sel)
+__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct fb_info *info;
@@ -2081,10 +2199,9 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
height = dev->mode_config.max_height;
drm_client_modeset_probe(&fb_helper->client, width, height);
- ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ ret = drm_fb_helper_single_fb_probe(fb_helper);
if (ret < 0) {
if (ret == -EAGAIN) {
- fb_helper->preferred_bpp = bpp_sel;
fb_helper->deferred_setup = true;
ret = 0;
}
@@ -2098,11 +2215,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
info = fb_helper->info;
info->var.pixclock = 0;
- /* Shamelessly allow physical address leaking to userspace */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (!fb_helper->hint_leak_smem_start)
-#endif
- /* don't leak any physical addresses to userspace */
+
+ if (!drm_leak_fbdev_smem)
info->flags |= FBINFO_HIDE_SMEM_START;
/* Need to drop locks to avoid recursive deadlock in
@@ -2130,7 +2244,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
/**
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
- * @bpp_sel: bpp value to use for the framebuffer configuration
*
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
@@ -2168,7 +2281,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
-int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
int ret;
@@ -2176,7 +2289,7 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
return 0;
mutex_lock(&fb_helper->lock);
- ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel);
+ ret = __drm_fb_helper_initial_config_and_unlock(fb_helper);
return ret;
}
@@ -2212,8 +2325,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
mutex_lock(&fb_helper->lock);
if (fb_helper->deferred_setup) {
- err = __drm_fb_helper_initial_config_and_unlock(fb_helper,
- fb_helper->preferred_bpp);
+ err = __drm_fb_helper_initial_config_and_unlock(fb_helper);
return err;
}
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
new file mode 100644
index 000000000000..728deffcc0d9
--- /dev/null
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+
+#include <drm/drm_fbdev_dma.h>
+
+/*
+ * struct fb_ops
+ */
+
+static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ /* No need to take a ref for fbcon because it unbinds on unregister */
+ if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (user)
+ module_put(fb_helper->dev->driver->fops->owner);
+
+ return 0;
+}
+
+static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (!fb_helper->dev)
+ return;
+
+ drm_fb_helper_fini(fb_helper);
+
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+
+static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+
+ if (drm_WARN_ON_ONCE(dev, !fb_helper->dev->driver->gem_prime_mmap))
+ return -ENODEV;
+
+ return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+}
+
+static const struct fb_ops drm_fbdev_dma_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = drm_fbdev_dma_fb_open,
+ .fb_release = drm_fbdev_dma_fb_release,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_mmap = drm_fbdev_dma_fb_mmap,
+};
+
+/*
+ * struct drm_fb_helper
+ */
+
+static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer;
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ u32 format;
+ struct iosys_map map;
+ int ret;
+
+ drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+ dma_obj = to_drm_gem_dma_obj(buffer->gem);
+
+ fb = buffer->fb;
+ if (drm_WARN_ON(dev, fb->funcs->dirty)) {
+ ret = -ENODEV; /* damage handling not supported; use generic emulation */
+ goto err_drm_client_buffer_delete;
+ }
+
+ ret = drm_client_buffer_vmap(buffer, &map);
+ if (ret) {
+ goto err_drm_client_buffer_delete;
+ } else if (drm_WARN_ON(dev, map.is_iomem)) {
+ ret = -ENODEV; /* I/O memory not supported; use generic emulation */
+ goto err_drm_client_buffer_delete;
+ }
+
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_drm_client_buffer_vunmap;
+ }
+
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+ info->flags = FBINFO_DEFAULT;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+
+err_drm_client_buffer_vunmap:
+ fb_helper->fb = NULL;
+ fb_helper->buffer = NULL;
+ drm_client_buffer_vunmap(buffer);
+err_drm_client_buffer_delete:
+ drm_client_framebuffer_delete(buffer);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
+ .fb_probe = drm_fbdev_dma_helper_fb_probe,
+};
+
+/*
+ * struct drm_client_funcs
+ */
+
+static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
+}
+
+static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+}
+
+static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_dma_client_unregister,
+ .restore = drm_fbdev_dma_client_restore,
+ .hotplug = drm_fbdev_dma_client_hotplug,
+};
+
+/**
+ * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up fbdev emulation for GEM DMA drivers that support
+ * dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_dma_setup() shall be called after the DRM driver registered
+ * the new DRM device with drm_dev_register().
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * The fbdev is destroyed by drm_dev_unregister().
+ */
+void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+ drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
+
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_client_init;
+ }
+
+ ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+
+ drm_client_register(&fb_helper->client);
+
+ return;
+
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+EXPORT_SYMBOL(drm_fbdev_dma_setup);
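
For reference, a minimal usage sketch (not part of the patch): a driver based on the GEM DMA helpers would call drm_fbdev_dma_setup() right after registering its device. The my_* identifiers are hypothetical.

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>

static int my_driver_register(struct drm_device *drm)
{
	int ret;

	/* fbdev emulation must come after drm_dev_register() */
	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* 32 bpp preferred; 0 falls back to the device's preferred depth */
	drm_fbdev_dma_setup(drm, 32);

	return 0;
}

No explicit teardown is needed; as documented above, the fbdev client is destroyed by drm_dev_unregister().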
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index ab8695669279..8e5148bf40bb 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -7,22 +7,13 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_fbdev_generic.h>
-static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_framebuffer *fb = fb_helper->fb;
-
- return dev->mode_config.prefer_shadow_fbdev ||
- dev->mode_config.prefer_shadow ||
- fb->funcs->dirty;
-}
-
/* @user: 1=userspace, 0=fbcon */
-static int drm_fbdev_fb_open(struct fb_info *info, int user)
+static int drm_fbdev_generic_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -33,7 +24,7 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user)
return 0;
}
-static int drm_fbdev_fb_release(struct fb_info *info, int user)
+static int drm_fbdev_generic_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -43,152 +34,51 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
return 0;
}
-static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
+static void drm_fbdev_generic_fb_destroy(struct fb_info *info)
{
- struct fb_info *fbi = fb_helper->info;
- void *shadow = NULL;
+ struct drm_fb_helper *fb_helper = info->par;
+ void *shadow = info->screen_buffer;
if (!fb_helper->dev)
return;
- if (fbi) {
- if (fbi->fbdefio)
- fb_deferred_io_cleanup(fbi);
- if (drm_fbdev_use_shadow_fb(fb_helper))
- shadow = fbi->screen_buffer;
- }
-
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
-
- if (shadow)
- vfree(shadow);
- else if (fb_helper->buffer)
- drm_client_buffer_vunmap(fb_helper->buffer);
-
+ vfree(shadow);
drm_client_framebuffer_delete(fb_helper->buffer);
-}
-static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
-{
- drm_fbdev_cleanup(fb_helper);
drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
-/*
- * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
- * unregister_framebuffer() or fb_release().
- */
-static void drm_fbdev_fb_destroy(struct fb_info *info)
-{
- drm_fbdev_release(info->par);
-}
-
-static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- if (drm_fbdev_use_shadow_fb(fb_helper))
- return fb_deferred_io_mmap(info, vma);
- else if (fb_helper->dev->driver->gem_prime_mmap)
- return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
- else
- return -ENODEV;
-}
-
-static bool drm_fbdev_use_iomem(struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_client_buffer *buffer = fb_helper->buffer;
-
- return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
-}
-
-static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
-
- if (drm_fbdev_use_iomem(info))
- ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
- else
- ret = drm_fb_helper_sys_read(info, buf, count, ppos);
-
- return ret;
-}
-
-static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
-
- if (drm_fbdev_use_iomem(info))
- ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
- else
- ret = drm_fb_helper_sys_write(info, buf, count, ppos);
-
- return ret;
-}
-
-static void drm_fbdev_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_fillrect(info, rect);
- else
- drm_fb_helper_sys_fillrect(info, rect);
-}
-
-static void drm_fbdev_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_copyarea(info, area);
- else
- drm_fb_helper_sys_copyarea(info, area);
-}
-
-static void drm_fbdev_fb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_imageblit(info, image);
- else
- drm_fb_helper_sys_imageblit(info, image);
-}
-
-static const struct fb_ops drm_fbdev_fb_ops = {
+static const struct fb_ops drm_fbdev_generic_fb_ops = {
.owner = THIS_MODULE,
+ .fb_open = drm_fbdev_generic_fb_open,
+ .fb_release = drm_fbdev_generic_fb_release,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = drm_fbdev_fb_open,
- .fb_release = drm_fbdev_fb_release,
- .fb_destroy = drm_fbdev_fb_destroy,
- .fb_mmap = drm_fbdev_fb_mmap,
- .fb_read = drm_fbdev_fb_read,
- .fb_write = drm_fbdev_fb_write,
- .fb_fillrect = drm_fbdev_fb_fillrect,
- .fb_copyarea = drm_fbdev_fb_copyarea,
- .fb_imageblit = drm_fbdev_fb_imageblit,
-};
-
-static struct fb_deferred_io drm_fbdev_defio = {
- .delay = HZ / 20,
- .deferred_io = drm_fb_helper_deferred_io,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = fb_deferred_io_mmap,
+ .fb_destroy = drm_fbdev_generic_fb_destroy,
};
/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*/
-static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
- struct drm_framebuffer *fb;
- struct fb_info *fbi;
+ struct fb_info *info;
+ size_t screen_size;
+ void *screen_buffer;
u32 format;
- struct iosys_map map;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
@@ -203,58 +93,56 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = buffer->fb;
- fb = buffer->fb;
- fbi = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(fbi))
- return PTR_ERR(fbi);
+ screen_size = buffer->gem->size;
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer) {
+ ret = -ENOMEM;
+ goto err_drm_client_framebuffer_delete;
+ }
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_vfree;
+ }
- fbi->fbops = &drm_fbdev_fb_ops;
- fbi->screen_size = sizes->surface_height * fb->pitches[0];
- fbi->fix.smem_len = fbi->screen_size;
- fbi->flags = FBINFO_DEFAULT;
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
- drm_fb_helper_fill_info(fbi, fb_helper, sizes);
+ info->fbops = &drm_fbdev_generic_fb_ops;
+ info->flags = FBINFO_DEFAULT;
- if (drm_fbdev_use_shadow_fb(fb_helper)) {
- fbi->screen_buffer = vzalloc(fbi->screen_size);
- if (!fbi->screen_buffer)
- return -ENOMEM;
- fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ /* screen */
+ info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
+ info->fix.smem_len = screen_size;
- fbi->fbdefio = &drm_fbdev_defio;
- fb_deferred_io_init(fbi);
- } else {
- /* buffer is mapped for HW framebuffer */
- ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
- if (ret)
- return ret;
- if (map.is_iomem) {
- fbi->screen_base = map.vaddr_iomem;
- } else {
- fbi->screen_buffer = map.vaddr;
- fbi->flags |= FBINFO_VIRTFB;
- }
-
- /*
- * Shamelessly leak the physical address to user-space. As
- * page_to_phys() is undefined for I/O memory, warn in this
- * case.
- */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 &&
- !drm_WARN_ON_ONCE(dev, map.is_iomem))
- fbi->fix.smem_start =
- page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
- }
+ /* deferred I/O */
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
return 0;
+
+err_drm_fb_helper_release_info:
+ drm_fb_helper_release_info(fb_helper);
+err_vfree:
+ vfree(screen_buffer);
+err_drm_client_framebuffer_delete:
+ fb_helper->fb = NULL;
+ fb_helper->buffer = NULL;
+ drm_client_framebuffer_delete(buffer);
+ return ret;
}
-static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip,
- struct iosys_map *dst)
+static void drm_fbdev_generic_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
size_t offset = clip->y1 * fb->pitches[0];
@@ -291,8 +179,8 @@ static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
}
}
-static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip)
+static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct iosys_map map, dst;
@@ -316,7 +204,7 @@ static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
goto out;
dst = map;
- drm_fbdev_damage_blit_real(fb_helper, clip, &dst);
+ drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);
drm_client_buffer_vunmap(buffer);
@@ -326,23 +214,19 @@ out:
return ret;
}
-static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+static int drm_fbdev_generic_helper_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
{
struct drm_device *dev = helper->dev;
int ret;
- if (!drm_fbdev_use_shadow_fb(helper))
- return 0;
-
/* Call damage handlers only if necessary */
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;
- if (helper->buffer) {
- ret = drm_fbdev_damage_blit(helper, clip);
- if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
- return ret;
- }
+ ret = drm_fbdev_generic_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
@@ -353,85 +237,71 @@ static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect
return 0;
}
-static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
- .fb_probe = drm_fbdev_fb_probe,
- .fb_dirty = drm_fbdev_fb_dirty,
+static const struct drm_fb_helper_funcs drm_fbdev_generic_helper_funcs = {
+ .fb_probe = drm_fbdev_generic_helper_fb_probe,
+ .fb_dirty = drm_fbdev_generic_helper_fb_dirty,
};
-static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+static void drm_fbdev_generic_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (fb_helper->info)
- /* drm_fbdev_fb_destroy() takes care of cleanup */
+ if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
- else
- drm_fbdev_release(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
}
-static int drm_fbdev_client_restore(struct drm_client_dev *client)
+static int drm_fbdev_generic_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
-static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+static int drm_fbdev_generic_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
- /* Setup is not retried if it has failed */
- if (!fb_helper->dev && fb_helper->funcs)
- return 0;
-
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
- if (!dev->mode_config.num_connector) {
- drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
- return 0;
- }
-
- drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
-
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
- goto err;
+ goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
- ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
+ ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
- goto err_cleanup;
+ goto err_drm_fb_helper_fini;
return 0;
-err_cleanup:
- drm_fbdev_cleanup(fb_helper);
-err:
- fb_helper->dev = NULL;
- fb_helper->info = NULL;
-
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
-
return ret;
}
-static const struct drm_client_funcs drm_fbdev_client_funcs = {
+static const struct drm_client_funcs drm_fbdev_generic_client_funcs = {
.owner = THIS_MODULE,
- .unregister = drm_fbdev_client_unregister,
- .restore = drm_fbdev_client_restore,
- .hotplug = drm_fbdev_client_hotplug,
+ .unregister = drm_fbdev_generic_client_unregister,
+ .restore = drm_fbdev_generic_client_restore,
+ .hotplug = drm_fbdev_generic_client_hotplug,
};
/**
* drm_fbdev_generic_setup() - Setup generic fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
*
 * This function sets up generic fbdev emulation for drivers that support
* dumb buffers with a virtual address and that can be mmap'ed.
@@ -442,20 +312,16 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
* Simple drivers might use drm_mode_config_helper_suspend().
*
- * Drivers that set the dirty callback on their framebuffer will get a shadow
- * fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers. A shadow buffer can be
- * requested explicitly by setting struct drm_mode_config.prefer_shadow or
- * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
- * required to use generic fbdev emulation with SHMEM helpers.
+ * In order to provide fixed mmap-able memory ranges, generic fbdev emulation
+ * uses a shadow buffer in system memory. The implementation blits the shadow
+ * fbdev buffer onto the real buffer at regular intervals.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
*/
-void drm_fbdev_generic_setup(struct drm_device *dev,
- unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
@@ -466,29 +332,25 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
+ drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_generic_helper_funcs);
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_generic_client_funcs);
if (ret) {
- kfree(fb_helper);
drm_err(dev, "Failed to register client: %d\n", ret);
- return;
+ goto err_drm_client_init;
}
- /*
- * FIXME: This mixes up depth with bpp, which results in a glorious
- * mess, resulting in some drivers picking wrong fbdev defaults and
- * others wrong preferred_depth defaults.
- */
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
- fb_helper->preferred_bpp = preferred_bpp;
-
- ret = drm_fbdev_client_hotplug(&fb_helper->client);
+ ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
if (ret)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
+
+ return;
+
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ return;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
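
A hedged sketch of the generic path, including the suspend helper mentioned in the kernel-doc above; the my_* names and the device glue are assumptions, not part of this patch.

#include <linux/device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_modeset_helper.h>

static int my_driver_register(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	drm_fbdev_generic_setup(drm, 32);	/* 0 selects a default bpp */

	return 0;
}

/* simple drivers can let the modeset helpers suspend the fbdev console */
static int my_driver_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}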
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 64b4a3a87fbb..c1018c470047 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -156,7 +156,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
if (!file)
return ERR_PTR(-ENOMEM);
- file->pid = get_pid(task_pid(current));
+ file->pid = get_pid(task_tgid(current));
file->minor = minor;
/* for compatibility root is always authenticated */
@@ -245,10 +245,10 @@ void drm_file_free(struct drm_file *file)
dev = file->minor->dev;
- DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file->minor->kdev->devt),
- atomic_read(&dev->open_count));
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file->minor->kdev->devt),
+ atomic_read(&dev->open_count));
#ifdef CONFIG_DRM_LEGACY
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
@@ -340,8 +340,8 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
- DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
- task_pid_nr(current), minor->index);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
+ current->comm, task_pid_nr(current), minor->index);
priv = drm_file_alloc(minor);
if (IS_ERR(priv))
@@ -450,11 +450,11 @@ EXPORT_SYMBOL(drm_open);
void drm_lastclose(struct drm_device * dev)
{
- DRM_DEBUG("\n");
+ drm_dbg_core(dev, "\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
+ drm_dbg_core(dev, "driver lastclose completed\n");
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
+ drm_dbg_core(dev, "open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 74ff33c2ddaa..f93a4efcee90 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -322,7 +322,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u16 *dbuf16 = dbuf;
+ __le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
@@ -333,14 +333,15 @@ static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigne
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
- dbuf16[x] = val16;
+ dbuf16[x] = cpu_to_le16(val16);
}
}
+/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- u16 *dbuf16 = dbuf;
+ __le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
@@ -351,7 +352,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
- dbuf16[x] = swab16(val16);
+ dbuf16[x] = cpu_to_le16(swab16(val16));
}
}
@@ -395,6 +396,161 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_xrgb1555 - Convert XRGB8888 to XRGB1555 clip buffer
+ * @dst: Array of XRGB1555 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XRGB1555 devices that don't support
+ * XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xrgb1555_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
+
+static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_argb1555 - Convert XRGB8888 to ARGB1555 clip buffer
+ * @dst: Array of ARGB1555 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB1555 devices that don't support
+ * XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_argb1555_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
+
+static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgba5551 - Convert XRGB8888 to RGBA5551 clip buffer
+ * @dst: Array of RGBA5551 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGBA5551 devices that don't support
+ * XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgba5551_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
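
A short, hedged sketch of how the three 15-bit helpers added above are meant to be used; drivers typically call them from a damage handler with the dirty rectangle. The my_* names are hypothetical, and a NULL destination pitch means tightly packed scanlines, per the kernel-doc.

#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static void my_blit_damage_xrgb1555(struct iosys_map *dst_vram,
				    const struct iosys_map *src_vmap,
				    const struct drm_framebuffer *fb,
				    const struct drm_rect *damage)
{
	/* convert the damaged XRGB8888 region into XRGB1555 device memory */
	drm_fb_xrgb8888_to_xrgb1555(dst_vram, NULL, src_vmap, fb, damage);
}

drm_fb_xrgb8888_to_argb1555() and drm_fb_xrgb8888_to_rgba5551() take the same arguments and differ only in where the opaque alpha bit is placed.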
+
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -404,6 +560,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
+ /* write blue, green and red to the output in little-endian byte order */

*dbuf8++ = (pix & 0x000000FF) >> 0;
*dbuf8++ = (pix & 0x0000FF00) >> 8;
*dbuf8++ = (pix & 0x00FF0000) >> 16;
@@ -444,63 +601,112 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
-static void drm_fb_rgb565_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
- const __le16 *sbuf16 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- u16 val16 = le16_to_cpu(sbuf16[x]);
- u32 val32 = ((val16 & 0xf800) << 8) |
- ((val16 & 0x07e0) << 5) |
- ((val16 & 0x001f) << 3);
- val32 = 0xff000000 | val32 |
- ((val32 >> 3) & 0x00070007) |
- ((val32 >> 2) & 0x00000300);
- dbuf32[x] = cpu_to_le32(val32);
+ pix = le32_to_cpu(sbuf32[x]);
+ pix |= GENMASK(31, 24); /* fill alpha bits */
+ dbuf32[x] = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb565_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+/**
+ * drm_fb_xrgb8888_to_argb8888 - Convert XRGB8888 to ARGB8888 clip buffer
+ * @dst: Array of ARGB8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
- drm_fb_rgb565_to_xrgb8888_line);
+ drm_fb_xrgb8888_to_argb8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
-static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
- const u8 *sbuf8 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- u8 r = *sbuf8++;
- u8 g = *sbuf8++;
- u8 b = *sbuf8++;
- u32 pix = 0xff000000 | (r << 16) | (g << 8) | b;
- dbuf32[x] = cpu_to_le32(pix);
+ pix = le32_to_cpu(sbuf32[x]);
+ pix = ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+ *dbuf32++ = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb888_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
- drm_fb_rgb888_to_xrgb8888_line);
+ drm_fb_xrgb8888_to_abgr8888_line);
+}
+
+static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ pix = ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+ *dbuf32++ = cpu_to_le32(pix);
+ }
+}
+
+static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xbgr8888_line);
}
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
@@ -555,6 +761,59 @@ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *d
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
+static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 val32;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val32 = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ pix = GENMASK(31, 30) | /* set alpha bits */
+ val32 | ((val32 >> 8) & 0x00300c03);
+ *dbuf32++ = cpu_to_le32(pix);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_argb2101010 - Convert XRGB8888 to ARGB2101010 clip buffer
+ * @dst: Array of ARGB2101010 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB2101010 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_argb2101010_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
+
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -641,50 +900,47 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
{
uint32_t fb_format = fb->format->format;
- /* treat alpha channel like filler bits */
- if (fb_format == DRM_FORMAT_ARGB8888)
- fb_format = DRM_FORMAT_XRGB8888;
- if (dst_format == DRM_FORMAT_ARGB8888)
- dst_format = DRM_FORMAT_XRGB8888;
- if (fb_format == DRM_FORMAT_ARGB2101010)
- fb_format = DRM_FORMAT_XRGB2101010;
- if (dst_format == DRM_FORMAT_ARGB2101010)
- dst_format = DRM_FORMAT_XRGB2101010;
-
- if (dst_format == fb_format) {
+ if (fb_format == dst_format) {
drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
-
- } else if (dst_format == DRM_FORMAT_RGB565) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ } else if (fb_format == DRM_FORMAT_XRGB8888) {
+ if (dst_format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
- }
- } else if (dst_format == (DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN)) {
- if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ } else if (dst_format == DRM_FORMAT_XRGB1555) {
+ drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_RGB888) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ARGB1555) {
+ drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_RGBA5551) {
+ drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_XRGB8888) {
- if (fb_format == DRM_FORMAT_RGB888) {
- drm_fb_rgb888_to_xrgb8888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_ARGB8888) {
+ drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip);
return 0;
- } else if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_rgb565_to_xrgb8888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_XBGR8888) {
+ drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_XRGB2101010) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ABGR8888) {
+ drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_XRGB2101010) {
drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_BGRX8888) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ARGB2101010) {
+ drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_BGRX8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
}
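
The restructured chain above keeps drm_fb_blit() as the single dispatcher for these conversions. A hedged sketch of a caller, assuming a hypothetical driver whose scanout hardware only understands RGB565:

#include <linux/iosys-map.h>
#include <linux/printk.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static void my_update_display(struct iosys_map *dst_vram,
			      const struct iosys_map *src_vmap,
			      const struct drm_framebuffer *fb,
			      const struct drm_rect *damage)
{
	int ret;

	/* copies, swaps or converts, depending on fb->format vs. RGB565 */
	ret = drm_fb_blit(dst_vram, NULL, DRM_FORMAT_RGB565,
			  src_vmap, fb, damage);
	if (ret)
		pr_warn_once("unsupported framebuffer format %p4cc\n",
			     &fb->format->format);
}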
@@ -805,6 +1061,39 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
{
const uint32_t *fourccs_end = fourccs + nfourccs;
@@ -817,73 +1106,48 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t
return false;
}
-static const uint32_t conv_from_xrgb8888[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
-};
-
-static const uint32_t conv_from_rgb565_888[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
-};
-
-static bool is_conversion_supported(uint32_t from, uint32_t to)
-{
- switch (from) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- return is_listed_fourcc(conv_from_xrgb8888, ARRAY_SIZE(conv_from_xrgb8888), to);
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_RGB888:
- return is_listed_fourcc(conv_from_rgb565_888, ARRAY_SIZE(conv_from_rgb565_888), to);
- case DRM_FORMAT_XRGB2101010:
- return to == DRM_FORMAT_ARGB2101010;
- case DRM_FORMAT_ARGB2101010:
- return to == DRM_FORMAT_XRGB2101010;
- default:
- return false;
- }
-}
-
/**
* drm_fb_build_fourcc_list - Filters a list of supported color formats against
* the device's native formats
* @dev: DRM device
* @native_fourccs: 4CC codes of natively supported color formats
* @native_nfourccs: The number of entries in @native_fourccs
- * @driver_fourccs: 4CC codes of all driver-supported color formats
- * @driver_nfourccs: The number of entries in @driver_fourccs
* @fourccs_out: Returns 4CC codes of supported color formats
* @nfourccs_out: The number of available entries in @fourccs_out
*
 * This function creates a list of supported color formats from natively
- * supported formats and the emulated formats.
+ * supported formats and additional emulated formats.
* At a minimum, most userspace programs expect at least support for
* XRGB8888 on the primary plane. Devices that have to emulate the
* format, and possibly others, can use drm_fb_build_fourcc_list() to
* create a list of supported color formats. The returned list can
* be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Other heuristics might be applied
+ * will go before emulated formats. Native formats with alpha channel
+ * will be replaced by their non-alpha equivalents, as primary planes
+ * usually don't support alpha. Other heuristics might be applied
* to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list. Formats
- * without conversion helpers will be skipped. New drivers should only
- * pass in XRGB8888 and avoid exposing additional emulated formats.
+ * usually preferred over formats near the end of the list.
*
* Returns:
* The number of color-formats 4CC codes returned in @fourccs_out.
*/
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
- const u32 *driver_fourccs, size_t driver_nfourccs,
u32 *fourccs_out, size_t nfourccs_out)
{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. If there is ever another default fallback,
+ * it should be added here.
+ */
+ static const uint32_t extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
u32 *fourccs = fourccs_out;
const u32 *fourccs_end = fourccs_out + nfourccs_out;
- uint32_t native_format = 0;
size_t i;
/*
@@ -891,7 +1155,12 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
*/
for (i = 0; i < native_nfourccs; ++i) {
- u32 fourcc = native_fourccs[i];
+ /*
+ * Several DTs, boot loaders and firmware report native
+ * alpha formats that are really non-alpha formats, so
+ * replace such alpha formats with their non-alpha equivalents.
+ */
+ u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate entries */
@@ -902,14 +1171,6 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
- /*
- * There should only be one native format with the current API.
- * This API needs to be refactored to correctly support arbitrary
- * sets of native formats, since it needs to report which native
- * format to use for each emulated format.
- */
- if (!native_format)
- native_format = fourcc;
*fourccs = fourcc;
++fourccs;
}
@@ -918,17 +1179,14 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
* The extra formats, emulated by the driver, go second.
*/
- for (i = 0; (i < driver_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = driver_fourccs[i];
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate and native entries */
} else if (fourccs == fourccs_end) {
drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
continue; /* end of available output buffer */
- } else if (!is_conversion_supported(fourcc, native_format)) {
- drm_dbg_kms(dev, "Unsupported emulated format %p4cc\n", &fourcc);
- continue; /* format is not supported for conversion */
}
drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6242dfbe9240..0f17dfa8702b 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+ { .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 2dd97473ca10..aff3746dedfb 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1203,8 +1203,8 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
#ifdef CONFIG_DEBUG_FS
static int drm_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_framebuffer *fb;
@@ -1218,14 +1218,13 @@ static int drm_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
+static const struct drm_debugfs_info drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_framebuffer_debugfs_list,
- ARRAY_SIZE(drm_framebuffer_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..1a5a2cd0d4ec 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -170,6 +170,20 @@ void drm_gem_private_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
+ * drm_gem_private_object_fini - Finalize a failed drm_gem_object
+ * @obj: drm_gem_object
+ *
+ * Uninitialize an already allocated GEM object when its initialization failed
+ */
+void drm_gem_private_object_fini(struct drm_gem_object *obj)
+{
+ WARN_ON(obj->dma_buf);
+
+ dma_resv_fini(&obj->_resv);
+}
+EXPORT_SYMBOL(drm_gem_private_object_fini);
+
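
A brief, hedged sketch of the error-path pairing this helper enables (it mirrors the drm_gem_shmem_helper.c hunk further down); the my_* names are hypothetical.

#include <drm/drm_gem.h>

static int my_gem_object_create(struct drm_device *dev,
				struct drm_gem_object *obj, size_t size)
{
	int ret;

	drm_gem_private_object_init(dev, obj, size);

	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		/* undo drm_gem_private_object_init() on the error path */
		drm_gem_private_object_fini(obj);
		return ret;
	}

	return 0;
}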
+/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
@@ -322,13 +336,6 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- u32 handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/**
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
@@ -930,12 +937,11 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- WARN_ON(obj->dma_buf);
-
if (obj->filp)
fput(obj->filp);
- dma_resv_fini(&obj->_resv);
+ drm_gem_private_object_fini(obj);
+
drm_gem_free_mmap_offset(obj);
drm_gem_lru_remove(obj);
}
@@ -1047,7 +1053,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
goto err_drm_gem_object_put;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
@@ -1331,7 +1337,15 @@ drm_gem_lru_remove(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_lru_remove);
-static void
+/**
+ * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
+ *
+ * Like &drm_gem_lru_move_tail but lru lock must be held
+ *
+ * @lru: The LRU to move the object into.
+ * @obj: The GEM object to move into this LRU
+ */
+void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
lockdep_assert_held_once(lru->lock);
@@ -1343,6 +1357,7 @@ drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj
list_add_tail(&obj->lru_node, &lru->list);
obj->lru = lru;
}
+EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);
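
A minimal, hedged sketch of why the locked variant is now exported: callers that already hold the LRU lock, for example while walking the list, can re-insert an object at the tail without dropping the lock. The my_* name is hypothetical.

#include <linux/lockdep.h>
#include <drm/drm_gem.h>

static void my_touch_object_locked(struct drm_gem_lru *lru,
				   struct drm_gem_object *obj)
{
	lockdep_assert_held(lru->lock);		/* caller holds the LRU lock */

	drm_gem_lru_move_tail_locked(lru, obj);
}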
/**
* drm_gem_lru_move_tail - move the object to the tail of the LRU
@@ -1375,10 +1390,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
*
* @lru: The LRU to scan
* @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim; must be initialized by the caller
* @shrink: Callback to try to shrink/reclaim the object.
*/
unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj))
{
struct drm_gem_lru still_in_lru;
@@ -1417,8 +1435,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
* hit shrinker in response to trying to get backing pages
 * for this obj (ie. while its lock is already held)
*/
- if (!dma_resv_trylock(obj->resv))
+ if (!dma_resv_trylock(obj->resv)) {
+ *remaining += obj->size >> PAGE_SHIFT;
goto tail;
+ }
if (shrink(obj)) {
freed += obj->size >> PAGE_SHIFT;
@@ -1453,3 +1473,21 @@ tail:
return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
+
+/**
+ * drm_gem_evict - helper to evict backing pages for a GEM object
+ * @obj: obj in question
+ */
+int drm_gem_evict(struct drm_gem_object *obj)
+{
+ dma_resv_assert_held(obj->resv);
+
+ if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
+ return -EBUSY;
+
+ if (obj->funcs->evict)
+ return obj->funcs->evict(obj);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_evict);
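
A hedged sketch tying the two additions together in a shrinker: drm_gem_lru_scan() now reports contended pages through @remaining, and the per-object callback can use drm_gem_evict(), which runs with obj->resv already trylocked by the scan loop. The my_* structure and names are assumptions for illustration only.

#include <linux/container_of.h>
#include <linux/shrinker.h>
#include <drm/drm_gem.h>

struct my_device {
	struct drm_gem_lru lru;		/* tracks evictable objects */
	struct shrinker shrinker;
};

static bool my_gem_shrink(struct drm_gem_object *obj)
{
	/* obj->resv is held here; drm_gem_evict() asserts that */
	return drm_gem_evict(obj) == 0;
}

static unsigned long my_shrinker_scan(struct shrinker *shrinker,
				      struct shrink_control *sc)
{
	struct my_device *mydev = container_of(shrinker, struct my_device,
					       shrinker);
	unsigned long remaining = 0;	/* pages skipped due to lock contention */
	unsigned long freed;

	freed = drm_gem_lru_scan(&mydev->lru, sc->nr_to_scan, &remaining,
				 my_gem_shrink);
	if (freed)
		return freed;

	return remaining ? SHRINK_STOP : 0;
}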
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e42800718f51..5d4b9cd077f7 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -26,11 +26,8 @@
* call drm_gem_plane_helper_prepare_fb() from their implementation of
* struct &drm_plane_helper.prepare_fb . It sets the plane's fence from
* the framebuffer so that the DRM core can synchronize access automatically.
- *
* drm_gem_plane_helper_prepare_fb() can also be used directly as
- * implementation of prepare_fb. For drivers based on
- * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
- * provides equivalent functionality.
+ * implementation of prepare_fb.
*
* .. code-block:: c
*
@@ -41,11 +38,6 @@
* . prepare_fb = drm_gem_plane_helper_prepare_fb,
* };
*
- * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
- * ...,
- * . prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
- * };
- *
* A driver using a shadow buffer copies the content of the shadow buffers
* into the HW's framebuffer memory during an atomic update. This requires
* a mapping of the shadow buffer into kernel address space. The mappings
@@ -205,27 +197,6 @@ error:
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
-/**
- * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_plane_helper_prepare_fb() to extract the fences
- * from &drm_gem_object.resv and attaches them to the plane state for the atomic
- * helper to wait on. This is necessary to correctly implement implicit
- * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
- * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
- *
- * See drm_gem_plane_helper_prepare_fb() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
- */
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
-{
- return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
-
/*
* Shadow-buffered Planes
*/
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 1e658c448366..870b90b78bc4 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -477,8 +477,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
dma_obj->dma_addr = sg_dma_address(sgt->sgl);
dma_obj->sgt = sgt;
- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
- attach->dmabuf->size);
+ drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
+ attach->dmabuf->size);
return &dma_obj->base;
}
@@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_DONTEXPAND;
+ vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);
if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b602cd72a120..4ea6507a77e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -79,8 +79,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
} else {
ret = drm_gem_object_init(dev, obj, size);
}
- if (ret)
+ if (ret) {
+ drm_gem_private_object_fini(obj);
goto err_free;
+ }
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -139,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- WARN_ON(shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
@@ -154,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
drm_gem_shmem_put_pages(shmem);
}
- WARN_ON(shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, shmem->pages_use_count);
drm_gem_object_release(obj);
mutex_destroy(&shmem->pages_lock);
@@ -173,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
- DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+ drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+ PTR_ERR(pages));
shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -205,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
*/
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
+ struct drm_gem_object *obj = &shmem->base;
int ret;
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret)
@@ -223,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->pages_use_count))
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return;
if (--shmem->pages_use_count > 0)
@@ -266,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
*/
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
- WARN_ON(shmem->base.import_attach);
+ struct drm_gem_object *obj = &shmem->base;
+
+ drm_WARN_ON(obj->dev, obj->import_attach);
return drm_gem_shmem_get_pages(shmem);
}
@@ -281,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
*/
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
- WARN_ON(shmem->base.import_attach);
+ struct drm_gem_object *obj = &shmem->base;
+
+ drm_WARN_ON(obj->dev, obj->import_attach);
drm_gem_shmem_put_pages(shmem);
}
@@ -293,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (shmem->vmap_use_count++ > 0) {
- iosys_map_set_vaddr(map, shmem->vaddr);
- return 0;
- }
-
if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
- if (WARN_ON(map->is_iomem)) {
+ if (drm_WARN_ON(obj->dev, map->is_iomem)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
- ret = -EIO;
- goto err_put_pages;
+ return -EIO;
}
- shmem->vaddr = map->vaddr;
}
} else {
pgprot_t prot = PAGE_KERNEL;
+ if (shmem->vmap_use_count++ > 0) {
+ iosys_map_set_vaddr(map, shmem->vaddr);
+ return 0;
+ }
+
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
goto err_zero_use;
@@ -326,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
}
if (ret) {
- DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+ drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
@@ -376,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
-
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+ return;
+
+ if (--shmem->vmap_use_count > 0)
+ return;
+
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
}
@@ -413,7 +419,7 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
-static struct drm_gem_shmem_object *
+static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
uint32_t *handle)
@@ -423,7 +429,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
- return shmem;
+ return PTR_ERR(shmem);
/*
* Allocate an id of idr table where the obj is registered
@@ -432,10 +438,8 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
- if (ret)
- return ERR_PTR(ret);
- return shmem;
+ return ret;
}
/* Update madvise status, returns true if not purged, else
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
- WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+ drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
@@ -518,7 +522,6 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- struct drm_gem_shmem_object *shmem;
if (!args->pitch || !args->size) {
args->pitch = min_pitch;
@@ -531,9 +534,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = PAGE_ALIGN(args->pitch * args->height);
}
- shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
-
- return PTR_ERR_OR_ZERO(shmem);
+ return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
@@ -553,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
mutex_lock(&shmem->pages_lock);
if (page_offset >= num_pages ||
- WARN_ON_ONCE(!shmem->pages) ||
+ drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
shmem->madv < 0) {
ret = VM_FAULT_SIGBUS;
} else {
@@ -572,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
mutex_lock(&shmem->pages_lock);
@@ -581,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!WARN_ON_ONCE(!shmem->pages_use_count))
+ if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock);
@@ -622,18 +623,21 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
int ret;
if (obj->import_attach) {
- /* Drop the reference drm_gem_mmap_obj() acquired.*/
- drm_gem_object_put(obj);
vma->vm_private_data = NULL;
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired. */
+ if (!ret)
+ drm_gem_object_put(obj);
- return dma_buf_mmap(obj->dma_buf, vma, 0);
+ return ret;
}
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
return ret;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -651,6 +655,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
+ if (shmem->base.import_attach)
+ return;
+
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -675,29 +682,13 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
-/**
- * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
- * scatter/gather table for a shmem GEM object.
- * @shmem: shmem GEM object
- *
- * This function returns a scatter/gather table suitable for driver usage. If
- * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
- * table created.
- *
- * This is the main function for drivers to get at backing storage, and it hides
- * and difference between dma-buf imported and natively allocated objects.
- * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
- *
- * Returns:
- * A pointer to the scatter/gather table of pinned pages or errno on failure.
- */
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
@@ -706,9 +697,9 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
if (shmem->sgt)
return shmem->sgt;
- WARN_ON(obj->import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -730,9 +721,39 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
+
+/**
+ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+ * scatter/gather table for a shmem GEM object.
+ * @shmem: shmem GEM object
+ *
+ * This function returns a scatter/gather table suitable for driver usage. If
+ * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
+ * table is created.
+ *
+ * This is the main function for drivers to get at backing storage, and it hides
+ * any difference between dma-buf imported and natively allocated objects.
+ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or errno on failure.
+ */
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ ret = mutex_lock_interruptible(&shmem->pages_lock);
+ if (ret)
+ return ERR_PTR(ret);
+ sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+
+ return sgt;
+}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
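As a usage illustration (not part of this patch): with the locking now handled inside drm_gem_shmem_get_pages_sgt(), a caller only has to check the returned pointer. The helper below is a hypothetical sketch; my_bo_bind() and the MMU-programming step are assumptions.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <drm/drm_gem_shmem_helper.h>

static int my_bo_bind(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	/* Pins and dma-maps the pages on first call; the sgt is cached afterwards. */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* program sg_dma_address(sg) / sg_dma_len(sg) into the device MMU */
	}

	return 0;
}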
/**
@@ -764,7 +785,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
shmem->sgt = sgt;
- DRM_DEBUG_PRIME("size = %zu\n", size);
+ drm_dbg_prime(dev, "size = %zu\n", size);
return &shmem->base;
}
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index d5962a34c01d..3734aa2d1c5b 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -3,6 +3,8 @@
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b6c7e3803bb3..0bea3df2a16d 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -19,6 +19,7 @@
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -915,6 +916,17 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
{
struct drm_gem_vram_object *gbo;
+ if (!bo->resource) {
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
gbo = drm_gem_vram_of_bo(bo);
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
@@ -956,8 +968,8 @@ static struct ttm_device_funcs bo_driver = {
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_vram_mm *vmm = entry->dev->vram_mm;
struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
@@ -965,7 +977,7 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+static const struct drm_debugfs_info drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
@@ -977,9 +989,8 @@ static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
*/
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_vram_mm_debugfs_list,
- ARRAY_SIZE(drm_vram_mm_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list));
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5ea5e260118c..d7e023bbb0d5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -178,14 +178,12 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
-int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- u32 handle);
-
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root);
void drm_debugfs_cleanup(struct drm_minor *minor);
+void drm_debugfs_late_register(struct drm_device *dev);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
void drm_debugfs_crtc_add(struct drm_crtc *crtc);
@@ -202,6 +200,10 @@ static inline void drm_debugfs_cleanup(struct drm_minor *minor)
{
}
+static inline void drm_debugfs_late_register(struct drm_device *dev)
+{
+}
+
static inline void drm_debugfs_connector_add(struct drm_connector *connector)
{
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 5d82891c3222..49a743f62b4a 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -972,6 +972,7 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
drm_ioctl_compat_t *fn;
int ret;
@@ -986,14 +987,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!fn)
return drm_ioctl(filp, cmd, arg);
- DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated,
- drm_compat_ioctls[nr].name);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ drm_compat_ioctls[nr].name);
ret = (*fn)(filp, cmd, arg);
if (ret)
- DRM_DEBUG("ret = %d\n", ret);
+ drm_dbg_core(dev, "ret = %d\n", ret);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ca2a6e6101dc..7c9d66ee917d 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -440,7 +440,7 @@ done:
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("\n");
+ drm_dbg_core(dev, "\n");
return 0;
}
EXPORT_SYMBOL(drm_noop);
@@ -856,16 +856,16 @@ long drm_ioctl(struct file *filp,
out_size = 0;
ksize = max(max(in_size, out_size), drv_size);
- DRM_DEBUG("comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, ioctl->name);
+ drm_dbg_core(dev, "comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
- DRM_DEBUG("no function\n");
+ drm_dbg_core(dev, "no function\n");
retcode = -EINVAL;
goto err_i1;
}
@@ -894,16 +894,17 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
- DRM_DEBUG("invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, cmd, nr);
+ drm_dbg_core(dev,
+ "invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
- DRM_DEBUG("comm=\"%s\", pid=%d, ret=%d\n", current->comm,
- task_pid_nr(current), retcode);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, ret=%d\n",
+ current->comm, task_pid_nr(current), retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d72c2fac0ff1..150fe1555068 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -6,7 +6,7 @@
#include <linux/uaccess.h>
#include <drm/drm_auth.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_lease.h>
@@ -213,11 +213,11 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
int id;
void *entry;
- DRM_DEBUG_LEASE("lessor %d\n", lessor->lessee_id);
+ drm_dbg_lease(dev, "lessor %d\n", lessor->lessee_id);
lessee = drm_master_create(lessor->dev);
if (!lessee) {
- DRM_DEBUG_LEASE("drm_master_create failed\n");
+ drm_dbg_lease(dev, "drm_master_create failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -231,7 +231,7 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
error = -EBUSY;
if (error != 0) {
- DRM_DEBUG_LEASE("object %d failed %d\n", object, error);
+ drm_dbg_lease(dev, "object %d failed %d\n", object, error);
goto out_lessee;
}
}
@@ -249,7 +249,8 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
/* Move the leases over */
lessee->leases = *leases;
- DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
+ drm_dbg_lease(dev, "new lessee %d %p, lessor %d %p\n",
+ lessee->lessee_id, lessee, lessor->lessee_id, lessor);
mutex_unlock(&dev->mode_config.idr_mutex);
return lessee;
@@ -268,7 +269,7 @@ void drm_lease_destroy(struct drm_master *master)
mutex_lock(&dev->mode_config.idr_mutex);
- DRM_DEBUG_LEASE("drm_lease_destroy %d\n", master->lessee_id);
+ drm_dbg_lease(dev, "drm_lease_destroy %d\n", master->lessee_id);
/* This master is referenced by all lessees, hence it cannot be destroyed
* until all of them have been
@@ -277,7 +278,8 @@ void drm_lease_destroy(struct drm_master *master)
/* Remove this master from the lessee idr in the owner */
if (master->lessee_id != 0) {
- DRM_DEBUG_LEASE("remove master %d from device list of lessees\n", master->lessee_id);
+ drm_dbg_lease(dev, "remove master %d from device list of lessees\n",
+ master->lessee_id);
idr_remove(&(drm_lease_owner(master)->lessee_idr), master->lessee_id);
}
@@ -292,7 +294,7 @@ void drm_lease_destroy(struct drm_master *master)
drm_master_put(&master->lessor);
}
- DRM_DEBUG_LEASE("drm_lease_destroy done %d\n", master->lessee_id);
+ drm_dbg_lease(dev, "drm_lease_destroy done %d\n", master->lessee_id);
}
static void _drm_lease_revoke(struct drm_master *top)
@@ -308,7 +310,8 @@ static void _drm_lease_revoke(struct drm_master *top)
* the tree is fully connected, we can do this without recursing
*/
for (;;) {
- DRM_DEBUG_LEASE("revoke leases for %p %d\n", master, master->lessee_id);
+ drm_dbg_lease(master->dev, "revoke leases for %p %d\n",
+ master, master->lessee_id);
/* Evacuate the lease */
idr_for_each_entry(&master->leases, entry, object)
@@ -408,7 +411,7 @@ static int fill_object_idr(struct drm_device *dev,
ret = validate_lease(dev, object_count, objects, universal_planes);
if (ret) {
- DRM_DEBUG_LEASE("lease validation failed\n");
+ drm_dbg_lease(dev, "lease validation failed\n");
goto out_free_objects;
}
@@ -418,7 +421,7 @@ static int fill_object_idr(struct drm_device *dev,
struct drm_mode_object *obj = objects[o];
u32 object_id = objects[o]->id;
- DRM_DEBUG_LEASE("Adding object %d to lease\n", object_id);
+ drm_dbg_lease(dev, "Adding object %d to lease\n", object_id);
/*
* We're using an IDR to hold the set of leased
@@ -430,8 +433,8 @@ static int fill_object_idr(struct drm_device *dev,
*/
ret = idr_alloc(leases, &drm_lease_idr_object , object_id, object_id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
@@ -439,15 +442,15 @@ static int fill_object_idr(struct drm_device *dev,
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object primary plane %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object primary plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
if (crtc->cursor) {
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->cursor->base.id, crtc->cursor->base.id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object cursor plane %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object cursor plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
}
@@ -490,14 +493,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
- DRM_DEBUG_LEASE("invalid flags\n");
+ drm_dbg_lease(dev, "invalid flags\n");
return -EINVAL;
}
lessor = drm_file_get_master(lessor_priv);
/* Do not allow sub-leases */
if (lessor->lessor) {
- DRM_DEBUG_LEASE("recursive leasing not allowed\n");
+ drm_dbg_lease(dev, "recursive leasing not allowed\n");
ret = -EINVAL;
goto out_lessor;
}
@@ -520,7 +523,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count, object_ids);
kfree(object_ids);
if (ret) {
- DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
+ drm_dbg_lease(dev, "lease object lookup failed: %i\n", ret);
idr_destroy(&leases);
goto out_lessor;
}
@@ -534,7 +537,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
goto out_lessor;
}
- DRM_DEBUG_LEASE("Creating lease\n");
+ drm_dbg_lease(dev, "Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
@@ -545,7 +548,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
/* Clone the lessor file to create a new file for us */
- DRM_DEBUG_LEASE("Allocating lease file\n");
+ drm_dbg_lease(dev, "Allocating lease file\n");
lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
@@ -560,7 +563,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
lessee_priv->authenticated = 1;
/* Pass fd back to userspace */
- DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
+ drm_dbg_lease(dev, "Returning fd %d id %d\n", fd, lessee->lessee_id);
cl->fd = fd;
cl->lessee_id = lessee->lessee_id;
@@ -568,7 +571,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
fd_install(fd, lessee_file);
drm_master_put(&lessor);
- DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
+ drm_dbg_lease(dev, "drm_mode_create_lease_ioctl succeeded\n");
return 0;
out_lessee:
@@ -579,7 +582,7 @@ out_leases:
out_lessor:
drm_master_put(&lessor);
- DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
+ drm_dbg_lease(dev, "drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;
}
@@ -601,7 +604,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
lessor = drm_file_get_master(lessor_priv);
- DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
+ drm_dbg_lease(dev, "List lessees for %d\n", lessor->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
@@ -610,7 +613,8 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
/* Only list un-revoked leases */
if (!idr_is_empty(&lessee->leases)) {
if (count_lessees > count) {
- DRM_DEBUG_LEASE("Add lessee %d\n", lessee->lessee_id);
+ drm_dbg_lease(dev, "Add lessee %d\n",
+ lessee->lessee_id);
ret = put_user(lessee->lessee_id, lessee_ids + count);
if (ret)
break;
@@ -619,7 +623,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
}
}
- DRM_DEBUG_LEASE("Lessor leases to %d\n", count);
+ drm_dbg_lease(dev, "Lessor leases to %d\n", count);
if (ret == 0)
arg->count_lessees = count;
@@ -651,7 +655,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
lessee = drm_file_get_master(lessee_priv);
- DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
+ drm_dbg_lease(dev, "get lease for %d\n", lessee->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
@@ -665,7 +669,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
count = 0;
idr_for_each_entry(object_idr, entry, object) {
if (count_objects > count) {
- DRM_DEBUG_LEASE("adding object %d\n", object);
+ drm_dbg_lease(dev, "adding object %d\n", object);
ret = put_user(object, object_ids + count);
if (ret)
break;
@@ -696,7 +700,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
struct drm_master *lessee;
int ret = 0;
- DRM_DEBUG_LEASE("revoke lease for %d\n", arg->lessee_id);
+ drm_dbg_lease(dev, "revoke lease for %d\n", arg->lessee_id);
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a6ac56580876..c871d9f096b8 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -21,6 +21,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -192,6 +193,7 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
+ * @src: The source buffer
* @fb: The source framebuffer
* @clip: Clipping rectangle of the area to be copied
* @swap: When true, swap MSB/LSB of 16-bit values
@@ -199,12 +201,10 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
int ret;
@@ -212,19 +212,15 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
if (ret)
return ret;
- ret = drm_gem_fb_vmap(fb, map, data);
- if (ret)
- goto out_drm_gem_fb_end_cpu_access;
-
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, data, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach);
else
- drm_fb_memcpy(&dst_map, NULL, data, fb, clip);
+ drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, data, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -232,8 +228,6 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
ret = -EINVAL;
}
- drm_gem_fb_vunmap(fb, map);
-out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
@@ -257,29 +251,18 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
}
-static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
struct mipi_dbi *dbi = &dbidev->dbi;
bool swap = dbi->swap_bytes;
- int idx, ret = 0;
+ int ret = 0;
bool full;
void *tr;
- if (WARN_ON(!fb))
- return;
-
- if (!drm_dev_enter(fb->dev, &idx))
- return;
-
- ret = drm_gem_fb_vmap(fb, map, data);
- if (ret)
- goto err_drm_dev_exit;
-
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -287,11 +270,11 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
- tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */
+ tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
@@ -302,11 +285,6 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret);
-
- drm_gem_fb_vunmap(fb, map);
-
-err_drm_dev_exit:
- drm_dev_exit(idx);
}
/**
@@ -339,13 +317,24 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (WARN_ON(!fb))
+ return;
+
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- mipi_dbi_fb_dirty(state->fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,6 +355,7 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
@@ -378,7 +368,7 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi_dbi_fb_dirty(fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
@@ -427,9 +417,95 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
+ if (dbidev->io_regulator)
+ regulator_disable(dbidev->io_regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+/**
+ * mipi_dbi_pipe_begin_fb_access - MIPI DBI pipe begin-access helper
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_funcs.begin_fb_access.
+ *
+ * See drm_gem_begin_shadow_fb_access() for details and mipi_dbi_pipe_cleanup_fb()
+ * for cleanup.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_begin_fb_access);
+
+/**
+ * mipi_dbi_pipe_end_fb_access - MIPI DBI pipe end-access helper
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_funcs.end_fb_access.
+ *
+ * See mipi_dbi_pipe_begin_fb_access().
+ */
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_end_fb_access);
+
+/**
+ * mipi_dbi_pipe_reset_plane - MIPI DBI plane-reset helper
+ * @pipe: Display pipe
+ *
+ * This function implements struct &drm_simple_display_funcs.reset_plane
+ * for MIPI DBI planes.
+ */
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe)
+{
+ drm_gem_reset_shadow_plane(&pipe->plane);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_reset_plane);
+
+/**
+ * mipi_dbi_pipe_duplicate_plane_state - duplicates MIPI DBI plane state
+ * @pipe: Display pipe
+ *
+ * This function implements struct &drm_simple_display_funcs.duplicate_plane_state
+ * for MIPI DBI planes.
+ *
+ * See drm_gem_duplicate_shadow_plane_state() for additional details.
+ *
+ * Returns:
+ * A pointer to a new plane state on success, or NULL otherwise.
+ */
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe)
+{
+ return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_duplicate_plane_state);
+
+/**
+ * mipi_dbi_pipe_destroy_plane_state - cleans up MIPI DBI plane state
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct drm_simple_display_funcs.destroy_plane_state
+ * for MIPI DBI planes.
+ *
+ * See drm_gem_destroy_shadow_plane_state() for additional details.
+ */
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_destroy_plane_state);
+
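A hedged sketch of how a driver would plug the new shadow-plane helpers into its pipe functions; my_pipe_enable() and my_pipe_disable() stand in for the driver's own callbacks and are not part of this patch.

static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
	.enable			= my_pipe_enable,
	.disable		= my_pipe_disable,
	.update			= mipi_dbi_pipe_update,
	.begin_fb_access	= mipi_dbi_pipe_begin_fb_access,
	.end_fb_access		= mipi_dbi_pipe_end_fb_access,
	.reset_plane		= mipi_dbi_pipe_reset_plane,
	.duplicate_plane_state	= mipi_dbi_pipe_duplicate_plane_state,
	.destroy_plane_state	= mipi_dbi_pipe_destroy_plane_state,
};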
static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
@@ -652,6 +728,16 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool
}
}
+ if (dbidev->io_regulator) {
+ ret = regulator_enable(dbidev->io_regulator);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to enable I/O regulator (%d)\n", ret);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
+ return ret;
+ }
+ }
+
if (cond && mipi_dbi_display_is_on(dbi))
return 1;
@@ -661,6 +747,8 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool
DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
+ if (dbidev->io_regulator)
+ regulator_disable(dbidev->io_regulator);
return ret;
}
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 497ef4b6a90a..3fd6c733ff4e 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -62,9 +62,9 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mipi_dsi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ const struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
int err;
err = of_device_uevent_modalias(dev, env);
@@ -160,7 +160,7 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
int ret;
u32 reg;
- if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+ if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) {
drm_err(host, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
return dsi;
}
- dsi->dev.of_node = info->node;
+ device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strlcpy(dsi->name, info->type, sizeof(dsi->name));
@@ -329,7 +329,7 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
for_each_available_child_of_node(host->dev->of_node, node) {
/* skip nodes without reg property */
- if (!of_find_property(node, "reg", NULL))
+ if (!of_property_present(node, "reg"))
continue;
of_mipi_dsi_device_add(host, node);
}
@@ -1224,6 +1224,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
+/**
+ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
+ * of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness)
+{
+ u8 payload[2] = { brightness >> 8, brightness & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ payload, sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
+ * brightness value of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness)
+{
+ u8 brightness_be[2];
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ brightness_be, sizeof(brightness_be));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ *brightness = (brightness_be[0] << 8) | brightness_be[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
+
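For illustration only: a panel driver could back its backlight device with the new 16-bit DCS helper. my_bl_update_status() and the bl_get_data() wiring are assumptions, not part of this patch.

#include <linux/backlight.h>
#include <drm/drm_mipi_dsi.h>

static int my_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness = backlight_get_brightness(bl);

	/* Sends MIPI_DCS_SET_DISPLAY_BRIGHTNESS with a big-endian 16-bit payload. */
	return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}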
static int mipi_dsi_drv_probe(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 688c8afe0bf1..87eb591fe9b5 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -54,6 +54,8 @@ int drm_modeset_register_all(struct drm_device *dev)
if (ret)
goto err_connector;
+ drm_debugfs_late_register(dev);
+
return 0;
err_connector:
@@ -399,6 +401,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
*/
int drmm_mode_config_init(struct drm_device *dev)
{
+ int ret;
+
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
@@ -420,7 +424,11 @@ int drmm_mode_config_init(struct drm_device *dev)
init_llist_head(&dev->mode_config.connector_free_list);
INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
- drm_mode_create_standard_properties(dev);
+ ret = drm_mode_create_standard_properties(dev);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
/* Just to be sure */
dev->mode_config.num_fb = 0;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 3c8034a8c27b..ac9a406250c5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -31,10 +31,11 @@
*/
#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/fb.h> /* for KHZ2PICOS() */
#include <linux/list.h>
#include <linux/list_sort.h>
-#include <linux/export.h>
-#include <linux/fb.h>
+#include <linux/of.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -116,6 +117,482 @@ void drm_mode_probed_add(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_probed_add);
+enum drm_mode_analog {
+ DRM_MODE_ANALOG_NTSC, /* 525 lines, 60Hz */
+ DRM_MODE_ANALOG_PAL, /* 625 lines, 50Hz */
+};
+
+/*
+ * The timings come from:
+ * - https://web.archive.org/web/20220406232708/http://www.kolumbus.fi/pami1/video/pal_ntsc.html
+ * - https://web.archive.org/web/20220406124914/http://martin.hinner.info/vga/pal.html
+ * - https://web.archive.org/web/20220609202433/http://www.batsocks.co.uk/readme/video_timing.htm
+ */
+#define NTSC_LINE_DURATION_NS 63556U
+#define NTSC_LINES_NUMBER 525
+
+#define NTSC_HBLK_DURATION_TYP_NS 10900U
+#define NTSC_HBLK_DURATION_MIN_NS (NTSC_HBLK_DURATION_TYP_NS - 200)
+#define NTSC_HBLK_DURATION_MAX_NS (NTSC_HBLK_DURATION_TYP_NS + 200)
+
+#define NTSC_HACT_DURATION_TYP_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_TYP_NS)
+#define NTSC_HACT_DURATION_MIN_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MAX_NS)
+#define NTSC_HACT_DURATION_MAX_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MIN_NS)
+
+#define NTSC_HFP_DURATION_TYP_NS 1500
+#define NTSC_HFP_DURATION_MIN_NS 1270
+#define NTSC_HFP_DURATION_MAX_NS 2220
+
+#define NTSC_HSLEN_DURATION_TYP_NS 4700
+#define NTSC_HSLEN_DURATION_MIN_NS (NTSC_HSLEN_DURATION_TYP_NS - 100)
+#define NTSC_HSLEN_DURATION_MAX_NS (NTSC_HSLEN_DURATION_TYP_NS + 100)
+
+#define NTSC_HBP_DURATION_TYP_NS 4700
+
+/*
+ * I couldn't find the actual tolerance for the back porch, so let's
+ * just reuse the sync length ones.
+ */
+#define NTSC_HBP_DURATION_MIN_NS (NTSC_HBP_DURATION_TYP_NS - 100)
+#define NTSC_HBP_DURATION_MAX_NS (NTSC_HBP_DURATION_TYP_NS + 100)
+
+#define PAL_LINE_DURATION_NS 64000U
+#define PAL_LINES_NUMBER 625
+
+#define PAL_HACT_DURATION_TYP_NS 51950U
+#define PAL_HACT_DURATION_MIN_NS (PAL_HACT_DURATION_TYP_NS - 100)
+#define PAL_HACT_DURATION_MAX_NS (PAL_HACT_DURATION_TYP_NS + 400)
+
+#define PAL_HBLK_DURATION_TYP_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_TYP_NS)
+#define PAL_HBLK_DURATION_MIN_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MAX_NS)
+#define PAL_HBLK_DURATION_MAX_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MIN_NS)
+
+#define PAL_HFP_DURATION_TYP_NS 1650
+#define PAL_HFP_DURATION_MIN_NS (PAL_HFP_DURATION_TYP_NS - 100)
+#define PAL_HFP_DURATION_MAX_NS (PAL_HFP_DURATION_TYP_NS + 400)
+
+#define PAL_HSLEN_DURATION_TYP_NS 4700
+#define PAL_HSLEN_DURATION_MIN_NS (PAL_HSLEN_DURATION_TYP_NS - 200)
+#define PAL_HSLEN_DURATION_MAX_NS (PAL_HSLEN_DURATION_TYP_NS + 200)
+
+#define PAL_HBP_DURATION_TYP_NS 5700
+#define PAL_HBP_DURATION_MIN_NS (PAL_HBP_DURATION_TYP_NS - 200)
+#define PAL_HBP_DURATION_MAX_NS (PAL_HBP_DURATION_TYP_NS + 200)
+
+struct analog_param_field {
+ unsigned int even, odd;
+};
+
+#define PARAM_FIELD(_odd, _even) \
+ { .even = _even, .odd = _odd }
+
+struct analog_param_range {
+ unsigned int min, typ, max;
+};
+
+#define PARAM_RANGE(_min, _typ, _max) \
+ { .min = _min, .typ = _typ, .max = _max }
+
+struct analog_parameters {
+ unsigned int num_lines;
+ unsigned int line_duration_ns;
+
+ struct analog_param_range hact_ns;
+ struct analog_param_range hfp_ns;
+ struct analog_param_range hslen_ns;
+ struct analog_param_range hbp_ns;
+ struct analog_param_range hblk_ns;
+
+ unsigned int bt601_hfp;
+
+ struct analog_param_field vfp_lines;
+ struct analog_param_field vslen_lines;
+ struct analog_param_field vbp_lines;
+};
+
+#define TV_MODE_PARAMETER(_mode, _lines, _line_dur, _hact, _hfp, \
+ _hslen, _hbp, _hblk, _bt601_hfp, _vfp, \
+ _vslen, _vbp) \
+ [_mode] = { \
+ .num_lines = _lines, \
+ .line_duration_ns = _line_dur, \
+ .hact_ns = _hact, \
+ .hfp_ns = _hfp, \
+ .hslen_ns = _hslen, \
+ .hbp_ns = _hbp, \
+ .hblk_ns = _hblk, \
+ .bt601_hfp = _bt601_hfp, \
+ .vfp_lines = _vfp, \
+ .vslen_lines = _vslen, \
+ .vbp_lines = _vbp, \
+ }
+
+static const struct analog_parameters tv_modes_parameters[] = {
+ TV_MODE_PARAMETER(DRM_MODE_ANALOG_NTSC,
+ NTSC_LINES_NUMBER,
+ NTSC_LINE_DURATION_NS,
+ PARAM_RANGE(NTSC_HACT_DURATION_MIN_NS,
+ NTSC_HACT_DURATION_TYP_NS,
+ NTSC_HACT_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HFP_DURATION_MIN_NS,
+ NTSC_HFP_DURATION_TYP_NS,
+ NTSC_HFP_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HSLEN_DURATION_MIN_NS,
+ NTSC_HSLEN_DURATION_TYP_NS,
+ NTSC_HSLEN_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HBP_DURATION_MIN_NS,
+ NTSC_HBP_DURATION_TYP_NS,
+ NTSC_HBP_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HBLK_DURATION_MIN_NS,
+ NTSC_HBLK_DURATION_TYP_NS,
+ NTSC_HBLK_DURATION_MAX_NS),
+ 16,
+ PARAM_FIELD(3, 3),
+ PARAM_FIELD(3, 3),
+ PARAM_FIELD(16, 17)),
+ TV_MODE_PARAMETER(DRM_MODE_ANALOG_PAL,
+ PAL_LINES_NUMBER,
+ PAL_LINE_DURATION_NS,
+ PARAM_RANGE(PAL_HACT_DURATION_MIN_NS,
+ PAL_HACT_DURATION_TYP_NS,
+ PAL_HACT_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HFP_DURATION_MIN_NS,
+ PAL_HFP_DURATION_TYP_NS,
+ PAL_HFP_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HSLEN_DURATION_MIN_NS,
+ PAL_HSLEN_DURATION_TYP_NS,
+ PAL_HSLEN_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HBP_DURATION_MIN_NS,
+ PAL_HBP_DURATION_TYP_NS,
+ PAL_HBP_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HBLK_DURATION_MIN_NS,
+ PAL_HBLK_DURATION_TYP_NS,
+ PAL_HBLK_DURATION_MAX_NS),
+ 12,
+
+ /*
+ * The front porch is actually 6 short sync
+ * pulses for the even field, and 5 for the
+ * odd field. Each sync takes half a line so
+ * the odd field front porch is shorter by
+ * half a line.
+ *
+ * In progressive, we're supposed to use 6
+ * pulses, so we're fine there
+ */
+ PARAM_FIELD(3, 2),
+
+ /*
+ * The vsync length is 5 long sync pulses,
+ * each pulse taking half a line. We're
+ * shorter for both fields by half a line.
+ *
+ * In progressive, we're supposed to use 5
+ * pulses, so we're off by half
+ * a line.
+ *
+ * In interlace, we're now off by half a line
+ * for the even field and one line for the odd
+ * field.
+ */
+ PARAM_FIELD(3, 3),
+
+ /*
+ * The back porch starts with post-equalizing
+ * pulses, consisting in 5 short sync pulses
+ * for the even field, 4 for the odd field. In
+ * progressive, it's 5 short syncs.
+ *
+ * In progressive, we thus have 2.5 lines,
+ * plus the 0.5 line we were missing
+ * previously, so we should use 3 lines.
+ *
+ * In interlace, the even field is in the
+ * exact same case as progressive. For the
+ * odd field, we should be using 2 lines but
+ * we're one line short, so we'll make up for
+ * it here by using 3.
+ *
+ * The entire blanking area is supposed to
+ * take 25 lines, so we also need to account
+ * for the rest of the blanking area that
+ * can't be in either the front porch or sync
+ * period.
+ */
+ PARAM_FIELD(19, 20)),
+};
+
+static int fill_analog_mode(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ const struct analog_parameters *params,
+ unsigned long pixel_clock_hz,
+ unsigned int hactive,
+ unsigned int vactive,
+ bool interlace)
+{
+ unsigned long pixel_duration_ns = NSEC_PER_SEC / pixel_clock_hz;
+ unsigned int htotal, vtotal;
+ unsigned int max_hact, hact_duration_ns;
+ unsigned int hblk, hblk_duration_ns;
+ unsigned int hfp, hfp_duration_ns;
+ unsigned int hslen, hslen_duration_ns;
+ unsigned int hbp, hbp_duration_ns;
+ unsigned int porches, porches_duration_ns;
+ unsigned int vfp, vfp_min;
+ unsigned int vbp, vbp_min;
+ unsigned int vslen;
+ bool bt601 = false;
+ int porches_rem;
+ u64 result;
+
+ drm_dbg_kms(dev,
+ "Generating a %ux%u%c, %u-line mode with a %lu kHz clock\n",
+ hactive, vactive,
+ interlace ? 'i' : 'p',
+ params->num_lines,
+ pixel_clock_hz / 1000);
+
+ max_hact = params->hact_ns.max / pixel_duration_ns;
+ if (pixel_clock_hz == 13500000 && hactive > max_hact && hactive <= 720) {
+ drm_dbg_kms(dev, "Trying to generate a BT.601 mode. Disabling checks.\n");
+ bt601 = true;
+ }
+
+ /*
+ * Our pixel duration is going to be rounded down by the division,
+ * so rounding up is probably going to introduce even more
+ * deviation.
+ */
+ result = (u64)params->line_duration_ns * pixel_clock_hz;
+ do_div(result, NSEC_PER_SEC);
+ htotal = result;
+
+ drm_dbg_kms(dev, "Total Horizontal Number of Pixels: %u\n", htotal);
+
+ hact_duration_ns = hactive * pixel_duration_ns;
+ if (!bt601 &&
+ (hact_duration_ns < params->hact_ns.min ||
+ hact_duration_ns > params->hact_ns.max)) {
+ DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
+ hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
+ return -EINVAL;
+ }
+
+ hblk = htotal - hactive;
+ drm_dbg_kms(dev, "Horizontal Blanking Period: %u\n", hblk);
+
+ hblk_duration_ns = hblk * pixel_duration_ns;
+ if (!bt601 &&
+ (hblk_duration_ns < params->hblk_ns.min ||
+ hblk_duration_ns > params->hblk_ns.max)) {
+ DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
+ hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
+ return -EINVAL;
+ }
+
+ hslen = DIV_ROUND_UP(params->hslen_ns.typ, pixel_duration_ns);
+ drm_dbg_kms(dev, "Horizontal Sync Period: %u\n", hslen);
+
+ hslen_duration_ns = hslen * pixel_duration_ns;
+ if (!bt601 &&
+ (hslen_duration_ns < params->hslen_ns.min ||
+ hslen_duration_ns > params->hslen_ns.max)) {
+ DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
+ hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
+ return -EINVAL;
+ }
+
+ porches = hblk - hslen;
+ drm_dbg_kms(dev, "Remaining horizontal pixels for both porches: %u\n", porches);
+
+ porches_duration_ns = porches * pixel_duration_ns;
+ if (!bt601 &&
+ (porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) ||
+ porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) {
+ DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns);
+ return -EINVAL;
+ }
+
+ if (bt601) {
+ hfp = params->bt601_hfp;
+ } else {
+ unsigned int hfp_min = DIV_ROUND_UP(params->hfp_ns.min,
+ pixel_duration_ns);
+ unsigned int hbp_min = DIV_ROUND_UP(params->hbp_ns.min,
+ pixel_duration_ns);
+ int porches_rem = porches - hfp_min - hbp_min;
+
+ hfp = hfp_min + DIV_ROUND_UP(porches_rem, 2);
+ }
+
+ drm_dbg_kms(dev, "Horizontal Front Porch: %u\n", hfp);
+
+ hfp_duration_ns = hfp * pixel_duration_ns;
+ if (!bt601 &&
+ (hfp_duration_ns < params->hfp_ns.min ||
+ hfp_duration_ns > params->hfp_ns.max)) {
+ DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
+ hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
+ return -EINVAL;
+ }
+
+ hbp = porches - hfp;
+ drm_dbg_kms(dev, "Horizontal Back Porch: %u\n", hbp);
+
+ hbp_duration_ns = hbp * pixel_duration_ns;
+ if (!bt601 &&
+ (hbp_duration_ns < params->hbp_ns.min ||
+ hbp_duration_ns > params->hbp_ns.max)) {
+ DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
+ hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
+ return -EINVAL;
+ }
+
+ if (htotal != (hactive + hfp + hslen + hbp))
+ return -EINVAL;
+
+ mode->clock = pixel_clock_hz / 1000;
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hfp;
+ mode->hsync_end = mode->hsync_start + hslen;
+ mode->htotal = mode->hsync_end + hbp;
+
+ if (interlace) {
+ vfp_min = params->vfp_lines.even + params->vfp_lines.odd;
+ vbp_min = params->vbp_lines.even + params->vbp_lines.odd;
+ vslen = params->vslen_lines.even + params->vslen_lines.odd;
+ } else {
+ /*
+ * By convention, NTSC (aka 525/60) systems start with
+ * the even field, but PAL (aka 625/50) systems start
+ * with the odd one.
+ *
+ * PAL systems also have asymmetric timings between the
+ * even and odd field, while NTSC is symmetric.
+ *
+ * Moreover, if we want to create a progressive mode for
+ * PAL, we need to use the odd field timings.
+ *
+ * Since odd == even for NTSC, we can just use the odd
+ * one all the time to simplify the code a bit.
+ */
+ vfp_min = params->vfp_lines.odd;
+ vbp_min = params->vbp_lines.odd;
+ vslen = params->vslen_lines.odd;
+ }
+
+ drm_dbg_kms(dev, "Vertical Sync Period: %u\n", vslen);
+
+ porches = params->num_lines - vactive - vslen;
+ drm_dbg_kms(dev, "Remaining vertical pixels for both porches: %u\n", porches);
+
+ porches_rem = porches - vfp_min - vbp_min;
+ vfp = vfp_min + (porches_rem / 2);
+ drm_dbg_kms(dev, "Vertical Front Porch: %u\n", vfp);
+
+ vbp = porches - vfp;
+ drm_dbg_kms(dev, "Vertical Back Porch: %u\n", vbp);
+
+ vtotal = vactive + vfp + vslen + vbp;
+ if (params->num_lines != vtotal) {
+ DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n",
+ vtotal, params->num_lines);
+ return -EINVAL;
+ }
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vfp;
+ mode->vsync_end = mode->vsync_start + vslen;
+ mode->vtotal = mode->vsync_end + vbp;
+
+ if (mode->vtotal != params->num_lines)
+ return -EINVAL;
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC;
+ if (interlace)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ drm_mode_set_name(mode);
+
+ drm_dbg_kms(dev, "Generated mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+ return 0;
+}
+
+/**
+ * drm_analog_tv_mode - create a display mode for an analog TV
+ * @dev: drm device
+ * @tv_mode: TV Mode standard to create a mode for. See DRM_MODE_TV_MODE_*.
+ * @pixel_clock_hz: Pixel Clock Frequency, in Hertz
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @interlace: whether to compute an interlaced mode
+ *
+ * This function creates a struct drm_display_mode instance suited for
+ * an analog TV output, for one of the usual analog TV modes.
+ *
+ * Note that @hdisplay can be larger than the usual constraints for the PAL
+ * and NTSC timings; in that case we'll choose to ignore most timing
+ * constraints to reach those resolutions.
+ *
+ * Returns:
+ *
+ * A pointer to the mode, allocated with drm_mode_create(). Returns NULL
+ * on error.
+ */
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode tv_mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace)
+{
+ struct drm_display_mode *mode;
+ enum drm_mode_analog analog;
+ int ret;
+
+ switch (tv_mode) {
+ case DRM_MODE_TV_MODE_NTSC:
+ fallthrough;
+ case DRM_MODE_TV_MODE_NTSC_443:
+ fallthrough;
+ case DRM_MODE_TV_MODE_NTSC_J:
+ fallthrough;
+ case DRM_MODE_TV_MODE_PAL_M:
+ analog = DRM_MODE_ANALOG_NTSC;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL:
+ fallthrough;
+ case DRM_MODE_TV_MODE_PAL_N:
+ fallthrough;
+ case DRM_MODE_TV_MODE_SECAM:
+ analog = DRM_MODE_ANALOG_PAL;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ ret = fill_analog_mode(dev, mode,
+ &tv_modes_parameters[analog],
+ pixel_clock_hz, hdisplay, vdisplay, interlace);
+ if (ret)
+ goto err_free_mode;
+
+ return mode;
+
+err_free_mode:
+ drm_mode_destroy(dev, mode);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_analog_tv_mode);
+
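A minimal sketch of how a TV encoder driver could use the new helper instead of hard-coded timings; the my_tv_get_modes() connector callback is hypothetical, and 13.5 MHz with 720x480 interlaced is the usual BT.601 NTSC choice.

static int my_tv_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_analog_tv_mode(connector->dev, DRM_MODE_TV_MODE_NTSC,
				  13500000, 720, 480, true);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;
}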
/**
* drm_cvt_mode -create a modeline based on the CVT algorithm
* @dev: drm device
@@ -1659,6 +2136,30 @@ static int drm_mode_parse_panel_orientation(const char *delim,
return 0;
}
+static int drm_mode_parse_tv_mode(const char *delim,
+ struct drm_cmdline_mode *mode)
+{
+ const char *value;
+ int ret;
+
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ delim = strchr(value, ',');
+ if (!delim)
+ delim = value + strlen(value);
+
+ ret = drm_get_tv_mode_from_name(value, delim - value);
+ if (ret < 0)
+ return ret;
+
+ mode->tv_mode_specified = true;
+ mode->tv_mode = ret;
+
+ return 0;
+}
+
static int drm_mode_parse_cmdline_options(const char *str,
bool freestanding,
const struct drm_connector *connector,
@@ -1728,6 +2229,9 @@ static int drm_mode_parse_cmdline_options(const char *str,
} else if (!strncmp(option, "panel_orientation", delim - option)) {
if (drm_mode_parse_panel_orientation(delim, mode))
return -EINVAL;
+ } else if (!strncmp(option, "tv_mode", delim - option)) {
+ if (drm_mode_parse_tv_mode(delim, mode))
+ return -EINVAL;
} else {
return -EINVAL;
}
@@ -1756,20 +2260,24 @@ struct drm_named_mode {
unsigned int xres;
unsigned int yres;
unsigned int flags;
+ unsigned int tv_mode;
};
-#define NAMED_MODE(_name, _pclk, _x, _y, _flags) \
+#define NAMED_MODE(_name, _pclk, _x, _y, _flags, _mode) \
{ \
.name = _name, \
.pixel_clock_khz = _pclk, \
.xres = _x, \
.yres = _y, \
.flags = _flags, \
+ .tv_mode = _mode, \
}
static const struct drm_named_mode drm_named_modes[] = {
- NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE),
- NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE),
+ NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC),
+ NAMED_MODE("NTSC-J", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC_J),
+ NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL),
+ NAMED_MODE("PAL-M", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL_M),
};
static int drm_mode_parse_cmdline_named_mode(const char *name,
@@ -1809,11 +2317,13 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
if (ret != name_end)
continue;
- strcpy(cmdline_mode->name, mode->name);
+ strscpy(cmdline_mode->name, mode->name, sizeof(cmdline_mode->name));
cmdline_mode->pixel_clock = mode->pixel_clock_khz;
cmdline_mode->xres = mode->xres;
cmdline_mode->yres = mode->yres;
cmdline_mode->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ cmdline_mode->tv_mode = mode->tv_mode;
+ cmdline_mode->tv_mode_specified = true;
cmdline_mode->specified = true;
return 1;
@@ -1829,8 +2339,7 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
* @mode: preallocated drm_cmdline_mode structure to fill out
*
* This parses @mode_option command line modeline for modes and options to
- * configure the connector. If @mode_option is NULL the default command line
- * modeline in fb_mode_option will be parsed instead.
+ * configure the connector.
*
* This uses the same parameters as the fb modedb.c, except for an extra
* force-enable, force-enable-digital and force-disable bit at the end::
@@ -1992,6 +2501,31 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
+static struct drm_display_mode *drm_named_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes); i++) {
+ const struct drm_named_mode *named_mode = &drm_named_modes[i];
+
+ if (strcmp(cmd->name, named_mode->name))
+ continue;
+
+ if (!cmd->tv_mode_specified)
+ continue;
+
+ return drm_analog_tv_mode(dev,
+ named_mode->tv_mode,
+ named_mode->pixel_clock_khz * 1000,
+ named_mode->xres,
+ named_mode->yres,
+ named_mode->flags & DRM_MODE_FLAG_INTERLACE);
+ }
+
+ return NULL;
+}
+
/**
* drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
* @dev: DRM device to create the new mode for
@@ -2009,7 +2543,9 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
if (cmd->xres == 0 || cmd->yres == 0)
return NULL;
- if (cmd->cvt)
+ if (strlen(cmd->name))
+ mode = drm_named_mode(dev, cmd);
+ else if (cmd->cvt)
mode = drm_cvt_mode(dev,
cmd->xres, cmd->yres,
cmd->refresh_specified ? cmd->refresh : 60,
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 7bbcb999bb75..177b600895d3 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -493,3 +494,53 @@ int drm_of_get_data_lanes_count_ep(const struct device_node *port,
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
+
+#if IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+
+/**
+ * drm_of_get_dsi_bus - find the DSI bus for a given device
+ * @dev: parent device of display (SPI, I2C)
+ *
+ * Gets parent DSI bus for a DSI device controlled through a bus other
+ * than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
+ *
+ * Returns pointer to mipi_dsi_host if successful, -EINVAL if the
+ * request is unsupported, -EPROBE_DEFER if the DSI host is found but
+ * not available, or -ENODEV otherwise.
+ */
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
+{
+ struct mipi_dsi_host *dsi_host;
+ struct device_node *endpoint, *dsi_host_node;
+
+ /*
+ * Get first endpoint child from device.
+ */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!endpoint)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Follow the first endpoint to get the DSI host node and then
+ * release the endpoint since we no longer need it.
+ */
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!dsi_host_node)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Get the DSI host from the DSI host node. If we get an error
+ * or the return is NULL, assume we're not ready to probe just
+ * yet. Release the DSI host node since we're done with it.
+ */
+ dsi_host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (IS_ERR_OR_NULL(dsi_host))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return dsi_host;
+}
+EXPORT_SYMBOL_GPL(drm_of_get_dsi_bus);
+
+#endif /* CONFIG_DRM_MIPI_DSI */
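A hedged usage sketch for the new helper: a hypothetical panel whose control bus is SPI but whose video stream is DSI can resolve its DSI host in probe and register itself as a DSI peripheral. The function name, the device-info contents and the devm_mipi_dsi_*() registration flow below are illustrative, not lifted from a specific driver.

static int mypanel_probe(struct spi_device *spi)
{
	struct mipi_dsi_device_info info = {
		.type    = "mypanel",
		.channel = 0,
	};
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *dsi;

	/* May return ERR_PTR(-EPROBE_DEFER) while the DSI host is not ready. */
	host = drm_of_get_dsi_bus(&spi->dev);
	if (IS_ERR(host))
		return PTR_ERR(host);

	/* Register a DSI peripheral on the host found through the OF graph. */
	dsi = devm_mipi_dsi_device_register_full(&spi->dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	return devm_mipi_dsi_attach(&spi->dev, dsi);
}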
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 52d8800a8ab8..b1a38e6ce2f8 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
-static const struct drm_dmi_panel_orientation_data asus_t100ha = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.width = 720,
.height = 1280,
@@ -97,6 +91,12 @@ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd800x1280_leftside_up = {
+ .width = 800,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.width = 800,
.height = 1280,
@@ -127,6 +127,12 @@ static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_rightside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -151,7 +157,7 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
- .driver_data = (void *)&asus_t100ha,
+ .driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* Asus T101HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -196,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Dynabook K50 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
+ },
+ .driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -304,10 +316,29 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
- }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ }, { /* Lenovo Ideapad D330-10IGL (HD) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo IdeaPad Duet 3 10IGL5 */
.matches = {
- /* Non exact match to match all versions */
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X90F / X90L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tablet 2 830F / 830L */
@@ -325,6 +356,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Tab 3 X90F */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Nanote UMPC-01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 33357629a7f5..24e7998d1731 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -46,6 +46,11 @@
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
*
+ * Unless explicitly specified (via CRTC property or otherwise), the active area
+ * of a CRTC will be black by default. This means portions of the active area
+ * which are not covered by a plane will be black, and alpha blending of any
+ * planes with the CRTC background will blend with black at the lowest zpos.
+ *
* To create a plane, a KMS driver allocates and zeroes an instance of
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index ba6a9136a065..c91e454eba09 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -28,7 +28,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index f924b8b4ab6b..d29dafce9bb0 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
- * @dev: dev to export the buffer from
+ * @dev: drm_device to import into
* @file_priv: drm file-private structure
* @prime_fd: fd id of the dma-buf which should be imported
* @handle: pointer to storage for the handle of the imported buffer object
@@ -544,7 +544,8 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
* Optional pinning of buffers is handled at dma-buf attach and detach time in
* drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
* handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
- * &drm_gem_object_funcs.get_sg_table.
+ * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
+ * unimplemented, exports into another device are rejected.
*
* For kernel-internal access there's drm_gem_dmabuf_vmap() and
* drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
@@ -583,6 +584,9 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
+ if (!obj->funcs->get_sg_table)
+ return -ENOSYS;
+
return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -925,7 +929,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
- * Importing dmabuf exported from out own gem increases
+ * Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_get(obj);
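Because drm_gem_map_attach() now fails with -ENOSYS when &drm_gem_object_funcs.get_sg_table is missing, a driver that wants its objects to remain exportable must keep that hook populated. A hedged sketch, assuming the shmem GEM helpers and a hypothetical my_gem_funcs name:

static const struct drm_gem_object_funcs my_gem_funcs = {
	.free         = drm_gem_shmem_object_free,
	.pin          = drm_gem_shmem_object_pin,
	.unpin        = drm_gem_shmem_object_unpin,
	/* Without this hook, dma-buf attach fails and the object cannot be
	 * exported to another device. */
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap         = drm_gem_shmem_object_vmap,
	.vunmap       = drm_gem_shmem_object_vunmap,
	.mmap         = drm_gem_shmem_object_mmap,
	.vm_ops       = &drm_gem_shmem_vm_ops,
};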
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bcd9611dabfd..2fb9bf901a2c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -222,6 +222,45 @@ drm_connector_mode_valid(struct drm_connector *connector,
return ret;
}
+static void drm_kms_helper_disable_hpd(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_helper_funcs *funcs =
+ connector->helper_private;
+
+ if (funcs && funcs->disable_hpd)
+ funcs->disable_hpd(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_helper_funcs *funcs =
+ connector->helper_private;
+
+ if (funcs && funcs->enable_hpd)
+ funcs->enable_hpd(connector);
+
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ poll = true;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return poll;
+}
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/**
* drm_kms_helper_poll_enable - re-enable output polling.
@@ -241,20 +280,13 @@ drm_connector_mode_valid(struct drm_connector *connector,
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
+ dev->mode_config.poll_running)
return;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT))
- poll = true;
- }
- drm_connector_list_iter_end(&conn_iter);
+ poll = drm_kms_helper_enable_hpd(dev);
if (dev->mode_config.delayed_event) {
/*
@@ -273,6 +305,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
+
+ dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
@@ -556,15 +590,13 @@ retry:
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
- schedule_delayed_work(&dev->mode_config.output_poll_work,
- 0);
+ mod_delayed_work(system_wq,
+ &dev->mode_config.output_poll_work,
+ 0);
}
/* Re-enable polling in case the global poll config changed. */
- if (drm_kms_helper_poll != dev->mode_config.poll_running)
- drm_kms_helper_poll_enable(dev);
-
- dev->mode_config.poll_running = drm_kms_helper_poll;
+ drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -704,8 +736,11 @@ static void output_poll_execute(struct work_struct *work)
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
- if (!drm_kms_helper_poll)
+ if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
+ drm_kms_helper_disable_hpd(dev);
+ dev->mode_config.poll_running = false;
goto out;
+ }
if (!mutex_trylock(&dev->mode_config.mutex)) {
repoll = true;
@@ -818,9 +853,12 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
- if (!dev->mode_config.poll_enabled)
- return;
+ if (dev->mode_config.poll_running)
+ drm_kms_helper_disable_hpd(dev);
+
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+
+ dev->mode_config.poll_running = false;
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -861,8 +899,9 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
if (!dev->mode_config.poll_enabled)
return;
+ drm_kms_helper_poll_disable(dev);
+
dev->mode_config.poll_enabled = false;
- cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
@@ -1139,10 +1178,94 @@ int drm_connector_helper_get_modes(struct drm_connector *connector)
* EDID. Otherwise, if the EDID is NULL, clear the connector
* information.
*/
- count = drm_edid_connector_update(connector, drm_edid);
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
drm_edid_free(drm_edid);
return count;
}
EXPORT_SYMBOL(drm_connector_helper_get_modes);
+
+/**
+ * drm_connector_helper_tv_get_modes - Fills the modes available to a TV connector
+ * @connector: The connector
+ *
+ * Fills the available modes for a TV connector based on the supported
+ * TV modes, and the default mode expressed by the kernel command line.
+ *
+ * This can be used as the default TV connector helper .get_modes() hook
+ * if the driver does not need any special processing.
+ *
+ * Returns:
+ * The number of modes added to the connector.
+ */
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *tv_mode_property =
+ dev->mode_config.tv_mode_property;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ unsigned int ntsc_modes = BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_NTSC_443) |
+ BIT(DRM_MODE_TV_MODE_NTSC_J) |
+ BIT(DRM_MODE_TV_MODE_PAL_M);
+ unsigned int pal_modes = BIT(DRM_MODE_TV_MODE_PAL) |
+ BIT(DRM_MODE_TV_MODE_PAL_N) |
+ BIT(DRM_MODE_TV_MODE_SECAM);
+ unsigned int tv_modes[2] = { UINT_MAX, UINT_MAX };
+ unsigned int i, supported_tv_modes = 0;
+
+ if (!tv_mode_property)
+ return 0;
+
+ for (i = 0; i < tv_mode_property->num_values; i++)
+ supported_tv_modes |= BIT(tv_mode_property->values[i]);
+
+ if ((supported_tv_modes & ntsc_modes) &&
+ (supported_tv_modes & pal_modes)) {
+ uint64_t default_mode;
+
+ if (drm_object_property_get_default_value(&connector->base,
+ tv_mode_property,
+ &default_mode))
+ return 0;
+
+ if (cmdline->tv_mode_specified)
+ default_mode = cmdline->tv_mode;
+
+ if (BIT(default_mode) & ntsc_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
+ tv_modes[1] = DRM_MODE_TV_MODE_PAL;
+ } else {
+ tv_modes[0] = DRM_MODE_TV_MODE_PAL;
+ tv_modes[1] = DRM_MODE_TV_MODE_NTSC;
+ }
+ } else if (supported_tv_modes & ntsc_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
+ } else if (supported_tv_modes & pal_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_PAL;
+ } else {
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ struct drm_display_mode *mode;
+
+ if (tv_modes[i] == DRM_MODE_TV_MODE_NTSC)
+ mode = drm_mode_analog_ntsc_480i(dev);
+ else if (tv_modes[i] == DRM_MODE_TV_MODE_PAL)
+ mode = drm_mode_analog_pal_576i(dev);
+ else
+ break;
+ if (!mode)
+ return i;
+ if (!i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+EXPORT_SYMBOL(drm_connector_helper_tv_get_modes);
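A hedged sketch of how a driver with no special TV mode handling might adopt the new hook; the structure and function names are illustrative:

static const struct drm_connector_helper_funcs my_tv_connector_helper_funcs = {
	.get_modes = drm_connector_helper_tv_get_modes,
};

static void my_tv_connector_init(struct drm_connector *connector)
{
	/* The helper relies on dev->mode_config.tv_mode_property, so the
	 * driver is expected to have created the TV properties with its
	 * supported analog modes beforehand. */
	drm_connector_helper_add(connector, &my_tv_connector_helper_funcs);
}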
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 3ef420ec4534..270523ae36d4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -267,7 +267,7 @@ static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane,
WARN_ON_ONCE(pipe->funcs && pipe->funcs->cleanup_fb);
- return drm_gem_simple_display_pipe_prepare_fb(pipe, state);
+ return drm_gem_plane_helper_prepare_fb(plane, state);
}
return pipe->funcs->prepare_fb(pipe, state);
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
new file mode 100644
index 000000000000..38cc7a123819
--- /dev/null
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright 2023 Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", we always try to allocate
+ * after the last allocated bo. The principle is that, in a linear GPU ring
+ * progression, what is after last is the oldest bo we allocated and thus
+ * the first one that should no longer be in use by the GPU.
+ *
+ * If that is not the case, we skip over the bo after last to the closest
+ * completed bo, if one exists. If none exists and we are not asked to
+ * block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of every ring.
+ * We just wait for any one of those fences to complete.
+ */
+
+#include <drm/drm_suballoc.h>
+#include <drm/drm_print.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-fence.h>
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa);
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager);
+
+/**
+ * drm_suballoc_manager_init() - Initialise the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ */
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(!is_power_of_2(DRM_SUBALLOC_MAX_QUEUES));
+
+ if (!align)
+ align = 1;
+
+ /* alignment must be a power of 2 */
+ if (WARN_ON_ONCE(align & (align - 1)))
+ align = roundup_pow_of_two(align);
+
+ init_waitqueue_head(&sa_manager->wq);
+ sa_manager->size = size;
+ sa_manager->align = align;
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+}
+EXPORT_SYMBOL(drm_suballoc_manager_init);
+
+/**
+ * drm_suballoc_manager_fini() - Destroy the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ *
+ * Cleans up the suballocation manager after use. All fences added
+ * with drm_suballoc_free() must be signaled, or we cannot clean up
+ * the entire manager.
+ */
+void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager)
+{
+ struct drm_suballoc *sa, *tmp;
+
+ if (!sa_manager->size)
+ return;
+
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist;
+ drm_suballoc_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist))
+ DRM_ERROR("sa_manager is not empty, clearing anyway\n");
+ }
+ list_for_each_entry_safe(sa, tmp, &sa_manager->olist, olist) {
+ drm_suballoc_remove_locked(sa);
+ }
+
+ sa_manager->size = 0;
+}
+EXPORT_SYMBOL(drm_suballoc_manager_fini);
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa)
+{
+ struct drm_suballoc_manager *sa_manager = sa->manager;
+
+ if (sa_manager->hole == &sa->olist)
+ sa_manager->hole = sa->olist.prev;
+
+ list_del_init(&sa->olist);
+ list_del_init(&sa->flist);
+ dma_fence_put(sa->fence);
+ kfree(sa);
+}
+
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager)
+{
+ struct drm_suballoc *sa, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist);
+ list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) {
+ if (!sa->fence || !dma_fence_is_signaled(sa->fence))
+ return;
+
+ drm_suballoc_remove_locked(sa);
+ }
+}
+
+static size_t drm_suballoc_hole_soffset(struct drm_suballoc_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist)
+ return list_entry(hole, struct drm_suballoc, olist)->eoffset;
+
+ return 0;
+}
+
+static size_t drm_suballoc_hole_eoffset(struct drm_suballoc_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist)
+ return list_entry(hole->next, struct drm_suballoc, olist)->soffset;
+ return sa_manager->size;
+}
+
+static bool drm_suballoc_try_alloc(struct drm_suballoc_manager *sa_manager,
+ struct drm_suballoc *sa,
+ size_t size, size_t align)
+{
+ size_t soffset, eoffset, wasted;
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ eoffset = drm_suballoc_hole_eoffset(sa_manager);
+ wasted = round_up(soffset, align) - soffset;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa->manager = sa_manager;
+ sa->soffset = soffset;
+ sa->eoffset = soffset + size;
+ list_add(&sa->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa->flist);
+ sa_manager->hole = &sa->olist;
+ return true;
+ }
+ return false;
+}
+
+static bool __drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ size_t soffset, eoffset, wasted;
+ unsigned int i;
+
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ if (!list_empty(&sa_manager->flist[i]))
+ return true;
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ eoffset = drm_suballoc_hole_eoffset(sa_manager);
+ wasted = round_up(soffset, align) - soffset;
+
+ return ((eoffset - soffset) >= (size + wasted));
+}
+
+/**
+ * drm_suballoc_event() - Check if we can stop waiting
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Return: true if there is either a fence we can wait for or enough
+ * free memory to satisfy the allocation directly; false otherwise.
+ */
+static bool drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ bool ret;
+
+ spin_lock(&sa_manager->wq.lock);
+ ret = __drm_suballoc_event(sa_manager, size, align);
+ spin_unlock(&sa_manager->wq.lock);
+ return ret;
+}
+
+static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
+ struct dma_fence **fences,
+ unsigned int *tries)
+{
+ struct drm_suballoc *best_bo = NULL;
+ unsigned int i, best_idx;
+ size_t soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+ /* go over all fence lists and try to find the sa closest
+ * to the current last
+ */
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) {
+ struct drm_suballoc *sa;
+
+ fences[i] = NULL;
+
+ if (list_empty(&sa_manager->flist[i]))
+ continue;
+
+ sa = list_first_entry(&sa_manager->flist[i],
+ struct drm_suballoc, flist);
+
+ if (!dma_fence_is_signaled(sa->fence)) {
+ fences[i] = sa->fence;
+ continue;
+ }
+
+ /* limit the number of tries each freelist gets */
+ if (tries[i] > 2)
+ continue;
+
+ tmp = sa->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_idx = i;
+ best_bo = sa;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_idx];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /*
+ * We know that this one is signaled,
+ * so it's safe to remove it.
+ */
+ drm_suballoc_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * drm_suballoc_new() - Make a suballocation.
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate.
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL but
+ * the argument is provided for suballocations from reclaim context or
+ * where the caller wants to avoid pipelining rather than wait for
+ * reclaim.
+ * @intr: Whether to perform waits interruptibly. This should typically
+ * always be true, unless the caller needs to propagate a
+ * non-interruptible context from above layers.
+ * @align: Alignment. Must not exceed the default manager alignment.
+ * If @align is zero, then the manager alignment is used.
+ *
+ * Try to make a suballocation of size @size, which will be rounded
+ * up to the alignment specified in drm_suballoc_manager_init().
+ *
+ * Return: a new suballocated bo, or an ERR_PTR.
+ */
+struct drm_suballoc *
+drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
+ gfp_t gfp, bool intr, size_t align)
+{
+ struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES];
+ unsigned int tries[DRM_SUBALLOC_MAX_QUEUES];
+ unsigned int count;
+ int i, r;
+ struct drm_suballoc *sa;
+
+ if (WARN_ON_ONCE(align > sa_manager->align))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(size > sa_manager->size || !size))
+ return ERR_PTR(-EINVAL);
+
+ if (!align)
+ align = sa_manager->align;
+
+ sa = kmalloc(sizeof(*sa), gfp);
+ if (!sa)
+ return ERR_PTR(-ENOMEM);
+ sa->manager = sa_manager;
+ sa->fence = NULL;
+ INIT_LIST_HEAD(&sa->olist);
+ INIT_LIST_HEAD(&sa->flist);
+
+ spin_lock(&sa_manager->wq.lock);
+ do {
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ tries[i] = 0;
+
+ do {
+ drm_suballoc_try_free(sa_manager);
+
+ if (drm_suballoc_try_alloc(sa_manager, sa,
+ size, align)) {
+ spin_unlock(&sa_manager->wq.lock);
+ return sa;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (drm_suballoc_next_hole(sa_manager, fences, tries));
+
+ for (i = 0, count = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ if (fences[i])
+ fences[count++] = dma_fence_get(fences[i]);
+
+ if (count) {
+ long t;
+
+ spin_unlock(&sa_manager->wq.lock);
+ t = dma_fence_wait_any_timeout(fences, count, intr,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ for (i = 0; i < count; ++i)
+ dma_fence_put(fences[i]);
+
+ r = (t > 0) ? 0 : t;
+ spin_lock(&sa_manager->wq.lock);
+ } else if (intr) {
+ /* if we have nothing to wait for, block */
+ r = wait_event_interruptible_locked
+ (sa_manager->wq,
+ __drm_suballoc_event(sa_manager, size, align));
+ } else {
+ spin_unlock(&sa_manager->wq.lock);
+ wait_event(sa_manager->wq,
+ drm_suballoc_event(sa_manager, size, align));
+ r = 0;
+ spin_lock(&sa_manager->wq.lock);
+ }
+ } while (!r);
+
+ spin_unlock(&sa_manager->wq.lock);
+ kfree(sa);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL(drm_suballoc_new);
+
+/**
+ * drm_suballoc_free - Free a suballocation
+ * @suballoc: pointer to the suballocation
+ * @fence: fence that signals when suballocation is idle
+ *
+ * Free the suballocation. The suballocation can be re-used after @fence signals.
+ */
+void drm_suballoc_free(struct drm_suballoc *suballoc,
+ struct dma_fence *fence)
+{
+ struct drm_suballoc_manager *sa_manager;
+
+ if (!suballoc)
+ return;
+
+ sa_manager = suballoc->manager;
+
+ spin_lock(&sa_manager->wq.lock);
+ if (fence && !dma_fence_is_signaled(fence)) {
+ u32 idx;
+
+ suballoc->fence = dma_fence_get(fence);
+ idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1);
+ list_add_tail(&suballoc->flist, &sa_manager->flist[idx]);
+ } else {
+ drm_suballoc_remove_locked(suballoc);
+ }
+ wake_up_all_locked(&sa_manager->wq);
+ spin_unlock(&sa_manager->wq.lock);
+}
+EXPORT_SYMBOL(drm_suballoc_free);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base)
+{
+ struct drm_suballoc *i;
+
+ spin_lock(&sa_manager->wq.lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ unsigned long long soffset = i->soffset;
+ unsigned long long eoffset = i->eoffset;
+
+ if (&i->olist == sa_manager->hole)
+ drm_puts(p, ">");
+ else
+ drm_puts(p, " ");
+
+ drm_printf(p, "[0x%010llx 0x%010llx] size %8lld",
+ suballoc_base + soffset, suballoc_base + eoffset,
+ eoffset - soffset);
+
+ if (i->fence)
+ drm_printf(p, " protected by 0x%016llx on context %llu",
+ (unsigned long long)i->fence->seqno,
+ (unsigned long long)i->fence->context);
+
+ drm_puts(p, "\n");
+ }
+ spin_unlock(&sa_manager->wq.lock);
+}
+EXPORT_SYMBOL(drm_suballoc_dump_debug_info);
+#endif
+MODULE_AUTHOR("Multiple");
+MODULE_DESCRIPTION("Range suballocator helper");
+MODULE_LICENSE("Dual MIT/GPL");
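A hedged usage sketch for the new suballocator, assuming a manager that was set up once with drm_suballoc_manager_init(&sa_manager, SZ_256K, SZ_4K) and is torn down with drm_suballoc_manager_fini() after all fences have signaled; my_ring_emit and job_fence are illustrative:

static int my_ring_emit(struct drm_suballoc_manager *sa_manager,
			struct dma_fence *job_fence)
{
	struct drm_suballoc *sa;

	/* Carve 4 KiB out of the managed range, waiting interruptibly on
	 * older fences if the ring is currently full. */
	sa = drm_suballoc_new(sa_manager, SZ_4K, GFP_KERNEL, true, 0);
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* ... emit commands into the backing buffer at sa->soffset ... */

	/* The range becomes reusable once job_fence signals. */
	drm_suballoc_free(sa, job_fence);

	return 0;
}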
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 183130355997..3c22a803201d 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -112,7 +112,7 @@ int drm_sysfs_init(void)
{
int err;
- drm_class = class_create(THIS_MODULE, "drm");
+ drm_class = class_create("drm");
if (IS_ERR(drm_class))
return PTR_ERR(drm_class);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 2ff31717a3de..877e2067534f 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -844,10 +844,9 @@ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
/**
- * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most
+ * recent vblank interval
+ * @crtc: CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
@@ -865,10 +864,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- ktime_t *tvblank, bool in_vblank_irq)
+drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank,
+ bool in_vblank_irq)
{
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
@@ -876,8 +874,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
/* Query driver if possible and precision timestamping enabled. */
if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
-
ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
}
@@ -891,6 +887,15 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
return ret;
}
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
+ ktime_t *tvblank, bool in_vblank_irq)
+{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+
+ return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq);
+}
+
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
@@ -980,6 +985,42 @@ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
+/**
+ * drm_crtc_next_vblank_start - calculate the time of the next vblank
+ * @crtc: the crtc for which to calculate next vblank time
+ * @vblanktime: pointer to time to receive the next vblank timestamp.
+ *
+ * Calculate the expected time of the start of the next vblank period,
+ * based on the time of the previous vblank and the frame duration.
+ */
+int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
+{
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank;
+ struct drm_display_mode *mode;
+ u64 vblank_start;
+
+ if (!drm_dev_has_vblank(crtc->dev))
+ return -EINVAL;
+
+ vblank = &crtc->dev->vblank[pipe];
+ mode = &vblank->hwmode;
+
+ if (!vblank->framedur_ns || !vblank->linedur_ns)
+ return -EINVAL;
+
+ if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false))
+ return -EINVAL;
+
+ vblank_start = DIV_ROUND_DOWN_ULL(
+ (u64)vblank->framedur_ns * mode->crtc_vblank_start,
+ mode->crtc_vtotal);
+ *vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start));
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_next_vblank_start);
+
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
u64 seq, ktime_t now)
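A hedged sketch of a caller that wants to line work up with the next vblank using the new helper; the wrapper name and sleeping strategy are illustrative:

static void my_wait_for_next_vblank(struct drm_crtc *crtc)
{
	ktime_t next_vblank;

	/* Fails if vblank support or a usable timestamp is not available. */
	if (drm_crtc_next_vblank_start(crtc, &next_vblank))
		return;

	/* Sleep until the predicted (absolute) start of the next vblank. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_hrtimeout(&next_vblank, HRTIMER_MODE_ABS);
}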
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index f024dc93939e..87c9fe55dec7 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
@@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
default:
return -EINVAL; /* This should never happen. */
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 7de37f8c68fd..83229a031af0 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
}
EXPORT_SYMBOL(drm_vma_offset_remove);
-/**
- * drm_vma_node_allow - Add open-file to list of allowed users
- * @node: Node to modify
- * @tag: Tag of file to remove
- *
- * Add @tag to the list of allowed open-files for this node. If @tag is
- * already on this list, the ref-count is incremented.
- *
- * The list of allowed-users is preserved across drm_vma_offset_add() and
- * drm_vma_offset_remove() calls. You may even call it if the node is currently
- * not added to any offset-manager.
- *
- * You must remove all open-files the same number of times as you added them
- * before destroying the node. Otherwise, you will leak memory.
- *
- * This is locked against concurrent access internally.
- *
- * RETURNS:
- * 0 on success, negative error code on internal failure (out-of-mem)
- */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+static int vma_node_allow(struct drm_vma_offset_node *node,
+ struct drm_file *tag, bool ref_counted)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag) {
- entry->vm_count++;
+ if (ref_counted)
+ entry->vm_count++;
goto unlock;
} else if (tag > entry->vm_tag) {
iter = &(*iter)->rb_right;
@@ -307,9 +289,59 @@ unlock:
kfree(new);
return ret;
}
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to add
+ *
+ * Add @tag to the list of allowed open-files for this node. If @tag is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+ return vma_node_allow(node, tag, true);
+}
EXPORT_SYMBOL(drm_vma_node_allow);
/**
+ * drm_vma_node_allow_once - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to add
+ *
+ * Add @tag to the list of allowed open-files for this node.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * Unlike drm_vma_node_allow(), this is not ref-counted, so drm_vma_node_revoke()
+ * should only be called once after this.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+ return vma_node_allow(node, tag, false);
+}
+EXPORT_SYMBOL(drm_vma_node_allow_once);
+
+/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
* @tag: Tag of file to remove
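A hedged sketch of the intended pairing for the new variant: grant a file access to an object's mmap offset node exactly once, and revoke exactly once on close; the open/close hooks are illustrative:

static int my_obj_open(struct drm_gem_object *obj, struct drm_file *file)
{
	/* Repeated calls for the same file do not take extra references. */
	return drm_vma_node_allow_once(&obj->vma_node, file);
}

static void my_obj_close(struct drm_gem_object *obj, struct drm_file *file)
{
	/* Matches the single allow_once above. */
	drm_vma_node_revoke(&obj->vma_node, file);
}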
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 1d2b4fb4bcf8..31a7f59ccb49 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -56,6 +56,11 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
if (!ctx)
return -ENOMEM;
+ ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
+ xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free;
+
ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
priv->cmdbuf_suballoc);
if (!ctx->mmu) {
@@ -99,6 +104,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
etnaviv_iommu_context_put(ctx->mmu);
+ xa_erase(&priv->active_contexts, ctx->id);
+
kfree(ctx);
}
@@ -514,6 +521,8 @@ static int etnaviv_bind(struct device *dev)
dma_set_max_seg_size(dev, SZ_2G);
+ xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);
+
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -563,6 +572,8 @@ static void etnaviv_unbind(struct device *dev)
etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+ xa_destroy(&priv->active_contexts);
+
drm->dev_private = NULL;
kfree(priv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 2bb4c25565dc..b3eb1662e90c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -12,6 +12,7 @@
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <linux/xarray.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
@@ -28,6 +29,7 @@ struct etnaviv_iommu_global;
#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
struct etnaviv_file_private {
+ int id;
struct etnaviv_iommu_context *mmu;
struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
@@ -40,6 +42,9 @@ struct etnaviv_drm_private {
struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
struct etnaviv_iommu_global *mmu_global;
+ struct xarray active_contexts;
+ u32 next_context_id;
+
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index c5ae5492e1af..b5f73502e3dd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
{
pgprot_t vm_page_prot;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 7031db145a77..3524b5811682 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma)
{
- return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+ int ret;
+
+ ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+ if (!ret) {
+ /* Drop the reference acquired by drm_gem_mmap_obj(). */
+ drm_gem_object_put(&etnaviv_obj->base);
+ }
+
+ return ret;
}
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1491159d0d20..45403ea38906 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -393,10 +393,11 @@ static void submit_cleanup(struct kref *kref)
wake_up_all(&submit->gpu->fence_event);
if (submit->out_fence) {
- /* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->fence_lock);
- idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_lock);
+ /*
+ * Remove from user fence array before dropping the reference,
+ * so the fence can no longer be found by a lookup.
+ */
+ xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
dma_fence_put(submit->out_fence);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 51320eeebfcf..de8c9894967c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -773,6 +773,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ if (gpu->identity.nn_core_count > 0)
+ dev_warn(gpu->dev, "etnaviv has been instantiated on a NPU, "
+ "for which the UAPI is still experimental\n");
+
/* Exclude VG cores with FE2.0 */
if (gpu->identity.features & chipFeatures_PIPE_VG &&
gpu->identity.features & chipFeatures_FE20) {
@@ -957,6 +961,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
gpu->identity.vertex_cache_size);
seq_printf(m, "\t shader_core_count: %d\n",
gpu->identity.shader_core_count);
+ seq_printf(m, "\t nn_core_count: %d\n",
+ gpu->identity.nn_core_count);
seq_printf(m, "\t pixel_pipes: %d\n",
gpu->identity.pixel_pipes);
seq_printf(m, "\t vertex_output_buffer_size: %d\n",
@@ -1240,7 +1246,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* pretends we didn't find a fence in that case.
*/
rcu_read_lock();
- fence = idr_find(&gpu->fence_idr, id);
+ fence = xa_load(&gpu->user_fences, id);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();
@@ -1450,6 +1456,15 @@ static void sync_point_worker(struct work_struct *work)
static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
+ static const char *fault_reasons[] = {
+ "slave not present",
+ "page not present",
+ "write violation",
+ "out of bounds",
+ "read security violation",
+ "write security violation",
+ };
+
u32 status_reg, status;
int i;
@@ -1462,18 +1477,25 @@ static void dump_mmu_fault(struct etnaviv_gpu *gpu)
dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
for (i = 0; i < 4; i++) {
+ const char *reason = "unknown";
u32 address_reg;
+ u32 mmu_status;
- if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
+ mmu_status = (status >> (i * 4)) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK;
+ if (!mmu_status)
continue;
+ if ((mmu_status - 1) < ARRAY_SIZE(fault_reasons))
+ reason = fault_reasons[mmu_status - 1];
+
if (gpu->sec_mode == ETNA_SEC_NONE)
address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
else
address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
- dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
- gpu_read(gpu, address_reg));
+ dev_err_ratelimited(gpu->dev,
+ "MMU %d fault (%s) addr 0x%08x\n",
+ i, reason, gpu_read(gpu, address_reg));
}
}
@@ -1629,7 +1651,6 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
return etnaviv_gpu_clk_disable(gpu);
}
-#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
int ret;
@@ -1645,7 +1666,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
return 0;
}
-#endif
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
@@ -1713,18 +1733,17 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
if (ret)
goto out_workqueue;
-#ifdef CONFIG_PM
- ret = pm_runtime_get_sync(gpu->dev);
-#else
- ret = etnaviv_gpu_clk_enable(gpu);
-#endif
+ if (IS_ENABLED(CONFIG_PM))
+ ret = pm_runtime_get_sync(gpu->dev);
+ else
+ ret = etnaviv_gpu_clk_enable(gpu);
if (ret < 0)
goto out_sched;
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
- idr_init(&gpu->fence_idr);
+ xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
spin_lock_init(&gpu->fence_spinlock);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
@@ -1761,12 +1780,12 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_sched_fini(gpu);
-#ifdef CONFIG_PM
- pm_runtime_get_sync(gpu->dev);
- pm_runtime_put_sync_suspend(gpu->dev);
-#else
- etnaviv_gpu_hw_suspend(gpu);
-#endif
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_get_sync(gpu->dev);
+ pm_runtime_put_sync_suspend(gpu->dev);
+ } else {
+ etnaviv_gpu_hw_suspend(gpu);
+ }
if (gpu->mmu_context)
etnaviv_iommu_context_put(gpu->mmu_context);
@@ -1778,7 +1797,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
- idr_destroy(&gpu->fence_idr);
+ xa_destroy(&gpu->user_fences);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
@@ -1810,7 +1829,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_lock);
+ mutex_init(&gpu->sched_lock);
/* Map registers: */
gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
@@ -1880,7 +1899,6 @@ static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
@@ -1923,18 +1941,16 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
- SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
- NULL)
+ RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, NULL)
};
struct platform_driver etnaviv_gpu_driver = {
.driver = {
.name = "etnaviv-gpu",
.owner = THIS_MODULE,
- .pm = &etnaviv_gpu_pm_ops,
+ .pm = pm_ptr(&etnaviv_gpu_pm_ops),
.of_match_table = etnaviv_gpu_match,
},
.probe = etnaviv_gpu_platform_probe,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f1204b070fb8..98c6f9c320fc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -51,6 +51,9 @@ struct etnaviv_chip_identity {
/* Number of shader cores. */
u32 shader_core_count;
+ /* Number of Neural Network cores. */
+ u32 nn_core_count;
+
/* Size of the vertex cache. */
u32 vertex_cache_size;
@@ -100,6 +103,7 @@ struct etnaviv_gpu {
struct etnaviv_chip_identity identity;
enum etnaviv_sec_mode sec_mode;
struct workqueue_struct *wq;
+ struct mutex sched_lock;
struct drm_gpu_scheduler sched;
bool initialized;
bool fe_running;
@@ -117,8 +121,8 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex fence_lock;
- struct idr fence_idr;
+ struct xarray user_fences;
+ u32 next_user_fence;
u32 next_fence;
u32 completed_fence;
wait_queue_head_t fence_event;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 57f334e24189..2e63afa6c798 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -16,6 +16,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 128,
.shader_core_count = 1,
+ .nn_core_count = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -47,6 +48,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
+ .nn_core_count = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -78,6 +80,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
+ .nn_core_count = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -140,6 +143,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 1024,
.shader_core_count = 4,
+ .nn_core_count = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -161,6 +165,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.minor_features10 = 0x90044250,
.minor_features11 = 0x00000024,
},
+ {
+ .model = 0x8000,
+ .revision = 0x7120,
+ .product_id = 0x45080009,
+ .customer_id = 0x88,
+ .eco_id = 0,
+ .stream_count = 8,
+ .register_max = 64,
+ .thread_count = 256,
+ .shader_core_count = 1,
+ .nn_core_count = 8,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cac,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfadb,
+ .minor_features2 = 0xeb9d6fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xd30dafc7,
+ .minor_features5 = 0x7b5ac333,
+ .minor_features6 = 0xfc8ee200,
+ .minor_features7 = 0x03fffa6f,
+ .minor_features8 = 0x00fe0ef0,
+ .minor_features9 = 0x0088003c,
+ .minor_features10 = 0x108048c0,
+ .minor_features11 = 0x00000010,
+ },
};
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index d29f467eee13..1ae87dfd19c4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -97,24 +97,24 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
- int ret = 0;
+ struct etnaviv_gpu *gpu = submit->gpu;
+ int ret;
/*
- * Hold the fence lock across the whole operation to avoid jobs being
+ * Hold the sched lock across the whole operation to avoid jobs being
* pushed out of order with regard to their sched fence seqnos as
* allocated in drm_sched_job_arm.
*/
- mutex_lock(&submit->gpu->fence_lock);
+ mutex_lock(&gpu->sched_lock);
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
- submit->out_fence, 0,
- INT_MAX, GFP_KERNEL);
- if (submit->out_fence_id < 0) {
+ ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
+ submit->out_fence, xa_limit_32b,
+ &gpu->next_user_fence, GFP_KERNEL);
+ if (ret < 0) {
drm_sched_job_cleanup(&submit->sched_job);
- ret = -ENOMEM;
goto out_unlock;
}
@@ -124,7 +124,7 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
drm_sched_entity_push_job(&submit->sched_job);
out_unlock:
- mutex_unlock(&submit->gpu->fence_lock);
+ mutex_unlock(&gpu->sched_lock);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index deaaa99fa654..94d5f33b1fd6 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 26666 bytes, from 2019-12-20 21:20:35)
-- common.xml ( 35468 bytes, from 2018-02-10 13:09:26)
-- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03)
-- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48)
-- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26)
-- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26)
-- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03)
-- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15)
-- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26)
-
-Copyright (C) 2012-2019 by the following authors:
+- state.xml ( 27198 bytes, from 2022-04-22 10:35:24)
+- common.xml ( 35468 bytes, from 2020-10-28 12:56:03)
+- common_3d.xml ( 15058 bytes, from 2020-10-28 12:56:03)
+- state_hi.xml ( 34804 bytes, from 2022-12-02 09:06:28)
+- copyright.xml ( 1597 bytes, from 2020-10-28 12:56:03)
+- state_2d.xml ( 51552 bytes, from 2020-10-28 12:56:03)
+- state_3d.xml ( 84445 bytes, from 2022-11-15 15:59:38)
+- state_blt.xml ( 14424 bytes, from 2022-11-07 11:18:41)
+- state_vg.xml ( 5975 bytes, from 2020-10-28 12:56:03)
+
+Copyright (C) 2012-2022 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -321,16 +321,16 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
#define VIVS_MMUv2_STATUS 0x00000188
-#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x0000000f
#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0
#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
-#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x000000f0
#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4
#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
-#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000f00
#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8
#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
-#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x0000f000
#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12
#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
@@ -465,7 +465,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG0 0x00000470
#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT 0x0000000a
+#define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT 0x0000000b
+#define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT 0x0000000c
#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_LK_COUNT 0x00000010
+#define VIVS_MC_PROFILE_CONFIG0_FE_STALL_COUNT 0x00000011
+#define VIVS_MC_PROFILE_CONFIG0_FE_PROCESS_COUNT 0x00000012
#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
@@ -499,11 +505,14 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006
#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
+#define VIVS_MC_PROFILE_CONFIG1_PA_DROPED_PRIM_COUNTER 0x00000009
+#define VIVS_MC_PROFILE_CONFIG1_PA_FRUSTUM_CLIPPED_PRIM_COUNTER 0x0000000a
#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
+#define VIVS_MC_PROFILE_CONFIG1_SE_TRIVIAL_REJECTED_LINE_COUNT 0x00000400
#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x00ff0000
#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
@@ -515,6 +524,8 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_HZ_CACHE_MISS_COUNTER 0x00110000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_HZ_CACHE_MISS_COUNTER 0x00120000
#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
@@ -535,13 +546,48 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
-#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_SENTOUT_FROM_COLORPIPE 0x00000004
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_FROM_COLORPIPE 0x00000005
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_DEPTHPIPE 0x00000007
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_SENTOUT_FROM_DEPTHPIPE 0x00000008
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_DEPTHPIPE 0x00000009
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_SENTOUT_FROM_DEPTHPIPE 0x0000000a
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_FROM_DEPTHPIPE 0x0000000b
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_OTHERS 0x0000000c
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_OTHERS 0x0000000d
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_FROM_OTHERS 0x0000000e
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_FROM_OTHERS 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC_FE_READ_BANDWIDTH 0x00000015
+#define VIVS_MC_PROFILE_CONFIG2_MC_MMU_READ_BANDWIDTH 0x00000016
+#define VIVS_MC_PROFILE_CONFIG2_MC_BLT_READ_BANDWIDTH 0x00000017
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH0_READ_BANDWIDTH 0x00000018
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH1_READ_BANDWIDTH 0x00000019
+#define VIVS_MC_PROFILE_CONFIG2_MC_PE_WRITE_BANDWIDTH 0x0000001a
+#define VIVS_MC_PROFILE_CONFIG2_MC_BLT_WRITE_BANDWIDTH 0x0000001b
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH0_WRITE_BANDWIDTH 0x0000001c
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH1_WRITE_BANDWIDTH 0x0000001d
#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_L2__MASK 0x00ff0000
+#define VIVS_MC_PROFILE_CONFIG2_L2__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_AXI0_READ_REQUEST_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_AXI0_WRITE_REQUEST_COUNT 0x00040000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_AXI1_WRITE_REQUEST_COUNT 0x00050000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_READ_TRANSACTIONS_REQUEST_BY_AXI0 0x00080000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_READ_TRANSACTIONS_REQUEST_BY_AXI1 0x00090000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_WRITE_TRANSACTIONS_REQUEST_BY_AXI0 0x000c0000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_WRITE_TRANSACTIONS_REQUEST_BY_AXI1 0x000d0000
+#define VIVS_MC_PROFILE_CONFIG2_L2_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI0_MINMAX_LATENCY 0x00100000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI0_TOTAL_LATENCY 0x00110000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI0_TOTAL_REQUEST_COUNT 0x00120000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI1_MINMAX_LATENCY 0x00130000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI1_TOTAL_LATENCY 0x00140000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI1_TOTAL_REQUEST_COUNT 0x00150000
#define VIVS_MC_PROFILE_CONFIG2_BLT__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG2_BLT__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG2_BLT_UNK0 0x00000000
@@ -566,5 +612,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_L2_READ 0x00000564
+#define VIVS_MC_MC_LATENCY_RESET 0x00000568
+
+#define VIVS_MC_MC_AXI_MAX_MIN_LATENCY 0x0000056c
+
+#define VIVS_MC_MC_AXI_TOTAL_LATENCY 0x00000570
+
+#define VIVS_MC_MC_AXI_SAMPLE_COUNT 0x00000574
+
#endif /* STATE_HI_XML */
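The new performance-counter selectors above follow the file's existing convention: a per-module signal number is written into the matching field of a VIVS_MC_PROFILE_CONFIGn register, the latched value is read back through the corresponding VIVS_MC_PROFILE_*_READ register, and writing the module's *_RESET selector clears the counter. Below is a minimal sketch of sampling one of the newly added L2/AXI counters; struct my_gpu, gpu_write() and gpu_read() stand in for whatever MMIO helpers the driver provides and are not part of this patch.

/* Sketch: sample the new AXI0 total-latency counter, then reset it. */
static u32 my_sample_axi0_total_latency(struct my_gpu *gpu)
{
	u32 val;

	/* Select the signal in the L2 field of MC_PROFILE_CONFIG2. */
	gpu_write(gpu, VIVS_MC_PROFILE_CONFIG2,
		  VIVS_MC_PROFILE_CONFIG2_L2_AXI0_TOTAL_LATENCY);

	/* The selected counter is exposed through the L2 read-back register. */
	val = gpu_read(gpu, VIVS_MC_PROFILE_L2_READ);

	/* Writing the RESET selector clears the module's counters. */
	gpu_write(gpu, VIVS_MC_PROFILE_CONFIG2,
		  VIVS_MC_PROFILE_CONFIG2_L2_RESET);

	return val;
}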
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3d2f025d4fd4..0cb92d651ff1 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,7 +2,7 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC Exynos Series"
depends on OF && DRM && COMMON_CLK
- depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on MMU
select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
select DRM_KMS_HELPER
@@ -59,6 +59,7 @@ config DRM_EXYNOS_DSI
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON
select DRM_MIPI_DSI
select DRM_PANEL
+ select DRM_SAMSUNG_DSIM
default n
help
This enables support for Exynos MIPI-DSI device.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8155d7e650f1..2867b39fa35e 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -710,7 +710,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM
static int exynos5433_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
@@ -741,14 +740,10 @@ err:
return ret;
}
-#endif
-static const struct dev_pm_ops exynos5433_decon_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos5433_decon_pm_ops,
+ exynos5433_decon_suspend,
+ exynos5433_decon_resume, NULL);
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
{
@@ -881,7 +876,7 @@ struct platform_driver exynos5433_decon_driver = {
.remove = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
- .pm = &exynos5433_decon_pm_ops,
+ .pm = pm_ptr(&exynos5433_decon_pm_ops),
.of_match_table = exynos5433_decon_driver_dt_match,
},
};
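This decon change, like the identical conversions in the exynos7 decon and exynos_dp hunks below, drops the #ifdef CONFIG_PM guards and the open-coded dev_pm_ops table in favour of DEFINE_RUNTIME_DEV_PM_OPS(), which generates the runtime-PM callbacks and routes system sleep through pm_runtime_force_suspend()/pm_runtime_force_resume(); pm_ptr() then evaluates to NULL when CONFIG_PM is disabled so the compiler can discard the unused callbacks without any preprocessor guards. A condensed, driver-agnostic sketch of the pattern (the my_drv_* names are illustrative only):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_drv_runtime_suspend(struct device *dev)
{
	/* Quiesce the hardware, gate clocks, etc. */
	return 0;
}

static int my_drv_runtime_resume(struct device *dev)
{
	/* Ungate clocks and restore hardware state. */
	return 0;
}

/*
 * Generates a dev_pm_ops with the runtime callbacks above and maps system
 * suspend/resume onto pm_runtime_force_suspend()/pm_runtime_force_resume().
 */
static DEFINE_RUNTIME_DEV_PM_OPS(my_drv_pm_ops, my_drv_runtime_suspend,
				 my_drv_runtime_resume, NULL);

static struct platform_driver my_drv_driver = {
	/* .probe/.remove omitted in this sketch. */
	.driver = {
		.name	= "my-drv",
		/* Resolves to NULL when CONFIG_PM=n. */
		.pm	= pm_ptr(&my_drv_pm_ops),
	},
};

Compared with the old SET_RUNTIME_PM_OPS/SET_SYSTEM_SLEEP_PM_OPS pair, this keeps the callbacks referenced (so no "defined but not used" warnings) while still letting them be dropped from the final image.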
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 7080cf7952ec..3126f735dedc 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -779,7 +779,6 @@ static int decon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos7_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
@@ -836,21 +835,16 @@ err_aclk_enable:
err_pclk_enable:
return ret;
}
-#endif
-static const struct dev_pm_ops exynos7_decon_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
+ exynos7_decon_resume, NULL);
struct platform_driver decon_driver = {
.probe = decon_probe,
.remove = decon_remove,
.driver = {
.name = "exynos-decon",
- .pm = &exynos7_decon_pm_ops,
+ .pm = pm_ptr(&exynos7_decon_pm_ops),
.of_match_table = decon_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4e3d3d5f6866..3404ec1367fb 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -260,7 +260,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
@@ -274,13 +273,9 @@ static int exynos_dp_resume(struct device *dev)
return analogix_dp_resume(dp->adp);
}
-#endif
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_dp_pm_ops, exynos_dp_suspend,
+ exynos_dp_resume, NULL);
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
@@ -294,7 +289,7 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
+ .pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 16c539657f73..6b73fb7a83c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -16,7 +16,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
@@ -108,7 +107,6 @@ static const struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
- .lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -288,19 +286,15 @@ static int exynos_drm_bind(struct device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm);
- ret = exynos_drm_fbdev_init(drm);
- if (ret)
- goto err_cleanup_poll;
-
/* register the DRM device */
ret = drm_dev_register(drm, 0);
if (ret < 0)
- goto err_cleanup_fbdev;
+ goto err_cleanup_poll;
+
+ exynos_drm_fbdev_setup(drm);
return 0;
-err_cleanup_fbdev:
- exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
err_unbind_all:
@@ -321,7 +315,6 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
- exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
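The fbdev rework visible in this hunk changes the setup ordering: the driver no longer initialises fbdev emulation before registration (and no longer tears it down explicitly), it simply calls exynos_drm_fbdev_setup() once drm_dev_register() has succeeded and leaves teardown to the fbdev client itself; the dropped drm_fb_helper_lastclose hook and the fb_helper field removal in the next hunk are consequences of the same move. A trimmed sketch of the resulting bind flow, reusing the helper names from the diff but omitting the component binding and error labels of the real function:

static int my_bind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	int ret;

	drm_kms_helper_poll_init(drm);

	/* Register first; fbdev emulation attaches as a client afterwards. */
	ret = drm_dev_register(drm, 0);
	if (ret < 0)
		goto err_cleanup_poll;

	/* No matching fini call is needed: the client tears itself down. */
	exynos_drm_fbdev_setup(drm);

	return 0;

err_cleanup_poll:
	drm_kms_helper_poll_fini(drm);
	return ret;
}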
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 6ae9056e7a18..81d501efd013 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -197,8 +197,6 @@ struct drm_exynos_file_private {
* @wait: wait an atomic commit to finish
*/
struct exynos_drm_private {
- struct drm_fb_helper *fb_helper;
-
struct device *g2d_dev;
struct device *dma_dev;
void *mapping;
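The largest part of the series follows: exynos_drm_dsi.c loses its register-level DSIM implementation and becomes glue around the shared samsung-dsim bridge driver (hence the new select DRM_SAMSUNG_DSIM in the Kconfig hunk above). The glue's job shrinks to allocating its per-SoC state, parking it in dsim->priv, and exposing a samsung_dsim_host_ops table through the platform data, roughly as sketched below; the my_glue_* names are illustrative only and do not appear in this patch.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <drm/bridge/samsung-dsim.h>

#include "exynos_drm_crtc.h"

struct my_glue {
	struct drm_encoder encoder;
};

static int my_glue_register_host(struct samsung_dsim *dsim)
{
	struct my_glue *glue;

	glue = devm_kzalloc(dsim->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	/* The shared bridge driver hands this back in every callback. */
	dsim->priv = glue;

	return 0;
}

static irqreturn_t my_glue_te_irq_handler(struct samsung_dsim *dsim)
{
	struct my_glue *glue = dsim->priv;

	/* Forward the tearing-effect event to the CRTC driving this encoder. */
	exynos_drm_crtc_te_handler(glue->encoder.crtc);

	return IRQ_HANDLED;
}

static const struct samsung_dsim_host_ops my_glue_host_ops = {
	.register_host	= my_glue_register_host,
	.te_irq_handler	= my_glue_te_irq_handler,
};

static const struct samsung_dsim_plat_data my_glue_pdata = {
	.hw_type	= DSIM_TYPE_EXYNOS3250,
	.host_ops	= &my_glue_host_ops,
};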
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index ec673223d6b7..fc81f728e6ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1,1500 +1,56 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung SoC MIPI DSI Master driver.
+ * Samsung MIPI DSIM glue for Exynos SoCs.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Contacts: Tomasz Figa <t.figa@samsung.com>
-*/
+ */
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/component.h>
-#include <linux/gpio/consumer.h>
-#include <linux/irq.h>
#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-#include <asm/unaligned.h>
-
-#include <video/mipi_display.h>
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
+#include <drm/bridge/samsung-dsim.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-/* returns true iff both arguments logically differ */
-#define NEQV(a, b) (!(a) ^ !(b))
-
-/* DSIM_STATUS */
-#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
-#define DSIM_STOP_STATE_CLK (1 << 8)
-#define DSIM_TX_READY_HS_CLK (1 << 10)
-#define DSIM_PLL_STABLE (1 << 31)
-
-/* DSIM_SWRST */
-#define DSIM_FUNCRST (1 << 16)
-#define DSIM_SWRST (1 << 0)
-
-/* DSIM_TIMEOUT */
-#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
-#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
-
-/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN (1 << 24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS (1 << 27)
-#define DSIM_ESC_CLKEN (1 << 28)
-#define DSIM_TX_REQUEST_HSCLK (1 << 31)
-
-/* DSIM_CONFIG */
-#define DSIM_LANE_EN_CLK (1 << 0)
-#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
-#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
-#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
-#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
-#define DSIM_SUB_VC (((x) & 0x3) << 16)
-#define DSIM_MAIN_VC (((x) & 0x3) << 18)
-#define DSIM_HSA_MODE (1 << 20)
-#define DSIM_HBP_MODE (1 << 21)
-#define DSIM_HFP_MODE (1 << 22)
-#define DSIM_HSE_MODE (1 << 23)
-#define DSIM_AUTO_MODE (1 << 24)
-#define DSIM_VIDEO_MODE (1 << 25)
-#define DSIM_BURST_MODE (1 << 26)
-#define DSIM_SYNC_INFORM (1 << 27)
-#define DSIM_EOT_DISABLE (1 << 28)
-#define DSIM_MFLUSH_VS (1 << 29)
-/* This flag is valid only for exynos3250/3472/5260/5430 */
-#define DSIM_CLKLANE_STOP (1 << 30)
-
-/* DSIM_ESCMODE */
-#define DSIM_TX_TRIGGER_RST (1 << 4)
-#define DSIM_TX_LPDT_LP (1 << 6)
-#define DSIM_CMD_LPDT_LP (1 << 7)
-#define DSIM_FORCE_BTA (1 << 16)
-#define DSIM_FORCE_STOP_STATE (1 << 20)
-#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
-#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
-
-/* DSIM_MDRESOL */
-#define DSIM_MAIN_STAND_BY (1 << 31)
-#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
-#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
-
-/* DSIM_MVPORCH */
-#define DSIM_CMD_ALLOW(x) ((x) << 28)
-#define DSIM_STABLE_VFP(x) ((x) << 16)
-#define DSIM_MAIN_VBP(x) ((x) << 0)
-#define DSIM_CMD_ALLOW_MASK (0xf << 28)
-#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
-#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
-
-/* DSIM_MHPORCH */
-#define DSIM_MAIN_HFP(x) ((x) << 16)
-#define DSIM_MAIN_HBP(x) ((x) << 0)
-#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
-#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
-
-/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
-#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
-#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
-
-/* DSIM_SDRESOL */
-#define DSIM_SUB_STANDY(x) ((x) << 31)
-#define DSIM_SUB_VRESOL(x) ((x) << 16)
-#define DSIM_SUB_HRESOL(x) ((x) << 0)
-#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
-#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
-#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
-
-/* DSIM_INTSRC */
-#define DSIM_INT_PLL_STABLE (1 << 31)
-#define DSIM_INT_SW_RST_RELEASE (1 << 30)
-#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
-#define DSIM_INT_SFR_HDR_FIFO_EMPTY (1 << 28)
-#define DSIM_INT_BTA (1 << 25)
-#define DSIM_INT_FRAME_DONE (1 << 24)
-#define DSIM_INT_RX_TIMEOUT (1 << 21)
-#define DSIM_INT_BTA_TIMEOUT (1 << 20)
-#define DSIM_INT_RX_DONE (1 << 18)
-#define DSIM_INT_RX_TE (1 << 17)
-#define DSIM_INT_RX_ACK (1 << 16)
-#define DSIM_INT_RX_ECC_ERR (1 << 15)
-#define DSIM_INT_RX_CRC_ERR (1 << 14)
-
-/* DSIM_FIFOCTRL */
-#define DSIM_RX_DATA_FULL (1 << 25)
-#define DSIM_RX_DATA_EMPTY (1 << 24)
-#define DSIM_SFR_HEADER_FULL (1 << 23)
-#define DSIM_SFR_HEADER_EMPTY (1 << 22)
-#define DSIM_SFR_PAYLOAD_FULL (1 << 21)
-#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20)
-#define DSIM_I80_HEADER_FULL (1 << 19)
-#define DSIM_I80_HEADER_EMPTY (1 << 18)
-#define DSIM_I80_PAYLOAD_FULL (1 << 17)
-#define DSIM_I80_PAYLOAD_EMPTY (1 << 16)
-#define DSIM_SD_HEADER_FULL (1 << 15)
-#define DSIM_SD_HEADER_EMPTY (1 << 14)
-#define DSIM_SD_PAYLOAD_FULL (1 << 13)
-#define DSIM_SD_PAYLOAD_EMPTY (1 << 12)
-#define DSIM_MD_HEADER_FULL (1 << 11)
-#define DSIM_MD_HEADER_EMPTY (1 << 10)
-#define DSIM_MD_PAYLOAD_FULL (1 << 9)
-#define DSIM_MD_PAYLOAD_EMPTY (1 << 8)
-#define DSIM_RX_FIFO (1 << 4)
-#define DSIM_SFR_FIFO (1 << 3)
-#define DSIM_I80_FIFO (1 << 2)
-#define DSIM_SD_FIFO (1 << 1)
-#define DSIM_MD_FIFO (1 << 0)
-
-/* DSIM_PHYACCHR */
-#define DSIM_AFC_EN (1 << 14)
-#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
-
-/* DSIM_PLLCTRL */
-#define DSIM_FREQ_BAND(x) ((x) << 24)
-#define DSIM_PLL_EN (1 << 23)
-#define DSIM_PLL_P(x) ((x) << 13)
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
-
-/* DSIM_PHYCTRL */
-#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
-#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP (1 << 30)
-#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP (1 << 14)
-
-/* DSIM_PHYTIMING */
-#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
-#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
-
-/* DSIM_PHYTIMING1 */
-#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
-#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
-#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
-#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
-
-/* DSIM_PHYTIMING2 */
-#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
-#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
-#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
-
-#define DSI_MAX_BUS_WIDTH 4
-#define DSI_NUM_VIRTUAL_CHANNELS 4
-#define DSI_TX_FIFO_SIZE 2048
-#define DSI_RX_FIFO_SIZE 256
-#define DSI_XFER_TIMEOUT_MS 100
-#define DSI_RX_FIFO_EMPTY 0x30800002
-
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
-static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0" };
-
-enum exynos_dsi_transfer_type {
- EXYNOS_DSI_TX,
- EXYNOS_DSI_RX,
-};
-
-struct exynos_dsi_transfer {
- struct list_head list;
- struct completion completed;
- int result;
- struct mipi_dsi_packet packet;
- u16 flags;
- u16 tx_done;
-
- u8 *rx_payload;
- u16 rx_len;
- u16 rx_done;
-};
-
-#define DSIM_STATE_ENABLED BIT(0)
-#define DSIM_STATE_INITIALIZED BIT(1)
-#define DSIM_STATE_CMD_LPM BIT(2)
-#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
-
-struct exynos_dsi_driver_data {
- const unsigned int *reg_ofs;
- unsigned int plltmr_reg;
- unsigned int has_freqband:1;
- unsigned int has_clklane_stop:1;
- unsigned int num_clks;
- unsigned int max_freq;
- unsigned int wait_for_reset;
- unsigned int num_bits_resol;
- const unsigned int *reg_values;
-};
-
struct exynos_dsi {
struct drm_encoder encoder;
- struct mipi_dsi_host dsi_host;
- struct drm_bridge bridge;
- struct drm_bridge *out_bridge;
- struct device *dev;
- struct drm_display_mode mode;
-
- void __iomem *reg_base;
- struct phy *phy;
- struct clk **clks;
- struct regulator_bulk_data supplies[2];
- int irq;
- struct gpio_desc *te_gpio;
-
- u32 pll_clk_rate;
- u32 burst_clk_rate;
- u32 esc_clk_rate;
- u32 lanes;
- u32 mode_flags;
- u32 format;
-
- int state;
- struct drm_property *brightness;
- struct completion completed;
-
- spinlock_t transfer_lock; /* protects transfer_list */
- struct list_head transfer_list;
-
- const struct exynos_dsi_driver_data *driver_data;
-};
-
-#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
-
-static inline struct exynos_dsi *bridge_to_dsi(struct drm_bridge *b)
-{
- return container_of(b, struct exynos_dsi, bridge);
-}
-
-enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
- DSIM_SWRST_REG, /* Software reset register */
- DSIM_CLKCTRL_REG, /* Clock control register */
- DSIM_TIMEOUT_REG, /* Time out register */
- DSIM_CONFIG_REG, /* Configuration register */
- DSIM_ESCMODE_REG, /* Escape mode register */
- DSIM_MDRESOL_REG,
- DSIM_MVPORCH_REG, /* Main display Vporch register */
- DSIM_MHPORCH_REG, /* Main display Hporch register */
- DSIM_MSYNC_REG, /* Main display sync area register */
- DSIM_INTSRC_REG, /* Interrupt source register */
- DSIM_INTMSK_REG, /* Interrupt mask register */
- DSIM_PKTHDR_REG, /* Packet Header FIFO register */
- DSIM_PAYLOAD_REG, /* Payload FIFO register */
- DSIM_RXFIFO_REG, /* Read FIFO register */
- DSIM_FIFOCTRL_REG, /* FIFO status and control register */
- DSIM_PLLCTRL_REG, /* PLL control register */
- DSIM_PHYCTRL_REG,
- DSIM_PHYTIMING_REG,
- DSIM_PHYTIMING1_REG,
- DSIM_PHYTIMING2_REG,
- NUM_REGS
-};
-
-static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx,
- u32 val)
-{
-
- writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
-}
-
-static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx)
-{
- return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
-}
-
-static const unsigned int exynos_reg_ofs[] = {
- [DSIM_STATUS_REG] = 0x00,
- [DSIM_SWRST_REG] = 0x04,
- [DSIM_CLKCTRL_REG] = 0x08,
- [DSIM_TIMEOUT_REG] = 0x0c,
- [DSIM_CONFIG_REG] = 0x10,
- [DSIM_ESCMODE_REG] = 0x14,
- [DSIM_MDRESOL_REG] = 0x18,
- [DSIM_MVPORCH_REG] = 0x1c,
- [DSIM_MHPORCH_REG] = 0x20,
- [DSIM_MSYNC_REG] = 0x24,
- [DSIM_INTSRC_REG] = 0x2c,
- [DSIM_INTMSK_REG] = 0x30,
- [DSIM_PKTHDR_REG] = 0x34,
- [DSIM_PAYLOAD_REG] = 0x38,
- [DSIM_RXFIFO_REG] = 0x3c,
- [DSIM_FIFOCTRL_REG] = 0x44,
- [DSIM_PLLCTRL_REG] = 0x4c,
- [DSIM_PHYCTRL_REG] = 0x5c,
- [DSIM_PHYTIMING_REG] = 0x64,
- [DSIM_PHYTIMING1_REG] = 0x68,
- [DSIM_PHYTIMING2_REG] = 0x6c,
-};
-
-static const unsigned int exynos5433_reg_ofs[] = {
- [DSIM_STATUS_REG] = 0x04,
- [DSIM_SWRST_REG] = 0x0C,
- [DSIM_CLKCTRL_REG] = 0x10,
- [DSIM_TIMEOUT_REG] = 0x14,
- [DSIM_CONFIG_REG] = 0x18,
- [DSIM_ESCMODE_REG] = 0x1C,
- [DSIM_MDRESOL_REG] = 0x20,
- [DSIM_MVPORCH_REG] = 0x24,
- [DSIM_MHPORCH_REG] = 0x28,
- [DSIM_MSYNC_REG] = 0x2C,
- [DSIM_INTSRC_REG] = 0x34,
- [DSIM_INTMSK_REG] = 0x38,
- [DSIM_PKTHDR_REG] = 0x3C,
- [DSIM_PAYLOAD_REG] = 0x40,
- [DSIM_RXFIFO_REG] = 0x44,
- [DSIM_FIFOCTRL_REG] = 0x4C,
- [DSIM_PLLCTRL_REG] = 0x94,
- [DSIM_PHYCTRL_REG] = 0xA4,
- [DSIM_PHYTIMING_REG] = 0xB4,
- [DSIM_PHYTIMING1_REG] = 0xB8,
- [DSIM_PHYTIMING2_REG] = 0xBC,
};
-enum reg_value_idx {
- RESET_TYPE,
- PLL_TIMER,
- STOP_STATE_CNT,
- PHYCTRL_ULPS_EXIT,
- PHYCTRL_VREG_LP,
- PHYCTRL_SLEW_UP,
- PHYTIMING_LPX,
- PHYTIMING_HS_EXIT,
- PHYTIMING_CLK_PREPARE,
- PHYTIMING_CLK_ZERO,
- PHYTIMING_CLK_POST,
- PHYTIMING_CLK_TRAIL,
- PHYTIMING_HS_PREPARE,
- PHYTIMING_HS_ZERO,
- PHYTIMING_HS_TRAIL
-};
-
-static const unsigned int reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
-};
-
-static const unsigned int exynos5422_reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
-};
-
-static const unsigned int exynos5433_reg_values[] = {
- [RESET_TYPE] = DSIM_FUNCRST,
- [PLL_TIMER] = 22200,
- [STOP_STATE_CNT] = 0xa,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
- [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
- [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
-};
-
-static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x50,
- .has_freqband = 1,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x50,
- .has_freqband = 1,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x58,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
- .reg_ofs = exynos5433_reg_ofs,
- .plltmr_reg = 0xa0,
- .has_clklane_stop = 1,
- .num_clks = 5,
- .max_freq = 1500,
- .wait_for_reset = 0,
- .num_bits_resol = 12,
- .reg_values = exynos5433_reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = {
- .reg_ofs = exynos5433_reg_ofs,
- .plltmr_reg = 0xa0,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1500,
- .wait_for_reset = 1,
- .num_bits_resol = 12,
- .reg_values = exynos5422_reg_values,
-};
-
-static const struct of_device_id exynos_dsi_of_match[] = {
- { .compatible = "samsung,exynos3250-mipi-dsi",
- .data = &exynos3_dsi_driver_data },
- { .compatible = "samsung,exynos4210-mipi-dsi",
- .data = &exynos4_dsi_driver_data },
- { .compatible = "samsung,exynos5410-mipi-dsi",
- .data = &exynos5_dsi_driver_data },
- { .compatible = "samsung,exynos5422-mipi-dsi",
- .data = &exynos5422_dsi_driver_data },
- { .compatible = "samsung,exynos5433-mipi-dsi",
- .data = &exynos5433_dsi_driver_data },
- { }
-};
-
-static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
-{
- if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
- return;
-
- dev_err(dsi->dev, "timeout waiting for reset\n");
-}
-
-static void exynos_dsi_reset(struct exynos_dsi *dsi)
-{
- u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
-
- reinit_completion(&dsi->completed);
- exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val);
-}
-
-#ifndef MHZ
-#define MHZ (1000*1000)
-#endif
-
-static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
- unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned long best_freq = 0;
- u32 min_delta = 0xffffffff;
- u8 p_min, p_max;
- u8 _p, best_p;
- u16 _m, best_m;
- u8 _s, best_s;
-
- p_min = DIV_ROUND_UP(fin, (12 * MHZ));
- p_max = fin / (6 * MHZ);
-
- for (_p = p_min; _p <= p_max; ++_p) {
- for (_s = 0; _s <= 5; ++_s) {
- u64 tmp;
- u32 delta;
-
- tmp = (u64)fout * (_p << _s);
- do_div(tmp, fin);
- _m = tmp;
- if (_m < 41 || _m > 125)
- continue;
-
- tmp = (u64)_m * fin;
- do_div(tmp, _p);
- if (tmp < 500 * MHZ ||
- tmp > driver_data->max_freq * MHZ)
- continue;
-
- tmp = (u64)_m * fin;
- do_div(tmp, _p << _s);
-
- delta = abs(fout - tmp);
- if (delta < min_delta) {
- best_p = _p;
- best_m = _m;
- best_s = _s;
- min_delta = delta;
- best_freq = tmp;
- }
- }
- }
-
- if (best_freq) {
- *p = best_p;
- *m = best_m;
- *s = best_s;
- }
-
- return best_freq;
-}
-
-static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
- unsigned long freq)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned long fin, fout;
- int timeout;
- u8 p, s;
- u16 m;
- u32 reg;
-
- fin = dsi->pll_clk_rate;
- fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
- if (!fout) {
- dev_err(dsi->dev,
- "failed to find PLL PMS for requested frequency\n");
- return 0;
- }
- dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
-
- writel(driver_data->reg_values[PLL_TIMER],
- dsi->reg_base + driver_data->plltmr_reg);
-
- reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
-
- if (driver_data->has_freqband) {
- static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
- };
- int band;
-
- for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
- if (fout < freq_bands[band])
- break;
-
- dev_dbg(dsi->dev, "band %d\n", band);
-
- reg |= DSIM_FREQ_BAND(band);
- }
-
- exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
-
- timeout = 1000;
- do {
- if (timeout-- == 0) {
- dev_err(dsi->dev, "PLL failed to stabilize\n");
- return 0;
- }
- reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
-
- return fout;
-}
-
-static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
-{
- unsigned long hs_clk, byte_clk, esc_clk;
- unsigned long esc_div;
- u32 reg;
-
- hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate);
- if (!hs_clk) {
- dev_err(dsi->dev, "failed to configure DSI PLL\n");
- return -EFAULT;
- }
-
- byte_clk = hs_clk / 8;
- esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
- esc_clk = byte_clk / esc_div;
-
- if (esc_clk > 20 * MHZ) {
- ++esc_div;
- esc_clk = byte_clk / esc_div;
- }
-
- dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
- hs_clk, byte_clk, esc_clk);
-
- reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
- exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
-
- return 0;
-}
-
-static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- const unsigned int *reg_values = driver_data->reg_values;
- u32 reg;
-
- if (driver_data->has_freqband)
- return;
-
- /* B D-PHY: D-PHY Master & Slave Analog Block control */
- reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
- reg_values[PHYCTRL_SLEW_UP];
- exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg);
-
- /*
- * T LPX: Transmitted length of any Low-Power state period
- * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
- * burst
- */
- reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
- exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg);
-
- /*
- * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
- * Line state immediately before the HS-0 Line state starting the
- * HS transmission
- * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
- * transmitting the Clock.
- * T CLK_POST: Time that the transmitter continues to send HS clock
- * after the last associated Data Lane has transitioned to LP Mode
- * Interval is defined as the period from the end of T HS-TRAIL to
- * the beginning of T CLK-TRAIL
- * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
- * the last payload clock bit of a HS transmission burst
- */
- reg = reg_values[PHYTIMING_CLK_PREPARE] |
- reg_values[PHYTIMING_CLK_ZERO] |
- reg_values[PHYTIMING_CLK_POST] |
- reg_values[PHYTIMING_CLK_TRAIL];
-
- exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg);
-
- /*
- * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
- * Line state immediately before the HS-0 Line state starting the
- * HS transmission
- * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
- * transmitting the Sync sequence.
- * T HS-TRAIL: Time that the transmitter drives the flipped differential
- * state after last payload data bit of a HS transmission burst
- */
- reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
- reg_values[PHYTIMING_HS_TRAIL];
- exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg);
-}
-
-static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
-{
- u32 reg;
-
- reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
- exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
-
- reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG);
- reg &= ~DSIM_PLL_EN;
- exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
-}
-
-static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
-{
- u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG);
- reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
- DSIM_LANE_EN(lane));
- exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
-}
-
-static int exynos_dsi_init_link(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int timeout;
- u32 reg;
- u32 lanes_mask;
-
- /* Initialize FIFO pointers */
- reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
- reg &= ~0x1f;
- exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
-
- usleep_range(9000, 11000);
-
- reg |= 0x1f;
- exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
- usleep_range(9000, 11000);
-
- /* DSI configuration */
- reg = 0;
-
- /*
- * The first bit of mode_flags specifies display configuration.
- * If this bit is set[= MIPI_DSI_MODE_VIDEO], dsi will support video
- * mode, otherwise it will support command mode.
- */
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
-
- /*
- * The user manual describes that following bits are ignored in
- * command mode.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- reg |= DSIM_SYNC_INFORM;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- reg |= DSIM_BURST_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
- reg |= DSIM_AUTO_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
- reg |= DSIM_HSE_MODE;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP))
- reg |= DSIM_HFP_MODE;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP))
- reg |= DSIM_HBP_MODE;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA))
- reg |= DSIM_HSA_MODE;
- }
-
- if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
- reg |= DSIM_EOT_DISABLE;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
- break;
- case MIPI_DSI_FMT_RGB666:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
- break;
- case MIPI_DSI_FMT_RGB565:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
- break;
- default:
- dev_err(dsi->dev, "invalid pixel format\n");
- return -EINVAL;
- }
-
- /*
- * Use non-continuous clock mode if the peripheral requests it and the
- * host controller supports it.
- *
- * In non-continuous clock mode, the host controller will turn off
- * the HS clock between high-speed transmissions to reduce
- * power consumption.
- */
- if (driver_data->has_clklane_stop &&
- dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
- reg |= DSIM_CLKLANE_STOP;
- }
- exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
-
- lanes_mask = BIT(dsi->lanes) - 1;
- exynos_dsi_enable_lane(dsi, lanes_mask);
-
- /* Check clock and data lane state are stop state */
- timeout = 100;
- do {
- if (timeout-- == 0) {
- dev_err(dsi->dev, "waiting for bus lanes timed out\n");
- return -EFAULT;
- }
-
- reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
- if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
- != DSIM_STOP_STATE_DAT(lanes_mask))
- continue;
- } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
-
- reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
- reg &= ~DSIM_STOP_STATE_CNT_MASK;
- reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg);
-
- reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
- exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg);
-
- return 0;
-}
-
-static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
-{
- struct drm_display_mode *m = &dsi->mode;
- unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
- u32 reg;
-
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg = DSIM_CMD_ALLOW(0xf)
- | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
- | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
- exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
-
- reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
- | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
- exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
-
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
- | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
- exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
- }
- reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
- DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
-
- exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
-
- dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
-}
-
-static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
-{
- u32 reg;
-
- reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG);
- if (enable)
- reg |= DSIM_MAIN_STAND_BY;
- else
- reg &= ~DSIM_MAIN_STAND_BY;
- exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
-}
-
-static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
-{
- int timeout = 2000;
-
- do {
- u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
-
- if (!(reg & DSIM_SFR_HEADER_FULL))
- return 0;
-
- if (!cond_resched())
- usleep_range(950, 1050);
- } while (--timeout);
-
- return -ETIMEDOUT;
-}
-
-static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
-{
- u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
-
- if (lpm)
- v |= DSIM_CMD_LPDT_LP;
- else
- v &= ~DSIM_CMD_LPDT_LP;
-
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
-}
-
-static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
-{
- u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
- v |= DSIM_FORCE_BTA;
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
-}
-
-static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- struct device *dev = dsi->dev;
- struct mipi_dsi_packet *pkt = &xfer->packet;
- const u8 *payload = pkt->payload + xfer->tx_done;
- u16 length = pkt->payload_length - xfer->tx_done;
- bool first = !xfer->tx_done;
- u32 reg;
-
- dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
- xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
-
- if (length > DSI_TX_FIFO_SIZE)
- length = DSI_TX_FIFO_SIZE;
-
- xfer->tx_done += length;
-
- /* Send payload */
- while (length >= 4) {
- reg = get_unaligned_le32(payload);
- exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
- payload += 4;
- length -= 4;
- }
-
- reg = 0;
- switch (length) {
- case 3:
- reg |= payload[2] << 16;
- fallthrough;
- case 2:
- reg |= payload[1] << 8;
- fallthrough;
- case 1:
- reg |= payload[0];
- exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
- break;
- }
-
- /* Send packet header */
- if (!first)
- return;
-
- reg = get_unaligned_le32(pkt->header);
- if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
- }
-
- if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
- dsi->state & DSIM_STATE_CMD_LPM)) {
- exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
- dsi->state ^= DSIM_STATE_CMD_LPM;
- }
-
- exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg);
-
- if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
- exynos_dsi_force_bta(dsi);
-}
-
-static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- u8 *payload = xfer->rx_payload + xfer->rx_done;
- bool first = !xfer->rx_done;
- struct device *dev = dsi->dev;
- u16 length;
- u32 reg;
-
- if (first) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
-
- switch (reg & 0x3f) {
- case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
- case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
- if (xfer->rx_len >= 2) {
- payload[1] = reg >> 16;
- ++xfer->rx_done;
- }
- fallthrough;
- case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
- case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
- payload[0] = reg >> 8;
- ++xfer->rx_done;
- xfer->rx_len = xfer->rx_done;
- xfer->result = 0;
- goto clear_fifo;
- case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
- dev_err(dev, "DSI Error Report: 0x%04x\n",
- (reg >> 8) & 0xffff);
- xfer->result = 0;
- goto clear_fifo;
- }
-
- length = (reg >> 8) & 0xffff;
- if (length > xfer->rx_len) {
- dev_err(dev,
- "response too long (%u > %u bytes), stripping\n",
- xfer->rx_len, length);
- length = xfer->rx_len;
- } else if (length < xfer->rx_len)
- xfer->rx_len = length;
- }
-
- length = xfer->rx_len - xfer->rx_done;
- xfer->rx_done += length;
-
- /* Receive payload */
- while (length >= 4) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- payload[0] = (reg >> 0) & 0xff;
- payload[1] = (reg >> 8) & 0xff;
- payload[2] = (reg >> 16) & 0xff;
- payload[3] = (reg >> 24) & 0xff;
- payload += 4;
- length -= 4;
- }
-
- if (length) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- switch (length) {
- case 3:
- payload[2] = (reg >> 16) & 0xff;
- fallthrough;
- case 2:
- payload[1] = (reg >> 8) & 0xff;
- fallthrough;
- case 1:
- payload[0] = reg & 0xff;
- }
- }
-
- if (xfer->rx_done == xfer->rx_len)
- xfer->result = 0;
-
-clear_fifo:
- length = DSI_RX_FIFO_SIZE / 4;
- do {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- if (reg == DSI_RX_FIFO_EMPTY)
- break;
- } while (--length);
-}
-
-static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
-{
- unsigned long flags;
- struct exynos_dsi_transfer *xfer;
- bool start = false;
-
-again:
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
-
- exynos_dsi_send_to_fifo(dsi, xfer);
-
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
-
- xfer->result = 0;
- complete(&xfer->completed);
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
-}
-
-static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
-{
- struct exynos_dsi_transfer *xfer;
- unsigned long flags;
- bool start = true;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return false;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- dev_dbg(dsi->dev,
- "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
- xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
- xfer->rx_done);
-
- if (xfer->tx_done != xfer->packet.payload_length)
- return true;
-
- if (xfer->rx_done != xfer->rx_len)
- exynos_dsi_read_from_fifo(dsi, xfer);
-
- if (xfer->rx_done != xfer->rx_len)
- return true;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (!xfer->rx_len)
- xfer->result = 0;
- complete(&xfer->completed);
-
- return start;
-}
-
-static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- unsigned long flags;
- bool start;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (!list_empty(&dsi->transfer_list) &&
- xfer == list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list)) {
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (start)
- exynos_dsi_transfer_start(dsi);
- return;
- }
-
- list_del_init(&xfer->list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-}
-
-static int exynos_dsi_transfer(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
+static irqreturn_t exynos_dsi_te_irq_handler(struct samsung_dsim *dsim)
{
- unsigned long flags;
- bool stopped;
-
- xfer->tx_done = 0;
- xfer->rx_done = 0;
- xfer->result = -ETIMEDOUT;
- init_completion(&xfer->completed);
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- stopped = list_empty(&dsi->transfer_list);
- list_add_tail(&xfer->list, &dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (stopped)
- exynos_dsi_transfer_start(dsi);
-
- wait_for_completion_timeout(&xfer->completed,
- msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
- if (xfer->result == -ETIMEDOUT) {
- struct mipi_dsi_packet *pkt = &xfer->packet;
- exynos_dsi_remove_transfer(dsi, xfer);
- dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
- (int)pkt->payload_length, pkt->payload);
- return -ETIMEDOUT;
- }
-
- /* Also covers hardware timeout condition */
- return xfer->result;
-}
-
-static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
-{
- struct exynos_dsi *dsi = dev_id;
- u32 status;
-
- status = exynos_dsi_read(dsi, DSIM_INTSRC_REG);
- if (!status) {
- static unsigned long int j;
- if (printk_timed_ratelimit(&j, 500))
- dev_warn(dsi->dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
- exynos_dsi_write(dsi, DSIM_INTSRC_REG, status);
-
- if (status & DSIM_INT_SW_RST_RELEASE) {
- u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
- DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_RX_ECC_ERR |
- DSIM_INT_SW_RST_RELEASE);
- exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask);
- complete(&dsi->completed);
- return IRQ_HANDLED;
- }
-
- if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
- DSIM_INT_PLL_STABLE)))
- return IRQ_HANDLED;
-
- if (exynos_dsi_transfer_finish(dsi))
- exynos_dsi_transfer_start(dsi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
-{
- struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
- if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
+ if (dsim->state & DSIM_STATE_VIDOUT_AVAILABLE)
exynos_drm_crtc_te_handler(encoder->crtc);
return IRQ_HANDLED;
}
-static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
-{
- enable_irq(dsi->irq);
-
- if (dsi->te_gpio)
- enable_irq(gpiod_to_irq(dsi->te_gpio));
-}
-
-static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
-{
- if (dsi->te_gpio)
- disable_irq(gpiod_to_irq(dsi->te_gpio));
-
- disable_irq(dsi->irq);
-}
-
-static int exynos_dsi_init(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
-
- exynos_dsi_reset(dsi);
- exynos_dsi_enable_irq(dsi);
-
- if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
- exynos_dsi_enable_lane(dsi, BIT(dsi->lanes) - 1);
-
- exynos_dsi_enable_clock(dsi);
- if (driver_data->wait_for_reset)
- exynos_dsi_wait_for_reset(dsi);
- exynos_dsi_set_phy_ctrl(dsi);
- exynos_dsi_init_link(dsi);
-
- return 0;
-}
-
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
- struct device *panel)
-{
- int ret;
- int te_gpio_irq;
-
- dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
- if (!dsi->te_gpio) {
- return 0;
- } else if (IS_ERR(dsi->te_gpio)) {
- dev_err(dsi->dev, "gpio request failed with %ld\n",
- PTR_ERR(dsi->te_gpio));
- return PTR_ERR(dsi->te_gpio);
- }
-
- te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
-
- ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
- IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
- if (ret) {
- dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
- gpiod_put(dsi->te_gpio);
- return ret;
- }
-
- return 0;
-}
-
-static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
-{
- if (dsi->te_gpio) {
- free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
- gpiod_put(dsi->te_gpio);
- }
-}
-
-static void exynos_dsi_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
- int ret;
-
- if (dsi->state & DSIM_STATE_ENABLED)
- return;
-
- ret = pm_runtime_resume_and_get(dsi->dev);
- if (ret < 0) {
- dev_err(dsi->dev, "failed to enable DSI device.\n");
- return;
- }
-
- dsi->state |= DSIM_STATE_ENABLED;
-}
-
-static void exynos_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- exynos_dsi_set_display_mode(dsi);
- exynos_dsi_set_display_enable(dsi, true);
-
- dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
- return;
-}
-
-static void exynos_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return;
-
- dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
-}
-
-static void exynos_dsi_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- exynos_dsi_set_display_enable(dsi, false);
-
- dsi->state &= ~DSIM_STATE_ENABLED;
- pm_runtime_put_sync(dsi->dev);
-}
-
-static void exynos_dsi_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- drm_mode_copy(&dsi->mode, adjusted_mode);
-}
-
-static int exynos_dsi_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, NULL, flags);
-}
-
-static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = {
- .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_pre_enable = exynos_dsi_atomic_pre_enable,
- .atomic_enable = exynos_dsi_atomic_enable,
- .atomic_disable = exynos_dsi_atomic_disable,
- .atomic_post_disable = exynos_dsi_atomic_post_disable,
- .mode_set = exynos_dsi_mode_set,
- .attach = exynos_dsi_attach,
-};
-
-MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
-
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+static int exynos_dsi_host_attach(struct samsung_dsim *dsim,
struct mipi_dsi_device *device)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct device *dev = dsi->dev;
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm = encoder->dev;
- struct drm_panel *panel;
- int ret;
-
- panel = of_drm_find_panel(device->dev.of_node);
- if (!IS_ERR(panel)) {
- dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel);
- } else {
- dsi->out_bridge = of_drm_find_bridge(device->dev.of_node);
- if (!dsi->out_bridge)
- dsi->out_bridge = ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR(dsi->out_bridge)) {
- ret = PTR_ERR(dsi->out_bridge);
- DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
- return ret;
- }
-
- DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
-
- drm_bridge_add(&dsi->bridge);
- drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
-
- /*
- * This is a temporary solution and should be replaced by a more generic way.
- *
- * If the attached panel device is a command-mode one, dsi should register a
- * TE interrupt handler.
- */
- if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- ret = exynos_dsi_register_te_irq(dsi, &device->dev);
- if (ret)
- return ret;
- }
+ drm_bridge_attach(encoder, &dsim->bridge,
+ list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge,
+ chain_node), 0);
mutex_lock(&drm->mode_config.mutex);
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
+ dsim->lanes = device->lanes;
+ dsim->format = device->format;
+ dsim->mode_flags = device->mode_flags;
exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
- !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+ !(dsim->mode_flags & MIPI_DSI_MODE_VIDEO);
mutex_unlock(&drm->mode_config.mutex);
@@ -1504,100 +60,20 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
+static void exynos_dsi_host_detach(struct samsung_dsim *dsim,
+ struct mipi_dsi_device *device)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_device *drm = dsi->encoder.dev;
- if (dsi->out_bridge->funcs->detach)
- dsi->out_bridge->funcs->detach(dsi->out_bridge);
- dsi->out_bridge = NULL;
-
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- drm_bridge_remove(&dsi->bridge);
-
- return 0;
-}
-
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
-static int exynos_dsi_of_read_u32(const struct device_node *np,
- const char *propname, u32 *out_value)
-{
- int ret = of_property_read_u32(np, propname, out_value);
-
- if (ret < 0)
- pr_err("%pOF: failed to get '%s' property\n", np, propname);
-
- return ret;
}
-static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
+static int exynos_dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct device *dev = dsi->dev;
- struct device_node *node = dev->of_node;
- int ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
- &dsi->pll_clk_rate);
- if (ret < 0)
- return ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
- &dsi->burst_clk_rate);
- if (ret < 0)
- return ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
- &dsi->esc_clk_rate);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int exynos_dsi_bind(struct device *dev, struct device *master,
- void *data)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
+ struct samsung_dsim *dsim = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm_dev = data;
int ret;
@@ -1608,17 +84,16 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (ret < 0)
return ret;
- return mipi_dsi_host_register(&dsi->dsi_host);
+ return mipi_dsi_host_register(&dsim->dsi_host);
}
-static void exynos_dsi_unbind(struct device *dev, struct device *master,
- void *data)
+static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data)
{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
+ struct samsung_dsim *dsim = dev_get_drvdata(dev);
- exynos_dsi_atomic_disable(&dsi->bridge, NULL);
+ dsim->bridge.funcs->atomic_disable(&dsim->bridge, NULL);
- mipi_dsi_host_unregister(&dsi->dsi_host);
+ mipi_dsi_host_unregister(&dsim->dsi_host);
}
static const struct component_ops exynos_dsi_component_ops = {
@@ -1626,188 +101,90 @@ static const struct component_ops exynos_dsi_component_ops = {
.unbind = exynos_dsi_unbind,
};
-static int exynos_dsi_probe(struct platform_device *pdev)
+static int exynos_dsi_register_host(struct samsung_dsim *dsim)
{
- struct device *dev = &pdev->dev;
struct exynos_dsi *dsi;
- int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = devm_kzalloc(dsim->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- init_completion(&dsi->completed);
- spin_lock_init(&dsi->transfer_lock);
- INIT_LIST_HEAD(&dsi->transfer_list);
-
- dsi->dsi_host.ops = &exynos_dsi_ops;
- dsi->dsi_host.dev = dev;
-
- dsi->dev = dev;
- dsi->driver_data = of_device_get_match_data(dev);
-
- dsi->supplies[0].supply = "vddcore";
- dsi->supplies[1].supply = "vddio";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
- dsi->supplies);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get regulators\n");
-
- dsi->clks = devm_kcalloc(dev,
- dsi->driver_data->num_clks, sizeof(*dsi->clks),
- GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev,
- OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n",
- clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
- }
-
- dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dsi->reg_base))
- return PTR_ERR(dsi->reg_base);
-
- dsi->phy = devm_phy_get(dev, "dsim");
- if (IS_ERR(dsi->phy)) {
- dev_info(dev, "failed to get dsim phy\n");
- return PTR_ERR(dsi->phy);
- }
-
- dsi->irq = platform_get_irq(pdev, 0);
- if (dsi->irq < 0)
- return dsi->irq;
-
- ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
- exynos_dsi_irq,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- dev_name(dev), dsi);
- if (ret) {
- dev_err(dev, "failed to request dsi irq\n");
- return ret;
- }
-
- ret = exynos_dsi_parse_dt(dsi);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, dsi);
-
- pm_runtime_enable(dev);
-
- dsi->bridge.funcs = &exynos_dsi_bridge_funcs;
- dsi->bridge.of_node = dev->of_node;
- dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
-
- ret = component_add(dev, &exynos_dsi_component_ops);
- if (ret)
- goto err_disable_runtime;
+ dsim->priv = dsi;
+ dsim->bridge.pre_enable_prev_first = true;
- return 0;
-
-err_disable_runtime:
- pm_runtime_disable(dev);
-
- return ret;
+ return component_add(dsim->dev, &exynos_dsi_component_ops);
}
-static int exynos_dsi_remove(struct platform_device *pdev)
+static void exynos_dsi_unregister_host(struct samsung_dsim *dsim)
{
- pm_runtime_disable(&pdev->dev);
-
- component_del(&pdev->dev, &exynos_dsi_component_ops);
-
- return 0;
-}
-
-static int __maybe_unused exynos_dsi_suspend(struct device *dev)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- usleep_range(10000, 20000);
-
- if (dsi->state & DSIM_STATE_INITIALIZED) {
- dsi->state &= ~DSIM_STATE_INITIALIZED;
-
- exynos_dsi_disable_clock(dsi);
-
- exynos_dsi_disable_irq(dsi);
- }
-
- dsi->state &= ~DSIM_STATE_CMD_LPM;
-
- phy_power_off(dsi->phy);
-
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
-
- ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0)
- dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
-
- return 0;
+ component_del(dsim->dev, &exynos_dsi_component_ops);
}
-static int __maybe_unused exynos_dsi_resume(struct device *dev)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
- return ret;
- }
+static const struct samsung_dsim_host_ops exynos_dsi_exynos_host_ops = {
+ .register_host = exynos_dsi_register_host,
+ .unregister_host = exynos_dsi_unregister_host,
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .te_irq_handler = exynos_dsi_te_irq_handler,
+};
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+static const struct samsung_dsim_plat_data exynos3250_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS3250,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- ret = phy_power_on(dsi->phy);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable phy %d\n", ret);
- goto err_clk;
- }
+static const struct samsung_dsim_plat_data exynos4210_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS4210,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- return 0;
+static const struct samsung_dsim_plat_data exynos5410_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5410,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
-err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
- regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+static const struct samsung_dsim_plat_data exynos5422_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5422,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- return ret;
-}
+static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5433,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
-static const struct dev_pm_ops exynos_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+static const struct of_device_id exynos_dsi_of_match[] = {
+ {
+ .compatible = "samsung,exynos3250-mipi-dsi",
+ .data = &exynos3250_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos4210-mipi-dsi",
+ .data = &exynos4210_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5410-mipi-dsi",
+ .data = &exynos5410_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5422-mipi-dsi",
+ .data = &exynos5422_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5433-mipi-dsi",
+ .data = &exynos5433_dsi_pdata,
+ },
+ { /* sentinel. */ }
};
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
struct platform_driver dsi_driver = {
- .probe = exynos_dsi_probe,
- .remove = exynos_dsi_remove,
+ .probe = samsung_dsim_probe,
+ .remove = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
- .pm = &exynos_dsi_pm_ops,
+ .pm = &samsung_dsim_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};
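The hunks above reduce exynos_dsi.c to glue on top of the shared samsung_dsim bridge core: probe/remove and PM ops are now the common samsung_dsim_* entry points, SoC differences live in samsung_dsim_plat_data entries, and the Exynos-specific behaviour (component binding, per-device state hung off dsim->priv, pre_enable_prev_first ordering) is supplied through samsung_dsim_host_ops callbacks. A minimal sketch of how another SoC glue could plug into the same scheme; struct foo_dsi, the foo_* callbacks and the DSIM_TYPE_FOO enumerator are hypothetical placeholders, not part of this patch:

/* hypothetical glue driver built on the shared samsung_dsim core */
struct foo_dsi {
        struct drm_encoder encoder;
};

static int foo_dsi_register_host(struct samsung_dsim *dsim)
{
        struct foo_dsi *dsi;

        dsi = devm_kzalloc(dsim->dev, sizeof(*dsi), GFP_KERNEL);
        if (!dsi)
                return -ENOMEM;

        dsim->priv = dsi;       /* per-SoC state rides along on the core */
        return 0;
}

static const struct samsung_dsim_host_ops foo_dsi_host_ops = {
        .register_host = foo_dsi_register_host,
};

static const struct samsung_dsim_plat_data foo_dsi_pdata = {
        .hw_type  = DSIM_TYPE_FOO,      /* hypothetical enumerator */
        .host_ops = &foo_dsi_host_ops,
};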
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 97f2dee2db29..fc1c5608db96 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -157,7 +156,6 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 55c92372fca0..ea4b3d248aac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -8,16 +8,12 @@
* Seung-Woo Kim <sw0312.kim@samsung.com>
*/
-#include <linux/console.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_prime.h>
-#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -27,22 +23,26 @@
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
-#define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\
- drm_fb_helper)
+static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_gem_object *obj = drm_gem_fb_get_obj(helper->fb, 0);
-struct exynos_drm_fbdev {
- struct drm_fb_helper drm_fb_helper;
- struct exynos_drm_gem *exynos_gem;
-};
+ return drm_gem_prime_mmap(obj, vma);
+}
-static int exynos_drm_fb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
+static void exynos_drm_fb_destroy(struct fb_info *info)
{
- struct drm_fb_helper *helper = info->par;
- struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
- struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
- return drm_gem_prime_mmap(&exynos_gem->base, vma);
+ drm_fb_helper_fini(fb_helper);
+
+ drm_framebuffer_remove(fb);
+
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
static const struct fb_ops exynos_drm_fb_ops = {
@@ -54,6 +54,7 @@ static const struct fb_ops exynos_drm_fb_ops = {
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
+ .fb_destroy = exynos_drm_fb_destroy,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
@@ -89,7 +90,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
struct exynos_drm_gem *exynos_gem;
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -113,8 +113,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
- exynos_fbdev->exynos_gem = exynos_gem;
-
helper->fb =
exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
@@ -127,19 +125,13 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (ret < 0)
goto err_destroy_framebuffer;
- return ret;
+ return 0;
err_destroy_framebuffer:
drm_framebuffer_cleanup(helper->fb);
+ helper->fb = NULL;
err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem);
-
- /*
- * if failed, all resources allocated above would be released by
- * drm_mode_config_cleanup() when drm_load() had been called prior
- * to any specific driver such as fimd or hdmi driver.
- */
-
return ret;
}
@@ -147,79 +139,92 @@ static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
-int exynos_drm_fbdev_init(struct drm_device *dev)
-{
- struct exynos_drm_fbdev *fbdev;
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *helper;
- int ret;
+/*
+ * struct drm_client
+ */
- if (!dev->mode_config.num_crtc)
- return 0;
+static void exynos_drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
+}
- fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
- if (!fbdev)
- return -ENOMEM;
+static int exynos_drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
- private->fb_helper = helper = &fbdev->drm_fb_helper;
+ return 0;
+}
- drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
+static int exynos_drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
- ret = drm_fb_helper_init(dev, helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to initialize drm fb helper.\n");
- goto err_init;
- }
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
- ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to set up hw configuration.\n");
- goto err_setup;
- }
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
- return 0;
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
-err_setup:
- drm_fb_helper_fini(helper);
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
-err_init:
- private->fb_helper = NULL;
- kfree(fbdev);
+ return 0;
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
-static void exynos_drm_fbdev_destroy(struct drm_device *dev,
- struct drm_fb_helper *fb_helper)
+static const struct drm_client_funcs exynos_drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = exynos_drm_fbdev_client_unregister,
+ .restore = exynos_drm_fbdev_client_restore,
+ .hotplug = exynos_drm_fbdev_client_hotplug,
+};
+
+void exynos_drm_fbdev_setup(struct drm_device *dev)
{
- struct drm_framebuffer *fb;
+ struct drm_fb_helper *fb_helper;
+ int ret;
- /* release drm framebuffer and real buffer */
- if (fb_helper->fb && fb_helper->fb->funcs) {
- fb = fb_helper->fb;
- if (fb)
- drm_framebuffer_remove(fb);
- }
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
- drm_fb_helper_unregister_info(fb_helper);
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+ drm_fb_helper_prepare(dev, fb_helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs);
- drm_fb_helper_fini(fb_helper);
-}
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &exynos_drm_fbdev_client_funcs);
+ if (ret)
+ goto err_drm_client_init;
-void exynos_drm_fbdev_fini(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct exynos_drm_fbdev *fbdev;
+ ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
- if (!private || !private->fb_helper)
- return;
+ drm_client_register(&fb_helper->client);
- fbdev = to_exynos_fbdev(private->fb_helper);
+ return;
- exynos_drm_fbdev_destroy(dev, private->fb_helper);
- kfree(fbdev);
- private->fb_helper = NULL;
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
-
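The rewritten exynos_drm_fbdev.c drops the driver-managed init/fini pair (and the fb-helper output_poll_changed hook removed from exynos_drm_fb.c above) in favour of the in-kernel DRM client: the client's hotplug callback builds the fbdev configuration on demand, restore re-takes the console on lastclose, and unregister tears the emulation down. The call site is not part of these hunks; a sketch of the expected usage, assuming exynos_drm_fbdev_setup() is invoked once from the driver's bind path after drm_dev_register() succeeds, as the drm_WARN on dev->registered above implies:

        /* tail of the DRM bind/probe path (sketch, assumed call site) */
        ret = drm_dev_register(drm, 0);
        if (ret < 0)
                goto err_cleanup;

        /* registers the fbdev client; a failure only disables emulation */
        exynos_drm_fbdev_setup(drm);

        return 0;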
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 3b1e98e84580..1e1dea627cd9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -12,27 +12,11 @@
#define _EXYNOS_DRM_FBDEV_H_
#ifdef CONFIG_DRM_FBDEV_EMULATION
-
-int exynos_drm_fbdev_init(struct drm_device *dev);
-void exynos_drm_fbdev_fini(struct drm_device *dev);
-
+void exynos_drm_fbdev_setup(struct drm_device *dev);
#else
-
-static inline int exynos_drm_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+static inline void exynos_drm_fbdev_setup(struct drm_device *dev)
{
}
-
-static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
-{
-}
-
-#define exynos_drm_output_poll_changed (NULL)
-
#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 0ee32e4b1e43..8de2714599fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1381,7 +1381,6 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1398,13 +1397,9 @@ static int fimc_runtime_resume(struct device *dev)
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
-#endif
-static const struct dev_pm_ops fimc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(fimc_pm_ops, fimc_runtime_suspend,
+ fimc_runtime_resume, NULL);
static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4210-fimc" },
@@ -1420,6 +1415,6 @@ struct platform_driver fimc_driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
.owner = THIS_MODULE,
- .pm = &fimc_pm_ops,
+ .pm = pm_ptr(&fimc_pm_ops),
},
};
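This FIMC hunk shows the pattern repeated for fimd, mic, rotator and scaler below: the #ifdef CONFIG_PM guards go away, the hand-rolled dev_pm_ops table (runtime callbacks plus pm_runtime_force_suspend/resume for system sleep) collapses into DEFINE_RUNTIME_DEV_PM_OPS(), and the driver references it through pm_ptr() so the table and callbacks can be discarded when power management is compiled out. A condensed sketch of the conversion for a hypothetical foo driver:

/* before: explicit table, callbacks guarded by #ifdef CONFIG_PM */
static const struct dev_pm_ops foo_pm_ops_old = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

/* after: one macro, no #ifdef; system sleep still forces runtime suspend */
static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
                                 foo_runtime_resume, NULL);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                .pm   = pm_ptr(&foo_pm_ops),    /* NULL when !CONFIG_PM */
        },
};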
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index ae6636e6658e..7f4a0be03dd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1287,7 +1287,6 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos_fimd_suspend(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
@@ -1321,13 +1320,9 @@ static int exynos_fimd_resume(struct device *dev)
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_fimd_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
+ exynos_fimd_resume, NULL);
struct platform_driver fimd_driver = {
.probe = fimd_probe,
@@ -1335,7 +1330,7 @@ struct platform_driver fimd_driver = {
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
- .pm = &exynos_fimd_pm_ops,
+ .pm = pm_ptr(&exynos_fimd_pm_ops),
.of_match_table = fimd_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index e19c2ceb3759..ec784e58da5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1549,7 +1549,6 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1574,9 +1573,7 @@ static int g2d_resume(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1597,11 +1594,10 @@ static int g2d_runtime_resume(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
- SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
static const struct of_device_id exynos_g2d_match[] = {
@@ -1617,7 +1613,7 @@ struct platform_driver g2d_driver = {
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
- .pm = &g2d_pm_ops,
+ .pm = pm_ptr(&g2d_pm_ops),
.of_match_table = exynos_g2d_match,
},
};
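g2d keeps its own system-sleep handlers, so it cannot use DEFINE_RUNTIME_DEV_PM_OPS(), which hard-wires pm_runtime_force_suspend/resume for sleep; instead the SET_*_PM_OPS wrappers become the unconditional SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() initializers and the table is referenced via pm_ptr(), letting the compiler drop the table and the now unguarded callbacks when CONFIG_PM is off. A small sketch of that variant, with hypothetical foo_* callbacks:

/* driver with distinct sleep and runtime callbacks (g2d-style sketch) */
static const struct dev_pm_ops foo_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                /* NULL without CONFIG_PM, so the table is unreferenced */
                .pm   = pm_ptr(&foo_pm_ops),
        },
};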
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 3e493f48e0d4..638ca96830e9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
unsigned long vm_size;
int ret;
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
@@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
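The two gem.c hunks follow the mm API change that made vma->vm_flags effectively read-only for drivers: direct |= and &= updates are replaced by the vm_flags_set()/vm_flags_clear() helpers, which take the required VMA write access internally. A minimal sketch of the substitution as it applies to any mmap handler:

        /* old style, no longer allowed: */
        /* vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; */
        /* vma->vm_flags &= ~VM_PFNMAP; */

        /* accessor-based style: */
        vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vm_flags_clear(vma, VM_PFNMAP);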
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 09ce28ee08d9..17bab5b1663f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,7 +340,6 @@ static const struct component_ops exynos_mic_component_ops = {
.unbind = exynos_mic_unbind,
};
-#ifdef CONFIG_PM
static int exynos_mic_suspend(struct device *dev)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
@@ -369,13 +368,9 @@ static int exynos_mic_resume(struct device *dev)
}
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_mic_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_mic_pm_ops, exynos_mic_suspend,
+ exynos_mic_resume, NULL);
static int exynos_mic_probe(struct platform_device *pdev)
{
@@ -470,7 +465,7 @@ struct platform_driver mic_driver = {
.remove = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
- .pm = &exynos_mic_pm_ops,
+ .pm = pm_ptr(&exynos_mic_pm_ops),
.owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index dec7df35baa9..8706f377c349 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -340,7 +340,6 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
@@ -355,7 +354,6 @@ static int rotator_runtime_resume(struct device *dev)
return clk_prepare_enable(rot->clock);
}
-#endif
static const struct drm_exynos_ipp_limit rotator_s5pv210_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
@@ -450,12 +448,8 @@ static const struct of_device_id exynos_rotator_match[] = {
};
MODULE_DEVICE_TABLE(of, exynos_rotator_match);
-static const struct dev_pm_ops rotator_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
- NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
+ rotator_runtime_resume, NULL);
struct platform_driver rotator_driver = {
.probe = rotator_probe,
@@ -463,7 +457,7 @@ struct platform_driver rotator_driver = {
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
- .pm = &rotator_pm_ops,
+ .pm = pm_ptr(&rotator_pm_ops),
.of_match_table = exynos_rotator_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 3c049fb658a3..20608e9780ce 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -550,8 +550,6 @@ static int scaler_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
static int clk_disable_unprepare_wrapper(struct clk *clk)
{
clk_disable_unprepare(clk);
@@ -584,13 +582,9 @@ static int scaler_runtime_resume(struct device *dev)
return scaler_clk_ctrl(scaler, true);
}
-#endif
-static const struct dev_pm_ops scaler_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(scaler_pm_ops, scaler_runtime_suspend,
+ scaler_runtime_resume, NULL);
static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
@@ -731,7 +725,7 @@ struct platform_driver scaler_driver = {
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
- .pm = &scaler_pm_ops,
+ .pm = pm_ptr(&scaler_pm_ops),
.of_match_table = exynos_scaler_match,
},
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 8579c7629f5e..c09ba019ba5e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -333,7 +333,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto put;
- drm_fbdev_generic_setup(drm, legacyfb_depth);
+ drm_fbdev_dma_setup(drm, legacyfb_depth);
return 0;
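fsl-dcu switches from the generic fbdev emulation to the DMA-buffer-backed variant, which matches its use of the GEM DMA helpers. Usage is unchanged apart from the header and function name; a sketch of the probe tail, assuming drm_fbdev_dma_setup() keeps the (device, preferred_bpp) signature used in the hunk above:

#include <drm/drm_fbdev_dma.h>

        ret = drm_dev_register(drm, 0);
        if (ret < 0)
                goto put;

        /* legacyfb_depth is the driver's existing module-parameter depth */
        drm_fbdev_dma_setup(drm, legacyfb_depth);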
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 807b989e3c77..2efc0eb41c64 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -3,6 +3,8 @@ config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 63012bf2485a..4f302cd5e1a6 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -38,5 +38,6 @@ gma500_gfx-y += \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
+gma500_gfx-$(CONFIG_DRM_FBDEV_EMULATION) += fbdev.o
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 577a4987b193..8711a7a5b8da 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -7,6 +7,8 @@
* Authors: Eric Knopp
*/
+#include <linux/backlight.h>
+
#include <acpi/video.h>
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3065596257e9..3e83299113e3 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "cdv_device.h"
#include "gma_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7ff1e5141150..5a0acd914f76 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 0c3ddcdc29dc..bbd0abdd8382 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "cdv_device.h"
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 53b967282d6a..8992a95076f2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 29ef45f14169..2d95e0471291 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -28,7 +28,9 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index be6efcaaa3b3..f08a6803dc18 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -12,6 +12,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
new file mode 100644
index 000000000000..62287407e717
--- /dev/null
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ **************************************************************************/
+
+#include <linux/pfn_t.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
+
+#include "gem.h"
+#include "psb_drv.h"
+
+/*
+ * VM area struct
+ */
+
+static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct fb_info *info = vma->vm_private_data;
+ unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
+ unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
+ vm_fault_t err = VM_FAULT_SIGBUS;
+ unsigned long page_num = vma_pages(vma);
+ unsigned long i;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ for (i = 0; i < page_num; ++i) {
+ err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ if (unlikely(err & VM_FAULT_ERROR))
+ break;
+ address += PAGE_SIZE;
+ ++pfn;
+ }
+
+ return err;
+}
+
+static const struct vm_operations_struct psb_fbdev_vm_ops = {
+ .fault = psb_fbdev_vm_fault,
+};
+
+/*
+ * struct fb_ops
+ */
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+static int psb_fbdev_fb_setcolreg(unsigned int regno,
+ unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int transp,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ uint32_t v;
+
+ if (!fb)
+ return -ENOMEM;
+
+ if (regno > 255)
+ return 1;
+
+ red = CMAP_TOHW(red, info->var.red.length);
+ blue = CMAP_TOHW(blue, info->var.blue.length);
+ green = CMAP_TOHW(green, info->var.green.length);
+ transp = CMAP_TOHW(transp, info->var.transp.length);
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ if (regno < 16) {
+ switch (fb->format->cpp[0] * 8) {
+ case 16:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ case 24:
+ case 32:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ /*
+ * If this is a GEM object then info->screen_base is the virtual
+ * kernel remapping of the object. FIXME: Review if this is
+ * suitable for our mmap work
+ */
+ vma->vm_ops = &psb_fbdev_vm_ops;
+ vma->vm_private_data = info;
+ vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static void psb_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_gem_object *obj = fb->obj[0];
+
+ drm_fb_helper_fini(fb_helper);
+
+ drm_framebuffer_unregister_private(fb);
+ fb->obj[0] = NULL;
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+
+ drm_gem_object_put(obj);
+
+ drm_client_release(&fb_helper->client);
+
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+
+static const struct fb_ops psb_fbdev_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_setcolreg = psb_fbdev_fb_setcolreg,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
+ .fb_mmap = psb_fbdev_fb_mmap,
+ .fb_destroy = psb_fbdev_fb_destroy,
+};
+
+/*
+ * struct drm_fb_helper_funcs
+ */
+
+static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd = { };
+ int size;
+ int ret;
+ struct psb_gem_object *backing;
+ struct drm_gem_object *obj;
+ u32 bpp, depth;
+
+ /* No 24-bit packed mode */
+ if (sizes->surface_bpp == 24) {
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 24;
+ }
+ bpp = sizes->surface_bpp;
+ depth = sizes->surface_depth;
+
+ /*
+ * If the mode does not fit in 32 bit then switch to 16 bit to get
+ * a console on full resolution. The X mode setting server will
+ * allocate its own 32-bit GEM framebuffer.
+ */
+ size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
+ sizes->surface_height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (size > dev_priv->vram_stolen_size) {
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ }
+ bpp = sizes->surface_bpp;
+ depth = sizes->surface_depth;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the framebuffer in the GTT with stolen page backing */
+ backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
+ if (IS_ERR(backing))
+ return PTR_ERR(backing);
+ obj = &backing->base;
+
+ fb = psb_framebuffer_create(dev, &mode_cmd, obj);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_drm_gem_object_put;
+ }
+
+ fb_helper->fb = fb;
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_drm_framebuffer_unregister_private;
+ }
+
+ info->fbops = &psb_fbdev_fb_ops;
+ info->flags = FBINFO_DEFAULT;
+ /* Accessed stolen memory directly */
+ info->screen_base = dev_priv->vram_addr + backing->offset;
+ info->screen_size = size;
+
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
+
+ info->fix.smem_start = dev_priv->stolen_base + backing->offset;
+ info->fix.smem_len = size;
+ info->fix.ywrapstep = 0;
+ info->fix.ypanstep = 0;
+ info->fix.mmio_start = pci_resource_start(pdev, 0);
+ info->fix.mmio_len = pci_resource_len(pdev, 0);
+
+ memset(info->screen_base, 0, info->screen_size);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
+
+ return 0;
+
+err_drm_framebuffer_unregister_private:
+ drm_framebuffer_unregister_private(fb);
+ fb->obj[0] = NULL;
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
+ .fb_probe = psb_fbdev_fb_probe,
+};
+
+/*
+ * struct drm_client_funcs and setup code
+ */
+
+static void psb_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_fb_helper_unprepare(fb_helper);
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int psb_fbdev_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+}
+
+static int psb_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs psb_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = psb_fbdev_client_unregister,
+ .restore = psb_fbdev_client_restore,
+ .hotplug = psb_fbdev_client_hotplug,
+};
+
+void psb_fbdev_setup(struct drm_psb_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->dev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+ drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs);
+
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_fb_helper_unprepare;
+ }
+
+ ret = psb_fbdev_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+
+ drm_client_register(&fb_helper->client);
+
+ return;
+
+err_drm_fb_helper_unprepare:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
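psb_fbdev_fb_probe() above sizes the framebuffer out of stolen memory and falls back from 32 bpp to 16 bpp when the request does not fit; the check runs on the 32 bpp size before the final pitch and size are recomputed. A worked example with assumed numbers (a 1920x1080 surface, 4 KiB pages):

/*
 * Assumed request: 1920x1080 at 32 bpp.
 *   pitch = ALIGN(1920 * 4, 64)       = 7680 bytes
 *   size  = ALIGN(7680 * 1080, 4096)  = 8294400 bytes (~7.9 MiB)
 * If vram_stolen_size is below that, the probe drops to 16 bpp / depth 16:
 *   pitch = ALIGN(1920 * 2, 64)       = 3840 bytes
 *   size  = ALIGN(3840 * 1080, 4096)  = 4149248 bytes (~4.0 MiB)
 */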
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8d5a37b8f110..1a374702b696 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -5,156 +5,18 @@
*
**************************************************************************/
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include "framebuffer.h"
-#include "gem.h"
#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
static const struct drm_framebuffer_funcs psb_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- uint32_t v;
-
- if (!fb)
- return -ENOMEM;
-
- if (regno > 255)
- return 1;
-
- red = CMAP_TOHW(red, info->var.red.length);
- blue = CMAP_TOHW(blue, info->var.blue.length);
- green = CMAP_TOHW(green, info->var.green.length);
- transp = CMAP_TOHW(transp, info->var.transp.length);
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- if (regno < 16) {
- switch (fb->format->cpp[0] * 8) {
- case 16:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- case 24:
- case 32:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- }
- }
-
- return 0;
-}
-
-static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_framebuffer *fb = vma->vm_private_data;
- struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]);
- int page_num;
- int i;
- unsigned long address;
- vm_fault_t ret = VM_FAULT_SIGBUS;
- unsigned long pfn;
- unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset;
-
- page_num = vma_pages(vma);
- address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- for (i = 0; i < page_num; i++) {
- pfn = (phys_addr >> PAGE_SHIFT);
-
- ret = vmf_insert_mixed(vma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
- if (unlikely(ret & VM_FAULT_ERROR))
- break;
- address += PAGE_SIZE;
- phys_addr += PAGE_SIZE;
- }
- return ret;
-}
-
-static void psbfb_vm_open(struct vm_area_struct *vma)
-{
-}
-
-static void psbfb_vm_close(struct vm_area_struct *vma)
-{
-}
-
-static const struct vm_operations_struct psbfb_vm_ops = {
- .fault = psbfb_vm_fault,
- .open = psbfb_vm_open,
- .close = psbfb_vm_close
-};
-
-static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
-
- if (vma->vm_pgoff != 0)
- return -EINVAL;
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
- return -EINVAL;
-
- /*
- * If this is a GEM object then info->screen_base is the virtual
- * kernel remapping of the object. FIXME: Review if this is
- * suitable for our mmap work
- */
- vma->vm_ops = &psbfb_vm_ops;
- vma->vm_private_data = (void *)fb;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
- return 0;
-}
-
-static const struct fb_ops psbfb_unaccel_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_setcolreg = psbfb_setcolreg,
- .fb_read = drm_fb_helper_cfb_read,
- .fb_write = drm_fb_helper_cfb_write,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_mmap = psbfb_mmap,
-};
-
/**
* psb_framebuffer_init - initialize a framebuffer
* @dev: our DRM device
@@ -205,11 +67,9 @@ static int psb_framebuffer_init(struct drm_device *dev,
*
* TODO: review object references
*/
-
-static struct drm_framebuffer *psb_framebuffer_create
- (struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
struct drm_framebuffer *fb;
int ret;
@@ -227,98 +87,6 @@ static struct drm_framebuffer *psb_framebuffer_create
}
/**
- * psbfb_create - create a framebuffer
- * @fb_helper: the framebuffer helper
- * @sizes: specification of the layout
- *
- * Create a framebuffer to the specifications provided
- */
-static int psbfb_create(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- int size;
- int ret;
- struct psb_gem_object *backing;
- struct drm_gem_object *obj;
- u32 bpp, depth;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- bpp = sizes->surface_bpp;
- depth = sizes->surface_depth;
-
- /* No 24bit packed */
- if (bpp == 24)
- bpp = 32;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
-
- /* Allocate the framebuffer in the GTT with stolen page backing */
- backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
- if (IS_ERR(backing))
- return PTR_ERR(backing);
- obj = &backing->base;
-
- memset(dev_priv->vram_addr + backing->offset, 0, size);
-
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_gem_object_put;
- }
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- fb = psb_framebuffer_create(dev, &mode_cmd, obj);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_drm_gem_object_put;
- }
-
- fb_helper->fb = fb;
-
- info->fbops = &psbfb_unaccel_ops;
-
- info->fix.smem_start = dev_priv->fb_base;
- info->fix.smem_len = size;
- info->fix.ywrapstep = 0;
- info->fix.ypanstep = 0;
-
- /* Accessed stolen memory directly */
- info->screen_base = dev_priv->vram_addr + backing->offset;
- info->screen_size = size;
-
- if (dev_priv->gtt.stolen_size) {
- info->apertures->ranges[0].base = dev_priv->fb_base;
- info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
- }
-
- drm_fb_helper_fill_info(info, fb_helper, sizes);
-
- info->fix.mmio_start = pci_resource_start(pdev, 0);
- info->fix.mmio_len = pci_resource_len(pdev, 0);
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
- dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
-
- return 0;
-
-err_drm_gem_object_put:
- drm_gem_object_put(obj);
- return ret;
-}
-
-/**
* psb_user_framebuffer_create - create framebuffer
* @dev: our DRM device
* @filp: client file
@@ -349,106 +117,8 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return fb;
}
-static int psbfb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- unsigned int fb_size;
- int bytespp;
-
- bytespp = sizes->surface_bpp / 8;
- if (bytespp == 3) /* no 24bit packed */
- bytespp = 4;
-
- /* If the mode will not fit in 32bit then switch to 16bit to get
- a console on full resolution. The X mode setting server will
- allocate its own 32bit GEM framebuffer */
- fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
- sizes->surface_height;
- fb_size = ALIGN(fb_size, PAGE_SIZE);
-
- if (fb_size > dev_priv->vram_stolen_size) {
- sizes->surface_bpp = 16;
- sizes->surface_depth = 16;
- }
-
- return psbfb_create(fb_helper, sizes);
-}
-
-static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
- .fb_probe = psbfb_probe,
-};
-
-static int psb_fbdev_destroy(struct drm_device *dev,
- struct drm_fb_helper *fb_helper)
-{
- struct drm_framebuffer *fb = fb_helper->fb;
-
- drm_fb_helper_unregister_info(fb_helper);
-
- drm_fb_helper_fini(fb_helper);
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
-
- if (fb->obj[0])
- drm_gem_object_put(fb->obj[0]);
- kfree(fb);
-
- return 0;
-}
-
-int psb_fbdev_init(struct drm_device *dev)
-{
- struct drm_fb_helper *fb_helper;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper) {
- dev_err(dev->dev, "no memory\n");
- return -ENOMEM;
- }
-
- dev_priv->fb_helper = fb_helper;
-
- drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto free;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper, 32);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(fb_helper);
-free:
- kfree(fb_helper);
- return ret;
-}
-
-static void psb_fbdev_fini(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-
- if (!dev_priv->fb_helper)
- return;
-
- psb_fbdev_destroy(dev, dev_priv->fb_helper);
- kfree(dev_priv->fb_helper);
- dev_priv->fb_helper = NULL;
-}
-
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static void psb_setup_outputs(struct drm_device *dev)
@@ -516,7 +186,6 @@ void psb_modeset_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
if (drmm_mode_config_init(dev))
@@ -527,10 +196,6 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &psb_mode_funcs;
- /* set memory base */
- /* Oaktrail and Poulsbo should use BAR 2*/
- pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev_priv->fb_base));
-
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
psb_intel_crtc_init(dev, i, mode_dev);
@@ -551,6 +216,5 @@ void psb_modeset_cleanup(struct drm_device *dev)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->modeset) {
drm_kms_helper_poll_fini(dev);
- psb_fbdev_fini(dev);
}
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index fe7b8436f87a..f65e90d890f4 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -11,8 +11,10 @@
#include <linux/highmem.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 64761f46b434..de8ccfe9890f 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -9,6 +9,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "framebuffer.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 95b7cb099e63..ed8626c73541 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -27,7 +27,9 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 75b4eb1c8884..d974d0c60d2a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -14,6 +14,7 @@
#include <asm/intel-mid.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 3c294c38bdb4..dcfcd7b89d4a 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -6,6 +6,7 @@
**************************************************************************/
#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "gma_device.h"
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index cd9c73f5a64a..2ce96b1b9c74 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -21,7 +21,6 @@
#include <drm/drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
@@ -387,7 +386,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
psb_modeset_init(dev);
- psb_fbdev_init(dev);
drm_kms_helper_poll_init(dev);
/* Only add backlight support if we have LVDS or MIPI output */
@@ -452,6 +450,8 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ psb_fbdev_setup(dev_priv);
+
return 0;
}
@@ -477,7 +477,6 @@ static const struct file_operations psb_gem_fops = {
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
- .lastclose = drm_fb_helper_lastclose,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index a5df6d2f2cab..f7f709df99b4 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -193,8 +193,6 @@
#define KSEL_BYPASS_25 6
#define KSEL_BYPASS_83_100 7
-struct drm_fb_helper;
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -522,9 +520,6 @@ struct drm_psb_private {
uint32_t blc_adj1;
uint32_t blc_adj2;
- struct drm_fb_helper *fb_helper;
- resource_size_t fb_base;
-
bool dsr_enable;
u32 dsr_fb_update;
bool dpi_panel_on[3];
@@ -610,7 +605,19 @@ extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
-extern int psb_fbdev_init(struct drm_device *dev);
+
+/* framebuffer */
+struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+/* fbdev */
+#if defined(CONFIG_DRM_FBDEV_EMULATION)
+void psb_fbdev_setup(struct drm_psb_private *dev_priv);
+#else
+static inline void psb_fbdev_setup(struct drm_psb_private *dev_priv)
+{ }
+#endif
/* backlight.c */
int gma_backlight_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c1781a8fb..ff46e88c4768 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -9,6 +9,9 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 8a1111fe714b..0bb85494e3da 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -9,7 +9,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 7ee6c8ce103b..8486de230ec9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -11,6 +11,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index bdced46dd333..d6fd5d726216 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -33,7 +33,9 @@
#include <linux/slab.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index d421031462df..343c51250207 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -32,17 +32,6 @@ static inline u32 gma_pipestat(int pipe)
BUG();
}
-static inline u32 gma_pipe_event(int pipe)
-{
- if (pipe == 0)
- return _PSB_PIPEA_EVENT_FLAG;
- if (pipe == 1)
- return _MDFLD_PIPEB_EVENT_FLAG;
- if (pipe == 2)
- return _MDFLD_PIPEC_EVENT_FLAG;
- BUG();
-}
-
static inline u32 gma_pipeconf(int pipe)
{
if (pipe == 0)
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index fa636206f232..034e78360d4f 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -303,7 +303,7 @@ static int gud_connector_atomic_check(struct drm_connector *connector,
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
old_state->tv.margins.bottom != new_state->tv.margins.bottom ||
- old_state->tv.mode != new_state->tv.mode ||
+ old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.brightness != new_state->tv.brightness ||
old_state->tv.contrast != new_state->tv.contrast ||
old_state->tv.flicker_reduction != new_state->tv.flicker_reduction ||
@@ -400,7 +400,7 @@ static int gud_connector_add_tv_mode(struct gud_device *gdrm, struct drm_connect
for (i = 0; i < num_modes; i++)
modes[i] = &buf[i * GUD_CONNECTOR_TV_MODE_NAME_LEN];
- ret = drm_mode_create_tv_properties(connector->dev, num_modes, modes);
+ ret = drm_mode_create_tv_properties_legacy(connector->dev, num_modes, modes);
free:
kfree(buf);
if (ret < 0)
@@ -424,7 +424,7 @@ gud_connector_property_lookup(struct drm_connector *connector, u16 prop)
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return config->tv_bottom_margin_property;
case GUD_PROPERTY_TV_MODE:
- return config->tv_mode_property;
+ return config->legacy_tv_mode_property;
case GUD_PROPERTY_TV_BRIGHTNESS:
return config->tv_brightness_property;
case GUD_PROPERTY_TV_CONTRAST:
@@ -454,7 +454,7 @@ static unsigned int *gud_connector_tv_state_val(u16 prop, struct drm_tv_connecto
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return &state->margins.bottom;
case GUD_PROPERTY_TV_MODE:
- return &state->mode;
+ return &state->legacy_mode;
case GUD_PROPERTY_TV_BRIGHTNESS:
return &state->brightness;
case GUD_PROPERTY_TV_CONTRAST:
@@ -539,7 +539,7 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
fallthrough;
case GUD_PROPERTY_TV_HUE:
/* This is a no-op if already added. */
- ret = drm_mode_create_tv_properties(drm, 0, NULL);
+ ret = drm_mode_create_tv_properties_legacy(drm, 0, NULL);
if (ret)
goto out;
break;
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index d57dab104358..9d7bf8ee45f1 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -325,8 +325,8 @@ static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struc
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct gud_device *gdrm = to_gud_device(node->minor->dev);
+ struct drm_debugfs_entry *entry = m->private;
+ struct gud_device *gdrm = to_gud_device(entry->dev);
char buf[10];
string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
@@ -352,19 +352,10 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list gud_debugfs_list[] = {
- { "stats", gud_stats_debugfs, 0, NULL },
-};
-
-static void gud_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
- minor->debugfs_root, minor);
-}
-
static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
.check = gud_pipe_check,
.update = gud_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -385,7 +376,6 @@ static const struct drm_driver gud_drm_driver = {
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gud_gem_prime_import,
- .debugfs_init = gud_debugfs_init,
.name = "gud",
.desc = "Generic USB Display",
@@ -622,6 +612,8 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!gdrm->dmadev)
dev_warn(dev, "buffer sharing not supported");
+ drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
+
ret = drm_dev_register(drm, 0);
if (ret) {
put_device(gdrm->dmadev);
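
The gud_drv.c hunks replace the drm_info_list/.debugfs_init pattern with drm_debugfs_add_file(), which can be called before drm_dev_register() and hands the show callback a struct drm_debugfs_entry whose ->dev points at the drm_device rather than a minor. A hedged sketch of that registration pattern follows; everything except the helpers visible in the hunks above is a placeholder name.

/* Sketch of the drm_debugfs_add_file() pattern; "my_show" and
 * "my_register_debugfs" are placeholders. */
static int my_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *drm = entry->dev;	/* the device, not a minor */

	seq_printf(m, "driver: %s\n", drm->driver->name);
	return 0;
}

static void my_register_debugfs(struct drm_device *drm)
{
	/* Safe to call before drm_dev_register(); the file is created when
	 * the device's debugfs hierarchy is set up. */
	drm_debugfs_add_file(drm, "stats", my_show, NULL);
}
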
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index e351a1f1420d..0d148a6f27aa 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -43,6 +43,7 @@ struct gud_device {
struct drm_framebuffer *fb;
struct drm_rect damage;
bool prev_flush_failed;
+ void *shadow_buf;
};
static inline struct gud_device *to_gud_device(struct drm_device *drm)
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 7c6dc2bcd14a..dc16a92625d4 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -5,6 +5,7 @@
#include <linux/lz4.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic.h>
@@ -15,6 +16,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
@@ -24,17 +26,13 @@
#include "gud_internal.h"
/*
- * Some userspace rendering loops runs all displays in the same loop.
+ * Some userspace rendering loops run all displays in the same loop.
* This means that a fast display will have to wait for a slow one.
- * For this reason gud does flushing asynchronous by default.
- * The down side is that in e.g. a single display setup userspace thinks
- * the display is insanely fast since the driver reports back immediately
- * that the flush/pageflip is done. This wastes CPU and power.
- * Such users might want to set this module parameter to false.
+ * Such users might want to enable this module parameter.
*/
-static bool gud_async_flush = true;
+static bool gud_async_flush;
module_param_named(async_flush, gud_async_flush, bool, 0644);
-MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
+MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]");
/*
* FIXME: The driver is probably broken on Big Endian machines.
@@ -152,32 +150,21 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
}
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect,
struct gud_set_buffer_req *req)
{
- struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
u8 compression = gdrm->compression;
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
struct iosys_map dst;
void *vaddr, *buf;
size_t pitch, len;
- int ret = 0;
pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
len = pitch * drm_rect_height(rect);
if (len > gdrm->bulk_len)
return -E2BIG;
- ret = drm_gem_fb_vmap(fb, map, map_data);
- if (ret)
- return ret;
-
- vaddr = map_data[0].vaddr;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- goto vunmap;
+ vaddr = src[0].vaddr;
retry:
if (compression)
buf = gdrm->compress_buf;
@@ -192,29 +179,27 @@ retry:
if (format != fb->format) {
if (format->format == GUD_DRM_FORMAT_R1) {
len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
- if (!len) {
- ret = -ENOMEM;
- goto end_cpu_access;
- }
+ if (!len)
+ return -ENOMEM;
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
- } else if (compression && !import_attach && pitch == fb->pitches[0]) {
+ drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+ } else if (compression && cached_reads && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
} else {
- drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
+ drm_fb_memcpy(&dst, NULL, src, fb, rect);
}
memset(req, 0, sizeof(*req));
@@ -237,12 +222,7 @@ retry:
req->compressed_length = cpu_to_le32(complen);
}
-end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-vunmap:
- drm_gem_fb_vunmap(fb, map);
-
- return ret;
+ return 0;
}
struct gud_usb_bulk_context {
@@ -285,6 +265,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
}
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect)
{
struct gud_set_buffer_req req;
@@ -293,7 +274,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = gud_prep_flush(gdrm, fb, format, rect, &req);
+ ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
if (ret)
return ret;
@@ -333,46 +314,51 @@ void gud_clear_damage(struct gud_device *gdrm)
gdrm->damage.y2 = 0;
}
-static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
+static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
+ struct drm_rect *damage)
{
- gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
- gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
- gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
- gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
-}
+ const struct drm_format_info *format;
+ unsigned int i, lines;
+ size_t pitch;
+ int ret;
-static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
- struct drm_rect *damage)
-{
- /*
- * pipe_update waits for the worker when the display mode is going to change.
- * This ensures that the width and height is still the same making it safe to
- * add back the damage.
- */
+ format = fb->format;
+ if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
+ format = gdrm->xrgb8888_emulation_format;
- mutex_lock(&gdrm->damage_lock);
- if (!gdrm->fb) {
- drm_framebuffer_get(fb);
- gdrm->fb = fb;
- }
- gud_add_damage(gdrm, damage);
- mutex_unlock(&gdrm->damage_lock);
+ /* Split update if it's too big */
+ pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
+ lines = drm_rect_height(damage);
+
+ if (gdrm->bulk_len < lines * pitch)
+ lines = gdrm->bulk_len / pitch;
+
+ for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
+ struct drm_rect rect = *damage;
- /* Retry only once to avoid a possible storm in case of continues errors. */
- if (!gdrm->prev_flush_failed)
- queue_work(system_long_wq, &gdrm->work);
- gdrm->prev_flush_failed = true;
+ rect.y1 += i * lines;
+ rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
+
+ ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
+ if (ret) {
+ if (ret != -ENODEV && ret != -ECONNRESET &&
+ ret != -ESHUTDOWN && ret != -EPROTO)
+ dev_err_ratelimited(fb->dev->dev,
+ "Failed to flush framebuffer: error=%d\n", ret);
+ gdrm->prev_flush_failed = true;
+ break;
+ }
+ }
}
void gud_flush_work(struct work_struct *work)
{
struct gud_device *gdrm = container_of(work, struct gud_device, work);
- const struct drm_format_info *format;
+ struct iosys_map shadow_map;
struct drm_framebuffer *fb;
struct drm_rect damage;
- unsigned int i, lines;
- int idx, ret = 0;
- size_t pitch;
+ int idx;
if (!drm_dev_enter(&gdrm->drm, &idx))
return;
@@ -380,6 +366,7 @@ void gud_flush_work(struct work_struct *work)
mutex_lock(&gdrm->damage_lock);
fb = gdrm->fb;
gdrm->fb = NULL;
+ iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
damage = gdrm->damage;
gud_clear_damage(gdrm);
mutex_unlock(&gdrm->damage_lock);
@@ -387,59 +374,43 @@ void gud_flush_work(struct work_struct *work)
if (!fb)
goto out;
- format = fb->format;
- if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
- format = gdrm->xrgb8888_emulation_format;
-
- /* Split update if it's too big */
- pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
- lines = drm_rect_height(&damage);
-
- if (gdrm->bulk_len < lines * pitch)
- lines = gdrm->bulk_len / pitch;
-
- for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
- struct drm_rect rect = damage;
-
- rect.y1 += i * lines;
- rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);
-
- ret = gud_flush_rect(gdrm, fb, format, &rect);
- if (ret) {
- if (ret != -ENODEV && ret != -ECONNRESET &&
- ret != -ESHUTDOWN && ret != -EPROTO) {
- bool prev_flush_failed = gdrm->prev_flush_failed;
-
- gud_retry_failed_flush(gdrm, fb, &damage);
- if (!prev_flush_failed)
- dev_err_ratelimited(fb->dev->dev,
- "Failed to flush framebuffer: error=%d\n", ret);
- }
- break;
- }
-
- gdrm->prev_flush_failed = false;
- }
+ gud_flush_damage(gdrm, fb, &shadow_map, true, &damage);
drm_framebuffer_put(fb);
out:
drm_dev_exit(idx);
}
-static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
- struct drm_rect *damage)
+static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, struct drm_rect *damage)
{
struct drm_framebuffer *old_fb = NULL;
+ struct iosys_map shadow_map;
mutex_lock(&gdrm->damage_lock);
+ if (!gdrm->shadow_buf) {
+ gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
+ if (!gdrm->shadow_buf) {
+ mutex_unlock(&gdrm->damage_lock);
+ return -ENOMEM;
+ }
+ }
+
+ iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
+ iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage));
+ drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage);
+
if (fb != gdrm->fb) {
old_fb = gdrm->fb;
drm_framebuffer_get(fb);
gdrm->fb = fb;
}
- gud_add_damage(gdrm, damage);
+ gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
+ gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
+ gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
+ gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
mutex_unlock(&gdrm->damage_lock);
@@ -447,6 +418,26 @@ static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer
if (old_fb)
drm_framebuffer_put(old_fb);
+
+ return 0;
+}
+
+static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, struct drm_rect *damage)
+{
+ int ret;
+
+ if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
+ drm_rect_init(damage, 0, 0, fb->width, fb->height);
+
+ if (gud_async_flush) {
+ ret = gud_fb_queue_damage(gdrm, fb, src, damage);
+ if (ret != -ENOMEM)
+ return;
+ }
+
+ /* Imported buffers are assumed to be WriteCombined with uncached reads */
+ gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
@@ -571,10 +562,11 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_device *drm = pipe->crtc.dev;
struct gud_device *gdrm = to_gud_device(drm);
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect damage;
- int idx;
+ int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
@@ -584,6 +576,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
gdrm->fb = NULL;
}
gud_clear_damage(gdrm);
+ vfree(gdrm->shadow_buf);
+ gdrm->shadow_buf = NULL;
mutex_unlock(&gdrm->damage_lock);
}
@@ -599,14 +593,19 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (crtc->state->active_changed)
gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);
- if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
- if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
- drm_rect_init(&damage, 0, 0, fb->width, fb->height);
- gud_fb_queue_damage(gdrm, fb, &damage);
- if (!gud_async_flush)
- flush_work(&gdrm->work);
- }
+ if (!fb)
+ goto ctrl_disable;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ goto ctrl_disable;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+ctrl_disable:
if (!crtc->state->enable)
gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
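
The gud_pipe.c rework keeps a vzalloc'ed shadow copy of the framebuffer so the flush path reads from cached system memory instead of an imported (typically write-combined) buffer, and gud_flush_damage() now slices a damage rectangle into horizontal bands that fit the USB bulk buffer. The sketch below mirrors that slicing loop under the same assumptions; "split_and_flush", "flush_one" and the bulk_len parameter are stand-ins for illustration only.

/* Illustrative split of a damage rect into slices of at most bulk_len bytes,
 * mirroring the loop added in gud_flush_damage(). Helpers come from
 * <drm/drm_fourcc.h> and <drm/drm_rect.h>. */
static void split_and_flush(const struct drm_format_info *format,
			    struct drm_rect *damage, size_t bulk_len,
			    int (*flush_one)(struct drm_rect *rect))
{
	size_t pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
	unsigned int lines = drm_rect_height(damage);
	unsigned int i;

	if (bulk_len < lines * pitch)
		lines = bulk_len / pitch;	/* lines per slice that still fit */

	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
		struct drm_rect rect = *damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);

		if (flush_one(&rect))
			break;	/* stop on the first failed slice */
	}
}
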
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 4e41c144a290..126504318a4f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -7,6 +7,8 @@ config DRM_HISI_HIBMC
select DRM_VRAM_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 22053c613644..0c4aa4d9b0a7 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -106,7 +106,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
- dev->mode_config.preferred_depth = 32;
+ dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -340,7 +340,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_unload;
}
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ drm_fbdev_generic_setup(dev, 32);
return 0;
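
The hibmc change above separates two different numbers: mode_config.preferred_depth describes significant colour bits of the native format, while the argument to drm_fbdev_generic_setup() is storage bits per pixel. A small sketch of the distinction, using the DRM format database rather than anything from this patch:

/* Sketch only: XRGB8888 carries 24 significant colour bits in a 32-bit
 * pixel, hence preferred_depth = 24 while fbdev is still asked for 32 bpp. */
const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
/* info->depth  == 24 -> dev->mode_config.preferred_depth            */
/* info->cpp[0] == 4  -> the 32 passed to drm_fbdev_generic_setup()  */
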
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 427c20ba3404..f830d62a5ce6 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -165,7 +165,7 @@ err_hv_set_drv_data:
return ret;
}
-static int hyperv_vmbus_remove(struct hv_device *hdev)
+static void hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
@@ -176,8 +176,6 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
hv_set_drvdata(hdev, NULL);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
-
- return 0;
}
static int hyperv_vmbus_suspend(struct hv_device *hdev)
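
The hyperv hunk tracks the vmbus bus-level change that makes .remove callbacks return void, so the trailing "return 0" disappears. A minimal sketch of the converted callback shape, with placeholder names (only the void return type is the point):

/* "my_device" and "my_teardown" are hypothetical; hv_get_drvdata() and
 * hv_set_drvdata() are the real vmbus helpers used above. */
static void my_vmbus_remove(struct hv_device *hdev)
{
	struct my_device *mydev = hv_get_drvdata(hdev);

	my_teardown(mydev);		/* hypothetical device cleanup */
	hv_set_drvdata(hdev, NULL);	/* nothing left to return */
}
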
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 578b738859b9..521bdf656cca 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
+#include <drm/drm_crtc_helper.h>
+
#include "ch7006_priv.h"
/* DRM encoder functions */
@@ -250,7 +252,7 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_mode_config *conf = &dev->mode_config;
- drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+ drm_mode_create_tv_properties_legacy(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
if (!priv->scale_property)
@@ -264,8 +266,8 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->hmargin);
drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_object_attach_property(&connector->base, conf->tv_mode_property,
- priv->norm);
+ drm_object_attach_property(&connector->base, conf->legacy_tv_mode_property,
+ priv->norm);
drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
drm_object_attach_property(&connector->base, conf->tv_contrast_property,
@@ -315,7 +317,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
ch7006_load_reg(client, state, CH7006_POV);
ch7006_load_reg(client, state, CH7006_VPOS);
- } else if (property == conf->tv_mode_property) {
+ } else if (property == conf->legacy_tv_mode_property) {
if (connector->dpms != DRM_MODE_DPMS_OFF)
return -EINVAL;
@@ -386,7 +388,7 @@ static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
/* I2C driver functions */
-static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int ch7006_probe(struct i2c_client *client)
{
uint8_t addr = CH7006_VERSION_ID;
uint8_t val;
@@ -495,7 +497,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
- .probe = ch7006_probe,
+ .probe_new = ch7006_probe,
.remove = ch7006_remove,
.driver = {
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index 986b04599906..052bdc48a339 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -27,7 +27,6 @@
#ifndef __DRM_I2C_CH7006_PRIV_H__
#define __DRM_I2C_CH7006_PRIV_H__
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/ch7006.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 1bc0b5de4499..f57f9a807542 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -350,7 +350,7 @@ static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
/* I2C driver functions */
static int
-sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+sil164_probe(struct i2c_client *client)
{
int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
sil164_read(client, SIL164_VENDOR_LO);
@@ -420,7 +420,7 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
- .probe = sil164_probe,
+ .probe_new = sil164_probe,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 9ed54e7ccff2..b8c143e573e0 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -375,8 +375,7 @@ static void tda9950_cec_del(void *data)
cec_delete_adapter(priv->adap);
}
-static int tda9950_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tda9950_probe(struct i2c_client *client)
{
struct tda9950_glue *glue = client->dev.platform_data;
struct device *dev = &client->dev;
@@ -493,7 +492,7 @@ static struct i2c_device_id tda9950_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
static struct i2c_driver tda9950_driver = {
- .probe = tda9950_probe,
+ .probe_new = tda9950_probe,
.remove = tda9950_remove,
.driver = {
.name = "tda9950",
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a14d2896aebb..db5c9343a3d2 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2059,7 +2059,7 @@ static const struct component_ops tda998x_ops = {
};
static int
-tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+tda998x_probe(struct i2c_client *client)
{
int ret;
@@ -2099,7 +2099,7 @@ static const struct i2c_device_id tda998x_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
static struct i2c_driver tda998x_driver = {
- .probe = tda998x_probe,
+ .probe_new = tda998x_probe,
.remove = tda998x_remove,
.driver = {
.name = "tda998x",
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
deleted file mode 100644
index c181f8528c5c..000000000000
--- a/drivers/gpu/drm/i810/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-i810-y := i810_drv.o i810_dma.o
-
-obj-$(CONFIG_DRM_I810) += i810.o
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
deleted file mode 100644
index 9fb4dd63342f..000000000000
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ /dev/null
@@ -1,1266 +0,0 @@
-/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include <linux/delay.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_print.h>
-#include <drm/i810_drm.h>
-
-#include "i810_drv.h"
-
-#define I810_BUF_FREE 2
-#define I810_BUF_CLIENT 1
-#define I810_BUF_HARDWARE 0
-
-#define I810_BUF_UNMAPPED 0
-#define I810_BUF_MAPPED 1
-
-static struct drm_buf *i810_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
- I810_BUF_CLIENT);
- if (used == I810_BUF_FREE)
- return buf;
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
- if (used != I810_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i810_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i810_buf_priv_t *buf_priv;
-
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= VM_DONTCOPY;
-
- buf_priv->currently_mapped = I810_BUF_MAPPED;
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i810_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = i810_mmap_buffers,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- drm_i810_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I810_BUF_MAPPED)
- return -EINVAL;
-
- /* This is all entirely broken */
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i810_buffer_fops;
- dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR(buf_priv->virtual)) {
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR(buf_priv->virtual);
- buf_priv->virtual = NULL;
- }
-
- return retcode;
-}
-
-static int i810_unmap_buffer(struct drm_buf *buf)
-{
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I810_BUF_MAPPED)
- return -EINVAL;
-
- retcode = vm_munmap((unsigned long)buf_priv->virtual,
- (size_t) buf->total);
-
- buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i810_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i810_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i810_map_buffer(buf, file_priv);
- if (retcode) {
- i810_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i810_dma_cleanup(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i810_private_t *dev_priv =
- (drm_i810_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start)
- drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
- if (dev_priv->hw_status_page) {
- dma_free_coherent(dev->dev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- }
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf_priv->kernel_virtual && buf->total)
- drm_legacy_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-static int i810_wait_ring(struct drm_device *dev, int n)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- }
-
-out_wait_ring:
- return iters;
-}
-
-static void i810_kernel_lost_context(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I810_READ(LP_RING + RING_TAIL);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-}
-
-static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 24;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I810_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_legacy_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
-
- }
- return 0;
-}
-
-static int i810_dma_initialize(struct drm_device *dev,
- drm_i810_private_t *dev_priv,
- drm_i810_init_t *init)
-{
- struct drm_map_list *r_list;
- memset(dev_priv, 0, sizeof(drm_i810_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i810_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_legacy_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->overlay_offset = init->overlay_offset;
- dev_priv->overlay_physical = init->overlay_physical;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- dma_alloc_coherent(dev->dev, PAGE_SIZE,
- &dev_priv->dma_status_page, GFP_KERNEL);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I810_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i810_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i810_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv;
- drm_i810_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I810_INIT_DMA_1_4:
- DRM_INFO("Using v1.4 init.\n");
- dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i810_dma_initialize(dev, dev_priv, init);
- break;
-
- case I810_CLEANUP_DMA:
- DRM_INFO("DMA Cleanup\n");
- retcode = i810_dma_cleanup(dev);
- break;
- default:
- return -EINVAL;
- }
-
- return retcode;
-}
-
-/* Most efficient way to verify state for the i810 is as it is
- * emitted. Non-conformant state is silently dropped.
- *
- * Use 'volatile' & local var tmp to force the emitted values to be
- * identical to the verified ones.
- */
-static void i810EmitContextVerified(struct drm_device *dev,
- volatile unsigned int *code)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
-
- OUT_RING(GFX_OP_COLOR_FACTOR);
- OUT_RING(code[I810_CTXREG_CF1]);
-
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[I810_CTXREG_ST1]);
-
- for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
- tmp = code[i];
-
- if ((tmp & (7 << 29)) == (3 << 29) &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else
- printk("constext state dropped!!!\n");
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
-
- OUT_RING(GFX_OP_MAP_INFO);
- OUT_RING(code[I810_TEXREG_MI1]);
- OUT_RING(code[I810_TEXREG_MI2]);
- OUT_RING(code[I810_TEXREG_MI3]);
-
- for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
-
- if ((tmp & (7 << 29)) == (3 << 29) &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else
- printk("texture state dropped!!!\n");
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i810EmitDestVerified(struct drm_device *dev,
- volatile unsigned int *code)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
-
- tmp = code[I810_DESTREG_DI1];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(tmp);
- } else
- DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
-
- /* invariant:
- */
- OUT_RING(CMD_OP_Z_BUFFER_INFO);
- OUT_RING(dev_priv->zi1);
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I810_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I810_DESTREG_DR1]);
- OUT_RING(code[I810_DESTREG_DR2]);
- OUT_RING(code[I810_DESTREG_DR3]);
- OUT_RING(code[I810_DESTREG_DR4]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i810EmitState(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%x\n", dirty);
-
- if (dirty & I810_UPLOAD_BUFFERS) {
- i810EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
- }
-
- if (dirty & I810_UPLOAD_CTX) {
- i810EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I810_UPLOAD_CTX;
- }
-
- if (dirty & I810_UPLOAD_TEX0) {
- i810EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
- }
-
- if (dirty & I810_UPLOAD_TEX1) {
- i810EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
- }
-}
-
-/* need to verify
- */
-static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = 2;
- int i;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I810_FRONT | I810_BACK);
- if (tmp & I810_FRONT)
- flags |= I810_BACK;
- if (tmp & I810_BACK)
- flags |= I810_FRONT;
- }
-
- i810_kernel_lost_context(dev);
-
- if (nbox > I810_NR_SAREA_CLIPRECTS)
- nbox = I810_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- unsigned int x = pbox->x1;
- unsigned int y = pbox->y1;
- unsigned int width = (pbox->x2 - x) * cpp;
- unsigned int height = pbox->y2 - y;
- unsigned int start = y * pitch + x * cpp;
-
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I810_FRONT) {
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
- OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
- OUT_RING((height << 16) | width);
- OUT_RING(start);
- OUT_RING(clear_color);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (flags & I810_BACK) {
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
- OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
- OUT_RING((height << 16) | width);
- OUT_RING(dev_priv->back_offset + start);
- OUT_RING(clear_color);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (flags & I810_DEPTH) {
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
- OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
- OUT_RING((height << 16) | width);
- OUT_RING(dev_priv->depth_offset + start);
- OUT_RING(clear_zval);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i810_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = 2;
- int i;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i810_kernel_lost_context(dev);
-
- if (nbox > I810_NR_SAREA_CLIPRECTS)
- nbox = I810_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- unsigned int w = pbox->x2 - pbox->x1;
- unsigned int h = pbox->y2 - pbox->y1;
- unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
- unsigned int start = dst;
-
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
- OUT_RING(pitch | (0xCC << 16));
- OUT_RING((h << 16) | (w * cpp));
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset + start);
- else
- OUT_RING(dev_priv->back_offset + start);
- OUT_RING(pitch);
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset + start);
- else
- OUT_RING(dev_priv->front_offset + start);
- ADVANCE_LP_RING();
- }
-}
-
-static void i810_dma_dispatch_vertex(struct drm_device *dev,
- struct drm_buf *buf, int discard, int used)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- if (nbox > I810_NR_SAREA_CLIPRECTS)
- nbox = I810_NR_SAREA_CLIPRECTS;
-
- if (used < 0 || used > 4 * 1024)
- used = 0;
-
- if (sarea_priv->dirty)
- i810EmitState(dev);
-
- if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
- unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
-
- *(u32 *) buf_priv->kernel_virtual =
- ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
-
- if (used & 4) {
- *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
- used += 4;
- }
-
- i810_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(4);
- OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
- SC_ENABLE);
- OUT_RING(GFX_OP_SCISSOR_INFO);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING((box[i].x2 -
- 1) | ((box[i].y2 - 1) << 16));
- ADVANCE_LP_RING();
- }
-
- BEGIN_LP_RING(4);
- OUT_RING(CMD_OP_BATCH_BUFFER);
- OUT_RING(start | BB1_PROTECTED);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
- I810_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I810_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i810_dma_dispatch_flip(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- int pitch = dev_priv->pitch;
- RING_LOCALS;
-
- DRM_DEBUG("page=%d pfCurrentPage=%d\n",
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i810_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
- /* On i815 at least ASYNC is buggy */
- /* pitch<<5 is from 11.2.8 p158,
- its the pitch / 8 then left shifted 8,
- so (pitch >> 3) << 8 */
- OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- /* Increment the frame counter. The client-side 3D driver must
- * throttle the framerate by waiting for this value before
- * performing the swapbuffer ioctl.
- */
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-
-}
-
-static void i810_dma_quiescent(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i810_wait_ring(dev, dev_priv->ring.Size - 8);
-}
-
-static void i810_flush_queue(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i810_wait_ring(dev, dev_priv->ring.Size - 8);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
- I810_BUF_FREE);
-
- if (used == I810_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I810_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return;
-}
-
-/* Must be called with the lock held */
-void i810_driver_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i810_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
- I810_BUF_FREE);
-
- if (used == I810_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I810_BUF_MAPPED)
- buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- }
- }
-}
-
-static int i810_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i810_flush_queue(dev);
- return 0;
-}
-
-static int i810_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
- drm_i810_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
- return -EINVAL;
-
- i810_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i810_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private)
- return -EINVAL;
-
- i810_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color, clear->clear_depth);
- return 0;
-}
-
-static int i810_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i810_dma_dispatch_swap(dev);
- return 0;
-}
-
-static int i810_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i810_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i810_dma_t *d = data;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i810_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i810_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i810_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
- unsigned int last_render)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int u;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
- if (u != I810_BUF_CLIENT)
- DRM_DEBUG("MC found buffer that isn't mine!\n");
-
- if (used < 0 || used > 4 * 1024)
- used = 0;
-
- sarea_priv->dirty = 0x7f;
-
- DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
- if (used & 4) {
- *(u32 *) ((char *) buf_priv->virtual + used) = 0;
- used += 4;
- }
-
- i810_unmap_buffer(buf);
- }
- BEGIN_LP_RING(4);
- OUT_RING(CMD_OP_BATCH_BUFFER);
- OUT_RING(start | BB1_PROTECTED);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I810_BUF_FREE);
- OUT_RING(0);
-
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(16);
- OUT_RING(last_render);
- OUT_RING(0);
- ADVANCE_LP_RING();
-}
-
-static int i810_dma_mc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
- drm_i810_mc_t *mc = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (mc->idx >= dma->buf_count || mc->idx < 0)
- return -EINVAL;
-
- i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
- mc->last_render);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i810_rstatus(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
-
- return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
-}
-
-static int i810_ov0_info(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- drm_i810_overlay_t *ov = data;
-
- ov->offset = dev_priv->overlay_offset;
- ov->physical = dev_priv->overlay_physical;
-
- return 0;
-}
-
-static int i810_fstatus(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
- return I810_READ(0x30008);
-}
-
-static int i810_ov0_flip(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* Tell the overlay to update */
- I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
-
- return 0;
-}
-
-/* Not sure why this isn't set all the time:
- */
-static void i810_do_init_pageflip(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i810_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
- if (dev_priv->current_page != 0)
- i810_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i810_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i810_do_init_pageflip(dev);
-
- i810_dma_dispatch_flip(dev);
- return 0;
-}
-
-int i810_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- dev->agp = drm_legacy_agp_init(dev);
- if (dev->agp) {
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
- }
-
- /* Our userspace depends upon the agp mapping support. */
- if (!dev->agp)
- return -EINVAL;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
-void i810_driver_lastclose(struct drm_device *dev)
-{
- i810_dma_cleanup(dev);
-}
-
-void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i810_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- i810_do_cleanup_pageflip(dev);
- }
-
- if (file_priv->master && file_priv->master->lock.hw_lock) {
- drm_legacy_idlelock_take(&file_priv->master->lock);
- i810_driver_reclaim_buffers(dev, file_priv);
- drm_legacy_idlelock_release(&file_priv->master->lock);
- } else {
- /* master disappeared, clean up stuff anyway and hope nothing
- * goes wrong */
- i810_driver_reclaim_buffers(dev, file_priv);
- }
-
-}
-
-int i810_driver_dma_quiescent(struct drm_device *dev)
-{
- i810_dma_quiescent(dev);
- return 0;
-}
-
-const struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
-};
-
-int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
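
The removed i810_dma.c above managed its DMA buffers with a lock-free freelist: each buffer's ownership word lives in the shared hardware status page and moves between free, client and hardware states via cmpxchg(), with the GPU storing the free value back from the ring when it is done. A standalone sketch of that handoff; the names and state values below are placeholders, not the i810 definitions:

/* Lock-free ownership handoff via cmpxchg() on a per-buffer status word.
 * No spinlock is needed between the allocator and the completion path. */
#define BUF_FREE	2
#define BUF_CLIENT	1

static bool try_claim(u32 *in_use)
{
	/* Succeeds only if the word still reads BUF_FREE. */
	return cmpxchg(in_use, BUF_FREE, BUF_CLIENT) == BUF_FREE;
}

static void release(u32 *in_use)
{
	/* Give the buffer back; a failed exchange means we did not own it. */
	if (cmpxchg(in_use, BUF_CLIENT, BUF_FREE) != BUF_CLIENT)
		pr_warn("releasing a buffer we do not own\n");
}
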
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
deleted file mode 100644
index 0e53a066d4db..000000000000
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* i810_drv.c -- I810 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include "i810_drv.h"
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-#include <drm/i810_drm.h>
-
-
-static struct pci_device_id pciidlist[] = {
- i810_PCI_IDS
-};
-
-static const struct file_operations i810_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_LEGACY,
- .dev_priv_size = sizeof(drm_i810_buf_priv_t),
- .load = i810_driver_load,
- .lastclose = i810_driver_lastclose,
- .preclose = i810_driver_preclose,
- .dma_quiescent = i810_driver_dma_quiescent,
- .ioctls = i810_ioctls,
- .fops = &i810_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver i810_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init i810_init(void)
-{
- if (num_possible_cpus() > 1) {
- pr_err("drm/i810 does not support SMP\n");
- return -EINVAL;
- }
- driver.num_ioctls = i810_max_ioctl;
- return drm_legacy_pci_init(&driver, &i810_pci_driver);
-}
-
-static void __exit i810_exit(void)
-{
- drm_legacy_pci_exit(&driver, &i810_pci_driver);
-}
-
-module_init(i810_init);
-module_exit(i810_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
deleted file mode 100644
index 9df3981ffc66..000000000000
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* i810_drv.h -- Private header for the i810 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I810_DRV_H_
-#define _I810_DRV_H_
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/i810_drm.h>
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i810"
-#define DRIVER_DESC "Intel i810"
-#define DRIVER_DATE "20030605"
-
-/* Interface history
- *
- * 1.1 - XFree86 4.1
- * 1.2 - XvMC interfaces
- * - XFree86 4.2
- * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
- * - Remove requirement for interrupt (leave stubs again)
- * 1.3 - Add page flipping.
- * 1.4 - fix DRM interface
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 4
-#define DRIVER_PATCHLEVEL 0
-
-typedef struct drm_i810_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i810_buf_priv_t;
-
-typedef struct _drm_i810_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i810_ring_buffer_t;
-
-typedef struct drm_i810_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i810_sarea_t *sarea_priv;
- drm_i810_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int overlay_offset;
- int overlay_physical;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int front_offset;
-} drm_i810_private_t;
-
- /* i810_dma.c */
-extern int i810_driver_dma_quiescent(struct drm_device *dev);
-void i810_driver_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv);
-extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(struct drm_device *dev);
-extern void i810_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-
-extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern const struct drm_ioctl_desc i810_ioctls[];
-extern int i810_max_ioctl;
-
-#define I810_BASE(reg) ((unsigned long) \
- dev_priv->mmio_map->handle)
-#define I810_ADDR(reg) (I810_BASE(reg) + reg)
-#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg))
-#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0)
-#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg))
-#define I810_READ16(reg) I810_DEREF16(reg)
-#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0)
-
-#define I810_VERBOSE 0
-#define RING_LOCALS unsigned int outring, ringmask; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
- if (dev_priv->ring.space < n*4) \
- i810_wait_ring(dev, n*4); \
- dev_priv->ring.space -= n*4; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING\n"); \
- dev_priv->ring.tail = outring; \
- I810_WRITE(LP_RING + RING_TAIL, outring); \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I810REG_HWSTAM 0x02098
-#define I810REG_INT_IDENTITY_R 0x020a4
-#define I810REG_INT_MASK_R 0x020a8
-#define I810REG_INT_ENABLE_R 0x020a0
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x000FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x000FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23))
-#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23))
-#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23))
-#define CMD_OP_WAIT_FOR_EVENT ((0x0<<29)|(0x03<<23))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-#define WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define WAIT_FOR_VBLANK (1<<3)
-
-#endif
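
The removed header above carries the i810 ring-buffer emit protocol in macro form: RING_LOCALS declares the cursor variables, BEGIN_LP_RING(n) waits for and reserves space, OUT_RING() stores a dword at the tail and wraps it with the ring mask, and ADVANCE_LP_RING() publishes the new tail to the LP_RING registers. A minimal standalone sketch of the same wrap-with-a-mask pattern, with the MMIO side left out; ring_emit() and the toy globals are purely illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy ring: power-of-two size so the tail can wrap with a simple mask. */
#define RING_SIZE 64

static uint8_t ring[RING_SIZE];
static unsigned int tail;			/* byte offset of the next write */
static const unsigned int mask = RING_SIZE - 1;

/*
 * BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() collapsed into one helper:
 * write dwords at the tail, wrapping with the mask. The real macros also
 * wait for free space and write the final tail to the LP_RING registers.
 */
static void ring_emit(const uint32_t *dw, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		memcpy(&ring[tail], &dw[i], sizeof(uint32_t));
		tail = (tail + 4) & mask;
	}
}

int main(void)
{
	/* e.g. INST_OP_FLUSH and INST_FLUSH_MAP_CACHE from the defines above */
	const uint32_t cmds[] = { 0x02000000, 0x00000001 };

	ring_emit(cmds, 2);
	printf("tail now at %u\n", tail);
	return 0;
}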
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 3efce05d7b57..e4f4d2e3fdfe 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -18,6 +18,8 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
+ select I2C
+ select I2C_ALGOBIT
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
@@ -54,23 +56,34 @@ config DRM_I915
If "M" is selected, the module will be called i915.
config DRM_I915_FORCE_PROBE
- string "Force probe driver for selected new Intel hardware"
+ string "Force probe i915 for selected Intel hardware IDs"
depends on DRM_I915
help
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
- Force probe the driver for new Intel graphics devices that are
- recognized but not properly supported by this kernel version. It is
- recommended to upgrade to a kernel version with proper support as soon
- as it is available.
+ Force probe the i915 driver for Intel graphics devices that are
+ recognized but not properly supported by this kernel version. Force
+ probing an unsupported device taints the kernel. It is recommended to
+ upgrade to a kernel version with proper support as soon as it is
+ available.
+
+ It can also be used to block the probe of recognized and fully
+ supported devices.
Use "" to disable force probe. If in doubt, use this.
- Use "<pci-id>[,<pci-id>,...]" to force probe the driver for listed
+ Use "<pci-id>[,<pci-id>,...]" to force probe the i915 for listed
devices. For example, "4500" or "4500,4571".
- Use "*" to force probe the driver for all known devices.
+ Use "*" to force probe the driver for all known devices. Not
+ recommended.
+
+ Use "!" right before the ID to block the probe of the device. For
+ example, "4500,!4571" forces the probe of 4500 and blocks the probe of
+ 4571.
+
+ Use "!*" to block the probe of the driver for all known devices.
config DRM_I915_CAPTURE_ERROR
bool "Enable capturing GPU state following a hang"
@@ -107,18 +120,16 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
-config DRM_I915_GVT
- bool
-
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on 64BIT
depends on KVM
- depends on VFIO_MDEV
+ depends on VFIO
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
+ select VFIO_MDEV
help
Choose this option if you want to enable Intel GVT-g graphics
@@ -155,8 +166,5 @@ menu "drm/i915 Profile Guided Optimisation"
source "drivers/gpu/drm/i915/Kconfig.profile"
endmenu
-menu "drm/i915 Unstable Evolution"
- visible if EXPERT && STAGING && BROKEN
- depends on DRM_I915
- source "drivers/gpu/drm/i915/Kconfig.unstable"
-endmenu
+config DRM_I915_GVT
+ bool
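
The expanded DRM_I915_FORCE_PROBE help above defines a small grammar: a comma-separated list of hex PCI IDs, where a bare ID force-probes that device, a "!" prefix blocks it, and "*"/"!*" apply to every known device. A hedged sketch of how one device ID could be checked against such a string — this is not the i915 parser, and force_probe_decision()/token_matches() are invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum probe_decision { PROBE_DEFAULT, PROBE_FORCED, PROBE_BLOCKED };

/* "*" matches any device, otherwise compare against the hex PCI ID. */
static int token_matches(const char *tok, unsigned int devid)
{
	if (strcmp(tok, "*") == 0)
		return 1;
	return strtoul(tok, NULL, 16) == devid;
}

static enum probe_decision force_probe_decision(const char *param, unsigned int devid)
{
	enum probe_decision decision = PROBE_DEFAULT;
	char buf[128], *s = buf, *tok;

	snprintf(buf, sizeof(buf), "%s", param);	/* strtok() modifies its input */

	while ((tok = strtok(s, ",")) != NULL) {
		int block = (tok[0] == '!');

		s = NULL;				/* subsequent strtok() calls take NULL */
		if (token_matches(block ? tok + 1 : tok, devid))
			decision = block ? PROBE_BLOCKED : PROBE_FORCED;
	}
	return decision;
}

int main(void)
{
	printf("%d\n", force_probe_decision("4500,!4571", 0x4500));	/* 1: forced  */
	printf("%d\n", force_probe_decision("4500,!4571", 0x4571));	/* 2: blocked */
	printf("%d\n", force_probe_decision("!*", 0x9a49));		/* 2: blocked */
	return 0;
}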
diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable
deleted file mode 100644
index cf151a297ed7..000000000000
--- a/drivers/gpu/drm/i915/Kconfig.unstable
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_I915_UNSTABLE
- bool "Enable unstable API for early prototype development"
- depends on EXPERT
- depends on STAGING
- depends on BROKEN # should never be enabled by distros!
- # We use the dependency on !COMPILE_TEST to not be enabled in
- # allmodconfig or allyesconfig configurations
- depends on !COMPILE_TEST
- default n
- help
- Enable prototype uAPI under general discussion before they are
- finalized. Such prototypes may be withdrawn or substantially
- changed before release. They are only enabled here so that a wide
- number of interested parties (userspace driver developers) can
- verify that the uAPI meet their expectations. These uAPI should
- never be used in production.
-
- Recommended for driver developers _only_.
-
- If in the slightest bit of doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 01974b82d205..97b0d4ae221a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -47,12 +47,10 @@ i915-y += i915_driver.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
+ intel_clock_gating.o \
intel_device_info.o \
- intel_dram.o \
intel_memory_region.o \
- intel_pch.o \
intel_pcode.o \
- intel_pm.o \
intel_region_ttm.o \
intel_runtime_pm.o \
intel_sbi.o \
@@ -62,6 +60,12 @@ i915-y += i915_driver.o \
vlv_sideband.o \
vlv_suspend.o
+# core peripheral code
+i915-y += \
+ soc/intel_dram.o \
+ soc/intel_gmch.o \
+ soc/intel_pch.o
+
# core library code
i915-y += \
i915_memcpy.o \
@@ -188,9 +192,10 @@ i915-y += \
i915_vma_resource.o
# general-purpose microcontroller (GuC) support
-i915-y += gt/uc/intel_uc.o \
- gt/uc/intel_uc_debugfs.o \
- gt/uc/intel_uc_fw.o \
+i915-y += \
+ gt/uc/intel_gsc_fw.o \
+ gt/uc/intel_gsc_uc.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o\
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \
@@ -205,7 +210,10 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_guc_submission.o \
gt/uc/intel_huc.o \
gt/uc/intel_huc_debugfs.o \
- gt/uc/intel_huc_fw.o
+ gt/uc/intel_huc_fw.o \
+ gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
+ gt/uc/intel_uc_fw.o
# graphics system controller (GSC) support
i915-y += gt/intel_gsc.o
@@ -232,6 +240,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
+ display/intel_display_rps.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
@@ -247,6 +256,7 @@ i915-y += \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
+ display/intel_hdcp_gsc.o \
display/intel_hotplug.o \
display/intel_hti.o \
display/intel_lpe_audio.o \
@@ -259,9 +269,13 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
+ display/intel_sprite_uapi.o \
display/intel_tc.o \
+ display/intel_vblank.o \
display/intel_vga.o \
+ display/intel_wm.o \
display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
@@ -355,6 +369,13 @@ include $(src)/gvt/Makefile
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
+# kernel-doc test
+#
+# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
+ifdef CONFIG_DRM_I915_WERROR
+ cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $<
+endif
+
# header test
# exclude some broken headers from the test coverage
@@ -366,7 +387,8 @@ always-$(CONFIG_DRM_I915_WERROR) += \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
- cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \
+ $(srctree)/scripts/kernel-doc -none $<; touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 54f58ba44b9f..6d948520e9a6 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -50,15 +50,26 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define CH7xxx_INPUT_CLOCK 0x1d
#define CH7xxx_GPIO 0x1e
#define CH7xxx_GPIO_HPIR (1<<3)
-#define CH7xxx_IDF 0x1f
+#define CH7xxx_IDF 0x1f
+#define CH7xxx_IDF_IBS (1<<7)
+#define CH7xxx_IDF_DES (1<<6)
#define CH7xxx_IDF_HSP (1<<3)
#define CH7xxx_IDF_VSP (1<<4)
#define CH7xxx_CONNECTION_DETECT 0x20
#define CH7xxx_CDET_DVI (1<<5)
-#define CH7301_DAC_CNTL 0x21
+#define CH7xxx_DAC_CNTL 0x21
+#define CH7xxx_SYNCO_MASK (3 << 3)
+#define CH7xxx_SYNCO_VGA_HSYNC (1 << 3)
+
+#define CH7xxx_CLOCK_OUTPUT 0x22
+#define CH7xxx_BCOEN (1 << 4)
+#define CH7xxx_BCOP (1 << 3)
+#define CH7xxx_BCO_MASK (7 << 0)
+#define CH7xxx_BCO_VGA_VSYNC (6 << 0)
+
#define CH7301_HOTPLUG 0x23
#define CH7xxx_TCTL 0x31
#define CH7xxx_TVCO 0x32
@@ -301,6 +312,8 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+ idf |= CH7xxx_IDF_IBS;
+
idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
idf |= CH7xxx_IDF_HSP;
@@ -309,6 +322,11 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+
+ ch7xxx_writeb(dvo, CH7xxx_DAC_CNTL,
+ CH7xxx_SYNCO_VGA_HSYNC);
+ ch7xxx_writeb(dvo, CH7xxx_CLOCK_OUTPUT,
+ CH7xxx_BCOEN | CH7xxx_BCO_VGA_VSYNC);
}
/* set the CH7xxx power state */
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 0dfa0a0209ff..4acc8ce29c0b 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -58,6 +58,10 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_9_MDI (1<<0)
#define SIL164_REGC 0x0c
+#define SIL164_C_SCNT (1<<7)
+#define SIL164_C_PLLF_MASK (0xf<<1)
+#define SIL164_C_PLLF_REC (4<<1)
+#define SIL164_C_PFEN (1<<0)
struct sil164_priv {
//I2CDevRec d;
@@ -205,7 +209,13 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
sil164_writeb(sil, 0x0c, 0x89);
sil164_writeb(sil, 0x08, 0x31);*/
/* don't do much */
- return;
+
+ sil164_writeb(dvo, SIL164_REG8,
+ SIL164_8_VEN | SIL164_8_HEN);
+ sil164_writeb(dvo, SIL164_REG9,
+ SIL164_9_TSEL);
+ sil164_writeb(dvo, SIL164_REGC,
+ SIL164_C_PLLF_REC | SIL164_C_PFEN);
}
/* set the SIL164 power state */
@@ -224,7 +234,6 @@ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
ch &= ~SIL164_8_PD;
sil164_writeb(dvo, SIL164_REG8, ch);
- return;
}
static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 24ef36ec2d3d..920d570f7594 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -17,6 +17,7 @@
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@@ -136,16 +137,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp;
-
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- trans_dp |= TRANS_DP_ENH_FRAMING;
- else
- trans_dp &= ~TRANS_DP_ENH_FRAMING;
- intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ TRANS_DP_ENH_FRAMING,
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd) ?
+ TRANS_DP_ENH_FRAMING : 0);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -398,6 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (intel_dp_is_edp(intel_dp))
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
static void
@@ -1198,29 +1197,6 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
-static bool gm45_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
-}
-
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1277,11 +1253,19 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return false;
@@ -1293,6 +1277,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -1375,10 +1361,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- dig_port->connected = gm45_digital_port_connected;
- else
- dig_port->connected = g4x_digital_port_connected;
+ dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
dig_port->connected = ilk_digital_port_connected;
@@ -1389,7 +1372,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
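
The intel_dp_prepare() hunk above (and the cpt_enable_hdmi() hunks in g4x_hdmi.c below) replace open-coded read/modify/write sequences on TRANS_DP_CTL and TRANS_CHICKEN1 with intel_de_rmw(dev_priv, reg, clear, set). A generic standalone sketch of the logic such a helper folds up; reg_read()/reg_write() and the dummy register file are stand-ins so the sketch compiles, not i915 functions:

#include <stdint.h>
#include <stdio.h>

/* Dummy register file so the sketch is self-contained. */
static uint32_t regs[0x10000 / 4];

static uint32_t reg_read(uint32_t reg)             { return regs[reg / 4]; }
static void reg_write(uint32_t reg, uint32_t val)  { regs[reg / 4] = val; }

/* Read the register, drop the bits in 'clear', turn on the bits in 'set', write back. */
static void rmw(uint32_t reg, uint32_t clear, uint32_t set)
{
	uint32_t val = reg_read(reg);

	val &= ~clear;
	val |= set;
	reg_write(reg, val);
}

int main(void)
{
	reg_write(0x2030, 0xff);
	rmw(0x2030, 0x0f, 0x100);		/* clear the low nibble, set bit 8 */
	printf("0x%x\n", reg_read(0x2030));	/* prints 0x1f0 */
	return 0;
}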
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index c3580d96765c..448ea26786e0 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
@@ -155,6 +156,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_VENDOR,
&pipe_config->infoframes.hdmi);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
@@ -271,8 +274,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24) {
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
@@ -288,8 +291,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
@@ -546,10 +549,18 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return;
@@ -562,6 +573,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -627,6 +640,6 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
intel_hdmi_init_connector(dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 83aa3800245f..8eca0de065b6 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -14,6 +14,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 val;
if (!crtc_state->ips_enabled)
return;
@@ -26,10 +27,15 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&i915->drm,
!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+ val = IPS_ENABLE;
+
+ if (i915->display.ips.false_color)
+ val |= IPS_FALSE_COLOR;
+
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
+ val | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -37,7 +43,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* so we need to just enable it and continue on.
*/
} else {
- intel_de_write(i915, IPS_CTL, IPS_ENABLE);
+ intel_de_write(i915, IPS_CTL, val);
/*
* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
@@ -267,3 +273,87 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
crtc_state->ips_enabled = true;
}
}
+
+static int hsw_ips_debugfs_false_color_get(void *data, u64 *val)
+{
+ struct intel_crtc *crtc = data;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ *val = i915->display.ips.false_color;
+
+ return 0;
+}
+
+static int hsw_ips_debugfs_false_color_set(void *data, u64 val)
+{
+ struct intel_crtc *crtc = data;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (ret)
+ return ret;
+
+ i915->display.ips.false_color = val;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->hw.active)
+ goto unlock;
+
+ if (crtc_state->uapi.commit &&
+ !try_wait_for_completion(&crtc_state->uapi.commit->hw_done))
+ goto unlock;
+
+ hsw_ips_enable(crtc_state);
+
+ unlock:
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops,
+ hsw_ips_debugfs_false_color_get,
+ hsw_ips_debugfs_false_color_set,
+ "%llu\n");
+
+static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_crtc *crtc = m->private;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ str_yes_no(i915->params.enable_ips));
+
+ if (DISPLAY_VER(i915) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status);
+
+void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc)
+{
+ if (!hsw_crtc_supports_ips(crtc))
+ return;
+
+ debugfs_create_file("i915_ips_false_color", 0644, crtc->base.debugfs_entry,
+ crtc, &hsw_ips_debugfs_false_color_fops);
+
+ debugfs_create_file("i915_ips_status", 0444, crtc->base.debugfs_entry,
+ crtc, &hsw_ips_debugfs_status_fops);
+}
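
hsw_ips_crtc_debugfs_add() above registers two per-CRTC debugfs files, i915_ips_false_color (0644) and i915_ips_status (0444). A small userspace sketch for poking the false-color knob; the path assumes the usual DRM debugfs layout (/sys/kernel/debug/dri/<minor>/crtc-<n>/), which may differ on a given system:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; pick the right card minor and CRTC index. */
	const char *path = "/sys/kernel/debug/dri/0/crtc-0/i915_ips_false_color";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* The set callback stores the flag and re-applies IPS with IPS_FALSE_COLOR. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}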
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
index 4564dee497d7..4eb83b350791 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.h
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -22,5 +22,6 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
+void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc);
#endif /* __HSW_IPS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 73c88b1c9545..caef72d38798 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -1,48 +1,18 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2012 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eugeni Dodonov <eugeni.dodonov@intel.com>
- *
+ * Copyright © 2023 Intel Corporation
*/
-#include "display/intel_de.h"
-#include "display/intel_display_trace.h"
-#include "display/skl_watermark.h"
-
-#include "gt/intel_engine_regs.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_gt_mcr.h"
-#include "gt/intel_gt_regs.h"
-
#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_atomic.h"
+#include "intel_display.h"
+#include "intel_display_trace.h"
#include "intel_mchbar_regs.h"
-#include "intel_pm.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
-struct drm_i915_clock_gating_funcs {
- void (*init_clock_gating)(struct drm_i915_private *i915);
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
@@ -50,180 +20,16 @@ struct intel_wm_config {
bool sprites_scaled;
};
-static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- if (HAS_LLC(dev_priv)) {
- /*
- * WaCompressedResourceDisplayNewHashMode:skl,kbl
- * Display WA #0390: skl,kbl
- *
- * Must match Sampler, Pixel Back End, and Media. See
- * WaCompressedResourceSamplerPbeMediaNewHashMode.
- */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR1_1, 0, SKL_DE_COMPRESSED_HASH_MODE);
- }
-
- /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR1_1, 0, SKL_EDP_PSR_FIX_RDWRAP);
-
- /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
- intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, 0, MASK_WAKEMEM);
-
- /*
- * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
- * Display WA #0859: skl,bxt,kbl,glk,cfl
- */
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_MEMORY_WAKE);
-}
-
-static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen9_init_clock_gating(dev_priv);
-
- /* WaDisableSDEUnitClockGating:bxt */
- intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * FIXME:
- * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
- */
- intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
-
- /*
- * Wa: Backlight PWM may stop in the asserted state, causing backlight
- * to stay fully on.
- */
- intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
- PWM1_GATING_DIS | PWM2_GATING_DIS);
-
- /*
- * Lower the display internal timeout.
- * This is needed to avoid any hard hangs when DSI port PLL
- * is off and a MMIO access is attempted by any privilege
- * application, using batch buffers or any other means.
- */
- intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
-
- /*
- * WaFbcTurnOffFbcWatermark:bxt
- * Display WA #0562: bxt
- */
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:bxt
- * Display WA #0883: bxt
- */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
-}
-
-static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen9_init_clock_gating(dev_priv);
-
- /*
- * WaDisablePWMClockGating:glk
- * Backlight PWM may stop in the asserted state, causing backlight
- * to stay fully on.
- */
- intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
- PWM1_GATING_DIS | PWM2_GATING_DIS);
-}
-
-static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u16 ddrpll, csipll;
-
- ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
- csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-}
+struct cxsr_latency {
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
+};
static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
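
The cxsr_latency structure moved above keys the self-refresh latency table on four inputs (desktop vs. mobile, DDR3 or not, FSB frequency, memory frequency) and yields the display/cursor latencies used for CxSR watermarks. A standalone sketch of such a table lookup, using only the one row shown above; lookup_cxsr_latency() is a hypothetical helper, not the driver's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cxsr_latency_example {
	bool is_desktop;
	bool is_ddr3;
	uint16_t fsb_freq;
	uint16_t mem_freq;
	uint16_t display_sr;
	uint16_t display_hpll_disable;
	uint16_t cursor_sr;
	uint16_t cursor_hpll_disable;
};

static const struct cxsr_latency_example table[] = {
	{ true, false, 800, 400, 3382, 33382, 3983, 33983 },	/* DDR2-400 SC */
};

static const struct cxsr_latency_example *
lookup_cxsr_latency(bool is_desktop, bool is_ddr3, int fsb, int mem)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct cxsr_latency_example *e = &table[i];

		if (e->is_desktop == is_desktop && e->is_ddr3 == is_ddr3 &&
		    e->fsb_freq == fsb && e->mem_freq == mem)
			return e;
	}
	return NULL;	/* unknown configuration: no self-refresh latencies */
}

int main(void)
{
	const struct cxsr_latency_example *e = lookup_cxsr_latency(true, false, 800, 400);

	if (e)
		printf("display SR latency: %u\n", e->display_sr);
	return 0;
}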
@@ -360,7 +166,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
} else if (IS_I915GM(dev_priv)) {
/*
* FIXME can't find a bit like this for 915G, and
- * and yet it does have the related watermark in
+ * yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
@@ -606,7 +412,7 @@ static const struct intel_watermark_params i830_a_wm_info = {
static const struct intel_watermark_params i830_bc_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM/2,
+ .max_wm = I915_MAX_WM / 2,
.default_wm = 1,
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
@@ -786,34 +592,6 @@ static bool is_enabling(int old, int new, int threshold)
return old < threshold && new >= threshold;
}
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
- return dev_priv->display.wm.max_level + 1;
-}
-
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-
- /* FIXME check the 'enable' instead */
- if (!crtc_state->hw.active)
- return false;
-
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (plane->id == PLANE_CURSOR)
- return plane_state->hw.fb != NULL;
- else
- return plane_state->uapi.visible;
-}
-
static bool intel_crtc_active(struct intel_crtc *crtc)
{
/* Be paranoid as we can arrive here with only partial
@@ -938,22 +716,22 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
intel_uncore_write(&dev_priv->uncore, DSPFW2,
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
intel_uncore_write(&dev_priv->uncore, DSPFW3,
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
@@ -970,10 +748,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -988,50 +766,50 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
intel_uncore_write(&dev_priv->uncore, DSPFW2,
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
intel_uncore_write(&dev_priv->uncore, DSPFW3,
- FW_WM(wm->sr.cursor, CURSOR_SR));
+ FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
@@ -1046,7 +824,7 @@ static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
+ dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -1153,7 +931,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
- for (; level < intel_wm_num_levels(dev_priv); level++) {
+ for (; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1172,7 +950,7 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < intel_wm_num_levels(dev_priv); level++) {
+ for (; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1191,7 +969,6 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1203,7 +980,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < num_levels; level++) {
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1273,7 +1050,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (level > dev_priv->display.wm.max_level)
+ if (level >= dev_priv->display.wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1609,13 +1386,13 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
/* all latencies in usec */
dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
@@ -1751,7 +1528,7 @@ static void vlv_invalidate_wms(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- for (; level < intel_wm_num_levels(dev_priv); level++) {
+ for (; level < dev_priv->display.wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1778,10 +1555,9 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(dev_priv);
bool dirty = false;
- for (; level < num_levels; level++) {
+ for (; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1797,7 +1573,6 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
int level;
bool dirty = false;
@@ -1806,7 +1581,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < num_levels; level++) {
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1865,7 +1640,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = intel_wm_num_levels(dev_priv);
+ wm_state->num_levels = dev_priv->display.wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -2128,7 +1903,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.max_level;
+ wm->level = dev_priv->display.wm.num_levels - 1;
wm->cxsr = true;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2835,6 +2610,8 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
u64 sskpd;
+ i915->display.wm.num_levels = 5;
+
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
@@ -2850,6 +2627,8 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
u32 sskpd;
+ i915->display.wm.num_levels = 4;
+
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
@@ -2862,6 +2641,8 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
u32 mltr;
+ i915->display.wm.num_levels = 3;
+
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2886,61 +2667,16 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
wm[0] = 13;
}
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
- /* how many WM levels are we expecting */
- if (HAS_HW_SAGV_WM(dev_priv))
- return 5;
- else if (DISPLAY_VER(dev_priv) >= 9)
- return 7;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 4;
- else if (DISPLAY_VER(dev_priv) >= 6)
- return 3;
- else
- return 2;
-}
-
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[])
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = wm[level];
-
- if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency not provided\n",
- name, level);
- continue;
- }
-
- /*
- * - latencies are in us on gen9.
- * - before then, WM1+ latency values are in 0.5us units
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency %u (%u.%u usec)\n", name, level,
- wm[level], latency / 10, latency % 10);
- }
-}
-
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[5], u16 min)
{
- int level, max_level = ilk_wm_max_level(dev_priv);
+ int level;
if (wm[0] >= min)
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level <= max_level; level++)
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
@@ -3060,8 +2796,8 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
const struct intel_plane_state *pristate = NULL;
const struct intel_plane_state *sprstate = NULL;
const struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
+ int level, usable_level;
pipe_wm = &crtc_state->wm.ilk.optimal;
@@ -3078,7 +2814,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = max_level;
+ usable_level = dev_priv->display.wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
@@ -3132,7 +2868,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
+ int level;
/*
* Start with the final, target watermarks, then combine with the
@@ -3149,7 +2885,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
struct intel_wm_level *a_wm = &a->wm[level];
const struct intel_wm_level *b_wm = &b->wm[level];
@@ -3220,8 +2956,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int last_enabled_level = max_level;
+ int level, num_levels = dev_priv->display.wm.num_levels;
+ int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
@@ -3232,7 +2968,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
/* merge each WM1+ level */
- for (level = 1; level <= max_level; level++) {
+ for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
ilk_merge_wm_level(dev_priv, level, wm);
@@ -3257,7 +2993,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
- for (level = 2; level <= max_level; level++) {
+ for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
wm->enable = false;
@@ -3345,17 +3081,18 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
}
}
-/* Find the result with the highest level enabled. Check for enable_fbc_wm in
- * case both are at the same level. Prefer r1 in case they're the same. */
+/*
+ * Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same.
+ */
static struct intel_pipe_wm *
ilk_find_best_result(struct drm_i915_private *dev_priv,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int level1 = 0, level2 = 0;
+ int level, level1 = 0, level2 = 0;
- for (level = 1; level <= max_level; level++) {
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3629,20 +3366,141 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
} else {
- int level, max_level = ilk_wm_max_level(dev_priv);
+ int level;
/*
* For inactive pipes, all watermark levels
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
active->wm[level].enable = true;
}
crtc->wm.active.ilk = *active;
}
+static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WM's should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ return;
+
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
+
+ intel_state = to_intel_atomic_state(state);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ if (!HAS_GMCH(dev_priv))
+ intel_state->skip_intermediate_wm = true;
+
+ ret = ilk_sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
+
+ /* Write calculated watermark values back */
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ intel_optimize_watermarks(intel_state, crtc);
+
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
+ }
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
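The error handling in ilk_wm_sanitize() follows the usual DRM modeset-lock retry pattern: on -EDEADLK the partially built atomic state is cleared, the acquire context backs off, and the whole check is retried. A condensed sketch of that pattern, using the same DRM helpers the function calls (do_the_checks() is a hypothetical stand-in for the body):

	drm_modeset_acquire_init(&ctx, 0);
retry:
	state->acquire_ctx = &ctx;
	ret = do_the_checks(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);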
#define _FW_WM(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
@@ -3750,7 +3608,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
struct intel_crtc *crtc;
@@ -3843,7 +3701,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
str_yes_no(wm->fbc_en));
}
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
struct intel_plane *plane;
struct intel_crtc *crtc;
@@ -3858,12 +3716,12 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
+ int level;
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < num_levels; level++) {
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3892,7 +3750,13 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+}
+
+static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
struct intel_crtc *crtc;
@@ -3928,7 +3792,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -3992,7 +3856,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
struct intel_plane *plane;
struct intel_crtc *crtc;
@@ -4007,12 +3871,12 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
+ int level;
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < num_levels; level++) {
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4038,6 +3902,12 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
+static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+}
+
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
@@ -4054,7 +3924,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc *crtc;
@@ -4075,811 +3945,24 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
-static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(pipe), 0, DISP_TRICKLE_FEED_DISABLE);
-
- intel_uncore_rmw(&dev_priv->uncore, DSPSURF(pipe), 0, 0);
- intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
- }
-}
-
-static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
- /*
- * Required for FBC
- * WaFbcDisableDpfcClockGating:ilk
- */
- dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
- ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
- ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
-
- intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
- MARIUNIT_CLOCK_GATE_DISABLE |
- SVSMUNIT_CLOCK_GATE_DISABLE);
- intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
- VFMUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * According to the spec the following bits should be set in
- * order to enable memory self-refresh
- * The bit 22/21 of 0x42004
- * The bit 5 of 0x42020
- * The bit 15 of 0x45000
- */
- intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
- (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
- intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
- (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
- DISP_FBC_WM_DIS));
-
- /*
- * Based on the document from hardware guys the following bits
- * should be set unconditionally in order to enable FBC.
- * The bit 22 of 0x42000
- * The bit 22 of 0x42004
- * The bit 7,8,9 of 0x42020.
- */
- if (IS_IRONLAKE_M(dev_priv)) {
- /* WaFbcAsynchFlipDisableFbcQueue:ilk */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 0, ILK_FBCQ_DIS);
- intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_DPARB_GATE);
- }
-
- intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
-
- intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_ELPIN_409_SELECT);
-
- g4x_disable_trickle_feed(dev_priv);
-
- ibx_init_clock_gating(dev_priv);
-}
-
-static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
- u32 val;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
- PCH_DPLUNIT_CLOCK_GATE_DISABLE |
- PCH_CPUNIT_CLOCK_GATE_DISABLE);
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN2, 0, DPLS_EDP_PPS_FIX_DIS);
- /* The below fixes the weird display corruption, a few pixels shifted
- * downward, on (only) LVDS of some HP laptops with IVY.
- */
- for_each_pipe(dev_priv, pipe) {
- val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (dev_priv->display.vbt.fdi_rx_polarity_inverted)
- val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
- val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
- intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
- }
- /* WADP0ClockGatingDisable */
- for_each_pipe(dev_priv, pipe) {
- intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
- TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
- }
-}
-
-static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
- if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
- drm_dbg_kms(&dev_priv->drm,
- "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
- tmp);
-}
-
-static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
- intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
-
- intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_ELPIN_409_SELECT);
-
- intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
- intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
- GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE);
-
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- *
- * WaDisableRCCUnitClockGating:snb
- * WaDisableRCPBUnitClockGating:snb
- */
- intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * According to the spec the following bits should be
- * set in order to enable memory self-refresh and fbc:
- * The bit21 and bit22 of 0x42000
- * The bit21 and bit22 of 0x42004
- * The bit5 and bit7 of 0x42020
- * The bit14 of 0x70180
- * The bit14 of 0x71180
- *
- * WaFbcAsynchFlipDisableFbcQueue:snb
- */
- intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
- intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
- intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
- intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
- ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
- ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
-
- g4x_disable_trickle_feed(dev_priv);
-
- cpt_init_clock_gating(dev_priv);
-
- gen6_check_mch_setup(dev_priv);
-}
-
-static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /*
- * TODO: this bit should only be enabled when really needed, then
- * disabled when not needed anymore in order to save power.
- */
- if (HAS_PCH_LPT_LP(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
- 0, PCH_LP_PARTITION_LEVEL_DISABLE);
-
- /* WADPOClockGatingDisable:hsw */
- intel_uncore_rmw(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
- 0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
-}
-
-static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
-
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
- }
-}
-
-static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
- int general_prio_credits,
- int high_prio_credits)
-{
- u32 misccpctl;
- u32 val;
-
- /* WaTempDisableDOPClkGating:bdw */
- misccpctl = intel_gt_mcr_multicast_rmw(to_gt(dev_priv), GEN8_MISCCPCTL,
- GEN8_DOP_CLOCK_GATE_ENABLE, 0);
-
- val = intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1);
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
- val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
- intel_gt_mcr_multicast_write(to_gt(dev_priv), GEN8_L3SQCREG1, val);
-
- /*
- * Wait at least 100 clocks before re-enabling clock gating.
- * See the definition of L3SQCREG1 in BSpec.
- */
- intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1);
- udelay(1);
- intel_gt_mcr_multicast_write(to_gt(dev_priv), GEN8_MISCCPCTL, misccpctl);
-}
-
-static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* Wa_1409120013:icl,ehl */
- intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
- /*Wa_14010594013:icl, ehl */
- intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
- 0, ICL_DELAY_PMRSP);
-}
-
-static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* Wa_1409120013 */
- if (DISPLAY_VER(dev_priv) == 12)
- intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
- /* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS);
-
- /* Wa_14013723622:tgl,rkl,dg1,adl-s */
- if (DISPLAY_VER(dev_priv) == 12)
- intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
- CLKREQ_POLICY_MEM_UP_OVRD, 0);
-}
-
-static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen12lp_init_clock_gating(dev_priv);
-
- /* Wa_22011091694:adlp */
- intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
-
- /* Bspec/49189 Initialize Sequence */
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
-}
-
-static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen12lp_init_clock_gating(dev_priv);
-
- /* Wa_1409836686:dg1[a0] */
- if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS);
-}
-
-static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* Wa_22010146351:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-}
-
-static void dg2_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_22010954014:dg2 */
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
- SGSI_SIDECLK_DIS);
-
- /*
- * Wa_14010733611:dg2_g10
- * Wa_22010146351:dg2_g10
- */
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
- SGR_DIS | SGGI_DIS);
-}
-
-static void pvc_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* Wa_14012385139:pvc */
- if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-
- /* Wa_22010954014:pvc */
- if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
-}
-
-static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PCH_CNP(dev_priv))
- return;
-
- /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, 0, CNP_PWM_CGE_GATING_DISABLE);
-}
-
-static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- cnp_init_clock_gating(dev_priv);
- gen9_init_clock_gating(dev_priv);
-
- /* WAC6entrylatency:cfl */
- intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
-
- /*
- * WaFbcTurnOffFbcWatermark:cfl
- * Display WA #0562: cfl
- */
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:cfl
- * Display WA #0873: cfl
- */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
-}
-
-static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen9_init_clock_gating(dev_priv);
-
- /* WAC6entrylatency:kbl */
- intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
-
- /* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
- intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6,
- 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableGamClockGating:kbl */
- if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
- intel_uncore_rmw(&dev_priv->uncore, GEN6_UCGCTL1,
- 0, GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * WaFbcTurnOffFbcWatermark:kbl
- * Display WA #0562: kbl
- */
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:kbl
- * Display WA #0873: kbl
- */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
-}
-
-static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen9_init_clock_gating(dev_priv);
-
- /* WaDisableDopClockGating:skl */
- intel_gt_mcr_multicast_rmw(to_gt(dev_priv), GEN8_MISCCPCTL,
- GEN8_DOP_CLOCK_GATE_ENABLE, 0);
-
- /* WAC6entrylatency:skl */
- intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
-
- /*
- * WaFbcTurnOffFbcWatermark:skl
- * Display WA #0562: skl
- */
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:skl
- * Display WA #0873: skl
- */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:skl
- * Display WA #0883: skl
- */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
-}
-
-static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
-
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS);
-
- /* WaSwitchSolVfFArbitrationPriority:bdw */
- intel_uncore_rmw(&dev_priv->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL);
-
- /* WaPsrDPAMaskVBlankInSRD:bdw */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR1_1, 0, DPA_MASK_VBLANK_SRD);
-
- for_each_pipe(dev_priv, pipe) {
- /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
- 0, BDW_DPRS_MASK_VBLANK_SRD);
- }
-
- /* WaVSRefCountFullforceMissDisable:bdw */
- /* WaDSRefCountFullforceMissDisable:bdw */
- intel_uncore_rmw(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
- GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0);
-
- intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
-
- /* WaDisableSDEUnitClockGating:bdw */
- intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /* WaProgramL3SqcReg1Default:bdw */
- gen8_set_l3sqc_credits(dev_priv, 30, 2);
-
- /* WaKVMNotificationOnConfigChange:bdw */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR2_1,
- 0, KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
-
- lpt_init_clock_gating(dev_priv);
-
- /* WaDisableDopClockGating:bdw
- *
- * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
- * clock gating.
- */
- intel_uncore_rmw(&dev_priv->uncore, GEN6_UCGCTL1, 0, GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS);
-
- /* This is required by WaCatErrorRejectionIssue:hsw */
- intel_uncore_rmw(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- /* WaSwitchSolVfFArbitrationPriority:hsw */
- intel_uncore_rmw(&dev_priv->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL);
-
- lpt_init_clock_gating(dev_priv);
-}
-
-static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
-
- /* WaFbcAsynchFlipDisableFbcQueue:ivb */
- intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 0, ILK_FBCQ_DIS);
-
- /* WaDisableBackToBackFlipFix:ivb */
- intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
- if (IS_IVB_GT1(dev_priv))
- intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- else {
- /* must write both registers */
- intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- }
-
- /*
- * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating:ivb workaround.
- */
- intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
- GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- /* This is required by WaCatErrorRejectionIssue:ivb */
- intel_uncore_rmw(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- g4x_disable_trickle_feed(dev_priv);
-
- intel_uncore_rmw(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK,
- GEN6_MBC_SNPCR_MED);
-
- if (!HAS_PCH_NOP(dev_priv))
- cpt_init_clock_gating(dev_priv);
-
- gen6_check_mch_setup(dev_priv);
-}
-
-static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* WaDisableBackToBackFlipFix:vlv */
- intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
- /* WaDisableDopClockGating:vlv */
- intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
- /* This is required by WaCatErrorRejectionIssue:vlv */
- intel_uncore_rmw(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- /*
- * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating:vlv workaround.
- */
- intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
- GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableL3Bank2xClockGate:vlv
- * Disabling L3 clock gating- MMIO 940c[25] = 1
- * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
- intel_uncore_rmw(&dev_priv->uncore, GEN7_UCGCTL4, 0, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
-
- /*
- * WaDisableVLVClockGating_VBIIssue:vlv
- * Disable clock gating on the GCFG unit to prevent a delay
- * in the reporting of vblank events.
- */
- intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
-}
-
-static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- /* WaVSRefCountFullforceMissDisable:chv */
- /* WaDSRefCountFullforceMissDisable:chv */
- intel_uncore_rmw(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
- GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0);
-
- /* WaDisableSemaphoreAndSyncFlipWait:chv */
- intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
-
- /* WaDisableCSUnitClockGating:chv */
- intel_uncore_rmw(&dev_priv->uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableSDEUnitClockGating:chv */
- intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * WaProgramL3SqcReg1Default:chv
- * See gfxspecs/Related Documents/Performance Guide/
- * LSQC Setting Recommendations.
- */
- gen8_set_l3sqc_credits(dev_priv, 38, 2);
-}
-
-static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 dspclk_gate;
-
- intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
- intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
- GS_UNIT_CLOCK_GATE_DISABLE |
- CL_UNIT_CLOCK_GATE_DISABLE);
- intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
- dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
- OVRUNIT_CLOCK_GATE_DISABLE |
- OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev_priv))
- dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate);
-
- g4x_disable_trickle_feed(dev_priv);
-}
-
-static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0);
- intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
- intel_uncore_write16(uncore, DEUC, 0);
- intel_uncore_write(uncore,
- MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
-
-static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- I965_RCC_CLOCK_GATE_DISABLE |
- I965_RCPB_CLOCK_GATE_DISABLE |
- I965_ISC_CLOCK_GATE_DISABLE |
- I965_FBC_CLOCK_GATE_DISABLE);
- intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
-
-static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
-
- dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
- DSTATE_DOT_CLOCK_GATING;
- intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
-
- if (IS_PINEVIEW(dev_priv))
- intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
-
- /* IIR "flip pending" means done if this bit is set */
- intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
- /* interrupts should cause a wake up from C3 */
- intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
-
- /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-
- intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
-
-static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-
- /* interrupts should cause a wake up from C3 */
- intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
- _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
-
- intel_uncore_write(&dev_priv->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
-
- /*
- * Have FBC ignore 3D activity since we use software
- * render tracking, and otherwise a pure 3D workload
- * (even if it just renders a single frame and then does
- * absolutely nothing) would not allow FBC to recompress
- * until a 2D blit occurs.
- */
- intel_uncore_write(&dev_priv->uncore, SCPD0,
- _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
-}
-
-static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- intel_uncore_write(&dev_priv->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
- _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
-}
-
-void intel_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
-}
-
-void intel_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT(dev_priv))
- lpt_suspend_hw(dev_priv);
-}
-
-static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- drm_dbg_kms(&dev_priv->drm,
- "No clock gating settings or workarounds applied.\n");
-}
-
-#define CG_FUNCS(platform) \
-static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
- .init_clock_gating = platform##_init_clock_gating, \
-}
-
-CG_FUNCS(pvc);
-CG_FUNCS(dg2);
-CG_FUNCS(xehpsdv);
-CG_FUNCS(adlp);
-CG_FUNCS(dg1);
-CG_FUNCS(gen12lp);
-CG_FUNCS(icl);
-CG_FUNCS(cfl);
-CG_FUNCS(skl);
-CG_FUNCS(kbl);
-CG_FUNCS(bxt);
-CG_FUNCS(glk);
-CG_FUNCS(bdw);
-CG_FUNCS(chv);
-CG_FUNCS(hsw);
-CG_FUNCS(ivb);
-CG_FUNCS(vlv);
-CG_FUNCS(gen6);
-CG_FUNCS(ilk);
-CG_FUNCS(g4x);
-CG_FUNCS(i965gm);
-CG_FUNCS(i965g);
-CG_FUNCS(gen3);
-CG_FUNCS(i85x);
-CG_FUNCS(i830);
-CG_FUNCS(nop);
-#undef CG_FUNCS
-
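For reference, the CG_FUNCS() helper being removed here simply stamps out one const vtable per platform; CG_FUNCS(ilk), for example, expands to:

	static const struct drm_i915_clock_gating_funcs ilk_clock_gating_funcs = {
		.init_clock_gating = ilk_init_clock_gating,
	};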
-/**
- * intel_init_clock_gating_hooks - setup the clock gating hooks
- * @dev_priv: device private
- *
- * Setup the hooks that configure which clocks of a given platform can be
- * gated and also apply various GT and display specific workarounds for these
- * platforms. Note that some GT specific workarounds are applied separately
- * when GPU contexts or batchbuffers start their execution.
- */
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
-{
- if (IS_PONTEVECCHIO(dev_priv))
- dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
- else if (IS_DG2(dev_priv))
- dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
- else if (IS_XEHPSDV(dev_priv))
- dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
- else if (IS_ALDERLAKE_P(dev_priv))
- dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
- else if (IS_DG1(dev_priv))
- dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
- else if (GRAPHICS_VER(dev_priv) == 12)
- dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
- else if (GRAPHICS_VER(dev_priv) == 11)
- dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
- else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
- dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
- else if (IS_SKYLAKE(dev_priv))
- dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
- else if (IS_KABYLAKE(dev_priv))
- dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
- else if (IS_BROXTON(dev_priv))
- dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
- else if (IS_GEMINILAKE(dev_priv))
- dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
- else if (IS_BROADWELL(dev_priv))
- dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
- else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
- else if (IS_HASWELL(dev_priv))
- dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
- else if (IS_IVYBRIDGE(dev_priv))
- dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
- else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
- else if (GRAPHICS_VER(dev_priv) == 6)
- dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
- else if (GRAPHICS_VER(dev_priv) == 5)
- dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
- else if (IS_G4X(dev_priv))
- dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
- else if (IS_I965GM(dev_priv))
- dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
- else if (IS_I965G(dev_priv))
- dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
- else if (GRAPHICS_VER(dev_priv) == 3)
- dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
- else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
- dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
- else if (GRAPHICS_VER(dev_priv) == 2)
- dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
- else {
- MISSING_CASE(INTEL_DEVID(dev_priv));
- dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
- }
-}
-
static const struct intel_wm_funcs ilk_wm_funcs = {
.compute_pipe_wm = ilk_compute_pipe_wm,
.compute_intermediate_wm = ilk_compute_intermediate_wm,
.initial_watermarks = ilk_initial_watermarks,
.optimize_watermarks = ilk_optimize_watermarks,
+ .get_hw_state = ilk_wm_get_hw_state,
};
static const struct intel_wm_funcs vlv_wm_funcs = {
@@ -4888,6 +3971,7 @@ static const struct intel_wm_funcs vlv_wm_funcs = {
.initial_watermarks = vlv_initial_watermarks,
.optimize_watermarks = vlv_optimize_watermarks,
.atomic_update_watermarks = vlv_atomic_update_fifo,
+ .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
};
static const struct intel_wm_funcs g4x_wm_funcs = {
@@ -4895,6 +3979,7 @@ static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_intermediate_wm = g4x_compute_intermediate_wm,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
+ .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
};
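With the new .get_hw_state hook wired into these vtables, the per-platform readout (and, for g4x/vlv, the follow-up sanitize) is reachable through one indirect call; a hypothetical caller would look roughly like:

	/* illustrative dispatch, assuming funcs.wm was set up by i9xx_wm_init() */
	if (i915->display.funcs.wm->get_hw_state)
		i915->display.funcs.wm->get_hw_state(i915);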
static const struct intel_wm_funcs pnv_wm_funcs = {
@@ -4916,35 +4001,12 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct drm_i915_private *dev_priv)
{
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_wm_init(dev_priv);
- return;
- }
-
- /* For cxsr */
- if (IS_PINEVIEW(dev_priv))
- pnv_get_mem_freq(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 5)
- ilk_get_mem_freq(dev_priv);
-
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
-
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
- dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
- dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.funcs.wm = &nop_funcs;
- }
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
dev_priv->display.funcs.wm = &vlv_wm_funcs;
@@ -4965,8 +4027,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.funcs.wm = &nop_funcs;
- } else
+ } else {
dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ }
} else if (DISPLAY_VER(dev_priv) == 4) {
dev_priv->display.funcs.wm = &i965_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 3) {
@@ -4982,9 +4045,3 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.funcs.wm = &nop_funcs;
}
}
-
-void intel_pm_setup(struct drm_i915_private *dev_priv)
-{
- dev_priv->runtime_pm.suspended = false;
- atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
-}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
new file mode 100644
index 000000000000..a7875cbcd05a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I9XX_WM_H__
+#define __I9XX_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+int ilk_wm_max_level(const struct drm_i915_private *i915);
+bool ilk_disable_lp_wm(struct drm_i915_private *i915);
+void ilk_wm_sanitize(struct drm_i915_private *i915);
+bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
+void i9xx_wm_init(struct drm_i915_private *i915);
+
+#endif /* __I9XX_WM_H__ */
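A hypothetical consumer of the new header (the calling functions are illustrative, not part of this patch):

	#include "i9xx_wm.h"

	static void example_display_init(struct drm_i915_private *i915)
	{
		i9xx_wm_init(i915);	/* pick the per-platform wm vtable */
	}

	static void example_after_hw_readout(struct drm_i915_private *i915)
	{
		ilk_wm_sanitize(i915);	/* recompute wm for the inherited state */
	}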
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index d16b30a2dded..c9aeba0ecf91 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -45,6 +45,7 @@
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -207,7 +208,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 tmp, mode_flags;
+ u32 mode_flags;
enum port port;
mode_flags = crtc_state->mode_flags;
@@ -224,9 +225,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
else
return;
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp |= DSI_FRAME_UPDATE_REQUEST;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST);
}
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
@@ -234,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
- u32 tmp;
+ u32 tmp, mask, val;
int lane;
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -242,56 +241,35 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programming
*/
+ mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
+ val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
+ RTERM_SELECT(0x6);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
-
+ mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK;
+ val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) |
+ RCOMP_SCALAR(0x98);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
-
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
-
- for (lane = 0; lane <= 3; lane++) {
- /* Bspec: must not use GRP register for write */
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK;
+ val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) |
+ CURSOR_COEFF(0x3f);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val);
+
+ /* Bspec: must not use GRP register for write */
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ mask, val);
}
}
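Most hunks in this file replace open-coded read/modify/write sequences with intel_de_rmw(). As the conversions above show, intel_de_rmw(dev_priv, reg, clear, set) is shorthand for roughly:

	tmp = intel_de_read(dev_priv, reg);
	tmp &= ~clear;
	tmp |= set;
	intel_de_write(dev_priv, reg, tmp);

so setting a bit becomes intel_de_rmw(dev_priv, reg, 0, bit) and clearing one becomes intel_de_rmw(dev_priv, reg, bit, 0).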
@@ -300,9 +278,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -310,7 +300,6 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -323,16 +312,14 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
- dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
- dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+ intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
+ RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth));
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -412,13 +399,10 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp |= COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ 0, COMBO_PHY_MODE_DSI);
get_dsi_io_power_domains(dev_priv, intel_dsi);
}
@@ -444,26 +428,16 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~LOADGEN_SELECT;
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
- for (lane = 0; lane <= 3; lane++) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~LOADGEN_SELECT;
- if (lane != 2)
- tmp |= LOADGEN_SELECT;
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0);
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0);
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~FRC_LATENCY_OPTIM_MASK;
- tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy),
+ FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5));
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
@@ -471,12 +445,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~LATENCY_OPTIM_MASK;
- tmp |= LATENCY_OPTIM_VAL(0);
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
- tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
tmp = intel_de_read(dev_priv,
ICL_PORT_PCS_DW1_LN(0, phy));
@@ -501,9 +471,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
}
/*
@@ -511,20 +479,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select programming is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- tmp |= SUS_CLOCK_CONFIG;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG);
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
}
/* Program swing and de-emphasis */
@@ -535,9 +498,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
}
}
@@ -545,13 +506,10 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp |= DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -567,17 +525,13 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
enum phy phy;
/* Program T-INIT master registers */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
- tmp &= ~DSI_T_INIT_MASTER_MASK;
- tmp |= intel_dsi->init_count;
- intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+ DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -608,31 +562,22 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv,
- DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DPHY_TA_TIMING_PARAM(port),
- tmp);
+ intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
/* shadow register inside display core */
- tmp = intel_de_read(dev_priv,
- DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
}
}
}
if (IS_JSL_EHL(dev_priv)) {
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
- tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
+ 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
}
}
@@ -824,11 +769,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp |= PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ 0, PORT_SYNC_MODE_ENABLE);
}
/* configure stream splitting */
@@ -958,8 +900,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans),
+ HACTIVE(hactive - 1) | HTOTAL(htotal - 1));
}
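The HACTIVE()/HTOTAL() helpers (and the matching HSYNC/VTOTAL/VSYNC/VBLANK ones below) pack the same bitfields the old open-coded expression built by hand; an illustrative sketch of the assumed layout, not the exact i915 definitions:

	/* old: (hactive - 1) | ((htotal - 1) << 16) */
	#define HACTIVE(x)	((x) << 0)	/* bits 15:0  */
	#define HTOTAL(x)	((x) << 16)	/* bits 31:16 */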
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -981,8 +923,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans),
+ HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1));
}
}
@@ -995,8 +937,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- intel_de_write(dev_priv, VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans),
+ VACTIVE(vactive - 1) | VTOTAL(vtotal - 1));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
@@ -1009,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans),
+ VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1));
}
}
@@ -1023,17 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans),
vsync_shift);
}
}
- /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ /*
+ * program TRANS_VBLANK register, should be same as vtotal programmed
+ *
+ * FIXME get rid of these local hacks and do it right,
+ * this will not handle e.g. delayed vblank correctly.
+ */
if (DISPLAY_VER(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans),
+ VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1));
}
}
}
@@ -1044,17 +991,14 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp |= PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(&dev_priv->drm,
"DSI transcoder not enabled\n");
}
@@ -1067,7 +1011,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+ u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
/*
* escape clock count calculation:
@@ -1087,26 +1031,23 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
- tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
- tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans),
+ HSTX_TIMEOUT_VALUE_MASK,
+ HSTX_TIMEOUT_VALUE(hs_tx_timeout));
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
- tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
- tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans),
+ LPRX_TIMEOUT_VALUE_MASK,
+ LPRX_TIMEOUT_VALUE(lp_rx_timeout));
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
- tmp &= ~TA_TIMEOUT_VALUE_MASK;
- tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans),
+ TA_TIMEOUT_VALUE_MASK,
+ TA_TIMEOUT_VALUE(ta_timeout));
}
}
@@ -1199,7 +1140,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
/* panel power on related mipi dsi vbt sequences */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
@@ -1310,19 +1251,16 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp &= ~PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(&dev_priv->drm,
"DSI trancoder not disabled\n");
}
@@ -1350,11 +1288,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port),
+ DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0);
}
/* put dsi link in ULPS */
@@ -1374,20 +1310,16 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
- tmp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans),
+ TRANS_DDI_FUNC_ENABLE, 0);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp &= ~PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ PORT_SYNC_MODE_ENABLE, 0);
}
}
}
@@ -1396,14 +1328,11 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -1420,7 +1349,6 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
intel_wakeref_t wakeref;
@@ -1434,11 +1362,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
/* set mode to DDI */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp &= ~COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ COMBO_PHY_MODE_DSI, 0);
}
static void gen11_dsi_disable(struct intel_atomic_state *state,
@@ -1574,7 +1500,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+ pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Get the details on which TE should be enabled */
if (is_cmd_mode(intel_dsi))
@@ -1626,8 +1552,6 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (crtc_state->dsc.slice_count > 1)
crtc_state->dsc.dsc_split = true;
- vdsc_cfg->convert_rgb = true;
-
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
@@ -1754,8 +1678,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- ret = tmp & PIPECONF_ENABLE;
+ tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans));
+ ret = tmp & TRANSCONF_ENABLE;
}
out:
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2043,7 +1967,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+ encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
@@ -2054,7 +1979,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 6621aa245caf..a9a3f3715279 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -41,6 +41,7 @@
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
+#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -310,11 +311,11 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
kfree(crtc_state);
}
-static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
- int num_scalers_need, struct intel_crtc *intel_crtc,
- const char *name, int idx,
- struct intel_plane_state *plane_state,
- int *scaler_id)
+static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int j;
@@ -334,7 +335,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
- return;
+ return -EINVAL;
/* set scaler mode */
if (plane_state && plane_state->hw.fb &&
@@ -375,9 +376,71 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
mode = SKL_PS_SCALER_MODE_DYN;
}
+ /*
+ * FIXME: we should also check the scaler factors for pfit, so
+ * this shouldn't be tied directly to planes.
+ */
+ if (plane_state && plane_state->hw.fb) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
+ int hscale, vscale, max_vscale, max_hscale;
+
+ /*
+ * FIXME: When two scalers are needed, but only one of
+ * them needs to downscale, we should make sure that
+ * the one that needs downscaling support is assigned
+ * as the first scaler, so we don't reject downscaling
+ * unnecessarily.
+ */
+
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ /*
+ * On versions 14 and up, only the first
+ * scaler supports a vertical scaling factor
+ * of more than 1.0, while a horizontal
+ * scaling factor of 3.0 is supported.
+ */
+ max_hscale = 0x30000 - 1;
+ if (*scaler_id == 0)
+ max_vscale = 0x30000 - 1;
+ else
+ max_vscale = 0x10000;
+
+ } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ max_hscale = 0x30000 - 1;
+ max_vscale = 0x30000 - 1;
+ } else {
+ max_hscale = 0x20000 - 1;
+ max_vscale = 0x20000 - 1;
+ }
+
+ /*
+ * FIXME: We should change the if-else block above to
+ * support HQ vs dynamic scaler properly.
+ */
+
+ /* Check if required scaling is within limits */
+ hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
+ vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
+
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Scaler %d doesn't support required plane scaling\n",
+ *scaler_id);
+ drm_rect_debug_print("src: ", src, true);
+ drm_rect_debug_print("dst: ", dst, false);
+
+ return -EINVAL;
+ }
+ }
+
drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
+
+ return 0;
}
/**
@@ -437,7 +500,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
int *scaler_id;
const char *name;
- int idx;
+ int idx, ret;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
@@ -494,9 +557,11 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
scaler_id = &plane_state->scaler_id;
}
- intel_atomic_setup_scaler(scaler_state, num_scalers_need,
- intel_crtc, name, idx,
- plane_state, scaler_id);
+ ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 10e1fc9d0698..f33164b10292 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -32,17 +32,17 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include "gt/intel_rps.h"
-
+#include "i915_config.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
-#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_watermark.h"
@@ -362,6 +362,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->scaled_planes &= ~BIT(plane->id);
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->async_flip_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
@@ -581,8 +582,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
intel_plane_is_scaled(new_plane_state))))
new_crtc_state->disable_lp_wm = true;
- if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
new_crtc_state->do_async_flip = true;
+ new_crtc_state->async_flip_planes |= BIT(plane->id);
+ }
return 0;
}
@@ -937,62 +940,62 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
*/
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
+ */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
- if (!dma_fence_is_i915(fence))
- return;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ }
- if (drm_crtc_vblank_get(crtc))
- return;
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
+ if (src_x % hsub || src_w % hsub) {
+ drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, str_yes_no(rotated));
+ return -EINVAL;
}
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
+ if (src_y % vsub || src_h % vsub) {
+ drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, str_yes_no(rotated));
+ return -EINVAL;
+ }
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+ return 0;
}
/**
@@ -1025,7 +1028,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
int ret;
if (old_obj) {
- const struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state,
to_intel_crtc(old_plane_state->hw.crtc));
@@ -1040,7 +1043,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (intel_crtc_needs_modeset(crtc_state)) {
+ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv,
false, 0,
@@ -1085,13 +1088,13 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
dma_resv_iter_begin(&cursor, obj->base.resv,
DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
}
dma_resv_iter_end(&cursor);
} else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -1102,10 +1105,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
- state->rps_interactive = true;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, true);
return 0;
@@ -1136,10 +1136,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (!obj)
return;
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
- state->rps_interactive = false;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, false);
/* Should only be called after a successful intel_prepare_plane_fb()! */
intel_plane_unpin_fb(old_plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 74b6d3b169a7..191dad0efc8e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -62,6 +62,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
bool can_position);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
void intel_plane_helper_add(struct intel_plane *plane);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 98c3322b4549..3d5a9bbc6fde 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -71,6 +71,8 @@ struct intel_audio_funcs {
void (*audio_codec_disable)(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+ void (*audio_codec_get_config)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
};
/* DP N/M table */
@@ -314,6 +316,27 @@ static int g4x_eld_buffer_size(struct drm_i915_private *i915)
return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
+static void g4x_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 *eld = (u32 *)crtc_state->eld;
+ int eld_buffer_size, len, i;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ if ((tmp & G4X_ELD_VALID) == 0)
+ return;
+
+ intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
+
+ eld_buffer_size = g4x_eld_buffer_size(i915);
+ len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size);
+
+ for (i = 0; i < len; i++)
+ eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID);
+}
+
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -335,8 +358,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const u32 *eld = (const u32 *)connector->eld;
+ const u32 *eld = (const u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
intel_crtc_wait_for_next_vblank(crtc);
@@ -345,7 +367,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
eld_buffer_size = g4x_eld_buffer_size(i915);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
+ len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
@@ -459,17 +481,6 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
-/* ELD buffer size in dwords */
-static int hsw_eld_buffer_size(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- u32 tmp;
-
- tmp = intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
-
- return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
-}
-
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -570,8 +581,7 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
@@ -581,32 +591,32 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val = intel_de_read(i915, AUD_CONFIG_BE);
if (DISPLAY_VER(i915) == 11)
- val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
else if (DISPLAY_VER(i915) >= 12)
- val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+ val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
crtc_state->hw.adjusted_mode.vdisplay >= 2160) {
/* Get hblank early enable value required */
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder);
hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
if (hblank_early_prog < 32)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32);
else if (hblank_early_prog < 64)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64);
else if (hblank_early_prog < 96)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96);
else
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128);
/* Get samples room value required */
- val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder);
samples_room = calc_samples_room(crtc_state);
if (samples_room < 3)
- val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room);
else /* Program 0 i.e "All Samples available in buffer" */
- val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
intel_de_write(i915, AUD_CONFIG_BE, val);
@@ -618,10 +628,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const u32 *eld = (const u32 *)connector->eld;
- int eld_buffer_size, len, i;
mutex_lock(&i915->display.audio.mutex);
@@ -639,25 +646,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
- /* Reset ELD address */
- intel_de_rmw(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder),
- IBX_ELD_ADDRESS_MASK, 0);
-
- eld_buffer_size = hsw_eld_buffer_size(i915, cpu_transcoder);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
-
- for (i = 0; i < len; i++)
- intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), eld[i]);
- for (; i < eld_buffer_size; i++)
- intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), 0);
-
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder)) &
- IBX_ELD_ADDRESS_MASK) != 0);
-
- /* ELD valid */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
- 0, AUDIO_ELD_VALID(cpu_transcoder));
+ /*
+ * The audio component is used to convey the ELD
+ * instead of using the hardware ELD buffer.
+ */
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
@@ -665,47 +657,33 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&i915->display.audio.mutex);
}
-struct ilk_audio_regs {
+struct ibx_audio_regs {
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
};
-static void ilk_audio_regs_init(struct drm_i915_private *i915,
+static void ibx_audio_regs_init(struct drm_i915_private *i915,
enum pipe pipe,
- struct ilk_audio_regs *regs)
+ struct ibx_audio_regs *regs)
{
- if (HAS_PCH_IBX(i915)) {
- regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- regs->aud_config = IBX_AUD_CFG(pipe);
- regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
+ } else if (HAS_PCH_CPT(i915)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ } else if (HAS_PCH_IBX(i915)) {
+ regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ regs->aud_config = IBX_AUD_CFG(pipe);
+ regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
}
}
-/* ELD buffer size in dwords */
-static int ilk_eld_buffer_size(struct drm_i915_private *i915,
- enum pipe pipe)
-{
- struct ilk_audio_regs regs;
- u32 tmp;
-
- ilk_audio_regs_init(i915, pipe, &regs);
-
- tmp = intel_de_read(i915, regs.aud_cntl_st);
-
- return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
-}
-
-static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+static void ibx_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -713,12 +691,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- struct ilk_audio_regs regs;
+ struct ibx_audio_regs regs;
if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
- ilk_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(i915, pipe, &regs);
mutex_lock(&i915->display.audio.mutex);
@@ -741,25 +719,22 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
intel_crtc_wait_for_next_vblank(crtc);
}
-static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+static void ibx_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const u32 *eld = (const u32 *)connector->eld;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- int eld_buffer_size, len, i;
- struct ilk_audio_regs regs;
+ struct ibx_audio_regs regs;
if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
intel_crtc_wait_for_next_vblank(crtc);
- ilk_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(i915, pipe, &regs);
mutex_lock(&i915->display.audio.mutex);
@@ -767,24 +742,10 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
intel_de_rmw(i915, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
- /* Reset ELD address */
- intel_de_rmw(i915, regs.aud_cntl_st,
- IBX_ELD_ADDRESS_MASK, 0);
-
- eld_buffer_size = ilk_eld_buffer_size(i915, pipe);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
-
- for (i = 0; i < len; i++)
- intel_de_write(i915, regs.hdmiw_hdmiedid, eld[i]);
- for (; i < eld_buffer_size; i++)
- intel_de_write(i915, regs.hdmiw_hdmiedid, 0);
-
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, regs.aud_cntl_st) & IBX_ELD_ADDRESS_MASK) != 0);
-
- /* ELD valid */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
- 0, IBX_ELD_VALID(port));
+ /*
+ * The audio component is used to convey the ELD
+ * instead of using the hardware ELD buffer.
+ */
/* Enable timestamps */
intel_de_rmw(i915, regs.aud_config,
@@ -798,6 +759,41 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&i915->display.audio.mutex);
}
+void intel_audio_sdp_split_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder trans = crtc_state->cpu_transcoder;
+
+ if (HAS_DP20(i915))
+ intel_de_rmw(i915, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
+}
+
+bool intel_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->eld[0]) {
+ drm_dbg_kms(&i915->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ return false;
+ }
+
+ BUILD_BUG_ON(sizeof(crtc_state->eld) != sizeof(connector->eld));
+ memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
+
+ crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ return true;
+}
+
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @encoder: encoder on which to enable audio
@@ -814,27 +810,19 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
- connector->base.id, connector->name,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
+ connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(connector->eld));
-
- /* FIXME precompute the ELD in .compute_config() */
- if (!connector->eld[0])
- drm_dbg_kms(&i915->drm,
- "Bogus ELD on [CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ crtc->base.base.id, crtc->base.name,
+ drm_eld_size(crtc_state->eld));
if (i915->display.funcs.audio)
i915->display.funcs.audio->audio_codec_enable(encoder,
@@ -842,22 +830,25 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
conn_state);
mutex_lock(&i915->display.audio.mutex);
- encoder->audio_connector = connector;
- /* referred in audio callbacks */
- i915->display.audio.encoder_map[pipe] = encoder;
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+
+ audio_state->encoder = encoder;
+ BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
+ memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld));
+
mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, connector->eld,
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -878,16 +869,18 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_connector *connector = old_conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
- connector->base.id, connector->name,
- encoder->base.base.id, encoder->base.name, pipe_name(pipe));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ crtc->base.base.id, crtc->base.name);
if (i915->display.funcs.audio)
i915->display.funcs.audio->audio_codec_disable(encoder,
@@ -895,35 +888,71 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
old_conn_state);
mutex_lock(&i915->display.audio.mutex);
- encoder->audio_connector = NULL;
- i915->display.audio.encoder_map[pipe] = NULL;
+
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+
+ audio_state->encoder = NULL;
+ memset(audio_state->eld, 0, sizeof(audio_state->eld));
+
mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
+}
+
+static void intel_acomp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_audio_state *audio_state;
+
+ mutex_lock(&i915->display.audio.mutex);
+
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+
+ if (audio_state->encoder)
+ memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
+
+ mutex_unlock(&i915->display.audio.mutex);
+}
+
+void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ if (i915->display.funcs.audio)
+ i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state);
}
static const struct intel_audio_funcs g4x_audio_funcs = {
.audio_codec_enable = g4x_audio_codec_enable,
.audio_codec_disable = g4x_audio_codec_disable,
+ .audio_codec_get_config = g4x_audio_codec_get_config,
};
-static const struct intel_audio_funcs ilk_audio_funcs = {
- .audio_codec_enable = ilk_audio_codec_enable,
- .audio_codec_disable = ilk_audio_codec_disable,
+static const struct intel_audio_funcs ibx_audio_funcs = {
+ .audio_codec_enable = ibx_audio_codec_enable,
+ .audio_codec_disable = ibx_audio_codec_disable,
+ .audio_codec_get_config = intel_acomp_get_config,
};
static const struct intel_audio_funcs hsw_audio_funcs = {
.audio_codec_enable = hsw_audio_codec_enable,
.audio_codec_disable = hsw_audio_codec_disable,
+ .audio_codec_get_config = intel_acomp_get_config,
};
/**
@@ -934,12 +963,11 @@ void intel_audio_hooks_init(struct drm_i915_private *i915)
{
if (IS_G4X(i915))
i915->display.funcs.audio = &g4x_audio_funcs;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.funcs.audio = &ilk_audio_funcs;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
+ HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ i915->display.funcs.audio = &ibx_audio_funcs;
else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
i915->display.funcs.audio = &hsw_audio_funcs;
- else if (HAS_PCH_SPLIT(i915))
- i915->display.funcs.audio = &ilk_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -955,11 +983,7 @@ void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
{
- if (refclk == 24000)
- aud_ts->m = 12;
- else
- aud_ts->m = 15;
-
+ aud_ts->m = 60;
aud_ts->n = cdclk * aud_ts->m / 24000;
}
@@ -1117,59 +1141,59 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
}
/*
- * get the intel_encoder according to the parameter port and pipe
- * intel_encoder is saved by the index of pipe
- * MST & (pipe >= 0): return the audio.encoder_map[pipe],
+ * get the intel audio state according to the parameter port and cpu_transcoder
+ * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder,
* when port is matched
- * MST & (pipe < 0): this is invalid
- * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
+ * MST & (cpu_transcoder < 0): this is invalid
+ * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry)
* will get the right intel_encoder with port matched
- * Non-MST & (pipe < 0): get the right intel_encoder with port matched
+ * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
-static struct intel_encoder *get_saved_enc(struct drm_i915_private *i915,
- int port, int pipe)
+static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
+ int port, int cpu_transcoder)
{
/* MST */
- if (pipe >= 0) {
+ if (cpu_transcoder >= 0) {
+ struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
if (drm_WARN_ON(&i915->drm,
- pipe >= ARRAY_SIZE(i915->display.audio.encoder_map)))
+ cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
return NULL;
- encoder = i915->display.audio.encoder_map[pipe];
- /*
- * when bootup, audio driver may not know it is
- * MST or not. So it will poll all the port & pipe
- * combinations
- */
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+ encoder = audio_state->encoder;
+
if (encoder && encoder->port == port &&
encoder->type == INTEL_OUTPUT_DP_MST)
- return encoder;
+ return audio_state;
}
/* Non-MST */
- if (pipe > 0)
+ if (cpu_transcoder > 0)
return NULL;
- for_each_pipe(i915, pipe) {
+ for_each_cpu_transcoder(i915, cpu_transcoder) {
+ struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- encoder = i915->display.audio.encoder_map[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+ encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
encoder->type != INTEL_OUTPUT_DP_MST)
- return encoder;
+ return audio_state;
}
return NULL;
}
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int pipe, int rate)
+ int cpu_transcoder, int rate)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
struct i915_audio_component *acomp = i915->display.audio.component;
+ const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1181,20 +1205,22 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&i915->display.audio.mutex);
- /* 1. get the pipe */
- encoder = get_saved_enc(i915, port, pipe);
- if (!encoder || !encoder->base.crtc) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
- port_name(port));
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
+ if (!audio_state) {
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+ encoder = audio_state->encoder;
+
+ /* FIXME stop using the legacy crtc pointer */
crtc = to_intel_crtc(encoder->base.crtc);
- /* port must be valid now, otherwise the pipe will be invalid */
+ /* port must be valid now, otherwise the cpu_transcoder will be invalid */
acomp->aud_sample_rate[port] = rate;
+ /* FIXME get rid of the crtc->config stuff */
hsw_audio_config_update(encoder, crtc->config);
unlock:
@@ -1204,28 +1230,26 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- int pipe, bool *enabled,
+ int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
- struct intel_encoder *intel_encoder;
- const u8 *eld;
- int ret = -EINVAL;
+ const struct intel_audio_state *audio_state;
+ int ret = 0;
mutex_lock(&i915->display.audio.mutex);
- intel_encoder = get_saved_enc(i915, port, pipe);
- if (!intel_encoder) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
- port_name(port));
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
+ if (!audio_state) {
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
mutex_unlock(&i915->display.audio.mutex);
- return ret;
+ return -EINVAL;
}
- ret = 0;
- *enabled = intel_encoder->audio_connector != NULL;
+ *enabled = audio_state->encoder != NULL;
if (*enabled) {
- eld = intel_encoder->audio_connector->eld;
+ const u8 *eld = audio_state->eld;
+
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 63b22131dc45..07d034a981e9 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -6,21 +6,30 @@
#ifndef __INTEL_AUDIO_H__
#define __INTEL_AUDIO_H__
+#include <linux/types.h>
+
struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+bool intel_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
+void intel_audio_sdp_split_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 71af88a70461..2e8f17c04522 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -83,16 +83,16 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
- if (dev_priv->params.invert_brightness < 0)
+ if (i915->params.invert_brightness < 0)
return val;
- if (dev_priv->params.invert_brightness > 0 ||
- intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) {
+ if (i915->params.invert_brightness > 0 ||
+ intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -105,16 +105,17 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
+ connector->base.base.id, connector->base.name, val);
panel->backlight.pwm_funcs->set(conn_state, val);
}
u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
val = scale(val, panel->backlight.min, panel->backlight.max,
@@ -125,14 +126,14 @@ u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)))
+ if (i915->params.invert_brightness > 0 ||
+ (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -141,32 +142,32 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(i915, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(i915, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 val;
- val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (DISPLAY_VER(dev_priv) < 4)
+ val = intel_de_read(i915, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(i915) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
+ pci_read_config_byte(to_pci_dev(i915->drm.dev), LBPC, &lbpc);
val *= lbpc;
}
@@ -175,21 +176,20 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse
static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- return intel_de_read(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
@@ -204,69 +204,69 @@ static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe un
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ u32 val;
- u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
+ val = intel_de_read(i915, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(i915, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(i915, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
- pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
+ pci_write_config_byte(to_pci_dev(i915->drm.dev), LBPC, lbpc);
}
- if (DISPLAY_VER(dev_priv) == 4) {
+ if (DISPLAY_VER(i915) == 4) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
- intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(i915, BLC_PWM_CTL) & ~mask;
+ intel_de_write(i915, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -284,7 +284,8 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
+ connector->base.base.id, connector->base.name, level);
panel->backlight.funcs->set(conn_state, level);
}
@@ -296,7 +297,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -309,9 +310,9 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -325,13 +326,13 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, level);
@@ -344,31 +345,26 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "cpu backlight was enabled, disabling\n");
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
- tmp & ~BLM_PWM_ENABLE);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
+ connector->base.base.id, connector->base.name);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -378,63 +374,49 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
- u32 tmp;
+ struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
- }
+ if (panel->backlight.controller == 1)
+ intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
}
static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -451,7 +433,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
@@ -463,68 +445,61 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
- if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
+ connector->base.base.id, connector->base.name);
return;
}
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, schicken;
+ u32 pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(dev_priv)) {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= LPT_PWM_GRANULARITY;
- else
- schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
- } else {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= SPT_PWM_GRANULARITY;
- else
- schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
- }
+ if (HAS_PCH_LPT(i915))
+ intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ LPT_PWM_GRANULARITY : 0);
+ else
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ SPT_PWM_GRANULARITY : 0);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(i915))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
@@ -534,61 +509,63 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(i915, BLC_PWM_CPU_CTL2);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ ctl = intel_de_read(i915, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- intel_de_write(dev_priv, BLC_PWM_CTL, 0);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
+ intel_de_write(i915, BLC_PWM_CTL, 0);
}
freq = panel->backlight.pwm_level_max;
@@ -598,11 +575,11 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = freq << 17;
if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
- if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
+ if (IS_PINEVIEW(i915) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL);
+ intel_de_write(i915, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(i915, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -612,24 +589,25 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (DISPLAY_VER(dev_priv) == 2)
- intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ if (DISPLAY_VER(i915) == 2)
+ intel_de_write(i915, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_write(i915, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.pwm_level_max;
@@ -637,16 +615,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_write(i915, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(i915, BLC_PWM_CTL2);
+ intel_de_write(i915, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_backlight_set_pwm_level(conn_state, level);
}
@@ -655,20 +633,21 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -676,50 +655,47 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
- intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val = intel_de_read(i915, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "util pin already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n",
+ connector->base.base.id, connector->base.name);
val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ intel_de_write(i915, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- intel_de_write(dev_priv, UTIL_PIN_CTL,
+ intel_de_write(i915, UTIL_PIN_CTL,
val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -728,11 +704,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -740,22 +714,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -764,11 +735,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -810,37 +779,37 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
- drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(&i915->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -859,16 +828,16 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -876,18 +845,19 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
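scale_user_to_hw() above maps the backlight-class brightness (0..user_max) onto the panel's hardware range before the level is applied. A minimal sketch of that kind of linear rescale, with illustrative numbers rather than the driver's exact helper:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: map x in [smin, smax] onto [dmin, dmax] with
 * round-to-nearest. The driver's helper follows the same idea. */
static uint32_t scale_range(uint32_t x, uint32_t smin, uint32_t smax,
			    uint32_t dmin, uint32_t dmax)
{
	uint64_t num = (uint64_t)(x - smin) * (dmax - dmin);

	return dmin + (uint32_t)((num + (smax - smin) / 2) / (smax - smin));
}

int main(void)
{
	/* e.g. user level 50/100 onto a hardware range [200, 1000] */
	printf("%u\n", scale_range(50, 0, 100, 200, 1000));	/* 600 */
	return 0;
}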
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- struct drm_device *dev = connector->base.dev;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
- bd->props.brightness, bd->props.max_brightness);
+ drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+
+ drm_dbg_kms(&i915->drm, "updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector->base.state, bd->props.brightness,
bd->props.max_brightness);
@@ -907,28 +877,28 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
bd->props.power = FB_BLANK_POWERDOWN;
}
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
return 0;
}
static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
u32 hw_level;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector,
hw_level, bd->props.max_brightness);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
}
return ret;
@@ -1038,9 +1008,9 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz);
}
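cnp_hz_to_pwm() above derives the max PWM level register value as KHz(rawclk_freq) / pwm_freq_hz, rounded to the nearest integer. A worked example of the arithmetic, assuming an illustrative 19200 kHz raw clock and the 200 Hz VBT default frequency used elsewhere in this file:

#include <stdio.h>

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int rawclk_khz = 19200;	/* assumed raw clock, not a platform claim */
	unsigned int pwm_freq_hz = 200;		/* VBT default used in this file */

	/* KHz() scales kHz to Hz before dividing by the PWM frequency. */
	printf("%u\n", div_round_closest(rawclk_khz * 1000, pwm_freq_hz));	/* 96000 */
	return 0;
}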
@@ -1077,7 +1047,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1086,7 +1056,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(dev_priv))
+ if (HAS_PCH_LPT_H(i915))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1100,9 +1070,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz * 128);
}
@@ -1116,13 +1086,13 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int clock;
- if (IS_PINEVIEW(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ if (IS_PINEVIEW(i915))
+ clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
else
- clock = KHz(dev_priv->display.cdclk.hw.cdclk);
+ clock = KHz(i915->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1134,13 +1104,13 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int clock;
- if (IS_G4X(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ if (IS_G4X(i915))
+ clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
else
- clock = KHz(dev_priv->display.cdclk.hw.cdclk);
+ clock = KHz(i915->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1152,17 +1122,17 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int mul, clock;
- if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(dev_priv))
+ if ((intel_de_read(i915, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (IS_CHERRYVIEW(i915))
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
mul = 128;
}
@@ -1171,16 +1141,16 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u16 get_vbt_pwm_freq(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
if (pwm_freq_hz) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT defined backlight frequency %u Hz\n",
pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"default backlight frequency %u Hz\n",
pwm_freq_hz);
}
@@ -1190,20 +1160,20 @@ static u16 get_vbt_pwm_freq(struct intel_connector *connector)
static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
u32 pwm;
if (!panel->backlight.pwm_funcs->hz_to_pwm) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"backlight frequency conversion not supported\n");
return 0;
}
pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"backlight frequency conversion failed\n");
return 0;
}
@@ -1216,11 +1186,11 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1231,7 +1201,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
if (min != connector->panel.vbt.backlight.min_brightness) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"clamping VBT min backlight %d/255 to %d/255\n",
connector->panel.vbt.backlight.min_brightness, min);
}
@@ -1242,24 +1212,24 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(dev_priv))
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ if (HAS_PCH_LPT(i915))
+ alt = intel_de_read(i915, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(i915, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1271,38 +1241,42 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
if (cpu_mode) {
val = pch_get_backlight(connector, unused);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, val);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ intel_de_write(i915, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ intel_de_write(i915, BLC_PWM_CPU_CTL2,
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1313,25 +1287,29 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ ctl = intel_de_read(i915, BLC_PWM_CTL);
- if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (DISPLAY_VER(i915) == 2 || IS_I915GM(i915) || IS_I945GM(i915))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
- if (IS_PINEVIEW(dev_priv))
+ if (IS_PINEVIEW(i915))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
panel->backlight.pwm_level_max = ctl >> 17;
@@ -1355,20 +1333,24 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = val != 0;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ ctl = intel_de_read(i915, BLC_PWM_CTL);
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1384,22 +1366,26 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe));
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1412,31 +1398,35 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n",
+ connector->base.base.id, connector->base.name, pipe_name(pipe));
+
return 0;
}
static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
panel->backlight.controller = connector->panel.vbt.backlight.controller;
- pwm_ctl = intel_de_read(dev_priv,
+ pwm_ctl = intel_de_read(i915,
BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val = intel_de_read(i915, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1448,29 +1438,63 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
+static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
+{
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ return 1;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ return 2;
+
+ return 1;
+}
+
+static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int controller)
+{
+ if (controller < 0 || controller >= cnp_num_backlight_controllers(i915))
+ return false;
+
+ if (controller == 1 &&
+ INTEL_PCH_TYPE(i915) >= PCH_ICP &&
+ INTEL_PCH_TYPE(i915) < PCH_MTP)
+ return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+
+ return true;
+}
+
static int
cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
/*
* CNP has the BXT implementation of backlight, but with only one
- * controller. TODO: ICP has multiple controllers but we only use
- * controller 0 for now.
+ * controller. ICP+ can have two controllers, depending on pin muxing.
*/
- panel->backlight.controller = 0;
+ panel->backlight.controller = connector->panel.vbt.backlight.controller;
+ if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+ panel->backlight.controller = 0;
+ }
- pwm_ctl = intel_de_read(dev_priv,
+ pwm_ctl = intel_de_read(i915,
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1482,30 +1506,34 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
static int ext_pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_pmic_backlight");
desc = "PMIC";
} else {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_soc_backlight");
desc = "SoC";
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
- desc);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
+ connector->base.base.id, connector->base.name, desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1522,7 +1550,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ connector->base.base.id, connector->base.name,
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
} else {
@@ -1531,8 +1560,10 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
- desc);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n",
+ connector->base.base.id, connector->base.name, desc);
+
return 0;
}
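In the ext_pwm path, when the PWM is not already running the period is programmed as NSEC_PER_SEC / get_vbt_pwm_freq(connector). With the 200 Hz default that works out to a 5 ms period; a trivial check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long nsec_per_sec = 1000000000UL;
	unsigned int vbt_pwm_freq_hz = 200;	/* default when the VBT gives none */

	printf("period = %lu ns\n", nsec_per_sec / vbt_pwm_freq_hz);	/* 5000000 */
	return 0;
}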
@@ -1575,8 +1606,9 @@ static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_s
static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ int ret;
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0)
return ret;
@@ -1594,57 +1626,59 @@ void intel_backlight_update(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) {
- drm_dbg_kms(&dev_priv->drm,
- "no backlight present per VBT, but present per quirk\n");
+ if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
+ connector->base.base.id, connector->base.name);
} else {
- drm_dbg_kms(&dev_priv->drm,
- "no backlight present per VBT\n");
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] no backlight present per VBT\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
+ if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to setup backlight for connector %s\n",
- connector->base.name);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] failed to setup backlight\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
panel->backlight.present = true;
- drm_dbg_kms(&dev_priv->drm,
- "Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->base.name,
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
+ connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
@@ -1753,30 +1787,30 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
{
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- if (HAS_PCH_LPT(dev_priv))
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT) {
+ if (HAS_PCH_LPT(i915))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(i915)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
+ } else if (DISPLAY_VER(i915) == 4) {
panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
@@ -1786,7 +1820,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
index 344eb8096bd2..d0cdfd631d75 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
@@ -8,23 +8,20 @@
#include "intel_display_reg_defs.h"
-#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
-#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
- _VLV_BLC_PWM_CTL2_B)
+#define _VLV_BLC_PWM_CTL2_A (VLV_DISPLAY_BASE + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (VLV_DISPLAY_BASE + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, _VLV_BLC_PWM_CTL2_B)
-#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
-#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
- _VLV_BLC_PWM_CTL_B)
+#define _VLV_BLC_PWM_CTL_A (VLV_DISPLAY_BASE + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (VLV_DISPLAY_BASE + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, _VLV_BLC_PWM_CTL_B)
-#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
-#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
- _VLV_BLC_HIST_CTL_B)
+#define _VLV_BLC_HIST_CTL_A (VLV_DISPLAY_BASE + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (VLV_DISPLAY_BASE + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, _VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2 _MMIO(0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
@@ -47,7 +44,7 @@
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define BLC_PWM_CTL _MMIO(0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
@@ -69,7 +66,7 @@
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLC_HIST_CTL _MMIO(0x61260)
#define BLM_HISTOGRAM_ENABLE (1 << 31)
/* New registers for PCH-split platforms. Safe where new bits show up, the
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 572a4e3769f3..75e69dffc5e9 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,16 +25,15 @@
*
*/
-#include <drm/drm_edid.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
-
-#include "display/intel_display.h"
-#include "display/intel_display_types.h"
-#include "display/intel_gmbus.h"
+#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -142,8 +141,8 @@ struct bdb_block_entry {
};
static const void *
-find_section(struct drm_i915_private *i915,
- enum bdb_block_id section_id)
+bdb_find_section(struct drm_i915_private *i915,
+ enum bdb_block_id section_id)
{
struct bdb_block_entry *entry;
@@ -202,7 +201,7 @@ static size_t lfp_data_min_size(struct drm_i915_private *i915)
const struct bdb_lvds_lfp_data_ptrs *ptrs;
size_t size;
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return 0;
@@ -620,18 +619,18 @@ static void dump_pnp_id(struct drm_i915_private *i915,
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
return intel_opregion_get_panel_type(i915);
}
static int vbt_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lvds_options *lvds_options;
- lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return -1;
@@ -652,12 +651,13 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
static int pnpid_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
const struct lvds_pnp_id *edid_id;
struct lvds_pnp_id edid_id_nodate;
+ const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */
int i, best = -1;
if (!edid)
@@ -671,11 +671,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
dump_pnp_id(i915, edid_id, "EDID");
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
- data = find_section(i915, BDB_LVDS_LFP_DATA);
+ data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return -1;
@@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
static int fallback_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
- return 0;
+ return use_fallback ? 0 : -1;
}
enum panel_type {
@@ -715,13 +715,13 @@ enum panel_type {
static int get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
struct {
const char *name;
int (*get_panel_type)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid);
+ const struct drm_edid *drm_edid, bool use_fallback);
int panel_type;
} panel_types[] = {
[PANEL_TYPE_OPREGION] = {
@@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915,
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
- panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
+ drm_edid, use_fallback);
drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
panel_types[i].panel_type != 0xff);
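get_panel_type() fills this table by calling each detection hook in turn (OpRegion, VBT, PNP ID, fallback) and, later in the function, picks the first method that produced a usable panel type. A rough standalone sketch of that probe-table pattern, with illustrative names only:

#include <stdio.h>

typedef int (*probe_fn)(void);

/* Illustrative probes; the driver's hooks take devdata, EDID and a
 * use_fallback flag, and the result range is validated afterwards. */
static int probe_a(void) { return -1; }	/* "not available" */
static int probe_b(void) { return  2; }	/* first usable answer */

int main(void)
{
	struct { const char *name; probe_fn fn; } probes[] = {
		{ "opregion", probe_a },
		{ "vbt",      probe_b },
	};
	unsigned int i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
		int v = probes[i].fn();

		if (v >= 0) {
			printf("using %s: %d\n", probes[i].name, v);
			break;
		}
	}
	return 0;
}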
@@ -790,7 +791,7 @@ parse_panel_options(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
int drrs_mode;
- lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@@ -880,11 +881,11 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return;
- data = find_section(i915, BDB_LVDS_LFP_DATA);
+ data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return;
@@ -931,7 +932,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
if (i915->display.vbt.version < 229)
return;
- generic_dtd = find_section(i915, BDB_GENERIC_DTD);
+ generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
@@ -1010,7 +1011,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
u16 level;
- backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
+ backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -1032,6 +1033,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
size_t exp_size;
@@ -1082,6 +1084,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.min_brightness = entry->min_brightness;
}
+ if (i915->display.vbt.version >= 239)
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout =
+ DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100);
+ else
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30;
+
drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
@@ -1111,14 +1119,14 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
- sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS);
+ sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
- dtds = find_section(i915, BDB_SDVO_PANEL_DTDS);
+ dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS);
if (!dtds)
return;
@@ -1154,7 +1162,7 @@ parse_general_features(struct drm_i915_private *i915)
{
const struct bdb_general_features *general;
- general = find_section(i915, BDB_GENERAL_FEATURES);
+ general = bdb_find_section(i915, BDB_GENERAL_FEATURES);
if (!general)
return;
@@ -1200,9 +1208,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
static void
parse_sdvo_device_mapping(struct drm_i915_private *i915)
{
- struct sdvo_device_mapping *mapping;
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
int count = 0;
/*
@@ -1215,7 +1221,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ struct sdvo_device_mapping *mapping;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -1278,7 +1285,7 @@ parse_driver_features(struct drm_i915_private *i915)
{
const struct bdb_driver_features *driver;
- driver = find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -1315,7 +1322,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
{
const struct bdb_driver_features *driver;
- driver = find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -1355,7 +1362,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
if (i915->display.vbt.version < 228)
return;
- power = find_section(i915, BDB_LFP_POWER);
+ power = bdb_find_section(i915, BDB_LFP_POWER);
if (!power)
return;
@@ -1395,7 +1402,7 @@ parse_edp(struct drm_i915_private *i915,
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
- edp = find_section(i915, BDB_EDP);
+ edp = bdb_find_section(i915, BDB_EDP);
if (!edp)
return;
@@ -1525,7 +1532,7 @@ parse_psr(struct drm_i915_private *i915,
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
- psr = find_section(i915, BDB_PSR);
+ psr = bdb_find_section(i915, BDB_PSR);
if (!psr) {
drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
return;
@@ -1686,7 +1693,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Parse #52 for panel index used from panel_type already
* parsed
*/
- start = find_section(i915, BDB_MIPI_CONFIG);
+ start = bdb_find_section(i915, BDB_MIPI_CONFIG);
if (!start) {
drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
return;
@@ -1998,7 +2005,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
- sequence = find_section(i915, BDB_MIPI_SEQUENCE);
+ sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
if (!sequence) {
drm_dbg_kms(&i915->drm,
"No MIPI Sequence found, parsing complete\n");
@@ -2073,14 +2080,13 @@ parse_compression_parameters(struct drm_i915_private *i915)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
u16 block_size;
int index;
if (i915->display.vbt.version < 198)
return;
- params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
+ params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
@@ -2098,7 +2104,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!child->compression_enable)
continue;
@@ -2224,14 +2230,14 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2290,14 +2296,14 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2466,6 +2472,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -2504,7 +2526,7 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
-static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
@@ -2515,7 +2537,7 @@ static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *de
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
-static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 244)
return 0;
@@ -2569,14 +2591,27 @@ intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata)
return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
}
-static bool
+bool
intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
{
return intel_bios_encoder_supports_dp(devdata) &&
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
-static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+static bool
+intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
+}
+
+bool
+intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon;
+}
+
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
@@ -2584,7 +2619,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
return devdata->child.hdmi_level_shifter_value;
}
-static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
@@ -2626,7 +2661,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
{
struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
is_dvi = intel_bios_encoder_supports_dvi(devdata);
@@ -2634,44 +2669,45 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
is_crt = intel_bios_encoder_supports_crt(devdata);
is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
is_edp = intel_bios_encoder_supports_edp(devdata);
+ is_dsi = intel_bios_encoder_supports_dsi(devdata);
supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
supports_tbt = intel_bios_encoder_supports_tbt(devdata);
drm_dbg_kms(&i915->drm,
- "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
- HAS_LSPCON(i915) && child->lspcon,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
+ intel_bios_encoder_is_lspcon(devdata),
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI level shift: %d\n",
port_name(port), hdmi_level_shift);
}
- max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata);
if (max_tmds_clock)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
- dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
+ dp_boost_level = intel_bios_dp_boost_level(devdata);
if (dp_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), dp_boost_level);
- hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata);
+ hdmi_boost_level = intel_bios_hdmi_boost_level(devdata);
if (hdmi_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
- dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ dp_max_link_rate = intel_bios_dp_max_link_rate(devdata);
if (dp_max_link_rate)
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
@@ -2693,6 +2729,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
enum port port;
port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
+ port = dsi_dvo_port_to_port(i915, child->dvo_port);
if (port == PORT_NONE)
return;
@@ -2754,7 +2792,7 @@ parse_general_definitions(struct drm_i915_private *i915)
u16 block_size;
int bus_pin;
- defs = find_section(i915, BDB_GENERAL_DEFINITIONS);
+ defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS);
if (!defs) {
drm_dbg_kms(&i915->drm,
"No general definition block is found, no devices defined.\n");
@@ -2784,7 +2822,7 @@ parse_general_definitions(struct drm_i915_private *i915)
expected_size = 37;
} else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->display.vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 250) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -3183,14 +3221,26 @@ out:
kfree(oprom_vbt);
}
-void intel_bios_init_panel(struct drm_i915_private *i915,
- struct intel_panel *panel,
- const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+static void intel_bios_init_panel(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct drm_edid *drm_edid,
+ bool use_fallback)
{
- init_vbt_panel_defaults(panel);
+ /* already have it? */
+ if (panel->vbt.panel_type >= 0) {
+ drm_WARN_ON(&i915->drm, !use_fallback);
+ return;
+ }
- panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
+ panel->vbt.panel_type = get_panel_type(i915, devdata,
+ drm_edid, use_fallback);
+ if (panel->vbt.panel_type < 0) {
+ drm_WARN_ON(&i915->drm, use_fallback);
+ return;
+ }
+
+ init_vbt_panel_defaults(panel);
parse_panel_options(i915, panel);
parse_generic_dtd(i915, panel);
@@ -3205,6 +3255,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915,
parse_mipi_sequence(i915, panel);
}
+void intel_bios_init_panel_early(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata)
+{
+ intel_bios_init_panel(i915, panel, devdata, NULL, false);
+}
+
+void intel_bios_init_panel_late(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct drm_edid *drm_edid)
+{
+ intel_bios_init_panel(i915, panel, devdata, drm_edid, true);
+}
+
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @i915: i915 device instance
@@ -3252,7 +3317,6 @@ void intel_bios_fini_panel(struct intel_panel *panel)
bool intel_bios_is_tv_present(struct drm_i915_private *i915)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (!i915->display.vbt.int_tv_support)
return false;
@@ -3261,7 +3325,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/*
* If the device type is not TV, continue.
@@ -3295,13 +3359,12 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (list_empty(&i915->display.vbt.display_devices))
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -3343,25 +3406,22 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
+
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->display.vbt.ports[port];
-}
+ if (!is_port_valid(i915, port))
+ return false;
-/**
- * intel_bios_is_port_edp - is the device in given port eDP
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if the device in %port is eDP.
- */
-bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
-{
- const struct intel_bios_encoder_data *devdata =
- intel_bios_encoder_data_lookup(i915, port);
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ const struct child_device_config *child = &devdata->child;
+
+ if (dvo_port_to_port(i915, child->dvo_port) == port)
+ return true;
+ }
- return devdata && intel_bios_encoder_supports_edp(devdata);
+ return false;
}
static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
@@ -3403,30 +3463,24 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
enum port *port)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
- u8 dvo_port;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ u8 dvo_port = child->dvo_port;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = child->dvo_port;
-
- if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
- if (port)
- *port = dvo_port - DVO_PORT_MIPIA;
- return true;
- } else if (dvo_port == DVO_PORT_MIPIB ||
- dvo_port == DVO_PORT_MIPIC ||
- dvo_port == DVO_PORT_MIPID) {
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
}
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
}
return false;
@@ -3503,15 +3557,14 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
@@ -3525,73 +3578,10 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
return false;
}
-/**
- * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if HPD should be inverted for %port.
- */
-bool
-intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- if (drm_WARN_ON_ONCE(&i915->drm,
- !IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
- return false;
-
- return devdata && devdata->child.hpd_invert;
-}
-
-/**
- * intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if LSPCON is present on this port
- */
-bool
-intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
-}
-
-/**
- * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if port requires lane reversal
- */
-bool
-intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port)
+static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return devdata && devdata->child.lane_reversal;
-}
-
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
- if (!devdata || !devdata->child.aux_channel) {
- aux_ch = (enum aux_ch)port;
-
- drm_dbg_kms(&i915->drm,
- "using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
/*
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
* map to DDI A,B,TC1,TC2 respectively.
@@ -3599,7 +3589,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
- switch (devdata->child.aux_channel) {
+ switch (aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
@@ -3660,35 +3650,23 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_I;
break;
default:
- MISSING_CASE(devdata->child.aux_channel);
+ MISSING_CASE(aux_channel);
aux_ch = AUX_CH_A;
break;
}
- drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
return aux_ch;
}
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_max_tmds_clock(devdata);
-}
-
-/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+ if (!devdata || !devdata->child.aux_channel)
+ return AUX_CH_NONE;
- return _intel_bios_hdmi_level_shift(devdata);
+ return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3696,7 +3674,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
return translate_iboost(devdata->child.dp_iboost_level);
}
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3704,31 +3682,12 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
return translate_iboost(devdata->child.hdmi_iboost_level);
}
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_link_rate(devdata);
-}
-
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_lane_count(devdata);
-}
-
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
if (!devdata || !devdata->child.ddc_pin)
return 0;
- return map_ddc_pin(i915, devdata->child.ddc_pin);
+ return map_ddc_pin(devdata->i915, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
@@ -3741,6 +3700,16 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.lane_reversal;
+}
+
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.hpd_invert;
+}
+
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e375405a7828..8a0730c9b48c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,12 +32,13 @@
#include <linux/types.h>
+struct drm_edid;
struct drm_i915_private;
-struct edid;
struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
struct intel_panel;
+enum aux_ch;
enum port;
enum intel_backlight_type {
@@ -232,10 +233,13 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_init_panel(struct drm_i915_private *dev_priv,
- struct intel_panel *panel,
- const struct intel_bios_encoder_data *devdata,
- const struct edid *edid);
+void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata);
+void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
@@ -245,21 +249,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
@@ -269,9 +261,19 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 1c236f02b380..202321ffbe2a 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -119,6 +119,32 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ u16 qgv_points = 0, psf_points = 0;
+
+ /*
+ * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
+ * it with failure if we try masking any unadvertised points.
+ * So need to operate only with those returned from PCode.
+ */
+ if (num_qgv_points > 0)
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
+
+ if (num_psf_gv_points > 0)
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
+
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
+}
+
+static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+{
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ ICL_PCODE_REQ_QGV_PT_MASK);
+}
+
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask)
{
@@ -136,6 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return ret;
}
+ dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ I915_SAGV_ENABLED : I915_SAGV_DISABLED;
+
return 0;
}
@@ -965,26 +994,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
-{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
- u16 qgv_points = 0, psf_points = 0;
-
- /*
- * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
- * it with failure if we try masking any unadvertised points.
- * So need to operate only with those returned from PCode.
- */
- if (num_qgv_points > 0)
- qgv_points = GENMASK(num_qgv_points - 1, 0);
-
- if (num_psf_gv_points > 0)
- psf_points = GENMASK(num_psf_gv_points - 1, 0);
-
- return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index cb7ee3a24a58..f20292143745 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,7 +8,7 @@
#include <drm/drm_atomic.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b74e36d76013..084a483f9776 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1319,11 +1319,35 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static const struct intel_cdclk_vals rplu_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
{}
@@ -1346,6 +1370,16 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals mtl_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 },
+ {}
+};
+
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
@@ -1717,44 +1751,108 @@ static void dg2_cdclk_squash_program(struct drm_i915_private *i915,
intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl);
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe)
+static bool cdclk_pll_is_unknown(unsigned int vco)
+{
+ /*
+ * Ensure driver does not take the crawl path for the
+ * case when the vco is set to ~0 in the
+ * sanitize path.
+ */
+ return vco == ~0;
+}
+
+static int cdclk_squash_divider(u16 waveform)
+{
+ return hweight16(waveform ?: 0xffff);
+}
+
+static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *old_cdclk_config,
+ const struct intel_cdclk_config *new_cdclk_config,
+ struct intel_cdclk_config *mid_cdclk_config)
+{
+ u16 old_waveform, new_waveform, mid_waveform;
+ int size = 16;
+ int div = 2;
+
+ /* Return if the PLL is in an unknown state, forcing a complete disable and re-enable. */
+ if (cdclk_pll_is_unknown(old_cdclk_config->vco))
+ return false;
+
+ /* Return if both Squash and Crawl are not present */
+ if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915))
+ return false;
+
+ old_waveform = cdclk_squash_waveform(i915, old_cdclk_config->cdclk);
+ new_waveform = cdclk_squash_waveform(i915, new_cdclk_config->cdclk);
+
+ /* Return if Squash only or Crawl only is the desired action */
+ if (old_cdclk_config->vco == 0 || new_cdclk_config->vco == 0 ||
+ old_cdclk_config->vco == new_cdclk_config->vco ||
+ old_waveform == new_waveform)
+ return false;
+
+ *mid_cdclk_config = *new_cdclk_config;
+
+ /*
+ * Populate the mid_cdclk_config accordingly.
+ * - If moving to a higher cdclk, the desired action is squashing.
+ * The mid cdclk config should have the new (squash) waveform.
+ * - If moving to a lower cdclk, the desired action is crawling.
+ * The mid cdclk config should have the new vco.
+ */
+
+ if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) {
+ mid_cdclk_config->vco = old_cdclk_config->vco;
+ mid_waveform = new_waveform;
+ } else {
+ mid_cdclk_config->vco = new_cdclk_config->vco;
+ mid_waveform = old_waveform;
+ }
+
+ mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
+ mid_cdclk_config->vco, size * div);
+
+ /* make sure the mid clock came out sane */
+
+ drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk <
+ min(old_cdclk_config->cdclk, new_cdclk_config->cdclk));
+ drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk >
+ i915->display.cdclk.max_cdclk_freq);
+ drm_WARN_ON(&i915->drm, cdclk_squash_waveform(i915, mid_cdclk_config->cdclk) !=
+ mid_waveform);
+
+ return true;
+}
+
+static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
+{
+ return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) &&
+ dev_priv->display.cdclk.hw.vco > 0 &&
+ HAS_CDCLK_SQUASH(dev_priv));
+}
+
+static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 val;
u16 waveform;
int clock;
- int ret;
-
- /* Inform power controller of upcoming frequency change. */
- if (DISPLAY_VER(dev_priv) >= 11)
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- else
- /*
- * BSpec requires us to wait up to 150usec, but that leads to
- * timeouts; the 2ms used here is based on experiment.
- */
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
- if (ret) {
- drm_err(&dev_priv->drm,
- "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
- if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
+ if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
+ !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
- } else if (DISPLAY_VER(dev_priv) >= 11)
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ /* wa_15010685871: dg2, mtl */
+ if (pll_enable_wa_needed(dev_priv))
+ dg2_cdclk_squash_program(dev_priv, 0);
+
icl_cdclk_pll_update(dev_priv, vco);
- else
+ } else
bxt_cdclk_pll_update(dev_priv, vco);
waveform = cdclk_squash_waveform(dev_priv, cdclk);
@@ -1782,11 +1880,62 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
if (pipe != INVALID_PIPE)
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ struct intel_cdclk_config mid_cdclk_config;
+ int cdclk = cdclk_config->cdclk;
+ int ret = 0;
+
+ /*
+ * Inform power controller of upcoming frequency change.
+ * Display versions 14 and beyond do not follow the PUnit
+ * mailbox communication; skip
+ * this step.
+ */
+ if (DISPLAY_VER(dev_priv) >= 14)
+ /* NOOP */;
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
+ }
+
+ if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
+ cdclk_config, &mid_cdclk_config)) {
+ _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
+ _bxt_set_cdclk(dev_priv, cdclk_config, pipe);
+ } else {
+ _bxt_set_cdclk(dev_priv, cdclk_config, pipe);
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ /*
+ * NOOP - No Pcode communication needed for
+ * Display versions 14 and beyond
+ */;
+ else if (DISPLAY_VER(dev_priv) >= 11)
ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
- } else {
+ else
/*
* The timeout isn't specified, the 2ms used here is based on
* experiment.
@@ -1797,7 +1946,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level,
150, 2);
- }
if (ret) {
drm_err(&dev_priv->drm,
@@ -1954,6 +2102,28 @@ void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
skl_cdclk_uninit_hw(i915);
}
+static bool intel_cdclk_can_crawl_and_squash(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ u16 old_waveform;
+ u16 new_waveform;
+
+ drm_WARN_ON(&i915->drm, cdclk_pll_is_unknown(a->vco));
+
+ if (a->vco == 0 || b->vco == 0)
+ return false;
+
+ if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915))
+ return false;
+
+ old_waveform = cdclk_squash_waveform(i915, a->cdclk);
+ new_waveform = cdclk_squash_waveform(i915, b->cdclk);
+
+ return a->vco != b->vco &&
+ old_waveform != new_waveform;
+}
+
static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
@@ -2760,9 +2930,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
pipe = INVALID_PIPE;
}
- if (intel_cdclk_can_squash(dev_priv,
- &old_cdclk_state->actual,
- &new_cdclk_state->actual)) {
+ if (intel_cdclk_can_crawl_and_squash(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via crawling and squashing\n");
+ } else if (intel_cdclk_can_squash(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
drm_dbg_kms(&dev_priv->drm,
"Can change cdclk via squashing\n");
} else if (intel_cdclk_can_crawl(dev_priv,
@@ -3060,6 +3235,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
return freq;
}
+static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = tgl_calc_voltage_level,
+};
+
static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3195,7 +3377,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_DG2(dev_priv)) {
+ if (IS_METEORLAKE(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
+ dev_priv->display.cdclk.table = mtl_cdclk_table;
+ } else if (IS_DG2(dev_priv)) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
@@ -3203,6 +3388,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
+ else if (IS_ADLP_RPLU(dev_priv))
+ dev_priv->display.cdclk.table = rplu_cdclk_table;
else
dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index c674879a84a5..51e2f6a11ce4 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_global_state.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 250e83f1f5ac..36aac88143ac 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -47,13 +47,29 @@ struct intel_color_funcs {
*/
void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
/*
+ * Perform any extra tasks needed after all the
+ * double buffered registers have been latched.
+ */
+ void (*color_post_update)(const struct intel_crtc_state *crtc_state);
+ /*
* Load LUTs (and other single buffered color management
* registers). Will (hopefully) be called during the vblank
* following the latching of any double buffered registers
* involved with the same commit.
*/
void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Read out the LUTs from the hardware into the software state.
+ * Used by eg. the hardware state checker.
+ */
void (*read_luts)(struct intel_crtc_state *crtc_state);
+ /*
+ * Compare the LUTs
+ */
+ bool (*lut_equal)(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut);
};
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -143,15 +159,7 @@ static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
- return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
-}
-
-static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
-{
- return !crtc_state->hw.degamma_lut &&
- !crtc_state->hw.ctm &&
- crtc_state->hw.gamma_lut &&
- lut_is_legacy(crtc_state->hw.gamma_lut);
+ return lut && drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
}
/*
@@ -246,17 +254,44 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
-static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- /*
- * FIXME if there's a gamma LUT after the CSC, we should
- * do the range compression using the gamma LUT instead.
- */
- return crtc_state->limited_color_range &&
- (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
- IS_DISPLAY_VER(i915, 9, 10));
+ /* icl+ have dedicated output CSC */
+ if (DISPLAY_VER(i915) >= 11)
+ return false;
+
+ /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
+ if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915))
+ return false;
+
+ return crtc_state->limited_color_range;
+}
+
+static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!ilk_limited_range(crtc_state))
+ return false;
+
+ if (crtc_state->c8_planes)
+ return false;
+
+ if (DISPLAY_VER(i915) == 10)
+ return crtc_state->hw.gamma_lut;
+ else
+ return crtc_state->hw.gamma_lut &&
+ (crtc_state->hw.degamma_lut || crtc_state->hw.ctm);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ if (!ilk_limited_range(crtc_state))
+ return false;
+
+ return !ilk_lut_limited_range(crtc_state);
}
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
@@ -437,6 +472,79 @@ static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
entry->blue = intel_color_lut_pack(REG_FIELD_GET(PALETTE_BLUE_MASK, val), 8);
}
+/* i8xx/i9xx+ 10bit slope format "even DW" (low 8 bits) */
+static u32 _i9xx_lut_10_ldw(u16 a)
+{
+ return drm_color_lut_extract(a, 10) & 0xff;
+}
+
+static u32 i9xx_lut_10_ldw(const struct drm_color_lut *color)
+{
+ return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_ldw(color[0].red)) |
+ REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_ldw(color[0].green)) |
+ REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_ldw(color[0].blue));
+}
+
+/* i8xx/i9xx+ 10bit slope format "odd DW" (high 2 bits + slope) */
+static u32 _i9xx_lut_10_udw(u16 a, u16 b)
+{
+ unsigned int mantissa, exponent;
+
+ a = drm_color_lut_extract(a, 10);
+ b = drm_color_lut_extract(b, 10);
+
+ /* b = a + 8 * m * 2 ^ -e */
+ mantissa = clamp(b - a, 0, 0x7f);
+ exponent = 3;
+ while (mantissa > 0xf) {
+ mantissa >>= 1;
+ exponent--;
+ }
+
+ return (exponent << 6) |
+ (mantissa << 2) |
+ (a >> 8);
+}
+
+static u32 i9xx_lut_10_udw(const struct drm_color_lut *color)
+{
+ return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_udw(color[0].red, color[1].red)) |
+ REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_udw(color[0].green, color[1].green)) |
+ REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_udw(color[0].blue, color[1].blue));
+}
+
+static void i9xx_lut_10_pack(struct drm_color_lut *color,
+ u32 ldw, u32 udw)
+{
+ u16 red = REG_FIELD_GET(PALETTE_10BIT_RED_LDW_MASK, ldw) |
+ REG_FIELD_GET(PALETTE_10BIT_RED_UDW_MASK, udw) << 8;
+ u16 green = REG_FIELD_GET(PALETTE_10BIT_GREEN_LDW_MASK, ldw) |
+ REG_FIELD_GET(PALETTE_10BIT_GREEN_UDW_MASK, udw) << 8;
+ u16 blue = REG_FIELD_GET(PALETTE_10BIT_BLUE_LDW_MASK, ldw) |
+ REG_FIELD_GET(PALETTE_10BIT_BLUE_UDW_MASK, udw) << 8;
+
+ color->red = intel_color_lut_pack(red, 10);
+ color->green = intel_color_lut_pack(green, 10);
+ color->blue = intel_color_lut_pack(blue, 10);
+}
+
+static void i9xx_lut_10_pack_slope(struct drm_color_lut *color,
+ u32 ldw, u32 udw)
+{
+ int r_exp = REG_FIELD_GET(PALETTE_10BIT_RED_EXP_MASK, udw);
+ int r_mant = REG_FIELD_GET(PALETTE_10BIT_RED_MANT_MASK, udw);
+ int g_exp = REG_FIELD_GET(PALETTE_10BIT_GREEN_EXP_MASK, udw);
+ int g_mant = REG_FIELD_GET(PALETTE_10BIT_GREEN_MANT_MASK, udw);
+ int b_exp = REG_FIELD_GET(PALETTE_10BIT_BLUE_EXP_MASK, udw);
+ int b_mant = REG_FIELD_GET(PALETTE_10BIT_BLUE_MANT_MASK, udw);
+
+ i9xx_lut_10_pack(color, ldw, udw);
+
+ color->red += r_mant << (3 - r_exp);
+ color->green += g_mant << (3 - g_exp);
+ color->blue += b_mant << (3 - b_exp);
+}
+
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
{
@@ -511,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Despite Wa_1406463849, ICL no longer suffers from the SKL
+ * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
+ * Possibly due to the extra sticky CSC arming
+ * (see icl_color_post_update()).
+ *
+ * On TGL+ all CSC arming issues have been properly fixed.
+ */
icl_load_csc_matrix(crtc_state);
}
+static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * Possibly related to display WA #1184, SKL CSC loses the latched
+ * CSC coeff/offset register values if the CSC registers are disarmed
+ * between DC5 exit and PSR exit. This will cause the plane(s) to
+ * output all black (until CSC_MODE is rearmed and properly latched).
+ * Once PSR exit (and proper register latching) has occurred the
+ * danger is over. Thus when PSR is enabled the CSC coeff/offset
+ * register programming will be performed from skl_color_commit_arm()
+ * which is called after PSR exit.
+ */
+ if (!crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+}
+
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
ilk_load_csc_matrix(crtc_state);
@@ -521,7 +653,7 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
i9xx_set_pipeconf(crtc_state);
}
@@ -530,7 +662,7 @@ static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
ilk_set_pipeconf(crtc_state);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
@@ -556,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val = 0;
+ if (crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
@@ -574,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
+static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black.
+ */
+ intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+
+ intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
+
+ intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /*
+ * Despite Wa_1406463849, ICL CSC is no longer disarmed by
+ * coeff/offset register *writes*. Instead, once CSC_MODE
+ * is armed it stays armed, even after it has been latched.
+ * Afterwards the coeff/offset registers become effectively
+ * self-arming. That self-arming must be disabled before the
+ * next icl_color_commit_noarm() tries to write the next set
+ * of coeff/offset registers. Fortunately register *reads*
+ * do still disarm the CSC. Naturally this must not be done
+ * until the previously written CSC registers have actually
+ * been latched.
+ *
+ * TGL+ no longer need this workaround.
+ */
+ intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
+}
+
static struct drm_property_blob *
create_linear_lut(struct drm_i915_private *i915, int lut_size)
{
@@ -600,9 +776,18 @@ create_linear_lut(struct drm_i915_private *i915, int lut_size)
return blob;
}
+static u16 lut_limited_range(unsigned int value)
+{
+ unsigned int min = 16 << 8;
+ unsigned int max = 235 << 8;
+
+ return value * (max - min) / 0xffff + min;
+}
+
static struct drm_property_blob *
create_resized_lut(struct drm_i915_private *i915,
- const struct drm_property_blob *blob_in, int lut_out_size)
+ const struct drm_property_blob *blob_in, int lut_out_size,
+ bool limited_color_range)
{
int i, lut_in_size = drm_color_lut_size(blob_in);
struct drm_property_blob *blob_out;
@@ -618,8 +803,18 @@ create_resized_lut(struct drm_i915_private *i915,
lut_in = blob_in->data;
lut_out = blob_out->data;
- for (i = 0; i < lut_out_size; i++)
- lut_out[i] = lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)];
+ for (i = 0; i < lut_out_size; i++) {
+ const struct drm_color_lut *entry =
+ &lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)];
+
+ if (limited_color_range) {
+ lut_out[i].red = lut_limited_range(entry->red);
+ lut_out[i].green = lut_limited_range(entry->green);
+ lut_out[i].blue = lut_limited_range(entry->blue);
+ } else {
+ lut_out[i] = *entry;
+ }
+ }
return blob_out;
}
@@ -642,12 +837,38 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
i9xx_lut_8(&lut[i]));
}
+static void i9xx_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i9xx_lut_10_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i9xx_lut_10_udw(&lut[i]));
+ }
+}
+
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- i9xx_load_lut_8(crtc, post_csc_lut);
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ i9xx_load_lut_8(crtc, post_csc_lut);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ i9xx_load_lut_10(crtc, post_csc_lut);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
}
static void i965_load_lut_10p6(struct intel_crtc *crtc,
@@ -675,16 +896,34 @@ static void i965_load_luts(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
i9xx_load_lut_8(crtc, post_csc_lut);
- else
+ break;
+ case GAMMA_MODE_MODE_10BIT:
i965_load_lut_10p6(crtc, post_csc_lut);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
}
-static void ilk_load_lut_8(struct intel_crtc *crtc,
+static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (crtc_state->dsb)
+ intel_dsb_reg_write(crtc_state->dsb, reg, val);
+ else
+ intel_de_write_fw(i915, reg, val);
+}
+
+static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut;
enum pipe pipe = crtc->pipe;
int i;
@@ -695,36 +934,35 @@ static void ilk_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write_fw(i915, LGC_PALETTE(pipe, i),
- i9xx_lut_8(&lut[i]));
+ ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
}
-static void ilk_load_lut_10(struct intel_crtc *crtc,
+static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
- intel_de_write_fw(i915, PREC_PALETTE(pipe, i),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, blob);
+ ilk_load_lut_8(crtc_state, blob);
break;
case GAMMA_MODE_MODE_10BIT:
- ilk_load_lut_10(crtc, blob);
+ ilk_load_lut_10(crtc_state, blob);
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -745,50 +983,56 @@ static int ivb_lut_10_size(u32 prec_index)
* "Restriction : Index auto increment mode is not
* supported and must not be enabled."
*/
-static void ivb_load_lut_10(struct intel_crtc *crtc,
+static void ivb_load_lut_10(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob,
u32 prec_index)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), prec_index++);
- intel_de_write_fw(i915, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ prec_index + i);
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
}
/*
* Reset the index, otherwise it prevents the legacy palette to be
* written properly.
*/
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), 0);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
}
/* On BDW+ the index auto increment mode actually works */
-static void bdw_load_lut_10(struct intel_crtc *crtc,
+static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob,
u32 prec_index)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
- prec_index | PAL_PREC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ prec_index);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ prec_index);
for (i = 0; i < lut_size; i++)
- intel_de_write_fw(i915, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
/*
 * Reset the index, otherwise it prevents the legacy palette from
 * being written properly.
*/
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), 0);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
}
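A rough, self-contained sketch (not part of the patch) of the two indexing styles the loaders above have to use: IVB/HSW rewrites PREC_PAL_INDEX before every data write because index auto-increment is not supported there, while BDW+ arms PAL_PREC_AUTO_INCREMENT once and then streams the data. reg_write() and AUTO_INCREMENT below are hypothetical stand-ins for ilk_lut_write() and the real register bit.

#include <stdio.h>

#define AUTO_INCREMENT (1u << 15)	/* stand-in for PAL_PREC_AUTO_INCREMENT */

static void reg_write(const char *reg, unsigned int val)
{
	printf("%-14s <= 0x%05x\n", reg, val);	/* models an MMIO/DSB write */
}

static void load_lut_ivb_style(unsigned int base, int lut_size)
{
	int i;

	for (i = 0; i < lut_size; i++) {
		reg_write("PREC_PAL_INDEX", base + i);	/* explicit index per entry */
		reg_write("PREC_PAL_DATA", i);		/* entry i payload */
	}
	reg_write("PREC_PAL_INDEX", 0);	/* reset, or the legacy palette breaks */
}

static void load_lut_bdw_style(unsigned int base, int lut_size)
{
	int i;

	reg_write("PREC_PAL_INDEX", base);			/* latch start index */
	reg_write("PREC_PAL_INDEX", base | AUTO_INCREMENT);	/* arm auto-increment */
	for (i = 0; i < lut_size; i++)
		reg_write("PREC_PAL_DATA", i);			/* index advances in hw */
	reg_write("PREC_PAL_INDEX", 0);
}

int main(void)
{
	load_lut_ivb_style(0, 4);	/* IVB/HSW sequence */
	load_lut_bdw_style(0, 4);	/* BDW+ sequence */
	return 0;
}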
static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
@@ -797,9 +1041,9 @@ static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
}
static void glk_load_lut_ext2_max(const struct intel_crtc_state *crtc_state)
@@ -808,31 +1052,30 @@ static void glk_load_lut_ext2_max(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, blob);
+ ilk_load_lut_8(crtc_state, blob);
break;
case GAMMA_MODE_MODE_SPLIT:
- ivb_load_lut_10(crtc, pre_csc_lut, PAL_PREC_SPLIT_MODE |
+ ivb_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
- ivb_load_lut_10(crtc, post_csc_lut, PAL_PREC_SPLIT_MODE |
+ ivb_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
- ivb_load_lut_10(crtc, blob,
+ ivb_load_lut_10(crtc_state, blob,
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
break;
@@ -844,25 +1087,23 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, blob);
+ ilk_load_lut_8(crtc_state, blob);
break;
case GAMMA_MODE_MODE_SPLIT:
- bdw_load_lut_10(crtc, pre_csc_lut, PAL_PREC_SPLIT_MODE |
+ bdw_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
- bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_SPLIT_MODE |
+ bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
-
- bdw_load_lut_10(crtc, blob,
+ bdw_load_lut_10(crtc_state, blob,
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
break;
@@ -894,9 +1135,11 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- intel_de_write_fw(i915, PRE_CSC_GAMC_INDEX(pipe), 0);
- intel_de_write_fw(i915, PRE_CSC_GAMC_INDEX(pipe),
- PRE_CSC_GAMC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_INDEX_VALUE(0));
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT |
+ PRE_CSC_GAMC_INDEX_VALUE(0));
for (i = 0; i < lut_size; i++) {
/*
@@ -912,32 +1155,31 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
 * TODO: Extend to a max of 7.0 by accepting a 32 bit input value
 * instead of just 16 bits.
*/
- intel_de_write_fw(i915, PRE_CSC_GAMC_DATA(pipe),
- lut[i].green);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(i915))
- intel_de_write_fw(i915, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
- intel_de_write_fw(i915, PRE_CSC_GAMC_INDEX(pipe), 0);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (pre_csc_lut)
glk_load_degamma_lut(crtc_state, pre_csc_lut);
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, post_csc_lut);
+ ilk_load_lut_8(crtc_state, post_csc_lut);
break;
case GAMMA_MODE_MODE_10BIT:
- bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
+ bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
glk_load_lut_ext2_max(crtc_state);
break;
@@ -955,9 +1197,9 @@ ivb_load_lut_max(const struct intel_crtc_state *crtc_state,
enum pipe pipe = crtc->pipe;
/* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
- intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
- intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
- intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
}
static void
@@ -976,17 +1218,23 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
}
static void
@@ -1009,14 +1257,19 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* seg2[0] being unused by the hardware.
*/
- intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ PAL_PREC_INDEX_VALUE(0));
+
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
@@ -1033,12 +1286,16 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
ivb_load_lut_max(crtc_state, entry);
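As a rough aid (not part of the patch), the sketch below works out which UAPI LUT entries the superfine/fine/coarse programming above samples, assuming the advertised ICL multi-segment LUT size of 8 * 128 * 256 + 1 entries spanning [0.0, 1.0].

#include <stdio.h>

int main(void)
{
	const int steps = 8 * 128 * 256;	/* 262144 steps, +1 entry for 1.0 */
	int i;

	for (i = 0; i <= 2; i++)		/* superfine: lut[i], i = 0..8 */
		printf("superfine[%d] -> input %d/%d\n", i, i, steps);
	for (i = 1; i <= 3; i++)		/* fine: lut[i * 8], i = 1..256 */
		printf("fine[%d]      -> input %d/%d\n", i, i * 8, steps);
	for (i = 0; i <= 2; i++)		/* coarse: lut[i * 8 * 128], i = 0..255 */
		printf("coarse[%d]    -> input %d/%d\n", i, i * 8 * 128, steps);
	/* the very last entry goes to GCMAX and represents input 1.0 */
	printf("gcmax        -> input %d/%d\n", 256 * 8 * 128, steps);
	return 0;
}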
@@ -1048,23 +1305,22 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (pre_csc_lut)
glk_load_degamma_lut(crtc_state, pre_csc_lut);
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, post_csc_lut);
+ ilk_load_lut_8(crtc_state, post_csc_lut);
break;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
ivb_load_lut_ext_max(crtc_state);
glk_load_lut_ext2_max(crtc_state);
break;
case GAMMA_MODE_MODE_10BIT:
- bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
+ bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
glk_load_lut_ext2_max(crtc_state);
break;
@@ -1073,7 +1329,11 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
}
- intel_dsb_commit(crtc_state);
+ if (crtc_state->dsb) {
+ intel_dsb_finish(crtc_state->dsb);
+ intel_dsb_commit(crtc_state->dsb, false);
+ intel_dsb_wait(crtc_state->dsb);
+ }
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1087,6 +1347,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_RED_UDW_MASK, drm_color_lut_extract(color->red, 14));
}
+static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, ldw), 14);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_BLUE_LDW_MASK, ldw), 14);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_RED_UDW_MASK, udw), 14);
+}
+
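A self-contained sketch (not part of the patch) of the round trip performed by the chv_cgm_degamma_ldw()/udw() writers and the new chv_cgm_degamma_pack() reader; extract14()/pack14() below only approximate drm_color_lut_extract() and intel_color_lut_pack() at the CGM degamma's 14-bit precision.

#include <stdio.h>
#include <stdint.h>

static uint32_t extract14(uint32_t user16)	/* 16-bit UAPI -> 14-bit hw */
{
	uint32_t v = (user16 + 2) >> 2;		/* round to 14 bits */

	return v > 0x3fff ? 0x3fff : v;
}

static uint32_t pack14(uint32_t hw14)		/* 14-bit hw -> 16-bit UAPI */
{
	return (hw14 * 0xffffu + 0x3fffu / 2) / 0x3fffu;
}

int main(void)
{
	uint32_t in = 0x8000, hw = extract14(in), out = pack14(hw);

	/* the round trip is lossy only by the 14-bit quantization step */
	printf("in=0x%04x hw=0x%04x out=0x%04x\n", in, hw, out);
	return 0;
}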
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -1182,6 +1449,36 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
}
+void intel_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (i915->display.funcs.color->color_post_update)
+ i915->display.funcs.color->color_post_update(crtc_state);
+}
+
+void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /* FIXME DSB has issues loading LUTs, disable it for now */
+ return;
+
+ if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
+ return;
+
+ crtc_state->dsb = intel_dsb_prepare(crtc, 1024);
+}
+
+void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsb)
+ return;
+
+ intel_dsb_cleanup(crtc_state->dsb);
+ crtc_state->dsb = NULL;
+}
+
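A rough model (not part of the patch) of the lifecycle the new prepare/cleanup hooks set up once the FIXME above is resolved: prepare allocates a DSB, .load_luts() queues register writes into it through ilk_lut_write(), icl_load_luts() finishes/commits/waits, and cleanup releases it. The fake_dsb type and helpers below are hypothetical stand-ins for the intel_dsb_*() API.

#include <stdio.h>
#include <stdlib.h>

struct fake_dsb {
	int nr_writes;				/* queued register writes */
};

static struct fake_dsb *dsb_prepare(void)	/* intel_color_prepare_commit() */
{
	return calloc(1, sizeof(struct fake_dsb));
}

static void dsb_reg_write(struct fake_dsb *dsb, unsigned int reg, unsigned int val)
{
	(void)reg;
	(void)val;
	dsb->nr_writes++;	/* the real code appends reg/val to the command buffer */
}

int main(void)
{
	struct fake_dsb *dsb = dsb_prepare();
	int i;

	if (!dsb)
		return 1;

	for (i = 0; i < 1024; i++)		/* .load_luts() via ilk_lut_write() */
		dsb_reg_write(dsb, 0xa000 + 4 * i, i);

	/* end of icl_load_luts(): finish, kick the DSB, wait for completion */
	printf("commit %d queued writes\n", dsb->nr_writes);

	free(dsb);				/* intel_color_cleanup_commit() */
	return 0;
}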
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -1224,8 +1521,25 @@ void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (i915->display.funcs.color->read_luts)
- i915->display.funcs.color->read_luts(crtc_state);
+ i915->display.funcs.color->read_luts(crtc_state);
+}
+
+bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * FIXME c8_planes readout missing thus
+ * .read_luts() doesn't read out post_csc_lut.
+ */
+ if (!is_pre_csc_lut && crtc_state->c8_planes)
+ return true;
+
+ return i915->display.funcs.color->lut_equal(crtc_state, blob1, blob2,
+ is_pre_csc_lut);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -1273,6 +1587,8 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane->id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
/* plane control register changes blocked by CxSR */
if (HAS_GMCH(i915))
@@ -1282,6 +1598,42 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return 0;
}
+static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ if (lut_is_legacy(gamma_lut))
+ return 0;
+
+ return INTEL_INFO(i915)->display.color.gamma_lut_tests;
+}
+
+static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return INTEL_INFO(i915)->display.color.degamma_lut_tests;
+}
+
+static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ if (lut_is_legacy(gamma_lut))
+ return LEGACY_LUT_LENGTH;
+
+ return INTEL_INFO(i915)->display.color.gamma_lut_size;
+}
+
+static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return INTEL_INFO(i915)->display.color.degamma_lut_size;
+}
+
static int check_lut_size(const struct drm_property_blob *lut, int expected)
{
int len;
@@ -1299,29 +1651,23 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
return 0;
}
-static int check_luts(const struct intel_crtc_state *crtc_state)
+static int _check_luts(const struct intel_crtc_state *crtc_state,
+ u32 degamma_tests, u32 gamma_tests)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
int gamma_length, degamma_length;
- u32 gamma_tests, degamma_tests;
-
- /* Always allow legacy gamma LUT with no further checking. */
- if (crtc_state_is_legacy_gamma(crtc_state))
- return 0;
/* C8 relies on its palette being stored in the legacy LUT */
- if (crtc_state->c8_planes) {
+ if (crtc_state->c8_planes && !lut_is_legacy(crtc_state->hw.gamma_lut)) {
drm_dbg_kms(&i915->drm,
"C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
}
- degamma_length = INTEL_INFO(i915)->display.color.degamma_lut_size;
- gamma_length = INTEL_INFO(i915)->display.color.gamma_lut_size;
- degamma_tests = INTEL_INFO(i915)->display.color.degamma_lut_tests;
- gamma_tests = INTEL_INFO(i915)->display.color.gamma_lut_tests;
+ degamma_length = intel_degamma_lut_size(crtc_state);
+ gamma_length = intel_gamma_lut_size(crtc_state);
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
@@ -1334,13 +1680,44 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
return 0;
}
+static int check_luts(const struct intel_crtc_state *crtc_state)
+{
+ return _check_luts(crtc_state,
+ intel_degamma_lut_tests(crtc_state),
+ intel_gamma_lut_tests(crtc_state));
+}
+
static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
return GAMMA_MODE_MODE_8BIT;
else
- return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int i9xx_lut_10_diff(u16 a, u16 b)
+{
+ return drm_color_lut_extract(a, 10) -
+ drm_color_lut_extract(b, 10);
+}
+
+static int i9xx_check_lut_10(struct drm_i915_private *dev_priv,
+ const struct drm_property_blob *blob)
+{
+ const struct drm_color_lut *lut = blob->data;
+ int lut_size = drm_color_lut_size(blob);
+ const struct drm_color_lut *a = &lut[lut_size - 2];
+ const struct drm_color_lut *b = &lut[lut_size - 1];
+
+ if (i9xx_lut_10_diff(b->red, a->red) > 0x7f ||
+ i9xx_lut_10_diff(b->green, a->green) > 0x7f ||
+ i9xx_lut_10_diff(b->blue, a->blue) > 0x7f) {
+ drm_dbg_kms(&dev_priv->drm, "Last gamma LUT entry exceeds max slope\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
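A small worked example (not part of the patch) of what the max-slope check above rejects for the gen2/3 10-bit gamma mode; extract10() below approximates drm_color_lut_extract(..., 10).

#include <stdio.h>

static int extract10(unsigned int v16)
{
	unsigned int v = (v16 + 0x20) >> 6;	/* round 16-bit UAPI value to 10 bits */

	return v > 0x3ff ? 0x3ff : v;
}

int main(void)
{
	/* gentle final step: 0xfc00 -> 0xffff gives 0x3f0 -> 0x3ff, diff 0xf: accepted */
	printf("diff=0x%x\n", extract10(0xffff) - extract10(0xfc00));
	/* steep final step: 0x8000 -> 0xffff gives 0x200 -> 0x3ff, diff 0x1ff: -EINVAL */
	printf("diff=0x%x\n", extract10(0xffff) - extract10(0x8000));
	return 0;
}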
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state)
@@ -1355,15 +1732,19 @@ void intel_color_assert_luts(const struct intel_crtc_state *crtc_state)
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
} else if (DISPLAY_VER(i915) == 10) {
drm_WARN_ON(&i915->drm,
+ crtc_state->post_csc_lut == crtc_state->hw.gamma_lut &&
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut);
drm_WARN_ON(&i915->drm,
+ !ilk_lut_limited_range(crtc_state) &&
+ crtc_state->post_csc_lut != NULL &&
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
} else if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) {
drm_WARN_ON(&i915->drm,
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut);
drm_WARN_ON(&i915->drm,
+ !ilk_lut_limited_range(crtc_state) &&
crtc_state->post_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
}
@@ -1379,6 +1760,7 @@ static void intel_assign_luts(struct intel_crtc_state *crtc_state)
static int i9xx_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
int ret;
ret = check_luts(crtc_state);
@@ -1391,6 +1773,13 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+ if (DISPLAY_VER(i915) < 4 &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) {
+ ret = i9xx_check_lut_10(i915, crtc_state->hw.gamma_lut);
+ if (ret)
+ return ret;
+ }
+
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;
@@ -1406,14 +1795,12 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
{
u32 cgm_mode = 0;
- if (crtc_state_is_legacy_gamma(crtc_state))
- return 0;
-
if (crtc_state->hw.degamma_lut)
cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
if (crtc_state->hw.ctm)
cgm_mode |= CGM_PIPE_MODE_CSC;
- if (crtc_state->hw.gamma_lut)
+ if (crtc_state->hw.gamma_lut &&
+ !lut_is_legacy(crtc_state->hw.gamma_lut))
cgm_mode |= CGM_PIPE_MODE_GAMMA;
return cgm_mode;
@@ -1440,7 +1827,7 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
* Otherwise we bypass it and use the CGM gamma instead.
*/
crtc_state->gamma_enable =
- crtc_state_is_legacy_gamma(crtc_state) &&
+ lut_is_legacy(crtc_state->hw.gamma_lut) &&
!crtc_state->c8_planes;
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
@@ -1475,7 +1862,7 @@ static bool ilk_csc_enable(const struct intel_crtc_state *crtc_state)
static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
return GAMMA_MODE_MODE_8BIT;
else
return GAMMA_MODE_MODE_10BIT;
@@ -1499,8 +1886,28 @@ static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
CSC_POSITION_BEFORE_GAMMA;
}
-static void ilk_assign_luts(struct intel_crtc_state *crtc_state)
+static int ilk_assign_luts(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (ilk_lut_limited_range(crtc_state)) {
+ struct drm_property_blob *gamma_lut;
+
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ drm_color_lut_size(crtc_state->hw.gamma_lut),
+ true);
+ if (IS_ERR(gamma_lut))
+ return PTR_ERR(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut);
+
+ drm_property_blob_put(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut);
+
+ return 0;
+ }
+
if (crtc_state->hw.degamma_lut ||
crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) {
drm_property_replace_blob(&crtc_state->pre_csc_lut,
@@ -1513,6 +1920,8 @@ static void ilk_assign_luts(struct intel_crtc_state *crtc_state)
drm_property_replace_blob(&crtc_state->post_csc_lut,
NULL);
}
+
+ return 0;
}
static int ilk_color_check(struct intel_crtc_state *crtc_state)
@@ -1549,7 +1958,9 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
- ilk_assign_luts(crtc_state);
+ ret = ilk_assign_luts(crtc_state);
+ if (ret)
+ return ret;
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
@@ -1585,19 +1996,19 @@ static int ivb_assign_luts(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct drm_property_blob *degamma_lut, *gamma_lut;
- if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) {
- ilk_assign_luts(crtc_state);
- return 0;
- }
+ if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT)
+ return ilk_assign_luts(crtc_state);
drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024);
drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024);
- degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512);
+ degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512,
+ false);
if (IS_ERR(degamma_lut))
return PTR_ERR(degamma_lut);
- gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512);
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512,
+ ilk_lut_limited_range(crtc_state));
if (IS_ERR(gamma_lut)) {
drm_property_blob_put(degamma_lut);
return PTR_ERR(gamma_lut);
@@ -1621,6 +2032,12 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ if (crtc_state->c8_planes && crtc_state->hw.degamma_lut) {
+ drm_dbg_kms(&i915->drm,
+ "C8 pixelformat and degamma together are not possible\n");
+ return -EINVAL;
+ }
+
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.ctm) {
drm_dbg_kms(&i915->drm,
@@ -1659,17 +2076,57 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
return GAMMA_MODE_MODE_8BIT;
else
return GAMMA_MODE_MODE_10BIT;
}
-static void glk_assign_luts(struct intel_crtc_state *crtc_state)
+static bool glk_use_pre_csc_lut_for_gamma(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->hw.gamma_lut &&
+ !crtc_state->c8_planes &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
+}
+
+static int glk_assign_luts(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- intel_assign_luts(crtc_state);
+ if (glk_use_pre_csc_lut_for_gamma(crtc_state)) {
+ struct drm_property_blob *gamma_lut;
+
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ INTEL_INFO(i915)->display.color.degamma_lut_size,
+ false);
+ if (IS_ERR(gamma_lut))
+ return PTR_ERR(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->pre_csc_lut, gamma_lut);
+ drm_property_replace_blob(&crtc_state->post_csc_lut, NULL);
+
+ drm_property_blob_put(gamma_lut);
+
+ return 0;
+ }
+
+ if (ilk_lut_limited_range(crtc_state)) {
+ struct drm_property_blob *gamma_lut;
+
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ drm_color_lut_size(crtc_state->hw.gamma_lut),
+ true);
+ if (IS_ERR(gamma_lut))
+ return PTR_ERR(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut);
+
+ drm_property_blob_put(gamma_lut);
+ } else {
+ drm_property_replace_blob(&crtc_state->post_csc_lut, crtc_state->hw.gamma_lut);
+ }
+
+ drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut);
/*
* On GLK+ both pipe CSC and degamma LUT are controlled
@@ -1680,6 +2137,19 @@ static void glk_assign_luts(struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable && !crtc_state->pre_csc_lut)
drm_property_replace_blob(&crtc_state->pre_csc_lut,
i915->display.color.glk_linear_degamma_lut);
+
+ return 0;
+}
+
+static int glk_check_luts(const struct intel_crtc_state *crtc_state)
+{
+ u32 degamma_tests = intel_degamma_lut_tests(crtc_state);
+ u32 gamma_tests = intel_gamma_lut_tests(crtc_state);
+
+ if (glk_use_pre_csc_lut_for_gamma(crtc_state))
+ gamma_tests |= degamma_tests;
+
+ return _check_luts(crtc_state, degamma_tests, gamma_tests);
}
static int glk_color_check(struct intel_crtc_state *crtc_state)
@@ -1687,7 +2157,7 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
int ret;
- ret = check_luts(crtc_state);
+ ret = glk_check_luts(crtc_state);
if (ret)
return ret;
@@ -1706,14 +2176,16 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
}
crtc_state->gamma_enable =
+ !glk_use_pre_csc_lut_for_gamma(crtc_state) &&
crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/* On GLK+ degamma LUT is controlled by csc_enable */
crtc_state->csc_enable =
+ glk_use_pre_csc_lut_for_gamma(crtc_state) ||
crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->hw.ctm || crtc_state->limited_color_range;
+ crtc_state->hw.ctm || ilk_csc_limited_range(crtc_state);
crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
@@ -1723,7 +2195,9 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
- glk_assign_luts(crtc_state);
+ ret = glk_assign_luts(crtc_state);
+ if (ret)
+ return ret;
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
@@ -1744,7 +2218,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
gamma_mode |= POST_CSC_GAMMA_ENABLE;
if (!crtc_state->hw.gamma_lut ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
/*
* Enable 10bit gamma for D13
@@ -1754,7 +2228,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
else if (DISPLAY_VER(i915) >= 13)
gamma_mode |= GAMMA_MODE_MODE_10BIT;
else
- gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
+ gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEG;
return gamma_mode;
}
@@ -1792,68 +2266,153 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
return 0;
}
-static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int i9xx_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return 0;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
- return 16;
+ return 10;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
-static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int i9xx_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->gamma_enable)
- return 0;
+ return 0;
+}
- if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+static int i965_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return 0;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
- return 10;
+ return 16;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
-static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int ilk_gamma_mode_precision(u32 gamma_mode)
{
- if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
return 10;
- else
- return i9xx_gamma_precision(crtc_state);
+ default:
+ MISSING_CASE(gamma_mode);
+ return 0;
+ }
+}
+
+static bool ilk_has_post_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->c8_planes)
+ return true;
+
+ return crtc_state->gamma_enable &&
+ (crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) != 0;
+}
+
+static bool ilk_has_pre_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->gamma_enable &&
+ (crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0;
}
-static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int ilk_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->gamma_enable)
+ if (!ilk_has_post_csc_lut(crtc_state))
return 0;
- switch (crtc_state->gamma_mode) {
- case GAMMA_MODE_MODE_8BIT:
- return 8;
- case GAMMA_MODE_MODE_10BIT:
+ return ilk_gamma_mode_precision(crtc_state->gamma_mode);
+}
+
+static int ilk_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!ilk_has_pre_csc_lut(crtc_state))
+ return 0;
+
+ return ilk_gamma_mode_precision(crtc_state->gamma_mode);
+}
+
+static int ivb_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return 10;
- default:
- MISSING_CASE(crtc_state->gamma_mode);
+
+ return ilk_post_csc_lut_precision(crtc_state);
+}
+
+static int ivb_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return 10;
+
+ return ilk_pre_csc_lut_precision(crtc_state);
+}
+
+static int chv_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+
+ return i965_post_csc_lut_precision(crtc_state);
+}
+
+static int chv_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
+ return 14;
+
+ return 0;
+}
+
+static int glk_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return 0;
- }
+
+ return ilk_gamma_mode_precision(crtc_state->gamma_mode);
}
-static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int glk_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ if (!crtc_state->csc_enable)
+ return 0;
+
+ return 16;
+}
+
+static bool icl_has_post_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->c8_planes)
+ return true;
+
+ return crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE;
+}
+
+static bool icl_has_pre_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->gamma_mode & PRE_CSC_GAMMA_ENABLE;
+}
+
+static int icl_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!icl_has_post_csc_lut(crtc_state))
return 0;
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -1861,7 +2420,7 @@ static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
return 8;
case GAMMA_MODE_MODE_10BIT:
return 10;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
return 16;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -1869,26 +2428,12 @@ static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
}
}
-int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+static int icl_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (HAS_GMCH(i915)) {
- if (IS_CHERRYVIEW(i915))
- return chv_gamma_precision(crtc_state);
- else
- return i9xx_gamma_precision(crtc_state);
- } else {
- if (DISPLAY_VER(i915) >= 11)
- return icl_gamma_precision(crtc_state);
- else if (DISPLAY_VER(i915) == 10)
- return glk_gamma_precision(crtc_state);
- else if (IS_IRONLAKE(i915))
- return ilk_gamma_precision(crtc_state);
- }
+ if (!icl_has_pre_csc_lut(crtc_state))
+ return 0;
- return 0;
+ return 16;
}
static bool err_check(struct drm_color_lut *lut1,
@@ -1899,9 +2444,9 @@ static bool err_check(struct drm_color_lut *lut1,
((abs((long)lut2->green - lut1->green)) <= err);
}
-static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
- struct drm_color_lut *lut2,
- int lut_size, u32 err)
+static bool intel_lut_entries_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
{
int i;
@@ -1913,9 +2458,9 @@ static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
return true;
}
-bool intel_color_lut_equal(struct drm_property_blob *blob1,
- struct drm_property_blob *blob2,
- u32 gamma_mode, u32 bit_precision)
+static bool intel_lut_equal(const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ int check_size, int precision)
{
struct drm_color_lut *lut1, *lut2;
int lut_size1, lut_size2;
@@ -1924,40 +2469,134 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
if (!blob1 != !blob2)
return false;
+ if (!blob1 != !precision)
+ return false;
+
if (!blob1)
return true;
lut_size1 = drm_color_lut_size(blob1);
lut_size2 = drm_color_lut_size(blob2);
- /* check sw and hw lut size */
if (lut_size1 != lut_size2)
return false;
+ if (check_size > lut_size1)
+ return false;
+
lut1 = blob1->data;
lut2 = blob2->data;
- err = 0xffff >> bit_precision;
+ err = 0xffff >> precision;
- /* check sw and hw lut entry to be equal */
- switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
- case GAMMA_MODE_MODE_8BIT:
- case GAMMA_MODE_MODE_10BIT:
- if (!intel_color_lut_entries_equal(lut1, lut2,
- lut_size2, err))
- return false;
- break;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
- if (!intel_color_lut_entries_equal(lut1, lut2,
- 9, err))
- return false;
- break;
- default:
- MISSING_CASE(gamma_mode);
- return false;
- }
+ if (!check_size)
+ check_size = lut_size1;
- return true;
+ return intel_lut_entries_equal(lut1, lut2, check_size, err);
+}
+
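A short sketch (not part of the patch) of the tolerance intel_lut_equal() applies: a LUT stored with N bits of hardware precision can legitimately differ from the 16-bit UAPI value by up to 0xffff >> N per channel, which is the bound err_check() uses.

#include <stdio.h>
#include <stdlib.h>

static int channels_equal(unsigned int a, unsigned int b, int precision)
{
	unsigned int err = 0xffff >> precision;	/* quantization error budget */

	return (unsigned int)abs((int)a - (int)b) <= err;
}

int main(void)
{
	/* 10-bit hw: err = 0x3f, so 0x8000 vs 0x8030 still counts as equal */
	printf("%d\n", channels_equal(0x8000, 0x8030, 10));
	/* ...but 0x8000 vs 0x8100 does not */
	printf("%d\n", channels_equal(0x8000, 0x8100, 10));
	return 0;
}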
+static bool i9xx_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ int check_size = 0;
+
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ i9xx_pre_csc_lut_precision(crtc_state));
+
+ /* 10bit mode last entry is implicit, just skip it */
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT)
+ check_size = 128;
+
+ return intel_lut_equal(blob1, blob2, check_size,
+ i9xx_post_csc_lut_precision(crtc_state));
+}
+
+static bool i965_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ i9xx_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ i965_post_csc_lut_precision(crtc_state));
+}
+
+static bool chv_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ chv_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ chv_post_csc_lut_precision(crtc_state));
+}
+
+static bool ilk_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ ilk_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ ilk_post_csc_lut_precision(crtc_state));
+}
+
+static bool ivb_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ ivb_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ ivb_post_csc_lut_precision(crtc_state));
+}
+
+static bool glk_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ glk_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ glk_post_csc_lut_precision(crtc_state));
+}
+
+static bool icl_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ int check_size = 0;
+
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ icl_pre_csc_lut_precision(crtc_state));
+
+ /* hw readout broken except for the super fine segment :( */
+ if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_12BIT_MULTI_SEG)
+ check_size = 9;
+
+ return intel_lut_equal(blob1, blob2, check_size,
+ icl_post_csc_lut_precision(crtc_state));
}
static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
@@ -1985,14 +2624,53 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
return blob;
}
+static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ u32 ldw, udw;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ lut_size * sizeof(lut[0]), NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
+ udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i9xx_lut_10_pack(&lut[i], ldw, udw);
+ }
+
+ i9xx_lut_10_pack_slope(&lut[i], ldw, udw);
+
+ return blob;
+}
+
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
- crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ crtc_state->post_csc_lut = i9xx_read_lut_10(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
}
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
@@ -2029,13 +2707,46 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
- if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
- else
+ break;
+ case GAMMA_MODE_MODE_10BIT:
crtc_state->post_csc_lut = i965_read_lut_10p6(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(lut[0]) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0));
+ u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1));
+
+ chv_cgm_degamma_pack(&lut[i], ldw, udw);
+ }
+
+ return blob;
}
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
@@ -2068,6 +2779,9 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
+ crtc_state->pre_csc_lut = chv_read_cgm_degamma(crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
crtc_state->post_csc_lut = chv_read_cgm_gamma(crtc);
else
@@ -2127,19 +2841,88 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_property_blob **blob =
+ ilk_has_post_csc_lut(crtc_state) ?
+ &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut;
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
- if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ *blob = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ *blob = ilk_read_lut_10(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ * supported and must not be enabled."
+ */
+static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = ivb_lut_10_size(prec_index);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(lut[0]) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u32 val;
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index + i);
+ val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
+ }
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+
+ return blob;
+}
+
+static void ivb_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_property_blob **blob =
+ ilk_has_post_csc_lut(crtc_state) ?
+ &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut;
+
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- crtc_state->post_csc_lut = ilk_read_lut_8(crtc);
+ *blob = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_SPLIT:
+ crtc_state->pre_csc_lut =
+ ivb_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ crtc_state->post_csc_lut =
+ ivb_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
- crtc_state->post_csc_lut = ilk_read_lut_10(crtc);
+ *blob = ivb_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -2152,14 +2935,11 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
u32 prec_index)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- int i, hw_lut_size = ivb_lut_10_size(prec_index);
- int lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size;
+ int i, lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- drm_WARN_ON(&i915->drm, lut_size != hw_lut_size);
-
blob = drm_property_create_blob(&i915->drm,
sizeof(lut[0]) * lut_size,
NULL);
@@ -2169,7 +2949,10 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
lut = blob->data;
intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
- prec_index | PAL_PREC_AUTO_INCREMENT);
+ prec_index);
+ intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ prec_index);
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read_fw(i915, PREC_PAL_DATA(pipe));
@@ -2177,7 +2960,80 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
ilk_lut_10_pack(&lut[i], val);
}
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), 0);
+ intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+
+ return blob;
+}
+
+static void bdw_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_property_blob **blob =
+ ilk_has_post_csc_lut(crtc_state) ?
+ &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut;
+
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
+ return;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ *blob = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_SPLIT:
+ crtc_state->pre_csc_lut =
+ bdw_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ crtc_state->post_csc_lut =
+ bdw_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ *blob = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(lut[0]) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ /*
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
+ */
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_INDEX_VALUE(0));
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT |
+ PRE_CSC_GAMC_INDEX_VALUE(0));
+
+ for (i = 0; i < lut_size; i++) {
+ u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
+
+ lut[i].red = val;
+ lut[i].green = val;
+ lut[i].blue = val;
+ }
+
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_INDEX_VALUE(0));
return blob;
}
@@ -2186,7 +3042,10 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (!crtc_state->gamma_enable)
+ if (crtc_state->csc_enable)
+ crtc_state->pre_csc_lut = glk_read_degamma_lut(crtc);
+
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
switch (crtc_state->gamma_mode) {
@@ -2220,7 +3079,10 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
lut = blob->data;
intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
+ intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_AUTO_INCREMENT |
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
for (i = 0; i < 9; i++) {
u32 ldw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe));
@@ -2229,7 +3091,8 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
ilk_lut_12p4_pack(&lut[i], ldw, udw);
}
- intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+ intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
/*
* FIXME readouts from PAL_PREC_DATA register aren't giving
@@ -2244,7 +3107,10 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ if (icl_has_pre_csc_lut(crtc_state))
+ crtc_state->pre_csc_lut = glk_read_degamma_lut(crtc);
+
+ if (!icl_has_post_csc_lut(crtc_state))
return;
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -2254,7 +3120,7 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
case GAMMA_MODE_MODE_10BIT:
crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
break;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
crtc_state->post_csc_lut = icl_read_lut_multi_segment(crtc);
break;
default:
@@ -2268,6 +3134,7 @@ static const struct intel_color_funcs chv_color_funcs = {
.color_commit_arm = i9xx_color_commit_arm,
.load_luts = chv_load_luts,
.read_luts = chv_read_luts,
+ .lut_equal = chv_lut_equal,
};
static const struct intel_color_funcs i965_color_funcs = {
@@ -2275,6 +3142,7 @@ static const struct intel_color_funcs i965_color_funcs = {
.color_commit_arm = i9xx_color_commit_arm,
.load_luts = i965_load_luts,
.read_luts = i965_read_luts,
+ .lut_equal = i965_lut_equal,
};
static const struct intel_color_funcs i9xx_color_funcs = {
@@ -2282,30 +3150,44 @@ static const struct intel_color_funcs i9xx_color_funcs = {
.color_commit_arm = i9xx_color_commit_arm,
.load_luts = i9xx_load_luts,
.read_luts = i9xx_read_luts,
+ .lut_equal = i9xx_lut_equal,
+};
+
+static const struct intel_color_funcs tgl_color_funcs = {
+ .color_check = icl_color_check,
+ .color_commit_noarm = icl_color_commit_noarm,
+ .color_commit_arm = icl_color_commit_arm,
+ .load_luts = icl_load_luts,
+ .read_luts = icl_read_luts,
+ .lut_equal = icl_lut_equal,
};
static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
- .color_commit_arm = skl_color_commit_arm,
+ .color_commit_arm = icl_color_commit_arm,
+ .color_post_update = icl_color_post_update,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
+ .lut_equal = icl_lut_equal,
};
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
- .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
+ .lut_equal = glk_lut_equal,
};
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
- .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
- .read_luts = NULL,
+ .read_luts = bdw_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs bdw_color_funcs = {
@@ -2313,7 +3195,8 @@ static const struct intel_color_funcs bdw_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = hsw_color_commit_arm,
.load_luts = bdw_load_luts,
- .read_luts = NULL,
+ .read_luts = bdw_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs hsw_color_funcs = {
@@ -2321,7 +3204,8 @@ static const struct intel_color_funcs hsw_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = hsw_color_commit_arm,
.load_luts = ivb_load_luts,
- .read_luts = NULL,
+ .read_luts = ivb_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs ivb_color_funcs = {
@@ -2329,7 +3213,8 @@ static const struct intel_color_funcs ivb_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = ilk_color_commit_arm,
.load_luts = ivb_load_luts,
- .read_luts = NULL,
+ .read_luts = ivb_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs ilk_color_funcs = {
@@ -2338,19 +3223,34 @@ static const struct intel_color_funcs ilk_color_funcs = {
.color_commit_arm = ilk_color_commit_arm,
.load_luts = ilk_load_luts,
.read_luts = ilk_read_luts,
+ .lut_equal = ilk_lut_equal,
};
void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- bool has_ctm = INTEL_INFO(i915)->display.color.degamma_lut_size != 0;
+ int degamma_lut_size, gamma_lut_size;
+ bool has_ctm;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(i915)->display.color.degamma_lut_size,
- has_ctm,
- INTEL_INFO(i915)->display.color.gamma_lut_size);
+ gamma_lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size;
+ degamma_lut_size = INTEL_INFO(i915)->display.color.degamma_lut_size;
+ has_ctm = degamma_lut_size != 0;
+
+ /*
+ * "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the
+ * only mode supported by Alviso and Grantsdale."
+ *
+ * Actually looks like this affects all of gen3.
+ * Confirmed on alv,cst,pnv. Mobile gen2 parts (alm,mgm)
+ * are confirmed not to suffer from this restriction.
+ */
+ if (DISPLAY_VER(i915) == 3 && crtc->pipe == PIPE_A)
+ gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(&crtc->base, degamma_lut_size,
+ has_ctm, gamma_lut_size);
}
int intel_color_init(struct drm_i915_private *i915)
@@ -2379,7 +3279,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
else
i915->display.funcs.color = &i9xx_color_funcs;
} else {
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 12)
+ i915->display.funcs.color = &tgl_color_funcs;
+ else if (DISPLAY_VER(i915) == 11)
i915->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(i915) == 10)
i915->display.funcs.color = &glk_color_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 2a5ada67774d..8002492be709 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -17,14 +17,17 @@ void intel_color_init_hooks(struct drm_i915_private *i915);
int intel_color_init(struct drm_i915_private *i915);
void intel_color_crtc_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
+void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
-int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
-bool intel_color_lut_equal(struct drm_property_blob *blob1,
- struct drm_property_blob *blob2,
- u32 gamma_mode, u32 bit_precision);
+bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut);
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 8b870b2dd4f9..922a6d87b553 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -78,14 +78,11 @@ static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
enum phy phy)
{
const struct icl_procmon *procmon;
- u32 val;
procmon = icl_get_procmon_ref_values(dev_priv, phy);
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy),
+ (0xff << 16) | 0xff, procmon->dw1);
intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
@@ -236,8 +233,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
- DCC_MODE_SELECT_MASK,
- DCC_MODE_SELECT_CONTINUOSLY);
+ DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
}
ret &= icl_verify_procmon_ref_values(dev_priv, phy);
@@ -267,7 +263,6 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
- u32 val;
if (is_dsi) {
drm_WARN_ON(&dev_priv->drm, lane_reversal);
@@ -308,10 +303,8 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
- val &= ~PWR_DOWN_LN_MASK;
- val |= lane_mask;
- intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy),
+ PWR_DOWN_LN_MASK, lane_mask);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -360,25 +353,19 @@ skip_phy_misc:
val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
- val |= DCC_MODE_SELECT_CONTINUOSLY;
+ val |= RUN_DCC_ONCE;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
}
icl_set_procmon_ref_values(dev_priv, phy);
- if (phy_is_master(dev_priv, phy)) {
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
- val |= IREFGEN;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
- }
-
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val |= COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ if (phy_is_master(dev_priv, phy))
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy),
+ 0, IREFGEN);
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- val |= CL_POWER_DOWN_ENABLE;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ 0, CL_POWER_DOWN_ENABLE);
}
}
@@ -387,8 +374,6 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
enum phy phy;
for_each_combo_phy_reverse(dev_priv, phy) {
- u32 val;
-
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy)) {
if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
@@ -410,14 +395,11 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
- val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+ intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0,
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN);
skip_phy_misc:
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val &= ~COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
}
}
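
Most of the combo PHY hunks are mechanical conversions from open-coded read/modify/write sequences to intel_de_rmw(reg, clear, set). A sketch of that pattern over a plain array standing in for MMIO space (names here are illustrative, not the i915 helpers):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4];	/* pretend MMIO space */

static uint32_t reg_rmw(unsigned int reg, uint32_t clear, uint32_t set)
{
	uint32_t old = regs[reg];

	regs[reg] = (old & ~clear) | set;
	return old;	/* intel_de_rmw() likewise returns the old value now */
}

int main(void)
{
	regs[0] = 0xffff0000;

	reg_rmw(0, (0xff << 16) | 0xff, 0x12);	/* replace a field */
	reg_rmw(0, 0, 1u << 5);			/* pure set */
	reg_rmw(0, 1u << 5, 0);			/* pure clear */

	printf("0x%08x\n", regs[0]);
	return 0;
}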
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 2ed65193ca19..b0983edccf3f 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -90,8 +90,8 @@
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
-#define DCC_MODE_SELECT_MASK (0x3 << 20)
-#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
+#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
+#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
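
The register header change swaps hand-rolled shift/mask constants for REG_GENMASK()/REG_FIELD_PREP(). Simplified userspace equivalents of those two macros (no compile-time checks, GCC/Clang __builtin_ctz assumed) show that RUN_DCC_ONCE is simply field value 0 inside bits 21:20:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DCC_MODE_SELECT_MASK	GENMASK32(21, 20)
#define RUN_DCC_ONCE		FIELD_PREP32(DCC_MODE_SELECT_MASK, 0)

int main(void)
{
	/* prints mask=0x00300000 once=0x00000000 */
	printf("mask=0x%08x once=0x%08x\n",
	       DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
	return 0;
}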
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 6205ddd3ded0..257afac34839 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector)
__drm_atomic_helper_connector_reset(&connector->base,
&conn_state->base);
- INIT_LIST_HEAD(&connector->panel.fixed_modes);
+ intel_panel_init_alloc(connector);
return 0;
}
@@ -95,13 +95,10 @@ void intel_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- kfree(intel_connector->detect_edid);
+ drm_edid_free(intel_connector->detect_edid);
intel_hdcp_cleanup(intel_connector);
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
intel_panel_fini(intel_connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 797ad9489f7e..38e9c61c2344 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -44,6 +44,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -260,7 +261,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
@@ -300,7 +301,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
hsw_fdi_link_train(encoder, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
}
static void hsw_enable_crt(struct intel_atomic_state *state,
@@ -678,59 +679,50 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
+intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
u32 vsample;
u32 vblank, vblank_start, vblank_end;
u32 dsl;
- i915_reg_t bclrpat_reg, vtotal_reg,
- vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
u8 st00;
enum drm_connector_status status;
drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
- bclrpat_reg = BCLRPAT(pipe);
- vtotal_reg = VTOTAL(pipe);
- vblank_reg = VBLANK(pipe);
- vsync_reg = VSYNC(pipe);
- pipeconf_reg = PIPECONF(pipe);
- pipe_dsl_reg = PIPEDSL(pipe);
+ save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder));
+ save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
- save_bclrpat = intel_uncore_read(uncore, bclrpat_reg);
- save_vtotal = intel_uncore_read(uncore, vtotal_reg);
- vblank = intel_uncore_read(uncore, vblank_reg);
+ vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1;
+ vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1;
- vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
- vactive = (save_vtotal & 0x7ff) + 1;
-
- vblank_start = (vblank & 0xfff) + 1;
- vblank_end = ((vblank >> 16) & 0xfff) + 1;
+ vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1;
+ vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1;
/* Set the border color to purple. */
- intel_uncore_write(uncore, bclrpat_reg, 0x500050);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050);
if (DISPLAY_VER(dev_priv) != 2) {
- u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
- intel_uncore_write(uncore,
- pipeconf_reg,
- pipeconf | PIPECONF_FORCE_BORDER);
- intel_uncore_posting_read(uncore, pipeconf_reg);
+ u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
+
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ transconf | TRANSCONF_FORCE_BORDER);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
 /* Wait for next Vblank to substitute
 * border color for Color info */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
- st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
+ st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
- intel_uncore_write(uncore, pipeconf_reg, pipeconf);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -740,14 +732,13 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = intel_de_read(dev_priv, vsync_reg);
- u32 vsync_start = (vsync & 0xffff) + 1;
+ u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1;
vblank_start = vsync_start;
- intel_uncore_write(uncore,
- vblank_reg,
- (vblank_start - 1) |
- ((vblank_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(vblank_start - 1) |
+ VBLANK_END(vblank_end - 1));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -759,10 +750,9 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/*
* Wait for the border to be displayed
*/
- while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive)
+ while (intel_de_read(dev_priv, PIPEDSL(pipe)) >= vactive)
;
- while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <=
- vsample)
+ while ((dsl = intel_de_read(dev_priv, PIPEDSL(pipe))) <= vsample)
;
/*
* Watch ST00 for an entire scanline
@@ -772,14 +762,14 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
do {
count++;
/* Read the ST00 VGA status register */
- st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
+ st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
- } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl));
+ } while ((intel_de_read(dev_priv, PIPEDSL(pipe)) == dsl));
/* restore vblank if necessary */
if (restore_vblank)
- intel_uncore_write(uncore, vblank_reg, vblank);
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -792,7 +782,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- intel_uncore_write(uncore, bclrpat_reg, save_bclrpat);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat);
return status;
}
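
The gen2 branch of intel_crt_load_detect() keeps its original decision rule: sample the ST00 status bit once per loop iteration for one scanline and report a monitor when more than 3/4 of the samples saw it. A small simulation of that threshold, assuming a sample() stub in place of the VGA register read:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool sample(void)
{
	return (rand() % 100) < 85;	/* pretend 85% of samples see a load */
}

int main(void)
{
	int count, detect = 0;

	for (count = 0; count < 1000; count++)
		if (sample())
			detect++;

	/* "more than 3/4 of the scanline detected a monitor" */
	printf("%s\n", detect * 4 > count * 3 ?
	       "connected" : "disconnected");
	return 0;
}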
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 037fc140b585..ed45a6934854 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -25,9 +25,11 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -210,7 +212,7 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
- intel_crtc_debugfs_add(crtc);
+ intel_crtc_debugfs_add(to_intel_crtc(crtc));
return 0;
}
@@ -313,6 +315,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
}
crtc->plane_ids_mask |= BIT(primary->id);
+ intel_init_fifo_underrun_reporting(dev_priv, crtc, false);
+
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -682,6 +686,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/
intel_vrr_send_push(new_crtc_state);
+ /*
+ * Seamless M/N update may need to update frame timings.
+ *
+ * FIXME Should be synchronized with the start of vblank somehow...
+ */
+ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+ intel_crtc_update_active_timings(new_crtc_state);
+
local_irq_enable();
if (intel_vgpu_active(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index e3273fe8ddac..54c8adc0702e 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
@@ -12,14 +14,16 @@
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
+ drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
static void
@@ -56,6 +60,17 @@ intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
}
+static void
+intel_dump_buffer(struct drm_i915_private *i915,
+ const char *prefix, const u8 *buf, size_t len)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
+ 16, 0, buf, len, false);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -108,7 +123,7 @@ static const char * const output_format_str[] = {
[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
-static const char *output_formats(enum intel_output_format format)
+const char *intel_output_format_name(enum intel_output_format format)
{
if (format >= ARRAY_SIZE(output_format_str))
return "invalid";
@@ -166,7 +181,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
"active: %s, output_types: %s (0x%x), output format: %s\n",
str_yes_no(pipe_config->hw.active),
buf, pipe_config->output_types,
- output_formats(pipe_config->output_format));
+ intel_output_format_name(pipe_config->output_format));
drm_dbg_kms(&i915->drm,
"cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
@@ -236,6 +251,10 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+ if (pipe_config->has_audio)
+ intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
+ drm_eld_size(pipe_config->eld));
+
drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
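
intel_dump_buffer() above is just a debug-gated hex dump of the ELD bytes. A self-contained sketch of the same idea, with debug_enabled() and a hand-rolled 16-bytes-per-row loop standing in for drm_debug_enabled() and print_hex_dump():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool debug_enabled(void)
{
	return true;	/* stand-in for drm_debug_enabled(DRM_UT_KMS) */
}

static void dump_buffer(const char *prefix, const uint8_t *buf, size_t len)
{
	size_t i;

	if (!debug_enabled())
		return;

	for (i = 0; i < len; i++)
		printf("%s%02x%c", i % 16 ? "" : prefix, buf[i],
		       (i % 16 == 15 || i == len - 1) ? '\n' : ' ');
}

int main(void)
{
	uint8_t eld[20] = { 0x10, 0x00, 0x08, 0x00 };	/* fake ELD bytes */

	dump_buffer("ELD: ", eld, sizeof(eld));
	return 0;
}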
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
index 9399c35b7e5e..780f3f1190d7 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
@@ -8,9 +8,11 @@
struct intel_crtc_state;
struct intel_atomic_state;
+enum intel_output_format;
void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state,
struct intel_atomic_state *state,
const char *context);
+const char *intel_output_format_name(enum intel_output_format format);
#endif /* __INTEL_CRTC_STATE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index d190fa0d393b..31bef0427377 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -21,7 +21,6 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
#include "skl_watermark.h"
/* Cursor formats */
@@ -532,9 +531,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0f1ec2a98cc8..3a7b98837516 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -47,6 +47,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -64,9 +65,9 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
-#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -89,7 +90,7 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
{
int level;
- level = intel_bios_hdmi_level_shift(encoder);
+ level = intel_bios_hdmi_level_shift(encoder->devdata);
if (level < 0)
level = trans->hdmi_default_entry;
@@ -126,7 +127,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_dp_boost_level(encoder->devdata))
+ intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -158,7 +159,7 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_hdmi_boost_level(encoder->devdata))
+ intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -185,6 +186,8 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
enum port port)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ int timeout_us;
int ret;
/* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
@@ -193,8 +196,19 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
return;
}
+ if (IS_DG2(dev_priv)) {
+ timeout_us = 1200;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_phy_is_tc(dev_priv, phy))
+ timeout_us = 3000;
+ else
+ timeout_us = 1000;
+ } else {
+ timeout_us = 500;
+ }
+
ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE), IS_DG2(dev_priv) ? 1200 : 500, 10, 10);
+ DDI_BUF_IS_IDLE), timeout_us, 10, 10);
if (ret)
drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
@@ -631,19 +645,14 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
- u32 tmp;
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (enable)
- tmp |= hdcp_mask;
- else
- tmp &= ~hdcp_mask;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -935,8 +944,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
main_link_aux_power_domain_get(dig_port, crtc_state);
}
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -944,33 +953,34 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 13)
- val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
- val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
- else
- val = TRANS_CLK_SEL_PORT(encoder->port);
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
- }
+ if (DISPLAY_VER(dev_priv) >= 13)
+ val = TGL_TRANS_CLK_SEL_PORT(phy);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
+ else
+ val = TRANS_CLK_SEL_PORT(encoder->port);
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
- else
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
- }
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_DISABLED;
+ else
+ val = TRANS_CLK_SEL_DISABLED;
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -996,9 +1006,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata);
+ iboost = intel_bios_hdmi_boost_level(encoder->devdata);
else
- iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
+ iboost = intel_bios_dp_boost_level(encoder->devdata);
if (iboost == 0) {
const struct intel_ddi_buf_trans *trans;
@@ -2187,15 +2197,13 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val |= DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ 0, DP_TP_CTL_FEC_ENABLE);
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2203,15 +2211,13 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_FEC_ENABLE, 0);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
@@ -2374,7 +2380,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
if (HAS_DP20(dev_priv))
intel_ddi_config_transcoder_dp2(encoder, crtc_state);
@@ -2501,7 +2507,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2513,6 +2519,10 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ if (HAS_DP20(dev_priv))
+ intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
+ crtc_state);
+
if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
@@ -2543,7 +2553,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -2609,12 +2619,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
- }
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_ENABLE, 0);
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -2647,19 +2654,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 12) {
if (is_mst) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- u32 val;
- val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
- TRANS_DDI_MODE_SELECT_MASK);
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder),
- val);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
+ 0);
}
} else {
if (!is_mst)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
}
intel_disable_ddi_buf(encoder, old_crtc_state);
@@ -2670,7 +2672,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* transcoder"
*/
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
@@ -2696,12 +2698,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
old_crtc_state, old_conn_state);
if (DISPLAY_VER(dev_priv) < 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
@@ -2718,18 +2720,15 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
struct intel_crtc *slave_crtc;
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_transcoder(old_crtc_state);
-
intel_vrr_disable(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -2770,6 +2769,17 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
else
intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
old_conn_state);
+}
+
+static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
main_link_aux_power_domain_put(dig_port, old_crtc_state);
@@ -2933,6 +2943,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
}
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
+ intel_wait_ddi_buf_active(dev_priv, port);
+
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -2946,10 +2958,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (!intel_crtc_is_bigjoiner_slave(crtc_state))
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- intel_vrr_enable(encoder, crtc_state);
+ /* Enable/Disable DP2.0 SDP split config before transcoder */
+ intel_audio_sdp_split_update(encoder, crtc_state);
intel_enable_transcoder(crtc_state);
+ intel_vrr_enable(encoder, crtc_state);
+
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -3045,37 +3060,23 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state,
intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
}
-static void
-intel_ddi_update_prepare(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- struct intel_crtc *crtc)
+void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc_state *crtc_state =
- crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
- int required_lanes = crtc_state ? crtc_state->lane_count : 1;
-
- drm_WARN_ON(state->base.dev, crtc && crtc->active);
-
- intel_tc_port_get_link(enc_to_dig_port(encoder),
- required_lanes);
- if (crtc_state && crtc_state->hw.active) {
- struct intel_crtc *slave_crtc;
-
- intel_update_active_dpll(state, crtc, encoder);
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc *slave_crtc;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state))
- intel_update_active_dpll(state, slave_crtc, encoder);
- }
-}
+ if (!intel_phy_is_tc(i915, phy))
+ return;
-static void
-intel_ddi_update_complete(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- struct intel_crtc *crtc)
-{
- intel_tc_port_put_link(enc_to_dig_port(encoder));
+ intel_update_active_dpll(state, crtc, encoder);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state))
+ intel_update_active_dpll(state, slave_crtc, encoder);
}
static void
@@ -3089,8 +3090,13 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- if (is_tc_port)
+ if (is_tc_port) {
+ struct intel_crtc *master_crtc =
+ to_intel_crtc(crtc_state->uapi.crtc);
+
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
+ intel_ddi_update_active_dpll(state, encoder, master_crtc);
+ }
main_link_aux_power_domain_get(dig_port, crtc_state);
@@ -3135,8 +3141,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
wait = true;
}
- dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ dp_tp_ctl &= ~DP_TP_CTL_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
@@ -3204,12 +3209,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- u32 val;
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -3478,6 +3480,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
intel_psr_get_config(encoder, pipe_config);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
void intel_ddi_get_clock(struct intel_encoder *encoder,
@@ -3539,6 +3543,37 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+{
+ return pll->info->id == DPLL_ID_ICL_TBTPLL;
+}
+
+static enum icl_port_dpll_id
+icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return ICL_PORT_DPLL_DEFAULT;
+
+ if (icl_ddi_tc_pll_is_tbt(pll))
+ return ICL_PORT_DPLL_DEFAULT;
+ else
+ return ICL_PORT_DPLL_MG_PHY;
+}
+
+enum icl_port_dpll_id
+intel_ddi_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!encoder->port_pll_type)
+ return ICL_PORT_DPLL_DEFAULT;
+
+ return encoder->port_pll_type(encoder, crtc_state);
+}
+
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll)
@@ -3551,7 +3586,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- if (pll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (icl_ddi_tc_pll_is_tbt(pll))
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
else
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
@@ -3564,7 +3599,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
else
crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
@@ -3606,7 +3641,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(i915, encoder->port);
if (intel_phy_is_tc(i915, phy))
- intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
+ intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
+ crtc_state);
if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
@@ -3806,7 +3842,7 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
if (intel_phy_is_tc(i915, phy))
- intel_tc_port_flush_work(dig_port);
+ intel_tc_port_cleanup(dig_port);
intel_display_power_flush_work(i915);
drm_encoder_cleanup(encoder);
@@ -3951,8 +3987,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
- ret);
+ drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4247,7 +4283,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
if (!intel_phy_is_tc(i915, phy))
return;
- intel_tc_port_flush_work(dig_port);
+ intel_tc_port_cleanup(dig_port);
}
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
@@ -4285,7 +4321,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_bios_encoder_supports_hdmi(devdata);
init_dp = intel_bios_encoder_supports_dp(devdata);
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(devdata)) {
/*
* Lspcon device needs to be driven with DP connector
* with special detection sequence. So make sure DP
@@ -4305,7 +4341,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
if (intel_phy_is_snps(dev_priv, phy) &&
- dev_priv->snps_phy_failed_calibration & BIT(phy)) {
+ dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
drm_dbg_kms(&dev_priv->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
@@ -4361,6 +4397,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
encoder->disable = intel_disable_ddi;
+ encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
encoder->get_hw_state = intel_ddi_get_hw_state;
@@ -4400,6 +4437,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
+ encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_combo_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@@ -4412,6 +4450,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
+ encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_tc_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@@ -4480,56 +4519,50 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
- if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ if (intel_bios_encoder_lane_reversal(devdata))
dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(encoder);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
- intel_tc_port_init(dig_port, is_legacy);
-
- encoder->update_prepare = intel_ddi_update_prepare;
- encoder->update_complete = intel_ddi_update_complete;
- }
+ if (!is_legacy && init_hdmi) {
+ is_legacy = !init_dp;
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
+ port_name(port),
+ str_yes_no(init_dp),
+ is_legacy ? "legacy" : "non-legacy");
+ }
- if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
+ if (intel_tc_port_init(dig_port, is_legacy) < 0)
goto err;
-
- dig_port->hpd_pulse = intel_dp_hpd_pulse;
-
- if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
}
- /* In theory we don't need the encoder->type check, but leave it just in
- * case we have some really bad VBTs... */
- if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
- goto err;
- }
+ drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
if (DISPLAY_VER(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) >= 8) {
- if (port == PORT_A || IS_GEMINILAKE(dev_priv) ||
- IS_BROXTON(dev_priv))
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ dig_port->connected = bdw_digital_port_connected;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ dig_port->connected = lpt_digital_port_connected;
+ } else if (IS_BROADWELL(dev_priv)) {
+ if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else {
+ } else if (IS_HASWELL(dev_priv)) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -4538,6 +4571,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(dig_port))
+ goto err;
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ }
+
+ /*
+ * In theory we don't need the encoder->type check,
+ * but leave it just in case we have some really bad VBTs...
+ */
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(dig_port))
+ goto err;
+ }
+
return;
err:
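
intel_wait_ddi_buf_active() now picks its poll timeout per platform instead of the old DG2-or-500us split. A sketch of just that selection (the enum names and the poll loop itself are illustrative; only the timeout values mirror the hunk):

#include <stdio.h>

enum platform { PLAT_DG2, PLAT_GEN12_TC, PLAT_GEN12_COMBO, PLAT_LEGACY };

static int ddi_buf_active_timeout_us(enum platform p)
{
	switch (p) {
	case PLAT_DG2:
		return 1200;
	case PLAT_GEN12_TC:
		return 3000;	/* Type-C ports get the longest budget */
	case PLAT_GEN12_COMBO:
		return 1000;
	default:
		return 500;	/* pre-gen12 keeps the old timeout */
	}
}

int main(void)
{
	printf("%d %d %d %d\n",
	       ddi_buf_active_timeout_us(PLAT_DG2),
	       ddi_buf_active_timeout_us(PLAT_GEN12_TC),
	       ddi_buf_active_timeout_us(PLAT_GEN12_COMBO),
	       ddi_buf_active_timeout_us(PLAT_LEGACY));
	return 0;
}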
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index d39076facdce..2bc034042a93 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -40,6 +40,9 @@ void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
+enum icl_port_dpll_id
+intel_ddi_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
@@ -52,9 +55,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -69,5 +72,8 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane);
+void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 9c104f65e4c8..42552d8c151e 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -16,6 +16,19 @@ intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
return intel_uncore_read(&i915->uncore, reg);
}
+static inline u8
+intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read8(&i915->uncore, reg);
+}
+
+static inline u64
+intel_de_read64_2x32(struct drm_i915_private *i915,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+ return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
+}
+
static inline void
intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
{
@@ -28,10 +41,10 @@ intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
intel_uncore_write(&i915->uncore, reg, val);
}
-static inline void
+static inline u32
intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
{
- intel_uncore_rmw(&i915->uncore, reg, clear, set);
+ return intel_uncore_rmw(&i915->uncore, reg, clear, set);
}
static inline int
@@ -42,6 +55,23 @@ intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
}
static inline int
+intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
+{
+ return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
+{
+ return __intel_wait_for_register(&i915->uncore, reg, mask, value,
+ fast_timeout_us, slow_timeout_ms, out_value);
+}
+
+static inline int
intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
u32 mask, unsigned int timeout)
{
@@ -81,4 +111,16 @@ intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
intel_uncore_write_fw(&i915->uncore, reg, val);
}
+static inline u32
+intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read_notrace(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_notrace(&i915->uncore, reg, val);
+}
+
#endif /* __INTEL_DE_H__ */
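
Among the new intel_de.h wrappers, intel_de_read64_2x32() builds a 64-bit value from two 32-bit registers; the wrapper itself just forwards to intel_uncore_read64_2x32(). Below is a userspace model of the usual retry-on-wrap approach for such paired reads (hypothetical regs[] array, not the i915 implementation):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t regs[2] = { 0x89abcdef, 0x01234567 };

static uint64_t read64_2x32(volatile uint32_t *lo, volatile uint32_t *hi)
{
	uint32_t old_hi, new_hi, l;

	do {
		old_hi = *hi;
		l = *lo;
		new_hi = *hi;
	} while (old_hi != new_hi);	/* retry if the high half moved */

	return ((uint64_t)new_hi << 32) | l;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)read64_2x32(&regs[0], &regs[1]));
	return 0;
}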
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 6c2686ecb62a..3c29792137a5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -24,15 +24,15 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <acpi/video.h>
+#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
+#include <acpi/video.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
@@ -45,65 +45,60 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include "display/intel_audio.h"
-#include "display/intel_crt.h"
-#include "display/intel_ddi.h"
-#include "display/intel_display_debugfs.h"
-#include "display/intel_display_power.h"
-#include "display/intel_dp.h"
-#include "display/intel_dp_mst.h"
-#include "display/intel_dpll.h"
-#include "display/intel_dpll_mgr.h"
-#include "display/intel_drrs.h"
-#include "display/intel_dsi.h"
-#include "display/intel_dvo.h"
-#include "display/intel_fb.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_lvds.h"
-#include "display/intel_sdvo.h"
-#include "display/intel_snps_phy.h"
-#include "display/intel_tv.h"
-#include "display/intel_vdsc.h"
-#include "display/intel_vrr.h"
-
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
-#include "gt/gen8_ppgtt.h"
-
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
+#include "i9xx_plane.h"
+#include "i9xx_wm.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
+#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
+#include "intel_clock_gating.h"
#include "intel_color.h"
+#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dp.h"
#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
-#include "intel_dsb.h"
+#include "intel_drrs.h"
+#include "intel_dsi.h"
+#include "intel_dvo.h"
+#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
+#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
-#include "intel_modeset_verify.h"
+#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
+#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
@@ -111,14 +106,19 @@
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
-#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sprite.h"
+#include "intel_sdvo.h"
+#include "intel_snps_phy.h"
#include "intel_tc.h"
+#include "intel_tv.h"
+#include "intel_vblank.h"
+#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vga.h"
-#include "i9xx_plane.h"
+#include "intel_vrr.h"
+#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -130,104 +130,9 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- * @dev_priv: i915 device
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->display.funcs.wm->update_wm)
- dev_priv->display.funcs.wm->update_wm(dev_priv);
-}
-
-static int intel_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_pipe_wm)
- return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
- return 0;
-}
-
-static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
- return 0;
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.funcs.wm->compute_pipe_wm))
- return 0;
- return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
-}
-
-static bool intel_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->initial_watermarks) {
- dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
- return true;
- }
- return false;
-}
-
-static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->atomic_update_watermarks)
- dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
-}
-
-static void intel_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->optimize_watermarks)
- dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
-}
-
-static int intel_compute_global_watermarks(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_global_watermarks)
- return dev_priv->display.funcs.wm->compute_global_watermarks(state);
- return 0;
-}
-
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
@@ -296,11 +201,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
}
/* Wa_2006604312:icl,ehl */
@@ -309,11 +214,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
}
/* Wa_1604331009:icl,jsl,ehl */
@@ -388,41 +291,6 @@ struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
return to_intel_crtc(crtc_state->uapi.crtc);
}
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t reg = PIPEDSL(pipe);
- u32 line1, line2;
-
- line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
- msleep(5);
- line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
-
- return line1 != line2;
-}
-
-static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- drm_err(&dev_priv->drm,
- "pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), str_on_off(state));
-}
-
-static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
-{
- wait_for_pipe_scanline_moving(crtc, false);
-}
-
-static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
-{
- wait_for_pipe_scanline_moving(crtc, true);
-}
-
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
@@ -433,8 +301,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
- PIPECONF_STATE_ENABLE, 100))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -455,8 +323,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
- cur_state = !!(val & PIPECONF_ENABLE);
+ u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
+ cur_state = !!(val & TRANSCONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
@@ -568,15 +436,15 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if (val & PIPECONF_ENABLE) {
+ if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, reg);
/*
@@ -607,9 +475,9 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
return;
/*
@@ -617,11 +485,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
* so best keep it disabled when not needed.
*/
if (old_crtc_state->double_wide)
- val &= ~PIPECONF_DOUBLE_WIDE;
+ val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
if (!IS_I830(dev_priv))
- val &= ~PIPECONF_ENABLE;
+ val &= ~TRANSCONF_ENABLE;
if (DISPLAY_VER(dev_priv) >= 14)
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
@@ -631,7 +499,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
intel_de_write(dev_priv, reg, val);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -982,7 +850,7 @@ void intel_display_finish_reset(struct drm_i915_private *i915)
*/
intel_pps_unlock_regs_wa(i915);
intel_modeset_init_hw(i915);
- intel_init_clock_gating(i915);
+ intel_clock_gating_init(i915);
intel_hpd_init(i915);
ret = __intel_display_resume(i915, state, ctx);
@@ -1091,29 +959,13 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}
- drm_WARN(encoder->base.dev, num_encoders != 1,
+ drm_WARN(state->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
num_encoders, pipe_name(master_crtc->pipe));
return encoder;
}
-static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t dslreg = PIPEDSL(pipe);
- u32 temp;
-
- temp = intel_de_read(dev_priv, dslreg);
- udelay(500);
- if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
- if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
- drm_err(&dev_priv->drm,
- "mode set failed: pipe %c stuck\n",
- pipe_name(pipe));
- }
-}
-
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1246,7 +1098,6 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
intel_update_watermarks(dev_priv);
- hsw_ips_post_update(state, crtc);
intel_fbc_post_update(state, crtc);
if (needs_async_flip_vtd_wa(old_crtc_state) &&
@@ -1264,6 +1115,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false);
+
+ if (intel_crtc_needs_color_update(new_crtc_state))
+ intel_color_post_update(new_crtc_state);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1307,7 +1161,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- u8 update_planes = new_crtc_state->update_planes;
+ u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
+ ~new_crtc_state->async_flip_planes;
const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
bool need_vbl_wait = false;
@@ -1316,7 +1171,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (plane->need_async_flip_disable_wa &&
plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id)) {
+ disable_async_flip_planes & BIT(plane->id)) {
/*
* Apart from the async flip bit we want to
* preserve the old state for the plane.
@@ -1433,7 +1288,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
intel_crtc_async_flip_disable_wa(state, crtc);
}
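The workaround selection above changes from "every plane being updated" to "planes that had an async flip armed in the old state but not in the new one". With illustrative plane masks (the plane IDs are only an example):

        u8 old_async = BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0); /* old_crtc_state->async_flip_planes */
        u8 new_async = BIT(PLANE_PRIMARY);                       /* new_crtc_state->async_flip_planes */
        u8 disable   = old_async & ~new_async;                   /* == BIT(PLANE_SPRITE0) */

only the sprite plane gets the async-flip-disable write, instead of every plane in update_planes.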
@@ -1465,36 +1320,11 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
intel_frontbuffer_flip(dev_priv, fb_bits);
}
-/*
- * intel_connector_primary_encoder - get the primary encoder for a connector
- * @connector: connector for which to return the encoder
- *
- * Returns the primary encoder for a connector. There is a 1:1 mapping from
- * all connectors to their encoder, except for DP-MST connectors which have
- * both a virtual and a primary encoder. These DP-MST primary encoders can be
- * pointed to by as many DP-MST connectors as there are pipes.
- */
-static struct intel_encoder *
-intel_connector_primary_encoder(struct intel_connector *connector)
-{
- struct intel_encoder *encoder;
-
- if (connector->mst_port)
- return &dp_to_dig_port(connector->mst_port)->base;
-
- encoder = intel_attached_encoder(connector);
- drm_WARN_ON(connector->base.dev, !encoder);
-
- return encoder;
-}
-
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
- struct drm_connector_state *new_conn_state;
- struct drm_connector *connector;
int i;
/*
@@ -1510,57 +1340,6 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
}
}
-
- if (!state->modeset)
- return;
-
- for_each_new_connector_in_state(&state->base, connector, new_conn_state,
- i) {
- struct intel_connector *intel_connector;
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
-
- if (!intel_connector_needs_modeset(state, connector))
- continue;
-
- intel_connector = to_intel_connector(connector);
- encoder = intel_connector_primary_encoder(intel_connector);
- if (!encoder->update_prepare)
- continue;
-
- crtc = new_conn_state->crtc ?
- to_intel_crtc(new_conn_state->crtc) : NULL;
- encoder->update_prepare(state, encoder, crtc);
- }
-}
-
-static void intel_encoders_update_complete(struct intel_atomic_state *state)
-{
- struct drm_connector_state *new_conn_state;
- struct drm_connector *connector;
- int i;
-
- if (!state->modeset)
- return;
-
- for_each_new_connector_in_state(&state->base, connector, new_conn_state,
- i) {
- struct intel_connector *intel_connector;
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
-
- if (!intel_connector_needs_modeset(state, connector))
- continue;
-
- intel_connector = to_intel_connector(connector);
- encoder = intel_connector_primary_encoder(intel_connector);
- if (!encoder->update_complete)
- continue;
-
- crtc = new_conn_state->crtc ?
- to_intel_crtc(new_conn_state->crtc) : NULL;
- encoder->update_complete(state, encoder, crtc);
- }
}
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
@@ -1809,7 +1588,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(dev_priv))
- cpt_verify_modeset(dev_priv, pipe);
+ intel_wait_for_pipe_scanline_moving(crtc);
/*
* Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -1856,12 +1635,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
enum transcoder transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
CHICKEN_TRANS(transcoder);
- u32 val;
- val = intel_de_read(dev_priv, reg);
- val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg,
+ HSW_FRAME_START_DELAY_MASK,
+ HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
@@ -1901,7 +1678,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
intel_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1922,6 +1699,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
+ intel_dmc_enable_pipe(dev_priv, crtc->pipe);
+
if (!new_crtc_state->bigjoiner_pipes) {
intel_encoders_pre_pll_enable(state, crtc);
@@ -1940,7 +1719,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state);
if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
!transcoder_is_dsi(cpu_transcoder))
@@ -2050,6 +1829,8 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ intel_disable_shared_dpll(old_crtc_state);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -2057,6 +1838,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* FIXME collapse everything to one hook.
@@ -2066,6 +1848,12 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
}
+
+ intel_disable_shared_dpll(old_crtc_state);
+
+ intel_encoders_post_pll_disable(state, crtc);
+
+ intel_dmc_disable_pipe(i915, crtc->pipe);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -2283,6 +2071,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
+ intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
+
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
@@ -2869,12 +2659,14 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- u32 crtc_vtotal, crtc_vblank_end;
+ u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
+ crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -2891,23 +2683,44 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
vsyncshift += adjusted_mode->crtc_htotal;
}
+ /*
+ * VBLANK_START no longer works on ADL+, instead we must use
+ * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
+ crtc_vblank_start - crtc_vdisplay);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ }
+
if (DISPLAY_VER(dev_priv) > 3)
- intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
- vsyncshift);
-
- intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
- intel_de_write(dev_priv, HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- intel_de_write(dev_priv, HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
- intel_de_write(dev_priv, VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
- intel_de_write(dev_priv, VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
+ HTOTAL(adjusted_mode->crtc_htotal - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
+ HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
+ HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(crtc_vblank_start - 1) |
+ VBLANK_END(crtc_vblank_end - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
+ VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -2915,9 +2728,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, VTOTAL(pipe),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
-
+ intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
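A worked example for the DISPLAY_VER >= 13 path introduced above, using illustrative numbers rather than any particular mode: with crtc_vdisplay = 1080 and crtc_vblank_start = 1084, TRANS_SET_CONTEXT_LATENCY is written with 1084 - 1080 = 4, while VBLANK_START in TRANS_VBLANK is parked at 1 only so it stands out in register dumps. The readout change further down inverts the same relation: crtc_vblank_start = crtc_vdisplay + TRANS_SET_CONTEXT_LATENCY.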
@@ -2945,9 +2758,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
@@ -2956,43 +2769,47 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
+ adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
+ adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
+
+ tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
+ /* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
+ adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
if (intel_pipe_is_interlaced(pipe_config)) {
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
+ adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ adjusted_mode->crtc_vtotal += 1;
+ adjusted_mode->crtc_vblank_end += 1;
}
+
+ if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
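The readout rewrite above trades hand-rolled shifts and masks for named fields plus REG_FIELD_GET(). Assuming REG_FIELD_GET() behaves like the generic FIELD_GET() (mask, then shift down to bit 0), a self-contained sketch for a contiguous mask is:

        /* sketch only: extract the contiguous bitfield described by @mask */
        static u32 field_get_sketch(u32 mask, u32 val)
        {
                return (val & mask) >> __builtin_ctz(mask);
        }

For a field occupying bits 31:16 this reproduces the removed '((tmp >> 16) & 0xffff)', and the '+ 1' that undoes the hardware's minus-one encoding stays the same in both versions.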
@@ -3032,7 +2849,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pipeconf = 0;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
/*
* - We keep both pipes enabled on 830
@@ -3040,18 +2858,18 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
- pipeconf |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
+ val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
- pipeconf |= PIPECONF_DITHER_EN |
- PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN |
+ TRANSCONF_DITHER_TYPE_SP;
switch (crtc_state->pipe_bpp) {
default:
@@ -3059,13 +2877,13 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- pipeconf |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- pipeconf |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- pipeconf |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
}
}
@@ -3073,23 +2891,23 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (DISPLAY_VER(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
- pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
} else {
- pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
+ val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
- pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
- pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
- intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
@@ -3190,20 +3008,20 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static enum intel_output_format
-bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
+bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
- if (tmp & PIPEMISC_YUV420_ENABLE) {
+ if (tmp & PIPE_MISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
drm_WARN_ON(&dev_priv->drm,
- (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
- } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
return INTEL_OUTPUT_FORMAT_YCBCR444;
} else {
return INTEL_OUTPUT_FORMAT_RGB;
@@ -3248,20 +3066,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
default:
@@ -3271,12 +3089,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ (tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = intel_de_read(dev_priv,
@@ -3286,7 +3104,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
if (DISPLAY_VER(dev_priv) < 4)
- pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+ pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -3296,7 +3114,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (DISPLAY_VER(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->chv_dpll_md[crtc->pipe];
+ tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
else
tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
@@ -3356,7 +3174,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
/*
@@ -3364,7 +3182,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
switch (crtc_state->pipe_bpp) {
default:
@@ -3372,26 +3190,26 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- val |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- val |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
case 36:
- val |= PIPECONF_BPC_12;
+ val |= TRANSCONF_BPC_12;
break;
}
if (crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
/*
* This would end up with an odd purple hue over
@@ -3402,18 +3220,18 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range &&
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
@@ -3428,25 +3246,25 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (IS_HASWELL(dev_priv) && crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
if (IS_HASWELL(dev_priv) &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
- intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
+static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -3454,18 +3272,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPEMISC_BPC_6;
+ val |= PIPE_MISC_BPC_6;
break;
case 24:
- val |= PIPEMISC_BPC_8;
+ val |= PIPE_MISC_BPC_8;
break;
case 30:
- val |= PIPEMISC_BPC_10;
+ val |= PIPE_MISC_BPC_10;
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
if (DISPLAY_VER(dev_priv) > 12)
- val |= PIPEMISC_BPC_12_ADLP;
+ val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -3473,38 +3291,38 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->dither)
- val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+ val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPEMISC_YUV420_ENABLE |
- PIPEMISC_YUV420_MODE_FULL_BLEND;
+ val |= PIPE_MISC_YUV420_ENABLE |
+ PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
- val |= PIPEMISC_HDR_MODE_PRECISION;
+ val |= PIPE_MISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
- val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+ val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
- intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
+ intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
- switch (tmp & PIPEMISC_BPC_MASK) {
- case PIPEMISC_BPC_6:
+ switch (tmp & PIPE_MISC_BPC_MASK) {
+ case PIPE_MISC_BPC_6:
return 18;
- case PIPEMISC_BPC_8:
+ case PIPE_MISC_BPC_8:
return 24;
- case PIPEMISC_BPC_10:
+ case PIPE_MISC_BPC_10:
return 30;
/*
* PORT OUTPUT 12 BPC defined for ADLP+.
@@ -3516,7 +3334,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
* on older platforms, need to find a workaround for 12 BPC
* MIPI DSI HW readout.
*/
- case PIPEMISC_BPC_12_ADLP:
+ case PIPE_MISC_BPC_12_ADLP:
if (DISPLAY_VER(dev_priv) > 12)
return 36;
fallthrough;
@@ -3668,33 +3486,33 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->shared_dpll = NULL;
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
- case PIPECONF_BPC_12:
+ case TRANSCONF_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
- if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
- switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
- case PIPECONF_OUTPUT_COLORSPACE_YUV601:
- case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
break;
default:
@@ -3702,11 +3520,11 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
break;
}
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
+ pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
@@ -3983,9 +3801,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
- return tmp & PIPECONF_ENABLE;
+ return tmp & TRANSCONF_ENABLE;
}
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
@@ -4089,15 +3907,15 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_HASWELL(dev_priv)) {
u32 tmp = intel_de_read(dev_priv,
- PIPECONF(pipe_config->cpu_transcoder));
+ TRANSCONF(pipe_config->cpu_transcoder));
- if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
else
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
pipe_config->output_format =
- bdw_get_pipemisc_output_format(crtc);
+ bdw_get_pipe_misc_output_format(crtc);
}
pipe_config->gamma_mode = intel_de_read(dev_priv,
@@ -4140,7 +3958,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
intel_de_read(dev_priv,
- PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5195,6 +5013,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
* only fields that are known not to cause problems are preserved. */
saved_state->uapi = crtc_state->uapi;
+ saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@@ -5433,6 +5252,12 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
return memcmp(a, b, sizeof(*a)) == 0;
}
+static bool
+intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
+{
+ return memcmp(a, b, len) == 0;
+}
+
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -5483,6 +5308,50 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
}
}
+/* Returns the length up to and including the last differing byte */
+static size_t
+memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (a[i] != b[i])
+ return i + 1;
+ }
+
+ return 0;
+}
+
+static void
+pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const u8 *a, const u8 *b, size_t len)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s buffer\n", name);
+ print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
+ } else {
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
+ drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
+ print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
+ }
+}
+
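memcmp_diff_len() trims the hex dumps to the last byte that actually differs. Worked example with illustrative buffers: a = { 0x01, 0x02, 0x03, 0x04 } and b = { 0x01, 0xff, 0x03, 0x04 } differ last at index 1, so the function returns 2 and pipe_config_buffer_mismatch() prints only the first two bytes of each buffer; identical buffers would return 0, though in that case the mismatch path is never reached.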
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
@@ -5531,7 +5400,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
- u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
current_config->inherited && !pipe_config->inherited;
@@ -5682,21 +5550,26 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
- if (current_config->name1 != pipe_config->name1) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name1), \
- "(expected %i, found %i, won't compare lut values)", \
- current_config->name1, \
- pipe_config->name1); \
- ret = false;\
- } else { \
- if (!intel_color_lut_equal(current_config->name2, \
- pipe_config->name2, pipe_config->name1, \
- bit_precision)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name2), \
- "hw_state doesn't match sw_state"); \
- ret = false; \
- } \
+#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
+ BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
+ BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
+ if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
+ pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
+ current_config->name, \
+ pipe_config->name, \
+ (len)); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
+ if (current_config->gamma_mode == pipe_config->gamma_mode && \
+ !intel_color_lut_equal(current_config, \
+ current_config->lut, pipe_config->lut, \
+ is_pre_csc_lut)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(lut), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
} \
} while (0)
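The new PIPE_CONF_CHECK_BUFFER() compares a fixed-size byte array in the two states, with the BUILD_BUG_ON()s pinning the declared length to the real field size. Its first user is the ELD check added further down, PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES), which expands to roughly:

        if (!intel_compare_buffer(current_config->eld, pipe_config->eld, MAX_ELD_BYTES)) {
                pipe_config_buffer_mismatch(dev_priv, fastset, "eld",
                                            current_config->eld, pipe_config->eld,
                                            MAX_ELD_BYTES);
                ret = false;
        }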
@@ -5760,6 +5633,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
+ PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -5793,9 +5667,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(linetime);
PIPE_CONF_CHECK_I(ips_linetime);
- bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
- if (bp_gamma)
- PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, post_csc_lut, bp_gamma);
+ PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
+ PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
if (current_config->active_planes) {
PIPE_CONF_CHECK_BOOL(has_psr);
@@ -5950,78 +5823,22 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
if (ret)
return ret;
+ ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
crtc_state->update_planes |= crtc_state->active_planes;
+ crtc_state->async_flip_planes = 0;
+ crtc_state->do_async_flip = false;
}
return 0;
}
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_display_mode adjusted_mode;
-
- drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
-
- if (crtc_state->vrr.enable) {
- adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- }
-
- drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
-
- crtc->mode_flags = crtc_state->mode_flags;
-
- /*
- * The scanline counter increments at the leading edge of hsync.
- *
- * On most platforms it starts counting from vtotal-1 on the
- * first active line. That means the scanline counter value is
- * always one less than what we would expect. Ie. just after
- * start of vblank, which also occurs at start of hsync (on the
- * last active line), the scanline counter will read vblank_start-1.
- *
- * On gen2 the scanline counter starts counting from 1 instead
- * of vtotal-1, so we have to subtract one (or rather add vtotal-1
- * to keep the value positive), instead of adding one.
- *
- * On HSW+ the behaviour of the scanline counter depends on the output
- * type. For DP ports it behaves like most other platforms, but on HDMI
- * there's an extra 1 line difference. So we need to add two instead of
- * one to the value.
- *
- * On VLV/CHV DSI the scanline counter would appear to increment
- * approx. 1/3 of a scanline before start of vblank. Unfortunately
- * that means we can't tell whether we're in vblank or not while
- * we're on that particular line. We must still set scanline_offset
- * to 1 so that the vblank timestamps come out correct when we query
- * the scanline counter from within the vblank interrupt handler.
- * However if queried just before the start of vblank we'll get an
- * answer that's slightly in the future.
- */
- if (DISPLAY_VER(dev_priv) == 2) {
- int vtotal;
-
- vtotal = adjusted_mode.crtc_vtotal;
- if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- vtotal /= 2;
-
- crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- crtc->scanline_offset = 2;
- } else {
- crtc->scanline_offset = 1;
- }
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -6707,8 +6524,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
* @dev: drm device
* @_state: state to validate
*/
-static int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *_state)
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *_state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
@@ -6941,7 +6758,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (intel_crtc_needs_color_update(crtc_state))
- intel_dsb_prepare(crtc_state);
+ intel_color_prepare_commit(crtc_state);
}
return 0;
@@ -7026,7 +6843,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_color_commit_arm(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
@@ -7085,6 +6902,12 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+ if (old_crtc_state->inherited ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ if (HAS_DPT(i915))
+ intel_dpt_configure(crtc);
+ }
+
if (!modeset) {
if (new_crtc_state->preload_luts &&
intel_crtc_needs_color_update(new_crtc_state))
@@ -7102,6 +6925,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
+ drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state);
@@ -7146,7 +6971,6 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
dev_priv->display.funcs.display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
- intel_disable_shared_dpll(old_crtc_state);
if (!new_crtc_state->hw.active)
intel_initial_watermarks(state, crtc);
@@ -7392,24 +7216,18 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
&wait_reset);
}
-static void intel_cleanup_dsbs(struct intel_atomic_state *state)
-{
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i)
- intel_dsb_cleanup(old_crtc_state);
-}
-
static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
container_of(work, struct intel_atomic_state, base.commit_work);
struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
+ intel_color_cleanup_commit(old_crtc_state);
- intel_cleanup_dsbs(state);
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
@@ -7475,8 +7293,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
- if (state->modeset)
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ /*
+ * During full modesets we write a lot of registers, wait
+ * for PLLs, etc. Doing that while DC states are enabled
+ * is not a good idea.
+ *
+ * During fastsets and other updates we also need to
+ * disable DC states due to the following scenario:
+ * 1. DC5 exit and PSR exit happen
+ * 2. Some or all _noarm() registers are written
+ * 3. Due to some long delay PSR is re-entered
+ * 4. DC5 entry -> DMC saves the already written new
+ * _noarm() registers and the old not yet written
+ * _arm() registers
+ * 5. DC5 exit -> DMC restores a mixture of old and
+ * new register values and arms the update
+ * 6. PSR exit -> hardware latches a mixture of old and
+ * new register values -> corrupted frame, or worse
+ * 7. New _arm() registers are finally written
+ * 8. Hardware finally latches a complete set of new
+ * register values, and subsequent frames will be OK again
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state);
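The long comment above explains why every commit, not just a full modeset, now holds off DC states. The resulting bracketing (the matching put is in a later hunk of the same function) is, in sketch form:

        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
        /* ... _noarm() writes, _arm() writes, vblank waits ... */
        intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);

so DC5/DC6 entry, and with it the DMC save/restore race in steps 4-6 of the comment, cannot occur while a frame update is only partially written.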
@@ -7531,8 +7369,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.funcs.display->commit_modeset_enables(state);
- intel_encoders_update_complete(state);
-
if (state->modeset)
intel_set_cdclk_post_plane_update(state);
@@ -7587,6 +7423,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
+ hsw_ips_post_update(state, crtc);
+
/*
* Activate DRRS after state readout to avoid
* dp_m_n vs. dp_m2_n2 confusion on BDW+.
@@ -7597,6 +7436,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* DSB cleanup is done in cleanup_work aligning with framebuffer
* cleanup. So copy and reset the dsb structure to sync with
* commit_done and later do dsb cleanup in cleanup_work.
+ *
+ * FIXME get rid of this funny new->old swapping
*/
old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
}
@@ -7620,8 +7461,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
@@ -7747,7 +7588,7 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&state->commit_ready);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- intel_dsb_cleanup(new_crtc_state);
+ intel_color_cleanup_commit(new_crtc_state);
drm_atomic_helper_cleanup_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
@@ -8369,124 +8210,6 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
-static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
-{
- struct drm_plane *plane;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(state->dev, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (crtc_state->hw.active) {
- /*
- * Preserve the inherited flag to avoid
- * taking the full modeset path.
- */
- crtc_state->inherited = true;
- }
- }
-
- drm_for_each_plane(plane, state->dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- }
-
- return 0;
-}
-
-/*
- * Calculate what we think the watermarks should be for the state we've read
- * out of the hardware and then immediately program those watermarks so that
- * we ensure the hardware settings match our internal state.
- *
- * We can calculate what we think WM's should be by creating a duplicate of the
- * current state (which was constructed during hardware readout) and running it
- * through the atomic check code to calculate new watermark values in the
- * state object.
- */
-static void sanitize_watermarks(struct drm_i915_private *dev_priv)
-{
- struct drm_atomic_state *state;
- struct intel_atomic_state *intel_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
-
- /* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
- return;
-
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
- return;
-
- intel_state = to_intel_atomic_state(state);
-
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- state->acquire_ctx = &ctx;
-
- /*
- * Hardware readout is the only time we don't want to calculate
- * intermediate watermarks (since we don't trust the current
- * watermarks).
- */
- if (!HAS_GMCH(dev_priv))
- intel_state->skip_intermediate_wm = true;
-
- ret = sanitize_watermarks_add_affected(state);
- if (ret)
- goto fail;
-
- ret = intel_atomic_check(&dev_priv->drm, state);
- if (ret)
- goto fail;
-
- /* Write calculated watermark values back */
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- crtc_state->wm.need_postvbl_update = true;
- intel_optimize_watermarks(intel_state, crtc);
-
- to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
- }
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- drm_WARN(&dev_priv->drm, ret,
- "Could not determine valid watermarks for inherited state\n");
-
- drm_atomic_state_put(state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -8647,12 +8370,16 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
+ ret = intel_power_domains_init(i915);
+ if (ret < 0)
+ goto cleanup_vga;
+
intel_power_domains_init_hw(i915, false);
if (!HAS_DISPLAY(i915))
return 0;
- intel_dmc_ucode_init(i915);
+ intel_dmc_init(i915);
i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -8687,8 +8414,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
return 0;
cleanup_vga_client_pw_domain_dmc:
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
+cleanup_vga:
intel_vga_unregister(i915);
cleanup_bios:
intel_bios_driver_remove(i915);
@@ -8707,7 +8435,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return 0;
- intel_init_pm(i915);
+ intel_wm_init(i915);
intel_panel_sanitize_ssc(i915);
@@ -8763,7 +8491,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(i915);
+ ilk_wm_sanitize(i915);
return 0;
}
@@ -8804,6 +8532,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -8830,13 +8559,20 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(640 - 1) | HTOTAL(800 - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(480 - 1) | VTOTAL(525 - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));
intel_de_write(dev_priv, FP0(pipe), fp);
intel_de_write(dev_priv, FP1(pipe), fp);
@@ -8867,8 +8603,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
udelay(150); /* wait for warmup */
}
- intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
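Sanity check on the fixed timings programmed above: htotal = 800 and vtotal = 525, so at a nominal 60 Hz the dot clock is 800 * 525 * 60 = 25,200,000 Hz, about 25.2 MHz; the "~25175 kHz" in the comment is the exact VESA 640x480 figure, which corresponds to the real 59.94 Hz refresh (800 * 525 * 59.94 is roughly 25,174,800 Hz).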
@@ -8891,8 +8627,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
- intel_de_write(dev_priv, PIPECONF(pipe), 0);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), 0);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -9013,7 +8749,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
@@ -9048,14 +8784,14 @@ void intel_display_driver_register(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- intel_display_debugfs_register(i915);
-
/* Must be done after probing outputs */
intel_opregion_register(i915);
intel_acpi_video_register(i915);
intel_audio_init(i915);
+ intel_display_debugfs_register(i915);
+
/*
* Some ports require correctly set-up hpd registers for
* detection to work properly (leading to ghost connected
@@ -9064,7 +8800,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* enabled. We do it last so that the async config cannot run
* before the connectors are registered.
*/
- intel_fbdev_initial_config_async(&i915->drm);
+ intel_fbdev_initial_config_async(i915);
/*
* We need to coordinate the hotplugs with the asynchronous
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 714030136b7f..287159bdeb0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -28,9 +28,11 @@
#include <drm/drm_util.h>
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
enum drm_scaling_filter;
struct dpll;
+struct drm_atomic_state;
struct drm_connector;
struct drm_device;
struct drm_display_mode;
@@ -62,51 +64,9 @@ struct intel_remapped_info;
struct intel_rotation_info;
struct pci_dev;
-/*
- * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
- * rest have consecutive values and match the enum values of transcoders
- * with a 1:1 transcoder -> pipe mapping.
- */
-enum pipe {
- INVALID_PIPE = -1,
-
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- PIPE_D,
- _PIPE_EDP,
-
- I915_MAX_PIPES = _PIPE_EDP
-};
#define pipe_name(p) ((p) + 'A')
-enum transcoder {
- INVALID_TRANSCODER = -1,
- /*
- * The following transcoders have a 1:1 transcoder -> pipe mapping,
- * keep their values fixed: the code assumes that TRANSCODER_A=0, the
- * rest have consecutive values and match the enum values of the pipes
- * they map to.
- */
- TRANSCODER_A = PIPE_A,
- TRANSCODER_B = PIPE_B,
- TRANSCODER_C = PIPE_C,
- TRANSCODER_D = PIPE_D,
-
- /*
- * The following transcoders can map to any pipe, their enum value
- * doesn't need to stay fixed.
- */
- TRANSCODER_EDP,
- TRANSCODER_DSI_0,
- TRANSCODER_DSI_1,
- TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
- TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
-
- I915_MAX_TRANSCODERS
-};
-
static inline const char *transcoder_name(enum transcoder transcoder)
{
switch (transcoder) {
@@ -147,29 +107,6 @@ enum i9xx_plane_id {
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_SPRITE3,
- PLANE_SPRITE4,
- PLANE_SPRITE5,
- PLANE_CURSOR,
-
- I915_MAX_PLANES,
-};
-
#define for_each_plane_id_on_crtc(__crtc, __p) \
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
@@ -182,34 +119,6 @@ enum plane_id {
for_each_dbuf_slice((__dev_priv), (__slice)) \
for_each_if((__mask) & BIT(__slice))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
- PORT_G,
- PORT_H,
- PORT_I,
-
- /* tgl+ */
- PORT_TC1 = PORT_D,
- PORT_TC2,
- PORT_TC3,
- PORT_TC4,
- PORT_TC5,
- PORT_TC6,
-
- /* XE_LPD repositions D/E offsets and bitfields */
- PORT_D_XELPD = PORT_TC5,
- PORT_E_XELPD,
-
- I915_MAX_PORTS
-};
-
#define port_name(p) ((p) + 'A')
/*
@@ -255,14 +164,9 @@ enum tc_port {
I915_MAX_TC_PORTS
};
-enum tc_port_mode {
- TC_PORT_DISCONNECTED,
- TC_PORT_TBT_ALT,
- TC_PORT_DP_ALT,
- TC_PORT_LEGACY,
-};
-
enum aux_ch {
+ AUX_CH_NONE = -1,
+
AUX_CH_A,
AUX_CH_B,
AUX_CH_C,
@@ -312,27 +216,6 @@ enum phy_fia {
FIA3,
};
-enum hpd_pin {
- HPD_NONE = 0,
- HPD_TV = HPD_NONE, /* TV is known to be unreliable */
- HPD_CRT,
- HPD_SDVO_B,
- HPD_SDVO_C,
- HPD_PORT_A,
- HPD_PORT_B,
- HPD_PORT_C,
- HPD_PORT_D,
- HPD_PORT_E,
- HPD_PORT_TC1,
- HPD_PORT_TC2,
- HPD_PORT_TC3,
- HPD_PORT_TC4,
- HPD_PORT_TC5,
- HPD_PORT_TC6,
-
- HPD_NUM_PINS
-};
-
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
@@ -440,6 +323,14 @@ enum hpd_pin {
(__i)++) \
for_each_if(plane)
+#define for_each_old_intel_crtc_in_state(__state, crtc, old_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -499,6 +390,7 @@ enum hpd_pin {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
@@ -523,7 +415,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset);
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
@@ -612,7 +503,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
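A hypothetical use of the newly added for_each_old_intel_crtc_in_state() iterator, following the pattern of the existing for_each_new_intel_crtc_in_state() users; the function below is an illustration, not part of the patch:

    static void example_log_old_crtc_states(struct intel_atomic_state *state)
    {
            struct intel_crtc *crtc;
            struct intel_crtc_state *old_crtc_state;
            int i;

            for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
                    drm_dbg_kms(crtc->base.dev, "[CRTC:%d] was %s\n",
                                crtc->base.base.id,
                                old_crtc_state->hw.active ? "active" : "inactive");
    }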
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 57ddce3ba02b..e36f88a39b86 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -17,15 +17,14 @@
#include <drm/drm_modeset_lock.h>
#include "intel_cdclk.h"
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
-#include "intel_dmc.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct drm_property;
@@ -40,6 +39,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dmc;
struct intel_dpll_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
@@ -85,6 +85,12 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
+ void (*get_hw_state)(struct drm_i915_private *i915);
+};
+
+struct intel_audio_state {
+ struct intel_encoder *encoder;
+ u8 eld[MAX_ELD_BYTES];
};
struct intel_audio {
@@ -96,8 +102,8 @@ struct intel_audio {
int power_refcount;
u32 freq_cntrl;
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *encoder_map[I915_MAX_PIPES];
+ /* current audio state for the audio component hooks */
+ struct intel_audio_state state[I915_MAX_TRANSCODERS];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
@@ -122,6 +128,11 @@ struct intel_dpll {
int nssc;
int ssc;
} ref_clks;
+
+ /*
+ * Bitmask of PLLs using the PCH SSC, indexed using enum intel_dpll_id.
+ */
+ u8 pch_ssc_use;
};
struct intel_frontbuffer_tracking {
@@ -172,6 +183,17 @@ struct intel_hotplug {
* blocked behind the non-DP one.
*/
struct workqueue_struct *dp_wq;
+
+ /*
+ * Flag to track if long HPDs need not to be processed
+ *
+ * Some panels generate long HPDs while keep connected to the port.
+ * This can cause issues with CI tests results. In CI systems we
+ * don't expect to disconnect the panels and could ignore the long
+ * HPDs generated from the faulty panels. This flag can be used as
+ * cue to ignore the long HPDs and can be set / unset using debugfs.
+ */
+ bool ignore_long_hpd;
};
struct intel_vbt_data {
@@ -233,7 +255,7 @@ struct intel_wm {
struct g4x_wm_values g4x;
};
- u8 max_level;
+ u8 num_levels;
/*
* Should be held around atomic WM register writing; also
@@ -330,6 +352,11 @@ struct intel_display {
} dkl;
struct {
+ struct intel_dmc *dmc;
+ intel_wakeref_t wakeref;
+ } dmc;
+
+ struct {
/* VLV/CHV/BXT/GLK DSI MMIO register base address */
u32 mmio_base;
} dsi;
@@ -368,9 +395,15 @@ struct intel_display {
} gmbus;
struct {
- struct i915_hdcp_comp_master *master;
+ struct i915_hdcp_master *master;
bool comp_added;
+ /*
+ * HDCP message struct for allocation of memory which can be
+ * reused when sending messages to the GSC CS.
+ * This is only populated post-Meteor Lake.
+ */
+ struct intel_hdcp_gsc_message *hdcp_message;
/* Mutex to protect the above hdcp component related values. */
struct mutex comp_mutex;
} hdcp;
@@ -386,6 +419,10 @@ struct intel_display {
} hti;
struct {
+ bool false_color;
+ } ips;
+
+ struct {
struct i915_power_domains domains;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -429,6 +466,24 @@ struct intel_display {
} sagv;
struct {
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 phy_failed_calibration;
+ } snps;
+
+ struct {
+ /*
+ * Shadows for CHV DPLL_MD regs to keep the state
+ * checker somewhat working in the presence of hardware
+ * crappiness (can't read out DPLL_MD for pipes B & C).
+ */
+ u32 chv_dpll_md[I915_MAX_PIPES];
+ u32 bxt_phy_grc;
+ } state;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
@@ -438,7 +493,6 @@ struct intel_display {
/* Grouping using named structs. Keep sorted. */
struct intel_audio audio;
- struct intel_dmc dmc;
struct intel_dpll dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 7bcd90384a46..45113ae107ba 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -8,10 +8,12 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "hsw_ips.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_crtc_state_dump.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
@@ -26,10 +28,10 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
-#include "intel_pm.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "intel_sprite.h"
-#include "skl_watermark.h"
+#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -49,33 +51,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!HAS_IPS(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(dev_priv->params.enable_ips));
-
- if (DISPLAY_VER(dev_priv) >= 8) {
- seq_puts(m, "Currently: unknown\n");
- } else {
- if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "Currently: enabled\n");
- else
- seq_puts(m, "Currently: disabled\n");
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -169,269 +144,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
- u8 val;
- static const char * const sink_status[] = {
- "inactive",
- "transition to active, capture and display",
- "active, display from RFB",
- "active, capture and display on sink device timings",
- "transition to inactive, capture and display, timing re-sync",
- "reserved",
- "reserved",
- "sink internal error",
- };
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
- int ret;
-
- if (!CAN_PSR(intel_dp)) {
- seq_puts(m, "PSR Unsupported\n");
- return -ENODEV;
- }
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
- if (ret == 1) {
- const char *str = "unknown";
-
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- str = sink_status[val];
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
- } else {
- return ret;
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- const char *status = "unknown";
- u32 val, status_val;
-
- if (intel_dp->psr.psr2_enabled) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(intel_dp->psr.transcoder));
- status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- } else {
- static const char * const live_status[] = {
- "IDLE",
- "SRDONACK",
- "SRDENT",
- "BUFOFF",
- "BUFON",
- "AUXACK",
- "SRDOFFACK",
- "SRDENT_ON",
- };
- val = intel_de_read(dev_priv,
- EDP_PSR_STATUS(intel_dp->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- }
-
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
-
- seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
- if (psr->sink_support)
- seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_puts(m, "\n");
-
- if (!psr->sink_support)
- return 0;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&psr->lock);
-
- if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
-
- if (!psr->enabled) {
- seq_printf(m, "PSR sink not reliable: %s\n",
- str_yes_no(psr->sink_not_reliable));
-
- goto unlock;
- }
-
- if (psr->psr2_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder));
- enabled = val & EDP_PSR2_ENABLE;
- } else {
- val = intel_de_read(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder));
- enabled = val & EDP_PSR_ENABLE;
- }
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- str_enabled_disabled(enabled), val);
- psr_source_status(intel_dp, m);
- seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
- psr->busy_frontbuffer_bits);
-
- /*
- * SKL+ Perf counter is reset to 0 everytime DC state is entered
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = intel_de_read(dev_priv,
- EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
-
- if (psr->debug & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- psr->last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
- }
-
- if (psr->psr2_enabled) {
- u32 su_frames_val[3];
- int frame;
-
- /*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
- */
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
- su_frames_val[frame / 3] = val;
- }
-
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
-
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
- }
-
- seq_printf(m, "PSR2 selective fetch: %s\n",
- str_enabled_disabled(psr->psr2_sel_fetch_enabled));
- }
-
-unlock:
- mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_dp *intel_dp = NULL;
- struct intel_encoder *encoder;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- /* Find the first EDP which supports PSR */
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- intel_dp = enc_to_intel_dp(encoder);
- break;
- }
-
- if (!intel_dp)
- return -ENODEV;
-
- return intel_psr_status(m, intel_dp);
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
- int ret = -ENODEV;
-
- if (!HAS_PSR(dev_priv))
- return ret;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- // TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- }
-
- return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_encoder *encoder;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- // TODO: split to each transcoder's PSR debug state
- *val = READ_ONCE(intel_dp->psr.debug);
- return 0;
- }
-
- return -ENODEV;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
- "%llu\n");
-
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -832,10 +544,10 @@ static const struct file_operations crtc_updates_fops = {
.write = crtc_updates_write
};
-static void crtc_updates_add(struct drm_crtc *crtc)
+static void crtc_updates_add(struct intel_crtc *crtc)
{
- debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
- to_intel_crtc(crtc), &crtc_updates_fops);
+ debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry,
+ crtc, &crtc_updates_fops);
}
#else
@@ -845,7 +557,7 @@ static void crtc_updates_info(struct seq_file *m,
{
}
-static void crtc_updates_add(struct drm_crtc *crtc)
+static void crtc_updates_add(struct intel_crtc *crtc)
{
}
#endif
@@ -1282,237 +994,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(&dev_priv->drm);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(&dev_priv->drm);
-
- return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
@@ -1574,12 +1055,10 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
@@ -1593,13 +1072,9 @@ static const struct {
const struct file_operations *fops;
} intel_display_debugfs_files[] = {
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
- {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
void intel_display_debugfs_register(struct drm_i915_private *i915)
@@ -1622,7 +1097,8 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
- skl_watermark_ipc_debugfs_register(i915);
+ intel_psr_debugfs_register(i915);
+ intel_wm_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -1674,16 +1150,6 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-static int i915_psr_status_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
-
- return intel_psr_status(m, intel_dp);
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -1770,6 +1236,13 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
str_yes_no(crtc_state->dsc.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
str_yes_no(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "DSC_Output_Format_Sink_Support: RGB: %s YCBCR420: %s YCBCR444: %s\n",
+ str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd,
+ DP_DSC_RGB)),
+ str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd,
+ DP_DSC_YCbCr420_Native)),
+ str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd,
+ DP_DSC_YCbCr444)));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1895,13 +1368,80 @@ static const struct file_operations i915_dsc_bpc_fops = {
.write = i915_dsc_bpc_write
};
+static int i915_dsc_output_format_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ int ret;
+
+ if (!encoder)
+ return -ENODEV;
+
+ ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Output_Format: %s\n",
+ intel_output_format_name(crtc_state->output_format));
+
+out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_output_format_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int dsc_output_format = 0;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, &dsc_output_format);
+ if (ret < 0)
+ return ret;
+
+ intel_dp->force_dsc_output_format = dsc_output_format;
+ *offp += len;
+
+ return len;
+}
+
+static int i915_dsc_output_format_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_output_format_show, inode->i_private);
+}
+
+static const struct file_operations i915_dsc_output_format_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_output_format_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_output_format_write
+};
+
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
*/
static int i915_current_bpc_show(struct seq_file *m, void *data)
{
- struct intel_crtc *crtc = to_intel_crtc(m->private);
+ struct intel_crtc *crtc = m->private;
struct intel_crtc_state *crtc_state;
int ret;
@@ -1918,9 +1458,20 @@ static int i915_current_bpc_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
+/* Pipe may differ from crtc index if pipes are fused off */
+static int intel_crtc_pipe_show(struct seq_file *m, void *unused)
+{
+ struct intel_crtc *crtc = m->private;
+
+ seq_printf(m, "%c\n", pipe_name(crtc->pipe));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
+
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
- * @connector: pointer to a registered drm_connector
+ * @intel_connector: pointer to a registered drm_connector
*
* Cleanup will be done by drm_connector_unregister() through a call to
* drm_debugfs_connector_remove().
@@ -1936,19 +1487,11 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
return;
intel_drrs_connector_debugfs_add(intel_connector);
+ intel_psr_connector_debugfs_add(intel_connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
- debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
- connector, &i915_psr_sink_status_fops);
- }
-
- if (HAS_PSR(dev_priv) &&
- connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- debugfs_create_file("i915_psr_status", 0444, root,
- connector, &i915_psr_status_fops);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
@@ -1966,6 +1509,9 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_bpc", 0644, root,
connector, &i915_dsc_bpc_fops);
+
+ debugfs_create_file("i915_dsc_output_format", 0644, root,
+ connector, &i915_dsc_output_format_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
@@ -1983,15 +1529,20 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
*
* Failure to add debugfs entries should generally be ignored.
*/
-void intel_crtc_debugfs_add(struct drm_crtc *crtc)
+void intel_crtc_debugfs_add(struct intel_crtc *crtc)
{
- if (!crtc->debugfs_entry)
+ struct dentry *root = crtc->base.debugfs_entry;
+
+ if (!root)
return;
crtc_updates_add(crtc);
- intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc));
- intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
+ intel_drrs_crtc_debugfs_add(crtc);
+ intel_fbc_crtc_debugfs_add(crtc);
+ hsw_ips_crtc_debugfs_add(crtc);
- debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
+ debugfs_create_file("i915_current_bpc", 0444, root, crtc,
&i915_current_bpc_fops);
+ debugfs_create_file("i915_pipe", 0444, root, crtc,
+ &intel_crtc_pipe_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index d3a79c07c384..e1f479b7acd1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,18 +6,18 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_crtc;
struct drm_i915_private;
struct intel_connector;
+struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
void intel_connector_debugfs_add(struct intel_connector *connector);
-void intel_crtc_debugfs_add(struct drm_crtc *crtc);
+void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
-static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
+static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
new file mode 100644
index 000000000000..5126d0b5ae5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_LIMITS_H__
+#define __INTEL_DISPLAY_LIMITS_H__
+
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ PIPE_D,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+enum transcoder {
+ INVALID_TRANSCODER = -1,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
+
+ I915_MAX_TRANSCODERS
+};
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ /* tgl+ */
+ PORT_TC1 = PORT_D,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ PORT_D_XELPD = PORT_TC5,
+ PORT_E_XELPD,
+
+ I915_MAX_PORTS
+};
+
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_A,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_PORT_E,
+ HPD_PORT_TC1,
+ HPD_PORT_TC2,
+ HPD_PORT_TC3,
+ HPD_PORT_TC4,
+ HPD_PORT_TC5,
+ HPD_PORT_TC6,
+
+ HPD_NUM_PINS
+};
+
+#endif /* __INTEL_DISPLAY_LIMITS_H__ */
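A minimal sketch of the intended consumers: code that only needs these enum definitions (as intel_display_core.h does after this patch) can include the lightweight limits header instead of intel_display.h. The struct below is hypothetical and only illustrates sizing and indexing by the fixed enum values:

    #include "intel_display_limits.h"

    struct example_per_pipe_shadow {
            /* sized and indexed by the fixed-value enums from the limits header */
            unsigned int dpll_md[I915_MAX_PIPES];
            enum transcoder cpu_transcoder; /* TRANSCODER_A..TRANSCODER_D, EDP, DSI */
    };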
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 3adba64937de..7c9f4288329e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -19,8 +19,10 @@
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
+#include "intel_pps_regs.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
+#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
@@ -264,9 +266,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+sanitize_target_dc_state(struct drm_i915_private *i915,
u32 target_dc_state)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -279,7 +282,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
+ if (power_domains->allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -312,7 +315,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->display.dmc.target_dc_state)
+ if (state == power_domains->target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -323,7 +326,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->display.dmc.target_dc_state = state;
+ power_domains->target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -696,7 +699,7 @@ out_verify:
}
/**
- * intel_display_power_put_async - release a power domain reference asynchronously
+ * __intel_display_power_put_async - release a power domain reference asynchronously
* @i915: i915 device instance
* @domain: power domain to reference
* @wakeref: wakeref acquired for the reference that is being released
@@ -992,10 +995,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->display.dmc.allowed_dc_mask =
+ power_domains->allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->display.dmc.target_dc_state =
+ power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1181,8 +1184,10 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
"CPU PWM2 enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Utility pin enabled\n");
+ I915_STATE_WARN((intel_de_read(dev_priv, UTIL_PIN_CTL) &
+ (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
+ (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
+ "Utility pin enabled in PWM mode\n");
I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
"PCH GTC enabled\n");
@@ -1260,9 +1265,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
@@ -1306,9 +1309,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
@@ -1347,15 +1348,11 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
@@ -1363,25 +1360,21 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
bool enable)
{
i915_reg_t reg;
- u32 reset_bits, val;
+ u32 reset_bits;
if (IS_IVYBRIDGE(dev_priv)) {
reg = GEN7_MSG_CTL;
@@ -1394,14 +1387,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- val = intel_de_read(dev_priv, reg);
-
- if (enable)
- val |= reset_bits;
- else
- val &= ~reset_bits;
-
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -1580,10 +1566,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
return;
if (IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- /* Wa_1409767108:tgl,dg1,adl-s */
+ IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
@@ -1618,7 +1602,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1646,6 +1629,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+ if (DISPLAY_VER(dev_priv) == 14)
+ intel_de_rmw(dev_priv, DC_STATE_EN,
+ HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
+
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(dev_priv);
@@ -1670,11 +1657,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
- DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
- intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
- }
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
+ DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) >= 13)
@@ -1700,6 +1686,10 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
+ if (DISPLAY_VER(dev_priv) == 14)
+ intel_de_rmw(dev_priv, DC_STATE_EN, 0,
+ HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
+
/*
* 4. Disable Power Well 1 (PG1).
* The AUX IO power wells are toggled on demand, so they are already
@@ -2055,7 +2045,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2244,22 +2234,22 @@ void intel_display_power_suspend(struct drm_i915_private *i915)
void intel_display_power_resume(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
+ if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
+ else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
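Most hunks in this file collapse an open-coded read/modify/write sequence into intel_de_rmw(dev_priv, reg, clear, set). A minimal sketch of the pattern being replaced, assuming only intel_de_read()/intel_de_write(); example_rmw() is illustrative, not the driver's actual helper:

    static u32 example_rmw(struct drm_i915_private *i915, i915_reg_t reg,
                           u32 clear, u32 set)
    {
            u32 val = intel_de_read(i915, reg);

            val &= ~clear;  /* bits to clear */
            val |= set;     /* bits to set */
            intel_de_write(i915, reg, val);

            return val;
    }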
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2154d900b1aa..8e96be8e6330 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -137,6 +137,10 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+
intel_wakeref_t init_wakeref;
intel_wakeref_t disable_wakeref;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index f5d66ca85b19..6645eb1911d8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -10,6 +10,7 @@
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_types.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8710dd41ffd4..62bafcbc7937 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -15,6 +15,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
+#include "intel_dp_aux_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -333,7 +334,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
@@ -356,9 +356,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
@@ -380,17 +378,27 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
hsw_power_well_pre_disable(dev_priv,
power_well->desc->irq_pipe_mask);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_EDP &&
+ encoder->port == port)
+ return true;
+ }
+
+ return false;
+}
+
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -398,29 +406,22 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12) {
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val | ICL_LANE_ENABLE_AUX);
- }
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
- }
+ !intel_port_is_edp(dev_priv, (enum port)phy))
+ intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
+ 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
}
static void
@@ -430,17 +431,12 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val & ~ICL_LANE_ENABLE_AUX);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -502,19 +498,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- u32 val;
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
- val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+ intel_de_rmw(dev_priv, regs->driver,
+ 0,
+ HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
/*
* An AUX timeout is expected if the TBT DP tunnel is down,
@@ -700,19 +692,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+void gen9_sanitize_dc_state(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
u32 val;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
- val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->display.dmc.dc_state, val);
- dev_priv->display.dmc.dc_state = val;
+ power_domains->dc_state, val);
+ power_domains->dc_state = val;
}
/**
@@ -740,6 +733,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
*/
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 val;
u32 mask;
@@ -747,8 +741,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->display.dmc.allowed_dc_mask))
- state &= dev_priv->display.dmc.allowed_dc_mask;
+ state & ~power_domains->allowed_dc_mask))
+ state &= power_domains->allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -756,16 +750,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->display.dmc.dc_state)
+ if ((val & mask) != power_domains->dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->display.dmc.dc_state, val & mask);
+ power_domains->dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->display.dmc.dc_state = val & mask;
+ power_domains->dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -776,12 +770,8 @@ static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- val = intel_de_read(dev_priv, DC_STATE_EN);
- val &= ~DC_STATE_DC3CO_STATUS;
- intel_de_write(dev_priv, DC_STATE_EN, val);
+ intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -820,8 +810,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -829,8 +819,10 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
+ (intel_de_read(dev_priv, UTIL_PIN_CTL) &
+ (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
+ (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
+ "Utility pin enabled in PWM mode\n");
drm_WARN_ONCE(&dev_priv->drm,
(intel_de_read(dev_priv, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC6),
@@ -847,8 +839,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -957,9 +949,10 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -998,10 +991,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->display.dmc.target_dc_state) {
+ switch (power_domains->target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1033,9 +1028,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1049,8 +1044,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1149,18 +1144,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 val;
-
/*
* On driver load, a pipe may be active and driving a DSI display.
* Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1276,8 +1267,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1289,8 +1279,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
vlv_set_power_well(dev_priv, power_well, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 02605418ff08..755c1ea8225c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -13,7 +13,7 @@
#define VLV_DISPLAY_BASE 0x180000
/*
- * Named helper wrappers around _PICK_EVEN() and _PICK().
+ * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES().
*/
#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
@@ -29,12 +29,8 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
-#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__))
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
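
The replacement macro is not defined in this hunk; the sketch below models its presumed semantics (two evenly spaced address ranges split at a boundary index), which reproduces the old three-entry _PICK() behaviour for the _MMIO_PIPE3()/_MMIO_PORT3() call sites shown here. Treat the helper definitions and the example addresses as assumptions.

#include <stdio.h>

/* Assumed semantics, not the kernel's exact definitions:
 * PICK_EVEN(i, a, b) is the i-th address of an evenly spaced range starting
 * at a with stride (b - a); PICK_EVEN_2RANGES() splits the index space into
 * two such ranges at a boundary index. */
#define PICK_EVEN(i, a, b) ((a) + (i) * ((b) - (a)))
#define PICK_EVEN_2RANGES(i, boundary, a, b, c, d) \
	((i) < (boundary) ? PICK_EVEN(i, a, b) : \
			    PICK_EVEN((i) - (boundary), c, d))

int main(void)
{
	/* _MMIO_PIPE3(pipe, A, B, C) -> _PICK_EVEN_2RANGES(pipe, 1, A, A, B, C):
	 * pipe 0 maps to A, pipes 1..2 map to B, C (stride C - B). */
	unsigned int A = 0x60000, B = 0x61000, C = 0x62000;
	unsigned int pipe;

	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %u -> 0x%x\n", pipe,
		       PICK_EVEN_2RANGES(pipe, 1, A, A, B, C));
	return 0;
}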
/*
* Device info offset array based helpers for groups of registers with unevenly
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
new file mode 100644
index 000000000000..918d0327169a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_vblank.h>
+
+#include "gt/intel_rps.h"
+#include "i915_drv.h"
+#include "intel_display_rps.h"
+#include "intel_display_types.h"
+
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+ if (state->rps_interactive == interactive)
+ return;
+
+ intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
+ state->rps_interactive = interactive;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
new file mode 100644
index 000000000000..e19009c2371a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_RPS_H__
+#define __INTEL_DISPLAY_RPS_H__
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_atomic_state;
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence);
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive);
+
+#endif /* __INTEL_DISPLAY_RPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 725aba3fa531..651ea8564e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -17,6 +17,7 @@
#include "i915_irq.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
+#include "intel_vblank.h"
#define __dev_name_i915(i915) dev_name((i915)->drm.dev)
#define __dev_name_kms(obj) dev_name((obj)->base.dev->dev)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index f07395065a69..47395b39c8f4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -43,22 +43,24 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
-#include <drm/i915_mei_hdcp_interface.h>
+#include <drm/i915_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_vma.h"
#include "i915_vma_types.h"
#include "intel_bios.h"
#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_printer;
struct __intel_global_objs_state;
struct intel_ddi_buf_trans;
struct intel_fbc;
struct intel_connector;
+struct intel_tc_port;
/*
* Display related stuff
@@ -168,9 +170,6 @@ struct intel_encoder {
int (*compute_config_late)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
- void (*update_prepare)(struct intel_atomic_state *,
- struct intel_encoder *,
- struct intel_crtc *);
void (*pre_pll_enable)(struct intel_atomic_state *,
struct intel_encoder *,
const struct intel_crtc_state *,
@@ -183,9 +182,6 @@ struct intel_encoder {
struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*update_complete)(struct intel_atomic_state *,
- struct intel_encoder *,
- struct intel_crtc *);
void (*disable)(struct intel_atomic_state *,
struct intel_encoder *,
const struct intel_crtc_state *,
@@ -254,6 +250,11 @@ struct intel_encoder {
* Returns whether the port clock is enabled or not.
*/
bool (*is_clock_enabled)(struct intel_encoder *encoder);
+ /*
+ * Returns the PLL type the port uses.
+ */
+ enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
@@ -262,8 +263,6 @@ struct intel_encoder {
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
/* VBT information for this encoder (may be NULL for older platforms) */
const struct intel_bios_encoder_data *devdata;
@@ -291,7 +290,7 @@ struct intel_vbt_panel_data {
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits */
- unsigned int panel_type:4;
+ int panel_type;
unsigned int lvds_dither:1;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
@@ -327,10 +326,11 @@ struct intel_vbt_panel_data {
struct {
u16 pwm_freq_hz;
u16 brightness_precision_bits;
+ u16 hdr_dpcd_refresh_timeout;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
- u8 controller; /* brightness controller number */
+ s8 controller; /* brightness controller number */
enum intel_backlight_type type;
} backlight;
@@ -351,6 +351,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ const struct drm_edid *fixed_edid;
+
struct list_head fixed_modes;
/* backlight */
@@ -591,9 +594,8 @@ struct intel_connector {
/* Panel info for eDP and LVDS */
struct intel_panel panel;
- /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
- struct edid *edid;
- struct edid *detect_edid;
+ /* Cached EDID for detect. */
+ const struct drm_edid *detect_edid;
/* Number of times hotplug detection was tried after an HPD interrupt */
int hotplug_retries;
@@ -1150,6 +1152,7 @@ struct intel_crtc_state {
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
+ bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
struct drm_dp_vsc_sdp psr_vsc;
@@ -1248,6 +1251,9 @@ struct intel_crtc_state {
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ /* bitmask of planes with async flip active */
+ u8 async_flip_planes;
+
u8 framestart_delay; /* 1-4 */
u8 msa_timing_delay; /* 0-3 */
@@ -1261,6 +1267,8 @@ struct intel_crtc_state {
struct drm_dp_vsc_sdp vsc;
} infoframes;
+ u8 eld[MAX_ELD_BYTES];
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -1295,6 +1303,8 @@ struct intel_crtc_state {
/* Forward Error correction State */
bool fec_enable;
+ bool sdp_split_enable;
+
/* Pointer to master transcoder in case of tiled displays */
enum transcoder master_transcoder;
@@ -1497,17 +1507,6 @@ struct intel_watermark_params {
u8 cacheline_size;
};
-struct cxsr_latency {
- bool is_desktop : 1;
- bool is_ddr3 : 1;
- u16 fsb_freq;
- u16 mem_freq;
- u16 display_sr;
- u16 display_hpll_disable;
- u16 cursor_sr;
- u16 cursor_hpll_disable;
-};
-
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
@@ -1568,11 +1567,19 @@ struct intel_pps {
ktime_t panel_power_off_time;
intel_wakeref_t vdd_wakeref;
- /*
- * Pipe whose power sequencer is currently locked into
- * this port. Only relevant on VLV/CHV.
- */
- enum pipe pps_pipe;
+ union {
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+
+ /*
+ * Power sequencer index. Only relevant on BXT+.
+ */
+ int pps_idx;
+ };
+
/*
* Pipe currently driving the port. Used for preventing
* the use of the PPS for any pipe currently driving
@@ -1581,7 +1588,7 @@ struct intel_pps {
enum pipe active_pipe;
/*
* Set if the sequencer may be reset due to a power transition,
- * requiring a reinitialization. Only relevant on BXT.
+ * requiring a reinitialization. Only relevant on BXT+.
*/
bool pps_reset;
struct edp_power_seq pps_delays;
@@ -1618,6 +1625,8 @@ struct intel_psr {
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@@ -1727,6 +1736,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
+ int force_dsc_output_format;
int force_dsc_bpc;
bool hobl_failed;
@@ -1767,16 +1777,7 @@ struct intel_digital_port {
intel_wakeref_t ddi_io_wakeref;
intel_wakeref_t aux_wakeref;
- struct mutex tc_lock; /* protects the TypeC port mode */
- intel_wakeref_t tc_lock_wakeref;
- enum intel_display_power_domain tc_lock_power_domain;
- struct delayed_work tc_disconnect_phy_work;
- int tc_link_refcount;
- bool tc_legacy_port:1;
- char tc_port_name[8];
- enum tc_port_mode tc_mode;
- enum phy_fia tc_phy_fia;
- u8 tc_phy_fia_idx;
+ struct intel_tc_port *tc;
/* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
struct mutex hdcp_mutex;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index eff3add70611..8a88de67ff0a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -38,66 +38,101 @@
* low-power state and comes back to normal.
*/
+enum intel_dmc_id {
+ DMC_FW_MAIN = 0,
+ DMC_FW_PIPEA,
+ DMC_FW_PIPEB,
+ DMC_FW_PIPEC,
+ DMC_FW_PIPED,
+ DMC_FW_MAX
+};
+
+struct intel_dmc {
+ struct drm_i915_private *i915;
+ struct work_struct work;
+ const char *fw_path;
+ u32 max_fw_size; /* bytes */
+ u32 version;
+ struct dmc_fw_info {
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dmc_offset;
+ u32 start_mmioaddr;
+ u32 dmc_fw_size; /*dwords */
+ u32 *payload;
+ bool present;
+ } dmc_info[DMC_FW_MAX];
+};
+
+/* Note: This may be NULL. */
+static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
+{
+ return i915->display.dmc.dmc;
+}
+
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
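
For reference, the version packing above round-trips as expected; a standalone check using an arbitrary 2.16 example value:

#include <stdio.h>
#include <stdint.h>

#define DMC_VERSION(major, minor)   ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)  ((version) >> 16)
#define DMC_VERSION_MINOR(version)  ((version) & 0xffff)

int main(void)
{
	uint32_t v = DMC_VERSION(2, 16);

	/* prints "2.16 (0x20010)" */
	printf("%u.%u (0x%x)\n", DMC_VERSION_MAJOR(v), DMC_VERSION_MINOR(v), v);
	return 0;
}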
-#define DMC_PATH(platform, major, minor) \
- "i915/" \
- __stringify(platform) "_dmc_ver" \
- __stringify(major) "_" \
+#define DMC_PATH(platform) \
+ "i915/" __stringify(platform) "_dmc.bin"
+
+/*
+ * New DMC additions should not use this. This is used solely to remain
+ * compatible with systems that have not yet updated DMC blobs to use
+ * unversioned file names.
+ */
+#define DMC_LEGACY_PATH(platform, major, minor) \
+ "i915/" \
+ __stringify(platform) "_dmc_ver" \
+ __stringify(major) "_" \
__stringify(minor) ".bin"
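
The two path macros differ only in whether the version is baked into the file name; a standalone expansion check, with __stringify() reimplemented locally so the snippet builds on its own:

#include <stdio.h>

#define __stringify_1(x...) #x
#define __stringify(x...)   __stringify_1(x)

#define DMC_PATH(platform) \
	"i915/" __stringify(platform) "_dmc.bin"
#define DMC_LEGACY_PATH(platform, major, minor) \
	"i915/" __stringify(platform) "_dmc_ver" \
	__stringify(major) "_" __stringify(minor) ".bin"

int main(void)
{
	puts(DMC_PATH(adlp));               /* i915/adlp_dmc.bin */
	puts(DMC_LEGACY_PATH(adlp, 2, 16)); /* i915/adlp_dmc_ver2_16.bin */
	return 0;
}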
+#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
-
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define DG2_DMC_PATH DMC_PATH(dg2, 2, 08)
-#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 8)
+#define MTL_DMC_PATH DMC_PATH(mtl)
+MODULE_FIRMWARE(MTL_DMC_PATH);
+
+#define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
-#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
+#define ADLP_DMC_PATH DMC_PATH(adlp)
+#define ADLP_DMC_FALLBACK_PATH DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
+MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);
-#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
-#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
+#define ADLS_DMC_PATH DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);
-#define DG1_DMC_PATH DMC_PATH(dg1, 2, 02)
-#define DG1_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+#define DG1_DMC_PATH DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);
-#define RKL_DMC_PATH DMC_PATH(rkl, 2, 03)
-#define RKL_DMC_VERSION_REQUIRED DMC_VERSION(2, 3)
+#define RKL_DMC_PATH DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);
-#define TGL_DMC_PATH DMC_PATH(tgl, 2, 12)
-#define TGL_DMC_VERSION_REQUIRED DMC_VERSION(2, 12)
+#define TGL_DMC_PATH DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);
-#define ICL_DMC_PATH DMC_PATH(icl, 1, 09)
-#define ICL_DMC_VERSION_REQUIRED DMC_VERSION(1, 9)
+#define ICL_DMC_PATH DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);
-#define GLK_DMC_PATH DMC_PATH(glk, 1, 04)
-#define GLK_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define GLK_DMC_PATH DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE 0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);
-#define KBL_DMC_PATH DMC_PATH(kbl, 1, 04)
-#define KBL_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define KBL_DMC_PATH DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);
-#define SKL_DMC_PATH DMC_PATH(skl, 1, 27)
-#define SKL_DMC_VERSION_REQUIRED DMC_VERSION(1, 27)
+#define SKL_DMC_PATH DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);
-#define BXT_DMC_PATH DMC_PATH(bxt, 1, 07)
-#define BXT_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define BXT_DMC_PATH DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
@@ -108,6 +143,8 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
#define DMC_V3_MAX_MMIO_COUNT 20
#define DMC_V1_MMIO_START_RANGE 0x80000
+#define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A))
+
struct intel_css_header {
/* 0x09 for DMC */
u32 module_type;
@@ -248,9 +285,19 @@ struct stepping_info {
char substepping;
};
-static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+#define for_each_dmc_id(__dmc_id) \
+ for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)
+
+static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
+{
+ return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
+}
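
The per-pipe firmware id plumbing above (the enum, PIPE_TO_DMC_ID(), is_valid_dmc_id() and for_each_dmc_id()) can be sanity-checked in isolation; a minimal userspace copy of the same id space:

#include <stdio.h>
#include <stdbool.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

enum intel_dmc_id {
	DMC_FW_MAIN = 0,
	DMC_FW_PIPEA,
	DMC_FW_PIPEB,
	DMC_FW_PIPEC,
	DMC_FW_PIPED,
	DMC_FW_MAX
};

#define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A))

#define for_each_dmc_id(__dmc_id) \
	for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)

static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
	return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
}

int main(void)
{
	enum intel_dmc_id dmc_id;

	for_each_dmc_id(dmc_id)
		printf("id %d valid: %d\n", dmc_id, is_valid_dmc_id(dmc_id));

	/* PIPE_A..PIPE_D map to DMC_FW_PIPEA..DMC_FW_PIPED (ids 1..4) */
	printf("PIPE_C -> %d\n", PIPE_TO_DMC_ID(PIPE_C));
	return 0;
}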
+
+static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
{
- return i915->display.dmc.dmc_info[dmc_id].payload;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ return dmc && dmc->dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -269,12 +316,12 @@ intel_get_stepping_info(struct drm_i915_private *i915,
return si;
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
/* The below bit doesn't need to be cleared ever afterwards */
- intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ intel_de_rmw(i915, DC_STATE_DEBUG, 0,
DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
- intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
+ intel_de_posting_read(i915, DC_STATE_DEBUG);
}
static void disable_event_handler(struct drm_i915_private *i915,
@@ -314,26 +361,23 @@ disable_flip_queue_event(struct drm_i915_private *i915,
}
static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
- switch (dmc_id) {
- case DMC_FW_MAIN:
+ if (dmc_id == DMC_FW_MAIN) {
if (DISPLAY_VER(i915) == 12) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
return true;
}
- break;
- case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
if (IS_DG2(i915)) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
return true;
}
- break;
}
return false;
@@ -342,13 +386,13 @@ get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
- int dmc_id;
+ enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
return;
- for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ for_each_dmc_id(dmc_id) {
i915_reg_t ctl_reg;
i915_reg_t htp_reg;
@@ -364,32 +408,29 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
- int id;
+ enum intel_dmc_id dmc_id;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(i915) < 12)
return;
- for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ for_each_dmc_id(dmc_id) {
int handler;
- if (!has_dmc_id_fw(i915, id))
+ if (!has_dmc_id_fw(i915, dmc_id))
continue;
for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
disable_event_handler(i915,
- DMC_EVT_CTL(i915, id, handler),
- DMC_EVT_HTP(i915, id, handler));
+ DMC_EVT_CTL(i915, dmc_id, handler),
+ DMC_EVT_HTP(i915, dmc_id, handler));
}
}
-static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
enum pipe pipe;
- if (DISPLAY_VER(i915) != 13)
- return;
-
/*
* Wa_16015201720:adl-p,dg2
* The WA requires clock gating to be disabled all the time
@@ -407,59 +448,106 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0);
}
+static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
+{
+ /*
+ * Wa_16015201720
+ * The WA requires clock gating to be disabled all the time
+ * for pipe A and B.
+ */
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
+ MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ if (DISPLAY_VER(i915) >= 14 && enable)
+ mtl_pipedmc_clock_gating_wa(i915);
+ else if (DISPLAY_VER(i915) == 13)
+ adlp_pipedmc_clock_gating_wa(i915, enable);
+}
+
+void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ else
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+}
+
+void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ else
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
*
* DMC firmware is read from a .bin file and kept in internal memory one time.
* Every time the display comes back from a low power state this function is called to
* copy the firmware from internal memory to registers.
*/
-void intel_dmc_load_program(struct drm_i915_private *dev_priv)
+void intel_dmc_load_program(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
- u32 id, i;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
+ u32 i;
- if (!intel_dmc_has_payload(dev_priv))
+ if (!intel_dmc_has_payload(i915))
return;
- pipedmc_clock_gating_wa(dev_priv, true);
+ pipedmc_clock_gating_wa(i915, true);
- disable_all_event_handlers(dev_priv);
+ disable_all_event_handlers(i915);
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(&i915->runtime_pm);
preempt_disable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
- intel_uncore_write_fw(&dev_priv->uncore,
- DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
- dmc->dmc_info[id].payload[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(i915,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
}
}
preempt_enable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
- intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
- dmc->dmc_info[id].mmiodata[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc->dmc_info[dmc_id].mmiodata[i]);
}
}
- dev_priv->display.dmc.dc_state = 0;
+ power_domains->dc_state = 0;
- gen9_set_dc_state_debugmask(dev_priv);
+ gen9_set_dc_state_debugmask(i915);
/*
* Flip queue events need to be disabled before enabling DC5/6.
* i915 doesn't use the flip queue feature, so disable it already
* here.
*/
- disable_all_flip_queue_events(dev_priv);
+ disable_all_flip_queue_events(i915);
- pipedmc_clock_gating_wa(dev_priv, false);
+ pipedmc_clock_gating_wa(i915, false);
}
/**
@@ -481,8 +569,11 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
void assert_dmc_loaded(struct drm_i915_private *i915)
{
- drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
+ drm_WARN_ONCE(&i915->drm, dmc &&
+ !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -517,15 +608,15 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
const struct stepping_info *si,
u8 package_ver)
{
- unsigned int i, id;
-
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
+ enum intel_dmc_id dmc_id;
+ unsigned int i;
for (i = 0; i < num_entries; i++) {
- id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
+ dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
- if (id >= DMC_FW_MAX) {
- drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
+ if (!is_valid_dmc_id(dmc_id)) {
+ drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
continue;
}
@@ -533,29 +624,24 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
* check for the stepping since we already found a previous FW
* for this id.
*/
- if (dmc->dmc_info[id].present)
+ if (dmc->dmc_info[dmc_id].present)
continue;
if (fw_info_matches_stepping(&fw_info[i], si)) {
- dmc->dmc_info[id].present = true;
- dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
+ dmc->dmc_info[dmc_id].present = true;
+ dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
}
}
}
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
- int header_ver, u8 dmc_id)
+ int header_ver, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 start_range, end_range;
int i;
- if (dmc_id >= DMC_FW_MAX) {
- drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
- return false;
- }
-
if (header_ver == 1) {
start_range = DMC_MMIO_START_RANGE;
end_range = DMC_MMIO_END_RANGE;
@@ -583,9 +669,9 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
- size_t rem_size, u8 dmc_id)
+ size_t rem_size, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -696,7 +782,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -750,7 +836,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -765,34 +851,22 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return 0;
}
- if (dmc->required_version &&
- css_header->version != dmc->required_version) {
- drm_info(&i915->drm, "Refusing to load DMC firmware v%u.%u,"
- " please use v%u.%u\n",
- DMC_VERSION_MAJOR(css_header->version),
- DMC_VERSION_MINOR(css_header->version),
- DMC_VERSION_MAJOR(dmc->required_version),
- DMC_VERSION_MINOR(dmc->required_version));
- return 0;
- }
-
dmc->version = css_header->version;
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
+ struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
- const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
+ const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
+ enum intel_dmc_id dmc_id;
u32 readcount = 0;
u32 r, offset;
- int id;
if (!fw)
return;
@@ -813,62 +887,79 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
- for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->display.dmc.dmc_info[id].present)
+ for_each_dmc_id(dmc_id) {
+ if (!dmc->dmc_info[dmc_id].present)
continue;
- offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
+ offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
if (offset > fw->size) {
- drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
+ drm_err(&i915->drm, "Reading beyond the fw_size\n");
continue;
}
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
- parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
}
-static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
- dev_priv->display.dmc.wakeref =
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+ i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
-static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->display.dmc.wakeref);
+ fetch_and_zero(&i915->display.dmc.wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+}
+
+static const char *dmc_fallback_path(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return ADLP_DMC_FALLBACK_PATH;
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+ return NULL;
}
static void dmc_load_work_fn(struct work_struct *work)
{
- struct drm_i915_private *dev_priv;
- struct intel_dmc *dmc;
+ struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
+ struct drm_i915_private *i915 = dmc->i915;
const struct firmware *fw = NULL;
+ const char *fallback_path;
+ int err;
+
+ err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
+
+ if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ fallback_path = dmc_fallback_path(i915);
+ if (fallback_path) {
+ drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
+ dmc->fw_path, fallback_path);
+ err = request_firmware(&fw, fallback_path, i915->drm.dev);
+ if (err == 0)
+ dmc->fw_path = fallback_path;
+ }
+ }
- dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
- dmc = &dev_priv->display.dmc;
-
- request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
- parse_dmc_fw(dev_priv, fw);
+ parse_dmc_fw(dmc, fw);
- if (intel_dmc_has_payload(dev_priv)) {
- intel_dmc_load_program(dev_priv);
- intel_dmc_runtime_pm_put(dev_priv);
+ if (intel_dmc_has_payload(i915)) {
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
- drm_info(&dev_priv->drm,
- "Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
- drm_notice(&dev_priv->drm,
+ drm_notice(&i915->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
dmc->fw_path);
- drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
+ drm_notice(&i915->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -876,19 +967,17 @@ static void dmc_load_work_fn(struct work_struct *work)
}
/**
- * intel_dmc_ucode_init() - initialize the firmware loading.
- * @dev_priv: i915 drm device.
+ * intel_dmc_init() - initialize the firmware loading.
+ * @i915: i915 drm device.
*
* This function is called at the time of loading the display driver to read
* firmware from a .bin file and copy it into internal memory.
*/
-void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_init(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
-
- INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
+ struct intel_dmc *dmc;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
@@ -899,181 +988,195 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_dmc_runtime_pm_get(dev_priv);
+ intel_dmc_runtime_pm_get(i915);
+
+ dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return;
- if (IS_DG2(dev_priv)) {
+ dmc->i915 = i915;
+
+ INIT_WORK(&dmc->work, dmc_load_work_fn);
+
+ if (IS_METEORLAKE(i915)) {
+ dmc->fw_path = MTL_DMC_PATH;
+ dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
+ } else if (IS_DG2(i915)) {
dmc->fw_path = DG2_DMC_PATH;
- dmc->required_version = DG2_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(dev_priv)) {
+ } else if (IS_ALDERLAKE_P(i915)) {
dmc->fw_path = ADLP_DMC_PATH;
- dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (IS_ALDERLAKE_S(i915)) {
dmc->fw_path = ADLS_DMC_PATH;
- dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(dev_priv)) {
+ } else if (IS_DG1(i915)) {
dmc->fw_path = DG1_DMC_PATH;
- dmc->required_version = DG1_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (IS_ROCKETLAKE(i915)) {
dmc->fw_path = RKL_DMC_PATH;
- dmc->required_version = RKL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(dev_priv)) {
+ } else if (IS_TIGERLAKE(i915)) {
dmc->fw_path = TGL_DMC_PATH;
- dmc->required_version = TGL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(i915) == 11) {
dmc->fw_path = ICL_DMC_PATH;
- dmc->required_version = ICL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_GEMINILAKE(i915)) {
dmc->fw_path = GLK_DMC_PATH;
- dmc->required_version = GLK_DMC_VERSION_REQUIRED;
dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
dmc->fw_path = KBL_DMC_PATH;
- dmc->required_version = KBL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915)) {
dmc->fw_path = SKL_DMC_PATH;
- dmc->required_version = SKL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
dmc->fw_path = BXT_DMC_PATH;
- dmc->required_version = BXT_DMC_VERSION_REQUIRED;
dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
- if (dev_priv->params.dmc_firmware_path) {
- if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
- dmc->fw_path = NULL;
- drm_info(&dev_priv->drm,
+ if (i915->params.dmc_firmware_path) {
+ if (strlen(i915->params.dmc_firmware_path) == 0) {
+ drm_info(&i915->drm,
"Disabling DMC firmware and runtime PM\n");
- return;
+ goto out;
}
- dmc->fw_path = dev_priv->params.dmc_firmware_path;
- /* Bypass version check for firmware override. */
- dmc->required_version = 0;
+ dmc->fw_path = i915->params.dmc_firmware_path;
}
if (!dmc->fw_path) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
- return;
+ goto out;
}
- drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->display.dmc.work);
+ i915->display.dmc.dmc = dmc;
+
+ drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dmc->work);
+
+ return;
+
+out:
+ kfree(dmc);
}
/**
- * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
- * @dev_priv: i915 drm device
+ * intel_dmc_suspend() - prepare DMC firmware before system suspend
+ * @i915: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_suspend(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ if (!HAS_DMC(i915))
return;
- flush_work(&dev_priv->display.dmc.work);
+ if (dmc)
+ flush_work(&dmc->work);
/* Drop the reference held in case DMC isn't loaded. */
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_put(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_put(i915);
}
/**
- * intel_dmc_ucode_resume() - init DMC firmware during system resume
- * @dev_priv: i915 drm device
+ * intel_dmc_resume() - init DMC firmware during system resume
+ * @i915: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_dmc_ucode_suspend().
+ * resources released in intel_dmc_suspend().
*/
-void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_resume(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_get(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_get(i915);
}
/**
- * intel_dmc_ucode_fini() - unload the DMC firmware.
- * @dev_priv: i915 drm device.
+ * intel_dmc_fini() - unload the DMC firmware.
+ * @i915: i915 drm device.
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_dmc_fini(struct drm_i915_private *i915)
{
- int id;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
- intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ intel_dmc_suspend(i915);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+
+ if (dmc) {
+ for_each_dmc_id(dmc_id)
+ kfree(dmc->dmc_info[dmc_id].payload);
- for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->display.dmc.dmc_info[id].payload);
+ kfree(dmc);
+ i915->display.dmc.dmc = NULL;
+ }
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->display.dmc;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
if (!HAS_DMC(i915))
return;
+ i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
i915_error_printf(m, "DMC loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- i915_error_printf(m, "DMC fw version: %d.%d\n",
- DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
+ if (dmc)
+ i915_error_printf(m, "DMC fw version: %d.%d\n",
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
}
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
intel_wakeref_t wakeref;
- struct intel_dmc *dmc;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->display.dmc;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
str_yes_no(GRAPHICS_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
str_yes_no(IS_ALDERLAKE_P(i915) ||
DISPLAY_VER(i915) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));
if (!intel_dmc_has_payload(i915))
goto out;
@@ -1107,9 +1210,10 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(i915, dc6_reg));
-out:
seq_printf(m, "program base: 0x%08x\n",
intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+
+out:
seq_printf(m, "ssp base: 0x%08x\n",
intel_de_read(i915, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 67e03315ef99..fd607afff2ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -6,51 +6,20 @@
#ifndef __INTEL_DMC_H__
#define __INTEL_DMC_H__
-#include "i915_reg_defs.h"
-#include "intel_wakeref.h"
-#include <linux/workqueue.h>
+#include <linux/types.h>
struct drm_i915_error_state_buf;
struct drm_i915_private;
+enum pipe;
-enum {
- DMC_FW_MAIN = 0,
- DMC_FW_PIPEA,
- DMC_FW_PIPEB,
- DMC_FW_PIPEC,
- DMC_FW_PIPED,
- DMC_FW_MAX
-};
-
-struct intel_dmc {
- struct work_struct work;
- const char *fw_path;
- u32 required_version;
- u32 max_fw_size; /* bytes */
- u32 version;
- struct dmc_fw_info {
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
- u32 dmc_offset;
- u32 start_mmioaddr;
- u32 dmc_fw_size; /*dwords */
- u32 *payload;
- bool present;
- } dmc_info[DMC_FW_MAX];
-
- u32 dc_state;
- u32 target_dc_state;
- u32 allowed_dc_mask;
- intel_wakeref_t wakeref;
-};
-
-void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
void intel_dmc_disable_program(struct drm_i915_private *i915);
-void intel_dmc_ucode_fini(struct drm_i915_private *i915);
-void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
-void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
+void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
+void intel_dmc_fini(struct drm_i915_private *i915);
+void intel_dmc_suspend(struct drm_i915_private *i915);
+void intel_dmc_resume(struct drm_i915_private *i915);
bool intel_dmc_has_payload(struct drm_i915_private *i915);
void intel_dmc_debugfs_register(struct drm_i915_private *i915);
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 5e5e41644ddf..cf10094acae3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -11,6 +11,16 @@
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define _PIPEDMC_CONTROL_A 0x45250
+#define _PIPEDMC_CONTROL_B 0x45254
+#define PIPEDMC_CONTROL(pipe) _MMIO_PIPE(pipe, \
+ _PIPEDMC_CONTROL_A, \
+ _PIPEDMC_CONTROL_B)
+#define PIPEDMC_ENABLE REG_BIT(0)
+
+#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
+#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
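
On MTL a single control register carries one enable bit per pipe, spaced four bits apart; a standalone check of the resulting bit layout, with REG_BIT() approximated by a plain shift:

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

/* simplified stand-in for the kernel's REG_BIT() */
#define REG_BIT(n)               (1u << (n))
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)

int main(void)
{
	enum pipe p;

	/* pipes A..D -> bits 0, 4, 8, 12 of MTL_PIPEDMC_CONTROL */
	for (p = PIPE_A; p <= PIPE_D; p++)
		printf("pipe %d -> 0x%04x\n", p, PIPEDMC_ENABLE_MTL(p));
	return 0;
}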
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 75070eb07d4b..529ee22be872 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -76,6 +76,7 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
+#include "intel_crtc_state_dump.h"
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -117,7 +118,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
}
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
@@ -289,7 +289,7 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
{
- int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
int max_lanes = dig_port->max_lanes;
if (vbt_max_lanes)
@@ -426,7 +426,7 @@ static int vbt_max_link_rate(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
- max_rate = intel_bios_dp_max_link_rate(encoder);
+ max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
if (intel_dp_is_edp(intel_dp)) {
struct intel_connector *connector = intel_dp->attached_connector;
@@ -673,23 +673,75 @@ small_joiner_ram_size_bits(struct drm_i915_private *i915)
return 6144 * 8;
}
-static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
- u32 link_clock, u32 lane_count,
- u32 mode_clock, u32 mode_hdisplay,
- bool bigjoiner,
- u32 pipe_bpp)
+u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
- u32 bits_per_pixel, max_bpp_small_joiner_ram;
+ u32 bits_per_pixel = bpp;
int i;
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
+ return 0;
+ }
+
+ /* From XE_LPD onwards we support from bpc up to uncompressed bpp-1 BPPs */
+ if (DISPLAY_VER(i915) >= 13) {
+ bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
+
+ /*
+ * According to BSpec, 27 is the max DSC output bpp,
+ * 8 is the min DSC output bpp
+ */
+ bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
+ } else {
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
+ bits_per_pixel, valid_dsc_bpp[i]);
+
+ bits_per_pixel = valid_dsc_bpp[i];
+ }
+
+ return bits_per_pixel;
+}
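
A sketch of the pre-Xe_LPD branch of the new helper, i.e. the nearest-match walk over the VESA bpp table. The table contents below are illustrative stand-ins for the driver's valid_dsc_bpp[], which is defined elsewhere in this file and may differ.

#include <stdio.h>

/* illustrative stand-in for the driver's valid_dsc_bpp[] table */
static const unsigned int valid_dsc_bpp[] = { 6, 8, 10, 12, 15 };

static unsigned int nearest_valid_bpp(unsigned int bpp)
{
	unsigned int i;

	if (bpp < valid_dsc_bpp[0])
		return 0;	/* below the smallest supported value */

	/* pick the largest table entry that does not exceed bpp */
	for (i = 0; i < sizeof(valid_dsc_bpp) / sizeof(valid_dsc_bpp[0]) - 1; i++)
		if (bpp < valid_dsc_bpp[i + 1])
			break;

	return valid_dsc_bpp[i];
}

int main(void)
{
	printf("%u\n", nearest_valid_bpp(11));	/* 10 */
	printf("%u\n", nearest_valid_bpp(20));	/* 15 */
	printf("%u\n", nearest_valid_bpp(5));	/* 0  */
	return 0;
}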
+
+u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay,
+ bool bigjoiner,
+ u32 pipe_bpp,
+ u32 timeslots)
+{
+ u32 bits_per_pixel, max_bpp_small_joiner_ram;
+
/*
* Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
- * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
- * for SST -> TimeSlotsPerMTP is 1,
- * for MST -> TimeSlotsPerMTP has to be calculated
+ * (LinkSymbolClock)* 8 * (TimeSlots / 64)
+ * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
+ * for MST -> TimeSlots has to be calculated, based on mode requirements
+ *
+ * Due to FEC overhead, the available bw is reduced to 97.2261%.
+ * To support the given mode:
+ * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
+ * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
+ * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
+ * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
+ * (ModeClock / FEC Overhead)
+ * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
+ * (ModeClock / FEC Overhead * 8)
*/
- bits_per_pixel = (link_clock * lane_count * 8) /
- intel_dp_mode_to_fec_clock(mode_clock);
+ bits_per_pixel = ((link_clock * lane_count) * timeslots) /
+ (intel_dp_mode_to_fec_clock(mode_clock) * 8);
+
+ drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
+ "total bw %u pixel clock %u\n",
+ bits_per_pixel, timeslots,
+ (link_clock * lane_count * 8),
+ intel_dp_mode_to_fec_clock(mode_clock));
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
@@ -712,24 +764,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
}
- /* Error out if the max bpp is less than smallest allowed valid bpp */
- if (bits_per_pixel < valid_dsc_bpp[0]) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
- bits_per_pixel, valid_dsc_bpp[0]);
- return 0;
- }
-
- /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
- if (DISPLAY_VER(i915) >= 13) {
- bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
- } else {
- /* Find the nearest match in the array of known BPPs from VESA */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
- if (bits_per_pixel < valid_dsc_bpp[i + 1])
- break;
- }
- bits_per_pixel = valid_dsc_bpp[i];
- }
+ bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);
/*
* Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
@@ -738,9 +773,9 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
return bits_per_pixel << 4;
}
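
A worked example of the bandwidth formula from the comment above, as a standalone calculation. The input values are hypothetical, and folding the FEC overhead into the mode clock (the mode_to_fec_clock() stand-in below) is an assumption about what intel_dp_mode_to_fec_clock() does.

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for intel_dp_mode_to_fec_clock(): scale the mode clock up
 * by the FEC overhead (100 / 97.2261 %). Illustrative only. */
static uint64_t mode_to_fec_clock(uint64_t mode_clock_khz)
{
	return mode_clock_khz * 1000000 / 972261;
}

int main(void)
{
	/* Hypothetical inputs: a 270000 kHz link clock (HBR in the driver's
	 * kHz units), 4 lanes, a ~533250 kHz 4k@60 mode clock, and all 64
	 * timeslots (the SST case used by the callers above). */
	uint64_t link_clock = 270000, lane_count = 4, timeslots = 64;
	uint64_t mode_clock = 533250;

	uint64_t bpp = (link_clock * lane_count * timeslots) /
		       (mode_to_fec_clock(mode_clock) * 8);

	/* prints a value around 15 compressed bits per pixel */
	printf("max compressed bpp ~ %llu\n", (unsigned long long)bpp);
	return 0;
}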
-static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
- int mode_clock, int mode_hdisplay,
- bool bigjoiner)
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay,
+ bool bigjoiner)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 min_slice_count, i;
@@ -753,6 +788,13 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
min_slice_count = DIV_ROUND_UP(mode_clock,
DP_DSC_MAX_ENC_THROUGHPUT_1);
+ /*
+ * Due to some DSC engine BW limitations, we need to enable second
+ * slice and VDSC engine, whenever we approach close enough to max CDCLK
+ */
+ if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
+ min_slice_count = max_t(u8, min_slice_count, 2);
+
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
drm_dbg_kms(&i915->drm,
@@ -792,6 +834,9 @@ intel_dp_output_format(struct intel_connector *connector,
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ if (intel_dp->force_dsc_output_format)
+ return intel_dp->force_dsc_output_format;
+
if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output)
return INTEL_OUTPUT_FORMAT_RGB;
@@ -947,8 +992,8 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
-static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
- int hdisplay, int clock)
+bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -974,9 +1019,6 @@ intel_dp_mode_valid(struct drm_connector *_connector,
enum drm_mode_status status;
bool dsc = false, bigjoiner = false;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
@@ -1013,7 +1055,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
*/
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC(dev_priv) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1035,7 +1077,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock,
mode->hdisplay,
bigjoiner,
- pipe_bpp) >> 4;
+ pipe_bpp, 64) >> 4;
dsc_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
target_clock,
@@ -1364,7 +1406,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
+int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i, num_bpc;
@@ -1400,6 +1442,28 @@ static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
DP_DSC_MINOR_SHIFT;
}
+static int intel_dp_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
+ * lines is an optimal slice height, but any size can be used as long as
+ * vertical active integer multiple and maximum vertical slice count
+ * requirements are met.
+ */
+ for (slice_height = 108; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ /*
+ * Highly unlikely we reach here as most of the resolutions will end up
+ * finding appropriate slice_height in above loop but returning
+ * slice_height as 2 here as it should work with all resolutions.
+ */
+ return 2;
+}
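
The new slice height helper simply walks even values from 108 upwards until one divides the vertical active size; a standalone check with a few common panel heights, the loop body mirroring the hunk above:

#include <stdio.h>

static int get_slice_height(int vactive)
{
	int slice_height;

	/* prefer the spec-suggested ~108-line slices; any even divisor works */
	for (slice_height = 108; slice_height <= vactive; slice_height += 2)
		if (vactive % slice_height == 0)
			return slice_height;

	return 2;	/* fallback, divides any even vactive */
}

int main(void)
{
	printf("1080 -> %d\n", get_slice_height(1080));	/* 108 */
	printf("2160 -> %d\n", get_slice_height(2160));	/* 108 */
	printf("1440 -> %d\n", get_slice_height(1440));	/* 120 */
	return 0;
}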
+
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -1418,17 +1482,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8. Eventually add
- * logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
+ vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);
ret = intel_dsc_compute_params(crtc_state);
if (ret)
@@ -1440,9 +1494,10 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->dsc_version_minor =
min(intel_dp_source_dsc_version_minor(intel_dp),
intel_dp_sink_dsc_version_minor(intel_dp));
-
- vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
- DP_DSC_RGB;
+ if (vdsc_cfg->convert_rgb)
+ vdsc_cfg->convert_rgb =
+ intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
if (!line_buf_depth) {
@@ -1465,10 +1520,37 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
-static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static bool intel_dp_dsc_supports_format(struct intel_dp *intel_dp,
+ enum intel_output_format output_format)
+{
+ u8 sink_dsc_format;
+
+ switch (output_format) {
+ case INTEL_OUTPUT_FORMAT_RGB:
+ sink_dsc_format = DP_DSC_RGB;
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ sink_dsc_format = DP_DSC_YCbCr444;
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ if (min(intel_dp_source_dsc_version_minor(intel_dp),
+ intel_dp_sink_dsc_version_minor(intel_dp)) < 2)
+ return false;
+ sink_dsc_format = DP_DSC_YCbCr420_Native;
+ break;
+ default:
+ return false;
+ }
+
+ return drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, sink_dsc_format);
+}
+
+int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits,
+ int timeslots,
+ bool compute_pipe_bpp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
@@ -1483,7 +1565,13 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ if (!intel_dp_dsc_supports_format(intel_dp, pipe_config->output_format))
+ return -EINVAL;
+
+ if (compute_pipe_bpp)
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ else
+ pipe_bpp = pipe_config->pipe_bpp;
if (intel_dp->force_dsc_bpc) {
pipe_bpp = intel_dp->force_dsc_bpc * 3;
@@ -1513,49 +1601,74 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
+ if (!pipe_config->dsc.slice_count) {
+ drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ pipe_config->dsc.slice_count);
+ return -EINVAL;
+ }
} else {
- u16 dsc_max_output_bpp;
+ u16 dsc_max_output_bpp = 0;
u8 dsc_dp_slice_count;
- dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(dev_priv,
- pipe_config->port_clock,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- adjusted_mode->crtc_hdisplay,
- pipe_config->bigjoiner_pipes,
- pipe_bpp);
+ if (compute_pipe_bpp) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ pipe_config->bigjoiner_pipes,
+ pipe_bpp,
+ timeslots);
+ /*
+ * According to DSC 1.2a Section 4.1.1 Table 4.1, the maximum
+ * supported PPS value is 63.9375, and since bpp must be
+ * programmed as double the target bpp, this restricts our
+ * target bpp to at most 31.9375.
+ */
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ dsc_max_output_bpp = min_t(u16, dsc_max_output_bpp, 31 << 4);
+
+ if (!dsc_max_output_bpp) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Compressed BPP not supported\n");
+ return -EINVAL;
+ }
+ }
dsc_dp_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
pipe_config->bigjoiner_pipes);
- if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ if (!dsc_dp_slice_count) {
drm_dbg_kms(&dev_priv->drm,
- "Compressed BPP/Slice Count not supported\n");
+ "Compressed Slice Count not supported\n");
return -EINVAL;
}
- pipe_config->dsc.compressed_bpp = min_t(u16,
- dsc_max_output_bpp >> 4,
- pipe_config->pipe_bpp);
+
+ /*
+ * compute_pipe_bpp is set to false for the DP MST DSC case;
+ * compressed_bpp is calculated at the same time the VCPI
+ * timeslots are allocated, because the overall bpp calculation
+ * procedure is a bit different for MST.
+ */
+ if (compute_pipe_bpp) {
+ pipe_config->dsc.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ }
pipe_config->dsc.slice_count = dsc_dp_slice_count;
+ drm_dbg_kms(&dev_priv->drm, "DSC: compressed bpp %d slice count %d\n",
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
}
-
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
- pipe_config->bigjoiner_pipes) {
- if (pipe_config->dsc.slice_count < 2) {
- drm_dbg_kms(&dev_priv->drm,
- "Cannot split stream to use 2 VDSC instances\n");
- return -EINVAL;
- }
-
+ if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
pipe_config->dsc.dsc_split = true;
- }
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
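As a hedged worked example of the 6.4 fixed-point bpp handling referenced in the hunk above (values and helper below are illustrative, not part of the patch): compressed bpp values carry four fractional bits, so raw/16 is the bpp, ">> 4" recovers the integer part, and "31 << 4" encodes the 31 bpp ceiling applied to YCbCr 4:2:0 (the 63.9375 PPS maximum halved, since bpp is programmed as double the target).

#include <stdio.h>

int main(void)
{
	unsigned int pps_max = 1023;		/* 63.9375 in 6.4 fixed point */
	unsigned int ycbcr420_cap = 31 << 4;	/* 31.0 bpp cap from the hunk above */

	printf("pps_max raw %u = %u.%04u bpp\n", pps_max,
	       pps_max >> 4, (pps_max & 0xf) * 10000 / 16);
	printf("ycbcr420 cap raw %u = %u bpp\n", ycbcr420_cap, ycbcr420_cap >> 4);
	return 0;
}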
@@ -1643,7 +1756,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
- conn_state, &limits);
+ conn_state, &limits, 64, true);
if (ret < 0)
return ret;
}
@@ -1688,7 +1801,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -1952,7 +2065,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
}
static bool intel_dp_has_audio(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -2009,6 +2121,23 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
return ret;
}
+static void
+intel_dp_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+
+ pipe_config->sdp_split_enable =
+ intel_dp_has_audio(encoder, conn_state) &&
+ intel_dp_is_uhbr(pipe_config);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
+ connector->base.id, connector->name,
+ str_yes_no(pipe_config->sdp_split_enable));
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2024,7 +2153,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
- pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state);
+ pipe_config->has_audio =
+ intel_dp_has_audio(encoder, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -2036,7 +2167,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_GMCH(dev_priv) &&
+ if (!connector->base.interlace_allowed &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
@@ -2092,6 +2223,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock /= n;
}
+ intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
+
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
@@ -2221,10 +2354,15 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
- wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ connector->base.base.id, connector->base.name,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
+
+ wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}
/* If the device supports it, try to set the power state appropriately */
@@ -2907,7 +3045,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_max_sink_lane_count(intel_dp);
/* Read the eDP DSC DPCD registers */
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (HAS_DSC(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
/*
@@ -3590,12 +3728,11 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
intel_dp->aux.i2c_defer_count);
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
- struct edid *block = intel_connector->detect_edid;
+ /* FIXME: Get rid of drm_edid_raw() */
+ const struct edid *block = drm_edid_raw(intel_connector->detect_edid);
- /* We have to write the checksum
- * of the last block read
- */
- block += intel_connector->detect_edid->extensions;
+ /* We have to write the checksum of the last block read */
+ block += block->extensions;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
block->checksum) <= 0)
@@ -4417,29 +4554,34 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return is_connected;
}
-static struct edid *
+static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
- /* use cached edid if we have one */
- if (intel_connector->edid) {
+ /* Use panel fixed edid if we have one */
+ if (fixed_edid) {
/* invalid edid */
- if (IS_ERR(intel_connector->edid))
+ if (IS_ERR(fixed_edid))
return NULL;
- return drm_edid_duplicate(intel_connector->edid);
- } else
- return drm_get_edid(&intel_connector->base,
- &intel_dp->aux.ddc);
+ return drm_edid_dup(fixed_edid);
+ }
+
+ return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
+ const struct edid *edid;
+
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
intel_dp->dfp.max_bpc =
drm_dp_downstream_max_bpc(intel_dp->dpcd,
@@ -4539,21 +4681,27 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
bool vrr_capable;
intel_dp_unset_edid(intel_dp);
- edid = intel_dp_get_edid(intel_dp);
- connector->detect_edid = edid;
+ drm_edid = intel_dp_get_edid(intel_dp);
+ connector->detect_edid = drm_edid;
+
+ /* Below we depend on display info having been updated */
+ drm_edid_connector_update(&connector->base, drm_edid);
vrr_capable = intel_vrr_is_capable(connector);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
- intel_dp_update_dfp(intel_dp, edid);
+ intel_dp_update_dfp(intel_dp, drm_edid);
intel_dp_update_420(intel_dp);
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
intel_dp->has_audio = drm_detect_monitor_audio(edid);
@@ -4568,7 +4716,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
struct intel_connector *connector = intel_dp->attached_connector;
drm_dp_cec_unset_edid(&intel_dp->aux);
- kfree(connector->detect_edid);
+ drm_edid_free(connector->detect_edid);
connector->detect_edid = NULL;
intel_dp->has_hdmi_sink = false;
@@ -4633,7 +4781,7 @@ intel_dp_detect(struct drm_connector *connector,
}
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (HAS_DSC(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -4732,12 +4880,10 @@ intel_dp_force(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct edid *edid;
- int num_modes = 0;
+ int num_modes;
- edid = intel_connector->detect_edid;
- if (edid)
- num_modes = intel_connector_update_modes(connector, edid);
+ /* drm_edid_connector_update() done in ->detect() or ->force() */
+ num_modes = drm_edid_connector_add_modes(connector);
/* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
@@ -4746,7 +4892,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
if (num_modes)
return num_modes;
- if (!edid) {
+ if (!intel_connector->detect_edid) {
struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
struct drm_display_mode *mode;
@@ -4783,7 +4929,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
/*
@@ -5061,8 +5207,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata,
+ enum port port)
{
/*
* eDP not supported on g4x. so bail out early just
@@ -5074,13 +5221,24 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
return true;
- return intel_bios_is_port_edp(dev_priv, port);
+ return devdata && intel_bios_encoder_supports_edp(devdata);
+}
+
+bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
+
+ return _intel_dp_is_port_edp(i915, devdata, port);
}
static bool
-has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
+has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- if (intel_bios_is_lspcon_present(i915, port))
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
if (DISPLAY_VER(i915) >= 11)
@@ -5115,14 +5273,14 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
drm_connector_attach_content_type_property(connector);
intel_attach_hdmi_colorspace_property(connector);
} else {
intel_attach_dp_colorspace_property(connector);
}
- if (has_gamut_metadata_dip(dev_priv, port))
+ if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
if (HAS_VRR(dev_priv))
@@ -5164,11 +5322,6 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
-
- drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n",
- connector->base.base.id, connector->base.name,
- pipe_name(pipe));
}
intel_backlight_setup(connector, pipe);
@@ -5182,7 +5335,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -5202,7 +5355,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- intel_pps_init(intel_dp);
+ intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
+ encoder->devdata);
+
+ if (!intel_pps_init(intel_dp)) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ /*
+ * The BIOS may have still enabled VDD on the PPS even
+ * though it's unusable. Make sure we turn it back off
+ * and to release the power domain references/etc.
+ */
+ goto out_vdd_off;
+ }
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -5216,29 +5382,28 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_lock(&dev_priv->drm.mode_config.mutex);
- edid = drm_get_edid(connector, &intel_dp->aux.ddc);
- if (!edid) {
+ drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc);
+ if (!drm_edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
- edid = intel_opregion_get_edid(intel_connector);
- if (edid)
+ drm_edid = intel_opregion_get_edid(intel_connector);
+ if (drm_edid)
drm_dbg_kms(&dev_priv->drm,
"[CONNECTOR:%d:%s] Using OpRegion EDID\n",
connector->base.id, connector->name);
}
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector, edid);
- } else {
- kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ if (drm_edid) {
+ if (drm_edid_connector_update(connector, drm_edid) ||
+ !drm_edid_connector_add_modes(connector)) {
+ drm_edid_connector_update(connector, NULL);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
}
} else {
- edid = ERR_PTR(-ENOENT);
+ drm_edid = ERR_PTR(-ENOENT);
}
- intel_connector->edid = edid;
- intel_bios_init_panel(dev_priv, &intel_connector->panel,
- encoder->devdata, IS_ERR(edid) ? NULL : edid);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
+ IS_ERR(drm_edid) ? NULL : drm_edid);
intel_panel_add_edid_fixed_modes(intel_connector, true);
@@ -5262,7 +5427,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
goto out_vdd_off;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, drm_edid);
intel_edp_backlight_setup(intel_dp, intel_connector);
@@ -5332,7 +5497,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port)) {
+ if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
/*
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
@@ -5364,7 +5529,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
connector->interlace_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
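The intel_dp.c conversion above follows the opaque drm_edid flow; a minimal sketch of that pattern for a hypothetical connector (names such as my_connector are mine, not from the patch): read and cache the EDID and update the connector in the detect path, add modes from the cached data in get_modes, and free with drm_edid_free() when invalidating.

#include <linux/i2c.h>
#include <drm/drm_edid.h>
#include <drm/drm_connector.h>

struct my_connector {
	struct drm_connector base;
	const struct drm_edid *detect_edid;
};

static void my_connector_update_edid(struct my_connector *conn,
				     struct i2c_adapter *ddc)
{
	drm_edid_free(conn->detect_edid);
	conn->detect_edid = drm_edid_read_ddc(&conn->base, ddc);

	/* refreshes display_info and the EDID property; NULL clears them */
	drm_edid_connector_update(&conn->base, conn->detect_edid);
}

static int my_connector_get_modes(struct drm_connector *connector)
{
	/* adds modes from the EDID passed to drm_edid_connector_update() */
	return drm_edid_connector_add_modes(connector);
}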
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index a54902c713a3..ef39e4f7a329 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -56,6 +56,12 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits,
+ int timeslots,
+ bool recompute_pipe_bpp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
@@ -96,6 +102,18 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
+u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay,
+ bool bigjoiner,
+ u32 pipe_bpp,
+ u32 timeslots);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay,
+ bool bigjoiner);
+bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -103,6 +121,7 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
}
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 664bebdecea7..705915d50565 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -6,8 +6,11 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
+#include "intel_bios.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
+#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
#include "intel_tc.h"
@@ -40,20 +43,16 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
const unsigned int timeout_ms = 10;
u32 status;
- bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->display.gmbus.wait_queue, C,
- msecs_to_jiffies_timeout(timeout_ms));
+ int ret;
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+ ret = __intel_de_wait_for_register(i915, ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY, 0,
+ 2, timeout_ms, &status);
- if (!done)
+ if (ret == -ETIMEDOUT)
drm_err(&i915->drm,
"%s: did not complete or timeout within %ums (status 0x%08x)\n",
intel_dp->aux.name, timeout_ms, status);
-#undef C
return status;
}
@@ -120,6 +119,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
+static int intel_dp_aux_sync_len(void)
+{
+ int precharge = 16; /* 10-16 */
+ int preamble = 16;
+
+ return precharge + preamble;
+}
+
+static int intel_dp_aux_fw_sync_len(void)
+{
+ int precharge = 16; /* 10-16 */
+ int preamble = 8;
+
+ return precharge + preamble;
+}
+
+static int g4x_dp_aux_precharge_len(void)
+{
+ int precharge_min = 10;
+ int preamble = 16;
+
+ /* HW wants the length of the extra precharge in 2us units */
+ return (intel_dp_aux_sync_len() -
+ precharge_min - preamble) / 2;
+}
+
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
@@ -142,7 +167,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
@@ -166,8 +191,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;
@@ -191,7 +216,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *i915 =
to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
@@ -208,8 +232,19 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- if (is_tc_port)
+ if (is_tc_port) {
intel_tc_port_lock(dig_port);
+ /*
+ * Abort transfers on a disconnected port as required by
+ * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
+ * timeouts that would otherwise happen.
+ * TODO: abort the transfer on non-TC ports as well.
+ */
+ if (!intel_tc_port_connected_locked(&dig_port->base)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ }
aux_domain = intel_aux_power_domain(dig_port);
@@ -235,7 +270,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = intel_uncore_read_notrace(uncore, ch_ctl);
+ status = intel_de_read_notrace(i915, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -244,7 +279,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- const u32 status = intel_uncore_read(uncore, ch_ctl);
+ const u32 status = intel_de_read(i915, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
drm_WARN(&i915->drm, 1,
@@ -274,23 +309,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- intel_uncore_write(uncore,
- ch_data[i >> 2],
- intel_dp_aux_pack(send + i,
- send_bytes - i));
+ intel_de_write(i915, ch_data[i >> 2],
+ intel_dp_aux_pack(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
- intel_uncore_write(uncore, ch_ctl, send_ctl);
+ intel_de_write(i915, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- intel_uncore_write(uncore,
- ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ intel_de_write(i915, ch_ctl,
+ status | DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
/*
* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
@@ -361,7 +393,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_aux_unpack(intel_uncore_read(uncore, ch_data[i >> 2]),
+ intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
@@ -373,7 +405,7 @@ out:
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
+out_unlock:
if (is_tc_port)
intel_tc_port_unlock(dig_port);
@@ -744,3 +776,37 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
+
+static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ /* SKL has DDI E but no AUX E */
+ if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ return AUX_CH_A;
+
+ return (enum aux_ch)encoder->port;
+}
+
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum aux_ch aux_ch;
+
+ aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
+ if (aux_ch != AUX_CH_NONE) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+ return aux_ch;
+ }
+
+ aux_ch = default_aux_ch(encoder);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] using AUX %c (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+
+ return aux_ch;
+}
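A quick arithmetic check (mine, not from the patch) of the AUX length helpers added in intel_dp_aux.c above: the SYNC pulse still works out to the previously hardcoded 32, the FW SYNC pulse drops from 32 to 24, and the g4x precharge field evaluates to the same 3 that used to be a literal.

#include <assert.h>

static int aux_sync_len(void)	 { return 16 + 16; }	/* precharge + preamble */
static int aux_fw_sync_len(void) { return 16 + 8; }

static int g4x_precharge_2us(void)
{
	int precharge_min = 10, preamble = 16;

	/* extra precharge, expressed in 2 us units */
	return (aux_sync_len() - precharge_min - preamble) / 2;
}

int main(void)
{
	assert(aux_sync_len() == 32);		/* matches SYNC_PULSE_SKL(32) */
	assert(aux_fw_sync_len() == 24);	/* was FW_SYNC_PULSE_SKL(32) */
	assert(g4x_precharge_2us() == 3);	/* matches the old "3 <<" literal */
	return 0;
}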
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 738577537bc7..138e340f94ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -6,9 +6,13 @@
#ifndef __INTEL_DP_AUX_H__
#define __INTEL_DP_AUX_H__
+enum aux_ch;
struct intel_dp;
+struct intel_encoder;
void intel_dp_aux_fini(struct intel_dp *intel_dp);
void intel_dp_aux_init(struct intel_dp *intel_dp);
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
+
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 83af95bce98d..95cc5251843e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -105,6 +105,11 @@ enum intel_dp_aux_backlight_modparam {
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
};
+static bool is_intel_tcon_cap(const u8 tcon_cap[4])
+{
+ return tcon_cap[0] >= 1;
+}
+
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -125,14 +130,12 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
return false;
- if (tcon_cap[0] >= 1) {
- drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
- tcon_cap[0]);
- } else {
- drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
- tcon_cap[0]);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
+ connector->base.base.id, connector->base.name,
+ is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]);
+
+ if (!is_intel_tcon_cap(tcon_cap))
return false;
- }
/*
* If we don't have HDR static metadata there is no way to
@@ -147,7 +150,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
- "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
}
@@ -168,7 +172,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
u8 buf[2] = { 0 };
if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
- drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -185,7 +190,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf)) {
- drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -205,7 +211,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf))
- drm_err(dev, "Failed to write brightness level to DPCD\n");
+ drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -238,7 +245,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
- drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return;
}
@@ -254,9 +262,10 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
}
- if (ctrl != old_ctrl)
- if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
- drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+ if (ctrl != old_ctrl &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -273,6 +282,11 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state,
panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
}
+static const char *dpcd_vs_pwm_str(bool aux)
+{
+ return aux ? "DPCD" : "PWM";
+}
+
static int
intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
@@ -282,15 +296,16 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
&connector->base.display_info.luminance_range;
int ret;
- if (panel->backlight.edp.intel.sdr_uses_aux) {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
- } else {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux));
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -303,8 +318,10 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
panel->backlight.min = 0;
}
- drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
- panel->backlight.max);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.min, panel->backlight.max);
+
panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
panel->backlight.enabled = panel->backlight.level != 0;
@@ -386,12 +403,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup PWM backlight controls for eDP backlight: %d\n",
- ret);
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -418,6 +442,9 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
}
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -428,7 +455,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
+ connector->base.base.id, connector->base.name);
return true;
}
return false;
@@ -504,13 +532,15 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* interfaces is to probe for Intel's first, and VESA's second.
*/
if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
- drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
return 0;
}
if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
- drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
new file mode 100644
index 000000000000..5702f318d537
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_REGS_H__
+#define __INTEL_DP_AUX_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/*
+ * The aux channel provides a way to talk to the signal sink for DDC etc. Max
+ * packet size supported is 20 bytes in each direction, hence the 5 fixed data
+ * registers
+ */
+#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+
+#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+
+#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
+#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+
+#define _XELPDP_USBC1_AUX_CH_CTL 0x16F210
+#define _XELPDP_USBC2_AUX_CH_CTL 0x16F410
+#define _XELPDP_USBC3_AUX_CH_CTL 0x16F610
+#define _XELPDP_USBC4_AUX_CH_CTL 0x16F810
+
+#define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_CTL, \
+ _DPB_AUX_CH_CTL, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_CTL, \
+ _XELPDP_USBC2_AUX_CH_CTL, \
+ _XELPDP_USBC3_AUX_CH_CTL, \
+ _XELPDP_USBC4_AUX_CH_CTL))
+
+#define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214
+#define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414
+#define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614
+#define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814
+
+#define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_DATA1, \
+ _XELPDP_USBC2_AUX_CH_DATA1, \
+ _XELPDP_USBC3_AUX_CH_DATA1, \
+ _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4)
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19)
+#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
+#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
+#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
+#define DP_AUX_CH_CTL_TBT_IO (1 << 11)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
+#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
+
+#endif /* __INTEL_DP_AUX_REGS_H__ */
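The XELPDP lookup macros above index a per-AUX-channel offset table; a hedged plain-C sketch of that pattern (offsets copied from the header above, the helper name is mine):

#include <stdint.h>

static const uint32_t xelpdp_aux_ch_ctl[] = {
	0x64010,	/* AUX A */
	0x64110,	/* AUX B */
	0,		/* port/aux_ch C is non-existent on this platform */
	0x16F210,	/* USBC1 */
	0x16F410,	/* USBC2 */
	0x16F610,	/* USBC3 */
	0x16F810,	/* USBC4 */
};

static uint32_t xelpdp_aux_ch_ctl_offset(int aux_ch)
{
	return xelpdp_aux_ch_ctl[aux_ch];	/* mirrors the _PICK() macro above */
}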
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 3d3efcf02011..d638054c74ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1379,10 +1379,6 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
}
}
- /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
- if (intel_dp->set_idle_link_train)
- intel_dp->set_idle_link_train(intel_dp, crtc_state);
-
return true;
}
@@ -1433,7 +1429,11 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool passed;
+
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
@@ -1451,6 +1451,46 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
else
passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
+ /*
+ * Ignore the link failure in CI
+ *
+ * In fixed environments like CI, unexpected long HPDs are sometimes
+ * generated by the displays. If the ignore_long_hpd flag is set, such
+ * long HPDs are ignored, and, probably as a consequence of these ignored
+ * long HPDs, subsequent link trainings fail, resulting in CI execution
+ * failures.
+ *
+ * Test cases which rely on link training or on processing of HPDs can
+ * unset the ignore_long_hpd flag from the test case.
+ */
+ if (!passed && i915->display.hotplug.ignore_long_hpd) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Ignore the link failure\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+ return;
+ }
+
if (!passed)
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
+
+void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /*
+ * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
+ * disable SDP CRC. This is applicable for Display version 13.
+ * The default value of bit 31 is '0', hence the write is skipped.
+ * TODO: Corrective actions on SDP corruption yet to be defined
+ */
+ if (intel_dp_is_uhbr(crtc_state))
+ /* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SDP_ERROR_DETECTION_CONFIGURATION,
+ DP_SDP_CRC16_128B132B_EN);
+
+ drm_dbg_kms(&i915->drm, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 7fa1c0833096..2c8f2775891b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -39,4 +39,6 @@ static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
return pattern & ~DP_LINK_SCRAMBLING_DISABLE;
}
+void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_LINK_TRAINING_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 4077a979a924..2c49d9ab86a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -45,10 +45,35 @@
#include "intel_hotplug.h"
#include "skl_scaler.h"
-static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
+ const struct drm_display_mode *adjusted_mode,
+ struct intel_crtc_state *crtc_state,
+ bool dsc)
+{
+ if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) {
+ int output_bpp = bpp;
+ /* DisplayPort 2 128b/132b, bits per lane is always 32 */
+ int symbol_clock = crtc_state->port_clock / 32;
+
+ if (output_bpp * adjusted_mode->crtc_clock >=
+ symbol_clock * 72) {
+ drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n",
+ output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp,
+ int min_bpp,
+ struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ int step,
+ bool dsc)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
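Restated as a hedged standalone helper (illustrative, simplified from intel_dp_mst_check_constraints() above): on 128b/132b links each lane carries 32 bits per symbol, so the symbol clock is port_clock / 32, and the DSC configuration is rejected once output_bpp * crtc_clock reaches symbol_clock * 72.

#include <stdbool.h>

static bool uhbr_dsc_bw_ok(long long output_bpp, long long crtc_clock,
			   long long port_clock)
{
	long long symbol_clock = port_clock / 32;	/* DP 2.0: 32 bits/lane/symbol */

	return output_bpp * crtc_clock < symbol_clock * 72;
}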
@@ -60,6 +85,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
+ int ret = 0;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
if (IS_ERR(mst_state))
@@ -71,30 +97,72 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
// TODO: Handle pbn_div changes by adding a new MST helper
if (!mst_state->pbn_div) {
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- limits->max_rate,
- limits->max_lane_count);
+ crtc_state->port_clock,
+ crtc_state->lane_count);
}
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- crtc_state->pipe_bpp = bpp;
+ for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+ drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
+
+ ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
+ if (ret)
+ continue;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- crtc_state->pipe_bpp,
- false);
+ dsc ? bpp << 4 : bpp,
+ dsc);
+
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port, crtc_state->pbn);
+ connector->port,
+ crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
- if (slots >= 0)
- break;
+
+ if (slots >= 0) {
+ ret = drm_dp_mst_atomic_check(state);
+ /*
+ * If we got slots >= 0 and they fit according to the atomic
+ * check, we can exit the loop. Otherwise keep trying.
+ */
+ if (!ret)
+ break;
+ }
}
+ /* We failed to find a proper bpp/timeslots, return error */
+ if (ret)
+ slots = ret;
+
if (slots < 0) {
drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
slots);
- return slots;
+ } else {
+ if (!dsc)
+ crtc_state->pipe_bpp = bpp;
+ else
+ crtc_state->dsc.compressed_bpp = bpp;
+ drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
+ return slots;
+}
+
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int slots = -EINVAL;
+
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, limits->max_bpp,
+ limits->min_bpp, limits,
+ conn_state, 2 * 3, false);
+
+ if (slots < 0)
+ return slots;
+
intel_link_compute_m_n(crtc_state->pipe_bpp,
crtc_state->lane_count,
adjusted_mode->crtc_clock,
@@ -106,6 +174,99 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int slots = -EINVAL;
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+ int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
+ u8 dsc_max_bpc;
+ bool need_timeslot_recalc = false;
+ u32 last_compressed_bpp;
+
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (DISPLAY_VER(i915) >= 12)
+ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
+
+ max_bpp = min_t(u8, dsc_max_bpc * 3, limits->max_bpp);
+ min_bpp = limits->min_bpp;
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+
+ drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
+ min_bpp, max_bpp);
+
+ sink_max_bpp = dsc_bpc[0] * 3;
+ sink_min_bpp = sink_max_bpp;
+
+ for (i = 1; i < num_bpc; i++) {
+ if (sink_min_bpp > dsc_bpc[i] * 3)
+ sink_min_bpp = dsc_bpc[i] * 3;
+ if (sink_max_bpp < dsc_bpc[i] * 3)
+ sink_max_bpp = dsc_bpc[i] * 3;
+ }
+
+ drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
+ sink_min_bpp, sink_max_bpp);
+
+ if (min_bpp < sink_min_bpp)
+ min_bpp = sink_min_bpp;
+
+ if (max_bpp > sink_max_bpp)
+ max_bpp = sink_max_bpp;
+
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp,
+ min_bpp, limits,
+ conn_state, 2 * 3, true);
+
+ if (slots < 0)
+ return slots;
+
+ last_compressed_bpp = crtc_state->dsc.compressed_bpp;
+
+ crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915,
+ last_compressed_bpp,
+ crtc_state->pipe_bpp);
+
+ if (crtc_state->dsc.compressed_bpp != last_compressed_bpp)
+ need_timeslot_recalc = true;
+
+ /*
+ * Apparently some MST hubs dislike it if the allocated VCPI slots don't
+ * precisely match the actual compressed bpp we use.
+ */
+ if (need_timeslot_recalc) {
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
+ crtc_state->dsc.compressed_bpp,
+ crtc_state->dsc.compressed_bpp,
+ limits, conn_state, 2 * 3, true);
+ if (slots < 0)
+ return slots;
+ }
+
+ intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
+ crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+ crtc_state->fec_enable);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
+}
static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
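A condensed sketch of the bpp-to-timeslot search added above (simplified; the callback stands in for the PBN/timeslot/atomic-check sequence of the real code): walk bpp from max to min in fixed steps and stop at the first candidate that either deadlocks (to be retried by the atomic framework) or yields a valid slot allocation.

#include <errno.h>

static int find_slots_for_bpp(int max_bpp, int min_bpp, int step,
			      int (*try_bpp)(int bpp))	/* returns slots or -errno */
{
	int bpp, slots = -EINVAL;

	for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
		slots = try_bpp(bpp);
		if (slots == -EDEADLK || slots >= 0)
			break;
	}

	return slots;
}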
@@ -129,6 +290,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
return 0;
}
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return connector->port->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -136,10 +310,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
@@ -151,11 +321,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = connector->port->has_audio;
- else
- pipe_config->has_audio =
- intel_conn_state->force_audio == HDMI_AUDIO_ON;
+ pipe_config->has_audio =
+ intel_dp_mst_has_audio(conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* for MST we always configure max link bw - the spec doesn't
@@ -182,6 +350,29 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
conn_state, &limits);
+
+ if (ret == -EDEADLK)
+ return ret;
+
+ /* enable compression if the mode doesn't fit available BW */
+ drm_dbg_kms(&dev_priv->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
+ if (ret || intel_dp->force_dsc_en) {
+ /*
+ * Try to get at least some timeslots and then see, if
+ * we can fit there with DSC.
+ */
+ drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");
+
+ ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
+ conn_state, &limits);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits,
+ pipe_config->dp_m_n.tu, false);
+ }
+
if (ret)
return ret;
@@ -365,8 +556,14 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *old_mst_state =
+ drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *new_mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ const struct drm_dp_mst_atomic_payload *old_payload =
+ drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+ struct drm_dp_mst_atomic_payload *new_payload =
+ drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -374,8 +571,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
- drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
+ old_payload, new_payload);
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
@@ -439,7 +636,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* no clock to the transcoder"
*/
if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
@@ -451,6 +648,20 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links);
}
+static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ if (intel_dp->active_mst_links == 0 &&
+ dig_port->base.post_pll_disable)
+ dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
+}
+
static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -463,6 +674,13 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
if (intel_dp->active_mst_links == 0)
dig_port->base.pre_pll_enable(state, &dig_port->base,
pipe_config, NULL);
+ else
+ /*
+	 * The port PLL state needs to be updated for secondary
+	 * streams just as it is for the primary stream.
+ */
+ intel_ddi_update_active_dpll(state, &dig_port->base,
+ to_intel_crtc(pipe_config->uapi.crtc));
}
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
@@ -519,7 +737,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* here for the following ones.
*/
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
- intel_ddi_enable_pipe_clock(encoder, pipe_config);
+ intel_ddi_enable_transcoder_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -692,6 +910,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
+ bool dsc = false, bigjoiner = false;
+ u16 dsc_max_output_bpp = 0;
+ u8 dsc_slice_count = 0;
+ int target_clock = mode->clock;
if (drm_connector_is_unregistered(connector)) {
*status = MODE_ERROR;
@@ -729,6 +951,48 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
+ if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ bigjoiner = true;
+ max_dotclk *= 2;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 10 &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ /*
+	 * TBD: pass the connector BPC;
+	 * for now use U8_MAX so that the max BPC for the platform is picked
+ */
+ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
+
+ if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
+ max_lanes,
+ target_clock,
+ mode->hdisplay,
+ bigjoiner,
+ pipe_bpp, 64) >> 4;
+ dsc_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ target_clock,
+ mode->hdisplay,
+ bigjoiner);
+ }
+
+ dsc = dsc_max_output_bpp && dsc_slice_count;
+ }
+
+ /*
+	 * Big joiner configuration needs DSC on TGL, which is not the case on
+	 * XE_LPD, where the uncompressed joiner is supported.
+ */
+ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+ return MODE_CLOCK_HIGH;
+
+ if (mode_rate > max_rate && !dsc)
+ return MODE_CLOCK_HIGH;
+
*status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
return 0;
}
@@ -928,6 +1192,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
@@ -1018,3 +1283,64 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}
+
+/**
+ * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
+ * @state: atomic state
+ * @connector: connector to add the state for
+ * @crtc: the CRTC @connector is attached to
+ *
+ * Add the MST topology state for @connector to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ struct intel_crtc *crtc)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+
+ if (!connector->mst_port)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(&state->base,
+ &connector->mst_port->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
+
+ return 0;
+}
+
+/**
+ * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
+ * @state: atomic state
+ * @crtc: CRTC to add the state for
+ *
+ * Add the MST topology state for @crtc to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *_connector;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ struct intel_connector *connector = to_intel_connector(_connector);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
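
For context, a hypothetical check-phase caller of the new helper (not part of this patch): it assumes the helper is run for every CRTC that needs a full modeset, so that pending_crtc_mask ends up tracking all affected CRTCs. The for_each_new_intel_crtc_in_state() and intel_crtc_needs_modeset() helpers are assumed from the existing driver.

/* Sketch only: iterate the CRTCs in the atomic state and record them
 * in the MST topology state before the payload allocation is checked. */
static int sketch_add_mst_topology_states(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i, ret;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* only CRTCs undergoing a full modeset need the MST state */
		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}
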
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f7301de6cdfb..f1815bb72267 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
@@ -18,5 +20,7 @@ int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7eb7440b3180..62b93d097e44 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -376,7 +376,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+ dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
@@ -389,9 +389,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
"force reprogramming it\n", phy);
}
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val |= phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -410,27 +408,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
phy);
/* Program PLL Rcomp code offset */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
+ 0xE4 << IREF0RC_OFFSET_SHIFT);
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
+ 0xE4 << IREF1RC_OFFSET_SHIFT);
/* Program power gating */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy_info->dual_channel) {
- val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
- }
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
+ OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);
+
+ if (phy_info->dual_channel)
+ intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0,
+ DW6_OLDO_DYN_PWR_DOWN_EN);
if (phy_info->rcomp_phy != -1) {
u32 grc_code;
@@ -442,40 +432,32 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
- phy_info->rcomp_phy);
+ val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
+ dev_priv->display.state.bxt_phy_grc = val;
+
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
-
- val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
- val |= GRC_DIS | GRC_RDY_OVRD;
- intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
+ 0, GRC_DIS | GRC_RDY_OVRD);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val &= ~phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -568,7 +550,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
"BXT_PORT_CL2CM_DW6(%d)", phy);
if (phy_info->rcomp_phy != -1) {
- u32 grc_code = dev_priv->bxt_phy_grc;
+ u32 grc_code = dev_priv->display.state.bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
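
These intel_dpio_phy.c hunks, like most of the intel_dpll/intel_dpll_mgr hunks below, mechanically replace open-coded read/modify/write sequences with intel_de_rmw(). As a reviewing aid, here is a minimal sketch of the semantics the conversions rely on; it mirrors the existing intel_de/intel_uncore helper of this era and is not code added by the patch: intel_de_rmw(i915, reg, clear, set) clears the bits in 'clear', ORs in 'set', skips the write when nothing changes, and returns the old register value (which the TRANS_CMTG_CHICKEN hunk below makes use of).

/* Sketch of the read-modify-write helper the conversions target;
 * equivalent to the removed read/mask/or/write sequences. */
static u32 sketch_de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
			 u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_de_write(i915, reg, val);

	return old;
}

So, for example, the BXT_P_CR_GT_DISP_PWRON enable path passes clear == 0 and set == phy_info->pwron_mask, while the uninit path passes the same mask as 'clear' with set == 0.
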
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index c236aafe9be0..4e9c18be7e1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1910,7 +1910,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, DPLL_MD(PIPE_B),
crtc_state->dpll_hw_state.dpll_md);
intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+ dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 1974eb580ed1..22fc908b7e5d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -608,17 +608,15 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, WRPLL_CTL(id));
- intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (dev_priv->pch_ssc_use & BIT(id))
+ if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(dev_priv);
}
@@ -626,17 +624,15 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, SPLL_CTL);
- intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, SPLL_CTL);
/*
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (dev_priv->pch_ssc_use & BIT(id))
+ if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(dev_priv);
}
@@ -1238,16 +1234,10 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
- DPLL_CTRL1_SSC(id) |
- DPLL_CTRL1_LINK_RATE_MASK(id));
- val |= pll->state.hw_state.ctrl1 << (id * 6);
-
- intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_rmw(dev_priv, DPLL_CTRL1,
+ DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
+ pll->state.hw_state.ctrl1 << (id * 6));
intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
@@ -1265,8 +1255,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
@@ -1285,8 +1274,7 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, regs[id].ctl);
}
@@ -1902,14 +1890,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_REF_SEL;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ 0, PORT_PLL_POWER_ENABLE);
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -1918,39 +1903,28 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
}
/* Disable 10 bit clock */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
+ PORT_PLL_10BIT_CLK_ENABLE, 0);
/* Write P1 & P2 */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
- temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->state.hw_state.ebb0;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
/* Write M2 integer */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
- temp &= ~PORT_PLL_M2_INT_MASK;
- temp |= pll->state.hw_state.pll0;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
+ PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
/* Write N */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
- temp &= ~PORT_PLL_N_MASK;
- temp |= pll->state.hw_state.pll1;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
+ PORT_PLL_N_MASK, pll->state.hw_state.pll1);
/* Write M2 fraction */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
- temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->state.hw_state.pll2;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
+ PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
/* Write M2 fraction enable */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
- temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->state.hw_state.pll3;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
+ PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
/* Write coeff */
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
@@ -1961,15 +1935,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
- temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->state.hw_state.pll8;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
+ PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
- temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->state.hw_state.pll9;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
+ PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
@@ -1986,9 +1956,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
@@ -2016,17 +1984,13 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- u32 temp;
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_ENABLE, 0);
if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -3641,8 +3605,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
!i915_mmio_reg_valid(div0_reg));
if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
- intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
- hw_state->div0);
+ intel_de_rmw(dev_priv, div0_reg,
+ TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
intel_de_posting_read(dev_priv, cfgcr1_reg);
}
@@ -3651,7 +3615,6 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
- u32 val;
/*
* Some of the following registers have reserved fields, so program
@@ -3659,23 +3622,19 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
- val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
- val |= hw_state->mg_refclkin_ctl;
- intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
+ MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
- val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
- val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- val |= hw_state->mg_clktop2_coreclkctl1;
- intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
+ hw_state->mg_clktop2_coreclkctl1);
- val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
- val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
- val |= hw_state->mg_clktop2_hsclkctl;
- intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
+ hw_state->mg_clktop2_hsclkctl);
intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
@@ -3684,15 +3643,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
hw_state->mg_pll_frac_lock);
intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
- val &= ~hw_state->mg_pll_bias_mask;
- val |= hw_state->mg_pll_bias;
- intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
+ hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
- val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
- val |= hw_state->mg_pll_tdc_coldst_bias;
- intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
+ hw_state->mg_pll_tdc_coldst_bias_mask,
+ hw_state->mg_pll_tdc_coldst_bias);
intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
@@ -3766,11 +3722,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
/*
* The spec says we need to "wait" but it also says it should be
@@ -3785,11 +3737,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3815,8 +3763,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
* since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
*/
val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
+ val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
@@ -3900,8 +3847,6 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
/* The first steps are done by intel_ddi_post_disable(). */
/*
@@ -3910,9 +3855,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3920,9 +3863,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
/* DVFS post sequence would be here. See the comment above. */
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);
/*
* The spec says we need to "wait" but it also says it should be
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index ad1a37b515fb..b8027392144d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,8 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -301,6 +303,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->pte_encode = gen8_ggtt_pte_encode;
dpt->obj = dpt_obj;
+ dpt->obj->is_dpt = true;
return &dpt->vm;
}
@@ -309,5 +312,29 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ dpt->obj->is_dpt = false;
i915_vm_put(&dpt->vm);
}
+
+void intel_dpt_configure(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) == 14) {
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ PLANE_CHICKEN_DISABLE_DPT,
+ i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT);
+ }
+ } else if (DISPLAY_VER(i915) == 13) {
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ CHICKEN_MISC_DISABLE_DPT,
+ i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index e18a9f767b11..d9a166550185 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -10,6 +10,7 @@ struct drm_i915_private;
struct i915_address_space;
struct i915_vma;
+struct intel_crtc;
struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
@@ -19,5 +20,6 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
+void intel_dpt_configure(struct intel_crtc *crtc);
#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 5b9e44443814..760e63cdc0c8 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -68,21 +68,15 @@ intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
- u32 val, bit;
+ u32 bit;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- bit = PIPECONF_REFRESH_RATE_ALT_VLV;
+ bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
else
- bit = PIPECONF_REFRESH_RATE_ALT_ILK;
+ bit = TRANSCONF_REFRESH_RATE_ALT_ILK;
- val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
-
- if (refresh_rate == DRRS_REFRESH_RATE_LOW)
- val |= bit;
- else
- val &= ~bit;
-
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
+ bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}
static void
@@ -374,16 +368,16 @@ out:
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
- NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
+ NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
crtc, &intel_drrs_debugfs_status_fops);
- debugfs_create_file("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
- crtc, &intel_drrs_debugfs_ctl_fops);
+ debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
+ crtc, &intel_drrs_debugfs_ctl_fops);
}
static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 1e1c6107d51b..bed058d2c3ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -11,6 +11,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
+#include "intel_dsb_regs.h"
struct i915_vma;
@@ -24,25 +25,30 @@ enum dsb_id {
struct intel_dsb {
enum dsb_id id;
+
u32 *cmd_buf;
struct i915_vma *vma;
+ struct intel_crtc *crtc;
+
+ /*
+ * maximum number of dwords the buffer will hold.
+ */
+ unsigned int size;
/*
- * free_pos will point the first free entry position
- * and help in calculating tail of command buffer.
+	 * free_pos will point to the first free dword and
+	 * help in calculating the tail of the command buffer.
*/
- int free_pos;
+ unsigned int free_pos;
/*
- * ins_start_offset will help to store start address of the dsb
+	 * ins_start_offset will help to store the start dword of the dsb
	 * instruction and help in identifying the batch of auto-increment
* register.
*/
- u32 ins_start_offset;
+ unsigned int ins_start_offset;
};
-#define DSB_BUF_SIZE (2 * PAGE_SIZE)
-
/**
* DOC: DSB
*
@@ -62,86 +68,87 @@ struct intel_dsb {
/* DSB opcodes. */
#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_NOOP 0x0
#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_WAIT_USEC 0x2
+#define DSB_OPCODE_WAIT_LINES 0x3
+#define DSB_OPCODE_WAIT_VBLANKS 0x4
+#define DSB_OPCODE_WAIT_DSL_IN 0x5
+#define DSB_OPCODE_WAIT_DSL_OUT 0x6
+#define DSB_OPCODE_INTERRUPT 0x7
#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_OPCODE_POLL 0xA
#define DSB_BYTE_EN 0xF
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
+static bool assert_dsb_has_room(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* each instruction is 2 dwords */
+ return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2,
+ "[CRTC:%d:%s] DSB %d buffer overflow\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
+}
+
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
enum dsb_id id)
{
- return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
+ return intel_de_read(i915, DSB_CTRL(pipe, id)) & DSB_STATUS_BUSY;
}
-static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
- enum pipe pipe, enum dsb_id id)
+static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
- u32 dsb_ctrl;
+ u32 *buf = dsb->cmd_buf;
- dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
- if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
- return false;
- }
+ if (!assert_dsb_has_room(dsb))
+ return;
- dsb_ctrl |= DSB_ENABLE;
- intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
- intel_de_posting_read(i915, DSB_CTRL(pipe, id));
- return true;
+ dsb->ins_start_offset = dsb->free_pos;
+
+ buf[dsb->free_pos++] = ldw;
+ buf[dsb->free_pos++] = udw;
}
-static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
- enum pipe pipe, enum dsb_id id)
+static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
+ u32 opcode, i915_reg_t reg)
{
- u32 dsb_ctrl;
+ const u32 *buf = dsb->cmd_buf;
+ u32 prev_opcode, prev_reg;
- dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
- if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
- return false;
- }
+ prev_opcode = buf[dsb->ins_start_offset + 1] >> DSB_OPCODE_SHIFT;
+ prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+
+ return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
+}
- dsb_ctrl &= ~DSB_ENABLE;
- intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
+{
+ return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_MMIO_WRITE, reg);
+}
- intel_de_posting_read(i915, DSB_CTRL(pipe, id));
- return true;
+static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
+{
+ return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_INDEXED_WRITE, reg);
}
/**
- * intel_dsb_indexed_reg_write() -Write to the DSB context for auto
- * increment register.
- * @crtc_state: intel_crtc_state structure
+ * intel_dsb_reg_write() - Emit a register write to the DSB context
+ * @dsb: DSB context
* @reg: register address.
* @val: value.
*
* This function is used for writing register-value pair in command
- * buffer of DSB for auto-increment register. During command buffer overflow,
- * a warning is thrown and rest all erroneous condition register programming
- * is done through mmio write.
+ * buffer of DSB.
*/
-
-void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val)
+void intel_dsb_reg_write(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
{
- struct intel_dsb *dsb = crtc_state->dsb;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf;
- u32 reg_val;
-
- if (!dsb) {
- intel_de_write_fw(dev_priv, reg, val);
- return;
- }
- buf = dsb->cmd_buf;
- if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
- return;
- }
-
/*
* For example the buffer will look like below for 3 dwords for auto
* increment register:
@@ -158,207 +165,192 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
* we are writing odd no of dwords, Zeros will be added in the end for
* padding.
*/
- reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
- if (reg_val != i915_mmio_reg_offset(reg)) {
- /* Every instruction should be 8 byte aligned. */
- dsb->free_pos = ALIGN(dsb->free_pos, 2);
+ if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
+ !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
+ intel_dsb_emit(dsb, val,
+ (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg));
+ } else {
+ u32 *buf = dsb->cmd_buf;
- dsb->ins_start_offset = dsb->free_pos;
+ if (!assert_dsb_has_room(dsb))
+ return;
- /* Update the size. */
- buf[dsb->free_pos++] = 1;
+ /* convert to indexed write? */
+ if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
+ u32 prev_val = buf[dsb->ins_start_offset + 0];
- /* Update the opcode and reg. */
- buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
- DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
+ buf[dsb->ins_start_offset + 0] = 1; /* count */
+ buf[dsb->ins_start_offset + 1] =
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+ buf[dsb->ins_start_offset + 2] = prev_val;
- /* Update the value. */
- buf[dsb->free_pos++] = val;
- } else {
- /* Update the new value. */
- buf[dsb->free_pos++] = val;
+ dsb->free_pos++;
+ }
- /* Update the size. */
+ buf[dsb->free_pos++] = val;
+ /* Update the count */
buf[dsb->ins_start_offset]++;
- }
- /* if number of data words is odd, then the last dword should be 0.*/
- if (dsb->free_pos & 0x1)
- buf[dsb->free_pos] = 0;
+		/* if the number of data words is odd, then the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+ }
}
-/**
- * intel_dsb_reg_write() -Write to the DSB context for normal
- * register.
- * @crtc_state: intel_crtc_state structure
- * @reg: register address.
- * @val: value.
- *
- * This function is used for writing register-value pair in command
- * buffer of DSB. During command buffer overflow, a warning is thrown
- * and rest all erroneous condition register programming is done
- * through mmio write.
- */
-void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val)
+static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_dsb *dsb;
- u32 *buf;
+ u32 aligned_tail, tail;
- dsb = crtc_state->dsb;
- if (!dsb) {
- intel_de_write_fw(dev_priv, reg, val);
- return;
- }
+ tail = dsb->free_pos * 4;
+ aligned_tail = ALIGN(tail, CACHELINE_BYTES);
- buf = dsb->cmd_buf;
- if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
- return;
- }
+ if (aligned_tail > tail)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ aligned_tail - tail);
- dsb->ins_start_offset = dsb->free_pos;
- buf[dsb->free_pos++] = val;
- buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
- (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
- i915_mmio_reg_offset(reg);
+ dsb->free_pos = aligned_tail / 4;
+}
+
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
}
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
- * @crtc_state: intel_crtc_state structure
+ * @dsb: DSB context
+ * @wait_for_vblank: wait for vblank before executing
*
* This function is used to do actual write to hardware using DSB.
- * On errors, fall back to MMIO. Also this function help to reset the context.
*/
-void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
+void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank)
{
- struct intel_dsb *dsb = crtc_state->dsb;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- if (!(dsb && dsb->free_pos))
+ tail = dsb->free_pos * 4;
+ if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
return;
- if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
- goto reset;
-
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm,
- "HEAD_PTR write failed - dsb engine is busy.\n");
- goto reset;
+ drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
+ return;
}
+
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id),
+ (wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0) |
+ DSB_ENABLE);
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
i915_ggtt_offset(dsb->vma));
-
- tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
- if (tail > dsb->free_pos * 4)
- memset(&dsb->cmd_buf[dsb->free_pos], 0,
- (tail - dsb->free_pos * 4));
-
- if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm,
- "TAIL_PTR write failed - dsb engine is busy.\n");
- goto reset;
- }
- drm_dbg_kms(&dev_priv->drm,
- "DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
- if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
+}
+
+void intel_dsb_wait(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1))
drm_err(&dev_priv->drm,
- "Timed out waiting for DSB workload completion.\n");
- goto reset;
- }
+ "[CRTC:%d:%s] DSB %d timed out waiting for idle\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
-reset:
+ /* Attempt to reset it */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0);
}
/**
* intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
- * @crtc_state: intel_crtc_state structure to prepare associated dsb instance.
+ * @crtc: the CRTC
+ * @max_cmds: number of commands we need to fit into command buffer
*
* This function prepare the command buffer which is used to store dsb
* instructions with data.
+ *
+ * Returns:
+ * DSB context, NULL on failure
*/
-void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
+struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
+ unsigned int max_cmds)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dsb *dsb;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ struct intel_dsb *dsb;
struct i915_vma *vma;
+ unsigned int size;
u32 *buf;
- intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
- return;
+ return NULL;
- dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
- if (!dsb) {
- drm_err(&i915->drm, "DSB object creation failed\n");
- return;
- }
+ dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
+ if (!dsb)
+ goto out;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
- if (IS_ERR(obj)) {
- kfree(dsb);
- goto out;
- }
+ /* ~1 qword per instruction, full cachelines */
+ size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ goto out_put_rpm;
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
- kfree(dsb);
- goto out;
+ goto out_put_rpm;
}
buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
- kfree(dsb);
- goto out;
+ goto out_put_rpm;
}
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
dsb->id = DSB1;
dsb->vma = vma;
+ dsb->crtc = crtc;
dsb->cmd_buf = buf;
+ dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- crtc_state->dsb = dsb;
-out:
- if (!crtc_state->dsb)
- drm_info(&i915->drm,
- "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+ return dsb;
+
+out_put_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ kfree(dsb);
+out:
+ drm_info_once(&i915->drm,
+ "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
+ crtc->base.base.id, crtc->base.name, DSB1);
+
+ return NULL;
}
/**
* intel_dsb_cleanup() - To cleanup DSB context.
- * @crtc_state: intel_crtc_state structure to cleanup associated dsb instance.
+ * @dsb: DSB context
*
 * This function cleans up the DSB context by unpinning and releasing
* the VMA object associated with it.
*/
-void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
+void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- if (!crtc_state->dsb)
- return;
-
- i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
- kfree(crtc_state->dsb);
- crtc_state->dsb = NULL;
+ i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ kfree(dsb);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 74dd2b3343bb..b8148b47022d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -10,14 +10,17 @@
#include "i915_reg_defs.h"
-struct intel_crtc_state;
+struct intel_crtc;
+struct intel_dsb;
-void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
-void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
-void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
+ unsigned int max_cmds);
+void intel_dsb_finish(struct intel_dsb *dsb);
+void intel_dsb_cleanup(struct intel_dsb *dsb);
+void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
-void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val);
-void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
+void intel_dsb_commit(struct intel_dsb *dsb,
+ bool wait_for_vblank);
+void intel_dsb_wait(struct intel_dsb *dsb);
#endif
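
A minimal usage sketch of the reworked DSB interface (not part of the patch). The caller, register and data are hypothetical, and intel_de_write_fw() is assumed from the existing driver, mirroring the MMIO fallback the old intel_dsb_reg_write() performed internally when no DSB context existed.

/* Sketch only: batch n writes of one auto-increment register via the DSB,
 * falling back to direct MMIO when no DSB context could be prepared. */
static void sketch_program_lut(struct intel_crtc *crtc, i915_reg_t reg,
			       const u32 *data, unsigned int n)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_dsb *dsb;
	unsigned int i;

	/* one command per register write, worst case */
	dsb = intel_dsb_prepare(crtc, n);
	if (!dsb) {
		/* intel_dsb_prepare() returns NULL on failure */
		for (i = 0; i < n; i++)
			intel_de_write_fw(i915, reg, data[i]);
		return;
	}

	/* consecutive writes to the same register are merged into an indexed write */
	for (i = 0; i < n; i++)
		intel_dsb_reg_write(dsb, reg, data[i]);

	intel_dsb_finish(dsb);		/* pad the tail to a full cacheline */
	intel_dsb_commit(dsb, false);	/* start execution, don't wait for vblank */
	intel_dsb_wait(dsb);		/* wait for idle and reset the context */
	intel_dsb_cleanup(dsb);
}
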
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_regs.h b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
new file mode 100644
index 000000000000..12535d478775
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DSB_REGS_H__
+#define __INTEL_DSB_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* This register controls the Display State Buffer (DSB) engines. */
+#define _DSBSL_INSTANCE_BASE 0x70B00
+#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
+ (pipe) * 0x1000 + (id) * 0x100)
+#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
+#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
+#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
+#define DSB_ENABLE REG_BIT(31)
+#define DSB_BUF_REITERATE REG_BIT(29)
+#define DSB_WAIT_FOR_VBLANK REG_BIT(28)
+#define DSB_WAIT_FOR_LINE_IN REG_BIT(27)
+#define DSB_HALT REG_BIT(16)
+#define DSB_NON_POSTED REG_BIT(8)
+#define DSB_STATUS_BUSY REG_BIT(0)
+#define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc)
+#define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8)
+#define   DSB_MMIO_DEAD_CLOCKS_COUNT(x)	REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, (x))
+#define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0)
+#define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x))
+#define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10)
+#define DSB_POLL_ENABLE REG_BIT(31)
+#define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23)
+#define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */
+#define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15)
+#define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x))
+#define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14)
+#define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c)
+#define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24)
+#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_ATS_FAULT_INT_EN REG_BIT(20)
+#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
+#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
+#define DSB_POLL_ERR_INT_EN REG_BIT(17)
+#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4)
+#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
+#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
+#define DSB_POLL_ERR_INT_STATUS REG_BIT(1)
+#define DSB_PROG_INT_STATUS REG_BIT(0)
+#define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c)
+#define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30)
+#define DSB_RM_CLAIM_TIMEOUT REG_BIT(31)
+#define DSB_RM_READY_TIMEOUT REG_BIT(30)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */
+#define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0)
+#define   DSB_RM_READY_TIMEOUT_VALUE(x)	REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE_MASK, (x)) /* usec */
+#define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34)
+#define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38)
+#define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c)
+#define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40)
+#define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44)
+#define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48)
+#define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0)
+
+#endif /* __INTEL_DSB_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 20e466d843ce..049443245310 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -162,6 +162,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -171,6 +172,10 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using DCS for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index fce69fa446d5..c7935ea498c4 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -41,9 +41,12 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
+#include "intel_gmbus_regs.h"
+#include "intel_pps_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
@@ -377,6 +380,85 @@ static void icl_exec_gpio(struct intel_connector *connector,
drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
+enum {
+ MIPI_RESET_1 = 0,
+ MIPI_AVDD_EN_1,
+ MIPI_BKLT_EN_1,
+ MIPI_AVEE_EN_1,
+ MIPI_VIO_EN_1,
+ MIPI_RESET_2,
+ MIPI_AVDD_EN_2,
+ MIPI_BKLT_EN_2,
+ MIPI_AVEE_EN_2,
+ MIPI_VIO_EN_2,
+};
+
+static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+ int gpio, bool value)
+{
+ int index;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ return;
+
+ switch (gpio) {
+ case MIPI_RESET_1:
+ case MIPI_RESET_2:
+ index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;
+
+ /*
+ * Disable HPD to set the pin to output, and set output
+ * value. The HPD pin should not be enabled for DSI anyway,
+ * assuming the board design and VBT are sane, and the pin isn't
+ * used by a non-DSI encoder.
+ *
+ * The locking protects against concurrent SHOTPLUG_CTL_DDI
+ * modifications in irq setup and handling.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
+ SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
+ value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ break;
+ case MIPI_AVDD_EN_1:
+ case MIPI_AVDD_EN_2:
+ index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
+
+ intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
+ value ? PANEL_POWER_ON : 0);
+ break;
+ case MIPI_BKLT_EN_1:
+ case MIPI_BKLT_EN_2:
+ index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
+
+ intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
+ value ? EDP_BLC_ENABLE : 0);
+ break;
+ case MIPI_AVEE_EN_1:
+ case MIPI_AVEE_EN_2:
+ index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
+
+ intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ GPIO_CLOCK_VAL_OUT,
+ GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
+ GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
+ break;
+ case MIPI_VIO_EN_1:
+ case MIPI_VIO_EN_2:
+ index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
+
+ intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ GPIO_DATA_VAL_OUT,
+ GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
+ GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
+ break;
+ default:
+ MISSING_CASE(gpio);
+ }
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -384,8 +466,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
-
- drm_dbg_kms(&dev_priv->drm, "\n");
+ bool native = DISPLAY_VER(dev_priv) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3)
gpio_index = *data++;
@@ -398,10 +479,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
else
gpio_source = 0;
+ if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
+ native = false;
+
/* pull up/down */
value = *data++ & 1;
- if (DISPLAY_VER(dev_priv) >= 11)
+ drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
+
+ if (native)
+ icl_native_gpio_set_value(dev_priv, gpio_number, value);
+ else if (DISPLAY_VER(dev_priv) >= 11)
icl_exec_gpio(connector, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(connector, gpio_source, gpio_number, value);
@@ -674,17 +763,6 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
}
-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
- struct intel_connector *connector = intel_dsi->attached_connector;
-
- /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
- if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
- return;
-
- msleep(msec);
-}
-
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
index dc642c1fe7ef..468d873fab1a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -16,7 +16,6 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* __INTEL_DSI_VBT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c86f9890754d..eb2dcd866cc8 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -38,6 +38,7 @@
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
+#include "intel_dvo_regs.h"
#include "intel_gmbus.h"
#include "intel_panel.h"
@@ -56,48 +57,42 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
- .dvo_reg = DVOA,
- .dvo_srcdim_reg = DVOA_SRCDIM,
+ .port = PORT_A,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = 0x75,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
@@ -105,8 +100,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
- .dvo_reg = DVOB,
- .dvo_srcdim_reg = DVOB_SRCDIM,
+ .port = PORT_B,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
},
@@ -118,8 +112,6 @@ struct intel_dvo {
struct intel_dvo_device dev;
struct intel_connector *attached_connector;
-
- bool panel_wants_dither;
};
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
@@ -134,12 +126,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(i915, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -150,13 +143,13 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(i915, DVO(port));
- *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
return tmp & DVO_ENABLE;
}
@@ -164,13 +157,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(i915, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -190,14 +183,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = intel_de_read(dev_priv, dvo_reg);
+ enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
- intel_de_read(dev_priv, dvo_reg);
+
+ intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(i915, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -205,30 +198,29 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = intel_de_read(dev_priv, dvo_reg);
+ enum port port = encoder->port;
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
- intel_de_read(dev_priv, dvo_reg);
+ intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(i915, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
static enum drm_mode_status
-intel_dvo_mode_valid(struct drm_connector *connector,
+intel_dvo_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
- intel_panel_fixed_mode(intel_connector, mode);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ intel_panel_fixed_mode(connector, mode);
+ int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
int target_clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -239,7 +231,7 @@ intel_dvo_mode_valid(struct drm_connector *connector,
if (fixed_mode) {
enum drm_mode_status status;
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -289,18 +281,17 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
u32 dvo_val;
- i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
- /* Save the data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(dev_priv, dvo_reg) &
- (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ /* Save the active data order, since I don't know what it should be set to. */
+ dvo_val = intel_de_read(i915, DVO(port)) &
+ (DVO_DEDICATED_INT_ENABLE |
+ DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
@@ -311,19 +302,21 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(dev_priv, dvo_srcdim_reg,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
- intel_de_write(dev_priv, dvo_reg, dvo_val);
+ intel_de_write(i915, DVO_SRCDIM(port),
+ DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
+ DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
+ intel_de_write(i915, DVO(port), dvo_val);
}
static enum drm_connector_status
-intel_dvo_detect(struct drm_connector *connector, bool force)
+intel_dvo_detect(struct drm_connector *_connector, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
if (!INTEL_DISPLAY_ENABLED(i915))
return connector_status_disconnected;
@@ -331,9 +324,10 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
-static int intel_dvo_get_modes(struct drm_connector *connector)
+static int intel_dvo_get_modes(struct drm_connector *_connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
/*
@@ -342,12 +336,12 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- num_modes = intel_ddc_get_modes(connector,
- intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+ num_modes = intel_ddc_get_modes(&connector->base,
+ intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC));
if (num_modes)
return num_modes;
- return intel_panel_get_modes(to_intel_connector(connector));
+ return intel_panel_get_modes(connector);
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
@@ -379,165 +373,184 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-static enum port intel_dvo_port(i915_reg_t dvo_reg)
+static int intel_dvo_encoder_type(const struct intel_dvo_device *dvo)
+{
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ return DRM_MODE_ENCODER_TMDS;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
+ case INTEL_DVO_CHIP_LVDS:
+ return DRM_MODE_ENCODER_LVDS;
+ default:
+ MISSING_CASE(dvo->type);
+ return DRM_MODE_ENCODER_NONE;
+ }
+}
+
+static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
{
- if (i915_mmio_reg_equal(dvo_reg, DVOA))
- return PORT_A;
- else if (i915_mmio_reg_equal(dvo_reg, DVOB))
- return PORT_B;
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ return DRM_MODE_CONNECTOR_DVII;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
+ case INTEL_DVO_CHIP_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ default:
+ MISSING_CASE(dvo->type);
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+ struct intel_dvo *intel_dvo,
+ const struct intel_dvo_device *dvo)
+{
+ struct i2c_adapter *i2c;
+ u32 dpll[I915_MAX_PIPES];
+ enum pipe pipe;
+ int gpio;
+ bool ret;
+
+ /*
+ * Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GMBUS_PIN_SSC;
else
- return PORT_C;
+ gpio = GMBUS_PIN_DPB;
+
+ /*
+ * Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+ intel_dvo->dev = *dvo;
+
+ /*
+ * GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ /*
+ * ns2501 requires the DVO 2x clock before it will
+ * respond to i2c accesses, so make sure we have
+ * the clock enabled before we attempt to initialize
+ * the device.
+ */
+ for_each_pipe(dev_priv, pipe)
+ dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE);
+
+ ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ /* restore the DVO 2x clock state to original */
+ for_each_pipe(dev_priv, pipe) {
+ intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
+ }
+
+ intel_gmbus_force_bit(i2c, false);
+
+ return ret;
}
-void intel_dvo_init(struct drm_i915_private *dev_priv)
+static bool intel_dvo_probe(struct drm_i915_private *i915,
+ struct intel_dvo *intel_dvo)
{
- struct intel_encoder *intel_encoder;
- struct intel_dvo *intel_dvo;
- struct intel_connector *intel_connector;
int i;
- int encoder_type = DRM_MODE_ENCODER_NONE;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ if (intel_dvo_init_dev(i915, intel_dvo,
+ &intel_dvo_devices[i]))
+ return true;
+ }
+
+ return false;
+}
+
+void intel_dvo_init(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_dvo *intel_dvo;
intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(intel_dvo);
return;
}
- intel_dvo->attached_connector = intel_connector;
+ intel_dvo->attached_connector = connector;
- intel_encoder = &intel_dvo->base;
+ encoder = &intel_dvo->base;
- intel_encoder->disable = intel_disable_dvo;
- intel_encoder->enable = intel_enable_dvo;
- intel_encoder->get_hw_state = intel_dvo_get_hw_state;
- intel_encoder->get_config = intel_dvo_get_config;
- intel_encoder->compute_config = intel_dvo_compute_config;
- intel_encoder->pre_enable = intel_dvo_pre_enable;
- intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+ encoder->disable = intel_disable_dvo;
+ encoder->enable = intel_enable_dvo;
+ encoder->get_hw_state = intel_dvo_get_hw_state;
+ encoder->get_config = intel_dvo_get_config;
+ encoder->compute_config = intel_dvo_compute_config;
+ encoder->pre_enable = intel_dvo_pre_enable;
+ connector->get_hw_state = intel_dvo_connector_get_hw_state;
- /* Now, try to find a controller */
- for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_connector->base;
- const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
- struct i2c_adapter *i2c;
- int gpio;
- bool dvoinit;
- enum pipe pipe;
- u32 dpll[I915_MAX_PIPES];
- enum port port;
+ if (!intel_dvo_probe(i915, intel_dvo)) {
+ kfree(intel_dvo);
+ intel_connector_free(connector);
+ return;
+ }
- /*
- * Allow the I2C driver info to specify the GPIO to be used in
- * special cases, but otherwise default to what's defined
- * in the spec.
- */
- if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
- gpio = dvo->gpio;
- else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GMBUS_PIN_SSC;
- else
- gpio = GMBUS_PIN_DPB;
+ encoder->type = INTEL_OUTPUT_DVO;
+ encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ encoder->port = intel_dvo->dev.port;
+ encoder->pipe_mask = ~0;
- /*
- * Set up the I2C bus necessary for the chip we're probing.
- * It appears that everything is on GPIOE except for panels
- * on i830 laptops, which are on GPIOB (DVOA).
- */
- i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+ if (intel_dvo->dev.type != INTEL_DVO_CHIP_LVDS)
+ encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
+ BIT(INTEL_OUTPUT_DVO);
- intel_dvo->dev = *dvo;
+ drm_encoder_init(&i915->drm, &encoder->base,
+ &intel_dvo_enc_funcs,
+ intel_dvo_encoder_type(&intel_dvo->dev),
+ "DVO %c", port_name(encoder->port));
- /*
- * GMBUS NAK handling seems to be unstable, hence let the
- * transmitter detection run in bit banging mode for now.
- */
- intel_gmbus_force_bit(i2c, true);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dvo->dev.name);
+
+ if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ drm_connector_init(&i915->drm, &connector->base,
+ &intel_dvo_connector_funcs,
+ intel_dvo_connector_type(&intel_dvo->dev));
+
+ drm_connector_helper_add(&connector->base,
+ &intel_dvo_connector_helper_funcs);
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ intel_connector_attach_encoder(connector, encoder);
+
+ if (intel_dvo->dev.type == INTEL_DVO_CHIP_LVDS) {
/*
- * ns2501 requires the DVO 2x clock before it will
- * respond to i2c accesses, so make sure we have
- * have the clock enabled before we attempt to
- * initialize the device.
+ * For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
*/
- for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
- intel_de_write(dev_priv, DPLL(pipe),
- dpll[pipe] | DPLL_DVO_2X_MODE);
- }
-
- dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
-
- /* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
- }
-
- intel_gmbus_force_bit(i2c, false);
-
- if (!dvoinit)
- continue;
-
- port = intel_dvo_port(dvo->dvo_reg);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_dvo_enc_funcs, encoder_type,
- "DVO %c", port_name(port));
-
- intel_encoder->type = INTEL_OUTPUT_DVO;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
- intel_encoder->port = port;
- intel_encoder->pipe_mask = ~0;
-
- if (dvo->type != INTEL_DVO_CHIP_LVDS)
- intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
- BIT(INTEL_OUTPUT_DVO);
-
- switch (dvo->type) {
- case INTEL_DVO_CHIP_TMDS:
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- drm_connector_init(&dev_priv->drm, connector,
- &intel_dvo_connector_funcs,
- DRM_MODE_CONNECTOR_DVII);
- encoder_type = DRM_MODE_ENCODER_TMDS;
- break;
- case INTEL_DVO_CHIP_LVDS_NO_FIXED:
- case INTEL_DVO_CHIP_LVDS:
- drm_connector_init(&dev_priv->drm, connector,
- &intel_dvo_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- encoder_type = DRM_MODE_ENCODER_LVDS;
- break;
- }
-
- drm_connector_helper_add(connector,
- &intel_dvo_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- intel_connector_attach_encoder(intel_connector, intel_encoder);
- if (dvo->type == INTEL_DVO_CHIP_LVDS) {
- /*
- * For our LVDS chipsets, we should hopefully be able
- * to dig the fixed panel mode out of the BIOS data.
- * However, it's in a different format from the BIOS
- * data on chipsets with integrated LVDS (stored in AIM
- * headers, likely), so for now, just get the current
- * mode being output through DVO.
- */
- intel_panel_add_encoder_fixed_mode(intel_connector,
- intel_encoder);
-
- intel_panel_init(intel_connector);
-
- intel_dvo->panel_wants_dither = true;
- }
+ intel_panel_add_encoder_fixed_mode(connector, encoder);
- return;
+ intel_panel_init(connector, NULL);
}
-
- kfree(intel_dvo);
- kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index ecff7b190856..f7e98e1c6470 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -25,6 +25,8 @@
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
+
enum drm_connector_status;
struct drm_display_mode;
struct i2c_adapter;
@@ -32,9 +34,8 @@ struct i2c_adapter;
struct intel_dvo_device {
const char *name;
int type;
- /* DVOA/B/C output register */
- i915_reg_t dvo_reg;
- i915_reg_t dvo_srcdim_reg;
+ /* DVOA/B/C */
+ enum port port;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_regs.h b/drivers/gpu/drm/i915/display/intel_dvo_regs.h
new file mode 100644
index 000000000000..6f9058462850
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dvo_regs.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DVO_REGS_H__
+#define __INTEL_DVO_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _DVOA 0x61120
+#define _DVOB 0x61140
+#define _DVOC 0x61160
+#define DVO(port) _MMIO_PORT((port), _DVOA, _DVOB)
+#define DVO_ENABLE REG_BIT(31)
+#define DVO_PIPE_SEL_MASK REG_BIT(30)
+#define DVO_PIPE_SEL(pipe) REG_FIELD_PREP(DVO_PIPE_SEL_MASK, (pipe))
+#define DVO_PIPE_STALL_MASK REG_GENMASK(29, 28)
+#define DVO_PIPE_STALL_UNUSED REG_FIELD_PREP(DVO_PIPE_STALL_MASK, 0)
+#define DVO_PIPE_STALL REG_FIELD_PREP(DVO_PIPE_STALL_MASK, 1)
+#define DVO_PIPE_STALL_TV REG_FIELD_PREP(DVO_PIPE_STALL_MASK, 2)
+#define DVO_INTERRUPT_SELECT REG_BIT(27)
+#define DVO_DEDICATED_INT_ENABLE REG_BIT(26)
+#define DVO_PRESERVE_MASK REG_GENMASK(25, 24)
+#define DVO_USE_VGA_SYNC REG_BIT(15)
+#define DVO_DATA_ORDER_MASK REG_BIT(14)
+#define DVO_DATA_ORDER_I740 REG_FIELD_PREP(DVO_DATA_ORDER_MASK, 0)
+#define DVO_DATA_ORDER_FP REG_FIELD_PREP(DVO_DATA_ORDER_MASK, 1)
+#define DVO_VSYNC_DISABLE REG_BIT(11)
+#define DVO_HSYNC_DISABLE REG_BIT(10)
+#define DVO_VSYNC_TRISTATE REG_BIT(9)
+#define DVO_HSYNC_TRISTATE REG_BIT(8)
+#define DVO_BORDER_ENABLE REG_BIT(7)
+#define DVO_ACT_DATA_ORDER_MASK REG_BIT(6)
+#define DVO_ACT_DATA_ORDER_RGGB REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 0)
+#define DVO_ACT_DATA_ORDER_GBRG REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 1)
+#define DVO_ACT_DATA_ORDER_GBRG_ERRATA REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 0)
+#define DVO_ACT_DATA_ORDER_RGGB_ERRATA REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 1)
+#define DVO_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define DVO_HSYNC_ACTIVE_HIGH REG_BIT(3)
+#define DVO_BLANK_ACTIVE_HIGH REG_BIT(2)
+#define DVO_OUTPUT_CSTATE_PIXELS REG_BIT(1) /* SDG only */
+#define DVO_OUTPUT_SOURCE_SIZE_PIXELS REG_BIT(0) /* SDG only */
+
+#define _DVOA_SRCDIM 0x61124
+#define _DVOB_SRCDIM 0x61144
+#define _DVOC_SRCDIM 0x61164
+#define DVO_SRCDIM(port) _MMIO_PORT((port), _DVOA_SRCDIM, _DVOB_SRCDIM)
+#define DVO_SRCDIM_HORIZONTAL_MASK REG_GENMASK(22, 12)
+#define DVO_SRCDIM_HORIZONTAL(x) REG_FIELD_PREP(DVO_SRCDIM_HORIZONTAL_MASK, (x))
+#define DVO_SRCDIM_VERTICAL_MASK REG_GENMASK(10, 0)
+#define DVO_SRCDIM_VERTICAL(x) REG_FIELD_PREP(DVO_SRCDIM_VERTICAL_MASK, (x))
+
+#endif /* __INTEL_DVO_REGS_H__ */
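[Editor's note] The new intel_dvo_regs.h above relies on the REG_BIT/REG_GENMASK/REG_FIELD_PREP/REG_FIELD_GET helpers and on a port-indexed register lookup (DVOA/B/C are evenly spaced, so `DVO(port)` can be derived from a base plus a per-port stride). The standalone C sketch below models those two ideas in userspace; it is an illustration only, with simplified hypothetical macro names, not the kernel's actual definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's REG_* helpers (illustration only). */
#define BIT32(n)		(1u << (n))
#define GENMASK32(h, l)		(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP32(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* DVOA/B/C are evenly spaced, so "base + port * stride" selects the port. */
#define DVOA_OFFSET		0x61120u
#define DVOB_OFFSET		0x61140u
#define DVO_OFFSET(port)	(DVOA_OFFSET + (port) * (DVOB_OFFSET - DVOA_OFFSET))

#define DVO_ENABLE_BIT		BIT32(31)
#define DVO_PIPE_SEL_MASK_	BIT32(30)
#define DVO_SRCDIM_HORIZ_MASK	GENMASK32(22, 12)

int main(void)
{
	uint32_t val = DVO_ENABLE_BIT |
		       FIELD_PREP32(DVO_PIPE_SEL_MASK_, 1) |	/* pipe B */
		       FIELD_PREP32(DVO_SRCDIM_HORIZ_MASK, 1024);

	printf("port C reg offset: 0x%x\n", DVO_OFFSET(2));	/* prints 0x61160 */
	printf("pipe: %u, hdisplay: %u\n",
	       (unsigned int)FIELD_GET32(DVO_PIPE_SEL_MASK_, val),
	       (unsigned int)FIELD_GET32(DVO_SRCDIM_HORIZ_MASK, val));
	return 0;
}
```

This is why the driver changes above can drop the per-device dvo_reg/dvo_srcdim_reg fields: storing only `enum port` is enough to reach both registers.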
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 63137ae5ab21..e5f637897b5e 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -174,7 +174,7 @@ static const struct intel_modifier_desc intel_modifiers[] = {
.plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC,
}, {
.modifier = I915_FORMAT_MOD_4_TILED,
- .display_ver = { 13, 13 },
+ .display_ver = { 13, -1 },
.plane_caps = INTEL_PLANE_CAP_TILING_4,
}, {
.modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
@@ -716,14 +716,15 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
}
}
-static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
+bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
{
- return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR;
+ return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR;
}
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
{
- return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
+ return fb && to_i915(fb->dev)->params.enable_dpt &&
+ intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
}
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
@@ -1189,7 +1190,7 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
- return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR;
+ return IS_ALDERLAKE_P(i915) && intel_fb_uses_dpt(&fb->base);
}
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
@@ -1705,7 +1706,7 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
* The new CCS hash mode makes remapping impossible
*/
if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) ||
- intel_modifier_uses_dpt(dev_priv, modifier))
+ intel_fb_modifier_uses_dpt(dev_priv, modifier))
return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
else if (DISPLAY_VER(dev_priv) >= 7)
return 256 * 1024;
@@ -2007,6 +2008,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
vm = intel_dpt_create(intel_fb);
if (IS_ERR(vm)) {
+ drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
goto err;
}
@@ -2017,11 +2019,14 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
+ goto err_free_dpt;
}
return 0;
+err_free_dpt:
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
err:
intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
@@ -2046,6 +2051,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
return ERR_PTR(-EREMOTE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 4662b812b934..e85167d6bc34 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -92,6 +92,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
+bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier);
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 6900acbb1381..1aca7552a85d 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -91,7 +91,7 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
goto err;
}
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ vma->display_alignment = max(vma->display_alignment, alignment);
i915_gem_object_flush_if_display(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b5ee5ea0d010..b507ff944864 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -323,25 +323,23 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
struct drm_i915_private *dev_priv = fbc->i915;
- spin_lock_irq(&dev_priv->uncore.lock);
intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
- spin_unlock_irq(&dev_priv->uncore.lock);
}
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
fbc->compressed_fb.start, U32_MAX));
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
fbc->compressed_llb.start, U32_MAX));
intel_de_write(i915, FBC_CFB_BASE,
- i915->dsm.start + fbc->compressed_fb.start);
+ i915->dsm.stolen.start + fbc->compressed_fb.start);
intel_de_write(i915, FBC_LL_BASE,
- i915->dsm.start + fbc->compressed_llb.start);
+ i915->dsm.stolen.start + fbc->compressed_llb.start);
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -359,10 +357,8 @@ static void i965_fbc_nuke(struct intel_fbc *fbc)
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
struct drm_i915_private *dev_priv = fbc->i915;
- spin_lock_irq(&dev_priv->uncore.lock);
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
- spin_unlock_irq(&dev_priv->uncore.lock);
}
static const struct intel_fbc_funcs i965_fbc_funcs = {
@@ -716,7 +712,7 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
(DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
- end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
+ end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -815,7 +811,7 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp */
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -1095,7 +1091,9 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* Wa_14016291713 */
- if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) {
+ if ((IS_DISPLAY_VER(i915, 12, 13) ||
+ IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) &&
+ crtc_state->has_psr) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
}
@@ -1809,10 +1807,10 @@ static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
- intel_fbc_debugfs_false_color_get,
- intel_fbc_debugfs_false_color_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
+ intel_fbc_debugfs_false_color_get,
+ intel_fbc_debugfs_false_color_set,
+ "%llu\n");
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
struct dentry *parent)
@@ -1821,8 +1819,8 @@ static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
fbc, &intel_fbc_debugfs_status_fops);
if (fbc->funcs->set_false_color)
- debugfs_create_file("i915_fbc_false_color", 0644, parent,
- fbc, &intel_fbc_debugfs_false_color_fops);
+ debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
+ fbc, &intel_fbc_debugfs_false_color_fops);
}
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
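[Editor's note] The DEFINE_SIMPLE_ATTRIBUTE → DEFINE_DEBUGFS_ATTRIBUTE change above pairs the attribute with debugfs_create_file_unsafe(), which skips the removal-protection proxy debugfs otherwise wraps around plain fops, because the generated open/read/write helpers take the debugfs file reference themselves. A minimal, self-contained module sketch of that pairing follows; all names here are hypothetical and unrelated to i915.

```c
// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example module: a writable u64 exposed through a debugfs
 * attribute created with debugfs_create_file_unsafe(). */
#include <linux/debugfs.h>
#include <linux/module.h>

static u64 demo_value;
static struct dentry *demo_dir;

static int demo_value_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int demo_value_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

/* Generates fops whose open/read/write use debugfs_file_get()/put(). */
DEFINE_DEBUGFS_ATTRIBUTE(demo_value_fops, demo_value_get, demo_value_set, "%llu\n");

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo_attr", NULL);
	debugfs_create_file_unsafe("value", 0644, demo_dir,
				   &demo_value, &demo_value_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```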
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 5575d7abdc09..673bcdfb7ff6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -170,7 +170,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features.
*/
- if (size * 2 < dev_priv->stolen_usable_size)
+ if (size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
bool prealloc = false;
void __iomem *vaddr;
struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -267,36 +268,40 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
- /* setup aperture base/size for vesafb takeover */
obj = intel_fb_obj(&intel_fb->base);
if (i915_gem_object_is_lmem(obj)) {
struct intel_memory_region *mem = obj->mm.region;
- info->apertures->ranges[0].base = mem->io_start;
- info->apertures->ranges[0].size = mem->io_size;
-
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
(unsigned long)(mem->io_start +
i915_gem_object_get_dma_address(obj, 0));
info->fix.smem_len = obj->base.size;
} else {
- info->apertures->ranges[0].base = ggtt->gmadr.start;
- info->apertures->ranges[0].size = ggtt->mappable_end;
-
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
info->fix.smem_len = vma->size;
}
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- goto out_unpin;
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
}
+
+ if (ret)
+ goto out_unpin;
+
info->screen_base = vaddr;
info->screen_size = vma->size;
@@ -328,8 +333,20 @@ out_unlock:
return ret;
}
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
+ .fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
@@ -347,6 +364,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
+ drm_fb_helper_unprepare(&ifbdev->helper);
kfree(ifbdev);
}
@@ -527,10 +545,12 @@ int intel_fbdev_init(struct drm_device *dev)
return -ENOMEM;
mutex_init(&ifbdev->hpd_lock);
- drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
- if (!intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->preferred_bpp = 32;
+ if (intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
+ else
+ ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
@@ -549,14 +569,13 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
struct intel_fbdev *ifbdev = data;
/* Due to peculiar init order wrt to hpd handling this is separate. */
- if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp))
+ if (drm_fb_helper_initial_config(&ifbdev->helper))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}
-void intel_fbdev_initial_config_async(struct drm_device *dev)
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -626,7 +645,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
struct fb_info *info;
- if (!ifbdev || !ifbdev->vma)
+ if (!ifbdev)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
+ return;
+
+ if (!ifbdev->vma)
goto set_suspend;
info = ifbdev->helper.info;
@@ -693,9 +718,9 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
-void intel_fbdev_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 0e95e9472fa3..04fd523a5023 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -15,12 +15,12 @@ struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
@@ -28,7 +28,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
}
@@ -48,7 +48,7 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 063f1da4f229..55283677c45a 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -12,6 +12,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_fdi_regs.h"
struct intel_fdi_funcs {
void (*fdi_link_train)(struct intel_crtc *crtc,
@@ -366,8 +367,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+ intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -439,19 +439,11 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(150);
reg = FDI_RX_IIR(pipe);
@@ -538,13 +530,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -593,13 +581,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -719,19 +703,13 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
}
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE_IVB,
+ FDI_LINK_TRAIN_PATTERN_2_IVB);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_PATTERN_MASK_CPT,
+ FDI_LINK_TRAIN_PATTERN_2_CPT);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
@@ -837,9 +815,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
@@ -865,25 +842,19 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- temp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
- temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
@@ -898,7 +869,6 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void hsw_fdi_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -906,30 +876,15 @@ void hsw_fdi_disable(struct intel_encoder *encoder)
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
-
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
-
intel_ddi_disable_clock(encoder);
-
- val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_PLL_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -945,16 +900,14 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
+ intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
intel_de_posting_read(dev_priv, reg);
udelay(200);
@@ -974,28 +927,18 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
/* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
/* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(100);
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
/* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(100);
}
@@ -1007,15 +950,13 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
u32 temp;
/* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
intel_de_posting_read(dev_priv, reg);
@@ -1027,11 +968,8 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
@@ -1042,9 +980,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- /* BPC in FDI rx is consistent with that in PIPECONF */
+ /* BPC in FDI rx is consistent with that in TRANSCONF */
temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp);
intel_de_posting_read(dev_priv, reg);
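[Editor's note] The intel_fdi.c conversions above repeatedly collapse an explicit read/modify/write sequence into intel_de_rmw(i915, reg, clear, set); the DVO init hunk earlier in this diff also relies on the helper returning the old register value so it can be restored later. The userspace model below captures that equivalence with a fake MMIO array; it is an illustration of the pattern, not the driver's implementation.

```c
#include <stdint.h>
#include <stdio.h>

/* Pretend MMIO space: a tiny array indexed by register offset / 4. */
static uint32_t fake_mmio[64];

static uint32_t fake_read(uint32_t reg)
{
	return fake_mmio[reg / 4];
}

static void fake_write(uint32_t reg, uint32_t val)
{
	fake_mmio[reg / 4] = val;
}

/* Model of a rmw helper: drop the "clear" bits, OR in the "set" bits,
 * and hand back the old value so callers can save/restore it. */
static uint32_t fake_rmw(uint32_t reg, uint32_t clear, uint32_t set)
{
	uint32_t old = fake_read(reg);

	fake_write(reg, (old & ~clear) | set);
	return old;
}

#define FAKE_FDI_TX_CTL		0x10
#define TRAIN_PATTERN_MASK	(3u << 28)
#define TRAIN_PATTERN_2		(1u << 28)
#define TX_ENABLE		(1u << 31)

int main(void)
{
	fake_write(FAKE_FDI_TX_CTL, TX_ENABLE | (3u << 28));	/* train: none */

	/* Equivalent of: temp &= ~mask; temp |= pattern_2; write(reg, temp); */
	uint32_t old = fake_rmw(FAKE_FDI_TX_CTL, TRAIN_PATTERN_MASK, TRAIN_PATTERN_2);

	printf("old 0x%08x -> new 0x%08x\n", old, fake_read(FAKE_FDI_TX_CTL));
	return 0;
}
```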
diff --git a/drivers/gpu/drm/i915/display/intel_fdi_regs.h b/drivers/gpu/drm/i915/display/intel_fdi_regs.h
new file mode 100644
index 000000000000..853b834c35a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi_regs.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_FDI_REGS_H__
+#define __INTEL_FDI_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define FDI_PLL_BIOS_0 _MMIO(0x46000)
+#define FDI_PLL_FB_CLOCK_MASK 0xff
+#define FDI_PLL_BIOS_1 _MMIO(0x46004)
+#define FDI_PLL_BIOS_2 _MMIO(0x46008)
+#define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c)
+#define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010)
+#define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014)
+
+#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24)
+#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
+#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
+
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0)
+#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+
+/* CPU: FDI_TX */
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
+#define FDI_TX_DISABLE (0 << 31)
+#define FDI_TX_ENABLE (1 << 31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0 << 28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1 << 28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28)
+#define FDI_LINK_TRAIN_NONE (3 << 28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22)
+#define FDI_DP_PORT_WIDTH_SHIFT 19
+#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
+#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18)
+/* Ironlake: hardwired to 1 */
+#define FDI_TX_PLL_ENABLE (1 << 14)
+
+/* Ivybridge has different bits for lolz */
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8)
+#define FDI_LINK_TRAIN_NONE_IVB (3 << 8)
+
+/* both Tx and Rx */
+#define FDI_COMPOSITE_SYNC (1 << 11)
+#define FDI_LINK_TRAIN_AUTO (1 << 10)
+#define FDI_SCRAMBLING_ENABLE (0 << 7)
+#define FDI_SCRAMBLING_DISABLE (1 << 7)
+
+/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
+#define FDI_RX_ENABLE (1 << 31)
+/* train, dp width same as FDI_TX */
+#define FDI_FS_ERRC_ENABLE (1 << 27)
+#define FDI_FE_ERRC_ENABLE (1 << 26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1 << 16)
+#define FDI_8BPC (0 << 16)
+#define FDI_10BPC (1 << 16)
+#define FDI_6BPC (2 << 16)
+#define FDI_12BPC (3 << 16)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15)
+#define FDI_DMI_LINK_REVERSE_MASK (1 << 14)
+#define FDI_RX_PLL_ENABLE (1 << 13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1 << 11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1 << 10)
+#define FDI_FS_ERR_REPORT_ENABLE (1 << 9)
+#define FDI_FE_ERR_REPORT_ENABLE (1 << 8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6)
+#define FDI_PCDCLK (1 << 4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1 << 10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8)
+
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3 << 26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26)
+#define FDI_RX_PWRDN_LANE0_MASK (3 << 24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24)
+#define FDI_RX_TP1_TO_TP2_48 (2 << 20)
+#define FDI_RX_TP1_TO_TP2_64 (3 << 20)
+#define FDI_RX_FDI_DELAY_90 (0x90 << 0)
+#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN (1 << 10)
+#define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7)
+#define FDI_RX_FS_CODE_ERR (1 << 6)
+#define FDI_RX_FE_CODE_ERR (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0)
+
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
+
+#define FDI_PLL_CTL_1 _MMIO(0xfe000)
+#define FDI_PLL_CTL_2 _MMIO(0xfe004)
+
+#endif /* __INTEL_FDI_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index d636d21fa9ce..b708a62e509a 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -31,6 +31,7 @@
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
+#include "intel_pch_display.h"
/**
* DOC: fifo underrun handling
@@ -509,3 +510,22 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ crtc->cpu_fifo_underrun_disabled = !enable;
+
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc. With crtc for pipe A housing the underrun
+ * reporting state for PCH transcoder A, crtc for pipe B housing
+ * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = !enable;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
index 2e47d7d3c101..b00d8abebcf9 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -9,8 +9,11 @@
#include <linux/types.h>
struct drm_i915_private;
+struct intel_crtc;
enum pipe;
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc, bool enable);
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a5840a28a69d..3ddfc8080ee8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -215,54 +215,34 @@ intel_gmbus_reset(struct drm_i915_private *i915)
static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(i915, DSPCLK_GATE_D(i915));
- if (!enable)
- val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, DSPCLK_GATE_D(i915), val);
+ intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
- if (!enable)
- val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
- if (!enable)
- val |= BXT_GMBUS_GATING_DIS;
- else
- val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
+ !enable ? BXT_GMBUS_GATING_DIS : 0);
}
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *i915 = bus->i915;
- struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(i915) && !IS_I845G(i915))
- reserved = intel_uncore_read_notrace(uncore, bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ reserved = intel_de_read_notrace(i915, bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -270,37 +250,31 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
- intel_uncore_write_notrace(uncore,
- bus->gpio_reg,
- reserved | GPIO_CLOCK_DIR_MASK);
- intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved);
- return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
- GPIO_CLOCK_VAL_IN) != 0;
+ return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
- intel_uncore_write_notrace(uncore,
- bus->gpio_reg,
- reserved | GPIO_DATA_DIR_MASK);
- intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved);
- return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
- GPIO_DATA_VAL_IN) != 0;
+ return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -310,16 +284,14 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- intel_uncore_write_notrace(uncore,
- bus->gpio_reg,
- reserved | clock_bits);
- intel_uncore_posting_read(uncore, bus->gpio_reg);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | clock_bits);
+ intel_de_posting_read(i915, bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -329,8 +301,8 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | data_bits);
- intel_uncore_posting_read(uncore, bus->gpio_reg);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | data_bits);
+ intel_de_posting_read(i915, bus->gpio_reg);
}
static int
@@ -439,9 +411,7 @@ gmbus_wait_idle(struct drm_i915_private *i915)
add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_wait_for_register_fw(&i915->uncore,
- GMBUS2(i915), GMBUS_ACTIVE, 0,
- 10);
+ ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
intel_de_write_fw(i915, GMBUS4(i915), 0);
remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
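
This hunk swaps the raw uncore helper for the display-engine wrapper intel_de_wait_for_register_fw(). A minimal sketch of the polling semantics it provides (illustration only; the sketch_* name and the 1 ms poll interval are assumptions, not the real implementation):

/*
 * Poll until (reg & mask) == value or the timeout (in ms) expires,
 * without taking forcewake (the "_fw" variant).
 */
static int sketch_de_wait_for_register_fw(struct drm_i915_private *i915,
					  i915_reg_t reg, u32 mask, u32 value,
					  unsigned int timeout_ms)
{
	unsigned int elapsed_ms = 0;

	while ((intel_de_read_fw(i915, reg) & mask) != value) {
		if (elapsed_ms++ >= timeout_ms)
			return -ETIMEDOUT;
		usleep_range(1000, 2000);	/* roughly 1 ms per loop */
	}

	return 0;
}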
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6406fd487ee5..650232c4892b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
@@ -203,13 +204,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_gt *gt = dev_priv->media_gt;
+ struct intel_gsc_uc *gsc = &gt->uc.gsc;
bool capable = false;
/* I915 support for HDCP2.2 */
if (!hdcp->hdcp2_supported)
return false;
- /* MEI interface is solid */
+ /* On MTL+, make sure the GSC is loaded and the proxy is set up */
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ if (!intel_uc_fw_is_running(&gsc->fw))
+ return false;
+
+ /* The MEI/GSC interface (whichever is in use) is solid */
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -943,8 +951,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
port);
- intel_de_write(dev_priv, HDCP_REP_CTL,
- intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+ intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
if (ret) {
@@ -1143,18 +1150,18 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
@@ -1173,18 +1180,18 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
@@ -1201,18 +1208,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1227,18 +1234,18 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
@@ -1254,18 +1261,18 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
@@ -1281,18 +1288,18 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
@@ -1307,18 +1314,18 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
@@ -1336,20 +1343,21 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
- rep_topology,
- rep_send_ack);
+ ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
+ data,
+ rep_topology,
+ rep_send_ack);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
@@ -1365,18 +1373,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1389,18 +1397,18 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
@@ -1409,22 +1417,22 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
return ret;
}
-static int hdcp2_close_mei_session(struct intel_connector *connector)
+static int hdcp2_close_session(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
&dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1433,7 +1441,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
- return hdcp2_close_mei_session(connector);
+ return hdcp2_close_session(connector);
}
/* Authentication flow starts from here */
@@ -1819,12 +1827,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_AUTH_STATUS) {
+ LINK_AUTH_STATUS)
/* Link is Authenticated. Now set for Encryption */
- intel_de_write(dev_priv,
- HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
- }
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ 0, CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_set(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1848,8 +1854,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS));
- intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ CTL_LINK_ENCRYPTION_REQ, 0);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -2145,8 +2151,8 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
- dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data;
+ dev_priv->display.hdcp.master->hdcp_dev = mei_kdev;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
@@ -2163,30 +2169,30 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
-static const struct component_ops i915_hdcp_component_ops = {
+static const struct component_ops i915_hdcp_ops = {
.bind = i915_hdcp_component_bind,
.unbind = i915_hdcp_component_unbind,
};
-static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
- return MEI_DDI_A;
+ return HDCP_DDI_A;
case PORT_B ... PORT_F:
- return (enum mei_fw_ddi)port;
+ return (enum hdcp_ddi)port;
default:
- return MEI_DDI_INVALID_PORT;
+ return HDCP_DDI_INVALID_PORT;
}
}
-static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
- return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
default: /* eDP, DSI TRANSCODERS are non HDCP capable */
- return MEI_INVALID_TRANSCODER;
+ return HDCP_INVALID_TRANSCODER;
}
}
@@ -2200,20 +2206,20 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
enum port port = dig_port->base.port;
if (DISPLAY_VER(dev_priv) < 12)
- data->fw_ddi = intel_get_mei_fw_ddi_index(port);
+ data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
else
/*
- * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
+ * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
* with zero(INVALID PORT index).
*/
- data->fw_ddi = MEI_DDI_INVALID_PORT;
+ data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
/*
- * As associated transcoder is set and modified at modeset, here fw_tc
+ * As associated transcoder is set and modified at modeset, here hdcp_transcoder
* is initialized to zero (invalid transcoder index). This will be
* retained for <Gen12 forever.
*/
- data->fw_tc = MEI_INVALID_TRANSCODER;
+ data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
@@ -2235,6 +2241,9 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
{
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ return true;
+
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
@@ -2256,10 +2265,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = true;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
- ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
- I915_COMPONENT_HDCP);
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ ret = intel_hdcp_gsc_init(dev_priv);
+ else
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops,
+ I915_COMPONENT_HDCP);
+
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n",
ret);
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
dev_priv->display.hdcp.comp_added = false;
@@ -2350,7 +2363,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
}
if (DISPLAY_VER(dev_priv) >= 12)
- dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
+ dig_port->hdcp_port_data.hdcp_transcoder =
+ intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2485,7 +2499,10 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = false;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
- component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ intel_hdcp_gsc_fini(dev_priv);
+ else
+ component_del(dev_priv->drm.dev, &i915_hdcp_ops);
}
void intel_hdcp_cleanup(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
new file mode 100644
index 000000000000..7e52aea6aa17
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include <drm/i915_hdcp_interface.h>
+
+#include "display/intel_hdcp_gsc.h"
+#include "gem/i915_gem_region.h"
+#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
+#include "i915_drv.h"
+#include "i915_utils.h"
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 14;
+}
+
+static int
+gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
+ struct wired_cmd_initiate_hdcp2_session_out
+ session_init_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !ake_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ session_init_in.header.api_version = HDCP_API_VERSION;
+ session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
+ session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ session_init_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
+
+ session_init_in.port.integrated_port_type = data->port_type;
+ session_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+ session_init_in.protocol = data->protocol;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ sizeof(session_init_in),
+ (u8 *)&session_init_out,
+ sizeof(session_init_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_INITIATE_HDCP2_SESSION,
+ session_init_out.header.status);
+ return -EIO;
+ }
+
+ ake_data->msg_id = HDCP_2_2_AKE_INIT;
+ ake_data->tx_caps = session_init_out.tx_caps;
+ memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
+
+ return 0;
+}
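
Every gsc_hdcp_* callback in this file fills the same command header and port block before calling intel_hdcp_gsc_msg_send(). A hypothetical helper capturing the shared pattern (illustration only, not part of the patch; the hdcp_cmd_header/hdcp_port_id type names are assumed from i915_hdcp_interface.h):

/* Fill the fields that are common to all wired HDCP commands. */
static void sketch_fill_wired_cmd(struct hdcp_cmd_header *header,
				  struct hdcp_port_id *port,
				  u32 command_id, u32 buffer_len,
				  const struct hdcp_port_data *data)
{
	header->api_version = HDCP_API_VERSION;
	header->command_id = command_id;
	header->status = FW_HDCP_STATUS_SUCCESS;
	header->buffer_len = buffer_len;

	port->integrated_port_type = data->port_type;
	port->physical_port = (u8)data->hdcp_ddi;
	port->attached_transcoder = (u8)data->hdcp_transcoder;
}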
+
+static int
+gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
+ struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_rxcert_in.header.api_version = HDCP_API_VERSION;
+ verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
+ verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_rxcert_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
+
+ verify_rxcert_in.port.integrated_port_type = data->port_type;
+ verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ verify_rxcert_in.cert_rx = rx_cert->cert_rx;
+ memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
+ memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ sizeof(verify_rxcert_in),
+ (u8 *)&verify_rxcert_out,
+ sizeof(verify_rxcert_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_VERIFY_RECEIVER_CERT,
+ verify_rxcert_out.header.status);
+ return -EIO;
+ }
+
+ *km_stored = !!verify_rxcert_out.km_stored;
+ if (verify_rxcert_out.km_stored) {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_stored_km);
+ } else {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
+ }
+
+ memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
+ sizeof(verify_rxcert_out.ekm_buff));
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
+ struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_hprime)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ send_hprime_in.header.api_version = HDCP_API_VERSION;
+ send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
+ send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
+
+ send_hprime_in.port.integrated_port_type = data->port_type;
+ send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
+ HDCP_2_2_H_PRIME_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ sizeof(send_hprime_in),
+ (u8 *)&send_hprime_out,
+ sizeof(send_hprime_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
+ struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !pairing_info)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ pairing_info_in.header.api_version = HDCP_API_VERSION;
+ pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
+ pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ pairing_info_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
+
+ pairing_info_in.port.integrated_port_type = data->port_type;
+ pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
+ HDCP_2_2_E_KH_KM_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ sizeof(pairing_info_in),
+ (u8 *)&pairing_info_out,
+ sizeof(pairing_info_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_PAIRING_INFO,
+ pairing_info_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_initiate_locality_check(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data)
+{
+ struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
+ struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !lc_init_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ lc_init_in.header.api_version = HDCP_API_VERSION;
+ lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
+ lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
+
+ lc_init_in.port.integrated_port_type = data->port_type;
+ lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
+ (u8 *)&lc_init_out, sizeof(lc_init_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n",
+ WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
+ return -EIO;
+ }
+
+ lc_init_data->msg_id = HDCP_2_2_LC_INIT;
+ memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
+ struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_lprime)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_lprime_in.header.api_version = HDCP_API_VERSION;
+ verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
+ verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_lprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
+
+ verify_lprime_in.port.integrated_port_type = data->port_type;
+ verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
+ HDCP_2_2_L_PRIME_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ sizeof(verify_lprime_in),
+ (u8 *)&verify_lprime_out,
+ sizeof(verify_lprime_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VALIDATE_LOCALITY,
+ verify_lprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int gsc_hdcp_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
+ struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !ske_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ get_skey_in.header.api_version = HDCP_API_VERSION;
+ get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
+ get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
+
+ get_skey_in.port.integrated_port_type = data->port_type;
+ get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
+ (u8 *)&get_skey_out, sizeof(get_skey_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_GET_SESSION_KEY, get_skey_out.header.status);
+ return -EIO;
+ }
+
+ ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
+ memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
+ HDCP_2_2_E_DKEY_KS_LEN);
+ memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack)
+{
+ struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
+ struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !rep_topology || !rep_send_ack || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_repeater_in.header.api_version = HDCP_API_VERSION;
+ verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
+ verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_repeater_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
+
+ verify_repeater_in.port.integrated_port_type = data->port_type;
+ verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
+ HDCP_2_2_RXINFO_LEN);
+ memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
+ HDCP_2_2_SEQ_NUM_LEN);
+ memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ sizeof(verify_repeater_in),
+ (u8 *)&verify_repeater_out,
+ sizeof(verify_repeater_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VERIFY_REPEATER,
+ verify_repeater_out.header.status);
+ return -EIO;
+ }
+
+ memcpy(rep_send_ack->v, verify_repeater_out.v,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
+
+ return 0;
+}
+
+static int gsc_hdcp_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
+ struct wired_cmd_repeater_auth_stream_req_out
+ verify_mprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+ size_t cmd_size;
+
+ if (!dev || !stream_ready || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ cmd_size = struct_size(verify_mprime_in, streams, data->k);
+ if (cmd_size == SIZE_MAX)
+ return -EINVAL;
+
+ verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
+ if (!verify_mprime_in)
+ return -ENOMEM;
+
+ verify_mprime_in->header.api_version = HDCP_API_VERSION;
+ verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
+
+ verify_mprime_in->port.integrated_port_type = data->port_type;
+ verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
+ verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
+ drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
+
+ memcpy(verify_mprime_in->streams, data->streams,
+ array_size(data->k, sizeof(*data->streams)));
+
+ verify_mprime_in->k = cpu_to_be16(data->k);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
+ (u8 *)&verify_mprime_out,
+ sizeof(verify_mprime_out));
+ kfree(verify_mprime_in);
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ verify_mprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
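
The cmd_size check above works because struct_size() saturates at SIZE_MAX when the size computation overflows, so an overflowing data->k cannot wrap the allocation size. Roughly (illustration only):

/*
 * struct_size(p, streams, k) ~= sizeof(*p) + k * sizeof(p->streams[0]),
 * except that any overflow yields SIZE_MAX, which the caller rejects
 * with -EINVAL before calling kzalloc().
 */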
+
+static int gsc_hdcp_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
+{
+ struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
+ struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ enable_auth_in.header.api_version = HDCP_API_VERSION;
+ enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
+ enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
+
+ enable_auth_in.port.integrated_port_type = data->port_type;
+ enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+ enable_auth_in.stream_type = data->streams[0].stream_type;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ sizeof(enable_auth_in),
+ (u8 *)&enable_auth_out,
+ sizeof(enable_auth_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_ENABLE_AUTH, enable_auth_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
+{
+ struct wired_cmd_close_session_in session_close_in = { { 0 } };
+ struct wired_cmd_close_session_out session_close_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ session_close_in.header.api_version = HDCP_API_VERSION;
+ session_close_in.header.command_id = WIRED_CLOSE_SESSION;
+ session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ session_close_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
+
+ session_close_in.port.integrated_port_type = data->port_type;
+ session_close_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ sizeof(session_close_in),
+ (u8 *)&session_close_out,
+ sizeof(session_close_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n",
+ session_close_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = gsc_hdcp_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ gsc_hdcp_verify_receiver_cert_prepare_km,
+ .verify_hprime = gsc_hdcp_verify_hprime,
+ .store_pairing_info = gsc_hdcp_store_pairing_info,
+ .initiate_locality_check = gsc_hdcp_initiate_locality_check,
+ .verify_lprime = gsc_hdcp_verify_lprime,
+ .get_session_key = gsc_hdcp_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ gsc_hdcp_repeater_check_flow_prepare_ack,
+ .verify_mprime = gsc_hdcp_verify_mprime,
+ .enable_hdcp_authentication = gsc_hdcp_enable_authentication,
+ .close_hdcp_session = gsc_hdcp_close_session,
+};
+
+/* This function allocates memory for the command that we will send to the GSC CS */
+static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
+ struct intel_hdcp_gsc_message *hdcp_message)
+{
+ struct intel_gt *gt = i915->media_gt;
+ struct drm_i915_gem_object *obj = NULL;
+ struct i915_vma *vma = NULL;
+ void *cmd;
+ int err;
+
+ /* allocate object of one page for HDCP command memory and store it */
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
+ return PTR_ERR(obj);
+ }
+
+ cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(cmd)) {
+ drm_err(&i915->drm, "Failed to map gsc message page!\n");
+ err = PTR_ERR(cmd);
+ goto out_unpin;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unmap;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_unmap;
+
+ memset(cmd, 0, obj->base.size);
+
+ hdcp_message->hdcp_cmd = cmd;
+ hdcp_message->vma = vma;
+
+ return 0;
+
+out_unmap:
+ i915_gem_object_unpin_map(obj);
+out_unpin:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915)
+{
+ struct intel_hdcp_gsc_message *hdcp_message;
+ int ret;
+
+ hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
+
+ if (!hdcp_message)
+ return -ENOMEM;
+
+ /*
+ * NOTE: No need to lock the comp mutex here as it is already
+ * going to be taken before this function is called
+ */
+ i915->display.hdcp.hdcp_message = hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
+
+ if (ret)
+ drm_err(&i915->drm, "Could not initialize hdcp_message\n");
+
+ return ret;
+}
+
+static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
+{
+ struct intel_hdcp_gsc_message *hdcp_message =
+ i915->display.hdcp.hdcp_message;
+
+ i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
+ kfree(hdcp_message);
+}
+
+int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+{
+ struct i915_hdcp_master *data;
+ int ret;
+
+ data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_lock(&i915->display.hdcp.comp_mutex);
+ i915->display.hdcp.master = data;
+ i915->display.hdcp.master->hdcp_dev = i915->drm.dev;
+ i915->display.hdcp.master->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(i915);
+ mutex_unlock(&i915->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+{
+ intel_hdcp_gsc_free_message(i915);
+ kfree(i915->display.hdcp.master);
+}
+
+static int intel_gsc_send_sync(struct drm_i915_private *i915,
+ struct intel_gsc_mtl_header *header, u64 addr,
+ size_t msg_out_len)
+{
+ struct intel_gt *gt = i915->media_gt;
+ int ret;
+
+ header->flags = 0;
+ ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr,
+ header->message_size,
+ addr,
+ msg_out_len + sizeof(*header));
+ if (ret) {
+ drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Checking validity marker for memory sanity
+ */
+ if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+ drm_err(&i915->drm, "invalid validity marker\n");
+ return -EINVAL;
+ }
+
+ if (header->status != 0) {
+ drm_err(&i915->drm, "header status indicates error %d\n",
+ header->status);
+ return -EINVAL;
+ }
+
+ if (header->flags & GSC_OUTFLAG_MSG_PENDING)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * This function is used both for sending requests and for receiving the
+ * reply messages, so no separate message-retrieval function is required.
+ * We initialize the intel_hdcp_gsc_message structure, then add the GSC CS
+ * memory header as stated in the specs, after which the normal HDCP
+ * payload follows.
+ */
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len)
+{
+ struct intel_gt *gt = i915->media_gt;
+ struct intel_gsc_mtl_header *header;
+ const size_t max_msg_size = PAGE_SIZE - sizeof(*header);
+ struct intel_hdcp_gsc_message *hdcp_message;
+ u64 addr, host_session_id;
+ u32 reply_size, msg_size;
+ int ret, tries = 0;
+
+ if (!intel_uc_uses_gsc_uc(&gt->uc))
+ return -ENODEV;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+ return -ENOSPC;
+
+ hdcp_message = i915->display.hdcp.hdcp_message;
+ header = hdcp_message->hdcp_cmd;
+ addr = i915_ggtt_offset(hdcp_message->vma);
+
+ msg_size = msg_in_len + sizeof(*header);
+ memset(header, 0, msg_size);
+ get_random_bytes(&host_session_id, sizeof(u64));
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP,
+ msg_size, host_session_id);
+ memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len);
+
+ /*
+ * Keep resending the request while the pending bit is set. There is no
+ * need to add a message handle: we reuse the same address, so the header
+ * location is unchanged and already contains the handle. The message is
+ * retried up to 20 times, 50 ms apart.
+ */
+ do {
+ ret = intel_gsc_send_sync(i915, header, addr, msg_out_len);
+
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(50);
+
+ } while (++tries < 20);
+
+ if (ret)
+ goto err;
+
+ /* We use the same memory for the reply, so the header is in the same location */
+ reply_size = header->message_size - sizeof(*header);
+ if (reply_size > msg_out_len) {
+ drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
+ reply_size, (u32)msg_out_len);
+ reply_size = msg_out_len;
+ } else if (reply_size != msg_out_len) {
+ drm_dbg_kms(&i915->drm, "caller unexpected HDCP reply size %u (%d)\n",
+ reply_size, (u32)msg_out_len);
+ }
+
+ memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len);
+
+err:
+ return ret;
+}
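
intel_hdcp_gsc_msg_send() reuses a single GGTT-mapped page for both the request and the reply; the GSC overwrites the request in place, which is why the header is re-read after submission. A sketch of the buffer layout (illustration only, not part of the patch):

/*
 *   offset 0                 : struct intel_gsc_mtl_header
 *   offset sizeof(*header)   : HDCP payload (msg_in on send,
 *                              msg_out once the GSC has replied)
 *   limit  PAGE_SIZE         : hence max_msg_size = PAGE_SIZE - sizeof(*header)
 */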
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
new file mode 100644
index 000000000000..5cc9fd2e88f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_GSC_H__
+#define __INTEL_HDCP_GSC_H__
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+struct intel_hdcp_gsc_message {
+ struct i915_vma *vma;
+ void *hdcp_cmd;
+};
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len);
+int intel_hdcp_gsc_init(struct drm_i915_private *i915);
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915);
+
+#endif /* __INTEL_HDCP_GSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index bac85d88054f..a690a5616506 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -44,6 +44,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
+#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_de.h"
@@ -237,15 +238,11 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
@@ -313,15 +310,11 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -395,15 +388,11 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -471,15 +460,11 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv,
@@ -537,8 +522,7 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
0);
/* Wa_14013475917 */
- if (DISPLAY_VER(dev_priv) == 13 && crtc_state->has_psr &&
- type == DP_SDP_VSC)
+ if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)
return;
val |= hsw_infoframe_enable(type);
@@ -767,6 +751,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
@@ -776,7 +761,11 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+ if (IS_DGFX(i915))
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx");
+ else
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+
if (drm_WARN_ON(encoder->base.dev, ret))
return false;
@@ -1790,7 +1779,7 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata);
if (vbt_max_tmds_clock)
max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
@@ -1988,9 +1977,6 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
bool ycbcr_420_only;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
@@ -2150,7 +2136,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -2238,6 +2224,25 @@ static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state)
!is_power_of_2(crtc_state->uapi.encoder_mask);
}
+static bool source_supports_scrambling(struct intel_encoder *encoder)
+{
+ /*
+ * Gen 10+ supports HDMI 2.0: the max TMDS clock is 594 MHz, and
+ * scrambling is supported.
+ * But there seem to be cases where certain platforms that support
+ * HDMI 2.0 have an HDMI 1.4 retimer chip, and the max TMDS clock is
+ * capped by the VBT to less than 340 MHz.
+ *
+ * In such cases, when an HDMI 2.0 sink is connected, it creates a
+ * problem: the platform and the sink both support scrambling, but the
+ * HDMI 1.4 retimer chip doesn't.
+ *
+ * So decide on scrambling based on the max TMDS clock, taking into
+ * account the restrictions coming from the VBT.
+ */
+ return intel_hdmi_source_max_tmds_clock(encoder) > 340000;
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2252,6 +2257,10 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ if (!connector->interlace_allowed &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return -EINVAL;
+
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_hdmi_sink =
intel_has_hdmi_sink(intel_hdmi, conn_state) &&
@@ -2264,7 +2273,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier = 2;
pipe_config->has_audio =
- intel_hdmi_has_audio(encoder, pipe_config, conn_state);
+ intel_hdmi_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* Try to respect downstream TMDS clock limits first, if
@@ -2295,7 +2305,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) {
+ if (scdc->scrambling.supported && source_supports_scrambling(encoder)) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -2353,7 +2363,7 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- kfree(to_intel_connector(connector)->detect_edid);
+ drm_edid_free(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
@@ -2414,7 +2424,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_wakeref_t wakeref;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
bool connected = false;
struct i2c_adapter *i2c;
@@ -2422,17 +2433,23 @@ intel_hdmi_set_edid(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
- edid = drm_get_edid(connector, i2c);
+ drm_edid = drm_edid_read_ddc(connector, i2c);
- if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ if (!drm_edid && !intel_gmbus_is_forced_bit(i2c)) {
drm_dbg_kms(&dev_priv->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
- edid = drm_get_edid(connector, i2c);
+ drm_edid = drm_edid_read_ddc(connector, i2c);
intel_gmbus_force_bit(i2c, false);
}
- to_intel_connector(connector)->detect_edid = edid;
+ /* Below we depend on display info having been updated */
+ drm_edid_connector_update(connector, drm_edid);
+
+ to_intel_connector(connector)->detect_edid = drm_edid;
+
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
@@ -2508,13 +2525,8 @@ intel_hdmi_force(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
-
- edid = to_intel_connector(connector)->detect_edid;
- if (edid == NULL)
- return 0;
-
- return intel_connector_update_modes(connector, edid);
+ /* drm_edid_connector_update() done in ->detect() or ->force() */
+ return drm_edid_connector_add_modes(connector);
}
static struct i2c_adapter *
@@ -2634,11 +2646,8 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool scrambling)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
- struct i2c_adapter *adapter =
- intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
if (!sink_scrambling->supported)
return true;
@@ -2649,9 +2658,8 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
- return drm_scdc_set_high_tmds_clock_ratio(adapter,
- high_tmds_clock_ratio) &&
- drm_scdc_set_scrambling(adapter, scrambling);
+ return drm_scdc_set_high_tmds_clock_ratio(connector, high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(connector, scrambling);
}
static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
@@ -2843,11 +2851,12 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
enum port port = encoder->port;
u8 ddc_pin;
- ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
if (ddc_pin) {
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2873,8 +2882,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2895,7 +2905,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
- if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) {
+ if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
dig_port->set_infoframes = lspcon_set_infoframes;
@@ -2953,7 +2963,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
ddc);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- connector->interlace_allowed = true;
+ if (DISPLAY_VER(dev_priv) < 12)
+ connector->interlace_allowed = true;
+
connector->stereo_allowed = true;
if (DISPLAY_VER(dev_priv) >= 10)
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 907ab7526cb4..b12900446828 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -389,6 +389,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
+ /* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
+ if (dev_priv->display.hotplug.ignore_long_hpd) {
+ drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ return;
+ }
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -940,4 +947,6 @@ void intel_hpd_debugfs_register(struct drm_i915_private *i915)
i915, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
i915, &i915_hpd_short_storm_ctl_fops);
+ debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
+ &i915->display.hotplug.ignore_long_hpd);
}
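
Editor's note: the new debugfs entry gives tests a way to suppress long-HPD processing without patching the driver. A minimal sketch of the registration, assuming the same field layout as the hunk above:

/* Sketch only: exposing the bool behind i915_ignore_long_hpd. Writing
 * Y/N (or 1/0) to the file flips the flag checked by the hotplug work. */
static void example_register_ignore_long_hpd(struct drm_i915_private *i915,
					     struct dentry *debugfs_root)
{
	debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root,
			    &i915->display.hotplug.ignore_long_hpd);
}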
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index 12a1f4ce1a77..c518efebdf77 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -21,6 +21,9 @@ void intel_hti_init(struct drm_i915_private *i915)
bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
+ if (drm_WARN_ON(&i915->drm, phy == PHY_NONE))
+ return false;
+
return i915->display.hti.state & HDPORT_ENABLED &&
i915->display.hti.state & HDPORT_DDI_USED(phy);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 8aaaef4d7856..5863763de530 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -315,7 +315,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 * intel_lpe_audio_notify() - notify the LPE audio driver of an audio event from i915
* @dev_priv: the i915 drm device private data
- * @pipe: pipe
+ * @cpu_transcoder: CPU transcoder
* @port: port
* @eld : ELD data
* @ls_clock: Link symbol clock in kHz
@@ -324,7 +324,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* Notify lpe audio driver of eld change.
*/
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
unsigned long irqflags;
@@ -344,7 +344,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
- ppdata->pipe = pipe;
+ ppdata->pipe = cpu_transcoder;
ppdata->ls_clock = ls_clock;
ppdata->dp_output = dp_output;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index f848c5038714..0beecac267ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-enum pipe;
enum port;
+enum transcoder;
struct drm_i915_private;
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output);
#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 9ff1c0b223ad..bb3b5355a0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -689,7 +689,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return;
if (!lspcon->active) {
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7bf1bdfd03ec..0de44b3631cd 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -49,7 +49,9 @@
#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
+#include "intel_pps_regs.h"
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
@@ -84,18 +86,18 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct drm_i915_private *i915,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(dev_priv, lvds_reg);
+ val = intel_de_read(i915, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ if (HAS_PCH_CPT(i915))
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
return val & LVDS_PORT_EN;
}
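
Editor's note: REG_FIELD_GET() is the i915 wrapper around FIELD_GET(): given a contiguous mask it masks and shifts in one step, so the open-coded *_SHIFT constants above can be dropped. A standalone model of the idea, using simplified userspace macros rather than the kernel implementations:

#include <stdint.h>
#include <stdio.h>

/* Simplified models of GENMASK()/FIELD_GET(); illustrative only. */
#define GENMASK_U32(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_GET_U32(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

#define LVDS_PIPE_SEL_MASK_CPT	GENMASK_U32(30, 29)	/* bits 30:29 */

int main(void)
{
	uint32_t lvds = 0x20000000;	/* pipe select field = 1, i.e. pipe B */

	printf("LVDS pipe select = %u\n",
	       (unsigned int)FIELD_GET_U32(LVDS_PIPE_SEL_MASK_CPT, lvds));
	return 0;
}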
@@ -103,31 +105,30 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain);
+ wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain);
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
- pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@@ -139,20 +140,20 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->hw.adjusted_mode.flags |= flags;
+ crtc_state->hw.adjusted_mode.flags |= flags;
if (DISPLAY_VER(dev_priv) < 5)
- pipe_config->gmch_pfit.lvds_border_bits =
+ crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
if (DISPLAY_VER(dev_priv) < 4) {
tmp = intel_de_read(dev_priv, PFIT_CONTROL);
- pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -216,41 +217,44 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_CONTROL(0), val);
intel_de_write(dev_priv, PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
intel_de_write(dev_priv, PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
intel_de_write(dev_priv, PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(dev_priv)) {
- assert_fdi_rx_pll_disabled(dev_priv, pipe);
- assert_shared_dpll_disabled(dev_priv,
- pipe_config->shared_dpll);
+ if (HAS_PCH_SPLIT(i915)) {
+ assert_fdi_rx_pll_disabled(i915, pipe);
+ assert_shared_dpll_disabled(i915, crtc_state->shared_dpll);
} else {
- assert_pll_disabled(dev_priv, pipe);
+ assert_pll_disabled(i915, pipe);
}
- intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(i915)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -260,7 +264,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= pipe_config->gmch_pfit.lvds_border_bits;
+ temp |= crtc_state->gmch_pfit.lvds_border_bits;
/*
* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -283,14 +287,14 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/*
* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
- * only controlled through the PIPECONF reg.
+ * only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(dev_priv) == 4) {
+ if (DISPLAY_VER(i915) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
*/
- if (pipe_config->dither && pipe_config->pipe_bpp == 18)
+ if (crtc_state->dither && crtc_state->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -301,7 +305,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(dev_priv, lvds_encoder->reg, temp);
+ intel_de_write(i915, lvds_encoder->reg, temp);
}
/*
@@ -309,25 +313,22 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
*/
static void intel_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power on\n");
- intel_backlight_enable(pipe_config, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
}
static void intel_disable_lvds(struct intel_atomic_state *state,
@@ -338,14 +339,12 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power off\n");
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
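
Editor's note: several hunks in this file collapse read/or/write and read/and-not/write pairs into intel_de_rmw(i915, reg, clear, set). The sketch below captures the intended semantics; treat the "skip the write when nothing changed" detail as an assumption about the real helper rather than a guarantee.

/* Sketch only: read-modify-write of a display register. */
static u32 example_rmw(struct drm_i915_private *i915, i915_reg_t reg,
		       u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_de_write(i915, reg, val);

	return old;
}

So intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON) sets a bit and intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0) clears it, matching the enable and disable paths above.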
@@ -386,19 +385,19 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
}
static enum drm_mode_status
-intel_lvds_mode_valid(struct drm_connector *connector,
+intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_display_mode *fixed_mode =
- intel_panel_fixed_mode(intel_connector, mode);
- int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ intel_panel_fixed_mode(connector, mode);
+ int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -408,23 +407,21 @@ intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
+static int intel_lvds_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_lvds_encoder *lvds_encoder =
- to_lvds_encoder(intel_encoder);
- struct intel_connector *intel_connector =
- lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *connector = lvds_encoder->attached_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int lvds_bpp;
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
- drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
+ drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -433,14 +430,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
else
lvds_bpp = 6*3;
- if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- drm_dbg_kms(&dev_priv->drm,
+ if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
+ drm_dbg_kms(&i915->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
- pipe_config->pipe_bpp = lvds_bpp;
+ crtc_state->pipe_bpp, lvds_bpp);
+ crtc_state->pipe_bpp = lvds_bpp;
}
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/*
* We have timings from the BIOS for the panel, put them in
@@ -448,17 +445,17 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ ret = intel_panel_compute_config(connector, adjusted_mode);
if (ret)
return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv))
- pipe_config->has_pch_encoder = true;
+ if (HAS_PCH_SPLIT(i915))
+ crtc_state->has_pch_encoder = true;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(crtc_state, conn_state);
if (ret)
return ret;
@@ -474,15 +471,19 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
-static int intel_lvds_get_modes(struct drm_connector *connector)
+static int intel_lvds_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
+
+ /* Use panel fixed edid if we have one */
+ if (!IS_ERR_OR_NULL(fixed_edid)) {
+ drm_edid_connector_update(&connector->base, fixed_edid);
- /* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- return drm_add_edid_modes(connector, intel_connector->edid);
+ return drm_edid_connector_add_modes(&connector->base);
+ }
- return intel_panel_get_modes(intel_connector);
+ return intel_panel_get_modes(connector);
}
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -581,12 +582,12 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "AOpen i45GMx-I",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
- DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
- },
- },
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
@@ -603,14 +604,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
},
{
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Clientron E830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
- },
- },
- {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
@@ -760,11 +761,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(&i915->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -772,24 +773,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (dev_priv->params.lvds_channel_mode > 0)
- return dev_priv->params.lvds_channel_mode == 2;
+ if (i915->params.lvds_channel_mode > 0)
+ return i915->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -804,8 +805,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(dev_priv, lvds_encoder->reg);
- if (HAS_PCH_CPT(dev_priv))
+ val = intel_de_read(i915, lvds_encoder->reg);
+ if (HAS_PCH_CPT(i915))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
@@ -822,56 +823,54 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @dev_priv: i915 device
+ * @i915: i915 device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *dev_priv)
+void intel_lvds_init(struct drm_i915_private *i915)
{
struct intel_lvds_encoder *lvds_encoder;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct edid *edid;
+ struct intel_connector *connector;
+ const struct drm_edid *drm_edid;
+ struct intel_encoder *encoder;
i915_reg_t lvds_reg;
u32 lvds;
u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support,
+ drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->display.vbt.int_lvds_support) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!i915->display.vbt.int_lvds_support) {
+ drm_dbg_kms(&i915->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(i915))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(dev_priv, lvds_reg);
+ lvds = intel_de_read(i915, lvds_reg);
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(i915)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
pin = GMBUS_PIN_PANEL;
- if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
+ if (!intel_bios_is_lvds_present(i915, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -879,57 +878,55 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = intel_connector;
+ lvds_encoder->attached_connector = connector;
+ encoder = &lvds_encoder->base;
- intel_encoder = &lvds_encoder->base;
- encoder = &intel_encoder->base;
- connector = &intel_connector->base;
- drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs,
+ drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
- intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_enable = intel_pre_enable_lvds;
- intel_encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- intel_encoder->disable = pch_disable_lvds;
- intel_encoder->post_disable = pch_post_disable_lvds;
+ encoder->enable = intel_enable_lvds;
+ encoder->pre_enable = intel_pre_enable_lvds;
+ encoder->compute_config = intel_lvds_compute_config;
+ if (HAS_PCH_SPLIT(i915)) {
+ encoder->disable = pch_disable_lvds;
+ encoder->post_disable = pch_post_disable_lvds;
} else {
- intel_encoder->disable = gmch_disable_lvds;
+ encoder->disable = gmch_disable_lvds;
}
- intel_encoder->get_hw_state = intel_lvds_get_hw_state;
- intel_encoder->get_config = intel_lvds_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_lvds_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
- intel_connector_attach_encoder(intel_connector, intel_encoder);
-
- intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
- intel_encoder->port = PORT_NONE;
- intel_encoder->cloneable = 0;
- if (DISPLAY_VER(dev_priv) < 4)
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->get_hw_state = intel_lvds_get_hw_state;
+ encoder->get_config = intel_lvds_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_lvds_shutdown;
+ connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(connector, encoder);
+
+ encoder->type = INTEL_OUTPUT_LVDS;
+ encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ encoder->port = PORT_NONE;
+ encoder->cloneable = 0;
+ if (DISPLAY_VER(i915) < 4)
+ encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
- drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs);
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB;
lvds_encoder->reg = lvds_reg;
- intel_lvds_add_properties(connector);
+ intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -944,56 +941,63 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
- edid = drm_get_edid_switcheroo(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- else
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
- } else {
+ mutex_lock(&i915->drm.mode_config.mutex);
+ if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
+ const struct edid *edid;
+
+ /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */
+ edid = drm_get_edid_switcheroo(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
+ if (edid) {
+ drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ } else {
+ drm_edid = NULL;
}
} else {
- edid = ERR_PTR(-ENOENT);
+ drm_edid = drm_edid_read_ddc(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
}
- intel_connector->edid = edid;
-
- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
- IS_ERR(edid) ? NULL : edid);
+ if (drm_edid) {
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ drm_edid = ERR_PTR(-ENOENT);
+ }
+ intel_bios_init_panel_late(i915, &connector->panel, NULL,
+ IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* Failed to get EDID, what about VBT? */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
/*
* If we didn't get a fixed mode from EDID or VBT, try checking
* if the panel is already turned on. If so, assume that
* whatever is currently programmed is the correct mode.
*/
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&i915->drm.mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
+ if (!intel_panel_preferred_fixed_mode(connector))
goto failed;
- intel_panel_init(intel_connector);
+ intel_panel_init(connector, drm_edid);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1001,10 +1005,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
failed:
- drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
- drm_connector_cleanup(connector);
- drm_encoder_cleanup(encoder);
+ drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(&connector->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
- intel_connector_free(intel_connector);
+ intel_connector_free(connector);
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds_regs.h b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
new file mode 100644
index 000000000000..47c1832819ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_REGS_H__
+#define __INTEL_LVDS_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* LVDS port control */
+#define LVDS _MMIO(0x61180)
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN REG_BIT(31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPE_SEL_MASK REG_BIT(30)
+#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe))
+#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe))
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER REG_BIT(25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY REG_BIT(21)
+#define LVDS_HSYNC_POLARITY REG_BIT(20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE REG_BIT(15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0)
+#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6)
+#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0)
+#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4)
+#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0)
+#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2)
+#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0)
+#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3)
+
+#define PCH_LVDS _MMIO(0xe1180)
+#define LVDS_DETECTED REG_BIT(1)
+
+#endif /* __INTEL_LVDS_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
index 0e8248bce52d..0306ade2bc30 100644
--- a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
@@ -142,7 +142,9 @@
#define FIA1_BASE 0x163000
#define FIA2_BASE 0x16E000
#define FIA3_BASE 0x16F000
-#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \
+ FIA1_BASE, FIA1_BASE,\
+ FIA2_BASE, FIA3_BASE)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
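
Editor's note: the FIA base lookup switches from a variadic _PICK() table to _PICK_EVEN_2RANGES(), which derives addresses from two evenly strided ranges instead of carrying an array. A standalone model follows; the simplified macros are assumed to match the kernel's definition only in spirit.

#include <stdio.h>

/* Simplified model: index i picks from range (a, b) below the split point,
 * otherwise from range (c, d); each range is an even stride. */
#define PICK_EVEN(i, a, b)	((a) + (i) * ((b) - (a)))
#define PICK_EVEN_2RANGES(i, split, a, b, c, d) \
	((i) < (split) ? PICK_EVEN(i, a, b) : PICK_EVEN((i) - (split), c, d))

#define FIA1_BASE 0x163000
#define FIA2_BASE 0x16E000
#define FIA3_BASE 0x16F000

int main(void)
{
	for (int fia = 0; fia < 3; fia++)
		printf("FIA%d base = 0x%x\n", fia + 1,
		       PICK_EVEN_2RANGES(fia, 1, FIA1_BASE, FIA1_BASE,
					 FIA2_BASE, FIA3_BASE));
	return 0;
}

This prints the same three bases the old _PICK() table enumerated explicitly.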
/* ICL PHY DFLEX registers */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 96395bfbd41d..134b943f1953 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_color.h"
@@ -21,9 +22,12 @@
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
-#include "intel_pm.h"
+#include "intel_vblank.h"
+#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
@@ -96,7 +100,6 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
intel_fbc_disable(crtc);
intel_update_watermarks(i915);
- intel_disable_shared_dpll(crtc_state);
intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
@@ -234,12 +237,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!crtc_state->hw.active && !HAS_GMCH(i915))
- return;
-
/*
- * We start out with underrun reporting disabled to avoid races.
- * For correct bookkeeping mark this on active crtcs.
+ * We start out with underrun reporting disabled on active
+ * pipes to avoid races.
*
 * Also on gmch platforms we don't have any hardware bits to
* disable the underrun reporting. Which means we need to start
@@ -250,19 +250,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
- crtc->cpu_fifo_underrun_disabled = true;
-
- /*
- * We track the PCH trancoder underrun reporting state
- * within the crtc. With crtc for pipe A housing the underrun
- * reporting state for PCH transcoder A, crtc for pipe B housing
- * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
- * and marking underrun reporting as disabled for the non-existing
- * PCH transcoders B and C would prevent enabling the south
- * error interrupt (see cpt_can_enable_serr_int()).
- */
- if (intel_has_pch_trancoder(i915, crtc->pipe))
- crtc->pch_fifo_underrun_disabled = true;
+ intel_init_fifo_underrun_reporting(i915, crtc,
+ !crtc_state->hw.active &&
+ !HAS_GMCH(i915));
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -647,17 +637,14 @@ static void intel_early_display_was(struct drm_i915_private *i915)
* Also known as Wa_14010480278.
*/
if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_write(i915, GEN9_CLKGATE_DIS_0,
- intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
- if (IS_HASWELL(i915)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- intel_de_write(i915, CHICKEN_PAR1_1,
- intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ if (IS_HASWELL(i915))
+ intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
/* Display WA #1142:kbl,cfl,cml */
@@ -698,8 +685,10 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
drm_crtc_vblank_reset(&crtc->base);
- if (crtc_state->hw.active)
+ if (crtc_state->hw.active) {
+ intel_dmc_enable_pipe(i915, crtc->pipe);
intel_crtc_vblank_on(crtc_state);
+ }
}
intel_fbc_sanitize(i915);
@@ -721,18 +710,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_dpll_sanitize_state(i915);
- if (IS_G4X(i915)) {
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
- } else if (DISPLAY_VER(i915) >= 9) {
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
- } else if (HAS_PCH_SPLIT(i915)) {
- ilk_wm_get_hw_state(i915);
- }
+ intel_wm_get_hw_state(i915);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e0184745632c..b7973a05d022 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -1101,41 +1101,34 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* The EDID in the OpRegion, or NULL if there is none or it's invalid.
*
*/
-struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_opregion *opregion = &i915->display.opregion;
- const void *in_edid;
- const struct edid *edid;
- struct edid *new_edid;
+ const struct drm_edid *drm_edid;
+ const void *edid;
int len;
if (!opregion->asle_ext)
return NULL;
- in_edid = opregion->asle_ext->bddc;
+ edid = opregion->asle_ext->bddc;
/* Validity corresponds to number of 128-byte blocks */
len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128;
- if (!len || !memchr_inv(in_edid, 0, len))
+ if (!len || !memchr_inv(edid, 0, len))
return NULL;
- edid = in_edid;
+ drm_edid = drm_edid_alloc(edid, len);
- if (len < EDID_LENGTH * (1 + edid->extensions)) {
- drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5): too short\n");
- return NULL;
- }
- new_edid = drm_edid_duplicate(edid);
- if (!new_edid)
- return NULL;
- if (!drm_edid_is_valid(new_edid)) {
- kfree(new_edid);
+ if (!drm_edid_valid(drm_edid)) {
drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
- return NULL;
+ drm_edid_free(drm_edid);
+ drm_edid = NULL;
}
- return new_edid;
+
+ return drm_edid;
}
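
Editor's note: the OpRegion path now wraps the raw bytes with drm_edid_alloc() and validates them with drm_edid_valid(), replacing the hand-rolled length check plus drm_edid_duplicate(). A hedged sketch of that ownership pattern (the example_* name is illustrative):

/* Sketch only: wrap and validate a raw EDID blob with the drm_edid API. */
static const struct drm_edid *example_wrap_raw_edid(const void *buf, size_t len)
{
	const struct drm_edid *drm_edid;

	drm_edid = drm_edid_alloc(buf, len);
	if (!drm_edid_valid(drm_edid)) {
		drm_edid_free(drm_edid);
		return NULL;
	}

	return drm_edid;
}

drm_edid_valid() covers the length and extension-count sanity checks the old code open-coded, and callers release the handle with drm_edid_free().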
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
@@ -1166,13 +1159,10 @@ void intel_opregion_register(struct drm_i915_private *i915)
intel_opregion_resume(i915);
}
-void intel_opregion_resume(struct drm_i915_private *i915)
+static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
- if (!opregion->header)
- return;
-
if (opregion->acpi) {
intel_didl_outputs(i915);
intel_setup_cadls(i915);
@@ -1193,18 +1183,24 @@ void intel_opregion_resume(struct drm_i915_private *i915)
/* Some platforms abuse the _DSM to enable MUX */
intel_dsm_get_bios_data_funcs_supported(i915);
-
- intel_opregion_notify_adapter(i915, PCI_D0);
}
-void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+void intel_opregion_resume(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
- intel_opregion_notify_adapter(i915, state);
+ if (HAS_DISPLAY(i915))
+ intel_opregion_resume_display(i915);
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+static void intel_opregion_suspend_display(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
@@ -1215,6 +1211,19 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
opregion->acpi->drdy = 0;
}
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (HAS_DISPLAY(i915))
+ intel_opregion_suspend_display(i915);
+}
+
void intel_opregion_unregister(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
@@ -1228,6 +1237,14 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
unregister_acpi_notifier(&opregion->acpi_notifier);
opregion->acpi_notifier.notifier_call = NULL;
}
+}
+
+void intel_opregion_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
/* just clear all opregion memory pointers now */
memunmap(opregion->header);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 2f261f985400..fd2ea8ef0fa2 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -60,6 +60,7 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_cleanup(struct drm_i915_private *i915);
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
@@ -74,7 +75,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
pci_power_t state);
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-struct edid *intel_opregion_get_edid(struct intel_connector *connector);
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
@@ -85,6 +86,10 @@ static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
return 0;
}
+static inline void intel_opregion_cleanup(struct drm_i915_private *i915)
+{
+}
+
static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
{
}
@@ -123,7 +128,7 @@ static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
return -ENODEV;
}
-static inline struct edid *
+static inline const struct drm_edid *
intel_opregion_get_edid(struct intel_connector *connector)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 1640726bfbf6..ce2a34a25211 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -31,12 +31,15 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
+#include <drm/drm_edid.h>
+
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_quirks.h"
@@ -661,10 +664,22 @@ intel_panel_mode_valid(struct intel_connector *connector,
return MODE_OK;
}
-int intel_panel_init(struct intel_connector *connector)
+void intel_panel_init_alloc(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ connector->panel.vbt.panel_type = -1;
+ connector->panel.vbt.backlight.controller = -1;
+ INIT_LIST_HEAD(&panel->fixed_modes);
+}
+
+int intel_panel_init(struct intel_connector *connector,
+ const struct drm_edid *fixed_edid)
{
struct intel_panel *panel = &connector->panel;
+ panel->fixed_edid = fixed_edid;
+
intel_backlight_init_funcs(panel);
if (!has_drrs_modes(connector))
@@ -683,6 +698,9 @@ void intel_panel_fini(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
struct drm_display_mode *fixed_mode, *next;
+ if (!IS_ERR_OR_NULL(panel->fixed_edid))
+ drm_edid_free(panel->fixed_edid);
+
intel_backlight_destroy(panel);
intel_bios_fini_panel(panel);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5c5b5b7f95b6..15a8c897b33f 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -13,12 +13,15 @@ enum drrs_type;
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
+struct drm_edid;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
-int intel_panel_init(struct intel_connector *connector);
+void intel_panel_init_alloc(struct intel_connector *connector);
+int intel_panel_init(struct intel_connector *connector,
+ const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index cecc0d007cf3..2411fe4dee8b 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -9,7 +9,9 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_fdi_regs.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pps.h"
@@ -219,20 +221,20 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -266,7 +268,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -278,15 +280,15 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
* that in pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
- val &= ~PIPECONF_BPC_MASK;
+ val &= ~TRANSCONF_BPC_MASK;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
else
- val |= pipeconf_val & PIPECONF_BPC_MASK;
+ val |= pipeconf_val & TRANSCONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) {
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
@@ -307,7 +309,6 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
- u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -317,21 +318,16 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(dev_priv))
/* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, reg, val);
- }
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void ilk_pch_pre_enable(struct intel_atomic_state *state,
@@ -414,7 +410,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -456,21 +452,14 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
ilk_disable_pch_transcoder(crtc);
if (HAS_PCH_CPT(dev_priv)) {
- i915_reg_t reg;
- u32 temp;
-
/* disable TRANS_DP_CTL */
- reg = TRANS_DP_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_PORT_SEL_MASK);
- temp |= TRANS_DP_PORT_SEL_NONE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
+ TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
@@ -565,9 +554,9 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK)
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
@@ -580,20 +569,14 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
- u32 val;
-
- val = intel_de_read(dev_priv, LPT_TRANSCONF);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 08a94365b7d1..f4c09cc37a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -12,19 +12,13 @@
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
@@ -467,24 +461,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->pch_ssc_use = 0;
+ dev_priv->display.dpll.pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->pch_ssc_use)
+ if (dev_priv->display.dpll.pch_ssc_use)
return;
if (has_fdi) {
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index e9774670e3f6..8d3ea8d7b737 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -72,14 +72,13 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source)
+static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source)
{
struct intel_encoder *encoder;
struct intel_crtc *crtc;
struct intel_digital_port *dig_port;
- int ret = 0;
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -121,8 +120,6 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
}
}
drm_modeset_unlock_all(&dev_priv->drm);
-
- return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -132,11 +129,8 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
{
bool need_stable_symbols = false;
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -200,11 +194,8 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 76be796df255..bb6ea7de5c61 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -107,7 +107,7 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->stolen_usable_size)
+ size * 2 > i915->dsm.usable_size)
return NULL;
obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 9bbf41a076f7..7f9926672a6a 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -13,7 +13,9 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pps.h"
+#include "intel_pps_regs.h"
#include "intel_quirks.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
@@ -22,6 +24,40 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+static const char *pps_name(struct drm_i915_private *i915,
+ struct intel_pps *pps)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ switch (pps->pps_pipe) {
+ case INVALID_PIPE:
+ /*
+ * FIXME would be nice if we can guarantee
+ * to always have a valid PPS when calling this.
+ */
+ return "PPS <none>";
+ case PIPE_A:
+ return "PPS A";
+ case PIPE_B:
+ return "PPS B";
+ default:
+ MISSING_CASE(pps->pps_pipe);
+ break;
+ }
+ } else {
+ switch (pps->pps_idx) {
+ case 0:
+ return "PPS 0";
+ case 1:
+ return "PPS 1";
+ default:
+ MISSING_CASE(pps->pps_idx);
+ break;
+ }
+ }
+
+ return "PPS <invalid>";
+}
+
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -60,15 +96,15 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (drm_WARN(&dev_priv->drm,
intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name))
+ "skipping %s kick due to [ENCODER:%d:%s] being active\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name))
return;
drm_dbg_kms(&dev_priv->drm,
- "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "kicking %s for [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -95,7 +131,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
drm_err(&dev_priv->drm,
- "Failed to force on pll for pipe %c!\n",
+ "Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
return;
}
@@ -190,10 +226,9 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
intel_dp->pps.pps_pipe = pipe;
drm_dbg_kms(&dev_priv->drm,
- "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps.pps_pipe),
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "picked %s for [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name);
/* init power sequencer on this pipe and port */
pps_init_delays(intel_dp);
@@ -212,8 +247,7 @@ static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_connector *connector = intel_dp->attached_connector;
- int backlight_controller = connector->panel.vbt.backlight.controller;
+ int pps_idx = intel_dp->pps.pps_idx;
lockdep_assert_held(&dev_priv->display.pps.mutex);
@@ -221,7 +255,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps.pps_reset)
- return backlight_controller;
+ return pps_idx;
intel_dp->pps.pps_reset = false;
@@ -231,34 +265,29 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
*/
pps_init_registers(intel_dp, false);
- return backlight_controller;
+ return pps_idx;
}
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
- enum pipe pipe);
+typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
{
- return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+ return intel_de_read(dev_priv, PP_STATUS(pps_idx)) & PP_ON;
}
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
{
- return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, PP_CONTROL(pps_idx)) & EDP_FORCE_VDD;
}
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
{
return true;
}
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
- enum port port,
- vlv_pipe_check pipe_check)
+ enum port port, pps_check check)
{
enum pipe pipe;
@@ -269,7 +298,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
if (port_sel != PANEL_PORT_SELECT_VLV(port))
continue;
- if (!pipe_check(dev_priv, pipe))
+ if (!check(dev_priv, pipe))
continue;
return pipe;
@@ -290,30 +319,117 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_pp_on);
+ pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_vdd_on);
+ pps_has_vdd_on);
/* didn't find one? pick one with just the correct port */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_any);
+ pps_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
drm_dbg_kms(&dev_priv->drm,
- "no initial power sequencer for [ENCODER:%d:%s]\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "[ENCODER:%d:%s] no initial power sequencer\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
return;
}
drm_dbg_kms(&dev_priv->drm,
- "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name,
- pipe_name(intel_dp->pps.pps_pipe));
+ "[ENCODER:%d:%s] initial power sequencer: %s\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
+}
+
+static int intel_num_pps(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return 2;
+
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ return 2;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ return 1;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ return 2;
+
+ return 1;
+}
+
+static bool intel_pps_is_valid(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (intel_dp->pps.pps_idx == 1 &&
+ INTEL_PCH_TYPE(i915) >= PCH_ICP &&
+ INTEL_PCH_TYPE(i915) < PCH_MTP)
+ return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+
+ return true;
+}
+
+static int
+bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
+{
+ int pps_idx, pps_num = intel_num_pps(i915);
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ if (check(i915, pps_idx))
+ return pps_idx;
+ }
+
+ return -1;
+}
+
+static bool
+pps_initial_setup(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ lockdep_assert_held(&i915->display.pps.mutex);
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_initial_power_sequencer_setup(intel_dp);
+ return true;
+ }
+
+ /* first ask the VBT */
+ if (intel_num_pps(i915) > 1)
+ intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
+ else
+ intel_dp->pps.pps_idx = 0;
+
+ if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
+ intel_dp->pps.pps_idx = -1;
+
+ /* VBT wasn't parsed yet? pick one where the panel is on */
+ if (intel_dp->pps.pps_idx < 0)
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_idx < 0)
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
+ /* didn't find one? pick any */
+ if (intel_dp->pps.pps_idx < 0) {
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
+ encoder->base.base.id, encoder->base.name,
+ pps_name(i915, &intel_dp->pps));
+ } else {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] initial power sequencer: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ pps_name(i915, &intel_dp->pps));
+ }
+
+ return intel_pps_is_valid(intel_dp);
}
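
pps_initial_setup() above picks a power sequencer index in a fixed fallback order: the VBT backlight controller when there is more than one PPS, then a PPS whose panel power is already on, then one with VDD forced on, then any. A stand-alone sketch of that ordering; pps_state[] is made-up test data standing in for the PP_STATUS/PP_CONTROL reads done by pps_has_pp_on()/pps_has_vdd_on().

#include <stdio.h>

#define PP_ON	(1 << 0)
#define VDD_ON	(1 << 1)

/* hypothetical hardware state for two power sequencers */
static const unsigned int pps_state[2] = { 0, VDD_ON };

static int find_pps(int vbt_idx)
{
	int i;

	if (vbt_idx >= 0 && vbt_idx < 2)	/* valid VBT index wins */
		return vbt_idx;

	for (i = 0; i < 2; i++)			/* panel already powered on? */
		if (pps_state[i] & PP_ON)
			return i;

	for (i = 0; i < 2; i++)			/* VDD forced on? */
		if (pps_state[i] & VDD_ON)
			return i;

	return 0;				/* fall back to "any" */
}

int main(void)
{
	printf("picked PPS %d\n", find_pps(-1));	/* prints: picked PPS 1 */
	return 0;
}
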
void intel_pps_reset_all(struct drm_i915_private *dev_priv)
@@ -364,14 +480,16 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pps_idx = 0;
+ int pps_idx;
memset(regs, 0, sizeof(*regs));
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- pps_idx = bxt_power_sequencer_idx(intel_dp);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pps_idx = vlv_power_sequencer_pipe(intel_dp);
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else
+ pps_idx = intel_dp->pps.pps_idx;
regs->pp_ctrl = PP_CONTROL(pps_idx);
regs->pp_stat = PP_STATUS(pps_idx);
@@ -435,21 +553,27 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
drm_WARN(&dev_priv->drm, 1,
- "eDP powered off while attempting aux channel communication.\n");
- drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
}
}
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
@@ -460,10 +584,10 @@ void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
+ u32 mask, u32 value)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->display.pps.mutex);
@@ -474,7 +598,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
drm_dbg_kms(&dev_priv->drm,
- "mask %08x value %08x status %08x control %08x\n",
+ "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
mask, value,
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
@@ -482,7 +608,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
mask, value, 5000))
drm_err(&dev_priv->drm,
- "Panel status timeout: status %08x control %08x\n",
+ "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
@@ -492,26 +620,35 @@ static void wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(i915, &intel_dp->pps));
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(i915, &intel_dp->pps));
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(i915, &intel_dp->pps));
/* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
@@ -598,9 +735,12 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -608,12 +748,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
intel_de_write(dev_priv, pp_ctrl_reg, pp);
intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
/*
@@ -621,9 +760,9 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
if (!edp_have_panel_power(intel_dp)) {
drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] panel power wasn't enabled\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
msleep(intel_dp->pps.panel_power_up_delay);
}
@@ -638,6 +777,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
intel_wakeref_t wakeref;
bool vdd;
@@ -647,9 +787,10 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_intel_pps_lock(intel_dp, wakeref)
vdd = intel_pps_vdd_on_unlocked(intel_dp);
- I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(i915, &intel_dp->pps));
}
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
@@ -667,9 +808,9 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -681,7 +822,9 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
intel_de_posting_read(dev_priv, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
@@ -756,9 +899,10 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
intel_dp->pps.want_panel_vdd = false;
@@ -779,14 +923,16 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
+ "[ENCODER:%d:%s] %s panel power already on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps)))
return;
wait_panel_power_cycle(intel_dp);
@@ -840,12 +986,14 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
- "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -980,9 +1128,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* from a port.
*/
drm_dbg_kms(&dev_priv->drm,
- "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "detaching %s from [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name);
intel_de_write(dev_priv, pp_on_reg, 0);
intel_de_posting_read(dev_priv, pp_on_reg);
@@ -1000,7 +1148,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ "stealing PPS %c from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1008,7 +1156,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
continue;
drm_dbg_kms(&dev_priv->drm,
- "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ "stealing PPS %c from [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1053,9 +1201,9 @@ void vlv_pps_init(struct intel_encoder *encoder,
intel_dp->pps.pps_pipe = crtc->pipe;
drm_dbg_kms(&dev_priv->drm,
- "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
- encoder->base.name);
+ "initializing %s for [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ encoder->base.base.id, encoder->base.name);
/* init power sequencer on this pipe and port */
pps_init_delays(intel_dp);
@@ -1079,7 +1227,9 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
* indefinitely.
*/
drm_dbg_kms(&dev_priv->drm,
- "VDD left on by BIOS, adjusting state tracking\n");
+ "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
@@ -1386,17 +1536,13 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
- if (i915_mmio_reg_valid(regs.pp_div)) {
+ if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(dev_priv, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
+ else
+ intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->t11_t12, 1000)));
drm_dbg_kms(&dev_priv->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
@@ -1432,10 +1578,10 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp)
}
}
-void intel_pps_init(struct intel_dp *intel_dp)
+bool intel_pps_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
intel_wakeref_t wakeref;
+ bool ret;
intel_dp->pps.initializing = true;
INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
@@ -1443,13 +1589,36 @@ void intel_pps_init(struct intel_dp *intel_dp)
pps_init_timestamps(intel_dp);
with_intel_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- vlv_initial_power_sequencer_setup(intel_dp);
+ ret = pps_initial_setup(intel_dp);
pps_init_delays(intel_dp);
pps_init_registers(intel_dp, false);
pps_vdd_init(intel_dp);
}
+
+ return ret;
+}
+
+static void pps_init_late(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return;
+
+ if (intel_num_pps(i915) < 2)
+ return;
+
+ drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
+ intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
+ "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
+
+ if (connector->panel.vbt.backlight.controller >= 0)
+ intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
}
void intel_pps_init_late(struct intel_dp *intel_dp)
@@ -1458,6 +1627,8 @@ void intel_pps_init_late(struct intel_dp *intel_dp)
with_intel_pps_lock(intel_dp, wakeref) {
/* Reinit delays after per-panel info has been parsed from VBT */
+ pps_init_late(intel_dp);
+
memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
pps_init_delays(intel_dp);
pps_init_registers(intel_dp, false);
@@ -1480,10 +1651,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
* This w/a is needed at least on CPT/PPT, but to be sure apply it
* everywhere where registers can be write protected.
*/
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_num = 2;
- else
- pps_num = 1;
+ pps_num = intel_num_pps(dev_priv);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index a3a56f903f26..a2c2467e3c22 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -40,7 +40,7 @@ void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp);
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
-void intel_pps_init(struct intel_dp *intel_dp);
+bool intel_pps_init(struct intel_dp *intel_dp);
void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
void intel_pps_reset_all(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_pps_regs.h b/drivers/gpu/drm/i915/display/intel_pps_regs.h
new file mode 100644
index 000000000000..60edd2a27100
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps_regs.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_REGS_H__
+#define __INTEL_PPS_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Panel power sequencing */
+#define PPS_BASE 0x61200
+#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
+#define PCH_PPS_BASE 0xC7200
+
+#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \
+ PPS_BASE + (reg) + \
+ (pps_idx) * 0x100)
+
+#define _PP_STATUS 0x61200
+#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
+#define PP_ON REG_BIT(31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY REG_BIT(30)
+#define PP_SEQUENCE_MASK REG_GENMASK(29, 28)
+#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0)
+#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1)
+#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2)
+#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27)
+#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0)
+#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1)
+#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2)
+#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3)
+#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8)
+#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9)
+#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa)
+#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb)
+#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
+
+#define _PP_CONTROL 0x61204
+#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL)
+#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
+#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
+#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define EDP_FORCE_VDD REG_BIT(3)
+#define EDP_BLC_ENABLE REG_BIT(2)
+#define PANEL_POWER_RESET REG_BIT(1)
+#define PANEL_POWER_ON REG_BIT(0)
+
+#define _PP_ON_DELAYS 0x61208
+#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
+#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
+#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
+#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
+#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2)
+#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3)
+#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port)
+#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
+
+#define _PP_OFF_DELAYS 0x6120C
+#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
+#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
+
+#define _PP_DIVISOR 0x61210
+#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR)
+#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
+#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
+
+#endif /* __INTEL_PPS_REGS_H__ */
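
All of the PP_* register macros above resolve through _MMIO_PPS(), which rebases the legacy 0x61200 offsets onto the platform's display.pps.mmio_base and spaces instances 0x100 apart. A worked example of that arithmetic, assuming a platform whose PPS block lives at PCH_PPS_BASE (the base selection itself is platform code outside this header).

#include <stdio.h>

#define PPS_BASE	0x61200
#define PCH_PPS_BASE	0xC7200
#define _PP_STATUS	0x61200
#define _PP_CONTROL	0x61204

/* mirrors _MMIO_PPS(pps_idx, reg) without the _MMIO() wrapper */
static unsigned int mmio_pps(unsigned int mmio_base, int pps_idx, unsigned int reg)
{
	return mmio_base - PPS_BASE + reg + pps_idx * 0x100;
}

int main(void)
{
	printf("PP_STATUS(0)  = 0x%X\n", mmio_pps(PCH_PPS_BASE, 0, _PP_STATUS));	/* 0xC7200 */
	printf("PP_STATUS(1)  = 0x%X\n", mmio_pps(PCH_PPS_BASE, 1, _PP_STATUS));	/* 0xC7300 */
	printf("PP_CONTROL(1) = 0x%X\n", mmio_pps(PCH_PPS_BASE, 1, _PP_CONTROL));	/* 0xC7304 */
	return 0;
}
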
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 5b678916e6db..6badfff2b4a2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -24,17 +24,17 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
-#include "display/intel_dp.h"
-
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "skl_universal_plane.h"
@@ -153,7 +153,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t imr_reg;
- u32 mask, val;
+ u32 mask;
if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
@@ -165,10 +165,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- val = intel_de_read(dev_priv, imr_reg);
- val &= ~psr_irq_mask_get(intel_dp);
- val |= ~mask;
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
}
static void psr_event_print(struct drm_i915_private *i915,
@@ -246,8 +243,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- u32 val;
-
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
@@ -261,9 +256,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again so we don't care about unmask the interruption
* or unset irq_aux_error.
*/
- val = intel_de_read(dev_priv, imr_reg);
- val |= psr_irq_psr_error_bit_get(intel_dp);
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
schedule_work(&intel_dp->psr.work);
}
@@ -527,6 +520,17 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
return val;
}
+static int psr2_block_count_lines(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr.io_wake_lines < 9 &&
+ intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
+}
+
+static int psr2_block_count(struct intel_dp *intel_dp)
+{
+ return psr2_block_count_lines(intel_dp) / 4;
+}
+
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -543,6 +547,13 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (psr2_block_count(intel_dp) > 2)
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+ else
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ }
+
/* Wa_22012278275:adl-p */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
@@ -559,31 +570,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
* comments bellow for more information
*/
- u32 tmp, lines = 7;
-
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ u32 tmp;
- tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
val |= tmp;
- tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
val |= tmp;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- /*
- * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
- * values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resolution mode needs to decrease
- * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
- * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
- */
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= EDP_PSR2_FAST_WAKE(7);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -592,12 +593,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- /* Wa_1408330847 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK);
-
tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
@@ -638,13 +633,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
- val &= ~EDP_PSR2_IDLE_FRAME_MASK;
- val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+ intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
+ EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
@@ -709,6 +701,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
{
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 exit_scanlines;
/*
@@ -725,7 +718,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -766,13 +759,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- /* Wa_14010254185 Wa_14010103792 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
- return false;
- }
-
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -797,7 +783,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
return intel_dp->psr.su_y_granularity == 4;
/*
- * adl_p and display 14+ platforms has 1 line granularity.
+ * adl_p and mtl platforms have 1 line granularity.
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
@@ -843,6 +829,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ u8 max_wake_lines;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ io_wake_time = 42;
+ /*
+ * According to Bspec it's 42us, but based on testing
+ * it is not enough -> use 45 us.
+ */
+ fast_wake_time = 45;
+ max_wake_lines = 12;
+ } else {
+ io_wake_time = 50;
+ fast_wake_time = 32;
+ max_wake_lines = 8;
+ }
+
+ io_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, io_wake_time);
+ fast_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+ if (io_wake_lines > max_wake_lines ||
+ fast_wake_lines > max_wake_lines)
+ return false;
+
+ if (i915->params.psr_safest_params)
+ io_wake_lines = fast_wake_lines = max_wake_lines;
+
+ /* According to Bspec lower limit should be set as 7 lines. */
+ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+ return true;
+}
+
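
A worked example of the wake-line math added above, for a hypothetical 1920x1080@60 eDP timing (pixel clock 148500 kHz, htotal 2200) on display version 12+. It assumes the usual usecs-to-scanlines conversion, scanlines = ceil(us * clock_kHz / (1000 * htotal)); the helper name below is local to the example.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int usecs_to_scanlines(int usecs, int clock_khz, int htotal)
{
	return DIV_ROUND_UP(usecs * clock_khz, 1000 * htotal);
}

int main(void)
{
	int clock = 148500, htotal = 2200;
	int io_wake = usecs_to_scanlines(42, clock, htotal);	/* = 3 */
	int fast_wake = usecs_to_scanlines(45, clock, htotal);	/* = 4 */

	/* Bspec lower limit of 7 lines, as in _compute_psr2_wake_times() */
	if (io_wake < 7)
		io_wake = 7;
	if (fast_wake < 7)
		fast_wake = 7;

	/* both below 9, so psr2_block_count_lines() would report 8 lines */
	printf("io_wake %d, fast_wake %d, block count %d lines\n",
	       io_wake, fast_wake, (io_wake < 9 && fast_wake < 9) ? 8 : 12);
	return 0;
}
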
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -937,6 +963,21 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, Unable to use long enough wake times\n");
+ return false;
+ }
+
+ /* Vblank >= PSR2_CTL Block Count Number maximum line count */
+ if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
+ crtc_state->hw.adjusted_mode.crtc_vblank_start <
+ psr2_block_count_lines(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, too short vblank time\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -946,13 +987,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
}
- /* Wa_2209313811 */
- if (!crtc_state->enable_psr2_sel_fetch &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- goto unsupported;
- }
-
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
goto unsupported;
@@ -1072,7 +1106,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
}
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
val &= EXITLINE_MASK;
pipe_config->dc3co_exitline = val;
}
@@ -1112,12 +1146,42 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
return LATENCY_REPORTING_REMOVED_PIPE_B;
case PIPE_C:
return LATENCY_REPORTING_REMOVED_PIPE_C;
+ case PIPE_D:
+ return LATENCY_REPORTING_REMOVED_PIPE_D;
default:
MISSING_CASE(intel_dp->psr.pipe);
return 0;
}
}
+/*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+static void wm_optimization_wa(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ bool set_wa_bit = false;
+
+ /* Wa_14015648006 */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_DISPLAY_VER(dev_priv, 11, 13))
+ set_wa_bit |= crtc_state->wm_level_disabled;
+
+ /* Wa_16013835468 */
+ if (DISPLAY_VER(dev_priv) == 12)
+ set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
+ if (set_wa_bit)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, wa_16013835468_bit_get(intel_dp));
+ else
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
+}
+
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1144,25 +1208,25 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
psr_irq_control(intel_dp);
- if (intel_dp->psr.dc3co_exitline) {
- u32 val;
-
- /*
- * TODO: if future platforms supports DC3CO in more than one
- * transcoder, EXITLINE will need to be unset when disabling PSR
- */
- val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
- val &= ~EXITLINE_MASK;
- val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
- val |= EXITLINE_ENABLE;
- intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
- }
+ /*
+ * TODO: if future platforms supports DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ if (intel_dp->psr.dc3co_exitline)
+ intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
+ intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
+ /*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+ wm_optimization_wa(intel_dp, crtc_state);
+
if (intel_dp->psr.psr2_enabled) {
if (DISPLAY_VER(dev_priv) == 9)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
@@ -1170,39 +1234,25 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
PSR2_ADD_VERTICAL_LINE_COUNT);
/*
- * Wa_16014451276:adlp
+ * Wa_16014451276:adlp,mtl[a0,b0]
* All supported adlp panels have 1-based X granularity, this may
* cause issues if non-supported panels are used.
*/
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+ else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
ADLP_1_BASED_X_GRANULARITY);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_16012604467:adlp,mtl[a0,b0] */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK,
- TRANS_SET_CONTEXT_LATENCY_VALUE(1));
-
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv))
+ MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
+ MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
+ else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
-
- /* Wa_16013835468:tgl[b0+], dg1 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
- IS_DG1(dev_priv)) {
- u16 vtotal, vblank;
-
- vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
- crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
- crtc_state->uapi.adjusted_mode.crtc_vblank_start;
- if (vblank > vtotal)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
- wa_16013835468_bit_get(intel_dp));
- }
}
}
@@ -1349,29 +1399,23 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
- /* Wa_1408330847 */
- if (intel_dp->psr.psr2_sel_fetch_enabled &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
+ /*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+ if (DISPLAY_VER(dev_priv) >= 11)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.psr2_enabled) {
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_16012604467:adlp,mtl[a0,b0] */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv))
+ MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder),
+ MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
+ else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
-
- /* Wa_16013835468:tgl[b0+], dg1 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
- IS_DG1(dev_priv))
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
}
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
@@ -1510,7 +1554,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv));
+ man_trk_ctl_single_full_frame_bit_get(dev_priv) |
+ man_trk_ctl_continuos_full_frame(dev_priv));
/*
* Display WA #0884: skl+
@@ -1528,8 +1573,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1540,10 +1585,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (plane->id == PLANE_CURSOR)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+}
+
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1554,11 +1617,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!crtc_state->enable_psr2_sel_fetch)
return;
- if (plane->id == PLANE_CURSOR) {
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
+ if (plane->id == PLANE_CURSOR)
return;
- }
clip = &plane_state->psr2_sel_fetch_area;
@@ -1586,9 +1646,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
@@ -1624,11 +1681,8 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
if (full_update) {
- /*
- * Not applying Wa_14014971508:adlp as we do not support the
- * feature that requires this workaround.
- */
val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
+ val |= man_trk_ctl_continuos_full_frame(dev_priv);
goto exit;
}
@@ -1826,6 +1880,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
+ /* Wa_14014971492 */
+ if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ crtc_state->splitter.enable)
+ pipe_clip.y1 = 0;
+
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
return ret;
@@ -1909,14 +1969,20 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
* - PSR disabled in new state
* - All planes will go inactive
* - Changing between PSR versions
+ * - Display WA #1136: skl, bxt
*/
needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
needs_to_disable |= !new_crtc_state->has_psr;
needs_to_disable |= !new_crtc_state->active_planes;
needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
+ needs_to_disable |= DISPLAY_VER(i915) < 11 &&
+ new_crtc_state->wm_level_disabled;
if (psr->enabled && needs_to_disable)
intel_psr_disable_locked(intel_dp);
+ else if (psr->enabled && new_crtc_state->wm_level_disabled)
+ /* Wa_14015648006 */
+ wm_optimization_wa(intel_dp, new_crtc_state);
mutex_unlock(&psr->lock);
}
@@ -1935,23 +2001,29 @@ static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_psr *psr = &intel_dp->psr;
+ bool keep_disabled = false;
mutex_lock(&psr->lock);
- if (psr->sink_not_reliable)
- goto exit;
-
drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
- /* Only enable if there is active planes */
- if (!psr->enabled && crtc_state->active_planes)
+ keep_disabled |= psr->sink_not_reliable;
+ keep_disabled |= !crtc_state->active_planes;
+
+ /* Display WA #1136: skl, bxt */
+ keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->wm_level_disabled;
+
+ if (!psr->enabled && !keep_disabled)
intel_psr_enable_locked(intel_dp, crtc_state);
+ else if (psr->enabled && !crtc_state->wm_level_disabled)
+ /* Wa_14015648006 */
+ wm_optimization_wa(intel_dp, crtc_state);
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(intel_dp);
-exit:
mutex_unlock(&psr->lock);
}
}
@@ -2307,12 +2379,15 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0) {
u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv);
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_single_full_frame_bit_get(dev_priv) |
+ man_trk_ctl_continuos_full_frame(dev_priv);
/*
- * turn continuous full frame off and do a single
- * full frame
+ * Set psr2_sel_fetch_cff_enabled as false to allow selective
+ * updates. Still keep cff bit enabled as we don't have proper
+ * SU configuration in case update is sent for any reason after
+ * sff bit gets cleared by the HW on next vblank.
*/
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
val);
@@ -2622,3 +2697,302 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
break;
}
}
+
+static void
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const char *status = "unknown";
+ u32 val, status_val;
+
+ if (intel_dp->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder));
+ status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ if (psr->sink_support)
+ seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ str_yes_no(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ str_enabled_disabled(enabled), val);
+ psr_source_status(intel_dp, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+ * SKL+ Perf counter is reset to 0 everytime DC state is entered
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+ * Reading all 3 registers before hand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+
+ seq_printf(m, "PSR2 selective fetch: %s\n",
+ str_enabled_disabled(psr->psr2_sel_fetch_enabled));
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_edp_psr_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_dp *intel_dp = NULL;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ /* Find the first EDP which supports PSR */
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ intel_dp = enc_to_intel_dp(encoder);
+ break;
+ }
+
+ if (!intel_dp)
+ return -ENODEV;
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+ intel_wakeref_t wakeref;
+ int ret = -ENODEV;
+
+ if (!HAS_PSR(dev_priv))
+ return ret;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ // TODO: split to each transcoder's PSR debug state
+ ret = intel_psr_debug_set(intel_dp, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ }
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ // TODO: split to each transcoder's PSR debug state
+ *val = READ_ONCE(intel_dp->psr.debug);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+void intel_psr_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+ i915, &i915_edp_psr_debug_fops);
+
+ debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+ i915, &i915_edp_psr_status_fops);
+}
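+
+/*
+ * Usage sketch (assumption, not part of this patch): the two files registered
+ * above land under the DRM minor's debugfs root, e.g.
+ *
+ *	# cat /sys/kernel/debug/dri/0/i915_edp_psr_status
+ *	# echo 1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
+ *
+ * where the written value is the u64 debug mask consumed by
+ * i915_edp_psr_debug_set() above.
+ */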
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ const char *str;
+ int ret;
+ u8 val;
+
+ if (!CAN_PSR(intel_dp)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+ if (ret != 1)
+ return ret < 0 ? ret : -EIO;
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ else
+ str = "unknown";
+
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
+void intel_psr_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct dentry *root = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ return;
+
+ debugfs_create_file("i915_psr_sink_status", 0444, root,
+ connector, &i915_psr_sink_status_fops);
+
+ if (HAS_PSR(i915))
+ debugfs_create_file("i915_psr_status", 0444, root,
+ connector, &i915_psr_status_fops);
+}
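+
+/*
+ * Note (assumption, not part of the patch): connector->base.debugfs_entry is
+ * the per-connector debugfs directory, so these files end up at paths such as
+ * /sys/kernel/debug/dri/0/eDP-1/i915_psr_sink_status and, on hardware with
+ * PSR, /sys/kernel/debug/dri/0/eDP-1/i915_psr_status.
+ */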
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 2ac3a46cccc5..0b95e8aa615f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
@@ -46,16 +47,22 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
+void intel_psr_connector_debugfs_add(struct intel_connector *connector);
+void intel_psr_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
new file mode 100644
index 000000000000..958d8cabc44b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_PSR_REGS_H__
+#define __INTEL_PSR_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
+#define EXITLINE_ENABLE REG_BIT(31)
+#define EXITLINE_MASK REG_GENMASK(12, 0)
+#define EXITLINE_SHIFT 0
+
+/*
+ * HSW+ eDP PSR registers
+ *
+ * HSW PSR registers are relative to DDIA (_DDI_BUF_CTL_A + 0x800), with just one
+ * instance of them
+ */
+#define _SRD_CTL_A 0x60800
+#define _SRD_CTL_EDP 0x6f800
+#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
+#define EDP_PSR_ENABLE (1 << 31)
+#define BDW_PSR_SINGLE_FRAME (1 << 30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
+#define EDP_PSR_LINK_STANDBY (1 << 27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25)
+#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
+#define EDP_PSR_SKIP_AUX_EXIT (1 << 12)
+#define EDP_PSR_TP1_TP2_SEL (0 << 11)
+#define EDP_PSR_TP1_TP3_SEL (1 << 11)
+#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
+#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */
+#define EDP_PSR_TP1_TIME_500us (0 << 4)
+#define EDP_PSR_TP1_TIME_100us (1 << 4)
+#define EDP_PSR_TP1_TIME_2500us (2 << 4)
+#define EDP_PSR_TP1_TIME_0us (3 << 4)
+#define EDP_PSR_IDLE_FRAME_SHIFT 0
+
+/*
+ * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are relative
+ * to the transcoder, and the bits are defined for each one as if no shift were
+ * applied (i.e. as if it were TRANSCODER_EDP)
+ */
+#define EDP_PSR_IMR _MMIO(0x64834)
+#define EDP_PSR_IIR _MMIO(0x64838)
+#define _PSR_IMR_A 0x60814
+#define _PSR_IIR_A 0x60818
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
+ 0 : ((trans) - TRANSCODER_A + 1) * 8)
+#define TGL_PSR_MASK REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR REG_BIT(2)
+#define TGL_PSR_POST_EXIT REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY REG_BIT(0)
+#define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
+ _EDP_PSR_TRANS_SHIFT(trans))
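+
+/*
+ * Worked example (assumption, not part of the patch): with _EDP_PSR_TRANS_SHIFT
+ * the pre-TGL combined EDP_PSR_IIR/IMR pack one 3-bit group per transcoder:
+ * TRANSCODER_EDP uses bits 2:0, TRANSCODER_A bits 10:8, TRANSCODER_B bits 18:16
+ * and TRANSCODER_C bits 26:24, so e.g.
+ *
+ *	EDP_PSR_ERROR(TRANSCODER_A) == TGL_PSR_ERROR << 8 == BIT(10)
+ *
+ * On TGL+ the per-transcoder TRANS_PSR_IIR/IMR are used instead, with no shift,
+ * as noted in the comment above.
+ */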
+
+#define _SRD_AUX_DATA_A 0x60814
+#define _SRD_AUX_DATA_EDP 0x6f814
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */
+
+#define _SRD_STATUS_A 0x60840
+#define _SRD_STATUS_EDP 0x6f840
+#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
+#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
+#define EDP_PSR_STATUS_STATE_SHIFT 29
+#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29)
+#define EDP_PSR_STATUS_STATE_BUFON (4 << 29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29)
+#define EDP_PSR_STATUS_LINK_MASK (3 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
+#define EDP_PSR_STATUS_COUNT_SHIFT 16
+#define EDP_PSR_STATUS_COUNT_MASK 0xf
+#define EDP_PSR_STATUS_AUX_ERROR (1 << 15)
+#define EDP_PSR_STATUS_AUX_SENDING (1 << 12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
+#define EDP_PSR_STATUS_IDLE_MASK 0xf
+
+#define _SRD_PERF_CNT_A 0x60844
+#define _SRD_PERF_CNT_EDP 0x6f844
+#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
+#define EDP_PSR_PERF_CNT_MASK 0xffffff
+
+/* PSR_MASK on SKL+ */
+#define _SRD_DEBUG_A 0x60860
+#define _SRD_DEBUG_EDP 0x6f860
+#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
+#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
+
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30) /* up to adl-p */
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
+#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
+#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
+#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
+#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13)
+#define EDP_PSR2_FAST_WAKE_MAX_LINES 8
+#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
+#define EDP_PSR2_FAST_WAKE_MASK (3 << 11)
+#define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5
+#define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10
+#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT)
+#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
+#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
+#define EDP_PSR2_IDLE_FRAME_MASK 0xf
+#define EDP_PSR2_IDLE_FRAME_SHIFT 0
+
+#define _PSR_EVENT_TRANS_A 0x60848
+#define _PSR_EVENT_TRANS_B 0x61848
+#define _PSR_EVENT_TRANS_C 0x62848
+#define _PSR_EVENT_TRANS_D 0x63848
+#define _PSR_EVENT_TRANS_EDP 0x6f848
+#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
+#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
+#define PSR_EVENT_PSR2_DISABLED (1 << 16)
+#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
+#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14)
+#define PSR_EVENT_GRAPHICS_RESET (1 << 12)
+#define PSR_EVENT_PCH_INTERRUPT (1 << 11)
+#define PSR_EVENT_MEMORY_UP (1 << 10)
+#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
+#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
+#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
+#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */
+#define PSR_EVENT_HDCP_ENABLE (1 << 4)
+#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
+#define PSR_EVENT_VBI_ENABLE (1 << 2)
+#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
+#define PSR_EVENT_PSR_DISABLE (1 << 0)
+
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
+#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
+#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
+
+#define _PSR2_SU_STATUS_A 0x60914
+#define _PSR2_SU_STATUS_EDP 0x6f914
+#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
+#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
+#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
+#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
+#define PSR2_SU_STATUS_FRAMES 8
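+
+/*
+ * Layout sketch (derived from the macros above, not part of the patch): each
+ * 32-bit PSR2_SU_STATUS register packs three 10-bit SU block counts, so the
+ * 8 tracked frames span three consecutive registers:
+ *
+ *	frames 0-2: _PSR2_SU_STATUS + 0, bits 9:0, 19:10, 29:20
+ *	frames 3-5: _PSR2_SU_STATUS + 4, bits 9:0, 19:10, 29:20
+ *	frames 6-7: _PSR2_SU_STATUS + 8, bits 9:0, 19:10
+ */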
+
+#define _PSR2_MAN_TRK_CTL_A 0x60910
+#define _PSR2_MAN_TRK_CTL_EDP 0x6f910
+#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+#define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31)
+#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21)
+#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+#define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(20, 11)
+#define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(3)
+#define PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(2)
+#define PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(1)
+#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(28, 16)
+#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(12, 0)
+#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(31)
+#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
+#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
+
+#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
+#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
+#define _SEL_FETCH_PLANE_BASE_3_A 0x708D0
+#define _SEL_FETCH_PLANE_BASE_4_A 0x708F0
+#define _SEL_FETCH_PLANE_BASE_5_A 0x70920
+#define _SEL_FETCH_PLANE_BASE_6_A 0x70940
+#define _SEL_FETCH_PLANE_BASE_7_A 0x70960
+#define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
+#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
+
+#define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
+ _SEL_FETCH_PLANE_BASE_1_A, \
+ _SEL_FETCH_PLANE_BASE_2_A, \
+ _SEL_FETCH_PLANE_BASE_3_A, \
+ _SEL_FETCH_PLANE_BASE_4_A, \
+ _SEL_FETCH_PLANE_BASE_5_A, \
+ _SEL_FETCH_PLANE_BASE_6_A, \
+ _SEL_FETCH_PLANE_BASE_7_A, \
+ _SEL_FETCH_PLANE_BASE_CUR_A)
+#define _SEL_FETCH_PLANE_BASE_1(pipe) _PIPE(pipe, _SEL_FETCH_PLANE_BASE_1_A, _SEL_FETCH_PLANE_BASE_1_B)
+#define _SEL_FETCH_PLANE_BASE(pipe, plane) (_SEL_FETCH_PLANE_BASE_1(pipe) - \
+ _SEL_FETCH_PLANE_BASE_1_A + \
+ _SEL_FETCH_PLANE_BASE_A(plane))
+
+#define _SEL_FETCH_PLANE_CTL_1_A 0x70890
+#define PLANE_SEL_FETCH_CTL(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_CTL_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+#define PLANE_SEL_FETCH_CTL_ENABLE REG_BIT(31)
+
+#define _SEL_FETCH_PLANE_POS_1_A 0x70894
+#define PLANE_SEL_FETCH_POS(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_POS_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+
+#define _SEL_FETCH_PLANE_SIZE_1_A 0x70898
+#define PLANE_SEL_FETCH_SIZE(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_SIZE_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+
+#define _SEL_FETCH_PLANE_OFFSET_1_A 0x7089C
+#define PLANE_SEL_FETCH_OFFSET(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_OFFSET_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+
+#endif /* __INTEL_PSR_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 6f8e4ec5c0fb..6e86c0971d24 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -17,6 +17,15 @@
/* from BPP 6 to 36 in steps of 0.5 */
#define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61
+/* from BPP 4 to 12 in steps of 0.5 */
+#define RC_RANGE_QP420_8BPC_MAX_NUM_BPP 17
+
+/* from BPP 4 to 15 in steps of 0.5 */
+#define RC_RANGE_QP420_10BPC_MAX_NUM_BPP 23
+
+/* from BPP 4 to 18 in steps of 0.5 */
+#define RC_RANGE_QP420_12BPC_MAX_NUM_BPP 29
+
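+/*
+ * Note (assumption): as with the 4:4:4 tables, the *_MAX_NUM_BPP values above
+ * are the number of columns in each table, i.e. (max_bpp - min_bpp) * 2 + 1
+ * for the 0.5 bpp step, with one row per DSC buffer range.
+ */
+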
/*
* These qp tables are as per the C model
* and it has the rows pointing to bpps which increment
@@ -283,26 +292,182 @@ static const u8 rc_range_maxqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC
11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4 }
};
-#define PARAM_TABLE(_minmax, _bpc, _row, _col) do { \
- if (bpc == (_bpc)) \
- return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \
+static const u8 rc_range_minqp420_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_8BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
+ { 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0 },
+ { 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0 },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1 },
+ { 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 1, 1 },
+ { 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 2, 2, 1 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 2, 1 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 6, 5, 5, 4, 3, 3, 3, 2 },
+ { 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3 }
+};
+
+static const u8 rc_range_maxqp420_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_8BPC_MAX_NUM_BPP] = {
+ { 4, 4, 3, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 4, 4, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 5, 5, 5, 5, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 6, 6, 6, 6, 5, 4, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0 },
+ { 7, 7, 7, 7, 7, 5, 4, 3, 2, 2, 2, 2, 2, 1, 1, 1, 0 },
+ { 7, 7, 7, 7, 7, 6, 5, 4, 3, 3, 3, 2, 2, 2, 1, 1, 0 },
+ { 7, 7, 7, 7, 7, 6, 5, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1 },
+ { 8, 8, 8, 8, 8, 7, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1 },
+ { 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1 },
+ { 10, 10, 9, 9, 9, 8, 7, 6, 5, 5, 5, 4, 4, 3, 3, 2, 2 },
+ { 10, 10, 10, 9, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3, 2, 2 },
+ { 11, 11, 10, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2 },
+ { 11, 11, 11, 10, 9, 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3, 2 },
+ { 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 4, 4, 4, 3 },
+ { 14, 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5, 4 }
+};
+
+static const u8 rc_range_minqp420_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_10BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 4, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0 },
+ { 7, 7, 7, 7, 7, 6, 5, 5, 5, 5, 5, 4, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0 },
+ { 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 2, 2, 1, 1, 1, 0 },
+ { 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 4, 4, 4, 3, 2, 2, 2, 1, 1, 1, 0 },
+ { 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 2, 1, 1 },
+ { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1 },
+ { 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1 },
+ { 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 7, 6, 6, 5, 4, 4, 3, 3, 2, 1 },
+ { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 2, 1 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 3, 3,
+ 2, 2 },
+ { 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 10, 9, 8, 8, 7, 6, 6, 5,
+ 5, 4, 4 }
+};
+
+static const u8 rc_range_maxqp420_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_10BPC_MAX_NUM_BPP] = {
+ { 8, 8, 7, 6, 4, 4, 3, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 8, 8, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 9, 9, 9, 8, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 10, 10, 10, 9, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 0,
+ 0 },
+ { 11, 11, 11, 10, 10, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1,
+ 0 },
+ { 11, 11, 11, 10, 10, 9, 8, 7, 6, 6, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 1,
+ 1 },
+ { 11, 11, 11, 11, 11, 10, 9, 8, 7, 7, 7, 6, 5, 5, 4, 3, 3, 3, 3, 2, 2, 2,
+ 1 },
+ { 12, 12, 12, 12, 12, 11, 10, 9, 8, 8, 8, 7, 6, 5, 5, 4, 3, 3, 3, 2, 2,
+ 2, 1 },
+ { 13, 13, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 3,
+ 2, 2 },
+ { 14, 14, 13, 13, 13, 12, 11, 10, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3,
+ 2, 2 },
+ { 14, 14, 14, 13, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4, 4,
+ 3, 3, 2 },
+ { 15, 15, 14, 14, 13, 13, 12, 11, 11, 10, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4,
+ 4, 3, 2 },
+ { 15, 15, 15, 14, 13, 13, 13, 12, 11, 11, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4,
+ 4, 3, 2 },
+ { 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 4,
+ 4, 3, 3 },
+ { 18, 17, 17, 16, 15, 15, 14, 13, 13, 12, 11, 11, 11, 10, 9, 9, 8, 7, 7,
+ 6, 6, 5, 5 }
+};
+
+static const u8 rc_range_minqp420_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_12BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0 },
+ { 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0 },
+ { 9, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0 },
+ { 10, 9, 9, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 3, 2, 2, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0 },
+ { 11, 10, 10, 10, 10, 9, 9, 8, 7, 6, 6, 6, 6, 5, 5, 4, 3, 3, 3, 2, 2, 1,
+ 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2,
+ 1, 1, 0, 0, 0, 0, 0 },
+ { 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 7, 6, 5, 5, 5, 5, 4, 3, 3,
+ 2, 1, 1, 1, 1, 1, 0 },
+ { 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 8, 8, 8, 7, 6, 6, 5, 4, 4,
+ 3, 2, 2, 1, 1, 1, 1, 1 },
+ { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5,
+ 5, 4, 4, 2, 2, 1, 1, 1, 1 },
+ { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6,
+ 5, 4, 4, 3, 2, 2, 1, 1, 1 },
+ { 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7,
+ 6, 5, 4, 3, 3, 2, 2, 1, 1 },
+ { 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 11, 10, 10, 9, 8, 8,
+ 7, 7, 6, 5, 4, 3, 3, 2, 2, 1 },
+ { 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8,
+ 7, 7, 6, 5, 4, 4, 3, 2, 2, 1 },
+ { 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 13, 13, 12, 11, 11, 10, 9, 9, 8,
+ 8, 7, 6, 6, 5, 4, 4, 3, 3, 2 },
+ { 21, 20, 20, 19, 18, 18, 17, 16, 16, 15, 14, 14, 14, 13, 12, 12, 11, 10,
+ 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4 }
+};
+
+static const u8 rc_range_maxqp420_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_12BPC_MAX_NUM_BPP] = {
+ { 11, 10, 9, 8, 6, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0 },
+ { 12, 11, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 10, 9, 8, 7, 6, 6, 6, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0 },
+ { 14, 13, 13, 12, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1,
+ 1, 1, 0, 0, 0, 0, 0 },
+ { 15, 14, 14, 13, 13, 11, 10, 9, 8, 7, 7, 7, 7, 6, 6, 5, 4, 4, 4, 3, 3, 2,
+ 1, 1, 1, 0, 0, 0, 0 },
+ { 15, 15, 15, 14, 14, 13, 12, 11, 10, 10, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4,
+ 4, 3, 2, 2, 1, 1, 0, 0, 0 },
+ { 15, 15, 15, 15, 15, 14, 13, 12, 11, 11, 11, 10, 9, 8, 7, 6, 6, 6, 6, 5,
+ 4, 4, 3, 2, 2, 2, 1, 1, 0 },
+ { 16, 16, 16, 16, 16, 15, 14, 13, 12, 12, 12, 11, 10, 9, 9, 8, 7, 7, 6, 5,
+ 5, 4, 3, 3, 2, 2, 2, 1, 1 },
+ { 17, 17, 17, 16, 16, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 7,
+ 6, 6, 5, 5, 3, 3, 2, 2, 1, 1 },
+ { 18, 18, 17, 17, 17, 16, 15, 14, 13, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8,
+ 7, 6, 5, 5, 4, 3, 3, 2, 2, 1 },
+ { 18, 18, 18, 17, 17, 16, 16, 15, 14, 14, 13, 13, 12, 12, 11, 10, 9, 9, 8,
+ 8, 7, 6, 5, 4, 4, 3, 3, 2, 2 },
+ { 19, 19, 18, 18, 17, 17, 16, 15, 15, 14, 14, 13, 13, 12, 11, 11, 10, 9,
+ 9, 8, 8, 7, 6, 5, 4, 4, 3, 3, 2 },
+ { 19, 19, 19, 18, 17, 17, 17, 16, 15, 15, 14, 13, 13, 12, 12, 11, 10, 9,
+ 9, 8, 8, 7, 6, 5, 5, 4, 3, 3, 2 },
+ { 21, 20, 20, 19, 18, 18, 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10,
+ 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3 },
+ { 22, 21, 21, 20, 19, 19, 18, 17, 17, 16, 15, 15, 15, 14, 13, 13, 12, 11,
+ 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5 }
+};
+
+#define PARAM_TABLE(_minmax, _bpc, _row, _col, _is_420) do { \
+ if (bpc == (_bpc)) { \
+ if (_is_420) \
+ return rc_range_##_minmax##qp420_##_bpc##bpc[_row][_col]; \
+ else \
+ return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \
+ } \
} while (0)
-u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i)
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i, bool is_420)
{
- PARAM_TABLE(min, 8, buf_i, bpp_i);
- PARAM_TABLE(min, 10, buf_i, bpp_i);
- PARAM_TABLE(min, 12, buf_i, bpp_i);
+ PARAM_TABLE(min, 8, buf_i, bpp_i, is_420);
+ PARAM_TABLE(min, 10, buf_i, bpp_i, is_420);
+ PARAM_TABLE(min, 12, buf_i, bpp_i, is_420);
MISSING_CASE(bpc);
return 0;
}
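+
+/*
+ * Usage sketch (assumption, not from this patch): a DSC caller would now pass
+ * whether the pipe uses YCbCr 4:2:0 output, e.g.
+ *
+ *	rc->rc_range_params[i].range_min_qp =
+ *		intel_lookup_range_min_qp(bpc, i, bpp_i,
+ *					  crtc_state->output_format ==
+ *					  INTEL_OUTPUT_FORMAT_YCBCR420);
+ */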
-u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i)
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i, bool is_420)
{
- PARAM_TABLE(max, 8, buf_i, bpp_i);
- PARAM_TABLE(max, 10, buf_i, bpp_i);
- PARAM_TABLE(max, 12, buf_i, bpp_i);
+ PARAM_TABLE(max, 8, buf_i, bpp_i, is_420);
+ PARAM_TABLE(max, 10, buf_i, bpp_i, is_420);
+ PARAM_TABLE(max, 12, buf_i, bpp_i, is_420);
MISSING_CASE(bpc);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.h b/drivers/gpu/drm/i915/display/intel_qp_tables.h
index 9fb3c36bd7c6..a9ff9ca29938 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.h
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i);
-u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i);
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i, bool is_420);
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i, bool is_420);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 6e48d3bcdfec..a280448df771 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -199,6 +199,8 @@ static struct intel_quirk intel_quirks[] = {
/* ECS Liva Q2 */
{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ /* HP Notebook - 14-r206nv */
+ { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 329b9d9af667..e12ba458636c 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,6 +39,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
+#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -1068,7 +1069,8 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1))
return -ENXIO;
- if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ /* TX_DISABLED doesn't mean disabled for ELD */
+ if (if_index != SDVO_HBUF_INDEX_ELD && tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
@@ -1185,6 +1187,28 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
}
+static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ ssize_t len;
+ u8 val;
+
+ if (!crtc_state->has_audio)
+ return;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, &val, 1))
+ return;
+
+ if ((val & SDVO_AUDIO_ELD_VALID) == 0)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+ crtc_state->eld, sizeof(crtc_state->eld));
+ if (len < 0)
+ drm_dbg_kms(&i915->drm, "failed to read ELD\n");
+}
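+
+/*
+ * Background note (not part of the patch): the ELD ("EDID-Like Data") block
+ * describes the sink's audio capabilities. It is read here from the SDVO
+ * encoder's ELD infoframe buffer, when the encoder reports it valid, and
+ * stashed in crtc_state->eld for the audio code to consume.
+ */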
+
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
@@ -1378,7 +1402,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- pipe_config->has_audio = intel_sdvo_has_audio(encoder, pipe_config, conn_state);
+ pipe_config->has_audio =
+ intel_sdvo_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
pipe_config->limited_color_range =
intel_sdvo_limited_color_range(encoder, pipe_config,
@@ -1729,9 +1755,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
&val, 1)) {
- u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
-
- if ((val & mask) == mask)
+ if (val & SDVO_AUDIO_PRESENCE_DETECT)
pipe_config->has_audio = true;
}
@@ -1742,6 +1766,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
+
+ intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
@@ -1753,12 +1779,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- u8 *eld = connector->eld;
-
- eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ const u8 *eld = crtc_state->eld;
intel_sdvo_set_audio_state(intel_sdvo, 0);
@@ -2886,7 +2907,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2903,7 +2924,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
mutex_unlock(&i915->drm.mode_config.mutex);
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
if (!intel_panel_preferred_fixed_mode(intel_connector))
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index c799e891f8b5..1cfb94b5cedb 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -40,22 +40,22 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
*/
if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
- i915->snps_phy_failed_calibration |= BIT(phy);
+ i915->display.snps.phy_failed_calibration |= BIT(phy);
}
}
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
+void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915,
enum phy phy, bool enable)
{
u32 val;
- if (!intel_phy_is_snps(dev_priv, phy))
+ if (!intel_phy_is_snps(i915, phy))
return;
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
enable ? 2 : 3);
- intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_TX_REQ(phy),
- SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
+ intel_de_rmw(i915, SNPS_PHY_TX_REQ(phy),
+ SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
}
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+ .clock = 267300,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_268500 = {
.clock = 268500,
.ref_control =
@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+ .clock = 319890,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_497750 = {
.clock = 497750,
.ref_control =
@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_209800,
&dg2_hdmi_241500,
&dg2_hdmi_262750,
+ &dg2_hdmi_267300,
&dg2_hdmi_268500,
&dg2_hdmi_296703,
+ &dg2_hdmi_319890,
&dg2_hdmi_497750,
&dg2_hdmi_592000,
&dg2_hdmi_593407,
@@ -1785,7 +1847,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
*/
/* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */
- intel_uncore_rmw(&dev_priv->uncore, enable_reg, 0, PLL_ENABLE);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
/*
* 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This
@@ -1830,14 +1892,13 @@ void intel_mpllb_disable(struct intel_encoder *encoder)
*/
/* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */
- intel_uncore_rmw(&i915->uncore, enable_reg, PLL_ENABLE, 0);
+ intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
/*
* 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0".
* This will allow the PLL to stop running.
*/
- intel_uncore_rmw(&i915->uncore, SNPS_PHY_MPLLB_DIV(phy),
- SNPS_PHY_MPLLB_FORCE_EN, 0);
+ intel_de_rmw(i915, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0);
/*
* 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6b4d24b9cd0..25034bbf1445 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -32,85 +32,20 @@
#include <linux/string_helpers.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
-#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "intel_frontbuffer.h"
#include "intel_sprite.h"
-#include "intel_vrr.h"
-
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_rect *src = &plane_state->uapi.src;
- u32 src_x, src_y, src_w, src_h, hsub, vsub;
- bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
-
- /*
- * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
- * abuses hsub/vsub so we can't use them here. But as they
- * are limited to 32bpp RGB formats we don't actually need
- * to check anything.
- */
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
- return 0;
-
- /*
- * Hardware doesn't handle subpixel coordinates.
- * Adjust to (macro)pixel boundary, but be careful not to
- * increase the source viewport size, because that could
- * push the downscaling factor out of bounds.
- */
- src_x = src->x1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_y = src->y1 >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- drm_rect_init(src, src_x << 16, src_y << 16,
- src_w << 16, src_h << 16);
-
- if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
- hsub = 2;
- vsub = 2;
- } else {
- hsub = fb->format->hsub;
- vsub = fb->format->vsub;
- }
-
- if (rotated)
- hsub = vsub = max(hsub, vsub);
-
- if (src_x % hsub || src_w % hsub) {
- drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_x, src_w, hsub, str_yes_no(rotated));
- return -EINVAL;
- }
-
- if (src_y % vsub || src_h % vsub) {
- drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_y, src_h, vsub, str_yes_no(rotated));
- return -EINVAL;
- }
-
- return 0;
-}
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
@@ -1217,7 +1152,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
}
intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe),
+ DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
/*
* The control register self-arms if the plane was previously
@@ -1448,124 +1384,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 9;
-}
-
-static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
- const struct drm_intel_sprite_colorkey *set)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-
- *key = *set;
-
- /*
- * We want src key enabled on the
- * sprite and not on the primary.
- */
- if (plane->id == PLANE_PRIMARY &&
- set->flags & I915_SET_COLORKEY_SOURCE)
- key->flags = 0;
-
- /*
- * On SKL+ we want dst key enabled on
- * the primary and not on the sprite.
- */
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- key->flags = 0;
-}
-
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_intel_sprite_colorkey *set = data;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- int ret = 0;
-
- /* ignore the pointless "none" flag */
- set->flags &= ~I915_SET_COLORKEY_NONE;
-
- if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- /* Make sure we don't try to enable both src & dest simultaneously */
- if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- plane = drm_plane_find(dev, file_priv, set->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
- return -ENOENT;
-
- /*
- * SKL+ only plane 2 can do destination keying against plane 1.
- * Also multiple planes can't do destination keying on the same
- * pipe simultaneously.
- */
- if (DISPLAY_VER(dev_priv) >= 9 &&
- to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(plane->dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
- state->acquire_ctx = &ctx;
-
- while (1) {
- plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret)
- intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
-
- /*
- * On some platforms we have to configure
- * the dst colorkey on the primary plane.
- */
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv,
- to_intel_plane(plane)->pipe);
-
- plane_state = drm_atomic_get_plane_state(state,
- crtc->base.primary);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret)
- intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
- }
-
- if (!ret)
- ret = drm_atomic_commit(state);
-
- if (ret != -EDEADLK)
- break;
-
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- }
-
- drm_atomic_state_put(state);
-out:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- return ret;
-}
-
static const u32 g4x_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
new file mode 100644
index 000000000000..70a391083751
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_crtc.h"
+#include "intel_display_types.h"
+#include "intel_sprite_uapi.h"
+
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return DISPLAY_VER(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret = 0;
+
+ /* ignore the pointless "none" flag */
+ set->flags &= ~I915_SET_COLORKEY_NONE;
+
+ if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ plane = drm_plane_find(dev, file_priv, set->plane_id);
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -ENOENT;
+
+ /*
+ * SKL+ only plane 2 can do destination keying against plane 1.
+ * Also multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ while (1) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+ }
+
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret != -EDEADLK)
+ break;
+
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ }
+
+ drm_atomic_state_put(state);
+out:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
+}
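+
+/*
+ * Note (explanatory, not part of the moved code): the while (1) loop above is
+ * the usual DRM modeset-lock deadlock-avoidance pattern: when acquiring a
+ * plane state or committing returns -EDEADLK, the atomic state is cleared,
+ * drm_modeset_backoff() drops and re-takes the contended locks, and the whole
+ * update is retried.
+ */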
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.h b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h
new file mode 100644
index 000000000000..3eb50025acaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_UAPI_H__
+#define __INTEL_SPRITE_UAPI_H__
+
+struct drm_device;
+struct drm_file;
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif /* __INTEL_SPRITE_UAPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 70624b4b2d38..3b60995e9dfb 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -5,6 +5,8 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power_map.h"
#include "intel_display_types.h"
@@ -13,6 +15,52 @@
#include "intel_mg_phy_regs.h"
#include "intel_tc.h"
+enum tc_port_mode {
+ TC_PORT_DISCONNECTED,
+ TC_PORT_TBT_ALT,
+ TC_PORT_DP_ALT,
+ TC_PORT_LEGACY,
+};
+
+struct intel_tc_port;
+
+struct intel_tc_phy_ops {
+ enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
+ u32 (*hpd_live_status)(struct intel_tc_port *tc);
+ bool (*is_ready)(struct intel_tc_port *tc);
+ bool (*is_owned)(struct intel_tc_port *tc);
+ void (*get_hw_state)(struct intel_tc_port *tc);
+ bool (*connect)(struct intel_tc_port *tc, int required_lanes);
+ void (*disconnect)(struct intel_tc_port *tc);
+ void (*init)(struct intel_tc_port *tc);
+};
+
+struct intel_tc_port {
+ struct intel_digital_port *dig_port;
+
+ const struct intel_tc_phy_ops *phy_ops;
+
+ struct mutex lock; /* protects the TypeC port mode */
+ intel_wakeref_t lock_wakeref;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ enum intel_display_power_domain lock_power_domain;
+#endif
+ struct delayed_work disconnect_phy_work;
+ int link_refcount;
+ bool legacy_port:1;
+ char port_name[8];
+ enum tc_port_mode mode;
+ enum tc_port_mode init_mode;
+ enum phy_fia phy_fia;
+ u8 phy_fia_idx;
+};
+
+static enum intel_display_power_domain
+tc_phy_cold_off_domain(struct intel_tc_port *);
+static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
+static bool tc_phy_is_ready(struct intel_tc_port *tc);
+static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
+
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
static const char * const names[] = {
@@ -28,13 +76,24 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
+static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
+{
+ return dig_port->tc;
+}
+
+static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
+{
+ return to_i915(tc->dig_port->base.base.dev);
+}
+
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
+ return intel_phy_is_tc(i915, phy) && tc->mode == mode;
}
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
@@ -52,113 +111,178 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
+/*
+ * The display power domains used for TC ports depending on the
+ * platform and TC mode (legacy, DP-alt, TBT):
+ *
+ * POWER_DOMAIN_DISPLAY_CORE:
+ * --------------------------
+ * ADLP/all modes:
+ * - TCSS/IOM access for PHY ready state.
+ * ADLP+/all modes:
+ * - DE/north-,south-HPD ISR access for HPD live state.
+ *
+ * POWER_DOMAIN_PORT_DDI_LANES_<port>:
+ * -----------------------------------
+ * ICL+/all modes:
+ * - DE/DDI_BUF access for port enabled state.
+ * ADLP/all modes:
+ * - DE/DDI_BUF access for PHY owned state.
+ *
+ * POWER_DOMAIN_AUX_USBC<TC port index>:
+ * -------------------------------------
+ * ICL/legacy mode:
+ * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
+ * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
+ * main lanes.
+ * ADLP/legacy, DP-alt modes:
+ * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
+ * main lanes.
+ *
+ * POWER_DOMAIN_TC_COLD_OFF:
+ * -------------------------
+ * TGL/legacy, DP-alt modes:
+ * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
+ * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
+ * main lanes.
+ *
+ * ICL, TGL, ADLP/TBT mode:
+ * - TCSS/IOM,FIA access for HPD live state
+ * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
+ * AUX and main lanes.
+ */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
- return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
- IS_ALDERLAKE_P(i915);
+ return tc_phy_cold_off_domain(tc) ==
+ intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
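+
+/*
+ * Illustrative sketch (assumption, not part of this hunk): the per-platform
+ * rules from the comment above now live behind tc->phy_ops. A hypothetical
+ * ICL implementation of the cold-off-domain hook could look like:
+ *
+ *	static enum intel_display_power_domain
+ *	icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
+ *	{
+ *		struct drm_i915_private *i915 = tc_to_i915(tc);
+ *		struct intel_digital_port *dig_port = tc->dig_port;
+ *
+ *		if (tc->legacy_port)
+ *			return intel_display_power_legacy_aux_domain(i915,
+ *								     dig_port->aux_ch);
+ *
+ *		return POWER_DOMAIN_TC_COLD_OFF;
+ *	}
+ */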
-static enum intel_display_power_domain
-tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
+static intel_wakeref_t
+__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = tc_to_i915(tc);
- if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
- return POWER_DOMAIN_TC_COLD_OFF;
+ *domain = tc_phy_cold_off_domain(tc);
- return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_get(i915, *domain);
}
static intel_wakeref_t
-tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
- enum intel_display_power_domain *domain)
+tc_cold_block(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- *domain = tc_cold_get_power_domain(dig_port, mode);
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wakeref;
- return intel_display_power_get(i915, *domain);
+ wakeref = __tc_cold_block(tc, &domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ tc->lock_power_domain = domain;
+#endif
+ return wakeref;
}
-static intel_wakeref_t
-tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
+static void
+__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
{
- return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ intel_display_power_put(i915, domain, wakeref);
}
static void
-tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
+tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
- /*
- * wakeref == -1, means some error happened saving save_depot_stack but
- * power should still be put down and 0 is a invalid save_depot_stack
- * id so can be used to skip it for non TC legacy ports.
- */
- if (wakeref == 0)
- return;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
+#endif
+ __tc_cold_unblock(tc, domain, wakeref);
+}
- intel_display_power_put(i915, domain, wakeref);
+static void
+assert_display_core_power_enabled(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}
static void
-assert_tc_cold_blocked(struct intel_digital_port *dig_port)
+assert_tc_cold_blocked(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = tc_to_i915(tc);
bool enabled;
enabled = intel_display_power_is_enabled(i915,
- tc_cold_get_power_domain(dig_port,
- dig_port->tc_mode));
+ tc_phy_cold_off_domain(tc));
drm_WARN_ON(&i915->drm, !enabled);
}
+static enum intel_display_power_domain
+tc_port_power_domain(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+
+ return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
+}
+
+static void
+assert_tc_port_power_enabled(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
+}
+
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_tc_port *tc = to_tc_port(dig_port);
u32 lane_mask;
- lane_mask = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+ lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
- assert_tc_cold_blocked(dig_port);
+ assert_tc_cold_blocked(tc);
- lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
- return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+ lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
+ return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_tc_port *tc = to_tc_port(dig_port);
u32 pin_mask;
- pin_mask = intel_uncore_read(uncore,
- PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+ pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
- assert_tc_cold_blocked(dig_port);
+ assert_tc_cold_blocked(tc);
- return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
- DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+ return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
+ DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_wakeref_t wakeref;
u32 lane_mask;
- if (dig_port->tc_mode != TC_PORT_DP_ALT)
+ if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
return 4;
- assert_tc_cold_blocked(dig_port);
+ assert_tc_cold_blocked(tc);
lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
@@ -185,48 +309,51 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
- struct intel_uncore *uncore = &i915->uncore;
u32 val;
drm_WARN_ON(&i915->drm,
- lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+ lane_reversal && tc->mode != TC_PORT_LEGACY);
- assert_tc_cold_blocked(dig_port);
+ assert_tc_cold_blocked(tc);
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
+ val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
switch (required_lanes) {
case 1:
val |= lane_reversal ?
- DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
- DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
+ DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
break;
case 2:
val |= lane_reversal ?
- DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
- DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
+ DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
break;
case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
break;
default:
MISSING_CASE(required_lanes);
}
- intel_uncore_write(uncore,
- PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
+ intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
-static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
+static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
u32 live_status_mask)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = tc_to_i915(tc);
u32 valid_hpd_mask;
- if (dig_port->tc_legacy_port)
+ drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+
+ if (hweight32(live_status_mask) != 1)
+ return;
+
+ if (tc->legacy_port)
valid_hpd_mask = BIT(TC_PORT_LEGACY);
else
valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
@@ -238,83 +365,79 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
/* If live status mismatches the VBT flag, trust the live status. */
drm_dbg_kms(&i915->drm,
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
- dig_port->tc_port_name, live_status_mask, valid_hpd_mask);
+ tc->port_name, live_status_mask, valid_hpd_mask);
- dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
+ tc->legacy_port = !tc->legacy_port;
}
-static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
+static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
- u32 mask = 0;
- u32 val;
-
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
- if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, nothing connected\n",
- dig_port->tc_port_name);
- return mask;
+ /*
+	 * Each Modular FIA instance houses 2 TC ports. On SoCs that have more
+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (modular_fia) {
+ tc->phy_fia = tc_port / 2;
+ tc->phy_fia_idx = tc_port % 2;
+ } else {
+ tc->phy_fia = FIA1;
+ tc->phy_fia_idx = tc_port;
}
+}
- if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
- mask |= BIT(TC_PORT_TBT_ALT);
- if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
- mask |= BIT(TC_PORT_DP_ALT);
-
- if (intel_uncore_read(uncore, SDEISR) & isr_bit)
- mask |= BIT(TC_PORT_LEGACY);
+/*
+ * ICL TC PHY handlers
+ * -------------------
+ */
+static enum intel_display_power_domain
+icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
- /* The sink can be connected only in a single mode. */
- if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
- tc_port_fixup_legacy_flag(dig_port, mask);
+ if (tc->legacy_port)
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
- return mask;
+ return POWER_DOMAIN_TC_COLD_OFF;
}
-static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
+static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
- struct intel_uncore *uncore = &i915->uncore;
- u32 val, mask = 0;
+ intel_wakeref_t wakeref;
+ u32 fia_isr;
+ u32 pch_isr;
+ u32 mask = 0;
- /*
- * On ADL-P HW/FW will wake from TCCOLD to complete the read access of
- * registers in IOM. Note that this doesn't apply to PHY and FIA
- * registers.
- */
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
- if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
- mask |= BIT(TC_PORT_DP_ALT);
- if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
+ with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
+ fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ pch_isr = intel_de_read(i915, SDEISR);
+ }
+
+ if (fia_isr == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, nothing connected\n",
+ tc->port_name);
+ return mask;
+ }
+
+ if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
mask |= BIT(TC_PORT_TBT_ALT);
+ if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
+ mask |= BIT(TC_PORT_DP_ALT);
- if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ if (pch_isr & isr_bit)
mask |= BIT(TC_PORT_LEGACY);
- /* The sink can be connected only in a single mode. */
- if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
- tc_port_fixup_legacy_flag(dig_port, mask);
-
return mask;
}
-static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- if (IS_ALDERLAKE_P(i915))
- return adl_tc_port_live_status_mask(dig_port);
-
- return icl_tc_port_live_status_mask(dig_port);
-}
-
/*
* Return the PHY status complete flag indicating that display can acquire the
* PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
@@ -323,146 +446,80 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
* owned by the TBT subsystem and so switching the ownership to display is not
* required.
*/
-static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
+ struct drm_i915_private *i915 = tc_to_i915(tc);
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
- if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, assuming not complete\n",
- dig_port->tc_port_name);
- return false;
- }
-
- return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
-}
-
-/*
- * Return the PHY status complete flag indicating that display can acquire the
- * PHY ownership. The IOM firmware sets this flag when it's ready to switch
- * the ownership to display, regardless of what sink is connected (TBT-alt,
- * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
- * subsystem and so switching the ownership to display is not required.
- */
-static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
- struct intel_uncore *uncore = &i915->uncore;
- u32 val;
+ assert_tc_cold_blocked(tc);
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, assuming not complete\n",
- dig_port->tc_port_name);
+ "Port %s: PHY in TCCOLD, assuming not ready\n",
+ tc->port_name);
return false;
}
- return val & TCSS_DDI_STATUS_READY;
+ return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
-static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- if (IS_ALDERLAKE_P(i915))
- return adl_tc_phy_status_complete(dig_port);
-
- return icl_tc_phy_status_complete(dig_port);
-}
-
-static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
+static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
+ struct drm_i915_private *i915 = tc_to_i915(tc);
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ assert_tc_cold_blocked(tc);
+
+ val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
- dig_port->tc_port_name, take ? "take" : "release");
+ tc->port_name, take ? "take" : "release");
return false;
}
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
if (take)
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
-
- intel_uncore_write(uncore,
- PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
-
- return true;
-}
-
-static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
- bool take)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- enum port port = dig_port->base.port;
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
- intel_uncore_rmw(uncore, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
- take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
+ intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
return true;
}
-static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- if (IS_ALDERLAKE_P(i915))
- return adl_tc_phy_take_ownership(dig_port, take);
-
- return icl_tc_phy_take_ownership(dig_port, take);
-}
-
-static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
+ struct drm_i915_private *i915 = tc_to_i915(tc);
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ assert_tc_cold_blocked(tc);
+
+ val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, assume safe mode\n",
- dig_port->tc_port_name);
- return true;
+ "Port %s: PHY in TCCOLD, assume not owned\n",
+ tc->port_name);
+ return false;
}
- return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+ return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
-static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- enum port port = dig_port->base.port;
- u32 val;
-
- val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
- return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
-}
+ enum intel_display_power_domain domain;
+ intel_wakeref_t tc_cold_wref;
-static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ tc_cold_wref = __tc_cold_block(tc, &domain);
- if (IS_ALDERLAKE_P(i915))
- return adl_tc_phy_is_owned(dig_port);
+ tc->mode = tc_phy_get_current_mode(tc);
+ if (tc->mode != TC_PORT_DISCONNECTED)
+ tc->lock_wakeref = tc_cold_block(tc);
- return icl_tc_phy_is_owned(dig_port);
+ __tc_cold_unblock(tc, domain, tc_cold_wref);
}
/*
@@ -476,151 +533,590 @@ static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
* connect and disconnect to cleanly transfer ownership with the controller and
* set the type-C power state.
*/
-static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
- int required_lanes)
+static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
+ int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- u32 live_status_mask;
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
int max_lanes;
- if (!tc_phy_status_complete(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
- dig_port->tc_port_name);
- goto out_set_tbt_alt_mode;
- }
-
- live_status_mask = tc_port_live_status_mask(dig_port);
- if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
- !dig_port->tc_legacy_port) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
- dig_port->tc_port_name, live_status_mask);
- goto out_set_tbt_alt_mode;
- }
-
- if (!tc_phy_take_ownership(dig_port, true) &&
- !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
- goto out_set_tbt_alt_mode;
-
max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
- if (dig_port->tc_legacy_port) {
+ if (tc->mode == TC_PORT_LEGACY) {
drm_WARN_ON(&i915->drm, max_lanes != 4);
- dig_port->tc_mode = TC_PORT_LEGACY;
-
- return;
+ return true;
}
+ drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
+
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
- if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
+ if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
- dig_port->tc_port_name);
- goto out_release_phy;
+ tc->port_name);
+ return false;
}
if (max_lanes < required_lanes) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY max lanes %d < required lanes %d\n",
- dig_port->tc_port_name,
+ tc->port_name,
max_lanes, required_lanes);
- goto out_release_phy;
+ return false;
}
- dig_port->tc_mode = TC_PORT_DP_ALT;
+ return true;
+}
+
+static bool icl_tc_phy_connect(struct intel_tc_port *tc,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ tc->lock_wakeref = tc_cold_block(tc);
+
+ if (tc->mode == TC_PORT_TBT_ALT)
+ return true;
- return;
+ if ((!tc_phy_is_ready(tc) ||
+ !icl_tc_phy_take_ownership(tc, true)) &&
+ !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
+ tc->port_name,
+ str_yes_no(tc_phy_is_ready(tc)));
+ goto out_unblock_tc_cold;
+ }
+
+ if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
+ goto out_release_phy;
+
+ return true;
out_release_phy:
- tc_phy_take_ownership(dig_port, false);
-out_set_tbt_alt_mode:
- dig_port->tc_mode = TC_PORT_TBT_ALT;
+ icl_tc_phy_take_ownership(tc, false);
+out_unblock_tc_cold:
+ tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
+
+ return false;
}
/*
* See the comment at the connect function. This implements the Disconnect
* Flow.
*/
-static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
+static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
- switch (dig_port->tc_mode) {
+ switch (tc->mode) {
case TC_PORT_LEGACY:
case TC_PORT_DP_ALT:
- tc_phy_take_ownership(dig_port, false);
+ icl_tc_phy_take_ownership(tc, false);
fallthrough;
case TC_PORT_TBT_ALT:
- dig_port->tc_mode = TC_PORT_DISCONNECTED;
- fallthrough;
- case TC_PORT_DISCONNECTED:
+ tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
break;
default:
- MISSING_CASE(dig_port->tc_mode);
+ MISSING_CASE(tc->mode);
}
}
-static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+static void icl_tc_phy_init(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ tc_phy_load_fia_params(tc, false);
+}
+
+static const struct intel_tc_phy_ops icl_tc_phy_ops = {
+ .cold_off_domain = icl_tc_phy_cold_off_domain,
+ .hpd_live_status = icl_tc_phy_hpd_live_status,
+ .is_ready = icl_tc_phy_is_ready,
+ .is_owned = icl_tc_phy_is_owned,
+ .get_hw_state = icl_tc_phy_get_hw_state,
+ .connect = icl_tc_phy_connect,
+ .disconnect = icl_tc_phy_disconnect,
+ .init = icl_tc_phy_init,
+};
+
+/*
+ * TGL TC PHY handlers
+ * -------------------
+ */
+static enum intel_display_power_domain
+tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
+{
+ return POWER_DOMAIN_TC_COLD_OFF;
+}
+
+static void tgl_tc_phy_init(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
+ val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
+
+ drm_WARN_ON(&i915->drm, val == 0xffffffff);
+
+ tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
+}
+
+static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
+ .cold_off_domain = tgl_tc_phy_cold_off_domain,
+ .hpd_live_status = icl_tc_phy_hpd_live_status,
+ .is_ready = icl_tc_phy_is_ready,
+ .is_owned = icl_tc_phy_is_owned,
+ .get_hw_state = icl_tc_phy_get_hw_state,
+ .connect = icl_tc_phy_connect,
+ .disconnect = icl_tc_phy_disconnect,
+ .init = tgl_tc_phy_init,
+};
+
+/*
+ * ADLP TC PHY handlers
+ * --------------------
+ */
+static enum intel_display_power_domain
+adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
+
+ if (tc->mode != TC_PORT_TBT_ALT)
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+
+ return POWER_DOMAIN_TC_COLD_OFF;
+}
+
+static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
+ enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
+ u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ intel_wakeref_t wakeref;
+ u32 cpu_isr;
+ u32 pch_isr;
+ u32 mask = 0;
- if (!tc_phy_status_complete(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
- dig_port->tc_port_name);
- return dig_port->tc_mode == TC_PORT_TBT_ALT;
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
+ pch_isr = intel_de_read(i915, SDEISR);
}
- /* On ADL-P the PHY complete flag is set in TBT mode as well. */
- if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
- return true;
+ if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
+ mask |= BIT(TC_PORT_DP_ALT);
+ if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
+ mask |= BIT(TC_PORT_TBT_ALT);
+
+ if (pch_isr & pch_isr_bit)
+ mask |= BIT(TC_PORT_LEGACY);
+
+ return mask;
+}
+
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when it's ready to switch
+ * the ownership to display, regardless of what sink is connected (TBT-alt,
+ * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
+ * subsystem and so switching the ownership to display is not required.
+ */
+static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ u32 val;
- if (!tc_phy_is_owned(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
- dig_port->tc_port_name);
+ assert_display_core_power_enabled(tc);
+ val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not ready\n",
+ tc->port_name);
return false;
}
- return dig_port->tc_mode == TC_PORT_DP_ALT ||
- dig_port->tc_mode == TC_PORT_LEGACY;
+ return val & TCSS_DDI_STATUS_READY;
+}
+
+static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
+ bool take)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+
+ assert_tc_port_power_enabled(tc);
+
+ intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
+
+ return true;
+}
+
+static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+ u32 val;
+
+ assert_tc_port_power_enabled(tc);
+
+ val = intel_de_read(i915, DDI_BUF_CTL(port));
+ return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+}
+
+static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum intel_display_power_domain port_power_domain =
+ tc_port_power_domain(tc);
+ intel_wakeref_t port_wakeref;
+
+ port_wakeref = intel_display_power_get(i915, port_power_domain);
+
+ tc->mode = tc_phy_get_current_mode(tc);
+ if (tc->mode != TC_PORT_DISCONNECTED)
+ tc->lock_wakeref = tc_cold_block(tc);
+
+ intel_display_power_put(i915, port_power_domain, port_wakeref);
+}
+
+static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum intel_display_power_domain port_power_domain =
+ tc_port_power_domain(tc);
+ intel_wakeref_t port_wakeref;
+
+ if (tc->mode == TC_PORT_TBT_ALT) {
+ tc->lock_wakeref = tc_cold_block(tc);
+ return true;
+ }
+
+ port_wakeref = intel_display_power_get(i915, port_power_domain);
+
+ if (!adlp_tc_phy_take_ownership(tc, true) &&
+ !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
+ tc->port_name);
+ goto out_put_port_power;
+ }
+
+ if (!tc_phy_is_ready(tc) &&
+ !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ tc->port_name);
+ goto out_release_phy;
+ }
+
+ tc->lock_wakeref = tc_cold_block(tc);
+
+ if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
+ goto out_unblock_tc_cold;
+
+ intel_display_power_put(i915, port_power_domain, port_wakeref);
+
+ return true;
+
+out_unblock_tc_cold:
+ tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
+out_release_phy:
+ adlp_tc_phy_take_ownership(tc, false);
+out_put_port_power:
+ intel_display_power_put(i915, port_power_domain, port_wakeref);
+
+ return false;
+}
+
+static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum intel_display_power_domain port_power_domain =
+ tc_port_power_domain(tc);
+ intel_wakeref_t port_wakeref;
+
+ port_wakeref = intel_display_power_get(i915, port_power_domain);
+
+ tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
+
+ switch (tc->mode) {
+ case TC_PORT_LEGACY:
+ case TC_PORT_DP_ALT:
+ adlp_tc_phy_take_ownership(tc, false);
+ fallthrough;
+ case TC_PORT_TBT_ALT:
+ break;
+ default:
+ MISSING_CASE(tc->mode);
+ }
+
+ intel_display_power_put(i915, port_power_domain, port_wakeref);
+}
+
+static void adlp_tc_phy_init(struct intel_tc_port *tc)
+{
+ tc_phy_load_fia_params(tc, true);
+}
+
+static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
+ .cold_off_domain = adlp_tc_phy_cold_off_domain,
+ .hpd_live_status = adlp_tc_phy_hpd_live_status,
+ .is_ready = adlp_tc_phy_is_ready,
+ .is_owned = adlp_tc_phy_is_owned,
+ .get_hw_state = adlp_tc_phy_get_hw_state,
+ .connect = adlp_tc_phy_connect,
+ .disconnect = adlp_tc_phy_disconnect,
+ .init = adlp_tc_phy_init,
+};
+
+/*
+ * Generic TC PHY handlers
+ * -----------------------
+ */
+static enum intel_display_power_domain
+tc_phy_cold_off_domain(struct intel_tc_port *tc)
+{
+ return tc->phy_ops->cold_off_domain(tc);
+}
+
+static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ u32 mask;
+
+ mask = tc->phy_ops->hpd_live_status(tc);
+
+ /* The sink can be connected only in a single mode. */
+ drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
+
+ return mask;
+}
+
+static bool tc_phy_is_ready(struct intel_tc_port *tc)
+{
+ return tc->phy_ops->is_ready(tc);
+}
+
+static bool tc_phy_is_owned(struct intel_tc_port *tc)
+{
+ return tc->phy_ops->is_owned(tc);
+}
+
+static void tc_phy_get_hw_state(struct intel_tc_port *tc)
+{
+ tc->phy_ops->get_hw_state(tc);
+}
+
+static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
+ bool phy_is_ready, bool phy_is_owned)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+
+ return phy_is_ready && phy_is_owned;
+}
+
+static bool tc_phy_is_connected(struct intel_tc_port *tc,
+ enum icl_port_dpll_id port_pll_type)
+{
+ struct intel_encoder *encoder = &tc->dig_port->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ bool phy_is_ready = tc_phy_is_ready(tc);
+ bool phy_is_owned = tc_phy_is_owned(tc);
+ bool is_connected;
+
+ if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
+ is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
+ else
+ is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
+
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
+ tc->port_name,
+ str_yes_no(is_connected),
+ str_yes_no(phy_is_ready),
+ str_yes_no(phy_is_owned),
+ port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
+
+ return is_connected;
+}
+
+static void tc_phy_wait_for_ready(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ if (wait_for(tc_phy_is_ready(tc), 100))
+ drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ tc->port_name);
}
static enum tc_port_mode
-intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
+hpd_mask_to_tc_mode(u32 live_status_mask)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- u32 live_status_mask = tc_port_live_status_mask(dig_port);
- enum tc_port_mode mode;
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return TC_PORT_DISCONNECTED;
+}
+
+static enum tc_port_mode
+tc_phy_hpd_live_mode(struct intel_tc_port *tc)
+{
+ u32 live_status_mask = tc_phy_hpd_live_status(tc);
+
+ return hpd_mask_to_tc_mode(live_status_mask);
+}
+
+static enum tc_port_mode
+get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
+ enum tc_port_mode live_mode)
+{
+ switch (live_mode) {
+ case TC_PORT_LEGACY:
+ case TC_PORT_DP_ALT:
+ return live_mode;
+ default:
+ MISSING_CASE(live_mode);
+ fallthrough;
+ case TC_PORT_TBT_ALT:
+ case TC_PORT_DISCONNECTED:
+ if (tc->legacy_port)
+ return TC_PORT_LEGACY;
+ else
+ return TC_PORT_DP_ALT;
+ }
+}
- if (!tc_phy_is_owned(dig_port) ||
- drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
+static enum tc_port_mode
+get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
+ enum tc_port_mode live_mode)
+{
+ switch (live_mode) {
+ case TC_PORT_LEGACY:
+ return TC_PORT_DISCONNECTED;
+ case TC_PORT_DP_ALT:
+ case TC_PORT_TBT_ALT:
return TC_PORT_TBT_ALT;
+ default:
+ MISSING_CASE(live_mode);
+ fallthrough;
+ case TC_PORT_DISCONNECTED:
+ if (tc->legacy_port)
+ return TC_PORT_DISCONNECTED;
+ else
+ return TC_PORT_TBT_ALT;
+ }
+}
+
+static enum tc_port_mode
+tc_phy_get_current_mode(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
+ bool phy_is_ready;
+ bool phy_is_owned;
+ enum tc_port_mode mode;
+
+ /*
+ * For legacy ports the IOM firmware initializes the PHY during boot-up
+	 * and system resume, whether or not a sink is connected. Wait here for
+	 * that initialization to complete.
+ */
+ if (tc->legacy_port)
+ tc_phy_wait_for_ready(tc);
- mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
- if (live_status_mask) {
- enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+ phy_is_ready = tc_phy_is_ready(tc);
+ phy_is_owned = tc_phy_is_owned(tc);
- if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
- mode = live_mode;
+ if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
+ mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
+ } else {
+ drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
}
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
+ tc->port_name,
+ tc_port_mode_name(mode),
+ str_yes_no(phy_is_ready),
+ str_yes_no(phy_is_owned),
+ tc_port_mode_name(live_mode));
+
return mode;
}
+static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
+{
+ if (tc->legacy_port)
+ return TC_PORT_LEGACY;
+
+ return TC_PORT_TBT_ALT;
+}
+
static enum tc_port_mode
-intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
+hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
{
- u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
- if (live_status_mask)
- return fls(live_status_mask) - 1;
+ if (mode != TC_PORT_DISCONNECTED)
+ return mode;
- return TC_PORT_TBT_ALT;
+ return default_tc_mode(tc);
+}
+
+static enum tc_port_mode
+tc_phy_get_target_mode(struct intel_tc_port *tc)
+{
+ u32 live_status_mask = tc_phy_hpd_live_status(tc);
+
+ return hpd_mask_to_target_mode(tc, live_status_mask);
}
-static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
+static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ u32 live_status_mask = tc_phy_hpd_live_status(tc);
+ bool connected;
+
+ tc_port_fixup_legacy_flag(tc, live_status_mask);
+
+ tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);
+
+ connected = tc->phy_ops->connect(tc, required_lanes);
+ if (!connected && tc->mode != default_tc_mode(tc)) {
+ tc->mode = default_tc_mode(tc);
+ connected = tc->phy_ops->connect(tc, required_lanes);
+ }
+
+ drm_WARN_ON(&i915->drm, !connected);
+}
+
+static void tc_phy_disconnect(struct intel_tc_port *tc)
+{
+ if (tc->mode != TC_PORT_DISCONNECTED) {
+ tc->phy_ops->disconnect(tc);
+ tc->mode = TC_PORT_DISCONNECTED;
+ }
+}
+
+static void tc_phy_init(struct intel_tc_port *tc)
+{
+ mutex_lock(&tc->lock);
+ tc->phy_ops->init(tc);
+ mutex_unlock(&tc->lock);
+}
+
+static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
int required_lanes, bool force_disconnect)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port_mode old_tc_mode = dig_port->tc_mode;
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
+ enum tc_port_mode old_tc_mode = tc->mode;
intel_display_power_flush_work(i915);
if (!intel_tc_cold_requires_aux_pw(dig_port)) {
@@ -632,58 +1128,48 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
drm_WARN_ON(&i915->drm, aux_powered);
}
- icl_tc_phy_disconnect(dig_port);
+ tc_phy_disconnect(tc);
if (!force_disconnect)
- icl_tc_phy_connect(dig_port, required_lanes);
+ tc_phy_connect(tc, required_lanes);
drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
- dig_port->tc_port_name,
+ tc->port_name,
tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(dig_port->tc_mode));
+ tc_port_mode_name(tc->mode));
}
-static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
- return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+ return tc_phy_get_target_mode(tc) != tc->mode;
}
-static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
+static void intel_tc_port_update_mode(struct intel_tc_port *tc,
int required_lanes, bool force_disconnect)
{
- enum intel_display_power_domain domain;
- intel_wakeref_t wref;
- bool needs_reset = force_disconnect;
-
- if (!needs_reset) {
- /* Get power domain required to check the hotplug live status. */
- wref = tc_cold_block(dig_port, &domain);
- needs_reset = intel_tc_port_needs_reset(dig_port);
- tc_cold_unblock(dig_port, domain, wref);
- }
-
- if (!needs_reset)
- return;
-
- /* Get power domain required for resetting the mode. */
- wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);
-
- intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);
+ if (force_disconnect ||
+ intel_tc_port_needs_reset(tc))
+ intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
+}
- /* Get power domain matching the new mode after reset. */
- tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
- fetch_and_zero(&dig_port->tc_lock_wakeref));
- if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
- dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
- &dig_port->tc_lock_power_domain);
+static void __intel_tc_port_get_link(struct intel_tc_port *tc)
+{
+ tc->link_refcount++;
+}
- tc_cold_unblock(dig_port, domain, wref);
+static void __intel_tc_port_put_link(struct intel_tc_port *tc)
+{
+ tc->link_refcount--;
}
-static void
-intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
- int refcount)
+static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
- dig_port->tc_link_refcount = refcount;
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
+
+ assert_tc_port_power_enabled(tc);
+
+ return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ DDI_BUF_CTL_ENABLE;
}
/**
@@ -696,85 +1182,119 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- intel_wakeref_t tc_cold_wref;
- enum intel_display_power_domain domain;
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+ bool update_mode = false;
+
+ mutex_lock(&tc->lock);
- mutex_lock(&dig_port->tc_lock);
+ drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, tc->lock_wakeref);
+ drm_WARN_ON(&i915->drm, tc->link_refcount);
- drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
+ tc_phy_get_hw_state(tc);
+ /*
+ * Save the initial mode for the state check in
+ * intel_tc_port_sanitize_mode().
+ */
+ tc->init_mode = tc->mode;
+
+ /*
+ * The PHY needs to be connected for AUX to work during HW readout and
+ * MST topology resume, but the PHY mode can only be changed if the
+ * port is disabled.
+ *
+ * An exception is the case where BIOS leaves the PHY incorrectly
+ * disconnected on an enabled legacy port. Work around that by
+ * connecting the PHY even though the port is enabled. This doesn't
+ * cause a problem as the PHY ownership state is ignored by the
+ * IOM/TCSS firmware (only display can own the PHY in that case).
+ */
+ if (!tc_port_is_enabled(tc)) {
+ update_mode = true;
+ } else if (tc->mode == TC_PORT_DISCONNECTED) {
+ drm_WARN_ON(&i915->drm, !tc->legacy_port);
+ drm_err(&i915->drm,
+ "Port %s: PHY disconnected on enabled port, connecting it\n",
+ tc->port_name);
+ update_mode = true;
+ }
+
+ if (update_mode)
+ intel_tc_port_update_mode(tc, 1, false);
+
+ /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
+ __intel_tc_port_get_link(tc);
- tc_cold_wref = tc_cold_block(dig_port, &domain);
+ mutex_unlock(&tc->lock);
+}
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
- /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
- intel_tc_port_link_init_refcount(dig_port, 1);
- dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+static bool tc_port_has_active_links(struct intel_tc_port *tc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
+ enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
+ int active_links = 0;
- tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ if (dig_port->dp.is_mst) {
+ /* TODO: get the PLL type for MST, once HW readout is done for it. */
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ } else if (crtc_state && crtc_state->hw.active) {
+ pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
+ active_links = 1;
+ }
- drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ if (active_links && !tc_phy_is_connected(tc, pll_type))
+ drm_err(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ tc->port_name, active_links);
- mutex_unlock(&dig_port->tc_lock);
+ return active_links;
}
/**
* intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
* @dig_port: digital port
+ * @crtc_state: atomic state of CRTC connected to @dig_port
*
* Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
* loading and system resume:
* If the encoder is enabled keep the TypeC mode/PHY connected state locked until
* the encoder is disabled.
* If the encoder is disabled make sure the PHY is disconnected.
+ * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
*/
-void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_encoder *encoder = &dig_port->base;
- int active_links = 0;
-
- mutex_lock(&dig_port->tc_lock);
-
- if (dig_port->dp.is_mst)
- active_links = intel_dp_mst_encoder_active_links(dig_port);
- else if (encoder->base.crtc)
- active_links = to_intel_crtc(encoder->base.crtc)->active;
+ struct intel_tc_port *tc = to_tc_port(dig_port);
- drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
- intel_tc_port_link_init_refcount(dig_port, active_links);
+ mutex_lock(&tc->lock);
- if (active_links) {
- if (!icl_tc_phy_is_connected(dig_port))
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- dig_port->tc_port_name, active_links);
- } else {
+ drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
+ if (!tc_port_has_active_links(tc, crtc_state)) {
/*
 * TBT-alt is the default mode in any case where the PHY ownership is
 * not held (regardless of the sink's live connection state), so we'll
 * just switch from it to disconnected mode here without a note.
*/
- if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (tc->init_mode != TC_PORT_TBT_ALT &&
+ tc->init_mode != TC_PORT_DISCONNECTED)
drm_dbg_kms(&i915->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
- icl_tc_phy_disconnect(dig_port);
-
- tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
- fetch_and_zero(&dig_port->tc_lock_wakeref));
+ tc->port_name,
+ tc_port_mode_name(tc->init_mode));
+ tc_phy_disconnect(tc);
+ __intel_tc_port_put_link(tc);
}
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ tc->port_name,
+ tc_port_mode_name(tc->mode));
- mutex_unlock(&dig_port->tc_lock);
+ mutex_unlock(&tc->lock);
}
/*
@@ -787,63 +1307,73 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
-bool intel_tc_port_connected(struct intel_encoder *encoder)
+bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- bool is_connected;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+ u32 mask = ~0;
- intel_tc_port_lock(dig_port);
+ drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
- is_connected = tc_port_live_status_mask(dig_port) &
- BIT(dig_port->tc_mode);
+ if (tc->mode != TC_PORT_DISCONNECTED)
+ mask = BIT(tc->mode);
- intel_tc_port_unlock(dig_port);
+ return tc_phy_hpd_live_status(tc) & mask;
+}
+
+bool intel_tc_port_connected(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+ bool is_connected;
+
+ mutex_lock(&tc->lock);
+ is_connected = intel_tc_port_connected_locked(encoder);
+ mutex_unlock(&tc->lock);
return is_connected;
}
-static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
+static void __intel_tc_port_lock(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = tc_to_i915(tc);
- mutex_lock(&dig_port->tc_lock);
+ mutex_lock(&tc->lock);
- cancel_delayed_work(&dig_port->tc_disconnect_phy_work);
+ cancel_delayed_work(&tc->disconnect_phy_work);
- if (!dig_port->tc_link_refcount)
- intel_tc_port_update_mode(dig_port, required_lanes,
+ if (!tc->link_refcount)
+ intel_tc_port_update_mode(tc, required_lanes,
false);
- drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
- !tc_phy_is_owned(dig_port));
+ drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
+ !tc_phy_is_owned(tc));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
- __intel_tc_port_lock(dig_port, 1);
+ __intel_tc_port_lock(to_tc_port(dig_port), 1);
}
-/**
- * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
- * @dig_port: digital port
- *
+/*
* Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This happens in a delayed
 * manner after each AUX transaction and modeset disable.
*/
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
- struct intel_digital_port *dig_port =
- container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);
+ struct intel_tc_port *tc =
+ container_of(work, struct intel_tc_port, disconnect_phy_work.work);
- mutex_lock(&dig_port->tc_lock);
+ mutex_lock(&tc->lock);
- if (!dig_port->tc_link_refcount)
- intel_tc_port_update_mode(dig_port, 1, true);
+ if (!tc->link_refcount)
+ intel_tc_port_update_mode(tc, 1, true);
- mutex_unlock(&dig_port->tc_lock);
+ mutex_unlock(&tc->lock);
}
/**
@@ -854,105 +1384,91 @@ static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
*/
void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
- flush_delayed_work(&dig_port->tc_disconnect_phy_work);
+ flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
}
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
- if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
- queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
+ queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
msecs_to_jiffies(1000));
- mutex_unlock(&dig_port->tc_lock);
+ mutex_unlock(&tc->lock);
}
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
- return mutex_is_locked(&dig_port->tc_lock) ||
- dig_port->tc_link_refcount;
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ return mutex_is_locked(&tc->lock) ||
+ tc->link_refcount;
}
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
- __intel_tc_port_lock(dig_port, required_lanes);
- dig_port->tc_link_refcount++;
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ __intel_tc_port_lock(tc, required_lanes);
+ __intel_tc_port_get_link(tc);
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
intel_tc_port_lock(dig_port);
- --dig_port->tc_link_refcount;
+ __intel_tc_port_put_link(tc);
intel_tc_port_unlock(dig_port);
-
- /*
- * Disconnecting the PHY after the PHY's PLL gets disabled may
- * hang the system on ADL-P, so disconnect the PHY here synchronously.
- * TODO: remove this once the root cause of the ordering requirement
- * is found/fixed.
- */
- intel_tc_port_flush_work(dig_port);
}
-static bool
-tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
- enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
- u32 val;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_tc_port *tc;
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
- if (!INTEL_INFO(i915)->display.has_modular_fia)
- return false;
+ if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ return -EINVAL;
- mutex_lock(&dig_port->tc_lock);
- wakeref = tc_cold_block(dig_port, &domain);
- val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
- tc_cold_unblock(dig_port, domain, wakeref);
- mutex_unlock(&dig_port->tc_lock);
+ tc = kzalloc(sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
- drm_WARN_ON(&i915->drm, val == 0xffffffff);
+ dig_port->tc = tc;
+ tc->dig_port = dig_port;
- return val & MODULAR_FIA_MASK;
-}
+ if (DISPLAY_VER(i915) >= 13)
+ tc->phy_ops = &adlp_tc_phy_ops;
+ else if (DISPLAY_VER(i915) >= 12)
+ tc->phy_ops = &tgl_tc_phy_ops;
+ else
+ tc->phy_ops = &icl_tc_phy_ops;
-static void
-tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
-{
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ snprintf(tc->port_name, sizeof(tc->port_name),
+ "%c/TC#%d", port_name(port), tc_port + 1);
- /*
- * Each Modular FIA instance houses 2 TC ports. In SOC that has more
- * than two TC ports, there are multiple instances of Modular FIA.
- */
- if (tc_has_modular_fia(i915, dig_port)) {
- dig_port->tc_phy_fia = tc_port / 2;
- dig_port->tc_phy_fia_idx = tc_port % 2;
- } else {
- dig_port->tc_phy_fia = FIA1;
- dig_port->tc_phy_fia_idx = tc_port;
- }
-}
+ mutex_init(&tc->lock);
+ INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
+ tc->legacy_port = is_legacy;
+ tc->mode = TC_PORT_DISCONNECTED;
+ tc->link_refcount = 0;
-void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ tc_phy_init(tc);
- if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
- return;
+ intel_tc_port_init_mode(dig_port);
- snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
- "%c/TC#%d", port_name(port), tc_port + 1);
+ return 0;
+}
- mutex_init(&dig_port->tc_lock);
- INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
- dig_port->tc_legacy_port = is_legacy;
- dig_port->tc_mode = TC_PORT_DISCONNECTED;
- dig_port->tc_link_refcount = 0;
- tc_port_load_fia_params(i915, dig_port);
+void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
+{
+ intel_tc_port_flush_work(dig_port);
- intel_tc_port_init_mode(dig_port);
+ kfree(dig_port->tc);
+ dig_port->tc = NULL;
}
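For orientation, a minimal sketch of how the per-platform PHY vtable introduced above is selected; the ops table names and DISPLAY_VER() thresholds are taken from the intel_tc_port_init() hunk, while the helper function itself is purely illustrative:

/* Illustrative only -- mirrors the selection done in intel_tc_port_init() above. */
static const struct intel_tc_phy_ops *
example_pick_tc_phy_ops(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 13)
		return &adlp_tc_phy_ops;	/* DDI_BUF_CTL ownership, AUX-based TC-cold blocking */
	else if (DISPLAY_VER(i915) >= 12)
		return &tgl_tc_phy_ops;		/* ICL flow, but modular FIA and TC_COLD_OFF domain */
	else
		return &icl_tc_phy_ops;		/* FIA-based ownership/ready flags */
}

The generic tc_phy_*() helpers then dispatch every platform-specific step (cold-off domain, HPD live status, ownership, connect/disconnect) through tc->phy_ops, as shown in the "Generic TC PHY handlers" hunk.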
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index d54082e2d5e8..dd0810f9ea95 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -6,9 +6,9 @@
#ifndef __INTEL_TC_H__
#define __INTEL_TC_H__
-#include <linux/mutex.h>
#include <linux/types.h>
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
@@ -17,6 +17,7 @@ bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
+bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
@@ -25,7 +26,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
-void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
@@ -34,7 +36,8 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port,
void intel_tc_port_put_link(struct intel_digital_port *dig_port);
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
-void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+void intel_tc_port_cleanup(struct intel_digital_port *dig_port);
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
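Since intel_tc_port_init() now returns an error code and intel_tc_port_cleanup() has been added, callers are expected to check the result and pair the two. A hedged sketch of the expected usage, where the surrounding context and the error label are hypothetical:

	/* Hypothetical caller, e.g. while initializing a DDI encoder: */
	if (intel_phy_is_tc(i915, phy)) {
		ret = intel_tc_port_init(dig_port, is_legacy);
		if (ret)
			goto err_cleanup;	/* hypothetical unwind label */
	}

	/* ...and on encoder teardown: */
	intel_tc_port_cleanup(dig_port);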
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 4d2101ca1692..557ec5b62afa 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -35,8 +35,8 @@
#include <drm/drm_edid.h>
#include "i915_drv.h"
-#include "i915_reg.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -44,6 +44,7 @@
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
+#include "intel_tv_regs.h"
enum tv_margin {
TV_MARGIN_LEFT, TV_MARGIN_TOP,
@@ -930,8 +931,7 @@ intel_enable_tv(struct intel_atomic_state *state,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);
}
static void
@@ -943,8 +943,7 @@ intel_disable_tv(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
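The two TV_CTL conversions above rely on intel_de_rmw(i915, reg, clear, set), which reads the register, clears the bits in 'clear', sets the bits in 'set' and writes the value back, so the replaced open-coded sequences are equivalent:

	/* old enable path */
	intel_de_write(dev_priv, TV_CTL,
		       intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
	/* new enable path */
	intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);

	/* old disable path */
	intel_de_write(dev_priv, TV_CTL,
		       intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
	/* new disable path */
	intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);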
@@ -1905,10 +1904,10 @@ static void intel_tv_add_properties(struct drm_connector *connector)
tv_format_names[i] = tv_modes[i].name;
}
- drm_mode_create_tv_properties(&i915->drm, i, tv_format_names);
+ drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_mode_property,
+ i915->drm.mode_config.legacy_tv_mode_property,
conn_state->tv.mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
diff --git a/drivers/gpu/drm/i915/display/intel_tv_regs.h b/drivers/gpu/drm/i915/display/intel_tv_regs.h
new file mode 100644
index 000000000000..ab25aeb3c423
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tv_regs.h
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_TV_REGS_H__
+#define __INTEL_TV_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* TV port control */
+#define TV_CTL _MMIO(0x68000)
+/* Enables the TV encoder */
+# define TV_ENC_ENABLE (1 << 31)
+/* Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPE_SEL_SHIFT 30
+# define TV_ENC_PIPE_SEL_MASK (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30)
+/* Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
+/* Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
+/* Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
+/* Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
+# define TV_TRILEVEL_SYNC (1 << 21)
+/* Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC (1 << 20)
+/* Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X (0 << 18)
+/* Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X (1 << 18)
+/* Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE (2 << 18)
+/* Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X (3 << 18)
+# define TV_OVERSAMPLE_MASK (3 << 18)
+/* Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE (1 << 17)
+/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
+# define TV_PAL_BURST (1 << 16)
+/* Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK (7 << 12)
+/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX (1 << 11)
+/*
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX (1 << 10)
+/* Bits that must be preserved by software */
+# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
+# define TV_FUSE_STATE_MASK (3 << 4)
+/* Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED (0 << 4)
+/* Read-only state that reports that Macrovision is disabled in hardware */
+# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
+/* Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED (2 << 4)
+/* Normal operation */
+# define TV_TEST_MODE_NORMAL (0 << 0)
+/* Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1 (1 << 0)
+/* Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2 (2 << 0)
+/* Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3 (3 << 0)
+/* Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4 (4 << 0)
+/* Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5 (5 << 0)
+/*
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
+# define TV_TEST_MODE_MASK (7 << 0)
+
+#define TV_DAC _MMIO(0x68004)
+# define TV_DAC_SAVE 0x00ffff00
+/*
+ * Reports that DAC state change logic has reported change (RO).
+ *
+ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
+ */
+# define TVDAC_STATE_CHG (1 << 31)
+# define TVDAC_SENSE_MASK (7 << 28)
+/* Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE (1 << 30)
+/* Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE (1 << 29)
+/* Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE (1 << 28)
+/*
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN (1 << 27)
+/* Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL (1 << 26)
+/* Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL (1 << 25)
+/* Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL (1 << 24)
+/* Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE (1 << 7)
+/* Sets the slew rate. Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST (1 << 6)
+# define DAC_A_1_3_V (0 << 4)
+# define DAC_A_1_1_V (1 << 4)
+# define DAC_A_0_7_V (2 << 4)
+# define DAC_A_MASK (3 << 4)
+# define DAC_B_1_3_V (0 << 2)
+# define DAC_B_1_1_V (1 << 2)
+# define DAC_B_0_7_V (2 << 2)
+# define DAC_B_MASK (3 << 2)
+# define DAC_C_1_3_V (0 << 0)
+# define DAC_C_1_1_V (1 << 0)
+# define DAC_C_0_7_V (2 << 0)
+# define DAC_C_MASK (3 << 0)
+
+/*
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
+#define TV_CSC_Y _MMIO(0x68010)
+# define TV_RY_MASK 0x07ff0000
+# define TV_RY_SHIFT 16
+# define TV_GY_MASK 0x00000fff
+# define TV_GY_SHIFT 0
+
+#define TV_CSC_Y2 _MMIO(0x68014)
+# define TV_BY_MASK 0x07ff0000
+# define TV_BY_SHIFT 16
+/*
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK 0x000003ff
+# define TV_AY_SHIFT 0
+
+#define TV_CSC_U _MMIO(0x68018)
+# define TV_RU_MASK 0x07ff0000
+# define TV_RU_SHIFT 16
+# define TV_GU_MASK 0x000007ff
+# define TV_GU_SHIFT 0
+
+#define TV_CSC_U2 _MMIO(0x6801c)
+# define TV_BU_MASK 0x07ff0000
+# define TV_BU_SHIFT 16
+/*
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK 0x000003ff
+# define TV_AU_SHIFT 0
+
+#define TV_CSC_V _MMIO(0x68020)
+# define TV_RV_MASK 0x0fff0000
+# define TV_RV_SHIFT 16
+# define TV_GV_MASK 0x000007ff
+# define TV_GV_SHIFT 0
+
+#define TV_CSC_V2 _MMIO(0x68024)
+# define TV_BV_MASK 0x07ff0000
+# define TV_BV_SHIFT 16
+/*
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK 0x000007ff
+# define TV_AV_SHIFT 0
+
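As an aside on the coefficient format described above, here is a minimal sketch packing one coefficient into the 11-bit RY field; tv_csc_pack_ry() is a hypothetical helper, and the placement of the 2-bit unsigned exponent above the 9-bit mantissa is an assumption for illustration, not taken from hardware documentation.

/*
 * Illustrative sketch only: encode a coefficient of the form
 * (mantissa / 512) * 2^-exp into the RY field, assuming the 2-bit
 * unsigned exponent sits above the 9-bit mantissa.
 */
static u32 tv_csc_pack_ry(u32 mantissa, u32 exp)
{
	u32 field = ((exp & 0x3) << 9) | (mantissa & 0x1ff);

	return (field << TV_RY_SHIFT) & TV_RY_MASK;
}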
+#define TV_CLR_KNOBS _MMIO(0x68028)
+/* 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK 0xff000000
+# define TV_BRIGHTNESS_SHIFT 24
+/* Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK 0x00ff0000
+# define TV_CONTRAST_SHIFT 16
+/* Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK 0x0000ff00
+# define TV_SATURATION_SHIFT 8
+/* Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK 0x000000ff
+# define TV_HUE_SHIFT 0
+
+#define TV_CLR_LEVEL _MMIO(0x6802c)
+/* Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK 0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT 16
+/* Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK 0x000001ff
+# define TV_BLANK_LEVEL_SHIFT 0
+
+#define TV_H_CTL_1 _MMIO(0x68030)
+/* Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK 0x1fff0000
+# define TV_HSYNC_END_SHIFT 16
+/* Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK 0x00001fff
+# define TV_HTOTAL_SHIFT 0
+
+#define TV_H_CTL_2 _MMIO(0x68034)
+/* Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA (1 << 31)
+/* Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT 16
+# define TV_HBURST_START_MASK 0x1fff0000
+/* Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT 0
+# define TV_HBURST_LEN_MASK 0x0001fff
+
+#define TV_H_CTL_3 _MMIO(0x68038)
+/* End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT 16
+# define TV_HBLANK_END_MASK 0x1fff0000
+/* Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT 0
+# define TV_HBLANK_START_MASK 0x0001fff
+
+#define TV_V_CTL_1 _MMIO(0x6803c)
+/* XXX */
+# define TV_NBR_END_SHIFT 16
+# define TV_NBR_END_MASK 0x07ff0000
+/* XXX */
+# define TV_VI_END_F1_SHIFT 8
+# define TV_VI_END_F1_MASK 0x00003f00
+/* XXX */
+# define TV_VI_END_F2_SHIFT 0
+# define TV_VI_END_F2_MASK 0x0000003f
+
+#define TV_V_CTL_2 _MMIO(0x68040)
+/* Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK 0x07ff0000
+# define TV_VSYNC_LEN_SHIFT 16
+/*
+ * Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK 0x00007f00
+# define TV_VSYNC_START_F1_SHIFT 8
+/*
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK 0x0000007f
+# define TV_VSYNC_START_F2_SHIFT 0
+
+#define TV_V_CTL_3 _MMIO(0x68044)
+/* Enables generation of the equalization signal */
+# define TV_EQUAL_ENA (1 << 31)
+/* Length of vsync, in half lines */
+# define TV_VEQ_LEN_MASK 0x007f0000
+# define TV_VEQ_LEN_SHIFT 16
+/*
+ * Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK 0x0007f00
+# define TV_VEQ_START_F1_SHIFT 8
+/*
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK 0x000007f
+# define TV_VEQ_START_F2_SHIFT 0
+
+#define TV_V_CTL_4 _MMIO(0x68048)
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK 0x003f0000
+# define TV_VBURST_START_F1_SHIFT 16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK 0x000000ff
+# define TV_VBURST_END_F1_SHIFT 0
+
+#define TV_V_CTL_5 _MMIO(0x6804c)
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK 0x003f0000
+# define TV_VBURST_START_F2_SHIFT 16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK 0x000000ff
+# define TV_VBURST_END_F2_SHIFT 0
+
+#define TV_V_CTL_6 _MMIO(0x68050)
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK 0x003f0000
+# define TV_VBURST_START_F3_SHIFT 16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK 0x000000ff
+# define TV_VBURST_END_F3_SHIFT 0
+
+#define TV_V_CTL_7 _MMIO(0x68054)
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK 0x003f0000
+# define TV_VBURST_START_F4_SHIFT 16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK 0x000000ff
+# define TV_VBURST_END_F4_SHIFT 0
+
+#define TV_SC_CTL_1 _MMIO(0x68060)
+/* Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN (1 << 31)
+/* Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN (1 << 30)
+/* Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN (1 << 29)
+/* Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2 (0 << 24)
+/* Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4 (1 << 24)
+/* Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8 (2 << 24)
+/* Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER (3 << 24)
+/* Sets the peak amplitude of the colorburst. */
+# define TV_BURST_LEVEL_MASK 0x00ff0000
+# define TV_BURST_LEVEL_SHIFT 16
+/* Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK 0x00000fff
+# define TV_SCDDA1_INC_SHIFT 0
+
+#define TV_SC_CTL_2 _MMIO(0x68064)
+/* Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK 0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT 16
+/* Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK 0x00007fff
+# define TV_SCDDA2_INC_SHIFT 0
+
+#define TV_SC_CTL_3 _MMIO(0x68068)
+/* Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK 0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT 16
+/* Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK 0x00007fff
+# define TV_SCDDA3_INC_SHIFT 0
+
+#define TV_WIN_POS _MMIO(0x68070)
+/* X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK 0x1fff0000
+# define TV_XPOS_SHIFT 16
+/* Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK 0x00000fff
+# define TV_YPOS_SHIFT 0
+
+#define TV_WIN_SIZE _MMIO(0x68074)
+/* Horizontal size of the display window, measured in pixels */
+# define TV_XSIZE_MASK 0x1fff0000
+# define TV_XSIZE_SHIFT 16
+/*
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK 0x00000fff
+# define TV_YSIZE_SHIFT 0
+
+#define TV_FILTER_CTL_1 _MMIO(0x68080)
+/*
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE (1 << 31)
+/*
+ * Disables the vertical filter.
+ *
+ * This is required on modes more than 1024 pixels wide.
+ */
+# define TV_V_FILTER_BYPASS (1 << 29)
+/* Enables adaptive vertical filtering */
+# define TV_VADAPT (1 << 28)
+# define TV_VADAPT_MODE_MASK (3 << 26)
+/* Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST (0 << 26)
+/* Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE (1 << 26)
+/* Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST (3 << 26)
+/*
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK 0x00003fff
+# define TV_HSCALE_FRAC_SHIFT 0
+
+#define TV_FILTER_CTL_2 _MMIO(0x68084)
+/*
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK 0x00038000
+# define TV_VSCALE_INT_SHIFT 15
+/*
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK 0x00007fff
+# define TV_VSCALE_FRAC_SHIFT 0
+
+#define TV_FILTER_CTL_3 _MMIO(0x68088)
+/*
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK 0x00038000
+# define TV_VSCALE_IP_INT_SHIFT 15
+/*
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT 0
+
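To make the scaling formulas above concrete, here is a rough sketch with hypothetical mode numbers; the helper and the chosen sizes are illustrative, and a real driver would derive them from the actual mode.

/* Illustrative sketch only: derive scaler fields for a hypothetical mode. */
static void tv_scale_example(void)
{
	/* hypothetical: 720 source pixels, 4x oversampling, 640 destination pixels */
	u32 hscale = ((720 - 1) << 14) / (4 * 640 - 1);		/* 14-bit fraction, ~4603 */
	/* hypothetical: 480 source lines, interlace factor 2, 480 destination lines */
	u32 vscale = ((480 - 1) << 15) / (2 * 480 - 1);		/* 3.15 fixed point, ~16367 */

	u32 filter_ctl_1 = hscale & TV_HSCALE_FRAC_MASK;
	u32 filter_ctl_2 = (((vscale >> 15) << TV_VSCALE_INT_SHIFT) & TV_VSCALE_INT_MASK) |
			   (vscale & TV_VSCALE_FRAC_MASK);

	/* these would be OR'd with the other TV_FILTER_CTL_* bits before writing */
	(void)filter_ctl_1;
	(void)filter_ctl_2;
}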
+#define TV_CC_CONTROL _MMIO(0x68090)
+# define TV_CC_ENABLE (1 << 31)
+/*
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK (1 << 27)
+# define TV_CC_FID_SHIFT 27
+/* Sets the horizontal position of the CC data. Usually 135. */
+# define TV_CC_HOFF_MASK 0x03ff0000
+# define TV_CC_HOFF_SHIFT 16
+/* Sets the vertical position of the CC data. Usually 21 */
+# define TV_CC_LINE_MASK 0x0000003f
+# define TV_CC_LINE_SHIFT 0
+
+#define TV_CC_DATA _MMIO(0x68094)
+# define TV_CC_RDY (1 << 31)
+/* Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK 0x007f0000
+# define TV_CC_DATA_2_SHIFT 16
+/* First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK 0x0000007f
+# define TV_CC_DATA_1_SHIFT 0
+
+#define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */
+#define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */
+#define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */
+#define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */
+
+#endif /* __INTEL_TV_REGS_H__ */
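With the register file complete, a minimal, hypothetical sketch of composing TV_CTL from the fields above may help; which bits a real enable path sets, and its ordering against PLL and pipe enable, is driver policy, so treat this purely as an illustration of the field macros, not as the driver's actual enable sequence.

/* Illustrative sketch only: compose a TV_CTL value for pipe B, S-Video output. */
static void tv_ctl_example(struct drm_i915_private *i915)
{
	u32 tv_ctl;

	/* preserve the bits that software is required to keep */
	tv_ctl = intel_de_read(i915, TV_CTL) & TV_CTL_SAVE;

	tv_ctl |= TV_ENC_ENABLE |
		  TV_ENC_PIPE_SEL(PIPE_B) |
		  TV_ENC_OUTPUT_SVIDEO |
		  TV_OVERSAMPLE_4X |
		  TV_PROGRESSIVE;

	intel_de_write(i915, TV_CTL, tv_ctl);
}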
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
new file mode 100644
index 000000000000..f8bf9810527d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_vblank.h"
+#include "intel_vrr.h"
+
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ * vblank_start >= 3
+ * vsync_start = vblank_start + 1
+ * vsync_end = vblank_start + 2
+ * vtotal = vblank_start + 3
+ *
+ * start of vblank:
+ * latch double buffered registers
+ * increment frame counter (ctg+)
+ * generate start of vblank interrupt (gen4+)
+ * |
+ * | frame start:
+ * | generate frame start interrupt (aka. vblank interrupt) (gmch)
+ * | may be shifted forward 1-3 extra lines via TRANSCONF
+ * | |
+ * | | start of vsync:
+ * | | generate vsync interrupt
+ * | | |
+ * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
+ * . \hs/ . \hs/ \hs/ \hs/ . \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ * | | <----vs-----> |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ * | | |
+ * last visible pixel first visible pixel
+ * | increment frame counter (gen3/4)
+ * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
+ *
+ * x = horizontal active
+ * _ = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ * (depending on TRANSCONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ * of horizontal active on the first line of vertical active
+ */
+
+/*
+ * Called from drm generic code, passed a 'crtc', which we use as a pipe index.
+ */
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u32 pixel, vbl_start, hsync_start, htotal;
+ u64 frame;
+
+ /*
+ * On i965gm TV output the frame counter only works up to
+ * the point when we enable the TV encoder. After that the
+ * frame counter ceases to work and reads zero. We need a
+ * vblank wait before enabling the TV encoder and so we
+ * have to enable vblank interrupts while the frame counter
+ * is still in a working state. However the core vblank code
+ * does not like us returning non-zero frame counter values
+ * when we've told it that we don't have a working frame
+ * counter. Thus we must stop non-zero values leaking out.
+ */
+ if (!vblank->max_vblank_count)
+ return 0;
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+
+ /* Convert to pixel count */
+ vbl_start *= htotal;
+
+ /* Start of vblank event occurs at start of hsync */
+ vbl_start -= htotal - hsync_start;
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(pipe), PIPEFRAME(pipe));
+
+ pixel = frame & PIPE_PIXEL_MASK;
+ frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff;
+
+ /*
+ * The frame counter increments at beginning of active.
+ * Cook up a vblank counter by also checking the pixel
+ * counter against vblank start.
+ */
+ return (frame + (pixel >= vbl_start)) & 0xffffff;
+}
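As a quick numeric check of the cooking above (with hypothetical timings, not taken from the patch): htotal = 1000, hsync_start = 900 and vblank_start = 500 give vbl_start = 500 * 1000 - (1000 - 900) = 499900 pixels, so once the pixel counter reaches the start of hsync on the last active line the returned value runs one ahead of the raw frame counter, which is what makes the cooked counter increment at the start of vblank rather than at the start of active. A small sketch of just that comparison:

/* Illustrative sketch only: hypothetical htotal=1000, hsync_start=900, vblank_start=500. */
static u32 cooked_vblank_counter_example(u32 frame, u32 pixel)
{
	u32 vbl_start = 500 * 1000 - (1000 - 900);	/* 499900 pixels */

	/* (pixel >= vbl_start) adds one frame once the scan reaches vblank */
	return (frame + (pixel >= vbl_start)) & 0xffffff;
}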
+
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ if (!vblank->max_vblank_count)
+ return 0;
+
+ return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(pipe));
+}
+
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 htotal = mode->crtc_htotal;
+ u32 clock = mode->crtc_clock;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
+
+ /*
+ * To avoid the race condition where we might cross into the
+ * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * during the same frame.
+ */
+ do {
+ /*
+ * This field provides read back of the display
+ * pipe frame time stamp. The time stamp value
+ * is sampled at every start of vertical blank.
+ */
+ scan_prev_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
+
+ /*
+ * The TIMESTAMP_CTR register has the current
+ * time stamp value.
+ */
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
+
+ scan_post_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
+ } while (scan_post_time != scan_prev_time);
+
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms the pipe
+ * scanline register cannot be used to get the scanline,
+ * either because the timings are driven from the PORT or
+ * because of issues with scanline register updates.
+ * This function instead uses the framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
+ scanline = min(scanline, vtotal - 1);
+ scanline = (scanline + vblank_start) % vtotal;
+
+ return scanline;
+}
+
+/*
+ * intel_de_read_fw() is used below only for fast reads of the display
+ * block; there is no need for forcewake etc.
+ */
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct drm_display_mode *mode;
+ struct drm_vblank_crtc *vblank;
+ enum pipe pipe = crtc->pipe;
+ int position, vtotal;
+
+ if (!crtc->active)
+ return 0;
+
+ vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ mode = &vblank->hwmode;
+
+ if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ return __intel_get_crtc_scanline_from_timestamp(crtc);
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
+
+ /*
+ * On HSW, the DSL reg (0x70000) appears to return 0 if we
+ * read it just before the start of vblank. So try it again
+ * so we don't accidentally end up spanning a vblank frame
+ * increment, causing the pipe_update_end() code to squawk at us.
+ *
+ * The nature of this problem means we can't simply check the ISR
+ * bit and return the vblank start value; nor can we use the scanline
+ * debug register in the transcoder as it appears to have the same
+ * problem. We may need to extend this to include other platforms,
+ * but so far testing only shows the problem on HSW.
+ */
+ if (HAS_DDI(dev_priv) && !position) {
+ int i, temp;
+
+ for (i = 0; i < 100; i++) {
+ udelay(1);
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
+ if (temp != position) {
+ position = temp;
+ break;
+ }
+ }
+ }
+
+ /*
+ * See update_scanline_offset() for the details on the
+ * scanline_offset adjustment.
+ */
+ return (position + crtc->scanline_offset) % vtotal;
+}
+
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = _crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ enum pipe pipe = crtc->pipe;
+ int position;
+ int vbl_start, vbl_end, hsync_start, htotal, vtotal;
+ unsigned long irqflags;
+ bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
+ IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
+ crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+ drm_dbg(&dev_priv->drm,
+ "trying to get scanoutpos for disabled pipe %c\n",
+ pipe_name(pipe));
+ return false;
+ }
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vtotal = mode->crtc_vtotal;
+ vbl_start = mode->crtc_vblank_start;
+ vbl_end = mode->crtc_vblank_end;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ vbl_end /= 2;
+ vtotal /= 2;
+ }
+
+ /*
+ * Lock uncore.lock, as we will do multiple timing critical raw
+ * register reads, potentially with preemption disabled, so the
+ * following code must not block on uncore.lock.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+ * so it looks like we're already approaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = __intel_get_crtc_scanline(crtc);
+ } else {
+ /*
+ * Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ /* convert to pixel counts */
+ vbl_start *= htotal;
+ vbl_end *= htotal;
+ vtotal *= htotal;
+
+ /*
+ * In interlaced modes, the pixel counter counts all pixels,
+ * so one field will have htotal more pixels. To keep the
+ * reported position from jumping backwards when the pixel
+ * counter is beyond the length of the shorter field, just
+ * clamp the position to the length of the shorter field. This
+ * matches how the scanline counter based position works since
+ * the scanline counter doesn't count the two half lines.
+ */
+ if (position >= vtotal)
+ position = vtotal - 1;
+
+ /*
+ * Start of vblank interrupt is triggered at start of hsync,
+ * just prior to the first active line of vblank. However we
+ * consider lines to start at the leading edge of horizontal
+ * active. So, should we get here before we've crossed into
+ * the horizontal active of the first line in vblank, we would
+ * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+ * always add htotal-hsync_start to the current pixel position.
+ */
+ position = (position + htotal - hsync_start) % vtotal;
+ }
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * While in vblank, position will be negative
+ * counting up towards 0 at vbl_end. And outside
+ * vblank, position will be positive counting
+ * up since vbl_end.
+ */
+ if (position >= vbl_start)
+ position -= vbl_end;
+ else
+ position += vtotal - vbl_end;
+
+ if (use_scanline_counter) {
+ *vpos = position;
+ *hpos = 0;
+ } else {
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ return true;
+}
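The final normalization is easiest to see with numbers; here is a small sketch with hypothetical timings (vbl_start = 480, vbl_end = 500, vtotal = 525) that replays the branch above for the scanline-counter path.

/* Illustrative sketch only: hypothetical vbl_start=480, vbl_end=500, vtotal=525. */
static int scanoutpos_example(int position)
{
	int vbl_start = 480, vbl_end = 500, vtotal = 525;

	if (position >= vbl_start)
		position -= vbl_end;		/* e.g. 490 -> -10: inside vblank, counting up to 0 */
	else
		position += vtotal - vbl_end;	/* e.g. 100 -> 125: lines since vbl_end */

	return position;
}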
+
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned long irqflags;
+ int position;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ position = __intel_get_crtc_scanline(crtc);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return position;
+}
+
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ i915_reg_t reg = PIPEDSL(pipe);
+ u32 line1, line2;
+
+ line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ msleep(5);
+ line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+
+ return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Wait for the display line to settle/start moving */
+ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), str_on_off(state));
+}
+
+void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, false);
+}
+
+void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, true);
+}
+
+static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. Ie. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
+ */
+ if (DISPLAY_VER(i915) == 2) {
+ int vtotal;
+
+ vtotal = adjusted_mode->crtc_vtotal;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ return vtotal - 1;
+ } else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return 2;
+ } else {
+ return 1;
+ }
+}
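To see the offset in action with a hypothetical vtotal of 525: on gen2 the raw counter reads 1 on the first active line, so adding the returned offset of vtotal - 1 = 524 and wrapping gives (1 + 524) % 525 = 0, which is the value the rest of the code expects. A minimal sketch mirroring the correction already applied in __intel_get_crtc_scanline():

/* Illustrative sketch only: applying the gen2 offset for a hypothetical vtotal of 525. */
static int scanline_offset_example(int raw_position)
{
	int vtotal = 525;
	int scanline_offset = vtotal - 1;	/* what intel_crtc_scanline_offset() returns on gen2 */

	/* raw_position == 1 on the first active line yields a corrected value of 0 */
	return (raw_position + scanline_offset) % vtotal;
}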
+
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_display_mode adjusted_mode;
+ int vmax_vblank_start = 0;
+ unsigned long irqflags;
+
+ drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
+
+ /*
+ * Belts and suspenders locking to guarantee everyone sees 100%
+ * consistent state during fastset seamless refresh rate changes.
+ *
+ * vblank_time_lock takes care of all drm_vblank.c stuff, and
+ * uncore.lock takes care of __intel_get_crtc_scanline() which
+ * may get called elsewhere as well.
+ *
+ * TODO maybe just protect everything (including
+ * __intel_get_crtc_scanline()) with vblank_time_lock?
+ * Need to audit everything to make sure it's safe.
+ */
+ spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
+ spin_lock(&i915->uncore.lock);
+
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
+
+ crtc->vmax_vblank_start = vmax_vblank_start;
+
+ crtc->mode_flags = crtc_state->mode_flags;
+
+ crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
+
+ spin_unlock(&i915->uncore.lock);
+ spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
new file mode 100644
index 000000000000..0884db7e76ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VBLANK_H__
+#define __INTEL_VBLANK_H__
+
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+struct drm_crtc;
+struct intel_crtc;
+struct intel_crtc_state;
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
+void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 9d3b77b41b5c..8e787c13d26d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -17,6 +17,7 @@
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
@@ -345,16 +346,13 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!RUNTIME_INFO(i915)->has_dsc)
+ if (!HAS_DSC(i915))
return false;
- if (DISPLAY_VER(i915) >= 12)
- return true;
-
- if (DISPLAY_VER(i915) >= 11 && cpu_transcoder != TRANSCODER_A)
- return true;
+ if (DISPLAY_VER(i915) == 11 && cpu_transcoder == TRANSCODER_A)
+ return false;
- return false;
+ return true;
}
static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
@@ -425,9 +423,9 @@ calculate_rc_params(struct rc_parameters *rc,
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
/* Read range_minqp and range_max_qp from qp tables */
rc->rc_range_params[buf_i].range_min_qp =
- intel_lookup_range_min_qp(bpc, buf_i, bpp_i);
+ intel_lookup_range_min_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420);
rc->rc_range_params[buf_i].range_max_qp =
- intel_lookup_range_max_qp(bpc, buf_i, bpp_i);
+ intel_lookup_range_max_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420);
/* Calculate range_bgp_offset */
if (bpp <= 6) {
@@ -450,6 +448,29 @@ calculate_rc_params(struct rc_parameters *rc,
}
}
+static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config,
+ struct drm_dsc_config *vdsc_cfg)
+{
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_RGB ||
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ if (vdsc_cfg->slice_height > 4095)
+ return -EINVAL;
+ if (vdsc_cfg->slice_height * vdsc_cfg->slice_width < 15000)
+ return -EINVAL;
+ } else if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ if (vdsc_cfg->slice_width % 2)
+ return -EINVAL;
+ if (vdsc_cfg->slice_height % 2)
+ return -EINVAL;
+ if (vdsc_cfg->slice_height > 4094)
+ return -EINVAL;
+ if (vdsc_cfg->slice_height * vdsc_cfg->slice_width < 30000)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -458,19 +479,64 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
const struct rc_parameters *rc_params;
struct rc_parameters *rc = NULL;
+ int err;
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
- /* Gen 11 does not support YCbCr */
+ err = intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg);
+
+ if (err) {
+ drm_dbg_kms(&dev_priv->drm, "Slice dimension requirements not met\n");
+ return err;
+ }
+
+ /*
+ * According to the DSC 1.2 spec, convert_rgb is 0 if the colorspace
+ * is YCbCr and 1 otherwise.
+ */
+ vdsc_cfg->convert_rgb = pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR444;
+
+ if (DISPLAY_VER(dev_priv) >= 14 &&
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vdsc_cfg->native_420 = true;
+ /* We do not support YCbCr422 as of now */
+ vdsc_cfg->native_422 = false;
vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
/* Gen 11 only supports integral values of bpp */
vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+
+ /*
+ * According to the DSC 1.2 spec, Section 4.1, if native_420 is set:
+ * - We need to double the current bpp.
+ * - second_line_bpg_offset is 12 in general, and equal to 2*(slice_height-1)
+ *   if slice height < 8.
+ * - second_line_offset_adj is 512, an empirical value that yields the best
+ *   chroma preservation in the second line.
+ * - nsl_bpg_offset is calculated as second_line_offset/(slice_height - 1) and
+ *   then rounded up to 16 fractional bits; we left shift the second line offset
+ *   by 11 to preserve 11 fractional bits.
+ */
+ if (vdsc_cfg->native_420) {
+ vdsc_cfg->bits_per_pixel <<= 1;
+
+ if (vdsc_cfg->slice_height >= 8)
+ vdsc_cfg->second_line_bpg_offset = 12;
+ else
+ vdsc_cfg->second_line_bpg_offset =
+ 2 * (vdsc_cfg->slice_height - 1);
+
+ vdsc_cfg->second_line_offset_adj = 512;
+ vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11,
+ vdsc_cfg->slice_height - 1);
+ }
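A quick worked instance of the native_420 adjustments above may help; the slice height and base bpp below are hypothetical, and the helper simply replays the arithmetic from the block just shown.

/* Illustrative sketch only: hypothetical slice_height of 108 and 8 bpp base rate. */
static void dsc_native_420_example(void)
{
	u32 bits_per_pixel = 8 << 4;		/* compressed bpp with 4 fractional bits, as in the driver */
	u32 slice_height = 108;
	u32 second_line_bpg_offset, nsl_bpg_offset;

	bits_per_pixel <<= 1;			/* doubled for native 4:2:0, now 256 */

	second_line_bpg_offset = slice_height >= 8 ?
		12 : 2 * (slice_height - 1);	/* 12 here, since 108 >= 8 */

	/* DIV_ROUND_UP(12 << 11, 107) = DIV_ROUND_UP(24576, 107) = 230 */
	nsl_bpg_offset = DIV_ROUND_UP(second_line_bpg_offset << 11,
				      slice_height - 1);

	(void)bits_per_pixel;
	(void)nsl_bpg_offset;
}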
+
vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
@@ -597,8 +663,13 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_VER_MIN_SHIFT |
vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
- if (vdsc_cfg->dsc_version_minor == 2)
+ if (vdsc_cfg->dsc_version_minor == 2) {
pps_val |= DSC_ALT_ICH_SEL;
+ if (vdsc_cfg->native_420)
+ pps_val |= DSC_NATIVE_420_ENABLE;
+ if (vdsc_cfg->native_422)
+ pps_val |= DSC_NATIVE_422_ENABLE;
+ }
if (vdsc_cfg->block_pred_enable)
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
@@ -909,6 +980,33 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val);
}
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ /* Populate PICTURE_PARAMETER_SET_17 registers */
+ pps_val = 0;
+ pps_val |= DSC_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset);
+ drm_dbg_kms(&dev_priv->drm, "PPS17 = 0x%08x\n", pps_val);
+ intel_de_write(dev_priv,
+ MTL_DSC0_PICTURE_PARAMETER_SET_17(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ MTL_DSC1_PICTURE_PARAMETER_SET_17(pipe),
+ pps_val);
+
+ /* Populate PICTURE_PARAMETER_SET_18 registers */
+ pps_val = 0;
+ pps_val |= DSC_NSL_BPG_OFFSET(vdsc_cfg->nsl_bpg_offset) |
+ DSC_SL_OFFSET_ADJ(vdsc_cfg->second_line_offset_adj);
+ drm_dbg_kms(&dev_priv->drm, "PPS18 = 0x%08x\n", pps_val);
+ intel_de_write(dev_priv,
+ MTL_DSC0_PICTURE_PARAMETER_SET_18(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ MTL_DSC1_PICTURE_PARAMETER_SET_18(pipe),
+ pps_val);
+ }
+
/* Populate the RC_BUF_THRESH registers */
memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
@@ -1183,7 +1281,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
- u32 dss_ctl1, dss_ctl2, val;
+ u32 dss_ctl1, dss_ctl2, pps0 = 0, pps1 = 0;
if (!intel_dsc_source_support(crtc_state))
return;
@@ -1206,13 +1304,21 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
/* FIXME: add more state readout as needed */
- /* PPS1 */
- if (!is_pipe_dsc(crtc, cpu_transcoder))
- val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
- else
- val = intel_de_read(dev_priv,
- ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
- vdsc_cfg->bits_per_pixel = val;
+ /* PPS0 & PPS1 */
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ pps1 = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
+ } else {
+ pps0 = intel_de_read(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe));
+ pps1 = intel_de_read(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ }
+
+ vdsc_cfg->bits_per_pixel = pps1;
+
+ if (pps0 & DSC_NATIVE_420_ENABLE)
+ vdsc_cfg->bits_per_pixel >>= 1;
+
crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
out:
intel_display_power_put(dev_priv, power_domain, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
new file mode 100644
index 000000000000..b71f00b5c761
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_REGS_H__
+#define __INTEL_VDSC_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
+#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
+#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
+#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
+#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
+/* MTL Display Stream Compression registers */
+#define _MTL_DSC0_PICTURE_PARAMETER_SET_17_PB 0x782B4
+#define _MTL_DSC1_PICTURE_PARAMETER_SET_17_PB 0x783B4
+#define _MTL_DSC0_PICTURE_PARAMETER_SET_17_PC 0x784B4
+#define _MTL_DSC1_PICTURE_PARAMETER_SET_17_PC 0x785B4
+#define MTL_DSC0_PICTURE_PARAMETER_SET_17(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _MTL_DSC0_PICTURE_PARAMETER_SET_17_PB, \
+ _MTL_DSC0_PICTURE_PARAMETER_SET_17_PC)
+#define MTL_DSC1_PICTURE_PARAMETER_SET_17(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _MTL_DSC1_PICTURE_PARAMETER_SET_17_PB, \
+ _MTL_DSC1_PICTURE_PARAMETER_SET_17_PC)
+#define DSC_SL_BPG_OFFSET(offset) ((offset) << 27)
+
+#define _MTL_DSC0_PICTURE_PARAMETER_SET_18_PB 0x782B8
+#define _MTL_DSC1_PICTURE_PARAMETER_SET_18_PB 0x783B8
+#define _MTL_DSC0_PICTURE_PARAMETER_SET_18_PC 0x784B8
+#define _MTL_DSC1_PICTURE_PARAMETER_SET_18_PC 0x785B8
+#define MTL_DSC0_PICTURE_PARAMETER_SET_18(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _MTL_DSC0_PICTURE_PARAMETER_SET_18_PB, \
+ _MTL_DSC0_PICTURE_PARAMETER_SET_18_PC)
+#define MTL_DSC1_PICTURE_PARAMETER_SET_18(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _MTL_DSC1_PICTURE_PARAMETER_SET_18_PB, \
+ _MTL_DSC1_PICTURE_PARAMETER_SET_18_PC)
+#define DSC_NSL_BPG_OFFSET(offset) ((offset) << 16)
+#define DSC_SL_OFFSET_ADJ(offset) ((offset) << 0)
+
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
+#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_NATIVE_422_ENABLE BIT(23)
+#define DSC_NATIVE_420_ENABLE BIT(22)
+#define DSC_ALT_ICH_SEL (1 << 20)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
+#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
+#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
+#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
+#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
+#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
+#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
+#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
+#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
+#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
+#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
+#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
+#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
+#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
+#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
+#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
+#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
+#endif /* __INTEL_VDSC_REGS_H__ */
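The DSC picture parameter set and rate control registers above each come in a fixed "DSCA"/"DSCC" instance plus per-pipe engine instances selected through _MMIO_PIPE((pipe) - PIPE_B, ...). Below is a minimal sketch of how such a per-pipe lookup resolves, using the PPS_14 offsets defined above; pick_even() is a simplified stand-in for the driver's macro arithmetic, not the real _MMIO_PIPE().

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

/*
 * Simplified stand-in (assumption) for the evenly spaced two-point
 * interpolation the per-pipe register macros rely on.
 */
static unsigned int pick_even(int index, unsigned int a, unsigned int b)
{
	return a + index * (b - a);
}

static unsigned int icl_dsc0_pps14_offset(enum pipe pipe)
{
	/* offsets copied from the defines above: PB 0x782A8, PC 0x784A8 */
	return pick_even(pipe - PIPE_B, 0x782A8, 0x784A8);
}

int main(void)
{
	printf("PIPE_B: 0x%05X\n", icl_dsc0_pps14_offset(PIPE_B)); /* 0x782A8 */
	printf("PIPE_C: 0x%05X\n", icl_dsc0_pps14_offset(PIPE_C)); /* 0x784A8 */
	return 0;
}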
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index a69bfcac9a94..286a0bdd28c6 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -6,9 +6,10 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <drm/i915_drm.h>
#include <video/vga.h>
+#include "soc/intel_gmch.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
@@ -98,39 +99,12 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
-static int
-intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
-{
- unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
- drm_err(&i915->drm, "failed to read control word\n");
- return -EIO;
- }
-
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
- return 0;
-
- if (enable_decode)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
- drm_err(&i915->drm, "failed to write control word\n");
- return -EIO;
- }
-
- return 0;
-}
-
static unsigned int
intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
- intel_vga_set_state(i915, enable_decode);
+ intel_gmch_vga_set_state(i915, enable_decode);
if (enable_decode)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 7b1357e82b69..4228f26b4c11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -78,10 +78,10 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* The hw imposes the extra scanline before frame start */
if (DISPLAY_VER(i915) >= 13)
- return crtc_state->vrr.guardband + crtc_state->framestart_delay + 1;
+ return crtc_state->vrr.guardband;
else
+ /* The hw imposes the extra scanline before frame start */
return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
}
@@ -144,57 +144,47 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* is deprecated.
*/
if (DISPLAY_VER(i915) >= 13) {
- /*
- * FIXME: Subtract Window2 delay from below value.
- *
- * Window2 specifies time required to program DSB (Window2) in
- * number of scan lines. Assuming 0 for no DSB.
- */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+ crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
} else {
- /*
- * FIXME: s/4/framestart_delay/ to get consistent
- * earliest/latest points for register latching regardless
- * of the framestart_delay used?
- *
- * FIXME: this really needs the extra scanline to provide consistent
- * behaviour for all framestart_delay values. Otherwise with
- * framestart_delay==4 we will end up extending the min vblank by
- * one extra line.
- */
crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
+ crtc_state->framestart_delay - 1);
}
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
+static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(i915) >= 13)
+ return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else
+ return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+}
+
void intel_vrr_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 trans_vrr_ctl;
if (!crtc_state->vrr.enable)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
- trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
- VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
- XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
- else
- trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
- VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
- VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
- VRR_CTL_PIPELINE_FULL_OVERRIDE;
-
intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
- intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(crtc_state));
intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
@@ -231,8 +221,13 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc *crtc,
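For reference, a small worked sketch of the reworked guardband / pipeline-full computation above, using made-up 1080p-style timings; all numbers below are illustrative assumptions, not values taken from this patch.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical VRR mode, numbers chosen only for illustration */
	int vmin = 1125;		/* vtotal at the highest refresh rate */
	int crtc_vblank_start = 1084;
	int framestart_delay = 1;

	/* display version 13+: guardband = vmin + 1 - crtc_vblank_start */
	int guardband = vmin + 1 - crtc_vblank_start;		/* 42 */

	/* older platforms: pipeline_full, capped at 255 */
	int pipeline_full = min_int(255, vmin - crtc_vblank_start -
					 framestart_delay - 1);	/* 39 */

	printf("guardband=%d pipeline_full=%d\n", guardband, pipeline_full);
	return 0;
}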
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
new file mode 100644
index 000000000000..b615449e70b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_display_types.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @i915: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->update_wm)
+ i915->display.funcs.wm->update_wm(i915);
+}
+
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_pipe_wm)
+ return i915->display.funcs.wm->compute_pipe_wm(state, crtc);
+
+ return 0;
+}
+
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!i915->display.funcs.wm->compute_intermediate_wm)
+ return 0;
+
+ if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
+ return 0;
+
+ return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
+}
+
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->initial_watermarks) {
+ i915->display.funcs.wm->initial_watermarks(state, crtc);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->atomic_update_watermarks)
+ i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+}
+
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->optimize_watermarks)
+ i915->display.funcs.wm->optimize_watermarks(state, crtc);
+}
+
+int intel_compute_global_watermarks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_global_watermarks)
+ return i915->display.funcs.wm->compute_global_watermarks(state);
+
+ return 0;
+}
+
+void intel_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->get_hw_state)
+ return i915->display.funcs.wm->get_hw_state(i915);
+}
+
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->hw.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->hw.fb != NULL;
+ else
+ return plane_state->uapi.visible;
+}
+
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
+{
+ int level;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /*
+ * - latencies are in us on gen9.
+ * - before then, WM1+ latency values are in 0.5us units
+ */
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
+ }
+}
+
+void intel_wm_init(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 9)
+ skl_wm_init(i915);
+ else
+ i9xx_wm_init(i915);
+}
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ int level;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9/vlv/chv
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 new[8] = { 0 };
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != dev_priv->display.wm.num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+
+ return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+void intel_wm_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_pri_wm_latency_fops);
+
+ debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_spr_wm_latency_fops);
+
+ debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_cur_wm_latency_fops);
+
+ skl_watermark_debugfs_register(i915);
+}
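The intel_update_watermarks() kernel-doc above states the watermark formulas only in prose. A minimal numeric sketch of the "normal" case follows; the FIFO entry size, the units (kHz dot clock, µs latency) and the example numbers are illustrative assumptions, not the exact i9xx_wm.c arithmetic.

#include <stdio.h>

/*
 * watermark = dotclock * bytes-per-pixel * latency, expressed in FIFO
 * entries, rounded up, plus two entries for clock domain crossings
 * (per the kernel-doc above). Units and entry size are assumptions.
 */
static unsigned int example_normal_wm(unsigned int dotclock_khz,
				      unsigned int cpp,
				      unsigned int latency_us,
				      unsigned int entry_size)
{
	unsigned long long bytes =
		(unsigned long long)dotclock_khz * cpp * latency_us / 1000;

	return (unsigned int)((bytes + entry_size - 1) / entry_size) + 2;
}

int main(void)
{
	/* e.g. 148.5 MHz dot clock, 4 bytes/px, 12 us latency, 64 byte entries */
	printf("wm = %u entries\n", example_normal_wm(148500, 4, 12, 64)); /* 114 */
	return 0;
}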
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
new file mode 100644
index 000000000000..48429ac140d2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_WM_H__
+#define __INTEL_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_update_watermarks(struct drm_i915_private *i915);
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_global_watermarks(struct intel_atomic_state *state);
+void intel_wm_get_hw_state(struct drm_i915_private *i915);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *i915,
+ const char *name, const u16 wm[]);
+void intel_wm_init(struct drm_i915_private *i915);
+void intel_wm_debugfs_register(struct drm_i915_private *i915);
+
+#endif /* __INTEL_WM_H__ */
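intel_wm.c and intel_wm.h above reduce the watermark entry points to thin wrappers around the per-platform display.funcs.wm table filled in by skl_wm_init() or i9xx_wm_init(). A self-contained sketch of that dispatch pattern; wm_device, wm_funcs and the function names are hypothetical, chosen only for illustration.

#include <stdio.h>

struct wm_device;

/* hypothetical, pared-down stand-in for the per-platform vtable */
struct wm_funcs {
	void (*update_wm)(struct wm_device *dev);
};

struct wm_device {
	const struct wm_funcs *wm;
};

static void skl_style_update_wm(struct wm_device *dev)
{
	printf("skl-style update\n");
}

static const struct wm_funcs skl_style_funcs = {
	.update_wm = skl_style_update_wm,
};

/* wrapper mirrors intel_update_watermarks(): a NULL hook means nothing to do */
static void update_watermarks(struct wm_device *dev)
{
	if (dev->wm && dev->wm->update_wm)
		dev->wm->update_wm(dev);
}

int main(void)
{
	struct wm_device dev = { .wm = &skl_style_funcs };

	update_watermarks(&dev);
	return 0;
}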
diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/display/intel_wm_types.h
index 211632f58751..628b7c0ce484 100644
--- a/drivers/gpu/drm/i915/intel_pm_types.h
+++ b/drivers/gpu/drm/i915/display/intel_wm_types.h
@@ -3,12 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#ifndef __INTEL_PM_TYPES_H__
-#define __INTEL_PM_TYPES_H__
+#ifndef __INTEL_WM_TYPES_H__
+#define __INTEL_WM_TYPES_H__
#include <linux/types.h>
-#include "display/intel_display.h"
+#include "intel_display_limits.h"
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
@@ -73,4 +73,4 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-#endif /* __INTEL_PM_TYPES_H__ */
+#endif /* __INTEL_WM_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index d7390067b7d4..0e7e014fcc71 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -87,6 +87,14 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
+#define TGL_MAX_SRC_W 5120
+#define TGL_MAX_SRC_H 8192
+#define TGL_MAX_DST_W 8192
+#define TGL_MAX_DST_H 8192
+#define MTL_MAX_SRC_W 4096
+#define MTL_MAX_SRC_H 8192
+#define MTL_MAX_DST_W 8192
+#define MTL_MAX_DST_H 8192
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
@@ -103,6 +111,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int min_src_w, min_src_h, min_dst_w, min_dst_h;
+ int max_src_w, max_src_h, max_dst_w, max_dst_h;
/*
* Src coordinates are already rotated by 270 degrees for
@@ -157,15 +169,38 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return -EINVAL;
}
+ min_src_w = SKL_MIN_SRC_W;
+ min_src_h = SKL_MIN_SRC_H;
+ min_dst_w = SKL_MIN_DST_W;
+ min_dst_h = SKL_MIN_DST_H;
+
+ if (DISPLAY_VER(dev_priv) < 11) {
+ max_src_w = SKL_MAX_SRC_W;
+ max_src_h = SKL_MAX_SRC_H;
+ max_dst_w = SKL_MAX_DST_W;
+ max_dst_h = SKL_MAX_DST_H;
+ } else if (DISPLAY_VER(dev_priv) < 12) {
+ max_src_w = ICL_MAX_SRC_W;
+ max_src_h = ICL_MAX_SRC_H;
+ max_dst_w = ICL_MAX_DST_W;
+ max_dst_h = ICL_MAX_DST_H;
+ } else if (DISPLAY_VER(dev_priv) < 14) {
+ max_src_w = TGL_MAX_SRC_W;
+ max_src_h = TGL_MAX_SRC_H;
+ max_dst_w = TGL_MAX_DST_W;
+ max_dst_h = TGL_MAX_DST_H;
+ } else {
+ max_src_w = MTL_MAX_SRC_W;
+ max_src_h = MTL_MAX_SRC_H;
+ max_dst_w = MTL_MAX_DST_W;
+ max_dst_h = MTL_MAX_DST_H;
+ }
+
/* range checks */
- if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
- dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (DISPLAY_VER(dev_priv) >= 11 &&
- (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
- dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (DISPLAY_VER(dev_priv) < 11 &&
- (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
- dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
+ if (src_w < min_src_w || src_h < min_src_h ||
+ dst_w < min_dst_w || dst_h < min_dst_h ||
+ src_w > max_src_w || src_h > max_src_h ||
+ dst_w > max_dst_w || dst_h > max_dst_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
@@ -174,6 +209,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return -EINVAL;
}
+ /*
+ * The pipe scaler does not use all the bits of PIPESRC, at least
+ * on the earlier platforms. So even when we're scaling a plane
+ * the *pipe* source size must not be too large. For simplicity
+ * we assume the limits match the scaler source size limits. Might
+ * not be 100% accurate on all platforms, but good enough for now.
+ */
+ if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: pipe src size %ux%u "
+ "is out of scaler range\n",
+ crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
+ return -EINVAL;
+ }
+
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 76490cc59d8f..8ea0598a5a07 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -17,7 +17,6 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -642,7 +641,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1260,7 +1259,7 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
}
static void
@@ -1287,6 +1286,8 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
@@ -1627,7 +1628,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
u32 offset;
int ret;
- if (w > max_width || w < min_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height || h < 1) {
drm_dbg_kms(&dev_priv->drm,
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
w, h, min_width, max_width, max_height);
@@ -1848,7 +1849,7 @@ static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
+ return intel_pxp_key_check(i915->pxp, obj, false) == 0;
}
static bool pxp_is_borked(struct drm_i915_gem_object *obj)
@@ -2180,7 +2181,7 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
if (DISPLAY_VER(i915) < 12)
return false;
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ /* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
return false;
@@ -2474,6 +2475,12 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
+ if (!dev_priv->params.enable_dpt &&
+ intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) {
+ drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n");
+ goto error;
+ }
+
/*
* DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
* while i915 HW rotation is clockwise, that's why this swapping.
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index e0766d1be966..1c7e6468f3e3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -5,21 +5,23 @@
#include <drm/drm_blend.h>
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "skl_watermark.h"
-
-#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_reg.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+#include "skl_watermark_regs.h"
static void skl_sagv_disable(struct drm_i915_private *i915);
@@ -45,8 +47,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
enum dbuf_slice slice;
for_each_dbuf_slice(i915, slice) {
- if (intel_uncore_read(&i915->uncore,
- DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
enabled_slices |= BIT(slice);
}
@@ -65,7 +66,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
static bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ return HAS_SAGV(i915) &&
i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
}
@@ -75,7 +76,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 14) {
u32 val;
- val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
+ val = intel_de_read(i915, MTL_LATENCY_SAGV);
return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
} else if (DISPLAY_VER(i915) >= 12) {
@@ -93,7 +94,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
return val;
} else if (DISPLAY_VER(i915) == 11) {
return 10;
- } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ } else if (HAS_SAGV(i915)) {
return 30;
} else {
return 0;
@@ -102,7 +103,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!intel_has_sagv(i915))
+ if (!HAS_SAGV(i915))
i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
@@ -360,7 +361,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(i915);
+ for (level = i915->display.wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -411,6 +412,9 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ if (!i915->params.enable_sagv)
+ return false;
+
if (DISPLAY_VER(i915) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
@@ -705,16 +709,38 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
+static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+ const struct skl_wm_params *wp)
+{
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ if (latency == 0)
+ return 0;
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ latency += 15;
+
+ return latency;
+}
+
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
+ int level;
ret = skl_compute_wm_params(crtc_state, 256,
drm_format_info(DRM_FORMAT_ARGB8888),
@@ -723,8 +749,8 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
crtc_state->pixel_rate, &wp, 0);
drm_WARN_ON(&i915->drm, ret);
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = i915->display.wm.skl_latency[level];
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(i915, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -756,18 +782,18 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
+ val = intel_de_read(i915, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
if (DISPLAY_VER(i915) >= 11)
return;
- val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb_y, val);
}
@@ -1408,16 +1434,22 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
+static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+ const struct skl_plane_wm *wm)
{
/*
* Wa_1408961008:icl, ehl
* Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
+ * Wa_14017887344:icl
+ * Wa_14017868169:adl, tgl
+ * Due to some power saving optimizations, different subsystems
+ * like PSR might still use even disabled wm level registers
+ * for "reference", so let's keep at least the values sane.
+ * Considering the number of workarounds requiring us to do similar
+ * things, it was decided to simply do this for all platforms; as those
+ * wm levels are disabled anyway, this isn't going to do any harm.
*/
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+ return level > 0 && !wm->wm[level].enable;
}
struct skl_plane_ddb_iter {
@@ -1493,7 +1525,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1569,7 +1601,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for (level++; level < i915->display.wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1586,11 +1618,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ if (skl_need_wm_copy_wa(i915, level, wm)) {
+ wm->wm[level].blocks = wm->wm[level - 1].blocks;
+ wm->wm[level].lines = wm->wm[level - 1].lines;
+ wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
}
}
}
@@ -1835,17 +1866,6 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- /*
- * WaIncreaseLatencyIPCEnabled: kbl,cfl
- * Display WA #1141: kbl,cfl
- */
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
- latency += 4;
-
- if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
- latency += 15;
-
method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
@@ -1967,12 +1987,12 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
struct skl_wm_level *levels)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level *result_prev = &levels[0];
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = skl_wm_latency(i915, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -1992,7 +2012,8 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
unsigned int latency = 0;
if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+ latency = i915->display.sagv.block_time_us +
+ skl_wm_latency(i915, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
@@ -2184,6 +2205,117 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool
+skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
+ int wm0_lines, int latency)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /* FIXME missing scaler and DSC pre-fill time */
+ return crtc_state->framestart_delay +
+ intel_usecs_to_scanlines(adjusted_mode, latency) +
+ wm0_lines >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+}
+
+static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+ int wm0_lines = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /* FIXME what about !skl_wm_has_lines() platforms? */
+ wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
+ }
+
+ return wm0_lines;
+}
+
+static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
+ int wm0_lines)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int level;
+
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ int latency;
+
+ /* FIXME should we care about the latency w/a's? */
+ latency = skl_wm_latency(i915, level, NULL);
+ if (latency == 0)
+ continue;
+
+ /* FIXME is it correct to use 0 latency for wm0 here? */
+ if (level == 0)
+ latency = 0;
+
+ if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
+ return level;
+ }
+
+ return -EINVAL;
+}
+
+static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int wm0_lines, level;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ wm0_lines = skl_max_wm0_lines(crtc_state);
+
+ level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
+ if (level < 0)
+ return level;
+
+ /*
+ * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
+ * based on whether we're limited by the vblank duration.
+ */
+ crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
+
+ for (level++; level < i915->display.wm.num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * FIXME just clear enable or flag the entire
+ * thing as bad via min_ddb_alloc=U16_MAX?
+ */
+ wm->wm[level].enable = false;
+ wm->uv_wm[level].enable = false;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 12 &&
+ i915->display.sagv.block_time_us &&
+ skl_is_vblank_too_short(crtc_state, wm0_lines,
+ i915->display.sagv.block_time_us)) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ wm->sagv.wm0.enable = false;
+ wm->sagv.trans_wm.enable = false;
+ }
+ }
+
+ return 0;
+}
+
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2213,7 +2345,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
- return 0;
+ return skl_wm_check_vblank(crtc_state);
}
static void skl_ddb_entry_write(struct drm_i915_private *i915,
@@ -2248,7 +2380,6 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -2256,8 +2387,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2285,14 +2417,14 @@ void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, CUR_WM(pipe, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2324,9 +2456,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2398,6 +2530,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2674,9 +2808,9 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *new_pipe_wm)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2755,6 +2889,8 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2810,47 +2946,41 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int level, max_level;
enum plane_id plane_id;
+ int level;
u32 val;
- max_level = ilk_wm_max_level(i915);
-
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
+ val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
else
- val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
+ val = intel_de_read(i915, CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
else
- val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
+ val = intel_de_read(i915, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
if (HAS_HW_SAGV_WM(i915)) {
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore,
- PLANE_WM_SAGV(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
else
- val = intel_uncore_read(&i915->uncore,
- CUR_WM_SAGV(pipe));
+ val = intel_de_read(i915, CUR_WM_SAGV(pipe));
skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore,
- PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
else
- val = intel_uncore_read(&i915->uncore,
- CUR_WM_SAGV_TRANS(pipe));
+ val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
} else if (DISPLAY_VER(i915) >= 12) {
@@ -2860,7 +2990,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
@@ -2960,7 +3090,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct drm_i915_private *i915)
{
struct intel_crtc *crtc;
@@ -2996,6 +3126,12 @@ void skl_wm_sanitize(struct drm_i915_private *i915)
}
}
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+}
+
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state)
{
@@ -3006,9 +3142,9 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
struct skl_pipe_wm wm;
} *hw;
const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(i915);
struct intel_plane *plane;
u8 hw_enabled_slices;
+ int level;
if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
return;
@@ -3035,7 +3171,7 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
@@ -3126,8 +3262,8 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915)
if (!HAS_IPC(i915))
return;
- intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
}
static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
@@ -3157,7 +3293,7 @@ void skl_watermark_ipc_init(struct drm_i915_private *i915)
static void
adjust_wm_latency(struct drm_i915_private *i915,
- u16 wm[], int max_level, int read_latency)
+ u16 wm[], int num_levels, int read_latency)
{
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3167,12 +3303,12 @@ adjust_wm_latency(struct drm_i915_private *i915,
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
*/
- for (level = 1; level <= max_level; level++) {
+ for (level = 1; level < num_levels; level++) {
if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
+ for (i = level + 1; i < num_levels; i++)
wm[i] = 0;
- max_level = level - 1;
+ num_levels = level;
break;
}
}
@@ -3185,7 +3321,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < num_levels; level++)
wm[level] += read_latency;
}
@@ -3201,28 +3337,27 @@ adjust_wm_latency(struct drm_i915_private *i915,
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- struct intel_uncore *uncore = &i915->uncore;
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
u32 val;
- val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, max_level, 6);
+ adjust_wm_latency(i915, wm, num_levels, 6);
}
static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
int mult = IS_DG2(i915) ? 2 : 1;
u32 val;
@@ -3254,11 +3389,16 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, max_level, read_latency);
+ adjust_wm_latency(i915, wm, num_levels, read_latency);
}
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
+ if (HAS_HW_SAGV_WM(i915))
+ i915->display.wm.num_levels = 6;
+ else
+ i915->display.wm.num_levels = 8;
+
if (DISPLAY_VER(i915) >= 14)
mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
else
@@ -3269,6 +3409,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *i915)
static const struct intel_wm_funcs skl_wm_funcs = {
.compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
};
void skl_wm_init(struct drm_i915_private *i915)
@@ -3546,13 +3687,35 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
.write = skl_watermark_ipc_status_write
};
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+static int intel_sagv_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ static const char * const sagv_status[] = {
+ [I915_SAGV_UNKNOWN] = "unknown",
+ [I915_SAGV_DISABLED] = "disabled",
+ [I915_SAGV_ENABLED] = "enabled",
+ [I915_SAGV_NOT_CONTROLLED] = "not controlled",
+ };
+
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
+
+void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- if (!HAS_IPC(i915))
- return;
+ if (HAS_IPC(i915))
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ if (HAS_SAGV(i915))
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
+ &intel_sagv_status_fops);
}
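
The intel_sagv_status_show() hunk above maps the SAGV state enum to strings with a designated-initializer table. A small standalone C sketch of that lookup pattern follows; the enum values, table name and main() harness are invented for illustration and are not part of the driver.

#include <stdio.h>

enum sagv_status { SAGV_UNKNOWN, SAGV_DISABLED, SAGV_ENABLED, SAGV_NOT_CONTROLLED };

/* Designated initializers keep each string tied to its enum value. */
static const char * const sagv_status_str[] = {
	[SAGV_UNKNOWN]        = "unknown",
	[SAGV_DISABLED]       = "disabled",
	[SAGV_ENABLED]        = "enabled",
	[SAGV_NOT_CONTROLLED] = "not controlled",
};

int main(void)
{
	enum sagv_status s = SAGV_ENABLED;

	printf("SAGV status: %s\n", sagv_status_str[s]);
	return 0;
}
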
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 7a5a4e67cd73..f91a3d4ddc07 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -8,9 +8,9 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_global_state.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct intel_atomic_state;
@@ -38,16 +38,13 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx);
-void skl_wm_get_hw_state(struct drm_i915_private *i915);
-void skl_wm_sanitize(struct drm_i915_private *i915);
-
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state);
void skl_watermark_ipc_init(struct drm_i915_private *i915);
void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_debugfs_register(struct drm_i915_private *i915);
void skl_wm_init(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
new file mode 100644
index 000000000000..628c5920ad49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __SKL_WATERMARK_REGS_H__
+#define __SKL_WATERMARK_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _PIPEA_MBUS_DBOX_CTL 0x7003C
+#define _PIPEB_MBUS_DBOX_CTL 0x7103C
+#define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \
+ _PIPEB_MBUS_DBOX_CTL)
+#define MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK REG_GENMASK(24, 20) /* tgl+ */
+#define MBUS_DBOX_B2B_TRANSACTIONS_MAX(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK, x)
+#define MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK REG_GENMASK(19, 17) /* tgl+ */
+#define MBUS_DBOX_B2B_TRANSACTIONS_DELAY(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK, x)
+#define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */
+#define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14)
+#define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x)
+#define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2)
+#define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3)
+#define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8)
+#define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x)
+#define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5)
+#define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x)
+#define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0)
+#define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x)
+
+#define MBUS_UBOX_CTL _MMIO(0x4503C)
+#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
+#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
+
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+
+/* Watermark register definitions for SKL */
+#define _CUR_WM_A_0 0x70140
+#define _CUR_WM_B_0 0x71140
+#define _CUR_WM_SAGV_A 0x70158
+#define _CUR_WM_SAGV_B 0x71158
+#define _CUR_WM_SAGV_TRANS_A 0x7015C
+#define _CUR_WM_SAGV_TRANS_B 0x7115C
+#define _CUR_WM_TRANS_A 0x70168
+#define _CUR_WM_TRANS_B 0x71168
+#define _PLANE_WM_1_A_0 0x70240
+#define _PLANE_WM_1_B_0 0x71240
+#define _PLANE_WM_2_A_0 0x70340
+#define _PLANE_WM_2_B_0 0x71340
+#define _PLANE_WM_SAGV_1_A 0x70258
+#define _PLANE_WM_SAGV_1_B 0x71258
+#define _PLANE_WM_SAGV_2_A 0x70358
+#define _PLANE_WM_SAGV_2_B 0x71358
+#define _PLANE_WM_SAGV_TRANS_1_A 0x7025C
+#define _PLANE_WM_SAGV_TRANS_1_B 0x7125C
+#define _PLANE_WM_SAGV_TRANS_2_A 0x7035C
+#define _PLANE_WM_SAGV_TRANS_2_B 0x7135C
+#define _PLANE_WM_TRANS_1_A 0x70268
+#define _PLANE_WM_TRANS_1_B 0x71268
+#define _PLANE_WM_TRANS_2_A 0x70368
+#define _PLANE_WM_TRANS_2_B 0x71368
+#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_IGNORE_LINES (1 << 30)
+#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
+#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
+
+#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
+#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_SAGV(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_A, _CUR_WM_SAGV_B)
+#define CUR_WM_SAGV_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_TRANS_A, _CUR_WM_SAGV_TRANS_B)
+#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A, _CUR_WM_TRANS_B)
+#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
+#define _PLANE_WM_BASE(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level) \
+ _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_SAGV_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_1_A, _PLANE_WM_SAGV_1_B)
+#define _PLANE_WM_SAGV_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_2_A, _PLANE_WM_SAGV_2_B)
+#define PLANE_WM_SAGV(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_SAGV_1(pipe), _PLANE_WM_SAGV_2(pipe)))
+#define _PLANE_WM_SAGV_TRANS_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_TRANS_1_A, _PLANE_WM_SAGV_TRANS_1_B)
+#define _PLANE_WM_SAGV_TRANS_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_TRANS_2_A, _PLANE_WM_SAGV_TRANS_2_B)
+#define PLANE_WM_SAGV_TRANS(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_SAGV_TRANS_1(pipe), _PLANE_WM_SAGV_TRANS_2(pipe)))
+#define _PLANE_WM_TRANS_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_TRANS_1_A, _PLANE_WM_TRANS_1_B)
+#define _PLANE_WM_TRANS_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_TRANS_2_A, _PLANE_WM_TRANS_2_B)
+#define PLANE_WM_TRANS(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
+
+#define _PLANE_BUF_CFG_1_B 0x7127c
+#define _PLANE_BUF_CFG_2_B 0x7137c
+#define _PLANE_BUF_CFG_1(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
+#define _PLANE_BUF_CFG_2(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+#define PLANE_BUF_CFG(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+
+#define _PLANE_NV12_BUF_CFG_1_B 0x71278
+#define _PLANE_NV12_BUF_CFG_2_B 0x71378
+#define _PLANE_NV12_BUF_CFG_1(pipe) \
+ _PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B)
+#define _PLANE_NV12_BUF_CFG_2(pipe) \
+ _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
+#define PLANE_NV12_BUF_CFG(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
+
+/* SKL new cursor registers */
+#define _CUR_BUF_CFG_A 0x7017c
+#define _CUR_BUF_CFG_B 0x7117c
+#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+
+/*
+ * The below are numbered starting from "S1" on gen11/gen12, but starting
+ * with display 13, the bspec switches to a 0-based numbering scheme
+ * (although the addresses stay the same so new S0 = old S1, new S1 = old S2).
+ * We'll just use the 0-based numbering here for all platforms since it's the
+ * way things will be named by the hardware team going forward, plus it's more
+ * consistent with how most of the rest of our registers are named.
+ */
+#define _DBUF_CTL_S0 0x45008
+#define _DBUF_CTL_S1 0x44FE8
+#define _DBUF_CTL_S2 0x44300
+#define _DBUF_CTL_S3 0x44304
+#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
+ _DBUF_CTL_S0, \
+ _DBUF_CTL_S1, \
+ _DBUF_CTL_S2, \
+ _DBUF_CTL_S3))
+#define DBUF_POWER_REQUEST REG_BIT(31)
+#define DBUF_POWER_STATE REG_BIT(30)
+#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
+#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
+#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
+#define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */
+
+#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
+#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784)
+#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788)
+#define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0)
+#define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16)
+
+#define MTL_LATENCY_SAGV _MMIO(0x4578c)
+#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0)
+
+#endif /* __SKL_WATERMARK_REGS_H__ */
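
This header leans on the kernel's REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() helpers to pack and unpack fields such as the 13-bit even/odd latency values. The userspace approximation below shows the same pack/unpack idea with simplified macros; the real kernel versions add compile-time checks that this sketch omits, and the sketch needs GCC/Clang for __builtin_ctz().

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for REG_GENMASK / REG_FIELD_PREP / REG_FIELD_GET. */
#define GENMASK32(h, l)       (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP32(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, r)  (((r) & (mask)) >> __builtin_ctz(mask))

#define LATENCY_EVEN_MASK GENMASK32(12, 0)
#define LATENCY_ODD_MASK  GENMASK32(28, 16)

int main(void)
{
	/* Pack two latency values into one register image, then unpack. */
	uint32_t reg = FIELD_PREP32(LATENCY_EVEN_MASK, 19) |
		       FIELD_PREP32(LATENCY_ODD_MASK, 42);

	printf("even=%u odd=%u\n",
	       FIELD_GET32(LATENCY_EVEN_MASK, reg),
	       FIELD_GET32(LATENCY_ODD_MASK, reg));
	return 0;
}
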
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 84481030883a..61d008d4e5f1 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -331,32 +331,23 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
bool cold_boot = false;
/* Set the MIPI mode
* If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting.
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- intel_de_write(dev_priv, MIPI_CTRL(port),
- tmp | GLK_MIPIIO_ENABLE);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
- tmp &= ~GLK_LP_WAKE;
- else
- tmp |= GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
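
Several hunks in this file replace open-coded read/modify/write sequences with intel_de_rmw(dev_priv, reg, clear, set). The sketch below shows what such a helper boils down to, operating on a plain variable instead of an MMIO register; it illustrates the calling convention only and is not the i915 implementation.

#include <stdint.h>
#include <stdio.h>

/* Read-modify-write: clear the 'clear' bits, then set the 'set' bits. */
static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

#define MIPIIO_ENABLE  (1u << 0)
#define RESET_RELEASED (1u << 1)

int main(void)
{
	uint32_t ctrl = RESET_RELEASED;

	rmw(&ctrl, 0, MIPIIO_ENABLE);    /* set a bit:   rmw(reg, 0, bit) */
	rmw(&ctrl, RESET_RELEASED, 0);   /* clear a bit: rmw(reg, bit, 0) */
	printf("ctrl = 0x%x\n", ctrl);   /* prints 0x1 */
	return 0;
}
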
@@ -380,7 +371,6 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -390,24 +380,18 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
}
/* Get IO out of reset */
- val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
- val | GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
@@ -415,20 +399,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_EXIT | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
-
- val = intel_de_read(dev_priv, MIPI_CTRL(port));
- val &= ~GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK,
+ ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
+
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
}
}
@@ -460,9 +439,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
- intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
@@ -482,7 +459,6 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -505,9 +481,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -537,15 +511,11 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Enter ULPS */
- for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -568,12 +538,9 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -583,11 +550,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
}
/* Clear MIPI mode */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~GLK_MIPIIO_ENABLE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -607,7 +571,6 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
- u32 val;
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
DEVICE_READY | ULPS_STATE_ENTER);
@@ -631,8 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
@@ -649,23 +611,17 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- u32 temp;
+ u32 temp = intel_dsi->pixel_overlap;
+
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- temp = intel_de_read(dev_priv,
- MIPI_CTRL(port));
- temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- BXT_PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, MIPI_CTRL(port),
- temp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIXEL_OVERLAP_CNT_MASK,
+ temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
- temp &= ~PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
+ intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ PIXEL_OVERLAP_CNT_MASK,
+ temp << PIXEL_OVERLAP_CNT_SHIFT);
}
}
@@ -709,11 +665,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- u32 temp;
/* de-assert ip_tg_enable signal */
- temp = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -783,11 +737,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
- u32 val;
bool glk_cold_boot = false;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -810,9 +762,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
@@ -820,32 +770,18 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 val;
-
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ 0, DPOUNIT_CLOCK_GATE_DISABLE);
}
if (!IS_GEMINILAKE(dev_priv))
intel_dsi_prepare(encoder, pipe_config);
+ /* Give the panel time to power-on and then deassert its reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-
- /*
- * Give the panel time to power-on and then deassert its reset.
- * Depending on the VBT MIPI sequences version the deassert-seq
- * may contain the necessary delay, intel_dsi_msleep() will skip
- * the delay in that case. If there is no deassert-seq, then an
- * unconditional msleep is used to give the panel time to power-on.
- */
- if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- } else {
- msleep(intel_dsi->panel_on_delay);
- }
+ msleep(intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
@@ -879,7 +815,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(20); /* XXX */
for_each_dsi_port(port, intel_dsi->ports)
dpi_send_cmd(intel_dsi, TURN_ON, false, port);
- intel_dsi_msleep(intel_dsi, 100);
+ msleep(100);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
@@ -949,7 +885,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -987,27 +922,22 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_pll_disable(encoder);
} else {
- u32 val;
-
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
/* Assert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
+ msleep(intel_dsi->panel_off_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
intel_dsi->panel_power_off_time = ktime_get_boottime();
@@ -1058,7 +988,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -1130,7 +1060,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
- pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+ pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
@@ -1432,11 +1362,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~BXT_PIPE_SELECT_MASK;
-
- tmp |= BXT_PIPE_SELECT(pipe);
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
@@ -1605,7 +1532,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
if (IS_GEMINILAKE(dev_priv))
return;
@@ -1620,9 +1546,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
vlv_dsi_reset_clocks(encoder, port);
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
@@ -1916,7 +1840,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
if (intel_connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
@@ -1983,7 +1907,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err_cleanup_connector;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index af7402127cd9..b697badbbe71 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -302,13 +302,10 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val &= ~BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
@@ -542,7 +539,6 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -559,9 +555,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val |= BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
@@ -589,13 +583,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
- tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
- tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b3b398fe689c..385ffc575b48 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,6 +8,7 @@
#include "display/intel_frontbuffer.h"
+#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 7f2831efc798..5402a7bbcb1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -257,7 +257,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
if (!protected) {
pc->uses_protected_content = false;
- } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
+ } else if (!intel_pxp_is_enabled(i915->pxp)) {
ret = -ENODEV;
} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
!(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -271,8 +271,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
*/
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (!intel_pxp_is_active(&to_gt(i915)->pxp))
- ret = intel_pxp_start(&to_gt(i915)->pxp);
+ if (!intel_pxp_is_active(i915->pxp))
+ ret = intel_pxp_start(i915->pxp);
}
return ret;
@@ -364,7 +364,7 @@ static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
{
- struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_private *i915 = fpriv->i915;
struct i915_address_space *vm;
if (args->size)
@@ -733,7 +733,7 @@ static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
{
- struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_private *i915 = fpriv->i915;
struct set_proto_ctx_engines set = { .i915 = i915 };
struct i915_context_param_engines __user *user =
u64_to_user_ptr(args->value);
@@ -813,7 +813,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
struct drm_i915_gem_context_param *args)
{
- struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_private *i915 = fpriv->i915;
struct drm_i915_gem_context_param_sseu user_sseu;
struct intel_sseu *sseu;
int ret;
@@ -913,7 +913,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
break;
case I915_CONTEXT_PARAM_PRIORITY:
- ret = validate_priority(fpriv->dev_priv, args);
+ ret = validate_priority(fpriv->i915, args);
if (!ret)
pc->sched.priority = args->value;
break;
@@ -934,12 +934,12 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
if (args->size)
ret = -EINVAL;
else
- ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+ ret = proto_context_set_persistence(fpriv->i915, pc,
args->value);
break;
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
- ret = proto_context_set_protected(fpriv->dev_priv, pc,
+ ret = proto_context_set_protected(fpriv->i915, pc,
args->value);
break;
@@ -1096,16 +1096,15 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
struct intel_sseu rcs_sseu)
{
- const struct intel_gt *gt = to_gt(ctx->i915);
+ const unsigned int max = I915_NUM_ENGINES;
struct intel_engine_cs *engine;
struct i915_gem_engines *e, *err;
- enum intel_engine_id id;
- e = alloc_engines(I915_NUM_ENGINES);
+ e = alloc_engines(max);
if (!e)
return ERR_PTR(-ENOMEM);
- for_each_engine(engine, gt, id) {
+ for_each_uabi_engine(engine, ctx->i915) {
struct intel_context *ce;
struct intel_sseu sseu = {};
int ret;
@@ -1113,7 +1112,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
if (engine->legacy_idx == INVALID_ENGINE)
continue;
- GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+ GEM_BUG_ON(engine->legacy_idx >= max);
GEM_BUG_ON(e->engines[engine->legacy_idx]);
ce = intel_context_create(engine);
@@ -1688,6 +1687,10 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
init_contexts(&i915->gem.contexts);
}
+/*
+ * Note that this implicitly consumes the ctx reference, by placing
+ * the ctx in the context_xa.
+ */
static void gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv,
u32 id)
@@ -1703,10 +1706,6 @@ static void gem_context_register(struct i915_gem_context *ctx,
snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
current->comm, pid_nr(ctx->pid));
- /* And finally expose ourselves to userspace via the idr */
- old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
- WARN_ON(old);
-
spin_lock(&ctx->client->ctx_lock);
list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
spin_unlock(&ctx->client->ctx_lock);
@@ -1714,6 +1713,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
+
+ /* And finally expose ourselves to userspace via the idr */
+ old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
+ WARN_ON(old);
}
int i915_gem_context_open(struct drm_i915_private *i915,
@@ -1767,7 +1770,7 @@ void i915_gem_context_close(struct drm_file *file)
unsigned long idx;
xa_for_each(&file_priv->proto_context_xa, idx, pc)
- proto_context_close(file_priv->dev_priv, pc);
+ proto_context_close(file_priv->i915, pc);
xa_destroy(&file_priv->proto_context_xa);
mutex_destroy(&file_priv->proto_context_lock);
@@ -1857,11 +1860,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
vm = ctx->vm;
GEM_BUG_ON(!vm);
+ /*
+ * Get a reference for the allocated handle. Once the handle is
+ * visible in the vm_xa table, userspace could try to close it
+ * from under our feet, so we need to hold the extra reference
+ * first.
+ */
+ i915_vm_get(vm);
+
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return err;
-
- i915_vm_get(vm);
+ }
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->value = id;
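
Both this hunk and the gem_context_register()/finalize_create_context_locked() changes above move reference acquisition in front of the store that makes the object reachable from userspace, so a racing close can never drop the last reference first. A toy standalone version of that ordering is sketched below; the refcount, table and function names are invented for illustration.

#include <stdio.h>

struct obj {
	int refcount;
};

static struct obj *table[16];   /* stands in for the xarray */

static void get(struct obj *o) { o->refcount++; }
static void put(struct obj *o) { o->refcount--; }

/* Publish an object under an id: take the table's reference *before*
 * the store that makes it visible to other threads. */
static void publish(int id, struct obj *o)
{
	get(o);
	table[id] = o;
}

int main(void)
{
	struct obj o = { .refcount = 1 };   /* caller's reference */

	publish(3, &o);
	printf("refcount after publish: %d\n", o.refcount);   /* 2 */
	put(&o);                            /* caller drops its reference */
	return 0;
}
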
@@ -2195,18 +2206,26 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
lockdep_assert_held(&file_priv->proto_context_lock);
- ctx = i915_gem_create_context(file_priv->dev_priv, pc);
+ ctx = i915_gem_create_context(file_priv->i915, pc);
if (IS_ERR(ctx))
return ctx;
+ /*
+ * One for the xarray and one for the caller. We need to grab
+ * the reference *prior* to making the ctx visible to userspace
+ * in gem_context_register(), as at any point after that
+ * userspace can try to race us with another thread destroying
+ * the context under our feet.
+ */
+ i915_gem_context_get(ctx);
+
gem_context_register(ctx, file_priv, id);
old = xa_erase(&file_priv->proto_context_xa, id);
GEM_BUG_ON(old != pc);
- proto_context_close(file_priv->dev_priv, pc);
+ proto_context_close(file_priv->i915, pc);
- /* One for the xarray and one for the caller */
- return i915_gem_context_get(ctx);
+ return ctx;
}
struct i915_gem_context *
@@ -2333,7 +2352,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
GEM_WARN_ON(ctx && pc);
if (pc)
- proto_context_close(file_priv->dev_priv, pc);
+ proto_context_close(file_priv->i915, pc);
if (ctx)
context_close(ctx);
@@ -2486,7 +2505,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
* GEM_CONTEXT_CREATE starting with graphics
* version 13.
*/
- WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
+ WARN_ON(GRAPHICS_VER(file_priv->i915) > 12);
ret = set_proto_ctx_param(file_priv, pc, args);
} else {
ret = -ENOENT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 33673fe7ee0a..bfe1dbda4cb7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -5,6 +5,7 @@
#include <drm/drm_fourcc.h>
+#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -143,7 +144,8 @@ object_free:
}
/**
- * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
+ * __i915_gem_object_create_user - Creates a new object using the same path as
+ * DRM_I915_GEM_CREATE_EXT
* @i915: i915 private
* @size: size of the buffer, in bytes
* @placements: possible placement regions, in priority order
@@ -214,7 +216,7 @@ i915_gem_dumb_create(struct drm_file *file,
}
/**
- * Creates a new mm object and returns a handle to it.
+ * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
@@ -384,7 +386,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
if (ext.flags)
return -EINVAL;
- if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
+ if (!intel_pxp_is_enabled(ext_data->i915->pxp))
return -ENODEV;
ext_data->flags |= I915_BO_PROTECTED;
@@ -398,7 +400,7 @@ static const i915_user_extension_fn create_extensions[] = {
};
/**
- * Creates a new mm object and returns a handle to it.
+ * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index d44a152ce680..d2d5a24301b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -4,6 +4,7 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
@@ -17,6 +18,8 @@
#include "i915_gem_object.h"
#include "i915_vma.h"
+#define VTD_GUARD (168u * I915_GTT_PAGE_SIZE) /* 168 or tile-row PTE padding */
+
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -113,7 +116,8 @@ void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
}
/**
- * Moves a single object to the WC read, and possibly write domain.
+ * i915_gem_object_set_to_wc_domain - Moves a single object to the WC read, and
+ * possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
@@ -174,7 +178,8 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
}
/**
- * Moves a single object to the GTT read, and possibly write domain.
+ * i915_gem_object_set_to_gtt_domain - Moves a single object to the GTT read,
+ * and possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
@@ -243,7 +248,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
/**
- * Changes the cache-level of an object across all VMA.
+ * i915_gem_object_set_cache_level - Changes the cache-level of an object across all VMA.
* @obj: object to act on
* @cache_level: new cache level to set for the object
*
@@ -424,6 +429,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (ret)
return ERR_PTR(ret);
+ /* VT-d may overfetch before/after the vma, so pad with scratch */
+ if (intel_scanout_needs_vtd_wa(i915)) {
+ unsigned int guard = VTD_GUARD;
+
+ if (i915_gem_object_is_tiled(obj))
+ guard = max(guard,
+ i915_gem_object_get_tile_row_size(obj));
+
+ flags |= PIN_OFFSET_GUARD | guard;
+ }
+
/*
* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
@@ -444,7 +460,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return vma;
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ vma->display_alignment = max(vma->display_alignment, alignment);
i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
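
The VT-d guard hunk above pads a scanout vma with scratch pages, using the larger of the fixed VTD_GUARD and the object's tile row size. The arithmetic reduces to the small sketch below; the stride and tile height are made-up example numbers, and only the 168-page constant comes from the patch.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define VTD_GUARD (168u * PAGE_SIZE)   /* fixed guard, as in the patch */

int main(void)
{
	/* Hypothetical tiled framebuffer: 16 KiB stride, 8-row tiles. */
	unsigned int tile_row = 16384u * 8u;
	unsigned int guard = VTD_GUARD;

	if (tile_row > guard)          /* pad with whichever is larger */
		guard = tile_row;

	printf("guard = %u bytes (%u pages)\n", guard, guard / PAGE_SIZE);
	return 0;
}
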
@@ -453,7 +469,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
}
/**
- * Moves a single object to the CPU read, and possibly write domain.
+ * i915_gem_object_set_to_cpu_domain - Moves a single object to the CPU read,
+ * and possibly write domain.
* @obj: object to act on
* @write: requesting write or read-only access
*
@@ -497,7 +514,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
/**
- * Called when user space prepares to use an object with the CPU, either
+ * i915_gem_set_domain_ioctl - Called when user space prepares to use an
+ * object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
* @dev: drm device
* @data: ioctl data blob
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index da09767fda07..3aeede6aee4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -379,22 +379,25 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
const struct i915_vma *vma,
unsigned int flags)
{
- if (vma->node.size < entry->pad_to_size)
+ const u64 start = i915_vma_offset(vma);
+ const u64 size = i915_vma_size(vma);
+
+ if (size < entry->pad_to_size)
return true;
- if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
+ if (entry->alignment && !IS_ALIGNED(start, entry->alignment))
return true;
if (flags & EXEC_OBJECT_PINNED &&
- vma->node.start != entry->offset)
+ start != entry->offset)
return true;
if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
- vma->node.start < BATCH_OFFSET_BIAS)
+ start < BATCH_OFFSET_BIAS)
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size + 4095) >> 32)
+ (start + size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -440,7 +443,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
int err;
if (vma->node.size)
- pin_flags = vma->node.start;
+ pin_flags = __i915_vma_offset(vma);
else
pin_flags = entry->offset & PIN_OFFSET_MASK;
@@ -663,8 +666,8 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
if (err)
return err;
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
+ if (entry->offset != i915_vma_offset(vma)) {
+ entry->offset = i915_vma_offset(vma) | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
@@ -730,37 +733,74 @@ static int eb_reserve(struct i915_execbuffer *eb)
bool unpinned;
/*
- * Attempt to pin all of the buffers into the GTT.
- * This is done in 2 phases:
+ * We have one more buffers that we couldn't bind, which could be due to
+ * various reasons. To resolve this we have 4 passes, with every next
+ * level turning the screws tighter:
+ *
+ * 0. Unbind all objects that do not match the GTT constraints for the
+ * execbuffer (fenceable, mappable, alignment etc). Bind all new
+ * objects. This avoids unnecessary unbinding of later objects in order
+ * to make room for the earlier objects *unless* we need to defragment.
*
- * 1. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 2. Bind new objects.
+ * 1. Reorder the buffers, where objects with the most restrictive
+ * placement requirements go first (ignoring fixed location buffers for
+ * now). For example, objects needing the mappable aperture (the first
+ * 256M of GTT), should go first vs objects that can be placed just
+ * about anywhere. Repeat the previous pass.
*
- * This avoid unnecessary unbinding of later objects in order to make
- * room for the earlier objects *unless* we need to defragment.
+ * 2. Consider buffers that are pinned at a fixed location. Also try to
+ * evict the entire VM this time, leaving only objects that we were
+ * unable to lock. Try again to bind the buffers. (still using the new
+ * buffer order).
*
- * Defragmenting is skipped if all objects are pinned at a fixed location.
+ * 3. We likely have object lock contention for one or more stubborn
+ * objects in the VM, for which we need to evict to make forward
+ * progress (perhaps we are fighting the shrinker?). When evicting the
+ * VM this time around, anything that we can't lock we now track using
+ * the busy_bo, using the full lock (after dropping the vm->mutex to
+ * prevent deadlocks), instead of trylock. We then continue to evict the
+ * VM, this time with the stubborn object locked, which we can now
+ * hopefully unbind (if still bound in the VM). Repeat until the VM is
+ * evicted. Finally we should be able bind everything.
*/
- for (pass = 0; pass <= 2; pass++) {
+ for (pass = 0; pass <= 3; pass++) {
int pin_flags = PIN_USER | PIN_VALIDATE;
if (pass == 0)
pin_flags |= PIN_NONBLOCK;
if (pass >= 1)
- unpinned = eb_unbind(eb, pass == 2);
+ unpinned = eb_unbind(eb, pass >= 2);
if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
- err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
mutex_unlock(&eb->context->vm->mutex);
}
if (err)
return err;
}
+ if (pass == 3) {
+retry:
+ err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ if (!err) {
+ struct drm_i915_gem_object *busy_bo = NULL;
+
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
+ mutex_unlock(&eb->context->vm->mutex);
+ if (err && busy_bo) {
+ err = i915_gem_object_lock(busy_bo, &eb->ww);
+ i915_gem_object_put(busy_bo);
+ if (!err)
+ goto retry;
+ }
+ }
+ if (err)
+ return err;
+ }
+
list_for_each_entry(ev, &eb->unbound, bind_link) {
err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
@@ -869,7 +909,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
- err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
+ err = intel_pxp_key_check(eb->i915->pxp, obj, true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -984,8 +1024,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
return err;
if (!err) {
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
+ if (entry->offset != i915_vma_offset(vma)) {
+ entry->offset = i915_vma_offset(vma) | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
@@ -1066,7 +1106,7 @@ static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
const struct i915_vma *target)
{
- return gen8_canonical_addr((int)reloc->delta + target->node.start);
+ return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target));
}
static void reloc_cache_init(struct reloc_cache *cache,
@@ -1275,7 +1315,7 @@ static void *reloc_iomap(struct i915_vma *batch,
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
- cache->node.start = vma->node.start;
+ cache->node.start = i915_ggtt_offset(vma);
cache->node.mm = (void *)vma;
}
}
@@ -1438,7 +1478,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -2368,7 +2408,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
}
err = rq->context->engine->emit_bb_start(rq,
- batch->node.start +
+ i915_vma_offset(batch) +
eb->batch_start_offset,
batch_len,
eb->batch_flags);
@@ -2379,7 +2419,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
GEM_BUG_ON(intel_context_is_parallel(rq->context));
GEM_BUG_ON(eb->batch_start_offset);
err = rq->context->engine->emit_bb_start(rq,
- eb->trampoline->node.start +
+ i915_vma_offset(eb->trampoline) +
batch_len, 0, 0);
if (err)
return err;
@@ -2409,11 +2449,6 @@ static int eb_submit(struct i915_execbuffer *eb)
return err;
}
-static int num_vcs_engines(struct drm_i915_private *i915)
-{
- return hweight_long(VDBOX_MASK(to_gt(i915)));
-}
-
/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
@@ -2427,7 +2462,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_u32_below(num_vcs_engines(dev_priv));
+ get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
return file_priv->bsd_engine;
}
@@ -2615,7 +2650,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
return -1;
}
- if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
+ if (user_ring_id == I915_EXEC_BSD &&
+ i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO] > 1) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -3446,6 +3482,13 @@ err_request:
eb.composite_fence :
&eb.requests[0]->fence);
+ if (unlikely(eb.gem_context->syncobj)) {
+ drm_syncobj_replace_fence(eb.gem_context->syncobj,
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
+ }
+
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@@ -3457,13 +3500,6 @@ err_request:
}
}
- if (unlikely(eb.gem_context->syncobj)) {
- drm_syncobj_replace_fence(eb.gem_context->syncobj,
- eb.composite_fence ?
- eb.composite_fence :
- &eb.requests[0]->fence);
- }
-
if (!out_fence && eb.composite_fence)
dma_fence_put(eb.composite_fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index f66bcefc09ec..6bc26b4b06b8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -35,11 +35,15 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
- unsigned int npages;
+ unsigned int npages; /* restricted by sg_alloc_table */
int max_order = MAX_ORDER;
unsigned int max_segment;
gfp_t gfp;
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
+ return -E2BIG;
+
+ npages = obj->base.size >> PAGE_SHIFT;
max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
max_order = min(max_order, get_order(max_segment));
@@ -55,7 +59,6 @@ create_st:
if (!st)
return -ENOMEM;
- npages = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 8949fb0a944f..3198b64ad7db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
memcpy(map, data, size);
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_flush_map(obj);
+ __i915_gem_object_release_map(obj);
return obj;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c29efdef8313..d3c1dee16af2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -369,7 +369,7 @@ retry:
if (vma == ERR_PTR(-ENOSPC)) {
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
if (!ret) {
- ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
if (ret)
@@ -395,7 +395,7 @@ retry:
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
if (ret)
@@ -697,7 +697,7 @@ insert:
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
- drm_vma_node_allow(&mmo->vma_node, file);
+ drm_vma_node_allow_once(&mmo->vma_node, file);
return mmo;
err:
@@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
i915_gem_object_put(obj);
return -EINVAL;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
}
anon = mmap_singleton(to_i915(dev));
@@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
/*
* We keep the ref on mmo->obj, not vm_file, but we require
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1a0886b8aaa1..4666bb82f312 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -427,10 +427,11 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
+ pgoff_t idx = offset >> PAGE_SHIFT;
void *src_map;
void *src_ptr;
- src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+ src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
src_ptr = src_map + offset_in_page(offset);
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
@@ -443,9 +444,10 @@ i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset,
static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
+ pgoff_t idx = offset >> PAGE_SHIFT;
+ dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
void __iomem *src_map;
void __iomem *src_ptr;
- dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
src_map = io_mapping_map_wc(&obj->mm.region->iomap,
dma - obj->mm.region->region.start,
@@ -484,6 +486,7 @@ static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
*/
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
+ GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
GEM_BUG_ON(offset >= obj->base.size);
GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -872,7 +875,7 @@ int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
return ret < 0 ? ret : 0;
}
-/**
+/*
* i915_gem_object_has_unknown_state - Return true if the object backing pages are
* in an unknown_state. This means that userspace must NEVER be allowed to touch
* the pages, with either the GPU or CPU.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3db53769864c..885ccde9dc3c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -20,26 +20,10 @@
enum intel_region_id;
-/*
- * XXX: There is a prevalence of the assumption that we fit the
- * object's page count inside a 32bit _signed_ variable. Let's document
- * this and catch if we ever need to fix it. In the meantime, if you do
- * spot such a local variable, please consider fixing!
- *
- * Aside from our own locals (for which we have no excuse!):
- * - sg_table embeds unsigned int for num_pages
- * - get_user_pages*() mixed ints with longs
- */
-#define GEM_CHECK_SIZE_OVERFLOW(sz) \
- GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
-
static inline bool i915_gem_object_size_2big(u64 size)
{
struct drm_i915_gem_object *obj;
- if (GEM_CHECK_SIZE_OVERFLOW(size))
- return true;
-
if (overflows_type(size, obj->base.size))
return true;
@@ -319,7 +303,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
- return READ_ONCE(obj->frontbuffer);
+ return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}
static inline unsigned int
@@ -363,44 +347,289 @@ i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride);
+/**
+ * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
+ * pointer and the target page position using pgoff_t n input argument and
+ * i915_gem_object_page_iter
+ * @obj: i915 GEM buffer object
+ * @iter: i915 GEM buffer object page iterator
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
+ * Takes and releases the RCU lock to search the radix_tree of
+ * i915_gem_object_page_iter.
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
+ */
struct scatterlist *
-__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- struct i915_gem_object_page_iter *iter,
- unsigned int n,
- unsigned int *offset, bool dma);
+__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ pgoff_t n,
+ unsigned int *offset);
+/**
+ * i915_gem_object_page_iter_get_sg - wrapper macro for
+ * __i915_gem_object_page_iter_get_sg()
+ * @obj: i915 GEM buffer object
+ * @it: i915 GEM buffer object page iterator
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
+ * Takes and releases the RCU lock to search the radix_tree of
+ * i915_gem_object_page_iter.
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_page_iter_get_sg().
+ */
+#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_page_iter_get_sg(obj, it, n, offset); \
+})
+
+/**
+ * __i915_gem_object_get_sg - helper to find the target scatterlist
+ * pointer and the target page position using pgoff_t n input argument and
+ * drm_i915_gem_object. It uses an internal shmem scatterlist lookup function.
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset;
+ *          used to return the physical page offset value
+ *
+ * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
+ * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_sg()
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
static inline struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
+ unsigned int *offset)
{
- return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
+ return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}
+/**
+ * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset;
+ *          used to return the physical page offset value
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_sg().
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
+#define i915_gem_object_get_sg(obj, n, offset) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_sg(obj, n, offset); \
+})
+
+/**
+ * __i915_gem_object_get_sg_dma - helper to find the target scatterlist
+ * pointer and the target page position using pgoff_t n input argument and
+ * drm_i915_gem_object. It uses an internal DMA mapped scatterlist lookup function
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset;
+ *          used to return the physical page offset value
+ *
+ * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
+ * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
static inline struct scatterlist *
-i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
+ unsigned int *offset)
{
- return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
+ return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}
+/**
+ * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset;
+ *          used to return the physical page offset value
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_sg_dma().
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
+#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_sg_dma(obj, n, offset); \
+})
+
+/**
+ * __i915_gem_object_get_page - helper to find the target page with a page offset
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
+ * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
+ * internally.
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_page()
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);
+/**
+ * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_page().
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
+#define i915_gem_object_get_page(obj, n) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_page(obj, n); \
+})
+
+/**
+ * __i915_gem_object_get_dirty_page - helper to find the target page with a page
+ * offset
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
+ */
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);
+
+/**
+ * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_dirty_page().
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
+ */
+#define i915_gem_object_get_dirty_page(obj, n) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_dirty_page(obj, n); \
+})
+/**
+ * __i915_gem_object_get_dma_address_len - helper to get bus addresses of
+ * targeted DMA mapped scatterlist from i915 GEM buffer object and its length
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @len: DMA mapped scatterlist's DMA bus addresses length to return
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
+ */
dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len);
+__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
+ unsigned int *len);
+
+/**
+ * i915_gem_object_get_dma_address_len - wrapper macro for
+ * __i915_gem_object_get_dma_address_len
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @len: DMA mapped scatterlist's DMA bus addresses length to return
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_dma_address_len().
+ * See also __i915_gem_object_page_iter_get_sg() and
+ * __i915_gem_object_get_dma_address_len()
+ */
+#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_dma_address_len(obj, n, len); \
+})
+/**
+ * __i915_gem_object_get_dma_address - helper to get bus addresses of
+ * targeted DMA mapped scatterlist from i915 GEM buffer object
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
+ */
dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n);
+__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);
+
+/**
+ * i915_gem_object_get_dma_address - wrapper macro for
+ * __i915_gem_object_get_dma_address
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_dma_address().
+ * See also __i915_gem_object_page_iter_get_sg() and
+ * __i915_gem_object_get_dma_address()
+ */
+#define i915_gem_object_get_dma_address(obj, n) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_dma_address(obj, n); \
+})
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
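The i915_gem_object_get_*() wrapper macros above exist to catch page-offset arguments that would be silently truncated when converted to pgoff_t. A minimal userspace sketch of the same compile-time guard follows; lookup_page() is hypothetical and the sizeof() comparison is only a simplified stand-in for the kernel's castable_to_type():

#include <assert.h>
#include <stdio.h>

typedef unsigned long pgoff_t;

/* Hypothetical lookup used only to make the sketch runnable. */
static const char *lookup_page(pgoff_t n)
{
	static char buf[32];

	snprintf(buf, sizeof(buf), "page-%lu", n);
	return buf;
}

/*
 * Simplified stand-in for the kernel's castable_to_type() check: reject any
 * argument wider than pgoff_t at compile time instead of truncating it.
 */
#define get_page_checked(n) ({ \
	static_assert(sizeof(n) <= sizeof(pgoff_t), "page offset may be truncated"); \
	lookup_page(n); \
})

int main(void)
{
	pgoff_t n = 42;

	puts(get_page_checked(n));		/* builds: pgoff_t fits */
	/* puts(get_page_checked((__int128)n));	   would fail to compile */
	return 0;
}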
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index ab4c2f90a564..5dcbbef31d44 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -10,7 +10,7 @@
#include <linux/mmu_notifier.h>
#include <drm/drm_gem.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
@@ -491,6 +491,9 @@ struct drm_i915_gem_object {
*/
unsigned int cache_dirty:1;
+ /* @is_dpt: Object houses a display page table (DPT) */
+ unsigned int is_dpt:1;
+
/**
* @read_domains: Read memory domains.
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 05a27723ebb8..ecd86130b74f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -521,14 +521,16 @@ void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
}
struct scatterlist *
-__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- struct i915_gem_object_page_iter *iter,
- unsigned int n,
- unsigned int *offset,
- bool dma)
+__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ pgoff_t n,
+ unsigned int *offset)
+
{
- struct scatterlist *sg;
+ const bool dma = iter == &obj->mm.get_dma_page ||
+ iter == &obj->ttm.get_io_page;
unsigned int idx, count;
+ struct scatterlist *sg;
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
@@ -636,7 +638,7 @@ lookup:
}
struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
struct scatterlist *sg;
unsigned int offset;
@@ -649,8 +651,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
+__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
struct page *page;
@@ -662,9 +663,8 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len)
+__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ pgoff_t n, unsigned int *len)
{
struct scatterlist *sg;
unsigned int offset;
@@ -678,8 +678,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
+__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 68453572275b..76efe98eaa14 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -28,6 +28,10 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
void *dst;
int i;
+ /* Contiguous chunk, with a single scatterlist element */
+ if (overflows_type(obj->base.size, sg->length))
+ return -E2BIG;
+
if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
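The added i915_gem_object_get_pages_phys() check rejects objects whose size cannot fit the 32-bit sg->length field before a single-entry scatterlist is built. A hedged userspace sketch of that runtime guard, with fits_in_u32() standing in for the kernel's overflows_type():

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for overflows_type(size, sg->length). */
static bool fits_in_u32(uint64_t v)
{
	return v <= UINT32_MAX;
}

static int check_phys_size(uint64_t obj_size)
{
	if (!fits_in_u32(obj_size))
		return -E2BIG;	/* too large for one scatterlist entry */
	return 0;
}

int main(void)
{
	printf("%d\n", check_phys_size(1ULL << 20));	/* 0 */
	printf("%d\n", check_phys_size(1ULL << 33));	/* -E2BIG */
	return 0;
}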
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9c759df700ca..37d1efcd3ca6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -60,7 +60,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
struct address_space *mapping,
unsigned int max_segment)
{
- const unsigned long page_count = size / PAGE_SIZE;
+ unsigned int page_count; /* restricted by sg_alloc_table */
unsigned long i;
struct scatterlist *sg;
struct page *page;
@@ -68,6 +68,10 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
gfp_t noreclaim;
int ret;
+ if (overflows_type(size / PAGE_SIZE, page_count))
+ return -E2BIG;
+
+ page_count = size / PAGE_SIZE;
/*
* If there's no chance of allocating enough pages for the whole
* object, bail early.
@@ -193,7 +197,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_memory_region *mem = obj->mm.region;
struct address_space *mapping = obj->base.filp->f_mapping;
- const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
struct sgt_iter sgt_iter;
@@ -235,8 +238,8 @@ rebuild_st:
goto rebuild_st;
} else {
dev_warn(i915->drm.dev,
- "Failed to DMA remap %lu pages\n",
- page_count);
+ "Failed to DMA remap %zu pages\n",
+ obj->base.size >> PAGE_SHIFT);
goto err_pages;
}
}
@@ -538,6 +541,20 @@ static int __create_shmem(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, obj, size);
+ /* XXX: The __shmem_file_setup() function returns -EINVAL if size is
+ * greater than MAX_LFS_FILESIZE.
+ * To handle the same error as other code that returns -E2BIG when
+ * the size is too large, return -E2BIG here as well once the size
+ * exceeds what can be handled.
+ * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
+ * so the check is only needed when BITS_PER_LONG is 64.
+ * On 32-bit, the E2BIG check is already done by
+ * i915_gem_object_size_2big(), which is called before the
+ * init_object() callback.
+ */
+ if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
+ return -E2BIG;
+
if (i915->mm.gemfs)
filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
flags);
@@ -579,7 +596,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 8dc5c8874d8a..b1672e054b21 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -400,7 +400,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
list_for_each_entry_safe(vma, next,
&to_gt(i915)->ggtt->vm.bound_list, vm_link) {
- unsigned long count = vma->node.size >> PAGE_SHIFT;
+ unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
if (!vma->iomap || i915_vma_is_active(vma))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index bc9521078807..8ac376c24aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -110,9 +110,7 @@ static int adjust_stolen(struct drm_i915_private *i915,
else
ggtt_start &= PGTBL_ADDRESS_LO_MASK;
- ggtt_res =
- (struct resource) DEFINE_RES_MEM(ggtt_start,
- ggtt_total_entries(ggtt) * 4);
+ ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);
if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
stolen[0].end = ggtt_res.start;
@@ -211,7 +209,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
IS_GM45(i915) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
IS_GM45(i915) ? "CTG" : "ELK", reg_val);
@@ -276,7 +274,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *size)
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -365,7 +363,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *size)
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -414,7 +412,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
/*
- * Initialize i915->dsm_reserved to contain the reserved space within the Data
+ * Initialize i915->dsm.reserved to contain the reserved space within the Data
* Stolen Memory. This is a range on the top of DSM that is reserved, not to
* be used by driver, so must be excluded from the region passed to the
* allocator later. In the spec this is also called as WOPCM.
@@ -430,7 +428,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
resource_size_t reserved_size;
int ret = 0;
- stolen_top = i915->dsm.end + 1;
+ stolen_top = i915->dsm.stolen.end + 1;
reserved_base = stolen_top;
reserved_size = 0;
@@ -471,13 +469,12 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
goto bail_out;
}
- i915->dsm_reserved =
- (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
+ i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
+ if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
drm_err(&i915->drm,
"Stolen reserved area %pR outside stolen memory %pR\n",
- &i915->dsm_reserved, &i915->dsm);
+ &i915->dsm.reserved, &i915->dsm.stolen);
ret = -EINVAL;
goto bail_out;
}
@@ -485,8 +482,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
return 0;
bail_out:
- i915->dsm_reserved =
- (struct resource)DEFINE_RES_MEM(reserved_base, 0);
+ i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);
return ret;
}
@@ -517,27 +513,27 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
if (request_smem_stolen(i915, &mem->region))
return -ENOSPC;
- i915->dsm = mem->region;
+ i915->dsm.stolen = mem->region;
if (init_reserved_stolen(i915))
return -ENOSPC;
/* Exclude the reserved region from driver use */
- mem->region.end = i915->dsm_reserved.start - 1;
+ mem->region.end = i915->dsm.reserved.start - 1;
mem->io_size = min(mem->io_size, resource_size(&mem->region));
- i915->stolen_usable_size = resource_size(&mem->region);
+ i915->dsm.usable_size = resource_size(&mem->region);
drm_dbg(&i915->drm,
"Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&i915->dsm) >> 10,
- (u64)i915->stolen_usable_size >> 10);
+ (u64)resource_size(&i915->dsm.stolen) >> 10,
+ (u64)i915->dsm.usable_size >> 10);
- if (i915->stolen_usable_size == 0)
+ if (i915->dsm.usable_size == 0)
return -ENOSPC;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
+ drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
return 0;
}
@@ -587,7 +583,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -607,7 +603,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -894,8 +890,9 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
/*
* MTL dsm size is in GGC register.
- * Also MTL uses offset to DSMBASE in ptes, so i915
- * uses dsm_base = 0 to setup stolen region.
+ * Also MTL uses offset to GSMBASE in ptes, so i915
+ * uses dsm_base = 8MB to set up the stolen region, since
+ * DSMBASE = GSMBASE + 8MB.
*/
ret = mtl_get_gms_size(uncore);
if (ret < 0) {
@@ -903,27 +900,25 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
return ERR_PTR(ret);
}
- dsm_base = 0;
+ dsm_base = SZ_8M;
dsm_size = (resource_size_t)(ret * SZ_1M);
GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
- GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
+ GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
} else {
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
- dsm_size = lmem_size - dsm_base;
+ dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
- io_size = dsm_size;
- if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
- io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
- } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+ if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
+ io_size = dsm_size;
}
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
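The i915->dsm.stolen / i915->dsm.reserved / i915->dsm.usable_size accesses above assume the stolen-memory bookkeeping has been regrouped into a single structure. A sketch of the assumed layout, inferred from the usage in this hunk rather than copied from the driver header:

struct i915_dsm {
	struct resource stolen;		/* full Data Stolen Memory range */
	struct resource reserved;	/* reserved (WOPCM) range at the top of DSM */
	resource_size_t usable_size;	/* remainder handed to the stolen allocator */
};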
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index fd42b89b7162..a049ca0b7980 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -168,11 +168,11 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma,
return true;
size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
- if (vma->node.size < size)
+ if (i915_vma_size(vma) < size)
return false;
alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
- if (!IS_ALIGNED(vma->node.start, alignment))
+ if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment))
return false;
return true;
@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
obj->tiling_and_stride = tiling | stride;
- i915_gem_object_unlock(obj);
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_object_release_mmap_gtt(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->bit_17 = NULL;
}
+ i915_gem_object_unlock(obj);
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_object_release_mmap_gtt(obj);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1e50fb0d6bfc..9227f8146a58 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -5,8 +5,8 @@
#include <linux/shmem_fs.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/drm_buddy.h>
#include "i915_drv.h"
@@ -140,13 +140,16 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags |= TTM_PL_FLAG_CONTIGUOUS;
if (offset != I915_BO_INVALID_OFFSET) {
+ WARN_ON(overflows_type(offset >> PAGE_SHIFT, place->fpfn));
place->fpfn = offset >> PAGE_SHIFT;
+ WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
} else if (mr->io_size && mr->io_size < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
+ WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
place->lpfn = mr->io_size >> PAGE_SHIFT;
}
}
@@ -271,8 +274,6 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
bdev);
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
unsigned long ccs_pages = 0;
enum ttm_caching caching;
@@ -286,8 +287,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (!i915_tt)
return NULL;
- if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
- man->use_tt)
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+ ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
caching = i915_ttm_select_tt_caching(obj);
@@ -471,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -510,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+ /*
+ * This gets called twice by ttm; as long as we have a ttm resource or
+ * ttm_tt then we can still safely call this. Due to pipeline-gutting,
+ * we may have a NULL bo->resource, but in that case we should always
+ * have a ttm alive (e.g. if the pages are swapped out).
+ */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -599,13 +606,16 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- int err;
+ long err;
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
- err = ttm_bo_wait(bo, true, false);
- if (err)
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, 15 * HZ);
+ if (err < 0)
return err;
+ if (err == 0)
+ return -EBUSY;
err = i915_ttm_move_notify(bo);
if (err)
@@ -689,7 +699,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
GEM_WARN_ON(bo->ttm);
base = obj->mm.region->iomap.base - obj->mm.region->region.start;
- sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
+ sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
@@ -832,6 +842,10 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
struct ttm_placement placement;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
@@ -1048,7 +1062,27 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- if (!i915_ttm_resource_mappable(bo->resource)) {
+ /*
+ * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+ * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+ * far as doing a ttm_bo_move_null(), which should skip all the
+ * other junk.
+ */
+ if (!bo->resource) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true, /* should be idle already */
+ };
+ int err;
+
+ GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+ } else if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1240,7 +1274,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
}
}
-/**
+/*
* __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
* @mem: The initial memory region for the object.
* @obj: The gem object.
@@ -1302,6 +1336,17 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
&i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
+
+ /*
+ * XXX: The ttm_bo_init_reserved() function returns -ENOSPC if the size
+ * is too big to add a vma. The function that actually returns -ENOSPC is
+ * drm_mm_insert_node_in_range(). To handle the same error as other code
+ * that returns -E2BIG when the size is too large, convert -ENOSPC to
+ * -E2BIG here.
+ */
+ if (size >> PAGE_SHIFT > INT_MAX && ret == -ENOSPC)
+ ret = -E2BIG;
+
if (ret)
return i915_ttm_err_to_gem(ret);
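i915_ttm_truncate() above replaces ttm_bo_wait() with dma_resv_wait_timeout(), which returns a long: negative on error, zero on timeout, positive for the remaining jiffies. A kernel-context sketch of that return-value handling, matching the hunk above:

static int wait_for_idle(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
				    true /* interruptible */, 15 * HZ);
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return -EBUSY;		/* timed out, object still busy */
	return 0;			/* idle with time to spare */
}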
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 2a94a99ef76b..f8f6bed1b297 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
/* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != I915_PL_SYSTEM;
+ return mem && mem->mem_type != I915_PL_SYSTEM;
}
bool i915_ttm_resource_mappable(struct ttm_resource *res);
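With the NULL check added above, an object that has no TTM resource (e.g. after pipeline-gutting) is treated as not CPU-mapping iomem, i.e. like system memory. A hedged illustration of the caller pattern this enables, mirroring the ttm_move changes below:

	if (i915_ttm_cpu_maps_iomem(bo->resource))	/* safe even when bo->resource is NULL */
		obj->mem_flags |= I915_BO_FLAG_IOMEM;
	else
		obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;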
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index f59f812dc6d2..dd188dfcc423 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -3,7 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
#include "i915_deps.h"
#include "i915_drv.h"
@@ -103,7 +103,27 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
unsigned int cache_level;
+ unsigned int mem_flags;
unsigned int i;
+ int mem_type;
+
+ /*
+ * We might have been purged (or swapped out) if the resource is NULL,
+ * in which case the SYSTEM placement is the closest match to describe
+ * the current domain. If the object is ever used in this state then we
+ * will require moving it again.
+ */
+ if (!bo->resource) {
+ mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+ mem_type = I915_PL_SYSTEM;
+ cache_level = I915_CACHE_NONE;
+ } else {
+ mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+ I915_BO_FLAG_STRUCT_PAGE;
+ mem_type = bo->resource->mem_type;
+ cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+ bo->ttm);
+ }
/*
* If object was moved to an allowable region, update the object
@@ -111,11 +131,11 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
* in an allowable region, it's evicted and we don't update the
* object region.
*/
- if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+ if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
for (i = 0; i < obj->mm.n_placements; ++i) {
struct intel_memory_region *mr = obj->mm.placements[i];
- if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ if (intel_region_to_ttm_type(mr) == mem_type &&
mr != obj->mm.region) {
i915_gem_object_release_memory_region(obj);
i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
}
obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+ obj->mem_flags |= mem_flags;
- obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
- I915_BO_FLAG_STRUCT_PAGE;
-
- cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
- bo->ttm);
i915_gem_object_set_cache_coherency(obj, cache_level);
}
@@ -237,6 +253,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
* @_src_iter: Storage space for the source kmap iterator.
* @dst_iter: Pointer to the destination kmap iterator.
* @src_iter: Pointer to the source kmap iterator.
+ * @num_pages: Number of pages
* @clear: Whether to clear instead of copy.
* @src_rsgt: Refcounted scatter-gather list of source memory.
* @dst_rsgt: Refcounted scatter-gather list of destination memory.
@@ -541,6 +558,8 @@ out:
* i915_ttm_move - The TTM move callback used by i915.
* @bo: The buffer object.
* @evict: Whether this is an eviction.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits should be
+ * performed if waiting
* @dst_mem: The destination ttm resource.
* @hop: If we need multihop, what temporary memory type to move to.
*
@@ -565,6 +584,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return 0;
}
+ if (!bo->resource) {
+ if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ /*
+ * This is only reached when first creating the object, or if
+ * the object was purged or swapped out (pipeline-gutting). For
+ * the former we can safely skip all of the below since we are
+ * only using a dummy SYSTEM placement here. And with the latter
+ * we will always re-enter here with bo->resource set correctly
+ * (as per the above), since this is part of a multi-hop
+ * sequence, where at the end we can do the move for real.
+ *
+ * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+ * which doesn't require any kind of move, so it should be safe
+ * to skip all the below and call ttm_bo_move_null() here, where
+ * the caller in __i915_ttm_get_pages() will take care of the
+ * rest, since we should have a valid ttm_tt.
+ */
+ ttm_bo_move_null(bo, dst_mem);
+ return 0;
+ }
+
ret = i915_ttm_move_notify(bo);
if (ret)
return ret;
@@ -669,6 +714,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(dst);
assert_object_held(src);
+
+ if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+ return -EINVAL;
+
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 7e67742bc65e..ad649523d5e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
unsigned int flags;
int err = 0;
- if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
return 0;
if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -144,8 +144,7 @@ void i915_ttm_recover_region(struct intel_memory_region *mr)
/**
* i915_ttm_backup_region - Back up all objects of a region to smem.
* @mr: The memory region
- * @allow_gpu: Whether to allow the gpu blitter for this backup.
- * @backup_pinned: Backup also pinned objects.
+ * @flags: TTM backup flags
*
* Loops over all objects of a region and either evicts them if they are
* evictable or backs them up using a backup object if they are pinned.
@@ -187,7 +186,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
return err;
/* Content may have been swapped. */
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!backup_bo->resource)
+ err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+ if (!err)
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
@@ -209,7 +211,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
/**
* i915_ttm_restore_region - Restore backed-up objects of a region from smem.
* @mr: The memory region
- * @allow_gpu: Whether to allow the gpu blitter to recover.
+ * @flags: TTM backup flags
*
* Loops over all objects of a region and if they are backed-up, restores
* them from smem.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 9348b1804d53..1d3ebdf4069b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -128,12 +128,16 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
struct sg_table *st;
struct page **pvec;
+ unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
int ret;
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
+ return -E2BIG;
+
+ num_pages = obj->base.size >> PAGE_SHIFT;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index e6e01c2a74a6..4a33ad2d122b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -161,7 +161,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
/**
- * Waits for rendering to the object to be completed
+ * i915_gem_object_wait - Waits for rendering to the object to be completed
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index cbd9b624a788..bac957755068 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -29,11 +29,15 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
- const unsigned long npages = obj->base.size / PAGE_SIZE;
+ unsigned int npages; /* restricted by sg_alloc_table */
struct scatterlist *sg, *src, *end;
struct sg_table *pages;
unsigned long n;
+ if (overflows_type(obj->base.size / PAGE_SIZE, npages))
+ return -E2BIG;
+
+ npages = obj->base.size / PAGE_SIZE;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index beaf27e09e8a..99f39a5feca1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -84,6 +84,10 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
unsigned int sg_page_sizes;
u64 rem;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
st = kmalloc(sizeof(*st), GFP);
if (!st)
return -ENOMEM;
@@ -111,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
- GEM_BUG_ON(order >= MAX_ORDER);
+ GEM_BUG_ON(order > MAX_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;
@@ -212,6 +216,10 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
struct scatterlist *sg;
u64 rem;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
st = kmalloc(sizeof(*st), GFP);
if (!st)
return -ENOMEM;
@@ -400,7 +408,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
* Maintaining alignment is required to utilise huge pages in the ppGGT.
*/
if (i915_gem_object_is_lmem(obj) &&
- IS_ALIGNED(vma->node.start, SZ_2M) &&
+ IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
vma->page_sizes.sg & SZ_2M &&
vma->resource->page_sizes_gtt < SZ_2M) {
pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
@@ -1847,7 +1855,7 @@ static int igt_shrink_thp(void *arg)
I915_SHRINK_ACTIVE);
i915_vma_unpin(vma);
if (err)
- goto out_put;
+ goto out_wf;
/*
* Now that the pages are *unpinned* shrinking should invoke
@@ -1863,19 +1871,19 @@ static int igt_shrink_thp(void *arg)
pr_err("unexpected pages mismatch, should_swap=%s\n",
str_yes_no(should_swap));
err = -EINVAL;
- goto out_put;
+ goto out_wf;
}
if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
pr_err("unexpected residual page-size bits, should_swap=%s\n",
str_yes_no(should_swap));
err = -EINVAL;
- goto out_put;
+ goto out_wf;
}
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_put;
+ goto out_wf;
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 692a16914ca0..ff81af4c8202 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -108,31 +108,30 @@ struct tiled_blits {
u32 height;
};
-static bool supports_x_tiling(const struct drm_i915_private *i915)
+static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
int gen = GRAPHICS_VER(i915);
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ drm_WARN_ON(&i915->drm, gen < 9);
+
if (gen < 12)
return true;
- if (!HAS_LMEM(i915) || IS_DG1(i915))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return false;
- return true;
+ return HAS_DISPLAY(i915);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
{
- int gen = GRAPHICS_VER(buf->vma->vm->i915);
-
- if (gen < 9)
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
return false;
- if (gen < 12)
- return true;
-
/* filter out platforms with unsupported X-tile support in fastblit */
- if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
return false;
return true;
@@ -194,12 +193,12 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
*cs++ = 0;
*cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
- *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
+ *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
*cs++ = 0;
*cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
- *cs++ = upper_32_bits(src->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(src->vma));
+ *cs++ = upper_32_bits(i915_vma_offset(src->vma));
} else {
if (ver >= 6) {
*cs++ = MI_LOAD_REGISTER_IMM(1);
@@ -240,14 +239,14 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
*cs++ = 0;
*cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
if (use_64b_reloc)
- *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
*cs++ = 0;
*cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(src->vma));
if (use_64b_reloc)
- *cs++ = upper_32_bits(src->vma->node.start);
+ *cs++ = upper_32_bits(i915_vma_offset(src->vma));
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -462,7 +461,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
{
int err;
- if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+ if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
err = i915_vma_unbind_unlocked(vma);
if (err)
return err;
@@ -472,6 +471,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
if (err)
return err;
+ GEM_BUG_ON(i915_vma_offset(vma) != addr);
return 0;
}
@@ -518,8 +518,8 @@ tiled_blit(struct tiled_blits *t,
err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- t->batch->node.start,
- t->batch->node.size,
+ i915_vma_offset(t->batch),
+ i915_vma_size(t->batch),
0);
i915_request_get(rq);
i915_request_add(rq);
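Throughout these selftests, direct reads of vma->node.start and vma->node.size are replaced with i915_vma_offset() and i915_vma_size(). The assumed intent is that batch buffers and blits should use the address range the GPU actually sees, which can differ from the raw drm_mm node once the VMA carries extra padding. A hypothetical usage pattern consistent with the hunks in this series:

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
	offset += i915_vma_offset(vma);		/* GPU-visible start, not vma->node.start */

	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);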
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index c228fe4aba50..3bef1beec7cb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -222,7 +222,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
}
if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index a0ff51d71d07..a81fa6a20f5a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -469,7 +469,8 @@ static int gpu_fill(struct intel_context *ce,
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
- unsigned int n, m, need_flush;
+ unsigned int need_flush;
+ unsigned long n, m;
int err;
i915_gem_object_lock(obj, NULL);
@@ -499,7 +500,8 @@ out:
static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int idx, unsigned int max)
{
- unsigned int n, m, needs_flush;
+ unsigned int needs_flush;
+ unsigned long n;
int err;
i915_gem_object_lock(obj, NULL);
@@ -508,7 +510,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
goto out_unlock;
for (n = 0; n < real_page_count(obj); n++) {
- u32 *map;
+ u32 *map, m;
map = kmap_atomic(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
@@ -516,7 +518,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (m = 0; m < max; m++) {
if (map[m] != m) {
- pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+ pr_err("%pS: Invalid value at object %d page %ld/%ld, offset %d/%d: found %x expected %x\n",
__builtin_return_address(0), idx,
n, real_page_count(obj), m, max,
map[m], m);
@@ -527,7 +529,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
- pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+ pr_err("%pS: Invalid value at object %d page %ld, offset %d: found %x expected %x (uninitialised)\n",
__builtin_return_address(0), idx, n, m,
map[m], STACK_MAGIC);
err = -EINVAL;
@@ -914,8 +916,8 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs,
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(engine->mmio_base));
- *cmd++ = lower_32_bits(vma->node.start);
- *cmd++ = upper_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
+ *cmd++ = upper_32_bits(i915_vma_offset(vma));
*cmd = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(rpcs, 0, 64);
@@ -999,7 +1001,8 @@ retry:
}
err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
if (err)
goto skip_request;
@@ -1548,9 +1551,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (err)
goto skip_request;
@@ -1560,7 +1561,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto skip_request;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ i915_vma_size(vma), 0);
if (err)
goto skip_request;
@@ -1665,7 +1667,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = offset;
*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
*cmd++ = reg;
- *cmd++ = vma->node.start + result;
+ *cmd++ = i915_vma_offset(vma) + result;
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
@@ -1682,9 +1684,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto skip_request;
@@ -1694,7 +1694,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto skip_request;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ i915_vma_size(vma), flags);
if (err)
goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3f658d5717d8..56279908ed30 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -97,11 +97,11 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_gtt_view view;
struct i915_vma *vma;
+ unsigned long offset;
unsigned long page;
u32 __iomem *io;
struct page *p;
unsigned int n;
- u64 offset;
u32 *cpu;
int err;
@@ -158,7 +158,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
cpu = kmap(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
- pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
page, n,
view.partial.offset,
view.partial.size,
@@ -214,10 +214,10 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
for_each_prime_number_from(page, 1, npages) {
struct i915_gtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+ unsigned long offset;
u32 __iomem *io;
struct page *p;
unsigned int n;
- u64 offset;
u32 *cpu;
GEM_BUG_ON(view.partial.size > nreal);
@@ -254,7 +254,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
cpu = kmap(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
- pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
page, n,
view.partial.offset,
view.partial.size,
@@ -1609,7 +1609,7 @@ retry:
err = i915_vma_move_to_active(vma, rq, 0);
- err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
i915_request_get(rq);
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index bdf5bb40ccf1..19e374f68ff7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -33,10 +33,10 @@ out:
static int igt_gem_huge(void *arg)
{
- const unsigned int nreal = 509; /* just to be awkward */
+ const unsigned long nreal = 509; /* just to be awkward */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
- unsigned int n;
+ unsigned long n;
int err;
/* Basic sanitycheck of our huge fake object allocation */
@@ -49,7 +49,7 @@ static int igt_gem_huge(void *arg)
err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
- pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ pr_err("Failed to allocate %lu pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
goto out;
}
@@ -57,7 +57,7 @@ static int igt_gem_huge(void *arg)
for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
if (i915_gem_object_get_page(obj, n) !=
i915_gem_object_get_page(obj, n % nreal)) {
- pr_err("Page lookup mismatch at index %u [%u]\n",
+ pr_err("Page lookup mismatch at index %lu [%lu]\n",
n, n % nreal);
err = -EINVAL;
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 374b10ac430e..20a232a140b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -62,8 +62,8 @@ igt_emit_store_dw(struct i915_vma *vma,
goto err;
}
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
+ offset += i915_vma_offset(vma);
for (n = 0; n < count; n++) {
if (ver >= 8) {
@@ -130,15 +130,11 @@ int igt_gpu_fill_dw(struct intel_context *ce,
goto err_batch;
}
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
+ err = igt_vma_move_to_active_unlocked(batch, rq, 0);
if (err)
goto skip_request;
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto skip_request;
@@ -147,7 +143,8 @@ int igt_gpu_fill_dw(struct intel_context *ce,
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
flags);
skip_request:
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 1379fbc14431..71a3ca8a8865 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -38,7 +38,7 @@ igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
int err;
i915_vma_lock(vma);
- err = _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index 317efb145787..d38b914d1206 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -106,7 +106,7 @@ static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
static u32 batch_addr(const struct batch_chunk *bc)
{
- return bc->vma->node.start;
+ return i915_vma_offset(bc->vma);
}
static void batch_add(struct batch_chunk *bc, const u32 d)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e94365b08f1e..2aa63ec521b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -528,7 +528,7 @@ retry:
return rq;
}
-struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
@@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
active = rq;
}
+ if (active)
+ active = i915_request_get_rcu(active);
spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
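The rename from intel_context_find_active_request() to intel_context_get_active_request(), together with the added i915_request_get_rcu(), implies the caller now owns a reference on the returned request. A hedged sketch of the expected caller-side pattern:

	struct i915_request *rq;

	rq = intel_context_get_active_request(ce);
	if (rq) {
		/* ... report or inspect the active request ... */
		i915_request_put(rq);	/* drop the reference taken on our behalf */
	}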
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fb62b7b8cbcd..48f888c3da08 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -14,6 +14,7 @@
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_gt_pm.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"
@@ -207,8 +208,11 @@ void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
- if (!ce->active_count++)
- ce->ops->enter(ce);
+ if (ce->active_count++)
+ return;
+
+ ce->ops->enter(ce);
+ intel_gt_pm_get(ce->vm->gt);
}
static inline void intel_context_mark_active(struct intel_context *ce)
@@ -222,8 +226,11 @@ static inline void intel_context_exit(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
GEM_BUG_ON(!ce->active_count);
- if (!--ce->active_count)
- ce->ops->exit(ce);
+ if (--ce->active_count)
+ return;
+
+ intel_gt_pm_put_async(ce->vm->gt);
+ ce->ops->exit(ce);
}
static inline struct intel_context *intel_context_get(struct intel_context *ce)
@@ -268,8 +275,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce);
-struct i915_request *
-intel_context_find_active_request(struct intel_context *ce);
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
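
Hedged illustration of the new pairing above (the helper name is hypothetical): the first intel_context_enter() on a context now also takes a wakeref on ce->vm->gt, and the matching last intel_context_exit() drops it asynchronously, so work bracketed by the pair runs with the GT guaranteed awake.

static void example_bracketed_submission(struct intel_context *ce)
{
	mutex_lock(&ce->timeline->mutex);

	intel_context_enter(ce);	/* ops->enter() + intel_gt_pm_get() */
	/* ... build and emit requests against ce ... */
	intel_context_exit(ce);		/* intel_gt_pm_put_async() + ops->exit() */

	mutex_unlock(&ce->timeline->mutex);
}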
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cbc8b857d5f7..b58c30ac8ef0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -172,6 +172,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
#define I915_GEM_HWS_PXP 0x60
#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
+#define I915_GEM_HWS_GSC 0x62
+#define I915_GEM_HWS_GSC_ADDR (I915_GEM_HWS_GSC * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
#define I915_HWS_CSB_BUF0_INDEX 0x10
@@ -248,8 +250,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
ktime_t *now);
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
struct intel_context *
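
Hedged caller sketch for the replacement interface above: intel_engine_get_hung_entity() hides the GuC vs. execlists split, and only the request comes back referenced while the context pointer is borrowed. The function name below is illustrative only.

static void example_dump_hang(struct intel_engine_cs *engine,
			      struct drm_printer *m)
{
	struct intel_context *ce = NULL;
	struct i915_request *rq = NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);

	if (rq) {
		drm_printf(m, "%s: hung request found\n", engine->name);
		i915_request_put(rq);
	} else if (ce) {
		drm_printf(m, "%s: hung context but no request\n", engine->name);
	}
}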
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index c33e0d72d670..5c6c9a6d469c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
@@ -894,6 +895,24 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
engine_mask_apply_compute_fuses(gt);
engine_mask_apply_copy_fuses(gt);
+ /*
+ * The only use of the GSC CS is to load and communicate with the GSC
+ * FW, so we have no use for it if we don't have the FW.
+ *
+ * IMPORTANT: in cases where we don't have the GSC FW, we have a
+ * catch-22 situation that breaks media C6 due to 2 requirements:
+ * 1) once turned on, the GSC power well will not go to sleep unless the
+ * GSC FW is loaded.
+ * 2) to enable idling (which is required for media C6) we need to
+ * initialize the IDLE_MSG register for the GSC CS and do at least 1
+ * submission, which will wake up the GSC power well.
+ */
+ if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
+ drm_notice(&gt->i915->drm,
+ "No GSC FW selected, disabling GSC CS and media C6\n");
+ info->engine_mask &= ~BIT(GSC0);
+ }
+
return info->engine_mask;
}
@@ -1125,12 +1144,130 @@ err_put:
return ret;
}
+static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
+{
+ static const union intel_engine_tlb_inv_reg gen8_regs[] = {
+ [RENDER_CLASS].reg = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
+ [COPY_ENGINE_CLASS].reg = GEN8_BTCR,
+ };
+ static const union intel_engine_tlb_inv_reg gen12_regs[] = {
+ [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xehp_regs[] = {
+ [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = engine->i915;
+ const unsigned int instance = engine->instance;
+ const unsigned int class = engine->class;
+ const union intel_engine_tlb_inv_reg *regs;
+ union intel_engine_tlb_inv_reg reg;
+ unsigned int num = 0;
+ u32 val;
+
+ /*
+ * New platforms should not be added with a catch-all-newer (>=)
+ * condition, so that any platform added later triggers the warning below
+ * and in turn mandates a human cross-check of whether the invalidation
+ * flows have compatible semantics.
+ *
+ * For instance with the 11.00 -> 12.00 transition three out of five
+ * respective engine registers were moved to masked type. Then after the
+ * 12.00 -> 12.50 transition multicast handling is required too.
+ */
+
+ if (engine->gt->type == GT_MEDIA) {
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ regs = xelpmp_regs;
+ num = ARRAY_SIZE(xelpmp_regs);
+ }
+ } else {
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
+ regs = xehp_regs;
+ num = ARRAY_SIZE(xehp_regs);
+ } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return 0;
+ }
+ }
+
+ if (gt_WARN_ONCE(engine->gt, !num,
+ "Platform does not implement TLB invalidation!"))
+ return -ENODEV;
+
+ if (gt_WARN_ON_ONCE(engine->gt,
+ class >= num ||
+ (!regs[class].reg.reg &&
+ !regs[class].mcr_reg.reg)))
+ return -ERANGE;
+
+ reg = regs[class];
+
+ if (regs == xelpmp_regs && class == OTHER_CLASS) {
+ /*
+ * There's only a single GSC instance, but it uses register bit
+ * 1 instead of either 0 or OTHER_GSC_INSTANCE.
+ */
+ GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
+ val = 1;
+ } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
+ reg.reg = GEN8_M2TCR;
+ val = 0;
+ } else {
+ val = instance;
+ }
+
+ val = BIT(val);
+
+ engine->tlb_inv.mcr = regs == xehp_regs;
+ engine->tlb_inv.reg = reg;
+ engine->tlb_inv.done = val;
+
+ if (GRAPHICS_VER(i915) >= 12 &&
+ (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS ||
+ engine->class == OTHER_CLASS))
+ engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ else
+ engine->tlb_inv.request = val;
+
+ return 0;
+}
+
static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
init_llist_head(&engine->barrier_tasks);
+ err = intel_engine_init_tlb_invalidation(engine);
+ if (err)
+ return err;
+
err = init_status_page(engine);
if (err)
return err;
@@ -1291,8 +1428,8 @@ create_kernel_context(struct intel_engine_cs *engine)
&kernel, "kernel_context");
}
-/**
- * intel_engines_init_common - initialize cengine state which might require hw access
+/*
+ * engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
*
* Initializes @engine@ structure members shared between legacy and execlists
@@ -1476,10 +1613,12 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
/*
- * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is
+ * Wa_22011802037: Prior to doing a reset, ensure CS is
* stopped, set ring stop bit and prefetch disable bit to halt CS
*/
- if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(engine->i915) >= 11 &&
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
_MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
@@ -1564,11 +1703,8 @@ static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
};
u32 val;
- if (!_reg[engine->id].reg) {
- drm_err(&engine->i915->drm,
- "MSG IDLE undefined for engine id %u\n", engine->id);
+ if (!_reg[engine->id].reg)
return 0;
- }
val = intel_uncore_read(engine->uncore, _reg[engine->id]);
@@ -1922,13 +2058,13 @@ static const char *repr_timer(const struct timer_list *t)
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
- if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
- if (HAS_EXECLISTS(dev_priv)) {
+ if (HAS_EXECLISTS(i915)) {
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
@@ -1949,7 +2085,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
- if (GRAPHICS_VER(dev_priv) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1966,15 +2102,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
- else if (GRAPHICS_VER(dev_priv) >= 4)
+ else if (GRAPHICS_VER(i915) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -1984,7 +2120,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
+ if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2050,7 +2186,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
rcu_read_unlock();
i915_sched_engine_active_unlock_bh(engine->sched_engine);
- } else if (GRAPHICS_VER(dev_priv) > 6) {
+ } else if (GRAPHICS_VER(i915) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
@@ -2094,17 +2230,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
-static unsigned long list_count(struct list_head *list)
-{
- struct list_head *pos;
- unsigned long count = 0;
-
- list_for_each(pos, list)
- count++;
-
- return count;
-}
-
static unsigned long read_ul(void *p, size_t x)
{
return *(unsigned long *)(p + x);
@@ -2196,11 +2321,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
}
}
-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m)
{
+ struct intel_context *hung_ce = NULL;
struct i915_request *hung_rq = NULL;
- struct intel_context *ce;
- bool guc;
/*
* No need for an engine->irq_seqno_barrier() before the seqno reads.
@@ -2209,27 +2334,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
* But the intention here is just to report an instantaneous snapshot
* so that's fine.
*/
- lockdep_assert_held(&engine->sched_engine->lock);
+ intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
drm_printf(m, "\tRequests:\n");
- guc = intel_uc_uses_guc_submission(&engine->gt->uc);
- if (guc) {
- ce = intel_engine_get_hung_context(engine);
- if (ce)
- hung_rq = intel_context_find_active_request(ce);
- } else {
- hung_rq = intel_engine_execlist_find_hung_request(engine);
- }
-
if (hung_rq)
engine_dump_request(hung_rq, m, "\t\thung");
+ else if (hung_ce)
+ drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
- if (guc)
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
intel_guc_dump_active_requests(engine, hung_rq, m);
else
- intel_engine_dump_active_requests(&engine->sched_engine->requests,
- hung_rq, m);
+ intel_execlists_dump_active_requests(engine, hung_rq, m);
+
+ if (hung_rq)
+ i915_request_put(hung_rq);
}
void intel_engine_dump(struct intel_engine_cs *engine,
@@ -2239,7 +2359,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
- unsigned long flags;
ktime_t dummy;
if (header) {
@@ -2276,13 +2395,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_count(error));
print_properties(engine, m);
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
engine_dump_active_requests(engine, m);
- drm_printf(m, "\tOn hold?: %lu\n",
- list_count(&engine->sched_engine->hold));
- spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
-
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
@@ -2328,8 +2442,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
return siblings[0]->cops->create_virtual(siblings, count, flags);
}
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
@@ -2381,6 +2494,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
return active;
}
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq)
+{
+ unsigned long flags;
+
+ *ce = intel_engine_get_hung_context(engine);
+ if (*ce) {
+ intel_engine_clear_hung_context(engine);
+
+ *rq = intel_context_get_active_request(*ce);
+ return;
+ }
+
+ /*
+ * Getting here with GuC enabled means it is a forced error capture
+ * with no actual hang. So, no need to attempt the execlist search.
+ */
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ return;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ *rq = engine_execlist_find_hung_request(engine);
+ if (*rq)
+ *rq = i915_request_get_rcu(*rq);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
/*
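
Hedged sketch of how the per-engine descriptor built by intel_engine_init_tlb_invalidation() is meant to be consumed at invalidation time; the real flow lives in mmio_invalidate_full() and wait_for_invalidate(), this only restates the write/wait split with an illustrative wrapper name.

static int example_engine_tlb_invalidate(struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;

	/* Kick the invalidation using the precomputed request value. */
	if (engine->tlb_inv.mcr)
		intel_gt_mcr_multicast_write_fw(gt,
						engine->tlb_inv.reg.mcr_reg,
						engine->tlb_inv.request);
	else
		intel_uncore_write_fw(gt->uncore,
				      engine->tlb_inv.reg.reg,
				      engine->tlb_inv.request);

	/* ... then poll until the 'done' bit reads back as zero. */
	if (engine->tlb_inv.mcr)
		return intel_gt_mcr_wait_for_reg(gt, engine->tlb_inv.reg.mcr_reg,
						 engine->tlb_inv.done, 0,
						 TLB_INVAL_TIMEOUT_US,
						 TLB_INVAL_TIMEOUT_MS);

	return __intel_wait_for_register_fw(gt->uncore, engine->tlb_inv.reg.reg,
					    engine->tlb_inv.done, 0,
					    TLB_INVAL_TIMEOUT_US,
					    TLB_INVAL_TIMEOUT_MS, NULL);
}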
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index b0a4a2dbe3ee..ee531a5c142c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -15,6 +15,22 @@
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
+#include "intel_gt_regs.h"
+
+static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (IS_METEORLAKE(i915) && engine->id == GSC0) {
+ intel_uncore_write(engine->gt->uncore,
+ RC_PSMI_CTRL_GSCCS,
+ _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
+ /* hysteresis 0xA=5us as recommended in spec */
+ intel_uncore_write(engine->gt->uncore,
+ PWRCTX_MAXCNT_GSCCS,
+ 0xA);
+ }
+}
static void dbg_poison_ce(struct intel_context *ce)
{
@@ -99,6 +115,15 @@ __queue_and_release_pm(struct i915_request *rq,
ENGINE_TRACE(engine, "parking\n");
/*
+ * Open-coded one half of intel_context_enter, which we have to omit
+ * here (see the large comment below); the other half must not be
+ * called either, because the request is constructed directly with
+ * __i915_request_create, which already increments the active count
+ * via intel_context_mark_active.
+ */
+ GEM_BUG_ON(rq->context->active_count != 1);
+ __intel_gt_pm_get(engine->gt);
+
+ /*
* We have to serialise all potential retirement paths with our
* submission, as we don't want to underflow either the
* engine->wakeref.counter or our timeline->active_count.
@@ -275,6 +300,8 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
intel_engine_init_heartbeat(engine);
+
+ intel_gsc_idle_msg_enable(engine);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index ee3efd06ee54..6b9d9f837669 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -81,6 +81,7 @@
#define RING_EIR(base) _MMIO((base) + 0xb0)
#define RING_EMR(base) _MMIO((base) + 0xb4)
#define RING_ESR(base) _MMIO((base) + 0xb8)
+#define GEN12_STATE_ACK_DEBUG(base) _MMIO((base) + 0xbc)
#define RING_INSTPM(base) _MMIO((base) + 0xc0)
#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4)
#define ACTHD(base) _MMIO((base) + 0xc8)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 4fd54fb8810f..960291f88fd6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -53,6 +53,8 @@ struct intel_gt;
struct intel_ring;
struct intel_uncore;
struct intel_breadcrumbs;
+struct intel_engine_cs;
+struct i915_perf_group;
typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
@@ -341,6 +343,18 @@ struct intel_engine_guc_stats {
u64 start_gt_clk;
};
+union intel_engine_tlb_inv_reg {
+ i915_reg_t reg;
+ i915_mcr_reg_t mcr_reg;
+};
+
+struct intel_engine_tlb_inv {
+ bool mcr;
+ union intel_engine_tlb_inv_reg reg;
+ u32 request;
+ u32 done;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_gt *gt;
@@ -372,6 +386,8 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ struct intel_engine_tlb_inv tlb_inv;
+
/*
* Some w/a require forcewake to be held (which prevents RC6) while
* a particular engine is active. If so, we set fw_domain to which
@@ -603,6 +619,14 @@ struct intel_engine_cs {
} props, defaults;
I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
+
+ /*
+ * The perf group maps to one OA unit which controls one OA buffer. All
+ * reports corresponding to this engine will be reported to this OA
+ * buffer. An engine will map to a single OA unit, but a single OA unit
+ * can generate reports for multiple engines.
+ */
+ struct i915_perf_group *oa_group;
};
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 2daffa7c7dfd..750326434677 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* inspecting the queue to see if we need to resubmit.
*/
if (*prev != *execlists->active) { /* elide lite-restores */
+ struct intel_context *prev_ce = NULL, *active_ce = NULL;
+
/*
* Note the inherent discrepancy between the HW runtime,
* recorded as part of the context switch, and the CPU
@@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* and correct overselves later when updating from HW.
*/
if (*prev)
- lrc_runtime_stop((*prev)->context);
+ prev_ce = (*prev)->context;
if (*execlists->active)
- lrc_runtime_start((*execlists->active)->context);
+ active_ce = (*execlists->active)->context;
+ if (prev_ce != active_ce) {
+ if (prev_ce)
+ lrc_runtime_stop(prev_ce);
+ if (active_ce)
+ lrc_runtime_start(active_ce);
+ }
new_timeslice(execlists);
}
@@ -2989,10 +2997,12 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/*
- * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
- if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(engine->i915) >= 11 &&
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
intel_engine_wait_for_pending_mi_fw(engine);
engine->execlists.reset_ccid = active_ccid(engine);
@@ -4148,6 +4158,22 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+ drm_printf(m, "\tOn hold?: %zu\n",
+ list_count_nodes(&engine->sched_engine->hold));
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index a1aa92c983a5..d2c7d45ea062 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
int indent),
unsigned int max);
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+
bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 8145851ad23d..3c7f1ed92f5b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,9 +8,11 @@
#include <linux/types.h>
#include <linux/stop_machine.h>
+#include <drm/drm_managed.h>
#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>
+#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_ggtt_gmch.h"
@@ -26,13 +28,6 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
-static inline bool suspend_retains_ptes(struct i915_address_space *vm)
-{
- return GRAPHICS_VER(vm->i915) >= 8 &&
- !HAS_LMEM(vm->i915) &&
- vm->is_ggtt;
-}
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -104,23 +99,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
return 0;
}
-/*
- * Return the value of the last GGTT pte cast to an u64, if
- * the system is supposed to retain ptes across resume. 0 otherwise.
- */
-static u64 read_last_pte(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *ptep;
-
- if (!suspend_retains_ptes(vm))
- return 0;
-
- GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
- ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
- return readq(ptep);
-}
-
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
@@ -184,10 +162,7 @@ retry:
i915_gem_object_unlock(obj);
}
- if (!suspend_retains_ptes(vm))
- vm->clear_range(vm, 0, vm->total);
- else
- i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
+ vm->clear_range(vm, 0, vm->total);
vm->skip_pte_rewrite = save_skip_rewrite;
@@ -196,10 +171,13 @@ retry:
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
+ struct intel_gt *gt;
+
i915_ggtt_suspend_vm(&ggtt->vm);
ggtt->invalidate(ggtt);
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_check_and_clear_faults(gt);
}
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -225,16 +203,21 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct drm_i915_private *i915 = ggtt->vm.i915;
gen8_ggtt_invalidate(ggtt);
- if (GRAPHICS_VER(i915) >= 12)
- intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
- GEN12_GUC_TLB_INV_CR_INVALIDATE);
- else
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ if (GRAPHICS_VER(i915) >= 12) {
+ struct intel_gt *gt;
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_uncore_write_fw(gt->uncore,
+ GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ } else {
+ intel_uncore_write_fw(ggtt->vm.gt->uncore,
+ GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ }
}
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
@@ -287,8 +270,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+ gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+ end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
gen8_set_pte(gte++, pte_encode | addr);
@@ -305,6 +291,27 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -338,9 +345,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+ gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+ end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
iowrite32(vm->pte_encode(addr, level, flags), gte++);
GEM_BUG_ON(gte > end);
@@ -361,27 +371,6 @@ static void nop_clear_range(struct i915_address_space *vm,
{
}
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
/*
@@ -551,8 +540,6 @@ static int init_ggtt(struct i915_ggtt *ggtt)
struct drm_mm_node *entry;
int ret;
- ggtt->pte_lost = true;
-
/*
* GuC requires all resources that we're sharing with it to be placed in
* non-WOPCM memory. If GuC is not present or not in use we still need a
@@ -585,8 +572,12 @@ static int init_ggtt(struct i915_ggtt *ggtt)
* paths, and we trust that 0 will remain reserved. However,
* the only likely reason for failure to insert is a driver
* bug, which we expect to cause other failures...
+ *
+ * Since CPU can perform speculative reads on error capture
+ * (write-combining allows it) add scratch page after error
+ * capture to avoid DMAR errors.
*/
- ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE;
ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
drm_mm_insert_node_in_range(&ggtt->vm.mm,
@@ -596,11 +587,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
}
- if (drm_mm_node_allocated(&ggtt->error_capture))
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
+ u64 start = ggtt->error_capture.start;
+ u64 size = ggtt->error_capture.size;
+
+ ggtt->vm.scratch_range(&ggtt->vm, start, size);
drm_dbg(&ggtt->vm.i915->drm,
"Reserved GGTT:[%llx, %llx] for use by error capture\n",
- ggtt->error_capture.start,
- ggtt->error_capture.start + ggtt->error_capture.size);
+ start, start + size);
+ }
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -920,8 +915,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
- return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -953,8 +948,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
+ ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -979,15 +973,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
- ggtt->invalidate = gen8_ggtt_invalidate;
+ if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
+ ggtt->invalidate = guc_ggtt_invalidate;
+ else
+ ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
- setup_private_pat(ggtt->vm.gt);
-
return ggtt_probe_common(ggtt, size);
}
@@ -1115,8 +1110,9 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ if (!HAS_FULL_PPGTT(i915))
ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
ggtt->vm.cleanup = gen6_gmch_remove;
@@ -1196,7 +1192,14 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
*/
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
+
+ for_each_gt(gt, i915, i) {
+ ret = intel_gt_assign_ggtt(gt);
+ if (ret)
+ return ret;
+ }
ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
if (ret)
@@ -1208,35 +1211,25 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
return 0;
}
-int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915)
{
- if (GRAPHICS_VER(i915) < 6)
- return intel_ggtt_gmch_enable_hw(i915);
+ struct i915_ggtt *ggtt;
- return 0;
-}
+ ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt)
+ return ERR_PTR(-ENOMEM);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
-{
- GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+ INIT_LIST_HEAD(&ggtt->gt_list);
- ggtt->invalidate = guc_ggtt_invalidate;
-
- ggtt->invalidate(ggtt);
+ return ggtt;
}
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- /* XXX Temporary pardon for error unload */
- if (ggtt->invalidate == gen8_ggtt_invalidate)
- return;
-
- /* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
-
- ggtt->invalidate = gen8_ggtt_invalidate;
+ if (GRAPHICS_VER(i915) < 6)
+ return intel_ggtt_gmch_enable_hw(i915);
- ggtt->invalidate(ggtt);
+ return 0;
}
/**
@@ -1253,20 +1246,11 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
- bool retained_ptes;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
- /*
- * First fill our portion of the GTT with scratch pages if
- * they were not retained across suspend.
- */
- retained_ptes = suspend_retains_ptes(vm) &&
- !i915_vm_to_ggtt(vm)->pte_lost &&
- !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
-
- if (!retained_ptes)
- vm->clear_range(vm, 0, vm->total);
+ /* First fill our portion of the GTT with scratch pages */
+ vm->clear_range(vm, 0, vm->total);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
@@ -1275,16 +1259,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- if (!retained_ptes) {
- /*
- * Clear the bound flags of the vma resource to allow
- * ptes to be repopulated.
- */
- vma->resource->bound_flags = 0;
- vma->ops->bind_vma(vm, NULL, vma->resource,
- obj ? obj->cache_level : 0,
- was_bound);
- }
+
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
+ vma->ops->bind_vma(vm, NULL, vma->resource,
+ obj ? obj->cache_level : 0,
+ was_bound);
+
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
@@ -1296,24 +1280,22 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
+ struct intel_gt *gt;
bool flush;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_check_and_clear_faults(gt);
flush = i915_ggtt_resume_vm(&ggtt->vm);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
+ ggtt->error_capture.size);
+
ggtt->invalidate(ggtt);
if (flush)
wbinvd_on_all_cpus();
- if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
- setup_private_pat(ggtt->vm.gt);
-
intel_ggtt_restore_fences(ggtt);
}
-
-void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
-{
- to_gt(i915)->ggtt->pte_lost = val;
-}
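
Hedged restatement of the error-capture change above: the reserved window grows by one extra page and is explicitly pointed at scratch on init and again on resume, so speculative write-combined CPU reads through that mapping never walk into an unmapped PTE. A minimal sketch of that re-scrubbing step, assuming a probed GGTT with the new scratch_range hook; the wrapper name is illustrative.

static void example_scrub_error_capture(struct i915_ggtt *ggtt)
{
	/* Re-point the (now two page) error capture window at scratch. */
	if (drm_mm_node_allocated(&ggtt->error_capture))
		ggtt->vm.scratch_range(&ggtt->vm,
				       ggtt->error_capture.start,
				       ggtt->error_capture.size);

	ggtt->invalidate(ggtt);
}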
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 995082d45cb2..37d0b0fe791d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -5,6 +5,7 @@
#include <linux/highmem.h>
+#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
@@ -220,7 +221,8 @@ static int fence_update(struct i915_fence_reg *fence,
return ret;
}
- fence->start = vma->node.start;
+ GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
+ fence->start = i915_ggtt_offset(vma);
fence->size = vma->fence_size;
fence->stride = i915_gem_object_get_stride(vma->obj);
fence->tiling = i915_gem_object_get_tiling(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 4e2163a1aa46..d6a74ae2527b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -6,7 +6,6 @@
#include "intel_ggtt_gmch.h"
#include <drm/intel-gtt.h>
-#include <drm/i915_drm.h>
#include <linux/agp_backend.h>
@@ -81,7 +80,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
phys_addr_t gmadr_base;
int ret;
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
@@ -89,8 +88,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+ ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
@@ -104,6 +102,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = gmch_ggtt_insert_page;
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
+ ggtt->vm.scratch_range = gmch_ggtt_clear_range;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index f50ea92910d9..5d143e2a8db0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -21,6 +21,7 @@
#define INSTR_CLIENT_SHIFT 29
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
+#define INSTR_GSC_CLIENT 0x2 /* MTL+ */
#define INSTR_RC_CLIENT 0x3
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
@@ -393,6 +394,7 @@
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+#define MI_DO_COMPARE REG_BIT(21)
#define STATE_BASE_ADDRESS \
((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
@@ -432,6 +434,14 @@
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+#define GSC_INSTR(opcode, data, flags) \
+ (__INSTR(INSTR_GSC_CLIENT) | (opcode) << 22 | (data) << 9 | (flags))
+
+#define GSC_FW_LOAD GSC_INSTR(1, 0, 2)
+#define HECI1_FW_LIMIT_VALID (1 << 31)
+
+#define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6)
+
/*
* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
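
A hedged worked example of the GSC_INSTR() encoding added above, assuming __INSTR() simply places the client id at INSTR_CLIENT_SHIFT (bit 29):

/*
 * GSC_FW_LOAD      = GSC_INSTR(1, 0, 2)
 *                  = (INSTR_GSC_CLIENT << 29) | (1 << 22) | (0 << 9) | 2
 *                  = 0x40400002
 *
 * GSC_HECI_CMD_PKT = GSC_INSTR(0, 0, 6)
 *                  = (INSTR_GSC_CLIENT << 29) | (0 << 22) | (0 << 9) | 6
 *                  = 0x40000006
 */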
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 976fdf27e790..bcc3605158db 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -174,6 +174,14 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
intf->irq = -1;
intf->id = intf_id;
+ /*
+ * On multi-tile setups the GSC is functional on the first tile only
+ */
+ if (gsc_to_gt(gsc)->info.id != 0) {
+ drm_dbg(&i915->drm, "Not initializing gsc for remote tiles\n");
+ return;
+ }
+
if (intf_id == 0 && !HAS_HECI_PXP(i915))
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index fcac1775e9c3..7ab3ca0f9f26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -33,7 +33,7 @@ struct intel_gsc {
} intf[INTEL_GSC_NUM_INTERFACES];
};
-void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915);
void intel_gsc_fini(struct intel_gsc *gsc);
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 767e329e1cc5..7a008e829d4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -8,7 +8,6 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
-#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
@@ -23,12 +22,12 @@
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
@@ -90,9 +89,8 @@ static int intel_gt_probe_lmem(struct intel_gt *gt)
if (err == -ENODEV)
return 0;
- drm_err(&i915->drm,
- "Failed to setup region(%d) type=%d\n",
- err, INTEL_MEMORY_LOCAL);
+ gt_err(gt, "Failed to setup region(%d) type=%d\n",
+ err, INTEL_MEMORY_LOCAL);
return err;
}
@@ -110,9 +108,18 @@ static int intel_gt_probe_lmem(struct intel_gt *gt)
int intel_gt_assign_ggtt(struct intel_gt *gt)
{
- gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
+ /* Media GT shares primary GT's GGTT */
+ if (gt->type == GT_MEDIA) {
+ gt->ggtt = to_gt(gt->i915)->ggtt;
+ } else {
+ gt->ggtt = i915_ggtt_create(gt->i915);
+ if (IS_ERR(gt->ggtt))
+ return PTR_ERR(gt->ggtt);
+ }
+
+ list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);
- return gt->ggtt ? 0 : -ENOMEM;
+ return 0;
}
int intel_gt_init_mmio(struct intel_gt *gt)
@@ -192,14 +199,14 @@ int intel_gt_init_hw(struct intel_gt *gt)
ret = i915_ppgtt_init_hw(gt);
if (ret) {
- drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret);
+ gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(&gt->uc);
if (ret) {
- i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
goto out;
}
@@ -210,21 +217,6 @@ out:
return ret;
}
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
- intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
@@ -250,22 +242,22 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
u32 eir;
if (GRAPHICS_VER(i915) != 2)
- clear_register(uncore, PGTBL_ER);
+ intel_uncore_write(uncore, PGTBL_ER, 0);
if (GRAPHICS_VER(i915) < 4)
- clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
else
- clear_register(uncore, IPEIR_I965);
+ intel_uncore_write(uncore, IPEIR_I965, 0);
- clear_register(uncore, EIR);
+ intel_uncore_write(uncore, EIR, 0);
eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- drm_dbg(&gt->i915->drm, "EIR stuck: 0x%08x, masking\n", eir);
- rmw_set(uncore, EMR, eir);
+ gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
+ intel_uncore_rmw(uncore, EMR, 0, eir);
intel_uncore_write(uncore, GEN2_IIR,
I915_MASTER_ERROR_INTERRUPT);
}
@@ -275,10 +267,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 12) {
- rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 8) {
- rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 6) {
struct intel_engine_cs *engine;
@@ -298,16 +290,16 @@ static void gen6_check_faults(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
- drm_dbg(&engine->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ?
- "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
@@ -334,17 +326,17 @@ static void xehp_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- drm_dbg(&gt->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -375,17 +367,17 @@ static void gen8_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -479,7 +471,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
- drm_err(&i915->drm, "Failed to allocate scratch page\n");
+ gt_err(gt, "Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
@@ -734,8 +726,7 @@ int intel_gt_init(struct intel_gt *gt)
err = intel_gt_init_hwconfig(gt);
if (err)
- drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
- ERR_PTR(err));
+ gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));
err = __engines_record_defaults(gt);
if (err)
@@ -745,15 +736,13 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_gt;
- intel_uc_init_late(&gt->uc);
-
err = i915_inject_probe_error(gt->i915, -EIO);
if (err)
goto err_gt;
- intel_migrate_init(&gt->migrate, gt);
+ intel_uc_init_late(&gt->uc);
- intel_pxp_init(&gt->pxp);
+ intel_migrate_init(&gt->migrate, gt);
goto out_fw;
err_gt:
@@ -794,7 +783,28 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_rps_driver_unregister(&gt->rps);
intel_gsc_fini(&gt->gsc);
- intel_pxp_fini(&gt->pxp);
+ /*
+ * If we unload the driver and wedge before the GSC worker is complete,
+ * the worker will hit an error on its submission to the GSC engine and
+ * then exit. This is hard to hit for a user, but it is reproducible
+ * when skipping selftests. The error is handled gracefully by the
+ * worker, so there are no functional issues, but we still end up with
+ * an error message in dmesg, which is something we want to avoid as
+ * this is a supported scenario. We could modify the worker to better
+ * handle a wedging occurring during its execution, but that gets
+ * complicated for a couple of reasons:
+ * - We do want the error on runtime wedging, because there are
+ * implications for subsystems outside of GT (i.e., PXP, HDCP), it's
+ * only the error on driver unload that we want to silence.
+ * - The worker is responsible for multiple submissions (GSC FW load,
+ * HuC auth, SW proxy), so all of those will have to be adapted to
+ * handle the wedged_on_fini scenario.
+ * Therefore, it's much simpler to just wait for the worker to be done
+ * before wedging on driver removal, also considering that the worker
+ * will likely already be idle in the great majority of non-selftest
+ * scenarios.
+ */
+ intel_gsc_uc_flush_work(&gt->uc.gsc);
/*
* Upon unregistering the device to prevent any new users, cancel
@@ -896,7 +906,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
gt->name = "Primary GT";
gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
- drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ gt_dbg(gt, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
@@ -921,7 +931,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
gt->info.engine_mask = gtdef->engine_mask;
gt->info.id = i;
- drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ gt_dbg(gt, "Setting up %s\n", gt->name);
if (GEM_WARN_ON(range_overflows_t(resource_size_t,
gtdef->mapping_base,
SZ_16M,
@@ -994,36 +1004,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
-struct reg_and_bit {
- union {
- i915_reg_t reg;
- i915_mcr_reg_t mcr_reg;
- };
- u32 bit;
-};
-
-static struct reg_and_bit
-get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
- const i915_reg_t *regs, const unsigned int num)
-{
- const unsigned int class = engine->class;
- struct reg_and_bit rb = { };
-
- if (drm_WARN_ON_ONCE(&engine->i915->drm,
- class >= num || !regs[class].reg))
- return rb;
-
- rb.reg = regs[class];
- if (gen8 && class == VIDEO_DECODE_CLASS)
- rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
- else
- rb.bit = engine->instance;
-
- rb.bit = BIT(rb.bit);
-
- return rb;
-}
-
/*
* HW architecture suggests a typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -1037,14 +1017,20 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
-static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
+static int wait_for_invalidate(struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
+ if (engine->tlb_inv.mcr)
+ return intel_gt_mcr_wait_for_reg(engine->gt,
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
- return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
+ return __intel_wait_for_register_fw(engine->gt->uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
@@ -1052,78 +1038,35 @@ static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
static void mmio_invalidate_full(struct intel_gt *gt)
{
- static const i915_reg_t gen8_regs[] = {
- [RENDER_CLASS] = GEN8_RTCR,
- [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
- [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
- [COPY_ENGINE_CLASS] = GEN8_BTCR,
- };
- static const i915_reg_t gen12_regs[] = {
- [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
- };
- static const i915_mcr_reg_t xehp_regs[] = {
- [RENDER_CLASS] = XEHP_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = XEHP_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = XEHP_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = XEHP_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = XEHP_COMPCTX_TLB_INV_CR,
- };
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
- const i915_reg_t *regs;
- unsigned int num = 0;
+ unsigned long flags;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- regs = NULL;
- num = ARRAY_SIZE(xehp_regs);
- } else if (GRAPHICS_VER(i915) == 12) {
- regs = gen12_regs;
- num = ARRAY_SIZE(gen12_regs);
- } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
- regs = gen8_regs;
- num = ARRAY_SIZE(gen8_regs);
- } else if (GRAPHICS_VER(i915) < 8) {
- return;
- }
-
- if (drm_WARN_ONCE(&i915->drm, !num,
- "Platform does not implement TLB invalidation!"))
+ if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
awake = 0;
for_each_engine(engine, gt, id) {
- struct reg_and_bit rb;
-
if (!intel_engine_pm_is_awake(engine))
continue;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
- xehp_regs[engine->class],
- BIT(engine->instance));
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
- if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS))
- rb.bit = _MASKED_BIT_ENABLE(rb.bit);
-
- intel_uncore_write_fw(uncore, rb.reg, rb.bit);
- }
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.request);
+ else
+ intel_uncore_write_fw(uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.request);
+
awake |= engine->mask;
}
@@ -1138,22 +1081,14 @@ static void mmio_invalidate_full(struct intel_gt *gt)
IS_ALDERLAKE_P(i915)))
intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
- spin_unlock_irq(&uncore->lock);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
- struct reg_and_bit rb;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- rb.mcr_reg = xehp_regs[engine->class];
- rb.bit = BIT(engine->instance);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- }
-
- if (wait_for_invalidate(gt, rb))
- drm_err_ratelimited(&gt->i915->drm,
- "%s TLB invalidation did not complete in %ums!\n",
- engine->name, TLB_INVAL_TIMEOUT_MS);
+ if (wait_for_invalidate(engine))
+ gt_err_ratelimited(gt,
+ "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, TLB_INVAL_TIMEOUT_MS);
}
/*
@@ -1198,3 +1133,7 @@ unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_tlb.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index e0365d556248..d2f4fbde5f9f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -39,6 +39,11 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
return container_of(huc, struct intel_gt, uc.huc);
}
+static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
+{
+ return container_of(gsc_uc, struct intel_gt, uc.gsc);
+}
+
static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
return container_of(gsc, struct intel_gt, gsc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 2a6a4ca7fdad..7c9be4fd1c8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,6 +7,7 @@
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
static u32 read_reference_ts_freq(struct intel_uncore *uncore)
@@ -193,10 +194,9 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
- dev_err(gt->i915->drm.dev,
- "GT clock frequency changed, was %uHz, now %uHz!\n",
- gt->clock_frequency,
- read_clock_frequency(gt->uncore));
+ gt_err(gt, "GT clock frequency changed, was %uHz, now %uHz!\n",
+ gt->clock_frequency,
+ read_clock_frequency(gt->uncore));
}
}
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index dd53641f3637..4dc23b8d3aa2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -12,7 +12,6 @@
#include "intel_gt_mcr.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_sseu_debugfs.h"
-#include "pxp/intel_pxp_debugfs.h"
#include "uc/intel_uc_debugfs.h"
int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
@@ -84,11 +83,13 @@ static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
void intel_gt_debugfs_register(struct intel_gt *gt)
{
struct dentry *root;
+ char gtname[4];
if (!gt->i915->drm.primary->debugfs_root)
return;
- root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ snprintf(gtname, sizeof(gtname), "gt%u", gt->info.id);
+ root = debugfs_create_dir(gtname, gt->i915->drm.primary->debugfs_root);
if (IS_ERR(root))
return;
@@ -99,7 +100,6 @@ void intel_gt_debugfs_register(struct intel_gt *gt)
intel_sseu_debugfs_register(gt, root);
intel_uc_debugfs_register(&gt->uc, root);
- intel_pxp_debugfs_register(&gt->pxp, root);
}
void intel_gt_debugfs_register_files(struct dentry *root,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 6f6b9e04d916..1b25a6039152 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -10,6 +10,7 @@
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
@@ -47,9 +48,8 @@ gen11_gt_engine_identity(struct intel_gt *gt,
!time_after32(local_clock() >> 10, timeout_ts));
if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
- drm_err(&gt->i915->drm,
- "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
- bank, bit, ident);
+ gt_err(gt, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
return 0;
}
@@ -76,7 +76,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
- return intel_pxp_irq_handler(&gt->pxp, iir);
+ return intel_pxp_irq_handler(gt->i915->pxp, iir);
if (instance == OTHER_GSC_INSTANCE)
return intel_gsc_irq_handler(gt, iir);
@@ -378,8 +378,7 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_CS_MASTER_ERROR_INTERRUPT))
- drm_dbg(&gt->i915->drm, "Command parser error, gt_iir 0x%08x\n",
- gt_iir);
+ gt_dbg(gt, "Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index ea86c1ab5dc5..0b414eae1683 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_gt_mcr.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
/**
@@ -34,7 +35,7 @@
* ignored.
*/
-#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+#define HAS_MSLICE_STEERING(i915) (INTEL_INFO(i915)->has_mslice_steering)
static const char * const intel_steering_types[] = {
"L3BANK",
@@ -143,6 +144,8 @@ void intel_gt_mcr_init(struct intel_gt *gt)
unsigned long fuse;
int i;
+ spin_lock_init(&gt->mcr_lock);
+
/*
* An mslice is unavailable only if both the meml3 for the slice is
* disabled *and* all of the DSS in the slice (quadrant) are disabled.
@@ -156,14 +159,21 @@ void intel_gt_mcr_init(struct intel_gt *gt)
GEN12_MEML3_EN_MASK);
if (!gt->info.mslice_mask) /* should be impossible! */
- drm_warn(&i915->drm, "mslice mask all zero!\n");
+ gt_warn(gt, "mslice mask all zero!\n");
}
if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
- fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
- intel_uncore_read(gt->uncore, XEHP_FUSE4));
+ /* Wa_14016747170 */
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
+ fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
+ intel_uncore_read(gt->uncore,
+ MTL_GT_ACTIVITY_FACTOR));
+ else
+ fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
+ intel_uncore_read(gt->uncore, XEHP_FUSE4));
/*
* Despite the register field being named "exclude mask" the
@@ -196,7 +206,7 @@ void intel_gt_mcr_init(struct intel_gt *gt)
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
if (!gt->info.l3bank_mask) /* should be impossible! */
- drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
+ gt_warn(gt, "L3 bank mask is all zero!\n");
} else if (GRAPHICS_VER(i915) >= 11) {
/*
* We expect all modern platforms to have at least some
@@ -221,24 +231,26 @@ static i915_reg_t mcr_reg_cast(const i915_mcr_reg_t mcr)
/*
* rw_with_mcr_steering_fw - Access a register with specific MCR steering
- * @uncore: pointer to struct intel_uncore
+ * @gt: GT to read register from
* @reg: register being accessed
* @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
* @group: group number (documented as "sliceid" on older platforms)
* @instance: instance number (documented as "subsliceid" on older platforms)
* @value: register value to be written (ignored for read)
*
+ * Context: The caller must hold the MCR lock
* Return: 0 for write access. register value for read access.
*
* Caller needs to make sure the relevant forcewake wells are up.
*/
-static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
i915_mcr_reg_t reg, u8 rw_flag,
int group, int instance, u32 value)
{
+ struct intel_uncore *uncore = gt->uncore;
u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
- lockdep_assert_held(&uncore->lock);
+ lockdep_assert_held(&gt->mcr_lock);
if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
/*
@@ -308,12 +320,14 @@ static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
return val;
}
-static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
+static u32 rw_with_mcr_steering(struct intel_gt *gt,
i915_mcr_reg_t reg, u8 rw_flag,
int group, int instance,
u32 value)
{
+ struct intel_uncore *uncore = gt->uncore;
enum forcewake_domains fw_domains;
+ unsigned long flags;
u32 val;
fw_domains = intel_uncore_forcewake_for_reg(uncore, mcr_reg_cast(reg),
@@ -322,24 +336,98 @@ static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
- spin_lock_irq(&uncore->lock);
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);
+ val = rw_with_mcr_steering_fw(gt, reg, rw_flag, group, instance, value);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irq(&uncore->lock);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
return val;
}
/**
+ * intel_gt_mcr_lock - Acquire MCR steering lock
+ * @gt: GT structure
+ * @flags: storage to save IRQ flags to
+ *
+ * Performs locking to protect the steering for the duration of an MCR
+ * operation. On MTL and beyond, a hardware lock will also be taken to
+ * serialize access not only for the driver, but also for external hardware and
+ * firmware agents.
+ *
+ * Context: Takes gt->mcr_lock. uncore->lock should *not* be held when this
+ * function is called, although it may be acquired after this
+ * function call.
+ */
+void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
+ __acquires(&gt->mcr_lock)
+{
+ unsigned long __flags;
+ int err = 0;
+
+ lockdep_assert_not_held(&gt->uncore->lock);
+
+ /*
+ * Starting with MTL, we need to coordinate not only with other
+ * driver threads, but also with hardware/firmware agents. A dedicated
+ * locking register is used.
+ */
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ err = wait_for(intel_uncore_read_fw(gt->uncore,
+ MTL_STEER_SEMAPHORE) == 0x1, 100);
+
+ /*
+ * Even on platforms with a hardware lock, we'll continue to grab
+ * a software spinlock too for lockdep purposes. If the hardware lock
+ * was already acquired, there should never be contention on the
+ * software lock.
+ */
+ spin_lock_irqsave(&gt->mcr_lock, __flags);
+
+ *flags = __flags;
+
+ /*
+ * In theory we should never fail to acquire the HW semaphore; this
+ * would indicate some hardware/firmware is misbehaving and not
+ * releasing it properly.
+ */
+ if (err == -ETIMEDOUT) {
+ gt_err_ratelimited(gt, "hardware MCR steering semaphore timed out");
+ add_taint_for_CI(gt->i915, TAINT_WARN); /* CI is now unreliable */
+ }
+}
+
+/**
+ * intel_gt_mcr_unlock - Release MCR steering lock
+ * @gt: GT structure
+ * @flags: IRQ flags to restore
+ *
+ * Releases the lock acquired by intel_gt_mcr_lock().
+ *
+ * Context: Releases gt->mcr_lock
+ */
+void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
+ __releases(&gt->mcr_lock)
+{
+ spin_unlock_irqrestore(&gt->mcr_lock, flags);
+
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+}
+
+/**
* intel_gt_mcr_read - read a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to read
* @group: the MCR group
* @instance: the MCR instance
*
+ * Context: Takes and releases gt->mcr_lock
+ *
* Returns the value read from an MCR register after steering toward a specific
* group/instance.
*/
@@ -347,7 +435,7 @@ u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_mcr_reg_t reg,
int group, int instance)
{
- return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
+ return rw_with_mcr_steering(gt, reg, FW_REG_READ, group, instance, 0);
}
/**
@@ -360,11 +448,13 @@ u32 intel_gt_mcr_read(struct intel_gt *gt,
*
* Write an MCR register in unicast mode after steering toward a specific
* group/instance.
+ *
+ * Context: Calls a function that takes and releases gt->mcr_lock
*/
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value,
int group, int instance)
{
- rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
+ rw_with_mcr_steering(gt, reg, FW_REG_WRITE, group, instance, value);
}
/**
@@ -374,10 +464,16 @@ void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 val
* @value: value to write
*
* Write an MCR register in multicast mode to update all instances.
+ *
+ * Context: Takes and releases gt->mcr_lock
*/
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
i915_mcr_reg_t reg, u32 value)
{
+ unsigned long flags;
+
+ intel_gt_mcr_lock(gt, &flags);
+
/*
* Ensure we have multicast behavior, just in case some non-i915 agent
* left the hardware in unicast mode.
@@ -386,6 +482,8 @@ void intel_gt_mcr_multicast_write(struct intel_gt *gt,
intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);
+
+ intel_gt_mcr_unlock(gt, flags);
}
/**
@@ -398,9 +496,13 @@ void intel_gt_mcr_multicast_write(struct intel_gt *gt,
* function assumes the caller is already holding any necessary forcewake
* domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
* be obtained automatically.
+ *
+ * Context: The caller must hold gt->mcr_lock.
*/
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
{
+ lockdep_assert_held(&gt->mcr_lock);
+
/*
* Ensure we have multicast behavior, just in case some non-i915 agent
* left the hardware in unicast mode.
@@ -427,6 +529,8 @@ void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u3
* domains; use intel_gt_mcr_multicast_rmw() in cases where forcewake should
* be obtained automatically.
*
+ * Context: Calls functions that take and release gt->mcr_lock
+ *
* Returns the old (unmodified) value read.
*/
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
@@ -457,12 +561,15 @@ static bool reg_needs_read_steering(struct intel_gt *gt,
i915_mcr_reg_t reg,
enum intel_steering_type type)
{
- const u32 offset = i915_mmio_reg_offset(reg);
+ u32 offset = i915_mmio_reg_offset(reg);
const struct intel_mmio_range *entry;
if (likely(!gt->steering_table[type]))
return false;
+ if (IS_GSI_REG(offset))
+ offset += gt->uncore->gsi_offset;
+
for (entry = gt->steering_table[type]; entry->end; entry++) {
if (offset >= entry->start && offset <= entry->end)
return true;
@@ -578,6 +685,8 @@ void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
* domains; use intel_gt_mcr_read_any() in cases where forcewake should be
* obtained automatically.
*
+ * Context: The caller must hold gt->mcr_lock.
+ *
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
@@ -585,10 +694,12 @@ u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
int type;
u8 group, instance;
+ lockdep_assert_held(&gt->mcr_lock);
+
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
- return rw_with_mcr_steering_fw(gt->uncore, reg,
+ return rw_with_mcr_steering_fw(gt, reg,
FW_REG_READ,
group, instance, 0);
}
@@ -605,6 +716,8 @@ u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
* Reads a GT MCR register. The read will be steered to a non-terminated
* instance (i.e., one that isn't fused off or powered down by power gating).
*
+ * Context: Calls a function that takes and releases gt->mcr_lock.
+ *
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
@@ -615,7 +728,7 @@ u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
- return rw_with_mcr_steering(gt->uncore, reg,
+ return rw_with_mcr_steering(gt, reg,
FW_REG_READ,
group, instance, 0);
}
@@ -728,6 +841,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
* Note that this routine assumes the caller holds forcewake asserted, it is
* not suitable for very long waits.
*
+ * Context: Calls a function that takes and releases gt->mcr_lock
* Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
@@ -739,7 +853,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
{
int ret;
- lockdep_assert_not_held(&gt->uncore->lock);
+ lockdep_assert_not_held(&gt->mcr_lock);
#define done ((intel_gt_mcr_read_any(gt, reg) & mask) == value)
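
Callers that batch several steered writes take the new lock once around the whole sequence; the intel_gtt.c and intel_mocs.c hunks later in this patch use exactly this shape (forcewake must already be held for the _fw variants):

	unsigned long flags;

	intel_gt_mcr_lock(gt, &flags);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_gt_mcr_unlock(gt, flags);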
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
index ae93b20e1c17..41684495b7da 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -9,6 +9,8 @@
#include "intel_gt_types.h"
void intel_gt_mcr_init(struct intel_gt *gt);
+void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
+void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_mcr_reg_t reg,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 16db85fab0b1..e02cb90723ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -14,37 +14,16 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
-#include "intel_pcode.h"
#include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
-static void mtl_media_busy(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
-static void mtl_media_idle(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
@@ -92,9 +71,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
- /* Wa_14017073508: mtl */
- mtl_media_busy(gt);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -144,9 +120,6 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- /* Wa_14017073508: mtl */
- mtl_media_idle(gt);
-
return 0;
}
@@ -275,8 +248,7 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- i915_probe_error(gt->i915,
- "Failed to initialize GPU, declaring it wedged!\n");
+ gt_probe_error(gt, "Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
@@ -293,9 +265,8 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_put(engine);
if (err) {
- drm_err(&gt->i915->drm,
- "Failed to restart %s (%d)\n",
- engine->name, err);
+ gt_err(gt, "Failed to restart %s (%d)\n",
+ engine->name, err);
goto err_wedged;
}
}
@@ -304,8 +275,6 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uc_resume(&gt->uc);
- intel_pxp_resume(&gt->pxp);
-
user_forcewake(gt, false);
out_fw:
@@ -339,8 +308,6 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
{
user_forcewake(gt, true);
wait_for_suspend(gt);
-
- intel_pxp_suspend_prepare(&gt->pxp);
}
static suspend_state_t pm_suspend_target(void)
@@ -365,7 +332,6 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
- intel_pxp_suspend(&gt->pxp);
/*
* On disabling the device, we want to turn off HW access to memory
@@ -393,7 +359,6 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
- intel_pxp_runtime_suspend(&gt->pxp);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -411,8 +376,6 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
- intel_pxp_runtime_resume(&gt->pxp);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 83df4cd5e06c..80dbbef86b1d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data)
}
DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
- perf_limit_reasons_clear, "%llu\n");
+ perf_limit_reasons_clear, "0x%llx\n");
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h
new file mode 100644
index 000000000000..55a336a9ff06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_PRINT__
+#define __INTEL_GT_PRINT__
+
+#include <drm/drm_print.h>
+#include "intel_gt_types.h"
+#include "i915_utils.h"
+
+#define gt_err(_gt, _fmt, ...) \
+ drm_err(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_warn(_gt, _fmt, ...) \
+ drm_warn(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_notice(_gt, _fmt, ...) \
+ drm_notice(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_info(_gt, _fmt, ...) \
+ drm_info(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_dbg(_gt, _fmt, ...) \
+ drm_dbg(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_err_ratelimited(_gt, _fmt, ...) \
+ drm_err_ratelimited(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_notice_ratelimited(_gt, _fmt, ...) \
+ dev_notice_ratelimited((_gt)->i915->drm.dev, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_probe_error(_gt, _fmt, ...) \
+ do { \
+ if (i915_error_injected()) \
+ gt_dbg(_gt, _fmt, ##__VA_ARGS__); \
+ else \
+ gt_err(_gt, _fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define gt_WARN(_gt, _condition, _fmt, ...) \
+ drm_WARN(&(_gt)->i915->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
+ drm_WARN_ONCE(&(_gt)->i915->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_WARN_ON(_gt, _condition) \
+ gt_WARN(_gt, _condition, "%s", "gt_WARN_ON(" __stringify(_condition) ")")
+
+#define gt_WARN_ON_ONCE(_gt, _condition) \
+ gt_WARN_ONCE(_gt, _condition, "%s", "gt_WARN_ONCE(" __stringify(_condition) ")")
+
+#endif /* __INTEL_GT_PRINT__ */
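
A converted call site elsewhere in this series then reduces to the following; the GT instance is prefixed automatically, and the dmesg line shown is only illustrative:

	gt_err(gt, "Failed to restart %s (%d)\n", engine->name, err);
	/*
	 * expands to: drm_err(&gt->i915->drm, "GT%u: Failed to restart %s (%d)\n",
	 *                     gt->info.id, engine->name, err);
	 * dmesg (illustrative): ... *ERROR* GT0: Failed to restart rcs0 (-5)
	 */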
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index c3cd92691795..fd1f9cd35e9d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -9,8 +9,6 @@
#include "i915_reg_defs.h"
#include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */
-#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
-
/*
* The perf control registers are technically multicast registers, but the
* driver never needs to read/write them directly; we only use them to build
@@ -67,6 +65,7 @@
#define GMD_ID_MEDIA _MMIO(MTL_MEDIA_GSI_BASE + 0xd8c)
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
+#define MTL_STEER_SEMAPHORE _MMIO(0xfd0)
#define MTL_MCR_SELECTOR _MMIO(0xfd4)
#define SF_MCR_SELECTOR _MMIO(0xfd8)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -406,13 +405,16 @@
#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
-#define CHICKEN_RASTER_1 _MMIO(0x6204)
+#define XEHP_CULLBIT1 MCR_REG(0x6100)
+
+#define CHICKEN_RASTER_1 MCR_REG(0x6204)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
-#define CHICKEN_RASTER_2 _MMIO(0x6208)
+#define CHICKEN_RASTER_2 MCR_REG(0x6208)
#define TBIMR_FAST_CLIP REG_BIT(5)
#define VFLSKPD MCR_REG(0x62a8)
+#define VF_PREFETCH_TLB_DIS REG_BIT(5)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
@@ -429,9 +431,10 @@
#define RC_OP_FLUSH_ENABLE (1 << 0)
#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
+#define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE REG_BIT(6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE REG_BIT(6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE REG_BIT(1)
#define GEN7_GT_MODE _MMIO(0x7008)
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
@@ -454,9 +457,14 @@
#define HZ_DEPTH_TEST_LE_GE_OPT_DISABLE REG_BIT(13)
#define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
+#define XEHP_CULLBIT2 MCR_REG(0x7030)
+
#define GEN8_L3CNTLREG _MMIO(0x7034)
#define GEN8_ERRDETBCTRL (1 << 9)
+#define XEHP_PSS_MODE2 MCR_REG(0x703c)
+#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
+
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
@@ -470,6 +478,9 @@
#define HDC_FORCE_NON_COHERENT (1 << 4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+#define COMMON_SLICE_CHICKEN4 _MMIO(0x7300)
+#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
+
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
@@ -676,10 +687,7 @@
#define GEN6_RSTCTL _MMIO(0x9420)
#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
-
-#define GEN8_MISCCPCTL MCR_REG(0x9424)
-#define GEN8_DOP_CLOCK_GATE_ENABLE REG_BIT(0)
+#define GEN7_DOP_CLOCK_GATE_ENABLE REG_BIT(0)
#define GEN12_DOP_CLOCK_GATE_RENDER_ENABLE REG_BIT(1)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
@@ -762,9 +770,6 @@
#define GEN10_DFR_RATIO_EN_AND_CHICKEN MCR_REG(0x9550)
#define DFR_DISABLE (1 << 9)
-#define INF_UNIT_LEVEL_CLKGATE MCR_REG(0x9560)
-#define CGPSF_CLKGATE_DIS (1 << 3)
-
#define MICRO_BP0_0 _MMIO(0x9800)
#define MICRO_BP0_2 _MMIO(0x9804)
#define MICRO_BP0_1 _MMIO(0x9808)
@@ -917,6 +922,10 @@
#define MSG_IDLE_FW_MASK REG_GENMASK(13, 9)
#define MSG_IDLE_FW_SHIFT 9
+#define RC_PSMI_CTRL_GSCCS _MMIO(0x11a050)
+#define IDLE_MSG_DISABLE REG_BIT(0)
+#define PWRCTX_MAXCNT_GSCCS _MMIO(0x11a054)
+
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
@@ -949,10 +958,11 @@
#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
#define GEN8_GARBCNTL _MMIO(0xb004)
-#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
-#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22)
-#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0)
-#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0)
+#define GEN11_ARBITRATION_PRIO_ORDER_MASK REG_GENMASK(27, 22)
+#define GEN12_BUS_HASH_CTL_BIT_EXC REG_BIT(7)
+#define GEN9_GAPS_TSV_CREDIT_DISABLE REG_BIT(7)
+#define GEN11_HASH_CTRL_EXCL_MASK REG_GENMASK(6, 0)
+#define GEN11_HASH_CTRL_EXCL_BIT0 REG_FIELD_PREP(GEN11_HASH_CTRL_EXCL_MASK, 0x1)
#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
@@ -964,7 +974,8 @@
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1 << 19)
-#define XEHPC_LNCFMISCCFGREG0 _MMIO(0xb01c)
+#define XEHPC_LNCFMISCCFGREG0 MCR_REG(0xb01c)
+#define XEHPC_HOSTCACHEEN REG_BIT(1)
#define XEHPC_OVRLSCCC REG_BIT(0)
#define GEN7_L3CNTLREG2 _MMIO(0xb020)
@@ -1026,7 +1037,7 @@
#define XEHP_L3SCQREG7 MCR_REG(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
-#define XEHPC_L3SCRUB _MMIO(0xb18c)
+#define XEHPC_L3SCRUB MCR_REG(0xb18c)
#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
@@ -1080,20 +1091,24 @@
#define XEHP_BLT_TLB_INV_CR MCR_REG(0xcee4)
#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04)
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
+#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
#define COMP_MOD_CTRL MCR_REG(0xcf30)
-#define VDBX_MOD_CTRL MCR_REG(0xcf34)
-#define VEBX_MOD_CTRL MCR_REG(0xcf38)
+#define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */
+#define XEHP_VDBX_MOD_CTRL MCR_REG(0xcf34)
+#define XELPMP_VDBX_MOD_CTRL _MMIO(0xcf34)
+#define XEHP_VEBX_MOD_CTRL MCR_REG(0xcf38)
+#define XELPMP_VEBX_MOD_CTRL _MMIO(0xcf38)
#define FORCE_MISS_FTLB REG_BIT(3)
-#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define XEHP_GAMSTLB_CTRL MCR_REG(0xcf4c)
#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
-#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define XEHP_GAMCNTRL_CTRL MCR_REG(0xcf54)
#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
@@ -1129,6 +1144,8 @@
#define ENABLE_SMALLPL REG_BIT(15)
#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+#define MTL_DISABLE_SAMPLER_SC_OOO REG_BIT(3)
+#define GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE REG_BIT(0)
#define GEN9_HALF_SLICE_CHICKEN7 MCR_REG(0xe194)
#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
@@ -1140,7 +1157,13 @@
#define ENABLE_EU_COUNT_FOR_TDL_FLUSH REG_BIT(10)
#define DISABLE_ECC REG_BIT(5)
#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
+/*
+ * We have both ENABLE and DISABLE defines below using the same bit because the
+ * meaning depends on the target platform. There is no platform prefix for them
+ * because different steppings of DG2 pick one or the other semantics.
+ */
#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define DISABLE_PREFETCH_INTO_IC REG_BIT(3)
#define EU_PERF_CNTL0 PERF_REG(0xe458)
#define EU_PERF_CNTL4 PERF_REG(0xe45c)
@@ -1155,7 +1178,9 @@
#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
+#define GEN9_ROW_CHICKEN3 MCR_REG(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+#define MTL_DISABLE_FIX_FOR_EOT_FLUSH REG_BIT(9)
#define GEN8_ROW_CHICKEN MCR_REG(0xe4f0)
#define FLOW_CONTROL_ENABLE REG_BIT(15)
@@ -1524,6 +1549,9 @@
#define MTL_MEDIA_MC6 _MMIO(0x138048)
+#define MTL_GT_ACTIVITY_FACTOR _MMIO(0x138010)
+#define MTL_GT_L3_EXC_MASK REG_GENMASK(5, 3)
+
#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
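
Several of the register updates above replace open-coded shifts with the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers from i915_reg_defs.h. The GEN11_HASH_CTRL_EXCL change, for example, is equivalent to the following (the _OLD/_NEW names are only for illustration):

	#define GEN11_HASH_CTRL_EXCL_MASK_OLD	(0x7f << 0)		/* open-coded */
	#define GEN11_HASH_CTRL_EXCL_MASK_NEW	REG_GENMASK(6, 0)	/* bits 6:0 == 0x7f */

	/* REG_FIELD_PREP() shifts the value into the masked field with type checking. */
	#define GEN11_HASH_CTRL_EXCL_BIT0	REG_FIELD_PREP(GEN11_HASH_CTRL_EXCL_MASK_NEW, 0x1)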
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 9486dd3bed99..33cba406b569 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -12,6 +12,7 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
+#include "intel_gt_print.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
#include "intel_gt_types.h"
@@ -71,7 +72,7 @@ static void kobj_gt_release(struct kobject *kobj)
{
}
-static struct kobj_type kobj_gt_type = {
+static const struct kobj_type kobj_gt_type = {
.release = kobj_gt_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = id_groups,
@@ -105,8 +106,7 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
exit_fail:
kobject_put(&gt->sysfs_gt);
- drm_warn(&gt->i915->drm,
- "failed to initialize gt%d sysfs root\n", gt->info.id);
+ gt_warn(gt, "failed to initialize sysfs root\n");
}
void intel_gt_sysfs_unregister(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index cf71305ad586..28f27091cd3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -11,6 +11,7 @@
#include "i915_reg.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
@@ -164,7 +165,6 @@ sysfs_gt_attribute_r_func(struct kobject *kobj, struct attribute *attr,
NULL); \
INTEL_GT_ATTR_RO(_name)
-#ifdef CONFIG_PM
static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id)
{
intel_wakeref_t wakeref;
@@ -300,14 +300,12 @@ static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
- if (!HAS_RC6(gt->i915))
+ if (!IS_ENABLED(CONFIG_PM) || !HAS_RC6(gt->i915))
return;
ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u RC6 sysfs files (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create RC6 sysfs files (%pe)\n", ERR_PTR(ret));
/*
* cannot use the is_visible() attribute because
@@ -316,24 +314,15 @@ static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
if (HAS_RC6p(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u RC6p sysfs files (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create RC6p sysfs files (%pe)\n", ERR_PTR(ret));
}
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create media %u RC6 sysfs files (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create media RC6 sysfs files (%pe)\n", ERR_PTR(ret));
}
}
-#else
-static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
-{
-}
-#endif /* CONFIG_PM */
static u32 __act_freq_mhz_show(struct intel_gt *gt)
{
@@ -745,9 +734,7 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
ret = intel_sysfs_rps_init(gt, kobj);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u RPS sysfs files (%pe)",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create RPS sysfs files (%pe)", ERR_PTR(ret));
/* end of the legacy interfaces */
if (!is_object_gt(kobj))
@@ -755,29 +742,22 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
ret = sysfs_create_file(kobj, &attr_punit_req_freq_mhz.attr);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create punit_req_freq_mhz sysfs (%pe)", ERR_PTR(ret));
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create throttle sysfs files (%pe)", ERR_PTR(ret));
}
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create media_perf_power_attrs sysfs (%pe)\n",
+ ERR_PTR(ret));
}
ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to add gt%u rps defaults (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to add rps defaults (%pe)\n", ERR_PTR(ret));
}
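
The CONFIG_PM handling above drops the #ifdef/empty-stub pair in favour of an IS_ENABLED() check inside the one remaining definition, which keeps both configurations compile-tested; schematically:

	/* Before: two definitions selected by the preprocessor. */
	#ifdef CONFIG_PM
	static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
	{ /* ... create RC6 sysfs groups ... */ }
	#else
	static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj) {}
	#endif

	/* After: one definition; IS_ENABLED(CONFIG_PM) folds to 0 or 1 at compile time. */
	static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
	{
		if (!IS_ENABLED(CONFIG_PM) || !HAS_RC6(gt->i915))
			return;
		/* ... create RC6 sysfs groups ... */
	}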
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index c1d9cd255e06..f08c2556aa25 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -30,7 +30,6 @@
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
-#include "pxp/intel_pxp_types.h"
#include "intel_wopcm.h"
struct drm_i915_private;
@@ -233,6 +232,14 @@ struct intel_gt {
u8 instanceid;
} default_steering;
+ /**
+ * @mcr_lock: Protects the MCR steering register
+ *
+ * Protects the MCR steering register (e.g., GEN8_MCR_SELECTOR).
+ * Should be taken before uncore->lock in cases where both are desired.
+ */
+ spinlock_t mcr_lock;
+
/*
* Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
*/
@@ -267,8 +274,6 @@ struct intel_gt {
u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
} mocs;
- struct intel_pxp pxp;
-
/* gt/gtN sysfs */
struct kobject sysfs_gt;
@@ -277,6 +282,9 @@ struct intel_gt {
struct kobject *sysfs_defaults;
struct i915_perf_gt perf;
+
+ /** link: &ggtt.gt_list */
+ struct list_head ggtt_link;
};
struct intel_gt_definition {
@@ -296,12 +304,6 @@ enum intel_gt_scratch_field {
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
-
- /* 6 * 8 bytes */
- INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
-
- /* 4 bytes */
- INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
#endif /* __INTEL_GT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 2ba3983984b9..4f436ba7a3c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
@@ -461,9 +462,9 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
+ gt_WARN_ON_ONCE(gt, can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
}
}
@@ -482,14 +483,25 @@ static void tgl_setup_private_ppat(struct intel_uncore *uncore)
static void xehp_setup_private_ppat(struct intel_gt *gt)
{
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
+ enum forcewake_domains fw;
+ unsigned long flags;
+
+ fw = intel_uncore_forcewake_for_reg(gt->uncore, _MMIO(XEHP_PAT_INDEX(0).reg),
+ FW_REG_WRITE);
+ intel_uncore_forcewake_get(gt->uncore, fw);
+
+ intel_gt_mcr_lock(gt, &flags);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
+ intel_gt_mcr_unlock(gt, flags);
+
+ intel_uncore_forcewake_put(gt->uncore, fw);
}
static void icl_setup_private_ppat(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 4d75ba4bb41d..69ce55f517f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -298,6 +298,8 @@ struct i915_address_space {
u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
u64 start, u64 length);
+ void (*scratch_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -355,19 +357,6 @@ struct i915_ggtt {
bool do_idle_maps;
- /**
- * @pte_lost: Are ptes lost on resume?
- *
- * Whether the system was recently restored from hibernate and
- * thus may have lost pte content.
- */
- bool pte_lost;
-
- /**
- * @probed_pte: Probed pte value on suspend. Re-checked on resume.
- */
- u64 probed_pte;
-
int mtrr;
/** Bit 6 swizzling required for X tiling */
@@ -390,6 +379,9 @@ struct i915_ggtt {
struct mutex error_mutex;
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
+
+ /** List of GTs mapping this GGTT */
+ struct list_head gt_list;
};
struct i915_ppgtt {
@@ -579,11 +571,10 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
+struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);
static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
@@ -600,17 +591,6 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-/**
- * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
- * @i915 The device private.
- * @val whether the ptes should be marked as lost.
- *
- * In some cases pte content is retained across suspend, but typically lost
- * across hibernate. Typically they should be marked as lost on
- * hibernation restore and such marking cleared on suspend.
- */
-void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);
-
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7771a19008c6..81a96c52a92b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -288,39 +288,6 @@ static const u8 dg2_xcs_offsets[] = {
END
};
-static const u8 mtl_xcs_offsets[] = {
- NOP(1),
- LRI(13, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
- NOP(4),
-
- NOP(1),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- END
-};
-
static const u8 gen8_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
@@ -739,9 +706,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
else
return gen8_rcs_offsets;
} else {
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
- return mtl_xcs_offsets;
- else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
@@ -1351,16 +1316,16 @@ static u32 *
dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG);
+ *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG(ce->engine->mmio_base));
*cs++ = 0x21;
*cs++ = MI_LOAD_REGISTER_REG;
*cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
- *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT1);
+ *cs++ = i915_mmio_reg_offset(XEHP_CULLBIT1);
*cs++ = MI_LOAD_REGISTER_REG;
*cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
- *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT2);
+ *cs++ = i915_mmio_reg_offset(XEHP_CULLBIT2);
return cs;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 5fb74e71f27b..3f638f198796 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -352,6 +352,8 @@ static int max_pte_pkt_size(struct i915_request *rq, int pkt)
return pkt;
}
+#define I915_EMIT_PTE_NUM_DWORDS 6
+
static int emit_pte(struct i915_request *rq,
struct sgt_dma *it,
enum i915_cache_level cache_level,
@@ -393,7 +395,7 @@ static int emit_pte(struct i915_request *rq,
offset += (u64)rq->engine->instance << 32;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -416,7 +418,7 @@ static int emit_pte(struct i915_request *rq,
intel_ring_advance(rq, cs);
intel_ring_update_space(ring);
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
if (IS_ERR(cs))
return PTR_ERR(cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 49fdd509527a..69b489e8dfed 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -613,14 +613,17 @@ static u32 l3cc_combine(u16 low, u16 high)
static void init_l3cc_table(struct intel_gt *gt,
const struct drm_i915_mocs_table *table)
{
+ unsigned long flags;
unsigned int i;
u32 l3cc;
+ intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i)
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
+ intel_gt_mcr_unlock(gt, flags);
}
void intel_mocs_init_engine(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 2ee4051e4d96..8f3cd68d14f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -301,7 +301,7 @@ static int chv_rc6_init(struct intel_rc6 *rc6)
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
- paddr = i915->dsm.end + 1 - pctx_size;
+ paddr = i915->dsm.stolen.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
@@ -325,7 +325,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
/* BIOS set it up already, grab the pre-alloc'd space */
resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.stolen.start;
pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
pcbr_offset,
pctx_size,
@@ -354,10 +354,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
}
GEM_BUG_ON(range_overflows_end_t(u64,
- i915->dsm.start,
+ i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
- pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ pctx_paddr = i915->dsm.stolen.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
out:
@@ -420,6 +420,21 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
}
+bool intel_check_bios_c6_setup(struct intel_rc6 *rc6)
+{
+ if (!rc6->bios_state_captured) {
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ rc6->bios_rc_state = intel_uncore_read(uncore, GEN6_RC_STATE);
+
+ rc6->bios_state_captured = true;
+ }
+
+ return rc6->bios_rc_state & RC_SW_TARGET_STATE_MASK;
+}
+
static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
@@ -448,8 +463,8 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
*/
rc6_ctx_base =
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
- rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ if (!(rc6_ctx_base >= i915->dsm.reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm.reserved.end)) {
drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -486,6 +501,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
static bool rc6_supported(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915))
return false;
@@ -502,6 +518,20 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
}
+ if (IS_METEORLAKE(gt->i915) &&
+ !intel_check_bios_c6_setup(rc6)) {
+ drm_notice(&i915->drm,
+ "C6 disabled by BIOS\n");
+ return false;
+ }
+
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
+ gt->type == GT_MEDIA) {
+ drm_notice(&i915->drm,
+ "Media RC6 disabled on A step\n");
+ return false;
+ }
+
return true;
}
@@ -699,9 +729,14 @@ void intel_rc6_disable(struct intel_rc6 *rc6)
void intel_rc6_fini(struct intel_rc6 *rc6)
{
struct drm_i915_gem_object *pctx;
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
intel_rc6_disable(rc6);
+ /* We want the BIOS C6 state preserved across loads for MTL */
+ if (IS_METEORLAKE(rc6_to_i915(rc6)) && rc6->bios_state_captured)
+ set(uncore, GEN6_RC_STATE, rc6->bios_rc_state);
+
pctx = fetch_and_zero(&rc6->pctx);
if (pctx)
i915_gem_object_put(pctx);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
index 456fa668a276..e137c2c397c2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -27,4 +27,6 @@ u64 intel_rc6_residency_us(struct intel_rc6 *rc6, enum intel_rc6_res_type id);
void intel_rc6_print_residency(struct seq_file *m, const char *title,
enum intel_rc6_res_type id);
+bool intel_check_bios_c6_setup(struct intel_rc6 *rc6);
+
#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
index fa23c4dce00b..cd4587098162 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -29,6 +29,7 @@ struct intel_rc6 {
u64 cur_residency[INTEL_RC6_RES_MAX];
u32 ctl_enable;
+ u32 bios_rc_state;
struct drm_i915_gem_object *pctx;
@@ -36,6 +37,7 @@ struct intel_rc6 {
bool enabled : 1;
bool manual : 1;
bool wakeref : 1;
+ bool bios_state_captured : 1;
};
#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f3ad93db0b21..2a3217e2890f 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -54,6 +54,7 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
struct resource *root_res;
resource_size_t rebar_size;
resource_size_t current_size;
+ intel_wakeref_t wakeref;
u32 pci_cmd;
int i;
@@ -102,15 +103,25 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
return;
}
- /* First disable PCI memory decoding references */
- pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
- pci_write_config_dword(pdev, PCI_COMMAND,
- pci_cmd & ~PCI_COMMAND_MEMORY);
+ /*
+ * Releasing forcewake during BAR resizing results in later forcewake
+ * ack timeouts, and the former can happen at any time - it is
+ * asynchronous. Grabbing all forcewakes prevents it.
+ */
+ with_intel_runtime_pm(i915->uncore.rpm, wakeref) {
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- _resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
+ /* First disable PCI memory decoding references */
+ pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_dword(pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_MEMORY);
- pci_assign_unassigned_bus_resources(pdev->bus);
- pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
+ _resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+ pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ }
}
#else
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
@@ -158,7 +169,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
- if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
+ if (!IS_DG1(uncore->i915))
return false;
*start = 0;
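
The BAR-resize change above wraps the PCI reconfiguration in a runtime-PM wakeref and holds all forcewake domains for its duration; the scoped pattern, reduced to its skeleton, is:

	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915->uncore.rpm, wakeref) {
		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

		/* ... reconfigure hardware that must not lose forcewake ... */

		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	}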
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 9c1ae070ee7b..4b56ec3743cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -63,7 +63,7 @@ static int render_state_setup(struct intel_renderstate *so,
u32 s = rodata->batch[i];
if (i * 4 == rodata->reloc[reloc_index]) {
- u64 r = s + so->vma->node.start;
+ u64 r = s + i915_vma_offset(so->vma);
s = lower_32_bits(r);
if (HAS_64BIT_RELOC(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 24736ebee17c..797ea8340467 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -14,6 +14,8 @@
#include "gt/intel_gt_regs.h"
+#include "gt/uc/intel_gsc_fw.h"
+
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
@@ -35,16 +37,6 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
-static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw_fw(uncore, reg, 0, set);
-}
-
-static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw_fw(uncore, reg, clr, 0);
-}
-
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -212,7 +204,7 @@ static int g4x_do_reset(struct intel_gt *gt,
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
- rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
@@ -234,7 +226,7 @@ static int g4x_do_reset(struct intel_gt *gt,
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
- rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
@@ -278,25 +270,52 @@ out:
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
struct intel_uncore *uncore = gt->uncore;
+ int loops;
int err;
/*
+ * On some platforms, e.g. Jasperlake, we see that the engine register
+ * state is not cleared until shortly after GDRST reports completion,
+ * causing a failure as we try to immediately resume while the internal
+ * state is still in flux. If we immediately repeat the reset, the
+ * second reset appears to serialise with the first, and since it is a
+ * no-op, the registers should retain their reset value. However, there
+ * is still a concern that upon leaving the second reset, the internal
+ * engine state is still in flux and not ready for resuming.
+ *
+ * Starting on MTL, there are some prep steps that we need to do when
+ * resetting some engines that need to be applied every time we write to
+ * GEN6_GDRST. As those are time consuming (tens of ms), we don't want
+ * to perform that twice, so, since the Jasperlake issue hasn't been
+ * observed on MTL, we avoid repeating the reset on newer platforms.
+ */
+ loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
+
+ /*
* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
- intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
-
- /* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(uncore,
- GEN6_GDRST, hw_domain_mask, 0,
- 500, 0,
- NULL);
+ do {
+ intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+
+ /* Wait for the device to ack the reset requests. */
+ err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
+ hw_domain_mask, 0,
+ 2000, 0,
+ NULL);
+ } while (err == 0 && --loops);
if (err)
GT_TRACE(gt,
"Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
+ /*
+ * As we have observed that the engine state is still volatile
+ * after GDRST is acked, impose a small delay to let everything settle.
+ */
+ udelay(50);
+
return err;
}
@@ -448,7 +467,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
* to reset it as well (we will unlock it once the reset sequence is
* completed).
*/
- rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+ intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
ret = __intel_wait_for_register_fw(uncore,
sfc_lock.ack_reg,
@@ -498,7 +517,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
get_sfc_forced_lock_data(engine, &sfc_lock);
- rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+ intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
}
static int __gen11_reset_engines(struct intel_gt *gt,
@@ -678,6 +697,74 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
return NULL;
}
+static int __reset_guc(struct intel_gt *gt)
+{
+ u32 guc_domain =
+ GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+
+ return gen6_hw_domain_reset(gt, guc_domain);
+}
+
+static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+{
+ if (!IS_METEORLAKE(gt->i915) || !HAS_ENGINE(gt, GSC0))
+ return false;
+
+ if (!__HAS_ENGINE(engine_mask, GSC0))
+ return false;
+
+ return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
+}
+
+static intel_engine_mask_t
+wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
+{
+ if (!needs_wa_14015076503(gt, engine_mask))
+ return engine_mask;
+
+ /*
+ * wa_14015076503: if the GSC FW is loaded, we need to alert it that
+ * we're going to do a GSC engine reset and then wait for 200ms for the
+ * FW to get ready for it. However, if this is the first ALL_ENGINES
+ * reset attempt and the GSC is not busy, we can try to instead reset
+ * the GuC and all the other engines individually to avoid the 200ms
+ * wait.
+ * Skipping the GSC engine is safe because, differently from other
+ * engines, the GSCCS's only role is to forward the commands to the GSC
+ * FW, so it doesn't have any HW outside of the CS itself and therefore
+ * it has no state that we don't explicitly re-init on resume or on
+ * context switch (e.g. the LRC or power context). The HW for the GSC uC
+ * is managed by the GSC FW so we don't need to care about that.
+ */
+ if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
+ __reset_guc(gt);
+ engine_mask = gt->info.engine_mask & ~BIT(GSC0);
+ } else {
+ intel_uncore_rmw(gt->uncore,
+ HECI_H_GS1(MTL_GSC_HECI2_BASE),
+ 0, HECI_H_GS1_ER_PREP);
+
+ /* make sure the reset bit is clear when writing the CSR reg */
+ intel_uncore_rmw(gt->uncore,
+ HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ HECI_H_CSR_RST, HECI_H_CSR_IG);
+ msleep(200);
+ }
+
+ return engine_mask;
+}
+
+static void
+wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+{
+ if (!needs_wa_14015076503(gt, engine_mask))
+ return;
+
+ intel_uncore_rmw(gt->uncore,
+ HECI_H_GS1(MTL_GSC_HECI2_BASE),
+ HECI_H_GS1_ER_PREP, 0);
+}
+
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
@@ -695,10 +782,16 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
- GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
+ intel_engine_mask_t reset_mask;
+
+ reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
+
+ GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
preempt_disable();
- ret = reset(gt, engine_mask, retry);
+ ret = reset(gt, reset_mask, retry);
preempt_enable();
+
+ wa_14015076503_end(gt, reset_mask);
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
@@ -723,14 +816,12 @@ bool intel_has_reset_engine(const struct intel_gt *gt)
int intel_reset_guc(struct intel_gt *gt)
{
- u32 guc_domain =
- GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
int ret;
GEM_BUG_ON(!HAS_GT_UC(gt->i915));
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(gt, guc_domain);
+ ret = __reset_guc(gt);
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 9312b29f5a97..80351f0a856c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -51,7 +51,7 @@ struct intel_reset {
/**
* Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
+ * that wait for i915->mm.wedged to settle.
*/
wait_queue_head_t queue;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 15ec64d881c4..fb99143be98e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -53,7 +53,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (unlikely(ret))
goto err_unpin;
- if (i915_vma_is_map_and_fenceable(vma)) {
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
} else {
int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
@@ -98,7 +98,7 @@ void intel_ring_unpin(struct intel_ring *ring)
return;
i915_vma_unset_ggtt_write(vma);
- if (i915_vma_is_map_and_fenceable(vma))
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
i915_vma_unpin_iomap(vma);
else
i915_gem_object_unpin_map(vma->obj);
@@ -116,7 +116,7 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_PM_VOLATILE);
- if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
+ if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 356c787e11d3..3fd795c3263f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -897,7 +897,7 @@ static int clear_residuals(struct i915_request *rq)
}
ret = engine->emit_bb_start(rq,
- engine->wa_ctx.vma->node.start, 0,
+ i915_vma_offset(engine->wa_ctx.vma), 0,
0);
if (ret)
return ret;
@@ -1052,9 +1052,9 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
static void ring_release(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
+ drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 9ad3bc7201cb..b2671ac59dc0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,6 +7,7 @@
#include <drm/i915_drm.h>
+#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
@@ -1676,7 +1677,6 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1685,21 +1685,6 @@ static void vlv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- i915->mem_freq = 800;
- break;
- case 2:
- i915->mem_freq = 1066;
- break;
- case 3:
- i915->mem_freq = 1333;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1726,7 +1711,6 @@ static void vlv_rps_init(struct intel_rps *rps)
static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1735,18 +1719,6 @@ static void chv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_cck_read(i915, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- i915->mem_freq = 2000;
- break;
- default:
- i915->mem_freq = 1600;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -2074,16 +2046,6 @@ void intel_rps_sanitize(struct intel_rps *rps)
rps_disable_interrupts(rps);
}
-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps)
-{
- struct drm_i915_private *i915 = rps_to_i915(rps);
- i915_reg_t rpstat;
-
- rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
-
- return intel_uncore_read_fw(rps_to_gt(rps)->uncore, rpstat);
-}
-
u32 intel_rps_read_rpstat(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
@@ -2094,7 +2056,7 @@ u32 intel_rps_read_rpstat(struct intel_rps *rps)
return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
}
-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 cagf;
@@ -2117,10 +2079,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
return cagf;
}
-static u32 read_cagf(struct intel_rps *rps)
+static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ i915_reg_t r = INVALID_MMIO_REG;
u32 freq;
/*
@@ -2128,22 +2091,30 @@ static u32 read_cagf(struct intel_rps *rps)
* registers will return 0 freq when GT is in RC6
*/
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
- freq = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
+ r = MTL_MIRROR_TARGET_WP1;
} else if (GRAPHICS_VER(i915) >= 12) {
- freq = intel_uncore_read(uncore, GEN12_RPSTAT1);
+ r = GEN12_RPSTAT1;
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_punit_get(i915);
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
} else if (GRAPHICS_VER(i915) >= 6) {
- freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
+ r = GEN6_RPSTAT1;
} else {
- freq = intel_uncore_read(uncore, MEMSTAT_ILK);
+ r = MEMSTAT_ILK;
}
+ if (i915_mmio_reg_valid(r))
+ freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
+
return intel_rps_get_cagf(rps, freq);
}
+static u32 read_cagf(struct intel_rps *rps)
+{
+ return __read_cagf(rps, true);
+}
+
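The __read_cagf()/read_cagf() split above follows the usual one-worker-plus-thin-wrappers shape: a bool picks whether the read itself takes forcewake, so a caller that already holds forcewake can use the raw variant. A small self-contained sketch of the same shape, using a pthread mutex as a stand-in for forcewake; all names here are hypothetical, not i915 APIs.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for forcewake */
static uint32_t hw_reg = 0x1234;

static uint32_t __read_reg(int take_lock)
{
	uint32_t val;

	if (take_lock)
		pthread_mutex_lock(&hw_lock);
	val = hw_reg;			/* the real code does an MMIO read here */
	if (take_lock)
		pthread_mutex_unlock(&hw_lock);

	return val;
}

static uint32_t read_reg(void)    { return __read_reg(1); }
static uint32_t read_reg_fw(void) { return __read_reg(0); }	/* caller already holds the lock */

int main(void)
{
	printf("0x%x 0x%x\n", read_reg(), read_reg_fw());
	return 0;
}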
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
@@ -2156,7 +2127,12 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
return freq;
}
-u32 intel_rps_read_punit_req(struct intel_rps *rps)
+u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
+{
+ return intel_gpu_freq(rps, __read_cagf(rps, false));
+}
+
+static u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
@@ -2670,7 +2646,7 @@ bool rps_read_mask_mmio(struct intel_rps *rps,
static struct drm_i915_private __rcu *ips_mchdev;
-/**
+/*
* Tells the intel_ips driver that the i915 driver is now loaded, if
* IPS got loaded first.
*
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 9e1cad9ba0e9..a3fa987aa91f 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -12,6 +12,9 @@
struct i915_request;
struct drm_printer;
+#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
+
void intel_rps_init_early(struct intel_rps *rps);
void intel_rps_init(struct intel_rps *rps);
void intel_rps_sanitize(struct intel_rps *rps);
@@ -34,8 +37,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
int intel_gpu_freq(struct intel_rps *rps, int val);
int intel_freq_opcode(struct intel_rps *rps, int val);
-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
@@ -46,10 +49,8 @@ int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
-u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
u32 intel_rps_read_rpstat(struct intel_rps *rps);
-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps);
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
void intel_rps_raise_unslice(struct intel_rps *rps);
void intel_rps_lower_unslice(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 9173ec75f2b8..6507fa3f6d1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
+ * i915->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6c6198a257ac..1141f875f5bd 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -6,6 +6,7 @@
#include <linux/string_helpers.h>
#include "i915_drv.h"
+#include "i915_perf_types.h"
#include "intel_engine_regs.h"
#include "intel_gt_regs.h"
#include "intel_sseu.h"
@@ -677,7 +678,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* If i915/perf is active, we want a stable powergating configuration
* on the system. Use the configuration pinned by i915/perf.
*/
- if (gt->perf.exclusive_stream)
+ if (gt->perf.group && gt->perf.group[PERF_GROUP_OAG].exclusive_stream)
req_sseu = &gt->perf.sseu;
slices = hweight8(req_sseu->slice_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index aa87d3832d60..d7e8c374f153 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -27,7 +27,7 @@ struct drm_printer;
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
* I915_MAX_SS_FUSE_BITS value below).
*/
-#define GEN_MAX_SS_PER_HSW_SLICE 6
+#define GEN_MAX_SS_PER_HSW_SLICE 8
/*
* Maximum number of 32-bit registers used by hardware to express the
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 2afb4f80a954..b925ef47304b 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -30,6 +30,9 @@
* creation to have a "primed golden context", i.e. a context image that
* already contains the changes needed to all the registers.
*
+ * Context workarounds should be implemented in the \*_ctx_workarounds_init()
+ * variants respective to the targeted platforms.
+ *
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
* common power domain and they are reset together. This happens on some
@@ -42,15 +45,28 @@
* saves/restores their values before/after the reset takes place. See
* ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
*
+ * Workarounds for registers specific to RCS and CCS should be implemented in
+ * rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
+ * registers belonging to BCS, VCS or VECS should be implemented in
+ * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
+ * engine's MMIO range but that are part of the common RCS/CCS reset domain
+ * should be implemented in general_render_compute_wa_init().
+ *
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
*
+ * GT workarounds should be implemented in the \*_gt_workarounds_init()
+ * variants respective to the targeted platforms.
+ *
* - Register whitelist: some workarounds need to be implemented in userspace,
* but need to touch privileged registers. The whitelist in the kernel
* instructs the hardware to allow the access to happen. From the kernel side,
* this is just a special case of a MMIO workaround (as we write the list of
* these to/be-whitelisted registers to some special HW registers).
*
+ * Register whitelisting should be done in the \*_whitelist_build() variants
+ * respective to the targeted platforms.
+ *
* - Workaround batchbuffers: buffers that get executed automatically by the
* hardware on every HW context restore. These buffers are created and
* programmed in the default context so the hardware always go through those
@@ -225,6 +241,12 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
}
static void
+wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
+{
+ wa_mcr_write_clr_set(wal, reg, ~0, set);
+}
+
+static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, set, set);
@@ -645,7 +667,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
+ wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_mcr_add(wal,
@@ -721,9 +743,13 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_224,
0, false);
- if (!IS_DG1(i915))
+ if (!IS_DG1(i915)) {
/* Wa_1806527549 */
wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
+
+ /* Wa_1606376872 */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
+ }
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -771,11 +797,45 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_14014947963:dg2 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
- IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+ IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
+ /* Wa_18018764978:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
+ IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+ wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+
/* Wa_15010599737:dg2 */
- wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+ wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+
+ /* Wa_18019271663:dg2 */
+ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+}
+
+static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
+ /* Wa_14014947963 */
+ wa_masked_field_set(wal, VF_PREEMPTION,
+ PREEMPTION_VERTEX_COUNT, 0x4000);
+
+ /* Wa_16013271637 */
+ wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+ /* Wa_18019627453 */
+ wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
+
+ /* Wa_18018764978 */
+ wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+ }
+
+ /* Wa_18019271663 */
+ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
@@ -864,7 +924,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_PONTEVECCHIO(i915))
+ if (IS_METEORLAKE(i915))
+ mtl_ctx_workarounds_init(engine, wal);
+ else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
@@ -1347,6 +1409,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
/* Wa_1407352427:icl,ehl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
PSDUNIT_CLKGATE_DIS);
@@ -1405,54 +1474,17 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- gen12_gt_workarounds_init(gt, wal);
-
- /* Wa_1409420604:tgl */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
-
- /* Wa_1607087056:tgl also know as BUG:1409180338 */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
- /* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
-}
-
-static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
-
gen12_gt_workarounds_init(gt, wal);
- /* Wa_1607087056:dg1 */
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
/* Wa_1409420604:dg1 */
- if (IS_DG1(i915))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
+ wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
/* Wa_1408615072:dg1 */
/* Empirical testing shows this register is unaffected by engine reset. */
- if (IS_DG1(i915))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1465,6 +1497,12 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1409757795:xehpsdv */
wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
/* Wa_16011155590:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
@@ -1507,6 +1545,16 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14011060649:xehpsdv */
wa_14011060649(gt, wal);
+
+ /* Wa_14012362059:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_14014368820:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
+ INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010670810:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1547,6 +1595,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
DSS_ROUTER_CLKGATE_DIS);
}
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012362059:dg2 */
+ wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
/* Wa_14010948348:dg2_g10 */
wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
@@ -1592,20 +1646,32 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14011028019:dg2_g10 */
wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+
+ /* Wa_14010680813:dg2_g10 */
+ wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL,
+ CONTROL_BLOCK_CLKGATE_DIS |
+ EGRESS_BLOCK_CLKGATE_DIS |
+ TAG_BLOCK_CLKGATE_DIS);
}
/* Wa_14014830051:dg2 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
-
/* Wa_14015795083 */
- wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+
+ /* Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_1509235366:dg2 */
+ wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
+ INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010648519:dg2 */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1614,13 +1680,30 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
pvc_init_mcr(gt, wal);
/* Wa_14015795083 */
- wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+
+ /* Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_16016694945 */
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* FIXME: Actual workarounds will be added in future patch(es) */
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
+ /* Wa_14014830051 */
+ wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+ /* Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ }
/*
* Unlike older platforms, we no longer setup implicit steering here;
@@ -1632,16 +1715,53 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* FIXME: Actual workarounds will be added in future patch(es) */
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_18018781329
+ *
+ * Note that although these registers are MCR on the primary
+ * GT, the media GT's versions are regular singleton registers.
+ */
+ wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+ }
debug_dump_steering(gt);
}
+/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them through the
+ * workaround infrastructure to make sure they're (re)applied at the proper
+ * times.
+ *
+ * The programming in this function is for settings that persist through
+ * engine resets and also are not part of any engine's register state context.
+ * I.e., settings that only need to be re-applied in the event of a full GT
+ * reset.
+ */
+static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ wa_mcr_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
+ }
+
+ if (IS_DG2(gt->i915)) {
+ wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ }
+}
+
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
+ gt_tuning_settings(gt, wal);
+
if (gt->type == GT_MEDIA) {
if (MEDIA_VER(i915) >= 13)
xelpmp_gt_workarounds_init(gt, wal);
@@ -1661,8 +1781,6 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
- else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
@@ -1752,7 +1870,8 @@ static void wa_list_apply(const struct i915_wa_list *wal)
fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&uncore->lock, flags);
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
@@ -1781,7 +1900,8 @@ static void wa_list_apply(const struct i915_wa_list *wal)
}
intel_uncore_forcewake_put__locked(uncore, fw);
- spin_unlock_irqrestore(&uncore->lock, flags);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
}
void intel_gt_apply_workarounds(struct intel_gt *gt)
@@ -1802,7 +1922,8 @@ static bool wa_list_verify(struct intel_gt *gt,
fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&uncore->lock, flags);
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
@@ -1812,7 +1933,8 @@ static bool wa_list_verify(struct intel_gt *gt,
wal->name, from);
intel_uncore_forcewake_put__locked(uncore, fw);
- spin_unlock_irqrestore(&uncore->lock, flags);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
return ok;
}
@@ -2065,37 +2187,20 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
+
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
+
break;
default:
break;
}
}
-static void dg1_whitelist_build(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- tgl_whitelist_build(engine);
-
- /* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
- (engine->class == RENDER_CLASS ||
- engine->class == COPY_ENGINE_CLASS))
- whitelist_reg_ext(w, RING_ID(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
-}
-
-static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
-{
- allow_read_ctx_timestamp(engine);
-}
-
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
- allow_read_ctx_timestamp(engine);
-
switch (engine->class) {
case RENDER_CLASS:
/*
@@ -2112,6 +2217,9 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
break;
case COMPUTE_CLASS:
/* Wa_16011157294:dg2_g10 */
@@ -2143,12 +2251,25 @@ static void blacklist_trtt(struct intel_engine_cs *engine)
static void pvc_whitelist_build(struct intel_engine_cs *engine)
{
- allow_read_ctx_timestamp(engine);
-
/* Wa_16014440446:pvc */
blacklist_trtt(engine);
}
+static void mtl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
+ break;
+ default:
+ break;
+ }
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2156,14 +2277,14 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, engine->gt, "whitelist", engine->name);
- if (IS_PONTEVECCHIO(i915))
+ if (IS_METEORLAKE(i915))
+ mtl_whitelist_build(engine);
+ else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
- xehpsdv_whitelist_build(engine);
- else if (IS_DG1(i915))
- dg1_whitelist_build(engine);
+ ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2266,24 +2387,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_DG2(i915)) {
- /* Wa_1509235366:dg2 */
- wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
- GLOBAL_INVALIDATION_MODE);
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
+ /* Wa_22014600077 */
+ wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
+ ENABLE_EU_COUNT_FOR_TDL_FLUSH);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
- /* Wa_14013392000:dg2_g11 */
- wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
- }
-
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
- /* Wa_1509727124:dg2 */
+ /* Wa_1509727124 */
wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
SC_DISABLE_POWER_OPTIMIZATION_EBB);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
+ IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
+ /* Wa_22012856258 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14013392000:dg2_g11 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+ }
+
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
@@ -2291,21 +2423,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
- IS_DG2_G11(i915)) {
- /*
- * Wa_22012826095:dg2
- * Wa_22013059131:dg2
- */
- wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
- MAXREQS_PER_BANK,
- REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
-
- /* Wa_22013059131:dg2 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
- FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
- }
-
/* Wa_1308578152:dg2_g10 when first gslice is fused off */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
needs_wa_1308578152(engine)) {
@@ -2315,14 +2432,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
- /* Wa_22013037850:dg2 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
- DISABLE_128B_EVICTION_COMMAND_UDW);
-
- /* Wa_22012856258:dg2 */
- wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
- GEN12_DISABLE_READ_SUPPRESSION);
-
/*
* Wa_22010960976:dg2
* Wa_14013347512:dg2
@@ -2338,28 +2447,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
-
- /*
- * Wa_14010918519:dg2_g10
- *
- * LSC_CHICKEN_BIT_0 always reads back as 0 is this stepping,
- * so ignoring verification.
- */
- wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
- FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
- 0, false);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
/* Wa_22010430635:dg2 */
wa_mcr_masked_en(wal,
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010648519:dg2 */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2371,18 +2466,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
- /* Wa_14010680813:dg2_g10 */
- wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
- EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
- }
-
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
- /* Wa_14012362059:dg2 */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
- }
-
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
IS_DG2_G10(i915)) {
/* Wa_22014600077:dg2 */
@@ -2392,27 +2475,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
true);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1607138336:tgl[a0],dg1[a0]
- * Wa_1607063988:tgl[a0],dg1[a0]
- */
- wa_write_or(wal,
- GEN9_CTX_PREEMPT_REG,
- GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- }
-
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1606679103:tgl
- * (see also Wa_1606682166:icl)
- */
- wa_write_or(wal,
- GEN7_SARCHKMD,
- GEN7_DISABLE_SAMPLER_PREFETCH);
- }
-
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
@@ -2442,30 +2504,22 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
- IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
+ /* Wa_1409804808 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
- */
+ /* Wa_14010229206 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
+ if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
- * Wa_1607030317:tgl
- * Wa_1607186500:tgl
- * Wa_1607297627:tgl,rkl,dg1[a0],adlp
+ * Wa_1607297627
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate
* it applies to all steppings so we trust the "all steppings."
- * For DG1 this only applies to A0.
*/
wa_masked_en(wal,
RING_PSMI_CTL(RENDER_RING_BASE),
@@ -2532,13 +2586,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_ENABLE_32_PLANE_MODE);
/*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
*/
@@ -2892,30 +2939,9 @@ static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
- if (IS_PONTEVECCHIO(i915)) {
- wa_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- }
-
- if (IS_DG2(i915)) {
- wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ if (IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
- /*
- * This is also listed as Wa_22012654132 for certain DG2
- * steppings, but the tuning setting programming is a superset
- * since it applies to all DG2 variants and steppings.
- *
- * Note that register 0xE420 is write-only and cannot be read
- * back for verification on DG2 (due to Wa_14012342262), so
- * we need to explicitly skip the readback.
- */
- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
- 0 /* write-only, so skip validation */,
- true);
- }
-
/*
* This tuning setting proves beneficial only on ATS-M designs; the
* default "age based" setting is optimal on regular DG2 and other
@@ -2924,6 +2950,9 @@ add_render_compute_tuning_settings(struct drm_i915_private *i915,
if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP);
+
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
/*
@@ -2942,9 +2971,93 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
add_render_compute_tuning_settings(i915, wal);
- if (IS_PONTEVECCHIO(i915)) {
- /* Wa_16016694945 */
- wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
+ if (GRAPHICS_VER(i915) >= 11) {
+ /* This is not a Wa (although it is referred to as
+ * WaSetInidrectStateOverride in places); it allows
+ * applications that reference sampler states through
+ * the BindlessSamplerStateBaseAddress to have their
+ * border color relative to DynamicStateBaseAddress
+ * rather than BindlessSamplerStateBaseAddress.
+ *
+ * Otherwise SAMPLER_STATE border colors have to be
+ * copied in multiple heaps (DynamicStateBaseAddress &
+ * BindlessSamplerStateBaseAddress)
+ *
+ * BSpec: 46052
+ */
+ wa_mcr_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
+ }
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
+ /* Wa_14017856879 */
+ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
+ /*
+ * Wa_14017066071
+ * Wa_14017654203
+ */
+ wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
+ MTL_DISABLE_SAMPLER_SC_OOO);
+
+ if (IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
+ /* Wa_22015279794 */
+ wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
+ DISABLE_PREFETCH_INTO_IC);
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_22013037850 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
+ }
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_PONTEVECCHIO(i915) ||
+ IS_DG2(i915)) {
+ /* Wa_22014226127 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+ }
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2(i915)) {
+ /* Wa_18017747507 */
+ wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+ */
+ wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+ /* Wa_22013059131:dg2 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_14010918519:dg2_g10
+ *
+ * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
+ * so ignoring verification.
+ */
+ wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
+ FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
+ 0, false);
}
if (IS_XEHPSDV(i915)) {
@@ -2958,42 +3071,17 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-
/* Wa_14010449647:xehpsdv */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
-
- /* Wa_14012362059:xehpsdv */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_14014368820:xehpsdv */
- wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
- GLOBAL_INVALIDATION_MODE);
}
if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
/* Wa_14015227452:dg2,pvc */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
- /* Wa_22014226127:dg2,pvc */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
-
/* Wa_16015675438:dg2,pvc */
wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
-
- /* Wa_18018781329:dg2,pvc */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
if (IS_DG2(i915)) {
@@ -3002,10 +3090,20 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
-
- /* Wa_18017747507:dg2 */
- wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
}
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) || IS_DG2_G11(i915))
+ /*
+ * Wa_22012654132
+ *
+ * Note that register 0xE420 is write-only and cannot be read
+ * back for verification on DG2 (due to Wa_14012342262), so
+ * we need to explicitly skip the readback.
+ */
+ wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
}
static void
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 881b64f3e7b9..542ce6d2de19 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -178,7 +178,7 @@ static int perf_mi_bb_start(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- batch->node.start, 8,
+ i915_vma_offset(batch), 8,
0);
if (err)
goto out;
@@ -321,7 +321,7 @@ static int perf_mi_noop(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- base->node.start, 8,
+ i915_vma_offset(base), 8,
0);
if (err)
goto out;
@@ -331,8 +331,8 @@ static int perf_mi_noop(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- nop->node.start,
- nop->node.size,
+ i915_vma_offset(nop),
+ i915_vma_size(nop),
0);
if (err)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index ab2e9a6a2452..736b89a8ecf5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -2737,11 +2737,11 @@ static int create_gang(struct intel_engine_cs *engine,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
if (*prev) {
- u64 offset = (*prev)->batch->node.start;
+ u64 offset = i915_vma_offset((*prev)->batch);
/* Terminate the spinner in the next lower priority batch. */
*cs++ = MI_STORE_DWORD_IMM_GEN4;
@@ -2763,13 +2763,11 @@ static int create_gang(struct intel_engine_cs *engine,
rq->batch = i915_vma_get(vma);
i915_request_get(rq);
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
- i915_vma_unlock(vma);
i915_request_add(rq);
if (err)
goto err_rq;
@@ -3095,7 +3093,7 @@ create_gpr_user(struct intel_engine_cs *engine,
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
- addr = result->node.start + offset + i * sizeof(*cs);
+ addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = CS_GPR(engine, 2 * i);
*cs++ = lower_32_bits(addr);
@@ -3105,8 +3103,8 @@ create_gpr_user(struct intel_engine_cs *engine,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(result));
+ *cs++ = upper_32_bits(i915_vma_offset(result));
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -3177,16 +3175,14 @@ create_gpr_client(struct intel_engine_cs *engine,
goto out_batch;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
i915_vma_lock(batch);
if (!err)
err = i915_vma_move_to_active(batch, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- batch->node.start,
+ i915_vma_offset(batch),
PAGE_SIZE, 0);
i915_vma_unlock(batch);
i915_vma_unpin(batch);
@@ -3514,13 +3510,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
}
if (vma) {
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
- i915_vma_unlock(vma);
}
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index b46425aeb2f0..0971241707ce 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -63,8 +63,8 @@ static void measure_clocks(struct intel_engine_cs *engine,
udelay(1000);
- dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += read_timestamp(engine);
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
local_irq_enable();
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index bc05ef48c194..8b0d84f2aad2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -96,7 +96,8 @@ err_ctx:
static u64 hws_address(const struct i915_vma *hws,
const struct i915_request *rq)
{
- return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+ return i915_vma_offset(hws) +
+ offset_in_page(sizeof(u32) * rq->fence.context);
}
static struct i915_request *
@@ -180,8 +181,8 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
+ *batch++ = upper_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
@@ -194,7 +195,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
@@ -207,7 +208,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -219,7 +220,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
if (GRAPHICS_VER(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
cancel_rq:
if (err) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index cfd736d88939..779fadcec7c4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -3,7 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include "intel_pm.h" /* intel_gpu_freq() */
#include "selftest_llc.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 7c56ffd2c659..a78a3d2c2e16 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -599,9 +599,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
*cs++ = 0;
}
- i915_vma_lock(scratch);
- err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
+ err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
i915_request_get(rq);
i915_request_add(rq);
@@ -1030,8 +1028,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
while (len--) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = hw[dw];
- *cs++ = lower_32_bits(scratch->node.start + x);
- *cs++ = upper_32_bits(scratch->node.start + x);
+ *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
+ *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
dw += 2;
x += 4;
@@ -1098,8 +1096,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_before->node.start);
- *cs++ = upper_32_bits(b_before->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_before));
+ *cs++ = upper_32_bits(i915_vma_offset(b_before));
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
@@ -1114,8 +1112,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_after->node.start);
- *cs++ = upper_32_bits(b_after->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_after));
+ *cs++ = upper_32_bits(i915_vma_offset(b_after));
intel_ring_advance(rq, cs);
@@ -1236,8 +1234,8 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(batch->node.start);
- *cs++ = upper_32_bits(batch->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(batch));
+ *cs++ = upper_32_bits(i915_vma_offset(batch));
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 0dc5309c90a4..e677f2da093d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+#include "selftests/igt_spinner.h"
#include "selftests/i915_random.h"
static const unsigned int sizes[] = {
@@ -486,7 +487,8 @@ global_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
static int live_migrate_copy(void *arg)
{
- struct intel_migrate *migrate = arg;
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
I915_RND_STATE(prng);
int i;
@@ -507,7 +509,8 @@ static int live_migrate_copy(void *arg)
static int live_migrate_clear(void *arg)
{
- struct intel_migrate *migrate = arg;
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
I915_RND_STATE(prng);
int i;
@@ -527,6 +530,149 @@ static int live_migrate_clear(void *arg)
return 0;
}
+struct spinner_timer {
+ struct timer_list timer;
+ struct igt_spinner spin;
+};
+
+static void spinner_kill(struct timer_list *timer)
+{
+ struct spinner_timer *st = from_timer(st, timer, timer);
+
+ igt_spinner_end(&st->spin);
+ pr_info("%s\n", __func__);
+}
+
+static int live_emit_pte_full_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+ struct drm_i915_private *i915 = migrate->context->engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq, *prev;
+ struct spinner_timer st;
+ struct sgt_dma it;
+ int len, sz, err;
+ u32 *cs;
+
+ /*
+ * Simple regression test to check that we don't trample the
+ * rq->reserved_space when returning from emit_pte(), if the ring is
+ * nearly full.
+ */
+
+ if (igt_spinner_init(&st.spin, to_gt(i915)))
+ return -ENOMEM;
+
+ obj = i915_gem_object_create_internal(i915, 2 * PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_spinner;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err)
+ goto out_obj;
+
+ ce = intel_migrate_create_context(migrate);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_obj;
+ }
+
+ ce->ring_size = SZ_4K; /* Not too big */
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto out_put;
+
+ rq = igt_spinner_create_request(&st.spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&st.spin, rq)) {
+ err = -EIO;
+ goto out_unpin;
+ }
+
+ /*
+ * Fill the rest of the ring leaving I915_EMIT_PTE_NUM_DWORDS +
+ * ring->reserved_space at the end. To actually emit the PTEs we require
+ * slightly more than I915_EMIT_PTE_NUM_DWORDS, since our object size is
+ * greater than PAGE_SIZE. The correct behaviour is to wait for more
+ * ring space in emit_pte(), otherwise we trample on the reserved_space
+ * resulting in crashes when later submitting the rq.
+ */
+
+ prev = NULL;
+ do {
+ if (prev)
+ i915_request_add(rq);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ sz = (rq->ring->space - rq->reserved_space) / sizeof(u32) -
+ I915_EMIT_PTE_NUM_DWORDS;
+ sz = min_t(u32, sz, (SZ_1K - rq->reserved_space) / sizeof(u32) -
+ I915_EMIT_PTE_NUM_DWORDS);
+ cs = intel_ring_begin(rq, sz);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ memset32(cs, MI_NOOP, sz);
+ cs += sz;
+ intel_ring_advance(rq, cs);
+
+ pr_info("%s emit=%u sz=%d\n", __func__, rq->ring->emit, sz);
+
+ prev = rq;
+ } while (rq->ring->space > (rq->reserved_space +
+ I915_EMIT_PTE_NUM_DWORDS * sizeof(u32)));
+
+ timer_setup_on_stack(&st.timer, spinner_kill, 0);
+ mod_timer(&st.timer, jiffies + 2 * HZ);
+
+ /*
+ * This should wait for the spinner to be killed, otherwise we should go
+ * down in flames when doing i915_request_add().
+ */
+ pr_info("%s emite_pte ring space=%u\n", __func__, rq->ring->space);
+ it = sg_sgt(obj->mm.pages->sgl);
+ len = emit_pte(rq, &it, obj->cache_level, false, 0, CHUNK_SZ);
+ if (!len) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+ if (len < 0) {
+ err = len;
+ goto out_rq;
+ }
+
+out_rq:
+ i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
+ del_timer_sync(&st.timer);
+ destroy_timer_on_stack(&st.timer);
+out_unpin:
+ intel_context_unpin(ce);
+out_put:
+ intel_context_put(ce);
+out_obj:
+ i915_gem_object_put(obj);
+out_spinner:
+ igt_spinner_fini(&st.spin);
+ return err;
+}
+
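The new selftest above boils down to one piece of arithmetic: pad the ring with MI_NOOPs until only reserved_space plus a small dword headroom is left, then verify emit_pte() waits for space instead of eating into the reserved tail. A tiny standalone sketch of that computation with made-up numbers; the struct below is illustrative, not the real intel_ring.

#include <stdio.h>

struct fake_ring {
	unsigned int space;	/* free bytes left in the ring */
	unsigned int reserved;	/* bytes that must stay free for the request tail */
};

/* How many filler dwords leave exactly headroom_dw dwords beyond the reserve. */
static int filler_dwords(const struct fake_ring *ring, unsigned int headroom_dw)
{
	return (int)((ring->space - ring->reserved) / sizeof(unsigned int)) - (int)headroom_dw;
}

int main(void)
{
	struct fake_ring ring = { .space = 4096, .reserved = 64 };

	/* e.g. leave 12 dwords of headroom for the PTE emission itself */
	printf("emit %d MI_NOOPs\n", filler_dwords(&ring, 12));
	return 0;
}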
struct threaded_migrate {
struct intel_migrate *migrate;
struct task_struct *tsk;
@@ -593,7 +739,10 @@ static int __thread_migrate_copy(void *arg)
static int thread_migrate_copy(void *arg)
{
- return threaded_migrate(arg, __thread_migrate_copy, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_migrate_copy, 0);
}
static int __thread_global_copy(void *arg)
@@ -605,7 +754,10 @@ static int __thread_global_copy(void *arg)
static int thread_global_copy(void *arg)
{
- return threaded_migrate(arg, __thread_global_copy, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_global_copy, 0);
}
static int __thread_migrate_clear(void *arg)
@@ -624,12 +776,18 @@ static int __thread_global_clear(void *arg)
static int thread_migrate_clear(void *arg)
{
- return threaded_migrate(arg, __thread_migrate_clear, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_migrate_clear, 0);
}
static int thread_global_clear(void *arg)
{
- return threaded_migrate(arg, __thread_global_clear, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_global_clear, 0);
}
int intel_migrate_live_selftests(struct drm_i915_private *i915)
@@ -637,6 +795,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_migrate_copy),
SUBTEST(live_migrate_clear),
+ SUBTEST(live_emit_pte_full_ring),
SUBTEST(thread_migrate_copy),
SUBTEST(thread_migrate_clear),
SUBTEST(thread_global_copy),
@@ -647,7 +806,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
if (!gt->migrate.context)
return 0;
- return i915_subtests(tests, &gt->migrate);
+ return intel_gt_live_subtests(tests, gt);
}
static struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index f27cc28608d4..ca009a6a13bd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -228,9 +228,7 @@ static int check_mocs_engine(struct live_mocs *arg,
if (IS_ERR(rq))
return PTR_ERR(rq);
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 37c38bdd5f47..a9e0a91bc0e0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -20,7 +20,7 @@ __igt_reset_stolen(struct intel_gt *gt,
const char *msg)
{
struct i915_ggtt *ggtt = gt->ggtt;
- const struct resource *dsm = &gt->i915->dsm;
+ const struct resource *dsm = &gt->i915->dsm.stolen;
resource_size_t num_pages, page;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 70f9ac1ec2c7..87ceb0f374b6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -50,7 +50,7 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
} else {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
}
- *cs++ = vma->node.start + 4000;
+ *cs++ = i915_vma_offset(vma) + 4000;
*cs++ = STACK_MAGIC;
*cs++ = MI_BATCH_BUFFER_END;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 39f1b7564170..84e77e8dbba1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -122,14 +122,14 @@ create_spin_counter(struct intel_engine_cs *engine,
if (srm) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
- *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
- *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+ *cs++ = lower_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
+ *cs++ = upper_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
}
}
*cs++ = MI_BATCH_BUFFER_START_GEN8;
- *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
- *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+ *cs++ = lower_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
+ *cs++ = upper_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
GEM_BUG_ON(cs - base > end);
i915_gem_object_flush_map(obj);
@@ -299,13 +299,13 @@ int live_rps_clock_interval(void *arg)
for (i = 0; i < 5; i++) {
preempt_disable();
- dt_[i] = ktime_get();
cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_get();
udelay(1000);
- dt_[i] = ktime_sub(ktime_get(), dt_[i]);
cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
preempt_enable();
}
@@ -537,8 +537,8 @@ static u64 __measure_frequency(u32 *cntr, int duration_ms)
{
u64 dc, dt;
- dt = ktime_get();
dc = READ_ONCE(*cntr);
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = READ_ONCE(*cntr) - dc;
dt = ktime_get() - dt;
@@ -566,8 +566,8 @@ static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
{
u64 dc, dt;
- dt = ktime_get();
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
dt = ktime_get() - dt;
@@ -655,7 +655,7 @@ int live_rps_frequency_cs(void *arg)
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
@@ -794,7 +794,7 @@ int live_rps_frequency_srm(void *arg)
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
@@ -1094,8 +1094,8 @@ static u64 __measure_power(int duration_ms)
{
u64 dE, dt;
- dt = ktime_get();
dE = librapl_energy_uJ();
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dE = librapl_energy_uJ() - dE;
dt = ktime_get() - dt;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 522d0190509c..9f536c251179 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -825,7 +825,8 @@ static bool cmp_gte(u32 a, u32 b)
return a >= b;
}
-static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
+static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt,
+ struct intel_timeline *tl)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -834,7 +835,10 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
if (IS_ERR(obj))
return PTR_ERR(obj);
- w->map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ /* keep the same cache settings as timeline */
+ i915_gem_object_set_cache_coherency(obj, tl->hwsp_ggtt->obj->cache_level);
+ w->map = i915_gem_object_pin_map_unlocked(obj,
+ page_unmask_bits(tl->hwsp_ggtt->obj->mm.mapping));
if (IS_ERR(w->map)) {
i915_gem_object_put(obj);
return PTR_ERR(w->map);
@@ -1004,8 +1008,10 @@ static int live_hwsp_read(void *arg)
if (!tl->has_initial_breadcrumb)
goto out_free;
+ selftest_tl_pin(tl);
+
for (i = 0; i < ARRAY_SIZE(watcher); i++) {
- err = setup_watcher(&watcher[i], gt);
+ err = setup_watcher(&watcher[i], gt, tl);
if (err)
goto out;
}
@@ -1160,6 +1166,8 @@ out:
for (i = 0; i < ARRAY_SIZE(watcher); i++)
cleanup_watcher(&watcher[i]);
+ intel_timeline_unpin(tl);
+
if (igt_flush_test(gt->i915))
err = -EIO;
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
new file mode 100644
index 000000000000..e6cac1f15d6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
+
+#include "gen8_engine_cs.h"
+#include "i915_gem_ww.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void vma_set_qw(struct i915_vma *vma, u64 addr, u64 val)
+{
+ GEM_BUG_ON(addr < i915_vma_offset(vma));
+ GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val));
+ memset64(page_mask_bits(vma->obj->mm.mapping) +
+ (addr - i915_vma_offset(vma)), val, 1);
+}
+
+static int
+pte_tlbinv(struct intel_context *ce,
+ struct i915_vma *va,
+ struct i915_vma *vb,
+ u64 align,
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
+ u64 length,
+ struct rnd_state *prng)
+{
+ struct drm_i915_gem_object *batch;
+ struct drm_mm_node vb_node;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u64 addr;
+ int err;
+ u32 *cs;
+
+ batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ vma = i915_vma_instance(batch, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out;
+
+ /* Pin va at random but aligned offset after vma */
+ addr = round_up(vma->node.start + vma->node.size, align);
+ /* MI_CONDITIONAL_BATCH_BUFFER_END limits address to 48b */
+ addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
+ va->size, align);
+ err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);
+ if (err) {
+ pr_err("Cannot pin at %llx+%llx\n", addr, va->size);
+ goto out;
+ }
+ GEM_BUG_ON(i915_vma_offset(va) != addr);
+ if (vb != va) {
+ vb_node = vb->node;
+ vb->node = va->node; /* overwrites the _same_ PTE */
+ }
+
+ /*
+ * Now choose random dword at the 1st pinned page.
+ *
+ * SZ_64K pages on dg1 require that the whole PT be marked
+ * containing 64KiB entries. So we make sure that vma
+ * covers the whole PT, despite being randomly aligned to 64KiB
+ * and restrict our sampling to the 2MiB PT within where
+ * we know that we will be using 64KiB pages.
+ */
+ if (align == SZ_64K)
+ addr = round_up(addr, SZ_2M);
+ addr = igt_random_offset(prng, addr, addr + align, 8, 8);
+
+ if (va != vb)
+ pr_info("%s(%s): Sampling %llx, with alignment %llx, using PTE size %x (phys %x, sg %x), invalidate:%llx+%llx\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg,
+ addr & -length, length);
+
+ cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
+ *cs++ = MI_NOOP; /* for later termination */
+ /*
+ * Sample the target to see if we spot the updated backing store.
+ * Gen8 VCS compares the immediate value with the bitwise-and of two
+ * consecutive DWORDs pointed to by addr, while other gens/engines
+ * compare the value with the DWORD pointed to by addr. Moreover, we
+ * want to exercise DWORD-sized invalidations. The values below have
+ * been chosen to fulfill all these requirements.
+ */
+ *cs++ = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;
+ *cs++ = 0; /* break if *addr == 0 */
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+ vma_set_qw(va, addr, -1);
+ vma_set_qw(vb, addr, 0);
+
+ /* Keep sampling until we get bored */
+ *cs++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
+
+ i915_gem_object_flush_map(batch);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_va;
+ }
+
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
+ if (err) {
+ i915_request_add(rq);
+ goto out_va;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /* Short sleep to sanitycheck the batch is spinning before we begin */
+ msleep(10);
+ if (va == vb) {
+ if (!i915_request_completed(rq)) {
+ pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg);
+ err = -EIO;
+ }
+ } else if (!i915_request_completed(rq)) {
+ struct i915_vma_resource vb_res = {
+ .bi.pages = vb->obj->mm.pages,
+ .bi.page_sizes = vb->obj->mm.page_sizes,
+ .start = i915_vma_offset(vb),
+ .vma_size = i915_vma_size(vb)
+ };
+ unsigned int pte_flags = 0;
+
+ /* Flip the PTE between A and B */
+ if (i915_gem_object_is_lmem(vb->obj))
+ pte_flags |= PTE_LM;
+ ce->vm->insert_entries(ce->vm, &vb_res, 0, pte_flags);
+
+ /* Flush the PTE update to concurrent HW */
+ tlbinv(ce->vm, addr & -length, length);
+
+ if (wait_for(i915_request_completed(rq), HZ / 2)) {
+ pr_err("%s: Request did not complete; the COND_BBE did not read the updated PTE\n",
+ ce->engine->name);
+ err = -EINVAL;
+ }
+ } else {
+ pr_err("Spinner ended unexpectedly\n");
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+ cs = page_mask_bits(batch->mm.mapping);
+ *cs = MI_BATCH_BUFFER_END;
+ wmb();
+
+out_va:
+ if (vb != va)
+ vb->node = vb_node;
+ i915_vma_unpin(va);
+ if (i915_vma_unbind_unlocked(va))
+ err = -EIO;
+out:
+ i915_gem_object_put(batch);
+ return err;
+}
+
+static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
+{
+ /*
+ * Allocating the largest possible page size allows us to test all
+ * page types.
+ */
+ return i915_gem_object_create_lmem(gt->i915, SZ_1G, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
+{
+ /*
+ * SZ_64K pages require covering the whole 2M PT (gen8 to tgl/dg1).
+ * While that does not require the whole 2M block to be contiguous,
+ * it is easier to make it so, since we need that for SZ_2M pages.
+ * Since we randomly offset the start of the vma, we need a 4M object
+ * so that there is a 2M range within it that is suitable for SZ_64K PTEs.
+ */
+ return i915_gem_object_create_internal(gt->i915, SZ_4M);
+}
+
+static int
+mem_tlbinv(struct intel_gt *gt,
+ struct drm_i915_gem_object *(*create_fn)(struct intel_gt *),
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
+{
+ unsigned int ppgtt_size = RUNTIME_INFO(gt->i915)->ppgtt_size;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *A, *B;
+ struct i915_ppgtt *ppgtt;
+ struct i915_vma *va, *vb;
+ enum intel_engine_id id;
+ I915_RND_STATE(prng);
+ void *vaddr;
+ int err;
+
+ /*
+ * Check that the TLB invalidate is able to revoke an active
+ * page. We load a page into a spinning COND_BBE loop and then
+ * remap that page to a new physical address. The old address is
+ * retained in the TLB cache until we issue an invalidate, so the loop
+ * keeps spinning until then.
+ */
+
+ A = create_fn(gt);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_a;
+ }
+
+ B = create_fn(gt);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto out_a;
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_b;
+ }
+
+ GEM_BUG_ON(A->base.size != B->base.size);
+ if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
+ pr_warn("Failed to allocate contiguous pages for size %zx\n",
+ A->base.size);
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_b;
+ }
+
+ va = i915_vma_instance(A, &ppgtt->vm, NULL);
+ if (IS_ERR(va)) {
+ err = PTR_ERR(va);
+ goto out_vm;
+ }
+
+ vb = i915_vma_instance(B, &ppgtt->vm, NULL);
+ if (IS_ERR(vb)) {
+ err = PTR_ERR(vb);
+ goto out_vm;
+ }
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct i915_gem_ww_ctx ww;
+ struct intel_context *ce;
+ int bit;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+
+ for_i915_gem_ww(&ww, err, true)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_put;
+
+ for_each_set_bit(bit,
+ (unsigned long *)&RUNTIME_INFO(gt->i915)->page_sizes,
+ BITS_PER_TYPE(RUNTIME_INFO(gt->i915)->page_sizes)) {
+ unsigned int len;
+
+ if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
+ continue;
+
+ /* sanitycheck the semaphore wake up */
+ err = pte_tlbinv(ce, va, va,
+ BIT_ULL(bit),
+ NULL, SZ_4K,
+ &prng);
+ if (err)
+ goto err_unpin;
+
+ for (len = 2; len <= ppgtt_size; len = min(2 * len, ppgtt_size)) {
+ err = pte_tlbinv(ce, va, vb,
+ BIT_ULL(bit),
+ tlbinv,
+ BIT_ULL(len),
+ &prng);
+ if (err)
+ goto err_unpin;
+ if (len == ppgtt_size)
+ break;
+ }
+ }
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+out_vm:
+ i915_vm_put(&ppgtt->vm);
+out_b:
+ i915_gem_object_put(B);
+out_a:
+ i915_gem_object_put(A);
+ return err;
+}
+
+static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
+{
+ intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
+}
+
+static int invalidate_full(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int err;
+
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0; /* TLB invalidate not implemented */
+
+ err = mem_tlbinv(gt, create_smem, tlbinv_full);
+ if (err == 0)
+ err = mem_tlbinv(gt, create_lmem, tlbinv_full);
+ if (err == -ENODEV || err == -ENXIO)
+ err = 0;
+
+ return err;
+}
+
+int intel_tlb_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(invalidate_full),
+ };
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ int err;
+
+ if (intel_gt_is_wedged(gt))
+ continue;
+
+ err = intel_gt_live_subtests(tests, gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
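For comparison, a range-limited invalidation would plug into mem_tlbinv() through the same callback; the sketch below is an assumption (intel_gt_invalidate_tlb_range() does not exist in this patch; tlbinv_full() above is the only implemented callback):

/* Illustration only: a hypothetical range-based variant of the callback */
static void tlbinv_range(struct i915_address_space *vm, u64 addr, u64 length)
{
	intel_gt_invalidate_tlb_range(vm->gt, addr, length);
}

static int invalidate_range(void *arg)
{
	struct intel_gt *gt = arg;

	if (GRAPHICS_VER(gt->i915) < 8)
		return 0; /* TLB invalidate not implemented */

	return mem_tlbinv(gt, create_smem, tlbinv_range);
}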
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 96e3861706d6..14a8b25b6204 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -138,9 +138,7 @@ read_nonprivs(struct intel_context *ce)
goto err_pin;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_req;
@@ -521,7 +519,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
for (i = 0; i < engine->whitelist.count; i++) {
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
struct i915_gem_ww_ctx ww;
- u64 addr = scratch->node.start;
+ u64 addr = i915_vma_offset(scratch);
struct i915_request *rq;
u32 srm, lrm, rsvd;
u32 expect;
@@ -640,7 +638,7 @@ retry:
goto err_request;
err = engine->emit_bb_start(rq,
- batch->node.start, PAGE_SIZE,
+ i915_vma_offset(batch), PAGE_SIZE,
0);
if (err)
goto err_request;
@@ -853,9 +851,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
if (IS_ERR(rq))
return PTR_ERR(rq);
- i915_vma_lock(results);
- err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(results);
+ err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_req;
@@ -870,7 +866,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
}
for (i = 0; i < engine->whitelist.count; i++) {
- u64 offset = results->node.start + sizeof(u32) * i;
+ u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
/* Clear non priv flags */
@@ -935,14 +931,12 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
goto err_request;
}
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
+ err = igt_vma_move_to_active_unlocked(batch, rq, 0);
if (err)
goto err_request;
/* Perform the writes from an unprivileged "user" batch */
- err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);
err_request:
err = request_add_sync(rq, err);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 402f085f3a02..449c9ed44382 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -8,6 +8,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include "i915_drv.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"
@@ -32,6 +33,8 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ enum i915_map_type map_type;
struct file *file;
void *ptr;
@@ -41,8 +44,8 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
- I915_MAP_WC : I915_MAP_WB);
+ map_type = i915_coherent_map_type(i915, obj, true);
+ ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index f2d9858d827c..021f51d9b456 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -24,37 +24,37 @@ static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+ return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}
-static struct kobj_attribute name_attr =
+static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);
static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}
-static struct kobj_attribute class_attr =
+static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);
static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}
-static struct kobj_attribute inst_attr =
+static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);
static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+ return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}
-static struct kobj_attribute mmio_attr =
+static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
static const char * const vcs_caps[] = {
@@ -107,11 +107,9 @@ __caps_show(struct intel_engine_cs *engine,
for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
if (n >= count || !repr[n]) {
if (GEM_WARN_ON(show_unknown))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "[%x] ", n);
+ len += sysfs_emit_at(buf, len, "[%x] ", n);
} else {
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s ", repr[n]);
+ len += sysfs_emit_at(buf, len, "%s ", repr[n]);
}
if (GEM_WARN_ON(len >= PAGE_SIZE))
break;
@@ -127,7 +125,7 @@ caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(engine, engine->uabi_capabilities, buf, true);
}
-static struct kobj_attribute caps_attr =
+static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);
static ssize_t
@@ -136,7 +134,7 @@ all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}
-static struct kobj_attribute all_caps_attr =
+static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
static ssize_t
@@ -182,10 +180,10 @@ max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_attr =
+static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
@@ -193,10 +191,10 @@ max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_def =
+static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
static ssize_t
@@ -236,10 +234,10 @@ timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_attr =
+static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
@@ -247,10 +245,10 @@ timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_def =
+static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
static ssize_t
@@ -287,10 +285,10 @@ stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_attr =
+static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
@@ -298,10 +296,10 @@ stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_def =
+static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
static ssize_t
@@ -343,10 +341,10 @@ preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_attr =
+static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
@@ -355,10 +353,10 @@ preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_def =
+static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
static ssize_t
@@ -399,10 +397,10 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_attr =
+static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
static ssize_t
@@ -410,10 +408,10 @@ heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_def =
+static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
static void kobj_engine_release(struct kobject *kobj)
@@ -421,7 +419,7 @@ static void kobj_engine_release(struct kobject *kobj)
kfree(kobj);
}
-static struct kobj_type kobj_engine_type = {
+static const struct kobj_type kobj_engine_type = {
.release = kobj_engine_release,
.sysfs_ops = &kobj_sysfs_ops
};
@@ -449,7 +447,7 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
static void add_defaults(struct kobj_engine *parent)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&max_spin_def.attr,
&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
@@ -485,7 +483,7 @@ static void add_defaults(struct kobj_engine *parent)
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&name_attr.attr,
&class_attr.attr,
&inst_attr.attr,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index 8085fb181274..bcb1129b3610 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -21,6 +21,9 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH = 0x02,
INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH = 0x03,
INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE = 0x04,
+ INTEL_GUC_LOAD_STATUS_HWCONFIG_START = 0x05,
+ INTEL_GUC_LOAD_STATUS_HWCONFIG_DONE = 0x06,
+ INTEL_GUC_LOAD_STATUS_HWCONFIG_ERROR = 0x07,
INTEL_GUC_LOAD_STATUS_GDT_DONE = 0x10,
INTEL_GUC_LOAD_STATUS_IDT_DONE = 0x20,
INTEL_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
@@ -38,4 +41,18 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_READY = 0xF0,
};
+enum intel_bootrom_load_status {
+ INTEL_BOOTROM_STATUS_NO_KEY_FOUND = 0x13,
+ INTEL_BOOTROM_STATUS_AES_PROD_KEY_FOUND = 0x1A,
+ INTEL_BOOTROM_STATUS_RSA_FAILED = 0x50,
+ INTEL_BOOTROM_STATUS_PAVPC_FAILED = 0x73,
+ INTEL_BOOTROM_STATUS_WOPCM_FAILED = 0x74,
+ INTEL_BOOTROM_STATUS_LOADLOC_FAILED = 0x75,
+ INTEL_BOOTROM_STATUS_JUMP_PASSED = 0x76,
+ INTEL_BOOTROM_STATUS_JUMP_FAILED = 0x77,
+ INTEL_BOOTROM_STATUS_RC6CTXCONFIG_FAILED = 0x79,
+ INTEL_BOOTROM_STATUS_MPUMAP_INCORRECT = 0x7A,
+ INTEL_BOOTROM_STATUS_EXCEPTION = 0x7E,
+};
+
#endif /* _ABI_GUC_ERRORS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index 3624abfd22d1..9d589c28f40f 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -73,7 +73,7 @@ struct guc_debug_capture_list_header {
struct guc_debug_capture_list {
struct guc_debug_capture_list_header header;
- struct guc_mmio_reg regs[0];
+ struct guc_mmio_reg regs[];
} __packed;
/**
@@ -125,7 +125,7 @@ struct guc_state_capture_header_t {
struct guc_state_capture_t {
struct guc_state_capture_header_t header;
- struct guc_mmio_reg mmio_entries[0];
+ struct guc_mmio_reg mmio_entries[];
} __packed;
enum guc_capture_group_types {
@@ -145,7 +145,7 @@ struct guc_state_capture_group_header_t {
/* this is the top level structure where an error-capture dump starts */
struct guc_state_capture_group_t {
struct guc_state_capture_group_header_t grp_header;
- struct guc_state_capture_t capture_entries[0];
+ struct guc_state_capture_t capture_entries[];
} __packed;
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
new file mode 100644
index 000000000000..1d9fdfb11268
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
+#include "gt/intel_ring.h"
+#include "intel_gsc_fw.h"
+
+#define GSC_FW_STATUS_REG _MMIO(0x116C40)
+#define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0)
+#define GSC_FW_CURRENT_STATE_RESET 0
+#define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9)
+
+static bool gsc_is_in_reset(struct intel_uncore *uncore)
+{
+ u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+
+ return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
+ GSC_FW_CURRENT_STATE_RESET;
+}
+
+bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
+{
+ struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
+ u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+
+ return fw_status & GSC_FW_INIT_COMPLETE_BIT;
+}
+
+static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
+{
+ u32 offset = i915_ggtt_offset(gsc->local);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GSC_FW_LOAD;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gsc_fw_load(struct intel_gsc_uc *gsc)
+{
+ struct intel_context *ce = gsc->ce;
+ struct i915_request *rq;
+ int err;
+
+ if (!ce)
+ return -ENODEV;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ err = emit_gsc_fw_load(rq, gsc);
+ if (err)
+ goto out_rq;
+
+ err = ce->engine->emit_flush(rq, 0);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ i915_request_add(rq);
+
+ if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ if (err)
+ gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
+ ERR_PTR(err));
+
+ return err;
+}
+
+static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ void *src, *dst;
+
+ if (!gsc->local)
+ return -ENODEV;
+
+ obj = gsc->local->obj;
+
+ if (obj->base.size < gsc->fw.size)
+ return -ENOSPC;
+
+ dst = i915_gem_object_pin_map_unlocked(obj,
+ i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
+ i915_coherent_map_type(i915, gsc->fw.obj, true));
+ if (IS_ERR(src)) {
+ i915_gem_object_unpin_map(obj);
+ return PTR_ERR(src);
+ }
+
+ memset(dst, 0, obj->base.size);
+ memcpy(dst, src, gsc->fw.size);
+
+ i915_gem_object_unpin_map(gsc->fw.obj);
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int gsc_fw_wait(struct intel_gt *gt)
+{
+ return intel_wait_for_register(gt->uncore,
+ GSC_FW_STATUS_REG,
+ GSC_FW_INIT_COMPLETE_BIT,
+ GSC_FW_INIT_COMPLETE_BIT,
+ 500);
+}
+
+int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct intel_uc_fw *gsc_fw = &gsc->fw;
+ int err;
+
+ /* check current fw status */
+ if (intel_gsc_uc_fw_init_done(gsc)) {
+ if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
+ intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+ return -EEXIST;
+ }
+
+ if (!intel_uc_fw_is_loadable(gsc_fw))
+ return -ENOEXEC;
+
+ /* FW blob is ok, so clean the status */
+ intel_uc_fw_sanitize(&gsc->fw);
+
+ if (!gsc_is_in_reset(gt->uncore))
+ return -EIO;
+
+ err = gsc_fw_load_prepare(gsc);
+ if (err)
+ goto fail;
+
+ /*
+ * GSC is only killed by an FLR, so we need to trigger one on unload to
+ * make sure we stop it. This is because we assign a chunk of memory to
+ * the GSC as part of the FW load, so we need to make sure it stops
+ * using it when we release it to the system on driver unload. Note that
+ * this is not a problem of the unload per-se, because the GSC will not
+ * touch that memory unless there are requests for it coming from the
+ * driver; therefore, no accesses will happen while i915 is not loaded,
+ * but if we re-load the driver then the GSC might wake up and try to
+ * access that old memory location again.
+ * Given that an FLR is a very disruptive action (see the FLR function
+ * for details), we want to do it as the last action before releasing
+ * the access to the MMIO bar, which means we need to do it as part of
+ * the primary uncore cleanup.
+ * An alternative approach to the FLR would be to use a memory location
+ * that survives driver unload, like e.g. stolen memory, and keep the
+ * GSC loaded across reloads. However, this requires us to make sure we
+ * preserve that memory location on unload and then determine and
+ * reserve its offset on each subsequent load, which is not trivial, so
+ * it is easier to just kill everything and start fresh.
+ */
+ intel_uncore_set_flr_on_fini(&gt->i915->uncore);
+
+ err = gsc_fw_load(gsc);
+ if (err)
+ goto fail;
+
+ err = gsc_fw_wait(gt);
+ if (err)
+ goto fail;
+
+ /* FW is not fully operational until we enable SW proxy */
+ intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+
+ gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
+
+ return 0;
+
+fail:
+ return intel_uc_fw_mark_load_failed(gsc_fw, err);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
new file mode 100644
index 000000000000..f4c1106bb2a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_FW_H_
+#define _INTEL_GSC_FW_H_
+
+#include <linux/types.h>
+
+struct intel_gsc_uc;
+struct intel_uncore;
+
+int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc);
+bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
new file mode 100644
index 000000000000..2d5b70b3384c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
+#include "intel_gsc_uc.h"
+#include "intel_gsc_fw.h"
+#include "i915_drv.h"
+
+static void gsc_work(struct work_struct *work)
+{
+ struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ intel_gsc_uc_fw_upload(gsc);
+}
+
+static bool gsc_engine_supported(struct intel_gt *gt)
+{
+ intel_engine_mask_t mask;
+
+ /*
+ * We reach here from i915_driver_early_probe for the primary GT before
+ * its engine mask is set, so we use the device info engine mask for it.
+ * For other GTs we expect the GT-specific mask to be set before we
+ * call this function.
+ */
+ GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
+
+ if (gt_is_root(gt))
+ mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+ else
+ mask = gt->info.engine_mask;
+
+ return __HAS_ENGINE(mask, GSC0);
+}
+
+void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
+{
+ intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC);
+ INIT_WORK(&gsc->work, gsc_work);
+
+ /*
+ * We can arrive here from i915_driver_early_probe for the primary
+ * GT before it is fully set up, hence we check the device info's
+ * engine mask.
+ */
+ if (!gsc_engine_supported(gsc_uc_to_gt(gsc))) {
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+ return;
+ }
+}
+
+int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
+{
+ static struct lock_class_key gsc_lock;
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct intel_engine_cs *engine = gt->engine[GSC0];
+ struct intel_context *ce;
+ struct i915_vma *vma;
+ int err;
+
+ err = intel_uc_fw_init(&gsc->fw);
+ if (err)
+ goto out;
+
+ vma = intel_guc_allocate_vma(&gt->uc.guc, SZ_8M);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_fw;
+ }
+
+ gsc->local = vma;
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_GSC_ADDR,
+ &gsc_lock, "gsc_context");
+ if (IS_ERR(ce)) {
+ gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
+ err = PTR_ERR(ce);
+ goto out_vma;
+ }
+
+ gsc->ce = ce;
+
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+out_vma:
+ i915_vma_unpin_and_release(&gsc->local, 0);
+out_fw:
+ intel_uc_fw_fini(&gsc->fw);
+out:
+ gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
+ return err;
+}
+
+void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ flush_work(&gsc->work);
+
+ if (gsc->ce)
+ intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
+
+ i915_vma_unpin_and_release(&gsc->local, 0);
+
+ intel_uc_fw_fini(&gsc->fw);
+}
+
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ flush_work(&gsc->work);
+}
+
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ /*
+ * We only want to start the GSC worker from here in the actual resume
+ * flow and not during driver load. This is because GSC load is slow and
+ * therefore we want to make sure that the default state init completes
+ * first, so as not to slow down the init thread. A separate call to
+ * intel_gsc_uc_load_start will ensure that the GSC is loaded during
+ * driver load.
+ */
+ if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
+ return;
+
+ intel_gsc_uc_load_start(gsc);
+}
+
+void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ if (intel_gsc_uc_fw_init_done(gsc))
+ return;
+
+ queue_work(system_unbound_wq, &gsc->work);
+}
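As the resume comment above notes, driver load relies on a separate intel_gsc_uc_load_start() call; a minimal sketch of such a call site follows, where the uc->gsc member is an assumption of this sketch and the real hook into the uC load path is added elsewhere in this series:

/* Illustration only: kick the delayed GSC load once uC bring-up completes */
static void example_gsc_load_during_probe(struct intel_uc *uc)
{
	intel_gsc_uc_load_start(&uc->gsc);
}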
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
new file mode 100644
index 000000000000..5f50fa1ff8b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_UC_H_
+#define _INTEL_GSC_UC_H_
+
+#include "intel_uc_fw.h"
+
+struct i915_vma;
+struct intel_context;
+
+struct intel_gsc_uc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* GSC-specific additions */
+ struct i915_vma *local; /* private memory for GSC usage */
+ struct intel_context *ce; /* for submission to GSC FW via GSC engine */
+
+ struct work_struct work; /* for delayed load */
+};
+
+void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
+int intel_gsc_uc_init(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_fini(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
+
+static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
+{
+ return intel_uc_fw_is_supported(&gsc->fw);
+}
+
+static inline bool intel_gsc_uc_is_wanted(struct intel_gsc_uc *gsc)
+{
+ return intel_uc_fw_is_enabled(&gsc->fw);
+}
+
+static inline bool intel_gsc_uc_is_used(struct intel_gsc_uc *gsc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&gsc->fw);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
new file mode 100644
index 000000000000..ea0da06e2f39
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
+#include "intel_gsc_uc_heci_cmd_submit.h"
+
+struct gsc_heci_pkt {
+ u64 addr_in;
+ u32 size_in;
+ u64 addr_out;
+ u32 size_out;
+};
+
+static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GSC_HECI_CMD_PKT;
+ *cs++ = lower_32_bits(pkt->addr_in);
+ *cs++ = upper_32_bits(pkt->addr_in);
+ *cs++ = pkt->size_in;
+ *cs++ = lower_32_bits(pkt->addr_out);
+ *cs++ = upper_32_bits(pkt->addr_out);
+ *cs++ = pkt->size_out;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in,
+ u32 size_in, u64 addr_out,
+ u32 size_out)
+{
+ struct intel_context *ce = gsc->ce;
+ struct i915_request *rq;
+ struct gsc_heci_pkt pkt = {
+ .addr_in = addr_in,
+ .size_in = size_in,
+ .addr_out = addr_out,
+ .size_out = size_out
+ };
+ int err;
+
+ if (!ce)
+ return -ENODEV;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ err = emit_gsc_heci_pkt(rq, &pkt);
+
+ if (err)
+ goto out_rq;
+
+ err = ce->engine->emit_flush(rq, 0);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ i915_request_add(rq);
+
+ if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ if (err)
+ drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
+ "Request submission for GSC heci cmd failed (%d)\n",
+ err);
+
+ return err;
+}
+
+void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
+ u8 heci_client_id, u32 message_size,
+ u64 host_session_id)
+{
+ host_session_id &= ~HOST_SESSION_MASK;
+ if (heci_client_id == HECI_MEADDRESS_PXP)
+ host_session_id |= HOST_SESSION_PXP_SINGLE;
+
+ header->validity_marker = GSC_HECI_VALIDITY_MARKER;
+ header->heci_client_id = heci_client_id;
+ header->host_session_handle = host_session_id;
+ header->header_version = MTL_GSC_HEADER_VERSION;
+ header->message_size = message_size;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
new file mode 100644
index 000000000000..3d56ae501991
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
+#define _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
+
+#include <linux/types.h>
+
+struct intel_gsc_uc;
+struct intel_gsc_mtl_header {
+ u32 validity_marker;
+#define GSC_HECI_VALIDITY_MARKER 0xA578875A
+
+ u8 heci_client_id;
+#define HECI_MEADDRESS_PXP 17
+#define HECI_MEADDRESS_HDCP 18
+
+ u8 reserved1;
+
+ u16 header_version;
+#define MTL_GSC_HEADER_VERSION 1
+
+ /*
+ * FW allows the host to decide the host_session handle as it sees fit.
+ * For traceability, bits 60-63 are reserved to differentiate the
+ * caller-target subsystem:
+ * 0000 - HDCP
+ * 0001 - PXP Single Session
+ */
+ u64 host_session_handle;
+#define HOST_SESSION_MASK REG_GENMASK64(63, 60)
+#define HOST_SESSION_PXP_SINGLE BIT_ULL(60)
+ u64 gsc_message_handle;
+
+ u32 message_size; /* lower 20 bits only, upper 12 are reserved */
+
+ /*
+ * Flags mask:
+ * Bit 0: Pending
+ * Bit 1: Session Cleanup
+ * Bits 2-15: Flags
+ * Bits 16-31: Extension Size
+ * According to the internal spec, flags are either input or output;
+ * we distinguish them using the OUTFLAG or INFLAG prefix.
+ */
+ u32 flags;
+#define GSC_OUTFLAG_MSG_PENDING 1
+
+ u32 status;
+} __packed;
+
+int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc,
+ u64 addr_in, u32 size_in,
+ u64 addr_out, u32 size_out);
+void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
+ u8 heci_client_id, u32 message_size,
+ u64 host_session_id);
+#endif
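A short usage sketch of the two helpers declared above; the hdr pointer is assumed to be the CPU mapping of the buffer the GSC reads at addr_in, and whether message_size includes the header is likewise an assumption of this sketch:

/* Illustration only: build an MTL header for a PXP single-session message
 * and submit it over the GSC engine.
 */
static int example_send_heci_msg(struct intel_gsc_uc *gsc,
				 struct intel_gsc_mtl_header *hdr,
				 u64 addr_in, u64 addr_out, u32 msg_size)
{
	intel_gsc_uc_heci_cmd_emit_mtl_header(hdr, HECI_MEADDRESS_PXP,
					      sizeof(*hdr) + msg_size, 0);

	return intel_gsc_uc_heci_cmd_submit_packet(gsc, addr_in,
						   sizeof(*hdr) + msg_size,
						   addr_out, msg_size);
}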
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 52aede324788..d76508fa3af7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -11,6 +11,7 @@
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
+#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -94,8 +95,8 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
spin_lock_irq(gt->irq_lock);
- WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
- gt->pm_guc_events);
+ guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
spin_unlock_irq(gt->irq_lock);
@@ -274,8 +275,9 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
flags |= GUC_WA_GAM_CREDITS;
- /* Wa_14014475959:dg2 */
- if (IS_DG2(gt->i915))
+ /* Wa_14014475959 */
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+ IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
/*
@@ -289,7 +291,9 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */
- if (IS_GRAPHICS_VER(gt->i915, 11, 12))
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(gt->i915) >= 11 &&
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70)))
flags |= GUC_WA_PRE_PARSER;
/* Wa_16011777198:dg2 */
@@ -339,7 +343,7 @@ static void guc_init_params(struct intel_guc *guc)
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
+ guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}
/*
@@ -386,7 +390,6 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
int intel_guc_init(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
int ret;
ret = intel_uc_fw_init(&guc->fw);
@@ -430,9 +433,6 @@ int intel_guc_init(struct intel_guc *guc)
/* now that everything is perma-pinned, initialize the parameters */
guc_init_params(guc);
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(gt->ggtt);
-
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
@@ -451,19 +451,15 @@ err_fw:
intel_uc_fw_fini(&guc->fw);
out:
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
- i915_probe_error(gt->i915, "failed with %d\n", ret);
+ guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
return ret;
}
void intel_guc_fini(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
if (!intel_uc_fw_is_loadable(&guc->fw))
return;
- i915_ggtt_disable_guc(gt->ggtt);
-
if (intel_guc_slpc_is_used(guc))
intel_guc_slpc_fini(&guc->slpc);
@@ -484,7 +480,6 @@ void intel_guc_fini(struct intel_guc *guc)
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 header;
int i;
@@ -519,7 +514,7 @@ retry:
10, 10, &header);
if (unlikely(ret)) {
timeout:
- drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
+ guc_err(guc, "mmio request %#x: no reply %x\n",
request[0], header);
goto out;
}
@@ -541,7 +536,7 @@ timeout:
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
- drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
+ guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
request[0], reason);
goto retry;
}
@@ -550,7 +545,7 @@ timeout:
u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
- drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
+ guc_err(guc, "mmio request %#x: failure %x/%u\n",
request[0], error, hint);
ret = -ENXIO;
goto out;
@@ -558,7 +553,7 @@ timeout:
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
- drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
+ guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
request[0], header);
ret = -EPROTO;
goto out;
@@ -601,9 +596,9 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
msg = payload[0] & guc->msg_enabled_mask;
if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
- drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
+ guc_err(guc, "Received early crash dump notification!\n");
if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
- drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");
+ guc_err(guc, "Received early exception notification!\n");
return 0;
}
@@ -657,7 +652,8 @@ int intel_guc_suspend(struct intel_guc *guc)
*/
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret)
- DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
+ guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
+ ERR_PTR(ret));
}
/* Signal that the GuC isn't running. */
@@ -832,12 +828,11 @@ static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 va
static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int err = __guc_action_self_cfg(guc, key, len, value);
if (unlikely(err))
- i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
- ERR_PTR(err), key, value);
+ guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
+ ERR_PTR(err), key, value);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 1bb3f9829286..e46aac1a41e6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -42,6 +42,8 @@ struct intel_guc {
/** @capture: the error-state-capture module's data and objects */
struct intel_guc_state_capture *capture;
+ struct dentry *dbgfs_node;
+
/** @sched_engine: Global engine used to submit requests to GuC */
struct i915_sched_engine *sched_engine;
/**
@@ -158,6 +160,9 @@ struct intel_guc {
bool submission_selected;
/** @submission_initialized: tracks whether GuC submission has been initialised */
bool submission_initialized;
+ /** @submission_version: Submission API version of the currently loaded firmware */
+ struct intel_uc_fw_ver submission_version;
+
/**
* @rc_supported: tracks whether we support GuC rc on the current platform
*/
@@ -268,6 +273,14 @@ struct intel_guc {
#endif
};
+/*
+ * GuC version number components are only 8-bit, so converting to a 32bit 8.8.8
+ * integer works.
+ */
+#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
+#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
+#define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
+
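A quick worked example of the 8.8.8 packing, with arbitrarily chosen version numbers for illustration:

/* e.g. a submission version of 1.3.0 packs as 0x010300 */
static_assert(MAKE_GUC_VER(1, 3, 0) == 0x010300);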
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
return container_of(log, struct intel_guc, log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index a7f737c4792e..69ce06faf8cd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -15,6 +15,7 @@
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
+#include "intel_guc_print.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -427,7 +428,7 @@ static long guc_mmio_reg_state_create(struct intel_guc *guc)
guc->ads_regset = temp_set.storage;
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
+ guc_dbg(guc, "Used %zu KB for temporary ADS regset\n",
(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
return total * sizeof(struct guc_mmio_reg);
@@ -621,7 +622,7 @@ static void guc_init_golden_context(struct intel_guc *guc)
engine = find_engine_state(gt, engine_class);
if (!engine) {
- drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n",
+ guc_err(guc, "No engine state recorded for class %d!\n",
engine_class);
ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0);
@@ -646,7 +647,6 @@ static int
guc_capture_prep_lists(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
struct guc_gt_system_info local_info;
struct iosys_map info_map;
@@ -751,7 +751,7 @@ engine_instance_list:
}
if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
- drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n",
+ guc_warn(guc, "ADS capture alloc size changed from %d to %d\n",
guc->ads_capture_size, PAGE_ALIGN(total_size));
return PAGE_ALIGN(total_size);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 1c1b85073b4b..e0e793167d61 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -15,6 +15,7 @@
#include "guc_capture_fwif.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
+#include "intel_guc_print.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
@@ -30,12 +31,14 @@
{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
#define COMMON_GEN9BASE_GLOBAL \
- { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
- { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
{ DONE_REG, 0, 0, "DONE_REG" }, \
{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
+#define GEN9_GLOBAL \
+ { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
+ { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
+
#define COMMON_GEN12BASE_GLOBAL \
{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
@@ -141,6 +144,7 @@ static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
static const struct __guc_mmio_reg_descr default_global_regs[] = {
COMMON_BASE_GLOBAL,
COMMON_GEN9BASE_GLOBAL,
+ GEN9_GLOBAL,
};
static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
@@ -353,7 +357,6 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
u32 ipver)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct sseu_dev_info *sseu;
int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
const struct __guc_mmio_reg_descr_group *list;
@@ -402,7 +405,7 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
}
}
- drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
guc->capture->extlists = extlists;
}
@@ -477,7 +480,6 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
{
u32 i = 0, j = 0;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
const struct __guc_mmio_reg_descr_group *match;
@@ -509,8 +511,7 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
}
}
if (i < num_entries)
- drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
- (int)i, (int)num_entries);
+ guc_dbg(guc, "Got short capture reglist init: %d out %d.\n", i, num_entries);
return 0;
}
@@ -540,12 +541,11 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size, bool is_purpose_est)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
if (!gc->reglists) {
- drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ guc_warn(guc, "No capture reglist for this device\n");
return -ENODEV;
}
@@ -557,9 +557,9 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
!guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
+ guc_warn(guc, "Missing capture reglist: global!\n");
else
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
+ guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
__stringify_type(type), type,
__stringify_engclass(classid), classid);
return -ENODATA;
@@ -592,7 +592,6 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
{
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_debug_capture_list *listnode;
int ret, num_regs;
u8 *caplist, *tmp;
@@ -623,7 +622,7 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
caplist = kzalloc(size, GFP_KERNEL);
if (!caplist) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ guc_dbg(guc, "Failed to alloc cached register capture list");
return -ENOMEM;
}
@@ -653,7 +652,6 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
void **outptr, size_t *size)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tmp = sizeof(u32) * 4;
void *null_header;
@@ -665,7 +663,7 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
null_header = kzalloc(tmp, GFP_KERNEL);
if (!null_header) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ guc_dbg(guc, "Failed to alloc cached register capture null list");
return -ENOMEM;
}
@@ -727,7 +725,6 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
static void check_guc_capture_size(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int min_size = guc_capture_output_min_size_est(guc);
int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
@@ -741,13 +738,13 @@ static void check_guc_capture_size(struct intel_guc *guc)
* INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
*/
if (min_size < 0)
- drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
min_size);
else if (min_size > buffer_size)
- drm_warn(&i915->drm, "GuC error state capture buffer maybe small: %d < %d\n",
+ guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n",
buffer_size, min_size);
else if (spare_size > buffer_size)
- drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
buffer_size, spare_size, min_size);
}
@@ -848,7 +845,6 @@ static int
guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
u32 *dw)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tries = 2;
int avail = 0;
u32 *src_data;
@@ -865,7 +861,7 @@ guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *
return 4;
}
if (avail)
- drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
buf->rd = 0;
}
@@ -1118,13 +1114,12 @@ static void
__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *node = NULL;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int i;
for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
node = guc_capture_alloc_one_node(guc);
if (!node) {
- drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+ guc_warn(guc, "Register capture pre-alloc-cache failure\n");
/* dont free the priors, use what we got and cleanup at shutdown */
return;
}
@@ -1169,7 +1164,6 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc)
static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_state_capture_group_header_t ghdr = {0};
struct guc_state_capture_header_t hdr = {0};
struct __guc_capture_parsed_output *node = NULL;
@@ -1183,7 +1177,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
if (!i)
return -ENODATA;
if (i % sizeof(u32)) {
- drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ guc_warn(guc, "Got mis-aligned register capture entries\n");
ret = -EIO;
goto bailout;
}
@@ -1301,7 +1295,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
break;
}
if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ guc_dbg(guc, "Register capture missing global dump: %08x!\n",
datatype);
}
node->is_partial = is_partial;
@@ -1322,7 +1316,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
if (numregs > guc->capture->max_mmio_per_node) {
- drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
numregs = guc->capture->max_mmio_per_node;
}
node->reginfo[datatype].num_regs = numregs;
@@ -1367,7 +1361,6 @@ static void __guc_capture_process_output(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, full_count;
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_log_buffer_state log_buf_state_local;
struct guc_log_buffer_state *log_buf_state;
struct __guc_capture_bufstate buf;
@@ -1403,7 +1396,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
- drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+ read_offset, buffer_size);
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
@@ -1506,7 +1500,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!ebuf || !ee)
return -EINVAL;
- cap = ee->capture;
+ cap = ee->guc_capture;
if (!cap || !ee->engine)
return -ENODEV;
@@ -1571,13 +1565,34 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+ struct gcap_reg_list_info *reginfo;
+ struct guc_mmio_reg *regs;
+ i915_reg_t reg_ipehr = RING_IPEHR(0);
+ i915_reg_t reg_instdone = RING_INSTDONE(0);
+ int i;
+
+ if (!ee->guc_capture_node)
+ return;
+
+ reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+ regs = reginfo->regs;
+ for (i = 0; i < reginfo->num_regs; i++) {
+ if (regs[i].offset == reg_ipehr.reg)
+ ee->ipehr = regs[i].value;
+ else if (regs[i].offset == reg_instdone.reg)
+ ee->instdone.instdone = regs[i].value;
+ }
+}
+
void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
if (!ee || !ee->guc_capture_node)
return;
- guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
- ee->capture = NULL;
+ guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
+ ee->guc_capture = NULL;
ee->guc_capture_node = NULL;
}
@@ -1586,13 +1601,11 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
struct intel_context *ce)
{
struct __guc_capture_parsed_output *n, *ntmp;
- struct drm_i915_private *i915;
struct intel_guc *guc;
if (!gt || !ee || !ce)
return;
- i915 = gt->i915;
guc = &gt->uc.guc;
if (!guc->capture)
return;
@@ -1606,16 +1619,18 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
- n->guc_id && n->guc_id == ce->guc_id.id &&
- (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
- (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
list_del(&n->link);
ee->guc_capture_node = n;
- ee->capture = guc->capture;
+ ee->guc_capture = guc->capture;
+ guc_capture_find_ecode(ee);
return;
}
}
- drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+
+ guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
+ ce->guc_id.id, ce->lrc.lrca);
}
void intel_guc_capture_process(struct intel_guc *guc)
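
The guc_capture_find_ecode() helper added above back-fills the legacy error-code fields of the engine coredump (IPEHR and INSTDONE) by scanning the engine-instance register list that GuC captured for the matching context. A standalone sketch of that scan (the struct and register offsets are illustrative stand-ins, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>

struct mmio_reg { uint32_t offset; uint32_t value; };

/* assumed offsets for illustration only */
#define REG_IPEHR	0x2068
#define REG_INSTDONE	0x206c

static void find_ecode(const struct mmio_reg *regs, int num,
		       uint32_t *ipehr, uint32_t *instdone)
{
	for (int i = 0; i < num; i++) {
		if (regs[i].offset == REG_IPEHR)
			*ipehr = regs[i].value;
		else if (regs[i].offset == REG_INSTDONE)
			*instdone = regs[i].value;
	}
}

int main(void)
{
	struct mmio_reg capture[] = {
		{ 0x2030, 0x1000 },
		{ REG_IPEHR, 0x18800101 },
		{ REG_INSTDONE, 0xffffffff },
	};
	uint32_t ipehr = 0, instdone = 0;

	find_ecode(capture, 3, &ipehr, &instdone);
	printf("ipehr=0x%08x instdone=0x%08x\n", ipehr, instdone);
	return 0;
}
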
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 2b22065e87bf..1803a633ed64 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -11,38 +11,23 @@
#include "i915_drv.h"
#include "intel_guc_ct.h"
-#include "gt/intel_gt.h"
+#include "intel_guc_print.h"
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
return container_of(ct, struct intel_guc, ct);
}
-static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
-{
- return guc_to_gt(ct_to_guc(ct));
-}
-
-static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
-{
- return ct_to_gt(ct)->i915;
-}
-
-static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
-{
- return &ct_to_i915(ct)->drm;
-}
-
#define CT_ERROR(_ct, _fmt, ...) \
- drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
+ guc_err(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
- drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
+ guc_dbg(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...) do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
- i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__)
+ guc_probe_error(ct_to_guc(ct), "CT: " _fmt, ##__VA_ARGS__)
/**
* DOC: CTB Blob
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 5b86b2e286e0..6fda3aec5c66 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -12,7 +12,9 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#include "gt/intel_rps.h"
#include "intel_guc_fw.h"
+#include "intel_guc_print.h"
#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_gt *gt)
@@ -38,9 +40,8 @@ static void guc_prepare_xfer(struct intel_gt *gt)
if (GRAPHICS_VER(uncore->i915) == 9) {
/* DOP Clock Gating Enable for GuC clocks */
- intel_gt_mcr_multicast_write(gt, GEN8_MISCCPCTL,
- GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- intel_gt_mcr_read_any(gt, GEN8_MISCCPCTL));
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0,
+ GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
@@ -88,29 +89,80 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
+ * the value matches either completion or a known failure code.
*
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
-static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
+static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool *success)
{
u32 val = intel_uncore_read(uncore, GUC_STATUS);
u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);
+ u32 br_val = REG_FIELD_GET(GS_BOOTROM_MASK, val);
*status = val;
- return uk_val == INTEL_GUC_LOAD_STATUS_READY;
+ switch (uk_val) {
+ case INTEL_GUC_LOAD_STATUS_READY:
+ *success = true;
+ return true;
+
+ case INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
+ case INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
+ case INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
+ case INTEL_GUC_LOAD_STATUS_HWCONFIG_ERROR:
+ case INTEL_GUC_LOAD_STATUS_DPC_ERROR:
+ case INTEL_GUC_LOAD_STATUS_EXCEPTION:
+ case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
+ case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
+ case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ *success = false;
+ return true;
+ }
+
+ switch (br_val) {
+ case INTEL_BOOTROM_STATUS_NO_KEY_FOUND:
+ case INTEL_BOOTROM_STATUS_RSA_FAILED:
+ case INTEL_BOOTROM_STATUS_PAVPC_FAILED:
+ case INTEL_BOOTROM_STATUS_WOPCM_FAILED:
+ case INTEL_BOOTROM_STATUS_LOADLOC_FAILED:
+ case INTEL_BOOTROM_STATUS_JUMP_FAILED:
+ case INTEL_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
+ case INTEL_BOOTROM_STATUS_MPUMAP_INCORRECT:
+ case INTEL_BOOTROM_STATUS_EXCEPTION:
+ *success = false;
+ return true;
+ }
+
+ return false;
}
-static int guc_wait_ucode(struct intel_uncore *uncore)
+/*
+ * Use a longer timeout for debug builds so that problems can be detected
+ * and analysed. But a shorter timeout for releases so that users don't
+ * wait forever to find out there is a problem. Note that the only reason
+ * an end user should hit the timeout is in case of extreme thermal throttling.
+ * And a system that is that hot during boot is probably dead anyway!
+ */
+#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define GUC_LOAD_RETRY_LIMIT 20
+#else
+#define GUC_LOAD_RETRY_LIMIT 3
+#endif
+
+static int guc_wait_ucode(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ ktime_t before, after, delta;
+ bool success;
u32 status;
- int ret;
+ int ret, count;
+ u64 delta_ms;
+ u32 before_freq;
/*
* Wait for the GuC to start up.
- * NB: Docs recommend not using the interrupt for completion.
+ *
* Measurements indicate this should take no more than 20ms
* (assuming the GT clock is at maximum frequency). So, a
* timeout here indicates that the GuC has failed and is unusable.
@@ -124,31 +176,80 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
* issues to be resolved. In the meantime bump the timeout to
* 200ms. Even at slowest clock, this should be sufficient. And
* in the working case, a larger timeout makes no difference.
+ *
+ * IFWI updates have also been seen to cause sporadic failures due to
+ * the requested frequency not being granted and thus the firmware
+ * load is attempted at minimum frequency. That can lead to load times
+ * in the seconds range. However, there is a limit on how long an
+ * individual wait_for() can wait. So wrap it in a loop.
*/
- ret = wait_for(guc_ready(uncore, &status), 200);
- if (ret) {
- struct drm_device *drm = &uncore->i915->drm;
-
- drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_info(drm, "GuC load failed: status: Reset = %d, "
- "BootROM = 0x%02X, UKernel = 0x%02X, "
- "MIA = 0x%02X, Auth = 0x%02X\n",
- REG_FIELD_GET(GS_MIA_IN_RESET, status),
- REG_FIELD_GET(GS_BOOTROM_MASK, status),
- REG_FIELD_GET(GS_UKERNEL_MASK, status),
- REG_FIELD_GET(GS_MIA_MASK, status),
- REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
-
- if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_info(drm, "GuC firmware signature verification failed\n");
+ before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps);
+ before = ktime_get();
+ for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) {
+ ret = wait_for(guc_load_done(uncore, &status, &success), 1000);
+ if (!ret || !success)
+ break;
+
+ guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz\n",
+ count, intel_rps_read_actual_frequency(&uncore->gt->rps));
+ }
+ after = ktime_get();
+ delta = ktime_sub(after, before);
+ delta_ms = ktime_to_ms(delta);
+ if (ret || !success) {
+ u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
+ u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
+
+ guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n",
+ status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret);
+ guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ bootrom, ukernel,
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
+
+ switch (bootrom) {
+ case INTEL_BOOTROM_STATUS_NO_KEY_FOUND:
+ guc_info(guc, "invalid key requested, header = 0x%08X\n",
+ intel_uncore_read(uncore, GUC_HEADER_INFO));
ret = -ENOEXEC;
+ break;
+
+ case INTEL_BOOTROM_STATUS_RSA_FAILED:
+ guc_info(guc, "firmware signature verification failed\n");
+ ret = -ENOEXEC;
+ break;
}
- if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
- drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+ switch (ukernel) {
+ case INTEL_GUC_LOAD_STATUS_EXCEPTION:
+ guc_info(guc, "firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
+ break;
+
+ case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ guc_info(guc, "illegal register in save/restore workaround list\n");
+ ret = -EPERM;
+ break;
+
+ case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
+ guc_info(guc, "still extracting hwconfig table.\n");
+ ret = -ETIMEDOUT;
+ break;
}
+
+ /* Uncommon/unexpected error, see earlier status code print for details */
+ if (ret == 0)
+ ret = -ENXIO;
+ } else if (delta_ms > 200) {
+ guc_warn(guc, "excessive init time: %lldms! [freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d]\n",
+ delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
+ before_freq, status, count, ret);
+ } else {
+ guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
+ before_freq, status, count, ret);
}
return ret;
@@ -194,7 +295,7 @@ int intel_guc_fw_upload(struct intel_guc *guc)
if (ret)
goto out;
- ret = guc_wait_ucode(uncore);
+ ret = guc_wait_ucode(guc);
if (ret)
goto out;
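
The reworked guc_wait_ucode() above replaces the single 200ms wait with a bounded retry loop: each pass waits up to one second for guc_load_done() to report either success or a recognised failure code, and the loop runs at most GUC_LOAD_RETRY_LIMIT times so that slow loads at minimum frequency can still complete while genuine failures remain bounded. A standalone sketch of that poll-with-retry shape (the poll function and counts are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define RETRY_LIMIT	3
#define POLLS_PER_TRY	1000	/* stands in for a 1s wait_for() */

/* stand-in for guc_load_done(): reports "done" plus pass/fail */
static bool load_done(unsigned int *status, bool *success)
{
	static int polls;

	if (++polls < 5)
		return false;		/* still loading */
	*status = 0xF0;
	*success = true;
	return true;
}

static int wait_for_load(void)
{
	unsigned int status = 0;
	bool success = false;
	int count;

	for (count = 0; count < RETRY_LIMIT; count++) {
		for (int p = 0; p < POLLS_PER_TRY; p++)
			if (load_done(&status, &success))
				goto out;
		printf("load still in progress, count = %d\n", count);
	}
out:
	printf("status = 0x%X, success = %d, count = %d\n", status, success, count);
	return success ? 0 : -1;
}

int main(void) { return wait_for_load(); }
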
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index 4781fccc2687..852bea0208ce 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -102,7 +102,7 @@ static bool has_table(struct drm_i915_private *i915)
return false;
}
-/**
+/*
* intel_guc_hwconfig_init - Initialize the HWConfig
*
* Retrieve the HWConfig table from the GuC and save it locally.
@@ -136,7 +136,7 @@ static int guc_hwconfig_init(struct intel_gt *gt)
return 0;
}
-/**
+/*
* intel_gt_init_hwconfig - Initialize the HWConfig if available
*
* Retrieve the HWConfig table if available on the current platform.
@@ -149,7 +149,7 @@ int intel_gt_init_hwconfig(struct intel_gt *gt)
return guc_hwconfig_init(gt);
}
-/**
+/*
* intel_gt_fini_hwconfig - Finalize the HWConfig
*
* Free up the memory allocation holding the table.
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 68331c538b0a..55bc8b55fbc0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -12,6 +12,7 @@
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
+#include "intel_guc_print.h"
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
@@ -39,7 +40,6 @@ struct guc_log_section {
static void _guc_log_init_sizes(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
{
GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
@@ -82,12 +82,12 @@ static void _guc_log_init_sizes(struct intel_guc_log *log)
}
if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
- drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
+ guc_err(guc, "Mis-aligned log %s size: 0x%X vs 0x%X!\n",
sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
if (!log->sizes[i].count) {
- drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
+ guc_err(guc, "Zero log %s size!\n", sections[i].name);
} else {
/* Size is +1 unit */
log->sizes[i].count--;
@@ -95,14 +95,14 @@ static void _guc_log_init_sizes(struct intel_guc_log *log)
/* Clip to field size */
if (log->sizes[i].count > sections[i].max) {
- drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
+ guc_err(guc, "log %s size too large: %d vs %d!\n",
sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
log->sizes[i].count = sections[i].max;
}
}
if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
- drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
+ guc_err(guc, "Unit mismatch for crash and debug sections: %d vs %d!\n",
log->sizes[GUC_LOG_SECTIONS_CRASH].units,
log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
@@ -333,8 +333,7 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
- "GuC log buffer overflow\n");
+ guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n");
}
return overflow;
@@ -374,6 +373,7 @@ size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
struct guc_log_buffer_state log_buf_state_local;
@@ -383,7 +383,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (WARN_ON(!intel_guc_log_relay_created(log)))
+ if (guc_WARN_ON(guc, !intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
@@ -398,7 +398,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
* Used rate limited to avoid deluge of messages, logs might be
* getting consumed by User at a slow rate.
*/
- DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
+ guc_err_ratelimited(guc, "no sub-buffer to copy general logs\n");
log->relay.full_count++;
goto out_unlock;
@@ -451,7 +451,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
- DRM_ERROR("invalid log buffer state\n");
+ guc_err(guc, "invalid log buffer state\n");
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
@@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -542,12 +542,15 @@ static int guc_log_relay_create(struct intel_guc_log *log)
*/
n_subbufs = 8;
+ if (!guc->dbgfs_node)
+ return -ENOENT;
+
guc_log_relay_chan = relay_open("guc_log",
- dev_priv->drm.primary->debugfs_root,
+ guc->dbgfs_node,
subbuf_size, n_subbufs,
- &relay_callbacks, dev_priv);
+ &relay_callbacks, i915);
if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ guc_err(guc, "Couldn't create relay channel for logging\n");
ret = -ENOMEM;
return ret;
@@ -570,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -579,7 +582,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
@@ -596,9 +599,8 @@ static u32 __get_default_log_level(struct intel_guc_log *log)
}
if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915->params.guc_log_level,
- "verbosity too high");
+ guc_warn(guc, "Log verbosity param out of range: %d > %d!\n",
+ i915->params.guc_log_level, GUC_LOG_LEVEL_MAX);
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
@@ -641,15 +643,15 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->buf_addr = vaddr;
log->level = __get_default_log_level(log);
- DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
- log->level, str_enabled_disabled(log->level),
- str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
- GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
+ guc_dbg(guc, "guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
+ log->level, str_enabled_disabled(log->level),
+ str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
err:
- DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
+ guc_err(guc, "Failed to allocate or map log buffer %pe\n", ERR_PTR(ret));
return ret;
}
@@ -662,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -676,25 +678,25 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
if (ret) {
- DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
+ guc_dbg(guc, "guc_log_control action failed %pe\n", ERR_PTR(ret));
goto out_unlock;
}
log->level = level;
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -905,7 +907,7 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
- DRM_DEBUG("Failed to pin object\n");
+ guc_dbg(guc, "Failed to pin log object: %pe\n", map);
drm_puts(p, "(log data unaccessible)\n");
free_page((unsigned long)page);
return PTR_ERR(map);
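
Both the register-capture path earlier and the relay copy path in intel_guc_log.c above validate the read/write offsets taken from the GuC-shared log buffer state before using them: if either offset exceeds the buffer size, the offsets are treated as unreliable and the whole buffer is copied instead. A small sketch of that defensive check on untrusted shared-memory state (names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct log_state { uint32_t read_offset, write_offset, size; };

/* clamp offsets read from shared memory before trusting them */
static void sanitize(struct log_state *s)
{
	if (s->read_offset > s->size || s->write_offset > s->size) {
		fprintf(stderr, "invalid log buffer state, copying whole buffer\n");
		s->read_offset = 0;
		s->write_offset = s->size;
	}
}

int main(void)
{
	struct log_state s = { .read_offset = 0x5000, .write_offset = 0x100, .size = 0x4000 };

	sanitize(&s);
	printf("read=0x%X write=0x%X\n", s.read_offset, s.write_offset);
	return 0;
}
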
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
new file mode 100644
index 000000000000..2465d05638b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GUC_PRINT__
+#define __INTEL_GUC_PRINT__
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
+
+#define guc_printk(_guc, _level, _fmt, ...) \
+ gt_##_level(guc_to_gt(_guc), "GUC: " _fmt, ##__VA_ARGS__)
+
+#define guc_err(_guc, _fmt, ...) \
+ guc_printk((_guc), err, _fmt, ##__VA_ARGS__)
+
+#define guc_warn(_guc, _fmt, ...) \
+ guc_printk((_guc), warn, _fmt, ##__VA_ARGS__)
+
+#define guc_notice(_guc, _fmt, ...) \
+ guc_printk((_guc), notice, _fmt, ##__VA_ARGS__)
+
+#define guc_info(_guc, _fmt, ...) \
+ guc_printk((_guc), info, _fmt, ##__VA_ARGS__)
+
+#define guc_dbg(_guc, _fmt, ...) \
+ guc_printk((_guc), dbg, _fmt, ##__VA_ARGS__)
+
+#define guc_err_ratelimited(_guc, _fmt, ...) \
+ guc_printk((_guc), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define guc_notice_ratelimited(_guc, _fmt, ...) \
+ guc_printk((_guc), notice_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define guc_probe_error(_guc, _fmt, ...) \
+ guc_printk((_guc), probe_error, _fmt, ##__VA_ARGS__)
+
+#define guc_WARN(_guc, _cond, _fmt, ...) \
+ gt_WARN(guc_to_gt(_guc), _cond, "GUC: " _fmt, ##__VA_ARGS__)
+
+#define guc_WARN_ONCE(_guc, _cond, _fmt, ...) \
+ gt_WARN_ONCE(guc_to_gt(_guc), _cond, "GUC: " _fmt, ##__VA_ARGS__)
+
+#define guc_WARN_ON(_guc, _cond) \
+ gt_WARN(guc_to_gt(_guc), _cond, "%s(%s)", "guc_WARN_ON", __stringify(_cond))
+
+#define guc_WARN_ON_ONCE(_guc, _cond) \
+ gt_WARN_ONCE(guc_to_gt(_guc), _cond, "%s(%s)", "guc_WARN_ON_ONCE", __stringify(_cond))
+
+#endif /* __INTEL_GUC_PRINT__ */
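
The new intel_guc_print.h above implements guc_err()/guc_warn()/guc_dbg() and friends as thin wrappers that prepend a "GUC: " prefix and forward to the per-GT gt_* print helpers, so every GuC message automatically carries device and GT context. Sub-modules can stack a further prefix on top, as the CT code earlier in this patch does with its "CT: " wrappers. A standalone sketch of the layered-prefix macro idea (printf stands in for the gt_* helpers; ##__VA_ARGS__ follows the kernel's GNU C convention):

#include <stdio.h>

/* stand-in for the gt_* layer */
#define gt_err(fmt, ...)  fprintf(stderr, "GT0: " fmt, ##__VA_ARGS__)

/* GuC layer adds its prefix and forwards */
#define guc_err(fmt, ...) gt_err("GUC: " fmt, ##__VA_ARGS__)

/* a sub-module such as CT stacks one more prefix */
#define ct_err(fmt, ...)  guc_err("CT: " fmt, ##__VA_ARGS__)

int main(void)
{
	ct_err("request %u failed\n", 42u);	/* prints "GT0: GUC: CT: request 42 failed" */
	return 0;
}
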
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index b5855091cf6a..1adec6de223c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -6,25 +6,15 @@
#include <linux/string_helpers.h>
#include "intel_guc_rc.h"
+#include "intel_guc_print.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
static bool __guc_rc_supported(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
- /*
- * Wa_14017073508: mtl
- * Do not enable gucrc to avoid additional interrupts which
- * may disrupt pcode wa.
- */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- return false;
-
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(gt->i915) >= 12;
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
@@ -70,13 +60,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
- i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
- str_enable_disable(enable), ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to %s RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
- drm_info(&gt->i915->drm, "GuC RC: %s\n",
- str_enabled_disabled(enable));
+ guc_info(guc, "RC %s\n", str_enabled_disabled(enable));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 9915de32e894..3fd798837502 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -18,8 +18,6 @@
#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT)
#define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
-#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
-#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
@@ -32,6 +30,8 @@
#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
+#define GUC_HEADER_INFO _MMIO(0xc014)
+
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
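
With the raw GS_BOOTROM_RSA_FAILED/GS_BOOTROM_JUMP_PASSED encodings dropped above, the load-status decode in intel_guc_fw.c now extracts the BootROM and uKernel fields from GUC_STATUS and compares them against the named INTEL_BOOTROM_STATUS_* / INTEL_GUC_LOAD_STATUS_* values. A small sketch of that shift-and-mask field extraction (mask and shift values copied from the header above; the status value is made up):

#include <stdio.h>

#define GS_BOOTROM_SHIFT	1
#define GS_BOOTROM_MASK		(0x7F << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT	8
#define GS_UKERNEL_MASK		(0xFF << GS_UKERNEL_SHIFT)

static unsigned int field_get(unsigned int mask, unsigned int shift, unsigned int val)
{
	return (val & mask) >> shift;
}

int main(void)
{
	unsigned int status = 0x000080EC;	/* example raw GUC_STATUS readout */

	printf("BootROM = 0x%02X, UKernel = 0x%02X\n",
	       field_get(GS_BOOTROM_MASK, GS_BOOTROM_SHIFT, status),
	       field_get(GS_UKERNEL_MASK, GS_UKERNEL_SHIFT, status));
	return 0;
}
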
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 63464933cbce..026d73855f36 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
+#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
@@ -171,14 +172,12 @@ static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
- i915_probe_error(i915, "Failed to query task state (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
@@ -188,15 +187,14 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int ret;
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
- i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
- id, value, ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
+ id, value, ERR_PTR(ret));
return ret;
}
@@ -212,8 +210,8 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -236,9 +234,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
- drm_notice(&i915->drm,
- "Failed to send set_param for min freq(%d): (%d)\n",
- freq, ret);
+ guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
+ freq, ERR_PTR(ret));
}
return ret;
@@ -267,7 +264,6 @@ static void slpc_boost_work(struct work_struct *work)
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
int err;
@@ -275,9 +271,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
- i915_probe_error(i915,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
+ guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
return err;
}
@@ -338,7 +332,6 @@ static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
static int slpc_reset(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
@@ -346,15 +339,14 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
- i915_probe_error(i915, "SLPC not enabled! State = %s\n",
- slpc_get_state_string(slpc));
+ guc_probe_error(guc, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
return -EIO;
}
}
@@ -495,8 +487,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val < slpc->rp1_freq);
if (ret) {
- i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
+ ERR_PTR(ret));
goto out;
}
@@ -611,15 +603,12 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int slpc_min_freq;
int ret;
ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
if (ret) {
- drm_err(&i915->drm,
- "Failed to get min freq: (%d)\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
return false;
}
@@ -685,9 +674,8 @@ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
if (ret)
- drm_err(&i915->drm,
- "Override gucrc mode %d failed %d\n",
- mode, ret);
+ guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
+ mode, ERR_PTR(ret));
}
return ret;
@@ -702,9 +690,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
if (ret)
- drm_err(&i915->drm,
- "Unsetting gucrc mode failed %d\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
}
return ret;
@@ -725,7 +711,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
*/
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
int ret;
GEM_BUG_ON(!slpc->vma);
@@ -734,8 +720,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -743,7 +728,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
if (unlikely(ret < 0))
return ret;
- intel_guc_pm_intrmsk_enable(to_gt(i915));
+ intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
slpc_get_rp_values(slpc);
@@ -753,16 +738,14 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
return ret;
}
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
return ret;
}
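
The intel_guc_submission.c diff that follows converts intel_guc_submission_enable() to return an error and unwind in reverse order when a step fails (cancel the engine-stats worker, route semaphore interrupts back to the host) via the fail_stats/fail_sem labels. A minimal sketch of that goto-unwind shape (the step functions are placeholders, not the driver's):

#include <stdio.h>

static int step(const char *name)   { printf("%s\n", name); return 0; }
static void undo(const char *name)  { printf("undo %s\n", name); }

static int submission_enable(void)
{
	int ret;

	step("route semaphores to GuC");

	ret = step("init submission");
	if (ret)
		goto fail_sem;

	ret = step("init engine stats");
	if (ret)
		goto fail_sem;

	ret = step("init scheduling policy");
	if (ret)
		goto fail_stats;

	return 0;

fail_stats:
	undo("engine stats");
fail_sem:
	undo("semaphore routing");
	return ret;
}

int main(void) { return submission_enable(); }
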
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0a42f1807f52..88e881b100cf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -27,6 +27,7 @@
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
+#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -1351,6 +1352,16 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
return ns_to_ktime(total);
}
+static void guc_enable_busyness_worker(struct intel_guc *guc)
+{
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
+}
+
+static void guc_cancel_busyness_worker(struct intel_guc *guc)
+{
+ cancel_delayed_work_sync(&guc->timestamp.work);
+}
+
static void __reset_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1359,7 +1370,7 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1415,8 +1426,7 @@ static void guc_timestamp_ping(struct work_struct *wrk)
intel_gt_reset_unlock(gt, srcu);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static int guc_action_enable_usage_stats(struct intel_guc *guc)
@@ -1431,21 +1441,26 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static void guc_init_engine_stats(struct intel_guc *guc)
+static int guc_init_engine_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
+ int ret;
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ ret = guc_action_enable_usage_stats(guc);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
- int ret = guc_action_enable_usage_stats(guc);
+ if (ret)
+ guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
+ else
+ guc_enable_busyness_worker(guc);
- if (ret)
- drm_err(&gt->i915->drm,
- "Failed to enable usage stats: %d!\n", ret);
- }
+ return ret;
+}
+
+static void guc_fini_engine_stats(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
}
void intel_guc_busyness_park(struct intel_gt *gt)
@@ -1460,7 +1475,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
* and causes an unclaimed register access warning. Cancel the worker
* synchronously here.
*/
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1487,8 +1502,7 @@ void intel_guc_busyness_unpark(struct intel_gt *gt)
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static inline bool
@@ -1621,7 +1635,7 @@ static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/*
- * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
intel_engine_wait_for_pending_mi_fw(engine);
@@ -1702,7 +1716,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
goto next_context;
guilty = false;
- rq = intel_context_find_active_request(ce);
+ rq = intel_context_get_active_request(ce);
if (!rq) {
head = ce->ring->tail;
goto out_replay;
@@ -1715,6 +1729,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, guilty);
+ i915_request_put(rq);
out_replay:
guc_reset_state(ce, head, guilty);
next_context:
@@ -1890,7 +1905,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
if (guc->submission_initialized)
return 0;
- if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) {
ret = guc_lrc_desc_pool_create_v69(guc);
if (ret)
return ret;
@@ -2330,7 +2345,7 @@ static int register_context(struct intel_context *ce, bool loop)
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
ret = register_context_v70(guc, ce, loop);
else
ret = register_context_v69(guc, ce, loop);
@@ -2342,7 +2357,7 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
guc_context_policy_init_v70(ce, loop);
}
@@ -2534,6 +2549,7 @@ static void prepare_context_registration_info_v69(struct intel_context *ce)
i915_gem_object_is_lmem(ce->ring->vma->obj));
desc = __get_lrc_desc_v69(guc, ctx_id);
+ GEM_BUG_ON(!desc);
desc->engine_class = engine_class_to_guc_class(engine->class);
desc->engine_submit_mask = engine->logical_mask;
desc->hw_context_desc = ce->lrc.lrca;
@@ -2956,7 +2972,7 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, guc_id);
@@ -3283,7 +3299,7 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
@@ -3584,8 +3600,7 @@ static int guc_request_alloc(struct i915_request *rq)
intel_context_sched_disable_unpin(ce);
else if (intel_context_is_closed(ce))
if (wait_for(context_close_done(ce), 1500))
- drm_warn(&guc_to_gt(guc)->i915->drm,
- "timed out waiting on context sched close before realloc\n");
+ guc_warn(guc, "timed out waiting on context sched close before realloc\n");
/*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned trashing the
@@ -4101,9 +4116,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = guc_submit_request;
}
-static inline void guc_kernel_context_pin(struct intel_guc *guc,
- struct intel_context *ce)
+static inline int guc_kernel_context_pin(struct intel_guc *guc,
+ struct intel_context *ce)
{
+ int ret;
+
/*
* Note: we purposefully do not check the returns below because
* the registration can only fail if a reset is just starting.
@@ -4111,16 +4128,24 @@ static inline void guc_kernel_context_pin(struct intel_guc *guc,
* isn't happening and even it did this code would be run again.
*/
- if (context_guc_id_invalid(ce))
- pin_guc_id(guc, ce);
+ if (context_guc_id_invalid(ce)) {
+ ret = pin_guc_id(guc, ce);
+
+ if (ret < 0)
+ return ret;
+ }
if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
guc_context_init(ce);
- try_context_registration(ce, true);
+ ret = try_context_registration(ce, true);
+ if (ret)
+ unpin_guc_id(guc, ce);
+
+ return ret;
}
-static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+static inline int guc_init_submission(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -4147,9 +4172,17 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
struct intel_context *ce;
list_for_each_entry(ce, &engine->pinned_contexts_list,
- pinned_contexts_link)
- guc_kernel_context_pin(guc, ce);
+ pinned_contexts_link) {
+ int ret = guc_kernel_context_pin(guc, ce);
+
+ if (ret) {
+ /* No point in trying to clean up as i915 will wedge on failure */
+ return ret;
+ }
+ }
}
+
+ return 0;
}
static void guc_release(struct intel_engine_cs *engine)
@@ -4202,8 +4235,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
/* Wa_14014475959:dg2 */
- if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ if (engine->class == COMPUTE_CLASS)
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ IS_DG2(engine->i915))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4346,11 +4381,14 @@ static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
ret = intel_guc_send(guc, (u32 *)&policy->h2g,
__guc_scheduling_policy_action_size(policy));
- if (ret < 0)
+ if (ret < 0) {
+ guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n",
+ ERR_PTR(ret));
return ret;
+ }
if (ret != policy->count) {
- drm_warn(&guc_to_gt(guc)->i915->drm, "GuC global scheduler policy processed %d of %d KLVs!",
+ guc_warn(guc, "global scheduler policy processed %d of %d KLVs!",
ret, policy->count);
if (ret > policy->count)
return -EPROTO;
@@ -4364,9 +4402,9 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
struct scheduling_policy policy;
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
- int ret = 0;
+ int ret;
- if (GET_UC_VER(guc) < MAKE_UC_VER(70, 3, 0))
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
return 0;
__guc_scheduling_policy_start_klv(&policy);
@@ -4382,39 +4420,62 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
yield, ARRAY_SIZE(yield));
ret = __guc_action_set_scheduling_policies(guc, &policy);
- if (ret)
- i915_probe_error(gt->i915,
- "Failed to configure global scheduling policies: %pe!\n",
- ERR_PTR(ret));
}
return ret;
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ u32 val;
- /* Enable and route to GuC */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
- GUC_SEM_INTR_ROUTE_TO_GUC |
- GUC_SEM_INTR_ENABLE_ALL);
+ if (GRAPHICS_VER(gt->i915) < 12)
+ return;
+
+ if (to_guc)
+ val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
+ else
+ val = 0;
- guc_init_lrc_mapping(guc);
- guc_init_engine_stats(guc);
- guc_init_global_schedule_policy(guc);
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
}
-void intel_guc_submission_disable(struct intel_guc *guc)
+int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ /* Semaphore interrupt enable and route to GuC */
+ guc_route_semaphores(guc, true);
+
+ ret = guc_init_submission(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_engine_stats(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ goto fail_stats;
+
+ return 0;
+
+fail_stats:
+ guc_fini_engine_stats(guc);
+fail_sem:
+ guc_route_semaphores(guc, false);
+ return ret;
+}
- /* Note: By the time we're here, GuC may have already been reset */
+/* Note: By the time we're here, GuC may have already been reset */
+void intel_guc_submission_disable(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
- /* Disable and route to host */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
+ /* Semaphore interrupt disable and route to host */
+ guc_route_semaphores(guc, false);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -4484,21 +4545,18 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
struct intel_context *ce;
if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Invalid ctx_id %u\n", ctx_id);
+ guc_err(guc, "Invalid ctx_id %u\n", ctx_id);
return NULL;
}
ce = __get_context(guc, ctx_id);
if (unlikely(!ce)) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is NULL, ctx_id %u\n", ctx_id);
+ guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id);
return NULL;
}
if (unlikely(intel_context_is_child(ce))) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is child, ctx_id %u\n", ctx_id);
+ guc_err(guc, "Context is child, ctx_id %u\n", ctx_id);
return NULL;
}
@@ -4513,7 +4571,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
u32 ctx_id;
if (unlikely(len < 1)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
+ guc_err(guc, "Invalid length %u\n", len);
return -EPROTO;
}
ctx_id = msg[0];
@@ -4565,7 +4623,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
u32 ctx_id;
if (unlikely(len < 2)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
+ guc_err(guc, "Invalid length %u\n", len);
return -EPROTO;
}
ctx_id = msg[0];
@@ -4577,8 +4635,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
if (unlikely(context_destroyed(ce) ||
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Bad context sched_state 0x%x, ctx_id %u\n",
+ guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n",
ce->guc_state.sched_state, ctx_id);
return -EPROTO;
}
@@ -4662,12 +4719,16 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
+ guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+ ce->guc_id.id, ce->engine->name,
+ str_yes_no(intel_context_is_exiting(ce)),
+ str_yes_no(intel_context_is_banned(ce)));
+
if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
- drm_info(&guc_to_gt(guc)->i915->drm,
- "Ignoring context reset notification of exiting context 0x%04X on %s",
+ guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
@@ -4680,7 +4741,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
int ctx_id;
if (unlikely(len != 1)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ guc_err(guc, "Invalid length %u", len);
return -EPROTO;
}
@@ -4713,13 +4774,13 @@ int intel_guc_error_capture_process_msg(struct intel_guc *guc,
u32 status;
if (unlikely(len != 1)) {
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ guc_dbg(guc, "Invalid length %u", len);
return -EPROTO;
}
status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
- drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
+ guc_warn(guc, "No space for error capture");
intel_guc_capture_process(guc);
@@ -4751,24 +4812,36 @@ static void reset_fail_worker_func(struct work_struct *w)
guc->submission_state.reset_fail_mask = 0;
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
- if (likely(reset_fail_mask))
+ if (likely(reset_fail_mask)) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * GuC is toast at this point - it dead loops after sending the failed
+ * reset notification. So need to manually determine the guilty context.
+ * Note that it should be reliable to do this here because the GuC is
+ * toast and will not be scheduling behind the KMD's back.
+ */
+ for_each_engine_masked(engine, gt, reset_fail_mask, id)
+ intel_guc_find_hung_context(engine);
+
intel_gt_handle_error(gt, reset_fail_mask,
I915_ERROR_CAPTURE,
- "GuC failed to reset engine mask=0x%x\n",
+ "GuC failed to reset engine mask=0x%x",
reset_fail_mask);
+ }
}
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_engine_cs *engine;
- struct intel_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
unsigned long flags;
if (unlikely(len != 3)) {
- drm_err(&gt->i915->drm, "Invalid length %u", len);
+ guc_err(guc, "Invalid length %u", len);
return -EPROTO;
}
@@ -4778,8 +4851,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
engine = intel_guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
- drm_err(&gt->i915->drm,
- "Invalid engine %d:%d", guc_class, instance);
+ guc_err(guc, "Invalid engine %d:%d", guc_class, instance);
return -EPROTO;
}
@@ -4787,7 +4859,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
* This is an unexpected failure of a hardware feature. So, log a real
* error message not just the informational that comes with the reset.
*/
- drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
+ guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X",
guc_class, instance, engine->name, reason);
spin_lock_irqsave(&guc->submission_state.lock, flags);
@@ -4817,6 +4889,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
+ bool found;
+
if (!kref_get_unless_zero(&ce->ref))
continue;
@@ -4833,10 +4907,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
goto next;
}
+ found = false;
+ spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
+ found = true;
+ break;
+ }
+ spin_unlock(&ce->guc_state.lock);
+
+ if (found) {
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
@@ -4844,6 +4926,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock(&guc->context_lookup);
goto done;
}
+
next:
intel_context_put(ce);
xa_lock(&guc->context_lookup);
@@ -4905,6 +4988,9 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
if (!sched_engine)
return;
+ drm_printf(p, "GuC Submission API Version: %d.%d.%d\n",
+ guc->submission_version.major, guc->submission_version.minor,
+ guc->submission_version.patch);
drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
atomic_read(&guc->outstanding_submission_g2h));
drm_printf(p, "GuC tasklet count: %u\n",
@@ -5336,8 +5422,8 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
GEM_BUG_ON(!is_power_of_2(sibling->mask));
if (sibling->mask & ve->base.mask) {
- DRM_DEBUG("duplicate %s entry in load balancer\n",
- sibling->name);
+ guc_dbg(guc, "duplicate %s entry in load balancer\n",
+ sibling->name);
err = -EINVAL;
goto err_put;
}
@@ -5346,8 +5432,8 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
ve->base.logical_mask |= sibling->logical_mask;
if (n != 0 && ve->base.class != sibling->class) {
- DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
- sibling->class, ve->base.class);
+ guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
err = -EINVAL;
goto err_put;
} else if (n == 0) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 5a95a9f0a8e3..c57b29cdb1a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -15,7 +15,7 @@ struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
-void intel_guc_submission_enable(struct intel_guc *guc);
+int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 410905da8e97..04724ff56ded 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"
@@ -13,6 +14,15 @@
#include <linux/device/bus.h>
#include <linux/mei_aux.h>
+#define huc_printk(_huc, _level, _fmt, ...) \
+ gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__)
+#define huc_err(_huc, _fmt, ...) huc_printk((_huc), err, _fmt, ##__VA_ARGS__)
+#define huc_warn(_huc, _fmt, ...) huc_printk((_huc), warn, _fmt, ##__VA_ARGS__)
+#define huc_notice(_huc, _fmt, ...) huc_printk((_huc), notice, _fmt, ##__VA_ARGS__)
+#define huc_info(_huc, _fmt, ...) huc_printk((_huc), info, _fmt, ##__VA_ARGS__)
+#define huc_dbg(_huc, _fmt, ...) huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__)
+#define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__)
+
/**
* DOC: HuC
*
@@ -107,11 +117,9 @@ static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrti
if (!intel_huc_is_authenticated(huc)) {
if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI GSC init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI GSC\n");
else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI PXP init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI PXP\n");
else
MISSING_CASE(huc->delayed_load.status);
@@ -174,8 +182,7 @@ static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *d
case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
- drm_info(&huc_to_gt(huc)->i915->drm,
- "mei driver not bound, disabling HuC load\n");
+ huc_info(huc, "MEI driver not bound, disabling load\n");
gsc_init_error(huc);
break;
}
@@ -183,7 +190,7 @@ static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *d
return 0;
}
-void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
+void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
int ret;
@@ -193,14 +200,13 @@ void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus
huc->delayed_load.nb.notifier_call = gsc_notifier;
ret = bus_register_notifier(bus, &huc->delayed_load.nb);
if (ret) {
- drm_err(&huc_to_gt(huc)->i915->drm,
- "failed to register GSC notifier\n");
+ huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
huc->delayed_load.nb.notifier_call = NULL;
gsc_init_error(huc);
}
}
-void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
+void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
if (!huc->delayed_load.nb.notifier_call)
return;
@@ -235,6 +241,13 @@ static void delayed_huc_load_fini(struct intel_huc *huc)
i915_sw_fence_fini(&huc->delayed_load.fence);
}
+int intel_huc_sanitize(struct intel_huc *huc)
+{
+ delayed_huc_load_complete(huc);
+ intel_uc_fw_sanitize(&huc->fw);
+ return 0;
+}
+
static bool vcs_supported(struct intel_gt *gt)
{
intel_engine_mask_t mask = gt->info.engine_mask;
@@ -306,29 +319,25 @@ static int check_huc_loading_mode(struct intel_huc *huc)
GSC_LOADS_HUC;
if (fw_needs_gsc != hw_uses_gsc) {
- drm_err(&gt->i915->drm,
- "mismatch between HuC FW (%s) and HW (%s) load modes\n",
- HUC_LOAD_MODE_STRING(fw_needs_gsc),
- HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ huc_err(huc, "mismatch between FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc));
return -ENOEXEC;
}
/* make sure we can access the GSC via the mei driver if we need it */
if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
fw_needs_gsc) {
- drm_info(&gt->i915->drm,
- "Can't load HuC due to missing MEI modules\n");
+ huc_info(huc, "can't load due to missing MEI modules\n");
return -EIO;
}
- drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+ huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(fw_needs_gsc));
return 0;
}
int intel_huc_init(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
err = check_huc_loading_mode(huc);
@@ -345,7 +354,7 @@ int intel_huc_init(struct intel_huc *huc)
out:
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
- drm_info(&i915->drm, "HuC init failed with %d\n", err);
+ huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
return err;
}
@@ -389,13 +398,13 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
delayed_huc_load_complete(huc);
if (ret) {
- drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
+ huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret));
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
- drm_info(&gt->i915->drm, "HuC authenticated\n");
+ huc_info(huc, "authenticated!\n");
return 0;
}
@@ -430,7 +439,7 @@ int intel_huc_auth(struct intel_huc *huc)
ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
- DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret));
goto fail;
}
@@ -442,7 +451,7 @@ int intel_huc_auth(struct intel_huc *huc)
return 0;
fail:
- i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ huc_probe_error(huc, "authentication failed %pe\n", ERR_PTR(ret));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 52db03620c60..0789184d81a2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,6 +41,7 @@ struct intel_huc {
} delayed_load;
};
+int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
@@ -51,14 +52,8 @@ int intel_huc_check_status(struct intel_huc *huc);
void intel_huc_update_auth_status(struct intel_huc *huc);
bool intel_huc_is_authenticated(struct intel_huc *huc);
-void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
-void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
-
-static inline int intel_huc_sanitize(struct intel_huc *huc)
-{
- intel_uc_fw_sanitize(&huc->fw);
- return 0;
-}
+void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus);
+void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus);
static inline bool intel_huc_is_supported(struct intel_huc *huc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 4f246416db17..534b0aa43316 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -32,7 +32,7 @@ int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc)
GEM_WARN_ON(intel_uc_fw_is_loaded(&huc->fw));
- ret = intel_pxp_huc_load_and_auth(&huc_to_gt(huc)->pxp);
+ ret = intel_pxp_huc_load_and_auth(huc_to_gt(huc)->i915->pxp);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 2a508b137e90..4ccb4be4c9cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -6,9 +6,13 @@
#include <linux/string_helpers.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_reset.h"
+#include "intel_gsc_fw.h"
+#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
+#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"
@@ -65,29 +69,29 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
ret = intel_reset_guc(gt);
if (ret) {
- DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
+ gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
return ret;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
- WARN(!(guc_status & GS_MIA_IN_RESET),
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
+ gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
return ret;
}
static void __confirm_options(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct drm_i915_private *i915 = gt->i915;
- drm_dbg(&i915->drm,
- "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
- i915->params.enable_guc,
- str_yes_no(intel_uc_wants_guc(uc)),
- str_yes_no(intel_uc_wants_guc_submission(uc)),
- str_yes_no(intel_uc_wants_huc(uc)),
- str_yes_no(intel_uc_wants_guc_slpc(uc)));
+ gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
+ i915->params.enable_guc,
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_wants_guc_slpc(uc)));
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -98,26 +102,22 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "HuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC submission is N/A");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC submission is N/A");
if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "undocumented flag");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
@@ -126,6 +126,7 @@ void intel_uc_init_early(struct intel_uc *uc)
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
+ intel_gsc_uc_init_early(&uc->gsc);
__confirm_options(uc);
@@ -138,6 +139,7 @@ void intel_uc_init_early(struct intel_uc *uc)
void intel_uc_init_late(struct intel_uc *uc)
{
intel_guc_init_late(&uc->guc);
+ intel_gsc_uc_load_start(&uc->gsc);
}
void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -249,15 +251,13 @@ static int guc_enable_communication(struct intel_guc *guc)
intel_guc_ct_event_handler(&guc->ct);
spin_unlock_irq(gt->irq_lock);
- drm_dbg(&i915->drm, "GuC communication enabled\n");
+ guc_dbg(guc, "communication enabled\n");
return 0;
}
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
/*
* Events generated during or after CT disable are logged by guc in
* via mmio. Make sure the register is clear before disabling CT since
@@ -277,11 +277,12 @@ static void guc_disable_communication(struct intel_guc *guc)
*/
guc_get_mmio_msg(guc);
- drm_dbg(&i915->drm, "GuC communication disabled\n");
+ guc_dbg(guc, "communication disabled\n");
}
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
+ struct intel_gt *gt = uc_to_gt(uc);
int err;
GEM_BUG_ON(!intel_uc_wants_guc(uc));
@@ -290,21 +291,30 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
if (err) {
/* Make sure we transition out of transient "SELECTED" state */
if (intel_uc_wants_huc(uc)) {
- drm_dbg(&uc_to_gt(uc)->i915->drm,
- "Failed to fetch GuC: %d disabling HuC\n", err);
+ gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
intel_uc_fw_change_status(&uc->huc.fw,
INTEL_UC_FIRMWARE_ERROR);
}
+ if (intel_uc_wants_gsc_uc(uc)) {
+ gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
+ intel_uc_fw_change_status(&uc->gsc.fw,
+ INTEL_UC_FIRMWARE_ERROR);
+ }
+
return;
}
if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
+
+ if (intel_uc_wants_gsc_uc(uc))
+ intel_uc_fw_fetch(&uc->gsc.fw);
}
static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
+ intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
@@ -330,11 +340,15 @@ static int __uc_init(struct intel_uc *uc)
if (intel_uc_uses_huc(uc))
intel_huc_init(huc);
+ if (intel_uc_uses_gsc_uc(uc))
+ intel_gsc_uc_init(&uc->gsc);
+
return 0;
}
static void __uc_fini(struct intel_uc *uc)
{
+ intel_gsc_uc_fini(&uc->gsc);
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
}
@@ -364,7 +378,7 @@ static int uc_init_wopcm(struct intel_uc *uc)
int err;
if (unlikely(!base || !size)) {
- i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
+ gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
return -E2BIG;
}
@@ -395,13 +409,13 @@ static int uc_init_wopcm(struct intel_uc *uc)
return 0;
err_out:
- i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
- i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
- i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
- intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
- i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
- i915_mmio_reg_offset(GUC_WOPCM_SIZE),
- intel_uncore_read(uncore, GUC_WOPCM_SIZE));
+ gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
+ gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ i915_mmio_reg_offset(GUC_WOPCM_SIZE),
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
return err;
}
@@ -431,20 +445,19 @@ static int __uc_check_hw(struct intel_uc *uc)
return 0;
}
-static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
+static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
-
- drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
- intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
- fw->file_selected.major_ver,
- fw->file_selected.minor_ver,
- fw->file_selected.patch_ver);
+ gt_info(gt, "%s firmware %s version %u.%u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
+ fw->file_selected.ver.major,
+ fw->file_selected.ver.minor,
+ fw->file_selected.ver.patch);
}
static int __uc_init_hw(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct drm_i915_private *i915 = gt->i915;
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret, attempts;
@@ -452,10 +465,10 @@ static int __uc_init_hw(struct intel_uc *uc)
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!intel_uc_wants_guc(uc));
- print_fw_ver(uc, &guc->fw);
+ print_fw_ver(gt, &guc->fw);
if (intel_uc_uses_huc(uc))
- print_fw_ver(uc, &huc->fw);
+ print_fw_ver(gt, &huc->fw);
if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
@@ -496,8 +509,8 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret == 0)
break;
- DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
- "retry %d more time(s)\n", ret, attempts);
+ gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
+ ERR_PTR(ret), attempts);
}
/* Did we succeded or run out of retries? */
@@ -519,8 +532,11 @@ static int __uc_init_hw(struct intel_uc *uc)
else
intel_huc_auth(huc);
- if (intel_uc_uses_guc_submission(uc))
- intel_guc_submission_enable(guc);
+ if (intel_uc_uses_guc_submission(uc)) {
+ ret = intel_guc_submission_enable(guc);
+ if (ret)
+ goto err_log_capture;
+ }
if (intel_uc_uses_guc_slpc(uc)) {
ret = intel_guc_slpc_enable(&guc->slpc);
@@ -531,10 +547,8 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- drm_info(&i915->drm, "GuC submission %s\n",
- str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
- drm_info(&i915->drm, "GuC SLPC %s\n",
- str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
+ guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
+ guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
@@ -552,12 +566,12 @@ err_out:
__uc_sanitize(uc);
if (!ret) {
- drm_notice(&i915->drm, "GuC is uninitialized\n");
+ gt_notice(gt, "GuC is uninitialized\n");
/* We want to run without GuC submission */
return 0;
}
- i915_probe_error(i915, "GuC initialization failed %d\n", ret);
+ gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));
/* We want to keep KMS alive */
return -EIO;
@@ -659,6 +673,9 @@ void intel_uc_suspend(struct intel_uc *uc)
intel_wakeref_t wakeref;
int err;
+ /* flush the GSC worker */
+ intel_gsc_uc_flush_work(&uc->gsc);
+
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
return;
@@ -667,7 +684,7 @@ void intel_uc_suspend(struct intel_uc *uc)
with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
err = intel_guc_suspend(guc);
if (err)
- DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+ guc_dbg(guc, "Failed to suspend, %pe", ERR_PTR(err));
}
}
@@ -695,10 +712,12 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
err = intel_guc_resume(guc);
if (err) {
- DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ guc_dbg(guc, "Failed to resume, %pe", ERR_PTR(err));
return err;
}
+ intel_gsc_uc_resume(&uc->gsc);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index a8f38c2c60e2..5d0f1bcc381e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -6,6 +6,7 @@
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_
+#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_rc.h"
#include "intel_guc_submission.h"
@@ -27,6 +28,7 @@ struct intel_uc_ops {
struct intel_uc {
struct intel_uc_ops const *ops;
+ struct intel_gsc_uc gsc;
struct intel_guc guc;
struct intel_huc huc;
@@ -87,6 +89,7 @@ uc_state_checkers(huc, huc);
uc_state_checkers(guc, guc_submission);
uc_state_checkers(guc, guc_slpc);
uc_state_checkers(guc, guc_rc);
+uc_state_checkers(gsc, gsc_uc);
#undef uc_state_checkers
#undef __uc_state_checker
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index 284d6fbc2d08..2f93cc4e408a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -54,6 +54,8 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
if (IS_ERR(root))
return;
+ uc->guc.dbgfs_node = root;
+
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
intel_guc_debugfs_register(&uc->guc, root);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 0c80ba51a4bd..c36e68e23a14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_print.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -19,11 +20,18 @@
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
- if (type == INTEL_UC_FW_TYPE_GUC)
+ GEM_BUG_ON(type >= INTEL_UC_FW_NUM_TYPES);
+
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
return container_of(uc_fw, struct intel_gt, uc.guc.fw);
+ case INTEL_UC_FW_TYPE_HUC:
+ return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+ case INTEL_UC_FW_TYPE_GSC:
+ return container_of(uc_fw, struct intel_gt, uc.gsc.fw);
+ }
- GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
- return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+ return NULL;
}
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
@@ -37,11 +45,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -118,35 +125,35 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
*/
#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
"i915/" \
- __stringify(prefix_) name_ ".bin"
+ __stringify(prefix_) "_" name_ ".bin"
#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
"i915/" \
- __stringify(prefix_) name_ \
+ __stringify(prefix_) "_" name_ "_" \
__stringify(major_) ".bin"
#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
"i915/" \
- __stringify(prefix_) name_ \
+ __stringify(prefix_) "_" name_ "_" \
__stringify(major_) "." \
__stringify(minor_) "." \
__stringify(patch_) ".bin"
/* Minor for internal driver use, not part of file name */
#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
- __MAKE_UC_FW_PATH_MAJOR(prefix_, "_guc_", major_)
+ __MAKE_UC_FW_PATH_MAJOR(prefix_, "guc", major_)
#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH_MMP(prefix_, "_guc_", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH_MMP(prefix_, "guc", major_, minor_, patch_)
#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
- __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "huc")
#define MAKE_HUC_FW_PATH_GSC(prefix_) \
- __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc_gsc")
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "huc_gsc")
#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH_MMP(prefix_, "huc", major_, minor_, patch_)
/*
* All blobs need to be declared via MODULE_FIRMWARE().
@@ -238,7 +245,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
- static bool verified;
+ static bool verified[INTEL_UC_FW_NUM_TYPES];
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
@@ -247,6 +254,14 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
bool found;
/*
+ * GSC FW support is still not fully in place, so we're not defining
+ * the FW blob yet because we don't want the driver to attempt to load
+ * it until we're ready for it.
+ */
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
+ return;
+
+ /*
* The only difference between the ADL GuC FWs is the HWConfig support.
* ADL-N does not support HWConfig, so we should use the same binary as
* ADL-S, otherwise the GuC might attempt to fetch a config table that
@@ -278,8 +293,8 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->file_selected.path = blob->path;
uc_fw->file_wanted.path = blob->path;
- uc_fw->file_wanted.major_ver = blob->major;
- uc_fw->file_wanted.minor_ver = blob->minor;
+ uc_fw->file_wanted.ver.major = blob->major;
+ uc_fw->file_wanted.ver.minor = blob->minor;
uc_fw->loaded_via_gsc = blob->loaded_via_gsc;
found = true;
break;
@@ -291,8 +306,8 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
}
/* make sure the list is ordered as expected */
- if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
- verified = true;
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified[uc_fw->type]) {
+ verified[uc_fw->type] = true;
for (i = 1; i < fw_count; i++) {
/* Next platform is good: */
@@ -343,7 +358,8 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
continue;
bad:
- drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_uc_fw_type_repr(uc_fw->type),
intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
fw_blobs[i - 1].blob.legacy ? "L" : "v",
fw_blobs[i - 1].blob.major,
@@ -374,6 +390,11 @@ static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
return "";
}
+static const char *__override_gsc_firmware_path(struct drm_i915_private *i915)
+{
+ return i915->params.gsc_firmware_path;
+}
+
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
const char *path = NULL;
@@ -385,6 +406,9 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
case INTEL_UC_FW_TYPE_HUC:
path = __override_huc_firmware_path(i915);
break;
+ case INTEL_UC_FW_TYPE_GSC:
+ path = __override_gsc_firmware_path(i915);
+ break;
}
if (unlikely(path)) {
@@ -438,59 +462,127 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
- uc_fw->file_wanted.major_ver += 1;
- uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->file_wanted.ver.major += 1;
+ uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
- uc_fw->file_wanted.minor_ver += 1;
+ uc_fw->file_wanted.ver.minor += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->file_wanted.major_ver &&
+ } else if (uc_fw->file_wanted.ver.major &&
i915_inject_probe_error(i915, e)) {
/* require prev major version */
- uc_fw->file_wanted.major_ver -= 1;
- uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->file_wanted.ver.major -= 1;
+ uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->file_wanted.minor_ver &&
+ } else if (uc_fw->file_wanted.ver.minor &&
i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
- uc_fw->file_wanted.minor_ver -= 1;
+ uc_fw->file_wanted.ver.minor -= 1;
uc_fw->user_overridden = user;
} else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
- uc_fw->file_wanted.major_ver = 0;
- uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->file_wanted.ver.major = 0;
+ uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = true;
}
}
-static int check_gsc_manifest(const struct firmware *fw,
+static int check_gsc_manifest(struct intel_gt *gt,
+ const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
u32 *dw = (u32 *)fw->data;
- u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
- u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
+ u32 version_hi, version_lo;
+ size_t min_size;
+
+ /* Check the size of the blob before examining buffer contents */
+ min_size = sizeof(u32) * (HUC_GSC_VERSION_LO_DW + 1);
+ if (unlikely(fw->size < min_size)) {
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, min_size);
+ return -ENODATA;
+ }
+
+ version_hi = dw[HUC_GSC_VERSION_HI_DW];
+ version_lo = dw[HUC_GSC_VERSION_LO_DW];
- uc_fw->file_selected.major_ver = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
- uc_fw->file_selected.minor_ver = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
- uc_fw->file_selected.patch_ver = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
+ uc_fw->file_selected.ver.major = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.ver.minor = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.ver.patch = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
return 0;
}
+static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value)
+{
+ /* Get version numbers from the CSS header */
+ ver->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css_value);
+ ver->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css_value);
+ ver->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css_value);
+}
+
+static void guc_read_css_info(struct intel_uc_fw *uc_fw, struct uc_css_header *css)
+{
+ struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
+
+ /*
+ * The GuC firmware includes an extra version number to specify the
+ * submission API level. This allows submission code to work with
+ * multiple GuC versions without having to know the absolute firmware
+ * version number (there are likely to be multiple firmware releases
+ * which all support the same submission API level).
+ *
+ * Note that the spec for the CSS header defines this version number
+ * as 'vf_version' as it was originally intended for virtualisation.
+ * However, it is applicable to native submission as well.
+ *
+ * Unfortunately, due to an oversight, this version number was only
+ * exposed in the CSS header from v70.6.0.
+ */
+ if (uc_fw->file_selected.ver.major >= 70) {
+ if (uc_fw->file_selected.ver.minor >= 6) {
+ /* v70.6.0 adds CSS header support */
+ uc_unpack_css_version(&guc->submission_version, css->vf_version);
+ } else if (uc_fw->file_selected.ver.minor >= 3) {
+ /* v70.3.0 introduced v1.1.0 */
+ guc->submission_version.major = 1;
+ guc->submission_version.minor = 1;
+ guc->submission_version.patch = 0;
+ } else {
+ /* v70.0.0 introduced v1.0.0 */
+ guc->submission_version.major = 1;
+ guc->submission_version.minor = 0;
+ guc->submission_version.patch = 0;
+ }
+ } else if (uc_fw->file_selected.ver.major >= 69) {
+ /* v69.0.0 introduced v0.10.0 */
+ guc->submission_version.major = 0;
+ guc->submission_version.minor = 10;
+ guc->submission_version.patch = 0;
+ } else {
+ /* Prior versions were v0.1.0 */
+ guc->submission_version.major = 0;
+ guc->submission_version.minor = 1;
+ guc->submission_version.patch = 0;
+ }
+
+ uc_fw->private_data_size = css->private_data_size;
+}
+
static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = gt->i915;
struct uc_css_header *css;
size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -500,10 +592,9 @@ static int check_ccs_header(struct intel_gt *gt,
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm,
- "%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -516,31 +607,107 @@ static int check_ccs_header(struct intel_gt *gt,
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, size);
return -ENOEXEC;
}
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= gt->wopcm.size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- size, (size_t)gt->wopcm.size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, (size_t)gt->wopcm.size);
return -E2BIG;
}
- /* Get version numbers from the CSS header */
- uc_fw->file_selected.major_ver = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->sw_version);
- uc_fw->file_selected.minor_ver = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->sw_version);
- uc_fw->file_selected.patch_ver = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
- css->sw_version);
+ uc_unpack_css_version(&uc_fw->file_selected.ver, css->sw_version);
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
- uc_fw->private_data_size = css->private_data_size;
+ guc_read_css_info(uc_fw, css);
+
+ return 0;
+}
+
+static bool is_ver_8bit(struct intel_uc_fw_ver *ver)
+{
+ return ver->major < 0xFF && ver->minor < 0xFF && ver->patch < 0xFF;
+}
+
+static int guc_check_version_range(struct intel_uc_fw *uc_fw)
+{
+ struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+
+ /*
+ * GuC version number components are defined as being 8-bits.
+ * The submission code relies on this to optimise version comparison
+ * tests. So enforce the restriction here.
+ */
+
+ if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
+ gt_warn(gt, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
+ return -EINVAL;
+ }
+
+ if (!is_ver_8bit(&guc->submission_version)) {
+ gt_warn(gt, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ guc->submission_version.major,
+ guc->submission_version.minor,
+ guc->submission_version.patch);
+ return -EINVAL;
+ }
+
+ return i915_inject_probe_error(gt->i915, -EINVAL);
+}
+
+static int check_fw_header(struct intel_gt *gt,
+ const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
+{
+ int err = 0;
+
+ /* GSC FW version is queried after the FW is loaded */
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
+ return 0;
+
+ if (uc_fw->loaded_via_gsc)
+ err = check_gsc_manifest(gt, fw, uc_fw);
+ else
+ err = check_ccs_header(gt, fw, uc_fw);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct device *dev = gt->i915->drm.dev;
+ int err;
+
+ err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);
+
+ if (err)
+ return err;
+
+ if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
+ gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+
+ /* try to find another blob to load */
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENOENT;
+ }
return 0;
}
@@ -558,7 +725,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct drm_i915_private *i915 = gt->i915;
struct intel_uc_fw_file file_ideal;
- struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
bool old_ver = false;
@@ -574,20 +740,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
- if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
-
- /* try to find another blob to load */
- release_firmware(fw);
- err = -ENOENT;
- }
-
/* Any error is terminal if overriding. Don't bother searching for older versions */
if (err && intel_uc_fw_is_overridden(uc_fw))
goto fail;
@@ -608,50 +763,51 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
break;
}
- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
}
if (err)
goto fail;
- if (uc_fw->loaded_via_gsc)
- err = check_gsc_manifest(fw, uc_fw);
- else
- err = check_ccs_header(gt, fw, uc_fw);
+ err = check_fw_header(gt, fw, uc_fw);
if (err)
goto fail;
- if (uc_fw->file_wanted.major_ver) {
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+ err = guc_check_version_range(uc_fw);
+ if (err)
+ goto fail;
+ }
+
+ if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
/* Check the file's major version was as it claimed */
- if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver,
- uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver);
+ if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
+ gt_notice(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw)) {
err = -ENOEXEC;
goto fail;
}
} else {
- if (uc_fw->file_selected.minor_ver < uc_fw->file_wanted.minor_ver)
+ if (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor)
old_ver = true;
}
}
- if (old_ver) {
+ if (old_ver && uc_fw->file_selected.ver.major) {
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
- drm_notice(&i915->drm,
- "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_wanted.path,
- uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver,
- uc_fw->file_selected.path,
- uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
+ gt_notice(gt, "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
+ gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
}
if (HAS_LMEM(i915)) {
@@ -679,10 +835,10 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
- drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+ gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
+ gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
return err;
@@ -788,9 +944,9 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uncore_read_fw(uncore, DMA_CTRL));
+ gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
@@ -800,6 +956,19 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
return ret;
}
+int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+
+ GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
+
+ gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+
+ return err;
+}
+
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
@@ -836,11 +1005,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
return 0;
fail:
- i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- err);
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
- return err;
+ return intel_uc_fw_mark_load_failed(uc_fw, err);
}
static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
@@ -924,15 +1089,15 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out;
}
err = uc_fw_rsa_data_create(uc_fw);
if (err) {
- DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out_unpin;
}
@@ -1054,7 +1219,7 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
*/
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
- u32 ver_sel, ver_want;
+ bool got_wanted;
drm_printf(p, "%s firmware: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
@@ -1063,25 +1228,32 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
- ver_sel = MAKE_UC_VER(uc_fw->file_selected.major_ver,
- uc_fw->file_selected.minor_ver,
- uc_fw->file_selected.patch_ver);
- ver_want = MAKE_UC_VER(uc_fw->file_wanted.major_ver,
- uc_fw->file_wanted.minor_ver,
- uc_fw->file_wanted.patch_ver);
- if (ver_sel < ver_want)
+
+ if (uc_fw->file_selected.ver.major < uc_fw->file_wanted.ver.major)
+ got_wanted = false;
+ else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
+ (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor))
+ got_wanted = false;
+ else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
+ (uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
+ (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
+ got_wanted = false;
+ else
+ got_wanted = true;
+
+ if (!got_wanted)
drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
- uc_fw->file_wanted.major_ver,
- uc_fw->file_wanted.minor_ver,
- uc_fw->file_wanted.patch_ver,
- uc_fw->file_selected.major_ver,
- uc_fw->file_selected.minor_ver,
- uc_fw->file_selected.patch_ver);
+ uc_fw->file_wanted.ver.major,
+ uc_fw->file_wanted.ver.minor,
+ uc_fw->file_wanted.ver.patch,
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
else
drm_printf(p, "\tversion: found %u.%u.%u\n",
- uc_fw->file_selected.major_ver,
- uc_fw->file_selected.minor_ver,
- uc_fw->file_selected.patch_ver);
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index bc898ba5355d..6ba00e6b3975 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -61,9 +61,16 @@ enum intel_uc_fw_status {
enum intel_uc_fw_type {
INTEL_UC_FW_TYPE_GUC = 0,
- INTEL_UC_FW_TYPE_HUC
+ INTEL_UC_FW_TYPE_HUC,
+ INTEL_UC_FW_TYPE_GSC,
+};
+#define INTEL_UC_FW_NUM_TYPES 3
+
+struct intel_uc_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 patch;
};
-#define INTEL_UC_FW_NUM_TYPES 2
/*
* The firmware build process will generate a version header file with major and
@@ -72,9 +79,7 @@ enum intel_uc_fw_type {
*/
struct intel_uc_fw_file {
const char *path;
- u16 major_ver;
- u16 minor_ver;
- u16 patch_ver;
+ struct intel_uc_fw_ver ver;
};
/*
@@ -110,11 +115,6 @@ struct intel_uc_fw {
bool loaded_via_gsc;
};
-#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))
-#define GET_UC_VER(uc) (MAKE_UC_VER((uc)->fw.file_selected.major_ver, \
- (uc)->fw.file_selected.minor_ver, \
- (uc)->fw.file_selected.patch_ver))
-
/*
* When we load the uC binaries, we pin them in a reserved section at the top of
* the GGTT, which is ~18 MBs. On multi-GT systems where the GTs share the GGTT,
@@ -205,6 +205,8 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
return "GuC";
case INTEL_UC_FW_TYPE_HUC:
return "HuC";
+ case INTEL_UC_FW_TYPE_GSC:
+ return "GSC";
}
return "uC";
}
@@ -287,6 +289,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
+int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err);
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index 7a411178bdbf..646fa8aa6cf1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -74,7 +74,8 @@ struct uc_css_header {
#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
- u32 reserved0[13];
+ u32 vf_version;
+ u32 reserved0[12];
union {
u32 private_data_size; /* only applies to GuC */
u32 reserved1;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index e28518fe8b90..1fd760539f77 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
+#include "intel_guc_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -65,7 +67,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
goto err;
}
@@ -86,7 +88,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create request %d: %pe\n", i, rq);
goto err;
}
@@ -96,7 +98,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
i915_request_put(last[i]);
@@ -113,7 +115,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -153,7 +155,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- drm_err(&gt->i915->drm, "Context array allocation failed\n");
+ guc_err(guc, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -166,25 +168,25 @@ static int intel_guc_steal_guc_ids(void *arg)
ce[context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -192,9 +194,9 @@ static int intel_guc_steal_guc_ids(void *arg)
while (ret != -EAGAIN) {
ce[++context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
- ret = PTR_ERR(ce[context_index--]);
- ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
+ ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
+ ce[context_index--] = NULL;
goto err_spin_rq;
}
@@ -203,8 +205,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
- context_index, ret);
+ guc_err(guc, "Failed to create request %d: %pe\n",
+ context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
@@ -218,7 +220,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
+ guc_err(guc, "Spin request failed to complete: %pe\n", ERR_PTR(ret));
i915_request_put(last);
goto err_spin_rq;
}
@@ -230,7 +232,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ guc_err(guc, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -238,7 +240,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
+ guc_err(guc, "Failed to steal guc_id %d: %pe\n", context_index, rq);
goto err_spin_rq;
}
@@ -246,20 +248,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
+ guc_err(guc, "Request with stolen guc_id failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ guc_err(guc, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- drm_err(&gt->i915->drm, "No guc_id was stolen");
+ guc_err(guc, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index d91b58f70403..34b5d952e2bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -3,6 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -45,7 +46,7 @@ static int intel_hang_guc(void *arg)
ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx)) {
- drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ gt_err(gt, "Failed get kernel context: %pe\n", ctx);
return PTR_ERR(ctx);
}
@@ -54,7 +55,7 @@ static int intel_hang_guc(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", ce);
goto err;
}
@@ -63,13 +64,13 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -77,28 +78,28 @@ static int intel_hang_guc(void *arg)
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
goto err_spin;
}
ret = request_add_spin(rq, &spin);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin;
}
ret = intel_reset_guc(gt);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ gt_err(gt, "Failed to reset GuC: %pe\n", ERR_PTR(ret));
goto err_spin;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ gt_err(gt, "Failed to reset GuC: status = 0x%08X\n", guc_status);
ret = -EIO;
goto err_spin;
}
@@ -107,12 +108,12 @@ static int intel_hang_guc(void *arg)
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin;
}
if (i915_reset_count(global) == reset_count) {
- drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ gt_err(gt, "Failed to record a GPU reset\n");
ret = -EINVAL;
goto err_spin;
}
@@ -132,7 +133,7 @@ err_spin:
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ gt_err(gt, "No-op failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index d17982c36d25..a40e7c32e613 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -115,30 +116,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
+ gt_err(gt, "Failed creating contexts: %pe\n", parent);
return PTR_ERR(parent);
} else if (!parent) {
- drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
+ gt_dbg(gt, "Not enough engines in class: %d\n", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
+ gt_err(gt, "Failed creating requests: %pe\n", rq);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
+ gt_err(gt, "Failed waiting on request: %pe\n", ERR_PTR(ret));
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
}
out:
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0ebf5fbf0e39..3c4ae1da0d41 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -49,6 +49,7 @@
#include "i915_pvinfo.h"
#include "trace.h"
+#include "display/intel_display.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 9f1c209d9251..baccbf1761b7 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -147,9 +147,25 @@ vgpu_scan_nonprivbb_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
- vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
- "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
+ vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
+ "0x%llx\n");
+
+static int vgpu_status_get(void *data, u64 *val)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+ *val = 0;
+
+ if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
&vgpu_mmio_diff_fops);
- debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
- &vgpu_scan_nonprivbb_fops);
+ debugfs_create_file_unsafe("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+ &vgpu_scan_nonprivbb_fops);
+ debugfs_create_file_unsafe("status", 0644, vgpu->debugfs, vgpu,
+ &vgpu_status_fops);
}
/**
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
- debugfs_remove_recursive(vgpu->debugfs);
- vgpu->debugfs = NULL;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root && gvt->debugfs_root) {
+ debugfs_remove_recursive(vgpu->debugfs);
+ vgpu->debugfs = NULL;
+ }
}
/**
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- debugfs_remove_recursive(gvt->debugfs_root);
- gvt->debugfs_root = NULL;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root) {
+ debugfs_remove_recursive(gvt->debugfs_root);
+ gvt->debugfs_root = NULL;
+ }
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c033249e73f4..e0c5dfb788eb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,6 +36,7 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
@@ -62,7 +63,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -78,7 +79,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -186,8 +187,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
- ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE);
+ vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &=
+ ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK;
@@ -247,8 +248,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -505,7 +506,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -583,7 +584,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
* @turnon: Turn ON/OFF vblank_timer
*
* This function is used to turn on/off or update the per-vGPU vblank_timer
- * when PIPECONF is enabled or disabled. vblank_timer period is also updated
+ * when TRANSCONF is enabled or disabled. vblank_timer period is also updated
* if guest changed the refresh rate.
*
*/
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 355f1c0e8664..6834f9fe40cf 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -42,8 +42,7 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
-static int vgpu_gem_get_pages(
- struct drm_i915_gem_object *obj)
+static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_vgpu *vgpu;
@@ -52,8 +51,12 @@ static int vgpu_gem_get_pages(
int i, j, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
- u32 page_num;
+ unsigned int page_num; /* limited by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num))
+ return -E2BIG;
+
+ page_num = obj->base.size >> PAGE_SHIFT;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (drm_WARN_ON(&dev_priv->drm, !fb_info))
return -ENODEV;
@@ -66,7 +69,6 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- page_num = obj->base.size >> PAGE_SHIFT;
ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
@@ -134,7 +136,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
struct list_head *pos;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
- if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+ !list_empty(&vgpu->dmabuf_obj_list_head)) {
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1b509c1a1e33..7c49a3d673a5 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -32,6 +32,7 @@
*
*/
+#include "display/intel_dp_aux_regs.h"
#include "display/intel_gmbus_regs.h"
#include "gvt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 0daa3931aef7..4eff44194439 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,7 +38,7 @@
#include <linux/types.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
struct intel_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a683c22d5b64..4dd52ac2043e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -45,7 +45,7 @@ struct gvt_firmware_header {
u64 cfg_space_offset; /* offset in the file */
u64 mmio_size;
u64 mmio_offset; /* offset in the file */
- unsigned char data[1];
+ unsigned char data[];
};
#define dev_to_drm_minor(d) dev_get_drvdata((d))
@@ -77,7 +77,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
unsigned long size, crc32_start;
int ret;
- size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
+ size = offsetof(struct gvt_firmware_header, data) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
@@ -171,7 +171,7 @@ static int verify_firmware(struct intel_gvt *gvt,
mem = (fw->data + h->cfg_space_offset);
id = *(u16 *)(mem + PCI_VENDOR_ID);
- VERIFY("vender id", id, pdev->vendor);
+ VERIFY("vendor id", id, pdev->vendor);
id = *(u16 *)(mem + PCI_DEVICE_ID);
VERIFY("device id", id, pdev->device);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 51e5e8fb505b..4ec85308379a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
int idx;
bool ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return false;
idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
PAGE_SIZE, &dma_addr);
- if (ret) {
- ppgtt_invalidate_spt(spt);
- return ret;
- }
+ if (ret)
+ goto err;
sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index);
return 0;
+err:
+ /* Cancel the existing address mappings of DMA addr. */
+ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(sub_spt, &sub_se);
+ }
+ /* Release the newly allocated spt. */
+ trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+ sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+ ppgtt_free_spt(sub_spt);
+ return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62823c0e13ab..2d65800d8e93 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+enum {
+ INTEL_VGPU_STATUS_ATTACHED = 0,
+ INTEL_VGPU_STATUS_ACTIVE,
+ INTEL_VGPU_STATUS_NR_BITS,
+};
+
struct intel_vgpu {
struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
- bool active;
- bool attached;
+ DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
- for_each_if(vgpu->active)
+ for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 735fc83e7026..4b45a041ac5c 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -43,8 +43,13 @@
#include "intel_mchbar_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
+#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_fbc.h"
+#include "display/intel_fdi_regs.h"
+#include "display/intel_pps_regs.h"
+#include "display/intel_psr_regs.h"
+#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
@@ -666,8 +671,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -697,12 +702,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & PIPECONF_ENABLE) {
- vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
+ if (data & TRANSCONF_ENABLE) {
+ vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
vgpu_update_refresh_rate(vgpu);
vgpu_update_vblank_emulation(vgpu, true);
} else {
- vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
+ vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
vgpu_update_vblank_emulation(vgpu, false);
}
return 0;
@@ -2262,10 +2267,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index a6b2021b665f..68eca023bbc6 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* enabled by guest. so if msi_trigger is null, success is still
* returned and don't inject interrupt into guest.
*/
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f5451adcd489..de675d799c7d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!itr->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
continue;
if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (vgpu->attached)
- return -EEXIST;
-
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
- vgpu->attached = true;
-
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_get_kvm(vgpu->vfio_device.kvm);
kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
+ set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
@@ -698,12 +695,11 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (!vgpu->attached)
- return;
-
intel_gvt_release_vgpu(vgpu);
- debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
+ clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
+ debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
vgpu->dma_addr_cache = RB_ROOT;
intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
- vgpu->attached = false;
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
- if (WARN_ON_ONCE(vgpu->attached))
- return;
-
vfio_unregister_group_dev(&vgpu->vfio_device);
vfio_put_device(&vgpu->vfio_device);
}
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
- return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+ return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
struct gvt_dma *entry;
int ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
struct gvt_dma *entry;
int ret = 0;
- if (!vgpu->attached)
- return -ENODEV;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
{
struct gvt_dma *entry;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return;
mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
- if (vgpu->active)
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
intel_vgpu_emulate_vblank(vgpu);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9cd8fcbf7cad..f4055804aad1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
+ intel_vgpu_unpin_mm(workload->shadow_mm);
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
goto out;
}
- if (!scheduler->current_vgpu->active ||
+ if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+ scheduler->current_vgpu->status) ||
list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3c529c2705dd..08ad1bd651f1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = true;
- mutex_unlock(&vgpu->vgpu_lock);
+ set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
}
/**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = false;
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
- drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+ "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
if (ret)
goto out_free_vgpu;
- vgpu->active = false;
-
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
return vgpu;
out_free_vgpu:
@@ -325,7 +323,7 @@ int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_unlock;;
+ goto out_unlock;
vgpu->id = ret;
vgpu->sched_ctl.weight = conf->weight;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 7412abf166a8..8ef93889061a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->tree_lock);
- if (!atomic_read(&ref->count)) /* before the first inc */
- debug_object_activate(ref, &active_debug_desc);
+ debug_object_activate(ref, &active_debug_desc);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -422,12 +421,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
* we can use it to substitute for the pending idle-barrer
* request that we want to emit on the kernel_context.
*/
- __active_del_barrier(ref, node_from_active(active));
- return true;
+ return __active_del_barrier(ref, node_from_active(active));
}
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ u64 idx = i915_request_timeline(rq)->fence_context;
struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
@@ -437,16 +436,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
if (err)
return err;
- active = active_instance(ref, i915_request_timeline(rq)->fence_context);
- if (!active) {
- err = -ENOMEM;
- goto out;
- }
+ do {
+ active = active_instance(ref, idx);
+ if (!active) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (replace_barrier(ref, active)) {
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
+ }
+ } while (unlikely(is_barrier(active)));
- if (replace_barrier(ref, active)) {
- RCU_INIT_POINTER(active->fence, NULL);
- atomic_dec(&ref->count);
- }
if (!__i915_active_fence_set(active, fence))
__i915_active_acquire(ref);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f93e6122f247..ddf49c2dbb91 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1471,8 +1471,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
/* Defer failure until attempted use */
jump_whitelist = alloc_whitelist(batch_length);
- shadow_addr = gen8_canonical_addr(shadow->node.start);
- batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+ shadow_addr = gen8_canonical_addr(i915_vma_offset(shadow));
+ batch_addr = gen8_canonical_addr(i915_vma_offset(batch) + batch_offset);
/*
* We use the batch length as size because the shadow object is as
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index afb828dab53b..24e5bb8a670e 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -3,7 +3,10 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include <linux/kernel.h>
+
+#include "i915_config.h"
+#include "i915_utils.h"
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
diff --git a/drivers/gpu/drm/i915/i915_config.h b/drivers/gpu/drm/i915/i915_config.h
new file mode 100644
index 000000000000..10e18b036489
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_config.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_CONFIG_H__
+#define __I915_CONFIG_H__
+
+#include <linux/types.h>
+#include <linux/limits.h>
+
+struct drm_i915_private;
+
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+ u64 context);
+
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+ return i915_fence_context_timeout(i915, U64_MAX);
+}
+
+#endif /* __I915_CONFIG_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6c7ac73b69a5..80c2bf98e341 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -52,7 +52,6 @@
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "intel_mchbar_regs.h"
-#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -183,7 +182,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
stringify_vma_type(vma),
- vma->node.start, vma->node.size,
+ i915_vma_offset(vma), i915_vma_size(vma),
stringify_page_sizes(vma->resource->page_sizes_gtt,
NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
@@ -575,14 +574,34 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
static int i915_wedged_get(void *data, u64 *val)
{
struct drm_i915_private *i915 = data;
+ struct intel_gt *gt;
+ unsigned int i;
- return intel_gt_debugfs_reset_show(to_gt(i915), val);
+ *val = 0;
+
+ for_each_gt(gt, i915, i) {
+ int ret;
+
+ ret = intel_gt_debugfs_reset_show(gt, val);
+ if (ret)
+ return ret;
+
+ /* at least one tile should be wedged */
+ if (*val)
+ break;
+ }
+
+ return 0;
}
static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- intel_gt_debugfs_reset_store(to_gt(i915), val);
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i)
+ intel_gt_debugfs_reset_store(gt, val);
return 0;
}
@@ -648,13 +667,14 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
+
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ wait_for(intel_engines_are_idle(gt), 200))
intel_gt_set_wedged(gt);
if (val & DROP_RETIRE)
@@ -732,7 +752,11 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i)
+ intel_gt_pm_debugfs_forcewake_user_open(gt);
return 0;
}
@@ -740,7 +764,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i)
+ intel_gt_pm_debugfs_forcewake_user_release(gt);
return 0;
}
@@ -762,7 +790,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
-#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
const char *name;
@@ -795,6 +822,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
}
drm_debugfs_create_files(i915_debugfs_list,
- I915_DEBUGFS_ENTRIES,
+ ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 783c8676eee2..614bde321589 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -230,27 +230,16 @@ i915_debugfs_create_charp(const char *name, umode_t mode,
&i915_param_charp_fops);
}
-static __always_inline void
-_i915_param_create_file(struct dentry *parent, const char *name,
- const char *type, int mode, void *value)
-{
- if (!mode)
- return;
-
- if (!__builtin_strcmp(type, "bool"))
- debugfs_create_bool(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "int"))
- i915_debugfs_create_int(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "unsigned int"))
- i915_debugfs_create_uint(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "unsigned long"))
- debugfs_create_ulong(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "char *"))
- i915_debugfs_create_charp(name, mode, parent, value);
- else
- WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
- type, name);
-}
+#define _i915_param_create_file(parent, name, mode, valp) \
+ do { \
+ if (mode) \
+ _Generic(valp, \
+ bool *: debugfs_create_bool, \
+ int *: i915_debugfs_create_int, \
+ unsigned int *: i915_debugfs_create_uint, \
+ unsigned long *: debugfs_create_ulong, \
+ char **: i915_debugfs_create_charp)(name, mode, parent, valp); \
+ } while (0)
/* add a subdirectory with files for each i915 param */
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
@@ -269,7 +258,7 @@ struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
* just let the generic create file fail silently with -EEXIST.
*/
-#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, mode, &params->x);
I915_PARAMS_FOR_EACH(REGISTER);
#undef REGISTER
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
index 297b8e4e42ee..91c61864285a 100644
--- a/drivers/gpu/drm/i915/i915_deps.c
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -6,7 +6,7 @@
#include <linux/dma-fence.h>
#include <linux/slab.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include "i915_deps.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 69103ae37779..93fdc40d724f 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -34,7 +34,6 @@
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
@@ -73,13 +72,18 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "pxp/intel_pxp.h"
+#include "pxp/intel_pxp_debugfs.h"
#include "pxp/intel_pxp_pm.h"
-#include "i915_file_private.h"
+#include "soc/intel_dram.h"
+#include "soc/intel_gmch.h"
+
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_getparam.h"
#include "i915_hwmon.h"
#include "i915_ioc32.h"
@@ -93,155 +97,16 @@
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
-#include "intel_dram.h"
+#include "intel_clock_gating.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_region_ttm.h"
#include "vlv_suspend.h"
-/* Intel Rapid Start Technology ACPI device name */
-static const char irst_name[] = "INT3392";
-
static const struct drm_driver i915_drm_driver;
-static void i915_release_bridge_dev(struct drm_device *dev,
- void *bridge)
-{
- pci_dev_put(bridge);
-}
-
-static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
-{
- int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
-
- dev_priv->bridge_dev =
- pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
- if (!dev_priv->bridge_dev) {
- drm_err(&dev_priv->drm, "bridge device not found\n");
- return -EIO;
- }
-
- return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
- dev_priv->bridge_dev);
-}
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
-{
- int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret;
-
- if (GRAPHICS_VER(dev_priv) >= 4)
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
- return 0;
-#endif
-
- /* Get some space for it */
- dev_priv->mch_res.name = "i915 MCHBAR";
- dev_priv->mch_res.flags = IORESOURCE_MEM;
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
- &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- return ret;
- }
-
- if (GRAPHICS_VER(dev_priv) >= 4)
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
- return 0;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static void
-intel_setup_mchbar(struct drm_i915_private *dev_priv)
-{
- int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool enabled;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
- dev_priv->mchbar_need_disable = false;
-
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- return;
-
- if (intel_alloc_mchbar_resource(dev_priv))
- return;
-
- dev_priv->mchbar_need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-}
-
-static void
-intel_teardown_mchbar(struct drm_i915_private *dev_priv)
-{
- int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
- if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- u32 deven_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
- &deven_val);
- deven_val &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- deven_val);
- } else {
- u32 mchbar_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
- &mchbar_val);
- mchbar_val &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
- mchbar_val);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
/*
@@ -302,6 +167,8 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
+ pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
if (pre) {
drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -383,22 +250,14 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_pm_setup(dev_priv);
- ret = intel_power_domains_init(dev_priv);
- if (ret < 0)
- goto err_gem;
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
- intel_init_clock_gating_hooks(dev_priv);
+ intel_clock_gating_hooks_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
return 0;
-err_gem:
- i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release_all(dev_priv);
- i915_drm_clients_fini(&dev_priv->clients);
err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
@@ -447,7 +306,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- ret = i915_get_bridge_dev(dev_priv);
+ ret = intel_gmch_bridge_setup(dev_priv);
if (ret < 0)
return ret;
@@ -464,7 +323,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
}
/* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
+ intel_gmch_bar_setup(dev_priv);
intel_device_info_runtime_init(dev_priv);
for_each_gt(gt, dev_priv, i) {
@@ -479,7 +338,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
- intel_teardown_mchbar(dev_priv);
+ intel_gmch_bar_teardown(dev_priv);
return ret;
}
@@ -490,7 +349,7 @@ err_uncore:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_teardown_mchbar(dev_priv);
+ intel_gmch_bar_teardown(dev_priv);
}
/**
@@ -610,11 +469,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- i915_perf_init(dev_priv);
-
- ret = intel_gt_assign_ggtt(to_gt(dev_priv));
+ ret = i915_perf_init(dev_priv);
if (ret)
- goto err_perf;
+ return ret;
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
@@ -628,13 +485,17 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- ret = intel_memory_regions_hw_probe(dev_priv);
+ /*
+ * Make sure we probe lmem before we probe stolen-lmem. The BAR size
+ * might be different due to bar resizing.
+ */
+ ret = intel_gt_tiles_init(dev_priv);
if (ret)
goto err_ggtt;
- ret = intel_gt_tiles_init(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
- goto err_mem_regions;
+ goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
@@ -676,7 +537,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_pcode_init(dev_priv);
if (ret)
- goto err_msi;
+ goto err_opregion;
/*
* Fill the dram structure to get the system dram info. This will be
@@ -697,6 +558,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
return 0;
+err_opregion:
+ intel_opregion_cleanup(dev_priv);
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -722,6 +585,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
i915_perf_fini(dev_priv);
+ intel_opregion_cleanup(dev_priv);
+
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -763,6 +628,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
intel_gt_driver_register(gt);
+ intel_pxp_debugfs_register(dev_priv->pxp);
+
i915_hwmon_register(dev_priv);
intel_display_driver_register(dev_priv);
@@ -794,6 +661,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
+ intel_pxp_fini(dev_priv);
+
for_each_gt(gt, dev_priv, i)
intel_gt_driver_unregister(gt);
@@ -937,6 +806,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
+ intel_pxp_init(i915);
+
ret = intel_modeset_init(i915);
if (ret)
goto out_cleanup_gem;
@@ -1071,10 +942,9 @@ static void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- intel_fbdev_restore_mode(dev);
+ intel_fbdev_restore_mode(i915);
- if (HAS_DISPLAY(i915))
- vga_switcheroo_process_delayed_switch();
+ vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1138,7 +1008,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
- intel_dmc_ucode_suspend(i915);
+ intel_dmc_suspend(i915);
i915_gem_suspend(i915);
@@ -1168,10 +1038,19 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static void i915_drm_complete(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_pxp_resume_complete(i915->pxp);
+}
+
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
+ intel_pxp_suspend_prepare(i915->pxp);
+
/*
* NB intel_display_suspend() may issue new requests after we've
* ostensibly marked the GPU as ready-to-sleep here. We need to
@@ -1206,8 +1085,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev_priv);
-
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
@@ -1221,7 +1098,7 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_dmc_ucode_suspend(dev_priv);
+ intel_dmc_suspend(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1252,6 +1129,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(rpm);
+ intel_pxp_suspend(dev_priv->pxp);
+
i915_gem_suspend_late(dev_priv);
for_each_gt(gt, dev_priv, i)
@@ -1316,7 +1195,8 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1331,10 +1211,15 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(to_gt(dev_priv)->ggtt);
+
+ for_each_gt(gt, dev_priv, i)
+ if (GRAPHICS_VER(gt->i915) >= 8)
+ setup_private_pat(gt);
+
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
- intel_dmc_ucode_resume(dev_priv);
+ intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
@@ -1359,7 +1244,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
intel_modeset_init_hw(dev_priv);
- intel_init_clock_gating(dev_priv);
+ intel_clock_gating_init(dev_priv);
intel_hpd_init(dev_priv);
/* MST sideband requires HPD interrupts enabled */
@@ -1494,8 +1379,6 @@ static int i915_pm_suspend(struct device *kdev)
return -ENODEV;
}
- i915_ggtt_mark_pte_lost(i915, false);
-
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1548,17 +1431,19 @@ static int i915_pm_resume(struct device *kdev)
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- /*
- * If IRST is enabled, or if we can't detect whether it's enabled,
- * then we must assume we lost the GGTT page table entries, since
- * they are not retained if IRST decided to enter S4.
- */
- if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1))
- i915_ggtt_mark_pte_lost(i915, true);
-
return i915_drm_resume(&i915->drm);
}
+static void i915_pm_complete(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return;
+
+ i915_drm_complete(&i915->drm);
+}
+
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
@@ -1615,9 +1500,6 @@ static int i915_pm_restore_early(struct device *kdev)
static int i915_pm_restore(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
-
- i915_ggtt_mark_pte_lost(i915, true);
return i915_pm_resume(kdev);
}
@@ -1641,6 +1523,8 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
+ intel_pxp_runtime_suspend(dev_priv->pxp);
+
for_each_gt(gt, dev_priv, i)
intel_gt_runtime_suspend(gt);
@@ -1745,6 +1629,8 @@ static int intel_runtime_resume(struct device *kdev)
for_each_gt(gt, dev_priv, i)
intel_gt_runtime_resume(gt);
+ intel_pxp_runtime_resume(dev_priv->pxp);
+
/*
* On VLV/CHV display interrupts are part of the display
* power well, so hpd is reinitialized from there. For
@@ -1778,6 +1664,7 @@ const struct dev_pm_ops i915_pm_ops = {
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
+ .complete = i915_pm_complete,
/*
* S4 event handlers
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index b09d1d386574..e8fa172ebe5e 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -130,7 +130,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f)
{
struct drm_file *file = f->private_data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct drm_i915_private *i915 = file_priv->i915;
struct i915_drm_client *client = file_priv->client;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int i;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a380db36d52c..e771fdc3099c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,7 +36,7 @@
#include <drm/ttm/ttm_device.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
#include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
@@ -49,6 +49,8 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
+#include "soc/intel_pch.h"
+
#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
@@ -58,31 +60,46 @@
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
-#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
struct drm_i915_clock_gating_funcs;
-struct drm_i915_gem_object;
-struct drm_i915_private;
-struct intel_connector;
-struct intel_dp;
-struct intel_encoder;
-struct intel_limit;
-struct intel_overlay_error_state;
struct vlv_s0ix_state;
+struct intel_pxp;
-#define I915_GEM_GPU_DOMAINS \
- (I915_GEM_DOMAIN_RENDER | \
- I915_GEM_DOMAIN_SAMPLER | \
- I915_GEM_DOMAIN_COMMAND | \
- I915_GEM_DOMAIN_INSTRUCTION | \
- I915_GEM_DOMAIN_VERTEX)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
-#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
+struct i915_dsm {
+ /*
+ * The start and end of DSM which we can optionally use to create GEM
+ * objects backed by stolen memory.
+ *
+ * Note that usable_size tells us exactly how much of this we are
+ * actually allowed to use, given that some portion of it is in fact
+ * reserved for use by hardware functions.
+ */
+ struct resource stolen;
-#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+ /*
+ * Reserved portion of DSM.
+ */
+ struct resource reserved;
+
+ /*
+ * Total size minus reserved ranges.
+ *
+ * DSM is segmented in hardware with different portions offlimits to
+ * certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of DSM is reserved for hardware
+ * functions and similarly removed from the accessible range.
+ */
+ resource_size_t usable_size;
+};
struct i915_suspend_saved_registers {
u32 saveDSPARB;
@@ -161,19 +178,6 @@ struct i915_gem_mm {
u32 shrink_count;
};
-#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
-
-unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
- u64 context);
-
-static inline unsigned long
-i915_fence_timeout(const struct drm_i915_private *i915)
-{
- return i915_fence_context_timeout(i915, U64_MAX);
-}
-
-#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-
struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
@@ -203,29 +207,7 @@ struct drm_i915_private {
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
- /**
- * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
- * end of stolen which we can optionally use to create GEM objects
- * backed by stolen memory. Note that stolen_usable_size tells us
- * exactly how much of this we are actually allowed to use, given that
- * some portion of it is in fact reserved for use by hardware functions.
- */
- struct resource dsm;
- /**
- * Reseved portion of Data Stolen Memory
- */
- struct resource dsm_reserved;
-
- /*
- * Stolen memory is segmented in hardware with different portions
- * offlimits to certain functions.
- *
- * The drm_mm is initialised to the total accessible range, as found
- * from the PCI config. On Broadwell+, this is further restricted to
- * avoid the first page! The upper end of stolen memory is reserved for
- * hardware functions and similarly removed from the accessible range.
- */
- resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
+ struct i915_dsm dsm;
struct intel_uncore uncore;
struct intel_uncore_mmio_debug mmio_debug;
@@ -234,13 +216,15 @@ struct drm_i915_private {
struct intel_gvt *gvt;
- struct pci_dev *bridge_dev;
+ struct {
+ struct pci_dev *pdev;
+ struct resource mch_res;
+ bool mchbar_need_disable;
+ } gmch;
struct rb_root uabi_engines;
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- struct resource mch_res;
-
/* protects the irq masks */
spinlock_t irq_lock;
@@ -286,8 +270,6 @@ struct drm_i915_private {
struct i915_gem_mm mm;
- bool mchbar_need_disable;
-
struct intel_l3_parity l3_parity;
/*
@@ -298,14 +280,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
- /*
- * Shadows for CHV DPLL_MD regs to keep the state
- * checker somewhat working in the presence hardware
- * crappiness (can't read out DPLL_MD for pipes B & C).
- */
- u32 chv_dpll_md[I915_MAX_PIPES];
- u32 bxt_phy_grc;
-
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
@@ -364,19 +338,13 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
- u8 pch_ssc_use;
+ struct intel_pxp *pxp;
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
bool irq_enabled;
- /*
- * DG2: Mask of PHYs that were not calibrated by the firmware
- * and should not be used.
- */
- u8 snps_phy_failed_calibration;
-
struct i915_pmu pmu;
struct i915_drm_clients clients;
@@ -465,8 +433,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
-#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
-
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
@@ -614,6 +580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_ADLP_RPLP(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
+#define IS_ADLP_RPLU(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -687,22 +655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_TIGERLAKE(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
- (IS_TGL_UY(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
- (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_RKL_DISPLAY_STEP(p, since, until) \
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_DG1_GRAPHICS_STEP(p, since, until) \
- (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_DG1_DISPLAY_STEP(p, since, until) \
- (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
-
#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
(IS_ALDERLAKE_S(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
@@ -726,6 +681,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_SUBPLATFORM(__i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_##variant) && \
IS_GRAPHICS_STEP(__i915, since, until))
+#define IS_MTL_DISPLAY_STEP(__i915, since, until) \
+ (IS_METEORLAKE(__i915) && \
+ IS_DISPLAY_STEP(__i915, since, until))
+
+#define IS_MTL_MEDIA_STEP(__i915, since, until) \
+ (IS_METEORLAKE(__i915) && \
+ IS_MEDIA_STEP(__i915, since, until))
+
/*
* DG2 hardware steppings are a bit unusual. The hardware design was forked to
* create three variants (G10, G11, and G12) which each have distinct
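The IS_MTL_DISPLAY_STEP()/IS_MTL_MEDIA_STEP() helpers added above follow the same shape as the existing per-platform stepping guards. A hypothetical use, with the workaround and stepping bounds chosen only for illustration:

static void mtl_media_workarounds(struct drm_i915_private *i915)
{
	/* Illustrative only; the workaround and stepping bounds are made up. */
	if (IS_MTL_MEDIA_STEP(i915, STEP_A0, STEP_B0))
		apply_pre_b0_media_workaround(i915);	/* hypothetical function */
}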
@@ -850,6 +813,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
+#define HAS_DPT(dev_priv) (DISPLAY_VER(dev_priv) >= 13)
+
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
@@ -874,6 +839,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
#define HAS_HECI_PXP(dev_priv) \
(INTEL_INFO(dev_priv)->has_heci_pxp)
@@ -892,6 +860,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(INTEL_INFO(dev_priv)->has_oa_bpc_reporting)
#define HAS_OA_SLICE_CONTRIB_LIMITS(dev_priv) \
(INTEL_INFO(dev_priv)->has_oa_slice_contrib_limits)
+#define HAS_OAM(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_oam)
/*
* Set this flag, when platform requires 64K GTT page sizes or larger for
@@ -899,7 +869,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
*/
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
-#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv))
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
@@ -918,10 +889,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
-#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
- INTEL_INFO(dev_priv)->has_pxp) && \
- VDBOX_MASK(to_gt(dev_priv)))
-
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
#define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id)
@@ -935,9 +902,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2 : HAS_L3_DPF(dev_priv))
-#define GT_FREQUENCY_MULTIPLIER 50
-#define GEN9_FREQ_SCALER 3
-
#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
diff --git a/drivers/gpu/drm/i915/i915_file_private.h b/drivers/gpu/drm/i915/i915_file_private.h
index f42877869692..c9cb8eecacde 100644
--- a/drivers/gpu/drm/i915/i915_file_private.h
+++ b/drivers/gpu/drm/i915/i915_file_private.h
@@ -15,7 +15,7 @@ struct drm_file;
struct i915_drm_client;
struct drm_i915_file_private {
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
union {
struct drm_file *file;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8468ca9885fd..0a78bdbd36b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,7 +58,7 @@
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_pm.h"
+#include "intel_clock_gating.h"
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
@@ -229,8 +229,9 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
{
unsigned int needs_clflush;
- unsigned int idx, offset;
char __user *user_data;
+ unsigned long offset;
+ pgoff_t idx;
u64 remain;
int ret;
@@ -383,13 +384,17 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ unsigned long remain, offset;
intel_wakeref_t wakeref;
struct drm_mm_node node;
void __user *user_data;
struct i915_vma *vma;
- u64 remain, offset;
int ret = 0;
+ if (overflows_type(args->size, remain) ||
+ overflows_type(args->offset, offset))
+ return -EINVAL;
+
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = i915_gem_gtt_prepare(obj, &node, false);
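The overflows_type() checks added above guard the narrowing from the u64 ioctl arguments to unsigned long locals, which only matters on 32-bit builds. A small sketch of the same idea, outside the patch:

/* Sketch, not from the patch: reject user sizes/offsets that would wrap
 * when narrowed to unsigned long (e.g. a 4 GiB pread on a 32-bit kernel). */
static int check_user_range(u64 size, u64 offset,
			    unsigned long *remain, unsigned long *off)
{
	if (overflows_type(size, *remain) || overflows_type(offset, *off))
		return -EINVAL;

	*remain = size;
	*off = offset;
	return 0;
}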
@@ -439,7 +444,7 @@ out_rpm:
}
/**
- * Reads data from the object referenced by handle.
+ * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
@@ -528,7 +533,7 @@ ggtt_write(struct io_mapping *mapping,
}
/**
- * This is the fast pwrite path, where we copy the data directly from the
+ * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
* @obj: i915 GEM object
* @args: pwrite arguments structure
@@ -540,13 +545,17 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long remain, offset;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
- u64 remain, offset;
void __user *user_data;
int ret = 0;
+ if (overflows_type(args->size, remain) ||
+ overflows_type(args->offset, offset))
+ return -EINVAL;
+
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
@@ -654,8 +663,9 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
{
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
- unsigned int offset, idx;
void __user *user_data;
+ unsigned long offset;
+ pgoff_t idx;
u64 remain;
int ret;
@@ -713,7 +723,7 @@ err_unlock:
}
/**
- * Writes data to the object referenced by handle.
+ * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
@@ -798,7 +808,7 @@ err:
}
/**
- * Called when user space has done writes to this buffer
+ * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
@@ -1099,7 +1109,7 @@ void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
while (atomic_read(&i915->mm.free_count)) {
flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
+ drain_workqueue(i915->bdev.wq);
rcu_barrier();
}
}
@@ -1143,6 +1153,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i) {
intel_uc_fetch_firmwares(&gt->uc);
intel_wopcm_init(&gt->wopcm);
+ if (GRAPHICS_VER(dev_priv) >= 8)
+ setup_private_pat(gt);
}
ret = i915_init_ggtt(dev_priv);
@@ -1152,7 +1164,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
/*
- * Despite its name intel_init_clock_gating applies both display
+ * Despite its name intel_clock_gating_init applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
* GT power context workaround. Worse, sometimes it includes a context
* register workaround which we need to apply before we record the
@@ -1160,7 +1172,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*
* FIXME: break up the workarounds and apply them at the right time!
*/
- intel_init_clock_gating(dev_priv);
+ intel_clock_gating_init(dev_priv);
for_each_gt(gt, dev_priv, i) {
ret = intel_gt_init(gt);
@@ -1204,7 +1216,7 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_ggtt_resume(to_gt(dev_priv)->ggtt);
- intel_init_clock_gating(dev_priv);
+ intel_clock_gating_init(dev_priv);
}
i915_gem_drain_freed_objects(dev_priv);
@@ -1301,7 +1313,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
}
file->driver_priv = file_priv;
- file_priv->dev_priv = i915;
+ file_priv->i915 = i915;
file_priv->file = file;
file_priv->client = client;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a5cdf6662d01..82e9d289398c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -39,6 +39,13 @@ struct i915_gem_ww_ctx;
struct i915_gtt_view;
struct i915_vma;
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
+
void i915_gem_init_early(struct drm_i915_private *i915);
void i915_gem_cleanup_early(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index f025ee4fa526..c02ebd6900ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,16 +43,25 @@ static bool dying_vma(struct i915_vma *vma)
return !kref_read(&vma->obj->base.refcount);
}
-static int ggtt_flush(struct intel_gt *gt)
+static int ggtt_flush(struct i915_address_space *vm)
{
- /*
- * Not everything in the GGTT is tracked via vma (otherwise we
- * could evict as required with minimal stalling) so we are forced
- * to idle the GPU and explicitly retire outstanding requests in
- * the hopes that we can then remove contexts and the like only
- * bound by their active reference.
- */
- return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct intel_gt *gt;
+ int ret = 0;
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
+ /*
+ * Not everything in the GGTT is tracked via vma (otherwise we
+ * could evict as required with minimal stalling) so we are forced
+ * to idle the GPU and explicitly retire outstanding requests in
+ * the hopes that we can then remove contexts and the like only
+ * bound by their active reference.
+ */
+ ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+ return ret;
}
static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
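The per-GT walk introduced in ggtt_flush() reappears twice below when retiring requests before GGTT eviction. A hypothetical wrapper, not part of the patch, makes the recurring pattern explicit:

/* Hypothetical helper: retire requests on every GT sharing this GGTT,
 * mirroring the gt_list walk in ggtt_flush() above. */
static void ggtt_retire_requests(struct i915_ggtt *ggtt)
{
	struct intel_gt *gt;

	list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
		intel_gt_retire_requests(gt);
}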
@@ -149,6 +158,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct drm_mm_node *node;
enum drm_mm_insert_mode mode;
struct i915_vma *active;
+ struct intel_gt *gt;
int ret;
lockdep_assert_held(&vm->mutex);
@@ -174,7 +184,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
min_size, alignment, color,
start, end, mode);
- intel_gt_retire_requests(vm->gt);
+ if (i915_is_ggtt(vm)) {
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_retire_requests(gt);
+ } else {
+ intel_gt_retire_requests(vm->gt);
+ }
search_again:
active = NULL;
@@ -246,7 +263,7 @@ search_again:
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
- ret = ggtt_flush(vm->gt);
+ ret = ggtt_flush(vm);
if (ret)
return ret;
@@ -332,7 +349,15 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
*/
- intel_gt_retire_requests(vm->gt);
+ if (i915_is_ggtt(vm)) {
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct intel_gt *gt;
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_retire_requests(gt);
+ } else {
+ intel_gt_retire_requests(vm->gt);
+ }
if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
@@ -416,6 +441,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* @vm: Address space to cleanse
* @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
* will be able to evict vma's locked by the ww as well.
+ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
+ * in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
+ * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
+ * the vm->mutex, before trying again to acquire the contended lock. The caller
+ * also owns a reference to the object.
*
* This function evicts all vmas from a vm.
*
@@ -425,7 +455,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo)
{
int ret = 0;
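The @busy_bo documentation above implies a retry loop in callers of the new signature. A hedged sketch of that pattern; the helper name and control flow are illustrative, only the i915_gem_evict_vm(), i915_gem_object_lock() and i915_gem_object_put() calls come from existing i915 code:

/* Sketch of the -EBUSY retry dance enabled by @busy_bo; not from the patch. */
static int evict_vm_with_retry(struct i915_address_space *vm,
			       struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *busy_bo = NULL;
	int err;

	do {
		mutex_lock(&vm->mutex);
		err = i915_gem_evict_vm(vm, ww, &busy_bo);
		mutex_unlock(&vm->mutex);
		if (err != -EBUSY || !busy_bo)
			break;

		/* Take the contended lock outside vm->mutex, then try again. */
		err = i915_gem_object_lock(busy_bo, ww);
		i915_gem_object_put(busy_bo);	/* drop the ref evict_vm handed us */
		busy_bo = NULL;
	} while (err == 0);

	return err;
}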
@@ -438,7 +469,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
- ret = ggtt_flush(vm->gt);
+ ret = ggtt_flush(vm);
if (ret)
return ret;
}
@@ -457,15 +488,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
* the resv is shared among multiple objects, we still
* need the object ref.
*/
- if (dying_vma(vma) ||
+ if (!i915_gem_object_get_rcu(vma->obj) ||
(ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
__i915_vma_pin(vma);
list_add(&vma->evict_link, &locked_eviction_list);
continue;
}
- if (!i915_gem_object_trylock(vma->obj, ww))
+ if (!i915_gem_object_trylock(vma->obj, ww)) {
+ if (busy_bo) {
+ *busy_bo = vma->obj; /* holds ref */
+ ret = -EBUSY;
+ break;
+ }
+ i915_gem_object_put(vma->obj);
continue;
+ }
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
@@ -473,25 +511,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
break;
- ret = 0;
/* Unbind locked objects first, before unlocking the eviction_list */
list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ if (!dying_vma(vma))
+ i915_gem_object_put(vma->obj);
}
list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
}
} while (ret == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h
index e593c530f9bd..bf0ee0e4fe60 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.h
+++ b/drivers/gpu/drm/i915/i915_gem_evict.h
@@ -11,6 +11,7 @@
struct drm_mm_node;
struct i915_address_space;
struct i915_gem_ww_ctx;
+struct drm_i915_gem_object;
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
struct drm_mm_node *node,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm,
- struct i915_gem_ww_ctx *ww);
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo);
#endif /* __I915_GEM_EVICT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8c2f57eb5dda..3d77679bf211 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -18,6 +18,8 @@ struct drm_i915_gem_object;
struct i915_address_space;
struct i915_gem_ww_ctx;
+#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
@@ -44,7 +46,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_HIGH BIT_ULL(5)
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_VALIDATE BIT_ULL(8) /* validate placement only, no need to call unpin() */
+#define PIN_OFFSET_GUARD BIT_ULL(8)
+#define PIN_VALIDATE BIT_ULL(9) /* validate placement only, no need to call unpin() */
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 61ef2d9cfa62..2238e096c957 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -173,7 +173,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = INTEL_INFO(i915)->has_coherent_ggtt;
break;
case I915_PARAM_PERF_REVISION:
- value = i915_perf_ioctl_version();
+ value = i915_perf_ioctl_version(i915);
break;
case I915_PARAM_OA_TIMESTAMP_FREQUENCY:
value = i915_perf_oa_timestamp_frequency(i915);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9d5d5a397b64..f020c0086fbc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -505,6 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active,
ctx->total_runtime, ctx->avg_runtime);
+ err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno);
}
static struct i915_vma_coredump *
@@ -1370,14 +1371,14 @@ static void engine_record_execlists(struct intel_engine_coredump *ee)
}
static bool record_context(struct i915_gem_context_coredump *e,
- const struct i915_request *rq)
+ struct intel_context *ce)
{
struct i915_gem_context *ctx;
struct task_struct *task;
bool simulated;
rcu_read_lock();
- ctx = rcu_dereference(rq->context->gem_context);
+ ctx = rcu_dereference(ce->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
@@ -1395,9 +1396,11 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
+ e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
+ *ce->timeline->hwsp_seqno : ~0U;
- e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
- e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);
+ e->total_runtime = intel_context_get_total_runtime_ns(ce);
+ e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
simulated = i915_gem_context_no_error_capture(ctx);
@@ -1532,15 +1535,37 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_
return ee;
}
+static struct intel_engine_capture_vma *
+engine_coredump_add_context(struct intel_engine_coredump *ee,
+ struct intel_context *ce,
+ gfp_t gfp)
+{
+ struct intel_engine_capture_vma *vma = NULL;
+
+ ee->simulated |= record_context(&ee->context, ce);
+ if (ee->simulated)
+ return NULL;
+
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
+ vma = capture_vma(vma, ce->state, "HW context", gfp);
+
+ return vma;
+}
+
struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
struct i915_request *rq,
gfp_t gfp)
{
- struct intel_engine_capture_vma *vma = NULL;
+ struct intel_engine_capture_vma *vma;
- ee->simulated |= record_context(&ee->context, rq);
- if (ee->simulated)
+ vma = engine_coredump_add_context(ee, rq->context, gfp);
+ if (!vma)
return NULL;
/*
@@ -1550,8 +1575,6 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
*/
vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
vma = capture_user(vma, rq, gfp);
- vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
- vma = capture_vma(vma, rq->context->state, "HW context", gfp);
ee->rq_head = rq->head;
ee->rq_post = rq->postfix;
@@ -1596,54 +1619,36 @@ capture_engine(struct intel_engine_cs *engine,
{
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
- struct intel_context *ce;
+ struct intel_context *ce = NULL;
struct i915_request *rq = NULL;
- unsigned long flags;
ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
if (!ee)
return NULL;
- ce = intel_engine_get_hung_context(engine);
- if (ce) {
- intel_engine_clear_hung_context(engine);
- rq = intel_context_find_active_request(ce);
- if (!rq || !i915_request_started(rq))
- goto no_request_capture;
- } else {
- /*
- * Getting here with GuC enabled means it is a forced error capture
- * with no actual hang. So, no need to attempt the execlist search.
- */
- if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
- rq = intel_engine_execlist_find_hung_request(engine);
- spin_unlock_irqrestore(&engine->sched_engine->lock,
- flags);
- }
- }
- if (rq)
- rq = i915_request_get_rcu(rq);
+ intel_engine_get_hung_entity(engine, &ce, &rq);
+ if (rq && !i915_request_started(rq))
+ drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
+ engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
- if (!rq)
- goto no_request_capture;
-
- capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
- if (!capture) {
+ if (rq) {
+ capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
i915_request_put(rq);
- goto no_request_capture;
+ } else if (ce) {
+ capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
}
- if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
- intel_guc_capture_get_matching_node(engine->gt, ee, ce);
- intel_engine_coredump_add_vma(ee, capture, compress);
- i915_request_put(rq);
+ if (capture) {
+ intel_engine_coredump_add_vma(ee, capture, compress);
- return ee;
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ intel_guc_capture_get_matching_node(engine->gt, ee, ce);
+ } else {
+ kfree(ee);
+ ee = NULL;
+ }
-no_request_capture:
- kfree(ee);
- return NULL;
+ return ee;
}
static void
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h